NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/examples/libpmemobj/hashmap/hashmap_tx.c
/*
* Copyright 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* integer hash set implementation which uses only transaction APIs */
#include <stdlib.h>
#include <stdio.h>
#include <errno.h>
#include <inttypes.h>
#include <libpmemobj.h>
#include "hashmap_tx.h"
#include "hashmap_internal.h"
/* layout definition */
TOID_DECLARE(struct buckets, HASHMAP_TX_TYPE_OFFSET + 1);
TOID_DECLARE(struct entry, HASHMAP_TX_TYPE_OFFSET + 2);
struct entry {
uint64_t key;
PMEMoid value;
/* next entry list pointer */
TOID(struct entry) next;
};
struct buckets {
/* number of buckets */
size_t nbuckets;
/* array of lists */
TOID(struct entry) bucket[];
};
struct hashmap_tx {
/* random number generator seed */
uint32_t seed;
/* hash function coefficients */
uint32_t hash_fun_a;
uint32_t hash_fun_b;
uint64_t hash_fun_p;
/* number of values inserted */
uint64_t count;
/* buckets */
TOID(struct buckets) buckets;
};
/*
* create_hashmap -- hashmap initializer
*/
static void
create_hashmap(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap, uint32_t seed)
{
size_t len = INIT_BUCKETS_NUM;
size_t sz = sizeof(struct buckets) +
len * sizeof(TOID(struct entry));
TX_BEGIN(pop) {
TX_ADD(hashmap);
D_RW(hashmap)->seed = seed;
do {
D_RW(hashmap)->hash_fun_a = (uint32_t)rand();
} while (D_RW(hashmap)->hash_fun_a == 0);
D_RW(hashmap)->hash_fun_b = (uint32_t)rand();
D_RW(hashmap)->hash_fun_p = HASH_FUNC_COEFF_P;
D_RW(hashmap)->buckets = TX_ZALLOC(struct buckets, sz);
D_RW(D_RW(hashmap)->buckets)->nbuckets = len;
} TX_ONABORT {
fprintf(stderr, "%s: transaction aborted: %s\n", __func__,
pmemobj_errormsg());
abort();
} TX_END
}
/*
* hash -- the simplest hashing function,
* see https://en.wikipedia.org/wiki/Universal_hashing#Hashing_integers
*/
static uint64_t
hash(const TOID(struct hashmap_tx) *hashmap,
const TOID(struct buckets) *buckets, uint64_t value)
{
uint32_t a = D_RO(*hashmap)->hash_fun_a;
uint32_t b = D_RO(*hashmap)->hash_fun_b;
uint64_t p = D_RO(*hashmap)->hash_fun_p;
size_t len = D_RO(*buckets)->nbuckets;
return ((a * value + b) % p) % len;
}
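/*
 * Worked example with illustrative coefficients (not the real ones): for
 * a = 3, b = 5, p = 7 and len = 4 buckets, key 10 maps to
 * ((3 * 10 + 5) % 7) % 4 = (35 % 7) % 4 = 0, i.e. bucket 0. Choosing a
 * and b at random makes collisions between any two fixed keys unlikely,
 * whatever the key distribution.
 */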
/*
* hm_tx_rebuild -- rebuilds the hashmap with a new number of buckets
*/
static void
hm_tx_rebuild(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap, size_t new_len)
{
TOID(struct buckets) buckets_old = D_RO(hashmap)->buckets;
if (new_len == 0)
new_len = D_RO(buckets_old)->nbuckets;
size_t sz_old = sizeof(struct buckets) +
D_RO(buckets_old)->nbuckets *
sizeof(TOID(struct entry));
size_t sz_new = sizeof(struct buckets) +
new_len * sizeof(TOID(struct entry));
TX_BEGIN(pop) {
TX_ADD_FIELD(hashmap, buckets);
TOID(struct buckets) buckets_new =
TX_ZALLOC(struct buckets, sz_new);
D_RW(buckets_new)->nbuckets = new_len;
pmemobj_tx_add_range(buckets_old.oid, 0, sz_old);
for (size_t i = 0; i < D_RO(buckets_old)->nbuckets; ++i) {
while (!TOID_IS_NULL(D_RO(buckets_old)->bucket[i])) {
TOID(struct entry) en =
D_RO(buckets_old)->bucket[i];
uint64_t h = hash(&hashmap, &buckets_new,
D_RO(en)->key);
D_RW(buckets_old)->bucket[i] = D_RO(en)->next;
TX_ADD_FIELD(en, next);
D_RW(en)->next = D_RO(buckets_new)->bucket[h];
D_RW(buckets_new)->bucket[h] = en;
}
}
D_RW(hashmap)->buckets = buckets_new;
TX_FREE(buckets_old);
} TX_ONABORT {
fprintf(stderr, "%s: transaction aborted: %s\n", __func__,
pmemobj_errormsg());
/*
* We don't need to do anything here, because everything is
* consistent. The only thing affected is performance.
*/
} TX_END
}
/*
* hm_tx_insert -- inserts specified value into the hashmap,
* returns:
* - 0 if successful,
* - 1 if value already existed,
* - -1 if something bad happened
*/
int
hm_tx_insert(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap,
uint64_t key, PMEMoid value)
{
TOID(struct buckets) buckets = D_RO(hashmap)->buckets;
TOID(struct entry) var;
uint64_t h = hash(&hashmap, &buckets, key);
int num = 0;
for (var = D_RO(buckets)->bucket[h];
!TOID_IS_NULL(var);
var = D_RO(var)->next) {
if (D_RO(var)->key == key)
return 1;
num++;
}
int ret = 0;
TX_BEGIN(pop) {
TX_ADD_FIELD(D_RO(hashmap)->buckets, bucket[h]);
TX_ADD_FIELD(hashmap, count);
TOID(struct entry) e = TX_NEW(struct entry);
D_RW(e)->key = key;
D_RW(e)->value = value;
D_RW(e)->next = D_RO(buckets)->bucket[h];
D_RW(buckets)->bucket[h] = e;
D_RW(hashmap)->count++;
num++;
} TX_ONABORT {
fprintf(stderr, "transaction aborted: %s\n",
pmemobj_errormsg());
ret = -1;
} TX_END
if (ret)
return ret;
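	/*
	 * Resize heuristic: grow when a single chain is unambiguously too
	 * long, or when chains are moderately long while the overall load
	 * factor (count / nbuckets) already exceeds 2.
	 */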
if (num > MAX_HASHSET_THRESHOLD ||
(num > MIN_HASHSET_THRESHOLD &&
D_RO(hashmap)->count > 2 * D_RO(buckets)->nbuckets))
hm_tx_rebuild(pop, hashmap, D_RO(buckets)->nbuckets * 2);
return 0;
}
/*
* hm_tx_remove -- removes specified value from the hashmap,
* returns:
* - key's value if successful,
* - OID_NULL if value didn't exist or if something bad happened
*/
PMEMoid
hm_tx_remove(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap, uint64_t key)
{
TOID(struct buckets) buckets = D_RO(hashmap)->buckets;
TOID(struct entry) var, prev = TOID_NULL(struct entry);
uint64_t h = hash(&hashmap, &buckets, key);
for (var = D_RO(buckets)->bucket[h];
!TOID_IS_NULL(var);
prev = var, var = D_RO(var)->next) {
if (D_RO(var)->key == key)
break;
}
if (TOID_IS_NULL(var))
return OID_NULL;
int ret = 0;
PMEMoid retoid = D_RO(var)->value;
TX_BEGIN(pop) {
if (TOID_IS_NULL(prev))
TX_ADD_FIELD(D_RO(hashmap)->buckets, bucket[h]);
else
TX_ADD_FIELD(prev, next);
TX_ADD_FIELD(hashmap, count);
if (TOID_IS_NULL(prev))
D_RW(buckets)->bucket[h] = D_RO(var)->next;
else
D_RW(prev)->next = D_RO(var)->next;
D_RW(hashmap)->count--;
TX_FREE(var);
} TX_ONABORT {
fprintf(stderr, "transaction aborted: %s\n",
pmemobj_errormsg());
ret = -1;
} TX_END
if (ret)
return OID_NULL;
if (D_RO(hashmap)->count < D_RO(buckets)->nbuckets)
hm_tx_rebuild(pop, hashmap, D_RO(buckets)->nbuckets / 2);
return retoid;
}
/*
 * hm_tx_foreach -- calls cb for every key/value pair in the hashmap
*/
int
hm_tx_foreach(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap,
int (*cb)(uint64_t key, PMEMoid value, void *arg), void *arg)
{
TOID(struct buckets) buckets = D_RO(hashmap)->buckets;
TOID(struct entry) var;
int ret = 0;
for (size_t i = 0; i < D_RO(buckets)->nbuckets; ++i) {
if (TOID_IS_NULL(D_RO(buckets)->bucket[i]))
continue;
for (var = D_RO(buckets)->bucket[i]; !TOID_IS_NULL(var);
var = D_RO(var)->next) {
ret = cb(D_RO(var)->key, D_RO(var)->value, arg);
if (ret)
break;
}
}
return ret;
}
/*
* hm_tx_debug -- prints complete hashmap state
*/
static void
hm_tx_debug(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap, FILE *out)
{
TOID(struct buckets) buckets = D_RO(hashmap)->buckets;
TOID(struct entry) var;
fprintf(out, "a: %u b: %u p: %" PRIu64 "\n", D_RO(hashmap)->hash_fun_a,
D_RO(hashmap)->hash_fun_b, D_RO(hashmap)->hash_fun_p);
fprintf(out, "count: %" PRIu64 ", buckets: %zu\n",
D_RO(hashmap)->count, D_RO(buckets)->nbuckets);
for (size_t i = 0; i < D_RO(buckets)->nbuckets; ++i) {
if (TOID_IS_NULL(D_RO(buckets)->bucket[i]))
continue;
int num = 0;
fprintf(out, "%zu: ", i);
for (var = D_RO(buckets)->bucket[i]; !TOID_IS_NULL(var);
var = D_RO(var)->next) {
fprintf(out, "%" PRIu64 " ", D_RO(var)->key);
num++;
}
fprintf(out, "(%d)\n", num);
}
}
/*
 * hm_tx_get -- returns the value stored under the specified key, or OID_NULL
*/
PMEMoid
hm_tx_get(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap, uint64_t key)
{
TOID(struct buckets) buckets = D_RO(hashmap)->buckets;
TOID(struct entry) var;
uint64_t h = hash(&hashmap, &buckets, key);
for (var = D_RO(buckets)->bucket[h];
!TOID_IS_NULL(var);
var = D_RO(var)->next)
if (D_RO(var)->key == key)
return D_RO(var)->value;
return OID_NULL;
}
/*
 * hm_tx_lookup -- checks whether the specified key exists in the hashmap
*/
int
hm_tx_lookup(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap, uint64_t key)
{
TOID(struct buckets) buckets = D_RO(hashmap)->buckets;
TOID(struct entry) var;
uint64_t h = hash(&hashmap, &buckets, key);
for (var = D_RO(buckets)->bucket[h];
!TOID_IS_NULL(var);
var = D_RO(var)->next)
if (D_RO(var)->key == key)
return 1;
return 0;
}
/*
* hm_tx_count -- returns number of elements
*/
size_t
hm_tx_count(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap)
{
return D_RO(hashmap)->count;
}
/*
* hm_tx_init -- recovers hashmap state, called after pmemobj_open
*/
int
hm_tx_init(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap)
{
srand(D_RO(hashmap)->seed);
return 0;
}
/*
* hm_tx_create -- allocates new hashmap
*/
int
hm_tx_create(PMEMobjpool *pop, TOID(struct hashmap_tx) *map, void *arg)
{
struct hashmap_args *args = (struct hashmap_args *)arg;
int ret = 0;
TX_BEGIN(pop) {
TX_ADD_DIRECT(map);
*map = TX_ZNEW(struct hashmap_tx);
uint32_t seed = args ? args->seed : 0;
create_hashmap(pop, *map, seed);
} TX_ONABORT {
ret = -1;
} TX_END
return ret;
}
/*
* hm_tx_check -- checks if specified persistent object is an
* instance of hashmap
*/
int
hm_tx_check(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap)
{
return TOID_IS_NULL(hashmap) || !TOID_VALID(hashmap);
}
/*
* hm_tx_cmd -- execute cmd for hashmap
*/
int
hm_tx_cmd(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap,
unsigned cmd, uint64_t arg)
{
switch (cmd) {
case HASHMAP_CMD_REBUILD:
hm_tx_rebuild(pop, hashmap, arg);
return 0;
case HASHMAP_CMD_DEBUG:
if (!arg)
return -EINVAL;
hm_tx_debug(pop, hashmap, (FILE *)arg);
return 0;
default:
return -EINVAL;
}
}
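/*
 * Example (not part of the original file): a minimal sketch showing how
 * this API is typically driven. It assumes the TOID(struct hashmap_tx)
 * handle lives inside the pool (e.g. in the root object), which
 * hm_tx_create() requires for TX_ADD_DIRECT(); names here are
 * illustrative only.
 */
#if 0 /* illustrative sketch, not compiled */
static void
example_hashmap_tx(PMEMobjpool *pop, TOID(struct hashmap_tx) *map)
{
	if (hm_tx_create(pop, map, NULL))	/* NULL arg -> seed 0 */
		abort();
	/* returns 0 on success, 1 if the key is already present */
	hm_tx_insert(pop, *map, 42, OID_NULL);
	PMEMoid v = hm_tx_get(pop, *map, 42);	/* OID_NULL if absent */
	(void)v;
	hm_tx_remove(pop, *map, 42);	/* returns the removed value */
}
#endif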
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/examples/libpmemobj/hashmap/hashmap_rp.c
/*
* Copyright 2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
 * Integer hash set implementation with open addressing and Robin Hood
 * collision resolution, using the action.h reserve/publish API.
*/
#include <assert.h>
#include <stdlib.h>
#include <stdio.h>
#include <errno.h>
#include <inttypes.h>
#include <libpmemobj.h>
#include "hashmap_rp.h"
#define TOMBSTONE_MASK (1ULL << 63)
#ifdef DEBUG
#define HM_ASSERT(cnd) assert(cnd)
#else
#define HM_ASSERT(cnd)
#endif
/* layout definition */
TOID_DECLARE(struct entry, HASHMAP_RP_TYPE_OFFSET + 1);
struct entry {
uint64_t key;
PMEMoid value;
uint64_t hash;
};
struct add_entry {
struct entry data;
/* position index in hashmap, where data should be inserted/updated */
size_t pos;
/* Action array to perform addition in set of actions */
struct pobj_action *actv;
/* Action array index counter */
size_t actv_cnt;
#ifdef DEBUG
/* Swaps counter for current insertion. Enabled in debug mode */
int swaps;
#endif
};
struct hashmap_rp {
/* number of values inserted */
uint64_t count;
/* container capacity */
uint64_t capacity;
/* resize threshold */
uint64_t resize_threshold;
/* entries */
TOID(struct entry) entries;
};
int *swaps_array = NULL;
#ifdef DEBUG
static inline int
is_power_of_2(uint64_t v)
{
return v && !(v & (v - 1));
}
#endif
/*
 * entry_is_deleted -- checks the 'tombstone' bit marking a deleted hash
*/
static inline int
entry_is_deleted(uint64_t hash)
{
return (hash & TOMBSTONE_MASK) > 0;
}
/*
* entry_is_empty -- checks if entry is empty
*/
static inline int
entry_is_empty(uint64_t hash)
{
return hash == 0 || entry_is_deleted(hash);
}
/*
* increment_pos -- increment position index, skip 0
*/
static uint64_t
increment_pos(const struct hashmap_rp *hashmap, uint64_t pos)
{
HM_ASSERT(is_power_of_2(hashmap->capacity));
pos = (pos + 1) & (hashmap->capacity - 1);
return pos == 0 ? 1 : pos;
}
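/*
 * Example: with capacity 8 and pos 7, (7 + 1) & 7 == 0, so the function
 * returns 1. Index 0 is skipped because hash() below never returns 0 --
 * a stored hash of 0 marks a never-used slot -- so no element can ever
 * live at index 0.
 */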
/*
 * probe_distance -- returns the probe number: how far from its desired
 * position the given hash is stored in the hashmap
*/
static int
probe_distance(const struct hashmap_rp *hashmap, uint64_t hash_key,
uint64_t slot_index)
{
uint64_t capacity = hashmap->capacity;
HM_ASSERT(is_power_of_2(hashmap->capacity));
return (int)(slot_index + capacity - hash_key) & (capacity - 1);
}
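/*
 * Example: with capacity 8, an entry whose home slot (hash) is 6 but
 * which currently sits at slot 1 has probe distance (1 + 8 - 6) & 7 == 3:
 * it lives three probes past its ideal position, wrapping around the end
 * of the table.
 */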
/*
* hash -- hash function based on Austin Appleby MurmurHash3 64-bit finalizer.
 * Returned value is modified to work with special values for unused and
 * deleted hashes.
*/
static uint64_t
hash(const struct hashmap_rp *hashmap, uint64_t key)
{
key ^= key >> 33;
key *= 0xff51afd7ed558ccd;
key ^= key >> 33;
key *= 0xc4ceb9fe1a85ec53;
key ^= key >> 33;
HM_ASSERT(is_power_of_2(hashmap->capacity));
key &= hashmap->capacity - 1;
/* first, 'tombstone' bit is used to indicate deleted item */
key &= ~TOMBSTONE_MASK;
/*
 * Ensure that we never return 0 as a hash, since we use 0 to
 * indicate that a slot has never been used at all.
*/
return key == 0 ? 1 : key;
}
/*
* hashmap_create -- hashmap initializer
*/
static void
hashmap_create(PMEMobjpool *pop, TOID(struct hashmap_rp) *hashmap_p,
uint32_t seed)
{
struct pobj_action actv[4];
size_t actv_cnt = 0;
TOID(struct hashmap_rp) hashmap =
POBJ_RESERVE_NEW(pop, struct hashmap_rp, &actv[actv_cnt]);
if (TOID_IS_NULL(hashmap))
goto reserve_err;
actv_cnt++;
D_RW(hashmap)->count = 0;
D_RW(hashmap)->capacity = INIT_ENTRIES_NUM_RP;
D_RW(hashmap)->resize_threshold = (uint64_t)(INIT_ENTRIES_NUM_RP *
HASHMAP_RP_LOAD_FACTOR);
size_t sz = sizeof(struct entry) * D_RO(hashmap)->capacity;
/* init entries with zero in order to track unused hashes */
D_RW(hashmap)->entries = POBJ_XRESERVE_ALLOC(pop, struct entry, sz,
&actv[actv_cnt], POBJ_XALLOC_ZERO);
if (TOID_IS_NULL(D_RO(hashmap)->entries))
goto reserve_err;
actv_cnt++;
	pmemobj_persist(pop, D_RW(hashmap), sizeof(*D_RW(hashmap)));
pmemobj_set_value(pop, &actv[actv_cnt++], &hashmap_p->oid.pool_uuid_lo,
hashmap.oid.pool_uuid_lo);
pmemobj_set_value(pop, &actv[actv_cnt++], &hashmap_p->oid.off,
hashmap.oid.off);
pmemobj_publish(pop, actv, actv_cnt);
#ifdef DEBUG
swaps_array = (int *)calloc(INIT_ENTRIES_NUM_RP, sizeof(int));
if (!swaps_array)
abort();
#endif
return;
reserve_err:
fprintf(stderr, "hashmap alloc failed: %s\n", pmemobj_errormsg());
pmemobj_cancel(pop, actv, actv_cnt);
abort();
}
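/*
 * Note on the pattern above: POBJ_RESERVE_NEW()/POBJ_XRESERVE_ALLOC() only
 * reserve memory; nothing becomes persistently reachable until
 * pmemobj_publish() commits all queued actions atomically, and
 * pmemobj_cancel() drops the reservations on failure. A crash before
 * publish therefore leaks nothing.
 */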
/*
* entry_update -- updates entry in given hashmap with given arguments
*/
static void
entry_update(PMEMobjpool *pop, struct hashmap_rp *hashmap,
struct add_entry *args, int rebuild)
{
HM_ASSERT(HASHMAP_RP_MAX_ACTIONS > args->actv_cnt + 4);
struct entry *entry_p = D_RW(hashmap->entries);
entry_p += args->pos;
if (rebuild == HASHMAP_RP_REBUILD) {
entry_p->key = args->data.key;
entry_p->value = args->data.value;
entry_p->hash = args->data.hash;
} else {
pmemobj_set_value(pop, args->actv + args->actv_cnt++,
&entry_p->key, args->data.key);
pmemobj_set_value(pop, args->actv + args->actv_cnt++,
&entry_p->value.pool_uuid_lo,
args->data.value.pool_uuid_lo);
pmemobj_set_value(pop, args->actv + args->actv_cnt++,
&entry_p->value.off, args->data.value.off);
pmemobj_set_value(pop, args->actv + args->actv_cnt++,
&entry_p->hash, args->data.hash);
}
#ifdef DEBUG
	assert(args->pos < hashmap->capacity);
swaps_array[args->pos] = args->swaps;
#endif
}
/*
* entry_add -- increments given hashmap's elements counter and calls
* entry_update
*/
static void
entry_add(PMEMobjpool *pop, struct hashmap_rp *hashmap, struct add_entry *args,
int rebuild)
{
HM_ASSERT(HASHMAP_RP_MAX_ACTIONS > args->actv_cnt + 1);
if (rebuild == HASHMAP_RP_REBUILD)
hashmap->count++;
else {
pmemobj_set_value(pop, args->actv + args->actv_cnt++,
&hashmap->count, hashmap->count + 1);
}
entry_update(pop, hashmap, args, rebuild);
}
/*
* insert_helper -- inserts specified value into the hashmap
* If function was called during rebuild process, no redo logs will be used.
* returns:
* - 0 if successful,
* - 1 if value already existed
* - -1 on error
*/
static int
insert_helper(PMEMobjpool *pop, struct hashmap_rp *hashmap, uint64_t key,
PMEMoid value, int rebuild)
{
HM_ASSERT(hashmap->count + 1 < hashmap->resize_threshold);
struct pobj_action actv[HASHMAP_RP_MAX_ACTIONS];
struct add_entry args;
args.data.key = key;
args.data.value = value;
args.data.hash = hash(hashmap, key);
args.pos = args.data.hash;
if (rebuild != HASHMAP_RP_REBUILD) {
args.actv = actv;
args.actv_cnt = 0;
}
int dist = 0;
struct entry *entry_p = NULL;
#ifdef DEBUG
int swaps = 0;
#endif
for (int n = 0; n < HASHMAP_RP_MAX_SWAPS; ++n) {
entry_p = D_RW(hashmap->entries);
entry_p += args.pos;
#ifdef DEBUG
args.swaps = swaps;
#endif
/* Case 1: key already exists, override value */
if (!entry_is_empty(entry_p->hash) &&
entry_p->key == args.data.key) {
entry_update(pop, hashmap, &args, rebuild);
if (rebuild != HASHMAP_RP_REBUILD)
pmemobj_publish(pop, args.actv, args.actv_cnt);
return 1;
}
/* Case 2: slot is empty from the beginning */
if (entry_p->hash == 0) {
entry_add(pop, hashmap, &args, rebuild);
if (rebuild != HASHMAP_RP_REBUILD)
pmemobj_publish(pop, args.actv, args.actv_cnt);
return 0;
}
/*
* Case 3: existing element (or tombstone) has probed less than
* current element. Swap them (or put into tombstone slot) and
* keep going to find another slot for that element.
*/
int existing_dist = probe_distance(hashmap, entry_p->hash,
args.pos);
if (existing_dist < dist) {
if (entry_is_deleted(entry_p->hash)) {
entry_add(pop, hashmap, &args, rebuild);
if (rebuild != HASHMAP_RP_REBUILD)
pmemobj_publish(pop, args.actv,
args.actv_cnt);
return 0;
}
struct entry temp = *entry_p;
entry_update(pop, hashmap, &args, rebuild);
args.data = temp;
#ifdef DEBUG
swaps++;
#endif
dist = existing_dist;
}
/*
* Case 4: increment slot number and probe counter, keep going
* to find free slot
*/
args.pos = increment_pos(hashmap, args.pos);
dist += 1;
}
fprintf(stderr, "insertion requires too many swaps\n");
if (rebuild != HASHMAP_RP_REBUILD)
pmemobj_cancel(pop, args.actv, args.actv_cnt);
return -1;
}
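/*
 * Worked trace of the Robin Hood rule above (capacity 8): suppose our key
 * hashes to slot 3 while slots 3 and 4 hold entries whose own probe
 * distances are 0 and 1. At slot 3 the resident's distance (0) is not
 * less than ours (0), and at slot 4 it is 1 vs. our 1, so we keep
 * probing; if the resident of slot 5 has distance 0 while ours has grown
 * to 2, we swap with it ("take from the rich") and continue inserting the
 * evicted entry instead.
 */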
/*
* index_lookup -- checks if given key exists in hashmap.
* Returns index number if key was found, 0 otherwise.
*/
static uint64_t
index_lookup(const struct hashmap_rp *hashmap, uint64_t key)
{
const uint64_t hash_lookup = hash(hashmap, key);
uint64_t pos = hash_lookup;
uint64_t dist = 0;
const struct entry *entry_p = NULL;
do {
entry_p = D_RO(hashmap->entries);
entry_p += pos;
if (entry_p->hash == hash_lookup && entry_p->key == key)
return pos;
pos = increment_pos(hashmap, pos);
} while (entry_p->hash != 0 &&
(dist++) <= probe_distance(hashmap, entry_p->hash, pos) - 1);
return 0;
}
/*
 * entries_cache -- copies all live entries from the second hashmap (src)
 * into the first (dest)
*/
static int
entries_cache(PMEMobjpool *pop, struct hashmap_rp *dest,
const struct hashmap_rp *src)
{
const struct entry *e_begin = D_RO(src->entries);
const struct entry *e_end = e_begin + src->capacity;
for (const struct entry *e = e_begin; e != e_end; ++e) {
if (entry_is_empty(e->hash))
continue;
if (insert_helper(pop, dest, e->key,
e->value, HASHMAP_RP_REBUILD) == -1)
return -1;
}
HM_ASSERT(src->count == dest->count);
return 0;
}
/*
* hm_rp_rebuild -- rebuilds the hashmap with a new capacity.
* Returns 0 on success, -1 otherwise.
*/
static int
hm_rp_rebuild(PMEMobjpool *pop, TOID(struct hashmap_rp) hashmap,
size_t capacity_new)
{
/*
* We will need 6 actions:
* - 1 action to set new capacity
* - 1 action to set new resize threshold
* - 1 action to alloc memory for new entries
* - 1 action to free old entries
* - 2 actions to set new oid pointing to new entries
*/
struct pobj_action actv[6];
size_t actv_cnt = 0;
size_t sz_alloc = sizeof(struct entry) * capacity_new;
uint64_t resize_threshold_new = (uint64_t)(capacity_new *
HASHMAP_RP_LOAD_FACTOR);
pmemobj_set_value(pop, &actv[actv_cnt++], &D_RW(hashmap)->capacity,
capacity_new);
pmemobj_set_value(pop, &actv[actv_cnt++],
&D_RW(hashmap)->resize_threshold, resize_threshold_new);
struct hashmap_rp hashmap_rebuild;
hashmap_rebuild.count = 0;
hashmap_rebuild.capacity = capacity_new;
hashmap_rebuild.resize_threshold = resize_threshold_new;
hashmap_rebuild.entries = POBJ_XRESERVE_ALLOC(pop, struct entry,
sz_alloc, &actv[actv_cnt],
POBJ_XALLOC_ZERO);
if (TOID_IS_NULL(hashmap_rebuild.entries)) {
fprintf(stderr, "hashmap rebuild failed: %s\n",
pmemobj_errormsg());
goto rebuild_err;
}
actv_cnt++;
#ifdef DEBUG
free(swaps_array);
swaps_array = (int *)calloc(capacity_new, sizeof(int));
if (!swaps_array)
goto rebuild_err;
#endif
if (entries_cache(pop, &hashmap_rebuild, D_RW(hashmap)) == -1)
goto rebuild_err;
pmemobj_persist(pop, D_RW(hashmap_rebuild.entries), sz_alloc);
pmemobj_defer_free(pop, D_RW(hashmap)->entries.oid, &actv[actv_cnt++]);
pmemobj_set_value(pop, &actv[actv_cnt++],
&D_RW(hashmap)->entries.oid.pool_uuid_lo,
hashmap_rebuild.entries.oid.pool_uuid_lo);
pmemobj_set_value(pop, &actv[actv_cnt++],
&D_RW(hashmap)->entries.oid.off,
hashmap_rebuild.entries.oid.off);
HM_ASSERT(sizeof(actv) / sizeof(actv[0]) >= actv_cnt);
pmemobj_publish(pop, actv, actv_cnt);
return 0;
rebuild_err:
pmemobj_cancel(pop, actv, actv_cnt);
#ifdef DEBUG
free(swaps_array);
#endif
return -1;
}
/*
* hm_rp_create -- initializes hashmap state, called after pmemobj_create
*/
int
hm_rp_create(PMEMobjpool *pop, TOID(struct hashmap_rp) *map, void *arg)
{
struct hashmap_args *args = (struct hashmap_args *)arg;
uint32_t seed = args ? args->seed : 0;
hashmap_create(pop, map, seed);
return 0;
}
/*
* hm_rp_check -- checks if specified persistent object is an instance of
* hashmap
*/
int
hm_rp_check(PMEMobjpool *pop, TOID(struct hashmap_rp) hashmap)
{
return TOID_IS_NULL(hashmap) || !TOID_VALID(hashmap);
}
/*
* hm_rp_init -- recovers hashmap state, called after pmemobj_open.
 * Since hashmap_rp performs rebuild/insertion either completely or not at
 * all, this function is a no-op and simply returns 0.
*/
int
hm_rp_init(PMEMobjpool *pop, TOID(struct hashmap_rp) hashmap)
{
return 0;
}
/*
* hm_rp_insert -- rebuilds hashmap if necessary and wraps insert_helper.
* returns:
* - 0 if successful,
* - 1 if value already existed
* - -1 if something bad happened
*/
int
hm_rp_insert(PMEMobjpool *pop, TOID(struct hashmap_rp) hashmap,
uint64_t key, PMEMoid value)
{
if (D_RO(hashmap)->count + 1 >= D_RO(hashmap)->resize_threshold) {
uint64_t capacity_new = D_RO(hashmap)->capacity * 2;
if (hm_rp_rebuild(pop, hashmap, capacity_new) != 0)
return -1;
}
return insert_helper(pop, D_RW(hashmap), key, value,
HASHMAP_RP_NO_REBUILD);
}
/*
* hm_rp_remove -- removes specified key from the hashmap,
* returns:
* - key's value if successful,
* - OID_NULL if value didn't exist or if something bad happened
*/
PMEMoid
hm_rp_remove(PMEMobjpool *pop, TOID(struct hashmap_rp) hashmap,
uint64_t key)
{
const uint64_t pos = index_lookup(D_RO(hashmap), key);
if (pos == 0)
return OID_NULL;
struct entry *entry_p = D_RW(D_RW(hashmap)->entries);
entry_p += pos;
PMEMoid ret = entry_p->value;
size_t actvcnt = 0;
struct pobj_action actv[5];
pmemobj_set_value(pop, &actv[actvcnt++], &entry_p->hash,
entry_p->hash | TOMBSTONE_MASK);
pmemobj_set_value(pop, &actv[actvcnt++],
&entry_p->value.pool_uuid_lo, 0);
pmemobj_set_value(pop, &actv[actvcnt++], &entry_p->value.off, 0);
pmemobj_set_value(pop, &actv[actvcnt++], &entry_p->key, 0);
pmemobj_set_value(pop, &actv[actvcnt++], &D_RW(hashmap)->count,
D_RW(hashmap)->count - 1);
HM_ASSERT(sizeof(actv) / sizeof(actv[0]) >= actvcnt);
pmemobj_publish(pop, actv, actvcnt);
uint64_t reduced_threshold = (uint64_t)
(((uint64_t)(D_RO(hashmap)->capacity / 2))
* HASHMAP_RP_LOAD_FACTOR);
if (reduced_threshold >= INIT_ENTRIES_NUM_RP &&
D_RW(hashmap)->count < reduced_threshold &&
hm_rp_rebuild(pop, hashmap, D_RO(hashmap)->capacity / 2))
return OID_NULL;
return ret;
}
/*
* hm_rp_get -- checks whether specified key is in the hashmap.
* Returns associated value if key exists, OID_NULL otherwise.
*/
PMEMoid
hm_rp_get(PMEMobjpool *pop, TOID(struct hashmap_rp) hashmap,
uint64_t key)
{
struct entry *entry_p =
(struct entry *)pmemobj_direct(D_RW(hashmap)->entries.oid);
uint64_t pos = index_lookup(D_RO(hashmap), key);
return pos == 0 ? OID_NULL : (entry_p + pos)->value;
}
/*
* hm_rp_lookup -- checks whether specified key is in the hashmap.
* Returns 1 if key was found, 0 otherwise.
*/
int
hm_rp_lookup(PMEMobjpool *pop, TOID(struct hashmap_rp) hashmap,
uint64_t key)
{
return index_lookup(D_RO(hashmap), key) != 0;
}
/*
* hm_rp_foreach -- calls cb for all values from the hashmap
*/
int
hm_rp_foreach(PMEMobjpool *pop, TOID(struct hashmap_rp) hashmap,
int (*cb)(uint64_t key, PMEMoid value, void *arg), void *arg)
{
struct entry *entry_p =
(struct entry *)pmemobj_direct(D_RO(hashmap)->entries.oid);
int ret = 0;
for (size_t i = 0; i < D_RO(hashmap)->capacity; ++i, ++entry_p) {
uint64_t hash = entry_p->hash;
if (entry_is_empty(hash))
continue;
ret = cb(entry_p->key, entry_p->value, arg);
if (ret)
return ret;
}
return 0;
}
/*
* hm_rp_debug -- prints complete hashmap state
*/
static void
hm_rp_debug(PMEMobjpool *pop, TOID(struct hashmap_rp) hashmap, FILE *out)
{
#ifdef DEBUG
fprintf(out, "debug: true, ");
#endif
fprintf(out, "capacity: %" PRIu64 ", count: %" PRIu64 "\n",
D_RO(hashmap)->capacity, D_RO(hashmap)->count);
struct entry *entry_p = D_RW((D_RW(hashmap)->entries));
for (size_t i = 0; i < D_RO(hashmap)->capacity; ++i, ++entry_p) {
uint64_t hash = entry_p->hash;
if (entry_is_empty(hash))
continue;
uint64_t key = entry_p->key;
#ifdef DEBUG
fprintf(out, "%zu: %" PRIu64 " hash: %" PRIu64 " dist:%" PRIu32
" swaps:%u\n", i, key, hash,
probe_distance(D_RO(hashmap), hash, i),
swaps_array[i]);
#else
fprintf(out, "%zu: %" PRIu64 " dist:%u \n", i, key,
probe_distance(D_RO(hashmap), hash, i));
#endif
}
}
/*
* hm_rp_count -- returns number of elements
*/
size_t
hm_rp_count(PMEMobjpool *pop, TOID(struct hashmap_rp) hashmap)
{
return D_RO(hashmap)->count;
}
/*
* hm_rp_cmd -- execute cmd for hashmap
*/
int
hm_rp_cmd(PMEMobjpool *pop, TOID(struct hashmap_rp) hashmap,
unsigned cmd, uint64_t arg)
{
switch (cmd) {
case HASHMAP_CMD_REBUILD:
hm_rp_rebuild(pop, hashmap, D_RO(hashmap)->capacity);
return 0;
case HASHMAP_CMD_DEBUG:
if (!arg)
return -EINVAL;
hm_rp_debug(pop, hashmap, (FILE *)arg);
return 0;
default:
return -EINVAL;
}
}
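/*
 * Example (not part of the original file): a minimal driver sketch for
 * this variant. Unlike hashmap_tx it never opens a transaction; every
 * persistent update above goes through reserve/publish actions. The map
 * handle is assumed to live inside the pool; names are illustrative.
 */
#if 0 /* illustrative sketch, not compiled */
static void
example_hashmap_rp(PMEMobjpool *pop, TOID(struct hashmap_rp) *map)
{
	if (hm_rp_create(pop, map, NULL))
		abort();
	/* returns 0 for a new key, 1 when an existing value is replaced */
	hm_rp_insert(pop, *map, 7, OID_NULL);
	if (hm_rp_lookup(pop, *map, 7))
		hm_rp_remove(pop, *map, 7);
}
#endif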
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/examples/libpmemobj/hashmap/hashmap_atomic.h
/*
* Copyright 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef HASHMAP_ATOMIC_H
#define HASHMAP_ATOMIC_H
#include <stddef.h>
#include <stdint.h>
#include <hashmap.h>
#include <libpmemobj.h>
#ifndef HASHMAP_ATOMIC_TYPE_OFFSET
#define HASHMAP_ATOMIC_TYPE_OFFSET 1000
#endif
struct hashmap_atomic;
TOID_DECLARE(struct hashmap_atomic, HASHMAP_ATOMIC_TYPE_OFFSET + 0);
int hm_atomic_check(PMEMobjpool *pop, TOID(struct hashmap_atomic) hashmap);
int hm_atomic_create(PMEMobjpool *pop, TOID(struct hashmap_atomic) *map,
void *arg);
int hm_atomic_init(PMEMobjpool *pop, TOID(struct hashmap_atomic) hashmap);
int hm_atomic_insert(PMEMobjpool *pop, TOID(struct hashmap_atomic) hashmap,
uint64_t key, PMEMoid value);
PMEMoid hm_atomic_remove(PMEMobjpool *pop, TOID(struct hashmap_atomic) hashmap,
uint64_t key);
PMEMoid hm_atomic_get(PMEMobjpool *pop, TOID(struct hashmap_atomic) hashmap,
uint64_t key);
int hm_atomic_lookup(PMEMobjpool *pop, TOID(struct hashmap_atomic) hashmap,
uint64_t key);
int hm_atomic_foreach(PMEMobjpool *pop, TOID(struct hashmap_atomic) hashmap,
int (*cb)(uint64_t key, PMEMoid value, void *arg), void *arg);
size_t hm_atomic_count(PMEMobjpool *pop, TOID(struct hashmap_atomic) hashmap);
int hm_atomic_cmd(PMEMobjpool *pop, TOID(struct hashmap_atomic) hashmap,
unsigned cmd, uint64_t arg);
#endif /* HASHMAP_ATOMIC_H */
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/examples/libpmemobj/hashmap/hashmap_atomic.c
/*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* integer hash set implementation which uses only atomic APIs */
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <inttypes.h>
#include <libpmemobj.h>
#include "hashmap_atomic.h"
#include "hashmap_internal.h"
/* layout definition */
TOID_DECLARE(struct buckets, HASHMAP_ATOMIC_TYPE_OFFSET + 1);
TOID_DECLARE(struct entry, HASHMAP_ATOMIC_TYPE_OFFSET + 2);
struct entry {
uint64_t key;
PMEMoid value;
/* list pointer */
POBJ_LIST_ENTRY(struct entry) list;
};
struct entry_args {
uint64_t key;
PMEMoid value;
};
POBJ_LIST_HEAD(entries_head, struct entry);
struct buckets {
/* number of buckets */
size_t nbuckets;
/* array of lists */
struct entries_head bucket[];
};
struct hashmap_atomic {
/* random number generator seed */
uint32_t seed;
/* hash function coefficients */
uint32_t hash_fun_a;
uint32_t hash_fun_b;
uint64_t hash_fun_p;
/* number of values inserted */
uint64_t count;
/* whether "count" should be updated */
uint32_t count_dirty;
/* buckets */
TOID(struct buckets) buckets;
/* buckets, used during rehashing, null otherwise */
TOID(struct buckets) buckets_tmp;
};
/*
* create_entry -- entry initializer
*/
static int
create_entry(PMEMobjpool *pop, void *ptr, void *arg)
{
struct entry *e = (struct entry *)ptr;
struct entry_args *args = (struct entry_args *)arg;
e->key = args->key;
e->value = args->value;
memset(&e->list, 0, sizeof(e->list));
pmemobj_persist(pop, e, sizeof(*e));
return 0;
}
/*
* create_buckets -- buckets initializer
*/
static int
create_buckets(PMEMobjpool *pop, void *ptr, void *arg)
{
struct buckets *b = (struct buckets *)ptr;
b->nbuckets = *((size_t *)arg);
pmemobj_memset_persist(pop, &b->bucket, 0,
b->nbuckets * sizeof(b->bucket[0]));
pmemobj_persist(pop, &b->nbuckets, sizeof(b->nbuckets));
return 0;
}
/*
* create_hashmap -- hashmap initializer
*/
static void
create_hashmap(PMEMobjpool *pop, TOID(struct hashmap_atomic) hashmap,
uint32_t seed)
{
D_RW(hashmap)->seed = seed;
do {
D_RW(hashmap)->hash_fun_a = (uint32_t)rand();
} while (D_RW(hashmap)->hash_fun_a == 0);
D_RW(hashmap)->hash_fun_b = (uint32_t)rand();
D_RW(hashmap)->hash_fun_p = HASH_FUNC_COEFF_P;
size_t len = INIT_BUCKETS_NUM;
size_t sz = sizeof(struct buckets) +
len * sizeof(struct entries_head);
if (POBJ_ALLOC(pop, &D_RW(hashmap)->buckets, struct buckets, sz,
create_buckets, &len)) {
fprintf(stderr, "root alloc failed: %s\n", pmemobj_errormsg());
abort();
}
pmemobj_persist(pop, D_RW(hashmap), sizeof(*D_RW(hashmap)));
}
/*
* hash -- the simplest hashing function,
* see https://en.wikipedia.org/wiki/Universal_hashing#Hashing_integers
*/
static uint64_t
hash(const TOID(struct hashmap_atomic) *hashmap,
const TOID(struct buckets) *buckets,
uint64_t value)
{
uint32_t a = D_RO(*hashmap)->hash_fun_a;
uint32_t b = D_RO(*hashmap)->hash_fun_b;
uint64_t p = D_RO(*hashmap)->hash_fun_p;
size_t len = D_RO(*buckets)->nbuckets;
return ((a * value + b) % p) % len;
}
/*
* hm_atomic_rebuild_finish -- finishes rebuild, assumes buckets_tmp is not null
*/
static void
hm_atomic_rebuild_finish(PMEMobjpool *pop, TOID(struct hashmap_atomic) hashmap)
{
TOID(struct buckets) cur = D_RO(hashmap)->buckets;
TOID(struct buckets) tmp = D_RO(hashmap)->buckets_tmp;
for (size_t i = 0; i < D_RO(cur)->nbuckets; ++i) {
while (!POBJ_LIST_EMPTY(&D_RO(cur)->bucket[i])) {
TOID(struct entry) en =
POBJ_LIST_FIRST(&D_RO(cur)->bucket[i]);
uint64_t h = hash(&hashmap, &tmp, D_RO(en)->key);
if (POBJ_LIST_MOVE_ELEMENT_HEAD(pop,
&D_RW(cur)->bucket[i],
&D_RW(tmp)->bucket[h],
en, list, list)) {
fprintf(stderr, "move failed: %s\n",
pmemobj_errormsg());
abort();
}
}
}
POBJ_FREE(&D_RO(hashmap)->buckets);
D_RW(hashmap)->buckets = D_RO(hashmap)->buckets_tmp;
pmemobj_persist(pop, &D_RW(hashmap)->buckets,
sizeof(D_RW(hashmap)->buckets));
/*
* We have to set offset manually instead of substituting OID_NULL,
* because we won't be able to recover easily if crash happens after
* pool_uuid_lo, but before offset is set. Another reason why everyone
* should use transaction API.
* See recovery process in hm_init and TOID_IS_NULL macro definition.
*/
D_RW(hashmap)->buckets_tmp.oid.off = 0;
pmemobj_persist(pop, &D_RW(hashmap)->buckets_tmp,
sizeof(D_RW(hashmap)->buckets_tmp));
}
/*
* hm_atomic_rebuild -- rebuilds the hashmap with a new number of buckets
*/
static void
hm_atomic_rebuild(PMEMobjpool *pop, TOID(struct hashmap_atomic) hashmap,
size_t new_len)
{
if (new_len == 0)
new_len = D_RO(D_RO(hashmap)->buckets)->nbuckets;
size_t sz = sizeof(struct buckets) +
new_len * sizeof(struct entries_head);
POBJ_ALLOC(pop, &D_RW(hashmap)->buckets_tmp, struct buckets, sz,
create_buckets, &new_len);
if (TOID_IS_NULL(D_RO(hashmap)->buckets_tmp)) {
fprintf(stderr,
"failed to allocate temporary space of size: %zu"
", %s\n",
new_len, pmemobj_errormsg());
return;
}
hm_atomic_rebuild_finish(pop, hashmap);
}
/*
* hm_atomic_insert -- inserts specified value into the hashmap,
* returns:
* - 0 if successful,
* - 1 if value already existed,
* - -1 if something bad happened
*/
int
hm_atomic_insert(PMEMobjpool *pop, TOID(struct hashmap_atomic) hashmap,
uint64_t key, PMEMoid value)
{
TOID(struct buckets) buckets = D_RO(hashmap)->buckets;
TOID(struct entry) var;
uint64_t h = hash(&hashmap, &buckets, key);
int num = 0;
POBJ_LIST_FOREACH(var, &D_RO(buckets)->bucket[h], list) {
if (D_RO(var)->key == key)
return 1;
num++;
}
D_RW(hashmap)->count_dirty = 1;
pmemobj_persist(pop, &D_RW(hashmap)->count_dirty,
sizeof(D_RW(hashmap)->count_dirty));
struct entry_args args;
args.key = key;
args.value = value;
PMEMoid oid = POBJ_LIST_INSERT_NEW_HEAD(pop,
&D_RW(buckets)->bucket[h],
list, sizeof(struct entry), create_entry, &args);
if (OID_IS_NULL(oid)) {
fprintf(stderr, "failed to allocate entry: %s\n",
pmemobj_errormsg());
return -1;
}
D_RW(hashmap)->count++;
pmemobj_persist(pop, &D_RW(hashmap)->count,
sizeof(D_RW(hashmap)->count));
D_RW(hashmap)->count_dirty = 0;
pmemobj_persist(pop, &D_RW(hashmap)->count_dirty,
sizeof(D_RW(hashmap)->count_dirty));
num++;
if (num > MAX_HASHSET_THRESHOLD ||
(num > MIN_HASHSET_THRESHOLD &&
D_RO(hashmap)->count > 2 * D_RO(buckets)->nbuckets))
hm_atomic_rebuild(pop, hashmap, D_RW(buckets)->nbuckets * 2);
return 0;
}
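/*
 * Note on the count_dirty protocol used above: the flag is persisted as 1
 * before the bucket list changes and cleared only after the updated count
 * is persisted. If a crash hits anywhere in between, hm_atomic_init()
 * finds the flag set and recounts all buckets, so "count" can never stay
 * stale.
 */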
/*
* hm_atomic_remove -- removes specified value from the hashmap,
* returns:
* - key's value if successful,
* - OID_NULL if value didn't exist or if something bad happened
*/
PMEMoid
hm_atomic_remove(PMEMobjpool *pop, TOID(struct hashmap_atomic) hashmap,
uint64_t key)
{
TOID(struct buckets) buckets = D_RO(hashmap)->buckets;
TOID(struct entry) var;
uint64_t h = hash(&hashmap, &buckets, key);
POBJ_LIST_FOREACH(var, &D_RW(buckets)->bucket[h], list) {
if (D_RO(var)->key == key)
break;
}
if (TOID_IS_NULL(var))
return OID_NULL;
D_RW(hashmap)->count_dirty = 1;
pmemobj_persist(pop, &D_RW(hashmap)->count_dirty,
sizeof(D_RW(hashmap)->count_dirty));
if (POBJ_LIST_REMOVE_FREE(pop, &D_RW(buckets)->bucket[h],
var, list)) {
fprintf(stderr, "list remove failed: %s\n",
pmemobj_errormsg());
return OID_NULL;
}
D_RW(hashmap)->count--;
pmemobj_persist(pop, &D_RW(hashmap)->count,
sizeof(D_RW(hashmap)->count));
D_RW(hashmap)->count_dirty = 0;
pmemobj_persist(pop, &D_RW(hashmap)->count_dirty,
sizeof(D_RW(hashmap)->count_dirty));
if (D_RO(hashmap)->count < D_RO(buckets)->nbuckets)
hm_atomic_rebuild(pop, hashmap, D_RO(buckets)->nbuckets / 2);
return D_RO(var)->value;
}
/*
 * hm_atomic_foreach -- calls cb for every key/value pair in the hashmap
*/
int
hm_atomic_foreach(PMEMobjpool *pop, TOID(struct hashmap_atomic) hashmap,
int (*cb)(uint64_t key, PMEMoid value, void *arg), void *arg)
{
TOID(struct buckets) buckets = D_RO(hashmap)->buckets;
TOID(struct entry) var;
int ret = 0;
for (size_t i = 0; i < D_RO(buckets)->nbuckets; ++i)
POBJ_LIST_FOREACH(var, &D_RO(buckets)->bucket[i], list) {
ret = cb(D_RO(var)->key, D_RO(var)->value, arg);
if (ret)
return ret;
}
return 0;
}
/*
* hm_atomic_debug -- prints complete hashmap state
*/
static void
hm_atomic_debug(PMEMobjpool *pop, TOID(struct hashmap_atomic) hashmap,
FILE *out)
{
TOID(struct buckets) buckets = D_RO(hashmap)->buckets;
TOID(struct entry) var;
fprintf(out, "a: %u b: %u p: %" PRIu64 "\n", D_RO(hashmap)->hash_fun_a,
D_RO(hashmap)->hash_fun_b, D_RO(hashmap)->hash_fun_p);
fprintf(out, "count: %" PRIu64 ", buckets: %zu\n",
D_RO(hashmap)->count, D_RO(buckets)->nbuckets);
for (size_t i = 0; i < D_RO(buckets)->nbuckets; ++i) {
if (POBJ_LIST_EMPTY(&D_RO(buckets)->bucket[i]))
continue;
int num = 0;
fprintf(out, "%zu: ", i);
POBJ_LIST_FOREACH(var, &D_RO(buckets)->bucket[i], list) {
fprintf(out, "%" PRIu64 " ", D_RO(var)->key);
num++;
}
fprintf(out, "(%d)\n", num);
}
}
/*
 * hm_atomic_get -- returns the value stored under the specified key, or
 * OID_NULL
*/
PMEMoid
hm_atomic_get(PMEMobjpool *pop, TOID(struct hashmap_atomic) hashmap,
uint64_t key)
{
TOID(struct buckets) buckets = D_RO(hashmap)->buckets;
TOID(struct entry) var;
uint64_t h = hash(&hashmap, &buckets, key);
POBJ_LIST_FOREACH(var, &D_RO(buckets)->bucket[h], list)
if (D_RO(var)->key == key)
return D_RO(var)->value;
return OID_NULL;
}
/*
 * hm_atomic_lookup -- checks whether the specified key exists in the hashmap
*/
int
hm_atomic_lookup(PMEMobjpool *pop, TOID(struct hashmap_atomic) hashmap,
uint64_t key)
{
TOID(struct buckets) buckets = D_RO(hashmap)->buckets;
TOID(struct entry) var;
uint64_t h = hash(&hashmap, &buckets, key);
POBJ_LIST_FOREACH(var, &D_RO(buckets)->bucket[h], list)
if (D_RO(var)->key == key)
return 1;
return 0;
}
/*
* hm_atomic_create -- initializes hashmap state, called after pmemobj_create
*/
int
hm_atomic_create(PMEMobjpool *pop, TOID(struct hashmap_atomic) *map, void *arg)
{
struct hashmap_args *args = (struct hashmap_args *)arg;
uint32_t seed = args ? args->seed : 0;
POBJ_ZNEW(pop, map, struct hashmap_atomic);
create_hashmap(pop, *map, seed);
return 0;
}
/*
* hm_atomic_init -- recovers hashmap state, called after pmemobj_open
*/
int
hm_atomic_init(PMEMobjpool *pop, TOID(struct hashmap_atomic) hashmap)
{
srand(D_RO(hashmap)->seed);
/* handle rebuild interruption */
if (!TOID_IS_NULL(D_RO(hashmap)->buckets_tmp)) {
printf("rebuild, previous attempt crashed\n");
if (TOID_EQUALS(D_RO(hashmap)->buckets,
D_RO(hashmap)->buckets_tmp)) {
/* see comment in hm_rebuild_finish */
D_RW(hashmap)->buckets_tmp.oid.off = 0;
pmemobj_persist(pop, &D_RW(hashmap)->buckets_tmp,
sizeof(D_RW(hashmap)->buckets_tmp));
} else if (TOID_IS_NULL(D_RW(hashmap)->buckets)) {
D_RW(hashmap)->buckets = D_RW(hashmap)->buckets_tmp;
pmemobj_persist(pop, &D_RW(hashmap)->buckets,
sizeof(D_RW(hashmap)->buckets));
/* see comment in hm_rebuild_finish */
D_RW(hashmap)->buckets_tmp.oid.off = 0;
pmemobj_persist(pop, &D_RW(hashmap)->buckets_tmp,
sizeof(D_RW(hashmap)->buckets_tmp));
} else {
hm_atomic_rebuild_finish(pop, hashmap);
}
}
/* handle insert or remove interruption */
if (D_RO(hashmap)->count_dirty) {
printf("count dirty, recalculating\n");
TOID(struct entry) var;
TOID(struct buckets) buckets = D_RO(hashmap)->buckets;
uint64_t cnt = 0;
for (size_t i = 0; i < D_RO(buckets)->nbuckets; ++i)
POBJ_LIST_FOREACH(var, &D_RO(buckets)->bucket[i], list)
cnt++;
printf("old count: %" PRIu64 ", new count: %" PRIu64 "\n",
D_RO(hashmap)->count, cnt);
D_RW(hashmap)->count = cnt;
pmemobj_persist(pop, &D_RW(hashmap)->count,
sizeof(D_RW(hashmap)->count));
D_RW(hashmap)->count_dirty = 0;
pmemobj_persist(pop, &D_RW(hashmap)->count_dirty,
sizeof(D_RW(hashmap)->count_dirty));
}
return 0;
}
/*
* hm_atomic_check -- checks if specified persistent object is an
* instance of hashmap
*/
int
hm_atomic_check(PMEMobjpool *pop, TOID(struct hashmap_atomic) hashmap)
{
return TOID_IS_NULL(hashmap) || !TOID_VALID(hashmap);
}
/*
* hm_atomic_count -- returns number of elements
*/
size_t
hm_atomic_count(PMEMobjpool *pop, TOID(struct hashmap_atomic) hashmap)
{
return D_RO(hashmap)->count;
}
/*
* hm_atomic_cmd -- execute cmd for hashmap
*/
int
hm_atomic_cmd(PMEMobjpool *pop, TOID(struct hashmap_atomic) hashmap,
unsigned cmd, uint64_t arg)
{
switch (cmd) {
case HASHMAP_CMD_REBUILD:
hm_atomic_rebuild(pop, hashmap, arg);
return 0;
case HASHMAP_CMD_DEBUG:
if (!arg)
return -EINVAL;
hm_atomic_debug(pop, hashmap, (FILE *)arg);
return 0;
default:
return -EINVAL;
}
}
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/examples/libpmemobj/pmemlog/obj_pmemlog_simple.c
/*
* Copyright 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* obj_pmemlog_simple.c -- alternate pmemlog implementation based on pmemobj
*
* usage: obj_pmemlog_simple [co] file [cmd[:param]...]
*
* c - create file
* o - open file
*
* The "cmd" arguments match the pmemlog functions:
* a - append
* v - appendv
* r - rewind
* w - walk
* n - nbyte
* t - tell
* "a", "w" and "v" require a parameter string(s) separated by a colon
*/
#include <ex_common.h>
#include <sys/stat.h>
#include <string.h>
#include <stdio.h>
#include <assert.h>
#include <stdlib.h>
#include <errno.h>
#include "libpmemobj.h"
#include "libpmem.h"
#include "libpmemlog.h"
#define USABLE_SIZE (9.0 / 10)
#define MAX_POOL_SIZE (((size_t)1024 * 1024 * 1024 * 16))
#define POOL_SIZE ((size_t)(1024 * 1024 * 100))
POBJ_LAYOUT_BEGIN(obj_pmemlog_simple);
POBJ_LAYOUT_ROOT(obj_pmemlog_simple, struct base);
POBJ_LAYOUT_TOID(obj_pmemlog_simple, struct log);
POBJ_LAYOUT_END(obj_pmemlog_simple);
/* log entry header */
struct log_hdr {
uint64_t write_offset; /* data write offset */
size_t data_size; /* size available for data */
};
/* struct log stores the entire log entry */
struct log {
struct log_hdr hdr;
char data[];
};
/* struct base has the lock and log OID */
struct base {
PMEMrwlock rwlock; /* lock covering entire log */
TOID(struct log) log;
};
/*
 * pmemlog_map -- (internal) read or initialize the log pool
*/
static int
pmemlog_map(PMEMobjpool *pop, size_t fsize)
{
int retval = 0;
	TOID(struct base) bp;
bp = POBJ_ROOT(pop, struct base);
/* log already initialized */
if (!TOID_IS_NULL(D_RO(bp)->log))
return retval;
size_t pool_size = (size_t)(fsize * USABLE_SIZE);
/* max size of a single allocation is 16GB */
if (pool_size > MAX_POOL_SIZE) {
errno = EINVAL;
return 1;
}
TX_BEGIN(pop) {
TX_ADD(bp);
D_RW(bp)->log = TX_ZALLOC(struct log, pool_size);
D_RW(D_RW(bp)->log)->hdr.data_size =
pool_size - sizeof(struct log_hdr);
} TX_ONABORT {
retval = -1;
} TX_END
return retval;
}
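/*
 * Size math for the mapping above (illustrative): a 100 MB pool file
 * yields pool_size = 100 MB * 9/10 = 90 MB; the struct log allocation
 * spends sizeof(struct log_hdr) bytes on the header and exposes the
 * remainder as hdr.data_size for appended records.
 */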
/*
* pmemlog_open -- pool open wrapper
*/
PMEMlogpool *
pmemlog_open(const char *path)
{
PMEMobjpool *pop = pmemobj_open(path,
POBJ_LAYOUT_NAME(obj_pmemlog_simple));
assert(pop != NULL);
struct stat buf;
if (stat(path, &buf)) {
perror("stat");
return NULL;
}
return pmemlog_map(pop, buf.st_size) ? NULL : (PMEMlogpool *)pop;
}
/*
* pmemlog_create -- pool create wrapper
*/
PMEMlogpool *
pmemlog_create(const char *path, size_t poolsize, mode_t mode)
{
PMEMobjpool *pop = pmemobj_create(path,
POBJ_LAYOUT_NAME(obj_pmemlog_simple),
poolsize, mode);
assert(pop != NULL);
struct stat buf;
if (stat(path, &buf)) {
perror("stat");
return NULL;
}
return pmemlog_map(pop, buf.st_size) ? NULL : (PMEMlogpool *)pop;
}
/*
 * pmemlog_close -- pool close wrapper
*/
void
pmemlog_close(PMEMlogpool *plp)
{
pmemobj_close((PMEMobjpool *)plp);
}
/*
* pmemlog_nbyte -- return usable size of a log memory pool
*/
size_t
pmemlog_nbyte(PMEMlogpool *plp)
{
PMEMobjpool *pop = (PMEMobjpool *)plp;
TOID(struct log) logp;
logp = D_RO(POBJ_ROOT(pop, struct base))->log;
return D_RO(logp)->hdr.data_size;
}
/*
* pmemlog_append -- add data to a log memory pool
*/
int
pmemlog_append(PMEMlogpool *plp, const void *buf, size_t count)
{
PMEMobjpool *pop = (PMEMobjpool *)plp;
int retval = 0;
TOID(struct base) bp;
bp = POBJ_ROOT(pop, struct base);
TOID(struct log) logp;
logp = D_RW(bp)->log;
/* check for overrun */
if ((D_RO(logp)->hdr.write_offset + count)
> D_RO(logp)->hdr.data_size) {
errno = ENOMEM;
return 1;
}
/* begin a transaction, also acquiring the write lock for the log */
TX_BEGIN_PARAM(pop, TX_PARAM_RWLOCK, &D_RW(bp)->rwlock, TX_PARAM_NONE) {
char *dst = D_RW(logp)->data + D_RO(logp)->hdr.write_offset;
/* add hdr to undo log */
TX_ADD_FIELD(logp, hdr);
/* copy and persist data */
pmemobj_memcpy_persist(pop, dst, buf, count);
/* set the new offset */
D_RW(logp)->hdr.write_offset += count;
} TX_ONABORT {
retval = -1;
} TX_END
return retval;
}
/*
* pmemlog_appendv -- add gathered data to a log memory pool
*/
int
pmemlog_appendv(PMEMlogpool *plp, const struct iovec *iov, int iovcnt)
{
PMEMobjpool *pop = (PMEMobjpool *)plp;
int retval = 0;
TOID(struct base) bp;
bp = POBJ_ROOT(pop, struct base);
uint64_t total_count = 0;
/* calculate required space */
for (int i = 0; i < iovcnt; ++i)
total_count += iov[i].iov_len;
TOID(struct log) logp;
logp = D_RW(bp)->log;
/* check for overrun */
if ((D_RO(logp)->hdr.write_offset + total_count)
> D_RO(logp)->hdr.data_size) {
errno = ENOMEM;
return 1;
}
/* begin a transaction, also acquiring the write lock for the log */
TX_BEGIN_PARAM(pop, TX_PARAM_RWLOCK, &D_RW(bp)->rwlock, TX_PARAM_NONE) {
TX_ADD(D_RW(bp)->log);
/* append the data */
for (int i = 0; i < iovcnt; ++i) {
char *buf = (char *)iov[i].iov_base;
size_t count = iov[i].iov_len;
char *dst = D_RW(logp)->data
+ D_RO(logp)->hdr.write_offset;
/* copy and persist data */
pmemobj_memcpy_persist(pop, dst, buf, count);
/* set the new offset */
D_RW(logp)->hdr.write_offset += count;
}
} TX_ONABORT {
retval = -1;
} TX_END
return retval;
}
/*
* pmemlog_tell -- return current write point in a log memory pool
*/
long long
pmemlog_tell(PMEMlogpool *plp)
{
PMEMobjpool *pop = (PMEMobjpool *)plp;
TOID(struct log) logp;
logp = D_RO(POBJ_ROOT(pop, struct base))->log;
return D_RO(logp)->hdr.write_offset;
}
/*
* pmemlog_rewind -- discard all data, resetting a log memory pool to empty
*/
void
pmemlog_rewind(PMEMlogpool *plp)
{
PMEMobjpool *pop = (PMEMobjpool *)plp;
TOID(struct base) bp;
bp = POBJ_ROOT(pop, struct base);
/* begin a transaction, also acquiring the write lock for the log */
TX_BEGIN_PARAM(pop, TX_PARAM_RWLOCK, &D_RW(bp)->rwlock, TX_PARAM_NONE) {
/* add the hdr to the undo log */
TX_ADD_FIELD(D_RW(bp)->log, hdr);
/* reset the write offset */
D_RW(D_RW(bp)->log)->hdr.write_offset = 0;
} TX_END
}
/*
* pmemlog_walk -- walk through all data in a log memory pool
*
* chunksize of 0 means process_chunk gets called once for all data
* as a single chunk.
*/
void
pmemlog_walk(PMEMlogpool *plp, size_t chunksize,
int (*process_chunk)(const void *buf, size_t len, void *arg), void *arg)
{
PMEMobjpool *pop = (PMEMobjpool *)plp;
TOID(struct base) bp;
bp = POBJ_ROOT(pop, struct base);
/* acquire a rdlock here */
int err;
if ((err = pmemobj_rwlock_rdlock(pop, &D_RW(bp)->rwlock)) != 0) {
errno = err;
return;
}
TOID(struct log) logp;
logp = D_RW(bp)->log;
size_t read_size = chunksize ? chunksize : D_RO(logp)->hdr.data_size;
char *read_ptr = D_RW(logp)->data;
const char *write_ptr = (D_RO(logp)->data
+ D_RO(logp)->hdr.write_offset);
while (read_ptr < write_ptr) {
read_size = MIN(read_size, (size_t)(write_ptr - read_ptr));
(*process_chunk)(read_ptr, read_size, arg);
read_ptr += read_size;
}
pmemobj_rwlock_unlock(pop, &D_RW(bp)->rwlock);
}
/*
* process_chunk -- (internal) process function for log_walk
*/
static int
process_chunk(const void *buf, size_t len, void *arg)
{
char *tmp = (char *)malloc(len + 1);
if (tmp == NULL) {
fprintf(stderr, "malloc error\n");
return 0;
}
memcpy(tmp, buf, len);
tmp[len] = '\0';
printf("log contains:\n");
printf("%s\n", tmp);
free(tmp);
return 1; /* continue */
}
/*
* count_iovec -- (internal) count the number of iovec items
*/
static int
count_iovec(char *arg)
{
int count = 1;
char *pch = strchr(arg, ':');
while (pch != NULL) {
++count;
pch = strchr(++pch, ':');
}
return count;
}
/*
* fill_iovec -- (internal) fill out the iovec
*/
static void
fill_iovec(struct iovec *iov, char *arg)
{
char *pch = strtok(arg, ":");
while (pch != NULL) {
iov->iov_base = pch;
iov->iov_len = strlen((char *)iov->iov_base);
++iov;
pch = strtok(NULL, ":");
}
}
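/*
 * Example: for the argument "foo:bar:baz", count_iovec() returns 3 and
 * fill_iovec() fills iov[0..2] with pointers to "foo", "bar" and "baz"
 * (strtok() replaces each ':' with '\0' in place).
 */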
int
main(int argc, char *argv[])
{
if (argc < 2) {
fprintf(stderr, "usage: %s [o,c] file [val...]\n", argv[0]);
return 1;
}
PMEMlogpool *plp;
if (strncmp(argv[1], "c", 1) == 0) {
plp = pmemlog_create(argv[2], POOL_SIZE, CREATE_MODE_RW);
} else if (strncmp(argv[1], "o", 1) == 0) {
plp = pmemlog_open(argv[2]);
} else {
fprintf(stderr, "usage: %s [o,c] file [val...]\n", argv[0]);
return 1;
}
if (plp == NULL) {
perror("pmemlog_create/pmemlog_open");
return 1;
}
/* process the command line arguments */
for (int i = 3; i < argc; i++) {
switch (*argv[i]) {
case 'a': {
printf("append: %s\n", argv[i] + 2);
if (pmemlog_append(plp, argv[i] + 2,
strlen(argv[i] + 2)))
fprintf(stderr, "pmemlog_append"
" error\n");
break;
}
case 'v': {
printf("appendv: %s\n", argv[i] + 2);
int count = count_iovec(argv[i] + 2);
struct iovec *iov = (struct iovec *)malloc(
count * sizeof(struct iovec));
if (iov == NULL) {
fprintf(stderr, "malloc error\n");
return 1;
}
fill_iovec(iov, argv[i] + 2);
if (pmemlog_appendv(plp, iov, count))
fprintf(stderr, "pmemlog_appendv"
" error\n");
free(iov);
break;
}
case 'r': {
printf("rewind\n");
pmemlog_rewind(plp);
break;
}
case 'w': {
printf("walk\n");
unsigned long walksize = strtoul(argv[i] + 2,
NULL, 10);
pmemlog_walk(plp, walksize, process_chunk,
NULL);
break;
}
case 'n': {
printf("nbytes: %zu\n", pmemlog_nbyte(plp));
break;
}
case 't': {
printf("offset: %lld\n", pmemlog_tell(plp));
break;
}
default: {
fprintf(stderr, "unrecognized command %s\n",
argv[i]);
break;
}
};
}
/* all done */
pmemlog_close(plp);
return 0;
}
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/examples/libpmemobj/pmemlog/obj_pmemlog.c
/*
* Copyright 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* obj_pmemlog.c -- alternate pmemlog implementation based on pmemobj
*
* usage: obj_pmemlog [co] file [cmd[:param]...]
*
* c - create file
* o - open file
*
* The "cmd" arguments match the pmemlog functions:
* a - append
* v - appendv
* r - rewind
* w - walk
* n - nbyte
* t - tell
* "a" and "v" require a parameter string(s) separated by a colon
*/
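/*
 * A hypothetical example session:
 *
 *	obj_pmemlog c /path/to/pool a:hello v:foo:bar w n t
 *
 * creates the pool, appends "hello", appends "foo" and "bar" as a
 * vector, walks the log, then prints the byte count and write offset.
 */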
#include <ex_common.h>
#include <sys/stat.h>
#include <string.h>
#include <stdio.h>
#include <assert.h>
#include <stdlib.h>
#include "libpmemobj.h"
#include "libpmem.h"
#include "libpmemlog.h"
#define LAYOUT_NAME "obj_pmemlog"
#define POOL_SIZE ((size_t)(1024 * 1024 * 100))
/* types of allocations */
enum types {
LOG_TYPE,
LOG_HDR_TYPE,
BASE_TYPE,
MAX_TYPES
};
/* log entry header */
struct log_hdr {
PMEMoid next; /* object ID of the next log buffer */
size_t size; /* size of this log buffer */
};
/* struct log stores the entire log entry */
struct log {
struct log_hdr hdr; /* entry header */
char data[]; /* log entry data */
};
/* struct base keeps track of the beginning of the log list */
struct base {
PMEMoid head; /* object ID of the first log buffer */
PMEMoid tail; /* object ID of the last log buffer */
PMEMrwlock rwlock; /* lock covering entire log */
size_t bytes_written; /* number of bytes stored in the pool */
};
/*
* pmemlog_open -- pool open wrapper
*/
PMEMlogpool *
pmemlog_open(const char *path)
{
return (PMEMlogpool *)pmemobj_open(path, LAYOUT_NAME);
}
/*
* pmemlog_create -- pool create wrapper
*/
PMEMlogpool *
pmemlog_create(const char *path, size_t poolsize, mode_t mode)
{
return (PMEMlogpool *)pmemobj_create(path, LAYOUT_NAME,
poolsize, mode);
}
/*
* pmemlog_close -- pool close wrapper
*/
void
pmemlog_close(PMEMlogpool *plp)
{
pmemobj_close((PMEMobjpool *)plp);
}
/*
* pmemlog_nbyte -- not available in this implementation
*/
size_t
pmemlog_nbyte(PMEMlogpool *plp)
{
/* N/A */
return 0;
}
/*
* pmemlog_append -- add data to a log memory pool
*/
int
pmemlog_append(PMEMlogpool *plp, const void *buf, size_t count)
{
PMEMobjpool *pop = (PMEMobjpool *)plp;
PMEMoid baseoid = pmemobj_root(pop, sizeof(struct base));
struct base *bp = pmemobj_direct(baseoid);
	/* set the return point: a transaction abort longjmps back here */
jmp_buf env;
if (setjmp(env)) {
/* end the transaction */
(void) pmemobj_tx_end();
return 1;
}
/* begin a transaction, also acquiring the write lock for the log */
if (pmemobj_tx_begin(pop, env, TX_PARAM_RWLOCK, &bp->rwlock,
TX_PARAM_NONE))
return -1;
/* allocate the new node to be inserted */
PMEMoid log = pmemobj_tx_alloc(count + sizeof(struct log_hdr),
LOG_TYPE);
struct log *logp = pmemobj_direct(log);
logp->hdr.size = count;
memcpy(logp->data, buf, count);
logp->hdr.next = OID_NULL;
/* add the modified root object to the undo log */
pmemobj_tx_add_range(baseoid, 0, sizeof(struct base));
if (bp->tail.off == 0) {
/* update head */
bp->head = log;
} else {
/* add the modified tail entry to the undo log */
pmemobj_tx_add_range(bp->tail, 0, sizeof(struct log));
((struct log *)pmemobj_direct(bp->tail))->hdr.next = log;
}
bp->tail = log; /* update tail */
bp->bytes_written += count;
pmemobj_tx_commit();
(void) pmemobj_tx_end();
return 0;
}
/*
* pmemlog_appendv -- add gathered data to a log memory pool
*/
int
pmemlog_appendv(PMEMlogpool *plp, const struct iovec *iov, int iovcnt)
{
PMEMobjpool *pop = (PMEMobjpool *)plp;
PMEMoid baseoid = pmemobj_root(pop, sizeof(struct base));
struct base *bp = pmemobj_direct(baseoid);
/* set the return point */
jmp_buf env;
if (setjmp(env)) {
/* end the transaction */
pmemobj_tx_end();
return 1;
}
/* begin a transaction, also acquiring the write lock for the log */
if (pmemobj_tx_begin(pop, env, TX_PARAM_RWLOCK, &bp->rwlock,
TX_PARAM_NONE))
return -1;
/* add the base object to the undo log - once for the transaction */
pmemobj_tx_add_range(baseoid, 0, sizeof(struct base));
/* add the tail entry once to the undo log, if it is set */
if (!OID_IS_NULL(bp->tail))
pmemobj_tx_add_range(bp->tail, 0, sizeof(struct log));
/* append the data */
for (int i = 0; i < iovcnt; ++i) {
char *buf = iov[i].iov_base;
size_t count = iov[i].iov_len;
/* allocate the new node to be inserted */
PMEMoid log = pmemobj_tx_alloc(count + sizeof(struct log_hdr),
LOG_TYPE);
struct log *logp = pmemobj_direct(log);
logp->hdr.size = count;
memcpy(logp->data, buf, count);
logp->hdr.next = OID_NULL;
if (bp->tail.off == 0) {
bp->head = log; /* update head */
} else {
((struct log *)pmemobj_direct(bp->tail))->hdr.next =
log;
}
bp->tail = log; /* update tail */
bp->bytes_written += count;
}
pmemobj_tx_commit();
(void) pmemobj_tx_end();
return 0;
}
/*
* pmemlog_tell -- returns the current write point for the log
*/
long long
pmemlog_tell(PMEMlogpool *plp)
{
PMEMobjpool *pop = (PMEMobjpool *)plp;
struct base *bp = pmemobj_direct(pmemobj_root(pop,
sizeof(struct base)));
if (pmemobj_rwlock_rdlock(pop, &bp->rwlock) != 0)
return 0;
long long bytes_written = bp->bytes_written;
pmemobj_rwlock_unlock(pop, &bp->rwlock);
return bytes_written;
}
/*
* pmemlog_rewind -- discard all data, resetting a log memory pool to empty
*/
void
pmemlog_rewind(PMEMlogpool *plp)
{
PMEMobjpool *pop = (PMEMobjpool *)plp;
PMEMoid baseoid = pmemobj_root(pop, sizeof(struct base));
struct base *bp = pmemobj_direct(baseoid);
/* set the return point */
jmp_buf env;
if (setjmp(env)) {
/* end the transaction */
pmemobj_tx_end();
return;
}
/* begin a transaction, also acquiring the write lock for the log */
if (pmemobj_tx_begin(pop, env, TX_PARAM_RWLOCK, &bp->rwlock,
TX_PARAM_NONE))
return;
/* add the root object to the undo log */
pmemobj_tx_add_range(baseoid, 0, sizeof(struct base));
/* free all log nodes */
while (bp->head.off != 0) {
PMEMoid nextoid =
((struct log *)pmemobj_direct(bp->head))->hdr.next;
pmemobj_tx_free(bp->head);
bp->head = nextoid;
}
bp->head = OID_NULL;
bp->tail = OID_NULL;
bp->bytes_written = 0;
pmemobj_tx_commit();
(void) pmemobj_tx_end();
}
/*
* pmemlog_walk -- walk through all data in a log memory pool
*
* As this implementation holds the size of each entry, the chunksize is ignored
* and the process_chunk function gets the actual entry length.
*/
void
pmemlog_walk(PMEMlogpool *plp, size_t chunksize,
int (*process_chunk)(const void *buf, size_t len, void *arg), void *arg)
{
PMEMobjpool *pop = (PMEMobjpool *)plp;
struct base *bp = pmemobj_direct(pmemobj_root(pop,
sizeof(struct base)));
if (pmemobj_rwlock_rdlock(pop, &bp->rwlock) != 0)
return;
/* process all chunks */
struct log *next = pmemobj_direct(bp->head);
	while (next != NULL) {
		if (!(*process_chunk)(next->data, next->hdr.size, arg))
			break;	/* the callback requested a stop */
		next = pmemobj_direct(next->hdr.next);
	}
pmemobj_rwlock_unlock(pop, &bp->rwlock);
}
/*
* process_chunk -- (internal) process function for log_walk
*/
static int
process_chunk(const void *buf, size_t len, void *arg)
{
char *tmp = malloc(len + 1);
if (tmp == NULL) {
fprintf(stderr, "malloc error\n");
return 0;
}
memcpy(tmp, buf, len);
tmp[len] = '\0';
printf("log contains:\n");
printf("%s\n", tmp);
free(tmp);
return 1;
}
/*
* count_iovec -- (internal) count the number of iovec items
*/
static int
count_iovec(char *arg)
{
int count = 1;
char *pch = strchr(arg, ':');
while (pch != NULL) {
++count;
pch = strchr(++pch, ':');
}
return count;
}
/*
* fill_iovec -- (internal) fill out the iovec
*/
static void
fill_iovec(struct iovec *iov, char *arg)
{
char *pch = strtok(arg, ":");
while (pch != NULL) {
iov->iov_base = pch;
iov->iov_len = strlen((char *)iov->iov_base);
++iov;
pch = strtok(NULL, ":");
}
}
int
main(int argc, char *argv[])
{
	if (argc < 3) {
fprintf(stderr, "usage: %s [o,c] file [val...]\n", argv[0]);
return 1;
}
PMEMlogpool *plp;
if (strncmp(argv[1], "c", 1) == 0) {
plp = pmemlog_create(argv[2], POOL_SIZE, CREATE_MODE_RW);
} else if (strncmp(argv[1], "o", 1) == 0) {
plp = pmemlog_open(argv[2]);
} else {
fprintf(stderr, "usage: %s [o,c] file [val...]\n", argv[0]);
return 1;
}
if (plp == NULL) {
perror("pmemlog_create/pmemlog_open");
return 1;
}
/* process the command line arguments */
for (int i = 3; i < argc; i++) {
switch (*argv[i]) {
case 'a': {
printf("append: %s\n", argv[i] + 2);
if (pmemlog_append(plp, argv[i] + 2,
strlen(argv[i] + 2)))
fprintf(stderr, "pmemlog_append"
" error\n");
break;
}
case 'v': {
printf("appendv: %s\n", argv[i] + 2);
int count = count_iovec(argv[i] + 2);
			struct iovec *iov = calloc(count,
					sizeof(struct iovec));
			if (iov == NULL) {
				fprintf(stderr, "malloc error\n");
				return 1;
			}
			fill_iovec(iov, argv[i] + 2);
if (pmemlog_appendv(plp, iov, count))
fprintf(stderr, "pmemlog_appendv"
" error\n");
free(iov);
break;
}
case 'r': {
printf("rewind\n");
pmemlog_rewind(plp);
break;
}
case 'w': {
printf("walk\n");
pmemlog_walk(plp, 0, process_chunk, NULL);
break;
}
case 'n': {
printf("nbytes: %zu\n", pmemlog_nbyte(plp));
break;
}
case 't': {
printf("offset: %lld\n", pmemlog_tell(plp));
break;
}
default: {
fprintf(stderr, "unrecognized command %s\n",
argv[i]);
break;
}
};
}
/* all done */
pmemlog_close(plp);
return 0;
}
| 11,002 | 22.816017 | 80 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/examples/libpmemobj/pmemlog/obj_pmemlog_minimal.c
|
/*
* Copyright 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
 * obj_pmemlog_minimal.c -- alternate pmemlog implementation based on pmemobj
 *
 * usage: obj_pmemlog_minimal [co] file [cmd[:param]...]
*
* c - create file
* o - open file
*
* The "cmd" arguments match the pmemlog functions:
* a - append
* v - appendv
* r - rewind
* w - walk
* n - nbyte
* t - tell
* "a" and "v" require a parameter string(s) separated by a colon
*/
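/*
 * A hypothetical example session:
 *
 *	obj_pmemlog_minimal c /path/to/pool a:hello v:foo:bar w
 *
 * Note that in this implementation "n" and "t" always report 0.
 */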
#include <ex_common.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <string.h>
#include <stdio.h>
#include <assert.h>
#include <stdlib.h>
#include "libpmemobj.h"
#include "libpmem.h"
#include "libpmemlog.h"
#define POOL_SIZE ((size_t)(1024 * 1024 * 100))
POBJ_LAYOUT_BEGIN(obj_pmemlog_minimal);
POBJ_LAYOUT_TOID(obj_pmemlog_minimal, struct log);
POBJ_LAYOUT_END(obj_pmemlog_minimal);
/* struct log stores the entire log entry */
struct log {
size_t size;
char data[];
};
/* structure containing arguments for the alloc constructor */
struct create_args {
size_t size;
const void *src;
};
/*
* create_log_entry -- (internal) constructor for the log entry
*/
static int
create_log_entry(PMEMobjpool *pop, void *ptr, void *arg)
{
struct log *logptr = ptr;
struct create_args *carg = arg;
logptr->size = carg->size;
pmemobj_persist(pop, &logptr->size, sizeof(logptr->size));
pmemobj_memcpy_persist(pop, logptr->data, carg->src, carg->size);
return 0;
}
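/*
 * The constructor above runs before the allocation is made visible:
 * the size field and the data are persisted here, and only afterwards
 * does pmemobj_alloc() atomically link the object into the pool, so a
 * crash should never expose a partially written entry.
 */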
/*
* pmemlog_open -- pool open wrapper
*/
PMEMlogpool *
pmemlog_open(const char *path)
{
return (PMEMlogpool *)pmemobj_open(path,
POBJ_LAYOUT_NAME(obj_pmemlog_minimal));
}
/*
* pmemlog_create -- pool create wrapper
*/
PMEMlogpool *
pmemlog_create(const char *path, size_t poolsize, mode_t mode)
{
return (PMEMlogpool *)pmemobj_create(path,
POBJ_LAYOUT_NAME(obj_pmemlog_minimal),
poolsize, mode);
}
/*
* pool_close -- pool close wrapper
*/
void
pmemlog_close(PMEMlogpool *plp)
{
pmemobj_close((PMEMobjpool *)plp);
}
/*
* pmemlog_nbyte -- not available in this implementation
*/
size_t
pmemlog_nbyte(PMEMlogpool *plp)
{
/* N/A */
return 0;
}
/*
* pmemlog_append -- add data to a log memory pool
*/
int
pmemlog_append(PMEMlogpool *plp, const void *buf, size_t count)
{
PMEMobjpool *pop = (PMEMobjpool *)plp;
struct create_args args = { count, buf };
size_t obj_size = sizeof(size_t) + count;
/* alloc-construct to an internal list */
	PMEMoid obj;
	if (pmemobj_alloc(pop, &obj, obj_size,
			0, create_log_entry,
			&args))
		return -1;	/* allocation or constructor failed */
	return 0;
}
/*
* pmemlog_appendv -- add gathered data to a log memory pool
*/
int
pmemlog_appendv(PMEMlogpool *plp, const struct iovec *iov, int iovcnt)
{
PMEMobjpool *pop = (PMEMobjpool *)plp;
/* append the data */
for (int i = 0; i < iovcnt; ++i) {
struct create_args args = { iov[i].iov_len, iov[i].iov_base };
size_t obj_size = sizeof(size_t) + args.size;
/* alloc-construct to an internal list */
		if (pmemobj_alloc(pop, NULL, obj_size,
				0, create_log_entry, &args))
			return -1;	/* allocation or constructor failed */
}
return 0;
}
/*
* pmemlog_tell -- not available in this implementation
*/
long long
pmemlog_tell(PMEMlogpool *plp)
{
/* N/A */
return 0;
}
/*
* pmemlog_rewind -- discard all data, resetting a log memory pool to empty
*/
void
pmemlog_rewind(PMEMlogpool *plp)
{
PMEMobjpool *pop = (PMEMobjpool *)plp;
PMEMoid iter, next;
/* go through each list and remove all entries */
POBJ_FOREACH_SAFE(pop, iter, next) {
pmemobj_free(&iter);
}
}
/*
* pmemlog_walk -- walk through all data in a log memory pool
*
* As this implementation holds the size of each entry, the chunksize is ignored
* and the process_chunk function gets the actual entry length.
*/
void
pmemlog_walk(PMEMlogpool *plp, size_t chunksize,
int (*process_chunk)(const void *buf, size_t len, void *arg), void *arg)
{
PMEMobjpool *pop = (PMEMobjpool *)plp;
PMEMoid iter;
/* process each allocated object */
	POBJ_FOREACH(pop, iter) {
		struct log *logptr = pmemobj_direct(iter);
		if (!(*process_chunk)(logptr->data, logptr->size, arg))
			break;	/* the callback requested a stop */
	}
}
/*
* process_chunk -- (internal) process function for log_walk
*/
static int
process_chunk(const void *buf, size_t len, void *arg)
{
char *tmp = malloc(len + 1);
if (tmp == NULL) {
fprintf(stderr, "malloc error\n");
return 0;
}
memcpy(tmp, buf, len);
tmp[len] = '\0';
printf("log contains:\n");
printf("%s\n", tmp);
free(tmp);
return 1; /* continue */
}
/*
* count_iovec -- (internal) count the number of iovec items
*/
static int
count_iovec(char *arg)
{
int count = 1;
char *pch = strchr(arg, ':');
while (pch != NULL) {
++count;
pch = strchr(++pch, ':');
}
return count;
}
/*
* fill_iovec -- (internal) fill out the iovec
*/
static void
fill_iovec(struct iovec *iov, char *arg)
{
char *pch = strtok(arg, ":");
while (pch != NULL) {
iov->iov_base = pch;
iov->iov_len = strlen((char *)iov->iov_base);
++iov;
pch = strtok(NULL, ":");
}
}
int
main(int argc, char *argv[])
{
	if (argc < 3) {
fprintf(stderr, "usage: %s [o,c] file [val...]\n", argv[0]);
return 1;
}
PMEMlogpool *plp;
if (strncmp(argv[1], "c", 1) == 0) {
plp = pmemlog_create(argv[2], POOL_SIZE, CREATE_MODE_RW);
} else if (strncmp(argv[1], "o", 1) == 0) {
plp = pmemlog_open(argv[2]);
} else {
fprintf(stderr, "usage: %s [o,c] file [val...]\n", argv[0]);
return 1;
}
if (plp == NULL) {
perror("pmemlog_create/pmemlog_open");
return 1;
}
/* process the command line arguments */
for (int i = 3; i < argc; i++) {
switch (*argv[i]) {
case 'a': {
printf("append: %s\n", argv[i] + 2);
if (pmemlog_append(plp, argv[i] + 2,
strlen(argv[i] + 2)))
fprintf(stderr, "pmemlog_append"
" error\n");
break;
}
case 'v': {
printf("appendv: %s\n", argv[i] + 2);
int count = count_iovec(argv[i] + 2);
struct iovec *iov = calloc(count,
sizeof(struct iovec));
if (iov == NULL) {
fprintf(stderr, "malloc error\n");
return 1;
}
fill_iovec(iov, argv[i] + 2);
if (pmemlog_appendv(plp, iov, count))
fprintf(stderr, "pmemlog_appendv"
" error\n");
free(iov);
break;
}
case 'r': {
printf("rewind\n");
pmemlog_rewind(plp);
break;
}
case 'w': {
printf("walk\n");
pmemlog_walk(plp, 0, process_chunk, NULL);
break;
}
case 'n': {
printf("nbytes: %zu\n", pmemlog_nbyte(plp));
break;
}
case 't': {
printf("offset: %lld\n", pmemlog_tell(plp));
break;
}
default: {
fprintf(stderr, "unrecognized command %s\n",
argv[i]);
break;
}
};
}
/* all done */
pmemlog_close(plp);
return 0;
}
| 8,121 | 21.878873 | 80 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/examples/libpmemobj/pmemlog/obj_pmemlog_macros.c
|
/*
* Copyright 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* obj_pmemlog_macros.c -- alternate pmemlog implementation based on pmemobj
*
* usage: obj_pmemlog_macros [co] file [cmd[:param]...]
*
* c - create file
* o - open file
*
* The "cmd" arguments match the pmemlog functions:
* a - append
* v - appendv
* r - rewind
* w - walk
* n - nbyte
* t - tell
* "a" and "v" require a parameter string(s) separated by a colon
*/
#include <ex_common.h>
#include <sys/stat.h>
#include <string.h>
#include <stdio.h>
#include <assert.h>
#include <stdlib.h>
#include "libpmemobj.h"
#include "libpmem.h"
#include "libpmemlog.h"
#define POOL_SIZE ((size_t)(1024 * 1024 * 100))
POBJ_LAYOUT_BEGIN(obj_pmemlog_macros);
POBJ_LAYOUT_ROOT(obj_pmemlog_macros, struct base);
POBJ_LAYOUT_TOID(obj_pmemlog_macros, struct log);
POBJ_LAYOUT_END(obj_pmemlog_macros);
/* log entry header */
struct log_hdr {
TOID(struct log) next; /* object ID of the next log buffer */
size_t size; /* size of this log buffer */
};
/* struct log stores the entire log entry */
struct log {
struct log_hdr hdr; /* entry header */
char data[]; /* log entry data */
};
/* struct base keeps track of the beginning of the log list */
struct base {
TOID(struct log) head; /* object ID of the first log buffer */
TOID(struct log) tail; /* object ID of the last log buffer */
PMEMrwlock rwlock; /* lock covering entire log */
size_t bytes_written; /* number of bytes stored in the pool */
};
/*
* pmemlog_open -- pool open wrapper
*/
PMEMlogpool *
pmemlog_open(const char *path)
{
return (PMEMlogpool *)pmemobj_open(path,
POBJ_LAYOUT_NAME(obj_pmemlog_macros));
}
/*
* pmemlog_create -- pool create wrapper
*/
PMEMlogpool *
pmemlog_create(const char *path, size_t poolsize, mode_t mode)
{
return (PMEMlogpool *)pmemobj_create(path,
POBJ_LAYOUT_NAME(obj_pmemlog_macros),
poolsize, mode);
}
/*
* pool_close -- pool close wrapper
*/
void
pmemlog_close(PMEMlogpool *plp)
{
pmemobj_close((PMEMobjpool *)plp);
}
/*
* pmemlog_nbyte -- not available in this implementation
*/
size_t
pmemlog_nbyte(PMEMlogpool *plp)
{
/* N/A */
return 0;
}
/*
* pmemlog_append -- add data to a log memory pool
*/
int
pmemlog_append(PMEMlogpool *plp, const void *buf, size_t count)
{
PMEMobjpool *pop = (PMEMobjpool *)plp;
int retval = 0;
TOID(struct base) bp;
bp = POBJ_ROOT(pop, struct base);
/* begin a transaction, also acquiring the write lock for the log */
TX_BEGIN_PARAM(pop, TX_PARAM_RWLOCK, &D_RW(bp)->rwlock, TX_PARAM_NONE) {
/* allocate the new node to be inserted */
TOID(struct log) logp;
logp = TX_ALLOC(struct log, count + sizeof(struct log_hdr));
D_RW(logp)->hdr.size = count;
memcpy(D_RW(logp)->data, buf, count);
D_RW(logp)->hdr.next = TOID_NULL(struct log);
/* add the modified root object to the undo log */
TX_ADD(bp);
if (TOID_IS_NULL(D_RO(bp)->tail)) {
/* update head */
D_RW(bp)->head = logp;
} else {
/* add the modified tail entry to the undo log */
TX_ADD(D_RW(bp)->tail);
D_RW(D_RW(bp)->tail)->hdr.next = logp;
}
D_RW(bp)->tail = logp; /* update tail */
D_RW(bp)->bytes_written += count;
} TX_ONABORT {
retval = -1;
} TX_END
return retval;
}
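/*
 * Unlike the explicit setjmp()/pmemobj_tx_begin() variant in
 * obj_pmemlog.c, the TX_BEGIN_PARAM/TX_ONABORT/TX_END macros hide the
 * jump buffer management; on abort (e.g. a failed TX_ALLOC) control
 * resumes in the TX_ONABORT block and all TX_ADD-ed ranges are rolled
 * back.
 */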
/*
* pmemlog_appendv -- add gathered data to a log memory pool
*/
int
pmemlog_appendv(PMEMlogpool *plp, const struct iovec *iov, int iovcnt)
{
PMEMobjpool *pop = (PMEMobjpool *)plp;
int retval = 0;
TOID(struct base) bp;
bp = POBJ_ROOT(pop, struct base);
/* begin a transaction, also acquiring the write lock for the log */
TX_BEGIN_PARAM(pop, TX_PARAM_RWLOCK, &D_RW(bp)->rwlock, TX_PARAM_NONE) {
/* add the base object and tail entry to the undo log */
TX_ADD(bp);
if (!TOID_IS_NULL(D_RO(bp)->tail))
TX_ADD(D_RW(bp)->tail);
/* append the data */
for (int i = 0; i < iovcnt; ++i) {
char *buf = (char *)iov[i].iov_base;
size_t count = iov[i].iov_len;
/* allocate the new node to be inserted */
TOID(struct log) logp;
logp = TX_ALLOC(struct log,
count + sizeof(struct log_hdr));
D_RW(logp)->hdr.size = count;
memcpy(D_RW(logp)->data, buf, count);
D_RW(logp)->hdr.next = TOID_NULL(struct log);
/* update head or tail accordingly */
if (TOID_IS_NULL(D_RO(bp)->tail))
D_RW(bp)->head = logp;
else
D_RW(D_RW(bp)->tail)->hdr.next = logp;
/* update tail */
D_RW(bp)->tail = logp;
D_RW(bp)->bytes_written += count;
}
} TX_ONABORT {
retval = -1;
} TX_END
return retval;
}
/*
* pmemlog_tell -- returns the current write point for the log
*/
long long
pmemlog_tell(PMEMlogpool *plp)
{
TOID(struct base) bp;
bp = POBJ_ROOT((PMEMobjpool *)plp, struct base);
return D_RO(bp)->bytes_written;
}
/*
* pmemlog_rewind -- discard all data, resetting a log memory pool to empty
*/
void
pmemlog_rewind(PMEMlogpool *plp)
{
PMEMobjpool *pop = (PMEMobjpool *)plp;
TOID(struct base) bp;
bp = POBJ_ROOT(pop, struct base);
/* begin a transaction, also acquiring the write lock for the log */
TX_BEGIN_PARAM(pop, TX_PARAM_RWLOCK, &D_RW(bp)->rwlock, TX_PARAM_NONE) {
/* add the root object to the undo log */
TX_ADD(bp);
while (!TOID_IS_NULL(D_RO(bp)->head)) {
TOID(struct log) nextp;
nextp = D_RW(D_RW(bp)->head)->hdr.next;
TX_FREE(D_RW(bp)->head);
D_RW(bp)->head = nextp;
}
D_RW(bp)->head = TOID_NULL(struct log);
D_RW(bp)->tail = TOID_NULL(struct log);
D_RW(bp)->bytes_written = 0;
} TX_END
}
/*
* pmemlog_walk -- walk through all data in a log memory pool
*
* As this implementation holds the size of each entry, the chunksize is ignored
* and the process_chunk function gets the actual entry length.
*/
void
pmemlog_walk(PMEMlogpool *plp, size_t chunksize,
int (*process_chunk)(const void *buf, size_t len, void *arg), void *arg)
{
PMEMobjpool *pop = (PMEMobjpool *)plp;
TOID(struct base) bp;
bp = POBJ_ROOT(pop, struct base);
/* acquire a read lock */
if (pmemobj_rwlock_rdlock(pop, &D_RW(bp)->rwlock) != 0)
return;
TOID(struct log) next;
next = D_RO(bp)->head;
/* process all chunks */
	while (!TOID_IS_NULL(next)) {
		if (!(*process_chunk)(D_RO(next)->data,
				D_RO(next)->hdr.size, arg))
			break;	/* the callback requested a stop */
		next = D_RO(next)->hdr.next;
	}
pmemobj_rwlock_unlock(pop, &D_RW(bp)->rwlock);
}
/*
* process_chunk -- (internal) process function for log_walk
*/
static int
process_chunk(const void *buf, size_t len, void *arg)
{
char *tmp = (char *)malloc(len + 1);
if (tmp == NULL) {
fprintf(stderr, "malloc error\n");
return 0;
}
memcpy(tmp, buf, len);
tmp[len] = '\0';
printf("log contains:\n");
printf("%s\n", tmp);
free(tmp);
return 1; /* continue */
}
/*
* count_iovec -- (internal) count the number of iovec items
*/
static int
count_iovec(char *arg)
{
int count = 1;
char *pch = strchr(arg, ':');
while (pch != NULL) {
++count;
pch = strchr(++pch, ':');
}
return count;
}
/*
* fill_iovec -- (internal) fill out the iovec
*/
static void
fill_iovec(struct iovec *iov, char *arg)
{
char *pch = strtok(arg, ":");
while (pch != NULL) {
iov->iov_base = pch;
iov->iov_len = strlen((char *)iov->iov_base);
++iov;
pch = strtok(NULL, ":");
}
}
int
main(int argc, char *argv[])
{
	if (argc < 3) {
fprintf(stderr, "usage: %s [o,c] file [val...]\n", argv[0]);
return 1;
}
PMEMlogpool *plp;
if (strncmp(argv[1], "c", 1) == 0) {
plp = pmemlog_create(argv[2], POOL_SIZE, CREATE_MODE_RW);
} else if (strncmp(argv[1], "o", 1) == 0) {
plp = pmemlog_open(argv[2]);
} else {
fprintf(stderr, "usage: %s [o,c] file [val...]\n", argv[0]);
return 1;
}
if (plp == NULL) {
perror("pmemlog_create/pmemlog_open");
return 1;
}
/* process the command line arguments */
for (int i = 3; i < argc; i++) {
switch (*argv[i]) {
case 'a': {
printf("append: %s\n", argv[i] + 2);
if (pmemlog_append(plp, argv[i] + 2,
strlen(argv[i] + 2)))
fprintf(stderr, "pmemlog_append"
" error\n");
break;
}
case 'v': {
printf("appendv: %s\n", argv[i] + 2);
int count = count_iovec(argv[i] + 2);
struct iovec *iov = (struct iovec *)malloc(
count * sizeof(struct iovec));
if (iov == NULL) {
fprintf(stderr, "malloc error\n");
break;
}
fill_iovec(iov, argv[i] + 2);
if (pmemlog_appendv(plp, iov, count))
fprintf(stderr, "pmemlog_appendv"
" error\n");
free(iov);
break;
}
case 'r': {
printf("rewind\n");
pmemlog_rewind(plp);
break;
}
case 'w': {
printf("walk\n");
pmemlog_walk(plp, 0, process_chunk, NULL);
break;
}
case 'n': {
printf("nbytes: %zu\n", pmemlog_nbyte(plp));
break;
}
case 't': {
printf("offset: %lld\n", pmemlog_tell(plp));
break;
}
default: {
fprintf(stderr, "unrecognized command %s\n",
argv[i]);
break;
}
};
}
/* all done */
pmemlog_close(plp);
return 0;
}
| 10,382 | 23.430588 | 80 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/examples/libpmemobj/libart/arttree_examine.c
|
/*
* Copyright 2016, FUJITSU TECHNOLOGY SOLUTIONS GMBH
* Copyright 2016-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* ===========================================================================
*
* Filename: arttree_examine.c
*
* Description: implementation of examine function for ART tree structures
*
* Author: Andreas Bluemle, Dieter Kasper
* Andreas.Bluemle.external@ts.fujitsu.com
* dieter.kasper@ts.fujitsu.com
*
* Organization: FUJITSU TECHNOLOGY SOLUTIONS GMBH
*
* ===========================================================================
*/
#include <stdio.h>
#include <libgen.h>
#include <string.h>
#include <unistd.h>
#include <inttypes.h>
#include <stdlib.h>
#include <getopt.h>
#include <stdint.h>
#include <stdbool.h>
#include "arttree_structures.h"
/*
* examine context
*/
struct examine_ctx {
struct pmem_context *pmem_ctx;
char *offset_string;
uint64_t offset;
char *type_name;
int32_t type;
int32_t hexdump;
};
static struct examine_ctx *ex_ctx = NULL;
struct examine {
const char *name;
const char *brief;
int (*func)(char *, struct examine_ctx *, off_t);
void (*help)(char *);
};
/* local functions */
static int examine_parse_args(char *appname, int ac, char *av[],
struct examine_ctx *ex_ctx);
static struct examine *get_examine(char *type_name);
static void print_usage(char *appname);
static void dump_PMEMoid(char *prefix, PMEMoid *oid);
static int examine_PMEMoid(char *appname, struct examine_ctx *ctx, off_t off);
static int examine_art_tree_root(char *appname,
struct examine_ctx *ctx, off_t off);
static int examine_art_node_u(char *appname,
struct examine_ctx *ctx, off_t off);
static int examine_art_node4(char *appname,
struct examine_ctx *ctx, off_t off);
static int examine_art_node16(char *appname,
struct examine_ctx *ctx, off_t off);
static int examine_art_node48(char *appname,
struct examine_ctx *ctx, off_t off);
static int examine_art_node256(char *appname,
struct examine_ctx *ctx, off_t off);
#if 0 /* XXX */
static int examine_art_node(char *appname,
struct examine_ctx *ctx, off_t off);
#else
static int examine_art_node(art_node *an);
#endif
static int examine_art_leaf(char *appname,
struct examine_ctx *ctx, off_t off);
static int examine_var_string(char *appname,
struct examine_ctx *ctx, off_t off);
/* global visible interface */
void arttree_examine_help(char *appname);
int arttree_examine_func(char *appname,
struct pmem_context *ctx, int ac, char *av[]);
static const char *arttree_examine_help_str =
"Examine data structures (objects) of ART tree\n"
"Arguments: <offset> <type>\n"
" <offset> offset of object in pmem file\n"
" <type> one of art_tree_root, art_node_u, art_node,"
" art_node4, art_node16, art_node48, art_node256, art_leaf\n"
;
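/*
 * For example (hypothetical offsets), within the interactive examiner:
 *
 *	examine 0x3c0300 art_tree_root
 *	examine -x 0x3c1000 art_node4
 */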
static const struct option long_options[] = {
{"hexdump", no_argument, NULL, 'x'},
{NULL, 0, NULL, 0 },
};
static struct examine ex_funcs[] = {
{
.name = "PMEMobj",
.brief = "examine PMEMoid structure",
.func = examine_PMEMoid,
.help = NULL,
},
{
.name = "art_tree_root",
.brief = "examine art_tree_root structure",
.func = examine_art_tree_root,
.help = NULL,
},
{
.name = "art_node_u",
.brief = "examine art_node_u structure",
.func = examine_art_node_u,
.help = NULL,
},
{
.name = "art_node4",
.brief = "examine art_node4 structure",
.func = examine_art_node4,
.help = NULL,
},
{
.name = "art_node16",
.brief = "examine art_node16 structure",
.func = examine_art_node16,
.help = NULL,
},
{
.name = "art_node48",
.brief = "examine art_node48 structure",
.func = examine_art_node48,
.help = NULL,
},
{
.name = "art_node256",
.brief = "examine art_node256 structure",
.func = examine_art_node256,
.help = NULL,
},
{
.name = "art_leaf",
.brief = "examine art_leaf structure",
.func = examine_art_leaf,
.help = NULL,
},
{
.name = "var_string",
.brief = "examine var_string structure",
.func = examine_var_string,
.help = NULL,
},
};
/*
* number of arttree examine commands
*/
#define COMMANDS_NUMBER (sizeof(ex_funcs) / sizeof(ex_funcs[0]))
void
arttree_examine_help(char *appname)
{
printf("%s %s\n", appname, arttree_examine_help_str);
}
int
arttree_examine_func(char *appname, struct pmem_context *ctx,
int ac, char *av[])
{
int errors = 0;
off_t offset;
struct examine *ex;
if (ctx == NULL) {
return -1;
}
if (ex_ctx == NULL) {
ex_ctx = (struct examine_ctx *)
calloc(1, sizeof(struct examine_ctx));
if (ex_ctx == NULL) {
return -1;
}
}
ex_ctx->pmem_ctx = ctx;
if (examine_parse_args(appname, ac, av, ex_ctx) != 0) {
fprintf(stderr, "%s::%s: error parsing arguments\n",
appname, __FUNCTION__);
errors++;
}
if (!errors) {
offset = (off_t)strtol(ex_ctx->offset_string, NULL, 0);
ex = get_examine(ex_ctx->type_name);
if (ex != NULL) {
ex->func(appname, ex_ctx, offset);
}
}
return errors;
}
static int
examine_parse_args(char *appname, int ac, char *av[],
struct examine_ctx *ex_ctx)
{
int ret = 0;
int opt;
optind = 0;
while ((opt = getopt_long(ac, av, "x", long_options, NULL)) != -1) {
switch (opt) {
case 'x':
ex_ctx->hexdump = 1;
break;
default:
print_usage(appname);
ret = 1;
}
}
	if (ret == 0) {
		if (optind + 1 >= ac) {
			print_usage(appname);
			return 1;
		}
		ex_ctx->offset_string = strdup(av[optind + 0]);
		ex_ctx->type_name = strdup(av[optind + 1]);
	}
	return ret;
}
static void
print_usage(char *appname)
{
printf("%s: examine <offset> <type>\n", appname);
}
/*
* get_command -- returns command for specified command name
*/
static struct examine *
get_examine(char *type_name)
{
int i;
if (type_name == NULL) {
return NULL;
}
for (i = 0; i < COMMANDS_NUMBER; i++) {
if (strcmp(type_name, ex_funcs[i].name) == 0)
return &ex_funcs[i];
}
return NULL;
}
static void
dump_PMEMoid(char *prefix, PMEMoid *oid)
{
printf("%s { PMEMoid pool_uuid_lo %" PRIx64
" off 0x%" PRIx64 " = %" PRId64 " }\n",
prefix, oid->pool_uuid_lo, oid->off, oid->off);
}
static int
examine_PMEMoid(char *appname, struct examine_ctx *ctx, off_t off)
{
void *p = (void *)(ctx->pmem_ctx->addr + off);
dump_PMEMoid("PMEMoid", p);
return 0;
}
static int
examine_art_tree_root(char *appname, struct examine_ctx *ctx, off_t off)
{
art_tree_root *tree_root = (art_tree_root *)(ctx->pmem_ctx->addr + off);
printf("at offset 0x%llx, art_tree_root {\n", (long long)off);
printf(" size %d\n", tree_root->size);
dump_PMEMoid(" art_node_u", (PMEMoid *)&(tree_root->root));
printf("\n};\n");
return 0;
}
static int
examine_art_node_u(char *appname, struct examine_ctx *ctx, off_t off)
{
art_node_u *node_u = (art_node_u *)(ctx->pmem_ctx->addr + off);
printf("at offset 0x%llx, art_node_u {\n", (long long)off);
printf(" type %d [%s]\n", node_u->art_node_type,
art_node_names[node_u->art_node_type]);
printf(" tag %d\n", node_u->art_node_tag);
switch (node_u->art_node_type) {
case ART_NODE4:
dump_PMEMoid(" art_node4 oid",
&(node_u->u.an4.oid));
break;
case ART_NODE16:
dump_PMEMoid(" art_node16 oid",
&(node_u->u.an16.oid));
break;
case ART_NODE48:
dump_PMEMoid(" art_node48 oid",
&(node_u->u.an48.oid));
break;
case ART_NODE256:
dump_PMEMoid(" art_node256 oid",
&(node_u->u.an256.oid));
break;
case ART_LEAF:
dump_PMEMoid(" art_leaf oid",
&(node_u->u.al.oid));
break;
default: printf("ERROR: unknown node type\n");
break;
}
printf("\n};\n");
return 0;
}
static int
examine_art_node4(char *appname, struct examine_ctx *ctx, off_t off)
{
art_node4 *an4 = (art_node4 *)(ctx->pmem_ctx->addr + off);
printf("at offset 0x%llx, art_node4 {\n", (long long)off);
examine_art_node(&(an4->n));
printf("keys [");
for (int i = 0; i < 4; i++) {
printf("%c ", an4->keys[i]);
}
printf("]\nnodes [\n");
for (int i = 0; i < 4; i++) {
dump_PMEMoid(" art_node_u oid",
&(an4->children[i].oid));
}
printf("\n]");
printf("\n};\n");
return 0;
}
static int
examine_art_node16(char *appname, struct examine_ctx *ctx, off_t off)
{
art_node16 *an16 = (art_node16 *)(ctx->pmem_ctx->addr + off);
printf("at offset 0x%llx, art_node16 {\n", (long long)off);
examine_art_node(&(an16->n));
printf("keys [");
for (int i = 0; i < 16; i++) {
printf("%c ", an16->keys[i]);
}
printf("]\nnodes [\n");
for (int i = 0; i < 16; i++) {
dump_PMEMoid(" art_node_u oid",
&(an16->children[i].oid));
}
printf("\n]");
printf("\n};\n");
return 0;
}
static int
examine_art_node48(char *appname, struct examine_ctx *ctx, off_t off)
{
art_node48 *an48 = (art_node48 *)(ctx->pmem_ctx->addr + off);
printf("at offset 0x%llx, art_node48 {\n", (long long)off);
examine_art_node(&(an48->n));
printf("keys [");
for (int i = 0; i < 256; i++) {
printf("%c ", an48->keys[i]);
}
printf("]\nnodes [\n");
for (int i = 0; i < 48; i++) {
dump_PMEMoid(" art_node_u oid",
&(an48->children[i].oid));
}
printf("\n]");
printf("\n};\n");
return 0;
}
static int
examine_art_node256(char *appname, struct examine_ctx *ctx, off_t off)
{
art_node256 *an256 = (art_node256 *)(ctx->pmem_ctx->addr + off);
printf("at offset 0x%llx, art_node256 {\n", (long long)off);
examine_art_node(&(an256->n));
printf("nodes [\n");
for (int i = 0; i < 256; i++) {
dump_PMEMoid(" art_node_u oid",
&(an256->children[i].oid));
}
printf("\n]");
printf("\n};\n");
return 0;
}
#if 0 /* XXX */
static int
examine_art_node(char *appname, struct examine_ctx *ctx, off_t off)
{
art_node *an = (art_node *)(ctx->pmem_ctx->addr + off);
printf("at offset 0x%llx, art_node {\n", (long long)off);
printf(" num_children %d\n", an->num_children);
printf(" partial_len %d\n", an->partial_len);
printf(" partial [");
for (int i = 0; i < 10; i++) {
printf("%c ", an->partial[i]);
}
printf("\n]");
printf("\n};\n");
return 0;
}
#else
static int
examine_art_node(art_node *an)
{
printf("art_node {\n");
printf(" num_children %d\n", an->num_children);
printf(" partial_len %" PRIu32 "\n", an->partial_len);
printf(" partial [");
for (int i = 0; i < 10; i++) {
printf("%c ", an->partial[i]);
}
printf("\n]");
printf("\n};\n");
return 0;
}
#endif
static int
examine_art_leaf(char *appname, struct examine_ctx *ctx, off_t off)
{
art_leaf *al = (art_leaf *)(ctx->pmem_ctx->addr + off);
printf("at offset 0x%llx, art_leaf {\n", (long long)off);
dump_PMEMoid(" var_string key oid ", &(al->key.oid));
dump_PMEMoid(" var_string value oid", &(al->value.oid));
printf("\n};\n");
return 0;
}
static int
examine_var_string(char *appname, struct examine_ctx *ctx, off_t off)
{
var_string *vs = (var_string *)(ctx->pmem_ctx->addr + off);
printf("at offset 0x%llx, var_string {\n", (long long)off);
printf(" len %zu s [%s]", vs->len, vs->s);
printf("\n};\n");
return 0;
}
| 12,467 | 24.341463 | 78 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/examples/libpmemobj/libart/arttree_structures.c
|
/*
* Copyright 2016, FUJITSU TECHNOLOGY SOLUTIONS GMBH
* Copyright 2016-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* ===========================================================================
*
* Filename: arttree_structures.c
*
* Description: Examine pmem structures; structures and unions taken from
* the preprocessor output of a libpmemobj compatible program.
*
* Author: Andreas Bluemle, Dieter Kasper
* Andreas.Bluemle.external@ts.fujitsu.com
* dieter.kasper@ts.fujitsu.com
*
* Organization: FUJITSU TECHNOLOGY SOLUTIONS GMBH
*
* ===========================================================================
*/
#ifdef __FreeBSD__
#define _WITH_GETLINE
#endif
#include <stdio.h>
#include <fcntl.h>
#include <libgen.h>
#include <string.h>
#include <unistd.h>
#include <stdlib.h>
#include <getopt.h>
#include <stdint.h>
#include <stdbool.h>
#include <assert.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include "arttree_structures.h"
#include <stdarg.h>
#define APPNAME "examine_arttree"
#define SRCVERSION "0.2"
size_t art_node_sizes[art_node_types] = {
sizeof(art_node4),
sizeof(art_node16),
sizeof(art_node48),
sizeof(art_node256),
sizeof(art_leaf),
sizeof(art_node_u),
sizeof(art_node),
sizeof(art_tree_root),
sizeof(var_string),
};
char *art_node_names[art_node_types] = {
"art_node4",
"art_node16",
"art_node48",
"art_node256",
"art_leaf",
"art_node_u",
"art_node",
"art_tree_root",
"var_string"
};
/*
* long_options -- command line arguments
*/
static const struct option long_options[] = {
{"help", no_argument, NULL, 'h'},
{NULL, 0, NULL, 0 },
};
/*
* command -- struct for commands definition
*/
struct command {
const char *name;
const char *brief;
int (*func)(char *, struct pmem_context *, int, char *[]);
void (*help)(char *);
};
/*
* number of arttree_structures commands
*/
#define COMMANDS_NUMBER (sizeof(commands) / sizeof(commands[0]))
static void print_help(char *appname);
static void print_usage(char *appname);
static void print_version(char *appname);
static int quit_func(char *appname, struct pmem_context *ctx,
int argc, char *argv[]);
static void quit_help(char *appname);
static int set_root_func(char *appname, struct pmem_context *ctx,
int argc, char *argv[]);
static void set_root_help(char *appname);
static int help_func(char *appname, struct pmem_context *ctx,
int argc, char *argv[]);
static void help_help(char *appname);
static struct command *get_command(char *cmd_str);
static int ctx_init(struct pmem_context *ctx, char *filename);
static int arttree_structures_func(char *appname, struct pmem_context *ctx,
int ac, char *av[]);
static void arttree_structures_help(char *appname);
static int arttree_info_func(char *appname, struct pmem_context *ctx,
int ac, char *av[]);
static void arttree_info_help(char *appname);
extern int arttree_examine_func(char *appname, struct pmem_context *ctx,
	int ac, char *av[]);
extern void arttree_examine_help(char *appname);
extern int arttree_search_func(char *appname, struct pmem_context *ctx,
	int ac, char *av[]);
extern void arttree_search_help(char *appname);
void outv_err(const char *fmt, ...);
void outv_err_vargs(const char *fmt, va_list ap);
static struct command commands[] = {
{
.name = "structures",
.brief = "print information about ART structures",
.func = arttree_structures_func,
.help = arttree_structures_help,
},
{
.name = "info",
.brief = "print information and statistics"
" about an ART tree pool",
.func = arttree_info_func,
.help = arttree_info_help,
},
{
.name = "examine",
.brief = "examine data structures from an ART tree",
.func = arttree_examine_func,
.help = arttree_examine_help,
},
{
.name = "search",
.brief = "search for a key in an ART tree",
.func = arttree_search_func,
.help = arttree_search_help,
},
{
.name = "set_root",
.brief = "define offset of root of an ART tree",
.func = set_root_func,
.help = set_root_help,
},
{
.name = "help",
.brief = "print help text about a command",
.func = help_func,
.help = help_help,
},
{
.name = "quit",
.brief = "quit ART tree structure examiner",
.func = quit_func,
.help = quit_help,
},
};
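/*
 * A hypothetical interactive session:
 *
 *	$ examine_arttree /path/to/pool
 *	> structures
 *	> set_root 0x3c0000
 *	> examine 0x3c0000 art_tree_root
 *	> quit
 */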
static struct pmem_context ctx;
/*
* outv_err -- print error message
*/
void
outv_err(const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
outv_err_vargs(fmt, ap);
va_end(ap);
}
/*
* outv_err_vargs -- print error message
*/
void
outv_err_vargs(const char *fmt, va_list ap)
{
fprintf(stderr, "error: ");
vfprintf(stderr, fmt, ap);
if (!strchr(fmt, '\n'))
fprintf(stderr, "\n");
}
/*
* print_usage -- prints usage message
*/
static void
print_usage(char *appname)
{
printf("usage: %s [--help] <pmem file> <command> [<args>]\n", appname);
}
/*
* print_version -- prints version message
*/
static void
print_version(char *appname)
{
printf("%s %s\n", appname, SRCVERSION);
}
/*
* print_help -- prints help message
*/
static void
print_help(char *appname)
{
print_usage(appname);
print_version(appname);
printf("\n");
printf("Options:\n");
printf(" -h, --help display this help and exit\n");
printf("\n");
printf("The available commands are:\n");
int i;
for (i = 0; i < COMMANDS_NUMBER; i++)
printf("%s\t- %s\n", commands[i].name, commands[i].brief);
printf("\n");
}
/*
* set_root_help -- prints help message for set root command
*/
static void
set_root_help(char *appname)
{
printf("Usage: set_root <offset>\n");
printf(" define the offset of the art tree root\n");
}
/*
* set_root_func -- set_root define the offset of the art tree root
*/
static int
set_root_func(char *appname, struct pmem_context *ctx, int argc, char *argv[])
{
int retval = 0;
uint64_t root_offset;
if (argc == 2) {
		root_offset = strtoull(argv[1], NULL, 0);
ctx->art_tree_root_offset = root_offset;
} else {
set_root_help(appname);
retval = 1;
}
return retval;
}
/*
* quit_help -- prints help message for quit command
*/
static void
quit_help(char *appname)
{
printf("Usage: quit\n");
printf(" terminate arttree structure examiner\n");
}
/*
* quit_func -- quit arttree structure examiner
*/
static int
quit_func(char *appname, struct pmem_context *ctx, int argc, char *argv[])
{
printf("\n");
exit(0);
return 0;
}
/*
* help_help -- prints help message for help command
*/
static void
help_help(char *appname)
{
printf("Usage: %s help <command>\n", appname);
}
/*
* help_func -- prints help message for specified command
*/
static int
help_func(char *appname, struct pmem_context *ctx, int argc, char *argv[])
{
if (argc > 1) {
char *cmd_str = argv[1];
struct command *cmdp = get_command(cmd_str);
if (cmdp && cmdp->help) {
cmdp->help(appname);
return 0;
} else {
outv_err("No help text for '%s' command\n", cmd_str);
return -1;
}
} else {
print_help(appname);
return -1;
}
}
static const char *arttree_structures_help_str =
"Show information about known ART tree structures\n"
;
static void
arttree_structures_help(char *appname)
{
printf("%s %s\n", appname, arttree_structures_help_str);
}
static int
arttree_structures_func(char *appname, struct pmem_context *ctx,
int ac, char *av[])
{
(void) appname;
(void) ac;
(void) av;
printf(
"typedef struct pmemoid {\n"
" uint64_t pool_uuid_lo;\n"
" uint64_t off;\n"
"} PMEMoid;\n");
printf("sizeof(PMEMoid) = %zu\n\n\n", sizeof(PMEMoid));
printf(
"struct _art_node_u; typedef struct _art_node_u art_node_u;\n"
"struct _art_node_u { \n"
" uint8_t art_node_type; \n"
" uint8_t art_node_tag; \n"
"};\n");
printf("sizeof(art_node_u) = %zu\n\n\n", sizeof(art_node_u));
printf(
"struct _art_node; typedef struct _art_node art_node;\n"
"struct _art_node {\n"
" uint8_t type;\n"
" uint8_t num_children;\n"
" uint32_t partial_len;\n"
" unsigned char partial[10];\n"
"};\n");
printf("sizeof(art_node) = %zu\n\n\n", sizeof(art_node));
	printf(
	    "typedef uint8_t _toid_art_node_u_toid_type_num[8];\n");
	printf("sizeof(_toid_art_node_u_toid_type_num[8]) = %zu\n\n\n",
	    sizeof(_toid_art_node_u_toid_type_num[8]));
printf(
"union _toid_art_node_u_toid {\n"
" PMEMoid oid;\n"
" art_node_u *_type;\n"
" _toid_art_node_u_toid_type_num *_type_num;\n"
"};\n");
printf("sizeof(union _toid_art_node_u_toid) = %zu\n\n\n",
sizeof(union _toid_art_node_u_toid));
printf(
"typedef uint8_t _toid_art_node_toid_type_num[8];\n");
printf("sizeof(_toid_art_node_toid_type_num[8]) = %zu\n\n\n",
sizeof(_toid_art_node_toid_type_num[8]));
printf(
"union _toid_art_node_toid {\n"
" PMEMoid oid; \n"
" art_node *_type; \n"
" _toid_art_node_toid_type_num *_type_num;\n"
"};\n");
printf("sizeof(union _toid_art_node_toid) = %zu\n\n\n",
sizeof(union _toid_art_node_toid));
printf(
"struct _art_node4; typedef struct _art_node4 art_node4;\n"
"struct _art_node4 {\n"
" art_node n;\n"
" unsigned char keys[4];\n"
" union _toid_art_node_u_toid children[4];\n"
"};\n");
printf("sizeof(art_node4) = %zu\n\n\n", sizeof(art_node4));
printf(
"struct _art_node16; typedef struct _art_node16 art_node16;\n"
"struct _art_node16 {\n"
" art_node n;\n"
" unsigned char keys[16];\n"
" union _toid_art_node_u_toid children[16];\n"
"};\n");
printf("sizeof(art_node16) = %zu\n\n\n", sizeof(art_node16));
printf(
"struct _art_node48; typedef struct _art_node48 art_node48;\n"
"struct _art_node48 {\n"
" art_node n;\n"
" unsigned char keys[256];\n"
" union _toid_art_node_u_toid children[48];\n"
"};\n");
printf("sizeof(art_node48) = %zu\n\n\n", sizeof(art_node48));
printf(
"struct _art_node256; typedef struct _art_node256 art_node256;\n"
"struct _art_node256 {\n"
" art_ndoe n;\n"
" union _toid_art_node_u_toid children[256];\n"
"};\n");
printf("sizeof(art_node256) = %zu\n\n\n", sizeof(art_node256));
printf(
"struct _art_leaf; typedef struct _art_leaf art_leaf;\n"
"struct _art_leaf {\n"
" union _toid_var_string_toid value;\n"
" union _toid_var_string_toid key;\n"
"};\n");
printf("sizeof(art_leaf) = %zu\n\n\n", sizeof(art_leaf));
return 0;
}
static const char *arttree_info_help_str =
"Show information about known ART tree structures\n"
;
static void
arttree_info_help(char *appname)
{
printf("%s %s\n", appname, arttree_info_help_str);
}
static int
arttree_info_func(char *appname, struct pmem_context *ctx, int ac, char *av[])
{
printf("%s: %s not yet implemented\n", appname, __FUNCTION__);
return 0;
}
/*
* get_command -- returns command for specified command name
*/
static struct command *
get_command(char *cmd_str)
{
int i;
if (cmd_str == NULL) {
return NULL;
}
for (i = 0; i < COMMANDS_NUMBER; i++) {
if (strcmp(cmd_str, commands[i].name) == 0)
return &commands[i];
}
return NULL;
}
static int
ctx_init(struct pmem_context *ctx, char *filename)
{
int errors = 0;
if (filename == NULL)
errors++;
if (ctx == NULL)
errors++;
if (errors)
return errors;
ctx->filename = strdup(filename);
assert(ctx->filename != NULL);
ctx->fd = -1;
ctx->addr = NULL;
ctx->art_tree_root_offset = 0;
if (access(ctx->filename, F_OK) != 0)
return 1;
if ((ctx->fd = open(ctx->filename, O_RDONLY)) == -1)
return 1;
struct stat stbuf;
if (fstat(ctx->fd, &stbuf) < 0)
return 1;
ctx->psize = stbuf.st_size;
if ((ctx->addr = mmap(NULL, ctx->psize, PROT_READ,
MAP_SHARED, ctx->fd, 0)) == MAP_FAILED)
return 1;
return 0;
}
static void
ctx_fini(struct pmem_context *ctx)
{
munmap(ctx->addr, ctx->psize);
close(ctx->fd);
free(ctx->filename);
}
int
main(int ac, char *av[])
{
int opt;
int option_index;
int ret = 0;
	size_t len = 0;
ssize_t read;
char *cmd_str;
char *args[20];
int nargs;
char *line;
struct command *cmdp = NULL;
while ((opt = getopt_long(ac, av, "h",
long_options, &option_index)) != -1) {
switch (opt) {
case 'h':
print_help(APPNAME);
return 0;
default:
print_usage(APPNAME);
return -1;
}
}
if (optind >= ac) {
fprintf(stderr, "ERROR: missing arguments\n");
print_usage(APPNAME);
return -1;
}
ctx_init(&ctx, av[optind]);
if (optind + 1 < ac) {
/* execute command as given on command line */
cmd_str = av[optind + 1];
cmdp = get_command(cmd_str);
if (cmdp != NULL) {
ret = cmdp->func(APPNAME, &ctx, ac - 2, av + 2);
}
} else {
/* interactive mode: read commands and execute them */
line = NULL;
printf("\n> ");
while ((read = getline(&line, &len, stdin)) != -1) {
if (line[read - 1] == '\n') {
line[read - 1] = '\0';
}
args[0] = strtok(line, " ");
cmdp = get_command(args[0]);
if (cmdp == NULL) {
printf("[%s]: command not supported\n",
args[0] ? args[0] : "NULL");
printf("\n> ");
continue;
}
nargs = 1;
while (1) {
args[nargs] = strtok(NULL, " ");
if (args[nargs] == NULL) {
break;
}
nargs++;
}
ret = cmdp->func(APPNAME, &ctx, nargs, args);
printf("\n> ");
}
if (line != NULL) {
free(line);
}
}
ctx_fini(&ctx);
return ret;
}
| 14,727 | 22.754839 | 79 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/examples/libpmemobj/libart/art.c
|
/*
* Copyright 2016, FUJITSU TECHNOLOGY SOLUTIONS GMBH
* Copyright 2012, Armon Dadgar. All rights reserved.
* Copyright 2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* ===========================================================================
*
* Filename: art.c
*
* Description: implement ART tree using libpmemobj based on libart
*
* Author: Andreas Bluemle, Dieter Kasper
* Andreas.Bluemle.external@ts.fujitsu.com
* dieter.kasper@ts.fujitsu.com
*
* Organization: FUJITSU TECHNOLOGY SOLUTIONS GMBH
* ============================================================================
*/
/*
* based on https://github.com/armon/libart/src/art.c
*/
#include <assert.h>
#include <errno.h>
#include <unistd.h>
#include <string.h>
#include <strings.h>
#include <stdio.h>
#include <stdlib.h>
#include <limits.h>
#include <stdbool.h>
#include <fcntl.h>
#include <emmintrin.h>
#include <sys/types.h>
#include "libpmemobj.h"
#include "art.h"
TOID(var_string) null_var_string;
TOID(art_leaf) null_art_leaf;
TOID(art_node_u) null_art_node_u;
int art_tree_init(PMEMobjpool *pop, int *newpool);
TOID(art_node_u) make_leaf(PMEMobjpool *pop, const unsigned char *key,
int key_len, void *value, int val_len);
int fill_leaf(PMEMobjpool *pop, TOID(art_leaf) al, const unsigned char *key,
int key_len, void *value, int val_len);
TOID(art_node_u) alloc_node(PMEMobjpool *pop, art_node_type node_type);
TOID(var_string) art_insert(PMEMobjpool *pop, const unsigned char *key,
int key_len, void *value, int val_len);
TOID(var_string) art_delete(PMEMobjpool *pop, const unsigned char *key,
int key_len);
static TOID(var_string) recursive_insert(PMEMobjpool *pop,
TOID(art_node_u) n, TOID(art_node_u) *ref,
const unsigned char *key, int key_len,
void *value, int val_len, int depth, int *old_val);
static TOID(art_leaf) recursive_delete(PMEMobjpool *pop,
TOID(art_node_u) n, TOID(art_node_u) *ref,
const unsigned char *key, int key_len, int depth);
static int leaf_matches(TOID(art_leaf) n, const unsigned char *key,
int key_len, int depth);
static int longest_common_prefix(TOID(art_leaf) l1, TOID(art_leaf) l2,
int depth);
static int prefix_mismatch(TOID(art_node_u) n, unsigned char *key,
int key_len, int depth);
#ifdef LIBART_ITER_PREFIX
static int leaf_prefix_matches(TOID(art_leaf) n,
const unsigned char *prefix, int prefix_len);
#endif
static TOID(art_leaf) minimum(TOID(art_node_u) n_u);
static TOID(art_leaf) maximum(TOID(art_node_u) n_u);
static void copy_header(art_node *dest, art_node *src);
static void add_child(PMEMobjpool *pop, TOID(art_node_u) n,
TOID(art_node_u) *ref, unsigned char c,
TOID(art_node_u) child);
static void add_child4(PMEMobjpool *pop, TOID(art_node4) n,
TOID(art_node_u) *ref, unsigned char c,
TOID(art_node_u) child);
static void add_child16(PMEMobjpool *pop, TOID(art_node16) n,
TOID(art_node_u) *ref, unsigned char c,
TOID(art_node_u) child);
static void add_child48(PMEMobjpool *pop, TOID(art_node48) n,
TOID(art_node_u) *ref, unsigned char c,
TOID(art_node_u) child);
static void add_child256(PMEMobjpool *pop, TOID(art_node256) n,
TOID(art_node_u) *ref, unsigned char c,
TOID(art_node_u) child);
static void remove_child(PMEMobjpool *pop, TOID(art_node_u) n,
TOID(art_node_u) *ref, unsigned char c,
TOID(art_node_u) *l);
static void remove_child4(PMEMobjpool *pop, TOID(art_node4) n,
TOID(art_node_u) *ref, TOID(art_node_u) *l);
static void remove_child16(PMEMobjpool *pop, TOID(art_node16) n,
TOID(art_node_u) *ref, TOID(art_node_u) *l);
static void remove_child48(PMEMobjpool *pop, TOID(art_node48) n,
TOID(art_node_u) *ref, unsigned char c);
static void remove_child256(PMEMobjpool *pop, TOID(art_node256) n,
TOID(art_node_u) *ref, unsigned char c);
static TOID(art_node_u)* find_child(TOID(art_node_u) n, unsigned char c);
static int check_prefix(const art_node *n, const unsigned char *key,
int key_len, int depth);
TOID(art_leaf) art_minimum(TOID(struct art_tree_root) t);
TOID(art_leaf) art_maximum(TOID(struct art_tree_root) t);
#if 0
static void destroy_node(TOID(art_node_u) n_u);
#endif
int art_iter(PMEMobjpool *pop, art_callback cb, void *data);
static void PMEMOIDcopy(PMEMoid *dest, const PMEMoid *src, const int n);
static void PMEMOIDmove(PMEMoid *dest, PMEMoid *src, const int n);
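/*
 * PMEMOIDcopy/PMEMOIDmove copy an array of n PMEMoids; the move
 * variant copies backwards when dest is above src, so overlapping
 * ranges are handled the way memmove handles them.
 */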
static void
PMEMOIDcopy(PMEMoid *dest, const PMEMoid *src, const int n)
{
int i;
for (i = 0; i < n; i++) {
dest[i] = src[i];
}
}
static void
PMEMOIDmove(PMEMoid *dest, PMEMoid *src, const int n)
{
int i;
if (dest > src) {
for (i = n - 1; i >= 0; i--) {
dest[i] = src[i];
}
} else {
for (i = 0; i < n; i++) {
dest[i] = src[i];
}
}
}
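/*
 * Transactionally allocates a zeroed art_node_u of the requested
 * type together with the type-specific node it wraps.
 */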
TOID(art_node_u)
alloc_node(PMEMobjpool *pop, art_node_type node_type)
{
TOID(art_node_u) node;
TOID(art_node4) an4;
TOID(art_node16) an16;
TOID(art_node48) an48;
TOID(art_node256) an256;
TOID(art_leaf) al;
node = TX_ZNEW(art_node_u);
D_RW(node)->art_node_type = (uint8_t)node_type;
switch (node_type) {
case NODE4:
an4 = TX_ZNEW(art_node4);
D_RW(node)->u.an4 = an4;
break;
case NODE16:
an16 = TX_ZNEW(art_node16);
D_RW(node)->u.an16 = an16;
break;
case NODE48:
an48 = TX_ZNEW(art_node48);
D_RW(node)->u.an48 = an48;
break;
case NODE256:
an256 = TX_ZNEW(art_node256);
D_RW(node)->u.an256 = an256;
break;
case art_leaf_t:
al = TX_ZNEW(art_leaf);
D_RW(node)->u.al = al;
break;
default:
/* invalid node type */
D_RW(node)->art_node_type = (uint8_t)art_node_types;
break;
}
return node;
}
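/*
 * Initializes the tree root in the pool; the root object is reset
 * only when the pool has just been created.
 */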
int
art_tree_init(PMEMobjpool *pop, int *newpool)
{
int errors = 0;
TOID(struct art_tree_root) root;
if (pop == NULL) {
errors++;
}
null_var_string.oid = OID_NULL;
null_art_leaf.oid = OID_NULL;
null_art_node_u.oid = OID_NULL;
if (!errors) {
TX_BEGIN(pop) {
root = POBJ_ROOT(pop, struct art_tree_root);
if (*newpool) {
TX_ADD(root);
D_RW(root)->root.oid = OID_NULL;
D_RW(root)->size = 0;
*newpool = 0;
}
} TX_END
}
return errors;
}
#if 0
// Recursively destroys the tree
static void
destroy_node(TOID(art_node_u) n_u)
{
// Break if null
if (TOID_IS_NULL(n_u))
return;
// Special case leafs
if (IS_LEAF(D_RO(n_u))) {
TX_FREE(n_u);
return;
}
// Handle each node type
int i;
TOID(art_node4) an4;
TOID(art_node16) an16;
TOID(art_node48) an48;
TOID(art_node256) an256;
switch (D_RO(n_u)->art_node_type) {
case NODE4:
an4 = D_RO(n_u)->u.an4;
for (i = 0; i < D_RO(an4)->n.num_children; i++) {
destroy_node(D_RW(an4)->children[i]);
}
break;
case NODE16:
an16 = D_RO(n_u)->u.an16;
for (i = 0; i < D_RO(an16)->n.num_children; i++) {
destroy_node(D_RW(an16)->children[i]);
}
break;
case NODE48:
an48 = D_RO(n_u)->u.an48;
for (i = 0; i < D_RO(an48)->n.num_children; i++) {
destroy_node(D_RW(an48)->children[i]);
}
break;
case NODE256:
an256 = D_RO(n_u)->u.an256;
for (i = 0; i < D_RO(an256)->n.num_children; i++) {
if (!(TOID_IS_NULL(D_RO(an256)->children[i]))) {
destroy_node(D_RW(an256)->children[i]);
}
}
break;
default:
abort();
}
// Free ourself on the way up
TX_FREE(n_u);
}
/*
* Destroys an ART tree
* @return 0 on success.
*/
static int
art_tree_destroy(TOID(struct art_tree_root) t)
{
destroy_node(D_RO(t)->root);
return 0;
}
#endif
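/*
 * Returns a pointer to the child slot of n matching the key byte c,
 * or a pointer to null_art_node_u if there is no such child.
 */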
static TOID(art_node_u)*
find_child(TOID(art_node_u) n, unsigned char c)
{
int i;
int mask;
int bitfield;
TOID(art_node4) an4;
TOID(art_node16) an16;
TOID(art_node48) an48;
TOID(art_node256) an256;
switch (D_RO(n)->art_node_type) {
case NODE4:
an4 = D_RO(n)->u.an4;
for (i = 0; i < D_RO(an4)->n.num_children; i++) {
if (D_RO(an4)->keys[i] == c) {
return &(D_RW(an4)->children[i]);
}
}
break;
case NODE16: {
__m128i cmp;
an16 = D_RO(n)->u.an16;
// Compare the key to all 16 stored keys
cmp = _mm_cmpeq_epi8(_mm_set1_epi8(c),
_mm_loadu_si128((__m128i *)D_RO(an16)->keys));
// Use a mask to ignore children that don't exist
mask = (1 << D_RO(an16)->n.num_children) - 1;
bitfield = _mm_movemask_epi8(cmp) & mask;
/*
* If we have a match (any bit set) then we can
* return the pointer match using ctz to get the index.
*/
if (bitfield) {
return &(D_RW(an16)->children[__builtin_ctz(bitfield)]);
}
break;
}
case NODE48:
an48 = D_RO(n)->u.an48;
i = D_RO(an48)->keys[c];
if (i) {
return &(D_RW(an48)->children[i - 1]);
}
break;
case NODE256:
an256 = D_RO(n)->u.an256;
if (!TOID_IS_NULL(D_RO(an256)->children[c])) {
return &(D_RW(an256)->children[c]);
}
break;
default:
abort();
}
return &null_art_node_u;
}
static inline int
min(int a, int b)
{
return (a < b) ? a : b;
}
/*
* Returns the number of prefix characters shared between
* the key and node.
*/
static int
check_prefix(const art_node *n,
const unsigned char *key, int key_len, int depth)
{
int max_cmp = min(min(n->partial_len, MAX_PREFIX_LEN), key_len - depth);
int idx;
for (idx = 0; idx < max_cmp; idx++) {
if (n->partial[idx] != key[depth + idx])
return idx;
}
return idx;
}
/*
* Checks if a leaf matches
* @return 0 on success.
*/
static int
leaf_matches(TOID(art_leaf) n, const unsigned char *key, int key_len, int depth)
{
(void) depth;
// Fail if the key lengths are different
if (D_RO(D_RO(n)->key)->len != (uint32_t)key_len)
return 1;
// Compare the keys starting at the depth
return memcmp(D_RO(D_RO(n)->key)->s, key, key_len);
}
/*
* Searches for a value in the ART tree
 * @arg pop The pmemobj pool holding the tree
 * @arg key The key
 * @arg key_len The length of the key
 * @return null_var_string if the item was not found, otherwise
 * the value TOID is returned.
*/
TOID(var_string)
art_search(PMEMobjpool *pop, const unsigned char *key, int key_len)
{
TOID(struct art_tree_root)t = POBJ_ROOT(pop, struct art_tree_root);
TOID(art_node_u) *child;
TOID(art_node_u) n = D_RO(t)->root;
const art_node *n_an;
int prefix_len;
int depth = 0;
while (!TOID_IS_NULL(n)) {
// Might be a leaf
if (IS_LEAF(D_RO(n))) {
// n = LEAF_RAW(n);
// Check if the expanded path matches
if (!leaf_matches(D_RO(n)->u.al, key, key_len, depth)) {
return (D_RO(D_RO(n)->u.al))->value;
}
return null_var_string;
}
switch (D_RO(n)->art_node_type) {
case NODE4: n_an = &(D_RO(D_RO(n)->u.an4)->n); break;
case NODE16: n_an = &(D_RO(D_RO(n)->u.an16)->n); break;
case NODE48: n_an = &(D_RO(D_RO(n)->u.an48)->n); break;
case NODE256: n_an = &(D_RO(D_RO(n)->u.an256)->n); break;
default:
return null_var_string;
}
// Bail if the prefix does not match
if (n_an->partial_len) {
prefix_len = check_prefix(n_an, key, key_len, depth);
if (prefix_len !=
min(MAX_PREFIX_LEN, n_an->partial_len))
return null_var_string;
depth = depth + n_an->partial_len;
}
// Recursively search
child = find_child(n, key[depth]);
if (TOID_IS_NULL(*child)) {
n.oid = OID_NULL;
} else {
n = *child;
}
depth++;
}
return null_var_string;
}
// Find the minimum leaf under a node
static TOID(art_leaf)
minimum(TOID(art_node_u) n_u)
{
TOID(art_node4) an4;
TOID(art_node16) an16;
TOID(art_node48) an48;
TOID(art_node256) an256;
// Handle base cases
if (TOID_IS_NULL(n_u))
return null_art_leaf;
if (IS_LEAF(D_RO(n_u)))
return D_RO(n_u)->u.al;
int idx;
switch (D_RO(n_u)->art_node_type) {
case NODE4:
an4 = D_RO(n_u)->u.an4;
return minimum(D_RO(an4)->children[0]);
case NODE16:
an16 = D_RO(n_u)->u.an16;
return minimum(D_RO(an16)->children[0]);
case NODE48:
an48 = D_RO(n_u)->u.an48;
idx = 0;
while (!(D_RO(an48)->keys[idx]))
idx++;
idx = D_RO(an48)->keys[idx] - 1;
assert(idx < 48);
return minimum(D_RO(an48)->children[idx]);
case NODE256:
an256 = D_RO(n_u)->u.an256;
idx = 0;
		while (TOID_IS_NULL(D_RO(an256)->children[idx]))
idx++;
return minimum(D_RO(an256)->children[idx]);
default:
abort();
}
}
// Find the maximum leaf under a node
static TOID(art_leaf)
maximum(TOID(art_node_u) n_u)
{
TOID(art_node4) an4;
TOID(art_node16) an16;
TOID(art_node48) an48;
TOID(art_node256) an256;
const art_node *n_an;
// Handle base cases
if (TOID_IS_NULL(n_u))
return null_art_leaf;
if (IS_LEAF(D_RO(n_u)))
return D_RO(n_u)->u.al;
int idx;
switch (D_RO(n_u)->art_node_type) {
case NODE4:
an4 = D_RO(n_u)->u.an4;
n_an = &(D_RO(an4)->n);
return maximum(D_RO(an4)->children[n_an->num_children - 1]);
case NODE16:
an16 = D_RO(n_u)->u.an16;
n_an = &(D_RO(an16)->n);
return maximum(D_RO(an16)->children[n_an->num_children - 1]);
case NODE48:
an48 = D_RO(n_u)->u.an48;
idx = 255;
while (!(D_RO(an48)->keys[idx]))
idx--;
idx = D_RO(an48)->keys[idx] - 1;
assert((idx >= 0) && (idx < 48));
return maximum(D_RO(an48)->children[idx]);
case NODE256:
an256 = D_RO(n_u)->u.an256;
idx = 255;
		while (TOID_IS_NULL(D_RO(an256)->children[idx]))
idx--;
return maximum(D_RO(an256)->children[idx]);
default:
abort();
}
}
/*
* Returns the minimum valued leaf
*/
TOID(art_leaf)
art_minimum(TOID(struct art_tree_root) t)
{
return minimum(D_RO(t)->root);
}
/*
* Returns the maximum valued leaf
*/
TOID(art_leaf)
art_maximum(TOID(struct art_tree_root) t)
{
return maximum(D_RO(t)->root);
}
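/*
 * Allocates a new leaf node and fills it with the key/value pair.
 */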
TOID(art_node_u)
make_leaf(PMEMobjpool *pop,
const unsigned char *key, int key_len, void *value, int val_len)
{
TOID(art_node_u)newleaf;
newleaf = alloc_node(pop, art_leaf_t);
fill_leaf(pop, D_RW(newleaf)->u.al, key, key_len, value, val_len);
return newleaf;
}
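/*
 * Returns the number of key bytes shared by two leaves beyond depth.
 */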
static int
longest_common_prefix(TOID(art_leaf) l1, TOID(art_leaf) l2, int depth)
{
TOID(var_string) l1_key = D_RO(l1)->key;
TOID(var_string) l2_key = D_RO(l2)->key;
int max_cmp;
int idx;
max_cmp = min(D_RO(l1_key)->len, D_RO(l2_key)->len) - depth;
for (idx = 0; idx < max_cmp; idx++) {
if (D_RO(l1_key)->s[depth + idx] !=
D_RO(l2_key)->s[depth + idx])
return idx;
}
return idx;
}
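/*
 * Copies the common art_node header (child count and prefix).
 */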
static void
copy_header(art_node *dest, art_node *src)
{
dest->num_children = src->num_children;
dest->partial_len = src->partial_len;
memcpy(dest->partial, src->partial,
min(MAX_PREFIX_LEN, src->partial_len));
}
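/*
 * A node256 indexes its children directly by key byte, so an
 * insertion can never overflow the node.
 */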
static void
add_child256(PMEMobjpool *pop, TOID(art_node256) n, TOID(art_node_u) *ref,
unsigned char c, TOID(art_node_u) child)
{
art_node *n_an;
(void) ref;
TX_ADD(n);
n_an = &(D_RW(n)->n);
n_an->num_children++;
D_RW(n)->children[c] = child;
}
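/*
 * Inserts into the indirect key map of a node48; grows into a
 * node256 once all 48 child slots are in use.
 */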
static void
add_child48(PMEMobjpool *pop, TOID(art_node48) n, TOID(art_node_u) *ref,
unsigned char c, TOID(art_node_u) child)
{
art_node *n_an;
n_an = &(D_RW(n)->n);
if (n_an->num_children < 48) {
int pos = 0;
TX_ADD(n);
while (!(TOID_IS_NULL(D_RO(n)->children[pos])))
pos++;
D_RW(n)->children[pos] = child;
D_RW(n)->keys[c] = pos + 1;
n_an->num_children++;
} else {
TOID(art_node_u) newnode_u = alloc_node(pop, NODE256);
TOID(art_node256) newnode = D_RO(newnode_u)->u.an256;
pmemobj_tx_add_range_direct(ref, sizeof(TOID(art_node_u)));
for (int i = 0; i < 256; i++) {
if (D_RO(n)->keys[i]) {
D_RW(newnode)->children[i] =
D_RO(n)->children[D_RO(n)->keys[i] - 1];
}
}
copy_header(&(D_RW(newnode)->n), n_an);
*ref = newnode_u;
TX_FREE(n);
add_child256(pop, newnode, ref, c, child);
}
}
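/*
 * Inserts into a node16, keeping the keys sorted with an
 * SSE-assisted search; grows into a node48 when the 16 slots
 * are full.
 */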
static void
add_child16(PMEMobjpool *pop, TOID(art_node16) n, TOID(art_node_u)*ref,
unsigned char c, TOID(art_node_u) child)
{
art_node *n_an;
n_an = &(D_RW(n)->n);
if (n_an->num_children < 16) {
__m128i cmp;
TX_ADD(n);
// Compare the key to all 16 stored keys
cmp = _mm_cmplt_epi8(_mm_set1_epi8(c),
_mm_loadu_si128((__m128i *)(D_RO(n)->keys)));
// Use a mask to ignore children that don't exist
unsigned mask = (1 << n_an->num_children) - 1;
unsigned bitfield = _mm_movemask_epi8(cmp) & mask;
// Check if less than any
unsigned idx;
if (bitfield) {
idx = __builtin_ctz(bitfield);
memmove(&(D_RW(n)->keys[idx + 1]),
&(D_RO(n)->keys[idx]),
n_an->num_children - idx);
PMEMOIDmove(&(D_RW(n)->children[idx + 1].oid),
&(D_RW(n)->children[idx].oid),
n_an->num_children - idx);
} else {
idx = n_an->num_children;
}
// Set the child
D_RW(n)->keys[idx] = c;
D_RW(n)->children[idx] = child;
n_an->num_children++;
} else {
TOID(art_node_u) newnode_u = alloc_node(pop, NODE48);
		TOID(art_node48) newnode = D_RO(newnode_u)->u.an48;
		pmemobj_tx_add_range_direct(ref, sizeof(TOID(art_node_u)));
		// Copy the child pointers and populate the key map
PMEMOIDcopy(&(D_RW(newnode)->children[0].oid),
&(D_RO(n)->children[0].oid),
n_an->num_children);
for (int i = 0; i < n_an->num_children; i++) {
D_RW(newnode)->keys[D_RO(n)->keys[i]] = i + 1;
}
copy_header(&(D_RW(newnode))->n, n_an);
*ref = newnode_u;
TX_FREE(n);
add_child48(pop, newnode, ref, c, child);
}
}
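/*
 * Inserts into a node4, keeping the keys sorted by linear scan;
 * grows into a node16 when the 4 slots are full.
 */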
static void
add_child4(PMEMobjpool *pop, TOID(art_node4) n, TOID(art_node_u) *ref,
unsigned char c, TOID(art_node_u) child)
{
art_node *n_an;
n_an = &(D_RW(n)->n);
if (n_an->num_children < 4) {
int idx;
TX_ADD(n);
for (idx = 0; idx < n_an->num_children; idx++) {
if (c < D_RO(n)->keys[idx]) break;
}
// Shift to make room
memmove(D_RW(n)->keys + idx + 1, D_RO(n)->keys + idx,
n_an->num_children - idx);
		assert(idx < 4);
PMEMOIDmove(&(D_RW(n)->children[idx + 1].oid),
&(D_RW(n)->children[idx].oid),
n_an->num_children - idx);
// Insert element
D_RW(n)->keys[idx] = c;
D_RW(n)->children[idx] = child;
n_an->num_children++;
} else {
TOID(art_node_u) newnode_u = alloc_node(pop, NODE16);
TOID(art_node16) newnode = D_RO(newnode_u)->u.an16;
pmemobj_tx_add_range_direct(ref, sizeof(TOID(art_node_u)));
// Copy the child pointers and the key map
PMEMOIDcopy(&(D_RW(newnode)->children[0].oid),
&(D_RO(n)->children[0].oid), n_an->num_children);
memcpy(D_RW(newnode)->keys, D_RO(n)->keys, n_an->num_children);
copy_header(&(D_RW(newnode)->n), n_an);
*ref = newnode_u;
TX_FREE(n);
add_child16(pop, newnode, ref, c, child);
}
}
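/*
 * Dispatches the insertion of a child to the handler for the
 * node's type.
 */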
static void
add_child(PMEMobjpool *pop, TOID(art_node_u) n, TOID(art_node_u) *ref,
unsigned char c, TOID(art_node_u) child)
{
switch (D_RO(n)->art_node_type) {
case NODE4:
add_child4(pop, D_RO(n)->u.an4, ref, c, child);
break;
case NODE16:
add_child16(pop, D_RO(n)->u.an16, ref, c, child);
break;
case NODE48:
add_child48(pop, D_RO(n)->u.an48, ref, c, child);
break;
case NODE256:
add_child256(pop, D_RO(n)->u.an256, ref, c, child);
break;
default:
abort();
}
}
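/*
 * Returns the index of the first byte at which the key and the node
 * prefix differ; when the prefix is longer than the cached
 * MAX_PREFIX_LEN bytes, a minimum leaf is consulted for the rest.
 */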
static int
prefix_mismatch(TOID(art_node_u) n, unsigned char *key, int key_len, int depth)
{
const art_node *n_an;
int max_cmp;
int idx;
switch (D_RO(n)->art_node_type) {
case NODE4: n_an = &(D_RO(D_RO(n)->u.an4)->n); break;
case NODE16: n_an = &(D_RO(D_RO(n)->u.an16)->n); break;
case NODE48: n_an = &(D_RO(D_RO(n)->u.an48)->n); break;
case NODE256: n_an = &(D_RO(D_RO(n)->u.an256)->n); break;
default: return 0;
}
max_cmp = min(min(MAX_PREFIX_LEN, n_an->partial_len), key_len - depth);
for (idx = 0; idx < max_cmp; idx++) {
if (n_an->partial[idx] != key[depth + idx])
return idx;
}
// If the prefix is short we can avoid finding a leaf
if (n_an->partial_len > MAX_PREFIX_LEN) {
// Prefix is longer than what we've checked, find a leaf
TOID(art_leaf) l = minimum(n);
max_cmp = min(D_RO(D_RO(l)->key)->len, key_len) - depth;
for (; idx < max_cmp; idx++) {
if (D_RO(D_RO(l)->key)->s[idx + depth] !=
key[depth + idx])
return idx;
}
}
return idx;
}
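/*
 * Descends to the insertion point, splitting leaves and prefixes as
 * needed; *old is set when an existing key was overwritten.
 */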
static TOID(var_string)
recursive_insert(PMEMobjpool *pop, TOID(art_node_u) n, TOID(art_node_u) *ref,
const unsigned char *key, int key_len,
void *value, int val_len, int depth, int *old)
{
art_node *n_an;
TOID(var_string) retval;
// If we are at a NULL node, inject a leaf
if (TOID_IS_NULL(n)) {
		pmemobj_tx_add_range_direct(ref, sizeof(TOID(art_node_u)));
		*ref = make_leaf(pop, key, key_len, value, val_len);
		SET_LEAF(D_RW(*ref));
retval = null_var_string;
return retval;
}
// If we are at a leaf, we need to replace it with a node
if (IS_LEAF(D_RO(n))) {
TOID(art_leaf)l = D_RO(n)->u.al;
// Check if we are updating an existing value
if (!leaf_matches(l, key, key_len, depth)) {
*old = 1;
retval = D_RO(l)->value;
TX_ADD(D_RW(l)->value);
COPY_BLOB(D_RW(l)->value, value, val_len);
return retval;
}
// New value, we must split the leaf into a node4
pmemobj_tx_add_range_direct(ref,
sizeof(TOID(art_node_u)));
TOID(art_node_u) newnode_u = alloc_node(pop, NODE4);
TOID(art_node4) newnode = D_RO(newnode_u)->u.an4;
art_node *newnode_n = &(D_RW(newnode)->n);
// Create a new leaf
TOID(art_node_u) l2_u =
make_leaf(pop, key, key_len, value, val_len);
TOID(art_leaf) l2 = D_RO(l2_u)->u.al;
// Determine longest prefix
int longest_prefix =
longest_common_prefix(l, l2, depth);
newnode_n->partial_len = longest_prefix;
memcpy(newnode_n->partial, key + depth,
min(MAX_PREFIX_LEN, longest_prefix));
// Add the leafs to the newnode node4
*ref = newnode_u;
add_child4(pop, newnode, ref,
D_RO(D_RO(l)->key)->s[depth + longest_prefix],
n);
add_child4(pop, newnode, ref,
D_RO(D_RO(l2)->key)->s[depth + longest_prefix],
l2_u);
return null_var_string;
}
// Check if given node has a prefix
switch (D_RO(n)->art_node_type) {
case NODE4: n_an = &(D_RW(D_RW(n)->u.an4)->n); break;
case NODE16: n_an = &(D_RW(D_RW(n)->u.an16)->n); break;
case NODE48: n_an = &(D_RW(D_RW(n)->u.an48)->n); break;
case NODE256: n_an = &(D_RW(D_RW(n)->u.an256)->n); break;
default: abort();
}
if (n_an->partial_len) {
// Determine if the prefixes differ, since we need to split
int prefix_diff =
prefix_mismatch(n, (unsigned char *)key, key_len, depth);
if ((uint32_t)prefix_diff >= n_an->partial_len) {
depth += n_an->partial_len;
goto RECURSE_SEARCH;
}
// Create a new node
pmemobj_tx_add_range_direct(ref,
sizeof(TOID(art_node_u)));
pmemobj_tx_add_range_direct(n_an, sizeof(art_node));
TOID(art_node_u) newnode_u = alloc_node(pop, NODE4);
TOID(art_node4) newnode = D_RO(newnode_u)->u.an4;
art_node *newnode_n = &(D_RW(newnode)->n);
*ref = newnode_u;
newnode_n->partial_len = prefix_diff;
memcpy(newnode_n->partial, n_an->partial,
min(MAX_PREFIX_LEN, prefix_diff));
// Adjust the prefix of the old node
if (n_an->partial_len <= MAX_PREFIX_LEN) {
add_child4(pop, newnode, ref,
n_an->partial[prefix_diff], n);
n_an->partial_len -= (prefix_diff + 1);
memmove(n_an->partial,
n_an->partial + prefix_diff + 1,
min(MAX_PREFIX_LEN, n_an->partial_len));
} else {
unsigned char *dst;
const unsigned char *src;
size_t len;
n_an->partial_len -= (prefix_diff + 1);
TOID(art_leaf) l = minimum(n);
add_child4(pop, newnode, ref,
D_RO(D_RO(l)->key)->s[depth + prefix_diff],
n);
dst = n_an->partial;
src =
&(D_RO(D_RO(l)->key)->s[depth + prefix_diff + 1 ]);
len = min(MAX_PREFIX_LEN, n_an->partial_len);
memcpy(dst, src, len);
}
// Insert the new leaf
TOID(art_node_u) l =
make_leaf(pop, key, key_len, value, val_len);
SET_LEAF(D_RW(l));
add_child4(pop, newnode, ref, key[depth + prefix_diff], l);
return null_var_string;
}
RECURSE_SEARCH:;
// Find a child to recurse to
TOID(art_node_u) *child = find_child(n, key[depth]);
if (!TOID_IS_NULL(*child)) {
return recursive_insert(pop, *child, child,
key, key_len, value, val_len, depth + 1, old);
}
// No child, node goes within us
TOID(art_node_u) l =
make_leaf(pop, key, key_len, value, val_len);
SET_LEAF(D_RW(l));
add_child(pop, n, ref, key[depth], l);
retval = null_var_string;
return retval;
}
/*
* Returns the size of the ART tree
*/
uint64_t
art_size(PMEMobjpool *pop)
{
TOID(struct art_tree_root) root;
root = POBJ_ROOT(pop, struct art_tree_root);
return D_RO(root)->size;
}
/*
* Inserts a new value into the ART tree
 * @arg pop The pmemobj pool holding the tree
 * @arg key The key
 * @arg key_len The length of the key
 * @arg value Opaque value.
 * @arg val_len The length of the value
 * @return null_var_string if the item was newly inserted, otherwise
 * the old value TOID is returned.
*/
TOID(var_string)
art_insert(PMEMobjpool *pop,
const unsigned char *key, int key_len, void *value, int val_len)
{
int old_val = 0;
TOID(var_string) old;
TOID(struct art_tree_root) root;
TX_BEGIN(pop) {
root = POBJ_ROOT(pop, struct art_tree_root);
TX_ADD(root);
old = recursive_insert(pop, D_RO(root)->root,
&(D_RW(root)->root),
(const unsigned char *)key, key_len,
value, val_len, 0, &old_val);
if (!old_val)
D_RW(root)->size++;
} TX_ONABORT {
abort();
} TX_END
return old;
}
static void
remove_child256(PMEMobjpool *pop,
TOID(art_node256) n, TOID(art_node_u) *ref, unsigned char c)
{
art_node *n_an = &(D_RW(n)->n);
TX_ADD(n);
D_RW(n)->children[c].oid = OID_NULL;
n_an->num_children--;
	// Resize to a node48 on underflow; not immediately, to prevent
	// thrashing if we sit on the 48/49 boundary
if (n_an->num_children == 37) {
TOID(art_node_u) newnode_u = alloc_node(pop, NODE48);
TOID(art_node48) newnode_an48 = D_RO(newnode_u)->u.an48;
pmemobj_tx_add_range_direct(ref, sizeof(TOID(art_node_u)));
*ref = newnode_u;
copy_header(&(D_RW(newnode_an48)->n), n_an);
int pos = 0;
for (int i = 0; i < 256; i++) {
if (!TOID_IS_NULL(D_RO(n)->children[i])) {
assert(pos < 48);
D_RW(newnode_an48)->children[pos] =
D_RO(n)->children[i];
D_RW(newnode_an48)->keys[i] = pos + 1;
pos++;
}
}
TX_FREE(n);
}
}
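/*
 * Clears the key map entry of a node48; shrinks into a node16 once
 * only 12 children remain.
 */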
static void
remove_child48(PMEMobjpool *pop,
TOID(art_node48) n, TOID(art_node_u) *ref, unsigned char c)
{
int pos = D_RO(n)->keys[c];
art_node *n_an = &(D_RW(n)->n);
TX_ADD(n);
D_RW(n)->keys[c] = 0;
D_RW(n)->children[pos - 1].oid = OID_NULL;
n_an->num_children--;
if (n_an->num_children == 12) {
TOID(art_node_u) newnode_u = alloc_node(pop, NODE16);
TOID(art_node16) newnode_an16 = D_RO(newnode_u)->u.an16;
pmemobj_tx_add_range_direct(ref, sizeof(TOID(art_node_u)));
*ref = newnode_u;
copy_header(&(D_RW(newnode_an16)->n), n_an);
int child = 0;
for (int i = 0; i < 256; i++) {
pos = D_RO(n)->keys[i];
if (pos) {
assert(child < 16);
D_RW(newnode_an16)->keys[child] = i;
D_RW(newnode_an16)->children[child] =
D_RO(n)->children[pos - 1];
child++;
}
}
TX_FREE(n);
}
}
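/*
 * Compacts the sorted key array of a node16; shrinks into a node4
 * once only 3 children remain.
 */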
static void
remove_child16(PMEMobjpool *pop,
TOID(art_node16) n, TOID(art_node_u) *ref, TOID(art_node_u) *l)
{
int pos = l - &(D_RO(n)->children[0]);
uint8_t num_children = ((D_RW(n)->n).num_children);
TX_ADD(n);
memmove(D_RW(n)->keys + pos, D_RO(n)->keys + pos + 1,
num_children - 1 - pos);
memmove(D_RW(n)->children + pos,
D_RO(n)->children + pos + 1,
(num_children - 1 - pos) * sizeof(void *));
((D_RW(n)->n).num_children)--;
if (--num_children == 3) {
TOID(art_node_u) newnode_u = alloc_node(pop, NODE4);
TOID(art_node4) newnode_an4 = D_RO(newnode_u)->u.an4;
pmemobj_tx_add_range_direct(ref, sizeof(TOID(art_node_u)));
*ref = newnode_u;
copy_header(&(D_RW(newnode_an4)->n), &(D_RW(n)->n));
memcpy(D_RW(newnode_an4)->keys, D_RO(n)->keys, 4);
memcpy(D_RW(newnode_an4)->children,
D_RO(n)->children, 4 * sizeof(TOID(art_node_u)));
TX_FREE(n);
}
}
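/*
 * Compacts a node4; a node4 left with a single inner child is
 * collapsed into that child, concatenating their prefixes.
 */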
static void
remove_child4(PMEMobjpool *pop,
TOID(art_node4) n, TOID(art_node_u) *ref, TOID(art_node_u) *l)
{
int pos = l - &(D_RO(n)->children[0]);
uint8_t *num_children = &((D_RW(n)->n).num_children);
TX_ADD(n);
memmove(D_RW(n)->keys + pos, D_RO(n)->keys + pos + 1,
*num_children - 1 - pos);
memmove(D_RW(n)->children + pos, D_RO(n)->children + pos + 1,
(*num_children - 1 - pos) * sizeof(void *));
(*num_children)--;
// Remove nodes with only a single child
if (*num_children == 1) {
TOID(art_node_u) child_u = D_RO(n)->children[0];
art_node *child = &(D_RW(D_RW(child_u)->u.an4)->n);
pmemobj_tx_add_range_direct(ref, sizeof(TOID(art_node_u)));
		if (!IS_LEAF(D_RO(child_u))) {
			pmemobj_tx_add_range_direct(child,
			    sizeof(art_node));
			// Concatenate the prefixes
int prefix = (D_RW(n)->n).partial_len;
if (prefix < MAX_PREFIX_LEN) {
(D_RW(n)->n).partial[prefix] =
D_RO(n)->keys[0];
prefix++;
}
if (prefix < MAX_PREFIX_LEN) {
int sub_prefix = min(child->partial_len,
MAX_PREFIX_LEN - prefix);
memcpy((D_RW(n)->n).partial + prefix,
child->partial, sub_prefix);
prefix += sub_prefix;
}
// Store the prefix in the child
memcpy(child->partial,
(D_RO(n)->n).partial, min(prefix, MAX_PREFIX_LEN));
child->partial_len += (D_RO(n)->n).partial_len + 1;
}
*ref = child_u;
TX_FREE(n);
}
}
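/*
 * Dispatches the removal of a child to the handler for the
 * node's type.
 */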
static void
remove_child(PMEMobjpool *pop,
TOID(art_node_u) n, TOID(art_node_u) *ref,
unsigned char c, TOID(art_node_u) *l)
{
switch (D_RO(n)->art_node_type) {
case NODE4:
return remove_child4(pop, D_RO(n)->u.an4, ref, l);
case NODE16:
return remove_child16(pop, D_RO(n)->u.an16, ref, l);
case NODE48:
return remove_child48(pop, D_RO(n)->u.an48, ref, c);
case NODE256:
return remove_child256(pop, D_RO(n)->u.an256, ref, c);
default:
abort();
}
}
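/*
 * Descends to the leaf matching the key and unlinks it from its
 * parent; returns the removed leaf, or null_art_leaf if the key
 * was not found.
 */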
static TOID(art_leaf)
recursive_delete(PMEMobjpool *pop,
TOID(art_node_u) n, TOID(art_node_u) *ref,
const unsigned char *key, int key_len, int depth)
{
const art_node *n_an;
// Search terminated
if (TOID_IS_NULL(n))
return null_art_leaf;
// Handle hitting a leaf node
if (IS_LEAF(D_RO(n))) {
TOID(art_leaf) l = D_RO(n)->u.al;
if (!leaf_matches(l, key, key_len, depth)) {
*ref = null_art_node_u;
return l;
}
return null_art_leaf;
}
// get art_node component
switch (D_RO(n)->art_node_type) {
case NODE4: n_an = &(D_RO(D_RO(n)->u.an4)->n); break;
case NODE16: n_an = &(D_RO(D_RO(n)->u.an16)->n); break;
case NODE48: n_an = &(D_RO(D_RO(n)->u.an48)->n); break;
case NODE256: n_an = &(D_RO(D_RO(n)->u.an256)->n); break;
default: abort();
}
// Bail if the prefix does not match
if (n_an->partial_len) {
int prefix_len = check_prefix(n_an, key, key_len, depth);
if (prefix_len != min(MAX_PREFIX_LEN, n_an->partial_len)) {
return null_art_leaf;
}
depth = depth + n_an->partial_len;
}
// Find child node
TOID(art_node_u) *child = find_child(n, key[depth]);
if (TOID_IS_NULL(*child))
return null_art_leaf;
// If the child is leaf, delete from this node
if (IS_LEAF(D_RO(*child))) {
TOID(art_leaf)l = D_RO(*child)->u.al;
if (!leaf_matches(l, key, key_len, depth)) {
remove_child(pop, n, ref, key[depth], child);
return l;
}
return null_art_leaf;
} else {
// Recurse
return recursive_delete(pop, *child, child,
(const unsigned char *)key, key_len, depth + 1);
}
}
/*
* Deletes a value from the ART tree
 * @arg pop The pmemobj pool holding the tree
 * @arg key The key
 * @arg key_len The length of the key
 * @return null_var_string if the item was not found, otherwise
 * the removed value TOID is returned.
*/
TOID(var_string)
art_delete(PMEMobjpool *pop,
const unsigned char *key, int key_len)
{
TOID(struct art_tree_root)root = POBJ_ROOT(pop, struct art_tree_root);
TOID(art_leaf) l;
TOID(var_string) retval;
retval = null_var_string;
TX_BEGIN(pop) {
TX_ADD(root);
l = recursive_delete(pop, D_RO(root)->root,
&D_RW(root)->root, key, key_len, 0);
if (!TOID_IS_NULL(l)) {
D_RW(root)->size--;
TOID(var_string)old = D_RO(l)->value;
TX_FREE(l);
retval = old;
}
} TX_ONABORT {
abort();
} TX_END
return retval;
}
// Recursively iterates over the tree
static int
recursive_iter(TOID(art_node_u)n, art_callback cb, void *data)
{
const art_node *n_an;
TOID(art_node4) an4;
TOID(art_node16) an16;
TOID(art_node48) an48;
TOID(art_node256) an256;
TOID(art_leaf) l;
TOID(var_string) key;
TOID(var_string) value;
cb_data cbd;
// Handle base cases
if (TOID_IS_NULL(n)) {
return 0;
}
cbd.node = n;
cbd.child_idx = -1;
if (IS_LEAF(D_RO(n))) {
l = D_RO(n)->u.al;
key = D_RO(l)->key;
value = D_RO(l)->value;
return cb(&cbd, D_RO(key)->s, D_RO(key)->len,
D_RO(value)->s, D_RO(value)->len);
}
int idx, res;
switch (D_RO(n)->art_node_type) {
case NODE4:
an4 = D_RO(n)->u.an4;
n_an = &(D_RO(an4)->n);
for (int i = 0; i < n_an->num_children; i++) {
cbd.child_idx = i;
cb(&cbd, NULL, 0, NULL, 0);
res = recursive_iter(D_RO(an4)->children[i], cb, data);
if (res)
return res;
}
break;
case NODE16:
an16 = D_RO(n)->u.an16;
n_an = &(D_RO(an16)->n);
for (int i = 0; i < n_an->num_children; i++) {
cbd.child_idx = i;
cb(&cbd, NULL, 0, NULL, 0);
res = recursive_iter(D_RO(an16)->children[i], cb, data);
if (res)
return res;
}
break;
case NODE48:
an48 = D_RO(n)->u.an48;
for (int i = 0; i < 256; i++) {
idx = D_RO(an48)->keys[i];
if (!idx)
continue;
cbd.child_idx = idx - 1;
cb(&cbd, NULL, 0, NULL, 0);
res = recursive_iter(D_RO(an48)->children[idx - 1],
cb, data);
if (res)
return res;
}
break;
case NODE256:
an256 = D_RO(n)->u.an256;
for (int i = 0; i < 256; i++) {
if (TOID_IS_NULL(D_RO(an256)->children[i]))
continue;
cbd.child_idx = i;
cb(&cbd, NULL, 0, NULL, 0);
res = recursive_iter(D_RO(an256)->children[i],
cb, data);
if (res)
return res;
}
break;
default:
abort();
}
return 0;
}
/*
 * Iterates through the key/value pairs in the map,
 * invoking a callback for each. The callback gets a
 * key and value for each and returns an integer stop value.
 * If the callback returns non-zero, then the iteration stops.
 * @arg pop The pmemobj pool holding the tree to iterate over
* @arg cb The callback function to invoke
* @arg data Opaque handle passed to the callback
* @return 0 on success, or the return of the callback.
*/
int
art_iter(PMEMobjpool *pop, art_callback cb, void *data)
{
TOID(struct art_tree_root) t = POBJ_ROOT(pop, struct art_tree_root);
return recursive_iter(D_RO(t)->root, cb, data);
}
#ifdef LIBART_ITER_PREFIX /* { */
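/*
 * NOTE: the prefix iterator below still uses the volatile libart
 * API and has not been ported to the pmemobj TOID types; it is
 * compiled only when LIBART_ITER_PREFIX is defined.
 */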
/*
* Checks if a leaf prefix matches
* @return 0 on success.
*/
static int
leaf_prefix_matches(TOID(art_leaf) n,
const unsigned char *prefix, int prefix_len)
{
// Fail if the key length is too short
if (D_RO(D_RO(n)->key)->len < (uint32_t)prefix_len)
return 1;
// Compare the keys
return memcmp(D_RO(D_RO(n)->key)->s, prefix, prefix_len);
}
/*
 * Iterates through the key/value pairs in the map,
 * invoking a callback for each that matches a given prefix.
 * The callback gets a key and value for each and returns an integer stop value.
* If the callback returns non-zero, then the iteration stops.
* @arg t The tree to iterate over
* @arg prefix The prefix of keys to read
* @arg prefix_len The length of the prefix
* @arg cb The callback function to invoke
* @arg data Opaque handle passed to the callback
* @return 0 on success, or the return of the callback.
*/
int
art_iter_prefix(art_tree *t,
const unsigned char *key, int key_len, art_callback cb, void *data)
{
art_node **child;
art_node *n = t->root;
int prefix_len, depth = 0;
while (n) {
// Might be a leaf
if (IS_LEAF(n)) {
n = LEAF_RAW(n);
// Check if the expanded path matches
if (!leaf_prefix_matches((art_leaf *)n, key, key_len)) {
art_leaf *l = (art_leaf *)n;
return cb(data,
(const unsigned char *)l->key,
l->key_len, l->value);
}
return 0;
}
// If the depth matches the prefix, we need to handle this node
if (depth == key_len) {
art_leaf *l = minimum(n);
if (!leaf_prefix_matches(l, key, key_len))
return recursive_iter(n, cb, data);
return 0;
}
// Bail if the prefix does not match
if (n->partial_len) {
prefix_len = prefix_mismatch(n, key, key_len, depth);
// If there is no match, search is terminated
if (!prefix_len)
return 0;
// If we've matched the prefix, iterate on this node
else if (depth + prefix_len == key_len) {
return recursive_iter(n, cb, data);
}
// if there is a full match, go deeper
depth = depth + n->partial_len;
}
// Recursively search
child = find_child(n, key[depth]);
n = (child) ? *child : NULL;
depth++;
}
return 0;
}
#endif /* } LIBART_ITER_PREFIX */
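/*
 * Allocates the key and value blobs of a leaf and copies the user
 * data into them.
 */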
int
fill_leaf(PMEMobjpool *pop, TOID(art_leaf) al,
const unsigned char *key, int key_len, void *value, int val_len)
{
int retval = 0;
size_t l_key;
size_t l_val;
TOID(var_string) Tkey;
TOID(var_string) Tval;
l_key = (sizeof(var_string) + key_len);
l_val = (sizeof(var_string) + val_len);
Tkey = TX_ALLOC(var_string, l_key);
Tval = TX_ALLOC(var_string, l_val);
COPY_BLOB(Tkey, key, key_len);
COPY_BLOB(Tval, value, val_len);
D_RW(al)->key = Tkey;
D_RW(al)->value = Tval;
return retval;
}
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/examples/libpmemobj/libart/arttree_structures.h
/*
* Copyright 2016, FUJITSU TECHNOLOGY SOLUTIONS GMBH
* Copyright 2016-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* ===========================================================================
*
* Filename: arttree_structures.h
*
* Description: known structures of the ART tree
*
* Author: Andreas Bluemle, Dieter Kasper
* Andreas.Bluemle.external@ts.fujitsu.com
* dieter.kasper@ts.fujitsu.com
*
* Organization: FUJITSU TECHNOLOGY SOLUTIONS GMBH
*
* ===========================================================================
*/
#ifndef _ARTTREE_STRUCTURES_H
#define _ARTTREE_STRUCTURES_H
#define MAX_PREFIX_LEN 10
/*
* pmem_context -- structure for pmempool file
*/
struct pmem_context {
char *filename;
size_t psize;
int fd;
char *addr;
uint64_t art_tree_root_offset;
};
struct _art_node_u; typedef struct _art_node_u art_node_u;
struct _art_node; typedef struct _art_node art_node;
struct _art_node4; typedef struct _art_node4 art_node4;
struct _art_node16; typedef struct _art_node16 art_node16;
struct _art_node48; typedef struct _art_node48 art_node48;
struct _art_node256; typedef struct _art_node256 art_node256;
struct _var_string; typedef struct _var_string var_string;
struct _art_leaf; typedef struct _art_leaf art_leaf;
struct _art_tree_root; typedef struct _art_tree_root art_tree_root;
typedef uint8_t art_tree_root_toid_type_num[65535];
typedef uint8_t _toid_art_node_u_toid_type_num[2];
typedef uint8_t _toid_art_node_toid_type_num[3];
typedef uint8_t _toid_art_node4_toid_type_num[4];
typedef uint8_t _toid_art_node16_toid_type_num[5];
typedef uint8_t _toid_art_node48_toid_type_num[6];
typedef uint8_t _toid_art_node256_toid_type_num[7];
typedef uint8_t _toid_art_leaf_toid_type_num[8];
typedef uint8_t _toid_var_string_toid_type_num[9];
typedef struct pmemoid {
uint64_t pool_uuid_lo;
uint64_t off;
} PMEMoid;
union _toid_art_node_u_toid {
PMEMoid oid;
art_node_u *_type;
_toid_art_node_u_toid_type_num *_type_num;
};
union art_tree_root_toid {
PMEMoid oid;
struct art_tree_root *_type;
art_tree_root_toid_type_num *_type_num;
};
union _toid_art_node_toid {
PMEMoid oid;
art_node *_type;
_toid_art_node_toid_type_num *_type_num;
};
union _toid_art_node4_toid {
PMEMoid oid;
art_node4 *_type;
_toid_art_node4_toid_type_num *_type_num;
};
union _toid_art_node16_toid {
PMEMoid oid;
art_node16 *_type;
_toid_art_node16_toid_type_num *_type_num;
};
union _toid_art_node48_toid {
PMEMoid oid;
art_node48 *_type;
_toid_art_node48_toid_type_num *_type_num;
};
union _toid_art_node256_toid {
PMEMoid oid;
art_node256 *_type;
_toid_art_node256_toid_type_num *_type_num;
};
union _toid_var_string_toid {
PMEMoid oid;
var_string *_type;
_toid_var_string_toid_type_num *_type_num;
};
union _toid_art_leaf_toid {
PMEMoid oid;
art_leaf *_type;
_toid_art_leaf_toid_type_num *_type_num;
};
struct _art_tree_root {
int size;
union _toid_art_node_u_toid root;
};
struct _art_node {
uint8_t num_children;
uint32_t partial_len;
unsigned char partial[MAX_PREFIX_LEN];
};
struct _art_node4 {
art_node n;
unsigned char keys[4];
union _toid_art_node_u_toid children[4];
};
struct _art_node16 {
art_node n;
unsigned char keys[16];
union _toid_art_node_u_toid children[16];
};
struct _art_node48 {
art_node n;
unsigned char keys[256];
union _toid_art_node_u_toid children[48];
};
struct _art_node256 {
art_node n;
union _toid_art_node_u_toid children[256];
};
struct _var_string {
size_t len;
unsigned char s[];
};
struct _art_leaf {
union _toid_var_string_toid value;
union _toid_var_string_toid key;
};
struct _art_node_u {
uint8_t art_node_type;
uint8_t art_node_tag;
union {
union _toid_art_node4_toid an4;
union _toid_art_node16_toid an16;
union _toid_art_node48_toid an48;
union _toid_art_node256_toid an256;
union _toid_art_leaf_toid al;
} u;
};
typedef enum {
ART_NODE4 = 0,
ART_NODE16 = 1,
ART_NODE48 = 2,
ART_NODE256 = 3,
ART_LEAF = 4,
ART_NODE_U = 5,
ART_NODE = 6,
ART_TREE_ROOT = 7,
VAR_STRING = 8,
art_node_types = 9 /* number of different art_nodes */
} art_node_type;
#define VALID_NODE_TYPE(n) (((n) >= 0) && ((n) < art_node_types))
extern size_t art_node_sizes[];
extern char *art_node_names[];
#endif /* _ARTTREE_STRUCTURES_H */
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/examples/libpmemobj/libart/arttree.c
/*
* Copyright 2016, FUJITSU TECHNOLOGY SOLUTIONS GMBH
* Copyright 2016-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* ===========================================================================
*
* Filename: arttree.c
*
* Description: implement ART tree using libpmemobj based on libart
*
* Author: Andreas Bluemle, Dieter Kasper
* Andreas.Bluemle.external@ts.fujitsu.com
* dieter.kasper@ts.fujitsu.com
*
* Organization: FUJITSU TECHNOLOGY SOLUTIONS GMBH
*
* ===========================================================================
*/
#include <assert.h>
#include <errno.h>
#include <unistd.h>
#include <string.h>
#include <strings.h>
#ifdef __FreeBSD__
#define _WITH_GETLINE
#endif
#include <stdio.h>
#include <stdlib.h>
#include <limits.h>
#include <stdbool.h>
#include <inttypes.h>
#include <fcntl.h>
#include <emmintrin.h>
#include <sys/types.h>
#include <sys/mman.h>
#include "libpmemobj.h"
#include "arttree.h"
/*
* dummy structure so far; this should correspond to the datastore
* structure as defined in examples/libpmemobj/tree_map/datastore
*/
struct datastore
{
void *priv;
};
/*
* context - main context of datastore
*/
struct ds_context
{
char *filename; /* name of pool file */
int mode; /* operation mode */
int insertions; /* number of insert operations to perform */
int newpool; /* complete new memory pool */
size_t psize; /* size of pool */
PMEMobjpool *pop; /* pmemobj handle */
bool fileio;
unsigned fmode;
int fd; /* file descriptor for file io mode */
char *addr; /* base mapping address for file io mode */
unsigned char *key; /* for SEARCH, INSERT and REMOVE */
uint32_t key_len;
unsigned char *value; /* for INSERT */
uint32_t val_len;
};
#define FILL (1 << 1)
#define DUMP (1 << 2)
#define GRAPH (1 << 3)
#define INSERT (1 << 4)
#define SEARCH (1 << 5)
#define REMOVE (1 << 6)
struct ds_context my_context;
extern TOID(var_string) null_var_string;
extern TOID(art_leaf) null_art_leaf;
extern TOID(art_node_u) null_art_node_u;
#define read_key(p) read_line(p)
#define read_value(p) read_line(p)
int initialize_context(struct ds_context *ctx, int ac, char *av[]);
int initialize_pool(struct ds_context *ctx);
int add_elements(struct ds_context *ctx);
int insert_element(struct ds_context *ctx);
int search_element(struct ds_context *ctx);
int delete_element(struct ds_context *ctx);
ssize_t read_line(unsigned char **line);
void exit_handler(struct ds_context *ctx);
int art_tree_map_init(struct datastore *ds, struct ds_context *ctx);
void pmemobj_ds_set_priv(struct datastore *ds, void *priv);
static int dump_art_leaf_callback(void *data,
const unsigned char *key, uint32_t key_len,
const unsigned char *val, uint32_t val_len);
static int dump_art_node_callback(void *data,
const unsigned char *key, uint32_t key_len,
const unsigned char *val, uint32_t val_len);
static void print_node_info(char *nodetype, uint64_t off, const art_node *an);
static int parse_keyval(struct ds_context *ctx, char *arg, int mode);
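/*
 * initialize_context -- parse the command line options into the
 * datastore context
 */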
int
initialize_context(struct ds_context *ctx, int ac, char *av[])
{
int errors = 0;
int opt;
char mode;
if ((ctx == NULL) || (ac < 2)) {
errors++;
}
if (!errors) {
ctx->filename = NULL;
ctx->psize = PMEMOBJ_MIN_POOL;
ctx->newpool = 0;
ctx->pop = NULL;
ctx->fileio = false;
ctx->fmode = 0666;
ctx->mode = 0;
ctx->fd = -1;
}
if (!errors) {
while ((opt = getopt(ac, av, "s:m:n:")) != -1) {
switch (opt) {
case 'm':
mode = optarg[0];
if (mode == 'f') {
ctx->mode |= FILL;
} else if (mode == 'd') {
ctx->mode |= DUMP;
} else if (mode == 'g') {
ctx->mode |= GRAPH;
} else if (mode == 'i') {
ctx->mode |= INSERT;
parse_keyval(ctx, av[optind], INSERT);
optind++;
} else if (mode == 's') {
ctx->mode |= SEARCH;
parse_keyval(ctx, av[optind], SEARCH);
optind++;
} else if (mode == 'r') {
ctx->mode |= REMOVE;
parse_keyval(ctx, av[optind], REMOVE);
optind++;
} else {
errors++;
}
break;
case 'n': {
long insertions;
insertions = strtol(optarg, NULL, 0);
if (insertions > 0 && insertions < LONG_MAX) {
ctx->insertions = insertions;
}
break;
}
case 's': {
long poolsize;
poolsize = strtol(optarg, NULL, 0);
if (poolsize >= PMEMOBJ_MIN_POOL) {
ctx->psize = poolsize;
}
break;
}
default:
errors++;
break;
}
}
}
	if (!errors) {
		if (optind >= ac)
			errors++;
		else
			ctx->filename = strdup(av[optind]);
	}
return errors;
}
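/*
 * parse_keyval -- split a "key[:value]" argument; INSERT expects
 * both parts, SEARCH and REMOVE only the key
 */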
static int parse_keyval(struct ds_context *ctx, char *arg, int mode)
{
int errors = 0;
char *p;
p = strtok(arg, ":");
if (p == NULL) {
errors++;
}
if (!errors) {
if (ctx->mode & (SEARCH|REMOVE|INSERT)) {
ctx->key = (unsigned char *)strdup(p);
assert(ctx->key != NULL);
ctx->key_len = strlen(p) + 1;
}
if (ctx->mode & INSERT) {
p = strtok(NULL, ":");
assert(p != NULL);
ctx->value = (unsigned char *)strdup(p);
assert(ctx->value != NULL);
ctx->val_len = strlen(p) + 1;
}
}
return errors;
}
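/*
 * exit_handler -- close the memory pool or the mapped file
 */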
void
exit_handler(struct ds_context *ctx)
{
if (!ctx->fileio) {
if (ctx->pop) {
pmemobj_close(ctx->pop);
}
} else {
if (ctx->fd > (-1)) {
close(ctx->fd);
}
}
}
int
art_tree_map_init(struct datastore *ds, struct ds_context *ctx)
{
int errors = 0;
char *error_string;
/* calculate a required pool size */
if (ctx->psize < PMEMOBJ_MIN_POOL)
ctx->psize = PMEMOBJ_MIN_POOL;
if (!ctx->fileio) {
if (access(ctx->filename, F_OK) != 0) {
error_string = "pmemobj_create";
ctx->pop = pmemobj_create(ctx->filename,
POBJ_LAYOUT_NAME(arttree_tx),
ctx->psize, ctx->fmode);
ctx->newpool = 1;
} else {
error_string = "pmemobj_open";
ctx->pop = pmemobj_open(ctx->filename,
POBJ_LAYOUT_NAME(arttree_tx));
}
if (ctx->pop == NULL) {
perror(error_string);
errors++;
}
} else {
int flags = O_CREAT | O_RDWR | O_SYNC;
/* Create a file if it does not exist. */
if ((ctx->fd = open(ctx->filename, flags, ctx->fmode)) < 0) {
perror(ctx->filename);
errors++;
}
/* allocate the pmem */
if ((errno = posix_fallocate(ctx->fd, 0, ctx->psize)) != 0) {
perror("posix_fallocate");
errors++;
}
/* map file to memory */
if ((ctx->addr = mmap(NULL, ctx->psize, PROT_READ, MAP_SHARED,
ctx->fd, 0)) == MAP_FAILED) {
perror("mmap");
errors++;
}
}
if (!errors) {
pmemobj_ds_set_priv(ds, ctx);
} else {
if (ctx->fileio) {
if (ctx->addr != NULL) {
munmap(ctx->addr, ctx->psize);
}
if (ctx->fd >= 0) {
close(ctx->fd);
}
} else {
if (ctx->pop) {
pmemobj_close(ctx->pop);
}
}
}
return errors;
}
/*
* pmemobj_ds_set_priv -- set private structure of datastore
*/
void
pmemobj_ds_set_priv(struct datastore *ds, void *priv)
{
ds->priv = priv;
}
struct datastore myds;
static void
usage(char *progname)
{
printf("usage: %s -m [f|d|g] file\n", progname);
printf(" -m mode known modes are\n");
printf(" f fill create and fill art tree\n");
printf(" i insert insert an element into the art tree\n");
printf(" s search search for a key in the art tree\n");
printf(" r remove remove an element from the art tree\n");
printf(" d dump dump art tree\n");
printf(" g graph dump art tree as a graphviz dot graph\n");
printf(" -n <number> number of key-value pairs to insert"
" into the art tree\n");
printf(" -s <size> size in bytes of the memory pool"
" (minimum and default: 8 MB)");
printf("\nfilling an art tree is done by reading key-value pairs\n"
"from standard input.\n"
"Both keys and values are single line only.\n");
}
int
main(int argc, char *argv[])
{
if (initialize_context(&my_context, argc, argv) != 0) {
usage(argv[0]);
return 1;
}
if (art_tree_map_init(&myds, &my_context) != 0) {
fprintf(stderr, "failed to initialize memory pool file\n");
return 1;
}
if (my_context.pop == NULL) {
perror("pool initialization");
return 1;
}
if (art_tree_init(my_context.pop, &my_context.newpool)) {
perror("pool setup");
return 1;
}
if ((my_context.mode & FILL)) {
if (add_elements(&my_context)) {
perror("add elements");
return 1;
}
}
if ((my_context.mode & INSERT)) {
if (insert_element(&my_context)) {
perror("insert elements");
return 1;
}
}
if ((my_context.mode & SEARCH)) {
if (search_element(&my_context)) {
perror("search elements");
return 1;
}
}
if ((my_context.mode & REMOVE)) {
if (delete_element(&my_context)) {
perror("delete elements");
return 1;
}
}
if (my_context.mode & DUMP) {
art_iter(my_context.pop, dump_art_leaf_callback, NULL);
}
if (my_context.mode & GRAPH) {
printf("digraph g {\nrankdir=LR;\n");
art_iter(my_context.pop, dump_art_node_callback, NULL);
printf("}");
}
exit_handler(&my_context);
return 0;
}
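/*
 * add_elements -- read key/value pairs from standard input and
 * insert them into the art tree
 */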
int
add_elements(struct ds_context *ctx)
{
PMEMobjpool *pop;
int errors = 0;
int i;
int key_len;
int val_len;
unsigned char *key;
unsigned char *value;
if (ctx == NULL) {
errors++;
} else if (ctx->pop == NULL) {
errors++;
}
if (!errors) {
pop = ctx->pop;
for (i = 0; i < ctx->insertions; i++) {
key = NULL;
value = NULL;
key_len = read_key(&key);
val_len = read_value(&value);
art_insert(pop, key, key_len, value, val_len);
if (key != NULL)
free(key);
if (value != NULL)
free(value);
}
}
return errors;
}
int
insert_element(struct ds_context *ctx)
{
PMEMobjpool *pop;
int errors = 0;
if (ctx == NULL) {
errors++;
} else if (ctx->pop == NULL) {
errors++;
}
if (!errors) {
pop = ctx->pop;
art_insert(pop, ctx->key, ctx->key_len,
ctx->value, ctx->val_len);
}
return errors;
}
int
search_element(struct ds_context *ctx)
{
PMEMobjpool *pop;
TOID(var_string) value;
int errors = 0;
if (ctx == NULL) {
errors++;
} else if (ctx->pop == NULL) {
errors++;
}
if (!errors) {
pop = ctx->pop;
printf("search key [%s]: ", (char *)ctx->key);
value = art_search(pop, ctx->key, ctx->key_len);
if (TOID_IS_NULL(value)) {
printf("not found\n");
} else {
printf("value [%s]\n", D_RO(value)->s);
}
}
return errors;
}
int
delete_element(struct ds_context *ctx)
{
PMEMobjpool *pop;
int errors = 0;
if (ctx == NULL) {
errors++;
} else if (ctx->pop == NULL) {
errors++;
}
if (!errors) {
pop = ctx->pop;
art_delete(pop, ctx->key, ctx->key_len);
}
return errors;
}
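/*
 * read_line -- read one line from standard input into an allocated
 * buffer, replacing the trailing newline with a NUL terminator
 */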
ssize_t
read_line(unsigned char **line)
{
	size_t len = 0;
ssize_t read = -1;
*line = NULL;
if ((read = getline((char **)line, &len, stdin)) > 0) {
(*line)[read - 1] = '\0';
}
return read;
}
static int
dump_art_leaf_callback(void *data,
const unsigned char *key, uint32_t key_len,
const unsigned char *val, uint32_t val_len)
{
cb_data *cbd;
if (data != NULL) {
cbd = (cb_data *)data;
printf("node type %d ", D_RO(cbd->node)->art_node_type);
if (D_RO(cbd->node)->art_node_type == art_leaf_t) {
printf("key len %" PRIu32 " = [%s], value len %" PRIu32
" = [%s]",
key_len,
key != NULL ? (char *)key : (char *)"NULL",
val_len,
val != NULL ? (char *)val : (char *)"NULL");
}
printf("\n");
} else {
printf("key len %" PRIu32 " = [%s], value len %" PRIu32
" = [%s]\n",
key_len,
key != NULL ? (char *)key : (char *)"NULL",
val_len,
val != NULL ? (char *)val : (char *)"NULL");
}
return 0;
}
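/*
 * print_node_info -- emit a graphviz label describing an inner node
 */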
static void
print_node_info(char *nodetype, uint64_t off, const art_node *an)
{
int p_len, i;
p_len = an->partial_len;
printf("N%" PRIx64 " [label=\"%s at\\n0x%" PRIx64 "\\n%d children",
off, nodetype, off, an->num_children);
if (p_len != 0) {
printf("\\nlen %d", p_len);
printf(": ");
for (i = 0; i < p_len; i++) {
printf("%c", an->partial[i]);
}
}
printf("\"];\n");
}
static int
dump_art_node_callback(void *data,
const unsigned char *key, uint32_t key_len,
const unsigned char *val, uint32_t val_len)
{
cb_data *cbd;
const art_node *an;
TOID(art_node4) an4;
TOID(art_node16) an16;
TOID(art_node48) an48;
TOID(art_node256) an256;
TOID(art_leaf) al;
TOID(art_node_u) child;
TOID(var_string) oid_key;
TOID(var_string) oid_value;
if (data != NULL) {
cbd = (cb_data *)data;
switch (D_RO(cbd->node)->art_node_type) {
case NODE4:
an4 = D_RO(cbd->node)->u.an4;
an = &(D_RO(an4)->n);
child = D_RO(an4)->children[cbd->child_idx];
if (!TOID_IS_NULL(child)) {
print_node_info("node4",
cbd->node.oid.off, an);
printf("N%" PRIx64 " -> N%" PRIx64
" [label=\"%c\"];\n",
cbd->node.oid.off,
child.oid.off,
D_RO(an4)->keys[cbd->child_idx]);
}
break;
case NODE16:
an16 = D_RO(cbd->node)->u.an16;
an = &(D_RO(an16)->n);
child = D_RO(an16)->children[cbd->child_idx];
if (!TOID_IS_NULL(child)) {
print_node_info("node16",
cbd->node.oid.off, an);
printf("N%" PRIx64 " -> N%" PRIx64
" [label=\"%c\"];\n",
cbd->node.oid.off,
child.oid.off,
D_RO(an16)->keys[cbd->child_idx]);
}
break;
case NODE48:
an48 = D_RO(cbd->node)->u.an48;
an = &(D_RO(an48)->n);
child = D_RO(an48)->children[cbd->child_idx];
if (!TOID_IS_NULL(child)) {
print_node_info("node48",
cbd->node.oid.off, an);
printf("N%" PRIx64 " -> N%" PRIx64
" [label=\"%c\"];\n",
cbd->node.oid.off,
child.oid.off,
D_RO(an48)->keys[cbd->child_idx]);
}
break;
case NODE256:
an256 = D_RO(cbd->node)->u.an256;
an = &(D_RO(an256)->n);
child = D_RO(an256)->children[cbd->child_idx];
if (!TOID_IS_NULL(child)) {
print_node_info("node256",
cbd->node.oid.off, an);
printf("N%" PRIx64 " -> N%" PRIx64
" [label=\"0x%x\"];\n",
cbd->node.oid.off,
child.oid.off,
(char)((cbd->child_idx) & 0xff));
}
break;
case art_leaf_t:
al = D_RO(cbd->node)->u.al;
oid_key = D_RO(al)->key;
oid_value = D_RO(al)->value;
printf("N%" PRIx64 " [shape=box,"
"label=\"leaf at\\n0x%" PRIx64 "\"];\n",
cbd->node.oid.off, cbd->node.oid.off);
printf("N%" PRIx64 " [shape=box,"
"label=\"key at 0x%" PRIx64 ": %s\"];\n",
oid_key.oid.off, oid_key.oid.off,
D_RO(oid_key)->s);
printf("N%" PRIx64 " [shape=box,"
"label=\"value at 0x%" PRIx64 ": %s\"];\n",
oid_value.oid.off, oid_value.oid.off,
D_RO(oid_value)->s);
printf("N%" PRIx64 " -> N%" PRIx64 ";\n",
cbd->node.oid.off, oid_key.oid.off);
printf("N%" PRIx64 " -> N%" PRIx64 ";\n",
cbd->node.oid.off, oid_value.oid.off);
break;
default:
break;
}
} else {
printf("leaf: key len %" PRIu32
" = [%s], value len %" PRIu32 " = [%s]\n",
key_len, key, val_len, val);
}
return 0;
}
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/examples/libpmemobj/libart/art.h
/*
* Copyright 2016, FUJITSU TECHNOLOGY SOLUTIONS GMBH
* Copyright 2012, Armon Dadgar. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* ===========================================================================
*
* Filename: art.h
*
* Description: header file for art tree on pmem implementation
*
* Author: Andreas Bluemle, Dieter Kasper
* Andreas.Bluemle.external@ts.fujitsu.com
* dieter.kasper@ts.fujitsu.com
*
* Organization: FUJITSU TECHNOLOGY SOLUTIONS GMBH
*
* ===========================================================================
*/
/*
* based on https://github.com/armon/libart/src/art.h
*/
#ifndef _ART_H
#define _ART_H
#ifdef __cplusplus
extern "C" {
#endif
#define MAX_PREFIX_LEN 10
typedef enum {
NODE4 = 0,
NODE16 = 1,
NODE48 = 2,
NODE256 = 3,
art_leaf_t = 4,
art_node_types = 5 /* number of different art_nodes */
} art_node_type;
char *art_node_names[] = {
"art_node4",
"art_node16",
"art_node48",
"art_node256",
"art_leaf"
};
/*
* forward declarations; these are required when typedef shall be
* used instead of struct
*/
struct _art_node_u; typedef struct _art_node_u art_node_u;
struct _art_node; typedef struct _art_node art_node;
struct _art_node4; typedef struct _art_node4 art_node4;
struct _art_node16; typedef struct _art_node16 art_node16;
struct _art_node48; typedef struct _art_node48 art_node48;
struct _art_node256; typedef struct _art_node256 art_node256;
struct _art_leaf; typedef struct _art_leaf art_leaf;
struct _var_string; typedef struct _var_string var_string;
POBJ_LAYOUT_BEGIN(arttree_tx);
POBJ_LAYOUT_ROOT(arttree_tx, struct art_tree_root);
POBJ_LAYOUT_TOID(arttree_tx, art_node_u);
POBJ_LAYOUT_TOID(arttree_tx, art_node4);
POBJ_LAYOUT_TOID(arttree_tx, art_node16);
POBJ_LAYOUT_TOID(arttree_tx, art_node48);
POBJ_LAYOUT_TOID(arttree_tx, art_node256);
POBJ_LAYOUT_TOID(arttree_tx, art_leaf);
POBJ_LAYOUT_TOID(arttree_tx, var_string);
POBJ_LAYOUT_END(arttree_tx);
struct _var_string {
size_t len;
unsigned char s[];
};
/*
* This struct is included as part of all the various node sizes
*/
struct _art_node {
uint8_t num_children;
uint32_t partial_len;
unsigned char partial[MAX_PREFIX_LEN];
};
/*
* Small node with only 4 children
*/
struct _art_node4 {
art_node n;
unsigned char keys[4];
TOID(art_node_u) children[4];
};
/*
* Node with 16 children
*/
struct _art_node16 {
art_node n;
unsigned char keys[16];
TOID(art_node_u) children[16];
};
/*
* Node with 48 children, but a full 256 byte field.
*/
struct _art_node48 {
art_node n;
unsigned char keys[256];
TOID(art_node_u) children[48];
};
/*
* Full node with 256 children
*/
struct _art_node256 {
art_node n;
TOID(art_node_u) children[256];
};
/*
* Represents a leaf. These are of arbitrary size, as they include the key.
*/
struct _art_leaf {
TOID(var_string) value;
TOID(var_string) key;
};
struct _art_node_u {
uint8_t art_node_type;
uint8_t art_node_tag;
union {
TOID(art_node4) an4; /* starts with art_node */
TOID(art_node16) an16; /* starts with art_node */
TOID(art_node48) an48; /* starts with art_node */
TOID(art_node256) an256; /* starts with art_node */
TOID(art_leaf) al;
} u;
};
struct art_tree_root {
int size;
TOID(art_node_u) root;
};
typedef struct _cb_data {
TOID(art_node_u) node;
int child_idx;
} cb_data;
/*
* Macros to manipulate art_node tags
*/
#define IS_LEAF(x) (((x)->art_node_type == art_leaf_t))
#define SET_LEAF(x) (((x)->art_node_tag = art_leaf_t))
#define COPY_BLOB(_obj, _blob, _len) do {\
	D_RW(_obj)->len = _len;\
	TX_MEMCPY(D_RW(_obj)->s, _blob, _len);\
	D_RW(_obj)->s[(_len) - 1] = '\0';\
} while (0)
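/*
 * Sketch of intended COPY_BLOB usage (illustrative names only): copy a
 * buffer, including its terminating NUL, into a freshly allocated
 * var_string inside a transaction.
 *
 *	TX_BEGIN(pop) {
 *		TOID(var_string) key = TX_ALLOC(var_string,
 *		    sizeof(var_string) + key_len);
 *		COPY_BLOB(key, key_buf, key_len);
 *	} TX_END
 */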
typedef int(*art_callback)(void *data,
const unsigned char *key, uint32_t key_len,
const unsigned char *value, uint32_t val_len);
extern int art_tree_init(PMEMobjpool *pop, int *newpool);
extern uint64_t art_size(PMEMobjpool *pop);
extern int art_iter(PMEMobjpool *pop, art_callback cb, void *data);
extern TOID(var_string) art_insert(PMEMobjpool *pop,
const unsigned char *key, int key_len,
void *value, int val_len);
extern TOID(var_string) art_search(PMEMobjpool *pop,
const unsigned char *key, int key_len);
extern TOID(var_string) art_delete(PMEMobjpool *pop,
const unsigned char *key, int key_len);
#ifdef __cplusplus
}
#endif
#endif /* _ART_H */
| 5,918 | 26.530233 | 78 |
h
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/examples/libpmemobj/libart/arttree_search.c
|
/*
* Copyright 2016, FUJITSU TECHNOLOGY SOLUTIONS GMBH
* Copyright 2016-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* ===========================================================================
*
* Filename: arttree_search.c
*
* Description: implementation of search function for ART tree
*
* Author: Andreas Bluemle, Dieter Kasper
* Andreas.Bluemle.external@ts.fujitsu.com
* dieter.kasper@ts.fujitsu.com
*
* Organization: FUJITSU TECHNOLOGY SOLUTIONS GMBH
*
* ===========================================================================
*/
#include <stdio.h>
#include <inttypes.h>
#include <libgen.h>
#include <string.h>
#include <unistd.h>
#include <stdlib.h>
#include <getopt.h>
#include <stdint.h>
#include <stdbool.h>
#include <assert.h>
#include <sys/mman.h>
#include "arttree_structures.h"
/*
* search context
*/
struct search_ctx {
struct pmem_context *pmem_ctx;
unsigned char *search_key;
int32_t hexdump;
};
static struct search_ctx *s_ctx = NULL;
struct search {
const char *name;
const char *brief;
char *(*func)(char *, struct search_ctx *);
void (*help)(char *);
};
/* local functions */
static int search_parse_args(char *appname, int ac, char *av[],
struct search_ctx *s_ctx);
static struct search *get_search(char *type_name);
static void print_usage(char *appname);
static void dump_PMEMoid(char *prefix, PMEMoid *oid);
static char *search_key(char *appname, struct search_ctx *ctx);
static int leaf_matches(struct search_ctx *ctx, art_leaf *n,
unsigned char *key, int key_len, int depth);
static int check_prefix(art_node *an,
unsigned char *key, int key_len, int depth);
static uint64_t find_child(art_node *n, int node_type, unsigned char key);
static void *get_node(struct search_ctx *ctx, int node_type, uint64_t off);
static uint64_t get_offset_an(art_node_u *au);
static void dump_art_tree_root(char *prefix, uint64_t off, void *p);
/* global visible interface */
void arttree_search_help(char *appname);
int arttree_search_func(char *appname, struct pmem_context *ctx,
int ac, char *av[]);
static const char *arttree_search_help_str =
"Search for key in ART tree\n"
"Arguments: <key>\n"
" <key> key\n"
;
static const struct option long_options[] = {
{"hexdump", no_argument, NULL, 'x'},
{NULL, 0, NULL, 0 },
};
static struct search s_funcs[] = {
{
.name = "key",
.brief = "search for key",
.func = search_key,
.help = NULL,
}
};
/* return the smaller of two integers */
static inline int
min(int a, int b)
{
	return (a < b) ? a : b;
}
/*
* number of arttree examine commands
*/
#define COMMANDS_NUMBER (sizeof(s_funcs) / sizeof(s_funcs[0]))
void
arttree_search_help(char *appname)
{
printf("%s %s\n", appname, arttree_search_help_str);
}
int
arttree_search_func(char *appname, struct pmem_context *ctx, int ac, char *av[])
{
int errors = 0;
struct search *s;
char *value;
value = NULL;
if (ctx == NULL) {
return -1;
}
if (s_ctx == NULL) {
s_ctx = (struct search_ctx *)malloc(sizeof(struct search_ctx));
if (s_ctx == NULL) {
return -1;
}
memset(s_ctx, 0, sizeof(struct search_ctx));
}
if (ctx->art_tree_root_offset == 0) {
fprintf(stderr, "search functions require knowledge"
" about the art_tree_root.\n");
fprintf(stderr, "Use \"set_root <offset>\""
" to define where the \nart_tree_root object"
" resides in the pmem file.\n");
errors++;
}
s_ctx->pmem_ctx = ctx;
if (search_parse_args(appname, ac, av, s_ctx) != 0) {
fprintf(stderr, "%s::%s: error parsing arguments\n",
appname, __FUNCTION__);
errors++;
}
if (!errors) {
s = get_search("key");
if (s != NULL) {
value = s->func(appname, s_ctx);
}
if (value != NULL) {
printf("key [%s] found, value [%s]\n",
s_ctx->search_key, value);
} else {
printf("key [%s] not found\n", s_ctx->search_key);
}
}
if (s_ctx->search_key != NULL) {
free(s_ctx->search_key);
}
free(s_ctx);
return errors;
}
static int
search_parse_args(char *appname, int ac, char *av[], struct search_ctx *s_ctx)
{
int ret = 0;
int opt;
optind = 0;
while ((opt = getopt_long(ac, av, "x", long_options, NULL)) != -1) {
switch (opt) {
case 'x':
s_ctx->hexdump = 1;
break;
default:
print_usage(appname);
ret = 1;
}
}
	if (ret == 0) {
		if (optind >= ac) {
			print_usage(appname);
			ret = 1;
		} else {
			s_ctx->search_key =
			    (unsigned char *)strdup(av[optind]);
		}
	}
return ret;
}
static void
print_usage(char *appname)
{
printf("%s: search <key>\n", appname);
}
/*
* get_search -- returns command for specified command name
*/
static struct search *
get_search(char *type_name)
{
int i;
if (type_name == NULL) {
return NULL;
}
for (i = 0; i < COMMANDS_NUMBER; i++) {
if (strcmp(type_name, s_funcs[i].name) == 0)
return &s_funcs[i];
}
return NULL;
}
static void *
get_node(struct search_ctx *ctx, int node_type, uint64_t off)
{
if (!VALID_NODE_TYPE(node_type))
return NULL;
printf("%s at off 0x%" PRIx64 "\n", art_node_names[node_type], off);
return ctx->pmem_ctx->addr + off;
}
static int
leaf_matches(struct search_ctx *ctx, art_leaf *n,
unsigned char *key, int key_len, int depth)
{
var_string *n_key;
(void) depth;
n_key = (var_string *)get_node(ctx, VAR_STRING, n->key.oid.off);
if (n_key == NULL)
return 1;
	/*
	 * Keys are stored as null-terminated strings, so the stored
	 * length includes the terminator, hence the +1 below.
	 */
	if (n_key->len != key_len + 1)
return 1;
return memcmp(n_key->s, key, key_len);
}
static int
check_prefix(art_node *n, unsigned char *key, int key_len, int depth)
{
int max_cmp = min(min(n->partial_len, MAX_PREFIX_LEN), key_len - depth);
int idx;
for (idx = 0; idx < max_cmp; idx++) {
if (n->partial[idx] != key[depth + idx])
return idx;
}
return idx;
}
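/*
 * Worked example for check_prefix() above: for a node with partial
 * prefix "art" (partial_len == 3) and search key "artist" at depth 0,
 * up to min(3, 6) bytes are compared and 3 is returned, i.e. the
 * whole stored prefix matched.
 */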
static uint64_t
find_child(art_node *n, int node_type, unsigned char c)
{
int i;
union {
art_node4 *p1;
art_node16 *p2;
art_node48 *p3;
art_node256 *p4;
} p;
printf("[%s] children %d search key %c [",
art_node_names[node_type], n->num_children, c);
switch (node_type) {
case ART_NODE4:
p.p1 = (art_node4 *)n;
for (i = 0; i < n->num_children; i++) {
printf("%c ", p.p1->keys[i]);
if (p.p1->keys[i] == c) {
printf("]\n");
return p.p1->children[i].oid.off;
}
}
break;
case ART_NODE16:
p.p2 = (art_node16 *)n;
for (i = 0; i < n->num_children; i++) {
printf("%c ", p.p2->keys[i]);
if (p.p2->keys[i] == c) {
printf("]\n");
return p.p2->children[i].oid.off;
}
}
break;
case ART_NODE48:
p.p3 = (art_node48 *)n;
i = p.p3->keys[c];
printf("%d ", p.p3->keys[c]);
if (i) {
printf("]\n");
return p.p3->children[i - 1].oid.off;
}
break;
case ART_NODE256:
p.p4 = (art_node256 *)n;
printf("0x%" PRIx64, p.p4->children[c].oid.off);
if (p.p4->children[c].oid.off != 0) {
printf("]\n");
return p.p4->children[c].oid.off;
}
break;
default:
abort();
}
printf("]\n");
return 0;
}
static uint64_t
get_offset_an(art_node_u *au)
{
uint64_t offset = 0;
switch (au->art_node_type) {
case ART_NODE4:
offset = au->u.an4.oid.off;
break;
case ART_NODE16:
offset = au->u.an16.oid.off;
break;
case ART_NODE48:
offset = au->u.an48.oid.off;
break;
case ART_NODE256:
offset = au->u.an256.oid.off;
break;
case ART_LEAF:
offset = au->u.al.oid.off;
break;
default:
break;
}
return offset;
}
static char *
search_key(char *appname, struct search_ctx *ctx)
{
int errors = 0;
	void *p; /* art tree root object */
off_t p_off;
art_node_u *p_au; /* art_node_u */
off_t p_au_off;
void *p_an; /* specific art node from art_node_u */
off_t p_an_off;
art_node *an; /* art node */
var_string *n_value;
char *value;
int prefix_len;
int depth = 0;
int key_len;
uint64_t child_off;
key_len = strlen((char *)(ctx->search_key));
value = NULL;
p_off = ctx->pmem_ctx->art_tree_root_offset;
p = get_node(ctx, ART_TREE_ROOT, p_off);
assert(p != NULL);
dump_art_tree_root("art_tree_root", p_off, p);
p_au_off = ((art_tree_root *)p)->root.oid.off;
p_au = (art_node_u *)get_node(ctx, ART_NODE_U, p_au_off);
if (p_au == NULL)
errors++;
if (!errors) {
while (p_au) {
p_an_off = get_offset_an(p_au);
p_an = get_node(ctx, p_au->art_node_type, p_an_off);
assert(p_an != NULL);
if (p_au->art_node_type == ART_LEAF) {
if (!leaf_matches(ctx, (art_leaf *)p_an,
ctx->search_key, key_len, depth)) {
n_value = (var_string *)
get_node(ctx, VAR_STRING,
((art_leaf *)p_an)->value.oid.off);
return (char *)(n_value->s);
}
}
an = (art_node *)p_an;
if (an->partial_len) {
prefix_len = check_prefix(an, ctx->search_key,
key_len, depth);
if (prefix_len !=
min(MAX_PREFIX_LEN, an->partial_len)) {
return NULL;
}
depth = depth + an->partial_len;
}
child_off = find_child(an, p_au->art_node_type,
ctx->search_key[depth]);
if (child_off != 0) {
p_au_off = child_off;
p_au = get_node(ctx, ART_NODE_U, p_au_off);
} else {
p_au = NULL;
}
depth++;
}
}
if (errors) {
return NULL;
} else {
return value;
}
}
static void
dump_art_tree_root(char *prefix, uint64_t off, void *p)
{
art_tree_root *tree_root;
tree_root = (art_tree_root *)p;
printf("at offset 0x%" PRIx64 ", art_tree_root {\n", off);
printf(" size %d\n", tree_root->size);
dump_PMEMoid(" art_node_u", (PMEMoid *)&(tree_root->root));
printf("\n};\n");
}
static void
dump_PMEMoid(char *prefix, PMEMoid *oid)
{
printf("%s { PMEMoid pool_uuid_lo %" PRIx64
" off 0x%" PRIx64 " = %" PRId64 " }\n",
prefix, oid->pool_uuid_lo, oid->off, oid->off);
}
| 11,217 | 22.567227 | 80 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/examples/libpmemobj/libart/arttree.h
|
/*
* Copyright 2016, FUJITSU TECHNOLOGY SOLUTIONS GMBH
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* ===========================================================================
*
* Filename: arttree.h
*
* Description: header file for art tree on pmem implementation
*
* Author: Andreas Bluemle, Dieter Kasper
* Andreas.Bluemle.external@ts.fujitsu.com
* dieter.kasper@ts.fujitsu.com
*
* Organization: FUJITSU TECHNOLOGY SOLUTIONS GMBH
*
* ===========================================================================
*/
#ifndef _ARTTREE_H
#define _ARTTREE_H
#ifdef __cplusplus
extern "C" {
#endif
#include "art.h"
#ifdef __cplusplus
}
#endif
#endif /* _ARTTREE_H */
| 2,256 | 34.825397 | 78 |
h
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/examples/libpmemobj/queue/queue.c
|
/*
* Copyright 2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* queue.c -- array based queue example
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <errno.h>
#include <libpmemobj.h>
POBJ_LAYOUT_BEGIN(queue);
POBJ_LAYOUT_ROOT(queue, struct root);
POBJ_LAYOUT_TOID(queue, struct entry);
POBJ_LAYOUT_TOID(queue, struct queue);
POBJ_LAYOUT_END(queue);
struct entry { /* queue entry that contains arbitrary data */
size_t len; /* length of the data buffer */
char data[];
};
struct queue { /* array-based queue container */
size_t front; /* position of the first entry */
size_t back; /* position of the last entry */
size_t capacity; /* size of the entries array */
TOID(struct entry) entries[];
};
struct root {
TOID(struct queue) queue;
};
/*
* queue_constructor -- constructor of the queue container
*/
static int
queue_constructor(PMEMobjpool *pop, void *ptr, void *arg)
{
struct queue *q = ptr;
size_t *capacity = arg;
q->front = 0;
q->back = 0;
q->capacity = *capacity;
/* atomic API requires that objects are persisted in the constructor */
pmemobj_persist(pop, q, sizeof(*q));
return 0;
}
/*
* queue_new -- allocates a new queue container using the atomic API
*/
static int
queue_new(PMEMobjpool *pop, TOID(struct queue) *q, size_t nentries)
{
return POBJ_ALLOC(pop,
q,
struct queue,
sizeof(struct queue) + sizeof(TOID(struct entry)) * nentries,
queue_constructor,
&nentries);
}
/*
* queue_nentries -- returns the number of entries
*/
static size_t
queue_nentries(struct queue *queue)
{
return queue->back - queue->front;
}
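/*
 * Worked example: front and back only ever grow, so with capacity 4,
 * front == 5 and back == 7 the queue holds 7 - 5 == 2 entries, stored
 * at slots 5 % 4 == 1 and 6 % 4 == 2 of the entries array.
 */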
/*
* queue_enqueue -- allocates and inserts a new entry into the queue
*/
static int
queue_enqueue(PMEMobjpool *pop, struct queue *queue,
const char *data, size_t len)
{
if (queue->capacity - queue_nentries(queue) == 0)
return -1; /* at capacity */
/* back is never decreased, need to calculate the real position */
size_t pos = queue->back % queue->capacity;
int ret = 0;
printf("inserting %zu: %s\n", pos, data);
TX_BEGIN(pop) {
/* let's first reserve the space at the end of the queue */
TX_ADD_DIRECT(&queue->back);
queue->back += 1;
/* now we can safely allocate and initialize the new entry */
TOID(struct entry) entry = TX_ALLOC(struct entry,
sizeof(struct entry) + len);
D_RW(entry)->len = len;
memcpy(D_RW(entry)->data, data, len);
/* and then snapshot the queue entry that we will modify */
TX_ADD_DIRECT(&queue->entries[pos]);
queue->entries[pos] = entry;
} TX_ONABORT { /* don't forget about error handling! ;) */
ret = -1;
} TX_END
return ret;
}
/*
* queue_dequeue - removes and frees the first element from the queue
*/
static int
queue_dequeue(PMEMobjpool *pop, struct queue *queue)
{
if (queue_nentries(queue) == 0)
return -1; /* no entries to remove */
/* front is also never decreased */
size_t pos = queue->front % queue->capacity;
int ret = 0;
printf("removing %zu: %s\n", pos, D_RO(queue->entries[pos])->data);
TX_BEGIN(pop) {
/* move the queue forward */
TX_ADD_DIRECT(&queue->front);
queue->front += 1;
/* and since this entry is now unreachable, free it */
TX_FREE(queue->entries[pos]);
/* notice that we do not change the PMEMoid itself */
} TX_ONABORT {
ret = -1;
} TX_END
return ret;
}
/*
* queue_show -- prints all queue entries
*/
static void
queue_show(PMEMobjpool *pop, struct queue *queue)
{
size_t nentries = queue_nentries(queue);
printf("Entries %zu/%zu\n", nentries, queue->capacity);
for (size_t i = queue->front; i < queue->back; ++i) {
size_t pos = i % queue->capacity;
printf("%zu: %s\n", pos, D_RO(queue->entries[pos])->data);
}
}
/* available queue operations */
enum queue_op {
UNKNOWN_QUEUE_OP,
QUEUE_NEW,
QUEUE_ENQUEUE,
QUEUE_DEQUEUE,
QUEUE_SHOW,
MAX_QUEUE_OP,
};
/* queue operations strings */
static const char *ops_str[MAX_QUEUE_OP] =
{"", "new", "enqueue", "dequeue", "show"};
/*
 * queue_op_parse -- parses the operation string and returns matching queue_op
*/
static enum queue_op
queue_op_parse(const char *str)
{
for (int i = 0; i < MAX_QUEUE_OP; ++i)
if (strcmp(str, ops_str[i]) == 0)
return (enum queue_op)i;
return UNKNOWN_QUEUE_OP;
}
/*
* fail -- helper function to exit the application in the event of an error
*/
static void __attribute__((noreturn)) /* this function terminates */
fail(const char *msg)
{
fprintf(stderr, "%s\n", msg);
exit(EXIT_FAILURE);
}
int
main(int argc, char *argv[])
{
enum queue_op op;
if (argc < 3 || (op = queue_op_parse(argv[2])) == UNKNOWN_QUEUE_OP)
fail("usage: file-name [new <n>|show|enqueue <data>|dequeue]");
PMEMobjpool *pop = pmemobj_open(argv[1], POBJ_LAYOUT_NAME(queue));
if (pop == NULL)
fail("failed to open the pool");
TOID(struct root) root = POBJ_ROOT(pop, struct root);
struct root *rootp = D_RW(root);
size_t capacity;
switch (op) {
case QUEUE_NEW:
if (argc != 4)
fail("missing size of the queue");
char *end;
errno = 0;
capacity = strtoull(argv[3], &end, 0);
if (errno == ERANGE || *end != '\0')
fail("invalid size of the queue");
if (queue_new(pop, &rootp->queue, capacity) != 0)
fail("failed to create a new queue");
break;
case QUEUE_ENQUEUE:
if (argc != 4)
fail("missing new entry data");
if (D_RW(rootp->queue) == NULL)
fail("queue must exist");
if (queue_enqueue(pop, D_RW(rootp->queue),
argv[3], strlen(argv[3]) + 1) != 0)
fail("failed to insert new entry");
break;
case QUEUE_DEQUEUE:
if (D_RW(rootp->queue) == NULL)
fail("queue must exist");
if (queue_dequeue(pop, D_RW(rootp->queue)) != 0)
fail("failed to remove entry");
break;
case QUEUE_SHOW:
if (D_RW(rootp->queue) == NULL)
fail("queue must exist");
queue_show(pop, D_RW(rootp->queue));
break;
default:
assert(0); /* unreachable */
break;
}
pmemobj_close(pop);
return 0;
}
| 7,424 | 24.692042 | 78 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/examples/libpmemobj/linkedlist/pmemobj_list.h
|
/*
* Copyright 2016, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* pmemobj_list.h -- macro definitions for persistent
* singly linked list and tail queue
*/
#ifndef PMEMOBJ_LISTS_H
#define PMEMOBJ_LISTS_H
#include <libpmemobj.h>
/*
* This file defines two types of persistent data structures:
* singly-linked lists and tail queue.
*
* All macros defined in this file must be used within libpmemobj
* transactional API. Following snippet presents example of usage:
*
* TX_BEGIN(pop) {
* POBJ_TAILQ_INIT(head);
* } TX_ONABORT {
* abort();
* } TX_END
*
* SLIST TAILQ
* _HEAD + +
* _ENTRY + +
* _INIT + +
* _EMPTY + +
* _FIRST + +
* _NEXT + +
* _PREV - +
* _LAST - +
* _FOREACH + +
* _FOREACH_REVERSE - +
* _INSERT_HEAD + +
* _INSERT_BEFORE - +
* _INSERT_AFTER + +
* _INSERT_TAIL - +
* _MOVE_ELEMENT_HEAD - +
* _MOVE_ELEMENT_TAIL - +
* _REMOVE_HEAD + -
* _REMOVE + +
* _REMOVE_FREE + +
* _SWAP_HEAD_TAIL - +
*/
/*
* Singly-linked List definitions.
*/
#define POBJ_SLIST_HEAD(name, type)\
struct name {\
TOID(type) pe_first;\
}
#define POBJ_SLIST_ENTRY(type)\
struct {\
TOID(type) pe_next;\
}
/*
* Singly-linked List access methods.
*/
#define POBJ_SLIST_EMPTY(head) (TOID_IS_NULL((head)->pe_first))
#define POBJ_SLIST_FIRST(head) ((head)->pe_first)
#define POBJ_SLIST_NEXT(elm, field) (D_RO(elm)->field.pe_next)
/*
* Singly-linked List functions.
*/
#define POBJ_SLIST_INIT(head) do {\
TX_ADD_DIRECT(&(head)->pe_first);\
TOID_ASSIGN((head)->pe_first, OID_NULL);\
} while (0)
#define POBJ_SLIST_INSERT_HEAD(head, elm, field) do {\
TOID_TYPEOF(elm) *elm_ptr = D_RW(elm);\
TX_ADD_DIRECT(&elm_ptr->field.pe_next);\
elm_ptr->field.pe_next = (head)->pe_first;\
TX_SET_DIRECT(head, pe_first, elm);\
} while (0)
#define POBJ_SLIST_INSERT_AFTER(slistelm, elm, field) do {\
TOID_TYPEOF(slistelm) *slistelm_ptr = D_RW(slistelm);\
TOID_TYPEOF(elm) *elm_ptr = D_RW(elm);\
TX_ADD_DIRECT(&elm_ptr->field.pe_next);\
elm_ptr->field.pe_next = slistelm_ptr->field.pe_next;\
TX_ADD_DIRECT(&slistelm_ptr->field.pe_next);\
slistelm_ptr->field.pe_next = elm;\
} while (0)
#define POBJ_SLIST_REMOVE_HEAD(head, field) do {\
TX_ADD_DIRECT(&(head)->pe_first);\
(head)->pe_first = D_RO((head)->pe_first)->field.pe_next;\
} while (0)
#define POBJ_SLIST_REMOVE(head, elm, field) do {\
if (TOID_EQUALS((head)->pe_first, elm)) {\
POBJ_SLIST_REMOVE_HEAD(head, field);\
} else {\
TOID_TYPEOF(elm) *curelm_ptr = D_RW((head)->pe_first);\
while (!TOID_EQUALS(curelm_ptr->field.pe_next, elm))\
curelm_ptr = D_RW(curelm_ptr->field.pe_next);\
TX_ADD_DIRECT(&curelm_ptr->field.pe_next);\
curelm_ptr->field.pe_next = D_RO(elm)->field.pe_next;\
}\
} while (0)
#define POBJ_SLIST_REMOVE_FREE(head, elm, field) do {\
POBJ_SLIST_REMOVE(head, elm, field);\
TX_FREE(elm);\
} while (0)
#define POBJ_SLIST_FOREACH(var, head, field)\
for ((var) = POBJ_SLIST_FIRST(head);\
!TOID_IS_NULL(var);\
var = POBJ_SLIST_NEXT(var, field))
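/*
 * Example (sketch, illustrative names, assuming struct snode is part
 * of the pool layout): building a singly-linked list with the macros
 * above; every modification must run inside a transaction, just like
 * the tail-queue snippet in the file header.
 *
 *	POBJ_SLIST_HEAD(slisthead, struct snode);
 *	struct snode {
 *		int data;
 *		POBJ_SLIST_ENTRY(struct snode) next;
 *	};
 *
 *	TX_BEGIN(pop) {
 *		TOID(struct snode) node = TX_NEW(struct snode);
 *		D_RW(node)->data = 42;
 *		POBJ_SLIST_INSERT_HEAD(head, node, next);
 *	} TX_END
 */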
/*
* Tail-queue definitions.
*/
#define POBJ_TAILQ_ENTRY(type)\
struct {\
TOID(type) pe_next;\
TOID(type) pe_prev;\
}
#define POBJ_TAILQ_HEAD(name, type)\
struct name {\
TOID(type) pe_first;\
TOID(type) pe_last;\
}
/*
* Tail-queue access methods.
*/
#define POBJ_TAILQ_FIRST(head) ((head)->pe_first)
#define POBJ_TAILQ_LAST(head) ((head)->pe_last)
#define POBJ_TAILQ_EMPTY(head) (TOID_IS_NULL((head)->pe_first))
#define POBJ_TAILQ_NEXT(elm, field) (D_RO(elm)->field.pe_next)
#define POBJ_TAILQ_PREV(elm, field) (D_RO(elm)->field.pe_prev)
/*
* Tail-queue List internal methods.
*/
#define _POBJ_SWAP_PTR(elm, field) do {\
TOID_TYPEOF(elm) *elm_ptr = D_RW(elm);\
TX_ADD_DIRECT(&elm_ptr->field);\
__typeof__(elm) temp = elm_ptr->field.pe_prev;\
elm_ptr->field.pe_prev = elm_ptr->field.pe_next;\
elm_ptr->field.pe_next = temp;\
} while (0)
/*
* Tail-queue functions.
*/
#define POBJ_TAILQ_SWAP_HEAD_TAIL(head, field) do {\
__typeof__((head)->pe_first) temp = (head)->pe_first;\
TX_ADD_DIRECT(head);\
(head)->pe_first = (head)->pe_last;\
(head)->pe_last = temp;\
} while (0)
#define POBJ_TAILQ_FOREACH(var, head, field)\
for ((var) = POBJ_TAILQ_FIRST(head);\
!TOID_IS_NULL(var);\
var = POBJ_TAILQ_NEXT(var, field))
#define POBJ_TAILQ_FOREACH_REVERSE(var, head, field)\
for ((var) = POBJ_TAILQ_LAST(head);\
!TOID_IS_NULL(var);\
var = POBJ_TAILQ_PREV(var, field))
#define POBJ_TAILQ_INIT(head) do {\
TX_ADD_FIELD_DIRECT(head, pe_first);\
TOID_ASSIGN((head)->pe_first, OID_NULL);\
TX_ADD_FIELD_DIRECT(head, pe_last);\
TOID_ASSIGN((head)->pe_last, OID_NULL);\
} while (0)
#define POBJ_TAILQ_INSERT_HEAD(head, elm, field) do {\
TOID_TYPEOF(elm) *elm_ptr = D_RW(elm);\
if (TOID_IS_NULL((head)->pe_first)) {\
TX_ADD_DIRECT(&elm_ptr->field);\
elm_ptr->field.pe_prev = (head)->pe_first;\
elm_ptr->field.pe_next = (head)->pe_first;\
TX_ADD_DIRECT(head);\
(head)->pe_first = elm;\
(head)->pe_last = elm;\
} else {\
TOID_TYPEOF(elm) *first = D_RW((head)->pe_first);\
TX_ADD_DIRECT(&elm_ptr->field);\
elm_ptr->field.pe_next = (head)->pe_first;\
elm_ptr->field.pe_prev = first->field.pe_prev;\
TX_ADD_DIRECT(&first->field.pe_prev);\
first->field.pe_prev = elm;\
TX_SET_DIRECT(head, pe_first, elm);\
}\
} while (0)
#define POBJ_TAILQ_INSERT_TAIL(head, elm, field) do {\
TOID_TYPEOF(elm) *elm_ptr = D_RW(elm);\
if (TOID_IS_NULL((head)->pe_last)) {\
TX_ADD_DIRECT(&elm_ptr->field);\
elm_ptr->field.pe_prev = (head)->pe_last;\
elm_ptr->field.pe_next = (head)->pe_last;\
TX_ADD_DIRECT(head);\
(head)->pe_first = elm;\
(head)->pe_last = elm;\
} else {\
TOID_TYPEOF(elm) *last = D_RW((head)->pe_last);\
TX_ADD_DIRECT(&elm_ptr->field);\
elm_ptr->field.pe_prev = (head)->pe_last;\
elm_ptr->field.pe_next = last->field.pe_next;\
TX_ADD_DIRECT(&last->field.pe_next);\
last->field.pe_next = elm;\
TX_SET_DIRECT(head, pe_last, elm);\
}\
} while (0)
#define POBJ_TAILQ_INSERT_AFTER(listelm, elm, field) do {\
TOID_TYPEOF(elm) *elm_ptr = D_RW(elm);\
TOID_TYPEOF(listelm) *listelm_ptr = D_RW(listelm);\
TX_ADD_DIRECT(&elm_ptr->field);\
elm_ptr->field.pe_prev = listelm;\
elm_ptr->field.pe_next = listelm_ptr->field.pe_next;\
if (TOID_IS_NULL(listelm_ptr->field.pe_next)) {\
TX_SET_DIRECT(head, pe_last, elm);\
} else {\
TOID_TYPEOF(elm) *next = D_RW(listelm_ptr->field.pe_next);\
TX_ADD_DIRECT(&next->field.pe_prev);\
next->field.pe_prev = elm;\
}\
TX_ADD_DIRECT(&listelm_ptr->field.pe_next);\
listelm_ptr->field.pe_next = elm;\
} while (0)
#define POBJ_TAILQ_INSERT_BEFORE(listelm, elm, field) do {\
TOID_TYPEOF(elm) *elm_ptr = D_RW(elm);\
TOID_TYPEOF(listelm) *listelm_ptr = D_RW(listelm);\
TX_ADD_DIRECT(&elm_ptr->field);\
elm_ptr->field.pe_next = listelm;\
elm_ptr->field.pe_prev = listelm_ptr->field.pe_prev;\
if (TOID_IS_NULL(listelm_ptr->field.pe_prev)) {\
TX_SET_DIRECT(head, pe_first, elm);\
} else {\
TOID_TYPEOF(elm) *prev = D_RW(listelm_ptr->field.pe_prev);\
TX_ADD_DIRECT(&prev->field.pe_next);\
prev->field.pe_next = elm; \
}\
TX_ADD_DIRECT(&listelm_ptr->field.pe_prev);\
listelm_ptr->field.pe_prev = elm;\
} while (0)
#define POBJ_TAILQ_REMOVE(head, elm, field) do {\
TOID_TYPEOF(elm) *elm_ptr = D_RW(elm);\
if (TOID_IS_NULL(elm_ptr->field.pe_prev) &&\
TOID_IS_NULL(elm_ptr->field.pe_next)) {\
TX_ADD_DIRECT(head);\
(head)->pe_first = elm_ptr->field.pe_prev;\
(head)->pe_last = elm_ptr->field.pe_next;\
} else {\
if (TOID_IS_NULL(elm_ptr->field.pe_prev)) {\
TX_SET_DIRECT(head, pe_first, elm_ptr->field.pe_next);\
TOID_TYPEOF(elm) *next = D_RW(elm_ptr->field.pe_next);\
TX_ADD_DIRECT(&next->field.pe_prev);\
next->field.pe_prev = elm_ptr->field.pe_prev;\
} else {\
TOID_TYPEOF(elm) *prev = D_RW(elm_ptr->field.pe_prev);\
TX_ADD_DIRECT(&prev->field.pe_next);\
prev->field.pe_next = elm_ptr->field.pe_next;\
}\
if (TOID_IS_NULL(elm_ptr->field.pe_next)) {\
TX_SET_DIRECT(head, pe_last, elm_ptr->field.pe_prev);\
TOID_TYPEOF(elm) *prev = D_RW(elm_ptr->field.pe_prev);\
TX_ADD_DIRECT(&prev->field.pe_next);\
prev->field.pe_next = elm_ptr->field.pe_next;\
} else {\
TOID_TYPEOF(elm) *next = D_RW(elm_ptr->field.pe_next);\
TX_ADD_DIRECT(&next->field.pe_prev);\
next->field.pe_prev = elm_ptr->field.pe_prev;\
}\
}\
} while (0)
#define POBJ_TAILQ_REMOVE_FREE(head, elm, field) do {\
POBJ_TAILQ_REMOVE(head, elm, field);\
TX_FREE(elm);\
} while (0)
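/*
 * Example (sketch, illustrative names, assuming struct tqnode is part
 * of the pool layout): appending to and draining a tail queue with
 * the macros above.
 *
 *	TX_BEGIN(pop) {
 *		TOID(struct tqnode) node = TX_NEW(struct tqnode);
 *		POBJ_TAILQ_INSERT_TAIL(head, node, tnd);
 *	} TX_END
 *
 *	while (!POBJ_TAILQ_EMPTY(head)) {
 *		TOID(struct tqnode) last = POBJ_TAILQ_LAST(head);
 *		TX_BEGIN(pop) {
 *			POBJ_TAILQ_REMOVE_FREE(head, last, tnd);
 *		} TX_END
 *	}
 */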
/*
 * Two cases are distinguished: a two-element queue where elm is the
 * tail (head and tail simply swap), and the general case, which also
 * covers elm being the last element.
 */
#define POBJ_TAILQ_MOVE_ELEMENT_HEAD(head, elm, field) do {\
TOID_TYPEOF(elm) *elm_ptr = D_RW(elm);\
if (TOID_EQUALS((head)->pe_last, elm) &&\
TOID_EQUALS(D_RO((head)->pe_first)->field.pe_next, elm)) {\
_POBJ_SWAP_PTR(elm, field);\
_POBJ_SWAP_PTR((head)->pe_first, field);\
POBJ_TAILQ_SWAP_HEAD_TAIL(head, field);\
} else {\
TOID_TYPEOF(elm) *prev = D_RW(elm_ptr->field.pe_prev);\
TX_ADD_DIRECT(&prev->field.pe_next);\
prev->field.pe_next = elm_ptr->field.pe_next;\
if (TOID_EQUALS((head)->pe_last, elm)) {\
TX_SET_DIRECT(head, pe_last, elm_ptr->field.pe_prev);\
} else {\
TOID_TYPEOF(elm) *next = D_RW(elm_ptr->field.pe_next);\
TX_ADD_DIRECT(&next->field.pe_prev);\
next->field.pe_prev = elm_ptr->field.pe_prev;\
}\
TX_ADD_DIRECT(&elm_ptr->field);\
elm_ptr->field.pe_prev = D_RO((head)->pe_first)->field.pe_prev;\
elm_ptr->field.pe_next = (head)->pe_first;\
TOID_TYPEOF(elm) *first = D_RW((head)->pe_first);\
TX_ADD_DIRECT(&first->field.pe_prev);\
first->field.pe_prev = elm;\
TX_SET_DIRECT(head, pe_first, elm);\
}\
} while (0)
#define POBJ_TAILQ_MOVE_ELEMENT_TAIL(head, elm, field) do {\
TOID_TYPEOF(elm) *elm_ptr = D_RW(elm);\
if (TOID_EQUALS((head)->pe_first, elm) &&\
TOID_EQUALS(D_RO((head)->pe_last)->field.pe_prev, elm)) {\
_POBJ_SWAP_PTR(elm, field);\
_POBJ_SWAP_PTR((head)->pe_last, field);\
POBJ_TAILQ_SWAP_HEAD_TAIL(head, field);\
} else {\
TOID_TYPEOF(elm) *next = D_RW(elm_ptr->field.pe_next);\
TX_ADD_DIRECT(&next->field.pe_prev);\
next->field.pe_prev = elm_ptr->field.pe_prev;\
if (TOID_EQUALS((head)->pe_first, elm)) {\
TX_SET_DIRECT(head, pe_first, elm_ptr->field.pe_next);\
} else { \
TOID_TYPEOF(elm) *prev = D_RW(elm_ptr->field.pe_prev);\
TX_ADD_DIRECT(&prev->field.pe_next);\
prev->field.pe_next = elm_ptr->field.pe_next;\
}\
TX_ADD_DIRECT(&elm_ptr->field);\
elm_ptr->field.pe_prev = (head)->pe_last;\
elm_ptr->field.pe_next = D_RO((head)->pe_last)->field.pe_next;\
__typeof__(elm_ptr) last = D_RW((head)->pe_last);\
TX_ADD_DIRECT(&last->field.pe_next);\
last->field.pe_next = elm;\
TX_SET_DIRECT(head, pe_last, elm);\
} \
} while (0)
#endif /* PMEMOBJ_LISTS_H */
| 12,758 | 32.313316 | 74 |
h
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/examples/libpmemobj/linkedlist/fifo.c
|
/*
* Copyright 2016, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
 * fifo.c -- example of tail queue usage
*/
#include <ex_common.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "pmemobj_list.h"
POBJ_LAYOUT_BEGIN(list);
POBJ_LAYOUT_ROOT(list, struct fifo_root);
POBJ_LAYOUT_TOID(list, struct tqnode);
POBJ_LAYOUT_END(list);
POBJ_TAILQ_HEAD(tqueuehead, struct tqnode);
struct fifo_root {
struct tqueuehead head;
};
struct tqnode {
char data;
POBJ_TAILQ_ENTRY(struct tqnode) tnd;
};
static void
print_help(void)
{
printf("usage: fifo <pool> <option> [<type>]\n");
printf("\tAvailable options:\n");
printf("\tinsert, <character> Insert character into FIFO\n");
printf("\tremove, Remove element from FIFO\n");
printf("\tprint, Print all FIFO elements\n");
}
int
main(int argc, const char *argv[])
{
PMEMobjpool *pop;
const char *path;
if (argc < 3) {
print_help();
return 0;
}
path = argv[1];
if (file_exists(path) != 0) {
if ((pop = pmemobj_create(path, POBJ_LAYOUT_NAME(list),
PMEMOBJ_MIN_POOL, 0666)) == NULL) {
perror("failed to create pool\n");
return -1;
}
} else {
if ((pop = pmemobj_open(path,
POBJ_LAYOUT_NAME(list))) == NULL) {
perror("failed to open pool\n");
return -1;
}
}
TOID(struct fifo_root) root = POBJ_ROOT(pop, struct fifo_root);
struct tqueuehead *tqhead = &D_RW(root)->head;
TOID(struct tqnode) node;
if (strcmp(argv[2], "insert") == 0) {
if (argc == 4) {
TX_BEGIN(pop) {
node = TX_NEW(struct tqnode);
D_RW(node)->data = *argv[3];
POBJ_TAILQ_INSERT_HEAD(tqhead, node, tnd);
} TX_ONABORT {
abort();
} TX_END
printf("Added %c to FIFO\n", *argv[3]);
} else {
print_help();
}
} else if (strcmp(argv[2], "remove") == 0) {
if (POBJ_TAILQ_EMPTY(tqhead)) {
printf("FIFO is empty\n");
} else {
node = POBJ_TAILQ_LAST(tqhead);
TX_BEGIN(pop) {
POBJ_TAILQ_REMOVE_FREE(tqhead, node, tnd);
} TX_ONABORT {
abort();
} TX_END
printf("Removed element from FIFO\n");
}
} else if (strcmp(argv[2], "print") == 0) {
printf("Elements in FIFO:\n");
POBJ_TAILQ_FOREACH(node, tqhead, tnd) {
printf("%c\t", D_RO(node)->data);
}
printf("\n");
} else {
print_help();
}
pmemobj_close(pop);
return 0;
}
| 3,797 | 26.926471 | 74 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/examples/libpmemobj/map/map_hashmap_atomic.h
|
/*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* map_hashmap_atomic.h -- common interface for maps
*/
#ifndef MAP_HASHMAP_ATOMIC_H
#define MAP_HASHMAP_ATOMIC_H
#include "map.h"
#ifdef __cplusplus
extern "C" {
#endif
extern struct map_ops hashmap_atomic_ops;
#define MAP_HASHMAP_ATOMIC (&hashmap_atomic_ops)
#ifdef __cplusplus
}
#endif
#endif /* MAP_HASHMAP_ATOMIC_H */
| 1,936 | 34.218182 | 74 |
h
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/examples/libpmemobj/map/kv_server_test.sh
|
#!/usr/bin/env bash
#
# Copyright 2015-2016, Intel Corporation
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
set -euo pipefail
MAP=ctree
PORT=9100
if [ $# -lt 1 ]; then
	echo "usage: $0 <pool-file>" >&2
	exit 1
fi
POOL=$1
# start a new server instance
./kv_server $MAP $POOL $PORT &
# wait for the server to properly start
sleep 1
# insert a new key value pair and disconnect
RESP=`echo -e "INSERT foo bar\nGET foo\nBYE" | nc 127.0.0.1 $PORT`
echo $RESP
# remove previously inserted key value pair and shutdown the server
RESP=`echo -e "GET foo\nREMOVE foo\nGET foo\nKILL" | nc 127.0.0.1 $PORT`
echo $RESP
| 2,025 | 37.226415 | 73 |
sh
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/examples/libpmemobj/map/map_btree.h
|
/*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
 * map_btree.h -- common interface for maps
*/
#ifndef MAP_BTREE_H
#define MAP_BTREE_H
#include "map.h"
#ifdef __cplusplus
extern "C" {
#endif
extern struct map_ops btree_map_ops;
#define MAP_BTREE (&btree_map_ops)
#ifdef __cplusplus
}
#endif
#endif /* MAP_BTREE_H */
| 1,881 | 33.218182 | 74 |
h
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/examples/libpmemobj/map/map_hashmap_tx.c
|
/*
* Copyright 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* map_hashmap_tx.c -- common interface for maps
*/
#include <map.h>
#include <hashmap_tx.h>
#include "map_hashmap_tx.h"
/*
* map_hm_tx_check -- wrapper for hm_tx_check
*/
static int
map_hm_tx_check(PMEMobjpool *pop, TOID(struct map) map)
{
TOID(struct hashmap_tx) hashmap_tx;
TOID_ASSIGN(hashmap_tx, map.oid);
return hm_tx_check(pop, hashmap_tx);
}
/*
* map_hm_tx_count -- wrapper for hm_tx_count
*/
static size_t
map_hm_tx_count(PMEMobjpool *pop, TOID(struct map) map)
{
TOID(struct hashmap_tx) hashmap_tx;
TOID_ASSIGN(hashmap_tx, map.oid);
return hm_tx_count(pop, hashmap_tx);
}
/*
* map_hm_tx_init -- wrapper for hm_tx_init
*/
static int
map_hm_tx_init(PMEMobjpool *pop, TOID(struct map) map)
{
TOID(struct hashmap_tx) hashmap_tx;
TOID_ASSIGN(hashmap_tx, map.oid);
return hm_tx_init(pop, hashmap_tx);
}
/*
* map_hm_tx_create -- wrapper for hm_tx_create
*/
static int
map_hm_tx_create(PMEMobjpool *pop, TOID(struct map) *map, void *arg)
{
TOID(struct hashmap_tx) *hashmap_tx =
(TOID(struct hashmap_tx) *)map;
return hm_tx_create(pop, hashmap_tx, arg);
}
/*
* map_hm_tx_insert -- wrapper for hm_tx_insert
*/
static int
map_hm_tx_insert(PMEMobjpool *pop, TOID(struct map) map,
uint64_t key, PMEMoid value)
{
TOID(struct hashmap_tx) hashmap_tx;
TOID_ASSIGN(hashmap_tx, map.oid);
return hm_tx_insert(pop, hashmap_tx, key, value);
}
/*
* map_hm_tx_remove -- wrapper for hm_tx_remove
*/
static PMEMoid
map_hm_tx_remove(PMEMobjpool *pop, TOID(struct map) map, uint64_t key)
{
TOID(struct hashmap_tx) hashmap_tx;
TOID_ASSIGN(hashmap_tx, map.oid);
return hm_tx_remove(pop, hashmap_tx, key);
}
/*
* map_hm_tx_get -- wrapper for hm_tx_get
*/
static PMEMoid
map_hm_tx_get(PMEMobjpool *pop, TOID(struct map) map, uint64_t key)
{
TOID(struct hashmap_tx) hashmap_tx;
TOID_ASSIGN(hashmap_tx, map.oid);
return hm_tx_get(pop, hashmap_tx, key);
}
/*
* map_hm_tx_lookup -- wrapper for hm_tx_lookup
*/
static int
map_hm_tx_lookup(PMEMobjpool *pop, TOID(struct map) map, uint64_t key)
{
TOID(struct hashmap_tx) hashmap_tx;
TOID_ASSIGN(hashmap_tx, map.oid);
return hm_tx_lookup(pop, hashmap_tx, key);
}
/*
* map_hm_tx_foreach -- wrapper for hm_tx_foreach
*/
static int
map_hm_tx_foreach(PMEMobjpool *pop, TOID(struct map) map,
int (*cb)(uint64_t key, PMEMoid value, void *arg),
void *arg)
{
TOID(struct hashmap_tx) hashmap_tx;
TOID_ASSIGN(hashmap_tx, map.oid);
return hm_tx_foreach(pop, hashmap_tx, cb, arg);
}
/*
* map_hm_tx_cmd -- wrapper for hm_tx_cmd
*/
static int
map_hm_tx_cmd(PMEMobjpool *pop, TOID(struct map) map,
unsigned cmd, uint64_t arg)
{
TOID(struct hashmap_tx) hashmap_tx;
TOID_ASSIGN(hashmap_tx, map.oid);
return hm_tx_cmd(pop, hashmap_tx, cmd, arg);
}
struct map_ops hashmap_tx_ops = {
/* .check = */ map_hm_tx_check,
/* .create = */ map_hm_tx_create,
/* .delete = */ NULL,
/* .init = */ map_hm_tx_init,
/* .insert = */ map_hm_tx_insert,
/* .insert_new = */ NULL,
/* .remove = */ map_hm_tx_remove,
/* .remove_free = */ NULL,
/* .clear = */ NULL,
/* .get = */ map_hm_tx_get,
/* .lookup = */ map_hm_tx_lookup,
/* .foreach = */ map_hm_tx_foreach,
/* .is_empty = */ NULL,
/* .count = */ map_hm_tx_count,
/* .cmd = */ map_hm_tx_cmd,
};
| 4,834 | 25.420765 | 74 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/examples/libpmemobj/map/map_rtree.h
|
/*
* Copyright 2016-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* map_rtree.h -- common interface for maps
*/
#ifndef MAP_RTREE_H
#define MAP_RTREE_H
#include "map.h"
#ifdef __cplusplus
extern "C" {
#endif
extern struct map_ops rtree_map_ops;
#define MAP_RTREE (&rtree_map_ops)
#ifdef __cplusplus
}
#endif
#endif /* MAP_RTREE_H */
| 1,881 | 33.218182 | 74 |
h
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/examples/libpmemobj/map/kv_server.c
|
/*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* kv_server.c -- persistent tcp key-value store server
*/
#include <uv.h>
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include "libpmemobj.h"
#include "map.h"
#include "map_ctree.h"
#include "map_btree.h"
#include "map_rtree.h"
#include "map_rbtree.h"
#include "map_hashmap_atomic.h"
#include "map_hashmap_tx.h"
#include "map_hashmap_rp.h"
#include "map_skiplist.h"
#include "kv_protocol.h"
#define COUNT_OF(x) (sizeof(x) / sizeof(0[x]))
POBJ_LAYOUT_BEGIN(kv_server);
POBJ_LAYOUT_ROOT(kv_server, struct root);
POBJ_LAYOUT_TOID(kv_server, struct map_value);
POBJ_LAYOUT_TOID(kv_server, uint64_t);
POBJ_LAYOUT_END(kv_server);
struct map_value {
uint64_t len;
char buf[];
};
struct root {
TOID(struct map) map;
};
static struct map_ctx *mapc;
static PMEMobjpool *pop;
static TOID(struct map) map;
static uv_tcp_t server;
static uv_loop_t *loop;
typedef int (*msg_handler)(uv_stream_t *client, const char *msg, size_t len);
struct write_req {
uv_write_t req;
uv_buf_t buf;
};
struct client_data {
char *buf; /* current message, always NULL terminated */
size_t buf_len; /* sizeof(buf) */
size_t len; /* actual length of the message (while parsing) */
};
/*
 * djb2_hash -- string hashing function by Dan Bernstein
 * (hash(i) = hash(i - 1) * 33 + str[i], seeded with 5381)
 */
static uint32_t
djb2_hash(const char *str)
{
uint32_t hash = 5381;
int c;
while ((c = *str++))
hash = ((hash << 5) + hash) + c;
return hash;
}
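/*
 * Sanity check: djb2_hash("ab") == (5381 * 33 + 'a') * 33 + 'b'
 * == 177670 * 33 + 98 == 5863208.
 */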
/*
* write_done_cb -- callback after message write completes
*/
static void
write_done_cb(uv_write_t *req, int status)
{
struct write_req *wr = (struct write_req *)req;
free(wr);
	if (status < 0) {
		printf("response failed\n");
	}
}
/*
* client_close_cb -- callback after client tcp connection closes
*/
static void
client_close_cb(uv_handle_t *handle)
{
struct client_data *d = handle->data;
free(d->buf);
free(handle->data);
free(handle);
}
/*
* response_write -- response writing helper
*/
static void
response_write(uv_stream_t *client, char *resp, size_t len)
{
struct write_req *wr = malloc(sizeof(struct write_req));
assert(wr != NULL);
wr->buf = uv_buf_init(resp, len);
uv_write(&wr->req, client, &wr->buf, 1, write_done_cb);
}
/*
* response_msg -- predefined message writing helper
*/
static void
response_msg(uv_stream_t *client, enum resp_messages msg)
{
response_write(client, (char *)resp_msg[msg], strlen(resp_msg[msg]));
}
/*
* cmsg_insert_handler -- handler of INSERT client message
*/
static int
cmsg_insert_handler(uv_stream_t *client, const char *msg, size_t len)
{
int result = 0;
TX_BEGIN(pop) {
/*
* For simplicity sake the length of the value buffer is just
* a length of the message.
*/
TOID(struct map_value) val = TX_ZALLOC(struct map_value,
sizeof(struct map_value) + len);
char key[MAX_KEY_LEN];
int ret = sscanf(msg, "INSERT %254s %s\n", key, D_RW(val)->buf);
assert(ret == 2);
D_RW(val)->len = len;
/* properly terminate the value */
D_RW(val)->buf[strlen(D_RO(val)->buf)] = '\n';
map_insert(mapc, map, djb2_hash(key), val.oid);
} TX_ONABORT {
result = 1;
} TX_END
response_msg(client, result);
return 0;
}
/*
* cmsg_remove_handler -- handler of REMOVE client message
*/
static int
cmsg_remove_handler(uv_stream_t *client, const char *msg, size_t len)
{
char key[MAX_KEY_LEN] = {0};
int ret = sscanf(msg, "REMOVE %s\n", key);
assert(ret == 1);
int result = map_remove_free(mapc, map, djb2_hash(key));
response_msg(client, result);
return 0;
}
/*
* cmsg_get_handler -- handler of GET client message
*/
static int
cmsg_get_handler(uv_stream_t *client, const char *msg, size_t len)
{
char key[MAX_KEY_LEN];
int ret = sscanf(msg, "GET %s\n", key);
assert(ret == 1);
TOID(struct map_value) value;
TOID_ASSIGN(value, map_get(mapc, map, djb2_hash(key)));
if (TOID_IS_NULL(value)) {
response_msg(client, RESP_MSG_NULL);
} else {
response_write(client, D_RW(value)->buf, D_RO(value)->len);
}
return 0;
}
/*
* cmsg_bye_handler -- handler of BYE client message
*/
static int
cmsg_bye_handler(uv_stream_t *client, const char *msg, size_t len)
{
uv_close((uv_handle_t *)client, client_close_cb);
return 0;
}
/*
 * cmsg_kill_handler -- handler of KILL client message
*/
static int
cmsg_kill_handler(uv_stream_t *client, const char *msg, size_t len)
{
uv_close((uv_handle_t *)client, client_close_cb);
uv_close((uv_handle_t *)&server, NULL);
return 0;
}
/* kv protocol implementation */
static msg_handler protocol_impl[MAX_CMSG] = {
cmsg_insert_handler,
cmsg_remove_handler,
cmsg_get_handler,
cmsg_bye_handler,
cmsg_kill_handler
};
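/*
 * Note: the handlers above must stay in the same order as the
 * kv_cmsg_token strings (defined in kv_protocol.h), since
 * cmsg_handle() below dispatches on the matching index.
 */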
/*
* cmsg_handle -- handles current client message
*/
static int
cmsg_handle(uv_stream_t *client, struct client_data *data)
{
int ret = 0;
int i;
for (i = 0; i < MAX_CMSG; ++i)
if (strncmp(kv_cmsg_token[i], data->buf,
strlen(kv_cmsg_token[i])) == 0)
break;
if (i == MAX_CMSG) {
response_msg(client, RESP_MSG_UNKNOWN);
} else {
ret = protocol_impl[i](client, data->buf, data->len);
}
data->len = 0; /* reset the message length */
return ret;
}
/*
* cmsg_handle_stream -- handle incoming tcp stream from clients
*/
static int
cmsg_handle_stream(uv_stream_t *client, struct client_data *data,
const char *buf, ssize_t nread)
{
char *last;
int ret;
size_t len;
	/*
	 * A single read operation can contain zero or more complete
	 * operations, so this has to be handled appropriately. Client
	 * messages are terminated by a newline character; any trailing
	 * partial message is buffered until a later read completes it.
	 */
while ((last = memchr(buf, '\n', nread)) != NULL) {
len = last - buf + 1;
nread -= len;
assert(data->len + len <= data->buf_len);
memcpy(data->buf + data->len, buf, len);
data->len += len;
if ((ret = cmsg_handle(client, data)) != 0)
return ret;
buf = last + 1;
}
if (nread != 0) {
memcpy(data->buf + data->len, buf, nread);
data->len += nread;
}
return 0;
}
static uv_buf_t msg_buf = {0};
/*
* get_read_buf_cb -- returns buffer for incoming client message
*/
static void
get_read_buf_cb(uv_handle_t *handle, size_t size, uv_buf_t *buf)
{
buf->base = msg_buf.base;
buf->len = msg_buf.len;
}
/*
* read_cb -- async tcp read from clients
*/
static void
read_cb(uv_stream_t *client, ssize_t nread, const uv_buf_t *buf)
{
if (nread <= 0) {
printf("client connection closed\n");
uv_close((uv_handle_t *)client, client_close_cb);
return;
}
struct client_data *d = client->data;
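	/* grow the buffer so it fits the buffered tail plus the new chunk */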
if (d->buf_len < (d->len + nread + 1)) {
char *cbuf = realloc(d->buf, d->buf_len + nread + 1);
assert(cbuf != NULL);
/* zero only the new memory */
memset(cbuf + d->buf_len, 0, nread + 1);
d->buf_len += nread + 1;
d->buf = cbuf;
}
if (cmsg_handle_stream(client, client->data, buf->base, nread)) {
printf("client disconnect\n");
uv_close((uv_handle_t *)client, client_close_cb);
}
}
/*
* connection_cb -- async incoming client request
*/
static void
connection_cb(uv_stream_t *server, int status)
{
if (status != 0) {
printf("client connect error\n");
return;
}
printf("new client\n");
uv_tcp_t *client = malloc(sizeof(uv_tcp_t));
assert(client != NULL);
client->data = calloc(1, sizeof(struct client_data));
assert(client->data != NULL);
uv_tcp_init(loop, client);
if (uv_accept(server, (uv_stream_t *)client) == 0) {
uv_read_start((uv_stream_t *)client, get_read_buf_cb, read_cb);
} else {
uv_close((uv_handle_t *)client, client_close_cb);
}
}
static const struct {
struct map_ops *ops;
const char *name;
} maps[] = {
{MAP_HASHMAP_TX, "hashmap_tx"},
{MAP_HASHMAP_ATOMIC, "hashmap_atomic"},
{MAP_HASHMAP_RP, "hashmap_rp"},
{MAP_CTREE, "ctree"},
{MAP_BTREE, "btree"},
{MAP_RTREE, "rtree"},
{MAP_RBTREE, "rbtree"},
{MAP_SKIPLIST, "skiplist"}
};
/*
* get_map_ops_by_string -- parse the type string and return the associated ops
*/
static const struct map_ops *
get_map_ops_by_string(const char *type)
{
for (int i = 0; i < COUNT_OF(maps); ++i)
if (strcmp(maps[i].name, type) == 0)
return maps[i].ops;
return NULL;
}
#define KV_SIZE (PMEMOBJ_MIN_POOL)
#define MAX_READ_LEN (64 * 1024) /* 64 kilobytes */
int
main(int argc, char *argv[])
{
if (argc < 4) {
printf("usage: %s hashmap_tx|hashmap_atomic|hashmap_rp|"
"ctree|btree|rtree|rbtree|skiplist file-name port\n",
argv[0]);
return 1;
}
const char *path = argv[2];
const char *type = argv[1];
int port = atoi(argv[3]);
/* use only a single buffer for all incoming data */
void *read_buf = malloc(MAX_READ_LEN);
assert(read_buf != NULL);
msg_buf = uv_buf_init(read_buf, MAX_READ_LEN);
if (access(path, F_OK) != 0) {
pop = pmemobj_create(path, POBJ_LAYOUT_NAME(kv_server),
KV_SIZE, 0666);
if (pop == NULL) {
fprintf(stderr, "failed to create pool: %s\n",
pmemobj_errormsg());
return 1;
}
} else {
pop = pmemobj_open(path, POBJ_LAYOUT_NAME(kv_server));
if (pop == NULL) {
fprintf(stderr, "failed to open pool: %s\n",
pmemobj_errormsg());
return 1;
}
}
/* map context initialization */
mapc = map_ctx_init(get_map_ops_by_string(type), pop);
if (!mapc) {
pmemobj_close(pop);
fprintf(stderr, "map_ctx_init failed (wrong type?)\n");
return 1;
}
/* initialize the actual map */
TOID(struct root) root = POBJ_ROOT(pop, struct root);
if (TOID_IS_NULL(D_RO(root)->map)) {
/* create new if it doesn't exist (a fresh pool) */
map_create(mapc, &D_RW(root)->map, NULL);
}
map = D_RO(root)->map;
loop = uv_default_loop();
/* tcp server initialization */
uv_tcp_init(loop, &server);
struct sockaddr_in bind_addr;
uv_ip4_addr("0.0.0.0", port, &bind_addr);
int ret = uv_tcp_bind(&server, (const struct sockaddr *)&bind_addr, 0);
assert(ret == 0);
ret = uv_listen((uv_stream_t *)&server, SOMAXCONN, connection_cb);
assert(ret == 0);
ret = uv_run(loop, UV_RUN_DEFAULT);
assert(ret == 0);
/* no more events in the loop, release resources and quit */
uv_loop_delete(loop);
map_ctx_free(mapc);
pmemobj_close(pop);
free(read_buf);
return 0;
}
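/*
 * A minimal sketch of a client for the newline-terminated text protocol
 * served above. Everything here is illustrative and assumed, not part of
 * the original example: the function name (client_demo), the localhost
 * address, and the key/value pair being sent.
 */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

static int
client_demo(int port)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	if (fd < 0)
		return 1;

	struct sockaddr_in addr;
	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_port = htons((unsigned short)port);
	inet_pton(AF_INET, "127.0.0.1", &addr.sin_addr);

	if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) != 0) {
		close(fd);
		return 1;
	}

	/* each message ends with '\n', as cmsg_handle_stream expects */
	const char *cmds = "INSERT foo bar\nGET foo\nBYE\n";
	if (write(fd, cmds, strlen(cmds)) < 0) {
		close(fd);
		return 1;
	}

	/* print responses until the server closes the connection (BYE) */
	char resp[256];
	ssize_t n;
	while ((n = read(fd, resp, sizeof(resp) - 1)) > 0) {
		resp[n] = '\0';
		fputs(resp, stdout);
	}

	close(fd);
	return 0;
}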
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/examples/libpmemobj/map/map_skiplist.h
/*
* Copyright 2016, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* map_skiplist.h -- common interface for maps
*/
#ifndef MAP_SKIPLIST_H
#define MAP_SKIPLIST_H
#include "map.h"
extern struct map_ops skiplist_map_ops;
#define MAP_SKIPLIST (&skiplist_map_ops)
#endif /* MAP_SKIPLIST_H */
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/examples/libpmemobj/map/map.h
/*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* map.h -- common interface for maps
*/
#ifndef MAP_H
#define MAP_H
#include <libpmemobj.h>
#ifdef __cplusplus
extern "C" {
#endif
#ifndef MAP_TYPE_OFFSET
#define MAP_TYPE_OFFSET 1000
#endif
TOID_DECLARE(struct map, MAP_TYPE_OFFSET + 0);
struct map;
struct map_ctx;
struct map_ops {
int(*check)(PMEMobjpool *pop, TOID(struct map) map);
int(*create)(PMEMobjpool *pop, TOID(struct map) *map, void *arg);
int(*destroy)(PMEMobjpool *pop, TOID(struct map) *map);
int(*init)(PMEMobjpool *pop, TOID(struct map) map);
int(*insert)(PMEMobjpool *pop, TOID(struct map) map,
uint64_t key, PMEMoid value);
int(*insert_new)(PMEMobjpool *pop, TOID(struct map) map,
uint64_t key, size_t size,
unsigned type_num,
void(*constructor)(PMEMobjpool *pop, void *ptr, void *arg),
void *arg);
PMEMoid(*remove)(PMEMobjpool *pop, TOID(struct map) map,
uint64_t key);
int(*remove_free)(PMEMobjpool *pop, TOID(struct map) map,
uint64_t key);
int(*clear)(PMEMobjpool *pop, TOID(struct map) map);
PMEMoid(*get)(PMEMobjpool *pop, TOID(struct map) map,
uint64_t key);
int(*lookup)(PMEMobjpool *pop, TOID(struct map) map,
uint64_t key);
int(*foreach)(PMEMobjpool *pop, TOID(struct map) map,
int(*cb)(uint64_t key, PMEMoid value, void *arg),
void *arg);
int(*is_empty)(PMEMobjpool *pop, TOID(struct map) map);
size_t(*count)(PMEMobjpool *pop, TOID(struct map) map);
int(*cmd)(PMEMobjpool *pop, TOID(struct map) map,
unsigned cmd, uint64_t arg);
};
struct map_ctx {
PMEMobjpool *pop;
const struct map_ops *ops;
};
struct map_ctx *map_ctx_init(const struct map_ops *ops, PMEMobjpool *pop);
void map_ctx_free(struct map_ctx *mapc);
int map_check(struct map_ctx *mapc, TOID(struct map) map);
int map_create(struct map_ctx *mapc, TOID(struct map) *map, void *arg);
int map_destroy(struct map_ctx *mapc, TOID(struct map) *map);
int map_init(struct map_ctx *mapc, TOID(struct map) map);
int map_insert(struct map_ctx *mapc, TOID(struct map) map,
uint64_t key, PMEMoid value);
int map_insert_new(struct map_ctx *mapc, TOID(struct map) map,
uint64_t key, size_t size,
unsigned type_num,
void(*constructor)(PMEMobjpool *pop, void *ptr, void *arg),
void *arg);
PMEMoid map_remove(struct map_ctx *mapc, TOID(struct map) map, uint64_t key);
int map_remove_free(struct map_ctx *mapc, TOID(struct map) map, uint64_t key);
int map_clear(struct map_ctx *mapc, TOID(struct map) map);
PMEMoid map_get(struct map_ctx *mapc, TOID(struct map) map, uint64_t key);
int map_lookup(struct map_ctx *mapc, TOID(struct map) map, uint64_t key);
int map_foreach(struct map_ctx *mapc, TOID(struct map) map,
int(*cb)(uint64_t key, PMEMoid value, void *arg),
void *arg);
int map_is_empty(struct map_ctx *mapc, TOID(struct map) map);
size_t map_count(struct map_ctx *mapc, TOID(struct map) map);
int map_cmd(struct map_ctx *mapc, TOID(struct map) map,
unsigned cmd, uint64_t arg);
#ifdef __cplusplus
}
#endif
#endif /* MAP_H */
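/*
 * A minimal sketch of how the interface above is meant to be used,
 * with the transactional hashmap as the backend. The layout and root
 * names (map_demo, demo_root) and the key are illustrative assumptions,
 * not part of the examples.
 */
#include <assert.h>
#include <libpmemobj.h>
#include "map.h"
#include "map_hashmap_tx.h"

POBJ_LAYOUT_BEGIN(map_demo);
POBJ_LAYOUT_ROOT(map_demo, struct demo_root);
POBJ_LAYOUT_END(map_demo);

struct demo_root {
	TOID(struct map) map;
};

static void
demo(PMEMobjpool *pop)
{
	/* bind the generic interface to one concrete implementation */
	struct map_ctx *mapc = map_ctx_init(MAP_HASHMAP_TX, pop);
	assert(mapc != NULL);

	TOID(struct demo_root) root = POBJ_ROOT(pop, struct demo_root);
	map_create(mapc, &D_RW(root)->map, NULL); /* NULL -- default args */

	map_insert(mapc, D_RO(root)->map, 42, OID_NULL);
	assert(map_lookup(mapc, D_RO(root)->map, 42));
	map_remove(mapc, D_RO(root)->map, 42);

	map_ctx_free(mapc);
}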
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/examples/libpmemobj/map/map_hashmap_rp.h
/*
* Copyright 2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* map_hashmap_rp.h -- common interface for maps
*/
#ifndef MAP_HASHMAP_RP_H
#define MAP_HASHMAP_RP_H
#include "map.h"
#ifdef __cplusplus
extern "C" {
#endif
extern struct map_ops hashmap_rp_ops;
#define MAP_HASHMAP_RP (&hashmap_rp_ops)
#ifdef __cplusplus
}
#endif
#endif /* MAP_HASHMAP_RP_H */
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/examples/libpmemobj/map/data_store.c
/*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* data_store.c -- tree_map example usage
*/
#include <ex_common.h>
#include <stdio.h>
#include <sys/stat.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <assert.h>
#include "map.h"
#include "map_ctree.h"
#include "map_btree.h"
#include "map_rbtree.h"
#include "map_hashmap_atomic.h"
#include "map_hashmap_tx.h"
#include "map_hashmap_rp.h"
#include "map_skiplist.h"
POBJ_LAYOUT_BEGIN(data_store);
POBJ_LAYOUT_ROOT(data_store, struct store_root);
POBJ_LAYOUT_TOID(data_store, struct store_item);
POBJ_LAYOUT_END(data_store);
#define MAX_INSERTS 500
static uint64_t nkeys;
static uint64_t keys[MAX_INSERTS];
struct store_item {
uint64_t item_data;
};
struct store_root {
TOID(struct map) map;
};
/*
* new_store_item -- transactionally creates and initializes new item
*/
static TOID(struct store_item)
new_store_item(void)
{
TOID(struct store_item) item = TX_NEW(struct store_item);
D_RW(item)->item_data = rand();
return item;
}
/*
 * get_keys -- collects the key of each item visited by map_foreach
*/
static int
get_keys(uint64_t key, PMEMoid value, void *arg)
{
keys[nkeys++] = key;
return 0;
}
/*
* dec_keys -- decrements the keys count for every item
*/
static int
dec_keys(uint64_t key, PMEMoid value, void *arg)
{
nkeys--;
return 0;
}
/*
* parse_map_type -- parse type of map
*/
static const struct map_ops *
parse_map_type(const char *type)
{
if (strcmp(type, "ctree") == 0)
return MAP_CTREE;
else if (strcmp(type, "btree") == 0)
return MAP_BTREE;
else if (strcmp(type, "rbtree") == 0)
return MAP_RBTREE;
else if (strcmp(type, "hashmap_atomic") == 0)
return MAP_HASHMAP_ATOMIC;
else if (strcmp(type, "hashmap_tx") == 0)
return MAP_HASHMAP_TX;
else if (strcmp(type, "hashmap_rp") == 0)
return MAP_HASHMAP_RP;
else if (strcmp(type, "skiplist") == 0)
return MAP_SKIPLIST;
return NULL;
}
int
main(int argc, const char *argv[])
{
if (argc < 3) {
printf("usage: %s "
"<ctree|btree|rbtree|hashmap_atomic|hashmap_rp|"
"hashmap_tx|skiplist> file-name [nops]\n", argv[0]);
return 1;
}
const char *type = argv[1];
const char *path = argv[2];
const struct map_ops *map_ops = parse_map_type(type);
if (!map_ops) {
fprintf(stderr, "invalid container type -- '%s'\n", type);
return 1;
}
int nops = MAX_INSERTS;
if (argc > 3) {
nops = atoi(argv[3]);
if (nops <= 0 || nops > MAX_INSERTS) {
fprintf(stderr, "number of operations must be "
"in range 1..%d\n", MAX_INSERTS);
return 1;
}
}
PMEMobjpool *pop;
srand((unsigned)time(NULL));
if (file_exists(path) != 0) {
if ((pop = pmemobj_create(path, POBJ_LAYOUT_NAME(data_store),
PMEMOBJ_MIN_POOL, 0666)) == NULL) {
perror("failed to create pool\n");
return 1;
}
} else {
if ((pop = pmemobj_open(path,
POBJ_LAYOUT_NAME(data_store))) == NULL) {
perror("failed to open pool\n");
return 1;
}
}
TOID(struct store_root) root = POBJ_ROOT(pop, struct store_root);
struct map_ctx *mapc = map_ctx_init(map_ops, pop);
if (!mapc) {
perror("cannot allocate map context\n");
return 1;
}
/* delete the map if it exists */
if (!map_check(mapc, D_RW(root)->map))
map_destroy(mapc, &D_RW(root)->map);
/* insert random items in a transaction */
int aborted = 0;
TX_BEGIN(pop) {
map_create(mapc, &D_RW(root)->map, NULL);
for (int i = 0; i < nops; ++i) {
/* new_store_item is transactional! */
map_insert(mapc, D_RW(root)->map, rand(),
new_store_item().oid);
}
} TX_ONABORT {
perror("transaction aborted\n");
map_ctx_free(mapc);
aborted = 1;
} TX_END
if (aborted)
return -1;
/* count the items */
map_foreach(mapc, D_RW(root)->map, get_keys, NULL);
/* remove the items without outer transaction */
for (int i = 0; i < nkeys; ++i) {
PMEMoid item = map_remove(mapc, D_RW(root)->map, keys[i]);
assert(!OID_IS_NULL(item));
assert(OID_INSTANCEOF(item, struct store_item));
}
uint64_t old_nkeys = nkeys;
/* tree should be empty */
map_foreach(mapc, D_RW(root)->map, dec_keys, NULL);
assert(old_nkeys == nkeys);
map_ctx_free(mapc);
pmemobj_close(pop);
return 0;
}
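/*
 * A minimal sketch of map_insert_new(), which the example above does not
 * exercise: the value object is allocated inside the map operation and
 * initialized through the constructor callback. The names below
 * (item_construct, insert_new_demo) and the literal key are illustrative;
 * note that backends which leave insert_new NULL in their ops table do
 * not support this call.
 */
static void
item_construct(PMEMobjpool *pop, void *ptr, void *arg)
{
	struct store_item *item = (struct store_item *)ptr;
	item->item_data = *(uint64_t *)arg;
	pmemobj_persist(pop, item, sizeof(*item)); /* make the init durable */
}

static void
insert_new_demo(struct map_ctx *mapc, TOID(struct map) map)
{
	uint64_t data = 7;
	map_insert_new(mapc, map, 123 /* key */, sizeof(struct store_item),
		TOID_TYPE_NUM(struct store_item), item_construct, &data);
}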
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/examples/libpmemobj/map/map_rtree.c
/*
* Copyright 2016, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* map_rtree.c -- common interface for maps
*/
#include <rtree_map.h>
#include "map_rtree.h"
/*
* map_rtree_check -- wrapper for rtree_map_check
*/
static int
map_rtree_check(PMEMobjpool *pop, TOID(struct map) map)
{
TOID(struct rtree_map) rtree_map;
TOID_ASSIGN(rtree_map, map.oid);
return rtree_map_check(pop, rtree_map);
}
/*
* map_rtree_create -- wrapper for rtree_map_new
*/
static int
map_rtree_create(PMEMobjpool *pop, TOID(struct map) *map, void *arg)
{
TOID(struct rtree_map) *rtree_map =
(TOID(struct rtree_map) *)map;
return rtree_map_create(pop, rtree_map, arg);
}
/*
* map_rtree_destroy -- wrapper for rtree_map_delete
*/
static int
map_rtree_destroy(PMEMobjpool *pop, TOID(struct map) *map)
{
TOID(struct rtree_map) *rtree_map =
(TOID(struct rtree_map) *)map;
return rtree_map_destroy(pop, rtree_map);
}
/*
* map_rtree_insert -- wrapper for rtree_map_insert
*/
static int
map_rtree_insert(PMEMobjpool *pop, TOID(struct map) map,
uint64_t key, PMEMoid value)
{
TOID(struct rtree_map) rtree_map;
TOID_ASSIGN(rtree_map, map.oid);
return rtree_map_insert(pop, rtree_map,
(unsigned char *)&key, sizeof(key), value);
}
/*
* map_rtree_insert_new -- wrapper for rtree_map_insert_new
*/
static int
map_rtree_insert_new(PMEMobjpool *pop, TOID(struct map) map,
uint64_t key, size_t size,
unsigned type_num,
void (*constructor)(PMEMobjpool *pop, void *ptr, void *arg),
void *arg)
{
TOID(struct rtree_map) rtree_map;
TOID_ASSIGN(rtree_map, map.oid);
return rtree_map_insert_new(pop, rtree_map,
(unsigned char *)&key, sizeof(key), size,
type_num, constructor, arg);
}
/*
* map_rtree_remove -- wrapper for rtree_map_remove
*/
static PMEMoid
map_rtree_remove(PMEMobjpool *pop, TOID(struct map) map, uint64_t key)
{
TOID(struct rtree_map) rtree_map;
TOID_ASSIGN(rtree_map, map.oid);
return rtree_map_remove(pop, rtree_map,
(unsigned char *)&key, sizeof(key));
}
/*
* map_rtree_remove_free -- wrapper for rtree_map_remove_free
*/
static int
map_rtree_remove_free(PMEMobjpool *pop, TOID(struct map) map, uint64_t key)
{
TOID(struct rtree_map) rtree_map;
TOID_ASSIGN(rtree_map, map.oid);
return rtree_map_remove_free(pop, rtree_map,
(unsigned char *)&key, sizeof(key));
}
/*
* map_rtree_clear -- wrapper for rtree_map_clear
*/
static int
map_rtree_clear(PMEMobjpool *pop, TOID(struct map) map)
{
TOID(struct rtree_map) rtree_map;
TOID_ASSIGN(rtree_map, map.oid);
return rtree_map_clear(pop, rtree_map);
}
/*
* map_rtree_get -- wrapper for rtree_map_get
*/
static PMEMoid
map_rtree_get(PMEMobjpool *pop, TOID(struct map) map, uint64_t key)
{
TOID(struct rtree_map) rtree_map;
TOID_ASSIGN(rtree_map, map.oid);
return rtree_map_get(pop, rtree_map,
(unsigned char *)&key, sizeof(key));
}
/*
* map_rtree_lookup -- wrapper for rtree_map_lookup
*/
static int
map_rtree_lookup(PMEMobjpool *pop, TOID(struct map) map, uint64_t key)
{
TOID(struct rtree_map) rtree_map;
TOID_ASSIGN(rtree_map, map.oid);
return rtree_map_lookup(pop, rtree_map,
(unsigned char *)&key, sizeof(key));
}
struct cb_arg2 {
int (*cb)(uint64_t key, PMEMoid value, void *arg);
void *arg;
};
/*
* map_rtree_foreach_cb -- wrapper for callback
*/
static int
map_rtree_foreach_cb(const unsigned char *key,
uint64_t key_size, PMEMoid value, void *arg2)
{
const struct cb_arg2 *const a2 = (const struct cb_arg2 *)arg2;
const uint64_t *const k2 = (uint64_t *)key;
return a2->cb(*k2, value, a2->arg);
}
/*
* map_rtree_foreach -- wrapper for rtree_map_foreach
*/
static int
map_rtree_foreach(PMEMobjpool *pop, TOID(struct map) map,
int (*cb)(uint64_t key, PMEMoid value, void *arg),
void *arg)
{
struct cb_arg2 arg2 = {cb, arg};
TOID(struct rtree_map) rtree_map;
TOID_ASSIGN(rtree_map, map.oid);
return rtree_map_foreach(pop, rtree_map, map_rtree_foreach_cb, &arg2);
}
/*
* map_rtree_is_empty -- wrapper for rtree_map_is_empty
*/
static int
map_rtree_is_empty(PMEMobjpool *pop, TOID(struct map) map)
{
TOID(struct rtree_map) rtree_map;
TOID_ASSIGN(rtree_map, map.oid);
return rtree_map_is_empty(pop, rtree_map);
}
struct map_ops rtree_map_ops = {
/* .check = */map_rtree_check,
/* .create = */map_rtree_create,
/* .destroy = */map_rtree_destroy,
/* .init = */NULL,
/* .insert = */map_rtree_insert,
/* .insert_new = */map_rtree_insert_new,
/* .remove = */map_rtree_remove,
/* .remove_free = */map_rtree_remove_free,
/* .clear = */map_rtree_clear,
/* .get = */map_rtree_get,
/* .lookup = */map_rtree_lookup,
/* .foreach = */map_rtree_foreach,
/* .is_empty = */map_rtree_is_empty,
/* .count = */NULL,
/* .cmd = */NULL,
};
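/*
 * The wrappers above pass the key's raw in-memory bytes to the rtree,
 * which orders entries lexicographically by byte string, so on a
 * little-endian machine numeric key order and rtree iteration order
 * differ. A sketch of an order-preserving encoding -- an assumption,
 * not what these wrappers do -- would store the key big-endian:
 */
#include <endian.h> /* htobe64() is glibc-specific */
#include <string.h>

static void
key_to_be_bytes(uint64_t key, unsigned char out[sizeof(uint64_t)])
{
	uint64_t be = htobe64(key); /* most significant byte first */
	memcpy(out, &be, sizeof(be));
}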
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/examples/libpmemobj/map/mapcli.c
/*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <ex_common.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <inttypes.h>
#include <libpmemobj.h>
#include "map.h"
#include "map_ctree.h"
#include "map_btree.h"
#include "map_rtree.h"
#include "map_rbtree.h"
#include "map_hashmap_atomic.h"
#include "map_hashmap_tx.h"
#include "map_hashmap_rp.h"
#include "map_skiplist.h"
#include "hashmap/hashmap.h"
#define PM_HASHSET_POOL_SIZE (160 * 1024 * 1024)
POBJ_LAYOUT_BEGIN(map);
POBJ_LAYOUT_ROOT(map, struct root);
POBJ_LAYOUT_END(map);
struct root {
TOID(struct map) map;
};
static PMEMobjpool *pop;
static struct map_ctx *mapc;
static TOID(struct root) root;
static TOID(struct map) map;
/*
 * str_insert -- map_insert wrapper which works on strings
*/
static void
str_insert(const char *str)
{
uint64_t key;
if (sscanf(str, "%" PRIu64, &key) > 0)
map_insert(mapc, map, key, OID_NULL);
else
fprintf(stderr, "insert: invalid syntax\n");
}
/*
 * str_remove -- map_remove wrapper which works on strings
*/
static void
str_remove(const char *str)
{
uint64_t key;
if (sscanf(str, "%" PRIu64, &key) > 0) {
int l = map_lookup(mapc, map, key);
if (l)
map_remove(mapc, map, key);
else
fprintf(stderr, "no such value\n");
} else
fprintf(stderr, "remove: invalid syntax\n");
}
/*
 * str_check -- map_lookup wrapper which works on strings
*/
static void
str_check(const char *str)
{
uint64_t key;
if (sscanf(str, "%" PRIu64, &key) > 0) {
int r = map_lookup(mapc, map, key);
printf("%d\n", r);
} else {
fprintf(stderr, "check: invalid syntax\n");
}
}
/*
 * str_insert_random -- inserts the specified (as a string) number of random keys
*/
static void
str_insert_random(const char *str)
{
uint64_t val;
if (sscanf(str, "%" PRIu64, &val) > 0)
for (uint64_t i = 0; i < val; ) {
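			/*
			 * note: rand() yields at most 31 random bits
			 * (RAND_MAX is commonly 2^31 - 1), so the top
			 * bit of each 32-bit half is effectively zero
			 */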
uint64_t r = ((uint64_t)rand()) << 32 | rand();
int ret = map_insert(mapc, map, r, OID_NULL);
if (ret < 0)
break;
if (ret == 0)
i += 1;
}
else
fprintf(stderr, "random insert: invalid syntax\n");
}
/*
 * rebuild -- rebuilds the hashmap and measures execution time
*/
static void
rebuild(void)
{
printf("rebuild ");
fflush(stdout);
time_t t1 = time(NULL);
map_cmd(mapc, map, HASHMAP_CMD_REBUILD, 0);
printf("%" PRIu64"s\n", (uint64_t)(time(NULL) - t1));
}
/*
 * str_rebuild -- rebuild() wrapper which executes it a specified number of times
*/
static void
str_rebuild(const char *str)
{
uint64_t val;
if (sscanf(str, "%" PRIu64, &val) > 0) {
for (uint64_t i = 0; i < val; ++i) {
printf("%2" PRIu64 " ", i);
rebuild();
}
} else {
rebuild();
}
}
static void
help(void)
{
printf("h - help\n");
printf("i $value - insert $value\n");
printf("r $value - remove $value\n");
printf("c $value - check $value, returns 0/1\n");
printf("n $value - insert $value random values\n");
printf("p - print all values\n");
printf("d - print debug info\n");
printf("b [$value] - rebuild $value (default: 1) times\n");
printf("q - quit\n");
}
static void
unknown_command(const char *str)
{
fprintf(stderr, "unknown command '%c', use 'h' for help\n", str[0]);
}
static int
hashmap_print(uint64_t key, PMEMoid value, void *arg)
{
printf("%" PRIu64 " ", key);
return 0;
}
static void
print_all(void)
{
if (mapc->ops->count)
printf("count: %zu\n", map_count(mapc, map));
map_foreach(mapc, map, hashmap_print, NULL);
printf("\n");
}
#define INPUT_BUF_LEN 1000
int
main(int argc, char *argv[])
{
if (argc < 3 || argc > 4) {
printf("usage: %s "
"hashmap_tx|hashmap_atomic|hashmap_rp|"
"ctree|btree|rtree|rbtree|skiplist"
" file-name [<seed>]\n", argv[0]);
return 1;
}
const struct map_ops *ops = NULL;
const char *path = argv[2];
const char *type = argv[1];
if (strcmp(type, "hashmap_tx") == 0) {
ops = MAP_HASHMAP_TX;
} else if (strcmp(type, "hashmap_atomic") == 0) {
ops = MAP_HASHMAP_ATOMIC;
} else if (strcmp(type, "hashmap_rp") == 0) {
ops = MAP_HASHMAP_RP;
} else if (strcmp(type, "ctree") == 0) {
ops = MAP_CTREE;
} else if (strcmp(type, "btree") == 0) {
ops = MAP_BTREE;
} else if (strcmp(type, "rtree") == 0) {
ops = MAP_RTREE;
} else if (strcmp(type, "rbtree") == 0) {
ops = MAP_RBTREE;
} else if (strcmp(type, "skiplist") == 0) {
ops = MAP_SKIPLIST;
} else {
fprintf(stderr, "invalid container type -- '%s'\n", type);
return 1;
}
if (file_exists(path) != 0) {
pop = pmemobj_create(path, POBJ_LAYOUT_NAME(map),
PM_HASHSET_POOL_SIZE, CREATE_MODE_RW);
if (pop == NULL) {
fprintf(stderr, "failed to create pool: %s\n",
pmemobj_errormsg());
return 1;
}
struct hashmap_args args;
if (argc > 3)
args.seed = atoi(argv[3]);
else
args.seed = (uint32_t)time(NULL);
srand(args.seed);
mapc = map_ctx_init(ops, pop);
if (!mapc) {
pmemobj_close(pop);
perror("map_ctx_init");
return 1;
}
root = POBJ_ROOT(pop, struct root);
printf("seed: %u\n", args.seed);
map_create(mapc, &D_RW(root)->map, &args);
map = D_RO(root)->map;
} else {
pop = pmemobj_open(path, POBJ_LAYOUT_NAME(map));
if (pop == NULL) {
fprintf(stderr, "failed to open pool: %s\n",
pmemobj_errormsg());
return 1;
}
mapc = map_ctx_init(ops, pop);
if (!mapc) {
pmemobj_close(pop);
perror("map_ctx_init");
return 1;
}
root = POBJ_ROOT(pop, struct root);
map = D_RO(root)->map;
}
char buf[INPUT_BUF_LEN];
if (isatty(fileno(stdout)))
printf("Type 'h' for help\n$ ");
while (fgets(buf, sizeof(buf), stdin)) {
if (buf[0] == 0 || buf[0] == '\n')
continue;
switch (buf[0]) {
case 'i':
str_insert(buf + 1);
break;
case 'r':
str_remove(buf + 1);
break;
case 'c':
str_check(buf + 1);
break;
case 'n':
str_insert_random(buf + 1);
break;
case 'p':
print_all();
break;
case 'd':
map_cmd(mapc, map, HASHMAP_CMD_DEBUG,
(uint64_t)stdout);
break;
case 'b':
str_rebuild(buf + 1);
break;
case 'q':
fclose(stdin);
break;
case 'h':
help();
break;
default:
unknown_command(buf);
break;
}
if (isatty(fileno(stdout)))
printf("$ ");
}
map_ctx_free(mapc);
pmemobj_close(pop);
return 0;
}
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/examples/libpmemobj/map/map_hashmap_rp.c
/*
* Copyright 2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* map_hashmap_rp.c -- common interface for maps
*/
#include <map.h>
#include <hashmap_rp.h>
#include "map_hashmap_rp.h"
/*
* map_hm_rp_check -- wrapper for hm_rp_check
*/
static int
map_hm_rp_check(PMEMobjpool *pop, TOID(struct map) map)
{
TOID(struct hashmap_rp) hashmap_rp;
TOID_ASSIGN(hashmap_rp, map.oid);
return hm_rp_check(pop, hashmap_rp);
}
/*
* map_hm_rp_count -- wrapper for hm_rp_count
*/
static size_t
map_hm_rp_count(PMEMobjpool *pop, TOID(struct map) map)
{
TOID(struct hashmap_rp) hashmap_rp;
TOID_ASSIGN(hashmap_rp, map.oid);
return hm_rp_count(pop, hashmap_rp);
}
/*
* map_hm_rp_init -- wrapper for hm_rp_init
*/
static int
map_hm_rp_init(PMEMobjpool *pop, TOID(struct map) map)
{
TOID(struct hashmap_rp) hashmap_rp;
TOID_ASSIGN(hashmap_rp, map.oid);
return hm_rp_init(pop, hashmap_rp);
}
/*
* map_hm_rp_create -- wrapper for hm_rp_create
*/
static int
map_hm_rp_create(PMEMobjpool *pop, TOID(struct map) *map, void *arg)
{
TOID(struct hashmap_rp) *hashmap_rp =
(TOID(struct hashmap_rp) *)map;
return hm_rp_create(pop, hashmap_rp, arg);
}
/*
* map_hm_rp_insert -- wrapper for hm_rp_insert
*/
static int
map_hm_rp_insert(PMEMobjpool *pop, TOID(struct map) map,
uint64_t key, PMEMoid value)
{
TOID(struct hashmap_rp) hashmap_rp;
TOID_ASSIGN(hashmap_rp, map.oid);
return hm_rp_insert(pop, hashmap_rp, key, value);
}
/*
* map_hm_rp_remove -- wrapper for hm_rp_remove
*/
static PMEMoid
map_hm_rp_remove(PMEMobjpool *pop, TOID(struct map) map, uint64_t key)
{
TOID(struct hashmap_rp) hashmap_rp;
TOID_ASSIGN(hashmap_rp, map.oid);
return hm_rp_remove(pop, hashmap_rp, key);
}
/*
* map_hm_rp_get -- wrapper for hm_rp_get
*/
static PMEMoid
map_hm_rp_get(PMEMobjpool *pop, TOID(struct map) map, uint64_t key)
{
TOID(struct hashmap_rp) hashmap_rp;
TOID_ASSIGN(hashmap_rp, map.oid);
return hm_rp_get(pop, hashmap_rp, key);
}
/*
* map_hm_rp_lookup -- wrapper for hm_rp_lookup
*/
static int
map_hm_rp_lookup(PMEMobjpool *pop, TOID(struct map) map, uint64_t key)
{
TOID(struct hashmap_rp) hashmap_rp;
TOID_ASSIGN(hashmap_rp, map.oid);
return hm_rp_lookup(pop, hashmap_rp, key);
}
/*
* map_hm_rp_foreach -- wrapper for hm_rp_foreach
*/
static int
map_hm_rp_foreach(PMEMobjpool *pop, TOID(struct map) map,
int (*cb)(uint64_t key, PMEMoid value, void *arg),
void *arg)
{
TOID(struct hashmap_rp) hashmap_rp;
TOID_ASSIGN(hashmap_rp, map.oid);
return hm_rp_foreach(pop, hashmap_rp, cb, arg);
}
/*
* map_hm_rp_cmd -- wrapper for hm_rp_cmd
*/
static int
map_hm_rp_cmd(PMEMobjpool *pop, TOID(struct map) map,
unsigned cmd, uint64_t arg)
{
TOID(struct hashmap_rp) hashmap_rp;
TOID_ASSIGN(hashmap_rp, map.oid);
return hm_rp_cmd(pop, hashmap_rp, cmd, arg);
}
struct map_ops hashmap_rp_ops = {
/* .check = */ map_hm_rp_check,
/* .create = */ map_hm_rp_create,
/* .destroy = */ NULL,
/* .init = */ map_hm_rp_init,
/* .insert = */ map_hm_rp_insert,
/* .insert_new = */ NULL,
/* .remove = */ map_hm_rp_remove,
/* .remove_free = */ NULL,
/* .clear = */ NULL,
/* .get = */ map_hm_rp_get,
/* .lookup = */ map_hm_rp_lookup,
/* .foreach = */ map_hm_rp_foreach,
/* .is_empty = */ NULL,
/* .count = */ map_hm_rp_count,
/* .cmd = */ map_hm_rp_cmd,
};
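/*
 * The slots left NULL above (destroy, insert_new, remove_free, clear,
 * is_empty) are not implemented by the robin hood hashmap; calling them
 * through the generic interface aborts via ABORT_NOT_IMPLEMENTED in map.c.
 */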
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/examples/libpmemobj/map/map_skiplist.c
/*
* Copyright 2016, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* map_skiplist.c -- common interface for maps
*/
#include <map.h>
#include <skiplist_map.h>
#include "map_skiplist.h"
/*
* map_skiplist_check -- wrapper for skiplist_map_check
*/
static int
map_skiplist_check(PMEMobjpool *pop, TOID(struct map) map)
{
TOID(struct skiplist_map_node) skiplist_map;
TOID_ASSIGN(skiplist_map, map.oid);
return skiplist_map_check(pop, skiplist_map);
}
/*
* map_skiplist_create -- wrapper for skiplist_map_new
*/
static int
map_skiplist_create(PMEMobjpool *pop, TOID(struct map) *map, void *arg)
{
TOID(struct skiplist_map_node) *skiplist_map =
(TOID(struct skiplist_map_node) *)map;
return skiplist_map_create(pop, skiplist_map, arg);
}
/*
* map_skiplist_destroy -- wrapper for skiplist_map_delete
*/
static int
map_skiplist_destroy(PMEMobjpool *pop, TOID(struct map) *map)
{
TOID(struct skiplist_map_node) *skiplist_map =
(TOID(struct skiplist_map_node) *)map;
return skiplist_map_destroy(pop, skiplist_map);
}
/*
* map_skiplist_insert -- wrapper for skiplist_map_insert
*/
static int
map_skiplist_insert(PMEMobjpool *pop, TOID(struct map) map,
uint64_t key, PMEMoid value)
{
TOID(struct skiplist_map_node) skiplist_map;
TOID_ASSIGN(skiplist_map, map.oid);
return skiplist_map_insert(pop, skiplist_map, key, value);
}
/*
* map_skiplist_insert_new -- wrapper for skiplist_map_insert_new
*/
static int
map_skiplist_insert_new(PMEMobjpool *pop, TOID(struct map) map,
uint64_t key, size_t size,
unsigned type_num,
void (*constructor)(PMEMobjpool *pop, void *ptr, void *arg),
void *arg)
{
TOID(struct skiplist_map_node) skiplist_map;
TOID_ASSIGN(skiplist_map, map.oid);
return skiplist_map_insert_new(pop, skiplist_map, key, size,
type_num, constructor, arg);
}
/*
* map_skiplist_remove -- wrapper for skiplist_map_remove
*/
static PMEMoid
map_skiplist_remove(PMEMobjpool *pop, TOID(struct map) map, uint64_t key)
{
TOID(struct skiplist_map_node) skiplist_map;
TOID_ASSIGN(skiplist_map, map.oid);
return skiplist_map_remove(pop, skiplist_map, key);
}
/*
* map_skiplist_remove_free -- wrapper for skiplist_map_remove_free
*/
static int
map_skiplist_remove_free(PMEMobjpool *pop, TOID(struct map) map, uint64_t key)
{
TOID(struct skiplist_map_node) skiplist_map;
TOID_ASSIGN(skiplist_map, map.oid);
return skiplist_map_remove_free(pop, skiplist_map, key);
}
/*
* map_skiplist_clear -- wrapper for skiplist_map_clear
*/
static int
map_skiplist_clear(PMEMobjpool *pop, TOID(struct map) map)
{
TOID(struct skiplist_map_node) skiplist_map;
TOID_ASSIGN(skiplist_map, map.oid);
return skiplist_map_clear(pop, skiplist_map);
}
/*
* map_skiplist_get -- wrapper for skiplist_map_get
*/
static PMEMoid
map_skiplist_get(PMEMobjpool *pop, TOID(struct map) map, uint64_t key)
{
TOID(struct skiplist_map_node) skiplist_map;
TOID_ASSIGN(skiplist_map, map.oid);
return skiplist_map_get(pop, skiplist_map, key);
}
/*
* map_skiplist_lookup -- wrapper for skiplist_map_lookup
*/
static int
map_skiplist_lookup(PMEMobjpool *pop, TOID(struct map) map, uint64_t key)
{
TOID(struct skiplist_map_node) skiplist_map;
TOID_ASSIGN(skiplist_map, map.oid);
return skiplist_map_lookup(pop, skiplist_map, key);
}
/*
* map_skiplist_foreach -- wrapper for skiplist_map_foreach
*/
static int
map_skiplist_foreach(PMEMobjpool *pop, TOID(struct map) map,
int (*cb)(uint64_t key, PMEMoid value, void *arg),
void *arg)
{
TOID(struct skiplist_map_node) skiplist_map;
TOID_ASSIGN(skiplist_map, map.oid);
return skiplist_map_foreach(pop, skiplist_map, cb, arg);
}
/*
* map_skiplist_is_empty -- wrapper for skiplist_map_is_empty
*/
static int
map_skiplist_is_empty(PMEMobjpool *pop, TOID(struct map) map)
{
TOID(struct skiplist_map_node) skiplist_map;
TOID_ASSIGN(skiplist_map, map.oid);
return skiplist_map_is_empty(pop, skiplist_map);
}
struct map_ops skiplist_map_ops = {
/* .check = */ map_skiplist_check,
/* .create = */ map_skiplist_create,
/* .destroy = */ map_skiplist_destroy,
/* .init = */ NULL,
/* .insert = */ map_skiplist_insert,
/* .insert_new = */ map_skiplist_insert_new,
/* .remove = */ map_skiplist_remove,
/* .remove_free = */ map_skiplist_remove_free,
/* .clear = */ map_skiplist_clear,
/* .get = */ map_skiplist_get,
/* .lookup = */ map_skiplist_lookup,
/* .foreach = */ map_skiplist_foreach,
/* .is_empty = */ map_skiplist_is_empty,
/* .count = */ NULL,
/* .cmd = */ NULL,
};
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/examples/libpmemobj/map/map_hashmap_tx.h
/*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* map_hashmap_tx.h -- common interface for maps
*/
#ifndef MAP_HASHMAP_TX_H
#define MAP_HASHMAP_TX_H
#include "map.h"
#ifdef __cplusplus
extern "C" {
#endif
extern struct map_ops hashmap_tx_ops;
#define MAP_HASHMAP_TX (&hashmap_tx_ops)
#ifdef __cplusplus
}
#endif
#endif /* MAP_HASHMAP_TX_H */
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/examples/libpmemobj/map/map_rbtree.h
/*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* map_rbtree.h -- common interface for maps
*/
#ifndef MAP_RBTREE_H
#define MAP_RBTREE_H
#include "map.h"
#ifdef __cplusplus
extern "C" {
#endif
extern struct map_ops rbtree_map_ops;
#define MAP_RBTREE (&rbtree_map_ops)
#ifdef __cplusplus
}
#endif
#endif /* MAP_RBTREE_H */
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/examples/libpmemobj/map/map_ctree.c
/*
* Copyright 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* map_ctree.c -- common interface for maps
*/
#include <map.h>
#include <ctree_map.h>
#include "map_ctree.h"
/*
* map_ctree_check -- wrapper for ctree_map_check
*/
static int
map_ctree_check(PMEMobjpool *pop, TOID(struct map) map)
{
TOID(struct ctree_map) ctree_map;
TOID_ASSIGN(ctree_map, map.oid);
return ctree_map_check(pop, ctree_map);
}
/*
* map_ctree_create -- wrapper for ctree_map_create
*/
static int
map_ctree_create(PMEMobjpool *pop, TOID(struct map) *map, void *arg)
{
TOID(struct ctree_map) *ctree_map =
(TOID(struct ctree_map) *)map;
return ctree_map_create(pop, ctree_map, arg);
}
/*
* map_ctree_destroy -- wrapper for ctree_map_destroy
*/
static int
map_ctree_destroy(PMEMobjpool *pop, TOID(struct map) *map)
{
TOID(struct ctree_map) *ctree_map =
(TOID(struct ctree_map) *)map;
return ctree_map_destroy(pop, ctree_map);
}
/*
* map_ctree_insert -- wrapper for ctree_map_insert
*/
static int
map_ctree_insert(PMEMobjpool *pop, TOID(struct map) map,
uint64_t key, PMEMoid value)
{
TOID(struct ctree_map) ctree_map;
TOID_ASSIGN(ctree_map, map.oid);
return ctree_map_insert(pop, ctree_map, key, value);
}
/*
* map_ctree_insert_new -- wrapper for ctree_map_insert_new
*/
static int
map_ctree_insert_new(PMEMobjpool *pop, TOID(struct map) map,
uint64_t key, size_t size,
unsigned type_num,
void (*constructor)(PMEMobjpool *pop, void *ptr, void *arg),
void *arg)
{
TOID(struct ctree_map) ctree_map;
TOID_ASSIGN(ctree_map, map.oid);
return ctree_map_insert_new(pop, ctree_map, key, size,
type_num, constructor, arg);
}
/*
* map_ctree_remove -- wrapper for ctree_map_remove
*/
static PMEMoid
map_ctree_remove(PMEMobjpool *pop, TOID(struct map) map, uint64_t key)
{
TOID(struct ctree_map) ctree_map;
TOID_ASSIGN(ctree_map, map.oid);
return ctree_map_remove(pop, ctree_map, key);
}
/*
* map_ctree_remove_free -- wrapper for ctree_map_remove_free
*/
static int
map_ctree_remove_free(PMEMobjpool *pop, TOID(struct map) map, uint64_t key)
{
TOID(struct ctree_map) ctree_map;
TOID_ASSIGN(ctree_map, map.oid);
return ctree_map_remove_free(pop, ctree_map, key);
}
/*
* map_ctree_clear -- wrapper for ctree_map_clear
*/
static int
map_ctree_clear(PMEMobjpool *pop, TOID(struct map) map)
{
TOID(struct ctree_map) ctree_map;
TOID_ASSIGN(ctree_map, map.oid);
return ctree_map_clear(pop, ctree_map);
}
/*
* map_ctree_get -- wrapper for ctree_map_get
*/
static PMEMoid
map_ctree_get(PMEMobjpool *pop, TOID(struct map) map, uint64_t key)
{
TOID(struct ctree_map) ctree_map;
TOID_ASSIGN(ctree_map, map.oid);
return ctree_map_get(pop, ctree_map, key);
}
/*
* map_ctree_lookup -- wrapper for ctree_map_lookup
*/
static int
map_ctree_lookup(PMEMobjpool *pop, TOID(struct map) map, uint64_t key)
{
TOID(struct ctree_map) ctree_map;
TOID_ASSIGN(ctree_map, map.oid);
return ctree_map_lookup(pop, ctree_map, key);
}
/*
* map_ctree_foreach -- wrapper for ctree_map_foreach
*/
static int
map_ctree_foreach(PMEMobjpool *pop, TOID(struct map) map,
int (*cb)(uint64_t key, PMEMoid value, void *arg),
void *arg)
{
TOID(struct ctree_map) ctree_map;
TOID_ASSIGN(ctree_map, map.oid);
return ctree_map_foreach(pop, ctree_map, cb, arg);
}
/*
* map_ctree_is_empty -- wrapper for ctree_map_is_empty
*/
static int
map_ctree_is_empty(PMEMobjpool *pop, TOID(struct map) map)
{
TOID(struct ctree_map) ctree_map;
TOID_ASSIGN(ctree_map, map.oid);
return ctree_map_is_empty(pop, ctree_map);
}
struct map_ops ctree_map_ops = {
/* .check = */ map_ctree_check,
/* .create = */ map_ctree_create,
/* .destroy = */ map_ctree_destroy,
/* .init = */ NULL,
/* .insert = */ map_ctree_insert,
/* .insert_new = */ map_ctree_insert_new,
/* .remove = */ map_ctree_remove,
/* .remove_free = */ map_ctree_remove_free,
/* .clear = */ map_ctree_clear,
/* .get = */ map_ctree_get,
/* .lookup = */ map_ctree_lookup,
/* .foreach = */ map_ctree_foreach,
/* .is_empty = */ map_ctree_is_empty,
/* .count = */ NULL,
/* .cmd = */ NULL,
};
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/examples/libpmemobj/map/map_btree.c
/*
* Copyright 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* map_btree.c -- common interface for maps
*/
#include <map.h>
#include <btree_map.h>
#include "map_btree.h"
/*
* map_btree_check -- wrapper for btree_map_check
*/
static int
map_btree_check(PMEMobjpool *pop, TOID(struct map) map)
{
TOID(struct btree_map) btree_map;
TOID_ASSIGN(btree_map, map.oid);
return btree_map_check(pop, btree_map);
}
/*
* map_btree_create -- wrapper for btree_map_create
*/
static int
map_btree_create(PMEMobjpool *pop, TOID(struct map) *map, void *arg)
{
TOID(struct btree_map) *btree_map =
(TOID(struct btree_map) *)map;
return btree_map_create(pop, btree_map, arg);
}
/*
* map_btree_destroy -- wrapper for btree_map_destroy
*/
static int
map_btree_destroy(PMEMobjpool *pop, TOID(struct map) *map)
{
TOID(struct btree_map) *btree_map =
(TOID(struct btree_map) *)map;
return btree_map_destroy(pop, btree_map);
}
/*
* map_btree_insert -- wrapper for btree_map_insert
*/
static int
map_btree_insert(PMEMobjpool *pop, TOID(struct map) map,
uint64_t key, PMEMoid value)
{
TOID(struct btree_map) btree_map;
TOID_ASSIGN(btree_map, map.oid);
return btree_map_insert(pop, btree_map, key, value);
}
/*
* map_btree_insert_new -- wrapper for btree_map_insert_new
*/
static int
map_btree_insert_new(PMEMobjpool *pop, TOID(struct map) map,
uint64_t key, size_t size,
unsigned type_num,
void (*constructor)(PMEMobjpool *pop, void *ptr, void *arg),
void *arg)
{
TOID(struct btree_map) btree_map;
TOID_ASSIGN(btree_map, map.oid);
return btree_map_insert_new(pop, btree_map, key, size,
type_num, constructor, arg);
}
/*
* map_btree_remove -- wrapper for btree_map_remove
*/
static PMEMoid
map_btree_remove(PMEMobjpool *pop, TOID(struct map) map, uint64_t key)
{
TOID(struct btree_map) btree_map;
TOID_ASSIGN(btree_map, map.oid);
return btree_map_remove(pop, btree_map, key);
}
/*
* map_btree_remove_free -- wrapper for btree_map_remove_free
*/
static int
map_btree_remove_free(PMEMobjpool *pop, TOID(struct map) map, uint64_t key)
{
TOID(struct btree_map) btree_map;
TOID_ASSIGN(btree_map, map.oid);
return btree_map_remove_free(pop, btree_map, key);
}
/*
* map_btree_clear -- wrapper for btree_map_clear
*/
static int
map_btree_clear(PMEMobjpool *pop, TOID(struct map) map)
{
TOID(struct btree_map) btree_map;
TOID_ASSIGN(btree_map, map.oid);
return btree_map_clear(pop, btree_map);
}
/*
* map_btree_get -- wrapper for btree_map_get
*/
static PMEMoid
map_btree_get(PMEMobjpool *pop, TOID(struct map) map, uint64_t key)
{
TOID(struct btree_map) btree_map;
TOID_ASSIGN(btree_map, map.oid);
return btree_map_get(pop, btree_map, key);
}
/*
* map_btree_lookup -- wrapper for btree_map_lookup
*/
static int
map_btree_lookup(PMEMobjpool *pop, TOID(struct map) map, uint64_t key)
{
TOID(struct btree_map) btree_map;
TOID_ASSIGN(btree_map, map.oid);
return btree_map_lookup(pop, btree_map, key);
}
/*
* map_btree_foreach -- wrapper for btree_map_foreach
*/
static int
map_btree_foreach(PMEMobjpool *pop, TOID(struct map) map,
int (*cb)(uint64_t key, PMEMoid value, void *arg),
void *arg)
{
TOID(struct btree_map) btree_map;
TOID_ASSIGN(btree_map, map.oid);
return btree_map_foreach(pop, btree_map, cb, arg);
}
/*
* map_btree_is_empty -- wrapper for btree_map_is_empty
*/
static int
map_btree_is_empty(PMEMobjpool *pop, TOID(struct map) map)
{
TOID(struct btree_map) btree_map;
TOID_ASSIGN(btree_map, map.oid);
return btree_map_is_empty(pop, btree_map);
}
struct map_ops btree_map_ops = {
/* .check = */ map_btree_check,
/* .create = */ map_btree_create,
/* .destroy = */ map_btree_destroy,
/* .init = */ NULL,
/* .insert = */ map_btree_insert,
/* .insert_new = */ map_btree_insert_new,
/* .remove = */ map_btree_remove,
/* .remove_free = */ map_btree_remove_free,
/* .clear = */ map_btree_clear,
/* .get = */ map_btree_get,
/* .lookup = */ map_btree_lookup,
/* .foreach = */ map_btree_foreach,
/* .is_empty = */ map_btree_is_empty,
/* .count = */ NULL,
/* .cmd = */ NULL,
};
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/examples/libpmemobj/map/map.c
/*
* Copyright 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* map.c -- common interface for maps
*/
#include <stdlib.h>
#include <stdio.h>
#include <libpmemobj.h>
#include "map.h"
#define ABORT_NOT_IMPLEMENTED(mapc, func)\
if ((mapc)->ops->func == NULL) {\
fprintf(stderr, "error: '%s'"\
" function not implemented\n", #func);\
exit(1);\
}
/*
* map_ctx_init -- initialize map context
*/
struct map_ctx *
map_ctx_init(const struct map_ops *ops, PMEMobjpool *pop)
{
if (!ops)
return NULL;
struct map_ctx *mapc = (struct map_ctx *)calloc(1, sizeof(*mapc));
if (!mapc)
return NULL;
mapc->ops = ops;
mapc->pop = pop;
return mapc;
}
/*
* map_ctx_free -- free map context
*/
void
map_ctx_free(struct map_ctx *mapc)
{
free(mapc);
}
/*
* map_create -- create new map
*/
int
map_create(struct map_ctx *mapc, TOID(struct map) *map, void *arg)
{
ABORT_NOT_IMPLEMENTED(mapc, create);
return mapc->ops->create(mapc->pop, map, arg);
}
/*
* map_destroy -- free the map
*/
int
map_destroy(struct map_ctx *mapc, TOID(struct map) *map)
{
ABORT_NOT_IMPLEMENTED(mapc, destroy);
return mapc->ops->destroy(mapc->pop, map);
}
/*
* map_init -- initialize map
*/
int
map_init(struct map_ctx *mapc, TOID(struct map) map)
{
ABORT_NOT_IMPLEMENTED(mapc, init);
return mapc->ops->init(mapc->pop, map);
}
/*
* map_check -- check if persistent object is a valid map object
*/
int
map_check(struct map_ctx *mapc, TOID(struct map) map)
{
ABORT_NOT_IMPLEMENTED(mapc, check);
return mapc->ops->check(mapc->pop, map);
}
/*
* map_insert -- insert key value pair
*/
int
map_insert(struct map_ctx *mapc, TOID(struct map) map,
uint64_t key, PMEMoid value)
{
ABORT_NOT_IMPLEMENTED(mapc, insert);
return mapc->ops->insert(mapc->pop, map, key, value);
}
/*
* map_insert_new -- allocate and insert key value pair
*/
int
map_insert_new(struct map_ctx *mapc, TOID(struct map) map,
uint64_t key, size_t size, unsigned type_num,
void (*constructor)(PMEMobjpool *pop, void *ptr, void *arg),
void *arg)
{
ABORT_NOT_IMPLEMENTED(mapc, insert_new);
return mapc->ops->insert_new(mapc->pop, map, key, size,
type_num, constructor, arg);
}
/*
* map_remove -- remove key value pair
*/
PMEMoid
map_remove(struct map_ctx *mapc, TOID(struct map) map, uint64_t key)
{
ABORT_NOT_IMPLEMENTED(mapc, remove);
return mapc->ops->remove(mapc->pop, map, key);
}
/*
* map_remove_free -- remove and free key value pair
*/
int
map_remove_free(struct map_ctx *mapc, TOID(struct map) map, uint64_t key)
{
ABORT_NOT_IMPLEMENTED(mapc, remove_free);
return mapc->ops->remove_free(mapc->pop, map, key);
}
/*
* map_clear -- remove all key value pairs from map
*/
int
map_clear(struct map_ctx *mapc, TOID(struct map) map)
{
ABORT_NOT_IMPLEMENTED(mapc, clear);
return mapc->ops->clear(mapc->pop, map);
}
/*
* map_get -- get value of specified key
*/
PMEMoid
map_get(struct map_ctx *mapc, TOID(struct map) map, uint64_t key)
{
ABORT_NOT_IMPLEMENTED(mapc, get);
return mapc->ops->get(mapc->pop, map, key);
}
/*
* map_lookup -- check if specified key exists in map
*/
int
map_lookup(struct map_ctx *mapc, TOID(struct map) map, uint64_t key)
{
ABORT_NOT_IMPLEMENTED(mapc, lookup);
return mapc->ops->lookup(mapc->pop, map, key);
}
/*
* map_foreach -- iterate through all key value pairs in a map
*/
int
map_foreach(struct map_ctx *mapc, TOID(struct map) map,
int (*cb)(uint64_t key, PMEMoid value, void *arg),
void *arg)
{
ABORT_NOT_IMPLEMENTED(mapc, foreach);
return mapc->ops->foreach(mapc->pop, map, cb, arg);
}
/*
* map_is_empty -- check if map is empty
*/
int
map_is_empty(struct map_ctx *mapc, TOID(struct map) map)
{
ABORT_NOT_IMPLEMENTED(mapc, is_empty);
return mapc->ops->is_empty(mapc->pop, map);
}
/*
* map_count -- get number of key value pairs in map
*/
size_t
map_count(struct map_ctx *mapc, TOID(struct map) map)
{
ABORT_NOT_IMPLEMENTED(mapc, count);
return mapc->ops->count(mapc->pop, map);
}
/*
* map_cmd -- execute command specific for map type
*/
int
map_cmd(struct map_ctx *mapc, TOID(struct map) map, unsigned cmd, uint64_t arg)
{
ABORT_NOT_IMPLEMENTED(mapc, cmd);
return mapc->ops->cmd(mapc->pop, map, cmd, arg);
}
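/*
 * Usage sketch (illustration only, not part of this file's interface):
 * MAP_CTREE comes from map_ctree.h and "value" stands for any valid
 * PMEMoid owned by the caller.
 *
 * struct map_ctx *mapc = map_ctx_init(MAP_CTREE, pop);
 * TOID(struct map) map;
 * map_create(mapc, &map, NULL);
 * map_insert(mapc, map, 42, value);
 * PMEMoid v = map_get(mapc, map, 42);
 * map_ctx_free(mapc);
 */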
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/examples/libpmemobj/map/map_hashmap_atomic.c
/*
* Copyright 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* map_hashmap_atomic.c -- common interface for maps
*/
#include <map.h>
#include <hashmap_atomic.h>
#include "map_hashmap_atomic.h"
/*
* map_hm_atomic_check -- wrapper for hm_atomic_check
*/
static int
map_hm_atomic_check(PMEMobjpool *pop, TOID(struct map) map)
{
TOID(struct hashmap_atomic) hashmap_atomic;
TOID_ASSIGN(hashmap_atomic, map.oid);
return hm_atomic_check(pop, hashmap_atomic);
}
/*
* map_hm_atomic_count -- wrapper for hm_atomic_count
*/
static size_t
map_hm_atomic_count(PMEMobjpool *pop, TOID(struct map) map)
{
TOID(struct hashmap_atomic) hashmap_atomic;
TOID_ASSIGN(hashmap_atomic, map.oid);
return hm_atomic_count(pop, hashmap_atomic);
}
/*
* map_hm_atomic_init -- wrapper for hm_atomic_init
*/
static int
map_hm_atomic_init(PMEMobjpool *pop, TOID(struct map) map)
{
TOID(struct hashmap_atomic) hashmap_atomic;
TOID_ASSIGN(hashmap_atomic, map.oid);
return hm_atomic_init(pop, hashmap_atomic);
}
/*
* map_hm_atomic_create -- wrapper for hm_atomic_create
*/
static int
map_hm_atomic_create(PMEMobjpool *pop, TOID(struct map) *map, void *arg)
{
TOID(struct hashmap_atomic) *hashmap_atomic =
(TOID(struct hashmap_atomic) *)map;
return hm_atomic_create(pop, hashmap_atomic, arg);
}
/*
* map_hm_atomic_insert -- wrapper for hm_atomic_insert
*/
static int
map_hm_atomic_insert(PMEMobjpool *pop, TOID(struct map) map,
uint64_t key, PMEMoid value)
{
TOID(struct hashmap_atomic) hashmap_atomic;
TOID_ASSIGN(hashmap_atomic, map.oid);
return hm_atomic_insert(pop, hashmap_atomic, key, value);
}
/*
* map_hm_atomic_remove -- wrapper for hm_atomic_remove
*/
static PMEMoid
map_hm_atomic_remove(PMEMobjpool *pop, TOID(struct map) map, uint64_t key)
{
TOID(struct hashmap_atomic) hashmap_atomic;
TOID_ASSIGN(hashmap_atomic, map.oid);
return hm_atomic_remove(pop, hashmap_atomic, key);
}
/*
* map_hm_atomic_get -- wrapper for hm_atomic_get
*/
static PMEMoid
map_hm_atomic_get(PMEMobjpool *pop, TOID(struct map) map, uint64_t key)
{
TOID(struct hashmap_atomic) hashmap_atomic;
TOID_ASSIGN(hashmap_atomic, map.oid);
return hm_atomic_get(pop, hashmap_atomic, key);
}
/*
* map_hm_atomic_lookup -- wrapper for hm_atomic_lookup
*/
static int
map_hm_atomic_lookup(PMEMobjpool *pop, TOID(struct map) map, uint64_t key)
{
TOID(struct hashmap_atomic) hashmap_atomic;
TOID_ASSIGN(hashmap_atomic, map.oid);
return hm_atomic_lookup(pop, hashmap_atomic, key);
}
/*
* map_hm_atomic_foreach -- wrapper for hm_atomic_foreach
*/
static int
map_hm_atomic_foreach(PMEMobjpool *pop, TOID(struct map) map,
int (*cb)(uint64_t key, PMEMoid value, void *arg),
void *arg)
{
TOID(struct hashmap_atomic) hashmap_atomic;
TOID_ASSIGN(hashmap_atomic, map.oid);
return hm_atomic_foreach(pop, hashmap_atomic, cb, arg);
}
/*
* map_hm_atomic_cmd -- wrapper for hm_atomic_cmd
*/
static int
map_hm_atomic_cmd(PMEMobjpool *pop, TOID(struct map) map,
unsigned cmd, uint64_t arg)
{
TOID(struct hashmap_atomic) hashmap_atomic;
TOID_ASSIGN(hashmap_atomic, map.oid);
return hm_atomic_cmd(pop, hashmap_atomic, cmd, arg);
}
struct map_ops hashmap_atomic_ops = {
/* .check = */ map_hm_atomic_check,
/* .create = */ map_hm_atomic_create,
/* .destroy = */ NULL,
/* .init = */ map_hm_atomic_init,
/* .insert = */ map_hm_atomic_insert,
/* .insert_new = */ NULL,
/* .remove = */ map_hm_atomic_remove,
/* .remove_free = */ NULL,
/* .clear = */ NULL,
/* .get = */ map_hm_atomic_get,
/* .lookup = */ map_hm_atomic_lookup,
/* .foreach = */ map_hm_atomic_foreach,
/* .is_empty = */ NULL,
/* .count = */ map_hm_atomic_count,
/* .cmd = */ map_hm_atomic_cmd,
};
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/examples/libpmemobj/map/kv_protocol.h
/*
* Copyright 2015-2016, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* kv_protocol.h -- kv store text protocol
*/
#ifndef KV_PROTOCOL_H
#define KV_PROTOCOL_H
#include <stdint.h>
#define MAX_KEY_LEN 255
/*
* All client messages must start with a valid message token and be terminated
* by a newline character ('\n'). The message parser is case-sensitive.
*
* Server responds with newline terminated string literals.
* If an invalid message token is received, RESP_MSG_UNKNOWN is sent.
*/
enum kv_cmsg {
/*
* INSERT client message
* Syntax: INSERT [key] [value]\n
*
* The key is limited to 255 characters; the size of a value is limited
* by the maximum pmemobj allocation size (~16 gigabytes).
*
* Operation adds a new key value pair to the map.
* Returns RESP_MSG_SUCCESS if successful or RESP_MSG_FAIL otherwise.
*/
CMSG_INSERT,
/*
* REMOVE client message
* Syntax: REMOVE [key]\n
*
* Operation removes a key value pair from the map.
* Returns RESP_MSG_SUCCESS if successful or RESP_MSG_FAIL otherwise.
*/
CMSG_REMOVE,
/*
* GET client message
* Syntax: GET [key]\n
*
* Operation retrieves a key value pair from the map.
* Returns the value if found or RESP_MSG_NULL otherwise.
*/
CMSG_GET,
/*
* BYE client message
* Syntax: BYE\n
*
* Operation terminates the client connection.
* No return value.
*/
CMSG_BYE,
/*
* KILL client message
* Syntax: KILL\n
*
* Operation terminates the client connection and gracefully shuts down
* the server.
* No return value.
*/
CMSG_KILL,
MAX_CMSG
};
enum resp_messages {
RESP_MSG_SUCCESS,
RESP_MSG_FAIL,
RESP_MSG_NULL,
RESP_MSG_UNKNOWN,
MAX_RESP_MSG
};
static const char *resp_msg[MAX_RESP_MSG] = {
[RESP_MSG_SUCCESS] = "SUCCESS\n",
[RESP_MSG_FAIL] = "FAIL\n",
[RESP_MSG_NULL] = "NULL\n",
[RESP_MSG_UNKNOWN] = "UNKNOWN\n"
};
static const char *kv_cmsg_token[MAX_CMSG] = {
[CMSG_INSERT] = "INSERT",
[CMSG_REMOVE] = "REMOVE",
[CMSG_GET] = "GET",
[CMSG_BYE] = "BYE",
[CMSG_KILL] = "KILL"
};
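/*
 * Example session (illustration only; the keys and values are made up):
 *
 * client: INSERT foo bar\n
 * server: SUCCESS\n
 * client: GET foo\n
 * server: bar\n
 * client: GET baz\n
 * server: NULL\n
 * client: BYE\n
 */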
#endif /* KV_PROTOCOL_H */
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/examples/libpmemobj/map/map_ctree.h
/*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* map_ctree.h -- common interface for maps
*/
#ifndef MAP_CTREE_H
#define MAP_CTREE_H
#include "map.h"
#ifdef __cplusplus
extern "C" {
#endif
extern struct map_ops ctree_map_ops;
#define MAP_CTREE (&ctree_map_ops)
#ifdef __cplusplus
}
#endif
#endif /* MAP_CTREE_H */
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/examples/libpmemobj/map/map_rbtree.c
/*
* Copyright 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* map_rbtree.c -- common interface for maps
*/
#include <map.h>
#include <rbtree_map.h>
#include "map_rbtree.h"
/*
* map_rbtree_check -- wrapper for rbtree_map_check
*/
static int
map_rbtree_check(PMEMobjpool *pop, TOID(struct map) map)
{
TOID(struct rbtree_map) rbtree_map;
TOID_ASSIGN(rbtree_map, map.oid);
return rbtree_map_check(pop, rbtree_map);
}
/*
* map_rbtree_create -- wrapper for rbtree_map_create
*/
static int
map_rbtree_create(PMEMobjpool *pop, TOID(struct map) *map, void *arg)
{
TOID(struct rbtree_map) *rbtree_map =
(TOID(struct rbtree_map) *)map;
return rbtree_map_create(pop, rbtree_map, arg);
}
/*
* map_rbtree_destroy -- wrapper for rbtree_map_destroy
*/
static int
map_rbtree_destroy(PMEMobjpool *pop, TOID(struct map) *map)
{
TOID(struct rbtree_map) *rbtree_map =
(TOID(struct rbtree_map) *)map;
return rbtree_map_destroy(pop, rbtree_map);
}
/*
* map_rbtree_insert -- wrapper for rbtree_map_insert
*/
static int
map_rbtree_insert(PMEMobjpool *pop, TOID(struct map) map,
uint64_t key, PMEMoid value)
{
TOID(struct rbtree_map) rbtree_map;
TOID_ASSIGN(rbtree_map, map.oid);
return rbtree_map_insert(pop, rbtree_map, key, value);
}
/*
* map_rbtree_insert_new -- wrapper for rbtree_map_insert_new
*/
static int
map_rbtree_insert_new(PMEMobjpool *pop, TOID(struct map) map,
uint64_t key, size_t size,
unsigned type_num,
void (*constructor)(PMEMobjpool *pop, void *ptr, void *arg),
void *arg)
{
TOID(struct rbtree_map) rbtree_map;
TOID_ASSIGN(rbtree_map, map.oid);
return rbtree_map_insert_new(pop, rbtree_map, key, size,
type_num, constructor, arg);
}
/*
* map_rbtree_remove -- wrapper for rbtree_map_remove
*/
static PMEMoid
map_rbtree_remove(PMEMobjpool *pop, TOID(struct map) map, uint64_t key)
{
TOID(struct rbtree_map) rbtree_map;
TOID_ASSIGN(rbtree_map, map.oid);
return rbtree_map_remove(pop, rbtree_map, key);
}
/*
* map_rbtree_remove_free -- wrapper for rbtree_map_remove_free
*/
static int
map_rbtree_remove_free(PMEMobjpool *pop, TOID(struct map) map, uint64_t key)
{
TOID(struct rbtree_map) rbtree_map;
TOID_ASSIGN(rbtree_map, map.oid);
return rbtree_map_remove_free(pop, rbtree_map, key);
}
/*
* map_rbtree_clear -- wrapper for rbtree_map_clear
*/
static int
map_rbtree_clear(PMEMobjpool *pop, TOID(struct map) map)
{
TOID(struct rbtree_map) rbtree_map;
TOID_ASSIGN(rbtree_map, map.oid);
return rbtree_map_clear(pop, rbtree_map);
}
/*
* map_rbtree_get -- wrapper for rbtree_map_get
*/
static PMEMoid
map_rbtree_get(PMEMobjpool *pop, TOID(struct map) map, uint64_t key)
{
TOID(struct rbtree_map) rbtree_map;
TOID_ASSIGN(rbtree_map, map.oid);
return rbtree_map_get(pop, rbtree_map, key);
}
/*
* map_rbtree_lookup -- wrapper for rbtree_map_lookup
*/
static int
map_rbtree_lookup(PMEMobjpool *pop, TOID(struct map) map, uint64_t key)
{
TOID(struct rbtree_map) rbtree_map;
TOID_ASSIGN(rbtree_map, map.oid);
return rbtree_map_lookup(pop, rbtree_map, key);
}
/*
* map_rbtree_foreach -- wrapper for rbtree_map_foreach
*/
static int
map_rbtree_foreach(PMEMobjpool *pop, TOID(struct map) map,
int (*cb)(uint64_t key, PMEMoid value, void *arg),
void *arg)
{
TOID(struct rbtree_map) rbtree_map;
TOID_ASSIGN(rbtree_map, map.oid);
return rbtree_map_foreach(pop, rbtree_map, cb, arg);
}
/*
* map_rbtree_is_empty -- wrapper for rbtree_map_is_empty
*/
static int
map_rbtree_is_empty(PMEMobjpool *pop, TOID(struct map) map)
{
TOID(struct rbtree_map) rbtree_map;
TOID_ASSIGN(rbtree_map, map.oid);
return rbtree_map_is_empty(pop, rbtree_map);
}
struct map_ops rbtree_map_ops = {
/* .check = */ map_rbtree_check,
/* .create = */ map_rbtree_create,
/* .destroy = */ map_rbtree_destroy,
/* .init = */ NULL,
/* .insert = */ map_rbtree_insert,
/* .insert_new = */ map_rbtree_insert_new,
/* .remove = */ map_rbtree_remove,
/* .remove_free = */ map_rbtree_remove_free,
/* .clear = */ map_rbtree_clear,
/* .get = */ map_rbtree_get,
/* .lookup = */ map_rbtree_lookup,
/* .foreach = */ map_rbtree_foreach,
/* .is_empty = */ map_rbtree_is_empty,
/* .count = */ NULL,
/* .cmd = */ NULL,
};
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/examples/libpmemobj/string_store_tx/writer.c
/*
* Copyright 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* writer.c -- example from introduction part 2
*/
#include <stdio.h>
#include <string.h>
#include <libpmemobj.h>
#include "layout.h"
int
main(int argc, char *argv[])
{
if (argc != 2) {
printf("usage: %s file-name\n", argv[0]);
return 1;
}
PMEMobjpool *pop = pmemobj_create(argv[1], LAYOUT_NAME,
PMEMOBJ_MIN_POOL, 0666);
if (pop == NULL) {
perror("pmemobj_create");
return 1;
}
PMEMoid root = pmemobj_root(pop, sizeof(struct my_root));
struct my_root *rootp = pmemobj_direct(root);
char buf[MAX_BUF_LEN] = {0};
if (scanf("%9s", buf) == EOF) {
fprintf(stderr, "EOF\n");
return 1;
}
TX_BEGIN(pop) {
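/*
 * Snapshot the entire root object first; if the transaction aborts,
 * its previous contents are restored automatically.
 */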
pmemobj_tx_add_range(root, 0, sizeof(struct my_root));
memcpy(rootp->buf, buf, strlen(buf));
} TX_END
pmemobj_close(pop);
return 0;
}
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/examples/libpmemobj/string_store_tx/reader.c
/*
* Copyright 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* reader.c -- example from introduction part 2
*/
#include <stdio.h>
#include <string.h>
#include <libpmemobj.h>
#include "layout.h"
int
main(int argc, char *argv[])
{
if (argc != 2) {
printf("usage: %s file-name\n", argv[0]);
return 1;
}
PMEMobjpool *pop = pmemobj_open(argv[1], LAYOUT_NAME);
if (pop == NULL) {
perror("pmemobj_open");
return 1;
}
PMEMoid root = pmemobj_root(pop, sizeof(struct my_root));
struct my_root *rootp = pmemobj_direct(root);
printf("%s\n", rootp->buf);
pmemobj_close(pop);
return 0;
}
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/examples/libpmemobj/string_store_tx/layout.h
/*
* Copyright 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* layout.h -- example from introduction part 2
*/
#define LAYOUT_NAME "intro_2"
#define MAX_BUF_LEN 10
struct my_root {
char buf[MAX_BUF_LEN];
};
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/examples/libpmemobj/string_store/writer.c
/*
* Copyright 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* writer.c -- example from introduction part 1
*/
#include <stdio.h>
#include <string.h>
#include <libpmemobj.h>
#include "layout.h"
int
main(int argc, char *argv[])
{
if (argc != 2) {
printf("usage: %s file-name\n", argv[0]);
return 1;
}
PMEMobjpool *pop = pmemobj_create(argv[1], LAYOUT_NAME,
PMEMOBJ_MIN_POOL, 0666);
if (pop == NULL) {
perror("pmemobj_create");
return 1;
}
PMEMoid root = pmemobj_root(pop, sizeof(struct my_root));
struct my_root *rootp = pmemobj_direct(root);
char buf[MAX_BUF_LEN] = {0};
if (scanf("%9s", buf) == EOF) {
fprintf(stderr, "EOF\n");
return 1;
}
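/*
 * Store and persist the length first, then the string itself. The
 * reader treats the buffer as valid only when the stored length
 * matches strlen() of the stored string.
 */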
rootp->len = strlen(buf);
pmemobj_persist(pop, &rootp->len, sizeof(rootp->len));
pmemobj_memcpy_persist(pop, rootp->buf, buf, rootp->len);
pmemobj_close(pop);
return 0;
}
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/examples/libpmemobj/string_store/reader.c
/*
* Copyright 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* reader.c -- example from introduction part 1
*/
#include <stdio.h>
#include <string.h>
#include <libpmemobj.h>
#include "layout.h"
int
main(int argc, char *argv[])
{
if (argc != 2) {
printf("usage: %s file-name\n", argv[0]);
return 1;
}
PMEMobjpool *pop = pmemobj_open(argv[1], LAYOUT_NAME);
if (pop == NULL) {
perror("pmemobj_open");
return 1;
}
PMEMoid root = pmemobj_root(pop, sizeof(struct my_root));
struct my_root *rootp = pmemobj_direct(root);
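/* print the string only if its stored length matches its actual length */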
if (rootp->len == strlen(rootp->buf))
printf("%s\n", rootp->buf);
pmemobj_close(pop);
return 0;
}
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/examples/libpmemobj/string_store/layout.h
/*
* Copyright 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* layout.h -- example from introduction part 1
*/
#define LAYOUT_NAME "intro_1"
#define MAX_BUF_LEN 10
struct my_root {
size_t len;
char buf[MAX_BUF_LEN];
};
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/examples/libpmemobj/tree_map/ctree_map.c
/*
* Copyright 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* ctree_map.c -- Crit-bit trie implementation
*/
#include <ex_common.h>
#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include "ctree_map.h"
#define BIT_IS_SET(n, i) (!!((n) & (1ULL << (i))))
TOID_DECLARE(struct tree_map_node, CTREE_MAP_TYPE_OFFSET + 1);
struct tree_map_entry {
uint64_t key;
PMEMoid slot;
};
struct tree_map_node {
int diff; /* most significant differing bit */
struct tree_map_entry entries[2];
};
struct ctree_map {
struct tree_map_entry root;
};
/*
* find_crit_bit -- (internal) finds the most significant differing bit
*/
static int
find_crit_bit(uint64_t lhs, uint64_t rhs)
{
return find_last_set_64(lhs ^ rhs);
}
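/*
 * Example: for keys 8 (0b1000) and 10 (0b1010) the XOR is 0b0010, so
 * the critical bit is 1 -- assuming find_last_set_64() (from
 * ex_common.h) returns the 0-based index of the most significant set
 * bit.
 */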
/*
* ctree_map_create -- allocates a new crit-bit tree instance
*/
int
ctree_map_create(PMEMobjpool *pop, TOID(struct ctree_map) *map, void *arg)
{
int ret = 0;
TX_BEGIN(pop) {
pmemobj_tx_add_range_direct(map, sizeof(*map));
*map = TX_ZNEW(struct ctree_map);
} TX_ONABORT {
ret = 1;
} TX_END
return ret;
}
/*
* ctree_map_clear_node -- (internal) clears this node and its children
*/
static void
ctree_map_clear_node(PMEMoid p)
{
if (OID_IS_NULL(p))
return;
if (OID_INSTANCEOF(p, struct tree_map_node)) {
TOID(struct tree_map_node) node;
TOID_ASSIGN(node, p);
ctree_map_clear_node(D_RW(node)->entries[0].slot);
ctree_map_clear_node(D_RW(node)->entries[1].slot);
}
pmemobj_tx_free(p);
}
/*
* ctree_map_clear -- removes all elements from the map
*/
int
ctree_map_clear(PMEMobjpool *pop, TOID(struct ctree_map) map)
{
TX_BEGIN(pop) {
ctree_map_clear_node(D_RW(map)->root.slot);
TX_ADD_FIELD(map, root);
D_RW(map)->root.slot = OID_NULL;
} TX_END
return 0;
}
/*
* ctree_map_destroy -- cleanups and frees crit-bit tree instance
*/
int
ctree_map_destroy(PMEMobjpool *pop, TOID(struct ctree_map) *map)
{
int ret = 0;
TX_BEGIN(pop) {
ctree_map_clear(pop, *map);
pmemobj_tx_add_range_direct(map, sizeof(*map));
TX_FREE(*map);
*map = TOID_NULL(struct ctree_map);
} TX_ONABORT {
ret = 1;
} TX_END
return ret;
}
/*
* ctree_map_insert_leaf -- (internal) inserts a new leaf at the position
*/
static void
ctree_map_insert_leaf(struct tree_map_entry *p,
struct tree_map_entry e, int diff)
{
TOID(struct tree_map_node) new_node = TX_NEW(struct tree_map_node);
D_RW(new_node)->diff = diff;
int d = BIT_IS_SET(e.key, D_RO(new_node)->diff);
/* insert the leaf in the direction given by the critical bit */
D_RW(new_node)->entries[d] = e;
/* find the appropriate position in the tree to insert the node */
TOID(struct tree_map_node) node;
while (OID_INSTANCEOF(p->slot, struct tree_map_node)) {
TOID_ASSIGN(node, p->slot);
/* the critical bits have to be sorted */
if (D_RO(node)->diff < D_RO(new_node)->diff)
break;
p = &D_RW(node)->entries[BIT_IS_SET(e.key, D_RO(node)->diff)];
}
/* insert the found destination in the other slot */
D_RW(new_node)->entries[!d] = *p;
pmemobj_tx_add_range_direct(p, sizeof(*p));
p->key = 0;
p->slot = new_node.oid;
}
/*
* ctree_map_insert_new -- allocates a new object and inserts it into the tree
*/
int
ctree_map_insert_new(PMEMobjpool *pop, TOID(struct ctree_map) map,
uint64_t key, size_t size, unsigned type_num,
void (*constructor)(PMEMobjpool *pop, void *ptr, void *arg),
void *arg)
{
int ret = 0;
TX_BEGIN(pop) {
PMEMoid n = pmemobj_tx_alloc(size, type_num);
constructor(pop, pmemobj_direct(n), arg);
ctree_map_insert(pop, map, key, n);
} TX_ONABORT {
ret = 1;
} TX_END
return ret;
}
/*
* ctree_map_insert -- inserts a new key-value pair into the map
*/
int
ctree_map_insert(PMEMobjpool *pop, TOID(struct ctree_map) map,
uint64_t key, PMEMoid value)
{
struct tree_map_entry *p = &D_RW(map)->root;
int ret = 0;
/* descend the path until a best matching key is found */
TOID(struct tree_map_node) node;
while (!OID_IS_NULL(p->slot) &&
OID_INSTANCEOF(p->slot, struct tree_map_node)) {
TOID_ASSIGN(node, p->slot);
p = &D_RW(node)->entries[BIT_IS_SET(key, D_RW(node)->diff)];
}
struct tree_map_entry e = {key, value};
TX_BEGIN(pop) {
if (p->key == 0 || p->key == key) {
pmemobj_tx_add_range_direct(p, sizeof(*p));
*p = e;
} else {
ctree_map_insert_leaf(&D_RW(map)->root, e,
find_crit_bit(p->key, key));
}
} TX_ONABORT {
ret = 1;
} TX_END
return ret;
}
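/*
 * Worked example (keys made up): inserting key 8 into an empty map
 * stores {8, value} directly in the root entry. Inserting key 10 next
 * reaches the leaf 8, computes find_crit_bit(8, 10) == 1, and
 * ctree_map_insert_leaf() allocates an internal node with diff == 1,
 * entries[1] == {10, ...} and entries[0] == the old root entry {8, ...}.
 */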
/*
* ctree_map_get_leaf -- (internal) searches for a leaf of the key
*/
static struct tree_map_entry *
ctree_map_get_leaf(TOID(struct ctree_map) map, uint64_t key,
struct tree_map_entry **parent)
{
struct tree_map_entry *n = &D_RW(map)->root;
struct tree_map_entry *p = NULL;
TOID(struct tree_map_node) node;
while (!OID_IS_NULL(n->slot) &&
OID_INSTANCEOF(n->slot, struct tree_map_node)) {
TOID_ASSIGN(node, n->slot);
p = n;
n = &D_RW(node)->entries[BIT_IS_SET(key, D_RW(node)->diff)];
}
if (n->key == key) {
if (parent)
*parent = p;
return n;
}
return NULL;
}
/*
* ctree_map_remove_free -- removes and frees an object from the tree
*/
int
ctree_map_remove_free(PMEMobjpool *pop, TOID(struct ctree_map) map,
uint64_t key)
{
int ret = 0;
TX_BEGIN(pop) {
PMEMoid val = ctree_map_remove(pop, map, key);
pmemobj_tx_free(val);
} TX_ONABORT {
ret = 1;
} TX_END
return ret;
}
/*
* ctree_map_remove -- removes key-value pair from the map
*/
PMEMoid
ctree_map_remove(PMEMobjpool *pop, TOID(struct ctree_map) map, uint64_t key)
{
struct tree_map_entry *parent = NULL;
struct tree_map_entry *leaf = ctree_map_get_leaf(map, key, &parent);
if (leaf == NULL)
return OID_NULL;
PMEMoid ret = leaf->slot;
if (parent == NULL) { /* root */
TX_BEGIN(pop) {
pmemobj_tx_add_range_direct(leaf, sizeof(*leaf));
leaf->key = 0;
leaf->slot = OID_NULL;
} TX_END
} else {
/*
* In this situation:
* parent
* / \
* LEFT RIGHT
* there's no point in leaving the parent internal node
* so it's swapped with the remaining node and then also freed.
*/
TX_BEGIN(pop) {
struct tree_map_entry *dest = parent;
TOID(struct tree_map_node) node;
TOID_ASSIGN(node, parent->slot);
pmemobj_tx_add_range_direct(dest, sizeof(*dest));
*dest = D_RW(node)->entries[
D_RO(node)->entries[0].key == leaf->key];
TX_FREE(node);
} TX_END
}
return ret;
}
/*
* ctree_map_get -- searches for a value of the key
*/
PMEMoid
ctree_map_get(PMEMobjpool *pop, TOID(struct ctree_map) map, uint64_t key)
{
struct tree_map_entry *entry = ctree_map_get_leaf(map, key, NULL);
return entry ? entry->slot : OID_NULL;
}
/*
* ctree_map_lookup -- searches if a key exists
*/
int
ctree_map_lookup(PMEMobjpool *pop, TOID(struct ctree_map) map,
uint64_t key)
{
struct tree_map_entry *entry = ctree_map_get_leaf(map, key, NULL);
return entry != NULL;
}
/*
* ctree_map_foreach_node -- (internal) recursively traverses tree
*/
static int
ctree_map_foreach_node(struct tree_map_entry e,
int (*cb)(uint64_t key, PMEMoid value, void *arg), void *arg)
{
int ret = 0;
if (OID_INSTANCEOF(e.slot, struct tree_map_node)) {
TOID(struct tree_map_node) node;
TOID_ASSIGN(node, e.slot);
/* propagate a nonzero (stop) result from the callback upward */
ret = ctree_map_foreach_node(D_RO(node)->entries[0], cb, arg);
if (ret == 0)
ret = ctree_map_foreach_node(D_RO(node)->entries[1],
cb, arg);
} else { /* leaf */
ret = cb(e.key, e.slot, arg);
}
return ret;
}
/*
* ctree_map_foreach -- initiates recursive traversal
*/
int
ctree_map_foreach(PMEMobjpool *pop, TOID(struct ctree_map) map,
int (*cb)(uint64_t key, PMEMoid value, void *arg), void *arg)
{
if (OID_IS_NULL(D_RO(map)->root.slot))
return 0;
return ctree_map_foreach_node(D_RO(map)->root, cb, arg);
}
/*
* ctree_map_is_empty -- checks whether the tree map is empty
*/
int
ctree_map_is_empty(PMEMobjpool *pop, TOID(struct ctree_map) map)
{
return D_RO(map)->root.key == 0;
}
/*
* ctree_map_check -- check if given persistent object is a tree map
*/
int
ctree_map_check(PMEMobjpool *pop, TOID(struct ctree_map) map)
{
return TOID_IS_NULL(map) || !TOID_VALID(map);
}
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/examples/libpmemobj/tree_map/ctree_map.h
/*
* Copyright 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* ctree_map.h -- TreeMap sorted collection implementation
*/
#ifndef CTREE_MAP_H
#define CTREE_MAP_H
#include <libpmemobj.h>
#ifndef CTREE_MAP_TYPE_OFFSET
#define CTREE_MAP_TYPE_OFFSET 1008
#endif
struct ctree_map;
TOID_DECLARE(struct ctree_map, CTREE_MAP_TYPE_OFFSET + 0);
int ctree_map_check(PMEMobjpool *pop, TOID(struct ctree_map) map);
int ctree_map_create(PMEMobjpool *pop, TOID(struct ctree_map) *map, void *arg);
int ctree_map_destroy(PMEMobjpool *pop, TOID(struct ctree_map) *map);
int ctree_map_insert(PMEMobjpool *pop, TOID(struct ctree_map) map,
uint64_t key, PMEMoid value);
int ctree_map_insert_new(PMEMobjpool *pop, TOID(struct ctree_map) map,
uint64_t key, size_t size, unsigned type_num,
void (*constructor)(PMEMobjpool *pop, void *ptr, void *arg),
void *arg);
PMEMoid ctree_map_remove(PMEMobjpool *pop, TOID(struct ctree_map) map,
uint64_t key);
int ctree_map_remove_free(PMEMobjpool *pop, TOID(struct ctree_map) map,
uint64_t key);
int ctree_map_clear(PMEMobjpool *pop, TOID(struct ctree_map) map);
PMEMoid ctree_map_get(PMEMobjpool *pop, TOID(struct ctree_map) map,
uint64_t key);
int ctree_map_lookup(PMEMobjpool *pop, TOID(struct ctree_map) map,
uint64_t key);
int ctree_map_foreach(PMEMobjpool *pop, TOID(struct ctree_map) map,
int (*cb)(uint64_t key, PMEMoid value, void *arg), void *arg);
int ctree_map_is_empty(PMEMobjpool *pop, TOID(struct ctree_map) map);
#endif /* CTREE_MAP_H */
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/examples/libpmemobj/tree_map/rtree_map.c
/*
* Copyright 2016-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* rtree_map.c -- implementation of rtree
*/
#include <ex_common.h>
#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include <stdbool.h>
#include "rtree_map.h"
TOID_DECLARE(struct tree_map_node, RTREE_MAP_TYPE_OFFSET + 1);
/* Good values: 0x10 and 0x100, but the implementation is bound to 0x100 */
#ifndef ALPHABET_SIZE
#define ALPHABET_SIZE 0x100
#endif
struct tree_map_node {
TOID(struct tree_map_node) slots[ALPHABET_SIZE];
unsigned has_value;
PMEMoid value;
uint64_t key_size;
unsigned char key[];
};
struct rtree_map {
TOID(struct tree_map_node) root;
};
/*
* rtree_map_create -- allocates a new rtree instance
*/
int
rtree_map_create(PMEMobjpool *pop, TOID(struct rtree_map) *map, void *arg)
{
int ret = 0;
TX_BEGIN(pop) {
TX_ADD_DIRECT(map);
*map = TX_ZNEW(struct rtree_map);
} TX_ONABORT {
ret = 1;
} TX_END
return ret;
}
/*
* rtree_map_clear_node -- (internal) removes all elements from the node
*/
static void
rtree_map_clear_node(TOID(struct tree_map_node) node)
{
if (TOID_IS_NULL(node))
return; /* empty slot -- nothing to clear */
for (unsigned i = 0; i < ALPHABET_SIZE; i++) {
rtree_map_clear_node(D_RO(node)->slots[i]);
}
pmemobj_tx_add_range(node.oid, 0,
sizeof(struct tree_map_node) + D_RO(node)->key_size);
TX_FREE(node);
}
/*
* rtree_map_clear -- removes all elements from the map
*/
int
rtree_map_clear(PMEMobjpool *pop, TOID(struct rtree_map) map)
{
int ret = 0;
TX_BEGIN(pop) {
rtree_map_clear_node(D_RO(map)->root);
TX_ADD_FIELD(map, root);
D_RW(map)->root = TOID_NULL(struct tree_map_node);
} TX_ONABORT {
ret = 1;
} TX_END
return ret;
}
/*
* rtree_map_destroy -- cleanups and frees rtree instance
*/
int
rtree_map_destroy(PMEMobjpool *pop, TOID(struct rtree_map) *map)
{
int ret = 0;
TX_BEGIN(pop) {
rtree_map_clear(pop, *map);
TX_ADD_DIRECT(map);
TX_FREE(*map);
*map = TOID_NULL(struct rtree_map);
} TX_ONABORT {
ret = 1;
} TX_END
return ret;
}
/*
* rtree_new_node -- (internal) allocates and initializes a new node
*/
static TOID(struct tree_map_node)
rtree_new_node(const unsigned char *key, uint64_t key_size,
PMEMoid value, unsigned has_value)
{
TOID(struct tree_map_node) node;
node = TX_ZALLOC(struct tree_map_node,
sizeof(struct tree_map_node) + key_size);
/*
 * No snapshotting is needed for these stores: the node was just
 * allocated inside this transaction, so on abort the allocation
 * itself is rolled back.
 */
D_RW(node)->value = value;
D_RW(node)->has_value = has_value;
D_RW(node)->key_size = key_size;
memcpy(D_RW(node)->key, key, key_size);
return node;
}
/*
* rtree_map_insert_empty -- (internal) inserts a node into an empty map
*/
static void
rtree_map_insert_empty(TOID(struct rtree_map) map,
const unsigned char *key, uint64_t key_size, PMEMoid value)
{
TX_ADD_FIELD(map, root);
D_RW(map)->root = rtree_new_node(key, key_size, value, 1);
}
/*
* key_comm_len -- (internal) calculates the length of the common prefix of keys
*/
static unsigned
key_comm_len(TOID(struct tree_map_node) node,
const unsigned char *key, uint64_t key_size)
{
unsigned i;
for (i = 0;
i < MIN(key_size, D_RO(node)->key_size) &&
key[i] == D_RO(node)->key[i];
i++)
;
return i;
}
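/*
 * Example (hypothetical keys): for a node with key "foobar" and a
 * lookup key "food", key_comm_len() returns 3 -- the length of the
 * shared prefix "foo".
 */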
/*
* rtree_map_insert_value -- (internal) inserts a pair into a tree
*/
static void
rtree_map_insert_value(TOID(struct tree_map_node) *node,
const unsigned char *key, uint64_t key_size, PMEMoid value)
{
unsigned i;
if (TOID_IS_NULL(*node)) {
TX_ADD_DIRECT(node);
*node = rtree_new_node(key, key_size, value, 1);
return;
}
i = key_comm_len(*node, key, key_size);
if (i != D_RO(*node)->key_size) {
/* Node does not exist. Let's add. */
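/*
 * Split example (hypothetical keys): node key "foobar", new key
 * "food" -> common prefix length i == 3, so a valueless parent with
 * key "foo" replaces the node; the old node keeps "bar" under slot
 * 'b' and a new leaf "d" holding the value goes under slot 'd'.
 */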
TOID(struct tree_map_node) orig_node = *node;
TX_ADD_DIRECT(node);
if (i != key_size) {
*node = rtree_new_node(D_RO(orig_node)->key, i,
OID_NULL, 0);
} else {
*node = rtree_new_node(D_RO(orig_node)->key, i,
value, 1);
}
D_RW(*node)->slots[D_RO(orig_node)->key[i]] = orig_node;
TX_ADD_FIELD(orig_node, key_size);
D_RW(orig_node)->key_size -= i;
pmemobj_tx_add_range_direct(D_RW(orig_node)->key,
D_RO(orig_node)->key_size);
memmove(D_RW(orig_node)->key, D_RO(orig_node)->key + i,
D_RO(orig_node)->key_size);
if (i != key_size) {
D_RW(*node)->slots[key[i]] =
rtree_new_node(key + i, key_size - i, value, 1);
}
return;
}
if (i == key_size) {
if (OID_IS_NULL(D_RO(*node)->value) || D_RO(*node)->has_value) {
/* Just replace old value with new */
TX_ADD_FIELD(*node, value);
TX_ADD_FIELD(*node, has_value);
D_RW(*node)->value = value;
D_RW(*node)->has_value = 1;
} else {
/*
* Ignore the new value: the current one should have been
* removed in advance, or must be handled in a different way.
*/
}
} else {
/* Recurse deeply */
return rtree_map_insert_value(&D_RW(*node)->slots[key[i]],
key + i, key_size - i, value);
}
}
/*
* rtree_map_is_empty -- checks whether the tree map is empty
*/
int
rtree_map_is_empty(PMEMobjpool *pop, TOID(struct rtree_map) map)
{
return TOID_IS_NULL(D_RO(map)->root);
}
/*
* rtree_map_insert -- inserts a new key-value pair into the map
*/
int
rtree_map_insert(PMEMobjpool *pop, TOID(struct rtree_map) map,
const unsigned char *key, uint64_t key_size, PMEMoid value)
{
int ret = 0;
TX_BEGIN(pop) {
if (rtree_map_is_empty(pop, map)) {
rtree_map_insert_empty(map, key, key_size, value);
} else {
rtree_map_insert_value(&D_RW(map)->root,
key, key_size, value);
}
} TX_ONABORT {
ret = 1;
} TX_END
return ret;
}
/*
* rtree_map_insert_new -- allocates a new object and inserts it into the tree
*/
int
rtree_map_insert_new(PMEMobjpool *pop, TOID(struct rtree_map) map,
const unsigned char *key, uint64_t key_size,
size_t size, unsigned type_num,
void (*constructor)(PMEMobjpool *pop, void *ptr, void *arg),
void *arg)
{
int ret = 0;
TX_BEGIN(pop) {
PMEMoid n = pmemobj_tx_alloc(size, type_num);
constructor(pop, pmemobj_direct(n), arg);
rtree_map_insert(pop, map, key, key_size, n);
} TX_ONABORT {
ret = 1;
} TX_END
return ret;
}
/*
* is_leaf -- (internal) checks whether a node has no children
*/
static bool
is_leaf(TOID(struct tree_map_node) node)
{
unsigned j;
for (j = 0;
j < ALPHABET_SIZE &&
TOID_IS_NULL(D_RO(node)->slots[j]);
j++)
;
return (j == ALPHABET_SIZE);
}
/*
* has_only_one_child -- (internal) checks whether a node has exactly one child
*/
static bool
has_only_one_child(TOID(struct tree_map_node) node, unsigned *child_idx)
{
unsigned j, child_qty;
for (j = 0, child_qty = 0; j < ALPHABET_SIZE; j++)
if (!TOID_IS_NULL(D_RO(node)->slots[j])) {
child_qty++;
*child_idx = j;
}
return (1 == child_qty);
}
/*
* remove_extra_node -- (internal) remove unneeded extra node
*/
static void
remove_extra_node(TOID(struct tree_map_node) *node)
{
unsigned child_idx;
TOID(struct tree_map_node) tmp, tmp_child;
/* The node has exactly one child; merge it with that child. */
tmp = *node;
has_only_one_child(tmp, &child_idx);
tmp_child = D_RO(tmp)->slots[child_idx];
/*
* That child's incoming label is appended to our incoming label
* and the child is removed.
*/
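/* e.g. (hypothetical keys) parent "fo" + only child "od" merge into "food" */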
uint64_t new_key_size = D_RO(tmp)->key_size + D_RO(tmp_child)->key_size;
unsigned char *new_key = (unsigned char *)malloc(new_key_size);
assert(new_key != NULL);
memcpy(new_key, D_RO(tmp)->key, D_RO(tmp)->key_size);
memcpy(new_key + D_RO(tmp)->key_size,
D_RO(tmp_child)->key,
D_RO(tmp_child)->key_size);
TX_ADD_DIRECT(node);
*node = rtree_new_node(new_key, new_key_size,
D_RO(tmp_child)->value, D_RO(tmp_child)->has_value);
free(new_key);
TX_FREE(tmp);
memcpy(D_RW(*node)->slots,
D_RO(tmp_child)->slots,
sizeof(D_RO(tmp_child)->slots));
TX_FREE(tmp_child);
}
/*
* rtree_map_remove_node -- (internal) removes node from tree
*/
static PMEMoid
rtree_map_remove_node(TOID(struct rtree_map) map,
TOID(struct tree_map_node) *node,
const unsigned char *key, uint64_t key_size,
bool *check_for_child)
{
bool c4c;
unsigned i, child_idx;
PMEMoid ret = OID_NULL;
*check_for_child = false;
if (TOID_IS_NULL(*node))
return OID_NULL;
i = key_comm_len(*node, key, key_size);
if (i != D_RO(*node)->key_size)
/* Node does not exist */
return OID_NULL;
if (i == key_size) {
if (0 == D_RO(*node)->has_value)
return OID_NULL;
/* Node is found */
ret = D_RO(*node)->value;
/* delete node from tree */
TX_ADD_FIELD((*node), value);
TX_ADD_FIELD((*node), has_value);
D_RW(*node)->value = OID_NULL;
D_RW(*node)->has_value = 0;
if (is_leaf(*node)) {
pmemobj_tx_add_range(node->oid, 0,
sizeof(*node) + D_RO(*node)->key_size);
TX_FREE(*node);
TX_ADD_DIRECT(node);
(*node) = TOID_NULL(struct tree_map_node);
}
return ret;
}
/* Recurse deeply */
ret = rtree_map_remove_node(map,
&D_RW(*node)->slots[key[i]],
key + i, key_size - i,
&c4c);
if (c4c) {
		/* The child node itself has only one child; remove the extra node. */
remove_extra_node(&D_RW(*node)->slots[key[i]]);
return ret;
}
if (has_only_one_child(*node, &child_idx) &&
(0 == D_RO(*node)->has_value)) {
*check_for_child = true;
}
return ret;
}
/*
* rtree_map_remove -- removes key-value pair from the map
*/
PMEMoid
rtree_map_remove(PMEMobjpool *pop, TOID(struct rtree_map) map,
const unsigned char *key, uint64_t key_size)
{
PMEMoid ret = OID_NULL;
bool check_for_child;
if (TOID_IS_NULL(map))
return OID_NULL;
TX_BEGIN(pop) {
ret = rtree_map_remove_node(map,
&D_RW(map)->root, key, key_size,
&check_for_child);
if (check_for_child) {
/* Our root node has only one child. Remove. */
remove_extra_node(&D_RW(map)->root);
}
} TX_END
return ret;
}
/*
* rtree_map_remove_free -- removes and frees an object from the tree
*/
int
rtree_map_remove_free(PMEMobjpool *pop, TOID(struct rtree_map) map,
const unsigned char *key, uint64_t key_size)
{
int ret = 0;
if (TOID_IS_NULL(map))
return 1;
TX_BEGIN(pop) {
pmemobj_tx_free(rtree_map_remove(pop, map, key, key_size));
} TX_ONABORT {
ret = 1;
} TX_END
return ret;
}
/*
* rtree_map_get_in_node -- (internal) searches for a value in the node
*/
static PMEMoid
rtree_map_get_in_node(TOID(struct tree_map_node) node,
const unsigned char *key, uint64_t key_size)
{
unsigned i;
if (TOID_IS_NULL(node))
return OID_NULL;
i = key_comm_len(node, key, key_size);
if (i != D_RO(node)->key_size)
/* Node does not exist */
return OID_NULL;
if (i == key_size) {
/* Node is found */
return D_RO(node)->value;
} else {
/* Recurse deeply */
return rtree_map_get_in_node(D_RO(node)->slots[key[i]],
key + i, key_size - i);
}
}
/*
* rtree_map_get -- searches for a value of the key
*/
PMEMoid
rtree_map_get(PMEMobjpool *pop, TOID(struct rtree_map) map,
const unsigned char *key, uint64_t key_size)
{
if (TOID_IS_NULL(D_RO(map)->root))
return OID_NULL;
return rtree_map_get_in_node(D_RO(map)->root, key, key_size);
}
/*
* rtree_map_lookup_in_node -- (internal) searches for key if exists
*/
static int
rtree_map_lookup_in_node(TOID(struct tree_map_node) node,
const unsigned char *key, uint64_t key_size)
{
unsigned i;
if (TOID_IS_NULL(node))
return 0;
i = key_comm_len(node, key, key_size);
if (i != D_RO(node)->key_size)
/* Node does not exist */
return 0;
if (i == key_size) {
/* Node is found */
return 1;
}
/* Recurse deeply */
return rtree_map_lookup_in_node(D_RO(node)->slots[key[i]],
key + i, key_size - i);
}
/*
* rtree_map_lookup -- searches if key exists
*/
int
rtree_map_lookup(PMEMobjpool *pop, TOID(struct rtree_map) map,
const unsigned char *key, uint64_t key_size)
{
if (TOID_IS_NULL(D_RO(map)->root))
return 0;
return rtree_map_lookup_in_node(D_RO(map)->root, key, key_size);
}
/*
* rtree_map_foreach_node -- (internal) recursively traverses tree
*/
static int
rtree_map_foreach_node(const TOID(struct tree_map_node) node,
int (*cb)(const unsigned char *key, uint64_t key_size,
PMEMoid, void *arg),
void *arg)
{
unsigned i;
if (TOID_IS_NULL(node))
return 0;
for (i = 0; i < ALPHABET_SIZE; i++) {
if (rtree_map_foreach_node(D_RO(node)->slots[i], cb, arg) != 0)
return 1;
}
if (NULL != cb) {
if (cb(D_RO(node)->key, D_RO(node)->key_size,
D_RO(node)->value, arg) != 0)
return 1;
}
return 0;
}
/*
* rtree_map_foreach -- initiates recursive traversal
*/
int
rtree_map_foreach(PMEMobjpool *pop, TOID(struct rtree_map) map,
int (*cb)(const unsigned char *key, uint64_t key_size,
PMEMoid value, void *arg),
void *arg)
{
return rtree_map_foreach_node(D_RO(map)->root, cb, arg);
}
/*
 * rtree_map_check -- check if given persistent object is a tree map
*/
int
rtree_map_check(PMEMobjpool *pop, TOID(struct rtree_map) map)
{
return TOID_IS_NULL(map) || !TOID_VALID(map);
}
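/*
 * Usage sketch (added for illustration, not part of the original file):
 * a minimal driver for the rtree_map API above. The pool path handling,
 * layout name and the example_root structure are assumptions made only
 * for this example.
 */
#ifdef RTREE_MAP_USAGE_EXAMPLE
struct example_root {
	TOID(struct rtree_map) map;
};
POBJ_LAYOUT_BEGIN(rtree_example);
POBJ_LAYOUT_ROOT(rtree_example, struct example_root);
POBJ_LAYOUT_END(rtree_example);
static int
rtree_map_example(const char *path)
{
	PMEMobjpool *pop = pmemobj_create(path,
		POBJ_LAYOUT_NAME(rtree_example), PMEMOBJ_MIN_POOL, 0666);
	if (pop == NULL)
		return 1;
	TOID(struct example_root) root = POBJ_ROOT(pop, struct example_root);
	/* create an empty map rooted in the pool's root object */
	if (rtree_map_create(pop, &D_RW(root)->map, NULL)) {
		pmemobj_close(pop);
		return 1;
	}
	/* keys are arbitrary byte strings; the trailing NUL is kept here */
	const unsigned char key[] = "example-key";
	rtree_map_insert(pop, D_RW(root)->map, key, sizeof(key), OID_NULL);
	int found = rtree_map_lookup(pop, D_RW(root)->map, key, sizeof(key));
	pmemobj_close(pop);
	return found ? 0 : 1;
}
#endif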
| 14,236 | 21.420472 | 78 | c | null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/examples/libpmemobj/tree_map/btree_map.c
/*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* btree_map.c -- textbook implementation of btree /w preemptive splitting
*/
#include <assert.h>
#include <errno.h>
#include <stdio.h>
#include "btree_map.h"
TOID_DECLARE(struct tree_map_node, BTREE_MAP_TYPE_OFFSET + 1);
#define BTREE_ORDER 8 /* can't be odd */
#define BTREE_MIN ((BTREE_ORDER / 2) - 1) /* min number of keys per node */
struct tree_map_node_item {
uint64_t key;
PMEMoid value;
};
struct tree_map_node {
int n; /* number of occupied slots */
struct tree_map_node_item items[BTREE_ORDER - 1];
TOID(struct tree_map_node) slots[BTREE_ORDER];
};
struct btree_map {
TOID(struct tree_map_node) root;
};
/*
* set_empty_item -- (internal) sets null to the item
*/
static void
set_empty_item(struct tree_map_node_item *item)
{
item->key = 0;
item->value = OID_NULL;
}
/*
* btree_map_create -- allocates a new btree instance
*/
int
btree_map_create(PMEMobjpool *pop, TOID(struct btree_map) *map, void *arg)
{
int ret = 0;
TX_BEGIN(pop) {
pmemobj_tx_add_range_direct(map, sizeof(*map));
*map = TX_ZNEW(struct btree_map);
} TX_ONABORT {
ret = 1;
} TX_END
return ret;
}
/*
* btree_map_clear_node -- (internal) removes all elements from the node
*/
static void
btree_map_clear_node(TOID(struct tree_map_node) node)
{
for (int i = 0; i < D_RO(node)->n; ++i) {
btree_map_clear_node(D_RO(node)->slots[i]);
}
TX_FREE(node);
}
/*
* btree_map_clear -- removes all elements from the map
*/
int
btree_map_clear(PMEMobjpool *pop, TOID(struct btree_map) map)
{
int ret = 0;
TX_BEGIN(pop) {
btree_map_clear_node(D_RO(map)->root);
TX_ADD_FIELD(map, root);
D_RW(map)->root = TOID_NULL(struct tree_map_node);
} TX_ONABORT {
ret = 1;
} TX_END
return ret;
}
/*
* btree_map_destroy -- cleanups and frees btree instance
*/
int
btree_map_destroy(PMEMobjpool *pop, TOID(struct btree_map) *map)
{
int ret = 0;
TX_BEGIN(pop) {
btree_map_clear(pop, *map);
pmemobj_tx_add_range_direct(map, sizeof(*map));
TX_FREE(*map);
*map = TOID_NULL(struct btree_map);
} TX_ONABORT {
ret = 1;
} TX_END
return ret;
}
/*
* btree_map_insert_item_at -- (internal) inserts an item at position
*/
static void
btree_map_insert_item_at(TOID(struct tree_map_node) node, int pos,
struct tree_map_node_item item)
{
D_RW(node)->items[pos] = item;
D_RW(node)->n += 1;
}
/*
* btree_map_insert_empty -- (internal) inserts an item into an empty node
*/
static void
btree_map_insert_empty(TOID(struct btree_map) map,
struct tree_map_node_item item)
{
TX_ADD_FIELD(map, root);
D_RW(map)->root = TX_ZNEW(struct tree_map_node);
btree_map_insert_item_at(D_RO(map)->root, 0, item);
}
/*
* btree_map_insert_node -- (internal) inserts and makes space for new node
*/
static void
btree_map_insert_node(TOID(struct tree_map_node) node, int p,
struct tree_map_node_item item,
TOID(struct tree_map_node) left, TOID(struct tree_map_node) right)
{
TX_ADD(node);
if (D_RO(node)->items[p].key != 0) { /* move all existing data */
memmove(&D_RW(node)->items[p + 1], &D_RW(node)->items[p],
sizeof(struct tree_map_node_item) * ((BTREE_ORDER - 2 - p)));
memmove(&D_RW(node)->slots[p + 1], &D_RW(node)->slots[p],
sizeof(TOID(struct tree_map_node)) * ((BTREE_ORDER - 1 - p)));
}
D_RW(node)->slots[p] = left;
D_RW(node)->slots[p + 1] = right;
btree_map_insert_item_at(node, p, item);
}
/*
* btree_map_create_split_node -- (internal) splits a node into two
*/
static TOID(struct tree_map_node)
btree_map_create_split_node(TOID(struct tree_map_node) node,
struct tree_map_node_item *m)
{
TOID(struct tree_map_node) right = TX_ZNEW(struct tree_map_node);
int c = (BTREE_ORDER / 2);
*m = D_RO(node)->items[c - 1]; /* select median item */
TX_ADD(node);
set_empty_item(&D_RW(node)->items[c - 1]);
/* move everything right side of median to the new node */
for (int i = c; i < BTREE_ORDER; ++i) {
if (i != BTREE_ORDER - 1) {
D_RW(right)->items[D_RW(right)->n++] =
D_RO(node)->items[i];
set_empty_item(&D_RW(node)->items[i]);
}
D_RW(right)->slots[i - c] = D_RO(node)->slots[i];
D_RW(node)->slots[i] = TOID_NULL(struct tree_map_node);
}
D_RW(node)->n = c - 1;
return right;
}
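/*
 * Worked example (added for clarity): with BTREE_ORDER 8 a full node
 * holds 7 items, so c == 4 and items[3] is handed back through *m as the
 * median. items[0..2] stay in place (n drops to 3), while items[4..6]
 * and slots[4..7] migrate to the freshly allocated right node.
 */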
/*
* btree_map_find_dest_node -- (internal) finds a place to insert the new key at
*/
static TOID(struct tree_map_node)
btree_map_find_dest_node(TOID(struct btree_map) map,
TOID(struct tree_map_node) n, TOID(struct tree_map_node) parent,
uint64_t key, int *p)
{
if (D_RO(n)->n == BTREE_ORDER - 1) { /* node is full, perform a split */
struct tree_map_node_item m;
TOID(struct tree_map_node) right =
btree_map_create_split_node(n, &m);
if (!TOID_IS_NULL(parent)) {
btree_map_insert_node(parent, *p, m, n, right);
if (key > m.key) /* select node to continue search */
n = right;
} else { /* replacing root node, the tree grows in height */
TOID(struct tree_map_node) up =
TX_ZNEW(struct tree_map_node);
D_RW(up)->n = 1;
D_RW(up)->items[0] = m;
D_RW(up)->slots[0] = n;
D_RW(up)->slots[1] = right;
TX_ADD_FIELD(map, root);
D_RW(map)->root = up;
n = up;
}
}
int i;
for (i = 0; i < BTREE_ORDER - 1; ++i) {
*p = i;
/*
* The key either fits somewhere in the middle or at the
* right edge of the node.
*/
if (D_RO(n)->n == i || D_RO(n)->items[i].key > key) {
return TOID_IS_NULL(D_RO(n)->slots[i]) ? n :
btree_map_find_dest_node(map,
D_RO(n)->slots[i], n, key, p);
}
}
/*
* The key is bigger than the last node element, go one level deeper
* in the rightmost child.
*/
return btree_map_find_dest_node(map, D_RO(n)->slots[i], n, key, p);
}
/*
* btree_map_insert_item -- (internal) inserts and makes space for new item
*/
static void
btree_map_insert_item(TOID(struct tree_map_node) node, int p,
struct tree_map_node_item item)
{
TX_ADD(node);
if (D_RO(node)->items[p].key != 0) {
memmove(&D_RW(node)->items[p + 1], &D_RW(node)->items[p],
sizeof(struct tree_map_node_item) * ((BTREE_ORDER - 2 - p)));
}
btree_map_insert_item_at(node, p, item);
}
/*
* btree_map_is_empty -- checks whether the tree map is empty
*/
int
btree_map_is_empty(PMEMobjpool *pop, TOID(struct btree_map) map)
{
return TOID_IS_NULL(D_RO(map)->root) || D_RO(D_RO(map)->root)->n == 0;
}
/*
* btree_map_insert -- inserts a new key-value pair into the map
*/
int
btree_map_insert(PMEMobjpool *pop, TOID(struct btree_map) map,
uint64_t key, PMEMoid value)
{
	struct tree_map_node_item item = {key, value};
	int ret = 0;
	TX_BEGIN(pop) {
		if (btree_map_is_empty(pop, map)) {
			btree_map_insert_empty(map, item);
		} else {
			int p; /* position at the dest node to insert */
			TOID(struct tree_map_node) parent =
				TOID_NULL(struct tree_map_node);
			TOID(struct tree_map_node) dest =
				btree_map_find_dest_node(map, D_RW(map)->root,
					parent, key, &p);
			btree_map_insert_item(dest, p, item);
		}
	} TX_ONABORT {
		ret = 1;	/* report an aborted transaction to the caller */
	} TX_END
	return ret;
}
/*
* btree_map_rotate_right -- (internal) takes one element from right sibling
*/
static void
btree_map_rotate_right(TOID(struct tree_map_node) rsb,
TOID(struct tree_map_node) node,
TOID(struct tree_map_node) parent, int p)
{
/* move the separator from parent to the deficient node */
struct tree_map_node_item sep = D_RO(parent)->items[p];
btree_map_insert_item(node, D_RO(node)->n, sep);
/* the first element of the right sibling is the new separator */
TX_ADD_FIELD(parent, items[p]);
D_RW(parent)->items[p] = D_RO(rsb)->items[0];
/* the nodes are not necessarily leafs, so copy also the slot */
TX_ADD_FIELD(node, slots[D_RO(node)->n]);
D_RW(node)->slots[D_RO(node)->n] = D_RO(rsb)->slots[0];
TX_ADD(rsb);
D_RW(rsb)->n -= 1; /* it loses one element, but still > min */
/* move all existing elements back by one array slot */
memmove(D_RW(rsb)->items, D_RO(rsb)->items + 1,
sizeof(struct tree_map_node_item) * (D_RO(rsb)->n));
memmove(D_RW(rsb)->slots, D_RO(rsb)->slots + 1,
sizeof(TOID(struct tree_map_node)) * (D_RO(rsb)->n + 1));
}
/*
* btree_map_rotate_left -- (internal) takes one element from left sibling
*/
static void
btree_map_rotate_left(TOID(struct tree_map_node) lsb,
TOID(struct tree_map_node) node,
TOID(struct tree_map_node) parent, int p)
{
/* move the separator from parent to the deficient node */
struct tree_map_node_item sep = D_RO(parent)->items[p - 1];
btree_map_insert_item(node, 0, sep);
/* the last element of the left sibling is the new separator */
TX_ADD_FIELD(parent, items[p - 1]);
D_RW(parent)->items[p - 1] = D_RO(lsb)->items[D_RO(lsb)->n - 1];
/* rotate the node children */
memmove(D_RW(node)->slots + 1, D_RO(node)->slots,
sizeof(TOID(struct tree_map_node)) * (D_RO(node)->n));
/* the nodes are not necessarily leafs, so copy also the slot */
D_RW(node)->slots[0] = D_RO(lsb)->slots[D_RO(lsb)->n];
TX_ADD_FIELD(lsb, n);
D_RW(lsb)->n -= 1; /* it loses one element, but still > min */
}
/*
* btree_map_merge -- (internal) merges node and right sibling
*/
static void
btree_map_merge(TOID(struct btree_map) map, TOID(struct tree_map_node) rn,
TOID(struct tree_map_node) node,
TOID(struct tree_map_node) parent, int p)
{
struct tree_map_node_item sep = D_RO(parent)->items[p];
TX_ADD(node);
/* add separator to the deficient node */
D_RW(node)->items[D_RW(node)->n++] = sep;
/* copy right sibling data to node */
memcpy(&D_RW(node)->items[D_RO(node)->n], D_RO(rn)->items,
sizeof(struct tree_map_node_item) * D_RO(rn)->n);
memcpy(&D_RW(node)->slots[D_RO(node)->n], D_RO(rn)->slots,
sizeof(TOID(struct tree_map_node)) * (D_RO(rn)->n + 1));
D_RW(node)->n += D_RO(rn)->n;
TX_FREE(rn); /* right node is now empty */
TX_ADD(parent);
D_RW(parent)->n -= 1;
/* move everything to the right of the separator by one array slot */
memmove(D_RW(parent)->items + p, D_RW(parent)->items + p + 1,
sizeof(struct tree_map_node_item) * (D_RO(parent)->n - p));
memmove(D_RW(parent)->slots + p + 1, D_RW(parent)->slots + p + 2,
sizeof(TOID(struct tree_map_node)) * (D_RO(parent)->n - p + 1));
/* if the parent is empty then the tree shrinks in height */
if (D_RO(parent)->n == 0 && TOID_EQUALS(parent, D_RO(map)->root)) {
TX_ADD(map);
TX_FREE(D_RO(map)->root);
D_RW(map)->root = node;
}
}
/*
* btree_map_rebalance -- (internal) performs tree rebalance
*/
static void
btree_map_rebalance(TOID(struct btree_map) map, TOID(struct tree_map_node) node,
TOID(struct tree_map_node) parent, int p)
{
TOID(struct tree_map_node) rsb = p >= D_RO(parent)->n ?
TOID_NULL(struct tree_map_node) : D_RO(parent)->slots[p + 1];
TOID(struct tree_map_node) lsb = p == 0 ?
TOID_NULL(struct tree_map_node) : D_RO(parent)->slots[p - 1];
if (!TOID_IS_NULL(rsb) && D_RO(rsb)->n > BTREE_MIN)
btree_map_rotate_right(rsb, node, parent, p);
else if (!TOID_IS_NULL(lsb) && D_RO(lsb)->n > BTREE_MIN)
btree_map_rotate_left(lsb, node, parent, p);
else if (TOID_IS_NULL(rsb)) /* always merge with rightmost node */
btree_map_merge(map, node, lsb, parent, p - 1);
else
btree_map_merge(map, rsb, node, parent, p);
}
/*
* btree_map_get_leftmost_leaf -- (internal) searches for the successor
*/
static TOID(struct tree_map_node)
btree_map_get_leftmost_leaf(TOID(struct btree_map) map,
TOID(struct tree_map_node) n, TOID(struct tree_map_node) *p)
{
if (TOID_IS_NULL(D_RO(n)->slots[0]))
return n;
*p = n;
return btree_map_get_leftmost_leaf(map, D_RO(n)->slots[0], p);
}
/*
* btree_map_remove_from_node -- (internal) removes element from node
*/
static void
btree_map_remove_from_node(TOID(struct btree_map) map,
TOID(struct tree_map_node) node,
TOID(struct tree_map_node) parent, int p)
{
if (TOID_IS_NULL(D_RO(node)->slots[0])) { /* leaf */
TX_ADD(node);
if (D_RO(node)->n == 1 || p == BTREE_ORDER - 2) {
set_empty_item(&D_RW(node)->items[p]);
} else if (D_RO(node)->n != 1) {
memmove(&D_RW(node)->items[p],
&D_RW(node)->items[p + 1],
sizeof(struct tree_map_node_item) *
(D_RO(node)->n - p));
}
D_RW(node)->n -= 1;
return;
}
/* can't delete from non-leaf nodes, remove successor */
TOID(struct tree_map_node) rchild = D_RW(node)->slots[p + 1];
TOID(struct tree_map_node) lp = node;
TOID(struct tree_map_node) lm =
btree_map_get_leftmost_leaf(map, rchild, &lp);
TX_ADD_FIELD(node, items[p]);
D_RW(node)->items[p] = D_RO(lm)->items[0];
btree_map_remove_from_node(map, lm, lp, 0);
if (D_RO(lm)->n < BTREE_MIN) /* right child can be deficient now */
btree_map_rebalance(map, lm, lp,
TOID_EQUALS(lp, node) ? p + 1 : 0);
}
#define NODE_CONTAINS_ITEM(_n, _i, _k)\
((_i) != D_RO(_n)->n && D_RO(_n)->items[_i].key == (_k))
#define NODE_CHILD_CAN_CONTAIN_ITEM(_n, _i, _k)\
((_i) == D_RO(_n)->n || D_RO(_n)->items[_i].key > (_k)) &&\
!TOID_IS_NULL(D_RO(_n)->slots[_i])
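/*
 * Example (added for clarity): for a node holding keys {10, 20, 30},
 * NODE_CONTAINS_ITEM(n, 1, 20) is true; a search for key 15 fails both
 * checks at i == 0 and then NODE_CHILD_CAN_CONTAIN_ITEM(n, 1, 15)
 * selects slots[1], the subtree holding keys between 10 and 20.
 */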
/*
* btree_map_remove_item -- (internal) removes item from node
*/
static PMEMoid
btree_map_remove_item(TOID(struct btree_map) map,
TOID(struct tree_map_node) node, TOID(struct tree_map_node) parent,
uint64_t key, int p)
{
PMEMoid ret = OID_NULL;
for (int i = 0; i <= D_RO(node)->n; ++i) {
if (NODE_CONTAINS_ITEM(node, i, key)) {
ret = D_RO(node)->items[i].value;
btree_map_remove_from_node(map, node, parent, i);
break;
} else if (NODE_CHILD_CAN_CONTAIN_ITEM(node, i, key)) {
ret = btree_map_remove_item(map, D_RO(node)->slots[i],
node, key, i);
break;
}
}
/* check for deficient nodes walking up */
if (!TOID_IS_NULL(parent) && D_RO(node)->n < BTREE_MIN)
btree_map_rebalance(map, node, parent, p);
return ret;
}
/*
* btree_map_remove -- removes key-value pair from the map
*/
PMEMoid
btree_map_remove(PMEMobjpool *pop, TOID(struct btree_map) map, uint64_t key)
{
PMEMoid ret = OID_NULL;
TX_BEGIN(pop) {
ret = btree_map_remove_item(map, D_RW(map)->root,
TOID_NULL(struct tree_map_node), key, 0);
} TX_END
return ret;
}
/*
* btree_map_get_in_node -- (internal) searches for a value in the node
*/
static PMEMoid
btree_map_get_in_node(TOID(struct tree_map_node) node, uint64_t key)
{
for (int i = 0; i <= D_RO(node)->n; ++i) {
if (NODE_CONTAINS_ITEM(node, i, key))
return D_RO(node)->items[i].value;
else if (NODE_CHILD_CAN_CONTAIN_ITEM(node, i, key))
return btree_map_get_in_node(D_RO(node)->slots[i], key);
}
return OID_NULL;
}
/*
* btree_map_get -- searches for a value of the key
*/
PMEMoid
btree_map_get(PMEMobjpool *pop, TOID(struct btree_map) map, uint64_t key)
{
if (TOID_IS_NULL(D_RO(map)->root))
return OID_NULL;
return btree_map_get_in_node(D_RO(map)->root, key);
}
/*
* btree_map_lookup_in_node -- (internal) searches for key if exists
*/
static int
btree_map_lookup_in_node(TOID(struct tree_map_node) node, uint64_t key)
{
for (int i = 0; i <= D_RO(node)->n; ++i) {
if (NODE_CONTAINS_ITEM(node, i, key))
return 1;
else if (NODE_CHILD_CAN_CONTAIN_ITEM(node, i, key))
return btree_map_lookup_in_node(
D_RO(node)->slots[i], key);
}
return 0;
}
/*
* btree_map_lookup -- searches if key exists
*/
int
btree_map_lookup(PMEMobjpool *pop, TOID(struct btree_map) map, uint64_t key)
{
if (TOID_IS_NULL(D_RO(map)->root))
return 0;
return btree_map_lookup_in_node(D_RO(map)->root, key);
}
/*
* btree_map_foreach_node -- (internal) recursively traverses tree
*/
static int
btree_map_foreach_node(const TOID(struct tree_map_node) p,
int (*cb)(uint64_t key, PMEMoid, void *arg), void *arg)
{
if (TOID_IS_NULL(p))
return 0;
for (int i = 0; i <= D_RO(p)->n; ++i) {
if (btree_map_foreach_node(D_RO(p)->slots[i], cb, arg) != 0)
return 1;
if (i != D_RO(p)->n && D_RO(p)->items[i].key != 0) {
if (cb(D_RO(p)->items[i].key, D_RO(p)->items[i].value,
arg) != 0)
return 1;
}
}
return 0;
}
/*
* btree_map_foreach -- initiates recursive traversal
*/
int
btree_map_foreach(PMEMobjpool *pop, TOID(struct btree_map) map,
int (*cb)(uint64_t key, PMEMoid value, void *arg), void *arg)
{
return btree_map_foreach_node(D_RO(map)->root, cb, arg);
}
/*
 * btree_map_check -- check if given persistent object is a tree map
*/
int
btree_map_check(PMEMobjpool *pop, TOID(struct btree_map) map)
{
return TOID_IS_NULL(map) || !TOID_VALID(map);
}
/*
* btree_map_insert_new -- allocates a new object and inserts it into the tree
*/
int
btree_map_insert_new(PMEMobjpool *pop, TOID(struct btree_map) map,
uint64_t key, size_t size, unsigned type_num,
void (*constructor)(PMEMobjpool *pop, void *ptr, void *arg),
void *arg)
{
int ret = 0;
TX_BEGIN(pop) {
PMEMoid n = pmemobj_tx_alloc(size, type_num);
constructor(pop, pmemobj_direct(n), arg);
btree_map_insert(pop, map, key, n);
} TX_ONABORT {
ret = 1;
} TX_END
return ret;
}
/*
* btree_map_remove_free -- removes and frees an object from the tree
*/
int
btree_map_remove_free(PMEMobjpool *pop, TOID(struct btree_map) map,
uint64_t key)
{
int ret = 0;
TX_BEGIN(pop) {
PMEMoid val = btree_map_remove(pop, map, key);
pmemobj_tx_free(val);
} TX_ONABORT {
ret = 1;
} TX_END
return ret;
}
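/*
 * Usage sketch (added for illustration, not part of the original file):
 * a foreach callback that prints every key in ascending order. The pop
 * and map variables are assumed to come from the caller.
 */
#ifdef BTREE_MAP_USAGE_EXAMPLE
#include <inttypes.h>
static int
btree_map_print_entry(uint64_t key, PMEMoid value, void *arg)
{
	(void) value;
	(void) arg;
	printf("key: %" PRIu64 "\n", key);
	return 0;	/* a non-zero return stops the traversal */
}
/* call site: btree_map_foreach(pop, map, btree_map_print_entry, NULL); */
#endif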
| 18,459 | 25.988304 | 80 | c | null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/examples/libpmemobj/tree_map/rtree_map.h
/*
* Copyright 2016, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* rtree_map.h -- Radix TreeMap collection implementation
*/
#ifndef RTREE_MAP_H
#define RTREE_MAP_H
#include <libpmemobj.h>
#ifndef RTREE_MAP_TYPE_OFFSET
#define RTREE_MAP_TYPE_OFFSET 1020
#endif
struct rtree_map;
TOID_DECLARE(struct rtree_map, RTREE_MAP_TYPE_OFFSET + 0);
int rtree_map_check(PMEMobjpool *pop, TOID(struct rtree_map) map);
int rtree_map_create(PMEMobjpool *pop, TOID(struct rtree_map) *map, void *arg);
int rtree_map_destroy(PMEMobjpool *pop, TOID(struct rtree_map) *map);
int rtree_map_insert(PMEMobjpool *pop, TOID(struct rtree_map) map,
const unsigned char *key, uint64_t key_size, PMEMoid value);
int rtree_map_insert_new(PMEMobjpool *pop, TOID(struct rtree_map) map,
const unsigned char *key, uint64_t key_size,
size_t size, unsigned type_num,
void (*constructor)(PMEMobjpool *pop, void *ptr, void *arg),
void *arg);
PMEMoid rtree_map_remove(PMEMobjpool *pop, TOID(struct rtree_map) map,
const unsigned char *key, uint64_t key_size);
int rtree_map_remove_free(PMEMobjpool *pop, TOID(struct rtree_map) map,
const unsigned char *key, uint64_t key_size);
int rtree_map_clear(PMEMobjpool *pop, TOID(struct rtree_map) map);
PMEMoid rtree_map_get(PMEMobjpool *pop, TOID(struct rtree_map) map,
const unsigned char *key, uint64_t key_size);
int rtree_map_lookup(PMEMobjpool *pop, TOID(struct rtree_map) map,
const unsigned char *key, uint64_t key_size);
int rtree_map_foreach(PMEMobjpool *pop, TOID(struct rtree_map) map,
int (*cb)(const unsigned char *key, uint64_t key_size,
PMEMoid value, void *arg),
void *arg);
int rtree_map_is_empty(PMEMobjpool *pop, TOID(struct rtree_map) map);
#endif /* RTREE_MAP_H */
| 3,254 | 42.4 | 79 | h | null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/examples/libpmemobj/tree_map/rbtree_map.h
/*
* Copyright 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* rbtree_map.h -- TreeMap sorted collection implementation
*/
#ifndef RBTREE_MAP_H
#define RBTREE_MAP_H
#include <libpmemobj.h>
#ifndef RBTREE_MAP_TYPE_OFFSET
#define RBTREE_MAP_TYPE_OFFSET 1016
#endif
struct rbtree_map;
TOID_DECLARE(struct rbtree_map, RBTREE_MAP_TYPE_OFFSET + 0);
int rbtree_map_check(PMEMobjpool *pop, TOID(struct rbtree_map) map);
int rbtree_map_create(PMEMobjpool *pop, TOID(struct rbtree_map) *map,
void *arg);
int rbtree_map_destroy(PMEMobjpool *pop, TOID(struct rbtree_map) *map);
int rbtree_map_insert(PMEMobjpool *pop, TOID(struct rbtree_map) map,
uint64_t key, PMEMoid value);
int rbtree_map_insert_new(PMEMobjpool *pop, TOID(struct rbtree_map) map,
uint64_t key, size_t size, unsigned type_num,
void (*constructor)(PMEMobjpool *pop, void *ptr, void *arg),
void *arg);
PMEMoid rbtree_map_remove(PMEMobjpool *pop, TOID(struct rbtree_map) map,
uint64_t key);
int rbtree_map_remove_free(PMEMobjpool *pop, TOID(struct rbtree_map) map,
uint64_t key);
int rbtree_map_clear(PMEMobjpool *pop, TOID(struct rbtree_map) map);
PMEMoid rbtree_map_get(PMEMobjpool *pop, TOID(struct rbtree_map) map,
uint64_t key);
int rbtree_map_lookup(PMEMobjpool *pop, TOID(struct rbtree_map) map,
uint64_t key);
int rbtree_map_foreach(PMEMobjpool *pop, TOID(struct rbtree_map) map,
int (*cb)(uint64_t key, PMEMoid value, void *arg), void *arg);
int rbtree_map_is_empty(PMEMobjpool *pop, TOID(struct rbtree_map) map);
#endif /* RBTREE_MAP_H */
| 3,072 | 41.09589 | 74 | h | null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/examples/libpmemobj/tree_map/btree_map.h
/*
* Copyright 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* btree_map.h -- TreeMap sorted collection implementation
*/
#ifndef BTREE_MAP_H
#define BTREE_MAP_H
#include <libpmemobj.h>
#ifndef BTREE_MAP_TYPE_OFFSET
#define BTREE_MAP_TYPE_OFFSET 1012
#endif
struct btree_map;
TOID_DECLARE(struct btree_map, BTREE_MAP_TYPE_OFFSET + 0);
int btree_map_check(PMEMobjpool *pop, TOID(struct btree_map) map);
int btree_map_create(PMEMobjpool *pop, TOID(struct btree_map) *map, void *arg);
int btree_map_destroy(PMEMobjpool *pop, TOID(struct btree_map) *map);
int btree_map_insert(PMEMobjpool *pop, TOID(struct btree_map) map,
uint64_t key, PMEMoid value);
int btree_map_insert_new(PMEMobjpool *pop, TOID(struct btree_map) map,
uint64_t key, size_t size, unsigned type_num,
void (*constructor)(PMEMobjpool *pop, void *ptr, void *arg),
void *arg);
PMEMoid btree_map_remove(PMEMobjpool *pop, TOID(struct btree_map) map,
uint64_t key);
int btree_map_remove_free(PMEMobjpool *pop, TOID(struct btree_map) map,
uint64_t key);
int btree_map_clear(PMEMobjpool *pop, TOID(struct btree_map) map);
PMEMoid btree_map_get(PMEMobjpool *pop, TOID(struct btree_map) map,
uint64_t key);
int btree_map_lookup(PMEMobjpool *pop, TOID(struct btree_map) map,
uint64_t key);
int btree_map_foreach(PMEMobjpool *pop, TOID(struct btree_map) map,
int (*cb)(uint64_t key, PMEMoid value, void *arg), void *arg);
int btree_map_is_empty(PMEMobjpool *pop, TOID(struct btree_map) map);
#endif /* BTREE_MAP_H */
| 3,038 | 41.208333 | 79 | h | null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/examples/libpmemobj/tree_map/rbtree_map.c
/*
* Copyright 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
 * rbtree_map.c -- red-black tree implementation with sentinel nodes
*/
#include <assert.h>
#include <errno.h>
#include "rbtree_map.h"
TOID_DECLARE(struct tree_map_node, RBTREE_MAP_TYPE_OFFSET + 1);
#define NODE_P(_n)\
D_RW(_n)->parent
#define NODE_GRANDP(_n)\
NODE_P(NODE_P(_n))
#define NODE_PARENT_AT(_n, _rbc)\
D_RW(NODE_P(_n))->slots[_rbc]
#define NODE_PARENT_RIGHT(_n)\
NODE_PARENT_AT(_n, RB_RIGHT)
#define NODE_IS(_n, _rbc)\
TOID_EQUALS(_n, NODE_PARENT_AT(_n, _rbc))
#define NODE_IS_RIGHT(_n)\
TOID_EQUALS(_n, NODE_PARENT_RIGHT(_n))
#define NODE_LOCATION(_n)\
NODE_IS_RIGHT(_n)
#define RB_FIRST(_m)\
D_RW(D_RW(_m)->root)->slots[RB_LEFT]
#define NODE_IS_NULL(_n)\
TOID_EQUALS(_n, s)
enum rb_color {
COLOR_BLACK,
COLOR_RED,
MAX_COLOR
};
enum rb_children {
RB_LEFT,
RB_RIGHT,
MAX_RB
};
struct tree_map_node {
uint64_t key;
PMEMoid value;
enum rb_color color;
TOID(struct tree_map_node) parent;
TOID(struct tree_map_node) slots[MAX_RB];
};
struct rbtree_map {
TOID(struct tree_map_node) sentinel;
TOID(struct tree_map_node) root;
};
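/*
 * Note (added for clarity): the sentinel is a single shared black node
 * standing in for every NULL pointer; leaf slots, the root's parent and
 * the sentinel's own links all point back at it. The rotate and repair
 * code below can therefore read color and parent fields unconditionally,
 * and emptiness is tested with NODE_IS_NULL(), i.e. TOID_EQUALS against
 * the sentinel.
 */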
/*
* rbtree_map_create -- allocates a new red-black tree instance
*/
int
rbtree_map_create(PMEMobjpool *pop, TOID(struct rbtree_map) *map, void *arg)
{
int ret = 0;
TX_BEGIN(pop) {
pmemobj_tx_add_range_direct(map, sizeof(*map));
*map = TX_ZNEW(struct rbtree_map);
TOID(struct tree_map_node) s = TX_ZNEW(struct tree_map_node);
D_RW(s)->color = COLOR_BLACK;
D_RW(s)->parent = s;
D_RW(s)->slots[RB_LEFT] = s;
D_RW(s)->slots[RB_RIGHT] = s;
TOID(struct tree_map_node) r = TX_ZNEW(struct tree_map_node);
D_RW(r)->color = COLOR_BLACK;
D_RW(r)->parent = s;
D_RW(r)->slots[RB_LEFT] = s;
D_RW(r)->slots[RB_RIGHT] = s;
D_RW(*map)->sentinel = s;
D_RW(*map)->root = r;
} TX_ONABORT {
ret = 1;
} TX_END
return ret;
}
/*
* rbtree_map_clear_node -- (internal) clears this node and its children
*/
static void
rbtree_map_clear_node(TOID(struct rbtree_map) map, TOID(struct tree_map_node) p)
{
TOID(struct tree_map_node) s = D_RO(map)->sentinel;
if (!NODE_IS_NULL(D_RO(p)->slots[RB_LEFT]))
rbtree_map_clear_node(map, D_RO(p)->slots[RB_LEFT]);
if (!NODE_IS_NULL(D_RO(p)->slots[RB_RIGHT]))
rbtree_map_clear_node(map, D_RO(p)->slots[RB_RIGHT]);
TX_FREE(p);
}
/*
* rbtree_map_clear -- removes all elements from the map
*/
int
rbtree_map_clear(PMEMobjpool *pop, TOID(struct rbtree_map) map)
{
TX_BEGIN(pop) {
rbtree_map_clear_node(map, D_RW(map)->root);
TX_ADD_FIELD(map, root);
TX_ADD_FIELD(map, sentinel);
TX_FREE(D_RW(map)->sentinel);
D_RW(map)->root = TOID_NULL(struct tree_map_node);
D_RW(map)->sentinel = TOID_NULL(struct tree_map_node);
} TX_END
return 0;
}
/*
* rbtree_map_destroy -- cleanups and frees red-black tree instance
*/
int
rbtree_map_destroy(PMEMobjpool *pop, TOID(struct rbtree_map) *map)
{
int ret = 0;
TX_BEGIN(pop) {
rbtree_map_clear(pop, *map);
pmemobj_tx_add_range_direct(map, sizeof(*map));
TX_FREE(*map);
*map = TOID_NULL(struct rbtree_map);
} TX_ONABORT {
ret = 1;
} TX_END
return ret;
}
/*
* rbtree_map_rotate -- (internal) performs a left/right rotation around a node
*/
static void
rbtree_map_rotate(TOID(struct rbtree_map) map,
TOID(struct tree_map_node) node, enum rb_children c)
{
TOID(struct tree_map_node) child = D_RO(node)->slots[!c];
TOID(struct tree_map_node) s = D_RO(map)->sentinel;
TX_ADD(node);
TX_ADD(child);
D_RW(node)->slots[!c] = D_RO(child)->slots[c];
if (!TOID_EQUALS(D_RO(child)->slots[c], s))
TX_SET(D_RW(child)->slots[c], parent, node);
NODE_P(child) = NODE_P(node);
TX_SET(NODE_P(node), slots[NODE_LOCATION(node)], child);
D_RW(child)->slots[c] = node;
D_RW(node)->parent = child;
}
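/*
 * Illustration (added for clarity): for c == RB_LEFT this is a classic
 * left rotation; the right child C is promoted and node N becomes its
 * left child, preserving the in-order sequence a, N, x, C, b:
 *
 *	    N                    C
 *	   / \                  / \
 *	  a   C       ==>      N   b
 *	     / \              / \
 *	    x   b            a   x
 */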
/*
* rbtree_map_insert_bst -- (internal) inserts a node in regular BST fashion
*/
static void
rbtree_map_insert_bst(TOID(struct rbtree_map) map, TOID(struct tree_map_node) n)
{
TOID(struct tree_map_node) parent = D_RO(map)->root;
TOID(struct tree_map_node) *dst = &RB_FIRST(map);
TOID(struct tree_map_node) s = D_RO(map)->sentinel;
D_RW(n)->slots[RB_LEFT] = s;
D_RW(n)->slots[RB_RIGHT] = s;
while (!NODE_IS_NULL(*dst)) {
parent = *dst;
dst = &D_RW(*dst)->slots[D_RO(n)->key > D_RO(*dst)->key];
}
TX_SET(n, parent, parent);
pmemobj_tx_add_range_direct(dst, sizeof(*dst));
*dst = n;
}
/*
* rbtree_map_recolor -- (internal) restores red-black tree properties
*/
static TOID(struct tree_map_node)
rbtree_map_recolor(TOID(struct rbtree_map) map,
TOID(struct tree_map_node) n, enum rb_children c)
{
TOID(struct tree_map_node) uncle = D_RO(NODE_GRANDP(n))->slots[!c];
if (D_RO(uncle)->color == COLOR_RED) {
TX_SET(uncle, color, COLOR_BLACK);
TX_SET(NODE_P(n), color, COLOR_BLACK);
TX_SET(NODE_GRANDP(n), color, COLOR_RED);
return NODE_GRANDP(n);
} else {
if (NODE_IS(n, !c)) {
n = NODE_P(n);
rbtree_map_rotate(map, n, c);
}
TX_SET(NODE_P(n), color, COLOR_BLACK);
TX_SET(NODE_GRANDP(n), color, COLOR_RED);
rbtree_map_rotate(map, NODE_GRANDP(n), (enum rb_children)!c);
}
return n;
}
/*
* rbtree_map_insert -- inserts a new key-value pair into the map
*/
int
rbtree_map_insert(PMEMobjpool *pop, TOID(struct rbtree_map) map,
uint64_t key, PMEMoid value)
{
int ret = 0;
TX_BEGIN(pop) {
TOID(struct tree_map_node) n = TX_ZNEW(struct tree_map_node);
D_RW(n)->key = key;
D_RW(n)->value = value;
rbtree_map_insert_bst(map, n);
D_RW(n)->color = COLOR_RED;
while (D_RO(NODE_P(n))->color == COLOR_RED)
n = rbtree_map_recolor(map, n, (enum rb_children)
NODE_LOCATION(NODE_P(n)));
		TX_SET(RB_FIRST(map), color, COLOR_BLACK);
	} TX_ONABORT {
		ret = 1;	/* otherwise ret would never be set */
	} TX_END
return ret;
}
/*
* rbtree_map_successor -- (internal) returns the successor of a node
*/
static TOID(struct tree_map_node)
rbtree_map_successor(TOID(struct rbtree_map) map, TOID(struct tree_map_node) n)
{
TOID(struct tree_map_node) dst = D_RO(n)->slots[RB_RIGHT];
TOID(struct tree_map_node) s = D_RO(map)->sentinel;
if (!TOID_EQUALS(s, dst)) {
while (!NODE_IS_NULL(D_RO(dst)->slots[RB_LEFT]))
dst = D_RO(dst)->slots[RB_LEFT];
} else {
dst = D_RO(n)->parent;
while (TOID_EQUALS(n, D_RO(dst)->slots[RB_RIGHT])) {
n = dst;
dst = NODE_P(dst);
}
if (TOID_EQUALS(dst, D_RO(map)->root))
return s;
}
return dst;
}
/*
* rbtree_map_find_node -- (internal) returns the node that contains the key
*/
static TOID(struct tree_map_node)
rbtree_map_find_node(TOID(struct rbtree_map) map, uint64_t key)
{
TOID(struct tree_map_node) dst = RB_FIRST(map);
TOID(struct tree_map_node) s = D_RO(map)->sentinel;
while (!NODE_IS_NULL(dst)) {
if (D_RO(dst)->key == key)
return dst;
dst = D_RO(dst)->slots[key > D_RO(dst)->key];
}
return TOID_NULL(struct tree_map_node);
}
/*
* rbtree_map_repair_branch -- (internal) restores red-black tree in one branch
*/
static TOID(struct tree_map_node)
rbtree_map_repair_branch(TOID(struct rbtree_map) map,
TOID(struct tree_map_node) n, enum rb_children c)
{
TOID(struct tree_map_node) sb = NODE_PARENT_AT(n, !c); /* sibling */
if (D_RO(sb)->color == COLOR_RED) {
TX_SET(sb, color, COLOR_BLACK);
TX_SET(NODE_P(n), color, COLOR_RED);
rbtree_map_rotate(map, NODE_P(n), c);
sb = NODE_PARENT_AT(n, !c);
}
if (D_RO(D_RO(sb)->slots[RB_RIGHT])->color == COLOR_BLACK &&
D_RO(D_RO(sb)->slots[RB_LEFT])->color == COLOR_BLACK) {
TX_SET(sb, color, COLOR_RED);
return D_RO(n)->parent;
} else {
if (D_RO(D_RO(sb)->slots[!c])->color == COLOR_BLACK) {
TX_SET(D_RW(sb)->slots[c], color, COLOR_BLACK);
TX_SET(sb, color, COLOR_RED);
rbtree_map_rotate(map, sb, (enum rb_children)!c);
sb = NODE_PARENT_AT(n, !c);
}
TX_SET(sb, color, D_RO(NODE_P(n))->color);
TX_SET(NODE_P(n), color, COLOR_BLACK);
TX_SET(D_RW(sb)->slots[!c], color, COLOR_BLACK);
rbtree_map_rotate(map, NODE_P(n), c);
return RB_FIRST(map);
}
return n;
}
/*
* rbtree_map_repair -- (internal) restores red-black tree properties
* after remove
*/
static void
rbtree_map_repair(TOID(struct rbtree_map) map, TOID(struct tree_map_node) n)
{
/* if left, repair right sibling, otherwise repair left sibling. */
while (!TOID_EQUALS(n, RB_FIRST(map)) && D_RO(n)->color == COLOR_BLACK)
n = rbtree_map_repair_branch(map, n, (enum rb_children)
NODE_LOCATION(n));
TX_SET(n, color, COLOR_BLACK);
}
/*
* rbtree_map_remove -- removes key-value pair from the map
*/
PMEMoid
rbtree_map_remove(PMEMobjpool *pop, TOID(struct rbtree_map) map, uint64_t key)
{
PMEMoid ret = OID_NULL;
TOID(struct tree_map_node) n = rbtree_map_find_node(map, key);
if (TOID_IS_NULL(n))
return ret;
ret = D_RO(n)->value;
TOID(struct tree_map_node) s = D_RO(map)->sentinel;
TOID(struct tree_map_node) r = D_RO(map)->root;
TOID(struct tree_map_node) y = (NODE_IS_NULL(D_RO(n)->slots[RB_LEFT]) ||
NODE_IS_NULL(D_RO(n)->slots[RB_RIGHT]))
? n : rbtree_map_successor(map, n);
TOID(struct tree_map_node) x = NODE_IS_NULL(D_RO(y)->slots[RB_LEFT]) ?
D_RO(y)->slots[RB_RIGHT] : D_RO(y)->slots[RB_LEFT];
TX_BEGIN(pop) {
TX_SET(x, parent, NODE_P(y));
if (TOID_EQUALS(NODE_P(x), r)) {
TX_SET(r, slots[RB_LEFT], x);
} else {
TX_SET(NODE_P(y), slots[NODE_LOCATION(y)], x);
}
if (D_RO(y)->color == COLOR_BLACK)
rbtree_map_repair(map, x);
if (!TOID_EQUALS(y, n)) {
TX_ADD(y);
D_RW(y)->slots[RB_LEFT] = D_RO(n)->slots[RB_LEFT];
D_RW(y)->slots[RB_RIGHT] = D_RO(n)->slots[RB_RIGHT];
D_RW(y)->parent = D_RO(n)->parent;
D_RW(y)->color = D_RO(n)->color;
TX_SET(D_RW(n)->slots[RB_LEFT], parent, y);
TX_SET(D_RW(n)->slots[RB_RIGHT], parent, y);
TX_SET(NODE_P(n), slots[NODE_LOCATION(n)], y);
}
TX_FREE(n);
} TX_END
return ret;
}
/*
* rbtree_map_get -- searches for a value of the key
*/
PMEMoid
rbtree_map_get(PMEMobjpool *pop, TOID(struct rbtree_map) map, uint64_t key)
{
TOID(struct tree_map_node) node = rbtree_map_find_node(map, key);
if (TOID_IS_NULL(node))
return OID_NULL;
return D_RO(node)->value;
}
/*
* rbtree_map_lookup -- searches if key exists
*/
int
rbtree_map_lookup(PMEMobjpool *pop, TOID(struct rbtree_map) map, uint64_t key)
{
TOID(struct tree_map_node) node = rbtree_map_find_node(map, key);
if (TOID_IS_NULL(node))
return 0;
return 1;
}
/*
* rbtree_map_foreach_node -- (internal) recursively traverses tree
*/
static int
rbtree_map_foreach_node(TOID(struct rbtree_map) map,
TOID(struct tree_map_node) p,
int (*cb)(uint64_t key, PMEMoid value, void *arg), void *arg)
{
int ret = 0;
if (TOID_EQUALS(p, D_RO(map)->sentinel))
return 0;
if ((ret = rbtree_map_foreach_node(map,
D_RO(p)->slots[RB_LEFT], cb, arg)) == 0) {
if ((ret = cb(D_RO(p)->key, D_RO(p)->value, arg)) == 0)
rbtree_map_foreach_node(map,
D_RO(p)->slots[RB_RIGHT], cb, arg);
}
return ret;
}
/*
* rbtree_map_foreach -- initiates recursive traversal
*/
int
rbtree_map_foreach(PMEMobjpool *pop, TOID(struct rbtree_map) map,
int (*cb)(uint64_t key, PMEMoid value, void *arg), void *arg)
{
return rbtree_map_foreach_node(map, RB_FIRST(map), cb, arg);
}
/*
* rbtree_map_is_empty -- checks whether the tree map is empty
*/
int
rbtree_map_is_empty(PMEMobjpool *pop, TOID(struct rbtree_map) map)
{
return TOID_IS_NULL(RB_FIRST(map));
}
/*
* rbtree_map_check -- check if given persistent object is a tree map
*/
int
rbtree_map_check(PMEMobjpool *pop, TOID(struct rbtree_map) map)
{
return TOID_IS_NULL(map) || !TOID_VALID(map);
}
/*
* rbtree_map_insert_new -- allocates a new object and inserts it into the tree
*/
int
rbtree_map_insert_new(PMEMobjpool *pop, TOID(struct rbtree_map) map,
uint64_t key, size_t size, unsigned type_num,
void (*constructor)(PMEMobjpool *pop, void *ptr, void *arg),
void *arg)
{
int ret = 0;
TX_BEGIN(pop) {
PMEMoid n = pmemobj_tx_alloc(size, type_num);
constructor(pop, pmemobj_direct(n), arg);
rbtree_map_insert(pop, map, key, n);
} TX_ONABORT {
ret = 1;
} TX_END
return ret;
}
/*
* rbtree_map_remove_free -- removes and frees an object from the tree
*/
int
rbtree_map_remove_free(PMEMobjpool *pop, TOID(struct rbtree_map) map,
uint64_t key)
{
int ret = 0;
TX_BEGIN(pop) {
PMEMoid val = rbtree_map_remove(pop, map, key);
pmemobj_tx_free(val);
} TX_ONABORT {
ret = 1;
} TX_END
return ret;
}
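/*
 * Usage sketch (added for illustration, not part of the original file):
 * a constructor for rbtree_map_insert_new() filling a hypothetical
 * value object. The stores become durable when the transaction opened
 * by rbtree_map_insert_new() commits.
 */
#ifdef RBTREE_MAP_USAGE_EXAMPLE
struct example_value {
	uint64_t payload;
};
static void
construct_example_value(PMEMobjpool *pop, void *ptr, void *arg)
{
	(void) pop;
	struct example_value *v = (struct example_value *)ptr;
	v->payload = *(uint64_t *)arg;
}
/*
 * call site:
 *	uint64_t payload = 42;
 *	rbtree_map_insert_new(pop, map, key, sizeof(struct example_value),
 *		0, construct_example_value, &payload);
 */
#endif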
| 13,791 | 23.761221 | 80 | c | null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/examples/libpmemobj++/README.md
This folder contained examples for libpmemobj C++ bindings.
They have been moved to https://github.com/pmem/libpmemobj-cpp/tree/master/examples
| 144 | 47.333333 | 83 | md | null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/examples/libpmempool/manpage.c
/*
* Copyright 2016, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* manpage.c -- simple example for the libpmempool man page
*/
#include <stddef.h>
#include <unistd.h>
#include <stdlib.h>
#include <stdio.h>
#include <libpmempool.h>
#define PATH "./pmem-fs/myfile"
#define CHECK_FLAGS (PMEMPOOL_CHECK_FORMAT_STR|PMEMPOOL_CHECK_REPAIR|\
PMEMPOOL_CHECK_VERBOSE)
int
main(int argc, char *argv[])
{
PMEMpoolcheck *ppc;
struct pmempool_check_status *status;
enum pmempool_check_result ret;
/* arguments for check */
struct pmempool_check_args args = {
.path = PATH,
.backup_path = NULL,
.pool_type = PMEMPOOL_POOL_TYPE_DETECT,
.flags = CHECK_FLAGS
};
/* initialize check context */
if ((ppc = pmempool_check_init(&args, sizeof(args))) == NULL) {
perror("pmempool_check_init");
exit(EXIT_FAILURE);
}
/* perform check and repair, answer 'yes' for each question */
while ((status = pmempool_check(ppc)) != NULL) {
switch (status->type) {
case PMEMPOOL_CHECK_MSG_TYPE_ERROR:
printf("%s\n", status->str.msg);
break;
case PMEMPOOL_CHECK_MSG_TYPE_INFO:
printf("%s\n", status->str.msg);
break;
case PMEMPOOL_CHECK_MSG_TYPE_QUESTION:
printf("%s\n", status->str.msg);
status->str.answer = "yes";
break;
default:
pmempool_check_end(ppc);
exit(EXIT_FAILURE);
}
}
/* finalize the check and get the result */
ret = pmempool_check_end(ppc);
switch (ret) {
case PMEMPOOL_CHECK_RESULT_CONSISTENT:
case PMEMPOOL_CHECK_RESULT_REPAIRED:
return 0;
default:
return 1;
}
}
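/*
 * Note (added for clarity): passing PMEMPOOL_CHECK_ALWAYS_YES in
 * args.flags makes the library answer every repair question by itself,
 * so the QUESTION branch above is never reached:
 *
 *	.flags = CHECK_FLAGS | PMEMPOOL_CHECK_ALWAYS_YES
 */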
| 3,070 | 30.659794 | 74 | c | null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/examples/libpmem/simple_copy.c
/*
* Copyright 2014-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* simple_copy.c -- show how to use pmem_memcpy_persist()
*
* usage: simple_copy src-file dst-file
*
* Reads 4k from src-file and writes it to dst-file.
*/
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <stdio.h>
#include <errno.h>
#include <stdlib.h>
#ifndef _WIN32
#include <unistd.h>
#else
#include <io.h>
#endif
#include <string.h>
#include <libpmem.h>
/* just copying 4k to pmem for this example */
#define BUF_LEN 4096
int
main(int argc, char *argv[])
{
int srcfd;
char buf[BUF_LEN];
char *pmemaddr;
size_t mapped_len;
int is_pmem;
int cc;
if (argc != 3) {
fprintf(stderr, "usage: %s src-file dst-file\n", argv[0]);
exit(1);
}
/* open src-file */
if ((srcfd = open(argv[1], O_RDONLY)) < 0) {
perror(argv[1]);
exit(1);
}
/* create a pmem file and memory map it */
if ((pmemaddr = pmem_map_file(argv[2], BUF_LEN,
PMEM_FILE_CREATE|PMEM_FILE_EXCL,
0666, &mapped_len, &is_pmem)) == NULL) {
perror("pmem_map_file");
exit(1);
}
/* read up to BUF_LEN from srcfd */
if ((cc = read(srcfd, buf, BUF_LEN)) < 0) {
pmem_unmap(pmemaddr, mapped_len);
perror("read");
exit(1);
}
/* write it to the pmem */
if (is_pmem) {
pmem_memcpy_persist(pmemaddr, buf, cc);
} else {
memcpy(pmemaddr, buf, cc);
pmem_msync(pmemaddr, cc);
}
close(srcfd);
pmem_unmap(pmemaddr, mapped_len);
exit(0);
}
| 2,975 | 26.813084 | 74 | c | null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/examples/libpmem/full_copy.c
/*
* Copyright 2014-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* full_copy.c -- show how to use pmem_memcpy_nodrain()
*
* usage: full_copy src-file dst-file
*
* Copies src-file to dst-file in 4k chunks.
*/
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <stdio.h>
#include <errno.h>
#include <stdlib.h>
#ifndef _WIN32
#include <unistd.h>
#else
#include <io.h>
#endif
#include <string.h>
#include <libpmem.h>
/* copying 4k at a time to pmem for this example */
#define BUF_LEN 4096
/*
* do_copy_to_pmem -- copy to pmem, postponing drain step until the end
*/
static void
do_copy_to_pmem(char *pmemaddr, int srcfd, off_t len)
{
char buf[BUF_LEN];
int cc;
/* copy the file, saving the last flush step to the end */
while ((cc = read(srcfd, buf, BUF_LEN)) > 0) {
pmem_memcpy_nodrain(pmemaddr, buf, cc);
pmemaddr += cc;
}
if (cc < 0) {
perror("read");
exit(1);
}
/* perform final flush step */
pmem_drain();
}
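/*
 * Note (added for clarity): pmem_memcpy_nodrain() copies and flushes
 * but skips the final drain step, so the loop above pays for
 * pmem_drain() only once. Per the libpmem man page the relationship is
 * roughly:
 *
 *	pmem_memcpy_persist(dst, src, len);
 * is equivalent to:
 *	pmem_memcpy_nodrain(dst, src, len);
 *	pmem_drain();
 */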
/*
* do_copy_to_non_pmem -- copy to a non-pmem memory mapped file
*/
static void
do_copy_to_non_pmem(char *addr, int srcfd, off_t len)
{
char *startaddr = addr;
char buf[BUF_LEN];
int cc;
/* copy the file, saving the last flush step to the end */
while ((cc = read(srcfd, buf, BUF_LEN)) > 0) {
memcpy(addr, buf, cc);
addr += cc;
}
if (cc < 0) {
perror("read");
exit(1);
}
/* flush it */
if (pmem_msync(startaddr, len) < 0) {
perror("pmem_msync");
exit(1);
}
}
int
main(int argc, char *argv[])
{
int srcfd;
struct stat stbuf;
char *pmemaddr;
size_t mapped_len;
int is_pmem;
if (argc != 3) {
fprintf(stderr, "usage: %s src-file dst-file\n", argv[0]);
exit(1);
}
/* open src-file */
if ((srcfd = open(argv[1], O_RDONLY)) < 0) {
perror(argv[1]);
exit(1);
}
/* find the size of the src-file */
if (fstat(srcfd, &stbuf) < 0) {
perror("fstat");
exit(1);
}
/* create a pmem file and memory map it */
if ((pmemaddr = pmem_map_file(argv[2], stbuf.st_size,
PMEM_FILE_CREATE|PMEM_FILE_EXCL,
0666, &mapped_len, &is_pmem)) == NULL) {
perror("pmem_map_file");
exit(1);
}
/* determine if range is true pmem, call appropriate copy routine */
if (is_pmem)
do_copy_to_pmem(pmemaddr, srcfd, stbuf.st_size);
else
do_copy_to_non_pmem(pmemaddr, srcfd, stbuf.st_size);
close(srcfd);
pmem_unmap(pmemaddr, mapped_len);
exit(0);
}
| 3,894 | 24.129032 | 74 | c | null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/examples/libpmem/manpage.c
/*
* Copyright 2014-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* manpage.c -- simple example for the libpmem man page
*/
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <stdio.h>
#include <errno.h>
#include <stdlib.h>
#ifndef _WIN32
#include <unistd.h>
#else
#include <io.h>
#endif
#include <string.h>
#include <libpmem.h>
/* using 4k of pmem for this example */
#define PMEM_LEN 4096
#define PATH "/pmem-fs/myfile"
int
main(int argc, char *argv[])
{
char *pmemaddr;
size_t mapped_len;
int is_pmem;
/* create a pmem file and memory map it */
if ((pmemaddr = pmem_map_file(PATH, PMEM_LEN, PMEM_FILE_CREATE,
0666, &mapped_len, &is_pmem)) == NULL) {
perror("pmem_map_file");
exit(1);
}
/* store a string to the persistent memory */
strcpy(pmemaddr, "hello, persistent memory");
/* flush above strcpy to persistence */
if (is_pmem)
pmem_persist(pmemaddr, mapped_len);
else
pmem_msync(pmemaddr, mapped_len);
/*
* Delete the mappings. The region is also
* automatically unmapped when the process is
* terminated.
*/
pmem_unmap(pmemaddr, mapped_len);
}
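/*
 * Editor's sketch (not part of the original man page example): the
 * stored string can be read back by mapping the existing file; with
 * no PMEM_FILE_CREATE flag the len argument must be 0 and the file's
 * actual size is used. read_back() is a hypothetical helper.
 */
static void
read_back(void)
{
	char *addr;
	size_t len;
	int is_pmem;

	if ((addr = pmem_map_file(PATH, 0, 0, 0,
			&len, &is_pmem)) == NULL) {
		perror("pmem_map_file");
		exit(1);
	}
	printf("%s\n", addr);	/* prints "hello, persistent memory" */
	pmem_unmap(addr, len);
}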
| 2,656 | 29.895349 | 74 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/examples/libvmem/manpage.c
|
/*
* Copyright 2014-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* manpage.c -- simple example for the libvmem man page
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <libvmem.h>
int
main(int argc, char *argv[])
{
VMEM *vmp;
char *ptr;
/* create minimum size pool of memory */
if ((vmp = vmem_create("/pmem-fs",
VMEM_MIN_POOL)) == NULL) {
perror("vmem_create");
exit(1);
}
if ((ptr = vmem_malloc(vmp, 100)) == NULL) {
perror("vmem_malloc");
exit(1);
}
strcpy(ptr, "hello, world");
/* give the memory back */
vmem_free(vmp, ptr);
/* ... */
vmem_delete(vmp);
}
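/*
 * Editor's sketch (not part of the original example): libvmem can also
 * manage caller-supplied memory via vmem_create_in_region(); this
 * assumes a static buffer of at least VMEM_MIN_POOL bytes and a
 * hypothetical helper name.
 */
static VMEM *
create_in_region(void)
{
	static char region[VMEM_MIN_POOL];
	VMEM *vmp = vmem_create_in_region(region, sizeof(region));

	if (vmp == NULL)
		perror("vmem_create_in_region");
	return vmp;
}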
| 2,158 | 30.289855 | 74 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/examples/libvmem/libart/art.c
|
/*
* Copyright 2016, FUJITSU TECHNOLOGY SOLUTIONS GMBH
* Copyright 2012, Armon Dadgar. All rights reserved.
* Copyright 2016-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* ==========================================================================
*
* Filename: art.c
*
* Description: implement ART tree using libvmem based on libart
*
* Author: Andreas Bluemle, Dieter Kasper
* Andreas.Bluemle.external@ts.fujitsu.com
* dieter.kasper@ts.fujitsu.com
*
* Organization: FUJITSU TECHNOLOGY SOLUTIONS GMBH
* ==========================================================================
*/
/*
* based on https://github.com/armon/libart/src/art.c
*/
#include <stdlib.h>
#include <string.h>
#include <strings.h>
#include <stdio.h>
#include <emmintrin.h>
#include <assert.h>
#include "libvmem.h"
#include "art.h"
/*
* Macros to manipulate pointer tags
*/
#define IS_LEAF(x) (((uintptr_t)(x) & 1))
#define SET_LEAF(x) ((void *)((uintptr_t)(x) | 1))
#define LEAF_RAW(x) ((void *)((uintptr_t)(x) & ~1))
/*
* Allocates a node of the given type,
* initializes to zero and sets the type.
*/
static art_node *
alloc_node(VMEM *vmp, uint8_t type)
{
art_node *n;
switch (type) {
case NODE4:
n = vmem_calloc(vmp, 1, sizeof(art_node4));
break;
case NODE16:
n = vmem_calloc(vmp, 1, sizeof(art_node16));
break;
case NODE48:
n = vmem_calloc(vmp, 1, sizeof(art_node48));
break;
case NODE256:
n = vmem_calloc(vmp, 1, sizeof(art_node256));
break;
default:
abort();
}
assert(n != NULL);
n->type = type;
return n;
}
/*
* Initializes an ART tree
* @return 0 on success.
*/
int
art_tree_init(art_tree *t)
{
t->root = NULL;
t->size = 0;
return 0;
}
/*
* Recursively destroys the tree
*/
static void
destroy_node(VMEM *vmp, art_node *n)
{
// Break if null
if (!n)
return;
// Special case leafs
if (IS_LEAF(n)) {
vmem_free(vmp, LEAF_RAW(n));
return;
}
// Handle each node type
int i;
union {
art_node4 *p1;
art_node16 *p2;
art_node48 *p3;
art_node256 *p4;
} p;
switch (n->type) {
case NODE4:
p.p1 = (art_node4 *)n;
for (i = 0; i < n->num_children; i++) {
destroy_node(vmp, p.p1->children[i]);
}
break;
case NODE16:
p.p2 = (art_node16 *)n;
for (i = 0; i < n->num_children; i++) {
destroy_node(vmp, p.p2->children[i]);
}
break;
case NODE48:
p.p3 = (art_node48 *)n;
for (i = 0; i < n->num_children; i++) {
destroy_node(vmp, p.p3->children[i]);
}
break;
case NODE256:
p.p4 = (art_node256 *)n;
for (i = 0; i < 256; i++) {
if (p.p4->children[i])
destroy_node(vmp, p.p4->children[i]);
}
break;
default:
abort();
}
// Free ourself on the way up
vmem_free(vmp, n);
}
/*
* Destroys an ART tree
* @return 0 on success.
*/
int
art_tree_destroy(VMEM *vmp, art_tree *t)
{
destroy_node(vmp, t->root);
return 0;
}
/*
 * Finds the child pointer of a node matching the given key byte,
 * or NULL if there is none.
 */
static art_node **
find_child(art_node *n, unsigned char c)
{
__m128i cmp;
int i, mask, bitfield;
union {
art_node4 *p1;
art_node16 *p2;
art_node48 *p3;
art_node256 *p4;
} p;
switch (n->type) {
case NODE4:
p.p1 = (art_node4 *)n;
for (i = 0; i < n->num_children; i++) {
if (p.p1->keys[i] == c)
return &p.p1->children[i];
}
break;
case NODE16:
p.p2 = (art_node16 *)n;
// Compare the key to all 16 stored keys
cmp = _mm_cmpeq_epi8(_mm_set1_epi8(c),
_mm_loadu_si128((__m128i *)p.p2->keys));
// Use a mask to ignore children that don't exist
mask = (1 << n->num_children) - 1;
bitfield = _mm_movemask_epi8(cmp) & mask;
/*
* If we have a match (any bit set) then we can
* return the pointer match using ctz to get
* the index.
*/
if (bitfield)
return &p.p2->children[__builtin_ctz(bitfield)];
break;
case NODE48:
p.p3 = (art_node48 *)n;
i = p.p3->keys[c];
if (i)
return &p.p3->children[i - 1];
break;
case NODE256:
p.p4 = (art_node256 *)n;
if (p.p4->children[c])
return &p.p4->children[c];
break;
default:
abort();
}
return NULL;
}
// Simple inlined if
static inline int
min(int a, int b)
{
return (a < b) ? a : b;
}
/*
* Returns the number of prefix characters shared between
* the key and node.
*/
static int
check_prefix(const art_node *n, const unsigned char *key, int key_len,
int depth)
{
int max_cmp = min(min(n->partial_len, MAX_PREFIX_LEN),
key_len - depth);
int idx;
for (idx = 0; idx < max_cmp; idx++) {
if (n->partial[idx] != key[depth + idx])
return idx;
}
return idx;
}
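/*
 * Editor's note (illustration): for a node with partial "rom" and the
 * key "romane" at depth 0, all three stored prefix bytes match, so
 * check_prefix() returns 3; for the key "rubens" it returns 1.
 */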
/*
* Checks if a leaf matches
* @return 0 on success.
*/
static int
leaf_matches(const art_leaf *n, const unsigned char *key, int key_len,
int depth)
{
(void) depth;
// Fail if the key lengths are different
if (n->key_len != (uint32_t)key_len)
return 1;
// Compare the keys starting at the depth
return memcmp(n->key, key, key_len);
}
/*
* Searches for a value in the ART tree
* @arg t The tree
* @arg key The key
* @arg key_len The length of the key
* @return NULL if the item was not found, otherwise
* the value pointer is returned.
*/
void *
art_search(const art_tree *t, const unsigned char *key, int key_len)
{
art_node **child;
art_node *n = t->root;
int prefix_len, depth = 0;
while (n) {
// Might be a leaf
if (IS_LEAF(n)) {
n = LEAF_RAW(n);
// Check if the expanded path matches
if (!leaf_matches((art_leaf *)n,
key, key_len, depth)) {
return ((art_leaf *)n)->value;
}
return NULL;
}
// Bail if the prefix does not match
if (n->partial_len) {
prefix_len = check_prefix(n, key, key_len, depth);
if (prefix_len != min(MAX_PREFIX_LEN,
n->partial_len))
return NULL;
depth = depth + n->partial_len;
}
// Recursively search
child = find_child(n, key[depth]);
n = (child) ? *child : NULL;
depth++;
}
return NULL;
}
// Find the minimum leaf under a node
static art_leaf *
minimum(const art_node *n)
{
// Handle base cases
if (!n)
return NULL;
if (IS_LEAF(n))
return LEAF_RAW(n);
int idx;
switch (n->type) {
case NODE4:
return minimum(((art_node4 *)n)->children[0]);
case NODE16:
return minimum(((art_node16 *)n)->children[0]);
case NODE48:
idx = 0;
while (!((art_node48 *)n)->keys[idx])
idx++;
idx = ((art_node48 *)n)->keys[idx] - 1;
assert(idx < 48);
return minimum(((art_node48 *) n)->children[idx]);
case NODE256:
idx = 0;
while (!((art_node256 *)n)->children[idx])
idx++;
return minimum(((art_node256 *)n)->children[idx]);
default:
abort();
}
}
// Find the maximum leaf under a node
static art_leaf *
maximum(const art_node *n)
{
// Handle base cases
if (!n)
return NULL;
if (IS_LEAF(n))
return LEAF_RAW(n);
int idx;
switch (n->type) {
case NODE4:
return maximum(
((art_node4 *)n)->children[n->num_children - 1]);
case NODE16:
return maximum(
((art_node16 *)n)->children[n->num_children - 1]);
case NODE48:
idx = 255;
while (!((art_node48 *)n)->keys[idx])
idx--;
idx = ((art_node48 *)n)->keys[idx] - 1;
assert((idx >= 0) && (idx < 48));
return maximum(((art_node48 *)n)->children[idx]);
case NODE256:
idx = 255;
while (!((art_node256 *)n)->children[idx])
idx--;
return maximum(((art_node256 *)n)->children[idx]);
default:
abort();
}
}
/*
* Returns the minimum valued leaf
*/
art_leaf *
art_minimum(art_tree *t)
{
return minimum(t->root);
}
/*
* Returns the maximum valued leaf
*/
art_leaf *
art_maximum(art_tree *t)
{
return maximum(t->root);
}
static art_leaf *
make_leaf(VMEM *vmp, const unsigned char *key, int key_len, void *value,
int val_len)
{
art_leaf *l = vmem_malloc(vmp, sizeof(art_leaf) + key_len + val_len);
assert(l != NULL);
l->key_len = key_len;
l->val_len = val_len;
l->key = &(l->data[0]) + 0;
l->value = &(l->data[0]) + key_len;
memcpy(l->key, key, key_len);
memcpy(l->value, value, val_len);
return l;
}
static int
longest_common_prefix(art_leaf *l1, art_leaf *l2, int depth)
{
int max_cmp = min(l1->key_len, l2->key_len) - depth;
int idx;
for (idx = 0; idx < max_cmp; idx++) {
if (l1->key[depth + idx] != l2->key[depth + idx])
return idx;
}
return idx;
}
static void
copy_header(art_node *dest, art_node *src)
{
dest->num_children = src->num_children;
dest->partial_len = src->partial_len;
memcpy(dest->partial, src->partial,
min(MAX_PREFIX_LEN, src->partial_len));
}
static void
add_child256(VMEM *vmp, art_node256 *n, art_node **ref, unsigned char c,
void *child)
{
	(void) vmp;
	(void) ref;
n->n.num_children++;
n->children[c] = child;
}
static void
add_child48(VMEM *vmp, art_node48 *n, art_node **ref, unsigned char c,
void *child)
{
if (n->n.num_children < 48) {
int pos = 0;
while (n->children[pos])
pos++;
n->children[pos] = child;
n->keys[c] = pos + 1;
n->n.num_children++;
} else {
art_node256 *new = (art_node256 *)alloc_node(vmp, NODE256);
for (int i = 0; i < 256; i++) {
if (n->keys[i]) {
new->children[i] = n->children[n->keys[i] - 1];
}
}
copy_header((art_node *)new, (art_node *)n);
*ref = (art_node *)new;
vmem_free(vmp, n);
add_child256(vmp, new, ref, c, child);
}
}
static void
add_child16(VMEM *vmp, art_node16 *n, art_node **ref, unsigned char c,
void *child)
{
if (n->n.num_children < 16) {
__m128i cmp;
// Compare the key to all 16 stored keys
cmp = _mm_cmplt_epi8(_mm_set1_epi8(c),
_mm_loadu_si128((__m128i *)n->keys));
// Use a mask to ignore children that don't exist
unsigned mask = (1 << n->n.num_children) - 1;
unsigned bitfield = _mm_movemask_epi8(cmp) & mask;
// Check if less than any
unsigned idx;
if (bitfield) {
idx = __builtin_ctz(bitfield);
memmove(n->keys + idx + 1, n->keys + idx,
n->n.num_children - idx);
memmove(n->children + idx + 1, n->children + idx,
(n->n.num_children - idx) * sizeof(void *));
} else
idx = n->n.num_children;
// Set the child
n->keys[idx] = c;
n->children[idx] = child;
n->n.num_children++;
} else {
art_node48 *new = (art_node48 *)alloc_node(vmp, NODE48);
// Copy the child pointers and populate the key map
memcpy(new->children, n->children,
sizeof(void *) * n->n.num_children);
for (int i = 0; i < n->n.num_children; i++) {
new->keys[n->keys[i]] = i + 1;
}
copy_header((art_node *)new, (art_node *)n);
*ref = (art_node *) new;
vmem_free(vmp, n);
add_child48(vmp, new, ref, c, child);
}
}
static void
add_child4(VMEM *vmp, art_node4 *n, art_node **ref, unsigned char c,
void *child)
{
if (n->n.num_children < 4) {
int idx;
for (idx = 0; idx < n->n.num_children; idx++) {
if (c < n->keys[idx])
break;
}
// Shift to make room
memmove(n->keys + idx + 1, n->keys + idx,
n->n.num_children - idx);
memmove(n->children + idx + 1, n->children + idx,
(n->n.num_children - idx) * sizeof(void *));
// Insert element
n->keys[idx] = c;
n->children[idx] = child;
n->n.num_children++;
} else {
art_node16 *new = (art_node16 *)alloc_node(vmp, NODE16);
// Copy the child pointers and the key map
memcpy(new->children, n->children,
sizeof(void *) * n->n.num_children);
memcpy(new->keys, n->keys,
sizeof(unsigned char) * n->n.num_children);
copy_header((art_node *)new, (art_node *)n);
*ref = (art_node *)new;
vmem_free(vmp, n);
add_child16(vmp, new, ref, c, child);
}
}
static void
add_child(VMEM *vmp, art_node *n, art_node **ref, unsigned char c,
void *child)
{
switch (n->type) {
case NODE4:
return add_child4(vmp, (art_node4 *)n, ref, c, child);
case NODE16:
return add_child16(vmp, (art_node16 *)n, ref, c, child);
case NODE48:
return add_child48(vmp, (art_node48 *)n, ref, c, child);
case NODE256:
return add_child256(vmp, (art_node256 *)n, ref, c, child);
default:
abort();
}
}
/*
* Calculates the index at which the prefixes mismatch
*/
static int
prefix_mismatch(const art_node *n, const unsigned char *key, int key_len,
int depth)
{
int max_cmp = min(min(MAX_PREFIX_LEN, n->partial_len), key_len - depth);
int idx;
for (idx = 0; idx < max_cmp; idx++) {
if (n->partial[idx] != key[depth + idx])
return idx;
}
// If the prefix is short we can avoid finding a leaf
if (n->partial_len > MAX_PREFIX_LEN) {
// Prefix is longer than what we've checked, find a leaf
art_leaf *l = minimum(n);
assert(l != NULL);
max_cmp = min(l->key_len, key_len) - depth;
for (; idx < max_cmp; idx++) {
if (l->key[idx + depth] != key[depth + idx])
return idx;
}
}
return idx;
}
static void *
recursive_insert(VMEM *vmp, art_node *n, art_node **ref,
const unsigned char *key, int key_len, void *value,
int val_len, int depth, int *old)
{
// If we are at a NULL node, inject a leaf
if (!n) {
*ref = (art_node *)SET_LEAF(
make_leaf(vmp, key, key_len, value, val_len));
return NULL;
}
// If we are at a leaf, we need to replace it with a node
if (IS_LEAF(n)) {
art_leaf *l = LEAF_RAW(n);
// Check if we are updating an existing value
if (!leaf_matches(l, key, key_len, depth)) {
*old = 1;
void *old_val = l->value;
l->value = value;
return old_val;
}
// New value, we must split the leaf into a node4
art_node4 *new = (art_node4 *)alloc_node(vmp, NODE4);
// Create a new leaf
art_leaf *l2 = make_leaf(vmp, key, key_len, value, val_len);
// Determine longest prefix
int longest_prefix = longest_common_prefix(l, l2, depth);
new->n.partial_len = longest_prefix;
memcpy(new->n.partial, key + depth,
min(MAX_PREFIX_LEN, longest_prefix));
// Add the leafs to the new node4
*ref = (art_node *)new;
add_child4(vmp, new, ref, l->key[depth + longest_prefix],
SET_LEAF(l));
add_child4(vmp, new, ref, l2->key[depth + longest_prefix],
SET_LEAF(l2));
return NULL;
}
// Check if given node has a prefix
if (n->partial_len) {
// Determine if the prefixes differ, since we need to split
int prefix_diff = prefix_mismatch(n, key, key_len, depth);
if ((uint32_t)prefix_diff >= n->partial_len) {
depth += n->partial_len;
goto RECURSE_SEARCH;
}
// Create a new node
art_node4 *new = (art_node4 *)alloc_node(vmp, NODE4);
*ref = (art_node *)new;
new->n.partial_len = prefix_diff;
memcpy(new->n.partial, n->partial,
min(MAX_PREFIX_LEN, prefix_diff));
// Adjust the prefix of the old node
if (n->partial_len <= MAX_PREFIX_LEN) {
add_child4(vmp, new, ref,
n->partial[prefix_diff], n);
n->partial_len -= (prefix_diff + 1);
memmove(n->partial, n->partial + prefix_diff + 1,
min(MAX_PREFIX_LEN, n->partial_len));
} else {
n->partial_len -= (prefix_diff + 1);
art_leaf *l = minimum(n);
assert(l != NULL);
add_child4(vmp, new, ref,
l->key[depth + prefix_diff], n);
memcpy(n->partial, l->key + depth + prefix_diff + 1,
min(MAX_PREFIX_LEN, n->partial_len));
}
// Insert the new leaf
art_leaf *l = make_leaf(vmp, key, key_len, value, val_len);
add_child4(vmp, new, ref,
key[depth + prefix_diff], SET_LEAF(l));
return NULL;
}
RECURSE_SEARCH:;
// Find a child to recurse to
art_node **child = find_child(n, key[depth]);
if (child) {
return recursive_insert(vmp, *child, child, key, key_len,
value, val_len, depth + 1, old);
}
// No child, node goes within us
art_leaf *l = make_leaf(vmp, key, key_len, value, val_len);
add_child(vmp, n, ref, key[depth], SET_LEAF(l));
return NULL;
}
/*
* Inserts a new value into the ART tree
* @arg t The tree
* @arg key The key
* @arg key_len The length of the key
* @arg value Opaque value.
* @return NULL if the item was newly inserted, otherwise
* the old value pointer is returned.
*/
void *
art_insert(VMEM *vmp, art_tree *t, const unsigned char *key, int key_len,
void *value, int val_len)
{
int old_val = 0;
void *old = recursive_insert(vmp, t->root, &t->root, key, key_len,
value, val_len, 0, &old_val);
if (!old_val)
t->size++;
return old;
}
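/*
 * Editor's sketch (not part of libart): a minimal insert/search round
 * trip, assuming an already-created VMEM pool vmp. Key and value
 * lengths include the terminating NUL here.
 */
static void
art_round_trip(VMEM *vmp)
{
	art_tree t;
	unsigned char key[] = "hello";
	char val[] = "world";

	art_tree_init(&t);
	/* first insert of a key returns NULL */
	void *old = art_insert(vmp, &t, key, sizeof(key),
			val, sizeof(val));
	/* search returns the stored value pointer */
	char *found = art_search(&t, key, sizeof(key));
	(void) old;
	(void) found;
	art_tree_destroy(vmp, &t);
}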
static void
remove_child256(VMEM *vmp, art_node256 *n, art_node **ref,
unsigned char c)
{
n->children[c] = NULL;
n->n.num_children--;
	// Resize to a node48 on underflow, not immediately to prevent
	// thrashing if we sit on the 48/49 boundary
if (n->n.num_children == 37) {
art_node48 *new = (art_node48 *)alloc_node(vmp, NODE48);
*ref = (art_node *) new;
copy_header((art_node *)new, (art_node *)n);
int pos = 0;
for (int i = 0; i < 256; i++) {
if (n->children[i]) {
assert(pos < 48);
new->children[pos] = n->children[i];
new->keys[i] = pos + 1;
pos++;
}
}
vmem_free(vmp, n);
}
}
static void
remove_child48(VMEM *vmp, art_node48 *n, art_node **ref, unsigned char c)
{
int pos = n->keys[c];
n->keys[c] = 0;
n->children[pos - 1] = NULL;
n->n.num_children--;
if (n->n.num_children == 12) {
art_node16 *new = (art_node16 *)alloc_node(vmp, NODE16);
*ref = (art_node *)new;
copy_header((art_node *) new, (art_node *)n);
int child = 0;
for (int i = 0; i < 256; i++) {
pos = n->keys[i];
if (pos) {
assert(child < 16);
new->keys[child] = i;
new->children[child] = n->children[pos - 1];
child++;
}
}
vmem_free(vmp, n);
}
}
static void
remove_child16(VMEM *vmp, art_node16 *n, art_node **ref, art_node **l)
{
int pos = l - n->children;
memmove(n->keys + pos, n->keys + pos + 1, n->n.num_children - 1 - pos);
memmove(n->children + pos, n->children + pos + 1,
(n->n.num_children - 1 - pos) * sizeof(void *));
n->n.num_children--;
if (n->n.num_children == 3) {
art_node4 *new = (art_node4 *)alloc_node(vmp, NODE4);
*ref = (art_node *) new;
copy_header((art_node *)new, (art_node *)n);
memcpy(new->keys, n->keys, 4);
memcpy(new->children, n->children, 4 * sizeof(void *));
vmem_free(vmp, n);
}
}
static void
remove_child4(VMEM *vmp, art_node4 *n, art_node **ref, art_node **l)
{
int pos = l - n->children;
memmove(n->keys + pos, n->keys + pos + 1, n->n.num_children - 1 - pos);
memmove(n->children + pos, n->children + pos + 1,
(n->n.num_children - 1 - pos) * sizeof(void *));
n->n.num_children--;
// Remove nodes with only a single child
if (n->n.num_children == 1) {
art_node *child = n->children[0];
if (!IS_LEAF(child)) {
// Concatenate the prefixes
int prefix = n->n.partial_len;
if (prefix < MAX_PREFIX_LEN) {
n->n.partial[prefix] = n->keys[0];
prefix++;
}
if (prefix < MAX_PREFIX_LEN) {
int sub_prefix =
min(child->partial_len,
MAX_PREFIX_LEN - prefix);
memcpy(n->n.partial + prefix,
child->partial, sub_prefix);
prefix += sub_prefix;
}
// Store the prefix in the child
memcpy(child->partial, n->n.partial,
min(prefix, MAX_PREFIX_LEN));
child->partial_len += n->n.partial_len + 1;
}
*ref = child;
vmem_free(vmp, n);
}
}
static void
remove_child(VMEM *vmp, art_node *n, art_node **ref, unsigned char c,
art_node **l)
{
switch (n->type) {
case NODE4:
return remove_child4(vmp, (art_node4 *)n, ref, l);
case NODE16:
return remove_child16(vmp, (art_node16 *)n, ref, l);
case NODE48:
return remove_child48(vmp, (art_node48 *)n, ref, c);
case NODE256:
return remove_child256(vmp, (art_node256 *)n, ref, c);
default:
abort();
}
}
static art_leaf *
recursive_delete(VMEM *vmp, art_node *n, art_node **ref,
const unsigned char *key, int key_len, int depth)
{
// Search terminated
if (!n)
return NULL;
// Handle hitting a leaf node
if (IS_LEAF(n)) {
art_leaf *l = LEAF_RAW(n);
if (!leaf_matches(l, key, key_len, depth)) {
*ref = NULL;
return l;
}
return NULL;
}
// Bail if the prefix does not match
if (n->partial_len) {
int prefix_len = check_prefix(n, key, key_len, depth);
if (prefix_len != min(MAX_PREFIX_LEN, n->partial_len)) {
return NULL;
}
depth = depth + n->partial_len;
}
// Find child node
art_node **child = find_child(n, key[depth]);
if (!child)
return NULL;
// If the child is leaf, delete from this node
if (IS_LEAF(*child)) {
art_leaf *l = LEAF_RAW(*child);
if (!leaf_matches(l, key, key_len, depth)) {
remove_child(vmp, n, ref, key[depth], child);
return l;
}
return NULL;
// Recurse
} else {
return recursive_delete(vmp, *child, child, key,
key_len, depth + 1);
}
}
/*
* Deletes a value from the ART tree
* @arg t The tree
* @arg key The key
* @arg key_len The length of the key
* @return NULL if the item was not found, otherwise
* the value pointer is returned.
*/
void *
art_delete(VMEM *vmp, art_tree *t, const unsigned char *key, int key_len)
{
art_leaf *l = recursive_delete(vmp, t->root, &t->root, key, key_len, 0);
if (l) {
t->size--;
void *old = l->value;
vmem_free(vmp, l);
return old;
}
return NULL;
}
// Recursively iterates over the tree
static int
recursive_iter(art_node *n, art_callback cb, void *data)
{
// Handle base cases
if (!n)
return 0;
if (IS_LEAF(n)) {
art_leaf *l = LEAF_RAW(n);
return cb(data, (const unsigned char *)l->key, l->key_len,
l->value, l->val_len);
}
int idx, res;
switch (n->type) {
case NODE4:
for (int i = 0; i < n->num_children; i++) {
res = recursive_iter(((art_node4 *)n)->children[i],
cb, data);
if (res)
return res;
}
break;
case NODE16:
for (int i = 0; i < n->num_children; i++) {
res = recursive_iter(
((art_node16 *)n)->children[i],
cb, data);
if (res)
return res;
}
break;
case NODE48:
for (int i = 0; i < 256; i++) {
idx = ((art_node48 *)n)->keys[i];
if (!idx)
continue;
res = recursive_iter(
((art_node48 *)n)->children[idx - 1],
cb, data);
if (res)
return res;
}
break;
case NODE256:
for (int i = 0; i < 256; i++) {
if (!((art_node256 *)n)->children[i])
continue;
res = recursive_iter(
((art_node256 *)n)->children[i],
cb, data);
if (res)
return res;
}
break;
default:
abort();
}
return 0;
}
/*
 * Iterates through the entry pairs in the map,
 * invoking a callback for each. The callback gets a
 * key and value for each entry and returns an integer stop value.
 * If the callback returns non-zero, the iteration stops.
* @arg t The tree to iterate over
* @arg cb The callback function to invoke
* @arg data Opaque handle passed to the callback
* @return 0 on success, or the return of the callback.
*/
int
art_iter(art_tree *t, art_callback cb, void *data)
{
return recursive_iter(t->root, cb, data);
}
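/*
 * Editor's sketch (not part of libart): a counting callback for
 * art_iter(); data points to a size_t accumulator. The signature
 * mirrors the dump callbacks used by the arttree example and is an
 * assumption about the art_callback type.
 */
static int
count_leaf_cb(void *data, const unsigned char *key, uint32_t key_len,
	const unsigned char *val, uint32_t val_len)
{
	(void) key;
	(void) key_len;
	(void) val;
	(void) val_len;
	(*(size_t *)data)++;
	return 0;	/* zero keeps the iteration going */
}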
// Recursively iterates over the tree
static int
recursive_iter2(art_node *n, art_callback cb, void *data)
{
cb_data _cbd, *cbd = &_cbd;
int first = 1;
// Handle base cases
if (!n)
return 0;
cbd->node = (void *)n;
cbd->node_type = n->type;
cbd->child_idx = -1;
if (IS_LEAF(n)) {
art_leaf *l = LEAF_RAW(n);
return cb(cbd, (const unsigned char *)l->key,
l->key_len, l->value, l->val_len);
}
int idx, res;
switch (n->type) {
case NODE4:
for (int i = 0; i < n->num_children; i++) {
cbd->first_child = first;
first = 0;
cbd->child_idx = i;
cb((void *)cbd, NULL, 0, NULL, 0);
res = recursive_iter2(((art_node4 *)n)->children[i],
cb, data);
if (res)
return res;
}
break;
case NODE16:
for (int i = 0; i < n->num_children; i++) {
cbd->first_child = first;
first = 0;
cbd->child_idx = i;
cb((void *)cbd, NULL, 0, NULL, 0);
res = recursive_iter2(((art_node16 *)n)->children[i],
cb, data);
if (res)
return res;
}
break;
case NODE48:
for (int i = 0; i < 256; i++) {
idx = ((art_node48 *)n)->keys[i];
if (!idx)
continue;
cbd->first_child = first;
first = 0;
cbd->child_idx = i;
cb((void *)cbd, NULL, 0, NULL, 0);
res = recursive_iter2(
((art_node48 *)n)->children[idx - 1],
cb, data);
if (res)
return res;
}
break;
case NODE256:
for (int i = 0; i < 256; i++) {
if (!((art_node256 *)n)->children[i])
continue;
cbd->first_child = first;
first = 0;
cbd->child_idx = i;
cb((void *)cbd, NULL, 0, NULL, 0);
res = recursive_iter2(
((art_node256 *)n)->children[i],
cb, data);
if (res)
return res;
}
break;
default:
abort();
}
return 0;
}
/*
 * Iterates through the entry pairs in the map,
 * invoking a callback for each. The callback gets a
 * key and value for each entry and returns an integer stop value.
 * If the callback returns non-zero, the iteration stops.
* @arg t The tree to iterate over
* @arg cb The callback function to invoke
* @arg data Opaque handle passed to the callback
* @return 0 on success, or the return of the callback.
*/
int
art_iter2(art_tree *t, art_callback cb, void *data)
{
return recursive_iter2(t->root, cb, data);
}
/*
* Checks if a leaf prefix matches
* @return 0 on success.
*/
static int
leaf_prefix_matches(const art_leaf *n, const unsigned char *prefix,
int prefix_len)
{
// Fail if the key length is too short
if (n->key_len < (uint32_t)prefix_len)
return 1;
// Compare the keys
return memcmp(n->key, prefix, prefix_len);
}
/*
 * Iterates through the entry pairs in the map, invoking a callback
 * for each key that matches a given prefix. The callback gets a key
 * and value for each entry and returns an integer stop value.
 * If the callback returns non-zero, the iteration stops.
* @arg t The tree to iterate over
* @arg prefix The prefix of keys to read
* @arg prefix_len The length of the prefix
* @arg cb The callback function to invoke
* @arg data Opaque handle passed to the callback
* @return 0 on success, or the return of the callback.
*/
int
art_iter_prefix(art_tree *t, const unsigned char *key, int key_len,
art_callback cb, void *data)
{
art_node **child;
art_node *n = t->root;
int prefix_len, depth = 0;
while (n) {
// Might be a leaf
if (IS_LEAF(n)) {
n = LEAF_RAW(n);
// Check if the expanded path matches
if (!leaf_prefix_matches(
(art_leaf *)n, key, key_len)) {
art_leaf *l = (art_leaf *)n;
return cb(data, (const unsigned char *)l->key,
l->key_len, l->value, l->val_len);
}
return 0;
}
// If the depth matches the prefix, we need to handle this node
if (depth == key_len) {
art_leaf *l = minimum(n);
assert(l != NULL);
if (!leaf_prefix_matches(l, key, key_len))
return recursive_iter(n, cb, data);
return 0;
}
// Bail if the prefix does not match
if (n->partial_len) {
prefix_len = prefix_mismatch(n, key, key_len, depth);
// If there is no match, search is terminated
if (!prefix_len)
return 0;
// If we've matched the prefix, iterate on this node
else if (depth + prefix_len == key_len) {
return recursive_iter(n, cb, data);
}
// if there is a full match, go deeper
depth = depth + n->partial_len;
}
// Recursively search
child = find_child(n, key[depth]);
n = (child) ? *child : NULL;
depth++;
}
return 0;
}
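/*
 * Editor's sketch (not part of libart): counting the keys that share
 * a prefix, reusing the hypothetical count_leaf_cb shown after
 * art_iter() above.
 */
static size_t
count_prefix(art_tree *t, const char *prefix)
{
	size_t n = 0;

	art_iter_prefix(t, (const unsigned char *)prefix,
		(int)strlen(prefix), count_leaf_cb, &n);
	return n;
}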
| 28,304 | 22.548253 | 78 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/examples/libvmem/libart/arttree.c
|
/*
* Copyright 2016, FUJITSU TECHNOLOGY SOLUTIONS GMBH
* Copyright 2016-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* ===========================================================================
*
* Filename: arttree.c
*
 *    Description: implement ART tree using libvmem based on libart
*
* Author: Andreas Bluemle, Dieter Kasper
* Andreas.Bluemle.external@ts.fujitsu.com
* dieter.kasper@ts.fujitsu.com
*
* Organization: FUJITSU TECHNOLOGY SOLUTIONS GMBH
*
* ===========================================================================
*/
#include <sys/types.h>
#include <sys/stat.h>
#include <assert.h>
#include <errno.h>
#include <unistd.h>
#include <ctype.h>
#include <string.h>
#include <strings.h>
#ifdef __FreeBSD__
#define _WITH_GETLINE
#endif
#include <stdio.h>
#include <stdlib.h>
#include <limits.h>
#include <stdbool.h>
#include <inttypes.h>
#include <fcntl.h>
#include <emmintrin.h>
#include <stdarg.h>
#include "libvmem.h"
#include "arttree.h"
#define APPNAME "arttree"
#define SRCVERSION "0.1"
struct str2int_map {
char *name;
int value;
};
#define ART_NODE 0
#define ART_NODE4 1
#define ART_NODE16 2
#define ART_NODE48 3
#define ART_NODE256 4
#define ART_TREE_ROOT 5
#define ART_LEAF 6
struct str2int_map art_node_types[] = {
{"art_node", ART_NODE},
{"art_node4", ART_NODE4},
{"art_node16", ART_NODE16},
{"art_node48", ART_NODE48},
{"art_node256", ART_NODE256},
{"art_tree", ART_TREE_ROOT},
{"art_leaf", ART_LEAF},
{NULL, -1}
};
struct datastore
{
void *priv;
};
/*
* context - main context of datastore
*/
struct ds_context
{
	char *dirname;		/* directory for the vmem pool */
int mode; /* operation mode */
int insertions; /* number of insert operations to perform */
int newpool; /* complete new memory pool */
size_t psize; /* size of pool */
VMEM *vmp; /* handle to vmem pool */
art_tree *art_tree; /* art_tree root */
bool fileio;
unsigned fmode;
FILE *input;
FILE *output;
uint64_t address;
unsigned char *key;
int type;
int fd; /* file descriptor for file io mode */
};
#define FILL (1 << 1)
#define INTERACTIVE (1 << 3)
struct ds_context my_context;
#define read_key(c, p) read_line(c, p)
#define read_value(c, p) read_line(c, p)
static void usage(char *progname);
int initialize_context(struct ds_context *ctx, int ac, char *av[]);
int add_elements(struct ds_context *ctx);
ssize_t read_line(struct ds_context *ctx, unsigned char **line);
void exit_handler(struct ds_context *ctx);
int art_tree_map_init(struct datastore *ds, struct ds_context *ctx);
void pmemobj_ds_set_priv(struct datastore *ds, void *priv);
static int dump_art_leaf_callback(void *data, const unsigned char *key,
uint32_t key_len, const unsigned char *val, uint32_t val_len);
static int dump_art_tree_graph(void *data, const unsigned char *key,
uint32_t key_len, const unsigned char *val, uint32_t val_len);
static void print_node_info(char *nodetype, uint64_t addr, art_node *an);
static void print_help(char *appname);
static void print_version(char *appname);
static struct command *get_command(char *cmd_str);
static int help_func(char *appname, struct ds_context *ctx, int argc,
char *argv[]);
static void help_help(char *appname);
static int quit_func(char *appname, struct ds_context *ctx, int argc,
char *argv[]);
static void quit_help(char *appname);
static int set_output_func(char *appname, struct ds_context *ctx, int argc,
char *argv[]);
static void set_output_help(char *appname);
static int arttree_fill_func(char *appname, struct ds_context *ctx,
int ac, char *av[]);
static void arttree_fill_help(char *appname);
static int arttree_examine_func(char *appname, struct ds_context *ctx,
int ac, char *av[]);
static void arttree_examine_help(char *appname);
static int arttree_search_func(char *appname, struct ds_context *ctx,
int ac, char *av[]);
static void arttree_search_help(char *appname);
static int arttree_delete_func(char *appname, struct ds_context *ctx,
int ac, char *av[]);
static void arttree_delete_help(char *appname);
static int arttree_dump_func(char *appname, struct ds_context *ctx,
int ac, char *av[]);
static void arttree_dump_help(char *appname);
static int arttree_graph_func(char *appname, struct ds_context *ctx,
int ac, char *av[]);
static void arttree_graph_help(char *appname);
static int map_lookup(struct str2int_map *map, char *name);
static void arttree_examine(struct ds_context *ctx, void *addr, int node_type);
static void dump_art_tree_root(struct ds_context *ctx, art_tree *node);
static void dump_art_node(struct ds_context *ctx, art_node *node);
static void dump_art_node4(struct ds_context *ctx, art_node4 *node);
static void dump_art_node16(struct ds_context *ctx, art_node16 *node);
static void dump_art_node48(struct ds_context *ctx, art_node48 *node);
static void dump_art_node256(struct ds_context *ctx, art_node256 *node);
static void dump_art_leaf(struct ds_context *ctx, art_leaf *node);
static char *asciidump(unsigned char *s, int32_t len);
void outv_err(const char *fmt, ...);
void outv_err_vargs(const char *fmt, va_list ap);
/*
* command -- struct for commands definition
*/
struct command {
const char *name;
const char *brief;
int (*func)(char *, struct ds_context *, int, char *[]);
void (*help)(char *);
};
struct command commands[] = {
{
.name = "fill",
.brief = "create and fill an art tree",
.func = arttree_fill_func,
.help = arttree_fill_help,
},
{
.name = "dump",
.brief = "dump an art tree",
.func = arttree_dump_func,
.help = arttree_dump_help,
},
{
.name = "graph",
.brief = "dump an art tree for graphical conversion",
.func = arttree_graph_func,
.help = arttree_graph_help,
},
{
.name = "help",
.brief = "print help text about a command",
.func = help_func,
.help = help_help,
},
{
.name = "examine",
.brief = "examine art tree structures",
.func = arttree_examine_func,
.help = arttree_examine_help,
},
{
.name = "search",
.brief = "search for key in art tree",
.func = arttree_search_func,
.help = arttree_search_help,
},
{
.name = "delete",
.brief = "delete leaf with key from art tree",
.func = arttree_delete_func,
.help = arttree_delete_help,
},
{
.name = "set_output",
.brief = "set output file",
.func = set_output_func,
.help = set_output_help,
},
{
.name = "quit",
.brief = "quit arttree structure examiner",
.func = quit_func,
.help = quit_help,
},
};
/*
 * number of arttree commands
*/
#define COMMANDS_NUMBER (sizeof(commands) / sizeof(commands[0]))
int
initialize_context(struct ds_context *ctx, int ac, char *av[])
{
int errors = 0;
int opt;
char mode;
if ((ctx == NULL) || (ac < 2)) {
errors++;
}
if (!errors) {
ctx->dirname = NULL;
ctx->psize = VMEM_MIN_POOL;
ctx->newpool = 0;
ctx->vmp = NULL;
ctx->art_tree = NULL;
ctx->fileio = false;
ctx->fmode = 0666;
ctx->mode = 0;
ctx->input = stdin;
ctx->output = stdout;
ctx->fd = -1;
}
if (!errors) {
while ((opt = getopt(ac, av, "m:n:s:")) != -1) {
switch (opt) {
case 'm':
mode = optarg[0];
if (mode == 'f') {
ctx->mode |= FILL;
} else if (mode == 'i') {
ctx->mode |= INTERACTIVE;
} else {
errors++;
}
break;
		case 'n': {
			long insertions;
			insertions = strtol(optarg, NULL, 0);
			if (insertions > 0 && insertions < LONG_MAX) {
				ctx->insertions = insertions;
			}
			break;
		}
		case 's': {
			/* -s is in the optstring and usage text; parse it */
			long psize;
			psize = strtol(optarg, NULL, 0);
			if (psize > 0) {
				ctx->psize = (size_t)psize;
			}
			break;
		}
default:
errors++;
break;
}
}
}
if (optind >= ac) {
errors++;
}
if (!errors) {
ctx->dirname = strdup(av[optind]);
}
return errors;
}
void
exit_handler(struct ds_context *ctx)
{
if (!ctx->fileio) {
if (ctx->vmp) {
vmem_delete(ctx->vmp);
}
} else {
		if (ctx->fd > -1) {
close(ctx->fd);
}
}
}
int
art_tree_map_init(struct datastore *ds, struct ds_context *ctx)
{
int errors = 0;
/* calculate a required pool size */
if (ctx->psize < VMEM_MIN_POOL)
ctx->psize = VMEM_MIN_POOL;
if (!ctx->fileio) {
if (access(ctx->dirname, F_OK) == 0) {
ctx->vmp = vmem_create(ctx->dirname, ctx->psize);
if (ctx->vmp == NULL) {
perror("vmem_create");
errors++;
}
ctx->newpool = 1;
}
}
return errors;
}
/*
* pmemobj_ds_set_priv -- set private structure of datastore
*/
void
pmemobj_ds_set_priv(struct datastore *ds, void *priv)
{
ds->priv = priv;
}
struct datastore myds;
static void
usage(char *progname)
{
printf("usage: %s -m [f|d|g] dir\n", progname);
printf(" -m mode known modes are\n");
printf(" f fill create and fill art tree\n");
printf(" i interactive interact with art tree\n");
printf(" -n insertions number of key-value pairs to insert"
"into the tree\n");
printf(" -s size size of the vmem pool file "
"[minimum: VMEM_MIN_POOL=%ld]\n", VMEM_MIN_POOL);
printf("\nfilling an art tree is done by reading key value pairs\n"
"from standard input.\n"
"Both keys and values are single line only.\n");
}
/*
* print_version -- prints arttree version message
*/
static void
print_version(char *appname)
{
printf("%s %s\n", appname, SRCVERSION);
}
/*
* print_help -- prints arttree help message
*/
static void
print_help(char *appname)
{
usage(appname);
print_version(appname);
printf("\n");
printf("Options:\n");
printf(" -h, --help display this help and exit\n");
printf("\n");
printf("The available commands are:\n");
int i;
for (i = 0; i < COMMANDS_NUMBER; i++)
printf("%s\t- %s\n", commands[i].name, commands[i].brief);
printf("\n");
}
static int
map_lookup(struct str2int_map *map, char *name)
{
int idx;
int value = -1;
for (idx = 0; ; idx++) {
if (map[idx].name == NULL) {
break;
}
if (strcmp((const char *)map[idx].name,
(const char *)name) == 0) {
value = map[idx].value;
break;
}
}
return value;
}
/*
* get_command -- returns command for specified command name
*/
static struct command *
get_command(char *cmd_str)
{
int i;
if (cmd_str == NULL) {
return NULL;
}
for (i = 0; i < COMMANDS_NUMBER; i++) {
if (strcmp(cmd_str, commands[i].name) == 0)
return &commands[i];
}
return NULL;
}
/*
* quit_help -- prints help message for quit command
*/
static void
quit_help(char *appname)
{
printf("Usage: quit\n");
printf(" terminate interactive arttree function\n");
}
/*
* quit_func -- quit arttree function
*/
static int
quit_func(char *appname, struct ds_context *ctx, int argc, char *argv[])
{
printf("\n");
exit(0);
return 0;
}
static void
set_output_help(char *appname)
{
printf("set_output output redirection\n");
printf("Usage: set_output [<file_name>]\n");
printf(" redirect subsequent output to specified file\n");
printf(" if file_name is not specified,"
"then reset to standard output\n");
}
static int
set_output_func(char *appname, struct ds_context *ctx, int ac, char *av[])
{
int errors = 0;
if (ac == 1) {
if ((ctx->output != NULL) && (ctx->output != stdout)) {
(void) fclose(ctx->output);
}
ctx->output = stdout;
} else if (ac == 2) {
FILE *out_fp;
out_fp = fopen(av[1], "w+");
if (out_fp == (FILE *)NULL) {
outv_err("set_output: cannot open %s for writing\n",
av[1]);
errors++;
} else {
if ((ctx->output != NULL) && (ctx->output != stdout)) {
(void) fclose(ctx->output);
}
ctx->output = out_fp;
}
} else {
outv_err("set_output: too many arguments [%d]\n", ac);
errors++;
}
return errors;
}
/*
* help_help -- prints help message for help command
*/
static void
help_help(char *appname)
{
printf("Usage: %s help <command>\n", appname);
}
/*
* help_func -- prints help message for specified command
*/
static int
help_func(char *appname, struct ds_context *ctx, int argc, char *argv[])
{
if (argc > 1) {
char *cmd_str = argv[1];
struct command *cmdp = get_command(cmd_str);
if (cmdp && cmdp->help) {
cmdp->help(appname);
return 0;
} else {
outv_err("No help text for '%s' command\n", cmd_str);
return -1;
}
} else {
print_help(appname);
return -1;
}
}
static int
arttree_fill_func(char *appname, struct ds_context *ctx, int ac, char *av[])
{
int errors = 0;
int opt;
(void) appname;
optind = 0;
while ((opt = getopt(ac, av, "n:")) != -1) {
switch (opt) {
case 'n': {
long insertions;
insertions = strtol(optarg, NULL, 0);
if (insertions > 0 && insertions < LONG_MAX) {
ctx->insertions = insertions;
}
break;
}
default:
errors++;
break;
}
}
if (optind >= ac) {
outv_err("fill: missing input filename\n");
arttree_fill_help(appname);
errors++;
}
if (!errors) {
struct stat statbuf;
FILE *in_fp;
if (stat(av[optind], &statbuf)) {
outv_err("fill: cannot stat %s\n", av[optind]);
errors++;
} else {
in_fp = fopen(av[optind], "r");
if (in_fp == (FILE *)NULL) {
outv_err("fill: cannot open %s for reading\n",
av[optind]);
errors++;
} else {
if ((ctx->input != NULL) &&
(ctx->input != stdin)) {
(void) fclose(ctx->input);
}
ctx->input = in_fp;
}
}
}
if (!errors) {
if (add_elements(ctx)) {
perror("add elements");
errors++;
}
if ((ctx->input != NULL) && (ctx->input != stdin)) {
(void) fclose(ctx->input);
}
ctx->input = stdin;
}
return errors;
}
static void
arttree_fill_help(char *appname)
{
(void) appname;
printf("create and fill an art tree\n");
printf("Usage: fill [-n <insertions>] <input_file>\n");
printf(" <insertions> number of key-val pairs to fill"
"the art tree\n");
printf(" <input_file> input file for key-val pairs\n");
}
static char outbuf[1024];
static char *
asciidump(unsigned char *s, int32_t len)
{
char *p;
int l;
p = outbuf;
if ((s != 0) && (len > 0)) {
while (len--) {
if (isprint((*s)&0xff)) {
l = sprintf(p, "%c", (*s)&0xff);
} else {
l = sprintf(p, "\\%.2x", (*s)&0xff);
}
p += l;
s++;
}
}
*p = '\0';
p++;
return outbuf;
}
static void
dump_art_tree_root(struct ds_context *ctx, art_tree *node)
{
fprintf(ctx->output, "art_tree 0x%" PRIxPTR " {\n"
" size=%" PRId64 ";\n root=0x%" PRIxPTR ";\n}\n",
(uintptr_t)node, node->size, (uintptr_t)(node->root));
}
static void
dump_art_node(struct ds_context *ctx, art_node *node)
{
fprintf(ctx->output, "art_node 0x%" PRIxPTR " {\n"
" type=%s;\n"
" num_children=%d;\n"
" partial_len=%d;\n"
" partial=[%s];\n"
"}\n",
(uintptr_t)node, art_node_types[node->type].name,
node->num_children, node->partial_len,
asciidump(node->partial, node->partial_len));
}
static void
dump_art_node4(struct ds_context *ctx, art_node4 *node)
{
int i;
fprintf(ctx->output, "art_node4 0x%" PRIxPTR " {\n", (uintptr_t)node);
dump_art_node(ctx, &(node->n));
for (i = 0; i < node->n.num_children; i++) {
fprintf(ctx->output, " key[%d]=%s;\n",
i, asciidump(&(node->keys[i]), 1));
fprintf(ctx->output, " child[%d]=0x%" PRIxPTR ";\n",
i, (uintptr_t)(node->children[i]));
}
fprintf(ctx->output, "}\n");
}
static void
dump_art_node16(struct ds_context *ctx, art_node16 *node)
{
int i;
fprintf(ctx->output, "art_node16 0x%" PRIxPTR " {\n", (uintptr_t)node);
dump_art_node(ctx, &(node->n));
for (i = 0; i < node->n.num_children; i++) {
fprintf(ctx->output, " key[%d]=%s;\n",
i, asciidump(&(node->keys[i]), 1));
fprintf(ctx->output, " child[%d]=0x%" PRIxPTR ";\n",
i, (uintptr_t)(node->children[i]));
}
fprintf(ctx->output, "}\n");
}
static void
dump_art_node48(struct ds_context *ctx, art_node48 *node)
{
int i;
int idx;
fprintf(ctx->output, "art_node48 0x%" PRIxPTR " {\n", (uintptr_t)node);
dump_art_node(ctx, &(node->n));
for (i = 0; i < 256; i++) {
idx = node->keys[i];
if (!idx)
continue;
fprintf(ctx->output, " key[%d]=%s;\n",
i, asciidump((unsigned char *)(&i), 1));
fprintf(ctx->output, " child[%d]=0x%" PRIxPTR ";\n",
idx, (uintptr_t)(node->children[idx]));
}
fprintf(ctx->output, "}\n");
}
static void
dump_art_node256(struct ds_context *ctx, art_node256 *node)
{
int i;
fprintf(ctx->output, "art_node48 0x%" PRIxPTR " {\n", (uintptr_t)node);
dump_art_node(ctx, &(node->n));
for (i = 0; i < 256; i++) {
if (node->children[i] == NULL)
continue;
fprintf(ctx->output, " key[%i]=%s;\n",
i, asciidump((unsigned char *)(&i), 1));
fprintf(ctx->output, " child[%d]=0x%" PRIxPTR ";\n",
i, (uintptr_t)(node->children[i]));
}
fprintf(ctx->output, "}\n");
}
static void
dump_art_leaf(struct ds_context *ctx, art_leaf *node)
{
fprintf(ctx->output, "art_leaf 0x%" PRIxPTR " {\n"
" key_len=%u;\n"
" key=[%s];\n",
(uintptr_t)node,
node->key_len, asciidump(node->key, (int32_t)node->key_len));
fprintf(ctx->output, " val_len=%u;\n"
" value=[%s];\n"
"}\n",
node->val_len, asciidump(node->value, (int32_t)node->val_len));
}
static void
arttree_examine(struct ds_context *ctx, void *addr, int node_type)
{
if (addr == NULL)
return;
switch (node_type) {
case ART_TREE_ROOT:
dump_art_tree_root(ctx, (art_tree *)addr);
break;
case ART_NODE:
dump_art_node(ctx, (art_node *)addr);
break;
case ART_NODE4:
dump_art_node4(ctx, (art_node4 *)addr);
break;
case ART_NODE16:
dump_art_node16(ctx, (art_node16 *)addr);
break;
case ART_NODE48:
dump_art_node48(ctx, (art_node48 *)addr);
break;
case ART_NODE256:
dump_art_node256(ctx, (art_node256 *)addr);
break;
case ART_LEAF:
dump_art_leaf(ctx, (art_leaf *)addr);
break;
default: break;
}
fflush(ctx->output);
}
static int
arttree_examine_func(char *appname, struct ds_context *ctx, int ac, char *av[])
{
int errors = 0;
(void) appname;
if (ac > 1) {
if (ac < 3) {
outv_err("examine: missing argument\n");
arttree_examine_help(appname);
errors++;
} else {
ctx->address = (uint64_t)strtol(av[1], NULL, 0);
ctx->type = map_lookup(&(art_node_types[0]), av[2]);
}
} else {
ctx->address = (uint64_t)ctx->art_tree;
ctx->type = ART_TREE_ROOT;
}
if (!errors) {
if (ctx->output == NULL)
ctx->output = stdout;
arttree_examine(ctx, (void *)(ctx->address), ctx->type);
}
return errors;
}
static void
arttree_examine_help(char *appname)
{
(void) appname;
printf("examine structures of an art tree\n");
printf("Usage: examine <address> <type>\n");
printf(" <address> address of art tree structure to examine\n");
printf(" <type> input file for key-val pairs\n");
printf("Known types are\n art_tree\n art_node\n"
" art_node4\n art_node16\n art_node48\n art_node256\n"
" art_leaf\n");
printf("If invoked without arguments, then the root of the art tree"
" is dumped\n");
}
static int
arttree_search_func(char *appname, struct ds_context *ctx, int ac, char *av[])
{
void *p;
int errors = 0;
(void) appname;
if (ac > 1) {
ctx->key = (unsigned char *)strdup(av[1]);
assert(ctx->key != NULL);
} else {
outv_err("search: missing key\n");
arttree_search_help(appname);
errors++;
}
if (!errors) {
if (ctx->output == NULL)
ctx->output = stdout;
p = art_search(ctx->art_tree, ctx->key,
(int)strlen((const char *)ctx->key));
if (p != NULL) {
fprintf(ctx->output, "found key [%s]: ",
asciidump(ctx->key,
strlen((const char *)ctx->key)));
fprintf(ctx->output, "value [%s]\n",
asciidump((unsigned char *)p, 20));
} else {
fprintf(ctx->output, "not found key [%s]\n",
asciidump(ctx->key,
strlen((const char *)ctx->key)));
}
}
return errors;
}
static void
arttree_search_help(char *appname)
{
(void) appname;
printf("search for key in art tree\n");
printf("Usage: search <key>\n");
printf(" <key> the key to search for\n");
}
static int
arttree_delete_func(char *appname, struct ds_context *ctx, int ac, char *av[])
{
void *p;
int errors = 0;
(void) appname;
if (ac > 1) {
ctx->key = (unsigned char *)strdup(av[1]);
assert(ctx->key != NULL);
} else {
outv_err("delete: missing key\n");
arttree_delete_help(appname);
errors++;
}
if (!errors) {
		if (ctx->output == NULL)
			ctx->output = stdout;
p = art_delete(ctx->vmp, ctx->art_tree, ctx->key,
(int)strlen((const char *)ctx->key));
if (p != NULL) {
fprintf(ctx->output, "delete leaf with key [%s]:",
asciidump(ctx->key,
strlen((const char *)ctx->key)));
fprintf(ctx->output, " value [%s]\n",
asciidump((unsigned char *)p, 20));
} else {
fprintf(ctx->output, "no leaf with key [%s]\n",
asciidump(ctx->key,
strlen((const char *)ctx->key)));
}
}
return errors;
}
static void
arttree_delete_help(char *appname)
{
(void) appname;
printf("delete leaf with key from art tree\n");
printf("Usage: delete <key>\n");
printf(" <key> the key of the leaf to delete\n");
}
static int
arttree_dump_func(char *appname, struct ds_context *ctx, int ac, char *av[])
{
(void) appname;
(void) ac;
(void) av;
art_iter(ctx->art_tree, dump_art_leaf_callback, NULL);
return 0;
}
static void
arttree_dump_help(char *appname)
{
(void) appname;
printf("dump all leafs of an art tree\n");
printf("Usage: dump\n");
printf("\nThis function uses the art_iter() interface to descend\n");
printf("to all leafs of the art tree\n");
}
static int
arttree_graph_func(char *appname, struct ds_context *ctx, int ac, char *av[])
{
(void) appname;
(void) ac;
(void) av;
fprintf(ctx->output, "digraph g {\nrankdir=LR;\n");
art_iter2(ctx->art_tree, dump_art_tree_graph, NULL);
fprintf(ctx->output, "}\n");
return 0;
}
static void
arttree_graph_help(char *appname)
{
(void) appname;
printf("dump art tree for graphical output (graphiviz/dot)\n");
printf("Usage: graph\n");
printf("\nThis function uses the art_iter2() interface to descend\n");
printf("through the art tree and produces output for graphviz/dot\n");
}
int
main(int argc, char *argv[])
{
if (initialize_context(&my_context, argc, argv) != 0) {
usage(argv[0]);
return 1;
}
if (art_tree_map_init(&myds, &my_context) != 0) {
fprintf(stderr, "failed to initialize memory pool file\n");
return 1;
}
if (my_context.vmp == NULL) {
perror("pool initialization");
return 1;
}
my_context.art_tree = (art_tree *)vmem_malloc(my_context.vmp,
sizeof(art_tree));
assert(my_context.art_tree != NULL);
if (art_tree_init(my_context.art_tree)) {
perror("art tree setup");
return 1;
}
if ((my_context.mode & INTERACTIVE)) {
char *line;
ssize_t read;
		size_t len = 0;	/* getline requires an initialized size */
char *args[20];
int nargs;
struct command *cmdp;
/* interactive mode: read commands and execute them */
line = NULL;
printf("\n> ");
while ((read = getline(&line, &len, stdin)) != -1) {
if (line[read - 1] == '\n') {
line[read - 1] = '\0';
}
args[0] = strtok(line, " ");
cmdp = get_command(args[0]);
if (cmdp == NULL) {
printf("[%s]: command not supported\n",
args[0] ? args[0] : "NULL");
printf("\n> ");
continue;
}
nargs = 1;
while (1) {
args[nargs] = strtok(NULL, " ");
if (args[nargs] == NULL) {
break;
}
nargs++;
}
(void) cmdp->func(APPNAME, &my_context, nargs, args);
printf("\n> ");
}
if (line != NULL) {
free(line);
}
}
if ((my_context.mode & FILL)) {
if (add_elements(&my_context)) {
perror("add elements");
return 1;
}
}
exit_handler(&my_context);
return 0;
}
int
add_elements(struct ds_context *ctx)
{
int errors = 0;
int i;
int key_len;
int val_len;
unsigned char *key;
unsigned char *value;
if (ctx == NULL) {
errors++;
} else if (ctx->vmp == NULL) {
errors++;
}
if (!errors) {
for (i = 0; i < ctx->insertions; i++) {
key = NULL;
value = NULL;
key_len = read_key(ctx, &key);
val_len = read_value(ctx, &value);
art_insert(ctx->vmp, ctx->art_tree,
key, key_len, value, val_len);
if (key != NULL)
free(key);
if (value != NULL)
free(value);
}
}
return errors;
}
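/*
 * Editor's note (illustration): add_elements() reads alternating
 * single-line keys and values, so a hypothetical input file for
 * "fill -n 2" would look like:
 *
 *	apple
 *	red
 *	banana
 *	yellow
 */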
ssize_t
read_line(struct ds_context *ctx, unsigned char **line)
{
	size_t len = 0;	/* getline requires a sane initial size */
ssize_t read = -1;
*line = NULL;
if ((read = getline((char **)line, &len, ctx->input)) > 0) {
(*line)[read - 1] = '\0';
}
return read - 1;
}
static int
dump_art_leaf_callback(void *data,
const unsigned char *key, uint32_t key_len,
const unsigned char *val, uint32_t val_len)
{
fprintf(my_context.output, "key len %" PRIu32 " = [%s], ",
key_len, asciidump((unsigned char *)key, key_len));
fprintf(my_context.output, "value len %" PRIu32 " = [%s]\n",
val_len, asciidump((unsigned char *)val, val_len));
fflush(my_context.output);
return 0;
}
/*
* Macros to manipulate pointer tags
*/
#define IS_LEAF(x) (((uintptr_t)(x) & 1))
#define SET_LEAF(x) ((void *)((uintptr_t)(x) | 1))
#define LEAF_RAW(x) ((void *)((uintptr_t)(x) & ~1))
unsigned char hexvals[] = {
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f,
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f,
0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7,
0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf,
0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7,
0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf,
0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7,
0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf,
0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7,
0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf,
0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7,
0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef,
0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7,
0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff,
};
static void
print_node_info(char *nodetype, uint64_t addr, art_node *an)
{
int p_len;
p_len = an->partial_len;
fprintf(my_context.output,
"N%" PRIx64 " [label=\"%s at\\n0x%" PRIx64 "\\n%d children",
addr, nodetype, addr, an->num_children);
	if (p_len != 0) {
		fprintf(my_context.output, "\\nlen %d", p_len);
		fprintf(my_context.output, ": %s",
			asciidump(an->partial, p_len));
	}
fprintf(my_context.output, "\"];\n");
}
static int
dump_art_tree_graph(void *data,
const unsigned char *key, uint32_t key_len,
const unsigned char *val, uint32_t val_len)
{
cb_data *cbd;
art_node4 *an4;
art_node16 *an16;
art_node48 *an48;
art_node256 *an256;
art_leaf *al;
void *child;
int idx;
if (data == NULL)
return 0;
cbd = (cb_data *)data;
if (IS_LEAF(cbd->node)) {
al = LEAF_RAW(cbd->node);
fprintf(my_context.output,
"N%" PRIxPTR " [shape=box, "
"label=\"leaf at\\n0x%" PRIxPTR "\"];\n",
(uintptr_t)al, (uintptr_t)al);
fprintf(my_context.output,
"N%" PRIxPTR " [shape=box, "
"label=\"key at 0x%" PRIxPTR ": %s\"];\n",
(uintptr_t)al->key, (uintptr_t)al->key,
asciidump(al->key, al->key_len));
fprintf(my_context.output,
"N%" PRIxPTR " [shape=box, "
"label=\"value at 0x%" PRIxPTR ": %s\"];\n",
(uintptr_t)al->value, (uintptr_t)al->value,
asciidump(al->value, al->val_len));
fprintf(my_context.output,
"N%" PRIxPTR " -> N%" PRIxPTR ";\n",
(uintptr_t)al, (uintptr_t)al->key);
fprintf(my_context.output,
"N%" PRIxPTR " -> N%" PRIxPTR ";\n",
(uintptr_t)al, (uintptr_t)al->value);
return 0;
}
switch (cbd->node_type) {
case NODE4:
an4 = (art_node4 *)cbd->node;
child = (void *)(an4->children[cbd->child_idx]);
child = LEAF_RAW(child);
if (child != NULL) {
if (cbd->first_child)
print_node_info("node4",
(uint64_t)(cbd->node), &(an4->n));
fprintf(my_context.output,
"N%" PRIxPTR " -> N%" PRIxPTR " [label=\"%s\"];\n",
(uintptr_t)an4, (uintptr_t)child,
asciidump(&(an4->keys[cbd->child_idx]), 1));
}
break;
case NODE16:
an16 = (art_node16 *)cbd->node;
child = (void *)(an16->children[cbd->child_idx]);
child = LEAF_RAW(child);
if (child != NULL) {
if (cbd->first_child)
print_node_info("node16",
(uint64_t)(cbd->node), &(an16->n));
fprintf(my_context.output,
"N%" PRIxPTR " -> N%" PRIxPTR " [label=\"%s\"];\n",
(uintptr_t)an16, (uintptr_t)child,
asciidump(&(an16->keys[cbd->child_idx]), 1));
}
break;
case NODE48:
an48 = (art_node48 *)cbd->node;
idx = an48->keys[cbd->child_idx];
child = (void *) (an48->children[idx - 1]);
child = LEAF_RAW(child);
if (child != NULL) {
if (cbd->first_child)
print_node_info("node48",
(uint64_t)(cbd->node), &(an48->n));
fprintf(my_context.output,
"N%" PRIxPTR " -> N%" PRIxPTR " [label=\"%s\"];\n",
(uintptr_t)an48, (uintptr_t)child,
asciidump(&(hexvals[cbd->child_idx]), 1));
}
break;
case NODE256:
an256 = (art_node256 *)cbd->node;
child = (void *)(an256->children[cbd->child_idx]);
child = LEAF_RAW(child);
if (child != NULL) {
if (cbd->first_child)
print_node_info("node256",
(uint64_t)(cbd->node), &(an256->n));
fprintf(my_context.output,
"N%" PRIxPTR " -> N%" PRIxPTR " [label=\"%s\"];\n",
(uintptr_t)an256, (uintptr_t)child,
asciidump(&(hexvals[cbd->child_idx]), 1));
}
break;
default:
break;
}
return 0;
}
/*
* outv_err -- print error message
*/
void
outv_err(const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
outv_err_vargs(fmt, ap);
va_end(ap);
}
/*
 * outv_err_vargs -- print error message (va_list variant)
*/
void
outv_err_vargs(const char *fmt, va_list ap)
{
fprintf(stderr, "error: ");
vfprintf(stderr, fmt, ap);
if (!strchr(fmt, '\n'))
fprintf(stderr, "\n");
}
| 31,704 | 23.128615 | 79 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/examples/libvmem/libart/art.h
|
/*
* Copyright 2016, FUJITSU TECHNOLOGY SOLUTIONS GMBH
* Copyright 2012, Armon Dadgar. All rights reserved.
* Copyright 2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* ==========================================================================
*
* Filename: art.h
*
* Description: implement ART tree using libvmem based on libart
*
* Author: Andreas Bluemle, Dieter Kasper
* Andreas.Bluemle.external@ts.fujitsu.com
* dieter.kasper@ts.fujitsu.com
*
* Organization: FUJITSU TECHNOLOGY SOLUTIONS GMBH
* ==========================================================================
*/
/*
* based on https://github.com/armon/libart/src/art.h
*/
#include <stdint.h>
#ifndef ART_H
#define ART_H
#ifdef __cplusplus
extern "C" {
#endif
#define NODE4 1
#define NODE16 2
#define NODE48 3
#define NODE256 4
#define MAX_PREFIX_LEN 10
#if defined(__GNUC__) && !defined(__clang__)
#if __STDC_VERSION__ >= 199901L && 402 == (__GNUC__ * 100 + __GNUC_MINOR__)
/*
* GCC 4.2.2's C99 inline keyword support is pretty broken; avoid. Introduced in
* GCC 4.2.something, fixed in 4.3.0. So checking for specific major.minor of
* 4.2 is fine.
*/
#define BROKEN_GCC_C99_INLINE
#endif
#endif
typedef int(*art_callback)(void *data, const unsigned char *key,
uint32_t key_len, const unsigned char *value,
uint32_t val_len);
/*
* This struct is included as part
* of all the various node sizes
*/
typedef struct {
uint8_t type;
uint8_t num_children;
uint32_t partial_len;
unsigned char partial[MAX_PREFIX_LEN];
} art_node;
/*
* Small node with only 4 children
*/
typedef struct {
art_node n;
unsigned char keys[4];
art_node *children[4];
} art_node4;
/*
* Node with 16 children
*/
typedef struct {
art_node n;
unsigned char keys[16];
art_node *children[16];
} art_node16;
/*
* Node with 48 children, but
* a full 256 byte field.
*/
typedef struct {
art_node n;
unsigned char keys[256];
art_node *children[48];
} art_node48;
/*
* Full node with 256 children
*/
typedef struct {
art_node n;
art_node *children[256];
} art_node256;
/*
* Represents a leaf. These are
* of arbitrary size, as they include the key.
*/
typedef struct {
uint32_t key_len;
uint32_t val_len;
unsigned char *key;
unsigned char *value;
unsigned char data[];
} art_leaf;
/*
* Main struct, points to root.
*/
typedef struct {
art_node *root;
uint64_t size;
} art_tree;
/*
* Initializes an ART tree
* @return 0 on success.
*/
int art_tree_init(art_tree *t);
/*
* DEPRECATED
* Initializes an ART tree
* @return 0 on success.
*/
#define init_art_tree(...) art_tree_init(__VA_ARGS__)
/*
* Destroys an ART tree
* @return 0 on success.
*/
int art_tree_destroy(VMEM *vmp, art_tree *t);
/*
* Returns the size of the ART tree.
*/
#ifdef BROKEN_GCC_C99_INLINE
#define art_size(t) ((t)->size)
#else
static inline uint64_t art_size(art_tree *t) {
return t->size;
}
#endif
/*
* Inserts a new value into the ART tree
* @arg t The tree
* @arg key The key
* @arg key_len The length of the key
* @arg value Opaque value.
* @return NULL if the item was newly inserted, otherwise
* the old value pointer is returned.
*/
void *art_insert(VMEM *vmp, art_tree *t, const unsigned char *key,
int key_len, void *value, int val_len);
/*
* Deletes a value from the ART tree
* @arg t The tree
* @arg key The key
* @arg key_len The length of the key
* @return NULL if the item was not found, otherwise
* the value pointer is returned.
*/
void *art_delete(VMEM *vmp, art_tree *t, const unsigned char *key,
int key_len);
/*
* Searches for a value in the ART tree
* @arg t The tree
* @arg key The key
* @arg key_len The length of the key
* @return NULL if the item was not found, otherwise
* the value pointer is returned.
*/
void *art_search(const art_tree *t, const unsigned char *key, int key_len);
/*
* Returns the minimum valued leaf
* @return The minimum leaf or NULL
*/
art_leaf *art_minimum(art_tree *t);
/*
* Returns the maximum valued leaf
* @return The maximum leaf or NULL
*/
art_leaf *art_maximum(art_tree *t);
/*
 * Iterates through the key/value pairs in the map,
 * invoking a callback for each. The callback gets a
 * key and value for each pair and returns an integer stop value.
* If the callback returns non-zero, then the iteration stops.
* @arg t The tree to iterate over
* @arg cb The callback function to invoke
* @arg data Opaque handle passed to the callback
* @return 0 on success, or the return of the callback.
*/
int art_iter(art_tree *t, art_callback cb, void *data);
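/*
 * Minimal usage sketch (illustrative only; error handling is omitted,
 * "vmp" is a VMEM pool assumed to be created elsewhere, e.g. with
 * vmem_create(3), and "my_cb" is a user-supplied art_callback):
 *
 *	art_tree t;
 *	art_tree_init(&t);
 *	art_insert(vmp, &t, (const unsigned char *)"key", 4,
 *		(void *)"value", 6);
 *	char *val = art_search(&t, (const unsigned char *)"key", 4);
 *	art_iter(&t, my_cb, NULL);
 *	art_tree_destroy(vmp, &t);
 */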
typedef struct _cb_data {
int node_type;
int child_idx;
int first_child;
void *node;
} cb_data;
int art_iter2(art_tree *t, art_callback cb, void *data);
/*
 * Iterates through the key/value pairs in the map,
 * invoking a callback for each pair whose key matches a given prefix.
 * The callback gets a key and value for each pair and returns an integer
 * stop value.
* If the callback returns non-zero, then the iteration stops.
* @arg t The tree to iterate over
* @arg prefix The prefix of keys to read
* @arg prefix_len The length of the prefix
* @arg cb The callback function to invoke
* @arg data Opaque handle passed to the callback
* @return 0 on success, or the return of the callback.
*/
int art_iter_prefix(art_tree *t, const unsigned char *prefix, int prefix_len,
art_callback cb, void *data);
#ifdef __cplusplus
}
#endif
#endif
| 6,963 | 25.279245 | 80 |
h
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/examples/libvmem/libart/arttree.h
|
/*
* Copyright 2016, FUJITSU TECHNOLOGY SOLUTIONS GMBH
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* ==========================================================================
*
* Filename: arttree.h
*
* Description: implement ART tree using libvmem based on libart
*
* Author: Andreas Bluemle, Dieter Kasper
* Andreas.Bluemle.external@ts.fujitsu.com
* dieter.kasper@ts.fujitsu.com
*
* Organization: FUJITSU TECHNOLOGY SOLUTIONS GMBH
* ==========================================================================
*/
#ifndef _ARTTREE_H
#define _ARTTREE_H
#ifdef __cplusplus
extern "C" {
#endif
#include "art.h"
#ifdef __cplusplus
}
#endif
#endif /* _ARTTREE_H */
| 2,239 | 35.721311 | 77 |
h
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/librpmem/rpmem_ssh.h
|
/*
* Copyright 2016-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* rpmem_ssh.h -- rpmem ssh transport layer header file
*/
#ifndef RPMEM_SSH_H
#define RPMEM_SSH_H 1
#include <stddef.h>
#ifdef __cplusplus
extern "C" {
#endif
struct rpmem_ssh;
struct rpmem_ssh *rpmem_ssh_open(const struct rpmem_target_info *info);
struct rpmem_ssh *rpmem_ssh_exec(const struct rpmem_target_info *info, ...);
struct rpmem_ssh *rpmem_ssh_execv(const struct rpmem_target_info *info,
const char **argv);
int rpmem_ssh_close(struct rpmem_ssh *rps);
int rpmem_ssh_send(struct rpmem_ssh *rps, const void *buff, size_t len);
int rpmem_ssh_recv(struct rpmem_ssh *rps, void *buff, size_t len);
int rpmem_ssh_monitor(struct rpmem_ssh *rps, int nonblock);
const char *rpmem_ssh_strerror(struct rpmem_ssh *rps, int oerrno);
#ifdef __cplusplus
}
#endif
#endif
| 2,381 | 36.21875 | 76 |
h
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/librpmem/rpmem_fip.h
|
/*
* Copyright 2016-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* rpmem_fip.h -- rpmem libfabric provider module header file
*/
#ifndef RPMEM_FIP_H
#define RPMEM_FIP_H
#include <stdint.h>
#include <netinet/in.h>
#include <sys/types.h>
#include <sys/socket.h>
#ifdef __cplusplus
extern "C" {
#endif
struct rpmem_fip;
struct rpmem_fip_attr {
enum rpmem_provider provider;
enum rpmem_persist_method persist_method;
void *laddr;
size_t size;
size_t buff_size;
unsigned nlanes;
void *raddr;
uint64_t rkey;
};
struct rpmem_fip *rpmem_fip_init(const char *node, const char *service,
struct rpmem_fip_attr *attr, unsigned *nlanes);
void rpmem_fip_fini(struct rpmem_fip *fip);
int rpmem_fip_connect(struct rpmem_fip *fip);
int rpmem_fip_close(struct rpmem_fip *fip);
int rpmem_fip_process_start(struct rpmem_fip *fip);
int rpmem_fip_process_stop(struct rpmem_fip *fip);
int rpmem_fip_persist(struct rpmem_fip *fip, size_t offset, size_t len,
unsigned lane, unsigned flags);
int rpmem_fip_read(struct rpmem_fip *fip, void *buff,
size_t len, size_t off, unsigned lane);
void rpmem_fip_probe_fork_safety(int *fork_unsafe);
#ifdef __cplusplus
}
#endif
#endif
| 2,714 | 31.710843 | 74 |
h
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/librpmem/rpmem.c
|
/*
* Copyright 2016-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* rpmem.c -- main source file for librpmem
*/
#include <stdlib.h>
#include <netdb.h>
#include <stdio.h>
#include <errno.h>
#include <limits.h>
#include <inttypes.h>
#include "librpmem.h"
#include "out.h"
#include "os.h"
#include "os_thread.h"
#include "util.h"
#include "rpmem_common.h"
#include "rpmem_util.h"
#include "rpmem_obc.h"
#include "rpmem_fip.h"
#include "rpmem_fip_common.h"
#include "rpmem_ssh.h"
#include "rpmem_proto.h"
#define RPMEM_REMOVE_FLAGS_ALL (\
RPMEM_REMOVE_FORCE | \
RPMEM_REMOVE_POOL_SET \
)
#define RPMEM_CHECK_FORK() do {\
if (Rpmem_fork_unsafe) {\
ERR("libfabric is initialized without fork() support");\
return NULL;\
}\
} while (0)
/*
* rpmem_pool -- remote pool context
*/
struct rpmem_pool {
struct rpmem_obc *obc; /* out-of-band connection handle */
struct rpmem_fip *fip; /* fabric provider handle */
struct rpmem_target_info *info;
char fip_service[NI_MAXSERV];
enum rpmem_provider provider;
os_thread_t monitor;
int closing;
int no_headers;
/*
* Last error code, need to be volatile because it can
* be accessed by multiple threads.
*/
volatile int error;
};
/*
* env_get_bool -- parse value of specified environment variable as a bool
*
* Return values:
* 0 - defined, valp has value
* 1 - not defined
* -1 - parsing error
*/
static int
env_get_bool(const char *name, int *valp)
{
LOG(3, "name %s, valp %p", name, valp);
const char *env = os_getenv(name);
if (!env)
return 1;
char *endptr;
errno = 0;
long val = strtol(env, &endptr, 10);
if (*endptr != '\0' || errno)
goto err;
if (val < INT_MIN || val > INT_MAX)
goto err;
*valp = (int)val;
return 0;
err:
RPMEM_LOG(ERR, "!parsing '%s' environment variable failed", name);
return -1;
}
/*
* rpmem_get_provider -- returns provider based on node address and environment
*/
static enum rpmem_provider
rpmem_get_provider(const char *node)
{
LOG(3, "node %s", node);
struct rpmem_fip_probe probe;
enum rpmem_provider prov = RPMEM_PROV_UNKNOWN;
int ret = rpmem_fip_probe_get(node, &probe);
if (ret)
return prov;
/*
* The sockets provider can be used only if specified environment
* variable is set to 1.
*/
if (rpmem_fip_probe(probe, RPMEM_PROV_LIBFABRIC_SOCKETS)) {
int enable;
ret = env_get_bool(RPMEM_PROV_SOCKET_ENV, &enable);
if (!ret && enable) {
prov = RPMEM_PROV_LIBFABRIC_SOCKETS;
}
}
/*
* The verbs provider is enabled by default. If appropriate
* environment variable is set to 0, the verbs provider is disabled.
*
* The verbs provider has higher priority than sockets provider.
*/
if (rpmem_fip_probe(probe, RPMEM_PROV_LIBFABRIC_VERBS)) {
int enable;
ret = env_get_bool(RPMEM_PROV_VERBS_ENV, &enable);
if (ret == 1 || (!ret && enable))
prov = RPMEM_PROV_LIBFABRIC_VERBS;
}
return prov;
}
/*
* rpmem_monitor_thread -- connection monitor background thread
*/
static void *
rpmem_monitor_thread(void *arg)
{
LOG(3, "arg %p", arg);
RPMEMpool *rpp = arg;
int ret = rpmem_obc_monitor(rpp->obc, 0);
if (ret && !rpp->closing) {
RPMEM_LOG(ERR, "unexpected data received");
rpp->error = errno;
}
return NULL;
}
/*
* rpmem_common_init -- common routine for initialization
*/
static RPMEMpool *
rpmem_common_init(const char *target)
{
LOG(3, "target %s", target);
int ret;
RPMEMpool *rpp = calloc(1, sizeof(*rpp));
if (!rpp) {
ERR("!calloc");
goto err_malloc_rpmem;
}
rpp->info = rpmem_target_parse(target);
if (!rpp->info) {
ERR("!parsing target node address failed");
goto err_target_split;
}
rpp->provider = rpmem_get_provider(rpp->info->node);
if (rpp->provider == RPMEM_PROV_UNKNOWN) {
errno = ENOMEDIUM;
ERR("cannot find provider");
goto err_provider;
}
RPMEM_LOG(NOTICE, "provider: %s", rpmem_provider_to_str(rpp->provider));
if (rpp->provider == RPMEM_PROV_LIBFABRIC_SOCKETS) {
/* libfabric's sockets provider does not support IPv6 */
RPMEM_LOG(NOTICE, "forcing using IPv4");
rpp->info->flags |= RPMEM_FLAGS_USE_IPV4;
}
rpp->obc = rpmem_obc_init();
if (!rpp->obc) {
ERR("!out-of-band connection initialization failed");
goto err_obc_init;
}
RPMEM_LOG(INFO, "establishing out-of-band connection");
ret = rpmem_obc_connect(rpp->obc, rpp->info);
if (ret) {
ERR("!out-of-band connection failed");
goto err_obc_connect;
}
RPMEM_LOG(NOTICE, "out-of-band connection established");
return rpp;
err_obc_connect:
rpmem_obc_fini(rpp->obc);
err_obc_init:
err_provider:
rpmem_target_free(rpp->info);
err_target_split:
free(rpp);
err_malloc_rpmem:
return NULL;
}
/*
 * rpmem_common_fini -- common routine for deinitialization
*/
static void
rpmem_common_fini(RPMEMpool *rpp, int join)
{
LOG(3, "rpp %p, join %d", rpp, join);
rpmem_obc_disconnect(rpp->obc);
if (join) {
int ret = os_thread_join(&rpp->monitor, NULL);
if (ret) {
errno = ret;
ERR("joining monitor thread failed");
}
}
rpmem_obc_fini(rpp->obc);
rpmem_target_free(rpp->info);
free(rpp);
}
/*
* rpmem_common_fip_init -- common routine for initializing fabric provider
*/
static int
rpmem_common_fip_init(RPMEMpool *rpp, struct rpmem_req_attr *req,
struct rpmem_resp_attr *resp, void *pool_addr, size_t pool_size,
unsigned *nlanes, size_t buff_size)
{
LOG(3, "rpp %p, req %p, resp %p, pool_addr %p, pool_size %zu, nlanes "
"%p", rpp, req, resp, pool_addr, pool_size, nlanes);
int ret;
struct rpmem_fip_attr fip_attr = {
.provider = req->provider,
.persist_method = resp->persist_method,
.laddr = pool_addr,
.size = pool_size,
.buff_size = buff_size,
.nlanes = min(*nlanes, resp->nlanes),
.raddr = (void *)resp->raddr,
.rkey = resp->rkey,
};
ret = snprintf(rpp->fip_service, sizeof(rpp->fip_service),
"%u", resp->port);
if (ret <= 0) {
ERR("snprintf: %d", ret);
goto err_port;
}
rpp->fip = rpmem_fip_init(rpp->info->node, rpp->fip_service,
&fip_attr, nlanes);
if (!rpp->fip) {
ERR("!in-band connection initialization failed");
ret = -1;
goto err_fip_init;
}
RPMEM_LOG(NOTICE, "final nlanes: %u", *nlanes);
RPMEM_LOG(INFO, "establishing in-band connection");
ret = rpmem_fip_connect(rpp->fip);
if (ret) {
ERR("!establishing in-band connection failed");
goto err_fip_connect;
}
RPMEM_LOG(NOTICE, "in-band connection established");
return 0;
err_fip_connect:
rpmem_fip_fini(rpp->fip);
err_fip_init:
err_port:
return ret;
}
/*
* rpmem_common_fip_fini -- common routine for deinitializing fabric provider
*/
static void
rpmem_common_fip_fini(RPMEMpool *rpp)
{
LOG(3, "rpp %p", rpp);
RPMEM_LOG(INFO, "closing in-band connection");
rpmem_fip_fini(rpp->fip);
RPMEM_LOG(NOTICE, "in-band connection closed");
}
/*
* rpmem_log_args -- log input arguments for rpmem_create and rpmem_open
*/
static void
rpmem_log_args(const char *req, const char *target, const char *pool_set_name,
void *pool_addr, size_t pool_size, unsigned nlanes)
{
LOG(3, "req %s, target %s, pool_set_name %s, pool_addr %p, pool_size "
"%zu, nlanes %d", req, target, pool_set_name, pool_addr,
pool_size, nlanes);
RPMEM_LOG(NOTICE, "%s request:", req);
RPMEM_LOG(NOTICE, "\ttarget: %s", target);
RPMEM_LOG(NOTICE, "\tpool set: %s", pool_set_name);
RPMEM_LOG(INFO, "\tpool addr: %p", pool_addr);
RPMEM_LOG(INFO, "\tpool size: %lu", pool_size);
RPMEM_LOG(NOTICE, "\tnlanes: %u", nlanes);
}
/*
* rpmem_log_resp -- log response attributes
*/
static void
rpmem_log_resp(const char *req, const struct rpmem_resp_attr *resp)
{
LOG(3, "req %s, resp %p", req, resp);
RPMEM_LOG(NOTICE, "%s request response:", req);
RPMEM_LOG(NOTICE, "\tnlanes: %u", resp->nlanes);
RPMEM_LOG(NOTICE, "\tport: %u", resp->port);
RPMEM_LOG(NOTICE, "\tpersist method: %s",
rpmem_persist_method_to_str(resp->persist_method));
RPMEM_LOG(NOTICE, "\tremote addr: 0x%" PRIx64, resp->raddr);
}
/*
* rpmem_check_args -- validate user's arguments
*/
static int
rpmem_check_args(void *pool_addr, size_t pool_size, unsigned *nlanes)
{
LOG(3, "pool_addr %p, pool_size %zu, nlanes %p", pool_addr, pool_size,
nlanes);
if (!pool_addr) {
errno = EINVAL;
ERR("invalid pool address");
return -1;
}
if (!IS_PAGE_ALIGNED((uintptr_t)pool_addr)) {
errno = EINVAL;
ERR("Pool address must be aligned to page size (%llu)",
Pagesize);
return -1;
}
if (!IS_PAGE_ALIGNED(pool_size)) {
errno = EINVAL;
ERR("Pool size must be aligned to page size (%llu)",
Pagesize);
return -1;
}
if (!pool_size) {
errno = EINVAL;
ERR("invalid pool size");
return -1;
}
if (!nlanes) {
errno = EINVAL;
ERR("lanes pointer cannot be NULL");
return -1;
}
if (!(*nlanes)) {
errno = EINVAL;
ERR("number of lanes must be positive");
return -1;
}
return 0;
}
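/*
 * Example (illustrative sketch, not part of the library; target and pool
 * set name below are placeholders): a caller can satisfy the alignment
 * requirements checked above by allocating the local pool memory with
 * posix_memalign(3) using the system page size:
 *
 *	void *pool_addr;
 *	size_t pool_size = 8 << 20;
 *	if (posix_memalign(&pool_addr, (size_t)sysconf(_SC_PAGESIZE),
 *			pool_size))
 *		abort();
 *	unsigned nlanes = 4;
 *	RPMEMpool *rpp = rpmem_create("user@host", "pool.set",
 *			pool_addr, pool_size, &nlanes, NULL);
 */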
/*
* rpmem_create -- create remote pool on target node
*
* target -- target node in format [<user>@]<target_name>[:<port>]
* pool_set_name -- remote pool set name
* pool_addr -- local pool memory address which will be replicated
* pool_size -- required pool size
* nlanes -- number of lanes
* create_attr -- pool attributes used for creating the pool on remote node
*/
RPMEMpool *
rpmem_create(const char *target, const char *pool_set_name,
void *pool_addr, size_t pool_size, unsigned *nlanes,
const struct rpmem_pool_attr *create_attr)
{
LOG(3, "target %s, pool_set_name %s, pool_addr %p, pool_size %zu, "
"nlanes %p, create_attr %p", target, pool_set_name,
pool_addr, pool_size, nlanes, create_attr);
RPMEM_CHECK_FORK();
rpmem_log_args("create", target, pool_set_name,
pool_addr, pool_size, *nlanes);
if (rpmem_check_args(pool_addr, pool_size, nlanes))
return NULL;
RPMEMpool *rpp = rpmem_common_init(target);
if (!rpp)
goto err_common_init;
size_t buff_size = RPMEM_DEF_BUFF_SIZE;
struct rpmem_req_attr req = {
.pool_size = pool_size,
.nlanes = min(*nlanes, Rpmem_max_nlanes),
.provider = rpp->provider,
.pool_desc = pool_set_name,
.buff_size = buff_size,
};
struct rpmem_resp_attr resp;
int ret = rpmem_obc_create(rpp->obc, &req, &resp, create_attr);
if (ret) {
RPMEM_LOG(ERR, "!create request failed");
goto err_obc_create;
}
if (create_attr == NULL ||
util_is_zeroed(create_attr, sizeof(*create_attr)))
rpp->no_headers = 1;
rpmem_log_resp("create", &resp);
ret = rpmem_common_fip_init(rpp, &req, &resp,
pool_addr, pool_size, nlanes, buff_size);
if (ret)
goto err_fip_init;
ret = os_thread_create(&rpp->monitor, NULL, rpmem_monitor_thread, rpp);
if (ret) {
errno = ret;
ERR("!starting monitor thread");
goto err_monitor;
}
return rpp;
err_monitor:
rpmem_common_fip_fini(rpp);
err_fip_init:
rpmem_obc_close(rpp->obc, RPMEM_CLOSE_FLAGS_REMOVE);
err_obc_create:
rpmem_common_fini(rpp, 0);
err_common_init:
return NULL;
}
/*
* rpmem_open -- open remote pool on target node
*
* target -- target node in format [<user>@]<target_name>[:<port>]
* pool_set_name -- remote pool set name
* pool_addr -- local pool memory address which will be replicated
* pool_size -- required pool size
* nlanes -- number of lanes
* open_attr -- pool attributes, received from remote host
*/
RPMEMpool *
rpmem_open(const char *target, const char *pool_set_name,
void *pool_addr, size_t pool_size, unsigned *nlanes,
struct rpmem_pool_attr *open_attr)
{
LOG(3, "target %s, pool_set_name %s, pool_addr %p, pool_size %zu, "
"nlanes %p, create_attr %p", target, pool_set_name,
pool_addr, pool_size, nlanes, open_attr);
RPMEM_CHECK_FORK();
rpmem_log_args("open", target, pool_set_name,
pool_addr, pool_size, *nlanes);
if (rpmem_check_args(pool_addr, pool_size, nlanes))
return NULL;
RPMEMpool *rpp = rpmem_common_init(target);
if (!rpp)
goto err_common_init;
size_t buff_size = RPMEM_DEF_BUFF_SIZE;
struct rpmem_req_attr req = {
.pool_size = pool_size,
.nlanes = min(*nlanes, Rpmem_max_nlanes),
.provider = rpp->provider,
.pool_desc = pool_set_name,
.buff_size = buff_size,
};
struct rpmem_resp_attr resp;
int ret = rpmem_obc_open(rpp->obc, &req, &resp, open_attr);
if (ret) {
RPMEM_LOG(ERR, "!open request failed");
goto err_obc_create;
}
if (open_attr == NULL || util_is_zeroed(open_attr, sizeof(*open_attr)))
rpp->no_headers = 1;
rpmem_log_resp("open", &resp);
ret = rpmem_common_fip_init(rpp, &req, &resp,
pool_addr, pool_size, nlanes, buff_size);
if (ret)
goto err_fip_init;
ret = os_thread_create(&rpp->monitor, NULL, rpmem_monitor_thread, rpp);
if (ret) {
errno = ret;
ERR("!starting monitor thread");
goto err_monitor;
}
return rpp;
err_monitor:
rpmem_common_fip_fini(rpp);
err_fip_init:
rpmem_obc_close(rpp->obc, 0);
err_obc_create:
rpmem_common_fini(rpp, 0);
err_common_init:
return NULL;
}
/*
* rpmem_close -- close remote pool on target node
*/
int
rpmem_close(RPMEMpool *rpp)
{
LOG(3, "rpp %p", rpp);
RPMEM_LOG(INFO, "closing out-of-band connection");
util_fetch_and_or32(&rpp->closing, 1);
rpmem_fip_close(rpp->fip);
int ret = rpmem_obc_close(rpp->obc, 0);
if (ret)
ERR("!close request failed");
RPMEM_LOG(NOTICE, "out-of-band connection closed");
rpmem_common_fip_fini(rpp);
rpmem_common_fini(rpp, 1);
return ret;
}
/*
* rpmem_persist -- persist operation on target node
*
* rpp -- remote pool handle
* offset -- offset in pool
* length -- length of persist operation
* lane -- lane number
*/
int
rpmem_persist(RPMEMpool *rpp, size_t offset, size_t length,
unsigned lane, unsigned flags)
{
LOG(3, "rpp %p, offset %zu, length %zu, lane %d, flags 0x%x",
rpp, offset, length, lane, flags);
if (unlikely(rpp->error)) {
errno = rpp->error;
return -1;
}
if (flags & RPMEM_FLAGS_MASK) {
ERR("invalid flags (0x%x)", flags);
errno = EINVAL;
return -1;
}
if (rpp->no_headers == 0 && offset < RPMEM_HDR_SIZE) {
ERR("offset (%zu) in pool is less than %d bytes", offset,
RPMEM_HDR_SIZE);
errno = EINVAL;
return -1;
}
/*
* By default use RDMA SEND persist mode which has atomicity
* guarantees. For relaxed persist use RDMA WRITE.
*/
unsigned mode = RPMEM_PERSIST_SEND;
if (flags & RPMEM_PERSIST_RELAXED)
mode = RPMEM_PERSIST_WRITE;
int ret = rpmem_fip_persist(rpp->fip, offset, length,
lane, mode);
if (unlikely(ret)) {
ERR("persist operation failed");
rpp->error = ret;
errno = rpp->error;
return -1;
}
return 0;
}
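/*
 * Example (illustrative sketch): flags == 0 selects the default
 * RPMEM_PERSIST_SEND mode with its atomicity guarantees, while
 * RPMEM_PERSIST_RELAXED selects the cheaper RDMA WRITE path:
 *
 *	if (rpmem_persist(rpp, offset, length, lane, 0))
 *		return -1;
 *	if (rpmem_persist(rpp, offset, length, lane, RPMEM_PERSIST_RELAXED))
 *		return -1;
 */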
/*
* rpmem_deep_persist -- deep flush operation on target node
*
* rpp -- remote pool handle
* offset -- offset in pool
* length -- length of deep flush operation
* lane -- lane number
*/
int
rpmem_deep_persist(RPMEMpool *rpp, size_t offset, size_t length, unsigned lane)
{
LOG(3, "rpp %p, offset %zu, length %zu, lane %d", rpp, offset, length,
lane);
if (unlikely(rpp->error)) {
errno = rpp->error;
return -1;
}
if (offset < RPMEM_HDR_SIZE) {
ERR("offset (%zu) in pool is less than %d bytes", offset,
RPMEM_HDR_SIZE);
errno = EINVAL;
return -1;
}
int ret = rpmem_fip_persist(rpp->fip, offset, length,
lane, RPMEM_DEEP_PERSIST);
if (unlikely(ret)) {
ERR("persist operation failed");
rpp->error = ret;
errno = rpp->error;
return -1;
}
return 0;
}
/*
 * rpmem_read -- read data from remote pool
*
* rpp -- remote pool handle
* buff -- output buffer
* offset -- offset in pool
* length -- length of read operation
*/
int
rpmem_read(RPMEMpool *rpp, void *buff, size_t offset,
size_t length, unsigned lane)
{
LOG(3, "rpp %p, buff %p, offset %zu, length %zu, lane %d", rpp, buff,
offset, length, lane);
if (unlikely(rpp->error)) {
errno = rpp->error;
return -1;
}
if (rpp->no_headers == 0 && offset < RPMEM_HDR_SIZE)
LOG(1, "reading from pool at offset (%zu) less than %d bytes",
offset, RPMEM_HDR_SIZE);
int ret = rpmem_fip_read(rpp->fip, buff, length, offset, lane);
if (unlikely(ret)) {
errno = ret;
ERR("!read operation failed");
rpp->error = ret;
return -1;
}
return 0;
}
/*
* rpmem_set_attr -- overwrite pool attributes on the remote node
*
* rpp -- remote pool handle
* attr -- new pool attributes for the pool on remote node
*/
int
rpmem_set_attr(RPMEMpool *rpp, const struct rpmem_pool_attr *attr)
{
LOG(3, "rpp %p, attr %p", rpp, attr);
if (unlikely(rpp->error)) {
errno = rpp->error;
return -1;
}
int ret = rpmem_obc_set_attr(rpp->obc, attr);
if (ret) {
RPMEM_LOG(ERR, "!set attributes request failed");
}
return ret;
}
/*
* rpmem_remove -- remove pool from remote node
*
* target -- target node in format [<user>@]<target_name>[:<port>]
* pool_set_name -- remote pool set name
* flags -- bitwise OR of one or more of the following flags:
* - RPMEM_REMOVE_FORCE
* - RPMEM_REMOVE_POOL_SET
*/
int
rpmem_remove(const char *target, const char *pool_set, int flags)
{
LOG(3, "target %s, pool_set %s, flags %d", target, pool_set, flags);
if (flags & ~(RPMEM_REMOVE_FLAGS_ALL)) {
ERR("invalid flags specified");
errno = EINVAL;
return -1;
}
struct rpmem_target_info *info = rpmem_target_parse(target);
if (!info) {
ERR("!parsing target node address failed");
goto err_target;
}
const char *argv[5];
argv[0] = "--remove";
argv[1] = pool_set;
const char **cur = &argv[2];
if (flags & RPMEM_REMOVE_FORCE)
*cur++ = "--force";
if (flags & RPMEM_REMOVE_POOL_SET)
*cur++ = "--pool-set";
*cur = NULL;
struct rpmem_ssh *ssh = rpmem_ssh_execv(info, argv);
if (!ssh) {
ERR("!executing ssh command failed");
goto err_ssh_exec;
}
int ret;
ret = rpmem_ssh_monitor(ssh, 0);
if (ret) {
ERR("!waiting for remote command failed");
goto err_ssh_monitor;
}
ret = rpmem_ssh_close(ssh);
if (ret) {
errno = EINVAL;
ERR("remote command failed");
goto err_ssh_close;
}
rpmem_target_free(info);
return 0;
err_ssh_monitor:
rpmem_ssh_close(ssh);
err_ssh_close:
err_ssh_exec:
rpmem_target_free(info);
err_target:
return -1;
}
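/*
 * Example (illustrative sketch; target and pool set name are
 * placeholders): force-remove a remote pool together with its pool set
 * file:
 *
 *	if (rpmem_remove("user@host", "pool.set",
 *			RPMEM_REMOVE_FORCE | RPMEM_REMOVE_POOL_SET))
 *		return -1;
 */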
| 19,528 | 22.557298 | 79 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/librpmem/rpmem_fip.c
|
/*
* Copyright 2016-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* rpmem_fip.c -- rpmem libfabric provider module source file
*/
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <limits.h>
#include <rdma/fabric.h>
#include <rdma/fi_domain.h>
#include <rdma/fi_endpoint.h>
#include <rdma/fi_cm.h>
#include <rdma/fi_errno.h>
#include <rdma/fi_rma.h>
#include "out.h"
#include "util.h"
#include "os_thread.h"
#include "os.h"
#include "rpmem_common.h"
#include "rpmem_fip_common.h"
#include "rpmem_proto.h"
#include "rpmem_util.h"
#include "rpmem_fip_msg.h"
#include "rpmem_fip.h"
#include "valgrind_internal.h"
#define RPMEM_FI_ERR(e, fmt, args...)\
ERR(fmt ": %s", ## args, fi_strerror((e)))
#define RPMEM_FI_CLOSE(f, fmt, args...) (\
{\
int oerrno = errno;\
int ret = fi_close(&(f)->fid);\
if (ret)\
RPMEM_FI_ERR(ret, fmt, ## args);\
errno = oerrno;\
ret;\
})
#define LANE_ALIGN_SIZE 64
#define LANE_ALIGN __attribute__((aligned(LANE_ALIGN_SIZE)))
#define RPMEM_RAW_BUFF_SIZE 4096
#define RPMEM_RAW_SIZE 8
typedef ssize_t (*rpmem_fip_persist_fn)(struct rpmem_fip *fip, size_t offset,
size_t len, unsigned lane, unsigned flags);
typedef int (*rpmem_fip_process_fn)(struct rpmem_fip *fip,
void *context, uint64_t flags);
typedef int (*rpmem_fip_init_fn)(struct rpmem_fip *fip);
typedef void (*rpmem_fip_fini_fn)(struct rpmem_fip *fip);
typedef ssize_t (*cq_read_fn)(struct fid_cq *cq, void *buf, size_t count);
static ssize_t
cq_read_infinite(struct fid_cq *cq, void *buf, size_t count)
{
return fi_cq_sread(cq, buf, count, NULL, -1);
}
/*
* rpmem_fip_ops -- operations specific for persistency method
*/
struct rpmem_fip_ops {
rpmem_fip_persist_fn persist;
rpmem_fip_process_fn process;
rpmem_fip_init_fn lanes_init;
rpmem_fip_init_fn lanes_init_mem;
rpmem_fip_fini_fn lanes_fini;
rpmem_fip_init_fn lanes_post;
};
/*
* rpmem_fip_lane -- base lane structure
*/
struct rpmem_fip_lane {
struct fid_ep *ep; /* endpoint */
struct fid_cq *cq; /* completion queue */
uint64_t event;
};
/*
* rpmem_fip_plane -- persist operation's lane
*/
struct rpmem_fip_plane {
struct rpmem_fip_lane base; /* base lane structure */
struct rpmem_fip_rma write; /* WRITE message */
struct rpmem_fip_rma read; /* READ message */
struct rpmem_fip_msg send; /* SEND message */
struct rpmem_fip_msg recv; /* RECV message */
} LANE_ALIGN;
/*
* rpmem_fip_rlane -- read operation's lane
*/
struct rpmem_fip_rlane {
struct rpmem_fip_lane base; /* base lane structure */
struct rpmem_fip_rma read; /* READ message */
};
struct rpmem_fip {
struct fi_info *fi; /* fabric interface information */
struct fid_fabric *fabric; /* fabric domain */
struct fid_domain *domain; /* fabric protection domain */
struct fid_eq *eq; /* event queue */
int closing; /* closing connections in progress */
size_t cq_size; /* completion queue size */
uint64_t raddr; /* remote memory base address */
uint64_t rkey; /* remote memory protection key */
void *laddr; /* local memory base address */
size_t size; /* memory size */
struct fid_mr *mr; /* local memory region */
void *mr_desc; /* local memory descriptor */
enum rpmem_persist_method persist_method;
struct rpmem_fip_ops *ops;
unsigned nlanes;
size_t buff_size;
struct rpmem_fip_plane *lanes;
os_thread_t monitor;
void *pmsg; /* persist message buffer */
size_t pmsg_size;
struct fid_mr *pmsg_mr; /* persist message memory region */
void *pmsg_mr_desc; /* persist message memory descriptor */
struct rpmem_msg_persist_resp *pres; /* persist response buffer */
struct fid_mr *pres_mr; /* persist response memory region */
void *pres_mr_desc; /* persist response memory descriptor */
void *raw_buff; /* READ-after-WRITE buffer */
struct fid_mr *raw_mr; /* RAW memory region */
void *raw_mr_desc; /* RAW memory descriptor */
cq_read_fn cq_read; /* CQ read function */
};
/*
* rpmem_fip_is_closing -- (internal) atomically reads and returns the
* closing flag
*/
static inline int
rpmem_fip_is_closing(struct rpmem_fip *fip)
{
int ret;
util_atomic_load_explicit32(&fip->closing, &ret, memory_order_acquire);
return ret;
}
/*
* rpmem_fip_set_closing -- (internal) atomically set the closing flag
*/
static inline void
rpmem_fip_set_closing(struct rpmem_fip *fip)
{
/*
	 * A load and a store without barriers should be good enough here;
	 * fetch_and_or is used as a workaround for a helgrind issue.
*/
util_fetch_and_or32(&fip->closing, 1);
}
/*
 * rpmem_fip_lane_begin -- (internal) initialize list of events for lane
*/
static inline void
rpmem_fip_lane_begin(struct rpmem_fip_lane *lanep, uint64_t event)
{
lanep->event = event;
}
/*
* rpmem_fip_lane_init -- (internal) initialize single lane
*/
static int
rpmem_fip_lane_init(struct rpmem_fip *fip, struct rpmem_fip_lane *lanep)
{
int ret;
struct fi_cq_attr cq_attr = {
.size = fip->cq_size,
.flags = 0,
.format = FI_CQ_FORMAT_MSG,
.wait_obj = FI_WAIT_UNSPEC,
.signaling_vector = 0,
.wait_cond = FI_CQ_COND_NONE,
.wait_set = NULL,
};
/* create a completion queue */
ret = fi_cq_open(fip->domain, &cq_attr, &lanep->cq, NULL);
if (ret) {
RPMEM_FI_ERR(ret, "opening completion queue");
goto err_cq_open;
}
/* create an endpoint */
ret = fi_endpoint(fip->domain, fip->fi, &lanep->ep, NULL);
if (ret) {
RPMEM_FI_ERR(ret, "allocating endpoint");
goto err_endpoint;
}
/*
* Bind an event queue to an endpoint to get
* connection-related events for the endpoint.
*/
ret = fi_ep_bind(lanep->ep, &fip->eq->fid, 0);
if (ret) {
RPMEM_FI_ERR(ret, "binding event queue to endpoint");
goto err_ep_bind_eq;
}
/*
* Bind a completion queue to an endpoint to get completion
* events of specified inbound/outbound operations.
*
* FI_SELECTIVE_COMPLETION means all inbound/outbound operations
* must explicitly specify if the completion event should be
* generated or not using FI_COMPLETION flag.
*
* The completion events received are highly related to the
* persistency method used and are configured in lanes
* initialization specified for persistency method utilized.
*/
ret = fi_ep_bind(lanep->ep, &lanep->cq->fid,
FI_RECV | FI_TRANSMIT | FI_SELECTIVE_COMPLETION);
if (ret) {
RPMEM_FI_ERR(ret, "binding completion queue to endpoint");
goto err_ep_bind_cq;
}
/*
* Enable endpoint so it is possible to post inbound/outbound
* operations if required.
*/
ret = fi_enable(lanep->ep);
if (ret) {
RPMEM_FI_ERR(ret, "activating endpoint");
goto err_fi_enable;
}
return 0;
err_fi_enable:
err_ep_bind_cq:
err_ep_bind_eq:
err_endpoint:
RPMEM_FI_CLOSE(lanep->cq, "closing completion queue");
err_cq_open:
return -1;
}
/*
* rpmem_fip_lane_fini -- (internal) deinitialize single lane
*/
static int
rpmem_fip_lane_fini(struct rpmem_fip_lane *lanep)
{
int ret;
int lret = 0;
ret = RPMEM_FI_CLOSE(lanep->ep, "closing endpoint");
if (ret)
lret = ret;
ret = RPMEM_FI_CLOSE(lanep->cq, "closing completion queue");
if (ret)
lret = ret;
return lret;
}
/*
* rpmem_fip_lane_wait -- (internal) wait for specific event on completion queue
*/
static int
rpmem_fip_lane_wait(struct rpmem_fip *fip, struct rpmem_fip_lane *lanep,
uint64_t e)
{
ssize_t sret = 0;
struct fi_cq_err_entry err;
const char *str_err;
int ret = 0;
struct fi_cq_msg_entry cq_entry;
while (lanep->event & e) {
if (unlikely(rpmem_fip_is_closing(fip)))
return ECONNRESET;
sret = fip->cq_read(lanep->cq, &cq_entry, 1);
if (unlikely(sret == -FI_EAGAIN) || sret == 0)
continue;
if (unlikely(sret < 0)) {
ret = (int)sret;
goto err_cq_read;
}
lanep->event &= ~cq_entry.flags;
}
return 0;
err_cq_read:
sret = fi_cq_readerr(lanep->cq, &err, 0);
if (sret < 0) {
RPMEM_FI_ERR((int)sret, "error reading from completion queue: "
"cannot read error from event queue");
goto err;
}
str_err = fi_cq_strerror(lanep->cq, err.prov_errno, NULL, NULL, 0);
RPMEM_LOG(ERR, "error reading from completion queue: %s", str_err);
err:
if (unlikely(rpmem_fip_is_closing(fip)))
return ECONNRESET; /* it will be passed to errno */
return ret;
}
/*
* rpmem_fip_set_nlanes -- (internal) set maximum number of lanes supported
*/
static void
rpmem_fip_set_nlanes(struct rpmem_fip *fip, unsigned nlanes)
{
size_t max_nlanes = rpmem_fip_max_nlanes(fip->fi);
RPMEM_ASSERT(max_nlanes < UINT_MAX);
fip->nlanes = min((unsigned)max_nlanes, nlanes);
}
/*
* rpmem_fip_getinfo -- (internal) get fabric interface information
*/
static int
rpmem_fip_getinfo(struct rpmem_fip *fip, const char *node, const char *service,
enum rpmem_provider provider, enum rpmem_persist_method pm)
{
int ret = -1;
struct fi_info *hints = rpmem_fip_get_hints(provider);
if (!hints) {
RPMEM_LOG(ERR, "!getting fabric interface information hints");
goto err_hints;
}
hints->tx_attr->size = rpmem_fip_tx_size(pm, RPMEM_FIP_NODE_CLIENT);
hints->rx_attr->size = rpmem_fip_rx_size(pm, RPMEM_FIP_NODE_CLIENT);
ret = fi_getinfo(RPMEM_FIVERSION, node, service,
0, hints, &fip->fi);
if (ret) {
RPMEM_FI_ERR(ret, "getting fabric interface information");
goto err_fi_getinfo;
}
rpmem_fip_print_info(fip->fi);
	/* fall through to free the hints */
err_fi_getinfo:
fi_freeinfo(hints);
err_hints:
return ret;
}
/*
* rpmem_fip_init_fabric_res -- (internal) initialize common fabric resources
*/
static int
rpmem_fip_init_fabric_res(struct rpmem_fip *fip)
{
int ret;
ret = fi_fabric(fip->fi->fabric_attr, &fip->fabric, NULL);
if (ret) {
RPMEM_FI_ERR(ret, "opening fabric domain");
goto err_fi_fabric;
}
ret = fi_domain(fip->fabric, fip->fi, &fip->domain, NULL);
if (ret) {
RPMEM_FI_ERR(ret, "opening fabric access domain");
goto err_fi_domain;
}
struct fi_eq_attr eq_attr = {
.size = 0, /* use default value */
.flags = 0,
.wait_obj = FI_WAIT_UNSPEC,
.signaling_vector = 0,
.wait_set = NULL,
};
ret = fi_eq_open(fip->fabric, &eq_attr, &fip->eq, NULL);
if (ret) {
RPMEM_FI_ERR(ret, "opening event queue");
goto err_eq_open;
}
return 0;
err_eq_open:
RPMEM_FI_CLOSE(fip->domain, "closing fabric access domain");
err_fi_domain:
RPMEM_FI_CLOSE(fip->fabric, "closing fabric domain");
err_fi_fabric:
return ret;
}
/*
* rpmem_fip_fini_fabric_res -- (internal) deinitialize common fabric resources
*/
static void
rpmem_fip_fini_fabric_res(struct rpmem_fip *fip)
{
RPMEM_FI_CLOSE(fip->eq, "closing event queue");
RPMEM_FI_CLOSE(fip->domain, "closing fabric access domain");
RPMEM_FI_CLOSE(fip->fabric, "closing fabric domain");
}
/*
* rpmem_fip_init_memory -- (internal) initialize common memory resources
*/
static int
rpmem_fip_init_memory(struct rpmem_fip *fip)
{
ASSERTne(Pagesize, 0);
int ret;
/*
* Register local memory space. The local memory will be used
* with WRITE operation in rpmem_fip_persist function thus
* the FI_WRITE access flag.
*/
ret = fi_mr_reg(fip->domain, fip->laddr, fip->size,
FI_WRITE, 0, 0, 0, &fip->mr, NULL);
if (ret) {
RPMEM_FI_ERR(ret, "registrating memory");
return ret;
}
/* get local memory descriptor */
fip->mr_desc = fi_mr_desc(fip->mr);
return 0;
}
/*
* rpmem_fip_fini_memory -- (internal) deinitialize common memory resources
*/
static void
rpmem_fip_fini_memory(struct rpmem_fip *fip)
{
RPMEM_FI_CLOSE(fip->mr, "unregistering memory");
}
/*
* rpmem_fip_lanes_init_common -- (internal) initialize common lanes resources
*/
static int
rpmem_fip_lanes_init_common(struct rpmem_fip *fip)
{
int ret;
fip->lanes = calloc(fip->nlanes, sizeof(*fip->lanes));
if (!fip->lanes) {
RPMEM_LOG(ERR, "!allocating lanes");
goto err_alloc_lanes;
}
unsigned i;
for (i = 0; i < fip->nlanes; i++) {
ret = rpmem_fip_lane_init(fip, &fip->lanes[i].base);
if (ret)
goto err_lane_init;
}
return 0;
err_lane_init:
for (unsigned j = 0; j < i; j++)
		rpmem_fip_lane_fini(&fip->lanes[j].base);
free(fip->lanes);
err_alloc_lanes:
return -1;
}
/*
* rpmem_fip_lanes_fini_common -- (internal) deinitialize common lanes
 * resources
*/
static int
rpmem_fip_lanes_fini_common(struct rpmem_fip *fip)
{
int lret = 0;
int ret;
for (unsigned i = 0; i < fip->nlanes; i++) {
ret = rpmem_fip_lane_fini(&fip->lanes[i].base);
if (ret)
lret = ret;
}
free(fip->lanes);
return lret;
}
/*
* rpmem_fip_lanes_init -- (internal) initialize lanes
*/
static int
rpmem_fip_lanes_init(struct rpmem_fip *fip)
{
int ret;
ret = rpmem_fip_lanes_init_common(fip);
if (ret)
return ret;
ret = fip->ops->lanes_init(fip);
if (ret)
goto err_init_lanes;
return 0;
err_init_lanes:
rpmem_fip_lanes_fini_common(fip);
return ret;
}
/*
* rpmem_fip_lane_connect -- (internal) connect on a single lane
*/
static int
rpmem_fip_lane_connect(struct rpmem_fip *fip, struct rpmem_fip_lane *lanep)
{
struct fi_eq_cm_entry entry;
int ret;
ret = fi_connect(lanep->ep, fip->fi->dest_addr, NULL, 0);
if (ret) {
RPMEM_FI_ERR(ret, "initiating connection request");
return ret;
}
return rpmem_fip_read_eq_check(fip->eq, &entry, FI_CONNECTED,
&lanep->ep->fid,
RPMEM_CONNECT_TIMEOUT);
}
/*
* rpmem_fip_lanes_connect -- (internal) establish connections on all lanes
*/
static int
rpmem_fip_lanes_connect(struct rpmem_fip *fip)
{
int ret;
for (unsigned i = 0; i < fip->nlanes; i++) {
struct rpmem_fip_lane *lanep = &fip->lanes[i].base;
ret = rpmem_fip_lane_connect(fip, lanep);
if (ret)
return ret;
}
return 0;
}
/*
* rpmem_fip_lanes_shutdown -- shutdown all endpoints
*/
static int
rpmem_fip_lanes_shutdown(struct rpmem_fip *fip)
{
int ret;
int lret = 0;
for (unsigned i = 0; i < fip->nlanes; i++) {
ret = fi_shutdown(fip->lanes[i].base.ep, 0);
if (ret) {
RPMEM_FI_ERR(ret, "disconnecting endpoint");
lret = ret;
}
}
return lret;
}
/*
* rpmem_fip_monitor_thread -- (internal) monitor in-band connection
*/
static void *
rpmem_fip_monitor_thread(void *arg)
{
struct rpmem_fip *fip = (struct rpmem_fip *)arg;
struct fi_eq_cm_entry entry;
uint32_t event;
int ret;
while (!rpmem_fip_is_closing(fip)) {
ret = rpmem_fip_read_eq(fip->eq, &entry, &event,
RPMEM_MONITOR_TIMEOUT);
if (unlikely(ret == 0) && event == FI_SHUTDOWN) {
RPMEM_LOG(ERR, "event queue got FI_SHUTDOWN");
/* mark in-band connection as closing */
rpmem_fip_set_closing(fip);
for (unsigned i = 0; i < fip->nlanes; i++) {
fi_cq_signal(fip->lanes[i].base.cq);
}
}
}
return NULL;
}
/*
* rpmem_fip_monitor_init -- (internal) initialize in-band monitor
*/
static int
rpmem_fip_monitor_init(struct rpmem_fip *fip)
{
errno = os_thread_create(&fip->monitor, NULL, rpmem_fip_monitor_thread,
fip);
if (errno) {
RPMEM_LOG(ERR, "!connenction monitor thread");
return -1;
}
return 0;
}
/*
* rpmem_fip_monitor_fini -- (internal) finalize in-band monitor
*/
static int
rpmem_fip_monitor_fini(struct rpmem_fip *fip)
{
rpmem_fip_set_closing(fip);
int ret = os_thread_join(&fip->monitor, NULL);
if (ret) {
RPMEM_LOG(ERR, "joining monitor thread failed");
}
return ret;
}
/*
* rpmem_fip_init_lanes_common -- (internal) initialize lanes
*/
static int
rpmem_fip_init_lanes_common(struct rpmem_fip *fip)
{
ASSERTne(Pagesize, 0);
int ret = 0;
/* allocate persist messages buffer */
fip->pmsg_size = roundup(sizeof(struct rpmem_msg_persist) +
fip->buff_size, (size_t)64);
size_t msg_size = fip->nlanes * fip->pmsg_size;
msg_size = PAGE_ALIGNED_UP_SIZE(msg_size);
errno = posix_memalign((void **)&fip->pmsg, Pagesize, msg_size);
if (errno) {
RPMEM_LOG(ERR, "!allocating messages buffer");
ret = -1;
goto err_malloc_pmsg;
}
/*
* Register persist messages buffer. The persist messages
* are sent to daemon thus the FI_SEND access flag.
*/
ret = fi_mr_reg(fip->domain, fip->pmsg, msg_size, FI_SEND,
0, 0, 0, &fip->pmsg_mr, NULL);
if (ret) {
RPMEM_FI_ERR(ret, "registering messages buffer");
goto err_fi_mr_reg_pmsg;
}
/* get persist messages buffer local descriptor */
fip->pmsg_mr_desc = fi_mr_desc(fip->pmsg_mr);
/* allocate persist response messages buffer */
size_t msg_resp_size = fip->nlanes *
sizeof(struct rpmem_msg_persist_resp);
msg_resp_size = PAGE_ALIGNED_UP_SIZE(msg_resp_size);
errno = posix_memalign((void **)&fip->pres, Pagesize, msg_resp_size);
if (errno) {
RPMEM_LOG(ERR, "!allocating messages response buffer");
ret = -1;
goto err_malloc_pres;
}
/*
* Register persist messages response buffer. The persist response
* messages are received from daemon thus the FI_RECV access flag.
*/
ret = fi_mr_reg(fip->domain, fip->pres, msg_resp_size, FI_RECV,
0, 0, 0, &fip->pres_mr, NULL);
if (ret) {
RPMEM_FI_ERR(ret, "registering messages response buffer");
goto err_fi_mr_reg_pres;
}
/* get persist response messages buffer local descriptor */
fip->pres_mr_desc = fi_mr_desc(fip->pres_mr);
return 0;
err_fi_mr_reg_pres:
free(fip->pres);
err_malloc_pres:
RPMEM_FI_CLOSE(fip->pmsg_mr, "unregistering messages buffer");
err_fi_mr_reg_pmsg:
free(fip->pmsg);
err_malloc_pmsg:
return ret;
}
/*
* rpmem_fip_get_pmsg -- return persist message buffer
*/
static inline struct rpmem_msg_persist *
rpmem_fip_get_pmsg(struct rpmem_fip *fip, size_t idx)
{
return (struct rpmem_msg_persist *)
((uintptr_t)fip->pmsg + idx * fip->pmsg_size);
}
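/*
 * Layout note: fip->pmsg is one contiguous buffer with one persist
 * message slot per lane, each slot padded up to fip->pmsg_size (a
 * multiple of 64, see rpmem_fip_init_lanes_common()). For example, if
 * pmsg_size were 4096, lane 2's slot would start at pmsg + 8192.
 */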
/*
 * rpmem_fip_init_mem_lanes_gpspm -- initialize lanes' RMA structures
*/
static int
rpmem_fip_init_mem_lanes_gpspm(struct rpmem_fip *fip)
{
/*
* Initialize all required structures for:
* WRITE, SEND and RECV operations.
*
* If the completion is required the FI_COMPLETION flag and
* appropriate context should be used.
*
* In GPSPM only the RECV and SEND completions are required.
*
* For RECV the context is RECV operation structure used for
* fi_recvmsg(3) function call.
*
* For SEND the context is lane structure.
*
* The received buffer contains a lane id which is used
* to obtain a lane which must be signaled that operation
* has been completed.
*/
unsigned i;
for (i = 0; i < fip->nlanes; i++) {
/* WRITE */
rpmem_fip_rma_init(&fip->lanes[i].write,
fip->mr_desc, 0,
fip->rkey,
&fip->lanes[i],
0);
/* SEND */
rpmem_fip_msg_init(&fip->lanes[i].send,
fip->pmsg_mr_desc, 0,
&fip->lanes[i],
rpmem_fip_get_pmsg(fip, i),
0 /* size must be provided when sending msg */,
FI_COMPLETION);
/* RECV */
rpmem_fip_msg_init(&fip->lanes[i].recv,
fip->pres_mr_desc, 0,
&fip->lanes[i].recv,
&fip->pres[i],
sizeof(fip->pres[i]),
FI_COMPLETION);
}
return 0;
}
/*
* rpmem_fip_fini_lanes_common -- (internal) deinitialize lanes for GPSPM
*/
static void
rpmem_fip_fini_lanes_common(struct rpmem_fip *fip)
{
RPMEM_FI_CLOSE(fip->pmsg_mr, "unregistering messages buffer");
RPMEM_FI_CLOSE(fip->pres_mr, "unregistering messages "
"response buffer");
free(fip->pmsg);
free(fip->pres);
}
/*
* rpmem_fip_init_lanes_apm -- (internal) initialize lanes for APM
*/
static int
rpmem_fip_init_lanes_apm(struct rpmem_fip *fip)
{
ASSERTne(Pagesize, 0);
int ret;
ret = rpmem_fip_init_lanes_common(fip);
if (ret)
goto err_init_lanes_common;
ASSERT(IS_PAGE_ALIGNED(RPMEM_RAW_BUFF_SIZE));
errno = posix_memalign((void **)&fip->raw_buff, Pagesize,
RPMEM_RAW_BUFF_SIZE);
if (errno) {
RPMEM_LOG(ERR, "!allocating APM RAW buffer");
goto err_malloc_raw;
}
/* register read-after-write buffer */
ret = fi_mr_reg(fip->domain, fip->raw_buff, RPMEM_RAW_BUFF_SIZE,
FI_REMOTE_WRITE, 0, 0, 0, &fip->raw_mr, NULL);
if (ret) {
RPMEM_FI_ERR(ret, "registering APM read buffer");
goto err_fi_raw_mr;
}
/* get read-after-write buffer local descriptor */
fip->raw_mr_desc = fi_mr_desc(fip->raw_mr);
return 0;
err_fi_raw_mr:
free(fip->raw_buff);
err_malloc_raw:
rpmem_fip_fini_lanes_common(fip);
err_init_lanes_common:
return -1;
}
/*
 * rpmem_fip_init_mem_lanes_apm -- initialize lanes' RMA structures
*/
static int
rpmem_fip_init_mem_lanes_apm(struct rpmem_fip *fip)
{
/*
* Initialize all required structures for:
* WRITE and READ operations.
*
* If the completion is required the FI_COMPLETION flag and
* appropriate context should be used.
*
* In APM only the READ completion is required.
* The context is a lane structure.
*/
for (unsigned i = 0; i < fip->nlanes; i++) {
/* WRITE */
rpmem_fip_rma_init(&fip->lanes[i].write,
fip->mr_desc, 0,
fip->rkey,
&fip->lanes[i],
0);
/* READ */
rpmem_fip_rma_init(&fip->lanes[i].read,
fip->raw_mr_desc, 0,
fip->rkey,
&fip->lanes[i],
FI_COMPLETION);
/* SEND */
rpmem_fip_msg_init(&fip->lanes[i].send,
fip->pmsg_mr_desc, 0,
&fip->lanes[i],
rpmem_fip_get_pmsg(fip, i),
fip->pmsg_size,
FI_COMPLETION);
/* RECV */
rpmem_fip_msg_init(&fip->lanes[i].recv,
fip->pres_mr_desc, 0,
&fip->lanes[i].recv,
&fip->pres[i],
sizeof(fip->pres[i]),
FI_COMPLETION);
}
return 0;
}
/*
* rpmem_fip_fini_lanes_apm -- (internal) deinitialize lanes for APM
*/
static void
rpmem_fip_fini_lanes_apm(struct rpmem_fip *fip)
{
RPMEM_FI_CLOSE(fip->raw_mr, "unregistering APM read buffer");
free(fip->raw_buff);
rpmem_fip_fini_lanes_common(fip);
}
/*
* rpmem_fip_persist_raw -- (internal) perform persist operation using
* READ after WRITE mechanism
*/
static int
rpmem_fip_persist_raw(struct rpmem_fip *fip, size_t offset,
size_t len, unsigned lane)
{
struct rpmem_fip_plane *lanep = &fip->lanes[lane];
int ret;
void *laddr = (void *)((uintptr_t)fip->laddr + offset);
uint64_t raddr = fip->raddr + offset;
rpmem_fip_lane_begin(&lanep->base, FI_READ);
/* WRITE for requested memory region */
ret = rpmem_fip_writemsg(lanep->base.ep,
&lanep->write, laddr, len, raddr);
if (unlikely(ret)) {
RPMEM_FI_ERR(ret, "RMA write");
return ret;
}
/* READ to read-after-write buffer */
ret = rpmem_fip_readmsg(lanep->base.ep, &lanep->read, fip->raw_buff,
RPMEM_RAW_SIZE, fip->raddr);
if (unlikely(ret)) {
RPMEM_FI_ERR(ret, "RMA read");
return ret;
}
/* wait for READ completion */
ret = rpmem_fip_lane_wait(fip, &lanep->base, FI_READ);
if (unlikely(ret)) {
ERR("waiting for READ completion failed");
return ret;
}
return ret;
}
/*
* rpmem_fip_post_resp -- (internal) post persist response message buffer
*/
static inline int
rpmem_fip_post_resp(struct rpmem_fip *fip,
struct rpmem_fip_plane *lanep)
{
int ret = rpmem_fip_recvmsg(lanep->base.ep, &lanep->recv);
if (unlikely(ret)) {
RPMEM_FI_ERR(ret, "posting recv buffer");
return ret;
}
return 0;
}
/*
* rpmem_fip_persist_saw -- (internal) perform persist operation using
* SEND after WRITE mechanism
*/
static int
rpmem_fip_persist_saw(struct rpmem_fip *fip, size_t offset,
size_t len, unsigned lane, unsigned flags)
{
struct rpmem_fip_plane *lanep = &fip->lanes[lane];
void *laddr = (void *)((uintptr_t)fip->laddr + offset);
uint64_t raddr = fip->raddr + offset;
struct rpmem_msg_persist *msg;
int ret;
ret = rpmem_fip_lane_wait(fip, &lanep->base, FI_SEND);
if (unlikely(ret)) {
ERR("waiting for SEND completion failed");
return ret;
}
rpmem_fip_lane_begin(&lanep->base, FI_RECV | FI_SEND);
/* WRITE for requested memory region */
ret = rpmem_fip_writemsg(lanep->base.ep,
&lanep->write, laddr, len, raddr);
if (unlikely(ret)) {
RPMEM_FI_ERR((int)ret, "RMA write");
return ret;
}
/* SEND persist message */
msg = rpmem_fip_msg_get_pmsg(&lanep->send);
msg->flags = flags;
msg->lane = lane;
msg->addr = raddr;
msg->size = len;
ret = rpmem_fip_sendmsg(lanep->base.ep, &lanep->send, sizeof(*msg));
if (unlikely(ret)) {
RPMEM_FI_ERR(ret, "MSG send");
return ret;
}
/* wait for persist operation completion */
ret = rpmem_fip_lane_wait(fip, &lanep->base, FI_RECV);
if (unlikely(ret)) {
ERR("waiting for RECV completion failed");
return ret;
}
ret = rpmem_fip_post_resp(fip, lanep);
if (unlikely(ret)) {
ERR("posting RECV buffer failed");
return ret;
}
return 0;
}
/*
* rpmem_fip_persist_send -- (internal) perform persist operation using
* RDMA SEND operation with data inlined in the message buffer.
*/
static int
rpmem_fip_persist_send(struct rpmem_fip *fip, size_t offset,
size_t len, unsigned lane, unsigned flags)
{
RPMEM_ASSERT(len <= fip->buff_size);
struct rpmem_fip_plane *lanep = &fip->lanes[lane];
void *laddr = (void *)((uintptr_t)fip->laddr + offset);
uint64_t raddr = fip->raddr + offset;
struct rpmem_msg_persist *msg;
int ret;
ret = rpmem_fip_lane_wait(fip, &lanep->base, FI_SEND);
if (unlikely(ret)) {
ERR("waiting for SEND completion failed");
return ret;
}
rpmem_fip_lane_begin(&lanep->base, FI_RECV | FI_SEND);
/* SEND persist message */
msg = rpmem_fip_msg_get_pmsg(&lanep->send);
msg->flags = flags;
msg->lane = lane;
msg->addr = raddr;
msg->size = len;
memcpy(msg->data, laddr, len);
ret = rpmem_fip_sendmsg(lanep->base.ep, &lanep->send,
sizeof(*msg) + len);
if (unlikely(ret)) {
RPMEM_FI_ERR(ret, "MSG send");
return ret;
}
/* wait for persist operation completion */
ret = rpmem_fip_lane_wait(fip, &lanep->base, FI_RECV);
if (unlikely(ret)) {
ERR("waiting for RECV completion failed");
return ret;
}
ret = rpmem_fip_post_resp(fip, lanep);
if (unlikely(ret)) {
ERR("posting RECV buffer failed");
return ret;
}
return 0;
}
/*
* rpmem_fip_persist_gpspm_sockets -- (internal) perform persist operation
* for GPSPM - sockets provider implementation which doesn't use the
* inline persist operation
*/
static ssize_t
rpmem_fip_persist_gpspm_sockets(struct rpmem_fip *fip, size_t offset,
size_t len, unsigned lane, unsigned flags)
{
unsigned mode = flags & RPMEM_PERSIST_MASK;
if (mode == RPMEM_PERSIST_SEND)
flags = (flags & ~RPMEM_PERSIST_MASK) | RPMEM_PERSIST_WRITE;
/* Limit len to the max value of the return type. */
len = min(len, SSIZE_MAX);
int ret = rpmem_fip_persist_saw(fip, offset, len, lane, flags);
if (ret)
return -abs(ret);
return (ssize_t)len;
}
/*
* rpmem_fip_persist_apm_sockets -- (internal) perform persist operation
* for APM - sockets provider implementation which doesn't use the
* inline persist operation
*/
static ssize_t
rpmem_fip_persist_apm_sockets(struct rpmem_fip *fip, size_t offset,
size_t len, unsigned lane, unsigned flags)
{
/* Limit len to the max value of the return type. */
len = min(len, SSIZE_MAX);
int ret = rpmem_fip_persist_raw(fip, offset, len, lane);
if (ret)
return -abs(ret);
return (ssize_t)len;
}
/*
* rpmem_fip_persist_gpspm -- (internal) perform persist operation for GPSPM
*/
static ssize_t
rpmem_fip_persist_gpspm(struct rpmem_fip *fip, size_t offset,
size_t len, unsigned lane, unsigned flags)
{
int ret;
/* Limit len to the max value of the return type. */
len = min(len, SSIZE_MAX);
unsigned mode = flags & RPMEM_PERSIST_MASK;
if (mode == RPMEM_PERSIST_SEND) {
len = min(len, fip->buff_size);
ret = rpmem_fip_persist_send(fip, offset, len, lane, flags);
} else {
ret = rpmem_fip_persist_saw(fip, offset, len, lane, flags);
}
if (ret)
return -abs(ret);
return (ssize_t)len;
}
/*
* rpmem_fip_persist_apm -- (internal) perform persist operation for APM
*/
static ssize_t
rpmem_fip_persist_apm(struct rpmem_fip *fip, size_t offset,
size_t len, unsigned lane, unsigned flags)
{
int ret;
/* Limit len to the max value of the return type. */
len = min(len, SSIZE_MAX);
unsigned mode = flags & RPMEM_PERSIST_MASK;
if (unlikely(mode == RPMEM_DEEP_PERSIST))
ret = rpmem_fip_persist_saw(fip, offset, len, lane, flags);
else if (mode == RPMEM_PERSIST_SEND) {
len = min(len, fip->buff_size);
ret = rpmem_fip_persist_send(fip, offset, len, lane, flags);
} else {
ret = rpmem_fip_persist_raw(fip, offset, len, lane);
}
if (ret)
return -abs(ret);
return (ssize_t)len;
}
/*
* rpmem_fip_post_lanes_common -- (internal) post all persist response message
* buffers
*/
static int
rpmem_fip_post_lanes_common(struct rpmem_fip *fip)
{
int ret = 0;
for (unsigned i = 0; i < fip->nlanes; i++) {
ret = rpmem_fip_post_resp(fip, &fip->lanes[i]);
if (ret)
break;
}
return ret;
}
/*
* rpmem_fip_ops -- some operations specific for persistency method used
*/
static struct rpmem_fip_ops rpmem_fip_ops[MAX_RPMEM_PROV][MAX_RPMEM_PM] = {
[RPMEM_PROV_LIBFABRIC_VERBS] = {
[RPMEM_PM_GPSPM] = {
.persist = rpmem_fip_persist_gpspm,
.lanes_init = rpmem_fip_init_lanes_common,
.lanes_init_mem = rpmem_fip_init_mem_lanes_gpspm,
.lanes_fini = rpmem_fip_fini_lanes_common,
.lanes_post = rpmem_fip_post_lanes_common,
},
[RPMEM_PM_APM] = {
.persist = rpmem_fip_persist_apm,
.lanes_init = rpmem_fip_init_lanes_apm,
.lanes_init_mem = rpmem_fip_init_mem_lanes_apm,
.lanes_fini = rpmem_fip_fini_lanes_apm,
.lanes_post = rpmem_fip_post_lanes_common,
},
},
[RPMEM_PROV_LIBFABRIC_SOCKETS] = {
[RPMEM_PM_GPSPM] = {
.persist = rpmem_fip_persist_gpspm_sockets,
.lanes_init = rpmem_fip_init_lanes_common,
.lanes_init_mem = rpmem_fip_init_mem_lanes_gpspm,
.lanes_fini = rpmem_fip_fini_lanes_common,
.lanes_post = rpmem_fip_post_lanes_common,
},
[RPMEM_PM_APM] = {
.persist = rpmem_fip_persist_apm_sockets,
.lanes_init = rpmem_fip_init_lanes_apm,
.lanes_init_mem = rpmem_fip_init_mem_lanes_apm,
.lanes_fini = rpmem_fip_fini_lanes_apm,
.lanes_post = rpmem_fip_post_lanes_common,
},
}
};
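/*
 * Editor's illustrative sketch (not part of the original source): the
 * two-dimensional ops table above is indexed by provider and persistency
 * method, so the per-method behavior reduces to a single indirect call.
 * A hypothetical dispatch for the verbs provider with APM:
 */
#if 0
static ssize_t
example_persist_dispatch(struct rpmem_fip *fip, size_t offset, size_t len,
	unsigned lane, unsigned flags)
{
	/* selects rpmem_fip_persist_apm for this provider/method pair */
	const struct rpmem_fip_ops *ops =
		&rpmem_fip_ops[RPMEM_PROV_LIBFABRIC_VERBS][RPMEM_PM_APM];
	return ops->persist(fip, offset, len, lane, flags);
}
#endif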
/*
* rpmem_fip_set_attr -- (internal) set required attributes
*/
static void
rpmem_fip_set_attr(struct rpmem_fip *fip, struct rpmem_fip_attr *attr)
{
fip->raddr = (uint64_t)attr->raddr;
fip->rkey = attr->rkey;
fip->laddr = attr->laddr;
fip->size = attr->size;
fip->buff_size = attr->buff_size;
fip->persist_method = attr->persist_method;
rpmem_fip_set_nlanes(fip, attr->nlanes);
/* the CQ size includes one entry for the read operation */
fip->cq_size = rpmem_fip_cq_size(fip->persist_method,
RPMEM_FIP_NODE_CLIENT);
fip->ops = &rpmem_fip_ops[attr->provider][fip->persist_method];
}
/*
* rpmem_fip_init -- initialize fabric provider
*/
struct rpmem_fip *
rpmem_fip_init(const char *node, const char *service,
struct rpmem_fip_attr *attr, unsigned *nlanes)
{
int ret;
struct rpmem_fip *fip = calloc(1, sizeof(*fip));
if (!fip) {
RPMEM_LOG(ERR, "!allocating fabric handle");
return NULL;
}
ret = rpmem_fip_getinfo(fip, node, service,
attr->provider, attr->persist_method);
if (ret)
goto err_getinfo;
fip->cq_read = attr->provider == RPMEM_PROV_LIBFABRIC_VERBS ?
fi_cq_read : cq_read_infinite;
rpmem_fip_set_attr(fip, attr);
*nlanes = fip->nlanes;
ret = rpmem_fip_init_fabric_res(fip);
if (ret)
goto err_init_fabric_res;
ret = rpmem_fip_lanes_init(fip);
if (ret)
goto err_init_lanes;
return fip;
err_init_lanes:
rpmem_fip_fini_fabric_res(fip);
err_init_fabric_res:
fi_freeinfo(fip->fi);
err_getinfo:
free(fip);
return NULL;
}
/*
* rpmem_fip_fini -- deinitialize fabric provider
*/
void
rpmem_fip_fini(struct rpmem_fip *fip)
{
fip->ops->lanes_fini(fip);
rpmem_fip_lanes_fini_common(fip);
rpmem_fip_fini_fabric_res(fip);
fi_freeinfo(fip->fi);
free(fip);
}
/*
* rpmem_fip_connect -- connect to remote peer
*/
int
rpmem_fip_connect(struct rpmem_fip *fip)
{
int ret;
ret = rpmem_fip_lanes_connect(fip);
if (ret)
goto err_lanes_connect;
ret = rpmem_fip_monitor_init(fip);
if (ret)
goto err_monitor;
ret = rpmem_fip_init_memory(fip);
if (ret)
goto err_init_memory;
ret = fip->ops->lanes_init_mem(fip);
if (ret)
goto err_init_lanes_mem;
ret = fip->ops->lanes_post(fip);
if (ret)
goto err_lanes_post;
return 0;
err_lanes_post:
err_init_lanes_mem:
rpmem_fip_fini_memory(fip);
err_init_memory:
rpmem_fip_monitor_fini(fip);
err_monitor:
rpmem_fip_lanes_shutdown(fip);
err_lanes_connect:
return ret;
}
/*
* rpmem_fip_close -- close connection to remote peer
*/
int
rpmem_fip_close(struct rpmem_fip *fip)
{
int ret;
int lret = 0;
if (unlikely(rpmem_fip_is_closing(fip)))
goto close_monitor;
rpmem_fip_fini_memory(fip);
ret = rpmem_fip_lanes_shutdown(fip);
if (ret)
lret = ret;
close_monitor:
/* close fip monitor */
ret = rpmem_fip_monitor_fini(fip);
if (ret)
lret = ret;
return lret;
}
/*
* rpmem_fip_persist -- perform remote persist operation
*/
int
rpmem_fip_persist(struct rpmem_fip *fip, size_t offset, size_t len,
unsigned lane, unsigned flags)
{
RPMEM_ASSERT((flags & RPMEM_PERSIST_MASK) <= RPMEM_PERSIST_MAX);
if (unlikely(rpmem_fip_is_closing(fip)))
return ECONNRESET; /* it will be passed to errno */
RPMEM_ASSERT(lane < fip->nlanes);
if (unlikely(lane >= fip->nlanes))
return EINVAL; /* it will be passed to errno */
if (unlikely(offset > fip->size || offset + len > fip->size))
return EINVAL; /* it will be passed to errno */
if (unlikely(len == 0)) {
return 0;
}
int ret = 0;
while (len > 0) {
size_t tmplen = min(len, fip->fi->ep_attr->max_msg_size);
ssize_t r = fip->ops->persist(fip, offset, tmplen, lane, flags);
if (r < 0) {
RPMEM_LOG(ERR, "persist operation failed");
ret = (int)r;
goto err;
}
tmplen = (size_t)r;
offset += tmplen;
len -= tmplen;
}
err:
if (unlikely(rpmem_fip_is_closing(fip)))
return ECONNRESET; /* it will be passed to errno */
return ret;
}
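/*
 * Editor's illustrative sketch (not part of the original source):
 * rpmem_fip_persist() returns an errno-compatible value instead of
 * setting errno itself. A hypothetical caller on lane 0:
 */
#if 0
	int ret = rpmem_fip_persist(fip, offset, len, 0 /* lane */,
			RPMEM_PERSIST_WRITE);
	if (ret) {
		errno = ret; /* e.g. ECONNRESET if the connection is closing */
		/* handle the error... */
	}
#endif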
/*
* rpmem_fip_read -- perform read operation
*/
int
rpmem_fip_read(struct rpmem_fip *fip, void *buff, size_t len,
size_t off, unsigned lane)
{
int ret;
if (unlikely(rpmem_fip_is_closing(fip)))
return ECONNRESET; /* it will be passed to errno */
RPMEM_ASSERT(lane < fip->nlanes);
if (unlikely(lane >= fip->nlanes))
return EINVAL; /* it will be passed to errno */
if (unlikely(len == 0)) {
return 0;
}
size_t rd_buff_len = len < fip->fi->ep_attr->max_msg_size ?
len : fip->fi->ep_attr->max_msg_size;
void *rd_buff; /* buffer for read operation */
struct fid_mr *rd_mr; /* read buffer memory region */
void *rd_mr_desc; /* read buffer memory descriptor */
struct rpmem_fip_rlane rd_lane;
/* allocate buffer for read operation */
errno = posix_memalign((void **)&rd_buff, Pagesize,
rd_buff_len);
if (errno) {
RPMEM_LOG(ERR, "!allocating read buffer");
ret = errno;
goto err_malloc_rd_buff;
}
/*
* Register buffer for read operation.
 * The read operation utilizes the RMA READ operation, whose result
 * is written into this local buffer, thus the FI_REMOTE_WRITE flag.
*/
ret = fi_mr_reg(fip->domain, rd_buff,
rd_buff_len, FI_REMOTE_WRITE,
0, 0, 0, &rd_mr, NULL);
if (ret) {
RPMEM_FI_ERR(ret, "registrating read buffer");
goto err_rd_mr;
}
/* get read buffer local memory descriptor */
rd_mr_desc = fi_mr_desc(rd_mr);
/*
* Initialize READ message. The completion is required in order
* to signal thread that READ operation has been completed.
*/
rpmem_fip_rma_init(&rd_lane.read, rd_mr_desc, 0,
fip->rkey, &rd_lane, FI_COMPLETION);
size_t rd = 0;
uint8_t *cbuff = buff;
struct rpmem_fip_lane *lanep = &fip->lanes[lane].base;
while (rd < len) {
size_t rd_len = len - rd < rd_buff_len ?
len - rd : rd_buff_len;
size_t rd_off = off + rd;
uint64_t raddr = fip->raddr + rd_off;
rpmem_fip_lane_begin(lanep, FI_READ);
ret = rpmem_fip_readmsg(lanep->ep, &rd_lane.read,
rd_buff, rd_len, raddr);
if (ret) {
RPMEM_FI_ERR(ret, "RMA read");
goto err_readmsg;
}
VALGRIND_DO_MAKE_MEM_DEFINED(rd_buff, rd_len);
ret = rpmem_fip_lane_wait(fip, lanep, FI_READ);
if (ret) {
ERR("error when processing read request");
goto err_lane_wait;
}
memcpy(&cbuff[rd], rd_buff, rd_len);
rd += rd_len;
}
ret = 0;
err_lane_wait:
err_readmsg:
RPMEM_FI_CLOSE(rd_mr, "unregistering memory");
err_rd_mr:
free(rd_buff);
err_malloc_rd_buff:
if (unlikely(rpmem_fip_is_closing(fip)))
return ECONNRESET; /* it will be passed to errno */
return ret;
}
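/*
 * Editor's illustrative sketch (not part of the original source): a
 * minimal client-side lifecycle built from the functions above. The
 * node/service strings and the attr and buff variables are hypothetical;
 * attr would be filled from the out-of-band create/open response.
 */
#if 0
	unsigned nlanes = 4;
	struct rpmem_fip *fip = rpmem_fip_init("example-node", "7636",
			&attr, &nlanes);
	if (fip && rpmem_fip_connect(fip) == 0) {
		(void) rpmem_fip_persist(fip, 0, 4096, 0, RPMEM_PERSIST_WRITE);
		(void) rpmem_fip_read(fip, buff, 4096, 0, 0);
		(void) rpmem_fip_close(fip);
	}
	if (fip)
		rpmem_fip_fini(fip);
#endif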
/*
* parse_bool -- convert string value to boolean
*/
static int
parse_bool(const char *str_value)
{
if (strcmp(str_value, "0") == 0 ||
strcasecmp(str_value, "false") == 0 ||
strcasecmp(str_value, "no") == 0 ||
strcasecmp(str_value, "off") == 0) {
return 0;
}
if (strcmp(str_value, "1") == 0 ||
strcasecmp(str_value, "true") == 0 ||
strcasecmp(str_value, "yes") == 0 ||
strcasecmp(str_value, "on") == 0) {
return 1;
}
return -1;
}
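/*
 * Editor's illustrative sketch (not part of the original source):
 * parse_bool() maps common boolean spellings to 0/1, case-insensitively,
 * and anything unrecognized to -1:
 */
#if 0
	/* parse_bool("on") == 1, parse_bool("No") == 0 */
	/* parse_bool("2") == -1 -- not a recognized boolean value */
	int fork_unsafe = parse_bool("TRUE"); /* == 1 */
#endif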
/*
* rpmem_fip_param_get -- read environment variable in the libfabric way
*
 * - If the parameter does not exist the output value is not changed.
 * - If the environment variable is not set the output value is not changed.
 * - If the environment variable is set and its value is not correct the
 *   output value is set to the error value (-1).
 * - If the environment variable is set and its value is correct the output
 *   value is set according to the environment variable value.
*/
static void
rpmem_fip_param_get(const char *var_name, int *value)
{
struct fi_param *params;
int count;
int ret = fi_getparams(&params, &count);
if (ret != FI_SUCCESS) {
RPMEM_FI_ERR(ret, "getting fabric parameters list");
return;
}
for (int i = 0; i < count; ++i) {
if (strcmp(params[i].name, var_name) != 0)
continue;
if (!params[i].value) {
break;
}
*value = parse_bool(params[i].value);
break;
}
fi_freeparams(params);
}
#define LIBFABRIC_FORK_UNSAFE_VAR "FI_FORK_UNSAFE"
/*
* rpmem_fip_probe_fork_safety -- probe if libfabric is fork safe
*/
void
rpmem_fip_probe_fork_safety(int *fork_unsafe)
{
*fork_unsafe = 0; /* false by default */
rpmem_fip_param_get(LIBFABRIC_FORK_UNSAFE_VAR, fork_unsafe);
}
| 38,913 | 22.844363 | 80 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/librpmem/rpmem.h
|
/*
* Copyright 2016-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* rpmem.h -- internal definitions for librpmem
*/
#define RPMEM_LOG_PREFIX "librpmem"
#define RPMEM_LOG_LEVEL_VAR "RPMEM_LOG_LEVEL"
#define RPMEM_LOG_FILE_VAR "RPMEM_LOG_FILE"
| 1,784 | 43.625 | 74 |
h
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/librpmem/rpmem_util.h
|
/*
* Copyright 2016-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* rpmem_util.h -- util functions for librpmem header file
*/
#ifndef RPMEM_UTIL_H
#define RPMEM_UTIL_H 1
#ifdef __cplusplus
extern "C" {
#endif
enum {
LERR = 1,
LWARN = 2,
LNOTICE = 3,
LINFO = 4,
_LDBG = 10,
};
#define RPMEM_LOG(level, fmt, args...) LOG(L##level, fmt, ## args)
#define RPMEM_DBG(fmt, args...) LOG(_LDBG, fmt, ## args)
#define RPMEM_FATAL(fmt, args...) FATAL(fmt, ## args)
#define RPMEM_ASSERT(cond) ASSERT(cond)
#define RPMEM_FLAGS_ALL RPMEM_PERSIST_RELAXED
#define RPMEM_FLAGS_MASK ((unsigned)(~RPMEM_FLAGS_ALL))
const char *rpmem_util_proto_errstr(enum rpmem_err err);
int rpmem_util_proto_errno(enum rpmem_err err);
void rpmem_util_cmds_init(void);
void rpmem_util_cmds_fini(void);
const char *rpmem_util_cmd_get(void);
void rpmem_util_get_env_max_nlanes(unsigned *max_nlanes);
#ifdef __cplusplus
}
#endif
#endif
| 2,455 | 32.643836 | 74 |
h
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/librpmem/rpmem_util.c
|
/*
* Copyright 2016-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* rpmem_util.c -- util functions for librpmem source file
*/
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <stdint.h>
#include "out.h"
#include "os.h"
#include "librpmem.h"
#include "rpmem_proto.h"
#include "rpmem_common.h"
#include "rpmem_util.h"
static struct rpmem_err_str_errno {
int err;
const char *str;
} rpmem_err_str_errno[MAX_RPMEM_ERR] = {
[RPMEM_SUCCESS] = {
.err = 0,
.str = "Success",
},
[RPMEM_ERR_BADPROTO] = {
.err = EPROTONOSUPPORT,
.str = "Protocol version number mismatch",
},
[RPMEM_ERR_BADNAME] = {
.err = EINVAL,
.str = "Invalid pool descriptor",
},
[RPMEM_ERR_BADSIZE] = {
.err = EFBIG,
.str = "Invalid pool size",
},
[RPMEM_ERR_BADNLANES] = {
.err = EINVAL,
.str = "Invalid number of lanes",
},
[RPMEM_ERR_BADPROVIDER] = {
.err = EINVAL,
.str = "Invalid provider",
},
[RPMEM_ERR_FATAL] = {
.err = EREMOTEIO,
.str = "Fatal error",
},
[RPMEM_ERR_FATAL_CONN] = {
.err = ECONNABORTED,
.str = "Fatal in-band connection error",
},
[RPMEM_ERR_BUSY] = {
.err = EBUSY,
.str = "Pool already in use",
},
[RPMEM_ERR_EXISTS] = {
.err = EEXIST,
.str = "Pool already exists",
},
[RPMEM_ERR_PROVNOSUP] = {
.err = EMEDIUMTYPE,
.str = "Provider not supported",
},
[RPMEM_ERR_NOEXIST] = {
.err = ENOENT,
.str = "Pool set or its part doesn't exist or it is "
"unavailable",
},
[RPMEM_ERR_NOACCESS] = {
.err = EACCES,
.str = "Pool set permission denied",
},
[RPMEM_ERR_POOL_CFG] = {
.err = EINVAL,
.str = "Invalid pool set configuration",
},
};
static char *Rpmem_cmds;
static char **Rpmem_cmd_arr;
static size_t Rpmem_current_cmd;
static size_t Rpmem_ncmds;
#define RPMEM_CMD_SEPARATOR '|'
/*
* rpmem_util_proto_errstr -- return error string for error code
*/
const char *
rpmem_util_proto_errstr(enum rpmem_err err)
{
RPMEM_ASSERT(err < MAX_RPMEM_ERR);
const char *ret = rpmem_err_str_errno[err].str;
RPMEM_ASSERT(ret);
return ret;
}
/*
* rpmem_util_proto_errno -- return appropriate errno value for error code
*/
int
rpmem_util_proto_errno(enum rpmem_err err)
{
RPMEM_ASSERT(err < MAX_RPMEM_ERR);
return rpmem_err_str_errno[err].err;
}
/*
* rpmem_util_cmds_inc -- increase size of array for rpmem commands
*/
static void
rpmem_util_cmds_inc(void)
{
Rpmem_ncmds++;
Rpmem_cmd_arr = realloc(Rpmem_cmd_arr,
Rpmem_ncmds * sizeof(*Rpmem_cmd_arr));
if (!Rpmem_cmd_arr)
RPMEM_FATAL("!realloc");
}
/*
* rpmem_util_cmds_init -- read a RPMEM_CMD from the environment variable
*/
void
rpmem_util_cmds_init(void)
{
char *cmd = os_getenv(RPMEM_CMD_ENV);
if (!cmd)
cmd = RPMEM_DEF_CMD;
Rpmem_cmds = strdup(cmd);
if (!Rpmem_cmds)
RPMEM_FATAL("!strdup");
char *next = Rpmem_cmds;
while (next) {
rpmem_util_cmds_inc();
Rpmem_cmd_arr[Rpmem_ncmds - 1] = next;
next = strchr(next, RPMEM_CMD_SEPARATOR);
if (next) {
*next = '\0';
next++;
}
}
}
/*
 * rpmem_util_cmds_fini -- release the RPMEM_CMD copy
*/
void
rpmem_util_cmds_fini(void)
{
RPMEM_ASSERT(Rpmem_cmds);
RPMEM_ASSERT(Rpmem_cmd_arr);
RPMEM_ASSERT(Rpmem_current_cmd < Rpmem_ncmds);
free(Rpmem_cmds);
Rpmem_cmds = NULL;
free(Rpmem_cmd_arr);
Rpmem_cmd_arr = NULL;
Rpmem_ncmds = 0;
Rpmem_current_cmd = 0;
}
/*
 * rpmem_util_cmd_get -- get the next command from RPMEM_CMD
 *
 * RPMEM_CMD can contain multiple commands separated by RPMEM_CMD_SEPARATOR.
 * Commands from RPMEM_CMD are read sequentially and used to establish
 * out-of-band connections to remote nodes in the order read from a poolset
 * file.
*
*/
const char *
rpmem_util_cmd_get(void)
{
RPMEM_ASSERT(Rpmem_cmds);
RPMEM_ASSERT(Rpmem_cmd_arr);
RPMEM_ASSERT(Rpmem_current_cmd < Rpmem_ncmds);
char *ret = Rpmem_cmd_arr[Rpmem_current_cmd];
Rpmem_current_cmd = (Rpmem_current_cmd + 1) % Rpmem_ncmds;
return ret;
}
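/*
 * Editor's illustrative sketch (not part of the original source): with a
 * hypothetical RPMEM_CMD="cmd_a|cmd_b" the commands are returned in a
 * round-robin fashion:
 */
#if 0
	rpmem_util_cmds_init();
	const char *c1 = rpmem_util_cmd_get(); /* "cmd_a" */
	const char *c2 = rpmem_util_cmd_get(); /* "cmd_b" */
	const char *c3 = rpmem_util_cmd_get(); /* "cmd_a" again */
	rpmem_util_cmds_fini();
#endif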
/*
* rpmem_util_get_env_max_nlanes -- read the maximum number of lanes from
* RPMEM_MAX_NLANES
*/
void
rpmem_util_get_env_max_nlanes(unsigned *max_nlanes)
{
char *env_nlanes = os_getenv(RPMEM_MAX_NLANES_ENV);
if (env_nlanes && env_nlanes[0] != '\0') {
char *endptr;
errno = 0;
long nlanes = strtol(env_nlanes, &endptr, 10);
if (endptr[0] != '\0' || nlanes <= 0 ||
(errno == ERANGE &&
(nlanes == LONG_MAX || nlanes == LONG_MIN))) {
RPMEM_LOG(ERR, "%s variable must be a positive integer",
RPMEM_MAX_NLANES_ENV);
} else {
*max_nlanes = (unsigned)nlanes;
}
}
}
| 6,009 | 22.944223 | 77 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/librpmem/rpmem_obc.h
|
/*
* Copyright 2016-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* rpmem_obc.h -- rpmem out-of-band connection client header file
*/
#ifndef RPMEM_OBC_H
#define RPMEM_OBC_H 1
#include <sys/types.h>
#include <sys/socket.h>
#include "librpmem.h"
#ifdef __cplusplus
extern "C" {
#endif
struct rpmem_obc;
struct rpmem_obc *rpmem_obc_init(void);
void rpmem_obc_fini(struct rpmem_obc *rpc);
int rpmem_obc_connect(struct rpmem_obc *rpc,
const struct rpmem_target_info *info);
int rpmem_obc_disconnect(struct rpmem_obc *rpc);
int rpmem_obc_monitor(struct rpmem_obc *rpc, int nonblock);
int rpmem_obc_create(struct rpmem_obc *rpc,
const struct rpmem_req_attr *req,
struct rpmem_resp_attr *res,
const struct rpmem_pool_attr *pool_attr);
int rpmem_obc_open(struct rpmem_obc *rpc,
const struct rpmem_req_attr *req,
struct rpmem_resp_attr *res,
struct rpmem_pool_attr *pool_attr);
int rpmem_obc_set_attr(struct rpmem_obc *rpc,
const struct rpmem_pool_attr *pool_attr);
int rpmem_obc_close(struct rpmem_obc *rpc, int flags);
#ifdef __cplusplus
}
#endif
#endif
| 2,615 | 32.974026 | 74 |
h
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/librpmem/librpmem.c
|
/*
* Copyright 2016-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* librpmem.c -- entry points for librpmem
*/
#include <stdio.h>
#include <stdint.h>
#include "librpmem.h"
#include "rpmem.h"
#include "rpmem_common.h"
#include "rpmem_util.h"
#include "rpmem_fip.h"
#include "util.h"
#include "out.h"
/*
* librpmem_init -- load-time initialization for librpmem
*
* Called automatically by the run-time loader.
*/
ATTR_CONSTRUCTOR
void
librpmem_init(void)
{
util_init();
out_init(RPMEM_LOG_PREFIX, RPMEM_LOG_LEVEL_VAR, RPMEM_LOG_FILE_VAR,
RPMEM_MAJOR_VERSION, RPMEM_MINOR_VERSION);
LOG(3, NULL);
rpmem_util_cmds_init();
rpmem_util_get_env_max_nlanes(&Rpmem_max_nlanes);
rpmem_fip_probe_fork_safety(&Rpmem_fork_unsafe);
RPMEM_LOG(NOTICE, "Libfabric is %sfork safe",
Rpmem_fork_unsafe ? "not " : "");
}
/*
* librpmem_fini -- librpmem cleanup routine
*
* Called automatically when the process terminates.
*/
ATTR_DESTRUCTOR
void
librpmem_fini(void)
{
LOG(3, NULL);
rpmem_util_cmds_fini();
out_fini();
}
/*
* rpmem_check_version -- see if library meets application version requirements
*/
const char *
rpmem_check_version(unsigned major_required, unsigned minor_required)
{
LOG(3, "major_required %u minor_required %u",
major_required, minor_required);
if (major_required != RPMEM_MAJOR_VERSION) {
ERR("librpmem major version mismatch (need %u, found %u)",
major_required, RPMEM_MAJOR_VERSION);
return out_get_errormsg();
}
if (minor_required > RPMEM_MINOR_VERSION) {
ERR("librpmem minor version mismatch (need %u, found %u)",
minor_required, RPMEM_MINOR_VERSION);
return out_get_errormsg();
}
return NULL;
}
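/*
 * Editor's illustrative sketch (not part of the original source): a
 * typical application-side version check before using the library:
 */
#if 0
	const char *msg = rpmem_check_version(RPMEM_MAJOR_VERSION,
			RPMEM_MINOR_VERSION);
	if (msg != NULL) {
		fprintf(stderr, "librpmem version mismatch: %s\n", msg);
		exit(1);
	}
#endif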
/*
* rpmem_errormsg -- return the last error message
*/
const char *
rpmem_errormsg(void)
{
return out_get_errormsg();
}
| 3,326 | 27.681034 | 79 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/librpmem/rpmem_obc.c
|
/*
* Copyright 2016-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* rpmem_obc.c -- rpmem out-of-band connection client source file
*/
#include <stdlib.h>
#include <netdb.h>
#include <errno.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/socket.h>
#include "librpmem.h"
#include "rpmem.h"
#include "rpmem_common.h"
#include "rpmem_obc.h"
#include "rpmem_proto.h"
#include "rpmem_util.h"
#include "rpmem_ssh.h"
#include "out.h"
#include "sys_util.h"
#include "util.h"
/*
* rpmem_obc -- rpmem out-of-band client connection handle
*/
struct rpmem_obc {
struct rpmem_ssh *ssh;
};
/*
* rpmem_obc_is_connected -- (internal) return non-zero value if client is
* connected
*/
static inline int
rpmem_obc_is_connected(struct rpmem_obc *rpc)
{
return rpc->ssh != NULL;
}
/*
* rpmem_obc_check_ibc_attr -- (internal) check in-band connection
* attributes
*/
static int
rpmem_obc_check_ibc_attr(struct rpmem_msg_ibc_attr *ibc)
{
if (ibc->port == 0 || ibc->port > UINT16_MAX) {
ERR("invalid port number received -- %u", ibc->port);
errno = EPROTO;
return -1;
}
if (ibc->persist_method != RPMEM_PM_GPSPM &&
ibc->persist_method != RPMEM_PM_APM) {
ERR("invalid persistency method received -- %u",
ibc->persist_method);
errno = EPROTO;
return -1;
}
return 0;
}
/*
* rpmem_obc_check_port -- (internal) verify target node port number
*/
static int
rpmem_obc_check_port(const struct rpmem_target_info *info)
{
if (!(info->flags & RPMEM_HAS_SERVICE))
return 0;
if (*info->service == '\0') {
ERR("invalid port number -- '%s'", info->service);
goto err;
}
errno = 0;
char *endptr;
long port = strtol(info->service, &endptr, 10);
if (errno || *endptr != '\0') {
ERR("invalid port number -- '%s'", info->service);
goto err;
}
if (port < 1) {
ERR("port number must be positive -- '%s'", info->service);
goto err;
}
if (port > UINT16_MAX) {
ERR("port number too large -- '%s'", info->service);
goto err;
}
return 0;
err:
errno = EINVAL;
return -1;
}
/*
* rpmem_obc_close_conn -- (internal) close connection
*/
static void
rpmem_obc_close_conn(struct rpmem_obc *rpc)
{
rpmem_ssh_close(rpc->ssh);
(void) util_fetch_and_and64(&rpc->ssh, 0);
}
/*
 * rpmem_obc_set_msg_hdr -- (internal) initialize message header
*/
static void
rpmem_obc_set_msg_hdr(struct rpmem_msg_hdr *hdrp,
enum rpmem_msg_type type, size_t size)
{
hdrp->type = type;
hdrp->size = size;
}
/*
* rpmem_obc_set_pool_desc -- (internal) fill the pool descriptor field
*/
static void
rpmem_obc_set_pool_desc(struct rpmem_msg_pool_desc *pool_desc,
const char *desc, size_t size)
{
RPMEM_ASSERT(size <= UINT32_MAX);
RPMEM_ASSERT(size > 0);
pool_desc->size = (uint32_t)size;
memcpy(pool_desc->desc, desc, size);
pool_desc->desc[size - 1] = '\0';
}
/*
* rpmem_obc_alloc_create_msg -- (internal) allocate and fill create request
* message
*/
static struct rpmem_msg_create *
rpmem_obc_alloc_create_msg(const struct rpmem_req_attr *req,
const struct rpmem_pool_attr *pool_attr, size_t *msg_sizep)
{
size_t pool_desc_size = strlen(req->pool_desc) + 1;
size_t msg_size = sizeof(struct rpmem_msg_create) + pool_desc_size;
struct rpmem_msg_create *msg = malloc(msg_size);
if (!msg) {
ERR("!cannot allocate create request message");
return NULL;
}
rpmem_obc_set_msg_hdr(&msg->hdr, RPMEM_MSG_TYPE_CREATE, msg_size);
msg->c.major = RPMEM_PROTO_MAJOR;
msg->c.minor = RPMEM_PROTO_MINOR;
msg->c.pool_size = req->pool_size;
msg->c.nlanes = req->nlanes;
msg->c.provider = req->provider;
msg->c.buff_size = req->buff_size;
rpmem_obc_set_pool_desc(&msg->pool_desc,
req->pool_desc, pool_desc_size);
if (pool_attr) {
pack_rpmem_pool_attr(pool_attr, &msg->pool_attr);
} else {
RPMEM_LOG(INFO, "using zeroed pool attributes");
memset(&msg->pool_attr, 0, sizeof(msg->pool_attr));
}
*msg_sizep = msg_size;
return msg;
}
/*
* rpmem_obc_check_req -- (internal) check request attributes
*/
static int
rpmem_obc_check_req(const struct rpmem_req_attr *req)
{
if (req->provider >= MAX_RPMEM_PROV) {
ERR("invalid provider specified -- %u", req->provider);
errno = EINVAL;
return -1;
}
return 0;
}
/*
 * rpmem_obc_check_hdr_resp -- (internal) check response message header
*/
static int
rpmem_obc_check_hdr_resp(struct rpmem_msg_hdr_resp *resp,
enum rpmem_msg_type type, size_t size)
{
if (resp->type != type) {
ERR("invalid message type received -- %u", resp->type);
errno = EPROTO;
return -1;
}
if (resp->size != size) {
ERR("invalid message size received -- %lu", resp->size);
errno = EPROTO;
return -1;
}
if (resp->status >= MAX_RPMEM_ERR) {
ERR("invalid status received -- %u", resp->status);
errno = EPROTO;
return -1;
}
if (resp->status) {
enum rpmem_err status = (enum rpmem_err)resp->status;
ERR("%s", rpmem_util_proto_errstr(status));
errno = rpmem_util_proto_errno(status);
return -1;
}
return 0;
}
/*
* rpmem_obc_check_create_resp -- (internal) check create response message
*/
static int
rpmem_obc_check_create_resp(struct rpmem_msg_create_resp *resp)
{
if (rpmem_obc_check_hdr_resp(&resp->hdr, RPMEM_MSG_TYPE_CREATE_RESP,
sizeof(struct rpmem_msg_create_resp)))
return -1;
if (rpmem_obc_check_ibc_attr(&resp->ibc))
return -1;
return 0;
}
/*
* rpmem_obc_get_res -- (internal) read response attributes
*/
static void
rpmem_obc_get_res(struct rpmem_resp_attr *res,
struct rpmem_msg_ibc_attr *ibc)
{
res->port = (unsigned short)ibc->port;
res->rkey = ibc->rkey;
res->raddr = ibc->raddr;
res->persist_method =
(enum rpmem_persist_method)ibc->persist_method;
res->nlanes = ibc->nlanes;
}
/*
* rpmem_obc_alloc_open_msg -- (internal) allocate and fill open request message
*/
static struct rpmem_msg_open *
rpmem_obc_alloc_open_msg(const struct rpmem_req_attr *req,
const struct rpmem_pool_attr *pool_attr, size_t *msg_sizep)
{
size_t pool_desc_size = strlen(req->pool_desc) + 1;
size_t msg_size = sizeof(struct rpmem_msg_open) + pool_desc_size;
struct rpmem_msg_open *msg = malloc(msg_size);
if (!msg) {
ERR("!cannot allocate open request message");
return NULL;
}
rpmem_obc_set_msg_hdr(&msg->hdr, RPMEM_MSG_TYPE_OPEN, msg_size);
msg->c.major = RPMEM_PROTO_MAJOR;
msg->c.minor = RPMEM_PROTO_MINOR;
msg->c.pool_size = req->pool_size;
msg->c.nlanes = req->nlanes;
msg->c.provider = req->provider;
msg->c.buff_size = req->buff_size;
rpmem_obc_set_pool_desc(&msg->pool_desc,
req->pool_desc, pool_desc_size);
*msg_sizep = msg_size;
return msg;
}
/*
* rpmem_obc_check_open_resp -- (internal) check open response message
*/
static int
rpmem_obc_check_open_resp(struct rpmem_msg_open_resp *resp)
{
if (rpmem_obc_check_hdr_resp(&resp->hdr, RPMEM_MSG_TYPE_OPEN_RESP,
sizeof(struct rpmem_msg_open_resp)))
return -1;
if (rpmem_obc_check_ibc_attr(&resp->ibc))
return -1;
return 0;
}
/*
* rpmem_obc_check_close_resp -- (internal) check close response message
*/
static int
rpmem_obc_check_close_resp(struct rpmem_msg_close_resp *resp)
{
if (rpmem_obc_check_hdr_resp(&resp->hdr, RPMEM_MSG_TYPE_CLOSE_RESP,
sizeof(struct rpmem_msg_close_resp)))
return -1;
return 0;
}
/*
* rpmem_obc_check_set_attr_resp -- (internal) check set attributes response
* message
*/
static int
rpmem_obc_check_set_attr_resp(struct rpmem_msg_set_attr_resp *resp)
{
if (rpmem_obc_check_hdr_resp(&resp->hdr, RPMEM_MSG_TYPE_SET_ATTR_RESP,
sizeof(struct rpmem_msg_set_attr_resp)))
return -1;
return 0;
}
/*
* rpmem_obc_init -- initialize rpmem obc handle
*/
struct rpmem_obc *
rpmem_obc_init(void)
{
struct rpmem_obc *rpc = calloc(1, sizeof(*rpc));
if (!rpc) {
RPMEM_LOG(ERR, "!allocation of rpmem obc failed");
return NULL;
}
return rpc;
}
/*
* rpmem_obc_fini -- destroy rpmem obc handle
*
 * This function must be called with the connection already closed - after
 * calling rpmem_obc_disconnect or after receiving the relevant value from
 * rpmem_obc_monitor.
*/
void
rpmem_obc_fini(struct rpmem_obc *rpc)
{
free(rpc);
}
/*
* rpmem_obc_connect -- connect to target node
*
 * Connects to the target node. The target must be in the following format:
 * <addr>[:<port>]. If the port number is not specified, the default
 * ssh port is used. The <addr> is translated into an IP address.
*
* Returns an error if connection is already established.
*/
int
rpmem_obc_connect(struct rpmem_obc *rpc, const struct rpmem_target_info *info)
{
if (rpmem_obc_is_connected(rpc)) {
errno = EALREADY;
goto err_notconnected;
}
if (rpmem_obc_check_port(info))
goto err_port;
rpc->ssh = rpmem_ssh_open(info);
if (!rpc->ssh)
goto err_ssh_open;
return 0;
err_ssh_open:
err_port:
err_notconnected:
return -1;
}
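/*
 * Editor's illustrative sketch (not part of the original source):
 * establishing and tearing down an out-of-band connection. The node and
 * service values are hypothetical; the rpmem_target_info fields are the
 * ones this file already relies on (node, service, flags).
 */
#if 0
	struct rpmem_target_info info = {
		.node = "example-node",
		.service = "2222",
		.flags = RPMEM_HAS_SERVICE,
	};
	struct rpmem_obc *rpc = rpmem_obc_init();
	if (rpc && rpmem_obc_connect(rpc, &info) == 0) {
		/* ... create/open/set_attr/close requests ... */
		(void) rpmem_obc_disconnect(rpc);
	}
	rpmem_obc_fini(rpc);
#endif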
/*
* rpmem_obc_disconnect -- close the connection to target node
*
* Returns error if socket is not connected.
*/
int
rpmem_obc_disconnect(struct rpmem_obc *rpc)
{
if (rpmem_obc_is_connected(rpc)) {
rpmem_obc_close_conn(rpc);
return 0;
}
errno = ENOTCONN;
return -1;
}
/*
* rpmem_obc_monitor -- monitor connection with target node
*
* The nonblock variable indicates whether this function should return
* immediately (= 1) or may block (= 0).
*
 * If the function detects that the socket was closed by the remote peer,
 * it is closed on the local side and set to -1, so there is no need to
 * call the rpmem_obc_disconnect function. See the descriptions of the
 * individual functions to check which of them cannot be used once the
 * connection has been closed.
*
* This function expects there is no data pending on socket, if any data
* is pending this function returns an error and sets errno to EPROTO.
*
* Return values:
* 0 - not connected
* 1 - connected
* < 0 - error
*/
int
rpmem_obc_monitor(struct rpmem_obc *rpc, int nonblock)
{
if (!rpmem_obc_is_connected(rpc))
return 0;
return rpmem_ssh_monitor(rpc->ssh, nonblock);
}
/*
* rpmem_obc_create -- perform create request operation
*
* Returns error if connection has not been established yet.
*/
int
rpmem_obc_create(struct rpmem_obc *rpc,
const struct rpmem_req_attr *req,
struct rpmem_resp_attr *res,
const struct rpmem_pool_attr *pool_attr)
{
if (!rpmem_obc_is_connected(rpc)) {
ERR("out-of-band connection not established");
errno = ENOTCONN;
goto err_notconnected;
}
if (rpmem_obc_check_req(req))
goto err_req;
size_t msg_size;
struct rpmem_msg_create *msg =
rpmem_obc_alloc_create_msg(req, pool_attr, &msg_size);
if (!msg)
goto err_alloc_msg;
RPMEM_LOG(INFO, "sending create request message");
rpmem_hton_msg_create(msg);
if (rpmem_ssh_send(rpc->ssh, msg, msg_size)) {
ERR("!sending create request message failed");
goto err_msg_send;
}
RPMEM_LOG(NOTICE, "create request message sent");
RPMEM_LOG(INFO, "receiving create request response");
struct rpmem_msg_create_resp resp;
if (rpmem_ssh_recv(rpc->ssh, &resp,
sizeof(resp))) {
ERR("!receiving create request response failed");
goto err_msg_recv;
}
RPMEM_LOG(NOTICE, "create request response received");
rpmem_ntoh_msg_create_resp(&resp);
if (rpmem_obc_check_create_resp(&resp))
goto err_msg_resp;
rpmem_obc_get_res(res, &resp.ibc);
free(msg);
return 0;
err_msg_resp:
err_msg_recv:
err_msg_send:
free(msg);
err_alloc_msg:
err_req:
err_notconnected:
return -1;
}
/*
* rpmem_obc_open -- perform open request operation
*
* Returns error if connection is not already established.
*/
int
rpmem_obc_open(struct rpmem_obc *rpc,
const struct rpmem_req_attr *req,
struct rpmem_resp_attr *res,
struct rpmem_pool_attr *pool_attr)
{
if (!rpmem_obc_is_connected(rpc)) {
ERR("out-of-band connection not established");
errno = ENOTCONN;
goto err_notconnected;
}
if (rpmem_obc_check_req(req))
goto err_req;
size_t msg_size;
struct rpmem_msg_open *msg =
rpmem_obc_alloc_open_msg(req, pool_attr, &msg_size);
if (!msg)
goto err_alloc_msg;
RPMEM_LOG(INFO, "sending open request message");
rpmem_hton_msg_open(msg);
if (rpmem_ssh_send(rpc->ssh, msg, msg_size)) {
ERR("!sending open request message failed");
goto err_msg_send;
}
RPMEM_LOG(NOTICE, "open request message sent");
RPMEM_LOG(INFO, "receiving open request response");
struct rpmem_msg_open_resp resp;
if (rpmem_ssh_recv(rpc->ssh, &resp, sizeof(resp))) {
ERR("!receiving open request response failed");
goto err_msg_recv;
}
RPMEM_LOG(NOTICE, "open request response received");
rpmem_ntoh_msg_open_resp(&resp);
if (rpmem_obc_check_open_resp(&resp))
goto err_msg_resp;
rpmem_obc_get_res(res, &resp.ibc);
if (pool_attr)
unpack_rpmem_pool_attr(&resp.pool_attr, pool_attr);
free(msg);
return 0;
err_msg_resp:
err_msg_recv:
err_msg_send:
free(msg);
err_alloc_msg:
err_req:
err_notconnected:
return -1;
}
/*
* rpmem_obc_set_attr -- perform set attributes request operation
*
* Returns error if connection is not already established.
*/
int
rpmem_obc_set_attr(struct rpmem_obc *rpc,
const struct rpmem_pool_attr *pool_attr)
{
if (!rpmem_obc_is_connected(rpc)) {
ERR("out-of-band connection not established");
errno = ENOTCONN;
goto err_notconnected;
}
struct rpmem_msg_set_attr msg;
rpmem_obc_set_msg_hdr(&msg.hdr, RPMEM_MSG_TYPE_SET_ATTR, sizeof(msg));
if (pool_attr) {
memcpy(&msg.pool_attr, pool_attr, sizeof(msg.pool_attr));
} else {
RPMEM_LOG(INFO, "using zeroed pool attributes");
memset(&msg.pool_attr, 0, sizeof(msg.pool_attr));
}
RPMEM_LOG(INFO, "sending set attributes request message");
rpmem_hton_msg_set_attr(&msg);
if (rpmem_ssh_send(rpc->ssh, &msg, sizeof(msg))) {
ERR("!sending set attributes request message failed");
goto err_msg_send;
}
RPMEM_LOG(NOTICE, "set attributes request message sent");
RPMEM_LOG(INFO, "receiving set attributes request response");
struct rpmem_msg_set_attr_resp resp;
if (rpmem_ssh_recv(rpc->ssh, &resp,
sizeof(resp))) {
ERR("!receiving set attributes request response failed");
goto err_msg_recv;
}
RPMEM_LOG(NOTICE, "set attributes request response received");
rpmem_ntoh_msg_set_attr_resp(&resp);
if (rpmem_obc_check_set_attr_resp(&resp))
goto err_msg_resp;
return 0;
err_msg_resp:
err_msg_recv:
err_msg_send:
err_notconnected:
return -1;
}
/*
* rpmem_obc_close -- perform close request operation
*
* Returns error if connection is not already established.
*
* NOTE: this function does not close the connection, but sends close request
* message to remote node and receives a response. The connection must be
* closed using rpmem_obc_disconnect function.
*/
int
rpmem_obc_close(struct rpmem_obc *rpc, int flags)
{
if (!rpmem_obc_is_connected(rpc)) {
errno = ENOTCONN;
return -1;
}
struct rpmem_msg_close msg;
rpmem_obc_set_msg_hdr(&msg.hdr, RPMEM_MSG_TYPE_CLOSE, sizeof(msg));
msg.flags = (uint32_t)flags;
RPMEM_LOG(INFO, "sending close request message");
rpmem_hton_msg_close(&msg);
if (rpmem_ssh_send(rpc->ssh, &msg, sizeof(msg))) {
RPMEM_LOG(ERR, "!sending close request failed");
return -1;
}
RPMEM_LOG(NOTICE, "close request message sent");
RPMEM_LOG(INFO, "receiving close request response");
struct rpmem_msg_close_resp resp;
if (rpmem_ssh_recv(rpc->ssh, &resp,
sizeof(resp))) {
RPMEM_LOG(ERR, "!receiving close request response failed");
return -1;
}
RPMEM_LOG(NOTICE, "close request response received");
rpmem_ntoh_msg_close_resp(&resp);
if (rpmem_obc_check_close_resp(&resp))
return -1;
return 0;
}
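/*
 * Editor's illustrative sketch (not part of the original source): a
 * create request round-trip, reusing the hypothetical rpc handle from the
 * previous sketch. The request attribute values are made up; the response
 * attributes describe the in-band connection target.
 */
#if 0
	struct rpmem_req_attr req = {
		.pool_size = 8 << 20,
		.nlanes = 4,
		.provider = RPMEM_PROV_LIBFABRIC_VERBS,
		.pool_desc = "pool.set",
		.buff_size = 8192,
	};
	struct rpmem_resp_attr res;
	if (rpmem_obc_create(rpc, &req, &res, NULL /* zeroed pool attr */) == 0) {
		/* res.port, res.raddr, res.rkey, res.nlanes and
		 * res.persist_method configure the fabric connection */
		(void) rpmem_obc_close(rpc, 0);
	}
#endif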
| 16,926 | 22.908192 | 80 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/librpmem/rpmem_cmd.c
|
/*
* Copyright 2016-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* rpmem_cmd.c -- simple interface for running an executable in child process
*/
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <stdint.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/wait.h>
#include <signal.h>
#include "util.h"
#include "out.h"
#include "os.h"
#include "rpmem_common.h"
#include "rpmem_util.h"
#include "rpmem_cmd.h"
/*
* rpmem_cmd_init -- initialize command
*/
struct rpmem_cmd *
rpmem_cmd_init(void)
{
struct rpmem_cmd *cmd = calloc(1, sizeof(*cmd));
if (!cmd) {
RPMEM_LOG(ERR, "allocating command buffer");
goto err_alloc_cmd;
}
return cmd;
err_alloc_cmd:
return NULL;
}
/*
* rpmem_cmd_fini -- deinitialize command
*/
void
rpmem_cmd_fini(struct rpmem_cmd *cmd)
{
for (int i = 0; i < cmd->args.argc; i++)
free(cmd->args.argv[i]);
free(cmd->args.argv);
free(cmd);
}
/*
* rpmem_cmd_push -- push back command's argument
*/
int
rpmem_cmd_push(struct rpmem_cmd *cmd, const char *arg)
{
size_t argv_count = (size_t)cmd->args.argc + 2;
char **argv = realloc(cmd->args.argv, argv_count * sizeof(char *));
if (!argv) {
RPMEM_LOG(ERR, "reallocating command argv");
goto err_realloc;
}
cmd->args.argv = argv;
char *arg_dup = strdup(arg);
if (!arg_dup) {
RPMEM_LOG(ERR, "allocating argument");
goto err_strdup;
}
cmd->args.argv[cmd->args.argc] = arg_dup;
cmd->args.argc++;
cmd->args.argv[cmd->args.argc] = NULL;
return 0;
err_strdup:
err_realloc:
return -1;
}
/*
* rpmem_cmd_log -- print executing command
*/
static void
rpmem_cmd_log(struct rpmem_cmd *cmd)
{
RPMEM_ASSERT(cmd->args.argc > 0);
size_t size = 0;
for (int i = 0; i < cmd->args.argc; i++) {
size += strlen(cmd->args.argv[i]) + 1;
}
char *buff = malloc(size);
if (!buff) {
RPMEM_LOG(ERR, "allocating log buffer for command");
return;
}
size_t pos = 0;
for (int i = 0; pos < size && i < cmd->args.argc; i++) {
int ret = snprintf(&buff[pos], size - pos, "%s%s",
cmd->args.argv[i], i == cmd->args.argc - 1 ?
"" : " ");
if (ret < 0) {
RPMEM_LOG(ERR, "printing command's argument failed");
goto out;
}
pos += (size_t)ret;
}
RPMEM_LOG(INFO, "executing command '%s'", buff);
out:
free(buff);
}
/*
* rpmem_cmd_run -- run command and connect with stdin, stdout and stderr
* using unix sockets.
*
* The communication with child process is done via socketpairs on
* stdin, stdout and stderr. The socketpairs are used instead of pipes
* because reading from disconnected pipe causes a SIGPIPE signal.
* When using socketpair it is possible to read data using recv(3)
* function with MSG_NOSIGNAL flag, which doesn't send a signal.
*/
int
rpmem_cmd_run(struct rpmem_cmd *cmd)
{
int fd_in[2];
int fd_out[2];
int fd_err[2];
rpmem_cmd_log(cmd);
/* socketpair for stdin */
int ret = socketpair(AF_UNIX, SOCK_STREAM, 0, fd_in);
if (ret < 0) {
RPMEM_LOG(ERR, "creating pipe for stdin");
goto err_pipe_in;
}
/* parent process stdin socket */
cmd->fd_in = fd_in[1];
/* socketpair for stdout */
ret = socketpair(AF_UNIX, SOCK_STREAM, 0, fd_out);
if (ret < 0) {
RPMEM_LOG(ERR, "creating pipe for stdout");
goto err_pipe_out;
}
/* parent process stdout socket */
cmd->fd_out = fd_out[0];
/* socketpair for stderr */
ret = socketpair(AF_UNIX, SOCK_STREAM, 0, fd_err);
if (ret < 0) {
RPMEM_LOG(ERR, "creating pipe for stderr");
goto err_pipe_err;
}
/* parent process stderr socket */
cmd->fd_err = fd_err[0];
cmd->pid = fork();
if (cmd->pid == -1) {
RPMEM_LOG(ERR, "forking command");
goto err_fork;
}
if (!cmd->pid) {
dup2(fd_in[0], 0);
dup2(fd_out[1], 1);
dup2(fd_err[1], 2);
execvp(cmd->args.argv[0], cmd->args.argv);
exit(EXIT_FAILURE);
}
os_close(fd_in[0]);
os_close(fd_out[1]);
os_close(fd_err[1]);
return 0;
err_fork:
os_close(fd_err[0]);
os_close(fd_err[1]);
err_pipe_err:
os_close(fd_out[0]);
os_close(fd_out[1]);
err_pipe_out:
os_close(fd_in[0]);
os_close(fd_in[1]);
err_pipe_in:
return -1;
}
/*
* rpmem_cmd_wait -- wait for process to change state
*/
int
rpmem_cmd_wait(struct rpmem_cmd *cmd, int *status)
{
if (cmd->pid <= 0)
return -1;
if (waitpid(cmd->pid, status, 0) != cmd->pid)
return -1;
return 0;
}
/*
* rpmem_cmd_term -- terminate process by sending SIGINT signal
*/
int
rpmem_cmd_term(struct rpmem_cmd *cmd)
{
os_close(cmd->fd_in);
os_close(cmd->fd_out);
os_close(cmd->fd_err);
RPMEM_ASSERT(cmd->pid > 0);
return kill(cmd->pid, SIGINT);
}
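/*
 * Editor's illustrative sketch (not part of the original source): typical
 * usage of the command module -- build an argument vector, run the child,
 * communicate over the socketpairs and reap it. The ssh arguments are
 * hypothetical.
 */
#if 0
	struct rpmem_cmd *cmd = rpmem_cmd_init();
	if (cmd && rpmem_cmd_push(cmd, "ssh") == 0 &&
	    rpmem_cmd_push(cmd, "-T") == 0 &&
	    rpmem_cmd_push(cmd, "user@example-node") == 0 &&
	    rpmem_cmd_run(cmd) == 0) {
		/* talk to the child via cmd->fd_in / cmd->fd_out */
		int status;
		(void) rpmem_cmd_term(cmd);
		(void) rpmem_cmd_wait(cmd, &status);
	}
	if (cmd)
		rpmem_cmd_fini(cmd);
#endif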
| 6,070 | 21.996212 | 77 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/librpmem/rpmem_cmd.h
|
/*
* Copyright 2016-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* rpmem_cmd.h -- helper module for invoking separate process
*/
#ifndef RPMEM_CMD_H
#define RPMEM_CMD_H 1
#include <sys/types.h>
#ifdef __cplusplus
extern "C" {
#endif
struct rpmem_cmd {
int fd_in; /* stdin */
int fd_out; /* stdout */
int fd_err; /* stderr */
struct {
char **argv;
int argc;
} args; /* command arguments */
pid_t pid; /* pid of process */
};
struct rpmem_cmd *rpmem_cmd_init(void);
int rpmem_cmd_push(struct rpmem_cmd *cmd, const char *arg);
int rpmem_cmd_run(struct rpmem_cmd *cmd);
int rpmem_cmd_term(struct rpmem_cmd *cmd);
int rpmem_cmd_wait(struct rpmem_cmd *cmd, int *status);
void rpmem_cmd_fini(struct rpmem_cmd *cmd);
#ifdef __cplusplus
}
#endif
#endif
| 2,304 | 32.405797 | 74 |
h
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/librpmem/rpmem_ssh.c
|
/*
* Copyright 2016-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* rpmem_ssh.c -- rpmem ssh transport layer source file
*/
#include <unistd.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <stdint.h>
#include <sys/types.h>
#include <sys/wait.h>
#include "util.h"
#include "os.h"
#include "out.h"
#include "rpmem_common.h"
#include "rpmem_ssh.h"
#include "rpmem_cmd.h"
#include "rpmem_util.h"
#define ERR_BUFF_LEN 4095
/* +1 in order to be sure it is always null-terminated */
static char error_str[ERR_BUFF_LEN + 1];
struct rpmem_ssh {
struct rpmem_cmd *cmd;
};
/*
* get_ssh -- return ssh command name
*/
static const char *
get_ssh(void)
{
char *cmd = os_getenv(RPMEM_SSH_ENV);
if (!cmd)
cmd = RPMEM_DEF_SSH;
return cmd;
}
/*
* get_user_at_node -- returns string containing user@node
*/
static char *
get_user_at_node(const struct rpmem_target_info *info)
{
char *user_at_node = NULL;
if (info->flags & RPMEM_HAS_USER) {
size_t ulen = strlen(info->user);
size_t nlen = strlen(info->node);
size_t len = ulen + 1 + nlen + 1;
user_at_node = malloc(len);
if (!user_at_node)
goto err_malloc;
int ret = snprintf(user_at_node, len, "%s@%s",
info->user, info->node);
if (ret < 0 || (size_t)ret + 1 != len)
goto err_printf;
} else {
user_at_node = strdup(info->node);
if (!user_at_node)
goto err_malloc;
}
return user_at_node;
err_printf:
free(user_at_node);
err_malloc:
return NULL;
}
/*
* get_cmd -- return an RPMEM_CMD with appended list of arguments
*/
static char *
get_cmd(const char **argv)
{
const char *env_cmd = rpmem_util_cmd_get();
char *cmd = strdup(env_cmd);
if (!cmd)
return NULL;
size_t cmd_len = strlen(cmd) + 1;
const char *arg;
while ((arg = *argv++) != NULL) {
size_t len = strlen(arg);
size_t new_cmd_len = cmd_len + len + 1;
char *tmp = realloc(cmd, new_cmd_len);
if (!tmp)
goto err;
cmd = tmp;
/* append the argument to the command */
cmd[cmd_len - 1] = ' ';
memcpy(&cmd[cmd_len], arg, len);
cmd[cmd_len + len] = '\0';
cmd_len = new_cmd_len;
}
return cmd;
err:
free(cmd);
return NULL;
}
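/*
 * For example (hypothetical values): with the configured command
 * resolving to "rpmemd" and argv containing {"--pool-set",
 * "set.poolset"}, the resulting string is
 * "rpmemd --pool-set set.poolset".
 */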
/*
* valist_to_argv -- convert va_list to argv array
*/
static const char **
valist_to_argv(va_list args)
{
const char **argv = malloc(sizeof(const char *));
if (!argv)
return NULL;
argv[0] = NULL;
size_t nargs = 0;
const char *arg;
while ((arg = va_arg(args, const char *)) != NULL) {
nargs++;
const char **tmp = realloc(argv,
(nargs + 1) * sizeof(const char *));
if (!tmp)
goto err;
argv = tmp;
argv[nargs - 1] = arg;
argv[nargs] = NULL;
}
return argv;
err:
free(argv);
return NULL;
}
/*
* rpmem_ssh_execv -- open ssh connection and run $RPMEMD_CMD with
* additional NULL-terminated list of arguments.
*/
struct rpmem_ssh *
rpmem_ssh_execv(const struct rpmem_target_info *info, const char **argv)
{
struct rpmem_ssh *rps = calloc(1, sizeof(*rps));
if (!rps)
goto err_zalloc;
char *user_at_node = get_user_at_node(info);
if (!user_at_node)
goto err_user_node;
rps->cmd = rpmem_cmd_init();
if (!rps->cmd)
goto err_cmd_init;
char *cmd = get_cmd(argv);
if (!cmd)
goto err_cmd;
int ret = rpmem_cmd_push(rps->cmd, get_ssh());
if (ret)
goto err_push;
if (info->flags & RPMEM_HAS_SERVICE) {
/* port number is optional */
ret = rpmem_cmd_push(rps->cmd, "-p");
if (ret)
goto err_push;
ret = rpmem_cmd_push(rps->cmd, info->service);
if (ret)
goto err_push;
}
/*
* Disable allocating pseudo-terminal in order to transfer binary
* data safely.
*/
ret = rpmem_cmd_push(rps->cmd, "-T");
if (ret)
goto err_push;
if (info->flags & RPMEM_FLAGS_USE_IPV4) {
ret = rpmem_cmd_push(rps->cmd, "-4");
if (ret)
goto err_push;
}
/* fail if password required for authentication */
ret = rpmem_cmd_push(rps->cmd, "-oBatchMode=yes");
if (ret)
goto err_push;
ret = rpmem_cmd_push(rps->cmd, user_at_node);
if (ret)
goto err_push;
ret = rpmem_cmd_push(rps->cmd, cmd);
if (ret)
goto err_push;
ret = rpmem_cmd_run(rps->cmd);
if (ret)
goto err_run;
free(user_at_node);
free(cmd);
return rps;
err_run:
err_push:
free(cmd);
err_cmd:
rpmem_cmd_fini(rps->cmd);
err_cmd_init:
free(user_at_node);
err_user_node:
free(rps);
err_zalloc:
return NULL;
}
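/*
 * For reference, the command assembled above resembles the following
 * (brackets mark the optional parts):
 *
 *	ssh [-p <service>] -T [-4] -oBatchMode=yes [<user>@]<node>
 *		"<$RPMEMD_CMD> <argv>..."
 */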
/*
* rpmem_ssh_exec -- open ssh connection and run $RPMEMD_CMD with
 * an additional NULL-terminated variadic list of arguments.
*/
struct rpmem_ssh *
rpmem_ssh_exec(const struct rpmem_target_info *info, ...)
{
struct rpmem_ssh *ssh;
va_list args;
va_start(args, info);
const char **argv = valist_to_argv(args);
if (argv)
ssh = rpmem_ssh_execv(info, argv);
else
ssh = NULL;
va_end(args);
free(argv);
return ssh;
}
/*
* rpmem_ssh_open -- open ssh connection with specified node and wait for status
*/
struct rpmem_ssh *
rpmem_ssh_open(const struct rpmem_target_info *info)
{
struct rpmem_ssh *ssh = rpmem_ssh_exec(info, NULL);
if (!ssh)
return NULL;
/*
* Read initial status from invoked command.
* This is for synchronization purposes and to make it possible
* to inform client that command's initialization failed.
*/
int32_t status;
int ret = rpmem_ssh_recv(ssh, &status, sizeof(status));
if (ret) {
if (ret == 1 || errno == ECONNRESET)
ERR("%s", rpmem_ssh_strerror(ssh, errno));
else
ERR("!%s", info->node);
goto err_recv_status;
}
if (status) {
ERR("%s: unexpected status received -- '%d'",
info->node, status);
errno = status;
goto err_status;
}
RPMEM_LOG(INFO, "received status: %u", status);
return ssh;
err_recv_status:
err_status:
rpmem_ssh_close(ssh);
return NULL;
}
/*
* rpmem_ssh_close -- close ssh connection
*/
int
rpmem_ssh_close(struct rpmem_ssh *rps)
{
int ret;
rpmem_cmd_term(rps->cmd);
rpmem_cmd_wait(rps->cmd, &ret);
rpmem_cmd_fini(rps->cmd);
free(rps);
if (WIFEXITED(ret))
return WEXITSTATUS(ret);
if (WIFSIGNALED(ret)) {
ERR("signal received -- %d", WTERMSIG(ret));
return -1;
}
ERR("exit status -- %d", WEXITSTATUS(ret));
return -1;
}
/*
* rpmem_ssh_send -- send data using ssh transport layer
*
 * The data is transferred as raw binary; this is safe because the ssh
 * channel is opened without a pseudo-terminal (the "-T" option).
*/
int
rpmem_ssh_send(struct rpmem_ssh *rps, const void *buff, size_t len)
{
int ret = rpmem_xwrite(rps->cmd->fd_in, buff, len, MSG_NOSIGNAL);
if (ret == 1) {
errno = ECONNRESET;
} else if (ret < 0) {
if (errno == EPIPE)
errno = ECONNRESET;
}
return ret;
}
/*
* rpmem_ssh_recv -- receive data using ssh transport layer
*
 * The data is read back as raw binary (see the note at
 * rpmem_ssh_send).
*/
int
rpmem_ssh_recv(struct rpmem_ssh *rps, void *buff, size_t len)
{
int ret = rpmem_xread(rps->cmd->fd_out, buff,
len, MSG_NOSIGNAL);
if (ret == 1) {
errno = ECONNRESET;
} else if (ret < 0) {
if (errno == EPIPE)
errno = ECONNRESET;
}
return ret;
}
/*
* rpmem_ssh_monitor -- check connection state of ssh
*
* Return value:
* 0 - disconnected
* 1 - connected
* <0 - error
*/
int
rpmem_ssh_monitor(struct rpmem_ssh *rps, int nonblock)
{
uint32_t buff;
int flags = MSG_PEEK;
if (nonblock)
flags |= MSG_DONTWAIT;
int ret = rpmem_xread(rps->cmd->fd_out, &buff, sizeof(buff), flags);
if (!ret) {
errno = EPROTO;
return -1;
}
if (ret < 0) {
if (errno == EAGAIN || errno == EWOULDBLOCK)
return 1;
else
return ret;
}
return 0;
}
/*
* rpmem_ssh_strerror -- read error using stderr channel
*/
const char *
rpmem_ssh_strerror(struct rpmem_ssh *rps, int oerrno)
{
size_t len = 0;
ssize_t ret;
while ((ret = read(rps->cmd->fd_err, error_str + len,
ERR_BUFF_LEN - len))) {
if (ret < 0)
return "reading error string failed";
len += (size_t)ret;
}
error_str[len] = '\0';
if (len == 0) {
if (oerrno) {
char buff[UTIL_MAX_ERR_MSG];
util_strerror(oerrno, buff, UTIL_MAX_ERR_MSG);
snprintf(error_str, ERR_BUFF_LEN,
"%s", buff);
} else {
snprintf(error_str, ERR_BUFF_LEN,
"unknown error");
}
} else {
/* get rid of new line and carriage return chars */
char *cr = strchr(error_str, '\r');
if (cr)
*cr = '\0';
char *nl = strchr(error_str, '\n');
if (nl)
*nl = '\0';
}
return error_str;
}
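/*
 * Usage sketch (disabled; illustration only): a single request/reply
 * round trip over the transport implemented above. The 32-bit message
 * format is hypothetical.
 */
#if 0
static int
rpmem_ssh_example(const struct rpmem_target_info *info)
{
	struct rpmem_ssh *ssh = rpmem_ssh_open(info);
	if (!ssh)
		return -1;

	uint32_t req = 1;
	uint32_t resp;

	/* a nonzero return value from send/recv indicates failure */
	if (rpmem_ssh_send(ssh, &req, sizeof(req)) ||
	    rpmem_ssh_recv(ssh, &resp, sizeof(resp))) {
		ERR("%s", rpmem_ssh_strerror(ssh, errno));
		rpmem_ssh_close(ssh);
		return -1;
	}

	return rpmem_ssh_close(ssh);
}
#endif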
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemcto/cto.c
/*
* Copyright 2016-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* cto.c -- memory pool & allocation entry points for libpmemcto
*/
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/param.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <errno.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <wchar.h>
#include "libpmemcto.h"
#include "libpmem.h"
#include "jemalloc.h"
#include "util.h"
#include "util_pmem.h"
#include "set.h"
#include "out.h"
#include "cto.h"
#include "mmap.h"
#include "sys_util.h"
#include "valgrind_internal.h"
#include "os_thread.h"
#include "os_deep.h"
/* default hint address for mmap() when PMEM_MMAP_HINT is not specified */
#define CTO_MMAP_HINT ((void *)0x10000000000)
static const struct pool_attr Cto_create_attr = {
CTO_HDR_SIG,
CTO_FORMAT_MAJOR,
CTO_FORMAT_FEAT_DEFAULT,
{0}, {0}, {0}, {0}, {0}
};
static const struct pool_attr Cto_open_attr = {
CTO_HDR_SIG,
CTO_FORMAT_MAJOR,
CTO_FORMAT_FEAT_CHECK,
{0}, {0}, {0}, {0}, {0}
};
static os_mutex_t Pool_lock; /* guards pmemcto_create and pmemcto_open */
/*
* cto_print_jemalloc_messages -- (internal) custom print function for jemalloc
*
* Prints traces from jemalloc. All traces from jemalloc are error messages.
*/
static void
cto_print_jemalloc_messages(void *ignore, const char *s)
{
ERR("%s", s);
}
/*
* cto_print_jemalloc_stats --(internal) print function for jemalloc statistics
*
* Prints statistics from jemalloc. All statistics are printed with level 0.
*/
static void
cto_print_jemalloc_stats(void *ignore, const char *s)
{
LOG_NONL(0, "%s", s);
}
/*
* cto_init -- load-time initialization for cto
*
* Called automatically by the run-time loader.
*/
void
cto_init(void)
{
COMPILE_ERROR_ON(offsetof(struct pmemcto, set) !=
POOL_HDR_SIZE + CTO_DSC_P_SIZE);
util_mutex_init(&Pool_lock);
/* set up jemalloc messages to a custom print function */
je_cto_malloc_message = cto_print_jemalloc_messages;
}
/*
* cto_fini -- libpmemcto cleanup routine
*
* Called automatically when the process terminates.
*/
void
cto_fini(void)
{
LOG(3, NULL);
util_mutex_destroy(&Pool_lock);
}
/*
* cto_descr_create -- (internal) create cto memory pool descriptor
*/
static int
cto_descr_create(PMEMctopool *pcp, const char *layout, size_t poolsize)
{
LOG(3, "pcp %p layout \"%s\" poolsize %zu", pcp, layout, poolsize);
ASSERTeq(poolsize % Pagesize, 0);
/* opaque info lives at the beginning of mapped memory pool */
void *dscp = (void *)((uintptr_t)pcp +
sizeof(struct pool_hdr));
/* create required metadata */
memset(dscp, 0, CTO_DSC_P_SIZE);
if (layout)
strncpy(pcp->layout, layout, PMEMCTO_MAX_LAYOUT - 1);
pcp->addr = (uint64_t)pcp;
pcp->size = poolsize;
pcp->root = (uint64_t)NULL;
pcp->consistent = 0;
/* store non-volatile part of pool's descriptor */
util_persist(pcp->is_pmem, dscp, CTO_DSC_P_SIZE);
return 0;
}
/*
* cto_descr_check -- (internal) validate cto pool descriptor
*/
static int
cto_descr_check(PMEMctopool *pcp, const char *layout, size_t poolsize)
{
LOG(3, "pcp %p layout \"%s\" poolsize %zu", pcp, layout, poolsize);
if (layout && strncmp(pcp->layout, layout, PMEMCTO_MAX_LAYOUT)) {
ERR("wrong layout (\"%s\") pool created with layout \"%s\"",
layout, pcp->layout);
errno = EINVAL;
return -1;
}
if (pcp->consistent == 0) {
ERR("inconsistent pool");
errno = EINVAL;
return -1;
}
if ((void *)pcp->addr == NULL) {
ERR("invalid mapping address");
errno = EINVAL;
return -1;
}
/*
* The pool could be created using older version of the library, when
* the minimum pool size was different.
*/
if (pcp->size < PMEMCTO_MIN_POOL) {
LOG(4, "mapping size is less than minimum (%zu < %zu)",
pcp->size, PMEMCTO_MIN_POOL);
}
if (pcp->size != poolsize) {
ERR("mapping size does not match pool size: %zu != %zu",
pcp->size, poolsize);
errno = EINVAL;
return -1;
}
if ((void *)pcp->root != NULL &&
((char *)pcp->root < ((char *)pcp->addr + CTO_DSC_SIZE_ALIGNED) ||
(char *)pcp->root >= ((char *)pcp->addr + pcp->size))) {
ERR("invalid root pointer");
errno = EINVAL;
return -1;
}
LOG(4, "addr %p size %zu root %p", (void *)pcp->addr, pcp->size,
(void *)pcp->root);
return 0;
}
/*
* cto_runtime_init -- (internal) initialize cto memory pool runtime data
*/
static int
cto_runtime_init(PMEMctopool *pcp, int rdonly, int is_pmem)
{
LOG(3, "pcp %p rdonly %d is_pmem %d", pcp, rdonly, is_pmem);
/* reset consistency flag */
pcp->consistent = 0;
os_part_deep_common(REP(pcp->set, 0), 0,
&pcp->consistent, sizeof(pcp->consistent), 1);
/*
* If possible, turn off all permissions on the pool header page.
*
* The prototype PMFS doesn't allow this when large pages are in
* use. It is not considered an error if this fails.
*/
RANGE_NONE((void *)pcp->addr, sizeof(struct pool_hdr), pcp->is_dev_dax);
return 0;
}
/*
* pmemcto_create -- create a cto memory pool
*/
#ifndef _WIN32
static inline
#endif
PMEMctopool *
pmemcto_createU(const char *path, const char *layout, size_t poolsize,
mode_t mode)
{
LOG(3, "path \"%s\" layout \"%s\" poolsize %zu mode %o",
path, layout, poolsize, mode);
struct pool_set *set;
/* check length of layout */
if (layout && (strlen(layout) >= PMEMCTO_MAX_LAYOUT)) {
ERR("Layout too long");
errno = EINVAL;
return NULL;
}
util_mutex_lock(&Pool_lock);
/*
* Since pmemcto_create and pmemcto_open are guarded by the lock,
* we can safely modify the global Mmap_hint variable and restore
* it once the pool is created.
*/
int old_no_random = Mmap_no_random;
if (!Mmap_no_random) {
Mmap_no_random = 1;
Mmap_hint = CTO_MMAP_HINT; /* XXX: add randomization */
}
if (util_pool_create(&set, path, poolsize, PMEMCTO_MIN_POOL,
PMEMCTO_MIN_PART, &Cto_create_attr, NULL,
REPLICAS_DISABLED) != 0) {
LOG(2, "cannot create pool or pool set");
Mmap_no_random = old_no_random;
util_mutex_unlock(&Pool_lock);
return NULL;
}
Mmap_no_random = old_no_random;
util_mutex_unlock(&Pool_lock);
ASSERT(set->nreplicas > 0);
struct pool_replica *rep = set->replica[0];
PMEMctopool *pcp = rep->part[0].addr;
VALGRIND_REMOVE_PMEM_MAPPING(&pcp->addr,
sizeof(struct pmemcto) -
((uintptr_t)&pcp->addr - (uintptr_t)&pcp->hdr));
pcp->set = set;
pcp->is_pmem = rep->is_pmem;
pcp->is_dev_dax = rep->part[0].is_dev_dax;
/* is_dev_dax implies is_pmem */
ASSERT(!pcp->is_dev_dax || pcp->is_pmem);
if (set->nreplicas > 1) {
errno = ENOTSUP;
ERR("!replicas not supported");
goto err;
}
/* create pool descriptor */
if (cto_descr_create(pcp, layout, rep->repsize) != 0) {
LOG(2, "descriptor creation failed");
goto err;
}
/* initialize runtime parts */
if (cto_runtime_init(pcp, 0, rep->is_pmem) != 0) {
ERR("pool initialization failed");
goto err;
}
/* Prepare pool for jemalloc - empty */
if (je_cto_pool_create(
(void *)((uintptr_t)pcp + CTO_DSC_SIZE_ALIGNED),
rep->repsize - CTO_DSC_SIZE_ALIGNED,
set->zeroed, 1) == NULL) {
ERR("pool creation failed");
goto err;
}
if (util_poolset_chmod(set, mode))
goto err;
util_poolset_fdclose(set);
LOG(3, "pcp %p", pcp);
return pcp;
err:
LOG(4, "error clean up");
int oerrno = errno;
util_mutex_lock(&Pool_lock);
util_poolset_close(set, DELETE_CREATED_PARTS);
util_mutex_unlock(&Pool_lock);
errno = oerrno;
return NULL;
}
#ifndef _WIN32
/*
 * pmemcto_create -- create a cto memory pool
*/
PMEMctopool *
pmemcto_create(const char *path, const char *layout, size_t poolsize,
mode_t mode)
{
return pmemcto_createU(path, layout, poolsize, mode);
}
#else
/*
 * pmemcto_createW -- create a cto memory pool
*/
PMEMctopool *
pmemcto_createW(const wchar_t *path, const wchar_t *layout, size_t poolsize,
mode_t mode)
{
char *upath = util_toUTF8(path);
if (upath == NULL)
return NULL;
char *ulayout = NULL;
if (layout != NULL) {
ulayout = util_toUTF8(layout);
if (ulayout == NULL) {
util_free_UTF8(upath);
return NULL;
}
}
PMEMctopool *ret = pmemcto_createU(upath, ulayout, poolsize, mode);
util_free_UTF8(upath);
util_free_UTF8(ulayout);
return ret;
}
#endif
/*
* cto_open_noinit -- (internal) open a cto memory pool w/o initialization
*
 * This routine opens the pool, but does not do any run-time
 * initialization.
*/
static PMEMctopool *
cto_open_noinit(const char *path, const char *layout, unsigned flags,
void *addr)
{
LOG(3, "path \"%s\" layout \"%s\" flags 0x%x addr %p",
path, layout, flags, addr);
struct pool_set *set;
if (util_pool_open(&set, path, PMEMCTO_MIN_POOL, &Cto_open_attr,
NULL, addr, flags) != 0) {
LOG(2, "cannot open pool or pool set");
return NULL;
}
ASSERT(set->nreplicas > 0);
struct pool_replica *rep = set->replica[0];
PMEMctopool *pcp = rep->part[0].addr;
VALGRIND_REMOVE_PMEM_MAPPING(&pcp->addr,
sizeof(struct pmemcto) -
((uintptr_t)&pcp->addr - (uintptr_t)&pcp->hdr));
ASSERTeq(pcp->size, rep->repsize);
pcp->set = set;
pcp->is_pmem = rep->is_pmem;
pcp->is_dev_dax = rep->part[0].is_dev_dax;
/* is_dev_dax implies is_pmem */
ASSERT(!pcp->is_dev_dax || pcp->is_pmem);
if (set->nreplicas > 1) {
errno = ENOTSUP;
ERR("!replicas not supported");
goto err;
}
/* validate pool descriptor */
if (cto_descr_check(pcp, layout, set->poolsize) != 0) {
LOG(2, "descriptor check failed");
goto err;
}
util_poolset_fdclose(set);
LOG(3, "pcp %p", pcp);
return pcp;
err:
LOG(4, "error clean up");
int oerrno = errno;
util_poolset_close(set, DO_NOT_DELETE_PARTS);
errno = oerrno;
return NULL;
}
/*
* cto_open_common -- (internal) open a cto memory pool
*
 * This routine does all the work, but takes the flags argument
 * (e.g. POOL_OPEN_COW) so internal calls can map a copy-on-write,
 * read-only pool if required.
*/
static PMEMctopool *
cto_open_common(const char *path, const char *layout, unsigned flags)
{
LOG(3, "path \"%s\" layout \"%s\" flags 0x%x", path, layout, flags);
PMEMctopool *pcp;
struct pool_set *set;
/*
* XXX: Opening/mapping the pool twice is not the coolest solution,
* but it makes it easier to support both single-file pools and
* pool sets.
*/
util_mutex_lock(&Pool_lock);
/* open pool set to check consistency and to get the mapping address */
if ((pcp = cto_open_noinit(path, layout, flags, NULL)) == NULL) {
LOG(2, "cannot open pool or pool set");
util_mutex_unlock(&Pool_lock);
return NULL;
}
/* get the last mapping address */
void *mapaddr = (void *)pcp->addr;
LOG(4, "mapping address: %p", mapaddr);
int oerrno = errno;
util_poolset_close(pcp->set, DO_NOT_DELETE_PARTS);
errno = oerrno;
/* open the pool once again using the mapping address as a hint */
if ((pcp = cto_open_noinit(path, layout, flags, mapaddr)) == NULL) {
LOG(2, "cannot open pool or pool set");
util_mutex_unlock(&Pool_lock);
return NULL;
}
util_mutex_unlock(&Pool_lock);
set = pcp->set;
if ((void *)pcp->addr != pcp) {
ERR("cannot mmap at the same address: %p != %p",
pcp, (void *)pcp->addr);
errno = ENOMEM;
goto err;
}
/* initialize runtime parts */
if (cto_runtime_init(pcp, set->rdonly, set->replica[0]->is_pmem) != 0) {
ERR("pool initialization failed");
goto err;
}
/*
* Initially, treat this memory region as undefined.
* Once jemalloc initializes its metadata, it will also mark
* registered free chunks (usable heap space) as unaddressable.
*/
VALGRIND_DO_MAKE_MEM_UNDEFINED(
(void *)((uintptr_t)pcp + CTO_DSC_SIZE_ALIGNED),
set->poolsize - CTO_DSC_SIZE_ALIGNED);
/* Prepare pool for jemalloc */
if (je_cto_pool_create(
(void *)((uintptr_t)pcp + CTO_DSC_SIZE_ALIGNED),
set->poolsize - CTO_DSC_SIZE_ALIGNED, 0, 0) == NULL) {
ERR("pool creation failed");
util_unmap((void *)pcp->addr, pcp->size);
goto err;
}
util_poolset_fdclose(set);
LOG(3, "pcp %p", pcp);
return pcp;
err:
LOG(4, "error clean up");
oerrno = errno;
util_mutex_lock(&Pool_lock);
util_poolset_close(set, DO_NOT_DELETE_PARTS);
util_mutex_unlock(&Pool_lock);
errno = oerrno;
return NULL;
}
#ifndef _WIN32
/*
 * pmemcto_open -- open an existing cto memory pool
*/
PMEMctopool *
pmemcto_open(const char *path, const char *layout)
{
LOG(3, "path \"%s\" layout \"%s\"", path, layout);
return cto_open_common(path, layout, 0);
}
#else
/*
* pmemcto_openU -- open an existing cto memory pool
*/
PMEMctopool *
pmemcto_openU(const char *path, const char *layout)
{
LOG(3, "path \"%s\" layout \"%s\"", path, layout);
return cto_open_common(path, layout, 0);
}
/*
 * pmemcto_openW -- open an existing cto memory pool
*/
PMEMctopool *
pmemcto_openW(const wchar_t *path, const wchar_t *layout)
{
char *upath = util_toUTF8(path);
if (upath == NULL)
return NULL;
char *ulayout = NULL;
if (layout != NULL) {
ulayout = util_toUTF8(layout);
if (ulayout == NULL) {
util_free_UTF8(upath);
return NULL;
}
}
PMEMctopool *ret = pmemcto_openU(upath, ulayout);
util_free_UTF8(upath);
util_free_UTF8(ulayout);
return ret;
}
#endif
/*
* pmemcto_close -- close a cto memory pool
*/
void
pmemcto_close(PMEMctopool *pcp)
{
LOG(3, "pcp %p", pcp);
int ret = je_cto_pool_delete(
(pool_t *)((uintptr_t)pcp + CTO_DSC_SIZE_ALIGNED));
if (ret != 0) {
ERR("invalid pool handle: %p", pcp);
errno = EINVAL;
return;
}
/* deep flush the entire pool to persistence */
RANGE_RW((void *)pcp->addr, sizeof(struct pool_hdr), pcp->is_dev_dax);
VALGRIND_DO_MAKE_MEM_DEFINED(pcp->addr, pcp->size);
/* so far, there could be only one replica in CTO pool set */
struct pool_replica *rep = REP(pcp->set, 0);
for (unsigned p = 0; p < rep->nparts; p++) {
struct pool_set_part *part = PART(rep, p);
os_part_deep_common(rep, p, part->addr, part->size, 1);
}
/* set consistency flag */
pcp->consistent = 1;
os_part_deep_common(REP(pcp->set, 0), 0,
&pcp->consistent, sizeof(pcp->consistent), 1);
util_mutex_lock(&Pool_lock);
util_poolset_close(pcp->set, DO_NOT_DELETE_PARTS);
util_mutex_unlock(&Pool_lock);
}
/*
* pmemcto_set_root_pointer -- saves pointer to root object
*/
void
pmemcto_set_root_pointer(PMEMctopool *pcp, void *ptr)
{
LOG(3, "pcp %p ptr %p", pcp, ptr);
#ifdef DEBUG
/* XXX: an error also in non-debug build? (return 0 or -1) */
ASSERT(ptr == NULL ||
((char *)ptr >= ((char *)pcp->addr + CTO_DSC_SIZE_ALIGNED) &&
(char *)ptr < ((char *)pcp->addr + pcp->size)));
#endif
pcp->root = (uint64_t)ptr;
}
/*
* pmemcto_get_root_pointer -- returns pointer to root object
*/
void *
pmemcto_get_root_pointer(PMEMctopool *pcp)
{
LOG(3, "pcp %p", pcp);
LOG(4, "root ptr %p", (void *)pcp->root);
return (void *)pcp->root;
}
/*
* pmemcto_checkU -- memory pool consistency check
*/
#ifndef _WIN32
static inline
#endif
int
pmemcto_checkU(const char *path, const char *layout)
{
LOG(3, "path \"%s\" layout \"%s\"", path, layout);
PMEMctopool *pcp = cto_open_common(path, layout, POOL_OPEN_COW);
if (pcp == NULL)
		return -1; /* errno set by cto_open_common() */
int consistent = je_cto_pool_check(
(pool_t *)((uintptr_t)pcp + CTO_DSC_SIZE_ALIGNED));
pmemcto_close(pcp);
if (consistent)
LOG(4, "pool consistency check OK");
return consistent;
}
#ifndef _WIN32
/*
* pmemcto_check -- cto memory pool consistency check
*
 * Returns 1 if consistent, 0 if inconsistent, and -1 (with errno set)
 * if the check cannot be performed.
*/
int
pmemcto_check(const char *path, const char *layout)
{
return pmemcto_checkU(path, layout);
}
#else
/*
* pmemcto_checkW -- cto memory pool consistency check
*/
int
pmemcto_checkW(const wchar_t *path, const wchar_t *layout)
{
char *upath = util_toUTF8(path);
if (upath == NULL)
return -1;
char *ulayout = NULL;
if (layout != NULL) {
ulayout = util_toUTF8(layout);
if (ulayout == NULL) {
util_free_UTF8(upath);
return -1;
}
}
int ret = pmemcto_checkU(upath, ulayout);
util_free_UTF8(upath);
util_free_UTF8(ulayout);
return ret;
}
#endif
/*
* pmemcto_stats_print -- spew memory allocator stats for a pool
*/
void
pmemcto_stats_print(PMEMctopool *pcp, const char *opts)
{
LOG(3, "vmp %p opts \"%s\"", pcp, opts ? opts : "");
je_cto_pool_malloc_stats_print(
(pool_t *)((uintptr_t)pcp + CTO_DSC_SIZE_ALIGNED),
cto_print_jemalloc_stats, NULL, opts);
}
/*
* pmemcto_malloc -- allocate memory
*/
void *
pmemcto_malloc(PMEMctopool *pcp, size_t size)
{
LOG(3, "pcp %p size %zu", pcp, size);
return je_cto_pool_malloc(
(pool_t *)((uintptr_t)pcp + CTO_DSC_SIZE_ALIGNED),
size);
}
/*
* pmemcto_free -- free memory
*/
void
pmemcto_free(PMEMctopool *pcp, void *ptr)
{
LOG(3, "pcp %p ptr %p", pcp, ptr);
je_cto_pool_free((pool_t *)(
(uintptr_t)pcp + CTO_DSC_SIZE_ALIGNED), ptr);
}
/*
* pmemcto_calloc -- allocate zeroed memory
*/
void *
pmemcto_calloc(PMEMctopool *pcp, size_t nmemb, size_t size)
{
LOG(3, "pcp %p nmemb %zu size %zu", pcp, nmemb, size);
return je_cto_pool_calloc(
(pool_t *)((uintptr_t)pcp + CTO_DSC_SIZE_ALIGNED),
nmemb, size);
}
/*
* pmemcto_realloc -- resize a memory allocation
*/
void *
pmemcto_realloc(PMEMctopool *pcp, void *ptr, size_t size)
{
LOG(3, "pcp %p ptr %p size %zu", pcp, ptr, size);
return je_cto_pool_ralloc(
(pool_t *)((uintptr_t)pcp + CTO_DSC_SIZE_ALIGNED),
ptr, size);
}
/*
* pmemcto_aligned_alloc -- allocate aligned memory
*/
void *
pmemcto_aligned_alloc(PMEMctopool *pcp, size_t alignment, size_t size)
{
LOG(3, "pcp %p alignment %zu size %zu", pcp, alignment, size);
return je_cto_pool_aligned_alloc(
(pool_t *)((uintptr_t)pcp + CTO_DSC_SIZE_ALIGNED),
alignment, size);
}
/*
* pmemcto_strdup -- allocate memory for copy of string
*/
char *
pmemcto_strdup(PMEMctopool *pcp, const char *s)
{
LOG(3, "pcp %p s %p", pcp, s);
size_t size = strlen(s) + 1;
void *retaddr = je_cto_pool_malloc(
(pool_t *)((uintptr_t)pcp + CTO_DSC_SIZE_ALIGNED),
size);
if (retaddr == NULL)
return NULL;
return (char *)memcpy(retaddr, s, size);
}
/*
* pmemcto_wcsdup -- allocate memory for copy of widechar string
*/
wchar_t *
pmemcto_wcsdup(PMEMctopool *pcp, const wchar_t *s)
{
LOG(3, "pcp %p s %p", pcp, s);
size_t size = (wcslen(s) + 1) * sizeof(wchar_t);
void *retaddr = je_cto_pool_malloc(
(pool_t *)((uintptr_t)pcp + CTO_DSC_SIZE_ALIGNED),
size);
if (retaddr == NULL)
return NULL;
return (wchar_t *)memcpy(retaddr, s, size);
}
/*
* pmemcto_malloc_usable_size -- get usable size of allocation
*/
size_t
pmemcto_malloc_usable_size(PMEMctopool *pcp, void *ptr)
{
LOG(3, "pcp %p ptr %p", pcp, ptr);
return je_cto_pool_malloc_usable_size(
(pool_t *)((uintptr_t)pcp + CTO_DSC_SIZE_ALIGNED), ptr);
}
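/*
 * Usage sketch (disabled; illustration only): the create/use/reopen
 * cycle of the API implemented above. The path and layout name are
 * hypothetical.
 */
#if 0
static void
pmemcto_example(void)
{
	PMEMctopool *pcp = pmemcto_create("/pmem/example.pool", "example",
			PMEMCTO_MIN_POOL, 0666);
	if (pcp == NULL)
		return;

	/* allocations keep their addresses across close/open */
	char *msg = pmemcto_strdup(pcp, "hello");
	pmemcto_set_root_pointer(pcp, msg);
	pmemcto_close(pcp);

	pcp = pmemcto_open("/pmem/example.pool", "example");
	if (pcp != NULL) {
		msg = pmemcto_get_root_pointer(pcp);
		pmemcto_free(pcp, msg);
		pmemcto_close(pcp);
	}
}
#endif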
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemcto/cto.h
/*
* Copyright 2016-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* cto.h -- internal definitions for libpmemcto module
*/
#ifndef LIBPMEMCTO_CTO_H
#define LIBPMEMCTO_CTO_H 1
#include "os_thread.h"
#include "util.h"
#include "pool_hdr.h"
#ifdef __cplusplus
extern "C" {
#endif
#define PMEMCTO_LOG_PREFIX "libpmemcto"
#define PMEMCTO_LOG_LEVEL_VAR "PMEMCTO_LOG_LEVEL"
#define PMEMCTO_LOG_FILE_VAR "PMEMCTO_LOG_FILE"
/* attributes of the cto memory pool format for the pool header */
#define CTO_HDR_SIG "PMEMCTO" /* must be 8 bytes including '\0' */
#define CTO_FORMAT_MAJOR 1
#define CTO_FORMAT_FEAT_DEFAULT \
{0x0000, POOL_FEAT_INCOMPAT_DEFAULT, 0x0000}
#define CTO_FORMAT_FEAT_CHECK \
{0x0000, POOL_FEAT_INCOMPAT_VALID, 0x0000}
static const features_t cto_format_feat_default = CTO_FORMAT_FEAT_DEFAULT;
/* size of the persistent part of PMEMCTO pool descriptor (2kB) */
#define CTO_DSC_P_SIZE		2048
/* size of unused part of the persistent part of PMEMCTO pool descriptor */
#define CTO_DSC_P_UNUSED	(CTO_DSC_P_SIZE - PMEMCTO_MAX_LAYOUT - 28)
/*
* XXX: We don't care about portable data types, as the pool may only be open
* on the same platform.
* Assuming the shutdown state / consistent flag is updated in a fail-safe
* manner, there is no need to checksum the persistent part of the descriptor.
*/
struct pmemcto {
struct pool_hdr hdr; /* memory pool header */
/* persistent part of PMEMCTO pool descriptor (2kB) */
char layout[PMEMCTO_MAX_LAYOUT];
uint64_t addr; /* mapped region */
uint64_t size; /* size of mapped region */
uint64_t root; /* root pointer */
uint8_t consistent; /* successfully flushed before exit */
unsigned char unused[CTO_DSC_P_UNUSED]; /* must be zero */
/* some run-time state, allocated out of memory pool... */
struct pool_set *set; /* pool set info */
int is_pmem; /* true if pool is PMEM */
int rdonly; /* true if pool is opened read-only */
int is_dev_dax; /* true if mapped on device dax */
};
/* data area starts at this alignment after the struct pmemcto above */
#define CTO_FORMAT_DATA_ALIGN ((uintptr_t)4096)
#define CTO_DSC_SIZE (sizeof(struct pmemcto) - sizeof(struct pool_hdr))
#define CTO_DSC_SIZE_ALIGNED\
roundup(sizeof(struct pmemcto), CTO_FORMAT_DATA_ALIGN)
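/*
 * For example (assuming a 4kB pool header, which is hypothetical
 * here): sizeof(struct pmemcto) is 4kB of header plus 2kB of
 * persistent descriptor plus the run-time fields, so
 * CTO_DSC_SIZE_ALIGNED rounds up to 8kB and the usable heap starts
 * 8kB into the pool.
 */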
void cto_init(void);
void cto_fini(void);
#ifdef _WIN32
/*
* On Linux we have separate jemalloc builds for libvmem, libvmmalloc
* and libpmemcto, with different function name prefixes. This is to avoid
* symbol collisions in case of static linking of those libraries.
* On Windows we don't provide statically linked libraries, so there is
* no need to have separate jemalloc builds. However, since libpmemcto
* links to jemalloc symbols with "je_cto" prefix, we have to do renaming
* here (unless there is a better solution).
*/
#define je_cto_pool_create je_vmem_pool_create
#define je_cto_pool_delete je_vmem_pool_delete
#define je_cto_pool_malloc je_vmem_pool_malloc
#define je_cto_pool_calloc je_vmem_pool_calloc
#define je_cto_pool_ralloc je_vmem_pool_ralloc
#define je_cto_pool_aligned_alloc je_vmem_pool_aligned_alloc
#define je_cto_pool_free je_vmem_pool_free
#define je_cto_pool_malloc_usable_size je_vmem_pool_malloc_usable_size
#define je_cto_pool_malloc_stats_print je_vmem_pool_malloc_stats_print
#define je_cto_pool_extend je_vmem_pool_extend
#define je_cto_pool_set_alloc_funcs je_vmem_pool_set_alloc_funcs
#define je_cto_pool_check je_vmem_pool_check
#define je_cto_malloc_message je_vmem_malloc_message
#endif
#ifdef __cplusplus
}
#endif
#endif /* LIBPMEMCTO_CTO_H */
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemcto/libpmemcto_main.c
/*
* Copyright 2016-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* libpmemcto_main.c -- entry point for libpmemcto.dll
*
* XXX - This is a placeholder. All the library initialization/cleanup
* that is done in library ctors/dtors, as well as TLS initialization
* should be moved here.
*/
void libpmemcto_init(void);
void libpmemcto_fini(void);
int APIENTRY
DllMain(HINSTANCE hInstance, DWORD dwReason, LPVOID lpReserved)
{
switch (dwReason) {
case DLL_PROCESS_ATTACH:
libpmemcto_init();
break;
case DLL_THREAD_ATTACH:
case DLL_THREAD_DETACH:
break;
case DLL_PROCESS_DETACH:
libpmemcto_fini();
break;
}
return TRUE;
}
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemcto/libpmemcto.c
/*
* Copyright 2016-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* libpmemcto.c -- basic libpmemcto functions
*/
#include <stdio.h>
#include <stdint.h>
#include "libpmemcto.h"
#include "pmemcommon.h"
#include "cto.h"
#include "jemalloc.h"
/*
 * libpmemcto_init -- load-time initialization for libpmemcto
*
* Called automatically by the run-time loader.
*/
ATTR_CONSTRUCTOR
void
libpmemcto_init(void)
{
common_init(PMEMCTO_LOG_PREFIX, PMEMCTO_LOG_LEVEL_VAR,
PMEMCTO_LOG_FILE_VAR, PMEMCTO_MAJOR_VERSION,
PMEMCTO_MINOR_VERSION);
cto_init();
LOG(3, NULL);
}
/*
* libpmemcto_fini -- libpmemcto cleanup routine
*
* Called automatically when the process terminates.
*/
ATTR_DESTRUCTOR
void
libpmemcto_fini(void)
{
LOG(3, NULL);
cto_fini();
common_fini();
}
/*
* pmemcto_check_versionU -- see if lib meets application version requirements
*/
#ifndef _WIN32
static inline
#endif
const char *
pmemcto_check_versionU(unsigned major_required, unsigned minor_required)
{
LOG(3, "major_required %u minor_required %u",
major_required, minor_required);
if (major_required != PMEMCTO_MAJOR_VERSION) {
ERR("libpmemcto major version mismatch (need %u, found %u)",
major_required, PMEMCTO_MAJOR_VERSION);
return out_get_errormsg();
}
if (minor_required > PMEMCTO_MINOR_VERSION) {
ERR("libpmemcto minor version mismatch (need %u, found %u)",
minor_required, PMEMCTO_MINOR_VERSION);
return out_get_errormsg();
}
return NULL;
}
#ifndef _WIN32
/*
* pmemcto_check_version -- see if lib meets application version requirements
*/
const char *
pmemcto_check_version(unsigned major_required, unsigned minor_required)
{
return pmemcto_check_versionU(major_required, minor_required);
}
#else
/*
* pmemcto_check_versionW -- see if lib meets application version requirements
*/
const wchar_t *
pmemcto_check_versionW(unsigned major_required, unsigned minor_required)
{
if (pmemcto_check_versionU(major_required, minor_required) != NULL)
return out_get_errormsgW();
else
return NULL;
}
#endif
/*
* pmemcto_set_funcs -- allow overriding libpmemcto's call to malloc, etc.
*/
void
pmemcto_set_funcs(
void *(*malloc_func)(size_t size),
void (*free_func)(void *ptr),
void *(*realloc_func)(void *ptr, size_t size),
char *(*strdup_func)(const char *s),
void (*print_func)(const char *s))
{
LOG(3, NULL);
util_set_alloc_funcs(malloc_func, free_func,
realloc_func, strdup_func);
out_set_print_func(print_func);
je_cto_pool_set_alloc_funcs(malloc_func, free_func);
}
/*
* pmemcto_errormsgU -- return last error message
*/
#ifndef _WIN32
static inline
#endif
const char *
pmemcto_errormsgU(void)
{
return out_get_errormsg();
}
#ifndef _WIN32
/*
* pmemcto_errormsg -- return last error message
*/
const char *
pmemcto_errormsg(void)
{
return pmemcto_errormsgU();
}
#else
/*
* pmemcto_errormsgW -- return last error message as wchar_t
*/
const wchar_t *
pmemcto_errormsgW(void)
{
return out_get_errormsgW();
}
#endif
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemblk/blk.h
/*
* Copyright 2014-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* blk.h -- internal definitions for libpmem blk module
*/
#ifndef BLK_H
#define BLK_H 1
#include <stddef.h>
#include "ctl.h"
#include "os_thread.h"
#include "pool_hdr.h"
#ifdef __cplusplus
extern "C" {
#endif
#define PMEMBLK_LOG_PREFIX "libpmemblk"
#define PMEMBLK_LOG_LEVEL_VAR "PMEMBLK_LOG_LEVEL"
#define PMEMBLK_LOG_FILE_VAR "PMEMBLK_LOG_FILE"
/* attributes of the blk memory pool format for the pool header */
#define BLK_HDR_SIG "PMEMBLK" /* must be 8 bytes including '\0' */
#define BLK_FORMAT_MAJOR 1
#define BLK_FORMAT_FEAT_DEFAULT \
{0x0000, POOL_FEAT_INCOMPAT_DEFAULT, 0x0000}
#define BLK_FORMAT_FEAT_CHECK \
{0x0000, POOL_FEAT_INCOMPAT_VALID, 0x0000}
static const features_t blk_format_feat_default = BLK_FORMAT_FEAT_DEFAULT;
struct pmemblk {
struct pool_hdr hdr; /* memory pool header */
/* root info for on-media format... */
uint32_t bsize; /* block size */
/* flag indicating if the pool was zero-initialized */
int is_zeroed;
/* some run-time state, allocated out of memory pool... */
void *addr; /* mapped region */
size_t size; /* size of mapped region */
int is_pmem; /* true if pool is PMEM */
int rdonly; /* true if pool is opened read-only */
void *data; /* post-header data area */
size_t datasize; /* size of data area */
size_t nlba; /* number of LBAs in pool */
struct btt *bttp; /* btt handle */
unsigned nlane; /* number of lanes */
unsigned next_lane; /* used to rotate through lanes */
os_mutex_t *locks; /* one per lane */
int is_dev_dax; /* true if mapped on device dax */
struct ctl *ctl; /* top level node of the ctl tree structure */
struct pool_set *set; /* pool set info */
#ifdef DEBUG
/* held during read/write mprotected sections */
os_mutex_t write_lock;
#endif
};
/* data area starts at this alignment after the struct pmemblk above */
#define BLK_FORMAT_DATA_ALIGN ((uintptr_t)4096)
#ifdef __cplusplus
}
#endif
#endif
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemblk/libpmemblk.c
/*
* Copyright 2014-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* libpmemblk.c -- pmem entry points for libpmemblk
*/
#include <stdio.h>
#include <stdint.h>
#include "libpmemblk.h"
#include "ctl_global.h"
#include "pmemcommon.h"
#include "blk.h"
/*
* The variable from which the config is directly loaded. The string
 * cannot contain any comments or extraneous whitespace characters.
*/
#define BLK_CONFIG_ENV_VARIABLE "PMEMBLK_CONF"
/*
* The variable that points to a config file from which the config is loaded.
*/
#define BLK_CONFIG_FILE_ENV_VARIABLE "PMEMBLK_CONF_FILE"
/*
* blk_ctl_init_and_load -- (static) initializes CTL and loads configuration
* from env variable and file
*/
static int
blk_ctl_init_and_load(PMEMblkpool *pbp)
{
LOG(3, "pbp %p", pbp);
if (pbp != NULL && (pbp->ctl = ctl_new()) == NULL) {
LOG(2, "!ctl_new");
return -1;
}
char *env_config = os_getenv(BLK_CONFIG_ENV_VARIABLE);
if (env_config != NULL) {
if (ctl_load_config_from_string(pbp ? pbp->ctl : NULL,
pbp, env_config) != 0) {
LOG(2, "unable to parse config stored in %s "
"environment variable",
BLK_CONFIG_ENV_VARIABLE);
goto err;
}
}
char *env_config_file = os_getenv(BLK_CONFIG_FILE_ENV_VARIABLE);
if (env_config_file != NULL && env_config_file[0] != '\0') {
if (ctl_load_config_from_file(pbp ? pbp->ctl : NULL,
pbp, env_config_file) != 0) {
LOG(2, "unable to parse config stored in %s "
"file (from %s environment variable)",
env_config_file,
BLK_CONFIG_FILE_ENV_VARIABLE);
goto err;
}
}
return 0;
err:
if (pbp)
ctl_delete(pbp->ctl);
return -1;
}
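/*
 * For example (hypothetical settings), either of the following makes
 * the routine above pick up a configuration at load time:
 *
 *	PMEMBLK_CONF="prefault.at_create=1;prefault.at_open=1"
 *	PMEMBLK_CONF_FILE="/etc/pmemblk.conf"
 */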
/*
* libpmemblk_init -- (internal) load-time initialization for blk
*
* Called automatically by the run-time loader.
*/
ATTR_CONSTRUCTOR
void
libpmemblk_init(void)
{
ctl_global_register();
if (blk_ctl_init_and_load(NULL))
FATAL("error: %s", pmemblk_errormsg());
common_init(PMEMBLK_LOG_PREFIX, PMEMBLK_LOG_LEVEL_VAR,
PMEMBLK_LOG_FILE_VAR, PMEMBLK_MAJOR_VERSION,
PMEMBLK_MINOR_VERSION);
LOG(3, NULL);
}
/*
* libpmemblk_fini -- libpmemblk cleanup routine
*
* Called automatically when the process terminates.
*/
ATTR_DESTRUCTOR
void
libpmemblk_fini(void)
{
LOG(3, NULL);
common_fini();
}
/*
* pmemblk_check_versionU -- see if lib meets application version requirements
*/
#ifndef _WIN32
static inline
#endif
const char *
pmemblk_check_versionU(unsigned major_required, unsigned minor_required)
{
LOG(3, "major_required %u minor_required %u",
major_required, minor_required);
if (major_required != PMEMBLK_MAJOR_VERSION) {
ERR("libpmemblk major version mismatch (need %u, found %u)",
major_required, PMEMBLK_MAJOR_VERSION);
return out_get_errormsg();
}
if (minor_required > PMEMBLK_MINOR_VERSION) {
ERR("libpmemblk minor version mismatch (need %u, found %u)",
minor_required, PMEMBLK_MINOR_VERSION);
return out_get_errormsg();
}
return NULL;
}
#ifndef _WIN32
/*
* pmemblk_check_version -- see if lib meets application version requirements
*/
const char *
pmemblk_check_version(unsigned major_required, unsigned minor_required)
{
return pmemblk_check_versionU(major_required, minor_required);
}
#else
/*
* pmemblk_check_versionW -- see if lib meets application version requirements
*/
const wchar_t *
pmemblk_check_versionW(unsigned major_required, unsigned minor_required)
{
if (pmemblk_check_versionU(major_required, minor_required) != NULL)
return out_get_errormsgW();
else
return NULL;
}
#endif
/*
* pmemblk_set_funcs -- allow overriding libpmemblk's call to malloc, etc.
*/
void
pmemblk_set_funcs(
void *(*malloc_func)(size_t size),
void (*free_func)(void *ptr),
void *(*realloc_func)(void *ptr, size_t size),
char *(*strdup_func)(const char *s))
{
LOG(3, NULL);
util_set_alloc_funcs(malloc_func, free_func, realloc_func, strdup_func);
}
/*
* pmemblk_errormsgU -- return last error message
*/
#ifndef _WIN32
static inline
#endif
const char *
pmemblk_errormsgU(void)
{
return out_get_errormsg();
}
#ifndef _WIN32
/*
* pmemblk_errormsg -- return last error message
*/
const char *
pmemblk_errormsg(void)
{
return pmemblk_errormsgU();
}
#else
/*
* pmemblk_errormsgW -- return last error message as wchar_t
*/
const wchar_t *
pmemblk_errormsgW(void)
{
return out_get_errormsgW();
}
#endif
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemblk/btt.h
/*
* Copyright 2014-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* btt.h -- btt module definitions
*/
#ifndef BTT_H
#define BTT_H 1
#ifdef __cplusplus
extern "C" {
#endif
/* callback functions passed to btt_init() */
struct ns_callback {
int (*nsread)(void *ns, unsigned lane,
void *buf, size_t count, uint64_t off);
int (*nswrite)(void *ns, unsigned lane,
const void *buf, size_t count, uint64_t off);
int (*nszero)(void *ns, unsigned lane, size_t count, uint64_t off);
ssize_t (*nsmap)(void *ns, unsigned lane, void **addrp,
size_t len, uint64_t off);
void (*nssync)(void *ns, unsigned lane, void *addr, size_t len);
int ns_is_zeroed;
};
struct btt_info;
struct btt *btt_init(uint64_t rawsize, uint32_t lbasize, uint8_t parent_uuid[],
unsigned maxlane, void *ns, const struct ns_callback *ns_cbp);
unsigned btt_nlane(struct btt *bttp);
size_t btt_nlba(struct btt *bttp);
int btt_read(struct btt *bttp, unsigned lane, uint64_t lba, void *buf);
int btt_write(struct btt *bttp, unsigned lane, uint64_t lba, const void *buf);
int btt_set_zero(struct btt *bttp, unsigned lane, uint64_t lba);
int btt_set_error(struct btt *bttp, unsigned lane, uint64_t lba);
int btt_check(struct btt *bttp);
void btt_fini(struct btt *bttp);
uint64_t btt_flog_size(uint32_t nfree);
uint64_t btt_map_size(uint32_t external_nlba);
uint64_t btt_arena_datasize(uint64_t arena_size, uint32_t nfree);
int btt_info_set(struct btt_info *info, uint32_t external_lbasize,
uint32_t nfree, uint64_t arena_size, uint64_t space_left);
struct btt_flog *btt_flog_get_valid(struct btt_flog *flog_pair, int *next);
int map_entry_is_initial(uint32_t map_entry);
void btt_info_convert2h(struct btt_info *infop);
void btt_info_convert2le(struct btt_info *infop);
void btt_flog_convert2h(struct btt_flog *flogp);
void btt_flog_convert2le(struct btt_flog *flogp);
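/*
 * Usage sketch (illustration only): how a caller wires up the
 * namespace callbacks and drives the BTT. Error handling is omitted
 * and all "my_"-prefixed identifiers are hypothetical.
 *
 *	static const struct ns_callback ns_cb = {
 *		.nsread = my_nsread,
 *		.nswrite = my_nswrite,
 *		.nszero = my_nszero,
 *		.nsmap = my_nsmap,
 *		.nssync = my_nssync,
 *		.ns_is_zeroed = 1,
 *	};
 *
 *	struct btt *bttp = btt_init(rawsize, 512, parent_uuid,
 *			maxlane, my_ns, &ns_cb);
 *	unsigned lane = 0;		(0 .. btt_nlane(bttp) - 1)
 *	btt_write(bttp, lane, lba, buf);
 *	btt_read(bttp, lane, lba, buf);
 *	btt_fini(bttp);
 */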
#ifdef __cplusplus
}
#endif
#endif
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemblk/btt.c
/*
* Copyright 2014-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* btt.c -- block translation table providing atomic block updates
*
* This is a user-space implementation of the BTT mechanism providing
* single block powerfail write atomicity, as described by:
* The NVDIMM Namespace Specification
*
* To use this module, the caller must provide five routines for
* accessing the namespace containing the data (in this context,
* "namespace" refers to the storage containing the BTT layout, such
* as a file). All namespace I/O is done by these callbacks:
*
* nsread Read count bytes from namespace at offset off
* nswrite Write count bytes to namespace at offset off
* nszero Zero count bytes in namespace at offset off
* nsmap Return direct access to a range of a namespace
* nssync Flush changes made to an nsmap'd range
*
* Data written by the nswrite callback is flushed out to the media
* (made durable) when the call returns. Data written directly via
* the nsmap callback must be flushed explicitly using nssync.
*
* The caller passes these callbacks, along with information such as
* namespace size and UUID to btt_init() and gets back an opaque handle
* which is then used with the rest of the entry points.
*
* Here is a brief list of the entry points to this module:
*
* btt_nlane Returns number of concurrent threads allowed
*
* btt_nlba Returns the usable size, as a count of LBAs
*
* btt_read Reads a single block at a given LBA
*
* btt_write Writes a single block (atomically) at a given LBA
*
* btt_set_zero Sets a block to read back as zeros
*
* btt_set_error Sets a block to return error on read
*
* btt_check Checks the BTT metadata for consistency
*
* btt_fini Frees run-time state, done using namespace
*
* If the caller is multi-threaded, it must only allow btt_nlane() threads
* to enter this module at a time, each assigned a unique "lane" number
* between 0 and btt_nlane() - 1.
*
* There are a number of static routines defined in this module. Here's
* a brief overview of the most important routines:
*
* read_layout Checks for valid BTT layout and builds run-time state.
* A number of helper functions are used by read_layout
* to handle various parts of the metadata:
* read_info
* read_arenas
* read_arena
* read_flogs
* read_flog_pair
*
* write_layout Generates a new BTT layout when one doesn't exist.
* Once a new layout is written, write_layout uses
* the same helper functions above to construct the
* run-time state.
*
* invalid_lba Range check done by each entry point that takes
* an LBA.
*
* lba_to_arena_lba
* Find the arena and LBA in that arena for a given
* external LBA. This is the heart of the arena
* range matching logic.
*
* flog_update Update the BTT free list/log combined data structure
* (known as the "flog"). This is the heart of the
* logic that makes writes powerfail atomic.
*
* map_lock These routines provide atomic access to the BTT map
 * map_unlock		data structure in an arena.
* map_abort
*
* map_entry_setf Common code for btt_set_zero() and btt_set_error().
*
* zero_block Generate a block of all zeros (instead of actually
* doing a read), when the metadata indicates the
* block should read as zeros.
*
* build_rtt These routines construct the run-time tracking
* build_map_locks data structures used during I/O.
*/
#include <inttypes.h>
#include <stdio.h>
#include <sys/param.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <stdint.h>
#include <endian.h>
#include "out.h"
#include "uuid.h"
#include "btt.h"
#include "btt_layout.h"
#include "sys_util.h"
#include "util.h"
/*
* The opaque btt handle containing state tracked by this module
* for the btt namespace. This is created by btt_init(), handed to
* all the other btt_* entry points, and deleted by btt_fini().
*/
struct btt {
unsigned nlane; /* number of concurrent threads allowed per btt */
/*
* The laidout flag indicates whether the namespace contains valid BTT
* metadata. It is initialized by read_layout() and if no valid layout
* is found, all reads return zeros and the first write will write the
* BTT layout. The layout_write_mutex protects the laidout flag so
 * only one writing thread ends up writing the initial metadata by
* calling write_layout().
*/
os_mutex_t layout_write_mutex;
int laidout;
/*
* UUID of the BTT
*/
uint8_t uuid[BTTINFO_UUID_LEN];
/*
* UUID of the containing namespace, used to validate BTT metadata.
*/
uint8_t parent_uuid[BTTINFO_UUID_LEN];
/*
* Parameters controlling/describing the BTT layout.
*/
uint64_t rawsize; /* size of containing namespace */
uint32_t lbasize; /* external LBA size */
uint32_t nfree; /* available flog entries */
uint64_t nlba; /* total number of external LBAs */
unsigned narena; /* number of arenas */
/* run-time state kept for each arena */
struct arena {
uint32_t flags; /* arena flags (btt_info) */
uint32_t external_nlba; /* LBAs that live in this arena */
uint32_t internal_lbasize;
uint32_t internal_nlba;
/*
* The following offsets are relative to the beginning of
* the encapsulating namespace. This is different from
* how these offsets are stored on-media, where they are
		 * relative to the start of the arena. The offsets are
* converted by read_layout() to make them more convenient
* for run-time use.
*/
uint64_t startoff; /* offset to start of arena */
uint64_t dataoff; /* offset to arena data area */
		uint64_t mapoff;	/* offset to arena map */
		uint64_t flogoff;	/* offset to arena flog */
uint64_t nextoff; /* offset to next arena */
/*
* Run-time flog state. Indexed by lane.
*
* The write path uses the flog to find the free block
* it writes to before atomically making it the new
* active block for an external LBA.
*
* The read path doesn't use the flog at all.
*/
struct flog_runtime {
struct btt_flog flog; /* current info */
uint64_t entries[2]; /* offsets for flog pair */
int next; /* next write (0 or 1) */
} *flogs;
/*
* Read tracking table. Indexed by lane.
*
* Before using a free block found in the flog, the write path
* scans the rtt to see if there are any outstanding reads on
* that block (reads that started before the block was freed by
* a concurrent write). Unused slots in the rtt are indicated
* by setting the error bit, BTT_MAP_ENTRY_ERROR, so that the
* entry won't match any post-map LBA when checked.
*/
uint32_t volatile *rtt;
/*
* Map locking. Indexed by pre-map LBA modulo nlane.
*/
os_mutex_t *map_locks;
/*
* Arena info block locking.
*/
os_mutex_t info_lock;
} *arenas;
/*
* Callbacks for doing I/O to namespace. These are provided by
* the code calling the BTT module, which passes them in to
* btt_init(). All namespace I/O is done using these.
*
* The opaque namespace handle "ns" was provided by the code calling
* the BTT module and is passed to each callback to identify the
* namespace being accessed.
*/
void *ns;
const struct ns_callback *ns_cbp;
};
/*
* Signature for arena info blocks. Total size is 16 bytes, including
* the '\0' added to the string by the declaration (the last two bytes
* of the string are '\0').
*/
static const char Sig[] = BTTINFO_SIG;
/*
* Zeroed out flog entry, used when initializing the flog.
*/
static const struct btt_flog Zflog;
/*
* Lookup table and macro for looking up sequence numbers. These are
* the 2-bit numbers that cycle between 01, 10, and 11.
*
* To advance a sequence number to the next number, use something like:
* seq = NSEQ(seq);
*/
static const unsigned Nseq[] = { 0, 2, 3, 1 };
#define NSEQ(seq) (Nseq[(seq) & 3])
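/*
 * Editor's worked example (not part of the original source): starting
 * from a freshly written flog entry with seq 1, successive updates
 * cycle NSEQ(1) == 2, NSEQ(2) == 3, NSEQ(3) == 1, and so on. NSEQ(0)
 * is 0, so the invalid value can never be produced by advancing a
 * valid sequence number.
 */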
/*
* get_map_lock_num -- (internal) Calculate offset into map_locks[]
*
* map_locks[] contains nfree locks which are used to protect the map
* from concurrent access to the same cache line. The index into
* map_locks[] is calculated by looking at the byte offset into the map
* (premap_lba * BTT_MAP_ENTRY_SIZE), figuring out how many cache lines
 * into the map that is (dividing by BTT_MAP_LOCK_ALIGN), and
* then selecting one of nfree locks (the modulo at the end).
*
* The extra cast is to keep gcc from generating a false positive
* 64-32 bit conversion error when -fsanitize is set.
*/
static inline uint32_t
get_map_lock_num(uint32_t premap_lba, uint32_t nfree)
{
return (uint32_t)(premap_lba * BTT_MAP_ENTRY_SIZE / BTT_MAP_LOCK_ALIGN)
% nfree;
}
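/*
 * Editor's worked example (not part of the original source): with
 * BTT_MAP_ENTRY_SIZE 4, BTT_MAP_LOCK_ALIGN 64 and nfree 256, pre-map
 * LBAs 0..15 share the first map cache line and thus map_locks[0],
 * LBAs 16..31 use map_locks[1], and so on, wrapping back to
 * map_locks[0] at LBA 4096.
 */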
/*
* invalid_lba -- (internal) set errno and return true if lba is invalid
*
* This function is used at the top of the entry points where an external
* LBA is provided, like this:
*
* if (invalid_lba(bttp, lba))
* return -1;
*/
static int
invalid_lba(struct btt *bttp, uint64_t lba)
{
LOG(3, "bttp %p lba %" PRIu64, bttp, lba);
if (lba >= bttp->nlba) {
ERR("lba out of range (nlba %" PRIu64 ")", bttp->nlba);
errno = EINVAL;
return 1;
}
return 0;
}
/*
* read_info -- (internal) convert btt_info to host byte order & validate
*
 * Returns true if the info block is valid, in which case all the
 * integer fields have been converted to host byte order. If the info
 * block is not valid, this routine returns false and the info block
 * passed in is left in an unknown state.
*/
static int
read_info(struct btt *bttp, struct btt_info *infop)
{
LOG(3, "infop %p", infop);
if (memcmp(infop->sig, Sig, BTTINFO_SIG_LEN)) {
LOG(3, "signature invalid");
return 0;
}
if (memcmp(infop->parent_uuid, bttp->parent_uuid, BTTINFO_UUID_LEN)) {
LOG(3, "parent UUID mismatch");
return 0;
}
/* to be valid, the fields must checksum correctly */
if (!util_checksum(infop, sizeof(*infop), &infop->checksum, 0, 0)) {
LOG(3, "invalid checksum");
return 0;
}
/* to be valid, info block must have a major version of at least 1 */
if ((infop->major = le16toh(infop->major)) == 0) {
LOG(3, "invalid major version (0)");
return 0;
}
infop->flags = le32toh(infop->flags);
infop->minor = le16toh(infop->minor);
infop->external_lbasize = le32toh(infop->external_lbasize);
infop->external_nlba = le32toh(infop->external_nlba);
infop->internal_lbasize = le32toh(infop->internal_lbasize);
infop->internal_nlba = le32toh(infop->internal_nlba);
infop->nfree = le32toh(infop->nfree);
infop->infosize = le32toh(infop->infosize);
infop->nextoff = le64toh(infop->nextoff);
infop->dataoff = le64toh(infop->dataoff);
infop->mapoff = le64toh(infop->mapoff);
infop->flogoff = le64toh(infop->flogoff);
infop->infooff = le64toh(infop->infooff);
return 1;
}
/*
* map_entry_is_zero -- (internal) checks if map_entry is in zero state
*/
static inline int
map_entry_is_zero(uint32_t map_entry)
{
return (map_entry & ~BTT_MAP_ENTRY_LBA_MASK) == BTT_MAP_ENTRY_ZERO;
}
/*
* map_entry_is_error -- (internal) checks if map_entry is in error state
*/
static inline int
map_entry_is_error(uint32_t map_entry)
{
return (map_entry & ~BTT_MAP_ENTRY_LBA_MASK) == BTT_MAP_ENTRY_ERROR;
}
/*
* map_entry_is_initial -- checks if map_entry is in initial state
*/
int
map_entry_is_initial(uint32_t map_entry)
{
return (map_entry & ~BTT_MAP_ENTRY_LBA_MASK) == 0;
}
/*
* map_entry_is_zero_or_initial -- (internal) checks if map_entry is in initial
* or zero state
*/
static inline int
map_entry_is_zero_or_initial(uint32_t map_entry)
{
uint32_t entry_flags = map_entry & ~BTT_MAP_ENTRY_LBA_MASK;
return entry_flags == 0 || entry_flags == BTT_MAP_ENTRY_ZERO;
}
/*
* btt_flog_get_valid -- return valid and current flog entry
*/
struct btt_flog *
btt_flog_get_valid(struct btt_flog *flog_pair, int *next)
{
/*
* Interesting cases:
* - no valid seq numbers: layout consistency error
* - one valid seq number: that's the current entry
* - two valid seq numbers: higher number is current entry
* - identical seq numbers: layout consistency error
*/
if (flog_pair[0].seq == flog_pair[1].seq) {
return NULL;
} else if (flog_pair[0].seq == 0) {
/* singleton valid flog at flog_pair[1] */
*next = 0;
return &flog_pair[1];
} else if (flog_pair[1].seq == 0) {
/* singleton valid flog at flog_pair[0] */
*next = 1;
return &flog_pair[0];
} else if (NSEQ(flog_pair[0].seq) == flog_pair[1].seq) {
/* flog_pair[1] has the later sequence number */
*next = 0;
return &flog_pair[1];
} else {
/* flog_pair[0] has the later sequence number */
*next = 1;
return &flog_pair[0];
}
}
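/*
 * Editor's worked examples (not part of the original source): for seq
 * pairs (1, 2) and (3, 1) the second entry is current, since
 * NSEQ(1) == 2 and NSEQ(3) == 1; for (2, 1) the first entry is
 * current; pairs like (0, 0) or (2, 2) are layout errors and make
 * btt_flog_get_valid() return NULL.
 */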
/*
* read_flog_pair -- (internal) load up a single flog pair
*
* Zero is returned on success, otherwise -1/errno.
*/
static int
read_flog_pair(struct btt *bttp, unsigned lane, struct arena *arenap,
uint64_t flog_off, struct flog_runtime *flog_runtimep, uint32_t flognum)
{
LOG(5, "bttp %p lane %u arenap %p flog_off %" PRIu64 " runtimep %p "
"flognum %u", bttp, lane, arenap, flog_off, flog_runtimep,
flognum);
flog_runtimep->entries[0] = flog_off;
flog_runtimep->entries[1] = flog_off + sizeof(struct btt_flog);
if (lane >= bttp->nfree) {
ERR("invalid lane %u among nfree %d", lane, bttp->nfree);
errno = EINVAL;
return -1;
}
if (flog_off == 0) {
ERR("invalid flog offset %" PRIu64, flog_off);
errno = EINVAL;
return -1;
}
struct btt_flog flog_pair[2];
if ((*bttp->ns_cbp->nsread)(bttp->ns, lane, flog_pair,
sizeof(flog_pair), flog_off) < 0)
return -1;
btt_flog_convert2h(&flog_pair[0]);
if (invalid_lba(bttp, flog_pair[0].lba))
return -1;
btt_flog_convert2h(&flog_pair[1]);
if (invalid_lba(bttp, flog_pair[1].lba))
return -1;
LOG(6, "flog_pair[0] flog_off %" PRIu64 " old_map %u new_map %u seq %u",
flog_off, flog_pair[0].old_map,
flog_pair[0].new_map, flog_pair[0].seq);
LOG(6, "flog_pair[1] old_map %u new_map %u seq %u",
flog_pair[1].old_map, flog_pair[1].new_map,
flog_pair[1].seq);
struct btt_flog *currentp = btt_flog_get_valid(flog_pair,
&flog_runtimep->next);
if (currentp == NULL) {
ERR("flog layout error: bad seq numbers %d %d",
flog_pair[0].seq, flog_pair[1].seq);
arenap->flags |= BTTINFO_FLAG_ERROR;
return 0;
}
LOG(6, "run-time flog next is %d", flog_runtimep->next);
/* copy current flog into run-time flog state */
flog_runtimep->flog = *currentp;
LOG(9, "read flog[%u]: lba %u old %u%s%s%s new %u%s%s%s", flognum,
currentp->lba,
currentp->old_map & BTT_MAP_ENTRY_LBA_MASK,
(map_entry_is_error(currentp->old_map)) ? " ERROR" : "",
(map_entry_is_zero(currentp->old_map)) ? " ZERO" : "",
(map_entry_is_initial(currentp->old_map)) ? " INIT" : "",
currentp->new_map & BTT_MAP_ENTRY_LBA_MASK,
(map_entry_is_error(currentp->new_map)) ? " ERROR" : "",
(map_entry_is_zero(currentp->new_map)) ? " ZERO" : "",
(map_entry_is_initial(currentp->new_map)) ? " INIT" : "");
/*
* Decide if the current flog info represents a completed
* operation or an incomplete operation. If completed, the
* old_map field will contain the free block to be used for
* the next write. But if the operation didn't complete (indicated
* by the map entry not being updated), then the operation is
* completed now by updating the map entry.
*
* A special case, used by flog entries when first created, is
* when old_map == new_map. This counts as a complete entry
* and doesn't require reading the map to see if recovery is
* required.
*/
if (currentp->old_map == currentp->new_map) {
LOG(9, "flog[%u] entry complete (initial state)", flognum);
return 0;
}
/* convert pre-map LBA into an offset into the map */
uint64_t map_entry_off = arenap->mapoff +
BTT_MAP_ENTRY_SIZE * currentp->lba;
/* read current map entry */
uint32_t entry;
if ((*bttp->ns_cbp->nsread)(bttp->ns, lane, &entry,
sizeof(entry), map_entry_off) < 0)
return -1;
entry = le32toh(entry);
/* map entry in initial state */
if (map_entry_is_initial(entry))
entry = currentp->lba | BTT_MAP_ENTRY_NORMAL;
if (currentp->new_map != entry && currentp->old_map == entry) {
/* last update didn't complete */
LOG(9, "recover flog[%u]: map[%u]: %u",
flognum, currentp->lba, currentp->new_map);
/*
* Recovery step is to complete the transaction by
* updating the map entry.
*/
entry = htole32(currentp->new_map);
if ((*bttp->ns_cbp->nswrite)(bttp->ns, lane, &entry,
sizeof(uint32_t), map_entry_off) < 0)
return -1;
}
return 0;
}
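/*
 * Editor's recovery example (not part of the original source,
 * hypothetical values, map entry flag bits omitted for brevity):
 * suppose a crash happened after flog[5] was updated to { lba 10,
 * old_map 70, new_map 99 } but before map[10] was rewritten. On the
 * next open, read_flog_pair() reads map[10], still sees 70 (the
 * old_map), concludes the map update never completed, and finishes
 * the transaction by writing 99 into map[10]. If map[10] already
 * contains 99, the operation had completed and nothing is done.
 */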
/*
* flog_update -- (internal) write out an updated flog entry
*
* The flog entries are not checksummed. Instead, increasing sequence
* numbers are used to atomically switch the active flog entry between
* the first and second struct btt_flog in each slot. In order for this
* to work, the sequence number must be updated only after all the other
* fields in the flog are updated. So the writes to the flog are broken
 * into two writes, one for the first two fields (lba, old_map) and,
 * only after those fields are known to be written durably, the second
 * write covering the new_map and seq fields is done.
*
* Returns 0 on success, otherwise -1/errno.
*/
static int
flog_update(struct btt *bttp, unsigned lane, struct arena *arenap,
uint32_t lba, uint32_t old_map, uint32_t new_map)
{
LOG(3, "bttp %p lane %u arenap %p lba %u old_map %u new_map %u",
bttp, lane, arenap, lba, old_map, new_map);
/* construct new flog entry in little-endian byte order */
struct btt_flog new_flog;
new_flog.lba = lba;
new_flog.old_map = old_map;
new_flog.new_map = new_map;
new_flog.seq = NSEQ(arenap->flogs[lane].flog.seq);
btt_flog_convert2le(&new_flog);
uint64_t new_flog_off =
arenap->flogs[lane].entries[arenap->flogs[lane].next];
/* write out first two fields first */
if ((*bttp->ns_cbp->nswrite)(bttp->ns, lane, &new_flog,
sizeof(uint32_t) * 2, new_flog_off) < 0)
return -1;
new_flog_off += sizeof(uint32_t) * 2;
/* write out new_map and seq field to make it active */
if ((*bttp->ns_cbp->nswrite)(bttp->ns, lane, &new_flog.new_map,
sizeof(uint32_t) * 2, new_flog_off) < 0)
return -1;
/* flog entry written successfully, update run-time state */
arenap->flogs[lane].next = 1 - arenap->flogs[lane].next;
arenap->flogs[lane].flog.lba = lba;
arenap->flogs[lane].flog.old_map = old_map;
arenap->flogs[lane].flog.new_map = new_map;
arenap->flogs[lane].flog.seq = NSEQ(arenap->flogs[lane].flog.seq);
LOG(9, "update flog[%u]: lba %u old %u%s%s%s new %u%s%s%s", lane, lba,
old_map & BTT_MAP_ENTRY_LBA_MASK,
(map_entry_is_error(old_map)) ? " ERROR" : "",
(map_entry_is_zero(old_map)) ? " ZERO" : "",
(map_entry_is_initial(old_map)) ? " INIT" : "",
new_map & BTT_MAP_ENTRY_LBA_MASK,
(map_entry_is_error(new_map)) ? " ERROR" : "",
(map_entry_is_zero(new_map)) ? " ZERO" : "",
(map_entry_is_initial(new_map)) ? " INIT" : "");
return 0;
}
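/*
 * Editor's note (not part of the original source): if power fails
 * between the two nswrite() calls above, the slot being overwritten
 * holds a new lba and old_map but still carries its stale (older)
 * seq, so btt_flog_get_valid() keeps selecting the other, fully
 * written slot on recovery and the interrupted update is simply
 * discarded. This two-phase ordering is what makes the flog update
 * powerfail atomic.
 */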
/*
* arena_setf -- (internal) updates the given flag for the arena info block
*/
static int
arena_setf(struct btt *bttp, struct arena *arenap, unsigned lane, uint32_t setf)
{
LOG(3, "bttp %p arenap %p lane %u setf 0x%x", bttp, arenap, lane, setf);
/* update runtime state */
util_fetch_and_or32(&arenap->flags, setf);
if (!bttp->laidout) {
/* no layout yet to update */
return 0;
}
/*
* Read, modify and write out the info block
* at both the beginning and end of the arena.
*/
uint64_t arena_off = arenap->startoff;
struct btt_info info;
/* protect from simultaneous writes to the layout */
util_mutex_lock(&arenap->info_lock);
if ((*bttp->ns_cbp->nsread)(bttp->ns, lane, &info,
sizeof(info), arena_off) < 0) {
goto err;
}
uint64_t infooff = le64toh(info.infooff);
/* update flags */
info.flags |= htole32(setf);
/* update checksum */
util_checksum(&info, sizeof(info), &info.checksum, 1, 0);
if ((*bttp->ns_cbp->nswrite)(bttp->ns, lane, &info,
sizeof(info), arena_off) < 0) {
goto err;
}
if ((*bttp->ns_cbp->nswrite)(bttp->ns, lane, &info,
sizeof(info), arena_off + infooff) < 0) {
goto err;
}
util_mutex_unlock(&arenap->info_lock);
return 0;
err:
util_mutex_unlock(&arenap->info_lock);
return -1;
}
/*
* set_arena_error -- (internal) set the error flag for the given arena
*/
static int
set_arena_error(struct btt *bttp, struct arena *arenap, unsigned lane)
{
LOG(3, "bttp %p arena %p lane %u", bttp, arenap, lane);
return arena_setf(bttp, arenap, lane, BTTINFO_FLAG_ERROR);
}
/*
* read_flogs -- (internal) load up all the flog entries for an arena
*
* Zero is returned on success, otherwise -1/errno.
*/
static int
read_flogs(struct btt *bttp, unsigned lane, struct arena *arenap)
{
if ((arenap->flogs = Zalloc(bttp->nfree *
sizeof(struct flog_runtime))) == NULL) {
ERR("!Malloc for %u flog entries", bttp->nfree);
return -1;
}
/*
* Load up the flog state. read_flog_pair() will determine if
	 * any recovery steps are required and take them on the in-memory
	 * data structures it creates. The arena error flag is set when
	 * an invalid state is detected.
*/
uint64_t flog_off = arenap->flogoff;
struct flog_runtime *flog_runtimep = arenap->flogs;
for (uint32_t i = 0; i < bttp->nfree; i++) {
if (read_flog_pair(bttp, lane, arenap, flog_off,
flog_runtimep, i) < 0) {
set_arena_error(bttp, arenap, lane);
return -1;
}
/* prepare for next time around the loop */
flog_off += roundup(2 * sizeof(struct btt_flog),
BTT_FLOG_PAIR_ALIGN);
flog_runtimep++;
}
return 0;
}
/*
* build_rtt -- (internal) construct a read tracking table for an arena
*
* Zero is returned on success, otherwise -1/errno.
*
* The rtt is big enough to hold an entry for each free block (nfree)
* since nlane can't be bigger than nfree. nlane may end up smaller,
* in which case some of the high rtt entries will be unused.
*/
static int
build_rtt(struct btt *bttp, struct arena *arenap)
{
if ((arenap->rtt = Malloc(bttp->nfree * sizeof(uint32_t)))
== NULL) {
ERR("!Malloc for %d rtt entries", bttp->nfree);
return -1;
}
for (uint32_t lane = 0; lane < bttp->nfree; lane++)
arenap->rtt[lane] = BTT_MAP_ENTRY_ERROR;
util_synchronize();
return 0;
}
/*
* build_map_locks -- (internal) construct map locks
*
* Zero is returned on success, otherwise -1/errno.
*/
static int
build_map_locks(struct btt *bttp, struct arena *arenap)
{
if ((arenap->map_locks =
Malloc(bttp->nfree * sizeof(*arenap->map_locks)))
== NULL) {
ERR("!Malloc for %d map_lock entries", bttp->nfree);
return -1;
}
for (uint32_t lane = 0; lane < bttp->nfree; lane++)
util_mutex_init(&arenap->map_locks[lane]);
return 0;
}
/*
* read_arena -- (internal) load up an arena and build run-time state
*
* Zero is returned on success, otherwise -1/errno.
*/
static int
read_arena(struct btt *bttp, unsigned lane, uint64_t arena_off,
struct arena *arenap)
{
LOG(3, "bttp %p lane %u arena_off %" PRIu64 " arenap %p",
bttp, lane, arena_off, arenap);
struct btt_info info;
if ((*bttp->ns_cbp->nsread)(bttp->ns, lane, &info, sizeof(info),
arena_off) < 0)
return -1;
arenap->flags = le32toh(info.flags);
arenap->external_nlba = le32toh(info.external_nlba);
arenap->internal_lbasize = le32toh(info.internal_lbasize);
arenap->internal_nlba = le32toh(info.internal_nlba);
arenap->startoff = arena_off;
arenap->dataoff = arena_off + le64toh(info.dataoff);
arenap->mapoff = arena_off + le64toh(info.mapoff);
arenap->flogoff = arena_off + le64toh(info.flogoff);
arenap->nextoff = arena_off + le64toh(info.nextoff);
if (read_flogs(bttp, lane, arenap) < 0)
return -1;
if (build_rtt(bttp, arenap) < 0)
return -1;
if (build_map_locks(bttp, arenap) < 0)
return -1;
/* initialize the per arena info block lock */
util_mutex_init(&arenap->info_lock);
return 0;
}
/*
 * btt_info_convert2h -- convert btt_info to host byte order
*/
void
btt_info_convert2h(struct btt_info *infop)
{
infop->flags = le32toh(infop->flags);
infop->major = le16toh(infop->major);
infop->minor = le16toh(infop->minor);
infop->external_lbasize = le32toh(infop->external_lbasize);
infop->external_nlba = le32toh(infop->external_nlba);
infop->internal_lbasize = le32toh(infop->internal_lbasize);
infop->internal_nlba = le32toh(infop->internal_nlba);
infop->nfree = le32toh(infop->nfree);
infop->infosize = le32toh(infop->infosize);
infop->nextoff = le64toh(infop->nextoff);
infop->dataoff = le64toh(infop->dataoff);
infop->mapoff = le64toh(infop->mapoff);
infop->flogoff = le64toh(infop->flogoff);
infop->infooff = le64toh(infop->infooff);
}
/*
* btt_info_convert2le -- convert btt_info to little-endian byte order
*/
void
btt_info_convert2le(struct btt_info *infop)
{
	infop->flags = htole32(infop->flags);
	infop->major = htole16(infop->major);
	infop->minor = htole16(infop->minor);
	infop->external_lbasize = htole32(infop->external_lbasize);
	infop->external_nlba = htole32(infop->external_nlba);
	infop->internal_lbasize = htole32(infop->internal_lbasize);
	infop->internal_nlba = htole32(infop->internal_nlba);
	infop->nfree = htole32(infop->nfree);
	infop->infosize = htole32(infop->infosize);
	infop->nextoff = htole64(infop->nextoff);
	infop->dataoff = htole64(infop->dataoff);
	infop->mapoff = htole64(infop->mapoff);
	infop->flogoff = htole64(infop->flogoff);
	infop->infooff = htole64(infop->infooff);
}
/*
* btt_flog_convert2h -- convert btt_flog to host byte order
*/
void
btt_flog_convert2h(struct btt_flog *flogp)
{
flogp->lba = le32toh(flogp->lba);
flogp->old_map = le32toh(flogp->old_map);
flogp->new_map = le32toh(flogp->new_map);
flogp->seq = le32toh(flogp->seq);
}
/*
* btt_flog_convert2le -- convert btt_flog to LE byte order
*/
void
btt_flog_convert2le(struct btt_flog *flogp)
{
flogp->lba = htole32(flogp->lba);
flogp->old_map = htole32(flogp->old_map);
flogp->new_map = htole32(flogp->new_map);
flogp->seq = htole32(flogp->seq);
}
/*
* read_arenas -- (internal) load up all arenas and build run-time state
*
* On entry, layout must be known to be valid, and the number of arenas
* must be known. Zero is returned on success, otherwise -1/errno.
*/
static int
read_arenas(struct btt *bttp, unsigned lane, unsigned narena)
{
LOG(3, "bttp %p lane %u narena %d", bttp, lane, narena);
if ((bttp->arenas = Zalloc(narena * sizeof(*bttp->arenas))) == NULL) {
ERR("!Malloc for %u arenas", narena);
goto err;
}
uint64_t arena_off = 0;
struct arena *arenap = bttp->arenas;
for (unsigned i = 0; i < narena; i++) {
if (read_arena(bttp, lane, arena_off, arenap) < 0)
goto err;
/* prepare for next time around the loop */
arena_off = arenap->nextoff;
arenap++;
}
bttp->laidout = 1;
return 0;
err:
LOG(4, "error clean up");
int oerrno = errno;
if (bttp->arenas) {
for (unsigned i = 0; i < bttp->narena; i++) {
if (bttp->arenas[i].flogs)
Free(bttp->arenas[i].flogs);
if (bttp->arenas[i].rtt)
Free((void *)bttp->arenas[i].rtt);
if (bttp->arenas[i].map_locks)
Free((void *)bttp->arenas[i].map_locks);
}
Free(bttp->arenas);
bttp->arenas = NULL;
}
errno = oerrno;
return -1;
}
/*
* internal_lbasize -- (internal) calculate internal LBA size
*/
static inline uint32_t
internal_lbasize(uint32_t external_lbasize)
{
uint32_t internal_lbasize = external_lbasize;
if (internal_lbasize < BTT_MIN_LBA_SIZE)
internal_lbasize = BTT_MIN_LBA_SIZE;
internal_lbasize =
roundup(internal_lbasize, BTT_INTERNAL_LBA_ALIGNMENT);
/* check for overflow */
if (internal_lbasize < BTT_INTERNAL_LBA_ALIGNMENT) {
errno = EINVAL;
ERR("!Invalid lba size after alignment: %u ", internal_lbasize);
return 0;
}
return internal_lbasize;
}
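/*
 * Editor's worked examples (not part of the original source): an
 * external LBA size of 512 is already >= BTT_MIN_LBA_SIZE and aligned,
 * so it stays 512; 520 (for example, 512 bytes of data plus 8 bytes of
 * protection info) rounds up to 768; 4096 stays 4096.
 */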
/*
* btt_flog_size -- calculate flog data size
*/
uint64_t
btt_flog_size(uint32_t nfree)
{
uint64_t flog_size = nfree * roundup(2 * sizeof(struct btt_flog),
BTT_FLOG_PAIR_ALIGN);
return roundup(flog_size, BTT_ALIGNMENT);
}
/*
* btt_map_size -- calculate map data size
*/
uint64_t
btt_map_size(uint32_t external_nlba)
{
return roundup(external_nlba * BTT_MAP_ENTRY_SIZE, BTT_ALIGNMENT);
}
/*
* btt_arena_datasize -- whole arena size without BTT Info header, backup and
* flog means size of blocks and map
*/
uint64_t
btt_arena_datasize(uint64_t arena_size, uint32_t nfree)
{
return arena_size - 2 * sizeof(struct btt_info) - btt_flog_size(nfree);
}
/*
* btt_info_set_params -- (internal) calculate and set BTT Info
* external_lbasize, internal_lbasize, nfree, infosize, external_nlba and
* internal_nlba
*/
static int
btt_info_set_params(struct btt_info *info, uint32_t external_lbasize,
uint32_t internal_lbasize, uint32_t nfree, uint64_t arena_size)
{
info->external_lbasize = external_lbasize;
info->internal_lbasize = internal_lbasize;
info->nfree = nfree;
info->infosize = sizeof(*info);
uint64_t arena_data_size = btt_arena_datasize(arena_size, nfree);
/* allow for map alignment padding */
uint64_t internal_nlba = (arena_data_size - BTT_ALIGNMENT) /
(info->internal_lbasize + BTT_MAP_ENTRY_SIZE);
/* ensure the number of blocks is at least 2*nfree */
if (internal_nlba < 2 * nfree) {
errno = EINVAL;
ERR("!number of internal blocks: %" PRIu64
" expected at least %u",
internal_nlba, 2 * nfree);
return -1;
}
ASSERT(internal_nlba <= UINT32_MAX);
uint32_t internal_nlba_u32 = (uint32_t)internal_nlba;
info->internal_nlba = internal_nlba_u32;
/* external LBA does not include free blocks */
info->external_nlba = internal_nlba_u32 - info->nfree;
ASSERT((arena_data_size - btt_map_size(info->external_nlba)) /
internal_lbasize >= internal_nlba);
return 0;
}
/*
* btt_info_set_offs -- (internal) calculate and set the BTT Info dataoff,
* nextoff, infooff, flogoff and mapoff. These are all relative to the
* beginning of the arena.
*/
static void
btt_info_set_offs(struct btt_info *info, uint64_t arena_size,
uint64_t space_left)
{
info->dataoff = info->infosize;
/* set offset to next valid arena */
if (space_left >= BTT_MIN_SIZE)
info->nextoff = arena_size;
else
info->nextoff = 0;
info->infooff = arena_size - sizeof(struct btt_info);
info->flogoff = info->infooff - btt_flog_size(info->nfree);
info->mapoff = info->flogoff - btt_map_size(info->external_nlba);
ASSERTeq(btt_arena_datasize(arena_size, info->nfree) -
btt_map_size(info->external_nlba), info->mapoff -
info->dataoff);
}
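/*
 * Editor's worked example (not part of the original source): for the
 * smallest arena, arena_size == BTT_MIN_SIZE (16 MiB), nfree == 256,
 * external lbasize 512, space_left < BTT_MIN_SIZE, and assuming
 * sizeof(struct btt_info) == 4096:
 *
 *	arena_data_size = 16777216 - 2 * 4096 - 16384 = 16752640
 *	internal_nlba = (16752640 - 4096) / (512 + 4) = 32458
 *	external_nlba = 32458 - 256 = 32202
 *	dataoff = 4096, nextoff = 0, infooff = 16773120,
 *	flogoff = 16756736, mapoff = 16625664
 *
 * so the data blocks occupy [dataoff, mapoff) and the map, flog and
 * backup info block are packed at the end of the arena.
 */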
/*
* btt_info_set -- set BTT Info params and offsets
*/
int
btt_info_set(struct btt_info *info, uint32_t external_lbasize,
uint32_t nfree, uint64_t arena_size, uint64_t space_left)
{
/* calculate internal LBA size */
uint32_t internal_lba_size = internal_lbasize(external_lbasize);
if (internal_lba_size == 0)
return -1;
/* set params and offsets */
if (btt_info_set_params(info, external_lbasize,
internal_lba_size, nfree, arena_size))
return -1;
btt_info_set_offs(info, arena_size, space_left);
return 0;
}
/*
* write_layout -- (internal) write out the initial btt metadata layout
*
 * Called with write == 1 only once in the lifetime of a btt namespace, when
* the first write happens. The caller of this routine is responsible for
* locking out multiple threads. This routine doesn't read anything -- by the
* time it is called, it is known there's no layout in the namespace and a new
* layout should be written.
*
* Calling with write == 0 tells this routine to do the calculations for
* bttp->narena and bttp->nlba, but don't write out any metadata.
*
 * If successful, sets bttp->laidout to 1 and returns 0. Otherwise -1
 * is returned and errno is set, and bttp->laidout remains 0 so that
 * later attempts to write will try again to create the layout.
*/
static int
write_layout(struct btt *bttp, unsigned lane, int write)
{
LOG(3, "bttp %p lane %u write %d", bttp, lane, write);
ASSERT(bttp->rawsize >= BTT_MIN_SIZE);
ASSERT(bttp->nfree);
/*
* If a new layout is being written, generate the BTT's UUID.
*/
if (write) {
int ret = util_uuid_generate(bttp->uuid);
if (ret < 0) {
LOG(2, "util_uuid_generate failed");
return -1;
}
}
/*
	 * The number of arenas is the number of full arenas of size
	 * BTT_MAX_ARENA that fit into rawsize, plus one more arena if
	 * the remainder is at least BTT_MIN_SIZE.
*/
bttp->narena = (unsigned)(bttp->rawsize / BTT_MAX_ARENA);
if (bttp->rawsize % BTT_MAX_ARENA >= BTT_MIN_SIZE)
bttp->narena++;
LOG(4, "narena %u", bttp->narena);
uint32_t internal_lba_size = internal_lbasize(bttp->lbasize);
if (internal_lba_size == 0)
return -1;
LOG(4, "adjusted internal_lbasize %u", internal_lba_size);
uint64_t total_nlba = 0;
uint64_t rawsize = bttp->rawsize;
unsigned arena_num = 0;
uint64_t arena_off = 0;
/*
* for each arena...
*/
while (rawsize >= BTT_MIN_SIZE) {
LOG(4, "layout arena %u", arena_num);
uint64_t arena_rawsize = rawsize;
if (arena_rawsize > BTT_MAX_ARENA) {
arena_rawsize = BTT_MAX_ARENA;
}
rawsize -= arena_rawsize;
arena_num++;
struct btt_info info;
memset(&info, '\0', sizeof(info));
if (btt_info_set_params(&info, bttp->lbasize,
internal_lba_size, bttp->nfree, arena_rawsize))
return -1;
LOG(4, "internal_nlba %u external_nlba %u",
info.internal_nlba, info.external_nlba);
total_nlba += info.external_nlba;
/*
		 * The rest of the loop body calculates the metadata
		 * structures and lays them out for this arena, so it is
		 * skipped unless the write flag is set.
*/
if (!write)
continue;
btt_info_set_offs(&info, arena_rawsize, rawsize);
LOG(4, "nextoff 0x%016" PRIx64, info.nextoff);
LOG(4, "dataoff 0x%016" PRIx64, info.dataoff);
LOG(4, "mapoff 0x%016" PRIx64, info.mapoff);
LOG(4, "flogoff 0x%016" PRIx64, info.flogoff);
LOG(4, "infooff 0x%016" PRIx64, info.infooff);
/* zero map if ns is not zero-initialized */
if (!bttp->ns_cbp->ns_is_zeroed) {
uint64_t mapsize = btt_map_size(info.external_nlba);
			if ((*bttp->ns_cbp->nszero)(bttp->ns, lane, mapsize,
					arena_off + info.mapoff) < 0)
return -1;
}
/* write out the initial flog */
uint64_t flog_entry_off = arena_off + info.flogoff;
uint32_t next_free_lba = info.external_nlba;
for (uint32_t i = 0; i < bttp->nfree; i++) {
struct btt_flog flog;
flog.lba = htole32(i);
flog.old_map = flog.new_map =
htole32(next_free_lba | BTT_MAP_ENTRY_ZERO);
flog.seq = htole32(1);
/*
* Write both btt_flog structs in the pair, writing
* the second one as all zeros.
*/
LOG(6, "flog[%u] entry off %" PRIu64
" initial %u + zero = %u",
i, flog_entry_off,
next_free_lba,
next_free_lba | BTT_MAP_ENTRY_ZERO);
if ((*bttp->ns_cbp->nswrite)(bttp->ns, lane, &flog,
sizeof(flog), flog_entry_off) < 0)
return -1;
flog_entry_off += sizeof(flog);
LOG(6, "flog[%u] entry off %" PRIu64 " zeros",
i, flog_entry_off);
if ((*bttp->ns_cbp->nswrite)(bttp->ns, lane, &Zflog,
sizeof(Zflog), flog_entry_off) < 0)
return -1;
flog_entry_off += sizeof(flog);
flog_entry_off = roundup(flog_entry_off,
BTT_FLOG_PAIR_ALIGN);
next_free_lba++;
}
/*
* Construct the BTT info block and write it out
* at both the beginning and end of the arena.
*/
memcpy(info.sig, Sig, BTTINFO_SIG_LEN);
memcpy(info.uuid, bttp->uuid, BTTINFO_UUID_LEN);
memcpy(info.parent_uuid, bttp->parent_uuid, BTTINFO_UUID_LEN);
info.major = BTTINFO_MAJOR_VERSION;
info.minor = BTTINFO_MINOR_VERSION;
btt_info_convert2le(&info);
util_checksum(&info, sizeof(info), &info.checksum, 1, 0);
if ((*bttp->ns_cbp->nswrite)(bttp->ns, lane, &info,
sizeof(info), arena_off) < 0)
return -1;
if ((*bttp->ns_cbp->nswrite)(bttp->ns, lane, &info,
sizeof(info), arena_off + info.infooff) < 0)
return -1;
arena_off += info.nextoff;
}
ASSERTeq(bttp->narena, arena_num);
bttp->nlba = total_nlba;
if (write) {
/*
* The layout is written now, so load up the arenas.
*/
return read_arenas(bttp, lane, bttp->narena);
}
return 0;
}
/*
* read_layout -- (internal) load up layout info from btt namespace
*
* Called once when the btt namespace is opened for use.
 * Sets bttp->laidout to 0 if no valid layout is found, 1 otherwise.
*
* Any recovery actions required (as indicated by the flog state) are
* performed by this routine.
*
* Any quick checks for layout consistency are performed by this routine
* (quick enough to be done each time a BTT area is opened for use, not
* like the slow consistency checks done by btt_check()).
*
* Returns 0 if no errors are encountered accessing the namespace (in this
* context, detecting there's no layout is not an error if the nsread function
* didn't have any problems doing the reads). Otherwise, -1 is returned
* and errno is set.
*/
static int
read_layout(struct btt *bttp, unsigned lane)
{
LOG(3, "bttp %p", bttp);
ASSERT(bttp->rawsize >= BTT_MIN_SIZE);
unsigned narena = 0;
uint32_t smallest_nfree = UINT32_MAX;
uint64_t rawsize = bttp->rawsize;
uint64_t total_nlba = 0;
uint64_t arena_off = 0;
bttp->nfree = BTT_DEFAULT_NFREE;
/*
* For each arena, see if there's a valid info block
*/
while (rawsize >= BTT_MIN_SIZE) {
narena++;
struct btt_info info;
if ((*bttp->ns_cbp->nsread)(bttp->ns, lane, &info,
sizeof(info), arena_off) < 0)
return -1;
if (!read_info(bttp, &info)) {
/*
* Failed to find complete BTT metadata. Just
* calculate the narena and nlba values that will
* result when write_layout() gets called. This
* allows checks against nlba to work correctly
* even before the layout is written.
*/
return write_layout(bttp, lane, 0);
}
if (info.external_lbasize != bttp->lbasize) {
/* can't read it assuming the wrong block size */
ERR("inconsistent lbasize");
errno = EINVAL;
return -1;
}
if (info.nfree == 0) {
ERR("invalid nfree");
errno = EINVAL;
return -1;
}
if (info.external_nlba == 0) {
ERR("invalid external_nlba");
errno = EINVAL;
return -1;
}
if (info.nextoff && (info.nextoff != BTT_MAX_ARENA)) {
ERR("invalid arena size");
errno = EINVAL;
return -1;
}
if (info.nfree < smallest_nfree)
smallest_nfree = info.nfree;
total_nlba += info.external_nlba;
arena_off += info.nextoff;
if (info.nextoff == 0)
break;
if (info.nextoff > rawsize) {
ERR("invalid next arena offset");
errno = EINVAL;
return -1;
}
rawsize -= info.nextoff;
}
ASSERT(narena);
bttp->narena = narena;
bttp->nlba = total_nlba;
/*
* All arenas were valid. nfree should be the smallest value found
* among different arenas.
*/
if (smallest_nfree < bttp->nfree)
bttp->nfree = smallest_nfree;
/*
* Load up arenas.
*/
return read_arenas(bttp, lane, narena);
}
/*
* zero_block -- (internal) satisfy a read with a block of zeros
*
* Returns 0 on success, otherwise -1/errno.
*/
static int
zero_block(struct btt *bttp, void *buf)
{
LOG(3, "bttp %p", bttp);
memset(buf, '\0', bttp->lbasize);
return 0;
}
/*
* lba_to_arena_lba -- (internal) calculate the arena & pre-map LBA
*
* This routine takes the external LBA and matches it to the
* appropriate arena, adjusting the lba for use within that arena.
*
* If successful, zero is returned, *arenapp is a pointer to the appropriate
* arena struct in the run-time state, and *premap_lbap is the LBA adjusted
* to an arena-internal LBA (also known as the pre-map LBA). Otherwise
* -1/errno.
*/
static int
lba_to_arena_lba(struct btt *bttp, uint64_t lba,
struct arena **arenapp, uint32_t *premap_lbap)
{
LOG(3, "bttp %p lba %" PRIu64, bttp, lba);
ASSERT(bttp->laidout);
unsigned arena;
for (arena = 0; arena < bttp->narena; arena++)
if (lba < bttp->arenas[arena].external_nlba)
break;
else
lba -= bttp->arenas[arena].external_nlba;
ASSERT(arena < bttp->narena);
*arenapp = &bttp->arenas[arena];
ASSERT(lba <= UINT32_MAX);
*premap_lbap = (uint32_t)lba;
LOG(3, "arenap %p pre-map LBA %u", *arenapp, *premap_lbap);
return 0;
}
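/*
 * Editor's example (not part of the original source, hypothetical
 * sizes): with two arenas of external_nlba 1000 each, external LBA
 * 1500 is past the first arena, so *arenapp points at the second
 * arena and *premap_lbap becomes 500.
 */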
/*
* btt_init -- prepare a btt namespace for use, returning an opaque handle
*
* Returns handle on success, otherwise NULL/errno.
*
 * When handed a pristine namespace, the BTT layout is created
 * implicitly on the first write.
*
 * If the arenas have different nfree values, the lowest one found is
 * used, since nfree limits the overall concurrency ("bandwidth").
*/
struct btt *
btt_init(uint64_t rawsize, uint32_t lbasize, uint8_t parent_uuid[],
unsigned maxlane, void *ns, const struct ns_callback *ns_cbp)
{
LOG(3, "rawsize %" PRIu64 " lbasize %u", rawsize, lbasize);
if (rawsize < BTT_MIN_SIZE) {
ERR("rawsize smaller than BTT_MIN_SIZE %u", BTT_MIN_SIZE);
errno = EINVAL;
return NULL;
}
struct btt *bttp = Zalloc(sizeof(*bttp));
if (bttp == NULL) {
ERR("!Malloc %zu bytes", sizeof(*bttp));
return NULL;
}
util_mutex_init(&bttp->layout_write_mutex);
memcpy(bttp->parent_uuid, parent_uuid, BTTINFO_UUID_LEN);
bttp->rawsize = rawsize;
bttp->lbasize = lbasize;
bttp->ns = ns;
bttp->ns_cbp = ns_cbp;
/*
* Load up layout, if it exists.
*
* Whether read_layout() finds a valid layout or not, it finishes
* updating these layout-related fields:
* bttp->nfree
* bttp->nlba
* bttp->narena
	 * since these fields are used even before a valid layout is written.
*/
if (read_layout(bttp, 0) < 0) {
btt_fini(bttp); /* free up any allocations */
return NULL;
}
bttp->nlane = bttp->nfree;
/* maxlane, if provided, is an upper bound on nlane */
if (maxlane && bttp->nlane > maxlane)
bttp->nlane = maxlane;
LOG(3, "success, bttp %p nlane %u", bttp, bttp->nlane);
return bttp;
}
/*
* btt_nlane -- return the number of "lanes" for this btt namespace
*
* The number of lanes is the number of threads allowed in this module
* concurrently for a given btt. Each thread executing this code must
* have a unique "lane" number assigned to it between 0 and btt_nlane() - 1.
*/
unsigned
btt_nlane(struct btt *bttp)
{
LOG(3, "bttp %p", bttp);
return bttp->nlane;
}
/*
* btt_nlba -- return the number of usable blocks in a btt namespace
*
* Valid LBAs to pass to btt_read() and btt_write() are 0 through
* btt_nlba() - 1.
*/
size_t
btt_nlba(struct btt *bttp)
{
LOG(3, "bttp %p", bttp);
return bttp->nlba;
}
/*
* btt_read -- read a block from a btt namespace
*
* Returns 0 on success, otherwise -1/errno.
*/
int
btt_read(struct btt *bttp, unsigned lane, uint64_t lba, void *buf)
{
LOG(3, "bttp %p lane %u lba %" PRIu64, bttp, lane, lba);
if (invalid_lba(bttp, lba))
return -1;
/* if there's no layout written yet, all reads come back as zeros */
if (!bttp->laidout)
return zero_block(bttp, buf);
/* find which arena LBA lives in, and the offset to the map entry */
struct arena *arenap;
uint32_t premap_lba;
uint64_t map_entry_off;
if (lba_to_arena_lba(bttp, lba, &arenap, &premap_lba) < 0)
return -1;
/* convert pre-map LBA into an offset into the map */
map_entry_off = arenap->mapoff + BTT_MAP_ENTRY_SIZE * premap_lba;
/*
* Read the current map entry to get the post-map LBA for the data
* block read.
*/
uint32_t entry;
if ((*bttp->ns_cbp->nsread)(bttp->ns, lane, &entry,
sizeof(entry), map_entry_off) < 0)
return -1;
entry = le32toh(entry);
/*
* Retries come back to the top of this loop (for a rare case where
* the map is changed by another thread doing writes to the same LBA).
*/
while (1) {
if (map_entry_is_error(entry)) {
ERR("EIO due to map entry error flag");
errno = EIO;
return -1;
}
if (map_entry_is_zero_or_initial(entry))
return zero_block(bttp, buf);
/*
* Record the post-map LBA in the read tracking table during
* the read. The write will check entries in the read tracking
* table before allocating a block for a write, waiting for
* outstanding reads on that block to complete.
*
* Since we already checked for error, zero, and initial
* states above, the entry must have both error and zero
* bits set at this point (BTT_MAP_ENTRY_NORMAL). We store
* the entry that way, with those bits set, in the rtt and
* btt_write() will check for it the same way, with the bits
* both set.
*/
arenap->rtt[lane] = entry;
util_synchronize();
/*
* In case this thread was preempted between reading entry and
* storing it in the rtt, check to see if the map changed. If
* it changed, the block about to be read is at least free now
* (in the flog, but that's okay since the data will still be
* undisturbed) and potentially allocated and being used for
* another write (data disturbed, so not okay to continue).
*/
uint32_t latest_entry;
if ((*bttp->ns_cbp->nsread)(bttp->ns, lane, &latest_entry,
sizeof(latest_entry), map_entry_off) < 0) {
arenap->rtt[lane] = BTT_MAP_ENTRY_ERROR;
return -1;
}
latest_entry = le32toh(latest_entry);
if (entry == latest_entry)
break; /* map stayed the same */
else
entry = latest_entry; /* try again */
}
/*
* It is safe to read the block now, since the rtt protects the
* block from getting re-allocated to something else by a write.
*/
uint64_t data_block_off =
arenap->dataoff + (uint64_t)(entry & BTT_MAP_ENTRY_LBA_MASK) *
arenap->internal_lbasize;
int readret = (*bttp->ns_cbp->nsread)(bttp->ns, lane, buf,
bttp->lbasize, data_block_off);
/* done with read, so clear out rtt entry */
arenap->rtt[lane] = BTT_MAP_ENTRY_ERROR;
return readret;
}
/*
* map_lock -- (internal) grab the map_lock and read a map entry
*/
static int
map_lock(struct btt *bttp, unsigned lane, struct arena *arenap,
uint32_t *entryp, uint32_t premap_lba)
{
LOG(3, "bttp %p lane %u arenap %p premap_lba %u",
bttp, lane, arenap, premap_lba);
uint64_t map_entry_off =
arenap->mapoff + BTT_MAP_ENTRY_SIZE * premap_lba;
uint32_t map_lock_num = get_map_lock_num(premap_lba, bttp->nfree);
util_mutex_lock(&arenap->map_locks[map_lock_num]);
/* read the old map entry */
if ((*bttp->ns_cbp->nsread)(bttp->ns, lane, entryp,
sizeof(uint32_t), map_entry_off) < 0) {
util_mutex_unlock(&arenap->map_locks[map_lock_num]);
return -1;
}
/* if map entry is in its initial state return premap_lba */
if (map_entry_is_initial(*entryp))
*entryp = htole32(premap_lba | BTT_MAP_ENTRY_NORMAL);
LOG(9, "locked map[%d]: %u%s%s", premap_lba,
*entryp & BTT_MAP_ENTRY_LBA_MASK,
(map_entry_is_error(*entryp)) ? " ERROR" : "",
(map_entry_is_zero(*entryp)) ? " ZERO" : "");
return 0;
}
/*
* map_abort -- (internal) drop the map_lock without updating the entry
*/
static void
map_abort(struct btt *bttp, unsigned lane, struct arena *arenap,
uint32_t premap_lba)
{
LOG(3, "bttp %p lane %u arenap %p premap_lba %u",
bttp, lane, arenap, premap_lba);
util_mutex_unlock(&arenap->map_locks[get_map_lock_num(premap_lba,
bttp->nfree)]);
}
/*
* map_unlock -- (internal) update the map and drop the map_lock
*/
static int
map_unlock(struct btt *bttp, unsigned lane, struct arena *arenap,
uint32_t entry, uint32_t premap_lba)
{
LOG(3, "bttp %p lane %u arenap %p entry %u premap_lba %u",
bttp, lane, arenap, entry, premap_lba);
uint64_t map_entry_off =
arenap->mapoff + BTT_MAP_ENTRY_SIZE * premap_lba;
/* write the new map entry */
int err = (*bttp->ns_cbp->nswrite)(bttp->ns, lane, &entry,
sizeof(uint32_t), map_entry_off);
util_mutex_unlock(&arenap->map_locks[get_map_lock_num(premap_lba,
bttp->nfree)]);
LOG(9, "unlocked map[%d]: %u%s%s", premap_lba,
entry & BTT_MAP_ENTRY_LBA_MASK,
(map_entry_is_error(entry)) ? " ERROR" : "",
(map_entry_is_zero(entry)) ? " ZERO" : "");
return err;
}
/*
* btt_write -- write a block to a btt namespace
*
* Returns 0 on success, otherwise -1/errno.
*/
int
btt_write(struct btt *bttp, unsigned lane, uint64_t lba, const void *buf)
{
LOG(3, "bttp %p lane %u lba %" PRIu64, bttp, lane, lba);
if (invalid_lba(bttp, lba))
return -1;
/* first write through here will initialize the metadata layout */
if (!bttp->laidout) {
int err = 0;
util_mutex_lock(&bttp->layout_write_mutex);
if (!bttp->laidout)
err = write_layout(bttp, lane, 1);
util_mutex_unlock(&bttp->layout_write_mutex);
if (err < 0)
return err;
}
/* find which arena LBA lives in, and the offset to the map entry */
struct arena *arenap;
uint32_t premap_lba;
if (lba_to_arena_lba(bttp, lba, &arenap, &premap_lba) < 0)
return -1;
/* if the arena is in an error state, writing is not allowed */
if (arenap->flags & BTTINFO_FLAG_ERROR_MASK) {
ERR("EIO due to btt_info error flags 0x%x",
arenap->flags & BTTINFO_FLAG_ERROR_MASK);
errno = EIO;
return -1;
}
/*
* This routine was passed a unique "lane" which is an index
* into the flog. That means the free block held by flog[lane]
* is assigned to this thread and to no other threads (no additional
* locking required). So start by performing the write to the
* free block. It is only safe to write to a free block if it
* doesn't appear in the read tracking table, so scan that first
* and if found, wait for the thread reading from it to finish.
*/
uint32_t free_entry = (arenap->flogs[lane].flog.old_map &
BTT_MAP_ENTRY_LBA_MASK) | BTT_MAP_ENTRY_NORMAL;
LOG(3, "free_entry %u (before mask %u)", free_entry,
arenap->flogs[lane].flog.old_map);
/* wait for other threads to finish any reads on free block */
for (unsigned i = 0; i < bttp->nlane; i++)
while (arenap->rtt[i] == free_entry)
;
/* it is now safe to perform write to the free block */
uint64_t data_block_off = arenap->dataoff +
(uint64_t)(free_entry & BTT_MAP_ENTRY_LBA_MASK) *
arenap->internal_lbasize;
if ((*bttp->ns_cbp->nswrite)(bttp->ns, lane, buf,
bttp->lbasize, data_block_off) < 0)
return -1;
/*
* Make the new block active atomically by updating the on-media flog
* and then updating the map.
*/
uint32_t old_entry;
if (map_lock(bttp, lane, arenap, &old_entry, premap_lba) < 0)
return -1;
old_entry = le32toh(old_entry);
/* update the flog */
if (flog_update(bttp, lane, arenap, premap_lba,
old_entry, free_entry) < 0) {
map_abort(bttp, lane, arenap, premap_lba);
return -1;
}
if (map_unlock(bttp, lane, arenap, htole32(free_entry),
premap_lba) < 0) {
/*
* A critical write error occurred, set the arena's
* info block error bit.
*/
set_arena_error(bttp, arenap, lane);
errno = EIO;
return -1;
}
return 0;
}
/*
* map_entry_setf -- (internal) set a given flag on a map entry
*
* Returns 0 on success, otherwise -1/errno.
*/
static int
map_entry_setf(struct btt *bttp, unsigned lane, uint64_t lba, uint32_t setf)
{
LOG(3, "bttp %p lane %u lba %" PRIu64 " setf 0x%x",
bttp, lane, lba, setf);
if (invalid_lba(bttp, lba))
return -1;
if (!bttp->laidout) {
/*
* No layout is written yet. If the flag being set
* is the zero flag, it is superfluous since all blocks
* read as zero at this point.
*/
if (setf == BTT_MAP_ENTRY_ZERO)
return 0;
/*
* Treat this like the first write and write out
* the metadata layout at this point.
*/
int err = 0;
util_mutex_lock(&bttp->layout_write_mutex);
if (!bttp->laidout)
err = write_layout(bttp, lane, 1);
util_mutex_unlock(&bttp->layout_write_mutex);
if (err < 0)
return err;
}
/* find which arena LBA lives in, and the offset to the map entry */
struct arena *arenap;
uint32_t premap_lba;
if (lba_to_arena_lba(bttp, lba, &arenap, &premap_lba) < 0)
return -1;
/* if the arena is in an error state, writing is not allowed */
if (arenap->flags & BTTINFO_FLAG_ERROR_MASK) {
ERR("EIO due to btt_info error flags 0x%x",
arenap->flags & BTTINFO_FLAG_ERROR_MASK);
errno = EIO;
return -1;
}
/*
* Set the flags in the map entry. To do this, read the
* current map entry, set the flags, and write out the update.
*/
uint32_t old_entry;
uint32_t new_entry;
if (map_lock(bttp, lane, arenap, &old_entry, premap_lba) < 0)
return -1;
old_entry = le32toh(old_entry);
if (setf == BTT_MAP_ENTRY_ZERO &&
map_entry_is_zero_or_initial(old_entry)) {
map_abort(bttp, lane, arenap, premap_lba);
return 0; /* block already zero, nothing to do */
}
/* create the new map entry */
new_entry = (old_entry & BTT_MAP_ENTRY_LBA_MASK) | setf;
if (map_unlock(bttp, lane, arenap, htole32(new_entry), premap_lba) < 0)
return -1;
return 0;
}
/*
* btt_set_zero -- mark a block as zeroed in a btt namespace
*
* Returns 0 on success, otherwise -1/errno.
*/
int
btt_set_zero(struct btt *bttp, unsigned lane, uint64_t lba)
{
LOG(3, "bttp %p lane %u lba %" PRIu64, bttp, lane, lba);
return map_entry_setf(bttp, lane, lba, BTT_MAP_ENTRY_ZERO);
}
/*
* btt_set_error -- mark a block as in an error state in a btt namespace
*
* Returns 0 on success, otherwise -1/errno.
*/
int
btt_set_error(struct btt *bttp, unsigned lane, uint64_t lba)
{
LOG(3, "bttp %p lane %u lba %" PRIu64, bttp, lane, lba);
return map_entry_setf(bttp, lane, lba, BTT_MAP_ENTRY_ERROR);
}
/*
* check_arena -- (internal) perform a consistency check on an arena
*/
static int
check_arena(struct btt *bttp, struct arena *arenap)
{
LOG(3, "bttp %p arenap %p", bttp, arenap);
int consistent = 1;
uint64_t map_entry_off = arenap->mapoff;
uint32_t bitmapsize = howmany(arenap->internal_nlba, 8);
uint8_t *bitmap = Zalloc(bitmapsize);
if (bitmap == NULL) {
ERR("!Malloc for bitmap");
return -1;
}
/*
* Go through every post-map LBA mentioned in the map and make sure
* there are no duplicates. bitmap is used to track which LBAs have
* been seen so far.
*/
uint32_t *mapp = NULL;
ssize_t mlen;
int next_index = 0;
size_t remaining = 0;
for (uint32_t i = 0; i < arenap->external_nlba; i++) {
uint32_t entry;
if (remaining == 0) {
/* request a mapping of remaining map area */
size_t req_len =
(arenap->external_nlba - i) * sizeof(uint32_t);
mlen = (*bttp->ns_cbp->nsmap)(bttp->ns, 0,
(void **)&mapp, req_len, map_entry_off);
			if (mlen < 0) {
				Free(bitmap);
				return -1;
			}
remaining = (size_t)mlen;
next_index = 0;
}
entry = le32toh(mapp[next_index]);
/* for debug, dump non-zero map entries at log level 11 */
if (map_entry_is_zero_or_initial(entry) == 0)
LOG(11, "map[%d]: %u%s", i,
entry & BTT_MAP_ENTRY_LBA_MASK,
(map_entry_is_error(entry)) ? " ERROR" : "");
/* this is an uninitialized map entry, set the default value */
if (map_entry_is_initial(entry))
entry = i;
else
entry &= BTT_MAP_ENTRY_LBA_MASK;
/* check if entry is valid */
		if (entry >= arenap->internal_nlba) {
			ERR("map[%d] entry out of bounds: %u", i, entry);
			errno = EINVAL;
			Free(bitmap);
			return -1;
		}
if (util_isset(bitmap, entry)) {
ERR("map[%d] duplicate entry: %u", i, entry);
consistent = 0;
} else
util_setbit(bitmap, entry);
map_entry_off += sizeof(uint32_t);
next_index++;
ASSERT(remaining >= sizeof(uint32_t));
remaining -= sizeof(uint32_t);
}
/*
* Go through the free blocks in the flog, adding them to bitmap
* and checking for duplications. It is sufficient to read the
* run-time flog here, avoiding more calls to nsread.
*/
for (uint32_t i = 0; i < bttp->nfree; i++) {
uint32_t entry = arenap->flogs[i].flog.old_map;
entry &= BTT_MAP_ENTRY_LBA_MASK;
if (util_isset(bitmap, entry)) {
ERR("flog[%u] duplicate entry: %u", i, entry);
consistent = 0;
} else
util_setbit(bitmap, entry);
}
/*
* Make sure every possible post-map LBA was accounted for
* in the two loops above.
*/
for (uint32_t i = 0; i < arenap->internal_nlba; i++)
if (util_isclr(bitmap, i)) {
ERR("unreferenced lba: %d", i);
consistent = 0;
}
Free(bitmap);
return consistent;
}
/*
* btt_check -- perform a consistency check on a btt namespace
*
* This routine contains a fairly high-impact set of consistency checks.
* It may use a good amount of dynamic memory and CPU time performing
* the checks. Any lightweight, quick consistency checks are included
* in read_layout() so they happen every time the BTT area is opened
* for use.
*
* Returns true if consistent, zero if inconsistent, -1/error if checking
* cannot happen due to other errors.
*
* No lane number required here because only one thread is allowed -- all
* other threads must be locked out of all btt routines for this btt
* namespace while this is running.
*/
int
btt_check(struct btt *bttp)
{
LOG(3, "bttp %p", bttp);
int consistent = 1;
if (!bttp->laidout) {
/* consistent by definition */
LOG(3, "no layout yet");
return consistent;
}
/* XXX report issues found during read_layout (from flags) */
/* for each arena... */
struct arena *arenap = bttp->arenas;
for (unsigned i = 0; i < bttp->narena; i++, arenap++) {
/*
* Perform the consistency checks for the arena.
*/
int retval = check_arena(bttp, arenap);
if (retval < 0)
return retval;
else if (retval == 0)
consistent = 0;
}
/* XXX stub */
return consistent;
}
/*
* btt_fini -- delete opaque btt info, done using btt namespace
*/
void
btt_fini(struct btt *bttp)
{
LOG(3, "bttp %p", bttp);
if (bttp->arenas) {
for (unsigned i = 0; i < bttp->narena; i++) {
if (bttp->arenas[i].flogs)
Free(bttp->arenas[i].flogs);
if (bttp->arenas[i].rtt)
Free((void *)bttp->arenas[i].rtt);
			if (bttp->arenas[i].map_locks)
				Free((void *)bttp->arenas[i].map_locks);
}
Free(bttp->arenas);
}
Free(bttp);
}
| 58,763 | 27.238347 | 80 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemblk/btt_layout.h
|
/*
* Copyright 2014-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* btt_layout.h -- block translation table on-media layout definitions
*/
/*
* Layout of BTT info block. All integers are stored little-endian.
*/
#ifndef BTT_LAYOUT_H
#define BTT_LAYOUT_H 1
#ifdef __cplusplus
extern "C" {
#endif
#define BTT_ALIGNMENT ((uintptr_t)4096) /* alignment of all BTT structures */
#define BTTINFO_SIG_LEN 16
#define BTTINFO_UUID_LEN 16
#define BTTINFO_UNUSED_LEN 3968
#define BTTINFO_SIG "BTT_ARENA_INFO\0"
struct btt_info {
char sig[BTTINFO_SIG_LEN]; /* must be "BTT_ARENA_INFO\0\0" */
uint8_t uuid[BTTINFO_UUID_LEN]; /* BTT UUID */
uint8_t parent_uuid[BTTINFO_UUID_LEN]; /* UUID of container */
uint32_t flags; /* see flag bits below */
uint16_t major; /* major version */
uint16_t minor; /* minor version */
uint32_t external_lbasize; /* advertised LBA size (bytes) */
uint32_t external_nlba; /* advertised LBAs in this arena */
uint32_t internal_lbasize; /* size of data area blocks (bytes) */
uint32_t internal_nlba; /* number of blocks in data area */
uint32_t nfree; /* number of free blocks */
uint32_t infosize; /* size of this info block */
/*
* The following offsets are relative to the beginning of
* the btt_info block.
*/
uint64_t nextoff; /* offset to next arena (or zero) */
uint64_t dataoff; /* offset to arena data area */
	uint64_t mapoff;	/* offset to arena map */
	uint64_t flogoff;	/* offset to arena flog */
uint64_t infooff; /* offset to backup info block */
char unused[BTTINFO_UNUSED_LEN]; /* must be zero */
uint64_t checksum; /* Fletcher64 of all fields */
};
/*
* Definitions for flags mask for btt_info structure above.
*/
#define BTTINFO_FLAG_ERROR 0x00000001 /* error state (read-only) */
#define BTTINFO_FLAG_ERROR_MASK 0x00000001 /* all error bits */
/*
* Current on-media format versions.
*/
#define BTTINFO_MAJOR_VERSION 1
#define BTTINFO_MINOR_VERSION 1
/*
* Layout of a BTT "flog" entry. All integers are stored little-endian.
*
* The "nfree" field in the BTT info block determines how many of these
* flog entries there are, and each entry consists of two of the following
* structs (entry updates alternate between the two structs), padded up
* to a cache line boundary to isolate adjacent updates.
*/
#define BTT_FLOG_PAIR_ALIGN ((uintptr_t)64)
struct btt_flog {
uint32_t lba; /* last pre-map LBA using this entry */
uint32_t old_map; /* old post-map LBA (the freed block) */
uint32_t new_map; /* new post-map LBA */
uint32_t seq; /* sequence number (01, 10, 11) */
};
/*
* Layout of a BTT "map" entry. 4-byte internal LBA offset, little-endian.
*/
#define BTT_MAP_ENTRY_SIZE 4
#define BTT_MAP_ENTRY_ERROR 0x40000000U
#define BTT_MAP_ENTRY_ZERO 0x80000000U
#define BTT_MAP_ENTRY_NORMAL 0xC0000000U
#define BTT_MAP_ENTRY_LBA_MASK 0x3fffffffU
#define BTT_MAP_LOCK_ALIGN ((uintptr_t)64)
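/*
 * Editor's note (not part of the original source): the top two bits of
 * a map entry encode its state -- 00 initial (identity-mapped, reads
 * as zeros), 01 error, 10 zeroed, 11 normal. For example, an entry of
 * 0xC0000123 decodes as:
 *
 *	uint32_t entry = 0xC0000123;
 *	uint32_t postmap_lba = entry & BTT_MAP_ENTRY_LBA_MASK;	(0x123)
 *	int is_normal = (entry & ~BTT_MAP_ENTRY_LBA_MASK)
 *				== BTT_MAP_ENTRY_NORMAL;	(true)
 */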
/*
* BTT layout properties...
*/
#define BTT_MIN_SIZE ((1u << 20) * 16)
#define BTT_MAX_ARENA (1ull << 39) /* 512GB per arena */
#define BTT_MIN_LBA_SIZE (size_t)512
#define BTT_INTERNAL_LBA_ALIGNMENT 256U
#define BTT_DEFAULT_NFREE 256
#ifdef __cplusplus
}
#endif
#endif
| 4,712 | 33.40146 | 77 |
h
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemblk/libpmemblk_main.c
|
/*
* Copyright 2015-2017, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* libpmemblk_main.c -- entry point for libpmemblk.dll
*
* XXX - This is a placeholder. All the library initialization/cleanup
* that is done in library ctors/dtors, as well as TLS initialization
* should be moved here.
*/
#include <windows.h>
void libpmemblk_init(void);
void libpmemblk_fini(void);
int APIENTRY
DllMain(HINSTANCE hInstance, DWORD dwReason, LPVOID lpReserved)
{
switch (dwReason) {
case DLL_PROCESS_ATTACH:
libpmemblk_init();
break;
case DLL_THREAD_ATTACH:
case DLL_THREAD_DETACH:
break;
case DLL_PROCESS_DETACH:
libpmemblk_fini();
break;
}
return TRUE;
}
| 2,184 | 34.241935 | 74 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemblk/blk.c
|
/*
* Copyright 2014-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* blk.c -- block memory pool entry points for libpmem
*/
#include <inttypes.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/param.h>
#include <unistd.h>
#include <errno.h>
#include <time.h>
#include <stdint.h>
#include <endian.h>
#include <stdbool.h>
#include "libpmem.h"
#include "libpmemblk.h"
#include "mmap.h"
#include "set.h"
#include "out.h"
#include "btt.h"
#include "blk.h"
#include "util.h"
#include "sys_util.h"
#include "util_pmem.h"
#include "valgrind_internal.h"
static const struct pool_attr Blk_create_attr = {
BLK_HDR_SIG,
BLK_FORMAT_MAJOR,
BLK_FORMAT_FEAT_DEFAULT,
{0}, {0}, {0}, {0}, {0}
};
static const struct pool_attr Blk_open_attr = {
BLK_HDR_SIG,
BLK_FORMAT_MAJOR,
BLK_FORMAT_FEAT_CHECK,
{0}, {0}, {0}, {0}, {0}
};
/*
* lane_enter -- (internal) acquire a unique lane number
*/
static void
lane_enter(PMEMblkpool *pbp, unsigned *lane)
{
unsigned mylane;
mylane = util_fetch_and_add32(&pbp->next_lane, 1) % pbp->nlane;
/* lane selected, grab the per-lane lock */
util_mutex_lock(&pbp->locks[mylane]);
*lane = mylane;
}
/*
* lane_exit -- (internal) drop lane lock
*/
static void
lane_exit(PMEMblkpool *pbp, unsigned mylane)
{
util_mutex_unlock(&pbp->locks[mylane]);
}
/*
* nsread -- (internal) read data from the namespace encapsulating the BTT
*
* This routine is provided to btt_init() to allow the btt module to
* do I/O on the memory pool containing the BTT layout.
*/
static int
nsread(void *ns, unsigned lane, void *buf, size_t count, uint64_t off)
{
struct pmemblk *pbp = (struct pmemblk *)ns;
LOG(13, "pbp %p lane %u count %zu off %" PRIu64, pbp, lane, count, off);
if (off + count > pbp->datasize) {
ERR("offset + count (%zu) past end of data area (%zu)",
(size_t)off + count, pbp->datasize);
errno = EINVAL;
return -1;
}
memcpy(buf, (char *)pbp->data + off, count);
return 0;
}
/*
* nswrite -- (internal) write data to the namespace encapsulating the BTT
*
* This routine is provided to btt_init() to allow the btt module to
* do I/O on the memory pool containing the BTT layout.
*/
static int
nswrite(void *ns, unsigned lane, const void *buf, size_t count,
uint64_t off)
{
struct pmemblk *pbp = (struct pmemblk *)ns;
LOG(13, "pbp %p lane %u count %zu off %" PRIu64, pbp, lane, count, off);
if (off + count > pbp->datasize) {
ERR("offset + count (%zu) past end of data area (%zu)",
(size_t)off + count, pbp->datasize);
errno = EINVAL;
return -1;
}
void *dest = (char *)pbp->data + off;
#ifdef DEBUG
/* grab debug write lock */
util_mutex_lock(&pbp->write_lock);
#endif
/* unprotect the memory (debug version only) */
RANGE_RW(dest, count, pbp->is_dev_dax);
if (pbp->is_pmem)
pmem_memcpy_nodrain(dest, buf, count);
else
memcpy(dest, buf, count);
/* protect the memory again (debug version only) */
RANGE_RO(dest, count, pbp->is_dev_dax);
#ifdef DEBUG
/* release debug write lock */
util_mutex_unlock(&pbp->write_lock);
#endif
if (pbp->is_pmem)
pmem_drain();
else
pmem_msync(dest, count);
return 0;
}
/*
* nsmap -- (internal) allow direct access to a range of a namespace
*
* The caller requests a range to be "mapped" but the return value
* may indicate a smaller amount (in which case the caller is expected
* to call back later for another mapping).
*
* This routine is provided to btt_init() to allow the btt module to
* do I/O on the memory pool containing the BTT layout.
*/
static ssize_t
nsmap(void *ns, unsigned lane, void **addrp, size_t len, uint64_t off)
{
struct pmemblk *pbp = (struct pmemblk *)ns;
LOG(12, "pbp %p lane %u len %zu off %" PRIu64, pbp, lane, len, off);
ASSERT(((ssize_t)len) >= 0);
if (off + len >= pbp->datasize) {
ERR("offset + len (%zu) past end of data area (%zu)",
(size_t)off + len, pbp->datasize - 1);
errno = EINVAL;
return -1;
}
/*
* Since the entire file is memory-mapped, this callback
* can always provide the entire length requested.
*/
*addrp = (char *)pbp->data + off;
LOG(12, "returning addr %p", *addrp);
return (ssize_t)len;
}
/*
* nssync -- (internal) flush changes made to a namespace range
*
* This is used in conjunction with the addresses handed out by
* nsmap() above. There's no need to sync things written via
* nswrite() since those changes are flushed each time nswrite()
* is called.
*
* This routine is provided to btt_init() to allow the btt module to
* do I/O on the memory pool containing the BTT layout.
*/
static void
nssync(void *ns, unsigned lane, void *addr, size_t len)
{
struct pmemblk *pbp = (struct pmemblk *)ns;
LOG(12, "pbp %p lane %u addr %p len %zu", pbp, lane, addr, len);
if (pbp->is_pmem)
pmem_persist(addr, len);
else
pmem_msync(addr, len);
}
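/*
 * Illustration of the nsmap()/nssync() protocol described above
 * (hypothetical caller; the real one lives in the btt module): map a
 * range, store into it directly, then sync it.
 */
#if 0
static int
example_map_write_sync(struct pmemblk *pbp, unsigned lane,
	const void *src, size_t len, uint64_t off)
{
	void *addr;
	if (nsmap(pbp, lane, &addr, len, off) < (ssize_t)len)
		return -1;
	memcpy(addr, src, len);		/* direct stores into the pool */
	nssync(pbp, lane, addr, len);	/* flush them to persistence */
	return 0;
}
#endif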
/*
* nszero -- (internal) zero data in the namespace encapsulating the BTT
*
* This routine is provided to btt_init() to allow the btt module to
* zero the memory pool containing the BTT layout.
*/
static int
nszero(void *ns, unsigned lane, size_t count, uint64_t off)
{
struct pmemblk *pbp = (struct pmemblk *)ns;
LOG(13, "pbp %p lane %u count %zu off %" PRIu64, pbp, lane, count, off);
if (off + count > pbp->datasize) {
ERR("offset + count (%zu) past end of data area (%zu)",
(size_t)off + count, pbp->datasize);
errno = EINVAL;
return -1;
}
void *dest = (char *)pbp->data + off;
/* unprotect the memory (debug version only) */
RANGE_RW(dest, count, pbp->is_dev_dax);
pmem_memset_persist(dest, 0, count);
/* protect the memory again (debug version only) */
RANGE_RO(dest, count, pbp->is_dev_dax);
return 0;
}
/* callbacks for btt_init() */
static struct ns_callback ns_cb = {
.nsread = nsread,
.nswrite = nswrite,
.nszero = nszero,
.nsmap = nsmap,
.nssync = nssync,
.ns_is_zeroed = 0
};
/*
* blk_descr_create -- (internal) create block memory pool descriptor
*/
static void
blk_descr_create(PMEMblkpool *pbp, uint32_t bsize, int zeroed)
{
LOG(3, "pbp %p bsize %u zeroed %d", pbp, bsize, zeroed);
/* create the required metadata */
pbp->bsize = htole32(bsize);
util_persist(pbp->is_pmem, &pbp->bsize, sizeof(bsize));
pbp->is_zeroed = zeroed;
util_persist(pbp->is_pmem, &pbp->is_zeroed, sizeof(pbp->is_zeroed));
}
/*
* blk_descr_check -- (internal) validate block memory pool descriptor
*/
static int
blk_descr_check(PMEMblkpool *pbp, size_t *bsize)
{
LOG(3, "pbp %p bsize %zu", pbp, *bsize);
size_t hdr_bsize = le32toh(pbp->bsize);
if (*bsize && *bsize != hdr_bsize) {
ERR("wrong bsize (%zu), pool created with bsize %zu",
*bsize, hdr_bsize);
errno = EINVAL;
return -1;
}
*bsize = hdr_bsize;
LOG(3, "using block size from header: %zu", *bsize);
return 0;
}
/*
* blk_runtime_init -- (internal) initialize block memory pool runtime data
*/
static int
blk_runtime_init(PMEMblkpool *pbp, size_t bsize, int rdonly)
{
LOG(3, "pbp %p bsize %zu rdonly %d",
pbp, bsize, rdonly);
/* remove volatile part of header */
VALGRIND_REMOVE_PMEM_MAPPING(&pbp->addr,
sizeof(struct pmemblk) -
sizeof(struct pool_hdr) -
sizeof(pbp->bsize) -
sizeof(pbp->is_zeroed));
/*
* Use some of the memory pool area for run-time info. This
* run-time state is never loaded from the file, it is always
* created here, so no need to worry about byte-order.
*/
pbp->rdonly = rdonly;
pbp->data = (char *)pbp->addr +
roundup(sizeof(*pbp), BLK_FORMAT_DATA_ALIGN);
ASSERT(((char *)pbp->addr + pbp->size) >= (char *)pbp->data);
pbp->datasize = (size_t)
(((char *)pbp->addr + pbp->size) - (char *)pbp->data);
LOG(4, "data area %p data size %zu bsize %zu",
pbp->data, pbp->datasize, bsize);
long ncpus = sysconf(_SC_NPROCESSORS_ONLN);
if (ncpus < 1)
ncpus = 1;
ns_cb.ns_is_zeroed = pbp->is_zeroed;
	/* things freed by "goto err" if not NULL */
struct btt *bttp = NULL;
os_mutex_t *locks = NULL;
bttp = btt_init(pbp->datasize, (uint32_t)bsize, pbp->hdr.poolset_uuid,
(unsigned)ncpus * 2, pbp, &ns_cb);
if (bttp == NULL)
goto err; /* btt_init set errno, called LOG */
pbp->bttp = bttp;
pbp->nlane = btt_nlane(pbp->bttp);
pbp->next_lane = 0;
if ((locks = Malloc(pbp->nlane * sizeof(*locks))) == NULL) {
ERR("!Malloc for lane locks");
goto err;
}
for (unsigned i = 0; i < pbp->nlane; i++)
util_mutex_init(&locks[i]);
pbp->locks = locks;
#ifdef DEBUG
/* initialize debug lock */
util_mutex_init(&pbp->write_lock);
#endif
/*
* If possible, turn off all permissions on the pool header page.
*
* The prototype PMFS doesn't allow this when large pages are in
* use. It is not considered an error if this fails.
*/
RANGE_NONE(pbp->addr, sizeof(struct pool_hdr), pbp->is_dev_dax);
/* the data area should be kept read-only for debug version */
RANGE_RO(pbp->data, pbp->datasize, pbp->is_dev_dax);
return 0;
err:
LOG(4, "error clean up");
int oerrno = errno;
if (bttp)
btt_fini(bttp);
errno = oerrno;
return -1;
}
/*
* pmemblk_createU -- create a block memory pool
*/
#ifndef _WIN32
static inline
#endif
PMEMblkpool *
pmemblk_createU(const char *path, size_t bsize, size_t poolsize, mode_t mode)
{
LOG(3, "path %s bsize %zu poolsize %zu mode %o",
path, bsize, poolsize, mode);
/* check if bsize is valid */
if (bsize == 0) {
ERR("Invalid block size %zu", bsize);
errno = EINVAL;
return NULL;
}
if (bsize > UINT32_MAX) {
ERR("Invalid block size %zu", bsize);
errno = EINVAL;
return NULL;
}
struct pool_set *set;
if (util_pool_create(&set, path, poolsize, PMEMBLK_MIN_POOL,
PMEMBLK_MIN_PART, &Blk_create_attr, NULL,
REPLICAS_DISABLED) != 0) {
LOG(2, "cannot create pool or pool set");
return NULL;
}
ASSERT(set->nreplicas > 0);
struct pool_replica *rep = set->replica[0];
PMEMblkpool *pbp = rep->part[0].addr;
VALGRIND_REMOVE_PMEM_MAPPING(&pbp->addr,
sizeof(struct pmemblk) -
((uintptr_t)&pbp->addr - (uintptr_t)&pbp->hdr));
pbp->addr = pbp;
pbp->size = rep->repsize;
pbp->set = set;
pbp->is_pmem = rep->is_pmem;
pbp->is_dev_dax = rep->part[0].is_dev_dax;
/* is_dev_dax implies is_pmem */
ASSERT(!pbp->is_dev_dax || pbp->is_pmem);
/* create pool descriptor */
blk_descr_create(pbp, (uint32_t)bsize, set->zeroed);
/* initialize runtime parts */
if (blk_runtime_init(pbp, bsize, 0) != 0) {
ERR("pool initialization failed");
goto err;
}
if (util_poolset_chmod(set, mode))
goto err;
util_poolset_fdclose(set);
LOG(3, "pbp %p", pbp);
return pbp;
err:
LOG(4, "error clean up");
int oerrno = errno;
util_poolset_close(set, DELETE_CREATED_PARTS);
errno = oerrno;
return NULL;
}
#ifndef _WIN32
/*
* pmemblk_create -- create a block memory pool
*/
PMEMblkpool *
pmemblk_create(const char *path, size_t bsize, size_t poolsize, mode_t mode)
{
return pmemblk_createU(path, bsize, poolsize, mode);
}
#else
/*
* pmemblk_createW -- create a block memory pool
*/
PMEMblkpool *
pmemblk_createW(const wchar_t *path, size_t bsize, size_t poolsize,
mode_t mode)
{
char *upath = util_toUTF8(path);
if (upath == NULL)
return NULL;
PMEMblkpool *ret = pmemblk_createU(upath, bsize, poolsize, mode);
util_free_UTF8(upath);
return ret;
}
#endif
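/*
 * Application-side usage sketch (illustrative; never compiled into the
 * library). The path, block size and mode are example values only.
 */
#if 0
static int
example_blk_roundtrip(void)
{
	PMEMblkpool *pbp = pmemblk_create("/pmem/blk.pool", 512,
			PMEMBLK_MIN_POOL, 0666);
	if (pbp == NULL)
		return -1;	/* errno set by pmemblk_create() */
	char buf[512] = "hello";
	int ret = pmemblk_write(pbp, buf, 0);	/* atomic 512B write */
	if (ret == 0)
		ret = pmemblk_read(pbp, buf, 0);
	pmemblk_close(pbp);
	return ret;
}
#endif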
/*
* blk_open_common -- (internal) open a block memory pool
*
* This routine does all the work, but takes a cow flag so internal
* calls can map a read-only pool if required.
*
* Passing in bsize == 0 means a valid pool header must exist (which
* will supply the block size).
*/
static PMEMblkpool *
blk_open_common(const char *path, size_t bsize, unsigned flags)
{
LOG(3, "path %s bsize %zu flags 0x%x", path, bsize, flags);
struct pool_set *set;
if (util_pool_open(&set, path, PMEMBLK_MIN_PART, &Blk_open_attr,
NULL, NULL, flags) != 0) {
LOG(2, "cannot open pool or pool set");
return NULL;
}
ASSERT(set->nreplicas > 0);
struct pool_replica *rep = set->replica[0];
PMEMblkpool *pbp = rep->part[0].addr;
VALGRIND_REMOVE_PMEM_MAPPING(&pbp->addr,
sizeof(struct pmemblk) -
((uintptr_t)&pbp->addr - (uintptr_t)&pbp->hdr));
pbp->addr = pbp;
pbp->size = rep->repsize;
pbp->set = set;
pbp->is_pmem = rep->is_pmem;
pbp->is_dev_dax = rep->part[0].is_dev_dax;
/* is_dev_dax implies is_pmem */
ASSERT(!pbp->is_dev_dax || pbp->is_pmem);
if (set->nreplicas > 1) {
errno = ENOTSUP;
ERR("!replicas not supported");
goto err;
}
/* validate pool descriptor */
if (blk_descr_check(pbp, &bsize) != 0) {
LOG(2, "descriptor check failed");
goto err;
}
/* initialize runtime parts */
if (blk_runtime_init(pbp, bsize, set->rdonly) != 0) {
ERR("pool initialization failed");
goto err;
}
util_poolset_fdclose(set);
LOG(3, "pbp %p", pbp);
return pbp;
err:
LOG(4, "error clean up");
int oerrno = errno;
util_poolset_close(set, DO_NOT_DELETE_PARTS);
errno = oerrno;
return NULL;
}
/*
* pmemblk_openU -- open a block memory pool
*/
#ifndef _WIN32
static inline
#endif
PMEMblkpool *
pmemblk_openU(const char *path, size_t bsize)
{
LOG(3, "path %s bsize %zu", path, bsize);
return blk_open_common(path, bsize, 0);
}
#ifndef _WIN32
/*
* pmemblk_open -- open a block memory pool
*/
PMEMblkpool *
pmemblk_open(const char *path, size_t bsize)
{
return pmemblk_openU(path, bsize);
}
#else
/*
* pmemblk_openW -- open a block memory pool
*/
PMEMblkpool *
pmemblk_openW(const wchar_t *path, size_t bsize)
{
char *upath = util_toUTF8(path);
if (upath == NULL)
return NULL;
PMEMblkpool *ret = pmemblk_openU(upath, bsize);
util_free_UTF8(upath);
return ret;
}
#endif
/*
* pmemblk_close -- close a block memory pool
*/
void
pmemblk_close(PMEMblkpool *pbp)
{
LOG(3, "pbp %p", pbp);
btt_fini(pbp->bttp);
if (pbp->locks) {
for (unsigned i = 0; i < pbp->nlane; i++)
os_mutex_destroy(&pbp->locks[i]);
Free((void *)pbp->locks);
}
#ifdef DEBUG
/* destroy debug lock */
os_mutex_destroy(&pbp->write_lock);
#endif
util_poolset_close(pbp->set, DO_NOT_DELETE_PARTS);
}
/*
* pmemblk_bsize -- return size of block for specified pool
*/
size_t
pmemblk_bsize(PMEMblkpool *pbp)
{
LOG(3, "pbp %p", pbp);
return le32toh(pbp->bsize);
}
/*
* pmemblk_nblock -- return number of usable blocks in a block memory pool
*/
size_t
pmemblk_nblock(PMEMblkpool *pbp)
{
LOG(3, "pbp %p", pbp);
return btt_nlba(pbp->bttp);
}
/*
* pmemblk_read -- read a block in a block memory pool
*/
int
pmemblk_read(PMEMblkpool *pbp, void *buf, long long blockno)
{
LOG(3, "pbp %p buf %p blockno %lld", pbp, buf, blockno);
if (blockno < 0) {
ERR("negative block number");
errno = EINVAL;
return -1;
}
unsigned lane;
lane_enter(pbp, &lane);
int err = btt_read(pbp->bttp, lane, (uint64_t)blockno, buf);
lane_exit(pbp, lane);
return err;
}
/*
* pmemblk_write -- write a block (atomically) in a block memory pool
*/
int
pmemblk_write(PMEMblkpool *pbp, const void *buf, long long blockno)
{
LOG(3, "pbp %p buf %p blockno %lld", pbp, buf, blockno);
if (pbp->rdonly) {
ERR("EROFS (pool is read-only)");
errno = EROFS;
return -1;
}
if (blockno < 0) {
ERR("negative block number");
errno = EINVAL;
return -1;
}
unsigned lane;
lane_enter(pbp, &lane);
int err = btt_write(pbp->bttp, lane, (uint64_t)blockno, buf);
lane_exit(pbp, lane);
return err;
}
/*
* pmemblk_set_zero -- zero a block in a block memory pool
*/
int
pmemblk_set_zero(PMEMblkpool *pbp, long long blockno)
{
LOG(3, "pbp %p blockno %lld", pbp, blockno);
if (pbp->rdonly) {
ERR("EROFS (pool is read-only)");
errno = EROFS;
return -1;
}
if (blockno < 0) {
ERR("negative block number");
errno = EINVAL;
return -1;
}
unsigned lane;
lane_enter(pbp, &lane);
int err = btt_set_zero(pbp->bttp, lane, (uint64_t)blockno);
lane_exit(pbp, lane);
return err;
}
/*
* pmemblk_set_error -- set the error state on a block in a block memory pool
*/
int
pmemblk_set_error(PMEMblkpool *pbp, long long blockno)
{
LOG(3, "pbp %p blockno %lld", pbp, blockno);
if (pbp->rdonly) {
ERR("EROFS (pool is read-only)");
errno = EROFS;
return -1;
}
if (blockno < 0) {
ERR("negative block number");
errno = EINVAL;
return -1;
}
unsigned lane;
lane_enter(pbp, &lane);
int err = btt_set_error(pbp->bttp, lane, (uint64_t)blockno);
lane_exit(pbp, lane);
return err;
}
/*
* pmemblk_checkU -- block memory pool consistency check
*/
#ifndef _WIN32
static inline
#endif
int
pmemblk_checkU(const char *path, size_t bsize)
{
LOG(3, "path \"%s\" bsize %zu", path, bsize);
/* map the pool read-only */
PMEMblkpool *pbp = blk_open_common(path, bsize, POOL_OPEN_COW);
if (pbp == NULL)
return -1; /* errno set by blk_open_common() */
int retval = btt_check(pbp->bttp);
int oerrno = errno;
pmemblk_close(pbp);
errno = oerrno;
return retval;
}
#ifndef _WIN32
/*
* pmemblk_check -- block memory pool consistency check
*/
int
pmemblk_check(const char *path, size_t bsize)
{
return pmemblk_checkU(path, bsize);
}
#else
/*
* pmemblk_checkW -- block memory pool consistency check
*/
int
pmemblk_checkW(const wchar_t *path, size_t bsize)
{
char *upath = util_toUTF8(path);
if (upath == NULL)
return -1;
int ret = pmemblk_checkU(upath, bsize);
util_free_UTF8(upath);
return ret;
}
#endif
/*
* pmemblk_ctl_getU -- programmatically executes a read ctl query
*/
#ifndef _WIN32
static inline
#endif
int
pmemblk_ctl_getU(PMEMblkpool *pbp, const char *name, void *arg)
{
LOG(3, "pbp %p name %s arg %p", pbp, name, arg);
return ctl_query(pbp == NULL ? NULL : pbp->ctl, pbp,
CTL_QUERY_PROGRAMMATIC, name, CTL_QUERY_READ, arg);
}
/*
* pmemblk_ctl_setU -- programmatically executes a write ctl query
*/
#ifndef _WIN32
static inline
#endif
int
pmemblk_ctl_setU(PMEMblkpool *pbp, const char *name, void *arg)
{
LOG(3, "pbp %p name %s arg %p", pbp, name, arg);
return ctl_query(pbp == NULL ? NULL : pbp->ctl, pbp,
CTL_QUERY_PROGRAMMATIC, name, CTL_QUERY_WRITE, arg);
}
/*
* pmemblk_ctl_execU -- programmatically executes a runnable ctl query
*/
#ifndef _WIN32
static inline
#endif
int
pmemblk_ctl_execU(PMEMblkpool *pbp, const char *name, void *arg)
{
LOG(3, "pbp %p name %s arg %p", pbp, name, arg);
return ctl_query(pbp == NULL ? NULL : pbp->ctl, pbp,
CTL_QUERY_PROGRAMMATIC, name, CTL_QUERY_RUNNABLE, arg);
}
#ifndef _WIN32
/*
* pmemblk_ctl_get -- programmatically executes a read ctl query
*/
int
pmemblk_ctl_get(PMEMblkpool *pbp, const char *name, void *arg)
{
return pmemblk_ctl_getU(pbp, name, arg);
}
/*
* pmemblk_ctl_set -- programmatically executes a write ctl query
*/
int
pmemblk_ctl_set(PMEMblkpool *pbp, const char *name, void *arg)
{
return pmemblk_ctl_setU(pbp, name, arg);
}
/*
* pmemblk_ctl_exec -- programmatically executes a runnable ctl query
*/
int
pmemblk_ctl_exec(PMEMblkpool *pbp, const char *name, void *arg)
{
return pmemblk_ctl_execU(pbp, name, arg);
}
#else
/*
* pmemblk_ctl_getW -- programmatically executes a read ctl query
*/
int
pmemblk_ctl_getW(PMEMblkpool *pbp, const wchar_t *name, void *arg)
{
char *uname = util_toUTF8(name);
if (uname == NULL)
return -1;
int ret = pmemblk_ctl_getU(pbp, uname, arg);
util_free_UTF8(uname);
return ret;
}
/*
* pmemblk_ctl_setW -- programmatically executes a write ctl query
*/
int
pmemblk_ctl_setW(PMEMblkpool *pbp, const wchar_t *name, void *arg)
{
char *uname = util_toUTF8(name);
if (uname == NULL)
return -1;
int ret = pmemblk_ctl_setU(pbp, uname, arg);
util_free_UTF8(uname);
return ret;
}
/*
* pmemblk_ctl_execW -- programmatically executes a runnable ctl query
*/
int
pmemblk_ctl_execW(PMEMblkpool *pbp, const wchar_t *name, void *arg)
{
char *uname = util_toUTF8(name);
if (uname == NULL)
return -1;
int ret = pmemblk_ctl_execU(pbp, uname, arg);
util_free_UTF8(uname);
return ret;
}
#endif
/* file: NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemobj/container_ravl.c */
/*
* Copyright 2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* container_ravl.c -- implementation of ravl-based block container
*/
#include "container_ravl.h"
#include "ravl.h"
#include "out.h"
#include "sys_util.h"
struct block_container_ravl {
struct block_container super;
struct ravl *tree;
};
/*
* container_compare_memblocks -- (internal) compares two memory blocks
*/
static int
container_compare_memblocks(const void *lhs, const void *rhs)
{
const struct memory_block *l = lhs;
const struct memory_block *r = rhs;
int64_t diff = (int64_t)l->size_idx - (int64_t)r->size_idx;
if (diff != 0)
return diff > 0 ? 1 : -1;
diff = (int64_t)l->zone_id - (int64_t)r->zone_id;
if (diff != 0)
return diff > 0 ? 1 : -1;
diff = (int64_t)l->chunk_id - (int64_t)r->chunk_id;
if (diff != 0)
return diff > 0 ? 1 : -1;
diff = (int64_t)l->block_off - (int64_t)r->block_off;
if (diff != 0)
return diff > 0 ? 1 : -1;
return 0;
}
/*
* container_ravl_insert_block -- (internal) inserts a new memory block
* into the container
*/
static int
container_ravl_insert_block(struct block_container *bc,
const struct memory_block *m)
{
struct block_container_ravl *c =
(struct block_container_ravl *)bc;
struct memory_block *e = m->m_ops->get_user_data(m);
VALGRIND_DO_MAKE_MEM_DEFINED(e, sizeof(*e));
VALGRIND_ADD_TO_TX(e, sizeof(*e));
*e = *m;
VALGRIND_SET_CLEAN(e, sizeof(*e));
VALGRIND_REMOVE_FROM_TX(e, sizeof(*e));
return ravl_insert(c->tree, e);
}
/*
* container_ravl_get_rm_block_bestfit -- (internal) removes and returns the
* best-fit memory block for size
*/
static int
container_ravl_get_rm_block_bestfit(struct block_container *bc,
struct memory_block *m)
{
struct block_container_ravl *c =
(struct block_container_ravl *)bc;
struct ravl_node *n = ravl_find(c->tree, m,
RAVL_PREDICATE_GREATER_EQUAL);
if (n == NULL)
return ENOMEM;
struct memory_block *e = ravl_data(n);
*m = *e;
ravl_remove(c->tree, n);
return 0;
}
/*
* container_ravl_get_rm_block_exact --
* (internal) removes exact match memory block
*/
static int
container_ravl_get_rm_block_exact(struct block_container *bc,
const struct memory_block *m)
{
struct block_container_ravl *c =
(struct block_container_ravl *)bc;
struct ravl_node *n = ravl_find(c->tree, m, RAVL_PREDICATE_EQUAL);
if (n == NULL)
return ENOMEM;
ravl_remove(c->tree, n);
return 0;
}
/*
* container_ravl_get_block_exact -- (internal) finds exact match memory block
*/
static int
container_ravl_get_block_exact(struct block_container *bc,
const struct memory_block *m)
{
struct block_container_ravl *c =
(struct block_container_ravl *)bc;
return ravl_find(c->tree, m, RAVL_PREDICATE_EQUAL) ? 0 : ENOMEM;
}
/*
* container_ravl_is_empty -- (internal) checks whether the container is empty
*/
static int
container_ravl_is_empty(struct block_container *bc)
{
struct block_container_ravl *c =
(struct block_container_ravl *)bc;
return ravl_empty(c->tree);
}
/*
* container_ravl_rm_all -- (internal) removes all elements from the tree
*/
static void
container_ravl_rm_all(struct block_container *bc)
{
struct block_container_ravl *c =
(struct block_container_ravl *)bc;
ravl_clear(c->tree);
}
/*
 * container_ravl_destroy -- (internal) destroys the container
*/
static void
container_ravl_destroy(struct block_container *bc)
{
struct block_container_ravl *c =
(struct block_container_ravl *)bc;
ravl_delete(c->tree);
Free(bc);
}
/*
* Tree-based block container used to provide best-fit functionality to the
 * bucket. Lookups and updates in the underlying ravl tree take O(log n)
 * time, where n is the number of free blocks held in the container.
 *
 * The get methods also guarantee that the block with the lowest possible
 * address that best matches the requirements is provided.
*/
static struct block_container_ops container_ravl_ops = {
.insert = container_ravl_insert_block,
.get_rm_exact = container_ravl_get_rm_block_exact,
.get_rm_bestfit = container_ravl_get_rm_block_bestfit,
.get_exact = container_ravl_get_block_exact,
.is_empty = container_ravl_is_empty,
.rm_all = container_ravl_rm_all,
.destroy = container_ravl_destroy,
};
/*
* container_new_ravl -- allocates and initializes a ravl container
*/
struct block_container *
container_new_ravl(struct palloc_heap *heap)
{
struct block_container_ravl *bc = Malloc(sizeof(*bc));
if (bc == NULL)
goto error_container_malloc;
bc->super.heap = heap;
bc->super.c_ops = &container_ravl_ops;
bc->tree = ravl_new(container_compare_memblocks);
if (bc->tree == NULL)
goto error_ravl_new;
return (struct block_container *)&bc->super;
error_ravl_new:
Free(bc);
error_container_malloc:
return NULL;
}
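/*
 * Usage sketch (illustrative; assumes a valid palloc_heap handle). The
 * comparator orders blocks by size first and address second, so a
 * GREATER_EQUAL lookup yields the smallest sufficient block at the
 * lowest address.
 */
#if 0
static void
example_bestfit(struct palloc_heap *heap)
{
	struct block_container *bc = container_new_ravl(heap);
	struct memory_block m = { .size_idx = 4 };	/* want >= 4 units */
	if (bc->c_ops->get_rm_bestfit(bc, &m) == 0) {
		/* m now describes the chosen (and removed) free block */
	}
	bc->c_ops->destroy(bc);
}
#endif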
/* file: NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemobj/tx.c */
/*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* tx.c -- transactions implementation
*/
#include <inttypes.h>
#include <wchar.h>
#include "queue.h"
#include "ravl.h"
#include "obj.h"
#include "out.h"
#include "pmalloc.h"
#include "tx.h"
#include "valgrind_internal.h"
#include "memops.h"
struct tx_data {
SLIST_ENTRY(tx_data) tx_entry;
jmp_buf env;
};
struct tx {
PMEMobjpool *pop;
enum pobj_tx_stage stage;
int last_errnum;
struct lane *lane;
SLIST_HEAD(txl, tx_lock_data) tx_locks;
SLIST_HEAD(txd, tx_data) tx_entries;
struct ravl *ranges;
VEC(, struct pobj_action) actions;
pmemobj_tx_callback stage_callback;
void *stage_callback_arg;
int first_snapshot;
};
/*
* get_tx -- (internal) returns current transaction
*
* This function should be used only in high-level functions.
*/
static struct tx *
get_tx()
{
static __thread struct tx tx;
return &tx;
}
struct tx_lock_data {
union {
PMEMmutex *mutex;
PMEMrwlock *rwlock;
} lock;
enum pobj_tx_param lock_type;
SLIST_ENTRY(tx_lock_data) tx_lock;
};
struct tx_alloc_args {
uint64_t flags;
const void *copy_ptr;
size_t copy_size;
};
#define COPY_ARGS(flags, copy_ptr, copy_size)\
(struct tx_alloc_args){flags, copy_ptr, copy_size}
#define ALLOC_ARGS(flags)\
(struct tx_alloc_args){flags, NULL, 0}
struct tx_range_def {
uint64_t offset;
uint64_t size;
uint64_t flags;
};
/*
* tx_range_def_cmp -- compares two snapshot ranges
*/
static int
tx_range_def_cmp(const void *lhs, const void *rhs)
{
const struct tx_range_def *l = lhs;
const struct tx_range_def *r = rhs;
if (l->offset > r->offset)
return 1;
else if (l->offset < r->offset)
return -1;
return 0;
}
/*
* tx_params_new -- creates a new transactional parameters instance and fills it
* with default values.
*/
struct tx_parameters *
tx_params_new(void)
{
struct tx_parameters *tx_params = Malloc(sizeof(*tx_params));
if (tx_params == NULL)
return NULL;
tx_params->cache_size = TX_DEFAULT_RANGE_CACHE_SIZE;
return tx_params;
}
/*
* tx_params_delete -- deletes transactional parameters instance
*/
void
tx_params_delete(struct tx_parameters *tx_params)
{
Free(tx_params);
}
static void
obj_tx_abort(int errnum, int user);
/*
* obj_tx_abort_err -- (internal) pmemobj_tx_abort variant that returns
* error code
*/
static inline int
obj_tx_abort_err(int errnum)
{
obj_tx_abort(errnum, 0);
return errnum;
}
/*
* obj_tx_abort_null -- (internal) pmemobj_tx_abort variant that returns
* null PMEMoid
*/
static inline PMEMoid
obj_tx_abort_null(int errnum)
{
obj_tx_abort(errnum, 0);
return OID_NULL;
}
/* ASSERT_IN_TX -- checks whether there's open transaction */
#define ASSERT_IN_TX(tx) do {\
if ((tx)->stage == TX_STAGE_NONE)\
FATAL("%s called outside of transaction", __func__);\
} while (0)
/* ASSERT_TX_STAGE_WORK -- checks whether current transaction stage is WORK */
#define ASSERT_TX_STAGE_WORK(tx) do {\
if ((tx)->stage != TX_STAGE_WORK)\
FATAL("%s called in invalid stage %d", __func__, (tx)->stage);\
} while (0)
/*
* tx_action_add -- (internal) reserve space and add a new tx action
*/
static struct pobj_action *
tx_action_add(struct tx *tx)
{
size_t entries_size = (VEC_SIZE(&tx->actions) + 1) *
sizeof(struct ulog_entry_val);
if (operation_reserve(tx->lane->external, entries_size) != 0)
return NULL;
VEC_INC_BACK(&tx->actions);
return &VEC_BACK(&tx->actions);
}
/*
* tx_action_remove -- (internal) remove last tx action
*/
static void
tx_action_remove(struct tx *tx)
{
VEC_POP_BACK(&tx->actions);
}
/*
* constructor_tx_alloc -- (internal) constructor for normal alloc
*/
static int
constructor_tx_alloc(void *ctx, void *ptr, size_t usable_size, void *arg)
{
LOG(5, NULL);
ASSERTne(ptr, NULL);
ASSERTne(arg, NULL);
struct tx_alloc_args *args = arg;
/* do not report changes to the new object */
VALGRIND_ADD_TO_TX(ptr, usable_size);
if (args->flags & POBJ_FLAG_ZERO)
memset(ptr, 0, usable_size);
if (args->copy_ptr && args->copy_size != 0) {
memcpy(ptr, args->copy_ptr, args->copy_size);
}
return 0;
}
struct tx_range_data {
void *begin;
void *end;
SLIST_ENTRY(tx_range_data) tx_range;
};
SLIST_HEAD(txr, tx_range_data);
/*
* tx_remove_range -- (internal) removes specified range from ranges list
*/
static void
tx_remove_range(struct txr *tx_ranges, void *begin, void *end)
{
struct tx_range_data *txr = SLIST_FIRST(tx_ranges);
while (txr) {
if (begin >= txr->end || end < txr->begin) {
txr = SLIST_NEXT(txr, tx_range);
continue;
}
LOG(4, "detected PMEM lock in undo log; "
"range %p-%p, lock %p-%p",
txr->begin, txr->end, begin, end);
/* split the range into new ones */
if (begin > txr->begin) {
struct tx_range_data *txrn = Malloc(sizeof(*txrn));
if (txrn == NULL)
FATAL("!Malloc");
txrn->begin = txr->begin;
txrn->end = begin;
LOG(4, "range split; %p-%p", txrn->begin, txrn->end);
SLIST_INSERT_HEAD(tx_ranges, txrn, tx_range);
}
if (end < txr->end) {
struct tx_range_data *txrn = Malloc(sizeof(*txrn));
if (txrn == NULL)
FATAL("!Malloc");
txrn->begin = end;
txrn->end = txr->end;
LOG(4, "range split; %p-%p", txrn->begin, txrn->end);
SLIST_INSERT_HEAD(tx_ranges, txrn, tx_range);
}
struct tx_range_data *next = SLIST_NEXT(txr, tx_range);
/* remove the original range from the list */
SLIST_REMOVE(tx_ranges, txr, tx_range_data, tx_range);
Free(txr);
txr = next;
}
}
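/*
 * Illustration: removing a 64-byte lock at [128, 192) from a tracked
 * range [0, 256) splits it into [0, 128) and [192, 256); both pieces are
 * inserted at the head of the list and the original range is freed.
 */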
/*
* tx_restore_range -- (internal) restore a single range from undo log
*
* If the snapshot contains any PMEM locks that are held by the current
* transaction, they won't be overwritten with the saved data to avoid changing
* their state. Those locks will be released in tx_end().
*/
static void
tx_restore_range(PMEMobjpool *pop, struct tx *tx, struct ulog_entry_buf *range)
{
COMPILE_ERROR_ON(sizeof(PMEMmutex) != _POBJ_CL_SIZE);
COMPILE_ERROR_ON(sizeof(PMEMrwlock) != _POBJ_CL_SIZE);
COMPILE_ERROR_ON(sizeof(PMEMcond) != _POBJ_CL_SIZE);
struct txr tx_ranges;
SLIST_INIT(&tx_ranges);
struct tx_range_data *txr;
txr = Malloc(sizeof(*txr));
if (txr == NULL) {
FATAL("!Malloc");
}
uint64_t range_offset = ulog_entry_offset(&range->base);
txr->begin = OBJ_OFF_TO_PTR(pop, range_offset);
txr->end = (char *)txr->begin + range->size;
SLIST_INSERT_HEAD(&tx_ranges, txr, tx_range);
struct tx_lock_data *txl;
/* check if there are any locks within given memory range */
SLIST_FOREACH(txl, &tx->tx_locks, tx_lock) {
void *lock_begin = txl->lock.mutex;
/* all PMEM locks have the same size */
void *lock_end = (char *)lock_begin + _POBJ_CL_SIZE;
tx_remove_range(&tx_ranges, lock_begin, lock_end);
}
ASSERT(!SLIST_EMPTY(&tx_ranges));
void *dst_ptr = OBJ_OFF_TO_PTR(pop, range_offset);
while (!SLIST_EMPTY(&tx_ranges)) {
txr = SLIST_FIRST(&tx_ranges);
SLIST_REMOVE_HEAD(&tx_ranges, tx_range);
/* restore partial range data from snapshot */
ASSERT((char *)txr->begin >= (char *)dst_ptr);
uint8_t *src = &range->data[
(char *)txr->begin - (char *)dst_ptr];
ASSERT((char *)txr->end >= (char *)txr->begin);
size_t size = (size_t)((char *)txr->end - (char *)txr->begin);
pmemops_memcpy(&pop->p_ops, txr->begin, src, size, 0);
Free(txr);
}
}
/*
* tx_undo_entry_apply -- applies modifications of a single ulog entry
*/
static int
tx_undo_entry_apply(struct ulog_entry_base *e, void *arg,
const struct pmem_ops *p_ops)
{
struct ulog_entry_buf *eb;
switch (ulog_entry_type(e)) {
case ULOG_OPERATION_BUF_CPY:
eb = (struct ulog_entry_buf *)e;
tx_restore_range(p_ops->base, get_tx(), eb);
break;
case ULOG_OPERATION_AND:
case ULOG_OPERATION_OR:
case ULOG_OPERATION_SET:
case ULOG_OPERATION_BUF_SET:
default:
ASSERT(0);
}
return 0;
}
/*
* tx_abort_set -- (internal) abort all set operations
*/
static void
tx_abort_set(PMEMobjpool *pop, struct lane *lane)
{
LOG(7, NULL);
ulog_foreach_entry((struct ulog *)&lane->layout->undo,
tx_undo_entry_apply, NULL, &pop->p_ops);
operation_finish(lane->undo);
}
/*
* tx_flush_range -- (internal) flush one range
*/
static void
tx_flush_range(void *data, void *ctx)
{
PMEMobjpool *pop = ctx;
struct tx_range_def *range = data;
if (!(range->flags & POBJ_FLAG_NO_FLUSH)) {
pmemops_flush(&pop->p_ops, OBJ_OFF_TO_PTR(pop, range->offset),
range->size);
}
VALGRIND_REMOVE_FROM_TX(OBJ_OFF_TO_PTR(pop, range->offset),
range->size);
}
/*
* tx_clean_range -- (internal) clean one range
*/
static void
tx_clean_range(void *data, void *ctx)
{
PMEMobjpool *pop = ctx;
struct tx_range_def *range = data;
VALGRIND_REMOVE_FROM_TX(OBJ_OFF_TO_PTR(pop, range->offset),
range->size);
VALGRIND_SET_CLEAN(OBJ_OFF_TO_PTR(pop, range->offset), range->size);
}
/*
* tx_pre_commit -- (internal) do pre-commit operations
*/
static void
tx_pre_commit(struct tx *tx)
{
LOG(5, NULL);
/* Flush all regions and destroy the whole tree. */
ravl_delete_cb(tx->ranges, tx_flush_range, tx->pop);
tx->ranges = NULL;
}
/*
* tx_abort -- (internal) abort all allocated objects
*/
static void
tx_abort(PMEMobjpool *pop, struct lane *lane)
{
LOG(7, NULL);
struct tx *tx = get_tx();
tx_abort_set(pop, lane);
ravl_delete_cb(tx->ranges, tx_clean_range, pop);
palloc_cancel(&pop->heap,
VEC_ARR(&tx->actions), VEC_SIZE(&tx->actions));
VEC_CLEAR(&tx->actions);
tx->ranges = NULL;
}
/*
* tx_get_pop -- returns the current transaction's pool handle, NULL if not
* within a transaction.
*/
PMEMobjpool *
tx_get_pop(void)
{
return get_tx()->pop;
}
/*
* add_to_tx_and_lock -- (internal) add lock to the transaction and acquire it
*/
static int
add_to_tx_and_lock(struct tx *tx, enum pobj_tx_param type, void *lock)
{
LOG(15, NULL);
int retval = 0;
struct tx_lock_data *txl;
/* check if the lock is already on the list */
SLIST_FOREACH(txl, &tx->tx_locks, tx_lock) {
if (memcmp(&txl->lock, &lock, sizeof(lock)) == 0)
return 0;
}
txl = Malloc(sizeof(*txl));
if (txl == NULL)
return ENOMEM;
txl->lock_type = type;
switch (txl->lock_type) {
case TX_PARAM_MUTEX:
txl->lock.mutex = lock;
retval = pmemobj_mutex_lock(tx->pop,
txl->lock.mutex);
if (retval) {
errno = retval;
ERR("!pmemobj_mutex_lock");
}
break;
case TX_PARAM_RWLOCK:
txl->lock.rwlock = lock;
retval = pmemobj_rwlock_wrlock(tx->pop,
txl->lock.rwlock);
if (retval) {
errno = retval;
ERR("!pmemobj_rwlock_wrlock");
}
break;
default:
ERR("Unrecognized lock type");
ASSERT(0);
break;
}
SLIST_INSERT_HEAD(&tx->tx_locks, txl, tx_lock);
return retval;
}
/*
* release_and_free_tx_locks -- (internal) release and remove all locks from the
* transaction
*/
static void
release_and_free_tx_locks(struct tx *tx)
{
LOG(15, NULL);
while (!SLIST_EMPTY(&tx->tx_locks)) {
struct tx_lock_data *tx_lock = SLIST_FIRST(&tx->tx_locks);
SLIST_REMOVE_HEAD(&tx->tx_locks, tx_lock);
switch (tx_lock->lock_type) {
case TX_PARAM_MUTEX:
pmemobj_mutex_unlock(tx->pop,
tx_lock->lock.mutex);
break;
case TX_PARAM_RWLOCK:
pmemobj_rwlock_unlock(tx->pop,
tx_lock->lock.rwlock);
break;
default:
ERR("Unrecognized lock type");
ASSERT(0);
break;
}
Free(tx_lock);
}
}
/*
* tx_lane_ranges_insert_def -- (internal) allocates and inserts a new range
* definition into the ranges tree
*/
static int
tx_lane_ranges_insert_def(PMEMobjpool *pop, struct tx *tx,
const struct tx_range_def *rdef)
{
LOG(3, "rdef->offset %"PRIu64" rdef->size %"PRIu64,
rdef->offset, rdef->size);
int ret = ravl_emplace_copy(tx->ranges, rdef);
if (ret == EEXIST)
FATAL("invalid state of ranges tree");
return ret;
}
/*
* tx_alloc_common -- (internal) common function for alloc and zalloc
*/
static PMEMoid
tx_alloc_common(struct tx *tx, size_t size, type_num_t type_num,
palloc_constr constructor, struct tx_alloc_args args)
{
LOG(3, NULL);
if (size > PMEMOBJ_MAX_ALLOC_SIZE) {
ERR("requested size too large");
return obj_tx_abort_null(ENOMEM);
}
PMEMobjpool *pop = tx->pop;
struct pobj_action *action = tx_action_add(tx);
if (action == NULL)
return obj_tx_abort_null(ENOMEM);
if (palloc_reserve(&pop->heap, size, constructor, &args, type_num, 0,
CLASS_ID_FROM_FLAG(args.flags), action) != 0)
goto err_oom;
/* allocate object to undo log */
PMEMoid retoid = OID_NULL;
retoid.off = action->heap.offset;
retoid.pool_uuid_lo = pop->uuid_lo;
size = palloc_usable_size(&pop->heap, retoid.off);
const struct tx_range_def r = {retoid.off, size, args.flags};
if (tx_lane_ranges_insert_def(pop, tx, &r) != 0)
goto err_oom;
return retoid;
err_oom:
tx_action_remove(tx);
ERR("out of memory");
return obj_tx_abort_null(ENOMEM);
}
/*
* tx_realloc_common -- (internal) common function for tx realloc
*/
static PMEMoid
tx_realloc_common(struct tx *tx, PMEMoid oid, size_t size, uint64_t type_num,
palloc_constr constructor_alloc,
palloc_constr constructor_realloc,
uint64_t flags)
{
LOG(3, NULL);
if (size > PMEMOBJ_MAX_ALLOC_SIZE) {
ERR("requested size too large");
return obj_tx_abort_null(ENOMEM);
}
/* if oid is NULL just alloc */
if (OBJ_OID_IS_NULL(oid))
return tx_alloc_common(tx, size, (type_num_t)type_num,
constructor_alloc, ALLOC_ARGS(flags));
ASSERT(OBJ_OID_IS_VALID(tx->pop, oid));
/* if size is 0 just free */
if (size == 0) {
if (pmemobj_tx_free(oid)) {
ERR("pmemobj_tx_free failed");
return oid;
} else {
return OID_NULL;
}
}
/* oid is not NULL and size is not 0 so do realloc by alloc and free */
void *ptr = OBJ_OFF_TO_PTR(tx->pop, oid.off);
size_t old_size = palloc_usable_size(&tx->pop->heap, oid.off);
size_t copy_size = old_size < size ? old_size : size;
PMEMoid new_obj = tx_alloc_common(tx, size, (type_num_t)type_num,
constructor_realloc, COPY_ARGS(flags, ptr, copy_size));
if (!OBJ_OID_IS_NULL(new_obj)) {
if (pmemobj_tx_free(oid)) {
ERR("pmemobj_tx_free failed");
VEC_POP_BACK(&tx->actions);
return OID_NULL;
}
}
return new_obj;
}
/*
* pmemobj_tx_begin -- initializes new transaction
*/
int
pmemobj_tx_begin(PMEMobjpool *pop, jmp_buf env, ...)
{
LOG(3, NULL);
int err = 0;
struct tx *tx = get_tx();
if (tx->stage == TX_STAGE_WORK) {
ASSERTne(tx->lane, NULL);
if (tx->pop != pop) {
ERR("nested transaction for different pool");
return obj_tx_abort_err(EINVAL);
}
VALGRIND_START_TX;
} else if (tx->stage == TX_STAGE_NONE) {
VALGRIND_START_TX;
lane_hold(pop, &tx->lane);
operation_start(tx->lane->undo);
VEC_INIT(&tx->actions);
SLIST_INIT(&tx->tx_entries);
SLIST_INIT(&tx->tx_locks);
tx->ranges = ravl_new_sized(tx_range_def_cmp,
sizeof(struct tx_range_def));
tx->pop = pop;
tx->first_snapshot = 1;
} else {
FATAL("Invalid stage %d to begin new transaction", tx->stage);
}
struct tx_data *txd = Malloc(sizeof(*txd));
if (txd == NULL) {
err = errno;
ERR("!Malloc");
goto err_abort;
}
tx->last_errnum = 0;
if (env != NULL)
memcpy(txd->env, env, sizeof(jmp_buf));
else
memset(txd->env, 0, sizeof(jmp_buf));
SLIST_INSERT_HEAD(&tx->tx_entries, txd, tx_entry);
tx->stage = TX_STAGE_WORK;
/* handle locks */
va_list argp;
va_start(argp, env);
enum pobj_tx_param param_type;
while ((param_type = va_arg(argp, enum pobj_tx_param)) !=
TX_PARAM_NONE) {
if (param_type == TX_PARAM_CB) {
pmemobj_tx_callback cb =
va_arg(argp, pmemobj_tx_callback);
void *arg = va_arg(argp, void *);
if (tx->stage_callback &&
(tx->stage_callback != cb ||
tx->stage_callback_arg != arg)) {
FATAL("transaction callback is already set, "
"old %p new %p old_arg %p new_arg %p",
tx->stage_callback, cb,
tx->stage_callback_arg, arg);
}
tx->stage_callback = cb;
tx->stage_callback_arg = arg;
} else {
err = add_to_tx_and_lock(tx, param_type,
va_arg(argp, void *));
if (err) {
va_end(argp);
goto err_abort;
}
}
}
va_end(argp);
ASSERT(err == 0);
return 0;
err_abort:
if (tx->stage == TX_STAGE_WORK)
obj_tx_abort(err, 0);
else
tx->stage = TX_STAGE_ONABORT;
return err;
}
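/*
 * Usage sketch (illustrative): driving a transaction through this API
 * directly, without the TX_BEGIN/TX_END convenience macros; pop and oid
 * are assumed to be a valid pool handle and allocation.
 */
#if 0
static void
example_explicit_tx(PMEMobjpool *pop, PMEMoid oid, size_t size)
{
	jmp_buf env;
	if (setjmp(env) == 0) {
		pmemobj_tx_begin(pop, env, TX_PARAM_NONE);
		pmemobj_tx_add_range(oid, 0, size);
		/* ... modify the snapshotted range here ... */
		pmemobj_tx_commit();
	} else {
		/* aborted: the longjmp from obj_tx_abort() lands here */
	}
	pmemobj_tx_end();
}
#endif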
/*
* pmemobj_tx_lock -- get lane from pool and add lock to transaction.
*/
int
pmemobj_tx_lock(enum pobj_tx_param type, void *lockp)
{
struct tx *tx = get_tx();
ASSERT_IN_TX(tx);
ASSERT_TX_STAGE_WORK(tx);
return add_to_tx_and_lock(tx, type, lockp);
}
/*
* obj_tx_callback -- (internal) executes callback associated with current stage
*/
static void
obj_tx_callback(struct tx *tx)
{
if (!tx->stage_callback)
return;
struct tx_data *txd = SLIST_FIRST(&tx->tx_entries);
/* is this the outermost transaction? */
if (SLIST_NEXT(txd, tx_entry) == NULL)
tx->stage_callback(tx->pop, tx->stage, tx->stage_callback_arg);
}
/*
* pmemobj_tx_stage -- returns current transaction stage
*/
enum pobj_tx_stage
pmemobj_tx_stage(void)
{
LOG(3, NULL);
return get_tx()->stage;
}
/*
* obj_tx_abort -- aborts current transaction
*/
static void
obj_tx_abort(int errnum, int user)
{
LOG(3, NULL);
struct tx *tx = get_tx();
ASSERT_IN_TX(tx);
ASSERT_TX_STAGE_WORK(tx);
ASSERT(tx->lane != NULL);
if (errnum == 0)
errnum = ECANCELED;
tx->stage = TX_STAGE_ONABORT;
struct tx_data *txd = SLIST_FIRST(&tx->tx_entries);
if (SLIST_NEXT(txd, tx_entry) == NULL) {
/* this is the outermost transaction */
/* process the undo log */
tx_abort(tx->pop, tx->lane);
lane_release(tx->pop);
tx->lane = NULL;
}
tx->last_errnum = errnum;
errno = errnum;
if (user)
ERR("!explicit transaction abort");
/* ONABORT */
obj_tx_callback(tx);
if (!util_is_zeroed(txd->env, sizeof(jmp_buf)))
longjmp(txd->env, errnum);
}
/*
* pmemobj_tx_abort -- aborts current transaction
*
* Note: this function should not be called from inside of pmemobj.
*/
void
pmemobj_tx_abort(int errnum)
{
PMEMOBJ_API_START();
obj_tx_abort(errnum, 1);
PMEMOBJ_API_END();
}
/*
* pmemobj_tx_errno -- returns last transaction error code
*/
int
pmemobj_tx_errno(void)
{
LOG(3, NULL);
return get_tx()->last_errnum;
}
static void
tx_post_commit(struct tx *tx)
{
operation_finish(tx->lane->undo);
VEC_CLEAR(&tx->actions);
}
/*
* pmemobj_tx_commit -- commits current transaction
*/
void
pmemobj_tx_commit(void)
{
LOG(3, NULL);
PMEMOBJ_API_START();
struct tx *tx = get_tx();
ASSERT_IN_TX(tx);
ASSERT_TX_STAGE_WORK(tx);
/* WORK */
obj_tx_callback(tx);
ASSERT(tx->lane != NULL);
struct tx_data *txd = SLIST_FIRST(&tx->tx_entries);
if (SLIST_NEXT(txd, tx_entry) == NULL) {
/* this is the outermost transaction */
PMEMobjpool *pop = tx->pop;
/* pre-commit phase */
tx_pre_commit(tx);
pmemops_drain(&pop->p_ops);
operation_start(tx->lane->external);
palloc_publish(&pop->heap, VEC_ARR(&tx->actions),
VEC_SIZE(&tx->actions), tx->lane->external);
tx_post_commit(tx);
lane_release(pop);
tx->lane = NULL;
}
tx->stage = TX_STAGE_ONCOMMIT;
/* ONCOMMIT */
obj_tx_callback(tx);
PMEMOBJ_API_END();
}
/*
* pmemobj_tx_end -- ends current transaction
*/
int
pmemobj_tx_end(void)
{
LOG(3, NULL);
struct tx *tx = get_tx();
if (tx->stage == TX_STAGE_WORK)
FATAL("pmemobj_tx_end called without pmemobj_tx_commit");
if (tx->pop == NULL)
FATAL("pmemobj_tx_end called without pmemobj_tx_begin");
if (tx->stage_callback &&
(tx->stage == TX_STAGE_ONCOMMIT ||
tx->stage == TX_STAGE_ONABORT)) {
tx->stage = TX_STAGE_FINALLY;
obj_tx_callback(tx);
}
struct tx_data *txd = SLIST_FIRST(&tx->tx_entries);
SLIST_REMOVE_HEAD(&tx->tx_entries, tx_entry);
Free(txd);
VALGRIND_END_TX;
if (SLIST_EMPTY(&tx->tx_entries)) {
ASSERTeq(tx->lane, NULL);
release_and_free_tx_locks(tx);
tx->pop = NULL;
tx->stage = TX_STAGE_NONE;
VEC_DELETE(&tx->actions);
if (tx->stage_callback) {
pmemobj_tx_callback cb = tx->stage_callback;
void *arg = tx->stage_callback_arg;
tx->stage_callback = NULL;
tx->stage_callback_arg = NULL;
cb(tx->pop, TX_STAGE_NONE, arg);
}
} else {
/* resume the next transaction */
tx->stage = TX_STAGE_WORK;
/* abort called within inner transaction, waterfall the error */
if (tx->last_errnum)
obj_tx_abort(tx->last_errnum, 0);
}
return tx->last_errnum;
}
/* NDP: re-armed here on each pass; consumed by the add_range hooks below */
int current_tx1 = 1;
/*
 * pmemobj_tx_process -- processes current transaction stage
 */
void
pmemobj_tx_process(void)
{
current_tx1 = 1;
LOG(5, NULL);
struct tx *tx = get_tx();
ASSERT_IN_TX(tx);
switch (tx->stage) {
case TX_STAGE_NONE:
break;
case TX_STAGE_WORK:
pmemobj_tx_commit();
break;
case TX_STAGE_ONABORT:
case TX_STAGE_ONCOMMIT:
tx->stage = TX_STAGE_FINALLY;
obj_tx_callback(tx);
break;
case TX_STAGE_FINALLY:
tx->stage = TX_STAGE_NONE;
break;
default:
ASSERT(0);
}
}
/*
* vg_verify_initialized -- when executed under Valgrind verifies that
* the buffer has been initialized; explicit check at snapshotting time,
* because Valgrind may find it much later when it's impossible to tell
* for which snapshot it triggered
*/
static void
vg_verify_initialized(PMEMobjpool *pop, const struct tx_range_def *def)
{
#if VG_MEMCHECK_ENABLED
if (!On_valgrind)
return;
VALGRIND_DO_DISABLE_ERROR_REPORTING;
char *start = (char *)pop + def->offset;
char *uninit = (char *)VALGRIND_CHECK_MEM_IS_DEFINED(start, def->size);
if (uninit) {
VALGRIND_PRINTF(
"Snapshotting uninitialized data in range <%p,%p> (<offset:0x%lx,size:0x%lx>)\n",
start, start + def->size, def->offset, def->size);
if (uninit != start)
VALGRIND_PRINTF("Uninitialized data starts at: %p\n",
uninit);
VALGRIND_DO_ENABLE_ERROR_REPORTING;
VALGRIND_CHECK_MEM_IS_DEFINED(start, def->size);
} else {
VALGRIND_DO_ENABLE_ERROR_REPORTING;
}
#endif
}
/*
* pmemobj_tx_add_snapshot -- (internal) creates a variably sized snapshot
*/
static int
pmemobj_tx_add_snapshot(struct tx *tx, struct tx_range_def *snapshot)
{
vg_verify_initialized(tx->pop, snapshot);
/*
* If we are creating the first snapshot, setup a redo log action to
* clear the first entry so that the undo log becomes
* invalid once the redo log is processed.
*/
if (tx->first_snapshot) {
struct pobj_action *action = tx_action_add(tx);
if (action == NULL)
return -1;
/* first entry of the first ulog */
struct ulog_entry_base *e =
(struct ulog_entry_base *)tx->lane->layout->undo.data;
palloc_set_value(&tx->pop->heap, action,
&e->offset, 0);
tx->first_snapshot = 0;
}
/*
* Depending on the size of the block, either allocate an
* entire new object or use cache.
*/
void *ptr = OBJ_OFF_TO_PTR(tx->pop, snapshot->offset);
VALGRIND_ADD_TO_TX(ptr, snapshot->size);
return operation_add_buffer(tx->lane->undo, ptr, ptr, snapshot->size,
ULOG_OPERATION_BUF_CPY);
}
/*
* pmemobj_tx_add_common -- (internal) common code for adding persistent memory
* into the transaction
*/
static int
pmemobj_tx_add_common(struct tx *tx, struct tx_range_def *args)
{
LOG(15, NULL);
if (args->size > PMEMOBJ_MAX_ALLOC_SIZE) {
ERR("snapshot size too large");
return obj_tx_abort_err(EINVAL);
}
if (args->offset < tx->pop->heap_offset ||
(args->offset + args->size) >
(tx->pop->heap_offset + tx->pop->heap_size)) {
ERR("object outside of heap");
return obj_tx_abort_err(EINVAL);
}
int ret = 0;
/*
* Search existing ranges backwards starting from the end of the
* snapshot.
*/
struct tx_range_def r = *args;
struct tx_range_def search = {0, 0, 0};
/*
* If the range is directly adjacent to an existing one,
* they can be merged, so search for less or equal elements.
*/
enum ravl_predicate p = RAVL_PREDICATE_LESS_EQUAL;
struct ravl_node *nprev = NULL;
while (r.size != 0) {
search.offset = r.offset + r.size;
struct ravl_node *n = ravl_find(tx->ranges, &search, p);
/*
* We have to skip searching for LESS_EQUAL because
* the snapshot we would find is the one that was just
* created.
*/
p = RAVL_PREDICATE_LESS;
struct tx_range_def *f = n ? ravl_data(n) : NULL;
size_t fend = f == NULL ? 0: f->offset + f->size;
size_t rend = r.offset + r.size;
if (fend == 0 || fend < r.offset) {
/*
* If found no range or the found range is not
* overlapping or adjacent on the left side, we can just
* create the entire r.offset + r.size snapshot.
*
* Snapshot:
* --+-
* Existing ranges:
* ---- (no ranges)
* or +--- (no overlap)
		 * or ---+ (adjacent on the right side)
*/
if (nprev != NULL) {
/*
* But, if we have an existing adjacent snapshot
* on the right side, we can just extend it to
* include the desired range.
*/
struct tx_range_def *fprev = ravl_data(nprev);
ASSERTeq(rend, fprev->offset);
fprev->offset -= r.size;
fprev->size += r.size;
} else {
/*
* If we don't have anything adjacent, create
* a new range in the tree.
*/
ret = tx_lane_ranges_insert_def(tx->pop,
tx, &r);
if (ret != 0)
break;
}
ret = pmemobj_tx_add_snapshot(tx, &r);
break;
} else if (fend <= rend) {
/*
* If found range has its end inside of the desired
* snapshot range, we can extend the found range by the
* size leftover on the left side.
*
* Snapshot:
* --+++--
* Existing ranges:
* +++---- (overlap on left)
* or ---+--- (found snapshot is inside)
		 * or ---+-++ (inside, and adjacent on the right)
* or +++++-- (desired snapshot is inside)
*
*/
struct tx_range_def snapshot = *args;
snapshot.offset = fend;
/* the side not yet covered by an existing snapshot */
snapshot.size = rend - fend;
/* the number of bytes intersecting in both ranges */
size_t intersection = fend - MAX(f->offset, r.offset);
r.size -= intersection + snapshot.size;
f->size += snapshot.size;
if (snapshot.size != 0) {
ret = pmemobj_tx_add_snapshot(tx, &snapshot);
if (ret != 0)
break;
}
/*
* If there's a snapshot adjacent on right side, merge
* the two ranges together.
*/
if (nprev != NULL) {
struct tx_range_def *fprev = ravl_data(nprev);
ASSERTeq(rend, fprev->offset);
f->size += fprev->size;
ravl_remove(tx->ranges, nprev);
}
} else if (fend >= r.offset) {
/*
* If found range has its end extending beyond the
* desired snapshot.
*
* Snapshot:
* --+++--
* Existing ranges:
* -----++ (adjacent on the right)
* or ----++- (overlapping on the right)
* or ----+++ (overlapping and adjacent on the right)
* or --+++++ (desired snapshot is inside)
*
* Notice that we cannot create a snapshot based solely
		 * on this information without risking overwriting an
* existing one. We have to continue iterating, but we
* keep the information about adjacent snapshots in the
* nprev variable.
*/
size_t overlap = rend - MAX(f->offset, r.offset);
r.size -= overlap;
} else {
ASSERT(0);
}
nprev = n;
}
if (ret != 0) {
ERR("out of memory");
return obj_tx_abort_err(ENOMEM);
}
return 0;
}
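/*
 * Worked example of the loop above: with an existing snapshot f = [64, 96)
 * and a request r = [32, 80), the first iteration finds f overlapping on
 * the right, shrinks r to [32, 64) and remembers f in nprev; the second
 * iteration finds nothing to the left, so f is extended to [32, 96) and
 * only the still-uncovered part [32, 64) is actually snapshotted.
 */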
#include <sys/mman.h>
/*
 * setpage -- (internal) write-protect the 4K page containing addr so
 * that the next store to it faults (part of this tree's NDP
 * shadow-paging changes)
 */
static void setpage(void * addr){
uint64_t pageNo = ((uint64_t)addr)/4096;
unsigned long * pageStart = (unsigned long *)(pageNo*4096);
mprotect(pageStart, 4096, PROT_READ);
return;
}
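/*
 * Sketch (assumption: the matching fault handler lives elsewhere in this
 * NDP port): an mprotect()-based dirty tracker typically installs a
 * SIGSEGV handler that records the faulting page and restores write
 * access so the interrupted store can retry.
 */
#if 0
#include <signal.h>
static void
on_wp_fault(int sig, siginfo_t *si, void *uctx)
{
	uintptr_t page = (uintptr_t)si->si_addr & ~(uintptr_t)4095;
	/* record the dirty page for snapshotting here, then unprotect */
	mprotect((void *)page, 4096, PROT_READ | PROT_WRITE);
}
#endif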
/*
* pmemobj_tx_add_range_direct -- adds persistent memory range into the
* transaction
*/
int page_fault_counter1 = 0;
int
pmemobj_tx_add_range_direct(const void *ptr, size_t size)
{
if(current_tx1){
get_tx();
		setpage((void *)ptr);	/* cast away const for mprotect() */
current_tx1 = 0;
}
return 0;
}
/*
* pmemobj_tx_xadd_range_direct -- adds persistent memory range into the
* transaction
*/
int page_fault_counter2 = 0;
int
pmemobj_tx_xadd_range_direct(const void *ptr, size_t size, uint64_t flags)
{
if(current_tx1){
		setpage((void *)ptr);	/* cast away const for mprotect() */
current_tx1 = 0;
}
return 0;
}
/*
* pmemobj_tx_add_range -- adds persistent memory range into the transaction
*/
int page_fault_counter3 = 0;
int
pmemobj_tx_add_range(PMEMoid oid, uint64_t hoff, size_t size)
{
PMEMOBJ_API_START();
struct tx *tx = get_tx();
if(current_tx1){
setpage(oid.off + hoff + (char *)tx->pop);
current_tx1 = 0;
}
PMEMOBJ_API_END();
return 0;
}
/* original (upstream PMDK) implementations kept below for reference: */
/*
#include <sys/mman.h>
static void setpage(void * addr){
uint64_t pageNo = ((uint64_t)addr)/4096;
unsigned long * pageStart = (unsigned long *)(pageNo*4096);
mprotect(pageStart, 4096, PROT_READ);
return;
}
int page_fault_counter1 = 0;
int
pmemobj_tx_add_range_direct(const void *ptr, size_t size)
{
if(page_fault_counter1 == 10){
setpage(ptr);
page_fault_counter1 = 0;
}
//printf("a");
page_fault_counter1++;
LOG(3, NULL);
PMEMOBJ_API_START();
struct tx *tx = get_tx();
ASSERT_IN_TX(tx);
ASSERT_TX_STAGE_WORK(tx);
PMEMobjpool *pop = tx->pop;
if (!OBJ_PTR_FROM_POOL(pop, ptr)) {
ERR("object outside of pool");
int ret = obj_tx_abort_err(EINVAL);
PMEMOBJ_API_END();
return ret;
}
struct tx_range_def args = {
.offset = (uint64_t)((char *)ptr - (char *)pop),
.size = size,
.flags = 0,
};
int ret = pmemobj_tx_add_common(tx, &args);
PMEMOBJ_API_END();
return ret;
}
int page_fault_counter2 = 0;
int
pmemobj_tx_xadd_range_direct(const void *ptr, size_t size, uint64_t flags)
{
if(page_fault_counter2 == 10){
setpage(ptr);
page_fault_counter2 = 0;
}
page_fault_counter2++;
//printf("a");
LOG(3, NULL);
struct tx *tx = get_tx();
ASSERT_IN_TX(tx);
ASSERT_TX_STAGE_WORK(tx);
PMEMOBJ_API_START();
int ret;
if (!OBJ_PTR_FROM_POOL(tx->pop, ptr)) {
ERR("object outside of pool");
ret = obj_tx_abort_err(EINVAL);
PMEMOBJ_API_END();
return ret;
}
if (flags & ~POBJ_XADD_VALID_FLAGS) {
ERR("unknown flags 0x%" PRIx64, flags & ~POBJ_XADD_VALID_FLAGS);
ret = obj_tx_abort_err(EINVAL);
PMEMOBJ_API_END();
return ret;
}
struct tx_range_def args = {
.offset = (uint64_t)((char *)ptr - (char *)tx->pop),
.size = size,
.flags = flags,
};
ret = pmemobj_tx_add_common(tx, &args);
PMEMOBJ_API_END();
return ret;
}
int page_fault_counter3 = 0;
int
pmemobj_tx_add_range(PMEMoid oid, uint64_t hoff, size_t size)
{
LOG(3, NULL);
PMEMOBJ_API_START();
struct tx *tx = get_tx();
ASSERT_IN_TX(tx);
ASSERT_TX_STAGE_WORK(tx);
if (oid.pool_uuid_lo != tx->pop->uuid_lo) {
ERR("invalid pool uuid");
int ret = obj_tx_abort_err(EINVAL);
PMEMOBJ_API_END();
return ret;
}
ASSERT(OBJ_OID_IS_VALID(tx->pop, oid));
struct tx_range_def args = {
.offset = oid.off + hoff,
.size = size,
.flags = 0,
};
int ret = pmemobj_tx_add_common(tx, &args);
PMEMOBJ_API_END();
if(page_fault_counter3 == 10){
// setpage(oid.off + hoff + (char *)tx->pop);
page_fault_counter3 = 0;
}
page_fault_counter3++;
return ret;
} */
/*
* pmemobj_tx_xadd_range -- adds persistent memory range into the transaction
*/
int
pmemobj_tx_xadd_range(PMEMoid oid, uint64_t hoff, size_t size, uint64_t flags)
{
//printf("a");
LOG(3, NULL);
PMEMOBJ_API_START();
struct tx *tx = get_tx();
ASSERT_IN_TX(tx);
ASSERT_TX_STAGE_WORK(tx);
int ret;
if (oid.pool_uuid_lo != tx->pop->uuid_lo) {
ERR("invalid pool uuid");
ret = obj_tx_abort_err(EINVAL);
PMEMOBJ_API_END();
return ret;
}
ASSERT(OBJ_OID_IS_VALID(tx->pop, oid));
if (flags & ~POBJ_XADD_VALID_FLAGS) {
ERR("unknown flags 0x%" PRIx64, flags & ~POBJ_XADD_VALID_FLAGS);
ret = obj_tx_abort_err(EINVAL);
PMEMOBJ_API_END();
return ret;
}
struct tx_range_def args = {
.offset = oid.off + hoff,
.size = size,
.flags = flags,
};
ret = pmemobj_tx_add_common(tx, &args);
PMEMOBJ_API_END();
return ret;
}
/*
* pmemobj_tx_alloc -- allocates a new object
*/
PMEMoid
pmemobj_tx_alloc(size_t size, uint64_t type_num)
{
LOG(3, NULL);
PMEMOBJ_API_START();
struct tx *tx = get_tx();
ASSERT_IN_TX(tx);
ASSERT_TX_STAGE_WORK(tx);
PMEMoid oid;
if (size == 0) {
ERR("allocation with size 0");
oid = obj_tx_abort_null(EINVAL);
PMEMOBJ_API_END();
return oid;
}
oid = tx_alloc_common(tx, size, (type_num_t)type_num,
constructor_tx_alloc, ALLOC_ARGS(0));
PMEMOBJ_API_END();
return oid;
}
/*
* pmemobj_tx_zalloc -- allocates a new zeroed object
*/
PMEMoid
pmemobj_tx_zalloc(size_t size, uint64_t type_num)
{
LOG(3, NULL);
struct tx *tx = get_tx();
ASSERT_IN_TX(tx);
ASSERT_TX_STAGE_WORK(tx);
PMEMOBJ_API_START();
PMEMoid oid;
if (size == 0) {
ERR("allocation with size 0");
oid = obj_tx_abort_null(EINVAL);
PMEMOBJ_API_END();
return oid;
}
oid = tx_alloc_common(tx, size, (type_num_t)type_num,
constructor_tx_alloc, ALLOC_ARGS(POBJ_FLAG_ZERO));
PMEMOBJ_API_END();
return oid;
}
/*
* pmemobj_tx_xalloc -- allocates a new object
*/
PMEMoid
pmemobj_tx_xalloc(size_t size, uint64_t type_num, uint64_t flags)
{
LOG(3, NULL);
struct tx *tx = get_tx();
ASSERT_IN_TX(tx);
ASSERT_TX_STAGE_WORK(tx);
PMEMOBJ_API_START();
PMEMoid oid;
if (size == 0) {
ERR("allocation with size 0");
oid = obj_tx_abort_null(EINVAL);
PMEMOBJ_API_END();
return oid;
}
if (flags & ~POBJ_TX_XALLOC_VALID_FLAGS) {
ERR("unknown flags 0x%" PRIx64,
flags & ~POBJ_TX_XALLOC_VALID_FLAGS);
oid = obj_tx_abort_null(EINVAL);
PMEMOBJ_API_END();
return oid;
}
oid = tx_alloc_common(tx, size, (type_num_t)type_num,
constructor_tx_alloc, ALLOC_ARGS(flags));
PMEMOBJ_API_END();
return oid;
}
/*
* pmemobj_tx_realloc -- resizes an existing object
*/
PMEMoid
pmemobj_tx_realloc(PMEMoid oid, size_t size, uint64_t type_num)
{
LOG(3, NULL);
struct tx *tx = get_tx();
ASSERT_IN_TX(tx);
ASSERT_TX_STAGE_WORK(tx);
PMEMOBJ_API_START();
PMEMoid ret = tx_realloc_common(tx, oid, size, type_num,
constructor_tx_alloc, constructor_tx_alloc, 0);
PMEMOBJ_API_END();
return ret;
}
/*
 * pmemobj_tx_zrealloc -- resizes an existing object, any new space is zeroed.
*/
PMEMoid
pmemobj_tx_zrealloc(PMEMoid oid, size_t size, uint64_t type_num)
{
LOG(3, NULL);
struct tx *tx = get_tx();
ASSERT_IN_TX(tx);
ASSERT_TX_STAGE_WORK(tx);
PMEMOBJ_API_START();
PMEMoid ret = tx_realloc_common(tx, oid, size, type_num,
constructor_tx_alloc, constructor_tx_alloc,
POBJ_FLAG_ZERO);
PMEMOBJ_API_END();
return ret;
}
/*
* pmemobj_tx_strdup -- allocates a new object with duplicate of the string s.
*/
PMEMoid
pmemobj_tx_strdup(const char *s, uint64_t type_num)
{
LOG(3, NULL);
struct tx *tx = get_tx();
ASSERT_IN_TX(tx);
ASSERT_TX_STAGE_WORK(tx);
PMEMOBJ_API_START();
PMEMoid oid;
if (NULL == s) {
ERR("cannot duplicate NULL string");
oid = obj_tx_abort_null(EINVAL);
PMEMOBJ_API_END();
return oid;
}
size_t len = strlen(s);
if (len == 0) {
oid = tx_alloc_common(tx, sizeof(char), (type_num_t)type_num,
constructor_tx_alloc,
ALLOC_ARGS(POBJ_FLAG_ZERO));
PMEMOBJ_API_END();
return oid;
}
size_t size = (len + 1) * sizeof(char);
oid = tx_alloc_common(tx, size, (type_num_t)type_num,
constructor_tx_alloc, COPY_ARGS(0, s, size));
PMEMOBJ_API_END();
return oid;
}
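/*
 * Illustrative usage (not part of the original source): duplicating a
 * string into pmem inside a transaction; type number 1 is arbitrary:
 *
 *	TX_BEGIN(pop) {
 *		PMEMoid name = pmemobj_tx_strdup("hello", 1);
 *		... store `name` in a location added to the tx ...
 *	} TX_ONABORT {
 *		abort();
 *	} TX_END
 */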
/*
* pmemobj_tx_wcsdup -- allocates a new object with duplicate of the wide
* character string s.
*/
PMEMoid
pmemobj_tx_wcsdup(const wchar_t *s, uint64_t type_num)
{
LOG(3, NULL);
struct tx *tx = get_tx();
ASSERT_IN_TX(tx);
ASSERT_TX_STAGE_WORK(tx);
PMEMOBJ_API_START();
PMEMoid oid;
if (NULL == s) {
ERR("cannot duplicate NULL string");
oid = obj_tx_abort_null(EINVAL);
PMEMOBJ_API_END();
return oid;
}
size_t len = wcslen(s);
if (len == 0) {
oid = tx_alloc_common(tx, sizeof(wchar_t),
(type_num_t)type_num, constructor_tx_alloc,
ALLOC_ARGS(POBJ_FLAG_ZERO));
PMEMOBJ_API_END();
return oid;
}
size_t size = (len + 1) * sizeof(wchar_t);
oid = tx_alloc_common(tx, size, (type_num_t)type_num,
constructor_tx_alloc, COPY_ARGS(0, s, size));
PMEMOBJ_API_END();
return oid;
}
/*
* pmemobj_tx_free -- frees an existing object
*/
int
pmemobj_tx_free(PMEMoid oid)
{
LOG(3, NULL);
struct tx *tx = get_tx();
ASSERT_IN_TX(tx);
ASSERT_TX_STAGE_WORK(tx);
if (OBJ_OID_IS_NULL(oid))
return 0;
PMEMobjpool *pop = tx->pop;
if (pop->uuid_lo != oid.pool_uuid_lo) {
ERR("invalid pool uuid");
return obj_tx_abort_err(EINVAL);
}
ASSERT(OBJ_OID_IS_VALID(pop, oid));
PMEMOBJ_API_START();
struct pobj_action *action;
struct tx_range_def range = {oid.off, 0, 0};
struct ravl_node *n = ravl_find(tx->ranges, &range,
RAVL_PREDICATE_EQUAL);
/*
* If attempting to free an object allocated within the same
* transaction, simply cancel the alloc and remove it from the actions.
*/
if (n != NULL) {
VEC_FOREACH_BY_PTR(action, &tx->actions) {
if (action->type == POBJ_ACTION_TYPE_HEAP &&
action->heap.offset == oid.off) {
struct tx_range_def *r = ravl_data(n);
void *ptr = OBJ_OFF_TO_PTR(pop, r->offset);
VALGRIND_SET_CLEAN(ptr, r->size);
VALGRIND_REMOVE_FROM_TX(ptr, r->size);
ravl_remove(tx->ranges, n);
palloc_cancel(&pop->heap, action, 1);
VEC_ERASE_BY_PTR(&tx->actions, action);
PMEMOBJ_API_END();
return 0;
}
}
}
action = tx_action_add(tx);
if (action == NULL) {
int ret = obj_tx_abort_err(errno);
PMEMOBJ_API_END();
return ret;
}
palloc_defer_free(&pop->heap, oid.off, action);
PMEMOBJ_API_END();
return 0;
}
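/*
 * Illustrative note (not part of the original source): the ravl lookup
 * above is what makes alloc-then-free within a single transaction
 * cheap -- the pending allocation is simply cancelled, with no deferred
 * free and no undo log entry:
 *
 *	TX_BEGIN(pop) {
 *		PMEMoid o = pmemobj_tx_alloc(64, 0);
 *		pmemobj_tx_free(o);	-- cancels the pending alloc action
 *	} TX_END
 */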
/*
* pmemobj_tx_publish -- publishes actions inside of a transaction
*/
int
pmemobj_tx_publish(struct pobj_action *actv, size_t actvcnt)
{
struct tx *tx = get_tx();
ASSERT_TX_STAGE_WORK(tx);
PMEMOBJ_API_START();
size_t entries_size = (VEC_SIZE(&tx->actions) + actvcnt) *
sizeof(struct ulog_entry_val);
if (operation_reserve(tx->lane->external, entries_size) != 0) {
PMEMOBJ_API_END();
return -1;
}
for (size_t i = 0; i < actvcnt; ++i) {
VEC_PUSH_BACK(&tx->actions, actv[i]);
}
PMEMOBJ_API_END();
return 0;
}
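/*
 * Illustrative usage (not part of the original source): publishing an
 * out-of-band reservation made with the real pmemobj_reserve() API:
 *
 *	struct pobj_action act;
 *	PMEMoid o = pmemobj_reserve(pop, &act, 64, 0);
 *	... initialize the object outside of any transaction ...
 *	TX_BEGIN(pop) {
 *		pmemobj_tx_publish(&act, 1);
 *	} TX_END
 */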
/*
* CTL_READ_HANDLER(size) -- gets the cache size transaction parameter
*/
static int
CTL_READ_HANDLER(size)(void *ctx,
enum ctl_query_source source, void *arg, struct ctl_indexes *indexes)
{
PMEMobjpool *pop = ctx;
ssize_t *arg_out = arg;
*arg_out = (ssize_t)pop->tx_params->cache_size;
return 0;
}
/*
* CTL_WRITE_HANDLER(size) -- sets the cache size transaction parameter
*/
static int
CTL_WRITE_HANDLER(size)(void *ctx,
enum ctl_query_source source, void *arg, struct ctl_indexes *indexes)
{
PMEMobjpool *pop = ctx;
ssize_t arg_in = *(int *)arg;
if (arg_in < 0 || arg_in > (ssize_t)PMEMOBJ_MAX_ALLOC_SIZE) {
errno = EINVAL;
ERR("invalid cache size, must be between 0 and max alloc size");
return -1;
}
size_t argu = (size_t)arg_in;
pop->tx_params->cache_size = argu;
return 0;
}
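/*
 * Illustrative usage (not part of the original source): these handlers
 * back the "tx.cache.size" entry point of the public ctl API:
 *
 *	long long size = 1 << 20;
 *	pmemobj_ctl_set(pop, "tx.cache.size", &size);
 *	pmemobj_ctl_get(pop, "tx.cache.size", &size);
 */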
static struct ctl_argument CTL_ARG(size) = CTL_ARG_LONG_LONG;
/*
 * CTL_READ_HANDLER(threshold) -- deprecated, kept only to log a warning
*/
static int
CTL_READ_HANDLER(threshold)(void *ctx,
enum ctl_query_source source, void *arg, struct ctl_indexes *indexes)
{
LOG(1, "tx.cache.threshold parameter is deprecated");
return 0;
}
/*
 * CTL_WRITE_HANDLER(threshold) -- deprecated
*/
static int
CTL_WRITE_HANDLER(threshold)(void *ctx,
enum ctl_query_source source, void *arg, struct ctl_indexes *indexes)
{
LOG(1, "tx.cache.threshold parameter is deprecated");
return 0;
}
static struct ctl_argument CTL_ARG(threshold) = CTL_ARG_LONG_LONG;
static const struct ctl_node CTL_NODE(cache)[] = {
CTL_LEAF_RW(size),
CTL_LEAF_RW(threshold),
CTL_NODE_END
};
/*
* CTL_READ_HANDLER(skip_expensive_checks) -- returns "skip_expensive_checks"
* var from pool ctl
*/
static int
CTL_READ_HANDLER(skip_expensive_checks)(void *ctx,
enum ctl_query_source source, void *arg, struct ctl_indexes *indexes)
{
PMEMobjpool *pop = ctx;
int *arg_out = arg;
*arg_out = pop->tx_debug_skip_expensive_checks;
return 0;
}
/*
* CTL_WRITE_HANDLER(skip_expensive_checks) -- stores "skip_expensive_checks"
* var in pool ctl
*/
static int
CTL_WRITE_HANDLER(skip_expensive_checks)(void *ctx,
enum ctl_query_source source, void *arg, struct ctl_indexes *indexes)
{
PMEMobjpool *pop = ctx;
int arg_in = *(int *)arg;
pop->tx_debug_skip_expensive_checks = arg_in;
return 0;
}
static struct ctl_argument CTL_ARG(skip_expensive_checks) = CTL_ARG_BOOLEAN;
static const struct ctl_node CTL_NODE(debug)[] = {
CTL_LEAF_RW(skip_expensive_checks),
CTL_NODE_END
};
/*
 * CTL_READ_HANDLER(queue_depth) -- returns the depth of the post commit
 * queue (no-op stub here)
 */
static int
CTL_READ_HANDLER(queue_depth)(void *ctx, enum ctl_query_source source,
void *arg, struct ctl_indexes *indexes)
{
return 0;
}
/*
 * CTL_WRITE_HANDLER(queue_depth) -- sets the depth of the post commit queue (no-op stub here)
*/
static int
CTL_WRITE_HANDLER(queue_depth)(void *ctx, enum ctl_query_source source,
void *arg, struct ctl_indexes *indexes)
{
return 0;
}
static struct ctl_argument CTL_ARG(queue_depth) = CTL_ARG_INT;
/*
 * CTL_READ_HANDLER(worker) -- launches the post commit worker thread function (no-op stub here)
*/
static int
CTL_READ_HANDLER(worker)(void *ctx, enum ctl_query_source source,
void *arg, struct ctl_indexes *indexes)
{
return 0;
}
/*
 * CTL_READ_HANDLER(stop) -- stops all post commit workers (no-op stub here)
*/
static int
CTL_READ_HANDLER(stop)(void *ctx, enum ctl_query_source source,
void *arg, struct ctl_indexes *indexes)
{
return 0;
}
static const struct ctl_node CTL_NODE(post_commit)[] = {
CTL_LEAF_RW(queue_depth),
CTL_LEAF_RO(worker),
CTL_LEAF_RO(stop),
CTL_NODE_END
};
static const struct ctl_node CTL_NODE(tx)[] = {
CTL_CHILD(debug),
CTL_CHILD(cache),
CTL_CHILD(post_commit),
CTL_NODE_END
};
/*
* tx_ctl_register -- registers ctl nodes for "tx" module
*/
void
tx_ctl_register(PMEMobjpool *pop)
{
CTL_REGISTER_MODULE(pop->ctl, tx);
}
| 43,060 | 20.76997 | 84 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemobj/heap.c
|
/*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* heap.c -- heap implementation
*/
#include <errno.h>
#include <unistd.h>
#include <string.h>
#include <float.h>
#include "queue.h"
#include "heap.h"
#include "out.h"
#include "util.h"
#include "sys_util.h"
#include "valgrind_internal.h"
#include "recycler.h"
#include "container_ravl.h"
#include "container_seglists.h"
#include "alloc_class.h"
#include "os_thread.h"
#include "set.h"
#define MAX_RUN_LOCKS MAX_CHUNK
#define MAX_RUN_LOCKS_VG 1024 /* avoid perf issues /w drd */
/*
* This is the value by which the heap might grow once we hit an OOM.
*/
#define HEAP_DEFAULT_GROW_SIZE (1 << 27) /* 128 megabytes */
/*
* Arenas store the collection of buckets for allocation classes. Each thread
* is assigned an arena on its first allocator operation.
*/
struct arena {
/* one bucket per allocation class */
struct bucket *buckets[MAX_ALLOCATION_CLASSES];
size_t nthreads;
};
struct heap_rt {
struct alloc_class_collection *alloc_classes;
/* DON'T use these two variables directly! */
struct bucket *default_bucket;
struct arena *arenas;
/* protects assignment of arenas */
os_mutex_t arenas_lock;
/* stores a pointer to one of the arenas */
os_tls_key_t thread_arena;
struct recycler *recyclers[MAX_ALLOCATION_CLASSES];
os_mutex_t run_locks[MAX_RUN_LOCKS];
unsigned nlocks;
unsigned nzones;
unsigned zones_exhausted;
unsigned narenas;
};
/*
* heap_alloc_classes -- returns the allocation classes collection
*/
struct alloc_class_collection *
heap_alloc_classes(struct palloc_heap *heap)
{
return heap->rt->alloc_classes;
}
/*
* heap_arena_init -- (internal) initializes arena instance
*/
static void
heap_arena_init(struct arena *arena)
{
arena->nthreads = 0;
for (int i = 0; i < MAX_ALLOCATION_CLASSES; ++i)
arena->buckets[i] = NULL;
}
/*
* heap_arena_destroy -- (internal) destroys arena instance
*/
static void
heap_arena_destroy(struct arena *arena)
{
for (int i = 0; i < MAX_ALLOCATION_CLASSES; ++i)
if (arena->buckets[i] != NULL)
bucket_delete(arena->buckets[i]);
}
/*
* heap_get_best_class -- returns the alloc class that best fits the
* requested size
*/
struct alloc_class *
heap_get_best_class(struct palloc_heap *heap, size_t size)
{
return alloc_class_by_alloc_size(heap->rt->alloc_classes, size);
}
/*
* heap_thread_arena_destructor -- (internal) removes arena thread assignment
*/
static void
heap_thread_arena_destructor(void *arg)
{
struct arena *a = arg;
util_fetch_and_sub64(&a->nthreads, 1);
}
/*
* heap_thread_arena_assign -- (internal) assigns the least used arena
* to current thread
*
* To avoid complexities with regards to races in the search for the least
* used arena, a lock is used, but the nthreads counter of the arena is still
* bumped using atomic instruction because it can happen in parallel to a
* destructor of a thread, which also touches that variable.
*/
static struct arena *
heap_thread_arena_assign(struct heap_rt *heap)
{
util_mutex_lock(&heap->arenas_lock);
struct arena *least_used = NULL;
struct arena *a;
for (unsigned i = 0; i < heap->narenas; ++i) {
a = &heap->arenas[i];
if (least_used == NULL || a->nthreads < least_used->nthreads)
least_used = a;
}
LOG(4, "assigning %p arena to current thread", least_used);
util_fetch_and_add64(&least_used->nthreads, 1);
util_mutex_unlock(&heap->arenas_lock);
os_tls_set(heap->thread_arena, least_used);
return least_used;
}
/*
* heap_thread_arena -- (internal) returns the arena assigned to the current
* thread
*/
static struct arena *
heap_thread_arena(struct heap_rt *heap)
{
struct arena *a;
if ((a = os_tls_get(heap->thread_arena)) == NULL)
a = heap_thread_arena_assign(heap);
return a;
}
/*
* heap_bucket_acquire_by_id -- fetches by id a bucket exclusive for the thread
* until heap_bucket_release is called
*/
struct bucket *
heap_bucket_acquire_by_id(struct palloc_heap *heap, uint8_t class_id)
{
struct heap_rt *rt = heap->rt;
struct bucket *b;
if (class_id == DEFAULT_ALLOC_CLASS_ID) {
b = rt->default_bucket;
} else {
struct arena *arena = heap_thread_arena(heap->rt);
ASSERTne(arena->buckets, NULL);
b = arena->buckets[class_id];
}
util_mutex_lock(&b->lock);
return b;
}
/*
 * heap_bucket_acquire -- fetches by class a bucket exclusive for the
 * thread until heap_bucket_release is called
*/
struct bucket *
heap_bucket_acquire(struct palloc_heap *heap, struct alloc_class *c)
{
return heap_bucket_acquire_by_id(heap, c->id);
}
/*
* heap_bucket_release -- puts the bucket back into the heap
*/
void
heap_bucket_release(struct palloc_heap *heap, struct bucket *b)
{
util_mutex_unlock(&b->lock);
}
/*
* heap_get_run_lock -- returns the lock associated with memory block
*/
os_mutex_t *
heap_get_run_lock(struct palloc_heap *heap, uint32_t chunk_id)
{
return &heap->rt->run_locks[chunk_id % heap->rt->nlocks];
}
/*
 * heap_max_zone -- (internal) calculates how many zones the heap can fit
*/
static unsigned
heap_max_zone(size_t size)
{
unsigned max_zone = 0;
size -= sizeof(struct heap_header);
while (size >= ZONE_MIN_SIZE) {
max_zone++;
size -= size <= ZONE_MAX_SIZE ? size : ZONE_MAX_SIZE;
}
return max_zone;
}
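/*
 * Worked example (not part of the original source): ZONE_MAX_SIZE is
 * sizeof(struct zone) + MAX_CHUNK * CHUNKSIZE, i.e. 65528 chunks of
 * 256 KB each, roughly 16 GB. A 1 GB heap therefore fits in a single
 * zone, while a 40 GB heap yields 3 zones: two full ~16 GB zones plus
 * one partial zone for the remainder.
 */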
/*
* zone_calc_size_idx -- (internal) calculates zone size index
*/
static uint32_t
zone_calc_size_idx(uint32_t zone_id, unsigned max_zone, size_t heap_size)
{
ASSERT(max_zone > 0);
if (zone_id < max_zone - 1)
return MAX_CHUNK;
ASSERT(heap_size >= zone_id * ZONE_MAX_SIZE);
size_t zone_raw_size = heap_size - zone_id * ZONE_MAX_SIZE;
ASSERT(zone_raw_size >= (sizeof(struct zone_header) +
sizeof(struct chunk_header) * MAX_CHUNK));
zone_raw_size -= sizeof(struct zone_header) +
sizeof(struct chunk_header) * MAX_CHUNK;
size_t zone_size_idx = zone_raw_size / CHUNKSIZE;
ASSERT(zone_size_idx <= UINT32_MAX);
return (uint32_t)zone_size_idx;
}
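/*
 * Worked example (not part of the original source): only the last zone
 * may be partial. For a 40 GB heap and zone_id == 2, zone_raw_size is
 * 40 GB - 2 * ZONE_MAX_SIZE; after subtracting the zone header and the
 * MAX_CHUNK chunk headers, the remainder divided by CHUNKSIZE (256 KB)
 * gives the usable chunk count. Zones 0 and 1 simply get MAX_CHUNK.
 */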
/*
* heap_zone_init -- (internal) writes zone's first chunk and header
*/
static void
heap_zone_init(struct palloc_heap *heap, uint32_t zone_id,
uint32_t first_chunk_id)
{
struct zone *z = ZID_TO_ZONE(heap->layout, zone_id);
uint32_t size_idx = zone_calc_size_idx(zone_id, heap->rt->nzones,
*heap->sizep);
ASSERT(size_idx - first_chunk_id > 0);
memblock_huge_init(heap, first_chunk_id, zone_id,
size_idx - first_chunk_id);
struct zone_header nhdr = {
.size_idx = size_idx,
.magic = ZONE_HEADER_MAGIC,
};
z->header = nhdr; /* write the entire header (8 bytes) at once */
pmemops_persist(&heap->p_ops, &z->header, sizeof(z->header));
}
/*
* heap_memblock_insert_block -- (internal) bucket insert wrapper for callbacks
*/
static int
heap_memblock_insert_block(const struct memory_block *m, void *b)
{
return bucket_insert_block(b, m);
}
/*
* heap_run_create -- (internal) initializes a new run on an existing free chunk
*/
static int
heap_run_create(struct palloc_heap *heap, struct bucket *b,
struct memory_block *m)
{
*m = memblock_run_init(heap,
m->chunk_id, m->zone_id, m->size_idx,
b->aclass->flags, b->aclass->unit_size,
b->aclass->run.alignment);
if (m->m_ops->iterate_free(m, heap_memblock_insert_block, b) != 0) {
b->c_ops->rm_all(b->container);
return -1;
}
return 0;
}
/*
* heap_run_reuse -- (internal) reuses existing run
*/
static int
heap_run_reuse(struct palloc_heap *heap, struct bucket *b,
const struct memory_block *m)
{
int ret = 0;
ASSERTeq(m->type, MEMORY_BLOCK_RUN);
os_mutex_t *lock = m->m_ops->get_lock(m);
util_mutex_lock(lock);
ret = m->m_ops->iterate_free(m, heap_memblock_insert_block, b);
util_mutex_unlock(lock);
if (ret == 0) {
b->active_memory_block->m = *m;
b->is_active = 1;
} else {
b->c_ops->rm_all(b->container);
}
return ret;
}
/*
* heap_free_chunk_reuse -- reuses existing free chunk
*/
int
heap_free_chunk_reuse(struct palloc_heap *heap,
struct bucket *bucket,
struct memory_block *m)
{
/*
* Perform coalescing just in case there
* are any neighboring free chunks.
*/
struct memory_block nm = heap_coalesce_huge(heap, bucket, m);
if (nm.size_idx != m->size_idx) {
m->m_ops->prep_hdr(&nm, MEMBLOCK_FREE, NULL);
}
*m = nm;
return bucket_insert_block(bucket, m);
}
/*
* heap_run_into_free_chunk -- (internal) creates a new free chunk in place of
* a run.
*/
static void
heap_run_into_free_chunk(struct palloc_heap *heap,
struct bucket *bucket,
struct memory_block *m)
{
struct chunk_header *hdr = heap_get_chunk_hdr(heap, m);
m->block_off = 0;
m->size_idx = hdr->size_idx;
/*
* The only thing this could race with is heap_memblock_on_free()
* because that function is called after processing the operation,
* which means that a different thread might immediately call this
* function if the free() made the run empty.
* We could forgo this lock if it weren't for helgrind which needs it
* to establish happens-before relation for the chunk metadata.
*/
os_mutex_t *lock = m->m_ops->get_lock(m);
util_mutex_lock(lock);
*m = memblock_huge_init(heap, m->chunk_id, m->zone_id, m->size_idx);
heap_free_chunk_reuse(heap, bucket, m);
util_mutex_unlock(lock);
}
/*
* heap_reclaim_run -- checks the run for available memory if unclaimed.
*
 * Returns 1 if the chunk was reclaimed, 0 otherwise.
*/
static int
heap_reclaim_run(struct palloc_heap *heap, struct memory_block *m)
{
struct chunk_run *run = heap_get_chunk_run(heap, m);
struct chunk_header *hdr = heap_get_chunk_hdr(heap, m);
struct alloc_class *c = alloc_class_by_run(
heap->rt->alloc_classes,
run->hdr.block_size, hdr->flags, m->size_idx);
struct recycler_element e = recycler_element_new(heap, m);
if (c == NULL) {
uint32_t size_idx = m->size_idx;
struct run_bitmap b;
m->m_ops->get_bitmap(m, &b);
ASSERTeq(size_idx, m->size_idx);
return e.free_space == b.nbits;
}
if (e.free_space == c->run.nallocs)
return 1;
if (recycler_put(heap->rt->recyclers[c->id], m, e) < 0)
ERR("lost runtime tracking info of %u run due to OOM", c->id);
return 0;
}
/*
* heap_reclaim_zone_garbage -- (internal) creates volatile state of unused runs
*/
static void
heap_reclaim_zone_garbage(struct palloc_heap *heap, struct bucket *bucket,
uint32_t zone_id)
{
struct zone *z = ZID_TO_ZONE(heap->layout, zone_id);
for (uint32_t i = 0; i < z->header.size_idx; ) {
struct chunk_header *hdr = &z->chunk_headers[i];
ASSERT(hdr->size_idx != 0);
struct memory_block m = MEMORY_BLOCK_NONE;
m.zone_id = zone_id;
m.chunk_id = i;
m.size_idx = hdr->size_idx;
memblock_rebuild_state(heap, &m);
m.m_ops->reinit_chunk(&m);
switch (hdr->type) {
case CHUNK_TYPE_RUN:
if (heap_reclaim_run(heap, &m) != 0) {
heap_run_into_free_chunk(heap, bucket,
&m);
}
break;
case CHUNK_TYPE_FREE:
heap_free_chunk_reuse(heap, bucket, &m);
break;
case CHUNK_TYPE_USED:
break;
default:
ASSERT(0);
}
i = m.chunk_id + m.size_idx; /* hdr might have changed */
}
}
/*
* heap_populate_bucket -- (internal) creates volatile state of memory blocks
*/
static int
heap_populate_bucket(struct palloc_heap *heap, struct bucket *bucket)
{
struct heap_rt *h = heap->rt;
/* at this point we are sure that there's no more memory in the heap */
if (h->zones_exhausted == h->nzones)
return ENOMEM;
uint32_t zone_id = h->zones_exhausted++;
struct zone *z = ZID_TO_ZONE(heap->layout, zone_id);
/* ignore zone and chunk headers */
VALGRIND_ADD_TO_GLOBAL_TX_IGNORE(z, sizeof(z->header) +
sizeof(z->chunk_headers));
if (z->header.magic != ZONE_HEADER_MAGIC)
heap_zone_init(heap, zone_id, 0);
heap_reclaim_zone_garbage(heap, bucket, zone_id);
/*
* It doesn't matter that this function might not have found any
* free blocks because there is still potential that subsequent calls
* will find something in later zones.
*/
return 0;
}
/*
* heap_recycle_unused -- recalculate scores in the recycler and turn any
* empty runs into free chunks
*
 * If force is not set, this function might effectively be a noop if not
 * enough space was freed.
*/
static int
heap_recycle_unused(struct palloc_heap *heap, struct recycler *recycler,
struct bucket *defb, int force)
{
struct empty_runs r = recycler_recalc(recycler, force);
if (VEC_SIZE(&r) == 0)
return ENOMEM;
struct bucket *nb = defb == NULL ? heap_bucket_acquire_by_id(heap,
DEFAULT_ALLOC_CLASS_ID) : NULL;
ASSERT(defb != NULL || nb != NULL);
struct memory_block *nm;
VEC_FOREACH_BY_PTR(nm, &r) {
heap_run_into_free_chunk(heap, defb ? defb : nb, nm);
}
if (nb != NULL)
heap_bucket_release(heap, nb);
VEC_DELETE(&r);
return 0;
}
/*
* heap_reclaim_garbage -- (internal) creates volatile state of unused runs
*/
static int
heap_reclaim_garbage(struct palloc_heap *heap, struct bucket *bucket)
{
int ret = ENOMEM;
struct recycler *r;
for (size_t i = 0; i < MAX_ALLOCATION_CLASSES; ++i) {
if ((r = heap->rt->recyclers[i]) == NULL)
continue;
if (heap_recycle_unused(heap, r, bucket, 1) == 0)
ret = 0;
}
return ret;
}
/*
* heap_ensure_huge_bucket_filled --
* (internal) refills the default bucket if needed
*/
static int
heap_ensure_huge_bucket_filled(struct palloc_heap *heap, struct bucket *bucket)
{
if (heap_reclaim_garbage(heap, bucket) == 0)
return 0;
if (heap_populate_bucket(heap, bucket) == 0)
return 0;
int extend;
if ((extend = heap_extend(heap, bucket, heap->growsize)) < 0)
return ENOMEM;
if (extend == 1)
return 0;
/*
* Extending the pool does not automatically add the chunks into the
* runtime state of the bucket - we need to traverse the new zone if
* it was created.
*/
if (heap_populate_bucket(heap, bucket) == 0)
return 0;
return ENOMEM;
}
/*
* heap_reuse_from_recycler -- (internal) try reusing runs that are currently
* in the recycler
*/
static int
heap_reuse_from_recycler(struct palloc_heap *heap,
struct bucket *b, uint32_t units, int force)
{
struct memory_block m = MEMORY_BLOCK_NONE;
m.size_idx = units;
struct recycler *r = heap->rt->recyclers[b->aclass->id];
if (!force && recycler_get(r, &m) == 0)
return heap_run_reuse(heap, b, &m);
heap_recycle_unused(heap, r, NULL, force);
if (recycler_get(r, &m) == 0)
return heap_run_reuse(heap, b, &m);
return ENOMEM;
}
/*
* heap_ensure_run_bucket_filled -- (internal) refills the bucket if needed
*/
static int
heap_ensure_run_bucket_filled(struct palloc_heap *heap, struct bucket *b,
uint32_t units)
{
ASSERTeq(b->aclass->type, CLASS_RUN);
int ret = 0;
/* get rid of the active block in the bucket */
if (b->is_active) {
b->c_ops->rm_all(b->container);
if (b->active_memory_block->nresv != 0) {
struct recycler *r = heap->rt->recyclers[b->aclass->id];
recycler_pending_put(r, b->active_memory_block);
b->active_memory_block =
Zalloc(sizeof(struct memory_block_reserved));
} else {
struct memory_block *m = &b->active_memory_block->m;
if (heap_reclaim_run(heap, m)) {
struct bucket *defb =
heap_bucket_acquire_by_id(heap,
DEFAULT_ALLOC_CLASS_ID);
heap_run_into_free_chunk(heap, defb, m);
heap_bucket_release(heap, defb);
}
}
b->is_active = 0;
}
if (heap_reuse_from_recycler(heap, b, units, 0) == 0)
goto out;
/* search in the next zone before attempting to create a new run */
struct bucket *defb = heap_bucket_acquire_by_id(heap,
DEFAULT_ALLOC_CLASS_ID);
heap_populate_bucket(heap, defb);
heap_bucket_release(heap, defb);
if (heap_reuse_from_recycler(heap, b, units, 0) == 0)
goto out;
struct memory_block m = MEMORY_BLOCK_NONE;
m.size_idx = b->aclass->run.size_idx;
defb = heap_bucket_acquire_by_id(heap,
DEFAULT_ALLOC_CLASS_ID);
/* cannot reuse an existing run, create a new one */
if (heap_get_bestfit_block(heap, defb, &m) == 0) {
ASSERTeq(m.block_off, 0);
if (heap_run_create(heap, b, &m) != 0) {
heap_bucket_release(heap, defb);
return ENOMEM;
}
b->active_memory_block->m = m;
b->is_active = 1;
heap_bucket_release(heap, defb);
goto out;
}
heap_bucket_release(heap, defb);
if (heap_reuse_from_recycler(heap, b, units, 0) == 0)
goto out;
ret = ENOMEM;
out:
return ret;
}
/*
* heap_memblock_on_free -- bookkeeping actions executed at every free of a
* block
*/
void
heap_memblock_on_free(struct palloc_heap *heap, const struct memory_block *m)
{
if (m->type != MEMORY_BLOCK_RUN)
return;
struct chunk_header *hdr = heap_get_chunk_hdr(heap, m);
struct chunk_run *run = heap_get_chunk_run(heap, m);
ASSERTeq(hdr->type, CHUNK_TYPE_RUN);
struct alloc_class *c = alloc_class_by_run(
heap->rt->alloc_classes,
run->hdr.block_size, hdr->flags, hdr->size_idx);
if (c == NULL)
return;
recycler_inc_unaccounted(heap->rt->recyclers[c->id], m);
}
/*
* heap_split_block -- (internal) splits unused part of the memory block
*/
static void
heap_split_block(struct palloc_heap *heap, struct bucket *b,
struct memory_block *m, uint32_t units)
{
ASSERT(units <= UINT16_MAX);
ASSERT(units > 0);
if (b->aclass->type == CLASS_RUN) {
ASSERT((uint64_t)m->block_off + (uint64_t)units <= UINT32_MAX);
struct memory_block r = {m->chunk_id, m->zone_id,
m->size_idx - units, (uint32_t)(m->block_off + units),
NULL, NULL, 0, 0};
memblock_rebuild_state(heap, &r);
if (bucket_insert_block(b, &r) != 0)
LOG(2,
"failed to allocate memory block runtime tracking info");
} else {
uint32_t new_chunk_id = m->chunk_id + units;
uint32_t new_size_idx = m->size_idx - units;
*m = memblock_huge_init(heap, m->chunk_id, m->zone_id, units);
struct memory_block n = memblock_huge_init(heap,
new_chunk_id, m->zone_id, new_size_idx);
if (bucket_insert_block(b, &n) != 0)
LOG(2,
"failed to allocate memory block runtime tracking info");
}
m->size_idx = units;
}
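/*
 * Worked example (not part of the original source): taking 2 units out
 * of a free huge block with size_idx 5 at chunk_id 10 shrinks m to
 * chunks [10, 12) and inserts a new free block covering chunks
 * [12, 15); for a run class the split moves block_off instead of
 * chunk_id.
 */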
/*
* heap_get_bestfit_block --
* extracts a memory block of equal size index
*/
int
heap_get_bestfit_block(struct palloc_heap *heap, struct bucket *b,
struct memory_block *m)
{
uint32_t units = m->size_idx;
while (b->c_ops->get_rm_bestfit(b->container, m) != 0) {
if (b->aclass->type == CLASS_HUGE) {
if (heap_ensure_huge_bucket_filled(heap, b) != 0)
return ENOMEM;
} else {
if (heap_ensure_run_bucket_filled(heap, b, units) != 0)
return ENOMEM;
}
}
ASSERT(m->size_idx >= units);
if (units != m->size_idx)
heap_split_block(heap, b, m, units);
m->m_ops->ensure_header_type(m, b->aclass->header_type);
m->header_type = b->aclass->header_type;
return 0;
}
/*
* heap_get_adjacent_free_block -- locates adjacent free memory block in heap
*/
static int
heap_get_adjacent_free_block(struct palloc_heap *heap,
const struct memory_block *in, struct memory_block *out, int prev)
{
struct zone *z = ZID_TO_ZONE(heap->layout, in->zone_id);
struct chunk_header *hdr = &z->chunk_headers[in->chunk_id];
out->zone_id = in->zone_id;
if (prev) {
if (in->chunk_id == 0)
return ENOENT;
struct chunk_header *prev_hdr =
&z->chunk_headers[in->chunk_id - 1];
out->chunk_id = in->chunk_id - prev_hdr->size_idx;
if (z->chunk_headers[out->chunk_id].type != CHUNK_TYPE_FREE)
return ENOENT;
out->size_idx = z->chunk_headers[out->chunk_id].size_idx;
} else { /* next */
if (in->chunk_id + hdr->size_idx == z->header.size_idx)
return ENOENT;
out->chunk_id = in->chunk_id + hdr->size_idx;
if (z->chunk_headers[out->chunk_id].type != CHUNK_TYPE_FREE)
return ENOENT;
out->size_idx = z->chunk_headers[out->chunk_id].size_idx;
}
memblock_rebuild_state(heap, out);
return 0;
}
/*
* heap_coalesce -- (internal) merges adjacent memory blocks
*/
static struct memory_block
heap_coalesce(struct palloc_heap *heap,
const struct memory_block *blocks[], int n)
{
struct memory_block ret = MEMORY_BLOCK_NONE;
const struct memory_block *b = NULL;
ret.size_idx = 0;
for (int i = 0; i < n; ++i) {
if (blocks[i] == NULL)
continue;
b = b ? b : blocks[i];
ret.size_idx += blocks[i] ? blocks[i]->size_idx : 0;
}
ASSERTne(b, NULL);
ret.chunk_id = b->chunk_id;
ret.zone_id = b->zone_id;
ret.block_off = b->block_off;
memblock_rebuild_state(heap, &ret);
return ret;
}
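/*
 * Illustrative example (not part of the original source): with a
 * previous free block {chunk_id 3, size_idx 2}, the freed block
 * {chunk_id 5, size_idx 1} and a next free block {chunk_id 6,
 * size_idx 4}, the merged result starts at chunk_id 3 with
 * size_idx 2 + 1 + 4 = 7; the first non-NULL entry supplies the
 * starting ids.
 */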
/*
* heap_coalesce_huge -- finds neighbours of a huge block, removes them from the
* volatile state and returns the resulting block
*/
struct memory_block
heap_coalesce_huge(struct palloc_heap *heap, struct bucket *b,
const struct memory_block *m)
{
const struct memory_block *blocks[3] = {NULL, m, NULL};
struct memory_block prev = MEMORY_BLOCK_NONE;
if (heap_get_adjacent_free_block(heap, m, &prev, 1) == 0 &&
b->c_ops->get_rm_exact(b->container, &prev) == 0) {
blocks[0] = &prev;
}
struct memory_block next = MEMORY_BLOCK_NONE;
if (heap_get_adjacent_free_block(heap, m, &next, 0) == 0 &&
b->c_ops->get_rm_exact(b->container, &next) == 0) {
blocks[2] = &next;
}
return heap_coalesce(heap, blocks, 3);
}
/*
* heap_end -- returns first address after heap
*/
void *
heap_end(struct palloc_heap *h)
{
ASSERT(h->rt->nzones > 0);
struct zone *last_zone = ZID_TO_ZONE(h->layout, h->rt->nzones - 1);
return &last_zone->chunks[last_zone->header.size_idx];
}
/*
* heap_get_narenas -- (internal) returns the number of arenas to create
*/
static unsigned
heap_get_narenas(void)
{
long cpus = sysconf(_SC_NPROCESSORS_ONLN);
if (cpus < 1)
cpus = 1;
unsigned arenas = (unsigned)cpus;
LOG(4, "creating %u arenas", arenas);
return arenas;
}
/*
* heap_create_alloc_class_buckets -- allocates all cache bucket
* instances of the specified type
*/
int
heap_create_alloc_class_buckets(struct palloc_heap *heap, struct alloc_class *c)
{
struct heap_rt *h = heap->rt;
if (c->type == CLASS_RUN) {
h->recyclers[c->id] = recycler_new(heap, c->run.nallocs);
if (h->recyclers[c->id] == NULL)
goto error_recycler_new;
}
int i;
for (i = 0; i < (int)h->narenas; ++i) {
h->arenas[i].buckets[c->id] = bucket_new(
container_new_seglists(heap), c);
if (h->arenas[i].buckets[c->id] == NULL)
goto error_cache_bucket_new;
}
return 0;
error_cache_bucket_new:
recycler_delete(h->recyclers[c->id]);
for (i -= 1; i >= 0; --i) {
bucket_delete(h->arenas[i].buckets[c->id]);
}
error_recycler_new:
return -1;
}
/*
* heap_buckets_init -- (internal) initializes bucket instances
*/
int
heap_buckets_init(struct palloc_heap *heap)
{
struct heap_rt *h = heap->rt;
for (uint8_t i = 0; i < MAX_ALLOCATION_CLASSES; ++i) {
struct alloc_class *c = alloc_class_by_id(h->alloc_classes, i);
if (c != NULL) {
if (heap_create_alloc_class_buckets(heap, c) != 0)
goto error_bucket_create;
}
}
h->default_bucket = bucket_new(container_new_ravl(heap),
alloc_class_by_id(h->alloc_classes, DEFAULT_ALLOC_CLASS_ID));
if (h->default_bucket == NULL)
goto error_bucket_create;
return 0;
error_bucket_create:
for (unsigned i = 0; i < h->narenas; ++i)
heap_arena_destroy(&h->arenas[i]);
return -1;
}
/*
* heap_extend -- extend the heap by the given size
*
* Returns 0 if the current zone has been extended, 1 if a new zone had to be
* created, -1 if unsuccessful.
*
* If this function has to create a new zone, it will NOT populate buckets with
* the new chunks.
*/
int
heap_extend(struct palloc_heap *heap, struct bucket *b, size_t size)
{
void *nptr = util_pool_extend(heap->set, &size, PMEMOBJ_MIN_PART);
if (nptr == NULL)
return -1;
*heap->sizep += size;
pmemops_persist(&heap->p_ops, heap->sizep, sizeof(*heap->sizep));
/*
* If interrupted after changing the size, the heap will just grow
* automatically on the next heap_boot.
*/
uint32_t nzones = heap_max_zone(*heap->sizep);
uint32_t zone_id = nzones - 1;
struct zone *z = ZID_TO_ZONE(heap->layout, zone_id);
uint32_t chunk_id = heap->rt->nzones == nzones ? z->header.size_idx : 0;
heap_zone_init(heap, zone_id, chunk_id);
if (heap->rt->nzones != nzones) {
heap->rt->nzones = nzones;
return 0;
}
struct chunk_header *hdr = &z->chunk_headers[chunk_id];
struct memory_block m = MEMORY_BLOCK_NONE;
m.chunk_id = chunk_id;
m.zone_id = zone_id;
m.block_off = 0;
m.size_idx = hdr->size_idx;
memblock_rebuild_state(heap, &m);
heap_free_chunk_reuse(heap, b, &m);
return 1;
}
/*
* heap_zone_update_if_needed -- updates the zone metadata if the pool has been
* extended.
*/
static void
heap_zone_update_if_needed(struct palloc_heap *heap)
{
struct zone *z;
for (uint32_t i = 0; i < heap->rt->nzones; ++i) {
z = ZID_TO_ZONE(heap->layout, i);
if (z->header.magic != ZONE_HEADER_MAGIC)
continue;
size_t size_idx = zone_calc_size_idx(i, heap->rt->nzones,
*heap->sizep);
if (size_idx == z->header.size_idx)
continue;
heap_zone_init(heap, i, z->header.size_idx);
}
}
/*
* heap_boot -- opens the heap region of the pmemobj pool
*
* If successful function returns zero. Otherwise an error number is returned.
*/
int
heap_boot(struct palloc_heap *heap, void *heap_start, uint64_t heap_size,
uint64_t *sizep, void *base, struct pmem_ops *p_ops,
struct stats *stats, struct pool_set *set)
{
/*
* The size can be 0 if interrupted during heap_init or this is the
* first time booting the heap with the persistent size field.
*/
if (*sizep == 0) {
*sizep = heap_size;
pmemops_persist(p_ops, sizep, sizeof(*sizep));
}
if (heap_size < *sizep) {
ERR("mapped region smaller than the heap size");
return EINVAL;
}
struct heap_rt *h = Malloc(sizeof(*h));
int err;
if (h == NULL) {
err = ENOMEM;
goto error_heap_malloc;
}
h->alloc_classes = alloc_class_collection_new();
if (h->alloc_classes == NULL) {
err = ENOMEM;
goto error_alloc_classes_new;
}
h->narenas = heap_get_narenas();
h->arenas = Malloc(sizeof(struct arena) * h->narenas);
if (h->arenas == NULL) {
err = ENOMEM;
goto error_arenas_malloc;
}
h->nzones = heap_max_zone(heap_size);
h->zones_exhausted = 0;
h->nlocks = On_valgrind ? MAX_RUN_LOCKS_VG : MAX_RUN_LOCKS;
for (unsigned i = 0; i < h->nlocks; ++i)
util_mutex_init(&h->run_locks[i]);
util_mutex_init(&h->arenas_lock);
os_tls_key_create(&h->thread_arena, heap_thread_arena_destructor);
heap->p_ops = *p_ops;
heap->layout = heap_start;
heap->rt = h;
heap->sizep = sizep;
heap->base = base;
heap->stats = stats;
heap->set = set;
heap->growsize = HEAP_DEFAULT_GROW_SIZE;
heap->alloc_pattern = PALLOC_CTL_DEBUG_NO_PATTERN;
VALGRIND_DO_CREATE_MEMPOOL(heap->layout, 0, 0);
for (unsigned i = 0; i < h->narenas; ++i)
heap_arena_init(&h->arenas[i]);
for (unsigned i = 0; i < MAX_ALLOCATION_CLASSES; ++i)
h->recyclers[i] = NULL;
heap_zone_update_if_needed(heap);
return 0;
error_arenas_malloc:
alloc_class_collection_delete(h->alloc_classes);
error_alloc_classes_new:
Free(h);
heap->rt = NULL;
error_heap_malloc:
return err;
}
/*
* heap_write_header -- (internal) creates a clean header
*/
static void
heap_write_header(struct heap_header *hdr)
{
struct heap_header newhdr = {
.signature = HEAP_SIGNATURE,
.major = HEAP_MAJOR,
.minor = HEAP_MINOR,
.unused = 0,
.chunksize = CHUNKSIZE,
.chunks_per_zone = MAX_CHUNK,
.reserved = {0},
.checksum = 0
};
util_checksum(&newhdr, sizeof(newhdr), &newhdr.checksum, 1, 0);
*hdr = newhdr;
}
/*
* heap_init -- initializes the heap
*
* If successful function returns zero. Otherwise an error number is returned.
*/
int
heap_init(void *heap_start, uint64_t heap_size, uint64_t *sizep,
struct pmem_ops *p_ops)
{
if (heap_size < HEAP_MIN_SIZE)
return EINVAL;
VALGRIND_DO_MAKE_MEM_UNDEFINED(heap_start, heap_size);
struct heap_layout *layout = heap_start;
heap_write_header(&layout->header);
pmemops_persist(p_ops, &layout->header, sizeof(struct heap_header));
unsigned zones = heap_max_zone(heap_size);
for (unsigned i = 0; i < zones; ++i) {
struct zone *zone = ZID_TO_ZONE(layout, i);
pmemops_memset(p_ops, &zone->header, 0,
sizeof(struct zone_header), 0);
pmemops_memset(p_ops, &zone->chunk_headers, 0,
sizeof(struct chunk_header), 0);
/* only explicitly allocated chunks should be accessible */
VALGRIND_DO_MAKE_MEM_NOACCESS(&zone->chunk_headers,
sizeof(struct chunk_header));
}
*sizep = heap_size;
pmemops_persist(p_ops, sizep, sizeof(*sizep));
return 0;
}
/*
* heap_cleanup -- cleanups the volatile heap state
*/
void
heap_cleanup(struct palloc_heap *heap)
{
struct heap_rt *rt = heap->rt;
alloc_class_collection_delete(rt->alloc_classes);
bucket_delete(rt->default_bucket);
for (unsigned i = 0; i < rt->narenas; ++i)
heap_arena_destroy(&rt->arenas[i]);
for (unsigned i = 0; i < rt->nlocks; ++i)
util_mutex_destroy(&rt->run_locks[i]);
util_mutex_destroy(&rt->arenas_lock);
os_tls_key_delete(rt->thread_arena);
Free(rt->arenas);
for (int i = 0; i < MAX_ALLOCATION_CLASSES; ++i) {
if (heap->rt->recyclers[i] == NULL)
continue;
recycler_delete(rt->recyclers[i]);
}
VALGRIND_DO_DESTROY_MEMPOOL(heap->layout);
Free(rt);
heap->rt = NULL;
}
/*
* heap_verify_header -- (internal) verifies if the heap header is consistent
*/
static int
heap_verify_header(struct heap_header *hdr)
{
if (util_checksum(hdr, sizeof(*hdr), &hdr->checksum, 0, 0) != 1) {
ERR("heap: invalid header's checksum");
return -1;
}
if (memcmp(hdr->signature, HEAP_SIGNATURE, HEAP_SIGNATURE_LEN) != 0) {
ERR("heap: invalid signature");
return -1;
}
return 0;
}
/*
* heap_verify_zone_header --
* (internal) verifies if the zone header is consistent
*/
static int
heap_verify_zone_header(struct zone_header *hdr)
{
if (hdr->magic != ZONE_HEADER_MAGIC) /* not initialized */
return 0;
if (hdr->size_idx == 0) {
ERR("heap: invalid zone size");
return -1;
}
return 0;
}
/*
* heap_verify_chunk_header --
* (internal) verifies if the chunk header is consistent
*/
static int
heap_verify_chunk_header(struct chunk_header *hdr)
{
if (hdr->type == CHUNK_TYPE_UNKNOWN) {
ERR("heap: invalid chunk type");
return -1;
}
if (hdr->type >= MAX_CHUNK_TYPE) {
ERR("heap: unknown chunk type");
return -1;
}
if (hdr->flags & ~CHUNK_FLAGS_ALL_VALID) {
ERR("heap: invalid chunk flags");
return -1;
}
return 0;
}
/*
* heap_verify_zone -- (internal) verifies if the zone is consistent
*/
static int
heap_verify_zone(struct zone *zone)
{
if (zone->header.magic == 0)
return 0; /* not initialized, and that is OK */
if (zone->header.magic != ZONE_HEADER_MAGIC) {
ERR("heap: invalid zone magic");
return -1;
}
if (heap_verify_zone_header(&zone->header))
return -1;
uint32_t i;
for (i = 0; i < zone->header.size_idx; ) {
if (heap_verify_chunk_header(&zone->chunk_headers[i]))
return -1;
i += zone->chunk_headers[i].size_idx;
}
if (i != zone->header.size_idx) {
ERR("heap: chunk sizes mismatch");
return -1;
}
return 0;
}
/*
* heap_check -- verifies if the heap is consistent and can be opened properly
*
* If successful function returns zero. Otherwise an error number is returned.
*/
int
heap_check(void *heap_start, uint64_t heap_size)
{
if (heap_size < HEAP_MIN_SIZE) {
ERR("heap: invalid heap size");
return -1;
}
struct heap_layout *layout = heap_start;
if (heap_verify_header(&layout->header))
return -1;
for (unsigned i = 0; i < heap_max_zone(heap_size); ++i) {
if (heap_verify_zone(ZID_TO_ZONE(layout, i)))
return -1;
}
return 0;
}
/*
* heap_check_remote -- verifies if the heap of a remote pool is consistent
* and can be opened properly
*
* If successful function returns zero. Otherwise an error number is returned.
*/
int
heap_check_remote(void *heap_start, uint64_t heap_size, struct remote_ops *ops)
{
if (heap_size < HEAP_MIN_SIZE) {
ERR("heap: invalid heap size");
return -1;
}
struct heap_layout *layout = heap_start;
struct heap_header header;
if (ops->read(ops->ctx, ops->base, &header, &layout->header,
sizeof(struct heap_header))) {
ERR("heap: obj_read_remote error");
return -1;
}
if (heap_verify_header(&header))
return -1;
struct zone *zone_buff = (struct zone *)Malloc(sizeof(struct zone));
if (zone_buff == NULL) {
ERR("heap: zone_buff malloc error");
return -1;
}
for (unsigned i = 0; i < heap_max_zone(heap_size); ++i) {
if (ops->read(ops->ctx, ops->base, zone_buff,
ZID_TO_ZONE(layout, i), sizeof(struct zone))) {
ERR("heap: obj_read_remote error");
goto out;
}
if (heap_verify_zone(zone_buff)) {
goto out;
}
}
Free(zone_buff);
return 0;
out:
Free(zone_buff);
return -1;
}
/*
* heap_zone_foreach_object -- (internal) iterates through objects in a zone
*/
static int
heap_zone_foreach_object(struct palloc_heap *heap, object_callback cb,
void *arg, struct memory_block *m)
{
struct zone *zone = ZID_TO_ZONE(heap->layout, m->zone_id);
if (zone->header.magic == 0)
return 0;
for (; m->chunk_id < zone->header.size_idx; ) {
struct chunk_header *hdr = heap_get_chunk_hdr(heap, m);
memblock_rebuild_state(heap, m);
m->size_idx = hdr->size_idx;
if (m->m_ops->iterate_used(m, cb, arg) != 0)
return 1;
m->chunk_id += m->size_idx;
m->block_off = 0;
}
return 0;
}
/*
* heap_foreach_object -- (internal) iterates through objects in the heap
*/
void
heap_foreach_object(struct palloc_heap *heap, object_callback cb, void *arg,
struct memory_block m)
{
for (; m.zone_id < heap->rt->nzones; ++m.zone_id) {
if (heap_zone_foreach_object(heap, cb, arg, &m) != 0)
break;
m.chunk_id = 0;
}
}
#if VG_MEMCHECK_ENABLED
/*
* heap_vg_open -- notifies Valgrind about heap layout
*/
void
heap_vg_open(struct palloc_heap *heap, object_callback cb,
void *arg, int objects)
{
ASSERTne(cb, NULL);
VALGRIND_DO_MAKE_MEM_UNDEFINED(heap->layout, *heap->sizep);
struct heap_layout *layout = heap->layout;
VALGRIND_DO_MAKE_MEM_DEFINED(&layout->header, sizeof(layout->header));
unsigned zones = heap_max_zone(*heap->sizep);
struct memory_block m = MEMORY_BLOCK_NONE;
for (unsigned i = 0; i < zones; ++i) {
struct zone *z = ZID_TO_ZONE(layout, i);
uint32_t chunks;
m.zone_id = i;
m.chunk_id = 0;
VALGRIND_DO_MAKE_MEM_DEFINED(&z->header, sizeof(z->header));
if (z->header.magic != ZONE_HEADER_MAGIC)
continue;
chunks = z->header.size_idx;
for (uint32_t c = 0; c < chunks; ) {
struct chunk_header *hdr = &z->chunk_headers[c];
/* define the header before rebuilding state */
VALGRIND_DO_MAKE_MEM_DEFINED(hdr, sizeof(*hdr));
m.chunk_id = c;
m.size_idx = hdr->size_idx;
memblock_rebuild_state(heap, &m);
m.m_ops->vg_init(&m, objects, cb, arg);
m.block_off = 0;
ASSERT(hdr->size_idx > 0);
c += hdr->size_idx;
}
/* mark all unused chunk headers after last as not accessible */
VALGRIND_DO_MAKE_MEM_NOACCESS(&z->chunk_headers[chunks],
(MAX_CHUNK - chunks) * sizeof(struct chunk_header));
}
}
#endif
| 36,149 | 22.798552 | 80 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemobj/heap_layout.h
|
/*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* heap_layout.h -- internal definitions for heap layout
*/
#ifndef LIBPMEMOBJ_HEAP_LAYOUT_H
#define LIBPMEMOBJ_HEAP_LAYOUT_H 1
#include <stddef.h>
#include <stdint.h>
#ifdef __cplusplus
extern "C" {
#endif
#define HEAP_MAJOR 1
#define HEAP_MINOR 0
#define MAX_CHUNK (UINT16_MAX - 7) /* has to be multiple of 8 */
#define CHUNK_BASE_ALIGNMENT 1024
#define CHUNKSIZE ((size_t)1024 * 256) /* 256 kilobytes */
#define MAX_MEMORY_BLOCK_SIZE (MAX_CHUNK * CHUNKSIZE)
#define HEAP_SIGNATURE_LEN 16
#define HEAP_SIGNATURE "MEMORY_HEAP_HDR\0"
#define ZONE_HEADER_MAGIC 0xC3F0A2D2
#define ZONE_MIN_SIZE (sizeof(struct zone) + sizeof(struct chunk))
#define ZONE_MAX_SIZE (sizeof(struct zone) + sizeof(struct chunk) * MAX_CHUNK)
#define HEAP_MIN_SIZE (sizeof(struct heap_layout) + ZONE_MIN_SIZE)
/* Base bitmap values, relevant for both normal and flexible bitmaps */
#define RUN_BITS_PER_VALUE 64U
#define RUN_BASE_METADATA_VALUES\
((unsigned)(sizeof(struct chunk_run_header) / sizeof(uint64_t)))
#define RUN_BASE_METADATA_SIZE (sizeof(struct chunk_run_header))
#define RUN_CONTENT_SIZE (CHUNKSIZE - RUN_BASE_METADATA_SIZE)
/*
* Calculates the size in bytes of a single run instance, including bitmap
*/
#define RUN_CONTENT_SIZE_BYTES(size_idx)\
(RUN_CONTENT_SIZE + (((size_idx) - 1) * CHUNKSIZE))
/* Default bitmap values, specific for old, non-flexible, bitmaps */
#define RUN_DEFAULT_METADATA_VALUES 40 /* in 8 byte words, 320 bytes total */
#define RUN_DEFAULT_BITMAP_VALUES \
(RUN_DEFAULT_METADATA_VALUES - RUN_BASE_METADATA_VALUES)
#define RUN_DEFAULT_BITMAP_SIZE (sizeof(uint64_t) * RUN_DEFAULT_BITMAP_VALUES)
#define RUN_DEFAULT_BITMAP_NBITS\
(RUN_BITS_PER_VALUE * RUN_DEFAULT_BITMAP_VALUES)
#define RUN_DEFAULT_SIZE \
(CHUNKSIZE - RUN_BASE_METADATA_SIZE - RUN_DEFAULT_BITMAP_SIZE)
/*
* Calculates the size in bytes of a single run instance, without bitmap,
* but only for the default fixed-bitmap algorithm
*/
#define RUN_DEFAULT_SIZE_BYTES(size_idx)\
(RUN_DEFAULT_SIZE + (((size_idx) - 1) * CHUNKSIZE))
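/*
 * Worked example (not part of the original source): with the default
 * bitmap, RUN_DEFAULT_METADATA_VALUES is 40 words (320 B): 2 words of
 * chunk_run_header plus 38 bitmap words (38 * 64 = 2432 bits). A
 * single-chunk run (size_idx == 1) thus offers 262144 - 320 = 261824 B
 * of data; for a 128 B allocation class that is 2045 usable units, and
 * the surplus bitmap bits stay permanently set.
 */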
#define CHUNK_MASK ((CHUNKSIZE) - 1)
#define CHUNK_ALIGN_UP(value) ((((value) + CHUNK_MASK) & ~CHUNK_MASK))
enum chunk_flags {
CHUNK_FLAG_COMPACT_HEADER = 0x0001,
CHUNK_FLAG_HEADER_NONE = 0x0002,
CHUNK_FLAG_ALIGNED = 0x0004,
CHUNK_FLAG_FLEX_BITMAP = 0x0008,
};
#define CHUNK_FLAGS_ALL_VALID (\
CHUNK_FLAG_COMPACT_HEADER |\
CHUNK_FLAG_HEADER_NONE |\
CHUNK_FLAG_ALIGNED |\
CHUNK_FLAG_FLEX_BITMAP\
)
enum chunk_type {
CHUNK_TYPE_UNKNOWN,
CHUNK_TYPE_FOOTER, /* not actual chunk type */
CHUNK_TYPE_FREE,
CHUNK_TYPE_USED,
CHUNK_TYPE_RUN,
CHUNK_TYPE_RUN_DATA,
MAX_CHUNK_TYPE
};
struct chunk {
uint8_t data[CHUNKSIZE];
};
struct chunk_run_header {
uint64_t block_size;
uint64_t alignment; /* valid only /w CHUNK_FLAG_ALIGNED */
};
struct chunk_run {
struct chunk_run_header hdr;
uint8_t content[RUN_CONTENT_SIZE]; /* bitmap + data */
};
struct chunk_header {
uint16_t type;
uint16_t flags;
uint32_t size_idx;
};
struct zone_header {
uint32_t magic;
uint32_t size_idx;
uint8_t reserved[56];
};
struct zone {
struct zone_header header;
struct chunk_header chunk_headers[MAX_CHUNK];
struct chunk chunks[];
};
struct heap_header {
char signature[HEAP_SIGNATURE_LEN];
uint64_t major;
uint64_t minor;
uint64_t unused; /* might be garbage */
uint64_t chunksize;
uint64_t chunks_per_zone;
uint8_t reserved[960];
uint64_t checksum;
};
struct heap_layout {
struct heap_header header;
struct zone zone0; /* first element of zones array */
};
#define ALLOC_HDR_SIZE_SHIFT (48ULL)
#define ALLOC_HDR_FLAGS_MASK (((1ULL) << ALLOC_HDR_SIZE_SHIFT) - 1)
struct allocation_header_legacy {
uint8_t unused[8];
uint64_t size;
uint8_t unused2[32];
uint64_t root_size;
uint64_t type_num;
};
#define ALLOC_HDR_COMPACT_SIZE sizeof(struct allocation_header_compact)
struct allocation_header_compact {
uint64_t size;
uint64_t extra;
};
enum header_type {
HEADER_LEGACY,
HEADER_COMPACT,
HEADER_NONE,
MAX_HEADER_TYPES
};
static const size_t header_type_to_size[MAX_HEADER_TYPES] = {
sizeof(struct allocation_header_legacy),
sizeof(struct allocation_header_compact),
0
};
static const enum chunk_flags header_type_to_flag[MAX_HEADER_TYPES] = {
(enum chunk_flags)0,
CHUNK_FLAG_COMPACT_HEADER,
CHUNK_FLAG_HEADER_NONE
};
static inline struct zone *
ZID_TO_ZONE(struct heap_layout *layout, size_t zone_id)
{
return (struct zone *)
((uintptr_t)&layout->zone0 + ZONE_MAX_SIZE * zone_id);
}
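/*
 * Illustrative note (not part of the original source): zones are laid
 * out back to back at fixed ZONE_MAX_SIZE strides, so zone_id to
 * address is pure arithmetic; only the last zone may actually be
 * smaller, which is why zone_calc_size_idx() recomputes its size_idx
 * from the heap size rather than from the stride.
 */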
static inline struct chunk_header *
GET_CHUNK_HDR(struct heap_layout *layout, size_t zone_id, unsigned chunk_id)
{
return &ZID_TO_ZONE(layout, zone_id)->chunk_headers[chunk_id];
}
static inline struct chunk *
GET_CHUNK(struct heap_layout *layout, size_t zone_id, unsigned chunk_id)
{
return &ZID_TO_ZONE(layout, zone_id)->chunks[chunk_id];
}
static inline struct chunk_run *
GET_CHUNK_RUN(struct heap_layout *layout, size_t zone_id, unsigned chunk_id)
{
return (struct chunk_run *)GET_CHUNK(layout, zone_id, chunk_id);
}
#ifdef __cplusplus
}
#endif
#endif
| 6,620 | 27.055085 | 78 |
h
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemobj/alloc_class.h
|
/*
* Copyright 2016-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* alloc_class.h -- internal definitions for allocation classes
*/
#ifndef LIBPMEMOBJ_ALLOC_CLASS_H
#define LIBPMEMOBJ_ALLOC_CLASS_H 1
#include <stddef.h>
#include <stdint.h>
#include <sys/types.h>
#include "heap_layout.h"
#ifdef __cplusplus
extern "C" {
#endif
#define MAX_ALLOCATION_CLASSES (UINT8_MAX)
#define DEFAULT_ALLOC_CLASS_ID (0)
#define RUN_UNIT_MAX RUN_BITS_PER_VALUE
struct alloc_class_collection;
enum alloc_class_type {
CLASS_UNKNOWN,
CLASS_HUGE,
CLASS_RUN,
MAX_ALLOC_CLASS_TYPES
};
struct alloc_class {
uint8_t id;
uint16_t flags;
size_t unit_size;
enum header_type header_type;
enum alloc_class_type type;
/* run-specific data */
struct {
uint32_t size_idx; /* size index of a single run instance */
size_t alignment; /* required alignment of objects */
unsigned nallocs; /* number of allocs per run */
} run;
};
struct alloc_class_collection *alloc_class_collection_new(void);
void alloc_class_collection_delete(struct alloc_class_collection *ac);
struct alloc_class *alloc_class_by_run(
struct alloc_class_collection *ac,
size_t unit_size, uint16_t flags, uint32_t size_idx);
struct alloc_class *alloc_class_by_alloc_size(
struct alloc_class_collection *ac, size_t size);
struct alloc_class *alloc_class_by_id(
struct alloc_class_collection *ac, uint8_t id);
int alloc_class_reserve(struct alloc_class_collection *ac, uint8_t id);
int alloc_class_find_first_free_slot(struct alloc_class_collection *ac,
uint8_t *slot);
ssize_t
alloc_class_calc_size_idx(struct alloc_class *c, size_t size);
struct alloc_class *
alloc_class_new(int id, struct alloc_class_collection *ac,
enum alloc_class_type type, enum header_type htype,
size_t unit_size, size_t alignment,
uint32_t size_idx);
void alloc_class_delete(struct alloc_class_collection *ac,
struct alloc_class *c);
#ifdef __cplusplus
}
#endif
#endif
| 3,468 | 29.699115 | 74 |
h
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemobj/recycler.c
|
/*
* Copyright 2016-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* recycler.c -- implementation of run recycler
*/
#include "heap.h"
#include "recycler.h"
#include "vec.h"
#include "out.h"
#include "util.h"
#include "sys_util.h"
#include "ravl.h"
#include "valgrind_internal.h"
#define THRESHOLD_MUL 4
/*
* recycler_element_cmp -- compares two recycler elements
*/
static int
recycler_element_cmp(const void *lhs, const void *rhs)
{
const struct recycler_element *l = lhs;
const struct recycler_element *r = rhs;
int64_t diff = (int64_t)l->max_free_block - (int64_t)r->max_free_block;
if (diff != 0)
return diff > 0 ? 1 : -1;
diff = (int64_t)l->free_space - (int64_t)r->free_space;
if (diff != 0)
return diff > 0 ? 1 : -1;
diff = (int64_t)l->zone_id - (int64_t)r->zone_id;
if (diff != 0)
return diff > 0 ? 1 : -1;
diff = (int64_t)l->chunk_id - (int64_t)r->chunk_id;
if (diff != 0)
return diff > 0 ? 1 : -1;
return 0;
}
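/*
 * Note: this ordering is keyed primarily on max_free_block, which is what
 * lets recycler_get() perform a best-fit lookup with
 * RAVL_PREDICATE_GREATER_EQUAL. For example (illustrative values), an
 * element with max_free_block == 4 sorts before one with
 * max_free_block == 8, regardless of their free_space, zone or chunk ids.
 */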
struct recycler {
struct ravl *runs;
struct palloc_heap *heap;
/*
	 * How many unaccounted units there *might* be inside the memory
	 * blocks stored in the recycler.
	 * The value is not meant to be accurate, but rather a rough measure
	 * of how often the memory block scores should be recalculated.
*
* Per-chunk unaccounted units are shared for all zones, which might
* lead to some unnecessary recalculations.
*/
size_t unaccounted_units[MAX_CHUNK];
size_t unaccounted_total;
size_t nallocs;
size_t recalc_threshold;
VEC(, struct recycler_element) recalc;
VEC(, struct memory_block_reserved *) pending;
os_mutex_t lock;
};
/*
* recycler_new -- creates new recycler instance
*/
struct recycler *
recycler_new(struct palloc_heap *heap, size_t nallocs)
{
struct recycler *r = Malloc(sizeof(struct recycler));
if (r == NULL)
goto error_alloc_recycler;
r->runs = ravl_new_sized(recycler_element_cmp,
sizeof(struct recycler_element));
if (r->runs == NULL)
goto error_alloc_tree;
r->heap = heap;
r->nallocs = nallocs;
r->recalc_threshold = nallocs * THRESHOLD_MUL;
r->unaccounted_total = 0;
memset(&r->unaccounted_units, 0, sizeof(r->unaccounted_units));
VEC_INIT(&r->recalc);
VEC_INIT(&r->pending);
os_mutex_init(&r->lock);
return r;
error_alloc_tree:
Free(r);
error_alloc_recycler:
return NULL;
}
/*
* recycler_delete -- deletes recycler instance
*/
void
recycler_delete(struct recycler *r)
{
VEC_DELETE(&r->recalc);
struct memory_block_reserved *mr;
VEC_FOREACH(mr, &r->pending) {
Free(mr);
}
VEC_DELETE(&r->pending);
os_mutex_destroy(&r->lock);
ravl_delete(r->runs);
Free(r);
}
/*
 * recycler_element_new -- calculates how many free bytes a run has and the
 * largest request that the run can handle, and returns that as a
 * recycler element struct
*/
struct recycler_element
recycler_element_new(struct palloc_heap *heap, const struct memory_block *m)
{
/*
* Counting of the clear bits can race with a concurrent deallocation
* that operates on the same run. This race is benign and has absolutely
* no effect on the correctness of this algorithm. Ideally, we would
* avoid grabbing the lock, but helgrind gets very confused if we
* try to disable reporting for this function.
*/
os_mutex_t *lock = m->m_ops->get_lock(m);
util_mutex_lock(lock);
struct recycler_element e = {
.free_space = 0,
.max_free_block = 0,
.chunk_id = m->chunk_id,
.zone_id = m->zone_id,
};
m->m_ops->calc_free(m, &e.free_space, &e.max_free_block);
util_mutex_unlock(lock);
return e;
}
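/*
 * Typical pairing with recycler_put() -- a sketch, assuming 'm' is a
 * valid run memory block owned by 'heap':
 *
 *	struct recycler_element e = recycler_element_new(heap, m);
 *	if (recycler_put(r, m, e) != 0)
 *		; // OOM: the run simply won't be tracked
 */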
/*
* recycler_put -- inserts new run into the recycler
*/
int
recycler_put(struct recycler *r, const struct memory_block *m,
struct recycler_element element)
{
int ret = 0;
util_mutex_lock(&r->lock);
ret = ravl_emplace_copy(r->runs, &element);
util_mutex_unlock(&r->lock);
return ret;
}
/*
 * recycler_pending_check -- iterates through pending memory blocks, checks
 *	their reservation status, and puts a block in the recycler once there
 *	are no more unfulfilled reservations for it.
*/
static void
recycler_pending_check(struct recycler *r)
{
struct memory_block_reserved *mr = NULL;
size_t pos;
VEC_FOREACH_BY_POS(pos, &r->pending) {
mr = VEC_ARR(&r->pending)[pos];
if (mr->nresv == 0) {
struct recycler_element e = recycler_element_new(
r->heap, &mr->m);
if (ravl_emplace_copy(r->runs, &e) != 0) {
ERR("unable to track run %u due to OOM",
mr->m.chunk_id);
}
Free(mr);
VEC_ERASE_BY_POS(&r->pending, pos);
}
}
}
/*
 * recycler_get -- retrieves a memory block from the recycler
*/
int
recycler_get(struct recycler *r, struct memory_block *m)
{
int ret = 0;
util_mutex_lock(&r->lock);
recycler_pending_check(r);
struct recycler_element e = { .max_free_block = m->size_idx, 0, 0, 0};
struct ravl_node *n = ravl_find(r->runs, &e,
RAVL_PREDICATE_GREATER_EQUAL);
if (n == NULL) {
ret = ENOMEM;
goto out;
}
struct recycler_element *ne = ravl_data(n);
m->chunk_id = ne->chunk_id;
m->zone_id = ne->zone_id;
ravl_remove(r->runs, n);
struct chunk_header *hdr = heap_get_chunk_hdr(r->heap, m);
m->size_idx = hdr->size_idx;
memblock_rebuild_state(r->heap, m);
out:
util_mutex_unlock(&r->lock);
return ret;
}
/*
* recycler_pending_put -- places the memory block in the pending container
*/
void
recycler_pending_put(struct recycler *r,
struct memory_block_reserved *m)
{
util_mutex_lock(&r->lock);
if (VEC_PUSH_BACK(&r->pending, m) != 0)
ASSERT(0); /* XXX: fix after refactoring */
util_mutex_unlock(&r->lock);
}
/*
* recycler_recalc -- recalculates the scores of runs in the recycler to match
* the updated persistent state
*/
struct empty_runs
recycler_recalc(struct recycler *r, int force)
{
struct empty_runs runs;
VEC_INIT(&runs);
uint64_t units = r->unaccounted_total;
if (units == 0 || (!force && units < (r->recalc_threshold)))
return runs;
if (util_mutex_trylock(&r->lock) != 0)
return runs;
/* If the search is forced, recalculate everything */
uint64_t search_limit = force ? UINT64_MAX : units;
uint64_t found_units = 0;
struct memory_block nm = MEMORY_BLOCK_NONE;
struct ravl_node *n;
struct recycler_element next = {0, 0, 0, 0};
enum ravl_predicate p = RAVL_PREDICATE_GREATER_EQUAL;
do {
if ((n = ravl_find(r->runs, &next, p)) == NULL)
break;
p = RAVL_PREDICATE_GREATER;
struct recycler_element *ne = ravl_data(n);
next = *ne;
uint64_t chunk_units = r->unaccounted_units[ne->chunk_id];
if (!force && chunk_units == 0)
continue;
uint32_t existing_free_space = ne->free_space;
nm.chunk_id = ne->chunk_id;
nm.zone_id = ne->zone_id;
memblock_rebuild_state(r->heap, &nm);
struct recycler_element e = recycler_element_new(r->heap, &nm);
ASSERT(e.free_space >= existing_free_space);
uint64_t free_space_diff = e.free_space - existing_free_space;
found_units += free_space_diff;
if (free_space_diff == 0)
continue;
/*
		 * Decrease the per-chunk_id counter by the number of free
		 * units found, plus the blocks potentially freed in the
		 * active memory block. Cap the subtracted value to prevent
		 * the counter from wrapping around.
*/
util_fetch_and_sub64(&r->unaccounted_units[nm.chunk_id],
MIN(chunk_units, free_space_diff + r->nallocs));
ravl_remove(r->runs, n);
if (e.free_space == r->nallocs) {
memblock_rebuild_state(r->heap, &nm);
if (VEC_PUSH_BACK(&runs, nm) != 0)
ASSERT(0); /* XXX: fix after refactoring */
} else {
VEC_PUSH_BACK(&r->recalc, e);
}
} while (found_units < search_limit);
struct recycler_element *e;
VEC_FOREACH_BY_PTR(e, &r->recalc) {
ravl_emplace_copy(r->runs, e);
}
VEC_CLEAR(&r->recalc);
util_mutex_unlock(&r->lock);
util_fetch_and_sub64(&r->unaccounted_total, units);
return runs;
}
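/*
 * A usage sketch (the reclamation call is heap-layer specific and only
 * assumed here): callers drain the returned vector of fully-free runs and
 * hand each one back to the heap.
 *
 *	struct empty_runs r = recycler_recalc(rec, 0);
 *	struct memory_block nm;
 *	VEC_FOREACH(nm, &r) {
 *		// return 'nm' to the heap as a free chunk
 *	}
 *	VEC_DELETE(&r);
 */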
/*
* recycler_inc_unaccounted -- increases the number of unaccounted units in the
* recycler
*/
void
recycler_inc_unaccounted(struct recycler *r, const struct memory_block *m)
{
util_fetch_and_add64(&r->unaccounted_total, m->size_idx);
util_fetch_and_add64(&r->unaccounted_units[m->chunk_id],
m->size_idx);
}
| 9,575 | 24.604278 | 79 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemobj/ctl_debug.c
|
/*
* Copyright 2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* ctl_debug.c -- implementation of the debug CTL namespace
*/
#include "ctl.h"
#include "ctl_debug.h"
#include "obj.h"
/*
* CTL_WRITE_HANDLER(alloc_pattern) -- sets the alloc_pattern field in heap
*/
static int
CTL_WRITE_HANDLER(alloc_pattern)(void *ctx,
enum ctl_query_source source, void *arg, struct ctl_indexes *indexes)
{
PMEMobjpool *pop = ctx;
int arg_in = *(int *)arg;
pop->heap.alloc_pattern = arg_in;
return 0;
}
/*
* CTL_READ_HANDLER(alloc_pattern) -- returns alloc_pattern heap field
*/
static int
CTL_READ_HANDLER(alloc_pattern)(void *ctx,
enum ctl_query_source source, void *arg, struct ctl_indexes *indexes)
{
PMEMobjpool *pop = ctx;
int *arg_out = arg;
*arg_out = pop->heap.alloc_pattern;
return 0;
}
static struct ctl_argument CTL_ARG(alloc_pattern) = CTL_ARG_LONG_LONG;
static const struct ctl_node CTL_NODE(heap)[] = {
CTL_LEAF_RW(alloc_pattern),
CTL_NODE_END
};
static const struct ctl_node CTL_NODE(debug)[] = {
CTL_CHILD(heap),
CTL_NODE_END
};
/*
* debug_ctl_register -- registers ctl nodes for "debug" module
*/
void
debug_ctl_register(PMEMobjpool *pop)
{
CTL_REGISTER_MODULE(pop->ctl, debug);
}
| 2,753 | 29.263736 | 75 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemobj/alloc_class.c
|
/*
* Copyright 2016-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* alloc_class.c -- implementation of allocation classes
*/
#include <float.h>
#include <string.h>
#include "alloc_class.h"
#include "heap_layout.h"
#include "util.h"
#include "out.h"
#include "bucket.h"
#include "cuckoo.h"
#define RUN_CLASS_KEY_PACK(map_idx_s, flags_s, size_idx_s)\
((uint64_t)(map_idx_s) << 32 |\
(uint64_t)(flags_s) << 16 |\
(uint64_t)(size_idx_s))
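/*
 * Example packing (illustrative values): map_idx_s == 0x2, flags_s == 0x11
 * and size_idx_s == 0x4 produce the key 0x0000000200110004 -- the map index
 * occupies the high 32 bits, the flags bits 16..31 and the size index the
 * low 16 bits.
 */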
/*
 * Value used to mark a reserved slot in the allocation class array.
*/
#define ACLASS_RESERVED ((void *)0xFFFFFFFFULL)
/*
* The last size that is handled by runs.
*/
#define MAX_RUN_SIZE (CHUNKSIZE * 10)
/*
* Maximum number of bytes the allocation class generation algorithm can decide
* to waste in a single run chunk.
*/
#define MAX_RUN_WASTED_BYTES 1024
/*
 * Allocation categories are used for allocation class generation. Each one
 * defines the biggest handled size (in bytes) and the step percentage of the
 * generation process. The step percentage defines the maximum allowed
 * external fragmentation for the category.
*/
#define MAX_ALLOC_CATEGORIES 9
/*
 * The first size (in bytes) which is actually used in the allocation
 * class generation algorithm. All smaller sizes use the first predefined
 * bucket with the smallest run unit size.
*/
#define FIRST_GENERATED_CLASS_SIZE 128
/*
* The granularity of the allocation class generation algorithm.
*/
#define ALLOC_BLOCK_SIZE_GEN 64
/*
* The first predefined allocation class size
*/
#define MIN_UNIT_SIZE 128
static struct {
size_t size;
float step;
} categories[MAX_ALLOC_CATEGORIES] = {
/* dummy category - the first allocation class is predefined */
{FIRST_GENERATED_CLASS_SIZE, 0.05f},
{1024, 0.05f},
{2048, 0.05f},
{4096, 0.05f},
{8192, 0.05f},
{16384, 0.05f},
{32768, 0.05f},
{131072, 0.05f},
{393216, 0.05f},
};
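/*
 * Worked example of the generation step (values follow the table above):
 * in the 1024-byte category with step 0.05f, a candidate unit size
 * n == 640 yields a step of 640 * 0.05 == 32 bytes, which is then rounded
 * up to the 64-byte generation granularity, so the next candidate is 704.
 */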
#define RUN_UNIT_MAX_ALLOC 8U
/*
* Every allocation has to be a multiple of at least 8 because we need to
* ensure proper alignment of every pmem structure.
*/
#define ALLOC_BLOCK_SIZE 16
/*
* Converts size (in bytes) to number of allocation blocks.
*/
#define SIZE_TO_CLASS_MAP_INDEX(_s, _g) (1 + (((_s) - 1) / (_g)))
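/*
 * For example, with a granularity of 16 bytes, sizes 1..16 all map to
 * index 1, sizes 17..32 map to index 2, and so on.
 */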
/*
* Target number of allocations per run instance.
*/
#define RUN_MIN_NALLOCS 200
/*
* Hard limit of chunks per single run.
*/
#define RUN_SIZE_IDX_CAP (16)
#define ALLOC_CLASS_DEFAULT_FLAGS CHUNK_FLAG_FLEX_BITMAP
struct alloc_class_collection {
size_t granularity;
struct alloc_class *aclasses[MAX_ALLOCATION_CLASSES];
/*
* The last size (in bytes) that is handled by runs, everything bigger
* uses the default class.
*/
size_t last_run_max_size;
/* maps allocation classes to allocation sizes, excluding the header! */
uint8_t *class_map_by_alloc_size;
/* maps allocation classes to run unit sizes */
struct cuckoo *class_map_by_unit_size;
int fail_on_missing_class;
int autogenerate_on_missing_class;
};
/*
* alloc_class_find_first_free_slot -- searches for the
* first available allocation class slot
*
* This function must be thread-safe because allocation classes can be created
* at runtime.
*/
int
alloc_class_find_first_free_slot(struct alloc_class_collection *ac,
uint8_t *slot)
{
LOG(10, NULL);
for (int n = 0; n < MAX_ALLOCATION_CLASSES; ++n) {
if (util_bool_compare_and_swap64(&ac->aclasses[n],
NULL, ACLASS_RESERVED)) {
*slot = (uint8_t)n;
return 0;
}
}
return -1;
}
/*
* alloc_class_reserve -- reserve the specified class id
*/
int
alloc_class_reserve(struct alloc_class_collection *ac, uint8_t id)
{
LOG(10, NULL);
return util_bool_compare_and_swap64(&ac->aclasses[id],
NULL, ACLASS_RESERVED) ? 0 : -1;
}
/*
* alloc_class_reservation_clear -- removes the reservation on class id
*/
static void
alloc_class_reservation_clear(struct alloc_class_collection *ac, int id)
{
LOG(10, NULL);
int ret = util_bool_compare_and_swap64(&ac->aclasses[id],
ACLASS_RESERVED, NULL);
ASSERT(ret);
}
/*
* alloc_class_new -- creates a new allocation class
*/
struct alloc_class *
alloc_class_new(int id, struct alloc_class_collection *ac,
enum alloc_class_type type, enum header_type htype,
size_t unit_size, size_t alignment,
uint32_t size_idx)
{
LOG(10, NULL);
struct alloc_class *c = Malloc(sizeof(*c));
if (c == NULL)
goto error_class_alloc;
c->unit_size = unit_size;
c->header_type = htype;
c->type = type;
c->flags = (uint16_t)
(header_type_to_flag[c->header_type] |
(alignment ? CHUNK_FLAG_ALIGNED : 0)) |
ALLOC_CLASS_DEFAULT_FLAGS;
switch (type) {
case CLASS_HUGE:
id = DEFAULT_ALLOC_CLASS_ID;
break;
case CLASS_RUN:
c->run.alignment = alignment;
struct run_bitmap b;
memblock_run_bitmap(&size_idx, c->flags, unit_size,
alignment, NULL, &b);
c->run.nallocs = b.nbits;
c->run.size_idx = size_idx;
uint8_t slot = (uint8_t)id;
if (id < 0 && alloc_class_find_first_free_slot(ac,
&slot) != 0)
goto error_class_alloc;
id = slot;
size_t map_idx = SIZE_TO_CLASS_MAP_INDEX(c->unit_size,
ac->granularity);
ASSERT(map_idx <= UINT32_MAX);
uint32_t map_idx_s = (uint32_t)map_idx;
uint16_t size_idx_s = (uint16_t)size_idx;
uint16_t flags_s = (uint16_t)c->flags;
uint64_t k = RUN_CLASS_KEY_PACK(map_idx_s,
flags_s, size_idx_s);
if (cuckoo_insert(ac->class_map_by_unit_size,
k, c) != 0) {
ERR("unable to register allocation class");
goto error_map_insert;
}
break;
default:
ASSERT(0);
}
c->id = (uint8_t)id;
ac->aclasses[c->id] = c;
return c;
error_map_insert:
Free(c);
error_class_alloc:
if (id >= 0)
alloc_class_reservation_clear(ac, id);
return NULL;
}
/*
* alloc_class_delete -- (internal) deletes an allocation class
*/
void
alloc_class_delete(struct alloc_class_collection *ac,
struct alloc_class *c)
{
LOG(10, NULL);
ac->aclasses[c->id] = NULL;
Free(c);
}
/*
* alloc_class_find_or_create -- (internal) searches for the
 * biggest allocation class whose unit size evenly divides n.
* If no such class exists, create one.
*/
static struct alloc_class *
alloc_class_find_or_create(struct alloc_class_collection *ac, size_t n)
{
LOG(10, NULL);
COMPILE_ERROR_ON(MAX_ALLOCATION_CLASSES > UINT8_MAX);
uint64_t required_size_bytes = n * RUN_MIN_NALLOCS;
uint32_t required_size_idx = 1;
if (required_size_bytes > RUN_DEFAULT_SIZE) {
required_size_bytes -= RUN_DEFAULT_SIZE;
required_size_idx +=
CALC_SIZE_IDX(CHUNKSIZE, required_size_bytes);
if (required_size_idx > RUN_SIZE_IDX_CAP)
required_size_idx = RUN_SIZE_IDX_CAP;
}
for (int i = MAX_ALLOCATION_CLASSES - 1; i >= 0; --i) {
struct alloc_class *c = ac->aclasses[i];
if (c == NULL || c->type == CLASS_HUGE ||
c->run.size_idx < required_size_idx)
continue;
if (n % c->unit_size == 0 &&
n / c->unit_size <= RUN_UNIT_MAX_ALLOC)
return c;
}
/*
* In order to minimize the wasted space at the end of the run the
* run data size must be divisible by the allocation class unit size
* with the smallest possible remainder, preferably 0.
*/
struct run_bitmap b;
size_t runsize_bytes = 0;
do {
if (runsize_bytes != 0) /* don't increase on first iteration */
n += ALLOC_BLOCK_SIZE_GEN;
uint32_t size_idx = required_size_idx;
memblock_run_bitmap(&size_idx, ALLOC_CLASS_DEFAULT_FLAGS, n, 0,
NULL, &b);
runsize_bytes = RUN_CONTENT_SIZE_BYTES(size_idx) - b.size;
} while ((runsize_bytes % n) > MAX_RUN_WASTED_BYTES);
/*
* Now that the desired unit size is found the existing classes need
* to be searched for possible duplicates. If a class that can handle
* the calculated size already exists, simply return that.
*/
for (int i = 1; i < MAX_ALLOCATION_CLASSES; ++i) {
struct alloc_class *c = ac->aclasses[i];
if (c == NULL || c->type == CLASS_HUGE)
continue;
if (n / c->unit_size <= RUN_UNIT_MAX_ALLOC &&
n % c->unit_size == 0)
return c;
if (c->unit_size == n)
return c;
}
return alloc_class_new(-1, ac, CLASS_RUN, HEADER_COMPACT, n, 0,
required_size_idx);
}
/*
* alloc_class_find_min_frag -- searches for an existing allocation
* class that will provide the smallest internal fragmentation for the given
* size.
*/
static struct alloc_class *
alloc_class_find_min_frag(struct alloc_class_collection *ac, size_t n)
{
LOG(10, NULL);
struct alloc_class *best_c = NULL;
size_t lowest_waste = SIZE_MAX;
ASSERTne(n, 0);
/*
* Start from the largest buckets in order to minimize unit size of
* allocated memory blocks.
*/
for (int i = MAX_ALLOCATION_CLASSES - 1; i >= 0; --i) {
struct alloc_class *c = ac->aclasses[i];
/* can't use alloc classes /w no headers by default */
if (c == NULL || c->header_type == HEADER_NONE)
continue;
size_t real_size = n + header_type_to_size[c->header_type];
size_t units = CALC_SIZE_IDX(c->unit_size, real_size);
/* can't exceed the maximum allowed run unit max */
if (c->type == CLASS_RUN && units > RUN_UNIT_MAX_ALLOC)
continue;
if (c->unit_size * units == real_size)
return c;
size_t waste = (c->unit_size * units) - real_size;
/*
* If we assume that the allocation class is only ever going to
* be used with exactly one size, the effective internal
* fragmentation would be increased by the leftover
* memory at the end of the run.
*/
if (c->type == CLASS_RUN) {
size_t wasted_units = c->run.nallocs % units;
size_t wasted_bytes = wasted_units * c->unit_size;
size_t waste_avg_per_unit = wasted_bytes /
c->run.nallocs;
waste += waste_avg_per_unit;
}
if (best_c == NULL || lowest_waste > waste) {
best_c = c;
lowest_waste = waste;
}
}
ASSERTne(best_c, NULL);
return best_c;
}
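/*
 * Worked example (illustrative numbers, assuming a compact 16-byte
 * header): for n == 200 and a run class with unit_size == 128 and
 * nallocs == 101, real_size == 216 and units == 2, so the base waste is
 * 2 * 128 - 216 == 40 bytes; the run leftover adds
 * ((101 % 2) * 128) / 101 == 1 byte, giving a total score of 41.
 */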
/*
* alloc_class_collection_new -- creates a new collection of allocation classes
*/
struct alloc_class_collection *
alloc_class_collection_new()
{
LOG(10, NULL);
struct alloc_class_collection *ac = Zalloc(sizeof(*ac));
if (ac == NULL)
return NULL;
ac->granularity = ALLOC_BLOCK_SIZE;
ac->last_run_max_size = MAX_RUN_SIZE;
ac->fail_on_missing_class = 0;
ac->autogenerate_on_missing_class = 1;
size_t maps_size = (MAX_RUN_SIZE / ac->granularity) + 1;
if ((ac->class_map_by_alloc_size = Malloc(maps_size)) == NULL)
goto error;
if ((ac->class_map_by_unit_size = cuckoo_new()) == NULL)
goto error;
memset(ac->class_map_by_alloc_size, 0xFF, maps_size);
if (alloc_class_new(-1, ac, CLASS_HUGE, HEADER_COMPACT,
CHUNKSIZE, 0, 1) == NULL)
goto error;
struct alloc_class *predefined_class =
alloc_class_new(-1, ac, CLASS_RUN, HEADER_COMPACT,
MIN_UNIT_SIZE, 0, 1);
if (predefined_class == NULL)
goto error;
for (size_t i = 0; i < FIRST_GENERATED_CLASS_SIZE / ac->granularity;
++i) {
ac->class_map_by_alloc_size[i] = predefined_class->id;
}
/*
* Based on the defined categories, a set of allocation classes is
	 * created. The unit size of those classes depends on the category's
	 * initial size and step.
*/
size_t granularity_mask = ALLOC_BLOCK_SIZE_GEN - 1;
for (int c = 1; c < MAX_ALLOC_CATEGORIES; ++c) {
size_t n = categories[c - 1].size + ALLOC_BLOCK_SIZE_GEN;
do {
if (alloc_class_find_or_create(ac, n) == NULL)
goto error;
float stepf = (float)n * categories[c].step;
size_t stepi = (size_t)stepf;
stepi = (stepf - (float)stepi < FLT_EPSILON) ?
stepi : stepi + 1;
n += (stepi + (granularity_mask)) & ~granularity_mask;
} while (n <= categories[c].size);
}
/*
	 * Find the largest alloc class and use its unit size as the run
	 * allocation threshold.
*/
uint8_t largest_aclass_slot;
for (largest_aclass_slot = MAX_ALLOCATION_CLASSES - 1;
largest_aclass_slot > 0 &&
ac->aclasses[largest_aclass_slot] == NULL;
--largest_aclass_slot) {
/* intentional NOP */
}
struct alloc_class *c = ac->aclasses[largest_aclass_slot];
/*
	 * The actual run might contain fewer unit blocks than the theoretical
* unit max variable. This may be the case for very large unit sizes.
*/
size_t real_unit_max = c->run.nallocs < RUN_UNIT_MAX_ALLOC ?
c->run.nallocs : RUN_UNIT_MAX_ALLOC;
size_t theoretical_run_max_size = c->unit_size * real_unit_max;
ac->last_run_max_size = MAX_RUN_SIZE > theoretical_run_max_size ?
theoretical_run_max_size : MAX_RUN_SIZE;
#ifdef DEBUG
/*
* Verify that each bucket's unit size points back to the bucket by the
* bucket map. This must be true for the default allocation classes,
* otherwise duplicate buckets will be created.
*/
for (size_t i = 0; i < MAX_ALLOCATION_CLASSES; ++i) {
struct alloc_class *c = ac->aclasses[i];
if (c != NULL && c->type == CLASS_RUN) {
ASSERTeq(i, c->id);
ASSERTeq(alloc_class_by_run(ac, c->unit_size,
c->flags, c->run.size_idx), c);
}
}
#endif
return ac;
error:
alloc_class_collection_delete(ac);
return NULL;
}
/*
* alloc_class_collection_delete -- deletes the allocation class collection and
* all of the classes within it
*/
void
alloc_class_collection_delete(struct alloc_class_collection *ac)
{
LOG(10, NULL);
for (size_t i = 0; i < MAX_ALLOCATION_CLASSES; ++i) {
struct alloc_class *c = ac->aclasses[i];
if (c != NULL) {
alloc_class_delete(ac, c);
}
}
cuckoo_delete(ac->class_map_by_unit_size);
Free(ac->class_map_by_alloc_size);
Free(ac);
}
/*
* alloc_class_assign_by_size -- (internal) chooses the allocation class that
* best approximates the provided size
*/
static struct alloc_class *
alloc_class_assign_by_size(struct alloc_class_collection *ac,
size_t size)
{
LOG(10, NULL);
size_t class_map_index = SIZE_TO_CLASS_MAP_INDEX(size,
ac->granularity);
struct alloc_class *c = alloc_class_find_min_frag(ac,
class_map_index * ac->granularity);
ASSERTne(c, NULL);
/*
* We don't lock this array because locking this section here and then
	 * bailing out if someone else was faster would still be slower than
* just calculating the class and failing to assign the variable.
* We are using a compare and swap so that helgrind/drd don't complain.
*/
util_bool_compare_and_swap64(
&ac->class_map_by_alloc_size[class_map_index],
MAX_ALLOCATION_CLASSES, c->id);
return c;
}
/*
* alloc_class_by_alloc_size -- returns allocation class that is assigned
* to handle an allocation of the provided size
*/
struct alloc_class *
alloc_class_by_alloc_size(struct alloc_class_collection *ac, size_t size)
{
if (size < ac->last_run_max_size) {
uint8_t class_id = ac->class_map_by_alloc_size[
SIZE_TO_CLASS_MAP_INDEX(size, ac->granularity)];
if (class_id == MAX_ALLOCATION_CLASSES) {
if (ac->fail_on_missing_class)
return NULL;
else if (ac->autogenerate_on_missing_class)
return alloc_class_assign_by_size(ac, size);
else
return ac->aclasses[DEFAULT_ALLOC_CLASS_ID];
}
return ac->aclasses[class_id];
} else {
return ac->aclasses[DEFAULT_ALLOC_CLASS_ID];
}
}
/*
* alloc_class_by_run -- returns the allocation class that has the given
* unit size
*/
struct alloc_class *
alloc_class_by_run(struct alloc_class_collection *ac,
size_t unit_size, uint16_t flags, uint32_t size_idx)
{
size_t map_idx = SIZE_TO_CLASS_MAP_INDEX(unit_size, ac->granularity);
ASSERT(map_idx <= UINT32_MAX);
uint32_t map_idx_s = (uint32_t)map_idx;
ASSERT(size_idx <= UINT16_MAX);
uint16_t size_idx_s = (uint16_t)size_idx;
uint16_t flags_s = (uint16_t)flags;
return cuckoo_get(ac->class_map_by_unit_size,
RUN_CLASS_KEY_PACK(map_idx_s, flags_s, size_idx_s));
}
/*
 * alloc_class_by_id -- returns the allocation class with the given id
*/
struct alloc_class *
alloc_class_by_id(struct alloc_class_collection *ac, uint8_t id)
{
return ac->aclasses[id];
}
/*
 * alloc_class_calc_size_idx -- calculates how many units the size requires
*/
ssize_t
alloc_class_calc_size_idx(struct alloc_class *c, size_t size)
{
uint32_t size_idx = CALC_SIZE_IDX(c->unit_size,
size + header_type_to_size[c->header_type]);
if (c->type == CLASS_RUN) {
if (c->header_type == HEADER_NONE && size_idx != 1)
return -1;
else if (size_idx > RUN_UNIT_MAX)
return -1;
else if (size_idx > c->run.nallocs)
return -1;
}
return size_idx;
}
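/*
 * Worked example (a sketch; numbers assume a run class with a 128-byte
 * unit size and a compact 16-byte header): a 200-byte allocation needs
 * CALC_SIZE_IDX(128, 216) == 2 units, so the function returns 2 provided
 * the class header type and run capacity allow multi-unit allocations.
 */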
| 17,578 | 25.554381 | 80 |
c
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemobj/container_seglists.h
|
/*
* Copyright 2015-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* container_seglists.h -- internal definitions for
* segregated lists block container
*/
#ifndef LIBPMEMOBJ_CONTAINER_SEGLISTS_H
#define LIBPMEMOBJ_CONTAINER_SEGLISTS_H 1
#include "container.h"
#ifdef __cplusplus
extern "C" {
#endif
struct block_container *container_new_seglists(struct palloc_heap *heap);
#ifdef __cplusplus
}
#endif
#endif /* LIBPMEMOBJ_CONTAINER_SEGLISTS_H */
| 1,994 | 35.944444 | 74 |
h
|
null |
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmemobj/obj.h
|
/*
* Copyright 2014-2018, Intel Corporation
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* obj.h -- internal definitions for obj module
*/
#ifndef LIBPMEMOBJ_OBJ_H
#define LIBPMEMOBJ_OBJ_H 1
#include <stddef.h>
#include <stdint.h>
#include "lane.h"
#include "pool_hdr.h"
#include "pmalloc.h"
#include "ctl.h"
#include "sync.h"
#include "stats.h"
#include "ctl_debug.h"
#ifdef __cplusplus
extern "C" {
#endif
#define PMEMOBJ_LOG_PREFIX "libpmemobj"
#define PMEMOBJ_LOG_LEVEL_VAR "PMEMOBJ_LOG_LEVEL"
#define PMEMOBJ_LOG_FILE_VAR "PMEMOBJ_LOG_FILE"
/* attributes of the obj memory pool format for the pool header */
#define OBJ_HDR_SIG "PMEMOBJ" /* must be 8 bytes including '\0' */
#define OBJ_FORMAT_MAJOR 5
#define OBJ_FORMAT_FEAT_DEFAULT \
{0x0000, POOL_FEAT_INCOMPAT_DEFAULT, 0x0000}
#define OBJ_FORMAT_FEAT_CHECK \
{0x0000, POOL_FEAT_INCOMPAT_VALID, 0x0000}
static const features_t obj_format_feat_default = OBJ_FORMAT_FEAT_CHECK;
/* size of the persistent part of PMEMOBJ pool descriptor (2kB) */
#define OBJ_DSC_P_SIZE 2048
/* size of unused part of the persistent part of PMEMOBJ pool descriptor */
#define OBJ_DSC_P_UNUSED (OBJ_DSC_P_SIZE - PMEMOBJ_MAX_LAYOUT - 40)
#define OBJ_LANES_OFFSET 8192 /* lanes offset (8kB) */
#define OBJ_NLANES 1024 /* number of lanes */
#define OBJ_OFF_TO_PTR(pop, off) ((void *)((uintptr_t)(pop) + (off)))
#define OBJ_PTR_TO_OFF(pop, ptr) ((uintptr_t)(ptr) - (uintptr_t)(pop))
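/*
 * The two macros above are exact inverses over a mapped pool, e.g.
 * (sketch):
 *	void *ptr = OBJ_OFF_TO_PTR(pop, oid.off);
 *	ASSERTeq(OBJ_PTR_TO_OFF(pop, ptr), oid.off);
 */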
#define OBJ_OID_IS_NULL(oid) ((oid).off == 0)
#define OBJ_LIST_EMPTY(head) OBJ_OID_IS_NULL((head)->pe_first)
#define OBJ_OFF_FROM_HEAP(pop, off)\
((off) >= (pop)->heap_offset &&\
(off) < (pop)->heap_offset + (pop)->heap_size)
#define OBJ_OFF_FROM_LANES(pop, off)\
((off) >= (pop)->lanes_offset &&\
(off) < (pop)->lanes_offset +\
(pop)->nlanes * sizeof(struct lane_layout))
#define OBJ_PTR_FROM_POOL(pop, ptr)\
((uintptr_t)(ptr) >= (uintptr_t)(pop) &&\
(uintptr_t)(ptr) < (uintptr_t)(pop) +\
(pop)->heap_offset + (pop)->heap_size)
#define OBJ_OFF_IS_VALID(pop, off)\
(OBJ_OFF_FROM_HEAP(pop, off) ||\
(OBJ_PTR_TO_OFF(pop, &(pop)->root_offset) == (off)) ||\
(OBJ_PTR_TO_OFF(pop, &(pop)->root_size) == (off)) ||\
(OBJ_OFF_FROM_LANES(pop, off)))
#define OBJ_PTR_IS_VALID(pop, ptr)\
OBJ_OFF_IS_VALID(pop, OBJ_PTR_TO_OFF(pop, ptr))
typedef void (*persist_local_fn)(const void *, size_t);
typedef void (*flush_local_fn)(const void *, size_t);
typedef void (*drain_local_fn)(void);
typedef void *(*memcpy_local_fn)(void *dest, const void *src, size_t len,
unsigned flags);
typedef void *(*memmove_local_fn)(void *dest, const void *src, size_t len,
unsigned flags);
typedef void *(*memset_local_fn)(void *dest, int c, size_t len, unsigned flags);
typedef int (*persist_remote_fn)(PMEMobjpool *pop, const void *addr,
size_t len, unsigned lane, unsigned flags);
typedef uint64_t type_num_t;
#define CONVERSION_FLAG_OLD_SET_CACHE ((1ULL) << 0)
struct pmemobjpool {
struct pool_hdr hdr; /* memory pool header */
/* persistent part of PMEMOBJ pool descriptor (2kB) */
char layout[PMEMOBJ_MAX_LAYOUT];
uint64_t lanes_offset;
uint64_t nlanes;
uint64_t heap_offset;
uint64_t unused3;
unsigned char unused[OBJ_DSC_P_UNUSED]; /* must be zero */
uint64_t checksum; /* checksum of above fields */
uint64_t root_offset;
/* unique runID for this program run - persistent but not checksummed */
uint64_t run_id;
uint64_t root_size;
/*
* These flags can be set from a conversion tool and are set only for
* the first recovery of the pool.
*/
uint64_t conversion_flags;
uint64_t heap_size;
struct stats_persistent stats_persistent;
char pmem_reserved[496]; /* must be zeroed */
/* some run-time state, allocated out of memory pool... */
void *addr; /* mapped region */
int is_pmem; /* true if pool is PMEM */
int rdonly; /* true if pool is opened read-only */
struct palloc_heap heap;
struct lane_descriptor lanes_desc;
uint64_t uuid_lo;
int is_dev_dax; /* true if mapped on device dax */
struct ctl *ctl; /* top level node of the ctl tree structure */
struct stats *stats;
struct pool_set *set; /* pool set info */
struct pmemobjpool *replica; /* next replica */
/* per-replica functions: pmem or non-pmem */
persist_local_fn persist_local; /* persist function */
flush_local_fn flush_local; /* flush function */
drain_local_fn drain_local; /* drain function */
memcpy_local_fn memcpy_local; /* persistent memcpy function */
memmove_local_fn memmove_local; /* persistent memmove function */
memset_local_fn memset_local; /* persistent memset function */
/* for 'master' replica: with or without data replication */
struct pmem_ops p_ops;
PMEMmutex rootlock; /* root object lock */
int is_master_replica;
int has_remote_replicas;
/* remote replica section */
void *rpp; /* RPMEMpool opaque handle if it is a remote replica */
uintptr_t remote_base; /* beginning of the remote pool */
char *node_addr; /* address of a remote node */
char *pool_desc; /* descriptor of a poolset */
persist_remote_fn persist_remote; /* remote persist function */
int vg_boot;
int tx_debug_skip_expensive_checks;
struct tx_parameters *tx_params;
/*
* Locks are dynamically allocated on FreeBSD. Keep track so
* we can free them on pmemobj_close.
*/
PMEMmutex_internal *mutex_head;
PMEMrwlock_internal *rwlock_head;
PMEMcond_internal *cond_head;
/* padding to align size of this structure to page boundary */
/* sizeof(unused2) == 8192 - offsetof(struct pmemobjpool, unused2) */
char unused2[992];
};
/*
* Stored in the 'size' field of oobh header, determines whether the object
* is internal or not. Internal objects are skipped in pmemobj iteration
* functions.
*/
#define OBJ_INTERNAL_OBJECT_MASK ((1ULL) << 15)
#define CLASS_ID_FROM_FLAG(flag)\
((uint16_t)((flag) >> 48))
/*
* pmemobj_get_uuid_lo -- (internal) evaluates XOR sum of least significant
* 8 bytes with most significant 8 bytes.
*/
static inline uint64_t
pmemobj_get_uuid_lo(PMEMobjpool *pop)
{
uint64_t uuid_lo = 0;
for (int i = 0; i < 8; i++) {
uuid_lo = (uuid_lo << 8) |
(pop->hdr.poolset_uuid[i] ^
pop->hdr.poolset_uuid[8 + i]);
}
return uuid_lo;
}
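/*
 * Example: for poolset_uuid bytes b0..b15 the result is the byte sequence
 * (b0 ^ b8), (b1 ^ b9), ..., (b7 ^ b15) folded into a single 64-bit value,
 * with (b0 ^ b8) ending up as the most significant byte.
 */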
/*
* OBJ_OID_IS_VALID -- (internal) checks if 'oid' is valid
*/
static inline int
OBJ_OID_IS_VALID(PMEMobjpool *pop, PMEMoid oid)
{
return OBJ_OID_IS_NULL(oid) ||
(oid.pool_uuid_lo == pop->uuid_lo &&
oid.off >= pop->heap_offset &&
oid.off < pop->heap_offset + pop->heap_size);
}
static inline int
OBJ_OFF_IS_VALID_FROM_CTX(void *ctx, uint64_t offset)
{
PMEMobjpool *pop = (PMEMobjpool *)ctx;
return OBJ_OFF_IS_VALID(pop, offset);
}
void obj_init(void);
void obj_fini(void);
int obj_read_remote(void *ctx, uintptr_t base, void *dest, void *addr,
size_t length);
/*
* (debug helper macro) logs notice message if used inside a transaction
*/
#ifdef DEBUG
#define _POBJ_DEBUG_NOTICE_IN_TX()\
_pobj_debug_notice(__func__, NULL, 0)
#else
#define _POBJ_DEBUG_NOTICE_IN_TX() do {} while (0)
#endif
#ifdef __cplusplus
}
#endif
#endif
| 8,524 | 29.887681 | 80 |
h
|