Search is not available for this dataset
repo
stringlengths
2
152
file
stringlengths
15
239
code
stringlengths
0
58.4M
file_length
int64
0
58.4M
avg_line_length
float64
0
1.81M
max_line_length
int64
0
12.7M
extension_type
stringclasses
364 values
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmempool/sync.c
/* * Copyright 2016-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 */

/*
 * sync.c -- a module for poolset synchronizing
 */

#include <stdio.h>
#include <stdint.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <unistd.h>
#include <fcntl.h>
#include <limits.h>

#include "libpmem.h"
#include "replica.h"
#include "out.h"
#include "os.h"
#include "util_pmem.h"
#include "util.h"

#ifdef USE_RPMEM
#include "rpmem_common.h"
#include "rpmem_ssh.h"
#endif

/* log format for a single bad block: offset, length, healthy replica no. */
#define BB_DATA_STR "offset 0x%llx, length 0x%x, nhealthy %i"

/* defines 'struct bb_vec' - the vector of the 'struct bad_block' structures */
VEC(bb_vec, struct bad_block);

/*
 * validate_args -- (internal) check whether passed arguments are valid
 *
 * Returns 0 on success; on failure returns -1 and makes sure errno is set
 * (EINVAL if the failing check did not set it).
 */
static int
validate_args(struct pool_set *set)
{
	LOG(3, "set %p", set);
	ASSERTne(set, NULL);

	/* the checks below help detect use of incorrect poolset file */

	/*
	 * check if all parts in the poolset are large enough
	 * (now replication works only for pmemobj pools)
	 */
	if (replica_check_part_sizes(set, PMEMOBJ_MIN_POOL)) {
		LOG(2, "part sizes check failed");
		goto err;
	}

	/*
	 * check if all directories for part files exist
	 */
	if (replica_check_part_dirs(set)) {
		LOG(2, "part directories check failed");
		goto err;
	}

	return 0;

err:
	if (errno == 0)
		errno = EINVAL;
	return -1;
}

/*
 * sync_copy_data -- (internal) copy data from the healthy replica
 *                   to the broken one
 *
 * 'off'/'len' describe the region relative to the beginning of the pool;
 * 'rep_h' is the healthy (source) replica, 'rep' the broken (destination)
 * one, and 'part' the destination part that 'dst_addr' points into.
 * Returns 0 on success, -1 on a remote transfer failure.
 */
static int
sync_copy_data(void *src_addr, void *dst_addr, size_t off, size_t len,
		struct pool_replica *rep_h,
		struct pool_replica *rep, const struct pool_set_part *part)
{
	LOG(3, "src_addr %p dst_addr %p off %zu len %zu "
		"rep_h %p rep %p part %p",
		src_addr, dst_addr, off, len, rep_h, rep, part);

	int ret;

	if (rep->remote) {
		/* destination replica is remote -- push data over rpmem */
		LOG(10,
			"copying data (offset 0x%zx length 0x%zx) to remote node -- '%s' on '%s'",
			off, len,
			rep->remote->pool_desc,
			rep->remote->node_addr);

		ret = Rpmem_persist(rep->remote->rpp, off, len, 0, 0);
		if (ret) {
			LOG(1,
				"copying data to remote node failed -- '%s' on '%s'",
				rep->remote->pool_desc,
				rep->remote->node_addr);
			return -1;
		}
	} else if (rep_h->remote) {
		/* healthy replica is remote -- read the data from it */
		LOG(10,
			"reading data (offset 0x%zx length 0x%zx) from remote node -- '%s' on '%s'",
			off, len,
			rep_h->remote->pool_desc,
			rep_h->remote->node_addr);

		ret = Rpmem_read(rep_h->remote->rpp, dst_addr, off, len, 0);
		if (ret) {
			LOG(1,
				"reading data from remote node failed -- '%s' on '%s'",
				rep_h->remote->pool_desc,
				rep_h->remote->node_addr);
			return -1;
		}
	} else {
		/* both replicas are local -- plain memcpy + persist */
		LOG(10,
			"copying data (offset 0x%zx length 0x%zx) from local replica -- '%s'",
			off, len, rep_h->part[0].path);

		/* copy all data */
		memcpy(dst_addr, src_addr, len);
		util_persist(part->is_dev_dax, dst_addr, len);
	}

	return 0;
}

/*
 * sync_recreate_header -- (internal) recreate the header
 *
 * Rebuilds the pool header of part 'p' in replica 'r' from the attributes
 * of a healthy source header 'src_hdr'.
 */
static int
sync_recreate_header(struct pool_set *set, unsigned r, unsigned p,
			struct pool_hdr *src_hdr)
{
	LOG(3, "set %p replica %u part %u src_hdr %p", set, r, p, src_hdr);

	struct pool_attr attr;
	util_pool_hdr2attr(&attr, src_hdr);

	if (util_header_create(set, r, p, &attr, 1) != 0) {
		LOG(1, "part headers create failed for replica %u part %u",
			r, p);
		errno = EINVAL;
		return -1;
	}

	return 0;
}

/*
 * sync_mark_replica_no_badblocks -- (internal) mark replica as not having
 *                                   bad blocks
 */
static void
sync_mark_replica_no_badblocks(unsigned repn,
				struct poolset_health_status *set_hs)
{
	LOG(3, "repn %u set_hs %p", repn, set_hs);

	struct replica_health_status *rhs = REP_HEALTH(set_hs, repn);

	if (rhs->flags & HAS_BAD_BLOCKS) {
		rhs->flags &= ~HAS_BAD_BLOCKS;
		LOG(4, "replica %u has no bad blocks now", repn);
	}
}

/*
 * sync_mark_part_no_badblocks -- (internal) mark part as not having bad blocks
 */
static void
sync_mark_part_no_badblocks(unsigned repn, unsigned partn,
				struct poolset_health_status *set_hs)
{
	LOG(3, "repn %u partn %u set_hs %p", repn, partn, set_hs);

	struct replica_health_status *rhs = REP_HEALTH(set_hs, repn);

	if (rhs->part[PART_HEALTHidx(rhs, partn)].flags & HAS_BAD_BLOCKS) {
		rhs->part[PART_HEALTHidx(rhs, partn)].flags &=
			~HAS_BAD_BLOCKS;
		LOG(4, "replica %u part %u has no bad blocks now",
			repn, partn);
	}
}

/*
 * sync_recalc_badblocks --
 * (internal) recalculate offset and length
 * of bad blocks to absolute ones
 * (relative to the beginning of the pool)
 */
static int
sync_recalc_badblocks(struct pool_set *set,
			struct poolset_health_status *set_hs)
{
	LOG(3, "set %p set_hs %p", set, set_hs);

	/* header size for all headers but the first one */
	size_t hdrsize = (set->options & (OPTION_SINGLEHDR | OPTION_NOHDRS))
			? 0 : Mmap_align;

	for (unsigned r = 0; r < set->nreplicas; ++r) {
		struct pool_replica *rep = REP(set, r);
		struct replica_health_status *rep_hs = set_hs->replica[r];

		for (unsigned p = 0; p < rep->nparts; ++p) {
			struct part_health_status *phs = &rep_hs->part[p];

			if (!replica_part_has_bad_blocks(phs)) {
				/* skip parts with no bad blocks */
				continue;
			}

			ASSERTne(phs->bbs.bb_cnt, 0);
			ASSERTne(phs->bbs.bbv, NULL);

			LOG(10, "Replica %u part %u HAS %u bad blocks",
				r, p, phs->bbs.bb_cnt);

			size_t part_off = replica_get_part_offset(set, r, p);

			for (unsigned i = 0; i < phs->bbs.bb_cnt; i++) {
				LOG(10,
					"relative bad block #%i: offset %llu, length %u",
					i,
					phs->bbs.bbv[i].offset,
					phs->bbs.bbv[i].length);

				size_t off = phs->bbs.bbv[i].offset;
				size_t len = phs->bbs.bbv[i].length;

				/* whole bad block lies inside the header */
				if (len + off <= hdrsize)
					continue;

				/* parts #>0 are mapped without the header */
				if (p > 0 && hdrsize > 0) {
					if (off >= hdrsize) {
						/*
						 * Bad block does not overlap
						 * with the header, so only
						 * adjust the offset.
						 */
						off -= hdrsize;
					} else {
						/*
						 * Bad block overlaps
						 * with the header,
						 * so adjust the length
						 * and zero the offset.
						 */
						len -= hdrsize - off;
						off = 0;
					}
				}

				replica_align_badblock_offset_length(&off,
								&len,
								set, r, p);

				phs->bbs.bbv[i].offset = part_off + off;
				phs->bbs.bbv[i].length = (unsigned)len;

				LOG(10,
					"absolute bad block #%i: offset 0x%llx, length 0x%x",
					i,
					phs->bbs.bbv[i].offset,
					phs->bbs.bbv[i].length);
			}
		}
	}

	return 0;
}

/*
 * sync_badblocks_find_healthy_replica -- (internal) look for a healthy replica
 *                                        for each bad block
 *
 * This function looks for a healthy replica for each bad block. Bad blocks
 * can overlap across replicas, so each bad block may have to be divided
 * into smaller parts which can be fixed using different healthy replica.
 *
 * Key variables:
 * - bbv_all[] - array containing all (possibly divided) bad blocks
 *   from all previous replicas.
 * - bbv_aux[] - array containing all (possibly divided) bad blocks
 *   from all previous parts of the current replica merged with
 *   these bad blocks from bbv_all[] that have offsets less or equal
 *   the greatest bad block's offset in the previous part.
 *
 * This function merges bad blocks from bbv_all[] with bad blocks
 * from the current part and writes the outcome bad blocks to bbv_aux[].
 * Only bad blocks with offsets less or equal the greatest bad block's offset
 * in the current part will be moved from bbv_all[] to bbv_aux[].
 * The rest of them has to be moved at the end by sync_badblocks_move_vec().
 *
 * bbv_aux[] becomes new bbv_all[] and bbv_aux[] is zeroed
 * before checking the next replica (bbv_all = bbv_aux; bbv_aux = 0).
 *
 * For example (all replicas have only one part):
 * - bbv_all with rep#0: |__----___________----__|
 * - merged with rep#1:  |____----_______----____|
 * - gives such bbv_aux: |__11--00_______00--11__|
 * - merged with rep#2:  |__________---__________|
 * - gives such bbv_aux: |__112200__000__002211__| (all bad blocks can be fixed)
 *
 * where:
 * '_' stands for a healthy block (no bad block)
 * '-' stands for a bad block with nhealthy == NO_HEALTHY_REPLICA
 * 'N' stands for a bad block with nhealthy == N (can be fixed using rep#N)
 */
static int
sync_badblocks_find_healthy_replica(struct part_health_status *phs,
					int rep,
					struct bb_vec *pbbv_all,
					struct bb_vec *pbbv_aux,
					unsigned *i_all)
{
	LOG(3, "phs %p rep %i pbbv_all %p pbbv_aux %p i_all %i",
		phs, rep, pbbv_all, pbbv_aux, *i_all);

	struct bad_block bb_add;	/* the element which is being added */
	struct bad_block bb_new;	/* a new element */
	struct bad_block *pbb_all;	/* current element of bbv_all[] */

	unsigned long long beg_prev;
	unsigned long long end_prev;
	unsigned long long beg_new;
	unsigned long long end_new;
	unsigned len_prev;
	unsigned len_new;

	size_t size_all = VEC_SIZE(pbbv_all);

	if (size_all == 0) {
		/* there were no bad blocks so far, so fill up bbv_aux[] */
		for (unsigned i = 0; i < phs->bbs.bb_cnt; i++) {
			bb_add = phs->bbs.bbv[i];

			if (rep > 0)
				/* bad block can be fixed with replica #0 */
				bb_add.nhealthy = 0;

			if (VEC_PUSH_BACK(pbbv_aux, bb_add))
				return -1;

			LOG(10,
				"added bad block (prev-empty): " BB_DATA_STR,
				bb_add.offset, bb_add.length,
				bb_add.nhealthy);
		}
	} else {
		/* position on the first not-yet-consumed saved bad block */
		if (*i_all < size_all) {
			pbb_all = VEC_GET(pbbv_all, (*i_all)++);
		} else {
			pbb_all = NULL;
		}

		for (unsigned i = 0; i < phs->bbs.bb_cnt; i++) {
			bb_new = phs->bbs.bbv[i];

			LOG(10,
				" * (%u) inserting new bad block: "
				BB_DATA_STR,
				i + 1,
				bb_new.offset, bb_new.length,
				bb_new.nhealthy);

			/* skip a fully-consumed saved element */
			if (pbb_all == NULL || pbb_all->length == 0) {
				if (*i_all < size_all)
					pbb_all = VEC_GET(pbbv_all,
							(*i_all)++);
				else
					pbb_all = NULL;
			}

			/* all from bbv_all before the bb_new */
			while (pbb_all != NULL &&
					pbb_all->offset + pbb_all->length - 1
						< bb_new.offset) {
				if (pbb_all->nhealthy == NO_HEALTHY_REPLICA)
					/* can be fixed with this replica */
					pbb_all->nhealthy = rep;

				if (VEC_PUSH_BACK(pbbv_aux, *pbb_all))
					return -1;

				LOG(10,
					"added bad block (prev-before): "
					BB_DATA_STR,
					pbb_all->offset, pbb_all->length,
					pbb_all->nhealthy);

				if (*i_all < size_all) {
					pbb_all = VEC_GET(pbbv_all,
							(*i_all)++);
				} else {
					pbb_all = NULL;
					break;
				}
			}

			beg_new = bb_new.offset;
			len_new = bb_new.length;
			end_new = beg_new + len_new - 1;

			/* all pbb_all overlapping with the bb_new */
			while (len_new > 0 && pbb_all != NULL) {
				beg_prev = pbb_all->offset;
				len_prev = pbb_all->length;
				end_prev = beg_prev + len_prev - 1;

				/* check if new overlaps with prev */
				if (end_prev < beg_new || end_new < beg_prev)
					break;

				/*
				 * 1st part: non-overlapping part
				 * of pbb_all or bb_new
				 */
				if (beg_prev < beg_new) {
					/* non-overlapping part of pbb_all */
					bb_add.offset = beg_prev;
					bb_add.length = (unsigned)
							(beg_new - beg_prev);

					if (pbb_all->nhealthy !=
							NO_HEALTHY_REPLICA) {
						bb_add.nhealthy =
							pbb_all->nhealthy;
					} else {
						/*
						 * It can be fixed with
						 * this replica.
						 */
						bb_add.nhealthy = rep;
					}

					if (VEC_PUSH_BACK(pbbv_aux, bb_add))
						return -1;

					LOG(10,
						"added bad block (prev-only): "
						BB_DATA_STR,
						bb_add.offset, bb_add.length,
						bb_add.nhealthy);

					beg_prev += bb_add.length;
					len_prev -= bb_add.length;
				} else if (beg_new < beg_prev) {
					/* non-overlapping part of bb_new */
					bb_add.offset = beg_new;
					bb_add.length = (unsigned)
							(beg_prev - beg_new);

					if (rep == 0) {
						bb_add.nhealthy =
							NO_HEALTHY_REPLICA;
					} else {
						/*
						 * It can be fixed with any
						 * previous replica, so let's
						 * choose replica #0.
						 */
						bb_add.nhealthy = 0;
					}

					if (VEC_PUSH_BACK(pbbv_aux, bb_add))
						return -1;

					LOG(10,
						"added bad block (new-only): "
						BB_DATA_STR,
						bb_add.offset, bb_add.length,
						bb_add.nhealthy);

					beg_new += bb_add.length;
					len_new -= bb_add.length;
				}

				/*
				 * 2nd part: overlapping part
				 * of pbb_all and bb_new
				 */
				if (len_prev <= len_new) {
					bb_add.offset = beg_prev;
					bb_add.length = len_prev;
					beg_new += len_prev;
					len_new -= len_prev;
					/* whole pbb_all was added */
					len_prev = 0;
				} else {
					bb_add.offset = beg_new;
					bb_add.length = len_new;
					beg_prev += len_new;
					len_prev -= len_new;
					/* whole bb_new was added */
					len_new = 0;
				}

				bb_add.nhealthy = pbb_all->nhealthy;

				if (VEC_PUSH_BACK(pbbv_aux, bb_add))
					return -1;

				LOG(10,
					"added bad block (common): "
					BB_DATA_STR,
					bb_add.offset, bb_add.length,
					bb_add.nhealthy);

				/* update pbb_all */
				pbb_all->offset = beg_prev;
				pbb_all->length = len_prev;

				if (len_prev == 0) {
					if (*i_all < size_all)
						pbb_all = VEC_GET(pbbv_all,
								(*i_all)++);
					else
						pbb_all = NULL;
				}
			}

			/* the rest of the bb_new */
			if (len_new > 0) {
				bb_add.offset = beg_new;
				bb_add.length = len_new;

				if (rep > 0)
					/* it can be fixed with replica #0 */
					bb_add.nhealthy = 0;
				else
					bb_add.nhealthy = NO_HEALTHY_REPLICA;

				if (VEC_PUSH_BACK(pbbv_aux, bb_add))
					return -1;

				LOG(10,
					"added bad block (new-rest): "
					BB_DATA_STR,
					bb_add.offset, bb_add.length,
					bb_add.nhealthy);
			}
		}

		if (pbb_all != NULL && pbb_all->length > 0 && *i_all > 0)
			/* this pbb_all will be used again in the next part */
			(*i_all)--;
	}

	return 0;
}

/*
 * sync_badblocks_assign_healthy_replica -- (internal) assign healthy replica
 *                                          for each bad block
 */
static int
sync_badblocks_assign_healthy_replica(struct part_health_status *phs,
					int rep,
					struct bb_vec *pbbv_all,
					unsigned *i_all)
{
	LOG(3, "phs %p rep %i pbbv_all %p i_all %i",
		phs, rep, pbbv_all, *i_all);

	struct bad_block bb_new;	/* a new element */
	struct bad_block bb_old;	/* an old element */
	struct bad_block *pbb_all;	/* current element of bbv_all[] */
	unsigned length_left;

	struct bb_vec bbv_new = VEC_INITIALIZER;
	size_t size_all = VEC_SIZE(pbbv_all);
	pbb_all = VEC_GET(pbbv_all, *i_all);

	for (unsigned i = 0; i < phs->bbs.bb_cnt; i++) {
		bb_old = phs->bbs.bbv[i];

		LOG(10,
			"assigning old bad block: " BB_DATA_STR,
			bb_old.offset, bb_old.length, bb_old.nhealthy);

		/*
		 * Skip all bad blocks from bbv_all with offsets
		 * less than the offset of the current bb_old.
		 */
		while (pbb_all->offset < bb_old.offset) {
			/* (*i_all) has to be less than (size_all - 1) */
			ASSERT(*i_all < size_all - 1);
			pbb_all = VEC_GET(pbbv_all, ++(*i_all));
		}

		bb_new.offset = bb_old.offset;
		length_left = bb_old.length;

		/* re-emit bb_old split the same way it is split in bbv_all */
		while (length_left > 0) {
			LOG(10,
				"checking saved bad block: " BB_DATA_STR,
				pbb_all->offset, pbb_all->length,
				pbb_all->nhealthy);

			ASSERTeq(pbb_all->offset, bb_new.offset);
			ASSERT(pbb_all->length <= length_left);

			bb_new.length = pbb_all->length;
			bb_new.nhealthy = pbb_all->nhealthy;

			if (VEC_PUSH_BACK(&bbv_new, bb_new))
				goto error_exit;

			LOG(10,
				"added new bad block: " BB_DATA_STR,
				bb_new.offset, bb_new.length,
				bb_new.nhealthy);

			bb_new.offset += bb_new.length;
			length_left -= bb_new.length;

			if (length_left == 0)
				continue;

			/* (*i_all) has to be less than (size_all - 1) */
			ASSERT(*i_all < size_all - 1);
			pbb_all = VEC_GET(pbbv_all, ++(*i_all));
		}
	}

	/* replace the old bad-block array with the newly built one */
	Free(phs->bbs.bbv);

	phs->bbs.bbv = VEC_ARR(&bbv_new);
	phs->bbs.bb_cnt = (unsigned)VEC_SIZE(&bbv_new);

	LOG(10, "added %u new bad blocks", phs->bbs.bb_cnt);

	return 0;

error_exit:
	VEC_DELETE(&bbv_new);
	return -1;
}

/*
 * sync_badblocks_move_vec -- (internal) move bad blocks from vector pbbv_all
 *                            to vector pbbv_aux
 */
static int
sync_badblocks_move_vec(struct bb_vec *pbbv_all,
			struct bb_vec *pbbv_aux,
			unsigned i_all,
			unsigned rep)
{
	LOG(3, "pbbv_all %p pbbv_aux %p i_all %u rep %u",
		pbbv_all, pbbv_aux, i_all, rep);

	size_t size_all = VEC_SIZE(pbbv_all);
	struct bad_block *pbb_all;

	while (i_all < size_all) {
		pbb_all = VEC_GET(pbbv_all, i_all++);

		if (pbb_all->length == 0)
			continue;

		if (pbb_all->nhealthy == NO_HEALTHY_REPLICA && rep > 0)
			/* it can be fixed using the last replica */
			pbb_all->nhealthy = (int)rep;

		if (VEC_PUSH_BACK(pbbv_aux, *pbb_all))
			return -1;

		LOG(10,
			"added bad block (prev-after): " BB_DATA_STR,
			pbb_all->offset, pbb_all->length,
			pbb_all->nhealthy);
	}

	return 0;
}

/*
 * sync_check_bad_blocks_overlap -- (internal) check if there are uncorrectable
 *                                  bad blocks (bad blocks overlapping
 *                                  in all replicas)
 *
 * Returns: 0 - all bad blocks can be fixed (healthy replicas assigned),
 *          1 - an uncorrectable bad block was found,
 *         -1 - an error occurred.
 */
static int
sync_check_bad_blocks_overlap(struct pool_set *set,
				struct poolset_health_status *set_hs)
{
	LOG(3, "set %p set_hs %p", set, set_hs);

	struct bb_vec bbv_all = VEC_INITIALIZER;
	struct bb_vec bbv_aux = VEC_INITIALIZER;

	int ret = -1;

	for (unsigned r = 0; r < set->nreplicas; ++r) {
		struct pool_replica *rep = REP(set, r);
		struct replica_health_status *rep_hs = set_hs->replica[r];

		unsigned i_all = 0;	/* index in bbv_all */

		for (unsigned p = 0; p < rep->nparts; ++p) {
			struct part_health_status *phs = &rep_hs->part[p];

			if (!replica_part_has_bad_blocks(phs)) {
				/* skip parts with no bad blocks */
				continue;
			}

			ASSERTne(phs->bbs.bb_cnt, 0);
			ASSERTne(phs->bbs.bbv, NULL);

			LOG(10, "Replica %u part %u HAS %u bad blocks",
				r, p, phs->bbs.bb_cnt);

			/*
			 * This function merges bad blocks from bbv_all
			 * with bad blocks from the current part
			 * and writes the outcome bad blocks to bbv_aux.
			 * Only bad blocks with offsets less or equal
			 * the greatest bad block's offset in the current part
			 * will be moved from bbv_all to bbv_aux.
			 * The rest of them has to be moved at the end
			 * by sync_badblocks_move_vec() below.
			 */
			if (sync_badblocks_find_healthy_replica(phs, (int)r,
						&bbv_all, &bbv_aux, &i_all))
				goto exit;
		}

		/*
		 * Move the rest of bad blocks from bbv_all to bbv_aux
		 * (for more details see the comment above).
		 * All these bad blocks can be fixed
		 * using the last replica 'r'.
		 */
		if (sync_badblocks_move_vec(&bbv_all, &bbv_aux, i_all, r))
			/*
			 * NOTE(review): this 'return -1' bypasses the
			 * VEC_DELETE() cleanup at 'exit' below, so bbv_all
			 * and bbv_aux appear to leak here -- confirm whether
			 * 'goto exit' was intended.
			 */
			return -1;

		/* bbv_aux becomes a new bbv_all */
		VEC_MOVE(&bbv_all, &bbv_aux);
		i_all = 0;
	}

	ret = 0;

	/* check if there is an uncorrectable bad block */
	size_t size_all = VEC_SIZE(&bbv_all);
	for (unsigned i = 0; i < size_all; i++) {
		struct bad_block *pbb_all = VEC_GET(&bbv_all, i);
		if (pbb_all->nhealthy == NO_HEALTHY_REPLICA) {
			ret = 1; /* this bad block cannot be fixed */

			LOG(1,
				"uncorrectable bad block found: offset 0x%llx, length 0x%x",
				pbb_all->offset, pbb_all->length);

			goto exit;
		}
	}

	/*
	 * All bad blocks can be fixed,
	 * so assign healthy replica for each of them.
	 */
	for (unsigned r = 0; r < set->nreplicas; ++r) {
		struct pool_replica *rep = REP(set, r);
		struct replica_health_status *rep_hs = set_hs->replica[r];

		if (!replica_has_bad_blocks(r, set_hs)) {
			/* skip replicas with no bad blocks */
			continue;
		}

		unsigned i_all = 0;	/* index in bbv_all */

		for (unsigned p = 0; p < rep->nparts; ++p) {
			struct part_health_status *phs = &rep_hs->part[p];

			if (!replica_part_has_bad_blocks(phs)) {
				/* skip parts with no bad blocks */
				continue;
			}

			if (sync_badblocks_assign_healthy_replica(phs, (int)r,
						&bbv_all, &i_all))
				goto exit;
		}
	}

exit:
	VEC_DELETE(&bbv_aux);
	VEC_DELETE(&bbv_all);

	return ret;
}

/*
 * sync_badblocks_data -- (internal) clear bad blocks in replica
 */
static int
sync_badblocks_data(struct pool_set *set,
			struct poolset_health_status *set_hs)
{
	LOG(3, "set %p, set_hs %p", set, set_hs);

	struct pool_replica *rep_h;

	for (unsigned r = 0; r < set->nreplicas; ++r) {
		struct pool_replica *rep = REP(set, r);
		struct replica_health_status *rep_hs = set_hs->replica[r];

		for (unsigned p = 0; p < rep->nparts; ++p) {
			struct part_health_status *phs = &rep_hs->part[p];

			if (!replica_part_has_bad_blocks(phs)) {
				/* skip parts with no bad blocks */
				continue;
			}

			ASSERTne(phs->bbs.bb_cnt, 0);
			ASSERTne(phs->bbs.bbv, NULL);

			const struct pool_set_part *part = &rep->part[p];
			size_t part_off = replica_get_part_offset(set, r, p);

			for (unsigned
			i = 0; i < phs->bbs.bb_cnt; i++) {
				/* offsets in bbv are pool-absolute here */
				size_t off = phs->bbs.bbv[i].offset
						- part_off;
				size_t len = phs->bbs.bbv[i].length;

				ASSERT(phs->bbs.bbv[i].nhealthy >= 0);
				rep_h = REP(set,
					(unsigned)phs->bbs.bbv[i].nhealthy);

				void *src_addr = ADDR_SUM(rep_h->part[0].addr,
							part_off + off);
				void *dst_addr = ADDR_SUM(part->addr, off);

				if (sync_copy_data(src_addr, dst_addr,
							part_off + off, len,
							rep_h, rep, part))
					return -1;
			}

			/* free array of bad blocks */
			Free(phs->bbs.bbv);
			phs->bbs.bbv = NULL;

			/* mark part as having no bad blocks */
			sync_mark_part_no_badblocks(r, p, set_hs);
		}

		/* mark replica as having no bad blocks */
		sync_mark_replica_no_badblocks(r, set_hs);
	}

	LOG(1, "all bad blocks have been fixed");

	if (replica_remove_all_recovery_files(set_hs)) {
		LOG(1, "removing bad block recovery files failed");
		return -1;
	}

	return 0;
}

/*
 * recreate_broken_parts -- (internal) create parts in place of the broken ones
 */
static int
recreate_broken_parts(struct pool_set *set,
			struct poolset_health_status *set_hs,
			int fix_bad_blocks)
{
	LOG(3, "set %p set_hs %p fix_bad_blocks %i",
		set, set_hs, fix_bad_blocks);

	for (unsigned r = 0; r < set_hs->nreplicas; ++r) {
		/* remote replicas are recreated elsewhere */
		if (set->replica[r]->remote)
			continue;

		struct pool_replica *broken_r = set->replica[r];

		for (unsigned p = 0; p < set_hs->replica[r]->nparts; ++p) {
			/* skip unbroken parts */
			if (!replica_is_part_broken(r, p, set_hs))
				continue;

			/* remove parts from broken replica */
			if (replica_remove_part(set, r, p, fix_bad_blocks)) {
				LOG(2, "cannot remove part");
				return -1;
			}

			/* create removed part and open it */
			if (util_part_open(&broken_r->part[p], 0,
					1 /* create */)) {
				LOG(2, "cannot open/create parts");
				return -1;
			}

			sync_mark_part_no_badblocks(r, p, set_hs);
		}
	}

	return 0;
}

/*
 * fill_struct_part_uuids -- (internal) set part uuids in pool_set structure
 */
static void
fill_struct_part_uuids(struct pool_set *set, unsigned repn,
			struct poolset_health_status *set_hs)
{
	LOG(3, "set %p, repn %u, set_hs %p", set, repn, set_hs);

	struct pool_replica *rep = REP(set, repn);
	struct pool_hdr *hdrp;

	for (unsigned p = 0; p < rep->nhdrs; ++p) {
		/* skip broken parts */
		if (replica_is_part_broken(repn, p, set_hs))
			continue;

		hdrp = HDR(rep, p);
		memcpy(rep->part[p].uuid, hdrp->uuid, POOL_HDR_UUID_LEN);
	}
}

/*
 * is_uuid_already_used -- (internal) check if given uuid is assigned to
 *                         any of the earlier replicas
 */
static int
is_uuid_already_used(uuid_t uuid, struct pool_set *set, unsigned repn)
{
	for (unsigned r = 0; r < repn; ++r) {
		if (uuidcmp(uuid, PART(REP(set, r), 0)->uuid) == 0)
			return 1;
	}

	return 0;
}

/*
 * fill_struct_broken_part_uuids -- (internal) set part uuids in pool_set
 *                                  structure
 *
 * Recovers the uuid of each broken part from a neighbouring part or replica
 * header; generates a fresh uuid when no neighbour can provide one
 * (or when the poolset was transformed).
 */
static int
fill_struct_broken_part_uuids(struct pool_set *set, unsigned repn,
				struct poolset_health_status *set_hs,
				unsigned flags)
{
	LOG(3, "set %p, repn %u, set_hs %p, flags %u", set, repn, set_hs,
		flags);

	struct pool_replica *rep = REP(set, repn);
	struct pool_hdr *hdrp;

	for (unsigned p = 0; p < rep->nhdrs; ++p) {
		/* skip unbroken parts */
		if (!replica_is_part_broken(repn, p, set_hs))
			continue;

		/* check if part was damaged or was added by transform */
		if (replica_is_poolset_transformed(flags)) {
			/* generate new uuid for this part */
			if (util_uuid_generate(rep->part[p].uuid) < 0) {
				ERR("cannot generate pool set part UUID");
				errno = EINVAL;
				return -1;
			}
			continue;
		}

		if (!replica_is_part_broken(repn, p - 1, set_hs) &&
				!(set->options & OPTION_SINGLEHDR)) {
			/* try to get part uuid from the previous part */
			hdrp = HDRP(rep, p);
			memcpy(rep->part[p].uuid, hdrp->next_part_uuid,
				POOL_HDR_UUID_LEN);
		} else if (!replica_is_part_broken(repn, p + 1, set_hs) &&
				!(set->options & OPTION_SINGLEHDR)) {
			/* try to get part uuid from the next part */
			hdrp = HDRN(rep, p);
			memcpy(rep->part[p].uuid, hdrp->prev_part_uuid,
				POOL_HDR_UUID_LEN);
		} else if (p == 0 &&
				!replica_is_part_broken(repn - 1, 0,
							set_hs)) {
			/* try to get part uuid from the previous replica */
			hdrp = HDR(REPP(set, repn), 0);
			if (is_uuid_already_used(hdrp->next_repl_uuid, set,
					repn)) {
				ERR(
					"repeated uuid - some replicas were created with a different poolset file");
				errno = EINVAL;
				return -1;
			}
			memcpy(rep->part[p].uuid, hdrp->next_repl_uuid,
				POOL_HDR_UUID_LEN);
		} else if (p == 0 &&
				!replica_is_part_broken(repn + 1, 0,
							set_hs)) {
			/* try to get part uuid from the next replica */
			hdrp = HDR(REPN(set, repn), 0);
			if (is_uuid_already_used(hdrp->prev_repl_uuid, set,
					repn)) {
				ERR(
					"repeated uuid - some replicas were created with a different poolset file");
				errno = EINVAL;
				return -1;
			}
			memcpy(rep->part[p].uuid, hdrp->prev_repl_uuid,
				POOL_HDR_UUID_LEN);
		} else {
			/* generate new uuid for this part */
			if (util_uuid_generate(rep->part[p].uuid) < 0) {
				ERR("cannot generate pool set part UUID");
				errno = EINVAL;
				return -1;
			}
		}
	}

	return 0;
}

/*
 * fill_struct_uuids -- (internal) fill fields in pool_set needed for further
 *                      altering of uuids
 */
static int
fill_struct_uuids(struct pool_set *set, unsigned src_replica,
			struct poolset_health_status *set_hs, unsigned flags)
{
	LOG(3, "set %p, src_replica %u, set_hs %p, flags %u",
		set, src_replica, set_hs, flags);

	/* set poolset uuid */
	struct pool_hdr *src_hdr0 = HDR(REP(set, src_replica), 0);
	memcpy(set->uuid, src_hdr0->poolset_uuid, POOL_HDR_UUID_LEN);

	/* set unbroken parts' uuids */
	for (unsigned r = 0; r < set->nreplicas; ++r) {
		fill_struct_part_uuids(set, r, set_hs);
	}

	/* set broken parts' uuids */
	for (unsigned r = 0; r < set->nreplicas; ++r) {
		if (fill_struct_broken_part_uuids(set, r, set_hs, flags))
			return -1;
	}

	return 0;
}

/*
 * create_headers_for_broken_parts -- (internal) create headers for all new
 *                                    parts created in place of the broken ones
 */
static int
create_headers_for_broken_parts(struct pool_set *set, unsigned src_replica,
				struct poolset_health_status *set_hs)
{
	LOG(3, "set %p, src_replica %u, set_hs %p",
		set, src_replica, set_hs);

	struct pool_hdr *src_hdr = HDR(REP(set, src_replica), 0);

	for (unsigned r = 0; r < set_hs->nreplicas; ++r) {
		/* skip unbroken replicas */
		if (!replica_is_replica_broken(r, set_hs)
				&& !replica_has_bad_blocks(r, set_hs))
			continue;

		for (unsigned p = 0; p < set_hs->replica[r]->nhdrs; p++) {
			/* skip unbroken parts */
			if (!replica_is_part_broken(r, p, set_hs) &&
				!replica_part_has_corrupted_header(r, p,
								set_hs))
				continue;

			if (sync_recreate_header(set, r, p, src_hdr))
				return -1;
		}
	}

	return 0;
}

/*
 * copy_data_to_broken_parts -- (internal) copy data to all parts created
 *                              in place of the broken ones
 */
static int
copy_data_to_broken_parts(struct pool_set *set, unsigned healthy_replica,
				unsigned flags,
				struct poolset_health_status *set_hs)
{
	LOG(3, "set %p, healthy_replica %u, flags %u, set_hs %p",
		set, healthy_replica, flags, set_hs);

	/* get pool size from healthy replica */
	size_t poolsize = set->poolsize;

	for (unsigned r = 0; r < set_hs->nreplicas; ++r) {
		/* skip unbroken and consistent replicas */
		if (replica_is_replica_healthy(r, set_hs))
			continue;

		struct pool_replica *rep = REP(set, r);
		struct pool_replica *rep_h = REP(set, healthy_replica);

		for (unsigned p = 0; p < rep->nparts; ++p) {
			/* skip unbroken parts from consistent replicas */
			if (!replica_is_part_broken(r, p, set_hs) &&
				replica_is_replica_consistent(r, set_hs))
				continue;

			const struct pool_set_part *part = &rep->part[p];

			size_t off = replica_get_part_data_offset(set, r, p);
			size_t len = replica_get_part_data_len(set, r, p);

			/* do not allow copying too much data */
			if (off >= poolsize)
				continue;

			if (off + len > poolsize || rep->remote)
				len = poolsize - off;

			/*
			 * First part of replica is mapped
			 * with header
			 */
			size_t fpoff = (p == 0) ?
					POOL_HDR_SIZE : 0;
			void *src_addr = ADDR_SUM(rep_h->part[0].addr, off);
			void *dst_addr = ADDR_SUM(part->addr, fpoff);

			if (sync_copy_data(src_addr, dst_addr, off, len,
						rep_h, rep, part))
				return -1;
		}
	}

	return 0;
}

/*
 * grant_created_parts_perm -- (internal) set RW permission rights to all
 *                             the parts created in place of the broken ones
 */
static int
grant_created_parts_perm(struct pool_set *set, unsigned src_repn,
				struct poolset_health_status *set_hs)
{
	LOG(3, "set %p, src_repn %u, set_hs %p", set, src_repn, set_hs);

	/* choose the default permissions */
	mode_t def_mode = S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP;

	/* get permissions of the first part of the source replica */
	mode_t src_mode;
	os_stat_t sb;
	if (os_stat(PART(REP(set, src_repn), 0)->path, &sb) != 0) {
		ERR("cannot check file permissions of %s (replica %u, part %u)",
			PART(REP(set, src_repn), 0)->path, src_repn, 0);
		/* fall back to the defaults on stat failure */
		src_mode = def_mode;
	} else {
		src_mode = sb.st_mode;
	}

	/* set permissions to all recreated parts */
	for (unsigned r = 0; r < set_hs->nreplicas; ++r) {
		/* skip unbroken replicas */
		if (!replica_is_replica_broken(r, set_hs))
			continue;

		if (set->replica[r]->remote)
			continue;

		for (unsigned p = 0; p < set_hs->replica[r]->nparts; p++) {
			/* skip parts which were not created */
			if (!PART(REP(set, r), p)->created)
				continue;

			LOG(4, "setting permissions for part %u, replica %u",
				p, r);

			/* set rights to those of existing part files */
			if (os_chmod(PART(REP(set, r), p)->path, src_mode)) {
				ERR(
					"cannot set permission rights for created parts: replica %u, part %u",
					r, p);
				errno = EPERM;
				return -1;
			}
		}
	}

	return 0;
}

/*
 * update_parts_linkage -- (internal) set uuids linking recreated parts within
 *                         a replica
 */
static int
update_parts_linkage(struct pool_set *set, unsigned repn,
			struct poolset_health_status *set_hs)
{
	LOG(3, "set %p, repn %u, set_hs %p", set, repn, set_hs);

	struct pool_replica *rep = REP(set, repn);

	for (unsigned p = 0; p < rep->nhdrs; ++p) {
		struct pool_hdr *hdrp = HDR(rep, p);
		struct pool_hdr *prev_hdrp = HDRP(rep, p);
		struct pool_hdr *next_hdrp = HDRN(rep, p);

		/* set uuids in the current part */
		memcpy(hdrp->prev_part_uuid, PARTP(rep, p)->uuid,
			POOL_HDR_UUID_LEN);
		memcpy(hdrp->next_part_uuid, PARTN(rep, p)->uuid,
			POOL_HDR_UUID_LEN);
		util_checksum(hdrp, sizeof(*hdrp), &hdrp->checksum,
			1, POOL_HDR_CSUM_END_OFF(hdrp));

		/* set uuids in the previous part */
		memcpy(prev_hdrp->next_part_uuid, PART(rep, p)->uuid,
			POOL_HDR_UUID_LEN);
		util_checksum(prev_hdrp, sizeof(*prev_hdrp),
			&prev_hdrp->checksum, 1,
			POOL_HDR_CSUM_END_OFF(prev_hdrp));

		/* set uuids in the next part */
		memcpy(next_hdrp->prev_part_uuid, PART(rep, p)->uuid,
			POOL_HDR_UUID_LEN);
		util_checksum(next_hdrp, sizeof(*next_hdrp),
			&next_hdrp->checksum, 1,
			POOL_HDR_CSUM_END_OFF(next_hdrp));

		/* store pool's header */
		util_persist(PART(rep, p)->is_dev_dax, hdrp, sizeof(*hdrp));
		util_persist(PARTP(rep, p)->is_dev_dax, prev_hdrp,
			sizeof(*prev_hdrp));
		util_persist(PARTN(rep, p)->is_dev_dax, next_hdrp,
			sizeof(*next_hdrp));
	}

	return 0;
}

/*
 * update_replicas_linkage -- (internal) update uuids linking replicas
 */
static int
update_replicas_linkage(struct pool_set *set, unsigned repn)
{
	LOG(3, "set %p, repn %u", set, repn);

	struct pool_replica *rep = REP(set, repn);
	struct pool_replica *prev_r = REPP(set, repn);
	struct pool_replica *next_r = REPN(set, repn);

	ASSERT(rep->nparts > 0);
	ASSERT(prev_r->nparts > 0);
	ASSERT(next_r->nparts > 0);

	/* set uuids in the current replica */
	for (unsigned p = 0; p < rep->nhdrs; ++p) {
		struct pool_hdr *hdrp = HDR(rep, p);
		memcpy(hdrp->prev_repl_uuid, PART(prev_r, 0)->uuid,
			POOL_HDR_UUID_LEN);
		memcpy(hdrp->next_repl_uuid, PART(next_r, 0)->uuid,
			POOL_HDR_UUID_LEN);
		util_checksum(hdrp, sizeof(*hdrp), &hdrp->checksum,
			1, POOL_HDR_CSUM_END_OFF(hdrp));

		/* store pool's header */
		util_persist(PART(rep, p)->is_dev_dax, hdrp, sizeof(*hdrp));
	}

	/* set uuids in the previous replica */
	for (unsigned p = 0; p < prev_r->nhdrs; ++p) {
		struct pool_hdr *prev_hdrp = HDR(prev_r, p);
		memcpy(prev_hdrp->next_repl_uuid, PART(rep, 0)->uuid,
			POOL_HDR_UUID_LEN);
		util_checksum(prev_hdrp, sizeof(*prev_hdrp),
			&prev_hdrp->checksum, 1,
			POOL_HDR_CSUM_END_OFF(prev_hdrp));

		/* store pool's header */
		util_persist(PART(prev_r, p)->is_dev_dax, prev_hdrp,
			sizeof(*prev_hdrp));
	}

	/* set uuids in the next replica */
	for (unsigned p = 0; p < next_r->nhdrs; ++p) {
		struct pool_hdr *next_hdrp = HDR(next_r, p);
		memcpy(next_hdrp->prev_repl_uuid, PART(rep, 0)->uuid,
			POOL_HDR_UUID_LEN);
		util_checksum(next_hdrp, sizeof(*next_hdrp),
			&next_hdrp->checksum, 1,
			POOL_HDR_CSUM_END_OFF(next_hdrp));

		/* store pool's header */
		util_persist(PART(next_r, p)->is_dev_dax, next_hdrp,
			sizeof(*next_hdrp));
	}

	return 0;
}

/*
 * update_poolset_uuids -- (internal) update poolset uuid in recreated parts
 */
static int
update_poolset_uuids(struct pool_set *set, unsigned repn,
			struct poolset_health_status *set_hs)
{
	LOG(3, "set %p, repn %u, set_hs %p", set, repn, set_hs);

	struct pool_replica *rep = REP(set, repn);

	for (unsigned p = 0; p < rep->nhdrs; ++p) {
		struct pool_hdr *hdrp = HDR(rep, p);
		memcpy(hdrp->poolset_uuid, set->uuid, POOL_HDR_UUID_LEN);
		util_checksum(hdrp, sizeof(*hdrp), &hdrp->checksum,
			1, POOL_HDR_CSUM_END_OFF(hdrp));

		/* store pool's header */
		util_persist(PART(rep, p)->is_dev_dax, hdrp, sizeof(*hdrp));
	}

	return 0;
}

/*
 * update_remote_headers -- (internal) update headers of existing remote
 *                          replicas
 */
static int
update_remote_headers(struct pool_set *set)
{
	LOG(3, "set %p", set);

	for (unsigned r = 0; r < set->nreplicas; ++ r) {
		/* skip local or just created replicas */
		if (REP(set, r)->remote == NULL ||
				PART(REP(set, r), 0)->created == 1)
			continue;

		if (util_update_remote_header(set, r)) {
			LOG(1,
				"updating header of a remote replica no. %u failed",
				r);
			return -1;
		}
	}

	return 0;
}

/*
 * update_uuids -- (internal) set all uuids that might have changed or be unset
 *                 after recreating parts
 */
static int
update_uuids(struct pool_set *set, struct poolset_health_status *set_hs)
{
	LOG(3, "set %p, set_hs %p", set, set_hs);

	for (unsigned r = 0; r < set->nreplicas; ++r) {
		if (!replica_is_replica_healthy(r, set_hs))
			update_parts_linkage(set, r, set_hs);

		update_replicas_linkage(set, r);
		update_poolset_uuids(set, r, set_hs);
	}

	if (update_remote_headers(set))
		return -1;

	return 0;
}

/*
 * remove_remote -- (internal) remove remote pool
 */
static int
remove_remote(const char *target, const char *pool_set)
{
	LOG(3, "target %s, pool_set %s", target, pool_set);

#ifdef USE_RPMEM
	struct rpmem_target_info *info = rpmem_target_parse(target);
	if (!info)
		goto err_parse;

	/* run 'pmempool rm --remove --force' on the remote node over ssh */
	struct rpmem_ssh *ssh = rpmem_ssh_exec(info, "--remove",
					pool_set, "--force", NULL);
	if (!ssh) {
		goto err_ssh_exec;
	}

	if (rpmem_ssh_monitor(ssh, 0))
		goto err_ssh_monitor;

	int ret = rpmem_ssh_close(ssh);
	rpmem_target_free(info);

	return ret;

err_ssh_monitor:
	rpmem_ssh_close(ssh);
err_ssh_exec:
	rpmem_target_free(info);
err_parse:
	return -1;
#else
	FATAL("remote replication not supported");
	return -1;
#endif
}

/*
 * open_remote_replicas -- (internal) open all unbroken remote replicas
 */
static int
open_remote_replicas(struct pool_set *set,
			struct poolset_health_status *set_hs)
{
	LOG(3, "set %p, set_hs %p", set, set_hs);

	for (unsigned r = 0; r < set->nreplicas; r++) {
		struct pool_replica *rep = set->replica[r];
		if (!rep->remote)
			continue;
		if (!replica_is_replica_healthy(r, set_hs))
			continue;

		unsigned nlanes = REMOTE_NLANES;
		int ret = util_poolset_remote_replica_open(set, r,
				set->poolsize, 0, &nlanes);
		if (ret) {
			LOG(1, "Opening '%s' on '%s' failed",
				rep->remote->pool_desc,
				rep->remote->node_addr);
			return ret;
		}
	}

	return 0;
}

/*
 * create_remote_replicas -- (internal) recreate all broken replicas
 */
static int
create_remote_replicas(struct pool_set *set, struct
poolset_health_status *set_hs, unsigned flags) { LOG(3, "set %p, set_hs %p", set, set_hs); for (unsigned r = 0; r < set->nreplicas; r++) { struct pool_replica *rep = set->replica[r]; if (!rep->remote) continue; if (replica_is_replica_healthy(r, set_hs)) continue; if (!replica_is_poolset_transformed(flags)) { /* ignore errors from remove operation */ remove_remote(rep->remote->node_addr, rep->remote->pool_desc); } unsigned nlanes = REMOTE_NLANES; int ret = util_poolset_remote_replica_open(set, r, set->poolsize, 1, &nlanes); if (ret) { LOG(1, "Creating '%s' on '%s' failed", rep->remote->pool_desc, rep->remote->node_addr); return ret; } } return 0; } /* * sync_replica -- synchronize data across replicas within a poolset */ int replica_sync(struct pool_set *set, struct poolset_health_status *s_hs, unsigned flags) { LOG(3, "set %p, flags %u", set, flags); int ret = 0; struct poolset_health_status *set_hs = NULL; /* check if we already know the poolset health status */ if (s_hs == NULL) { /* validate poolset before checking its health */ if (validate_args(set)) return -1; /* examine poolset's health */ if (replica_check_poolset_health(set, &set_hs, 1 /* called from sync */, flags)) { LOG(1, "poolset health check failed"); return -1; } /* check if poolset is broken; if not, nothing to do */ if (replica_is_poolset_healthy(set_hs)) { LOG(1, "poolset is healthy"); goto out; } } else { set_hs = s_hs; } /* find a replica with healthy header; it will be the source of data */ unsigned healthy_replica = replica_find_healthy_replica(set_hs); unsigned healthy_header = healthy_replica; if (healthy_header == UNDEF_REPLICA) { healthy_header = replica_find_replica_healthy_header(set_hs); if (healthy_header == UNDEF_REPLICA) { ERR("no healthy replica found"); errno = EINVAL; ret = -1; goto out; } } /* in dry-run mode we can stop here */ if (is_dry_run(flags)) { LOG(1, "Sync in dry-run mode finished successfully"); goto out; } /* recreate broken parts */ if (recreate_broken_parts(set, 
set_hs, fix_bad_blocks(flags))) { ERR("recreating broken parts failed"); ret = -1; goto out; } /* open all part files */ if (replica_open_poolset_part_files(set)) { ERR("opening poolset part files failed"); ret = -1; goto out; } /* map all replicas */ if (util_poolset_open(set)) { ERR("opening poolset failed"); ret = -1; goto out; } /* this is required for opening remote pools */ set->poolsize = set_hs->replica[healthy_header]->pool_size; LOG(3, "setting the pool size (%zu) from replica #%u", set->poolsize, healthy_header); /* open all remote replicas */ if (open_remote_replicas(set, set_hs)) { ERR("opening remote replicas failed"); ret = -1; goto out; } /* recalculate offset and length of bad blocks */ if (sync_recalc_badblocks(set, set_hs)) { LOG(1, "syncing bad blocks data failed"); ret = -1; goto out; } /* * Check if there are uncorrectable bad blocks * (bad blocks overlapping in all replicas). */ int status = sync_check_bad_blocks_overlap(set, set_hs); if (status == -1) { LOG(1, "checking bad blocks failed"); ret = -1; goto out; } if (status == 1) { ERR( "a part of the pool has uncorrectable errors in all replicas"); errno = EINVAL; ret = -1; goto out; } LOG(3, "bad blocks do not overlap"); /* sync data in bad blocks */ if (sync_badblocks_data(set, set_hs)) { LOG(1, "syncing bad blocks data failed"); ret = -1; goto out; } /* find one good replica; it will be the source of data */ healthy_replica = replica_find_healthy_replica(set_hs); if (healthy_replica == UNDEF_REPLICA) { ERR("no healthy replica found"); errno = EINVAL; ret = -1; goto out; } /* update uuid fields in the set structure with part headers */ if (fill_struct_uuids(set, healthy_replica, set_hs, flags)) { ERR("gathering uuids failed"); ret = -1; goto out; } /* create headers for broken parts */ if (create_headers_for_broken_parts(set, healthy_replica, set_hs)) { ERR("creating headers for broken parts failed"); ret = -1; goto out; } /* create all remote replicas */ if (create_remote_replicas(set, 
set_hs, flags)) { ERR("creating remote replicas failed"); ret = -1; goto out; } /* check and copy data if possible */ if (copy_data_to_broken_parts(set, healthy_replica, flags, set_hs)) { ERR("copying data to broken parts failed"); ret = -1; goto out; } /* update uuids of replicas and parts */ if (update_uuids(set, set_hs)) { ERR("updating uuids failed"); ret = -1; goto out; } /* grant permissions to all created parts */ if (grant_created_parts_perm(set, healthy_replica, set_hs)) { ERR("granting permissions to created parts failed"); ret = -1; } out: if (s_hs == NULL) replica_free_poolset_health_status(set_hs); return ret; }
43,824
25.148568
81
c
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmempool/check_btt_info.c
/* * Copyright 2016-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /* * check_btt_info.c -- check BTT Info */ #include <stdlib.h> #include <stdint.h> #include <endian.h> #include "out.h" #include "btt.h" #include "libpmempool.h" #include "pmempool.h" #include "pool.h" #include "check_util.h" enum question { Q_RESTORE_FROM_BACKUP, Q_REGENERATE, Q_REGENERATE_CHECKSUM, Q_RESTORE_FROM_HEADER }; /* * location_release -- (internal) release check_btt_info_loc allocations */ static void location_release(location *loc) { free(loc->arenap); loc->arenap = NULL; } /* * btt_info_checksum -- (internal) check BTT Info checksum */ static int btt_info_checksum(PMEMpoolcheck *ppc, location *loc) { LOG(3, NULL); loc->arenap = calloc(1, sizeof(struct arena)); if (!loc->arenap) { ERR("!calloc"); ppc->result = CHECK_RESULT_INTERNAL_ERROR; CHECK_ERR(ppc, "cannot allocate memory for arena"); goto error_cleanup; } /* read the BTT Info header at well known offset */ if (pool_read(ppc->pool, &loc->arenap->btt_info, sizeof(loc->arenap->btt_info), loc->offset)) { CHECK_ERR(ppc, "arena %u: cannot read BTT Info header", loc->arenap->id); ppc->result = CHECK_RESULT_ERROR; goto error_cleanup; } loc->arenap->id = ppc->pool->narenas; /* BLK is consistent even without BTT Layout */ if (ppc->pool->params.type == POOL_TYPE_BLK) { int is_zeroed = util_is_zeroed((const void *) &loc->arenap->btt_info, sizeof(loc->arenap->btt_info)); if (is_zeroed) { CHECK_INFO(ppc, "BTT Layout not written"); loc->step = CHECK_STEP_COMPLETE; ppc->pool->blk_no_layout = 1; location_release(loc); check_end(ppc->data); return 0; } } /* check consistency of BTT Info */ if (pool_btt_info_valid(&loc->arenap->btt_info)) { CHECK_INFO(ppc, "arena %u: BTT Info header checksum correct", loc->arenap->id); loc->valid.btti_header = 1; } else if (CHECK_IS_NOT(ppc, REPAIR)) { CHECK_ERR(ppc, "arena %u: BTT Info header checksum incorrect", loc->arenap->id); ppc->result = CHECK_RESULT_NOT_CONSISTENT; check_end(ppc->data); goto error_cleanup; } return 0; error_cleanup: location_release(loc); return -1; } 
/* * btt_info_backup -- (internal) check BTT Info backup */ static int btt_info_backup(PMEMpoolcheck *ppc, location *loc) { LOG(3, NULL); /* check BTT Info backup consistency */ const size_t btt_info_size = sizeof(ppc->pool->bttc.btt_info); uint64_t btt_info_off = pool_next_arena_offset(ppc->pool, loc->offset) - btt_info_size; if (pool_read(ppc->pool, &ppc->pool->bttc.btt_info, btt_info_size, btt_info_off)) { CHECK_ERR(ppc, "arena %u: cannot read BTT Info backup", loc->arenap->id); goto error; } /* check whether this BTT Info backup is valid */ if (pool_btt_info_valid(&ppc->pool->bttc.btt_info)) { loc->valid.btti_backup = 1; /* restore BTT Info from backup */ if (!loc->valid.btti_header && CHECK_IS(ppc, REPAIR)) CHECK_ASK(ppc, Q_RESTORE_FROM_BACKUP, "arena %u: BTT " "Info header checksum incorrect.|Restore BTT " "Info from backup?", loc->arenap->id); } /* * if BTT Info backup require repairs it will be fixed in further steps */ return check_questions_sequence_validate(ppc); error: ppc->result = CHECK_RESULT_ERROR; location_release(loc); return -1; } /* * btt_info_from_backup_fix -- (internal) fix BTT Info using its backup */ static int btt_info_from_backup_fix(PMEMpoolcheck *ppc, location *loc, uint32_t question, void *ctx) { LOG(3, NULL); ASSERTeq(ctx, NULL); ASSERTne(loc, NULL); switch (question) { case Q_RESTORE_FROM_BACKUP: CHECK_INFO(ppc, "arena %u: restoring BTT Info header from backup", loc->arenap->id); memcpy(&loc->arenap->btt_info, &ppc->pool->bttc.btt_info, sizeof(loc->arenap->btt_info)); loc->valid.btti_header = 1; break; default: ERR("not implemented question id: %u", question); } return 0; } /* * btt_info_gen -- (internal) ask whether try to regenerate BTT Info */ static int btt_info_gen(PMEMpoolcheck *ppc, location *loc) { LOG(3, NULL); if (loc->valid.btti_header) return 0; ASSERT(CHECK_IS(ppc, REPAIR)); if (!loc->pool_valid.btti_offset) { ppc->result = CHECK_RESULT_NOT_CONSISTENT; check_end(ppc->data); return CHECK_ERR(ppc, "can not find any valid 
BTT Info"); } CHECK_ASK(ppc, Q_REGENERATE, "arena %u: BTT Info header checksum incorrect.|Do you want to " "regenerate BTT Info?", loc->arenap->id); return check_questions_sequence_validate(ppc); } /* * btt_info_gen_fix -- (internal) fix by regenerating BTT Info */ static int btt_info_gen_fix(PMEMpoolcheck *ppc, location *loc, uint32_t question, void *ctx) { LOG(3, NULL); ASSERTeq(ctx, NULL); ASSERTne(loc, NULL); switch (question) { case Q_REGENERATE: CHECK_INFO(ppc, "arena %u: regenerating BTT Info header", loc->arenap->id); /* * We do not have valid BTT Info backup so we get first valid * BTT Info and try to calculate BTT Info for current arena */ uint64_t arena_size = ppc->pool->set_file->size - loc->offset; if (arena_size > BTT_MAX_ARENA) arena_size = BTT_MAX_ARENA; uint64_t space_left = ppc->pool->set_file->size - loc->offset - arena_size; struct btt_info *bttd = &loc->arenap->btt_info; struct btt_info *btts = &loc->pool_valid.btti; btt_info_convert2h(bttd); /* * all valid BTT Info structures have the same signature, UUID, * parent UUID, flags, major, minor, external LBA size, internal * LBA size, nfree, info size and data offset */ memcpy(bttd->sig, btts->sig, BTTINFO_SIG_LEN); memcpy(bttd->uuid, btts->uuid, BTTINFO_UUID_LEN); memcpy(bttd->parent_uuid, btts->parent_uuid, BTTINFO_UUID_LEN); memset(bttd->unused, 0, BTTINFO_UNUSED_LEN); bttd->flags = btts->flags; bttd->major = btts->major; bttd->minor = btts->minor; /* other parameters can be calculated */ if (btt_info_set(bttd, btts->external_lbasize, btts->nfree, arena_size, space_left)) { CHECK_ERR(ppc, "can not restore BTT Info"); return -1; } ASSERTeq(bttd->external_lbasize, btts->external_lbasize); ASSERTeq(bttd->internal_lbasize, btts->internal_lbasize); ASSERTeq(bttd->nfree, btts->nfree); ASSERTeq(bttd->infosize, btts->infosize); ASSERTeq(bttd->dataoff, btts->dataoff); return 0; default: ERR("not implemented question id: %u", question); return -1; } } /* * btt_info_checksum_retry -- (internal) check BTT 
Info checksum */ static int btt_info_checksum_retry(PMEMpoolcheck *ppc, location *loc) { LOG(3, NULL); if (loc->valid.btti_header) return 0; btt_info_convert2le(&loc->arenap->btt_info); /* check consistency of BTT Info */ if (pool_btt_info_valid(&loc->arenap->btt_info)) { CHECK_INFO(ppc, "arena %u: BTT Info header checksum correct", loc->arenap->id); loc->valid.btti_header = 1; return 0; } if (CHECK_IS_NOT(ppc, ADVANCED)) { ppc->result = CHECK_RESULT_CANNOT_REPAIR; CHECK_INFO(ppc, REQUIRE_ADVANCED); CHECK_ERR(ppc, "arena %u: BTT Info header checksum incorrect", loc->arenap->id); check_end(ppc->data); goto error_cleanup; } CHECK_ASK(ppc, Q_REGENERATE_CHECKSUM, "arena %u: BTT Info header checksum incorrect.|Do you want to " "regenerate BTT Info checksum?", loc->arenap->id); return check_questions_sequence_validate(ppc); error_cleanup: location_release(loc); return -1; } /* * btt_info_checksum_fix -- (internal) fix by regenerating BTT Info checksum */ static int btt_info_checksum_fix(PMEMpoolcheck *ppc, location *loc, uint32_t question, void *ctx) { LOG(3, NULL); ASSERTeq(ctx, NULL); ASSERTne(loc, NULL); switch (question) { case Q_REGENERATE_CHECKSUM: util_checksum(&loc->arenap->btt_info, sizeof(struct btt_info), &loc->arenap->btt_info.checksum, 1, 0); loc->valid.btti_header = 1; break; default: ERR("not implemented question id: %u", question); } return 0; } /* * btt_info_backup_checksum -- (internal) check BTT Info backup checksum */ static int btt_info_backup_checksum(PMEMpoolcheck *ppc, location *loc) { LOG(3, NULL); ASSERT(loc->valid.btti_header); if (loc->valid.btti_backup) return 0; /* BTT Info backup is not valid so it must be fixed */ if (CHECK_IS_NOT(ppc, REPAIR)) { CHECK_ERR(ppc, "arena %u: BTT Info backup checksum incorrect", loc->arenap->id); ppc->result = CHECK_RESULT_NOT_CONSISTENT; check_end(ppc->data); goto error_cleanup; } CHECK_ASK(ppc, Q_RESTORE_FROM_HEADER, "arena %u: BTT Info backup checksum incorrect.|Do you want to " "restore it from BTT Info 
header?", loc->arenap->id); return check_questions_sequence_validate(ppc); error_cleanup: location_release(loc); return -1; } /* * btt_info_backup_fix -- (internal) prepare restore BTT Info backup from header */ static int btt_info_backup_fix(PMEMpoolcheck *ppc, location *loc, uint32_t question, void *ctx) { LOG(3, NULL); ASSERTeq(ctx, NULL); ASSERTne(loc, NULL); switch (question) { case Q_RESTORE_FROM_HEADER: /* BTT Info backup would be restored in check_write step */ CHECK_INFO(ppc, "arena %u: restoring BTT Info backup from header", loc->arenap->id); break; default: ERR("not implemented question id: %u", question); } return 0; } struct step { int (*check)(PMEMpoolcheck *, location *); int (*fix)(PMEMpoolcheck *, location *, uint32_t, void *); }; static const struct step steps[] = { { .check = btt_info_checksum, }, { .check = btt_info_backup, }, { .fix = btt_info_from_backup_fix, }, { .check = btt_info_gen, }, { .fix = btt_info_gen_fix, }, { .check = btt_info_checksum_retry, }, { .fix = btt_info_checksum_fix, }, { .check = btt_info_backup_checksum, }, { .fix = btt_info_backup_fix, }, { .check = NULL, .fix = NULL, }, }; /* * step_exe -- (internal) perform single step according to its parameters */ static inline int step_exe(PMEMpoolcheck *ppc, location *loc) { ASSERT(loc->step < ARRAY_SIZE(steps)); const struct step *step = &steps[loc->step++]; if (!step->fix) return step->check(ppc, loc); if (!check_answer_loop(ppc, loc, NULL, 1, step->fix)) return 0; if (check_has_error(ppc->data)) location_release(loc); return -1; } /* * check_btt_info -- entry point for btt info check */ void check_btt_info(PMEMpoolcheck *ppc) { LOG(3, NULL); location *loc = check_get_step_data(ppc->data); uint64_t nextoff = 0; /* initialize check */ if (!loc->offset) { CHECK_INFO(ppc, "checking BTT Info headers"); loc->offset = BTT_ALIGNMENT; if (ppc->pool->params.type == POOL_TYPE_BLK) loc->offset += BTT_ALIGNMENT; loc->pool_valid.btti_offset = pool_get_first_valid_btt( ppc->pool, 
&loc->pool_valid.btti, loc->offset, NULL); /* Without valid BTT Info we can not proceed */ if (!loc->pool_valid.btti_offset) { if (ppc->pool->params.type == POOL_TYPE_BTT) { CHECK_ERR(ppc, "can not find any valid BTT Info"); ppc->result = CHECK_RESULT_NOT_CONSISTENT; check_end(ppc->data); return; } } else btt_info_convert2h(&loc->pool_valid.btti); } do { /* jump to next offset */ if (ppc->result != CHECK_RESULT_PROCESS_ANSWERS) { loc->offset += nextoff; loc->step = 0; loc->valid.btti_header = 0; loc->valid.btti_backup = 0; } /* do all checks */ while (CHECK_NOT_COMPLETE(loc, steps)) { if (step_exe(ppc, loc) || ppc->pool->blk_no_layout == 1) return; } /* save offset and insert BTT to cache for next steps */ loc->arenap->offset = loc->offset; loc->arenap->valid = true; check_insert_arena(ppc, loc->arenap); nextoff = le64toh(loc->arenap->btt_info.nextoff); } while (nextoff > 0); }
13,144
23.524254
80
c
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmempool/check_util.c
/* * Copyright 2016-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * check_util.c -- check utility functions */ #include <stdio.h> #include <stdint.h> #include "out.h" #include "libpmempool.h" #include "pmempool.h" #include "pool.h" #include "check_util.h" #define CHECK_END UINT_MAX /* separate info part of message from question part of message */ #define MSG_SEPARATOR '|' /* error part of message must have '.' at the end */ #define MSG_PLACE_OF_SEPARATION '.' 
#define MAX_MSG_STR_SIZE 8192 #define CHECK_ANSWER_YES "yes" #define CHECK_ANSWER_NO "no" #define STR_MAX 256 #define TIME_STR_FMT "%a %b %d %Y %H:%M:%S" #define UUID_STR_MAX 37 enum check_answer { PMEMPOOL_CHECK_ANSWER_EMPTY, PMEMPOOL_CHECK_ANSWER_YES, PMEMPOOL_CHECK_ANSWER_NO, PMEMPOOL_CHECK_ANSWER_DEFAULT, }; /* queue of check statuses */ struct check_status { TAILQ_ENTRY(check_status) next; struct pmempool_check_status status; unsigned question; enum check_answer answer; char *msg; }; TAILQ_HEAD(check_status_head, check_status); /* check control context */ struct check_data { unsigned step; location step_data; struct check_status *error; struct check_status_head infos; struct check_status_head questions; struct check_status_head answers; struct check_status *check_status_cache; }; /* * check_data_alloc -- allocate and initialize check_data structure */ struct check_data * check_data_alloc(void) { LOG(3, NULL); struct check_data *data = calloc(1, sizeof(*data)); if (data == NULL) { ERR("!calloc"); return NULL; } TAILQ_INIT(&data->infos); TAILQ_INIT(&data->questions); TAILQ_INIT(&data->answers); return data; } /* * check_data_free -- clean and deallocate check_data */ void check_data_free(struct check_data *data) { LOG(3, NULL); if (data->error != NULL) { free(data->error); data->error = NULL; } if (data->check_status_cache != NULL) { free(data->check_status_cache); data->check_status_cache = NULL; } while (!TAILQ_EMPTY(&data->infos)) { struct check_status *statp = TAILQ_FIRST(&data->infos); TAILQ_REMOVE(&data->infos, statp, next); free(statp); } while (!TAILQ_EMPTY(&data->questions)) { struct check_status *statp = TAILQ_FIRST(&data->questions); TAILQ_REMOVE(&data->questions, statp, next); free(statp); } while (!TAILQ_EMPTY(&data->answers)) { struct check_status *statp = TAILQ_FIRST(&data->answers); TAILQ_REMOVE(&data->answers, statp, next); free(statp); } free(data); } /* * check_step_get - return current check step number */ uint32_t check_step_get(struct 
check_data *data) { return data->step; } /* * check_step_inc -- move to next step number */ void check_step_inc(struct check_data *data) { if (check_is_end_util(data)) return; ++data->step; memset(&data->step_data, 0, sizeof(location)); } /* * check_get_step_data -- return pointer to check step data */ location * check_get_step_data(struct check_data *data) { return &data->step_data; } /* * check_end -- mark check as ended */ void check_end(struct check_data *data) { LOG(3, NULL); data->step = CHECK_END; } /* * check_is_end_util -- return if check has ended */ int check_is_end_util(struct check_data *data) { return data->step == CHECK_END; } /* * status_alloc -- (internal) allocate and initialize check_status */ static inline struct check_status * status_alloc(void) { struct check_status *status = malloc(sizeof(*status)); if (!status) FATAL("!malloc"); status->msg = malloc(sizeof(char) * MAX_MSG_STR_SIZE); if (!status->msg) { free(status); FATAL("!malloc"); } status->status.str.msg = status->msg; status->answer = PMEMPOOL_CHECK_ANSWER_EMPTY; status->question = CHECK_INVALID_QUESTION; return status; } /* * status_release -- (internal) release check_status */ static void status_release(struct check_status *status) { #ifdef _WIN32 /* dealloc duplicate string after conversion */ if (status->status.str.msg != status->msg) free((void *)status->status.str.msg); #endif free(status->msg); free(status); } /* * status_msg_info_only -- (internal) separate info part of the message * * If message is in form of "info.|question" it modifies it as follows * "info\0|question" */ static inline int status_msg_info_only(const char *msg) { char *sep = strchr(msg, MSG_SEPARATOR); if (sep) { ASSERTne(sep, msg); --sep; ASSERTeq(*sep, MSG_PLACE_OF_SEPARATION); *sep = '\0'; return 0; } return -1; } /* * status_msg_info_and_question -- (internal) join info and question * * If message is in form "info.|question" it will replace MSG_SEPARATOR '|' with * space to get "info. 
question" */ static inline int status_msg_info_and_question(const char *msg) { char *sep = strchr(msg, MSG_SEPARATOR); if (sep) { *sep = ' '; return 0; } return -1; } /* * status_push -- (internal) push single status object */ static int status_push(PMEMpoolcheck *ppc, struct check_status *st, uint32_t question) { if (st->status.type == PMEMPOOL_CHECK_MSG_TYPE_ERROR) { ASSERTeq(ppc->data->error, NULL); ppc->data->error = st; return -1; } else if (st->status.type == PMEMPOOL_CHECK_MSG_TYPE_INFO) { if (CHECK_IS(ppc, VERBOSE)) TAILQ_INSERT_TAIL(&ppc->data->infos, st, next); else check_status_release(ppc, st); return 0; } /* st->status.type == PMEMPOOL_CHECK_MSG_TYPE_QUESTION */ if (CHECK_IS_NOT(ppc, REPAIR)) { /* error status */ if (status_msg_info_only(st->msg)) { ERR("no error message for the user"); st->msg[0] = '\0'; } st->status.type = PMEMPOOL_CHECK_MSG_TYPE_ERROR; return status_push(ppc, st, question); } if (CHECK_IS(ppc, ALWAYS_YES)) { if (!status_msg_info_only(st->msg)) { /* information status */ st->status.type = PMEMPOOL_CHECK_MSG_TYPE_INFO; status_push(ppc, st, question); st = status_alloc(); } /* answer status */ ppc->result = CHECK_RESULT_PROCESS_ANSWERS; st->question = question; st->answer = PMEMPOOL_CHECK_ANSWER_YES; st->status.type = PMEMPOOL_CHECK_MSG_TYPE_QUESTION; TAILQ_INSERT_TAIL(&ppc->data->answers, st, next); } else { /* question message */ status_msg_info_and_question(st->msg); st->question = question; ppc->result = CHECK_RESULT_ASK_QUESTIONS; st->answer = PMEMPOOL_CHECK_ANSWER_EMPTY; TAILQ_INSERT_TAIL(&ppc->data->questions, st, next); } return 0; } /* * check_status_create -- create single status, push it to proper queue * * MSG_SEPARATOR character in fmt is treated as message separator. If creating * question but check arguments do not allow to make any changes (asking any * question is pointless) it takes part of message before MSG_SEPARATOR * character and use it to create error message. 
Character just before separator * must be a MSG_PLACE_OF_SEPARATION character. Return non 0 value if error * status would be created. * * The arg is an additional argument for specified type of status. */ int check_status_create(PMEMpoolcheck *ppc, enum pmempool_check_msg_type type, uint32_t arg, const char *fmt, ...) { if (CHECK_IS_NOT(ppc, VERBOSE) && type == PMEMPOOL_CHECK_MSG_TYPE_INFO) return 0; struct check_status *st = status_alloc(); ASSERT(CHECK_IS(ppc, FORMAT_STR)); va_list ap; va_start(ap, fmt); int p = vsnprintf(st->msg, MAX_MSG_STR_SIZE, fmt, ap); va_end(ap); /* append possible strerror at the end of the message */ if (type != PMEMPOOL_CHECK_MSG_TYPE_QUESTION && arg && p > 0) { char buff[UTIL_MAX_ERR_MSG]; util_strerror((int)arg, buff, UTIL_MAX_ERR_MSG); int ret = snprintf(st->msg + p, MAX_MSG_STR_SIZE - (size_t)p, ": %s", buff); if (ret < 0 || ret >= (int)(MAX_MSG_STR_SIZE - (size_t)p)) { ERR("snprintf: %d", ret); free(st); return -1; } } st->status.type = type; return status_push(ppc, st, arg); } /* * check_status_release -- release single status object */ void check_status_release(PMEMpoolcheck *ppc, struct check_status *status) { if (status->status.type == PMEMPOOL_CHECK_MSG_TYPE_ERROR) ppc->data->error = NULL; status_release(status); } /* * pop_status -- (internal) pop single message from check_status queue */ static struct check_status * pop_status(struct check_data *data, struct check_status_head *queue) { if (!TAILQ_EMPTY(queue)) { ASSERTeq(data->check_status_cache, NULL); data->check_status_cache = TAILQ_FIRST(queue); TAILQ_REMOVE(queue, data->check_status_cache, next); return data->check_status_cache; } return NULL; } /* * check_pop_question -- pop single question from questions queue */ struct check_status * check_pop_question(struct check_data *data) { return pop_status(data, &data->questions); } /* * check_pop_info -- pop single info from information queue */ struct check_status * check_pop_info(struct check_data *data) { return 
pop_status(data, &data->infos); } /* * check_pop_error -- pop error from state */ struct check_status * check_pop_error(struct check_data *data) { if (data->error) { ASSERTeq(data->check_status_cache, NULL); data->check_status_cache = data->error; data->error = NULL; return data->check_status_cache; } return NULL; } #ifdef _WIN32 void cache_to_utf8(struct check_data *data, char *buf, size_t size) { if (data->check_status_cache == NULL) return; struct check_status *status = data->check_status_cache; /* if it was a question, convert it and the answer to utf8 */ if (status->status.type == PMEMPOOL_CHECK_MSG_TYPE_QUESTION) { struct pmempool_check_statusW *wstatus = (struct pmempool_check_statusW *)&status->status; wchar_t *wstring = (wchar_t *)wstatus->str.msg; status->status.str.msg = util_toUTF8(wstring); if (status->status.str.msg == NULL) FATAL("!malloc"); util_free_UTF16(wstring); if (util_toUTF8_buff(wstatus->str.answer, buf, size) != 0) FATAL("Invalid answer conversion %s", out_get_errormsg()); status->status.str.answer = buf; } } #endif /* * check_clear_status_cache -- release check_status from cache */ void check_clear_status_cache(struct check_data *data) { if (data->check_status_cache) { switch (data->check_status_cache->status.type) { case PMEMPOOL_CHECK_MSG_TYPE_INFO: case PMEMPOOL_CHECK_MSG_TYPE_ERROR: /* * Info and error statuses are disposable. After showing * them to the user we have to release them. */ status_release(data->check_status_cache); data->check_status_cache = NULL; break; case PMEMPOOL_CHECK_MSG_TYPE_QUESTION: /* * Question status after being showed to the user carry * users answer. It must be kept till answer would be * processed so it can not be released from cache. It * has to be pushed to the answers queue, processed and * released after that. 
*/ break; default: ASSERT(0); } } } /* * status_answer_push -- (internal) push single answer to answers queue */ static void status_answer_push(struct check_data *data, struct check_status *st) { ASSERTeq(st->status.type, PMEMPOOL_CHECK_MSG_TYPE_QUESTION); TAILQ_INSERT_TAIL(&data->answers, st, next); } /* * check_push_answer -- process answer and push it to answers queue */ int check_push_answer(PMEMpoolcheck *ppc) { if (ppc->data->check_status_cache == NULL) return 0; /* check if answer is "yes" or "no" */ struct check_status *status = ppc->data->check_status_cache; if (status->status.str.answer != NULL) { if (strcmp(status->status.str.answer, CHECK_ANSWER_YES) == 0) status->answer = PMEMPOOL_CHECK_ANSWER_YES; else if (strcmp(status->status.str.answer, CHECK_ANSWER_NO) == 0) status->answer = PMEMPOOL_CHECK_ANSWER_NO; } if (status->answer == PMEMPOOL_CHECK_ANSWER_EMPTY) { /* invalid answer provided */ status_answer_push(ppc->data, ppc->data->check_status_cache); ppc->data->check_status_cache = NULL; CHECK_INFO(ppc, "Answer must be either %s or %s", CHECK_ANSWER_YES, CHECK_ANSWER_NO); return -1; } /* push answer */ TAILQ_INSERT_TAIL(&ppc->data->answers, ppc->data->check_status_cache, next); ppc->data->check_status_cache = NULL; return 0; } /* * check_has_error - check if error exists */ bool check_has_error(struct check_data *data) { return data->error != NULL; } /* * check_has_answer - check if any answer exists */ bool check_has_answer(struct check_data *data) { return !TAILQ_EMPTY(&data->answers); } /* * pop_answer -- (internal) pop single answer from answers queue */ static struct check_status * pop_answer(struct check_data *data) { struct check_status *ret = NULL; if (!TAILQ_EMPTY(&data->answers)) { ret = TAILQ_FIRST(&data->answers); TAILQ_REMOVE(&data->answers, ret, next); } return ret; } /* * check_status_get_util -- extract pmempool_check_status from check_status */ struct pmempool_check_status * check_status_get_util(struct check_status *status) { return 
&status->status; } /* * check_answer_loop -- loop through all available answers and process them */ int check_answer_loop(PMEMpoolcheck *ppc, location *data, void *ctx, int fail_on_no, int (*callback)(PMEMpoolcheck *, location *, uint32_t, void *ctx)) { struct check_status *answer; while ((answer = pop_answer(ppc->data)) != NULL) { /* if answer is "no" we cannot fix an issue */ if (answer->answer != PMEMPOOL_CHECK_ANSWER_YES) { if (fail_on_no || answer->answer != PMEMPOOL_CHECK_ANSWER_NO) { CHECK_ERR(ppc, "cannot complete repair, reverting changes"); ppc->result = CHECK_RESULT_NOT_CONSISTENT; goto error; } ppc->result = CHECK_RESULT_REPAIRED; check_status_release(ppc, answer); continue; } /* perform fix */ if (callback(ppc, data, answer->question, ctx)) { ppc->result = CHECK_RESULT_CANNOT_REPAIR; goto error; } if (ppc->result == CHECK_RESULT_ERROR) goto error; /* fix succeeded */ ppc->result = CHECK_RESULT_REPAIRED; check_status_release(ppc, answer); } return 0; error: check_status_release(ppc, answer); return -1; } /* * check_questions_sequence_validate -- generate return value from result * * Sequence of questions can result in one of the following results: CONSISTENT, * REPAIRED, ASK_QUESTIONS of PROCESS_ANSWERS. If result == ASK_QUESTIONS it * returns -1 to indicate existence of unanswered questions. 
*/ int check_questions_sequence_validate(PMEMpoolcheck *ppc) { ASSERT(ppc->result == CHECK_RESULT_CONSISTENT || ppc->result == CHECK_RESULT_ASK_QUESTIONS || ppc->result == CHECK_RESULT_PROCESS_ANSWERS || ppc->result == CHECK_RESULT_REPAIRED); if (ppc->result == CHECK_RESULT_ASK_QUESTIONS) { ASSERT(!TAILQ_EMPTY(&ppc->data->questions)); return -1; } return 0; } /* * check_get_time_str -- returns time in human-readable format */ const char * check_get_time_str(time_t time) { static char str_buff[STR_MAX] = {0, }; struct tm *tm = util_localtime(&time); if (tm) strftime(str_buff, STR_MAX, TIME_STR_FMT, tm); else { int ret = snprintf(str_buff, STR_MAX, "unknown"); if (ret < 0) { ERR("failed to get time str"); return ""; } } return str_buff; } /* * check_get_uuid_str -- returns uuid in human readable format */ const char * check_get_uuid_str(uuid_t uuid) { static char uuid_str[UUID_STR_MAX] = {0, }; int ret = util_uuid_to_string(uuid, uuid_str); if (ret != 0) { ERR("failed to covert uuid to string"); return ""; } return uuid_str; } /* * pmempool_check_insert_arena -- insert arena to list */ void check_insert_arena(PMEMpoolcheck *ppc, struct arena *arenap) { TAILQ_INSERT_TAIL(&ppc->pool->arenas, arenap, next); ppc->pool->narenas++; }
16,996
23.316166
80
c
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmempool/pool.h
/* * Copyright 2016-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /* * pool.h -- internal definitions for pool processing functions */ #ifndef POOL_H #define POOL_H #include <stdbool.h> #include <sys/types.h> #include "libpmemobj.h" #include "libpmemcto.h" #include "queue.h" #include "set.h" #include "log.h" #include "blk.h" #include "btt_layout.h" #include "cto.h" #ifdef __cplusplus extern "C" { #endif enum pool_type { POOL_TYPE_UNKNOWN = (1 << 0), POOL_TYPE_LOG = (1 << 1), POOL_TYPE_BLK = (1 << 2), POOL_TYPE_OBJ = (1 << 3), POOL_TYPE_BTT = (1 << 4), POOL_TYPE_CTO = (1 << 5), POOL_TYPE_ANY = POOL_TYPE_UNKNOWN | POOL_TYPE_LOG | POOL_TYPE_BLK | POOL_TYPE_OBJ | POOL_TYPE_BTT | POOL_TYPE_CTO, }; struct pool_params { enum pool_type type; char signature[POOL_HDR_SIG_LEN]; features_t features; size_t size; mode_t mode; int is_poolset; int is_part; int is_dev_dax; int is_pmem; union { struct { uint64_t bsize; } blk; struct { char layout[PMEMOBJ_MAX_LAYOUT]; } obj; struct { char layout[PMEMCTO_MAX_LAYOUT]; } cto; }; }; struct pool_set_file { int fd; char *fname; void *addr; size_t size; struct pool_set *poolset; time_t mtime; mode_t mode; }; struct arena { TAILQ_ENTRY(arena) next; struct btt_info btt_info; uint32_t id; bool valid; bool zeroed; uint64_t offset; uint8_t *flog; size_t flogsize; uint32_t *map; size_t mapsize; }; struct pool_data { struct pool_params params; struct pool_set_file *set_file; int blk_no_layout; union { struct pool_hdr pool; struct pmemlog log; struct pmemblk blk; struct pmemcto cto; } hdr; enum { UUID_NOP = 0, UUID_FROM_BTT, UUID_NOT_FROM_BTT, } uuid_op; struct arena bttc; TAILQ_HEAD(arenashead, arena) arenas; uint32_t narenas; }; struct pool_data *pool_data_alloc(PMEMpoolcheck *ppc); void pool_data_free(struct pool_data *pool); void pool_params_from_header(struct pool_params *params, const struct pool_hdr *hdr); int pool_set_parse(struct pool_set **setp, const char *path); void *pool_set_file_map(struct pool_set_file *file, uint64_t offset); int pool_read(struct pool_data *pool, void *buff, size_t nbytes, 
uint64_t off); int pool_write(struct pool_data *pool, const void *buff, size_t nbytes, uint64_t off); int pool_copy(struct pool_data *pool, const char *dst_path, int overwrite); int pool_set_part_copy(struct pool_set_part *dpart, struct pool_set_part *spart, int overwrite); int pool_memset(struct pool_data *pool, uint64_t off, int c, size_t count); unsigned pool_set_files_count(struct pool_set_file *file); int pool_set_file_map_headers(struct pool_set_file *file, int rdonly, int prv); void pool_set_file_unmap_headers(struct pool_set_file *file); void pool_hdr_default(enum pool_type type, struct pool_hdr *hdrp); enum pool_type pool_hdr_get_type(const struct pool_hdr *hdrp); enum pool_type pool_set_type(struct pool_set *set); const char *pool_get_pool_type_str(enum pool_type type); int pool_btt_info_valid(struct btt_info *infop); int pool_blk_get_first_valid_arena(struct pool_data *pool, struct arena *arenap); int pool_blk_bsize_valid(uint32_t bsize, uint64_t fsize); uint64_t pool_next_arena_offset(struct pool_data *pool, uint64_t header_offset); uint64_t pool_get_first_valid_btt(struct pool_data *pool, struct btt_info *infop, uint64_t offset, bool *zeroed); size_t pool_get_min_size(enum pool_type); #ifdef __cplusplus } #endif #endif
4,963
27.365714
80
h
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmempool/pool.c
/* * Copyright 2016-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /* * pool.c -- pool processing functions */ #include <stdio.h> #include <stdint.h> #include <sys/mman.h> #include <unistd.h> #include <fcntl.h> #include <endian.h> #ifndef _WIN32 #include <sys/ioctl.h> #ifdef __FreeBSD__ #include <sys/disk.h> #define BLKGETSIZE64 DIOCGMEDIASIZE #else #include <linux/fs.h> #endif #endif #include "libpmem.h" #include "libpmemlog.h" #include "libpmemblk.h" #include "libpmemcto.h" #include "libpmempool.h" #include "out.h" #include "pmempool.h" #include "pool.h" #include "lane.h" #include "obj.h" #include "btt.h" #include "cto.h" #include "file.h" #include "os.h" #include "set.h" #include "check_util.h" #include "util_pmem.h" #include "mmap.h" /* arbitrary size of a maximum file part being read / write at once */ #define RW_BUFFERING_SIZE (128 * 1024 * 1024) /* * pool_btt_lseek -- (internal) perform lseek in BTT file mode */ static inline os_off_t pool_btt_lseek(struct pool_data *pool, os_off_t offset, int whence) { os_off_t result; if ((result = os_lseek(pool->set_file->fd, offset, whence)) == -1) ERR("!lseek"); return result; } /* * pool_btt_read -- (internal) perform read in BTT file mode */ static inline ssize_t pool_btt_read(struct pool_data *pool, void *dst, size_t count) { size_t total = 0; ssize_t nread; while (count > total && (nread = util_read(pool->set_file->fd, dst, count - total))) { if (nread == -1) { ERR("!read"); return total ? (ssize_t)total : -1; } dst = (void *)((ssize_t)dst + nread); total += (size_t)nread; } return (ssize_t)total; } /* * pool_btt_write -- (internal) perform write in BTT file mode */ static inline ssize_t pool_btt_write(struct pool_data *pool, const void *src, size_t count) { ssize_t nwrite = 0; size_t total = 0; while (count > total && (nwrite = util_write(pool->set_file->fd, src, count - total))) { if (nwrite == -1) { ERR("!write"); return total ? 
(ssize_t)total : -1; } src = (void *)((ssize_t)src + nwrite); total += (size_t)nwrite; } return (ssize_t)total; } /* * pool_set_read_header -- (internal) read a header of a pool set */ static int pool_set_read_header(const char *fname, struct pool_hdr *hdr) { struct pool_set *set; int ret = 0; if (util_poolset_read(&set, fname)) { return -1; } /* open the first part set file to read the pool header values */ const struct pool_set_part *part = PART(REP(set, 0), 0); int fdp = util_file_open(part->path, NULL, 0, O_RDONLY); if (fdp < 0) { ERR("cannot open poolset part file"); ret = -1; goto err_pool_set; } /* read the pool header from first pool set file */ if (pread(fdp, hdr, sizeof(*hdr), 0) != sizeof(*hdr)) { ERR("cannot read pool header from poolset"); ret = -1; goto err_close_part; } err_close_part: os_close(fdp); err_pool_set: util_poolset_free(set); return ret; } /* * pool_set_map -- (internal) map poolset */ static int pool_set_map(const char *fname, struct pool_set **poolset, unsigned flags) { ASSERTeq(util_is_poolset_file(fname), 1); struct pool_hdr hdr; if (pool_set_read_header(fname, &hdr)) return -1; util_convert2h_hdr_nocheck(&hdr); /* parse pool type from first pool set file */ enum pool_type type = pool_hdr_get_type(&hdr); if (type == POOL_TYPE_UNKNOWN) { ERR("cannot determine pool type from poolset"); return -1; } /* * Open the poolset, the values passed to util_pool_open are read * from the first poolset file, these values are then compared with * the values from all headers of poolset files. 
*/ struct pool_attr attr; util_pool_hdr2attr(&attr, &hdr); if (util_pool_open(poolset, fname, 0 /* minpartsize */, &attr, NULL, NULL, flags | POOL_OPEN_IGNORE_SDS | POOL_OPEN_IGNORE_BAD_BLOCKS)) { ERR("opening poolset failed"); return -1; } return 0; } /* * pool_params_from_header -- parse pool params from pool header */ void pool_params_from_header(struct pool_params *params, const struct pool_hdr *hdr) { memcpy(params->signature, hdr->signature, sizeof(params->signature)); memcpy(&params->features, &hdr->features, sizeof(params->features)); /* * Check if file is a part of pool set by comparing the UUID with the * next part UUID. If it is the same it means the pool consist of a * single file. */ int uuid_eq_next = uuidcmp(hdr->uuid, hdr->next_part_uuid); int uuid_eq_prev = uuidcmp(hdr->uuid, hdr->prev_part_uuid); params->is_part = !params->is_poolset && (uuid_eq_next || uuid_eq_prev); params->type = pool_hdr_get_type(hdr); } /* * pool_check_type_to_pool_type -- (internal) convert check pool type to * internal pool type value */ static enum pool_type pool_check_type_to_pool_type(enum pmempool_pool_type check_pool_type) { switch (check_pool_type) { case PMEMPOOL_POOL_TYPE_LOG: return POOL_TYPE_LOG; case PMEMPOOL_POOL_TYPE_BLK: return POOL_TYPE_BLK; case PMEMPOOL_POOL_TYPE_OBJ: return POOL_TYPE_OBJ; case PMEMPOOL_POOL_TYPE_CTO: return POOL_TYPE_CTO; default: ERR("can not convert pmempool_pool_type %u to pool_type", check_pool_type); return POOL_TYPE_UNKNOWN; } } /* * pool_parse_params -- parse pool type, file size and block size */ static int pool_params_parse(const PMEMpoolcheck *ppc, struct pool_params *params, int check) { LOG(3, NULL); int is_btt = ppc->args.pool_type == PMEMPOOL_POOL_TYPE_BTT; params->type = POOL_TYPE_UNKNOWN; params->is_poolset = util_is_poolset_file(ppc->path) == 1; int fd = util_file_open(ppc->path, NULL, 0, O_RDONLY); if (fd < 0) return -1; int ret = 0; os_stat_t stat_buf; ret = os_fstat(fd, &stat_buf); if (ret) goto out_close; 
ASSERT(stat_buf.st_size >= 0); params->mode = stat_buf.st_mode; struct pool_set *set; void *addr; if (params->is_poolset) { /* * Need to close the poolset because it will be opened with * flock in the following instructions. */ os_close(fd); fd = -1; if (check) { if (pool_set_map(ppc->path, &set, 0)) return -1; } else { ret = util_poolset_create_set(&set, ppc->path, 0, 0, true); if (ret < 0) { LOG(2, "cannot open pool set -- '%s'", ppc->path); return -1; } if (set->remote) { ERR("poolsets with remote replicas are not " "supported"); return -1; } if (util_pool_open_nocheck(set, POOL_OPEN_IGNORE_BAD_BLOCKS)) return -1; } params->size = set->poolsize; addr = set->replica[0]->part[0].addr; /* * XXX mprotect for device dax with length not aligned to its * page granularity causes SIGBUS on the next page fault. * The length argument of this call should be changed to * set->poolsize once the kernel issue is solved. */ if (mprotect(addr, set->replica[0]->repsize, PROT_READ) < 0) { ERR("!mprotect"); goto out_unmap; } params->is_dev_dax = set->replica[0]->part[0].is_dev_dax; params->is_pmem = set->replica[0]->is_pmem; } else if (is_btt) { params->size = (size_t)stat_buf.st_size; #ifndef _WIN32 if (params->mode & S_IFBLK) if (ioctl(fd, BLKGETSIZE64, &params->size)) { ERR("!ioctl"); goto out_close; } #endif addr = NULL; } else { enum file_type type = util_file_get_type(ppc->path); if (type < 0) return -1; ssize_t s = util_file_get_size(ppc->path); if (s < 0) { ret = -1; goto out_close; } params->size = (size_t)s; int map_sync; addr = util_map(fd, params->size, MAP_SHARED, 1, 0, &map_sync); if (addr == NULL) { ret = -1; goto out_close; } params->is_dev_dax = type == TYPE_DEVDAX; params->is_pmem = params->is_dev_dax || map_sync || pmem_is_pmem(addr, params->size); } /* stop processing for BTT device */ if (is_btt) { params->type = POOL_TYPE_BTT; params->is_part = false; goto out_close; } struct pool_hdr hdr; memcpy(&hdr, addr, sizeof(hdr)); util_convert2h_hdr_nocheck(&hdr); 
pool_params_from_header(params, &hdr); if (ppc->args.pool_type != PMEMPOOL_POOL_TYPE_DETECT) { enum pool_type declared_type = pool_check_type_to_pool_type(ppc->args.pool_type); if ((params->type & ~declared_type) != 0) { ERR("declared pool type does not match"); errno = EINVAL; ret = 1; goto out_unmap; } } if (params->type == POOL_TYPE_BLK) { struct pmemblk pbp; memcpy(&pbp, addr, sizeof(pbp)); params->blk.bsize = le32toh(pbp.bsize); } else if (params->type == POOL_TYPE_OBJ) { struct pmemobjpool *pop = addr; memcpy(params->obj.layout, pop->layout, PMEMOBJ_MAX_LAYOUT); } else if (params->type == POOL_TYPE_CTO) { struct pmemcto *pcp = addr; memcpy(params->cto.layout, pcp->layout, PMEMCTO_MAX_LAYOUT); } out_unmap: if (params->is_poolset) { ASSERTeq(fd, -1); ASSERTne(addr, NULL); util_poolset_close(set, DO_NOT_DELETE_PARTS); } else if (!is_btt) { ASSERTne(fd, -1); ASSERTne(addr, NULL); munmap(addr, params->size); } out_close: if (fd != -1) os_close(fd); return ret; } /* * pool_set_file_open -- (internal) opens pool set file or regular file */ static struct pool_set_file * pool_set_file_open(const char *fname, struct pool_params *params, int rdonly) { LOG(3, NULL); struct pool_set_file *file = calloc(1, sizeof(*file)); if (!file) return NULL; file->fname = strdup(fname); if (!file->fname) goto err; const char *path = file->fname; if (params->type != POOL_TYPE_BTT) { int ret = util_poolset_create_set(&file->poolset, path, 0, 0, true); if (ret < 0) { LOG(2, "cannot open pool set -- '%s'", path); goto err_free_fname; } unsigned flags = (rdonly ? POOL_OPEN_COW : 0) | POOL_OPEN_IGNORE_BAD_BLOCKS; if (util_pool_open_nocheck(file->poolset, flags)) goto err_free_fname; file->size = file->poolset->poolsize; /* get modification time from the first part of first replica */ path = file->poolset->replica[0]->part[0].path; file->addr = file->poolset->replica[0]->part[0].addr; } else { int oflag = rdonly ? 
O_RDONLY : O_RDWR; file->fd = util_file_open(fname, NULL, 0, oflag); file->size = params->size; } os_stat_t buf; if (os_stat(path, &buf)) { ERR("%s", path); goto err_close_poolset; } file->mtime = buf.st_mtime; file->mode = buf.st_mode; return file; err_close_poolset: if (params->type != POOL_TYPE_BTT) util_poolset_close(file->poolset, DO_NOT_DELETE_PARTS); else if (file->fd != -1) os_close(file->fd); err_free_fname: free(file->fname); err: free(file); return NULL; } /* * pool_set_parse -- parse poolset file */ int pool_set_parse(struct pool_set **setp, const char *path) { LOG(3, "setp %p path %s", setp, path); int fd = os_open(path, O_RDONLY); int ret = 0; if (fd < 0) return 1; if (util_poolset_parse(setp, path, fd)) { ret = 1; goto err_close; } err_close: os_close(fd); return ret; } /* * pool_data_alloc -- allocate pool data and open set_file */ struct pool_data * pool_data_alloc(PMEMpoolcheck *ppc) { LOG(3, NULL); struct pool_data *pool = calloc(1, sizeof(*pool)); if (!pool) { ERR("!calloc"); return NULL; } TAILQ_INIT(&pool->arenas); pool->uuid_op = UUID_NOP; if (pool_params_parse(ppc, &pool->params, 0)) goto error; int rdonly = CHECK_IS_NOT(ppc, REPAIR); int prv = CHECK_IS(ppc, DRY_RUN); if (prv && pool->params.is_dev_dax) { errno = ENOTSUP; ERR("!cannot perform a dry run on dax device"); goto error; } pool->set_file = pool_set_file_open(ppc->path, &pool->params, prv); if (pool->set_file == NULL) goto error; /* * XXX mprotect for device dax with length not aligned to its * page granularity causes SIGBUS on the next page fault. * The length argument of this call should be changed to * pool->set_file->poolsize once the kernel issue is solved. 
*/ if (rdonly && mprotect(pool->set_file->addr, pool->set_file->poolset->replica[0]->repsize, PROT_READ) < 0) goto error; if (pool->params.type != POOL_TYPE_BTT) { if (pool_set_file_map_headers(pool->set_file, rdonly, prv)) goto error; } return pool; error: pool_data_free(pool); return NULL; } /* * pool_set_file_close -- (internal) closes pool set file or regular file */ static void pool_set_file_close(struct pool_set_file *file) { LOG(3, NULL); if (file->poolset) util_poolset_close(file->poolset, DO_NOT_DELETE_PARTS); else if (file->addr) { munmap(file->addr, file->size); os_close(file->fd); } else if (file->fd) os_close(file->fd); free(file->fname); free(file); } /* * pool_data_free -- close set_file and release pool data */ void pool_data_free(struct pool_data *pool) { LOG(3, NULL); if (pool->set_file) { if (pool->params.type != POOL_TYPE_BTT) pool_set_file_unmap_headers(pool->set_file); pool_set_file_close(pool->set_file); } while (!TAILQ_EMPTY(&pool->arenas)) { struct arena *arenap = TAILQ_FIRST(&pool->arenas); if (arenap->map) free(arenap->map); if (arenap->flog) free(arenap->flog); TAILQ_REMOVE(&pool->arenas, arenap, next); free(arenap); } free(pool); } /* * pool_set_file_map -- return mapped address at given offset */ void * pool_set_file_map(struct pool_set_file *file, uint64_t offset) { if (file->addr == MAP_FAILED) return NULL; return (char *)file->addr + offset; } /* * pool_read -- read from pool set file or regular file * * 'buff' has to be a buffer at least 'nbytes' long * 'off' is an offset from the beginning of the pool */ int pool_read(struct pool_data *pool, void *buff, size_t nbytes, uint64_t off) { if (off + nbytes > pool->set_file->size) return -1; if (pool->params.type != POOL_TYPE_BTT) memcpy(buff, (char *)pool->set_file->addr + off, nbytes); else { if (pool_btt_lseek(pool, (os_off_t)off, SEEK_SET) == -1) return -1; if ((size_t)pool_btt_read(pool, buff, nbytes) != nbytes) return -1; } return 0; } /* * pool_write -- write to pool set file or 
regular file * * 'buff' has to be a buffer at least 'nbytes' long * 'off' is an offset from the beginning of the pool */ int pool_write(struct pool_data *pool, const void *buff, size_t nbytes, uint64_t off) { if (off + nbytes > pool->set_file->size) return -1; if (pool->params.type != POOL_TYPE_BTT) { memcpy((char *)pool->set_file->addr + off, buff, nbytes); util_persist_auto(pool->params.is_pmem, (char *)pool->set_file->addr + off, nbytes); } else { if (pool_btt_lseek(pool, (os_off_t)off, SEEK_SET) == -1) return -1; if ((size_t)pool_btt_write(pool, buff, nbytes) != nbytes) return -1; } return 0; } /* * pool_copy -- make a copy of the pool */ int pool_copy(struct pool_data *pool, const char *dst_path, int overwrite) { struct pool_set_file *file = pool->set_file; int dfd; int exists = util_file_exists(dst_path); if (exists < 0) return -1; if (exists) { if (!overwrite) { errno = EEXIST; return -1; } dfd = util_file_open(dst_path, NULL, 0, O_RDWR); } else { errno = 0; dfd = util_file_create(dst_path, file->size, 0); } if (dfd < 0) return -1; int result = 0; os_stat_t stat_buf; if (os_stat(file->fname, &stat_buf)) { result = -1; goto out_close; } if (fchmod(dfd, stat_buf.st_mode)) { result = -1; goto out_close; } void *daddr = mmap(NULL, file->size, PROT_READ | PROT_WRITE, MAP_SHARED, dfd, 0); if (daddr == MAP_FAILED) { result = -1; goto out_close; } if (pool->params.type != POOL_TYPE_BTT) { void *saddr = pool_set_file_map(file, 0); memcpy(daddr, saddr, file->size); goto out_unmap; } void *buf = malloc(RW_BUFFERING_SIZE); if (buf == NULL) { ERR("!malloc"); result = -1; goto out_unmap; } if (pool_btt_lseek(pool, 0, SEEK_SET) == -1) { result = -1; goto out_free; } ssize_t buf_read = 0; void *dst = daddr; while ((buf_read = pool_btt_read(pool, buf, RW_BUFFERING_SIZE))) { if (buf_read == -1) break; memcpy(dst, buf, (size_t)buf_read); dst = (void *)((ssize_t)dst + buf_read); } out_free: free(buf); out_unmap: munmap(daddr, file->size); out_close: (void) os_close(dfd); return 
result; } /* * pool_set_part_copy -- make a copy of the poolset part */ int pool_set_part_copy(struct pool_set_part *dpart, struct pool_set_part *spart, int overwrite) { LOG(3, "dpart %p spart %p", dpart, spart); int result = 0; os_stat_t stat_buf; if (os_fstat(spart->fd, &stat_buf)) { ERR("!util_stat"); return -1; } size_t smapped = 0; void *saddr = pmem_map_file(spart->path, 0, 0, S_IREAD, &smapped, NULL); if (!saddr) return -1; size_t dmapped = 0; int is_pmem; void *daddr; int exists = util_file_exists(dpart->path); if (exists < 0) { result = -1; goto out_sunmap; } if (exists) { if (!overwrite) { errno = EEXIST; result = -1; goto out_sunmap; } daddr = pmem_map_file(dpart->path, 0, 0, S_IWRITE, &dmapped, &is_pmem); } else { errno = 0; daddr = pmem_map_file(dpart->path, dpart->filesize, PMEM_FILE_CREATE | PMEM_FILE_EXCL, stat_buf.st_mode, &dmapped, &is_pmem); } if (!daddr) { result = -1; goto out_sunmap; } #ifdef DEBUG /* provide extra logging in case of wrong dmapped/smapped value */ if (dmapped < smapped) { LOG(1, "dmapped < smapped: dmapped = %lu, smapped = %lu", dmapped, smapped); ASSERT(0); } #endif if (is_pmem) { pmem_memcpy_persist(daddr, saddr, smapped); } else { memcpy(daddr, saddr, smapped); pmem_msync(daddr, smapped); } pmem_unmap(daddr, dmapped); out_sunmap: pmem_unmap(saddr, smapped); return result; } /* * pool_memset -- memset pool part described by off and count */ int pool_memset(struct pool_data *pool, uint64_t off, int c, size_t count) { int result = 0; if (pool->params.type != POOL_TYPE_BTT) memset((char *)off, 0, count); else { if (pool_btt_lseek(pool, (os_off_t)off, SEEK_SET) == -1) return -1; size_t zero_size = min(count, RW_BUFFERING_SIZE); void *buf = malloc(zero_size); if (!buf) { ERR("!malloc"); return -1; } memset(buf, c, zero_size); ssize_t nwrite = 0; do { zero_size = min(zero_size, count); nwrite = pool_btt_write(pool, buf, zero_size); if (nwrite < 0) { result = -1; break; } count -= (size_t)nwrite; } while (count > 0); free(buf); } 
return result; } /* * pool_set_files_count -- get total number of parts of all replicas */ unsigned pool_set_files_count(struct pool_set_file *file) { unsigned ret = 0; unsigned nreplicas = file->poolset->nreplicas; for (unsigned r = 0; r < nreplicas; r++) { struct pool_replica *rep = file->poolset->replica[r]; ret += rep->nparts; } return ret; } /* * pool_set_file_map_headers -- map headers of each pool set part file */ int pool_set_file_map_headers(struct pool_set_file *file, int rdonly, int prv) { if (!file->poolset) return -1; for (unsigned r = 0; r < file->poolset->nreplicas; r++) { struct pool_replica *rep = file->poolset->replica[r]; for (unsigned p = 0; p < rep->nparts; p++) { struct pool_set_part *part = &rep->part[p]; if (util_map_hdr(part, prv ? MAP_PRIVATE : MAP_SHARED, rdonly)) { part->hdr = NULL; goto err; } } } return 0; err: pool_set_file_unmap_headers(file); return -1; } /* * pool_set_file_unmap_headers -- unmap headers of each pool set part file */ void pool_set_file_unmap_headers(struct pool_set_file *file) { if (!file->poolset) return; for (unsigned r = 0; r < file->poolset->nreplicas; r++) { struct pool_replica *rep = file->poolset->replica[r]; for (unsigned p = 0; p < rep->nparts; p++) { struct pool_set_part *part = &rep->part[p]; util_unmap_hdr(part); } } } /* * pool_get_signature -- (internal) return signature of specified pool type */ static const char * pool_get_signature(enum pool_type type) { switch (type) { case POOL_TYPE_LOG: return LOG_HDR_SIG; case POOL_TYPE_BLK: return BLK_HDR_SIG; case POOL_TYPE_OBJ: return OBJ_HDR_SIG; case POOL_TYPE_CTO: return CTO_HDR_SIG; default: return NULL; } } /* * pool_hdr_default -- return default pool header values */ void pool_hdr_default(enum pool_type type, struct pool_hdr *hdrp) { memset(hdrp, 0, sizeof(*hdrp)); const char *sig = pool_get_signature(type); ASSERTne(sig, NULL); memcpy(hdrp->signature, sig, POOL_HDR_SIG_LEN); switch (type) { case POOL_TYPE_LOG: hdrp->major = LOG_FORMAT_MAJOR; 
hdrp->features = log_format_feat_default; break; case POOL_TYPE_BLK: hdrp->major = BLK_FORMAT_MAJOR; hdrp->features = blk_format_feat_default; break; case POOL_TYPE_OBJ: hdrp->major = OBJ_FORMAT_MAJOR; hdrp->features = obj_format_feat_default; break; case POOL_TYPE_CTO: hdrp->major = CTO_FORMAT_MAJOR; hdrp->features = cto_format_feat_default; break; default: break; } } /* * pool_hdr_get_type -- return pool type based on pool header data */ enum pool_type pool_hdr_get_type(const struct pool_hdr *hdrp) { if (memcmp(hdrp->signature, LOG_HDR_SIG, POOL_HDR_SIG_LEN) == 0) return POOL_TYPE_LOG; else if (memcmp(hdrp->signature, BLK_HDR_SIG, POOL_HDR_SIG_LEN) == 0) return POOL_TYPE_BLK; else if (memcmp(hdrp->signature, OBJ_HDR_SIG, POOL_HDR_SIG_LEN) == 0) return POOL_TYPE_OBJ; else if (memcmp(hdrp->signature, CTO_HDR_SIG, POOL_HDR_SIG_LEN) == 0) return POOL_TYPE_CTO; else return POOL_TYPE_UNKNOWN; } /* * pool_get_pool_type_str -- return human-readable pool type string */ const char * pool_get_pool_type_str(enum pool_type type) { switch (type) { case POOL_TYPE_BTT: return "btt"; case POOL_TYPE_LOG: return "pmemlog"; case POOL_TYPE_BLK: return "pmemblk"; case POOL_TYPE_OBJ: return "pmemobj"; case POOL_TYPE_CTO: return "pmemcto"; default: return "unknown"; } } /* * pool_set_type -- get pool type of a poolset */ enum pool_type pool_set_type(struct pool_set *set) { struct pool_hdr hdr; /* open the first part file to read the pool header values */ const struct pool_set_part *part = PART(REP(set, 0), 0); if (util_file_pread(part->path, &hdr, sizeof(hdr), 0) != sizeof(hdr)) { ERR("cannot read pool header from poolset"); return POOL_TYPE_UNKNOWN; } util_convert2h_hdr_nocheck(&hdr); enum pool_type type = pool_hdr_get_type(&hdr); return type; } /* * pool_btt_info_valid -- check consistency of BTT Info header */ int pool_btt_info_valid(struct btt_info *infop) { if (memcmp(infop->sig, BTTINFO_SIG, BTTINFO_SIG_LEN) != 0) return 0; return util_checksum(infop, sizeof(*infop), 
&infop->checksum, 0, 0); } /* * pool_blk_get_first_valid_arena -- get first valid BTT Info in arena */ int pool_blk_get_first_valid_arena(struct pool_data *pool, struct arena *arenap) { arenap->zeroed = true; uint64_t offset = pool_get_first_valid_btt(pool, &arenap->btt_info, 2 * BTT_ALIGNMENT, &arenap->zeroed); if (offset != 0) { arenap->offset = offset; arenap->valid = true; return 1; } return 0; } /* * pool_next_arena_offset -- get offset of next arena * * Calculated offset is theoretical. Function does not check if such arena can * exist. */ uint64_t pool_next_arena_offset(struct pool_data *pool, uint64_t offset) { uint64_t lastoff = (pool->set_file->size & ~(BTT_ALIGNMENT - 1)); uint64_t nextoff = min(offset + BTT_MAX_ARENA, lastoff); return nextoff; } /* * pool_get_first_valid_btt -- return offset to first valid BTT Info * * - Return offset to valid BTT Info header in pool file. * - Start looking from given offset. * - Convert BTT Info header to host endianness. * - Return the BTT Info header by pointer. 
* - If zeroed pointer provided would check if all checked BTT Info are zeroed * which is useful for BLK pools */ uint64_t pool_get_first_valid_btt(struct pool_data *pool, struct btt_info *infop, uint64_t offset, bool *zeroed) { /* if we have valid arena get BTT Info header from it */ if (pool->narenas != 0) { struct arena *arenap = TAILQ_FIRST(&pool->arenas); memcpy(infop, &arenap->btt_info, sizeof(*infop)); return arenap->offset; } const size_t info_size = sizeof(*infop); /* theoretical offsets to BTT Info header and backup */ uint64_t offsets[2] = {offset, 0}; while (offsets[0] < pool->set_file->size) { /* calculate backup offset */ offsets[1] = pool_next_arena_offset(pool, offsets[0]) - info_size; /* check both offsets: header and backup */ for (int i = 0; i < 2; ++i) { if (pool_read(pool, infop, info_size, offsets[i])) continue; /* check if all possible BTT Info are zeroed */ if (zeroed) *zeroed &= util_is_zeroed((const void *)infop, info_size); /* check if read BTT Info is valid */ if (pool_btt_info_valid(infop)) { btt_info_convert2h(infop); return offsets[i]; } } /* jump to next arena */ offsets[0] += BTT_MAX_ARENA; } return 0; } /* * pool_get_min_size -- return the minimum pool size of a pool of a given type */ size_t pool_get_min_size(enum pool_type type) { switch (type) { case POOL_TYPE_LOG: return PMEMLOG_MIN_POOL; case POOL_TYPE_BLK: return PMEMBLK_MIN_POOL; case POOL_TYPE_OBJ: return PMEMOBJ_MIN_POOL; case POOL_TYPE_CTO: return PMEMCTO_MIN_POOL; default: ERR("unknown type of a pool"); return SIZE_MAX; } }
26,517
21.959307
79
c
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmempool/check_write.c
/* * Copyright 2016-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /* * check_write.c -- write fixed data back */ #include <stdint.h> #include <endian.h> #include "out.h" #include "btt.h" #include "libpmempool.h" #include "pmempool.h" #include "pool.h" #include "check_util.h" enum questions { Q_REPAIR_MAP, Q_REPAIR_FLOG, }; /* * log_write -- (internal) write all structures for log pool */ static int log_write(PMEMpoolcheck *ppc, location *loc) { LOG(3, NULL); if (CHECK_WITHOUT_FIXING(ppc)) return 0; /* endianness conversion */ struct pmemlog *log = &ppc->pool->hdr.log; log_convert2le(log); if (pool_write(ppc->pool, log, sizeof(*log), 0)) { ppc->result = CHECK_RESULT_CANNOT_REPAIR; return CHECK_ERR(ppc, "writing pmemlog structure failed"); } return 0; } /* * blk_write_flog -- (internal) convert and write flog to file */ static int blk_write_flog(PMEMpoolcheck *ppc, struct arena *arenap) { if (!arenap->flog) { ppc->result = CHECK_RESULT_ERROR; return CHECK_ERR(ppc, "flog is missing"); } uint64_t flogoff = arenap->offset + arenap->btt_info.flogoff; uint8_t *ptr = arenap->flog; uint32_t i; for (i = 0; i < arenap->btt_info.nfree; i++) { struct btt_flog *flog = (struct btt_flog *)ptr; btt_flog_convert2le(&flog[0]); btt_flog_convert2le(&flog[1]); ptr += BTT_FLOG_PAIR_ALIGN; } if (pool_write(ppc->pool, arenap->flog, arenap->flogsize, flogoff)) { CHECK_INFO(ppc, "%s", ppc->path); ppc->result = CHECK_RESULT_CANNOT_REPAIR; return CHECK_ERR(ppc, "arena %u: writing BTT FLOG failed\n", arenap->id); } return 0; } /* * blk_write_map -- (internal) convert and write map to file */ static int blk_write_map(PMEMpoolcheck *ppc, struct arena *arenap) { if (!arenap->map) { ppc->result = CHECK_RESULT_ERROR; return CHECK_ERR(ppc, "map is missing"); } uint64_t mapoff = arenap->offset + arenap->btt_info.mapoff; uint32_t i; for (i = 0; i < arenap->btt_info.external_nlba; i++) arenap->map[i] = htole32(arenap->map[i]); if (pool_write(ppc->pool, arenap->map, arenap->mapsize, mapoff)) { CHECK_INFO(ppc, "%s", ppc->path); ppc->result = 
CHECK_RESULT_CANNOT_REPAIR; return CHECK_ERR(ppc, "arena %u: writing BTT map failed\n", arenap->id); } return 0; } /* * blk_write -- (internal) write all structures for blk pool */ static int blk_write(PMEMpoolcheck *ppc, location *loc) { LOG(3, NULL); if (CHECK_WITHOUT_FIXING(ppc)) return 0; /* endianness conversion */ ppc->pool->hdr.blk.bsize = htole32(ppc->pool->hdr.blk.bsize); if (pool_write(ppc->pool, &ppc->pool->hdr.blk, sizeof(ppc->pool->hdr.blk), 0)) { CHECK_INFO(ppc, "%s", ppc->path); ppc->result = CHECK_RESULT_CANNOT_REPAIR; return CHECK_ERR(ppc, "writing pmemblk structure failed"); } return 0; } /* * btt_data_write -- (internal) write BTT data */ static int btt_data_write(PMEMpoolcheck *ppc, location *loc) { LOG(3, NULL); struct arena *arenap; TAILQ_FOREACH(arenap, &ppc->pool->arenas, next) { if (ppc->pool->uuid_op == UUID_NOT_FROM_BTT) { memcpy(arenap->btt_info.parent_uuid, ppc->pool->hdr.pool.poolset_uuid, sizeof(arenap->btt_info.parent_uuid)); util_checksum(&arenap->btt_info, sizeof(arenap->btt_info), &arenap->btt_info.checksum, 1, 0); } if (pool_write(ppc->pool, &arenap->btt_info, sizeof(arenap->btt_info), arenap->offset)) { CHECK_INFO(ppc, "%s", ppc->path); CHECK_ERR(ppc, "arena %u: writing BTT Info failed", arenap->id); goto error; } if (pool_write(ppc->pool, &arenap->btt_info, sizeof(arenap->btt_info), arenap->offset + le64toh(arenap->btt_info.infooff))) { CHECK_INFO(ppc, "%s", ppc->path); CHECK_ERR(ppc, "arena %u: writing BTT Info backup failed", arenap->id); goto error; } if (blk_write_flog(ppc, arenap)) goto error; if (blk_write_map(ppc, arenap)) goto error; } return 0; error: ppc->result = CHECK_RESULT_CANNOT_REPAIR; return -1; } /* * cto_write -- (internal) write all structures for pmemcto pool */ static int cto_write(PMEMpoolcheck *ppc, location *loc) { LOG(3, NULL); if (CHECK_WITHOUT_FIXING(ppc)) return 0; if (pool_write(ppc->pool, &ppc->pool->hdr.cto, sizeof(ppc->pool->hdr.cto), 0)) { CHECK_INFO(ppc, "%s", ppc->path); ppc->result = 
CHECK_RESULT_CANNOT_REPAIR; return CHECK_ERR(ppc, "writing pmemcto structure failed"); } return 0; } struct step { int (*func)(PMEMpoolcheck *, location *loc); enum pool_type type; }; static const struct step steps[] = { { .func = log_write, .type = POOL_TYPE_LOG, }, { .func = blk_write, .type = POOL_TYPE_BLK, }, { .func = cto_write, .type = POOL_TYPE_CTO, }, { .func = btt_data_write, .type = POOL_TYPE_BLK | POOL_TYPE_BTT, }, { .func = NULL, }, }; /* * step_exe -- (internal) perform single step according to its parameters */ static inline int step_exe(PMEMpoolcheck *ppc, location *loc) { ASSERT(loc->step < ARRAY_SIZE(steps)); const struct step *step = &steps[loc->step++]; /* check step conditions */ if (!(step->type & ppc->pool->params.type)) return 0; return step->func(ppc, loc); } /* * check_write -- write fixed data back */ void check_write(PMEMpoolcheck *ppc) { /* * XXX: Disabling individual checks based on type should be done in the * step structure. This however requires refactor of the step * processing code. */ if (CHECK_IS_NOT(ppc, REPAIR)) return; location *loc = (location *)check_get_step_data(ppc->data); /* do all steps */ while (loc->step != CHECK_STEP_COMPLETE && steps[loc->step].func != NULL) { if (step_exe(ppc, loc)) return; } }
7,165
22.728477
74
c
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmempool/check_cto.c
/* * Copyright 2016-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * check_cto.c -- check pmemcto */ #include <inttypes.h> #include <sys/param.h> #include <endian.h> #include "out.h" #include "libpmempool.h" #include "pmempool.h" #include "pool.h" #include "check_util.h" enum question { Q_CTO_CONSISTENT, Q_CTO_ADDR, Q_CTO_SIZE, Q_CTO_ROOT }; /* * cto_read -- (internal) read pmemcto header */ static int cto_read(PMEMpoolcheck *ppc) { /* * Here we want to read the pmemcto header without the pool_hdr as we've * already done it before. 
* * Take the pointer to fields right after pool_hdr, compute the size and * offset of remaining fields. */ uint8_t *ptr = (uint8_t *)&ppc->pool->hdr.cto; ptr += sizeof(ppc->pool->hdr.cto.hdr); size_t size = sizeof(ppc->pool->hdr.cto) - sizeof(ppc->pool->hdr.cto.hdr); uint64_t offset = sizeof(ppc->pool->hdr.log.hdr); if (pool_read(ppc->pool, ptr, size, offset)) return CHECK_ERR(ppc, "cannot read pmemcto structure"); return 0; } /* * cto_hdr_check -- (internal) check pmemcto header */ static int cto_hdr_check(PMEMpoolcheck *ppc, location *loc) { LOG(3, NULL); CHECK_INFO(ppc, "checking pmemcto header"); if (cto_read(ppc)) { ppc->result = CHECK_RESULT_ERROR; return -1; } if (ppc->pool->hdr.cto.consistent == 0) { if (CHECK_ASK(ppc, Q_CTO_CONSISTENT, "pmemcto.consistent flag is not set.|Do you want to set pmemcto.consistent flag?")) goto error; } if ((void *)ppc->pool->hdr.cto.addr == NULL) { if (CHECK_ASK(ppc, Q_CTO_ADDR, "invalid pmemcto.addr: %p.|Do you want to recover pmemcto.addr?", (void *)ppc->pool->hdr.cto.addr)) goto error; } if (ppc->pool->hdr.cto.size < PMEMCTO_MIN_POOL) { CHECK_INFO(ppc, "pmemcto.size is less than minimum: %zu < %zu.", ppc->pool->hdr.cto.size, PMEMCTO_MIN_POOL); } if (ppc->pool->hdr.cto.size != ppc->pool->params.size) { if (CHECK_ASK(ppc, Q_CTO_SIZE, "pmemcto.size is different than pool size: %zu != %zu.|Do you want to set pmemlog.size to the actual pool size?", ppc->pool->hdr.cto.size, ppc->pool->params.size)) goto error; } char *valid_addr_begin = (char *)ppc->pool->hdr.cto.addr + CTO_DSC_SIZE_ALIGNED; char *valid_addr_end = (char *)ppc->pool->hdr.cto.addr + ppc->pool->hdr.cto.size; if ((void *)ppc->pool->hdr.cto.root != NULL && ((char *)ppc->pool->hdr.cto.root < valid_addr_begin || (char *)ppc->pool->hdr.cto.root >= valid_addr_end)) { if (CHECK_ASK(ppc, Q_CTO_ROOT, "invalid pmemcto.root: %p.|Do you want to recover pmemcto.root?", (void *)ppc->pool->hdr.cto.root)) goto error; } if (ppc->result == CHECK_RESULT_CONSISTENT || ppc->result == 
CHECK_RESULT_REPAIRED) CHECK_INFO(ppc, "pmemcto header correct"); return check_questions_sequence_validate(ppc); error: ppc->result = CHECK_RESULT_NOT_CONSISTENT; check_end(ppc->data); return -1; } /* * cto_hdr_fix -- (internal) fix pmemcto header */ static int cto_hdr_fix(PMEMpoolcheck *ppc, location *loc, uint32_t question, void *ctx) { LOG(3, NULL); switch (question) { case Q_CTO_CONSISTENT: CHECK_INFO(ppc, "setting pmemcto.consistent flag"); ppc->pool->hdr.cto.consistent = 1; break; case Q_CTO_ADDR: CHECK_INFO(ppc, "recovering pmemcto.addr"); ppc->pool->hdr.cto.addr = 0; break; case Q_CTO_SIZE: CHECK_INFO(ppc, "setting pmemcto.size to the actual pool size %zu", ppc->pool->params.size); ppc->pool->hdr.cto.size = ppc->pool->params.size; break; case Q_CTO_ROOT: CHECK_INFO(ppc, "recovering pmemcto.root pointer"); ppc->pool->hdr.cto.root = 0; break; default: ERR("not implemented question id: %u", question); } return 0; } struct step { int (*check)(PMEMpoolcheck *, location *); int (*fix)(PMEMpoolcheck *, location *, uint32_t, void *); enum pool_type type; }; static const struct step steps[] = { { .check = cto_hdr_check, .type = POOL_TYPE_CTO }, { .fix = cto_hdr_fix, .type = POOL_TYPE_CTO }, { .check = NULL, .fix = NULL, }, }; /* * step_exe -- (internal) perform single step according to its parameters */ static inline int step_exe(PMEMpoolcheck *ppc, location *loc) { ASSERT(loc->step < ARRAY_SIZE(steps)); ASSERTeq(ppc->pool->params.type, POOL_TYPE_CTO); const struct step *step = &steps[loc->step++]; if (!(step->type & ppc->pool->params.type)) return 0; if (!step->fix) return step->check(ppc, loc); if (cto_read(ppc)) { ppc->result = CHECK_RESULT_ERROR; return -1; } return check_answer_loop(ppc, loc, NULL, 1, step->fix); } /* * check_ctok -- entry point for pmemcto checks */ void check_cto(PMEMpoolcheck *ppc) { LOG(3, NULL); location *loc = check_get_step_data(ppc->data); /* do all checks */ while (CHECK_NOT_COMPLETE(loc, steps)) { if (step_exe(ppc, loc)) break; } }
6,338
24.873469
117
c
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/pmem_posix.c
/* * Copyright 2014-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * pmem_posix.c -- pmem utilities with Posix implementation */ #include <stddef.h> #include <sys/mman.h> #include "pmem.h" #include "out.h" #include "mmap.h" /* * is_pmem_detect -- implement pmem_is_pmem() * * This function returns true only if the entire range can be confirmed * as being direct access persistent memory. 
Finding any part of the * range is not direct access, or failing to look up the information * because it is unmapped or because any sort of error happens, just * results in returning false. */ int is_pmem_detect(const void *addr, size_t len) { LOG(3, "addr %p len %zu", addr, len); if (len == 0) return 0; int retval = util_range_is_pmem(addr, len); LOG(4, "returning %d", retval); return retval; } /* * pmem_map_register -- memory map file and register mapping */ void * pmem_map_register(int fd, size_t len, const char *path, int is_dev_dax) { LOG(3, "fd %d len %zu path %s id_dev_dax %d", fd, len, path, is_dev_dax); void *addr; int map_sync; addr = util_map(fd, len, MAP_SHARED, 0, 0, &map_sync); if (!addr) return NULL; enum pmem_map_type type = MAX_PMEM_TYPE; if (is_dev_dax) type = PMEM_DEV_DAX; else if (map_sync) type = PMEM_MAP_SYNC; if (type != MAX_PMEM_TYPE) { if (util_range_register(addr, len, path, type)) { LOG(1, "can't track mapped region"); goto err_unmap; } } return addr; err_unmap: util_unmap(addr, len); return NULL; } /* * pmem_os_init -- os-dependent part of pmem initialization */ void pmem_os_init(void) { LOG(3, NULL); }
3,143
27.844037
74
c
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/libpmem.c
/* * Copyright 2014-2017, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * libpmem.c -- pmem entry points for libpmem */ #include <stdio.h> #include <stdint.h> #include "libpmem.h" #include "pmem.h" #include "pmemcommon.h" /* * libpmem_init -- load-time initialization for libpmem * * Called automatically by the run-time loader. 
*/ ATTR_CONSTRUCTOR void libpmem_init(void) { common_init(PMEM_LOG_PREFIX, PMEM_LOG_LEVEL_VAR, PMEM_LOG_FILE_VAR, PMEM_MAJOR_VERSION, PMEM_MINOR_VERSION); LOG(3, NULL); pmem_init(); } /* * libpmem_fini -- libpmem cleanup routine * * Called automatically when the process terminates. */ ATTR_DESTRUCTOR void libpmem_fini(void) { LOG(3, NULL); common_fini(); } /* * pmem_check_versionU -- see if library meets application version requirements */ #ifndef _WIN32 static inline #endif const char * pmem_check_versionU(unsigned major_required, unsigned minor_required) { LOG(3, "major_required %u minor_required %u", major_required, minor_required); if (major_required != PMEM_MAJOR_VERSION) { ERR("libpmem major version mismatch (need %u, found %u)", major_required, PMEM_MAJOR_VERSION); return out_get_errormsg(); } if (minor_required > PMEM_MINOR_VERSION) { ERR("libpmem minor version mismatch (need %u, found %u)", minor_required, PMEM_MINOR_VERSION); return out_get_errormsg(); } return NULL; } #ifndef _WIN32 /* * pmem_check_version -- see if library meets application version requirements */ const char * pmem_check_version(unsigned major_required, unsigned minor_required) { return pmem_check_versionU(major_required, minor_required); } #else /* * pmem_check_versionW -- see if library meets application version requirements */ const wchar_t * pmem_check_versionW(unsigned major_required, unsigned minor_required) { if (pmem_check_versionU(major_required, minor_required) != NULL) return out_get_errormsgW(); else return NULL; } #endif /* * pmem_errormsgU -- return last error message */ #ifndef _WIN32 static inline #endif const char * pmem_errormsgU(void) { return out_get_errormsg(); } #ifndef _WIN32 /* * pmem_errormsg -- return last error message */ const char * pmem_errormsg(void) { return pmem_errormsgU(); } #else /* * pmem_errormsgW -- return last error message as wchar_t */ const wchar_t * pmem_errormsgW(void) { return out_get_errormsgW(); } #endif
3,902
24.180645
79
c
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/memops_generic.c
/* * Copyright 2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * memops_generic.c -- architecture-independent memmove & memset fallback * * This fallback is needed to fulfill guarantee that pmem_mem[cpy|set|move] * will use at least 8-byte stores (for 8-byte aligned buffers and sizes), * even when accelerated implementation is missing or disabled. * This guarantee is needed to maintain correctness eg in pmemobj. * Libc may do the same, but this behavior is not documented, so we can't rely * on that. 
*/ #include <stddef.h> #include "out.h" #include "pmem.h" #include "libpmem.h" #include "util.h" /* * cpy64 -- (internal) copy 64 bytes from src to dst */ static force_inline void cpy64(uint64_t *dst, const uint64_t *src) { /* * We use atomics here just to be sure compiler will not split stores. * Order of stores doesn't matter. */ uint64_t tmp[8]; util_atomic_load_explicit64(&src[0], &tmp[0], memory_order_relaxed); util_atomic_load_explicit64(&src[1], &tmp[1], memory_order_relaxed); util_atomic_load_explicit64(&src[2], &tmp[2], memory_order_relaxed); util_atomic_load_explicit64(&src[3], &tmp[3], memory_order_relaxed); util_atomic_load_explicit64(&src[4], &tmp[4], memory_order_relaxed); util_atomic_load_explicit64(&src[5], &tmp[5], memory_order_relaxed); util_atomic_load_explicit64(&src[6], &tmp[6], memory_order_relaxed); util_atomic_load_explicit64(&src[7], &tmp[7], memory_order_relaxed); util_atomic_store_explicit64(&dst[0], tmp[0], memory_order_relaxed); util_atomic_store_explicit64(&dst[1], tmp[1], memory_order_relaxed); util_atomic_store_explicit64(&dst[2], tmp[2], memory_order_relaxed); util_atomic_store_explicit64(&dst[3], tmp[3], memory_order_relaxed); util_atomic_store_explicit64(&dst[4], tmp[4], memory_order_relaxed); util_atomic_store_explicit64(&dst[5], tmp[5], memory_order_relaxed); util_atomic_store_explicit64(&dst[6], tmp[6], memory_order_relaxed); util_atomic_store_explicit64(&dst[7], tmp[7], memory_order_relaxed); } /* * cpy8 -- (internal) copy 8 bytes from src to dst */ static force_inline void cpy8(uint64_t *dst, const uint64_t *src) { uint64_t tmp; util_atomic_load_explicit64(src, &tmp, memory_order_relaxed); util_atomic_store_explicit64(dst, tmp, memory_order_relaxed); } /* * store8 -- (internal) store 8 bytes */ static force_inline void store8(uint64_t *dst, uint64_t c) { util_atomic_store_explicit64(dst, c, memory_order_relaxed); } /* * memmove_nodrain_generic -- generic memmove to pmem without hw drain */ void * memmove_nodrain_generic(void 
*dst, const void *src, size_t len, unsigned flags) { LOG(15, "pmemdest %p src %p len %zu flags 0x%x", dst, src, len, flags); char *cdst = dst; const char *csrc = src; size_t remaining; (void) flags; if ((uintptr_t)cdst - (uintptr_t)csrc >= len) { size_t cnt = (uint64_t)cdst & 7; if (cnt > 0) { cnt = 8 - cnt; if (cnt > len) cnt = len; for (size_t i = 0; i < cnt; ++i) cdst[i] = csrc[i]; pmem_flush_flags(cdst, cnt, flags); cdst += cnt; csrc += cnt; len -= cnt; } uint64_t *dst8 = (uint64_t *)cdst; const uint64_t *src8 = (const uint64_t *)csrc; while (len >= 64) { cpy64(dst8, src8); pmem_flush_flags(dst8, 64, flags); len -= 64; dst8 += 8; src8 += 8; } remaining = len; while (len >= 8) { cpy8(dst8, src8); len -= 8; dst8++; src8++; } cdst = (char *)dst8; csrc = (const char *)src8; for (size_t i = 0; i < len; ++i) *cdst++ = *csrc++; if (remaining) pmem_flush_flags(cdst - remaining, remaining, flags); } else { cdst += len; csrc += len; size_t cnt = (uint64_t)cdst & 7; if (cnt > 0) { if (cnt > len) cnt = len; cdst -= cnt; csrc -= cnt; len -= cnt; for (size_t i = cnt; i > 0; --i) cdst[i - 1] = csrc[i - 1]; pmem_flush_flags(cdst, cnt, flags); } uint64_t *dst8 = (uint64_t *)cdst; const uint64_t *src8 = (const uint64_t *)csrc; while (len >= 64) { dst8 -= 8; src8 -= 8; cpy64(dst8, src8); pmem_flush_flags(dst8, 64, flags); len -= 64; } remaining = len; while (len >= 8) { --dst8; --src8; cpy8(dst8, src8); len -= 8; } cdst = (char *)dst8; csrc = (const char *)src8; for (size_t i = len; i > 0; --i) *--cdst = *--csrc; if (remaining) pmem_flush_flags(cdst, remaining, flags); } return dst; } /* * memset_nodrain_generic -- generic memset to pmem without hw drain */ void * memset_nodrain_generic(void *dst, int c, size_t len, unsigned flags) { LOG(15, "pmemdest %p c 0x%x len %zu flags 0x%x", dst, c, len, flags); (void) flags; char *cdst = dst; size_t cnt = (uint64_t)cdst & 7; if (cnt > 0) { cnt = 8 - cnt; if (cnt > len) cnt = len; for (size_t i = 0; i < cnt; ++i) cdst[i] = (char)c; 
pmem_flush_flags(cdst, cnt, flags); cdst += cnt; len -= cnt; } uint64_t *dst8 = (uint64_t *)cdst; uint64_t u = (unsigned char)c; uint64_t tmp = (u << 56) | (u << 48) | (u << 40) | (u << 32) | (u << 24) | (u << 16) | (u << 8) | u; while (len >= 64) { store8(&dst8[0], tmp); store8(&dst8[1], tmp); store8(&dst8[2], tmp); store8(&dst8[3], tmp); store8(&dst8[4], tmp); store8(&dst8[5], tmp); store8(&dst8[6], tmp); store8(&dst8[7], tmp); pmem_flush_flags(dst8, 64, flags); len -= 64; dst8 += 8; } size_t remaining = len; while (len >= 8) { store8(dst8, tmp); len -= 8; dst8++; } cdst = (char *)dst8; for (size_t i = 0; i < len; ++i) *cdst++ = (char)c; if (remaining) pmem_flush_flags(cdst - remaining, remaining, flags); return dst; }
7,120
25.180147
78
c
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/libpmem_main.c
/* * Copyright 2015-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * libpmem_main.c -- entry point for libpmem.dll * * XXX - This is a placeholder. All the library initialization/cleanup * that is done in library ctors/dtors, as well as TLS initialization * should be moved here. 
*/ #include "win_mmap.h" void libpmem_init(void); void libpmem_fini(void); int APIENTRY DllMain(HINSTANCE hInstance, DWORD dwReason, LPVOID lpReserved) { switch (dwReason) { case DLL_PROCESS_ATTACH: libpmem_init(); win_mmap_init(); break; case DLL_THREAD_ATTACH: case DLL_THREAD_DETACH: break; case DLL_PROCESS_DETACH: win_mmap_fini(); libpmem_fini(); break; } return TRUE; }
2,227
32.757576
74
c
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/pmem.c
/* * Copyright 2014-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * pmem.c -- pmem entry points for libpmem * * * PERSISTENT MEMORY INSTRUCTIONS ON X86 * * The primary feature of this library is to provide a way to flush * changes to persistent memory as outlined below (note that many * of the decisions below are made at initialization time, and not * repeated every time a flush is requested). * * To flush a range to pmem when CLWB is available: * * CLWB for each cache line in the given range. 
* * SFENCE to ensure the CLWBs above have completed. * * To flush a range to pmem when CLFLUSHOPT is available and CLWB is not * (same as above but issue CLFLUSHOPT instead of CLWB): * * CLFLUSHOPT for each cache line in the given range. * * SFENCE to ensure the CLWBs above have completed. * * To flush a range to pmem when neither CLFLUSHOPT or CLWB are available * (same as above but fences surrounding CLFLUSH are not required): * * CLFLUSH for each cache line in the given range. * * To memcpy a range of memory to pmem when MOVNT is available: * * Copy any non-64-byte portion of the destination using MOV. * * Use the flush flow above without the fence for the copied portion. * * Copy using MOVNTDQ, up to any non-64-byte aligned end portion. * (The MOVNT instructions bypass the cache, so no flush is required.) * * Copy any unaligned end portion using MOV. * * Use the flush flow above for the copied portion (including fence). * * To memcpy a range of memory to pmem when MOVNT is not available: * * Just pass the call to the normal memcpy() followed by pmem_persist(). * * To memset a non-trivial sized range of memory to pmem: * * Same as the memcpy cases above but store the given value instead * of reading values from the source. * * These features are supported for ARM AARCH64 using equivalent ARM * assembly instruction. Please refer to (arm_cacheops.h) for more details. 
* * INTERFACES FOR FLUSHING TO PERSISTENT MEMORY * * Given the flows above, three interfaces are provided for flushing a range * so that the caller has the ability to separate the steps when necessary, * but otherwise leaves the detection of available instructions to the libpmem: * * pmem_persist(addr, len) * * This is the common case, which just calls the two other functions: * * pmem_flush(addr, len); * pmem_drain(); * * pmem_flush(addr, len) * * CLWB or CLFLUSHOPT or CLFLUSH for each cache line * * pmem_drain() * * SFENCE unless using CLFLUSH * * * INTERFACES FOR COPYING/SETTING RANGES OF MEMORY * * Given the flows above, the following interfaces are provided for the * memmove/memcpy/memset operations to persistent memory: * * pmem_memmove_nodrain() * * Checks for overlapped ranges to determine whether to copy from * the beginning of the range or from the end. If MOVNT instructions * are available, uses the memory copy flow described above, otherwise * calls the libc memmove() followed by pmem_flush(). Since no conditional * compilation and/or architecture specific CFLAGS are in use at the * moment, SSE2 ( thus movnt ) is just assumed to be available. * * pmem_memcpy_nodrain() * * Just calls pmem_memmove_nodrain(). * * pmem_memset_nodrain() * * If MOVNT instructions are available, uses the memset flow described * above, otherwise calls the libc memset() followed by pmem_flush(). * * pmem_memmove_persist() * pmem_memcpy_persist() * pmem_memset_persist() * * Calls the appropriate _nodrain() function followed by pmem_drain(). * * * DECISIONS MADE AT INITIALIZATION TIME * * As much as possible, all decisions described above are made at library * initialization time. This is achieved using function pointers that are * setup by pmem_init() when the library loads. 
* * Func_predrain_fence is used by pmem_drain() to call one of: * predrain_fence_empty() * predrain_memory_barrier() * * Func_flush is used by pmem_flush() to call one of: * flush_dcache() * flush_dcache_invalidate_opt() * flush_dcache_invalidate() * * Func_memmove_nodrain is used by memmove_nodrain() to call one of: * memmove_nodrain_libc() * memmove_nodrain_movnt() * * Func_memset_nodrain is used by memset_nodrain() to call one of: * memset_nodrain_libc() * memset_nodrain_movnt() * * DEBUG LOGGING * * Many of the functions here get called hundreds of times from loops * iterating over ranges, making the usual LOG() calls at level 3 * impractical. The call tracing log for those functions is set at 15. */ #include <sys/mman.h> #include <sys/stat.h> #include <errno.h> #include <fcntl.h> #include "libpmem.h" #include "pmem.h" #include "out.h" #include "os.h" #include "mmap.h" #include "file.h" #include "valgrind_internal.h" #include "os_deep.h" #include "os_auto_flush.h" static struct pmem_funcs Funcs; /* * pmem_has_hw_drain -- return whether or not HW drain was found * * Always false for x86: HW drain is done by HW with no SW involvement. 
*/ int pmem_has_hw_drain(void) { LOG(3, NULL); return 0; } /* * pmem_drain -- wait for any PM stores to drain from HW buffers */ void pmem_drain(void) { LOG(15, NULL); Funcs.predrain_fence(); } /* * pmem_has_auto_flush -- check if platform supports eADR */ int pmem_has_auto_flush() { LOG(3, NULL); return os_auto_flush(); } /* * pmem_deep_flush -- flush processor cache for the given range * regardless of eADR support on platform */ void pmem_deep_flush(const void *addr, size_t len) { LOG(15, "addr %p len %zu", addr, len); VALGRIND_DO_CHECK_MEM_IS_ADDRESSABLE(addr, len); Funcs.deep_flush(addr, len); } /* * pmem_flush -- flush processor cache for the given range */ void pmem_flush(const void *addr, size_t len) { LOG(15, "addr %p len %zu", addr, len); VALGRIND_DO_CHECK_MEM_IS_ADDRESSABLE(addr, len); Funcs.flush(addr, len); } /* * pmem_persist -- make any cached changes to a range of pmem persistent */ void pmem_persist(const void *addr, size_t len) { LOG(15, "addr %p len %zu", addr, len); pmem_flush(addr, len); pmem_drain(); } /* * pmem_msync -- flush to persistence via msync * * Using msync() means this routine is less optimal for pmem (but it * still works) but it also works for any memory mapped file, unlike * pmem_persist() which is only safe where pmem_is_pmem() returns true. */ int pmem_msync(const void *addr, size_t len) { LOG(15, "addr %p len %zu", addr, len); VALGRIND_DO_CHECK_MEM_IS_ADDRESSABLE(addr, len); /* * msync requires len to be a multiple of pagesize, so * adjust addr and len to represent the full 4k chunks * covering the given range. 
*/ /* increase len by the amount we gain when we round addr down */ len += (uintptr_t)addr & (Pagesize - 1); /* round addr down to page boundary */ uintptr_t uptr = (uintptr_t)addr & ~((uintptr_t)Pagesize - 1); /* * msync accepts addresses aligned to page boundary, so we may sync * more and part of it may have been marked as undefined/inaccessible * Msyncing such memory is not a bug, so as a workaround temporarily * disable error reporting. */ VALGRIND_DO_DISABLE_ERROR_REPORTING; int ret; if ((ret = msync((void *)uptr, len, MS_SYNC)) < 0) ERR("!msync"); VALGRIND_DO_ENABLE_ERROR_REPORTING; /* full flush */ VALGRIND_DO_PERSIST(uptr, len); return ret; } /* * is_pmem_always -- (internal) always true (for meaningful parameters) version * of pmem_is_pmem() */ static int is_pmem_always(const void *addr, size_t len) { LOG(3, "addr %p len %zu", addr, len); if (len == 0) return 0; return 1; } /* * is_pmem_never -- (internal) never true version of pmem_is_pmem() */ static int is_pmem_never(const void *addr, size_t len) { LOG(3, "addr %p len %zu", addr, len); return 0; } /* * pmem_is_pmem_init -- (internal) initialize Func_is_pmem pointer * * This should be done only once - on the first call to pmem_is_pmem(). * If PMEM_IS_PMEM_FORCE is set, it would override the default behavior * of pmem_is_pmem(). */ static void pmem_is_pmem_init(void) { LOG(3, NULL); static volatile unsigned init; while (init != 2) { if (!util_bool_compare_and_swap32(&init, 0, 1)) continue; /* * For debugging/testing, allow pmem_is_pmem() to be forced * to always true or never true using environment variable * PMEM_IS_PMEM_FORCE values of zero or one. * * This isn't #ifdef DEBUG because it has a trivial performance * impact and it may turn out to be useful as a "chicken bit" * for systems where pmem_is_pmem() isn't correctly detecting * true persistent memory. 
*/ char *ptr = os_getenv("PMEM_IS_PMEM_FORCE"); if (ptr) { int val = atoi(ptr); if (val == 0) Funcs.is_pmem = is_pmem_never; else if (val == 1) Funcs.is_pmem = is_pmem_always; VALGRIND_ANNOTATE_HAPPENS_BEFORE(&Funcs.is_pmem); LOG(4, "PMEM_IS_PMEM_FORCE=%d", val); } if (Funcs.is_pmem == NULL) Funcs.is_pmem = is_pmem_never; if (!util_bool_compare_and_swap32(&init, 1, 2)) FATAL("util_bool_compare_and_swap32"); } } /* * pmem_is_pmem -- return true if entire range is persistent memory */ int pmem_is_pmem(const void *addr, size_t len) { LOG(10, "addr %p len %zu", addr, len); static int once; /* This is not thread-safe, but pmem_is_pmem_init() is. */ if (once == 0) { pmem_is_pmem_init(); util_fetch_and_add32(&once, 1); } VALGRIND_ANNOTATE_HAPPENS_AFTER(&Funcs.is_pmem); return Funcs.is_pmem(addr, len); } #define PMEM_FILE_ALL_FLAGS\ (PMEM_FILE_CREATE|PMEM_FILE_EXCL|PMEM_FILE_SPARSE|PMEM_FILE_TMPFILE) #define PMEM_DAX_VALID_FLAGS\ (PMEM_FILE_CREATE|PMEM_FILE_SPARSE) /* * pmem_map_fileU -- create or open the file and map it to memory */ #ifndef _WIN32 static inline #endif void * pmem_map_fileU(const char *path, size_t len, int flags, mode_t mode, size_t *mapped_lenp, int *is_pmemp) { LOG(3, "path \"%s\" size %zu flags %x mode %o mapped_lenp %p " "is_pmemp %p", path, len, flags, mode, mapped_lenp, is_pmemp); int oerrno; int fd; int open_flags = O_RDWR; int delete_on_err = 0; int file_type = util_file_get_type(path); if (file_type == OTHER_ERROR) return NULL; if (flags & ~(PMEM_FILE_ALL_FLAGS)) { ERR("invalid flag specified %x", flags); errno = EINVAL; return NULL; } if (file_type == TYPE_DEVDAX) { if (flags & ~(PMEM_DAX_VALID_FLAGS)) { ERR("flag unsupported for Device DAX %x", flags); errno = EINVAL; return NULL; } else { /* we are ignoring all of the flags */ flags = 0; ssize_t actual_len = util_file_get_size(path); if (actual_len < 0) { ERR("unable to read Device DAX size"); errno = EINVAL; return NULL; } if (len != 0 && len != (size_t)actual_len) { ERR("Device DAX length 
must be either 0 or " "the exact size of the device %zu", len); errno = EINVAL; return NULL; } len = 0; } } if (flags & PMEM_FILE_CREATE) { if ((os_off_t)len < 0) { ERR("invalid file length %zu", len); errno = EINVAL; return NULL; } open_flags |= O_CREAT; } if (flags & PMEM_FILE_EXCL) open_flags |= O_EXCL; if ((len != 0) && !(flags & PMEM_FILE_CREATE)) { ERR("non-zero 'len' not allowed without PMEM_FILE_CREATE"); errno = EINVAL; return NULL; } if ((len == 0) && (flags & PMEM_FILE_CREATE)) { ERR("zero 'len' not allowed with PMEM_FILE_CREATE"); errno = EINVAL; return NULL; } if ((flags & PMEM_FILE_TMPFILE) && !(flags & PMEM_FILE_CREATE)) { ERR("PMEM_FILE_TMPFILE not allowed without PMEM_FILE_CREATE"); errno = EINVAL; return NULL; } if (flags & PMEM_FILE_TMPFILE) { if ((fd = util_tmpfile(path, OS_DIR_SEP_STR"pmem.XXXXXX", open_flags & O_EXCL)) < 0) { LOG(2, "failed to create temporary file at \"%s\"", path); return NULL; } } else { if ((fd = os_open(path, open_flags, mode)) < 0) { ERR("!open %s", path); return NULL; } if ((flags & PMEM_FILE_CREATE) && (flags & PMEM_FILE_EXCL)) delete_on_err = 1; } if (flags & PMEM_FILE_CREATE) { /* * Always set length of file to 'len'. * (May either extend or truncate existing file.) 
*/ if (os_ftruncate(fd, (os_off_t)len) != 0) { ERR("!ftruncate"); goto err; } if ((flags & PMEM_FILE_SPARSE) == 0) { if ((errno = os_posix_fallocate(fd, 0, (os_off_t)len)) != 0) { ERR("!posix_fallocate"); goto err; } } } else { ssize_t actual_size = util_file_get_size(path); if (actual_size < 0) { ERR("stat %s: negative size", path); errno = EINVAL; goto err; } len = (size_t)actual_size; } void *addr = pmem_map_register(fd, len, path, file_type == TYPE_DEVDAX); if (addr == NULL) goto err; if (mapped_lenp != NULL) *mapped_lenp = len; if (is_pmemp != NULL) *is_pmemp = pmem_is_pmem(addr, len); LOG(3, "returning %p", addr); VALGRIND_REGISTER_PMEM_MAPPING(addr, len); VALGRIND_REGISTER_PMEM_FILE(fd, addr, len, 0); (void) os_close(fd); return addr; err: oerrno = errno; (void) os_close(fd); if (delete_on_err) (void) os_unlink(path); errno = oerrno; return NULL; } #ifndef _WIN32 /* * pmem_map_file -- create or open the file and map it to memory */ void * pmem_map_file(const char *path, size_t len, int flags, mode_t mode, size_t *mapped_lenp, int *is_pmemp) { return pmem_map_fileU(path, len, flags, mode, mapped_lenp, is_pmemp); } #else /* * pmem_map_fileW -- create or open the file and map it to memory */ void * pmem_map_fileW(const wchar_t *path, size_t len, int flags, mode_t mode, size_t *mapped_lenp, int *is_pmemp) { char *upath = util_toUTF8(path); if (upath == NULL) return NULL; void *ret = pmem_map_fileU(upath, len, flags, mode, mapped_lenp, is_pmemp); util_free_UTF8(upath); return ret; } #endif /* * pmem_unmap -- unmap the specified region */ int pmem_unmap(void *addr, size_t len) { LOG(3, "addr %p len %zu", addr, len); #ifndef _WIN32 util_range_unregister(addr, len); #endif VALGRIND_REMOVE_PMEM_MAPPING(addr, len); return util_unmap(addr, len); } /* * pmem_memmove -- memmove to pmem */ void * pmem_memmove(void *pmemdest, const void *src, size_t len, unsigned flags) { LOG(15, "pmemdest %p src %p len %zu flags 0x%x", pmemdest, src, len, flags); #ifdef DEBUG if (flags & 
~PMEM_F_MEM_VALID_FLAGS) ERR("invalid flags 0x%x", flags); #endif Funcs.memmove_nodrain(pmemdest, src, len, flags & ~PMEM_F_MEM_NODRAIN); if ((flags & (PMEM_F_MEM_NODRAIN | PMEM_F_MEM_NOFLUSH)) == 0) pmem_drain(); return pmemdest; } /* * pmem_memcpy -- memcpy to pmem */ void * pmem_memcpy(void *pmemdest, const void *src, size_t len, unsigned flags) { return pmem_memmove(pmemdest, src, len, flags); } /* * pmem_memset -- memset to pmem */ void * pmem_memset(void *pmemdest, int c, size_t len, unsigned flags) { LOG(15, "pmemdest %p c 0x%x len %zu flags 0x%x", pmemdest, c, len, flags); #ifdef DEBUG if (flags & ~PMEM_F_MEM_VALID_FLAGS) ERR("invalid flags 0x%x", flags); #endif Funcs.memset_nodrain(pmemdest, c, len, flags & ~PMEM_F_MEM_NODRAIN); if ((flags & (PMEM_F_MEM_NODRAIN | PMEM_F_MEM_NOFLUSH)) == 0) pmem_drain(); return pmemdest; } /* * pmem_memmove_nodrain -- memmove to pmem without hw drain */ void * pmem_memmove_nodrain(void *pmemdest, const void *src, size_t len) { return pmem_memmove(pmemdest, src, len, PMEM_F_MEM_NODRAIN); } /* * pmem_memcpy_nodrain -- memcpy to pmem without hw drain */ void * pmem_memcpy_nodrain(void *pmemdest, const void *src, size_t len) { return pmem_memcpy(pmemdest, src, len, PMEM_F_MEM_NODRAIN); } /* * pmem_memmove_persist -- memmove to pmem */ void * pmem_memmove_persist(void *pmemdest, const void *src, size_t len) { return pmem_memmove(pmemdest, src, len, 0); } /* * pmem_memcpy_persist -- memcpy to pmem */ void * pmem_memcpy_persist(void *pmemdest, const void *src, size_t len) { return pmem_memcpy(pmemdest, src, len, 0); } /* * pmem_memset_nodrain -- memset to pmem without hw drain */ void * pmem_memset_nodrain(void *pmemdest, int c, size_t len) { return pmem_memset(pmemdest, c, len, PMEM_F_MEM_NODRAIN); } /* * pmem_memset_persist -- memset to pmem */ void * pmem_memset_persist(void *pmemdest, int c, size_t len) { return pmem_memset(pmemdest, c, len, 0); } /* * pmem_init -- load-time initialization for pmem.c */ void pmem_init(void) { 
LOG(3, NULL); pmem_init_funcs(&Funcs); pmem_os_init(); } /* * pmem_deep_persist -- perform deep persist on a memory range * * It merely acts as wrapper around an msync call in most cases, the only * exception is the case of an mmap'ed DAX device on Linux. */ int pmem_deep_persist(const void *addr, size_t len) { LOG(3, "addr %p len %zu", addr, len); pmem_deep_flush(addr, len); return pmem_deep_drain(addr, len); } /* * pmem_deep_drain -- perform deep drain on a memory range */ int pmem_deep_drain(const void *addr, size_t len) { LOG(3, "addr %p len %zu", addr, len); return os_range_deep_common((uintptr_t)addr, len); }
18,443
23.592
79
c
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/pmem_windows.c
/* * Copyright 2016-2018, Intel Corporation * Copyright (c) 2016, Microsoft Corporation. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /* * pmem_windows.c -- pmem utilities with OS-specific implementation */ #include <memoryapi.h> #include "pmem.h" #include "out.h" #include "mmap.h" #include "win_mmap.h" #include "sys/mman.h" #if (NTDDI_VERSION >= NTDDI_WIN10_RS1) typedef BOOL (WINAPI *PQVM)( HANDLE, const void *, enum WIN32_MEMORY_INFORMATION_CLASS, PVOID, SIZE_T, PSIZE_T); static PQVM Func_qvmi = NULL; #endif /* * is_direct_mapped -- (internal) for each page in the given region * checks with MM, if it's direct mapped. */ static int is_direct_mapped(const void *begin, const void *end) { LOG(3, "begin %p end %p", begin, end); #if (NTDDI_VERSION >= NTDDI_WIN10_RS1) int retval = 1; WIN32_MEMORY_REGION_INFORMATION region_info; SIZE_T bytes_returned; if (Func_qvmi == NULL) { LOG(4, "QueryVirtualMemoryInformation not supported, " "assuming non-DAX."); return 0; } const void *begin_aligned = (const void *)rounddown((intptr_t)begin, Pagesize); const void *end_aligned = (const void *)roundup((intptr_t)end, Pagesize); for (const void *page = begin_aligned; page < end_aligned; page = (const void *)((char *)page + Pagesize)) { if (Func_qvmi(GetCurrentProcess(), page, MemoryRegionInfo, &region_info, sizeof(region_info), &bytes_returned)) { retval = region_info.DirectMapped; } else { LOG(4, "QueryVirtualMemoryInformation failed, assuming " "non-DAX. Last error: %08x", GetLastError()); retval = 0; } if (retval == 0) { LOG(4, "page %p is not direct mapped", page); break; } } return retval; #else /* if the MM API is not available the safest answer is NO */ return 0; #endif /* NTDDI_VERSION >= NTDDI_WIN10_RS1 */ } /* * is_pmem_detect -- implement pmem_is_pmem() * * This function returns true only if the entire range can be confirmed * as being direct access persistent memory. Finding any part of the * range is not direct access, or failing to look up the information * because it is unmapped or because any sort of error happens, just * results in returning false. 
*/ int is_pmem_detect(const void *addr, size_t len) { LOG(3, "addr %p len %zu", addr, len); if (len == 0) return 0; if (len > UINTPTR_MAX - (uintptr_t)addr) { len = UINTPTR_MAX - (uintptr_t)addr; LOG(4, "limit len to %zu to not get beyond address space", len); } int retval = 1; const void *begin = addr; const void *end = (const void *)((char *)addr + len); LOG(4, "begin %p end %p", begin, end); AcquireSRWLockShared(&FileMappingQLock); PFILE_MAPPING_TRACKER mt; SORTEDQ_FOREACH(mt, &FileMappingQHead, ListEntry) { if (mt->BaseAddress >= end) { LOG(4, "ignoring all mapped ranges beyond given range"); break; } if (mt->EndAddress <= begin) { LOG(4, "skipping all mapped ranges before given range"); continue; } if (!(mt->Flags & FILE_MAPPING_TRACKER_FLAG_DIRECT_MAPPED)) { LOG(4, "tracked range [%p, %p) is not direct mapped", mt->BaseAddress, mt->EndAddress); retval = 0; break; } /* * If there is a gap between the given region that we process * currently and the mapped region in our tracking list, we * need to process the gap by taking the long route of asking * MM for each page in that range. */ if (begin < mt->BaseAddress && !is_direct_mapped(begin, mt->BaseAddress)) { LOG(4, "untracked range [%p, %p) is not direct mapped", begin, mt->BaseAddress); retval = 0; break; } /* push our begin to reflect what we have already processed */ begin = mt->EndAddress; } /* * If we still have a range to verify, check with MM if the entire * region is direct mapped. 
*/ if (begin < end && !is_direct_mapped(begin, end)) { LOG(4, "untracked end range [%p, %p) is not direct mapped", begin, end); retval = 0; } ReleaseSRWLockShared(&FileMappingQLock); LOG(4, "returning %d", retval); return retval; } /* * pmem_map_register -- memory map file and register mapping */ void * pmem_map_register(int fd, size_t len, const char *path, int is_dev_dax) { /* there is no device dax on windows */ ASSERTeq(is_dev_dax, 0); return util_map(fd, len, MAP_SHARED, 0, 0, NULL); } /* * pmem_os_init -- os-dependent part of pmem initialization */ void pmem_os_init(void) { LOG(3, NULL); #if NTDDI_VERSION >= NTDDI_WIN10_RS1 Func_qvmi = (PQVM)GetProcAddress( GetModuleHandle(TEXT("KernelBase.dll")), "QueryVirtualMemoryInformation"); #endif }
6,094
27.615023
74
c
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/pmem.h
/* * Copyright 2014-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /* * pmem.h -- internal definitions for libpmem */ #ifndef PMEM_H #define PMEM_H #include <stddef.h> #include "libpmem.h" #include "util.h" #ifdef __cplusplus extern "C" { #endif #define PMEM_LOG_PREFIX "libpmem" #define PMEM_LOG_LEVEL_VAR "PMEM_LOG_LEVEL" #define PMEM_LOG_FILE_VAR "PMEM_LOG_FILE" typedef void (*predrain_fence_func)(void); typedef void (*flush_func)(const void *, size_t); typedef int (*is_pmem_func)(const void *addr, size_t len); typedef void *(*memmove_nodrain_func)(void *pmemdest, const void *src, size_t len, unsigned flags); typedef void *(*memset_nodrain_func)(void *pmemdest, int c, size_t len, unsigned flags); struct pmem_funcs { predrain_fence_func predrain_fence; flush_func flush; is_pmem_func is_pmem; memmove_nodrain_func memmove_nodrain; memset_nodrain_func memset_nodrain; flush_func deep_flush; }; void pmem_init(void); void pmem_os_init(void); void pmem_init_funcs(struct pmem_funcs *funcs); int is_pmem_detect(const void *addr, size_t len); void *pmem_map_register(int fd, size_t len, const char *path, int is_dev_dax); /* * flush_empty_nolog -- (internal) do not flush the CPU cache */ static force_inline void flush_empty_nolog(const void *addr, size_t len) { /* NOP */ } /* * flush64b_empty -- (internal) do not flush the CPU cache */ static force_inline void flush64b_empty(const char *addr) { } /* * pmem_flush_flags -- internal wrapper around pmem_flush */ static inline void pmem_flush_flags(const void *addr, size_t len, unsigned flags) { if (!(flags & PMEM_F_MEM_NOFLUSH)) pmem_flush(addr, len); } void *memmove_nodrain_generic(void *pmemdest, const void *src, size_t len, unsigned flags); void *memset_nodrain_generic(void *pmemdest, int c, size_t len, unsigned flags); #ifdef __cplusplus } #endif #endif
3,394
29.585586
80
h
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/flush.h
/* * Copyright 2014-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef X86_64_FLUSH_H #define X86_64_FLUSH_H #include <emmintrin.h> #include <stddef.h> #include <stdint.h> #include "util.h" #define FLUSH_ALIGN ((uintptr_t)64) #ifdef _MSC_VER #define pmem_clflushopt _mm_clflushopt #define pmem_clwb _mm_clwb #else /* * The x86 memory instructions are new enough that the compiler * intrinsic functions are not always available. The intrinsic * functions are defined here in terms of asm statements for now. 
*/ #define pmem_clflushopt(addr)\ asm volatile(".byte 0x66; clflush %0" : "+m" \ (*(volatile char *)(addr))); #define pmem_clwb(addr)\ asm volatile(".byte 0x66; xsaveopt %0" : "+m" \ (*(volatile char *)(addr))); #endif /* _MSC_VER */ /* * flush_clflush_nolog -- flush the CPU cache, using clflush */ static force_inline void flush_clflush_nolog(const void *addr, size_t len) { uintptr_t uptr; /* * Loop through cache-line-size (typically 64B) aligned chunks * covering the given range. */ for (uptr = (uintptr_t)addr & ~(FLUSH_ALIGN - 1); uptr < (uintptr_t)addr + len; uptr += FLUSH_ALIGN) _mm_clflush((char *)uptr); } /* * flush_clflushopt_nolog -- flush the CPU cache, using clflushopt */ static force_inline void flush_clflushopt_nolog(const void *addr, size_t len) { uintptr_t uptr; /* * Loop through cache-line-size (typically 64B) aligned chunks * covering the given range. */ for (uptr = (uintptr_t)addr & ~(FLUSH_ALIGN - 1); uptr < (uintptr_t)addr + len; uptr += FLUSH_ALIGN) { pmem_clflushopt((char *)uptr); } } /* * flush_clwb_nolog -- flush the CPU cache, using clwb */ static force_inline void flush_clwb_nolog(const void *addr, size_t len) { uintptr_t uptr; /* * Loop through cache-line-size (typically 64B) aligned chunks * covering the given range. */ for (uptr = (uintptr_t)addr & ~(FLUSH_ALIGN - 1); uptr < (uintptr_t)addr + len; uptr += FLUSH_ALIGN) { pmem_clwb((char *)uptr); } } #endif
3,520
29.885965
74
h
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/cpu.c
/* * Copyright 2015-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * cpu.c -- CPU features detection * * These routines do not work AARCH64 platforms, and need new detection * routiones to be added. Currently to ensure msync is not used and ARM * FLUSH instructions are used PMEM_IS_PMEM_FORCE=1 needs to be used. 
*/ /* * Reference: * http://www.intel.com/content/www/us/en/processors/ * architectures-software-developer-manuals.html * * https://support.amd.com/TechDocs/24594.pdf */ #include <string.h> #include "out.h" #include "cpu.h" #define EAX_IDX 0 #define EBX_IDX 1 #define ECX_IDX 2 #define EDX_IDX 3 #if defined(__x86_64__) || defined(__amd64__) #include <cpuid.h> static inline void cpuid(unsigned func, unsigned subfunc, unsigned cpuinfo[4]) { __cpuid_count(func, subfunc, cpuinfo[EAX_IDX], cpuinfo[EBX_IDX], cpuinfo[ECX_IDX], cpuinfo[EDX_IDX]); } #elif defined(_M_X64) || defined(_M_AMD64) #include <intrin.h> static inline void cpuid(unsigned func, unsigned subfunc, unsigned cpuinfo[4]) { __cpuidex(cpuinfo, func, subfunc); } #else /* not x86_64 */ #define cpuid(func, subfunc, cpuinfo)\ do { (void)(func); (void)(subfunc); (void)(cpuinfo); } while (0) #endif #ifndef bit_CLFLUSH #define bit_CLFLUSH (1 << 19) #endif #ifndef bit_CLFLUSHOPT #define bit_CLFLUSHOPT (1 << 23) #endif #ifndef bit_CLWB #define bit_CLWB (1 << 24) #endif #ifndef bit_AVX #define bit_AVX (1 << 28) #endif #ifndef bit_AVX512F #define bit_AVX512F (1 << 16) #endif /* * is_cpu_feature_present -- (internal) checks if CPU feature is supported */ static int is_cpu_feature_present(unsigned func, unsigned reg, unsigned bit) { unsigned cpuinfo[4] = { 0 }; /* check CPUID level first */ cpuid(0x0, 0x0, cpuinfo); if (cpuinfo[EAX_IDX] < func) return 0; cpuid(func, 0x0, cpuinfo); return (cpuinfo[reg] & bit) != 0; } /* * is_cpu_genuine_intel -- checks for genuine Intel CPU */ int is_cpu_genuine_intel(void) { unsigned cpuinfo[4] = { 0 }; union { char name[0x20]; unsigned cpuinfo[3]; } vendor; memset(&vendor, 0, sizeof(vendor)); cpuid(0x0, 0x0, cpuinfo); vendor.cpuinfo[0] = cpuinfo[EBX_IDX]; vendor.cpuinfo[1] = cpuinfo[EDX_IDX]; vendor.cpuinfo[2] = cpuinfo[ECX_IDX]; LOG(4, "CPU vendor: %s", vendor.name); return (strncmp(vendor.name, "GenuineIntel", sizeof(vendor.name))) == 0; } /* * is_cpu_clflush_present -- checks if 
CLFLUSH instruction is supported */ int is_cpu_clflush_present(void) { int ret = is_cpu_feature_present(0x1, EDX_IDX, bit_CLFLUSH); LOG(4, "CLFLUSH %ssupported", ret == 0 ? "not " : ""); return ret; } /* * is_cpu_clflushopt_present -- checks if CLFLUSHOPT instruction is supported */ int is_cpu_clflushopt_present(void) { int ret = is_cpu_feature_present(0x7, EBX_IDX, bit_CLFLUSHOPT); LOG(4, "CLFLUSHOPT %ssupported", ret == 0 ? "not " : ""); return ret; } /* * is_cpu_clwb_present -- checks if CLWB instruction is supported */ int is_cpu_clwb_present(void) { if (!is_cpu_genuine_intel()) return 0; int ret = is_cpu_feature_present(0x7, EBX_IDX, bit_CLWB); LOG(4, "CLWB %ssupported", ret == 0 ? "not " : ""); return ret; } /* * is_cpu_avx_present -- checks if AVX instructions are supported */ int is_cpu_avx_present(void) { int ret = is_cpu_feature_present(0x1, ECX_IDX, bit_AVX); LOG(4, "AVX %ssupported", ret == 0 ? "not " : ""); return ret; } /* * is_cpu_avx512f_present -- checks if AVX-512f instructions are supported */ int is_cpu_avx512f_present(void) { int ret = is_cpu_feature_present(0x7, EBX_IDX, bit_AVX512F); LOG(4, "AVX512f %ssupported", ret == 0 ? "not " : ""); return ret; }
5,154
23.316038
77
c
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/cpu.h
/* * Copyright 2016-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef PMDK_CPU_H #define PMDK_CPU_H 1 /* * cpu.h -- definitions for "cpu" module */ int is_cpu_genuine_intel(void); int is_cpu_clflush_present(void); int is_cpu_clflushopt_present(void); int is_cpu_clwb_present(void); int is_cpu_avx_present(void); int is_cpu_avx512f_present(void); #endif
1,898
38.5625
74
h
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/init.c
/* * Copyright 2014-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <string.h> #include <xmmintrin.h> #include "libpmem.h" #include "cpu.h" #include "flush.h" #include "memcpy_memset.h" #include "os.h" #include "out.h" #include "pmem.h" #include "valgrind_internal.h" #define MOVNT_THRESHOLD 256 size_t Movnt_threshold = MOVNT_THRESHOLD; /* * predrain_fence_empty -- (internal) issue the pre-drain fence instruction */ static void predrain_fence_empty(void) { LOG(15, NULL); VALGRIND_DO_FENCE; /* nothing to do (because CLFLUSH did it for us) */ } /* * predrain_memory_barrier -- (internal) issue the pre-drain fence instruction */ static void predrain_memory_barrier(void) { LOG(15, NULL); _mm_sfence(); /* ensure CLWB or CLFLUSHOPT completes */ } /* * flush_clflush -- (internal) flush the CPU cache, using clflush */ static void flush_clflush(const void *addr, size_t len) { LOG(15, "addr %p len %zu", addr, len); flush_clflush_nolog(addr, len); } /* * flush_clflushopt -- (internal) flush the CPU cache, using clflushopt */ static void flush_clflushopt(const void *addr, size_t len) { LOG(15, "addr %p len %zu", addr, len); flush_clflushopt_nolog(addr, len); } /* * flush_clwb -- (internal) flush the CPU cache, using clwb */ static void flush_clwb(const void *addr, size_t len) { LOG(15, "addr %p len %zu", addr, len); flush_clwb_nolog(addr, len); } /* * flush_empty -- (internal) do not flush the CPU cache */ static void flush_empty(const void *addr, size_t len) { LOG(15, "addr %p len %zu", addr, len); flush_empty_nolog(addr, len); } #if SSE2_AVAILABLE || AVX_AVAILABLE || AVX512F_AVAILABLE #define PMEM_F_MEM_MOVNT (PMEM_F_MEM_WC | PMEM_F_MEM_NONTEMPORAL) #define PMEM_F_MEM_MOV (PMEM_F_MEM_WB | PMEM_F_MEM_TEMPORAL) #define MEMCPY_TEMPLATE(isa, flush) \ static void *\ memmove_nodrain_##isa##_##flush(void *dest, const void *src, size_t len, \ unsigned flags)\ {\ if (len == 0 || src == dest)\ return dest;\ \ if (flags & PMEM_F_MEM_NOFLUSH) \ memmove_mov_##isa##_empty(dest, src, len); \ else if (flags & PMEM_F_MEM_MOVNT)\ memmove_movnt_##isa 
##_##flush(dest, src, len);\ else if (flags & PMEM_F_MEM_MOV)\ memmove_mov_##isa##_##flush(dest, src, len);\ else if (len < Movnt_threshold)\ memmove_mov_##isa##_##flush(dest, src, len);\ else\ memmove_movnt_##isa##_##flush(dest, src, len);\ \ return dest;\ } #define MEMSET_TEMPLATE(isa, flush)\ static void *\ memset_nodrain_##isa##_##flush(void *dest, int c, size_t len, unsigned flags)\ {\ if (len == 0)\ return dest;\ \ if (flags & PMEM_F_MEM_NOFLUSH) \ memset_mov_##isa##_empty(dest, c, len); \ else if (flags & PMEM_F_MEM_MOVNT)\ memset_movnt_##isa##_##flush(dest, c, len);\ else if (flags & PMEM_F_MEM_MOV)\ memset_mov_##isa##_##flush(dest, c, len);\ else if (len < Movnt_threshold)\ memset_mov_##isa##_##flush(dest, c, len);\ else\ memset_movnt_##isa##_##flush(dest, c, len);\ \ return dest;\ } #endif #if SSE2_AVAILABLE MEMCPY_TEMPLATE(sse2, clflush) MEMCPY_TEMPLATE(sse2, clflushopt) MEMCPY_TEMPLATE(sse2, clwb) MEMCPY_TEMPLATE(sse2, empty) MEMSET_TEMPLATE(sse2, clflush) MEMSET_TEMPLATE(sse2, clflushopt) MEMSET_TEMPLATE(sse2, clwb) MEMSET_TEMPLATE(sse2, empty) #endif #if AVX_AVAILABLE MEMCPY_TEMPLATE(avx, clflush) MEMCPY_TEMPLATE(avx, clflushopt) MEMCPY_TEMPLATE(avx, clwb) MEMCPY_TEMPLATE(avx, empty) MEMSET_TEMPLATE(avx, clflush) MEMSET_TEMPLATE(avx, clflushopt) MEMSET_TEMPLATE(avx, clwb) MEMSET_TEMPLATE(avx, empty) #endif #if AVX512F_AVAILABLE MEMCPY_TEMPLATE(avx512f, clflush) MEMCPY_TEMPLATE(avx512f, clflushopt) MEMCPY_TEMPLATE(avx512f, clwb) MEMCPY_TEMPLATE(avx512f, empty) MEMSET_TEMPLATE(avx512f, clflush) MEMSET_TEMPLATE(avx512f, clflushopt) MEMSET_TEMPLATE(avx512f, clwb) MEMSET_TEMPLATE(avx512f, empty) #endif /* * memmove_nodrain_libc -- (internal) memmove to pmem using libc */ static void * memmove_nodrain_libc(void *pmemdest, const void *src, size_t len, unsigned flags) { LOG(15, "pmemdest %p src %p len %zu flags 0x%x", pmemdest, src, len, flags); (void) flags; memmove(pmemdest, src, len); pmem_flush_flags(pmemdest, len, flags); return pmemdest; } /* * 
memset_nodrain_libc -- (internal) memset to pmem using libc */ static void * memset_nodrain_libc(void *pmemdest, int c, size_t len, unsigned flags) { LOG(15, "pmemdest %p c 0x%x len %zu flags 0x%x", pmemdest, c, len, flags); (void) flags; memset(pmemdest, c, len); pmem_flush_flags(pmemdest, len, flags); return pmemdest; } enum memcpy_impl { MEMCPY_INVALID, MEMCPY_LIBC, MEMCPY_GENERIC, MEMCPY_SSE2, MEMCPY_AVX, MEMCPY_AVX512F }; /* * use_sse2_memcpy_memset -- (internal) SSE2 detected, use it if possible */ static void use_sse2_memcpy_memset(struct pmem_funcs *funcs, enum memcpy_impl *impl) { #if SSE2_AVAILABLE *impl = MEMCPY_SSE2; if (funcs->deep_flush == flush_clflush) funcs->memmove_nodrain = memmove_nodrain_sse2_clflush; else if (funcs->deep_flush == flush_clflushopt) funcs->memmove_nodrain = memmove_nodrain_sse2_clflushopt; else if (funcs->deep_flush == flush_clwb) funcs->memmove_nodrain = memmove_nodrain_sse2_clwb; else if (funcs->deep_flush == flush_empty) funcs->memmove_nodrain = memmove_nodrain_sse2_empty; else ASSERT(0); if (funcs->deep_flush == flush_clflush) funcs->memset_nodrain = memset_nodrain_sse2_clflush; else if (funcs->deep_flush == flush_clflushopt) funcs->memset_nodrain = memset_nodrain_sse2_clflushopt; else if (funcs->deep_flush == flush_clwb) funcs->memset_nodrain = memset_nodrain_sse2_clwb; else if (funcs->deep_flush == flush_empty) funcs->memset_nodrain = memset_nodrain_sse2_empty; else ASSERT(0); #else LOG(3, "sse2 disabled at build time"); #endif } /* * use_avx_memcpy_memset -- (internal) AVX detected, use it if possible */ static void use_avx_memcpy_memset(struct pmem_funcs *funcs, enum memcpy_impl *impl) { #if AVX_AVAILABLE LOG(3, "avx supported"); char *e = os_getenv("PMEM_AVX"); if (e == NULL || strcmp(e, "1") != 0) { LOG(3, "PMEM_AVX not set or not == 1"); return; } LOG(3, "PMEM_AVX enabled"); *impl = MEMCPY_AVX; if (funcs->deep_flush == flush_clflush) funcs->memmove_nodrain = memmove_nodrain_avx_clflush; else if (funcs->deep_flush == 
flush_clflushopt) funcs->memmove_nodrain = memmove_nodrain_avx_clflushopt; else if (funcs->deep_flush == flush_clwb) funcs->memmove_nodrain = memmove_nodrain_avx_clwb; else if (funcs->deep_flush == flush_empty) funcs->memmove_nodrain = memmove_nodrain_avx_empty; else ASSERT(0); if (funcs->deep_flush == flush_clflush) funcs->memset_nodrain = memset_nodrain_avx_clflush; else if (funcs->deep_flush == flush_clflushopt) funcs->memset_nodrain = memset_nodrain_avx_clflushopt; else if (funcs->deep_flush == flush_clwb) funcs->memset_nodrain = memset_nodrain_avx_clwb; else if (funcs->deep_flush == flush_empty) funcs->memset_nodrain = memset_nodrain_avx_empty; else ASSERT(0); #else LOG(3, "avx supported, but disabled at build time"); #endif } /* * use_avx512f_memcpy_memset -- (internal) AVX512F detected, use it if possible */ static void use_avx512f_memcpy_memset(struct pmem_funcs *funcs, enum memcpy_impl *impl) { #if AVX512F_AVAILABLE LOG(3, "avx512f supported"); char *e = os_getenv("PMEM_AVX512F"); if (e == NULL || strcmp(e, "1") != 0) { LOG(3, "PMEM_AVX512F not set or not == 1"); return; } LOG(3, "PMEM_AVX512F enabled"); *impl = MEMCPY_AVX512F; if (funcs->deep_flush == flush_clflush) funcs->memmove_nodrain = memmove_nodrain_avx512f_clflush; else if (funcs->deep_flush == flush_clflushopt) funcs->memmove_nodrain = memmove_nodrain_avx512f_clflushopt; else if (funcs->deep_flush == flush_clwb) funcs->memmove_nodrain = memmove_nodrain_avx512f_clwb; else if (funcs->deep_flush == flush_empty) funcs->memmove_nodrain = memmove_nodrain_avx512f_empty; else ASSERT(0); if (funcs->deep_flush == flush_clflush) funcs->memset_nodrain = memset_nodrain_avx512f_clflush; else if (funcs->deep_flush == flush_clflushopt) funcs->memset_nodrain = memset_nodrain_avx512f_clflushopt; else if (funcs->deep_flush == flush_clwb) funcs->memset_nodrain = memset_nodrain_avx512f_clwb; else if (funcs->deep_flush == flush_empty) funcs->memset_nodrain = memset_nodrain_avx512f_empty; else ASSERT(0); #else LOG(3, 
"avx512f supported, but disabled at build time"); #endif } /* * pmem_get_cpuinfo -- configure libpmem based on CPUID */ static void pmem_cpuinfo_to_funcs(struct pmem_funcs *funcs, enum memcpy_impl *impl) { LOG(3, NULL); if (is_cpu_clflush_present()) { funcs->is_pmem = is_pmem_detect; LOG(3, "clflush supported"); } if (is_cpu_clflushopt_present()) { LOG(3, "clflushopt supported"); char *e = os_getenv("PMEM_NO_CLFLUSHOPT"); if (e && strcmp(e, "1") == 0) { LOG(3, "PMEM_NO_CLFLUSHOPT forced no clflushopt"); } else { funcs->deep_flush = flush_clflushopt; funcs->predrain_fence = predrain_memory_barrier; } } if (is_cpu_clwb_present()) { LOG(3, "clwb supported"); char *e = os_getenv("PMEM_NO_CLWB"); if (e && strcmp(e, "1") == 0) { LOG(3, "PMEM_NO_CLWB forced no clwb"); } else { funcs->deep_flush = flush_clwb; funcs->predrain_fence = predrain_memory_barrier; } } char *ptr = os_getenv("PMEM_NO_MOVNT"); if (ptr && strcmp(ptr, "1") == 0) { LOG(3, "PMEM_NO_MOVNT forced no movnt"); } else { use_sse2_memcpy_memset(funcs, impl); if (is_cpu_avx_present()) use_avx_memcpy_memset(funcs, impl); if (is_cpu_avx512f_present()) use_avx512f_memcpy_memset(funcs, impl); } } /* * pmem_init_funcs -- initialize architecture-specific list of pmem operations */ void pmem_init_funcs(struct pmem_funcs *funcs) { LOG(3, NULL); funcs->predrain_fence = predrain_fence_empty; funcs->deep_flush = flush_clflush; funcs->is_pmem = NULL; funcs->memmove_nodrain = memmove_nodrain_generic; funcs->memset_nodrain = memset_nodrain_generic; enum memcpy_impl impl = MEMCPY_GENERIC; char *ptr = os_getenv("PMEM_NO_GENERIC_MEMCPY"); if (ptr) { long long val = atoll(ptr); if (val) { funcs->memmove_nodrain = memmove_nodrain_libc; funcs->memset_nodrain = memset_nodrain_libc; impl = MEMCPY_LIBC; } } pmem_cpuinfo_to_funcs(funcs, &impl); /* * For testing, allow overriding the default threshold * for using non-temporal stores in pmem_memcpy_*(), pmem_memmove_*() * and pmem_memset_*(). 
* It has no effect if movnt is not supported or disabled. */ ptr = os_getenv("PMEM_MOVNT_THRESHOLD"); if (ptr) { long long val = atoll(ptr); if (val < 0) { LOG(3, "Invalid PMEM_MOVNT_THRESHOLD"); } else { LOG(3, "PMEM_MOVNT_THRESHOLD set to %zu", (size_t)val); Movnt_threshold = (size_t)val; } } int flush; char *e = os_getenv("PMEM_NO_FLUSH"); if (e && (strcmp(e, "1") == 0)) { flush = 0; LOG(3, "Forced not flushing CPU_cache"); } else if (e && (strcmp(e, "0") == 0)) { flush = 1; LOG(3, "Forced flushing CPU_cache"); } else if (pmem_has_auto_flush() == 1) { flush = 0; LOG(3, "Not flushing CPU_cache, eADR detected"); } else { flush = 1; LOG(3, "Flushing CPU cache"); } if (flush) { funcs->flush = funcs->deep_flush; } else { funcs->flush = flush_empty; funcs->predrain_fence = predrain_memory_barrier; } if (funcs->deep_flush == flush_clwb) LOG(3, "using clwb"); else if (funcs->deep_flush == flush_clflushopt) LOG(3, "using clflushopt"); else if (funcs->deep_flush == flush_clflush) LOG(3, "using clflush"); else FATAL("invalid deep flush function address"); if (funcs->flush == flush_empty) LOG(3, "not flushing CPU cache"); else if (funcs->flush != funcs->deep_flush) FATAL("invalid flush function address"); if (impl == MEMCPY_AVX512F) LOG(3, "using movnt AVX512F"); else if (impl == MEMCPY_AVX) LOG(3, "using movnt AVX"); else if (impl == MEMCPY_SSE2) LOG(3, "using movnt SSE2"); else if (impl == MEMCPY_LIBC) LOG(3, "using libc memmove"); else if (impl == MEMCPY_GENERIC) LOG(3, "using generic memmove"); else FATAL("invalid memcpy impl"); }
13,566
25.654224
79
c
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/avx.h
/* * Copyright 2017-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef PMEM_AVX_H #define PMEM_AVX_H #include <immintrin.h> #include "util.h" /* * avx_zeroupper -- _mm256_zeroupper wrapper * * _mm256_zeroupper clears upper parts of avx registers. * * It's needed for 2 reasons: * - it improves performance of non-avx code after avx * - it works around problem discovered by Valgrind * * In optimized builds gcc inserts VZEROUPPER automatically before * calling non-avx code (or at the end of the function). 
But in release * builds it doesn't, so if we don't do this by ourselves, then when * someone memcpy'ies uninitialized data, Valgrind complains whenever * someone reads those registers. * * One notable example is loader, which tries to detect whether it * needs to save whole ymm registers by looking at their current * (possibly uninitialized) value. * * Valgrind complains like that: * Conditional jump or move depends on uninitialised value(s) * at 0x4015CC9: _dl_runtime_resolve_avx_slow * (in /lib/x86_64-linux-gnu/ld-2.24.so) * by 0x10B531: test_realloc_api (obj_basic_integration.c:185) * by 0x10F1EE: main (obj_basic_integration.c:594) * * Note: We have to be careful to not read AVX registers after this * intrinsic, because of this stupid gcc bug: * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82735 */ static force_inline void avx_zeroupper(void) { _mm256_zeroupper(); } static force_inline __m128i m256_get16b(__m256i ymm) { return _mm256_extractf128_si256(ymm, 0); } #ifdef _MSC_VER static force_inline uint64_t m256_get8b(__m256i ymm) { return (uint64_t)_mm_extract_epi64(m256_get16b(ymm), 0); } static force_inline uint32_t m256_get4b(__m256i ymm) { return (uint32_t)m256_get8b(ymm); } static force_inline uint16_t m256_get2b(__m256i ymm) { return (uint16_t)m256_get8b(ymm); } #else static force_inline uint64_t m256_get8b(__m256i ymm) { return (uint64_t)_mm256_extract_epi64(ymm, 0); } static force_inline uint32_t m256_get4b(__m256i ymm) { return (uint32_t)_mm256_extract_epi32(ymm, 0); } static force_inline uint16_t m256_get2b(__m256i ymm) { return (uint16_t)_mm256_extract_epi16(ymm, 0); } #endif #endif
3,753
31.362069
74
h
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memcpy_memset.h
/* * Copyright 2014-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef MEMCPY_MEMSET_H #define MEMCPY_MEMSET_H #include <stddef.h> #include <xmmintrin.h> #include "pmem.h" static inline void barrier_after_ntstores(void) { /* * In this configuration pmem_drain does not contain sfence, so we have * to serialize non-temporal store instructions. 
*/ _mm_sfence(); } static inline void no_barrier_after_ntstores(void) { /* * In this configuration pmem_drain contains sfence, so we don't have * to serialize non-temporal store instructions */ } #ifndef AVX512F_AVAILABLE /* XXX not supported in MSVC version we currently use */ #ifdef _MSC_VER #define AVX512F_AVAILABLE 0 #else #define AVX512F_AVAILABLE 1 #endif #endif #ifndef AVX_AVAILABLE #define AVX_AVAILABLE 1 #endif #ifndef SSE2_AVAILABLE #define SSE2_AVAILABLE 1 #endif #if SSE2_AVAILABLE void memmove_mov_sse2_clflush(char *dest, const char *src, size_t len); void memmove_mov_sse2_clflushopt(char *dest, const char *src, size_t len); void memmove_mov_sse2_clwb(char *dest, const char *src, size_t len); void memmove_mov_sse2_empty(char *dest, const char *src, size_t len); void memmove_movnt_sse2_clflush(char *dest, const char *src, size_t len); void memmove_movnt_sse2_clflushopt(char *dest, const char *src, size_t len); void memmove_movnt_sse2_clwb(char *dest, const char *src, size_t len); void memmove_movnt_sse2_empty(char *dest, const char *src, size_t len); void memset_mov_sse2_clflush(char *dest, int c, size_t len); void memset_mov_sse2_clflushopt(char *dest, int c, size_t len); void memset_mov_sse2_clwb(char *dest, int c, size_t len); void memset_mov_sse2_empty(char *dest, int c, size_t len); void memset_movnt_sse2_clflush(char *dest, int c, size_t len); void memset_movnt_sse2_clflushopt(char *dest, int c, size_t len); void memset_movnt_sse2_clwb(char *dest, int c, size_t len); void memset_movnt_sse2_empty(char *dest, int c, size_t len); #endif #if AVX_AVAILABLE void memmove_mov_avx_clflush(char *dest, const char *src, size_t len); void memmove_mov_avx_clflushopt(char *dest, const char *src, size_t len); void memmove_mov_avx_clwb(char *dest, const char *src, size_t len); void memmove_mov_avx_empty(char *dest, const char *src, size_t len); void memmove_movnt_avx_clflush(char *dest, const char *src, size_t len); void memmove_movnt_avx_clflushopt(char *dest, 
const char *src, size_t len); void memmove_movnt_avx_clwb(char *dest, const char *src, size_t len); void memmove_movnt_avx_empty(char *dest, const char *src, size_t len); void memset_mov_avx_clflush(char *dest, int c, size_t len); void memset_mov_avx_clflushopt(char *dest, int c, size_t len); void memset_mov_avx_clwb(char *dest, int c, size_t len); void memset_mov_avx_empty(char *dest, int c, size_t len); void memset_movnt_avx_clflush(char *dest, int c, size_t len); void memset_movnt_avx_clflushopt(char *dest, int c, size_t len); void memset_movnt_avx_clwb(char *dest, int c, size_t len); void memset_movnt_avx_empty(char *dest, int c, size_t len); #endif #if AVX512F_AVAILABLE void memmove_mov_avx512f_clflush(char *dest, const char *src, size_t len); void memmove_mov_avx512f_clflushopt(char *dest, const char *src, size_t len); void memmove_mov_avx512f_clwb(char *dest, const char *src, size_t len); void memmove_mov_avx512f_empty(char *dest, const char *src, size_t len); void memmove_movnt_avx512f_clflush(char *dest, const char *src, size_t len); void memmove_movnt_avx512f_clflushopt(char *dest, const char *src, size_t len); void memmove_movnt_avx512f_clwb(char *dest, const char *src, size_t len); void memmove_movnt_avx512f_empty(char *dest, const char *src, size_t len); void memset_mov_avx512f_clflush(char *dest, int c, size_t len); void memset_mov_avx512f_clflushopt(char *dest, int c, size_t len); void memset_mov_avx512f_clwb(char *dest, int c, size_t len); void memset_mov_avx512f_empty(char *dest, int c, size_t len); void memset_movnt_avx512f_clflush(char *dest, int c, size_t len); void memset_movnt_avx512f_clflushopt(char *dest, int c, size_t len); void memset_movnt_avx512f_clwb(char *dest, int c, size_t len); void memset_movnt_avx512f_empty(char *dest, int c, size_t len); #endif extern size_t Movnt_threshold; #endif
5,754
41.316176
79
h
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memset/memset_nt_avx_clflush.c
/* * Copyright 2017-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #define flush flush_clflush_nolog #define EXPORTED_SYMBOL memset_movnt_avx_clflush #define maybe_barrier barrier_after_ntstores #include "memset_nt_avx.h"
1,757
46.513514
74
c
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memset/memset_avx512f.h
/* * Copyright 2017-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef PMEM_MEMSET_AVX512F_H #define PMEM_MEMSET_AVX512F_H #include <stddef.h> #include "memset_avx.h" static force_inline void memset_small_avx512f(char *dest, __m256i ymm, size_t len) { /* We can't do better than AVX here. */ memset_small_avx(dest, ymm, len); } #endif
1,880
38.1875
74
h
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memset/memset_t_avx_clflush.c
/* * Copyright 2017-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #define flush64b _mm_clflush #define flush flush_clflush_nolog #define EXPORTED_SYMBOL memset_mov_avx_clflush #include "memset_t_avx.h"
1,738
46
74
c
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memset/memset_t_sse2_empty.c
/* * Copyright 2017-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #define flush64b flush64b_empty #define flush flush_empty_nolog #define EXPORTED_SYMBOL memset_mov_sse2_empty #include "memset_t_sse2.h"
1,739
46.027027
74
c
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memset/memset_t_sse2_clwb.c
/* * Copyright 2017-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #define flush64b pmem_clwb #define flush flush_clwb_nolog #define EXPORTED_SYMBOL memset_mov_sse2_clwb #include "memset_t_sse2.h"
1,732
45.837838
74
c
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memset/memset_nt_avx_clwb.c
/* * Copyright 2017-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #define flush flush_clwb_nolog #define EXPORTED_SYMBOL memset_movnt_avx_clwb #define maybe_barrier no_barrier_after_ntstores #include "memset_nt_avx.h"
1,754
46.432432
74
c
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memset/memset_t_avx_clwb.c
/* * Copyright 2017-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #define flush64b pmem_clwb #define flush flush_clwb_nolog #define EXPORTED_SYMBOL memset_mov_avx_clwb #include "memset_t_avx.h"
1,730
45.783784
74
c
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memset/memset_nt_sse2.h
/* * Copyright 2017-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <immintrin.h> #include <stddef.h> #include <stdint.h> #include "pmem.h" #include "flush.h" #include "libpmem.h" #include "memcpy_memset.h" #include "memset_sse2.h" #include "out.h" #include "valgrind_internal.h" static force_inline void memset_movnt4x64b(char *dest, __m128i xmm) { _mm_stream_si128((__m128i *)dest + 0, xmm); _mm_stream_si128((__m128i *)dest + 1, xmm); _mm_stream_si128((__m128i *)dest + 2, xmm); _mm_stream_si128((__m128i *)dest + 3, xmm); _mm_stream_si128((__m128i *)dest + 4, xmm); _mm_stream_si128((__m128i *)dest + 5, xmm); _mm_stream_si128((__m128i *)dest + 6, xmm); _mm_stream_si128((__m128i *)dest + 7, xmm); _mm_stream_si128((__m128i *)dest + 8, xmm); _mm_stream_si128((__m128i *)dest + 9, xmm); _mm_stream_si128((__m128i *)dest + 10, xmm); _mm_stream_si128((__m128i *)dest + 11, xmm); _mm_stream_si128((__m128i *)dest + 12, xmm); _mm_stream_si128((__m128i *)dest + 13, xmm); _mm_stream_si128((__m128i *)dest + 14, xmm); _mm_stream_si128((__m128i *)dest + 15, xmm); VALGRIND_DO_FLUSH(dest, 4 * 64); } static force_inline void memset_movnt2x64b(char *dest, __m128i xmm) { _mm_stream_si128((__m128i *)dest + 0, xmm); _mm_stream_si128((__m128i *)dest + 1, xmm); _mm_stream_si128((__m128i *)dest + 2, xmm); _mm_stream_si128((__m128i *)dest + 3, xmm); _mm_stream_si128((__m128i *)dest + 4, xmm); _mm_stream_si128((__m128i *)dest + 5, xmm); _mm_stream_si128((__m128i *)dest + 6, xmm); _mm_stream_si128((__m128i *)dest + 7, xmm); VALGRIND_DO_FLUSH(dest, 2 * 64); } static force_inline void memset_movnt1x64b(char *dest, __m128i xmm) { _mm_stream_si128((__m128i *)dest + 0, xmm); _mm_stream_si128((__m128i *)dest + 1, xmm); _mm_stream_si128((__m128i *)dest + 2, xmm); _mm_stream_si128((__m128i *)dest + 3, xmm); VALGRIND_DO_FLUSH(dest, 64); } static force_inline void memset_movnt1x32b(char *dest, __m128i xmm) { _mm_stream_si128((__m128i *)dest + 0, xmm); _mm_stream_si128((__m128i *)dest + 1, xmm); VALGRIND_DO_FLUSH(dest, 32); } static force_inline void 
memset_movnt1x16b(char *dest, __m128i xmm) { _mm_stream_si128((__m128i *)dest, xmm); VALGRIND_DO_FLUSH(dest, 16); } static force_inline void memset_movnt1x8b(char *dest, __m128i xmm) { uint64_t x = (uint64_t)_mm_cvtsi128_si64(xmm); _mm_stream_si64((long long *)dest, (long long)x); VALGRIND_DO_FLUSH(dest, 8); } static force_inline void memset_movnt1x4b(char *dest, __m128i xmm) { uint32_t x = (uint32_t)_mm_cvtsi128_si32(xmm); _mm_stream_si32((int *)dest, (int)x); VALGRIND_DO_FLUSH(dest, 4); } void EXPORTED_SYMBOL(char *dest, int c, size_t len) { __m128i xmm = _mm_set1_epi8((char)c); size_t cnt = (uint64_t)dest & 63; if (cnt > 0) { cnt = 64 - cnt; if (cnt > len) cnt = len; memset_small_sse2(dest, xmm, cnt); dest += cnt; len -= cnt; } while (len >= 4 * 64) { memset_movnt4x64b(dest, xmm); dest += 4 * 64; len -= 4 * 64; } if (len >= 2 * 64) { memset_movnt2x64b(dest, xmm); dest += 2 * 64; len -= 2 * 64; } if (len >= 1 * 64) { memset_movnt1x64b(dest, xmm); dest += 1 * 64; len -= 1 * 64; } if (len == 0) goto end; /* There's no point in using more than 1 nt store for 1 cache line. */ if (util_is_pow2(len)) { if (len == 32) memset_movnt1x32b(dest, xmm); else if (len == 16) memset_movnt1x16b(dest, xmm); else if (len == 8) memset_movnt1x8b(dest, xmm); else if (len == 4) memset_movnt1x4b(dest, xmm); else goto nonnt; goto end; } nonnt: memset_small_sse2(dest, xmm, len); end: maybe_barrier(); }
5,136
25.755208
74
h
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memset/memset_t_sse2.h
/* * Copyright 2017-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <immintrin.h> #include <stddef.h> #include <stdint.h> #include "pmem.h" #include "flush.h" #include "memcpy_memset.h" #include "memset_sse2.h" static force_inline void memset_mov4x64b(char *dest, __m128i xmm) { _mm_store_si128((__m128i *)dest + 0, xmm); _mm_store_si128((__m128i *)dest + 1, xmm); _mm_store_si128((__m128i *)dest + 2, xmm); _mm_store_si128((__m128i *)dest + 3, xmm); _mm_store_si128((__m128i *)dest + 4, xmm); _mm_store_si128((__m128i *)dest + 5, xmm); _mm_store_si128((__m128i *)dest + 6, xmm); _mm_store_si128((__m128i *)dest + 7, xmm); _mm_store_si128((__m128i *)dest + 8, xmm); _mm_store_si128((__m128i *)dest + 9, xmm); _mm_store_si128((__m128i *)dest + 10, xmm); _mm_store_si128((__m128i *)dest + 11, xmm); _mm_store_si128((__m128i *)dest + 12, xmm); _mm_store_si128((__m128i *)dest + 13, xmm); _mm_store_si128((__m128i *)dest + 14, xmm); _mm_store_si128((__m128i *)dest + 15, xmm); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); flush64b(dest + 2 * 64); flush64b(dest + 3 * 64); } static force_inline void memset_mov2x64b(char *dest, __m128i xmm) { _mm_store_si128((__m128i *)dest + 0, xmm); _mm_store_si128((__m128i *)dest + 1, xmm); _mm_store_si128((__m128i *)dest + 2, xmm); _mm_store_si128((__m128i *)dest + 3, xmm); _mm_store_si128((__m128i *)dest + 4, xmm); _mm_store_si128((__m128i *)dest + 5, xmm); _mm_store_si128((__m128i *)dest + 6, xmm); _mm_store_si128((__m128i *)dest + 7, xmm); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); } static force_inline void memset_mov1x64b(char *dest, __m128i xmm) { _mm_store_si128((__m128i *)dest + 0, xmm); _mm_store_si128((__m128i *)dest + 1, xmm); _mm_store_si128((__m128i *)dest + 2, xmm); _mm_store_si128((__m128i *)dest + 3, xmm); flush64b(dest + 0 * 64); } void EXPORTED_SYMBOL(char *dest, int c, size_t len) { __m128i xmm = _mm_set1_epi8((char)c); size_t cnt = (uint64_t)dest & 63; if (cnt > 0) { cnt = 64 - cnt; if (cnt > len) cnt = len; memset_small_sse2(dest, xmm, cnt); dest += cnt; len -= cnt; } 
while (len >= 4 * 64) { memset_mov4x64b(dest, xmm); dest += 4 * 64; len -= 4 * 64; } if (len >= 2 * 64) { memset_mov2x64b(dest, xmm); dest += 2 * 64; len -= 2 * 64; } if (len >= 1 * 64) { memset_mov1x64b(dest, xmm); dest += 1 * 64; len -= 1 * 64; } if (len) memset_small_sse2(dest, xmm, len); }
3,985
28.525926
74
h
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memset/memset_nt_sse2_clflush.c
/* * Copyright 2017-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #define flush flush_clflush_nolog #define EXPORTED_SYMBOL memset_movnt_sse2_clflush #define maybe_barrier barrier_after_ntstores #include "memset_nt_sse2.h"
1,759
46.567568
74
c
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memset/memset_nt_avx.h
/* * Copyright 2017-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <immintrin.h> #include <stddef.h> #include <stdint.h> #include "pmem.h" #include "avx.h" #include "flush.h" #include "libpmem.h" #include "memset_avx.h" #include "memcpy_memset.h" #include "out.h" #include "valgrind_internal.h" static force_inline void memset_movnt8x64b(char *dest, __m256i ymm) { _mm256_stream_si256((__m256i *)dest + 0, ymm); _mm256_stream_si256((__m256i *)dest + 1, ymm); _mm256_stream_si256((__m256i *)dest + 2, ymm); _mm256_stream_si256((__m256i *)dest + 3, ymm); _mm256_stream_si256((__m256i *)dest + 4, ymm); _mm256_stream_si256((__m256i *)dest + 5, ymm); _mm256_stream_si256((__m256i *)dest + 6, ymm); _mm256_stream_si256((__m256i *)dest + 7, ymm); _mm256_stream_si256((__m256i *)dest + 8, ymm); _mm256_stream_si256((__m256i *)dest + 9, ymm); _mm256_stream_si256((__m256i *)dest + 10, ymm); _mm256_stream_si256((__m256i *)dest + 11, ymm); _mm256_stream_si256((__m256i *)dest + 12, ymm); _mm256_stream_si256((__m256i *)dest + 13, ymm); _mm256_stream_si256((__m256i *)dest + 14, ymm); _mm256_stream_si256((__m256i *)dest + 15, ymm); VALGRIND_DO_FLUSH(dest, 8 * 64); } static force_inline void memset_movnt4x64b(char *dest, __m256i ymm) { _mm256_stream_si256((__m256i *)dest + 0, ymm); _mm256_stream_si256((__m256i *)dest + 1, ymm); _mm256_stream_si256((__m256i *)dest + 2, ymm); _mm256_stream_si256((__m256i *)dest + 3, ymm); _mm256_stream_si256((__m256i *)dest + 4, ymm); _mm256_stream_si256((__m256i *)dest + 5, ymm); _mm256_stream_si256((__m256i *)dest + 6, ymm); _mm256_stream_si256((__m256i *)dest + 7, ymm); VALGRIND_DO_FLUSH(dest, 4 * 64); } static force_inline void memset_movnt2x64b(char *dest, __m256i ymm) { _mm256_stream_si256((__m256i *)dest + 0, ymm); _mm256_stream_si256((__m256i *)dest + 1, ymm); _mm256_stream_si256((__m256i *)dest + 2, ymm); _mm256_stream_si256((__m256i *)dest + 3, ymm); VALGRIND_DO_FLUSH(dest, 2 * 64); } static force_inline void memset_movnt1x64b(char *dest, __m256i ymm) { _mm256_stream_si256((__m256i *)dest + 0, ymm); 
_mm256_stream_si256((__m256i *)dest + 1, ymm); VALGRIND_DO_FLUSH(dest, 64); } static force_inline void memset_movnt1x32b(char *dest, __m256i ymm) { _mm256_stream_si256((__m256i *)dest, ymm); VALGRIND_DO_FLUSH(dest, 32); } static force_inline void memset_movnt1x16b(char *dest, __m256i ymm) { __m128i xmm0 = m256_get16b(ymm); _mm_stream_si128((__m128i *)dest, xmm0); VALGRIND_DO_FLUSH(dest - 16, 16); } static force_inline void memset_movnt1x8b(char *dest, __m256i ymm) { uint64_t x = m256_get8b(ymm); _mm_stream_si64((long long *)dest, (long long)x); VALGRIND_DO_FLUSH(dest, 8); } static force_inline void memset_movnt1x4b(char *dest, __m256i ymm) { uint32_t x = m256_get4b(ymm); _mm_stream_si32((int *)dest, (int)x); VALGRIND_DO_FLUSH(dest, 4); } void EXPORTED_SYMBOL(char *dest, int c, size_t len) { __m256i ymm = _mm256_set1_epi8((char)c); size_t cnt = (uint64_t)dest & 63; if (cnt > 0) { cnt = 64 - cnt; if (cnt > len) cnt = len; memset_small_avx(dest, ymm, cnt); dest += cnt; len -= cnt; } while (len >= 8 * 64) { memset_movnt8x64b(dest, ymm); dest += 8 * 64; len -= 8 * 64; } if (len >= 4 * 64) { memset_movnt4x64b(dest, ymm); dest += 4 * 64; len -= 4 * 64; } if (len >= 2 * 64) { memset_movnt2x64b(dest, ymm); dest += 2 * 64; len -= 2 * 64; } if (len >= 1 * 64) { memset_movnt1x64b(dest, ymm); dest += 1 * 64; len -= 1 * 64; } if (len == 0) goto end; /* There's no point in using more than 1 nt store for 1 cache line. */ if (util_is_pow2(len)) { if (len == 32) memset_movnt1x32b(dest, ymm); else if (len == 16) memset_movnt1x16b(dest, ymm); else if (len == 8) memset_movnt1x8b(dest, ymm); else if (len == 4) memset_movnt1x4b(dest, ymm); else goto nonnt; goto end; } nonnt: memset_small_avx(dest, ymm, len); end: avx_zeroupper(); maybe_barrier(); }
5,514
25.137441
74
h
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memset/memset_avx.h
/* * Copyright 2017-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #ifndef PMEM_MEMSET_AVX_H #define PMEM_MEMSET_AVX_H #include <immintrin.h> #include <stddef.h> #include <stdint.h> #include <string.h> #include "avx.h" #include "libpmem.h" #include "out.h" static force_inline void memset_small_avx_noflush(char *dest, __m256i ymm, size_t len) { ASSERT(len <= 64); if (len <= 8) goto le8; if (len <= 32) goto le32; /* 33..64 */ _mm256_storeu_si256((__m256i *)dest, ymm); _mm256_storeu_si256((__m256i *)(dest + len - 32), ymm); return; le32: if (len > 16) { /* 17..32 */ __m128i xmm = m256_get16b(ymm); _mm_storeu_si128((__m128i *)dest, xmm); _mm_storeu_si128((__m128i *)(dest + len - 16), xmm); return; } /* 9..16 */ uint64_t d8 = m256_get8b(ymm); *(uint64_t *)dest = d8; *(uint64_t *)(dest + len - 8) = d8; return; le8: if (len <= 2) goto le2; if (len > 4) { /* 5..8 */ uint32_t d = m256_get4b(ymm); *(uint32_t *)dest = d; *(uint32_t *)(dest + len - 4) = d; return; } /* 3..4 */ uint16_t d2 = m256_get2b(ymm); *(uint16_t *)dest = d2; *(uint16_t *)(dest + len - 2) = d2; return; le2: if (len == 2) { uint16_t d2 = m256_get2b(ymm); *(uint16_t *)dest = d2; return; } *(uint8_t *)dest = (uint8_t)m256_get2b(ymm); } static force_inline void memset_small_avx(char *dest, __m256i ymm, size_t len) { memset_small_avx_noflush(dest, ymm, len); flush(dest, len); } #endif
2,975
24.655172
74
h
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memset/memset_nt_avx512f_clwb.c
/* * Copyright 2017-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #define flush flush_clwb_nolog #define EXPORTED_SYMBOL memset_movnt_avx512f_clwb #define maybe_barrier no_barrier_after_ntstores #include "memset_nt_avx512f.h"
1,762
46.648649
74
c
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memset/memset_nt_avx_clflushopt.c
/* * Copyright 2017-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #define flush flush_clflushopt_nolog #define EXPORTED_SYMBOL memset_movnt_avx_clflushopt #define maybe_barrier no_barrier_after_ntstores #include "memset_nt_avx.h"
1,766
46.756757
74
c
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memset/memset_nt_sse2_clflushopt.c
/* * Copyright 2017-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #define flush flush_clflushopt_nolog #define EXPORTED_SYMBOL memset_movnt_sse2_clflushopt #define maybe_barrier no_barrier_after_ntstores #include "memset_nt_sse2.h"
1,768
46.810811
74
c
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memset/memset_t_avx.h
/* * Copyright 2017-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <immintrin.h> #include <stddef.h> #include <stdint.h> #include "pmem.h" #include "avx.h" #include "flush.h" #include "memset_avx.h" #include "memcpy_memset.h" static force_inline void memset_mov8x64b(char *dest, __m256i ymm) { _mm256_store_si256((__m256i *)dest + 0, ymm); _mm256_store_si256((__m256i *)dest + 1, ymm); _mm256_store_si256((__m256i *)dest + 2, ymm); _mm256_store_si256((__m256i *)dest + 3, ymm); _mm256_store_si256((__m256i *)dest + 4, ymm); _mm256_store_si256((__m256i *)dest + 5, ymm); _mm256_store_si256((__m256i *)dest + 6, ymm); _mm256_store_si256((__m256i *)dest + 7, ymm); _mm256_store_si256((__m256i *)dest + 8, ymm); _mm256_store_si256((__m256i *)dest + 9, ymm); _mm256_store_si256((__m256i *)dest + 10, ymm); _mm256_store_si256((__m256i *)dest + 11, ymm); _mm256_store_si256((__m256i *)dest + 12, ymm); _mm256_store_si256((__m256i *)dest + 13, ymm); _mm256_store_si256((__m256i *)dest + 14, ymm); _mm256_store_si256((__m256i *)dest + 15, ymm); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); flush64b(dest + 2 * 64); flush64b(dest + 3 * 64); flush64b(dest + 4 * 64); flush64b(dest + 5 * 64); flush64b(dest + 6 * 64); flush64b(dest + 7 * 64); } static force_inline void memset_mov4x64b(char *dest, __m256i ymm) { _mm256_store_si256((__m256i *)dest + 0, ymm); _mm256_store_si256((__m256i *)dest + 1, ymm); _mm256_store_si256((__m256i *)dest + 2, ymm); _mm256_store_si256((__m256i *)dest + 3, ymm); _mm256_store_si256((__m256i *)dest + 4, ymm); _mm256_store_si256((__m256i *)dest + 5, ymm); _mm256_store_si256((__m256i *)dest + 6, ymm); _mm256_store_si256((__m256i *)dest + 7, ymm); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); flush64b(dest + 2 * 64); flush64b(dest + 3 * 64); } static force_inline void memset_mov2x64b(char *dest, __m256i ymm) { _mm256_store_si256((__m256i *)dest + 0, ymm); _mm256_store_si256((__m256i *)dest + 1, ymm); _mm256_store_si256((__m256i *)dest + 2, ymm); _mm256_store_si256((__m256i *)dest + 3, ymm); flush64b(dest + 0 * 64); 
flush64b(dest + 1 * 64); } static force_inline void memset_mov1x64b(char *dest, __m256i ymm) { _mm256_store_si256((__m256i *)dest + 0, ymm); _mm256_store_si256((__m256i *)dest + 1, ymm); flush64b(dest + 0 * 64); } void EXPORTED_SYMBOL(char *dest, int c, size_t len) { __m256i ymm = _mm256_set1_epi8((char)c); size_t cnt = (uint64_t)dest & 63; if (cnt > 0) { cnt = 64 - cnt; if (cnt > len) cnt = len; memset_small_avx(dest, ymm, cnt); dest += cnt; len -= cnt; } while (len >= 8 * 64) { memset_mov8x64b(dest, ymm); dest += 8 * 64; len -= 8 * 64; } if (len >= 4 * 64) { memset_mov4x64b(dest, ymm); dest += 4 * 64; len -= 4 * 64; } if (len >= 2 * 64) { memset_mov2x64b(dest, ymm); dest += 2 * 64; len -= 2 * 64; } if (len >= 1 * 64) { memset_mov1x64b(dest, ymm); dest += 1 * 64; len -= 1 * 64; } if (len) memset_small_avx(dest, ymm, len); avx_zeroupper(); }
4,570
27.56875
74
h
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memset/memset_nt_avx512f_clflushopt.c
/* * Copyright 2017-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #define flush flush_clflushopt_nolog #define EXPORTED_SYMBOL memset_movnt_avx512f_clflushopt #define maybe_barrier no_barrier_after_ntstores #include "memset_nt_avx512f.h"
1,774
46.972973
74
c
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memset/memset_t_avx512f_clflushopt.c
/* * Copyright 2017-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #define flush64b pmem_clflushopt #define flush flush_clflushopt_nolog #define EXPORTED_SYMBOL memset_mov_avx512f_clflushopt #include "memset_t_avx512f.h"
1,756
46.486486
74
c
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memset/memset_t_avx512f.h
/* * Copyright 2017-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <immintrin.h> #include <stddef.h> #include <stdint.h> #include "pmem.h" #include "avx.h" #include "flush.h" #include "memset_avx512f.h" #include "memcpy_memset.h" static force_inline void memset_mov32x64b(char *dest, __m512i zmm) { _mm512_store_si512((__m512i *)dest + 0, zmm); _mm512_store_si512((__m512i *)dest + 1, zmm); _mm512_store_si512((__m512i *)dest + 2, zmm); _mm512_store_si512((__m512i *)dest + 3, zmm); _mm512_store_si512((__m512i *)dest + 4, zmm); _mm512_store_si512((__m512i *)dest + 5, zmm); _mm512_store_si512((__m512i *)dest + 6, zmm); _mm512_store_si512((__m512i *)dest + 7, zmm); _mm512_store_si512((__m512i *)dest + 8, zmm); _mm512_store_si512((__m512i *)dest + 9, zmm); _mm512_store_si512((__m512i *)dest + 10, zmm); _mm512_store_si512((__m512i *)dest + 11, zmm); _mm512_store_si512((__m512i *)dest + 12, zmm); _mm512_store_si512((__m512i *)dest + 13, zmm); _mm512_store_si512((__m512i *)dest + 14, zmm); _mm512_store_si512((__m512i *)dest + 15, zmm); _mm512_store_si512((__m512i *)dest + 16, zmm); _mm512_store_si512((__m512i *)dest + 17, zmm); _mm512_store_si512((__m512i *)dest + 18, zmm); _mm512_store_si512((__m512i *)dest + 19, zmm); _mm512_store_si512((__m512i *)dest + 20, zmm); _mm512_store_si512((__m512i *)dest + 21, zmm); _mm512_store_si512((__m512i *)dest + 22, zmm); _mm512_store_si512((__m512i *)dest + 23, zmm); _mm512_store_si512((__m512i *)dest + 24, zmm); _mm512_store_si512((__m512i *)dest + 25, zmm); _mm512_store_si512((__m512i *)dest + 26, zmm); _mm512_store_si512((__m512i *)dest + 27, zmm); _mm512_store_si512((__m512i *)dest + 28, zmm); _mm512_store_si512((__m512i *)dest + 29, zmm); _mm512_store_si512((__m512i *)dest + 30, zmm); _mm512_store_si512((__m512i *)dest + 31, zmm); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); flush64b(dest + 2 * 64); flush64b(dest + 3 * 64); flush64b(dest + 4 * 64); flush64b(dest + 5 * 64); flush64b(dest + 6 * 64); flush64b(dest + 7 * 64); flush64b(dest + 8 * 64); flush64b(dest + 9 * 64); 
flush64b(dest + 10 * 64); flush64b(dest + 11 * 64); flush64b(dest + 12 * 64); flush64b(dest + 13 * 64); flush64b(dest + 14 * 64); flush64b(dest + 15 * 64); flush64b(dest + 16 * 64); flush64b(dest + 17 * 64); flush64b(dest + 18 * 64); flush64b(dest + 19 * 64); flush64b(dest + 20 * 64); flush64b(dest + 21 * 64); flush64b(dest + 22 * 64); flush64b(dest + 23 * 64); flush64b(dest + 24 * 64); flush64b(dest + 25 * 64); flush64b(dest + 26 * 64); flush64b(dest + 27 * 64); flush64b(dest + 28 * 64); flush64b(dest + 29 * 64); flush64b(dest + 30 * 64); flush64b(dest + 31 * 64); } static force_inline void memset_mov16x64b(char *dest, __m512i zmm) { _mm512_store_si512((__m512i *)dest + 0, zmm); _mm512_store_si512((__m512i *)dest + 1, zmm); _mm512_store_si512((__m512i *)dest + 2, zmm); _mm512_store_si512((__m512i *)dest + 3, zmm); _mm512_store_si512((__m512i *)dest + 4, zmm); _mm512_store_si512((__m512i *)dest + 5, zmm); _mm512_store_si512((__m512i *)dest + 6, zmm); _mm512_store_si512((__m512i *)dest + 7, zmm); _mm512_store_si512((__m512i *)dest + 8, zmm); _mm512_store_si512((__m512i *)dest + 9, zmm); _mm512_store_si512((__m512i *)dest + 10, zmm); _mm512_store_si512((__m512i *)dest + 11, zmm); _mm512_store_si512((__m512i *)dest + 12, zmm); _mm512_store_si512((__m512i *)dest + 13, zmm); _mm512_store_si512((__m512i *)dest + 14, zmm); _mm512_store_si512((__m512i *)dest + 15, zmm); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); flush64b(dest + 2 * 64); flush64b(dest + 3 * 64); flush64b(dest + 4 * 64); flush64b(dest + 5 * 64); flush64b(dest + 6 * 64); flush64b(dest + 7 * 64); flush64b(dest + 8 * 64); flush64b(dest + 9 * 64); flush64b(dest + 10 * 64); flush64b(dest + 11 * 64); flush64b(dest + 12 * 64); flush64b(dest + 13 * 64); flush64b(dest + 14 * 64); flush64b(dest + 15 * 64); } static force_inline void memset_mov8x64b(char *dest, __m512i zmm) { _mm512_store_si512((__m512i *)dest + 0, zmm); _mm512_store_si512((__m512i *)dest + 1, zmm); _mm512_store_si512((__m512i *)dest + 2, zmm); 
_mm512_store_si512((__m512i *)dest + 3, zmm); _mm512_store_si512((__m512i *)dest + 4, zmm); _mm512_store_si512((__m512i *)dest + 5, zmm); _mm512_store_si512((__m512i *)dest + 6, zmm); _mm512_store_si512((__m512i *)dest + 7, zmm); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); flush64b(dest + 2 * 64); flush64b(dest + 3 * 64); flush64b(dest + 4 * 64); flush64b(dest + 5 * 64); flush64b(dest + 6 * 64); flush64b(dest + 7 * 64); } static force_inline void memset_mov4x64b(char *dest, __m512i zmm) { _mm512_store_si512((__m512i *)dest + 0, zmm); _mm512_store_si512((__m512i *)dest + 1, zmm); _mm512_store_si512((__m512i *)dest + 2, zmm); _mm512_store_si512((__m512i *)dest + 3, zmm); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); flush64b(dest + 2 * 64); flush64b(dest + 3 * 64); } static force_inline void memset_mov2x64b(char *dest, __m512i zmm) { _mm512_store_si512((__m512i *)dest + 0, zmm); _mm512_store_si512((__m512i *)dest + 1, zmm); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); } static force_inline void memset_mov1x64b(char *dest, __m512i zmm) { _mm512_store_si512((__m512i *)dest + 0, zmm); flush64b(dest + 0 * 64); } void EXPORTED_SYMBOL(char *dest, int c, size_t len) { __m512i zmm = _mm512_set1_epi8((char)c); /* See comment in memset_movnt_avx512f */ __m256i ymm = _mm256_set1_epi8((char)c); size_t cnt = (uint64_t)dest & 63; if (cnt > 0) { cnt = 64 - cnt; if (cnt > len) cnt = len; memset_small_avx512f(dest, ymm, cnt); dest += cnt; len -= cnt; } while (len >= 32 * 64) { memset_mov32x64b(dest, zmm); dest += 32 * 64; len -= 32 * 64; } if (len >= 16 * 64) { memset_mov16x64b(dest, zmm); dest += 16 * 64; len -= 16 * 64; } if (len >= 8 * 64) { memset_mov8x64b(dest, zmm); dest += 8 * 64; len -= 8 * 64; } if (len >= 4 * 64) { memset_mov4x64b(dest, zmm); dest += 4 * 64; len -= 4 * 64; } if (len >= 2 * 64) { memset_mov2x64b(dest, zmm); dest += 2 * 64; len -= 2 * 64; } if (len >= 1 * 64) { memset_mov1x64b(dest, zmm); dest += 1 * 64; len -= 1 * 64; } if (len) 
memset_small_avx512f(dest, ymm, len); avx_zeroupper(); }
7,852
28.411985
74
h
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memset/memset_t_sse2_clflush.c
/* * Copyright 2017-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #define flush64b _mm_clflush #define flush flush_clflush_nolog #define EXPORTED_SYMBOL memset_mov_sse2_clflush #include "memset_t_sse2.h"
1,740
46.054054
74
c
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memset/memset_nt_avx512f_clflush.c
/* * Copyright 2017-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #define flush flush_clflush_nolog #define EXPORTED_SYMBOL memset_movnt_avx512f_clflush #define maybe_barrier barrier_after_ntstores #include "memset_nt_avx512f.h"
1,765
46.72973
74
c
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memset/memset_t_avx_clflushopt.c
/* * Copyright 2017-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #define flush64b pmem_clflushopt #define flush flush_clflushopt_nolog #define EXPORTED_SYMBOL memset_mov_avx_clflushopt #include "memset_t_avx.h"
1,748
46.27027
74
c
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memset/memset_nt_sse2_empty.c
/* * Copyright 2017-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #define flush flush_empty_nolog #define EXPORTED_SYMBOL memset_movnt_sse2_empty #define maybe_barrier barrier_after_ntstores #include "memset_nt_sse2.h"
1,755
46.459459
74
c
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memset/memset_t_avx512f_clflush.c
/* * Copyright 2017-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #define flush64b _mm_clflush #define flush flush_clflush_nolog #define EXPORTED_SYMBOL memset_mov_avx512f_clflush #include "memset_t_avx512f.h"
1,746
46.216216
74
c
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memset/memset_t_avx512f_clwb.c
/* * Copyright 2017-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #define flush64b pmem_clwb #define flush flush_clwb_nolog #define EXPORTED_SYMBOL memset_mov_avx512f_clwb #include "memset_t_avx512f.h"
1,738
46
74
c
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memset/memset_t_avx_empty.c
/* * Copyright 2017-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #define flush64b flush64b_empty #define flush flush_empty_nolog #define EXPORTED_SYMBOL memset_mov_avx_empty #include "memset_t_avx.h"
1,737
45.972973
74
c
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memset/memset_nt_avx_empty.c
/* * Copyright 2017-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #define flush flush_empty_nolog #define EXPORTED_SYMBOL memset_movnt_avx_empty #define maybe_barrier barrier_after_ntstores #include "memset_nt_avx.h"
1,753
46.405405
74
c
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memset/memset_sse2.h
/* * Copyright 2017-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #ifndef PMEM_MEMSET_SSE2_H #define PMEM_MEMSET_SSE2_H #include <xmmintrin.h> #include <stddef.h> #include <stdint.h> #include <string.h> #include "libpmem.h" #include "out.h" static force_inline void memset_small_sse2_noflush(char *dest, __m128i xmm, size_t len) { ASSERT(len <= 64); if (len <= 8) goto le8; if (len <= 32) goto le32; if (len > 48) { /* 49..64 */ _mm_storeu_si128((__m128i *)(dest + 0), xmm); _mm_storeu_si128((__m128i *)(dest + 16), xmm); _mm_storeu_si128((__m128i *)(dest + 32), xmm); _mm_storeu_si128((__m128i *)(dest + len - 16), xmm); return; } /* 33..48 */ _mm_storeu_si128((__m128i *)(dest + 0), xmm); _mm_storeu_si128((__m128i *)(dest + 16), xmm); _mm_storeu_si128((__m128i *)(dest + len - 16), xmm); return; le32: if (len > 16) { /* 17..32 */ _mm_storeu_si128((__m128i *)(dest + 0), xmm); _mm_storeu_si128((__m128i *)(dest + len - 16), xmm); return; } /* 9..16 */ uint64_t d8 = (uint64_t)_mm_cvtsi128_si64(xmm); *(uint64_t *)dest = d8; *(uint64_t *)(dest + len - 8) = d8; return; le8: if (len <= 2) goto le2; if (len > 4) { /* 5..8 */ uint32_t d4 = (uint32_t)_mm_cvtsi128_si32(xmm); *(uint32_t *)dest = d4; *(uint32_t *)(dest + len - 4) = d4; return; } /* 3..4 */ uint16_t d2 = (uint16_t)(uint32_t)_mm_cvtsi128_si32(xmm); *(uint16_t *)dest = d2; *(uint16_t *)(dest + len - 2) = d2; return; le2: if (len == 2) { uint16_t d2 = (uint16_t)(uint32_t)_mm_cvtsi128_si32(xmm); *(uint16_t *)dest = d2; return; } *(uint8_t *)dest = (uint8_t)_mm_cvtsi128_si32(xmm); } static force_inline void memset_small_sse2(char *dest, __m128i xmm, size_t len) { memset_small_sse2_noflush(dest, xmm, len); flush(dest, len); } #endif
3,327
26.056911
74
h
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memset/memset_nt_sse2_clwb.c
/* * Copyright 2017-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #define flush flush_clwb_nolog #define EXPORTED_SYMBOL memset_movnt_sse2_clwb #define maybe_barrier no_barrier_after_ntstores #include "memset_nt_sse2.h"
1,756
46.486486
74
c
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memset/memset_t_sse2_clflushopt.c
/* * Copyright 2017-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #define flush64b pmem_clflushopt #define flush flush_clflushopt_nolog #define EXPORTED_SYMBOL memset_mov_sse2_clflushopt #include "memset_t_sse2.h"
1,750
46.324324
74
c
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memset/memset_t_avx512f_empty.c
/* * Copyright 2017-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #define flush64b flush64b_empty #define flush flush_empty_nolog #define EXPORTED_SYMBOL memset_mov_avx512f_empty #include "memset_t_avx512f.h"
1,745
46.189189
74
c
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memset/memset_nt_avx512f.h
/* * Copyright 2017-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <immintrin.h> #include <stddef.h> #include <stdint.h> #include "pmem.h" #include "avx.h" #include "flush.h" #include "libpmem.h" #include "memcpy_memset.h" #include "memset_avx512f.h" #include "out.h" #include "util.h" #include "valgrind_internal.h" static force_inline void memset_movnt32x64b(char *dest, __m512i zmm) { _mm512_stream_si512((__m512i *)dest + 0, zmm); _mm512_stream_si512((__m512i *)dest + 1, zmm); _mm512_stream_si512((__m512i *)dest + 2, zmm); _mm512_stream_si512((__m512i *)dest + 3, zmm); _mm512_stream_si512((__m512i *)dest + 4, zmm); _mm512_stream_si512((__m512i *)dest + 5, zmm); _mm512_stream_si512((__m512i *)dest + 6, zmm); _mm512_stream_si512((__m512i *)dest + 7, zmm); _mm512_stream_si512((__m512i *)dest + 8, zmm); _mm512_stream_si512((__m512i *)dest + 9, zmm); _mm512_stream_si512((__m512i *)dest + 10, zmm); _mm512_stream_si512((__m512i *)dest + 11, zmm); _mm512_stream_si512((__m512i *)dest + 12, zmm); _mm512_stream_si512((__m512i *)dest + 13, zmm); _mm512_stream_si512((__m512i *)dest + 14, zmm); _mm512_stream_si512((__m512i *)dest + 15, zmm); _mm512_stream_si512((__m512i *)dest + 16, zmm); _mm512_stream_si512((__m512i *)dest + 17, zmm); _mm512_stream_si512((__m512i *)dest + 18, zmm); _mm512_stream_si512((__m512i *)dest + 19, zmm); _mm512_stream_si512((__m512i *)dest + 20, zmm); _mm512_stream_si512((__m512i *)dest + 21, zmm); _mm512_stream_si512((__m512i *)dest + 22, zmm); _mm512_stream_si512((__m512i *)dest + 23, zmm); _mm512_stream_si512((__m512i *)dest + 24, zmm); _mm512_stream_si512((__m512i *)dest + 25, zmm); _mm512_stream_si512((__m512i *)dest + 26, zmm); _mm512_stream_si512((__m512i *)dest + 27, zmm); _mm512_stream_si512((__m512i *)dest + 28, zmm); _mm512_stream_si512((__m512i *)dest + 29, zmm); _mm512_stream_si512((__m512i *)dest + 30, zmm); _mm512_stream_si512((__m512i *)dest + 31, zmm); VALGRIND_DO_FLUSH(dest, 32 * 64); } static force_inline void memset_movnt16x64b(char *dest, __m512i zmm) { _mm512_stream_si512((__m512i 
*)dest + 0, zmm); _mm512_stream_si512((__m512i *)dest + 1, zmm); _mm512_stream_si512((__m512i *)dest + 2, zmm); _mm512_stream_si512((__m512i *)dest + 3, zmm); _mm512_stream_si512((__m512i *)dest + 4, zmm); _mm512_stream_si512((__m512i *)dest + 5, zmm); _mm512_stream_si512((__m512i *)dest + 6, zmm); _mm512_stream_si512((__m512i *)dest + 7, zmm); _mm512_stream_si512((__m512i *)dest + 8, zmm); _mm512_stream_si512((__m512i *)dest + 9, zmm); _mm512_stream_si512((__m512i *)dest + 10, zmm); _mm512_stream_si512((__m512i *)dest + 11, zmm); _mm512_stream_si512((__m512i *)dest + 12, zmm); _mm512_stream_si512((__m512i *)dest + 13, zmm); _mm512_stream_si512((__m512i *)dest + 14, zmm); _mm512_stream_si512((__m512i *)dest + 15, zmm); VALGRIND_DO_FLUSH(dest, 16 * 64); } static force_inline void memset_movnt8x64b(char *dest, __m512i zmm) { _mm512_stream_si512((__m512i *)dest + 0, zmm); _mm512_stream_si512((__m512i *)dest + 1, zmm); _mm512_stream_si512((__m512i *)dest + 2, zmm); _mm512_stream_si512((__m512i *)dest + 3, zmm); _mm512_stream_si512((__m512i *)dest + 4, zmm); _mm512_stream_si512((__m512i *)dest + 5, zmm); _mm512_stream_si512((__m512i *)dest + 6, zmm); _mm512_stream_si512((__m512i *)dest + 7, zmm); VALGRIND_DO_FLUSH(dest, 8 * 64); } static force_inline void memset_movnt4x64b(char *dest, __m512i zmm) { _mm512_stream_si512((__m512i *)dest + 0, zmm); _mm512_stream_si512((__m512i *)dest + 1, zmm); _mm512_stream_si512((__m512i *)dest + 2, zmm); _mm512_stream_si512((__m512i *)dest + 3, zmm); VALGRIND_DO_FLUSH(dest, 4 * 64); } static force_inline void memset_movnt2x64b(char *dest, __m512i zmm) { _mm512_stream_si512((__m512i *)dest + 0, zmm); _mm512_stream_si512((__m512i *)dest + 1, zmm); VALGRIND_DO_FLUSH(dest, 2 * 64); } static force_inline void memset_movnt1x64b(char *dest, __m512i zmm) { _mm512_stream_si512((__m512i *)dest + 0, zmm); VALGRIND_DO_FLUSH(dest, 64); } static force_inline void memset_movnt1x32b(char *dest, __m256i ymm) { _mm256_stream_si256((__m256i *)dest, ymm); 
VALGRIND_DO_FLUSH(dest, 32); } static force_inline void memset_movnt1x16b(char *dest, __m256i ymm) { __m128i xmm = _mm256_extracti128_si256(ymm, 0); _mm_stream_si128((__m128i *)dest, xmm); VALGRIND_DO_FLUSH(dest, 16); } static force_inline void memset_movnt1x8b(char *dest, __m256i ymm) { uint64_t x = m256_get8b(ymm); _mm_stream_si64((long long *)dest, (long long)x); VALGRIND_DO_FLUSH(dest, 8); } static force_inline void memset_movnt1x4b(char *dest, __m256i ymm) { uint32_t x = m256_get4b(ymm); _mm_stream_si32((int *)dest, (int)x); VALGRIND_DO_FLUSH(dest, 4); } void EXPORTED_SYMBOL(char *dest, int c, size_t len) { __m512i zmm = _mm512_set1_epi8((char)c); /* * Can't use _mm512_extracti64x4_epi64, because some versions of gcc * crash. https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82887 */ __m256i ymm = _mm256_set1_epi8((char)c); size_t cnt = (uint64_t)dest & 63; if (cnt > 0) { cnt = 64 - cnt; if (cnt > len) cnt = len; memset_small_avx512f(dest, ymm, cnt); dest += cnt; len -= cnt; } while (len >= 32 * 64) { memset_movnt32x64b(dest, zmm); dest += 32 * 64; len -= 32 * 64; } if (len >= 16 * 64) { memset_movnt16x64b(dest, zmm); dest += 16 * 64; len -= 16 * 64; } if (len >= 8 * 64) { memset_movnt8x64b(dest, zmm); dest += 8 * 64; len -= 8 * 64; } if (len >= 4 * 64) { memset_movnt4x64b(dest, zmm); dest += 4 * 64; len -= 4 * 64; } if (len >= 2 * 64) { memset_movnt2x64b(dest, zmm); dest += 2 * 64; len -= 2 * 64; } if (len >= 1 * 64) { memset_movnt1x64b(dest, zmm); dest += 1 * 64; len -= 1 * 64; } if (len == 0) goto end; /* There's no point in using more than 1 nt store for 1 cache line. */ if (util_is_pow2(len)) { if (len == 32) memset_movnt1x32b(dest, ymm); else if (len == 16) memset_movnt1x16b(dest, ymm); else if (len == 8) memset_movnt1x8b(dest, ymm); else if (len == 4) memset_movnt1x4b(dest, ymm); else goto nonnt; goto end; } nonnt: memset_small_avx512f(dest, ymm, len); end: avx_zeroupper(); maybe_barrier(); }
7,756
27.105072
74
h
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memset/memset_nt_avx512f_empty.c
/* * Copyright 2017-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #define flush flush_empty_nolog #define EXPORTED_SYMBOL memset_movnt_avx512f_empty #define maybe_barrier barrier_after_ntstores #include "memset_nt_avx512f.h"
1,761
46.621622
74
c
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_t_avx512f_clwb.c
/* * Copyright 2017-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #define flush64b pmem_clwb #define flush flush_clwb_nolog #define EXPORTED_SYMBOL memmove_mov_avx512f_clwb #include "memcpy_t_avx512f.h"
1,739
46.027027
74
c
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_nt_avx512f_clflush.c
/* * Copyright 2017-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #define flush flush_clflush_nolog #define EXPORTED_SYMBOL memmove_movnt_avx512f_clflush #define maybe_barrier barrier_after_ntstores #include "memcpy_nt_avx512f.h"
1,766
46.756757
74
c
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_nt_avx512f_clflushopt.c
/* * Copyright 2017-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #define flush flush_clflushopt_nolog #define EXPORTED_SYMBOL memmove_movnt_avx512f_clflushopt #define maybe_barrier no_barrier_after_ntstores #include "memcpy_nt_avx512f.h"
1,775
47
74
c
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_t_avx_clflush.c
/* * Copyright 2017-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #define flush64b _mm_clflush #define flush flush_clflush_nolog #define EXPORTED_SYMBOL memmove_mov_avx_clflush #include "memcpy_t_avx.h"
1,739
46.027027
74
c
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_nt_sse2_clflushopt.c
/* * Copyright 2017-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #define flush flush_clflushopt_nolog #define EXPORTED_SYMBOL memmove_movnt_sse2_clflushopt #define maybe_barrier no_barrier_after_ntstores #include "memcpy_nt_sse2.h"
1,769
46.837838
74
c
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_nt_avx_clflush.c
/* * Copyright 2017-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #define flush flush_clflush_nolog #define EXPORTED_SYMBOL memmove_movnt_avx_clflush #define maybe_barrier barrier_after_ntstores #include "memcpy_nt_avx.h"
1,758
46.540541
74
c
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_avx.h
/* * Copyright 2017-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #ifndef PMEM_MEMCPY_AVX_H #define PMEM_MEMCPY_AVX_H #include <immintrin.h> #include <stddef.h> #include <stdint.h> #include "libpmem.h" #include "out.h" static force_inline void memmove_small_avx_noflush(char *dest, const char *src, size_t len) { ASSERT(len <= 64); if (len <= 8) goto le8; if (len <= 32) goto le32; /* 33..64 */ __m256i ymm0 = _mm256_loadu_si256((__m256i *)src); __m256i ymm1 = _mm256_loadu_si256((__m256i *)(src + len - 32)); _mm256_storeu_si256((__m256i *)dest, ymm0); _mm256_storeu_si256((__m256i *)(dest + len - 32), ymm1); return; le32: if (len > 16) { /* 17..32 */ __m128i xmm0 = _mm_loadu_si128((__m128i *)src); __m128i xmm1 = _mm_loadu_si128((__m128i *)(src + len - 16)); _mm_storeu_si128((__m128i *)dest, xmm0); _mm_storeu_si128((__m128i *)(dest + len - 16), xmm1); return; } /* 9..16 */ uint64_t d80 = *(uint64_t *)src; uint64_t d81 = *(uint64_t *)(src + len - 8); *(uint64_t *)dest = d80; *(uint64_t *)(dest + len - 8) = d81; return; le8: if (len <= 2) goto le2; if (len > 4) { /* 5..8 */ uint32_t d40 = *(uint32_t *)src; uint32_t d41 = *(uint32_t *)(src + len - 4); *(uint32_t *)dest = d40; *(uint32_t *)(dest + len - 4) = d41; return; } /* 3..4 */ uint16_t d20 = *(uint16_t *)src; uint16_t d21 = *(uint16_t *)(src + len - 2); *(uint16_t *)dest = d20; *(uint16_t *)(dest + len - 2) = d21; return; le2: if (len == 2) { *(uint16_t *)dest = *(uint16_t *)src; return; } *(uint8_t *)dest = *(uint8_t *)src; } static force_inline void memmove_small_avx(char *dest, const char *src, size_t len) { memmove_small_avx_noflush(dest, src, len); flush(dest, len); } #endif
3,275
26.529412
74
h
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_sse2.h
/* * Copyright 2017-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #ifndef PMEM_MEMCPY_SSE2_H #define PMEM_MEMCPY_SSE2_H #include <xmmintrin.h> #include <stddef.h> #include <stdint.h> #include "libpmem.h" #include "out.h" static force_inline void memmove_small_sse2_noflush(char *dest, const char *src, size_t len) { ASSERT(len <= 64); if (len <= 8) goto le8; if (len <= 32) goto le32; if (len > 48) { /* 49..64 */ __m128i xmm0 = _mm_loadu_si128((__m128i *)src); __m128i xmm1 = _mm_loadu_si128((__m128i *)(src + 16)); __m128i xmm2 = _mm_loadu_si128((__m128i *)(src + 32)); __m128i xmm3 = _mm_loadu_si128((__m128i *)(src + len - 16)); _mm_storeu_si128((__m128i *)dest, xmm0); _mm_storeu_si128((__m128i *)(dest + 16), xmm1); _mm_storeu_si128((__m128i *)(dest + 32), xmm2); _mm_storeu_si128((__m128i *)(dest + len - 16), xmm3); return; } /* 33..48 */ __m128i xmm0 = _mm_loadu_si128((__m128i *)src); __m128i xmm1 = _mm_loadu_si128((__m128i *)(src + 16)); __m128i xmm2 = _mm_loadu_si128((__m128i *)(src + len - 16)); _mm_storeu_si128((__m128i *)dest, xmm0); _mm_storeu_si128((__m128i *)(dest + 16), xmm1); _mm_storeu_si128((__m128i *)(dest + len - 16), xmm2); return; le32: if (len > 16) { /* 17..32 */ __m128i xmm0 = _mm_loadu_si128((__m128i *)src); __m128i xmm1 = _mm_loadu_si128((__m128i *)(src + len - 16)); _mm_storeu_si128((__m128i *)dest, xmm0); _mm_storeu_si128((__m128i *)(dest + len - 16), xmm1); return; } /* 9..16 */ uint64_t d80 = *(uint64_t *)src; uint64_t d81 = *(uint64_t *)(src + len - 8); *(uint64_t *)dest = d80; *(uint64_t *)(dest + len - 8) = d81; return; le8: if (len <= 2) goto le2; if (len > 4) { /* 5..8 */ uint32_t d40 = *(uint32_t *)src; uint32_t d41 = *(uint32_t *)(src + len - 4); *(uint32_t *)dest = d40; *(uint32_t *)(dest + len - 4) = d41; return; } /* 3..4 */ uint16_t d20 = *(uint16_t *)src; uint16_t d21 = *(uint16_t *)(src + len - 2); *(uint16_t *)dest = d20; *(uint16_t *)(dest + len - 2) = d21; return; le2: if (len == 2) { *(uint16_t *)dest = *(uint16_t *)src; return; } *(uint8_t *)dest = *(uint8_t *)src; } static force_inline 
void memmove_small_sse2(char *dest, const char *src, size_t len) { memmove_small_sse2_noflush(dest, src, len); flush(dest, len); } #endif
3,846
27.496296
74
h
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_t_sse2.h
/* * Copyright 2017-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <immintrin.h> #include <stddef.h> #include <stdint.h> #include "pmem.h" #include "flush.h" #include "memcpy_memset.h" #include "memcpy_sse2.h" static force_inline void memmove_mov4x64b(char *dest, const char *src) { __m128i xmm0 = _mm_loadu_si128((__m128i *)src + 0); __m128i xmm1 = _mm_loadu_si128((__m128i *)src + 1); __m128i xmm2 = _mm_loadu_si128((__m128i *)src + 2); __m128i xmm3 = _mm_loadu_si128((__m128i *)src + 3); __m128i xmm4 = _mm_loadu_si128((__m128i *)src + 4); __m128i xmm5 = _mm_loadu_si128((__m128i *)src + 5); __m128i xmm6 = _mm_loadu_si128((__m128i *)src + 6); __m128i xmm7 = _mm_loadu_si128((__m128i *)src + 7); __m128i xmm8 = _mm_loadu_si128((__m128i *)src + 8); __m128i xmm9 = _mm_loadu_si128((__m128i *)src + 9); __m128i xmm10 = _mm_loadu_si128((__m128i *)src + 10); __m128i xmm11 = _mm_loadu_si128((__m128i *)src + 11); __m128i xmm12 = _mm_loadu_si128((__m128i *)src + 12); __m128i xmm13 = _mm_loadu_si128((__m128i *)src + 13); __m128i xmm14 = _mm_loadu_si128((__m128i *)src + 14); __m128i xmm15 = _mm_loadu_si128((__m128i *)src + 15); _mm_store_si128((__m128i *)dest + 0, xmm0); _mm_store_si128((__m128i *)dest + 1, xmm1); _mm_store_si128((__m128i *)dest + 2, xmm2); _mm_store_si128((__m128i *)dest + 3, xmm3); _mm_store_si128((__m128i *)dest + 4, xmm4); _mm_store_si128((__m128i *)dest + 5, xmm5); _mm_store_si128((__m128i *)dest + 6, xmm6); _mm_store_si128((__m128i *)dest + 7, xmm7); _mm_store_si128((__m128i *)dest + 8, xmm8); _mm_store_si128((__m128i *)dest + 9, xmm9); _mm_store_si128((__m128i *)dest + 10, xmm10); _mm_store_si128((__m128i *)dest + 11, xmm11); _mm_store_si128((__m128i *)dest + 12, xmm12); _mm_store_si128((__m128i *)dest + 13, xmm13); _mm_store_si128((__m128i *)dest + 14, xmm14); _mm_store_si128((__m128i *)dest + 15, xmm15); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); flush64b(dest + 2 * 64); flush64b(dest + 3 * 64); } static force_inline void memmove_mov2x64b(char *dest, const char *src) { __m128i xmm0 = 
_mm_loadu_si128((__m128i *)src + 0); __m128i xmm1 = _mm_loadu_si128((__m128i *)src + 1); __m128i xmm2 = _mm_loadu_si128((__m128i *)src + 2); __m128i xmm3 = _mm_loadu_si128((__m128i *)src + 3); __m128i xmm4 = _mm_loadu_si128((__m128i *)src + 4); __m128i xmm5 = _mm_loadu_si128((__m128i *)src + 5); __m128i xmm6 = _mm_loadu_si128((__m128i *)src + 6); __m128i xmm7 = _mm_loadu_si128((__m128i *)src + 7); _mm_store_si128((__m128i *)dest + 0, xmm0); _mm_store_si128((__m128i *)dest + 1, xmm1); _mm_store_si128((__m128i *)dest + 2, xmm2); _mm_store_si128((__m128i *)dest + 3, xmm3); _mm_store_si128((__m128i *)dest + 4, xmm4); _mm_store_si128((__m128i *)dest + 5, xmm5); _mm_store_si128((__m128i *)dest + 6, xmm6); _mm_store_si128((__m128i *)dest + 7, xmm7); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); } static force_inline void memmove_mov1x64b(char *dest, const char *src) { __m128i xmm0 = _mm_loadu_si128((__m128i *)src + 0); __m128i xmm1 = _mm_loadu_si128((__m128i *)src + 1); __m128i xmm2 = _mm_loadu_si128((__m128i *)src + 2); __m128i xmm3 = _mm_loadu_si128((__m128i *)src + 3); _mm_store_si128((__m128i *)dest + 0, xmm0); _mm_store_si128((__m128i *)dest + 1, xmm1); _mm_store_si128((__m128i *)dest + 2, xmm2); _mm_store_si128((__m128i *)dest + 3, xmm3); flush64b(dest + 0 * 64); } static force_inline void memmove_mov_sse_fw(char *dest, const char *src, size_t len) { size_t cnt = (uint64_t)dest & 63; if (cnt > 0) { cnt = 64 - cnt; if (cnt > len) cnt = len; memmove_small_sse2(dest, src, cnt); dest += cnt; src += cnt; len -= cnt; } while (len >= 4 * 64) { memmove_mov4x64b(dest, src); dest += 4 * 64; src += 4 * 64; len -= 4 * 64; } if (len >= 2 * 64) { memmove_mov2x64b(dest, src); dest += 2 * 64; src += 2 * 64; len -= 2 * 64; } if (len >= 1 * 64) { memmove_mov1x64b(dest, src); dest += 1 * 64; src += 1 * 64; len -= 1 * 64; } if (len) memmove_small_sse2(dest, src, len); } static force_inline void memmove_mov_sse_bw(char *dest, const char *src, size_t len) { dest += len; src += len; 
size_t cnt = (uint64_t)dest & 63; if (cnt > 0) { if (cnt > len) cnt = len; dest -= cnt; src -= cnt; len -= cnt; memmove_small_sse2(dest, src, cnt); } while (len >= 4 * 64) { dest -= 4 * 64; src -= 4 * 64; len -= 4 * 64; memmove_mov4x64b(dest, src); } if (len >= 2 * 64) { dest -= 2 * 64; src -= 2 * 64; len -= 2 * 64; memmove_mov2x64b(dest, src); } if (len >= 1 * 64) { dest -= 1 * 64; src -= 1 * 64; len -= 1 * 64; memmove_mov1x64b(dest, src); } if (len) memmove_small_sse2(dest - len, src - len, len); } void EXPORTED_SYMBOL(char *dest, const char *src, size_t len) { if ((uintptr_t)dest - (uintptr_t)src >= len) memmove_mov_sse_fw(dest, src, len); else memmove_mov_sse_bw(dest, src, len); }
6,467
28.534247
74
h
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_nt_sse2_clflush.c
/* * Copyright 2017-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #define flush flush_clflush_nolog #define EXPORTED_SYMBOL memmove_movnt_sse2_clflush #define maybe_barrier barrier_after_ntstores #include "memcpy_nt_sse2.h"
1,760
46.594595
74
c
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_t_avx512f_empty.c
/* * Copyright 2017-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #define flush64b flush64b_empty #define flush flush_empty_nolog #define EXPORTED_SYMBOL memmove_mov_avx512f_empty #include "memcpy_t_avx512f.h"
1,746
46.216216
74
c
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_nt_avx_clflushopt.c
/* * Copyright 2017-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #define flush flush_clflushopt_nolog #define EXPORTED_SYMBOL memmove_movnt_avx_clflushopt #define maybe_barrier no_barrier_after_ntstores #include "memcpy_nt_avx.h"
1,767
46.783784
74
c
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_t_avx512f.h
/* * Copyright 2017-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <immintrin.h> #include <stddef.h> #include <stdint.h> #include "pmem.h" #include "avx.h" #include "flush.h" #include "memcpy_avx512f.h" #include "memcpy_memset.h" static force_inline void memmove_mov32x64b(char *dest, const char *src) { __m512i zmm0 = _mm512_loadu_si512((__m512i *)src + 0); __m512i zmm1 = _mm512_loadu_si512((__m512i *)src + 1); __m512i zmm2 = _mm512_loadu_si512((__m512i *)src + 2); __m512i zmm3 = _mm512_loadu_si512((__m512i *)src + 3); __m512i zmm4 = _mm512_loadu_si512((__m512i *)src + 4); __m512i zmm5 = _mm512_loadu_si512((__m512i *)src + 5); __m512i zmm6 = _mm512_loadu_si512((__m512i *)src + 6); __m512i zmm7 = _mm512_loadu_si512((__m512i *)src + 7); __m512i zmm8 = _mm512_loadu_si512((__m512i *)src + 8); __m512i zmm9 = _mm512_loadu_si512((__m512i *)src + 9); __m512i zmm10 = _mm512_loadu_si512((__m512i *)src + 10); __m512i zmm11 = _mm512_loadu_si512((__m512i *)src + 11); __m512i zmm12 = _mm512_loadu_si512((__m512i *)src + 12); __m512i zmm13 = _mm512_loadu_si512((__m512i *)src + 13); __m512i zmm14 = _mm512_loadu_si512((__m512i *)src + 14); __m512i zmm15 = _mm512_loadu_si512((__m512i *)src + 15); __m512i zmm16 = _mm512_loadu_si512((__m512i *)src + 16); __m512i zmm17 = _mm512_loadu_si512((__m512i *)src + 17); __m512i zmm18 = _mm512_loadu_si512((__m512i *)src + 18); __m512i zmm19 = _mm512_loadu_si512((__m512i *)src + 19); __m512i zmm20 = _mm512_loadu_si512((__m512i *)src + 20); __m512i zmm21 = _mm512_loadu_si512((__m512i *)src + 21); __m512i zmm22 = _mm512_loadu_si512((__m512i *)src + 22); __m512i zmm23 = _mm512_loadu_si512((__m512i *)src + 23); __m512i zmm24 = _mm512_loadu_si512((__m512i *)src + 24); __m512i zmm25 = _mm512_loadu_si512((__m512i *)src + 25); __m512i zmm26 = _mm512_loadu_si512((__m512i *)src + 26); __m512i zmm27 = _mm512_loadu_si512((__m512i *)src + 27); __m512i zmm28 = _mm512_loadu_si512((__m512i *)src + 28); __m512i zmm29 = _mm512_loadu_si512((__m512i *)src + 29); __m512i zmm30 = _mm512_loadu_si512((__m512i *)src + 30); 
__m512i zmm31 = _mm512_loadu_si512((__m512i *)src + 31); _mm512_store_si512((__m512i *)dest + 0, zmm0); _mm512_store_si512((__m512i *)dest + 1, zmm1); _mm512_store_si512((__m512i *)dest + 2, zmm2); _mm512_store_si512((__m512i *)dest + 3, zmm3); _mm512_store_si512((__m512i *)dest + 4, zmm4); _mm512_store_si512((__m512i *)dest + 5, zmm5); _mm512_store_si512((__m512i *)dest + 6, zmm6); _mm512_store_si512((__m512i *)dest + 7, zmm7); _mm512_store_si512((__m512i *)dest + 8, zmm8); _mm512_store_si512((__m512i *)dest + 9, zmm9); _mm512_store_si512((__m512i *)dest + 10, zmm10); _mm512_store_si512((__m512i *)dest + 11, zmm11); _mm512_store_si512((__m512i *)dest + 12, zmm12); _mm512_store_si512((__m512i *)dest + 13, zmm13); _mm512_store_si512((__m512i *)dest + 14, zmm14); _mm512_store_si512((__m512i *)dest + 15, zmm15); _mm512_store_si512((__m512i *)dest + 16, zmm16); _mm512_store_si512((__m512i *)dest + 17, zmm17); _mm512_store_si512((__m512i *)dest + 18, zmm18); _mm512_store_si512((__m512i *)dest + 19, zmm19); _mm512_store_si512((__m512i *)dest + 20, zmm20); _mm512_store_si512((__m512i *)dest + 21, zmm21); _mm512_store_si512((__m512i *)dest + 22, zmm22); _mm512_store_si512((__m512i *)dest + 23, zmm23); _mm512_store_si512((__m512i *)dest + 24, zmm24); _mm512_store_si512((__m512i *)dest + 25, zmm25); _mm512_store_si512((__m512i *)dest + 26, zmm26); _mm512_store_si512((__m512i *)dest + 27, zmm27); _mm512_store_si512((__m512i *)dest + 28, zmm28); _mm512_store_si512((__m512i *)dest + 29, zmm29); _mm512_store_si512((__m512i *)dest + 30, zmm30); _mm512_store_si512((__m512i *)dest + 31, zmm31); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); flush64b(dest + 2 * 64); flush64b(dest + 3 * 64); flush64b(dest + 4 * 64); flush64b(dest + 5 * 64); flush64b(dest + 6 * 64); flush64b(dest + 7 * 64); flush64b(dest + 8 * 64); flush64b(dest + 9 * 64); flush64b(dest + 10 * 64); flush64b(dest + 11 * 64); flush64b(dest + 12 * 64); flush64b(dest + 13 * 64); flush64b(dest + 14 * 64); flush64b(dest 
+ 15 * 64); flush64b(dest + 16 * 64); flush64b(dest + 17 * 64); flush64b(dest + 18 * 64); flush64b(dest + 19 * 64); flush64b(dest + 20 * 64); flush64b(dest + 21 * 64); flush64b(dest + 22 * 64); flush64b(dest + 23 * 64); flush64b(dest + 24 * 64); flush64b(dest + 25 * 64); flush64b(dest + 26 * 64); flush64b(dest + 27 * 64); flush64b(dest + 28 * 64); flush64b(dest + 29 * 64); flush64b(dest + 30 * 64); flush64b(dest + 31 * 64); } static force_inline void memmove_mov16x64b(char *dest, const char *src) { __m512i zmm0 = _mm512_loadu_si512((__m512i *)src + 0); __m512i zmm1 = _mm512_loadu_si512((__m512i *)src + 1); __m512i zmm2 = _mm512_loadu_si512((__m512i *)src + 2); __m512i zmm3 = _mm512_loadu_si512((__m512i *)src + 3); __m512i zmm4 = _mm512_loadu_si512((__m512i *)src + 4); __m512i zmm5 = _mm512_loadu_si512((__m512i *)src + 5); __m512i zmm6 = _mm512_loadu_si512((__m512i *)src + 6); __m512i zmm7 = _mm512_loadu_si512((__m512i *)src + 7); __m512i zmm8 = _mm512_loadu_si512((__m512i *)src + 8); __m512i zmm9 = _mm512_loadu_si512((__m512i *)src + 9); __m512i zmm10 = _mm512_loadu_si512((__m512i *)src + 10); __m512i zmm11 = _mm512_loadu_si512((__m512i *)src + 11); __m512i zmm12 = _mm512_loadu_si512((__m512i *)src + 12); __m512i zmm13 = _mm512_loadu_si512((__m512i *)src + 13); __m512i zmm14 = _mm512_loadu_si512((__m512i *)src + 14); __m512i zmm15 = _mm512_loadu_si512((__m512i *)src + 15); _mm512_store_si512((__m512i *)dest + 0, zmm0); _mm512_store_si512((__m512i *)dest + 1, zmm1); _mm512_store_si512((__m512i *)dest + 2, zmm2); _mm512_store_si512((__m512i *)dest + 3, zmm3); _mm512_store_si512((__m512i *)dest + 4, zmm4); _mm512_store_si512((__m512i *)dest + 5, zmm5); _mm512_store_si512((__m512i *)dest + 6, zmm6); _mm512_store_si512((__m512i *)dest + 7, zmm7); _mm512_store_si512((__m512i *)dest + 8, zmm8); _mm512_store_si512((__m512i *)dest + 9, zmm9); _mm512_store_si512((__m512i *)dest + 10, zmm10); _mm512_store_si512((__m512i *)dest + 11, zmm11); _mm512_store_si512((__m512i *)dest 
+ 12, zmm12); _mm512_store_si512((__m512i *)dest + 13, zmm13); _mm512_store_si512((__m512i *)dest + 14, zmm14); _mm512_store_si512((__m512i *)dest + 15, zmm15); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); flush64b(dest + 2 * 64); flush64b(dest + 3 * 64); flush64b(dest + 4 * 64); flush64b(dest + 5 * 64); flush64b(dest + 6 * 64); flush64b(dest + 7 * 64); flush64b(dest + 8 * 64); flush64b(dest + 9 * 64); flush64b(dest + 10 * 64); flush64b(dest + 11 * 64); flush64b(dest + 12 * 64); flush64b(dest + 13 * 64); flush64b(dest + 14 * 64); flush64b(dest + 15 * 64); } static force_inline void memmove_mov8x64b(char *dest, const char *src) { __m512i zmm0 = _mm512_loadu_si512((__m512i *)src + 0); __m512i zmm1 = _mm512_loadu_si512((__m512i *)src + 1); __m512i zmm2 = _mm512_loadu_si512((__m512i *)src + 2); __m512i zmm3 = _mm512_loadu_si512((__m512i *)src + 3); __m512i zmm4 = _mm512_loadu_si512((__m512i *)src + 4); __m512i zmm5 = _mm512_loadu_si512((__m512i *)src + 5); __m512i zmm6 = _mm512_loadu_si512((__m512i *)src + 6); __m512i zmm7 = _mm512_loadu_si512((__m512i *)src + 7); _mm512_store_si512((__m512i *)dest + 0, zmm0); _mm512_store_si512((__m512i *)dest + 1, zmm1); _mm512_store_si512((__m512i *)dest + 2, zmm2); _mm512_store_si512((__m512i *)dest + 3, zmm3); _mm512_store_si512((__m512i *)dest + 4, zmm4); _mm512_store_si512((__m512i *)dest + 5, zmm5); _mm512_store_si512((__m512i *)dest + 6, zmm6); _mm512_store_si512((__m512i *)dest + 7, zmm7); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); flush64b(dest + 2 * 64); flush64b(dest + 3 * 64); flush64b(dest + 4 * 64); flush64b(dest + 5 * 64); flush64b(dest + 6 * 64); flush64b(dest + 7 * 64); } static force_inline void memmove_mov4x64b(char *dest, const char *src) { __m512i zmm0 = _mm512_loadu_si512((__m512i *)src + 0); __m512i zmm1 = _mm512_loadu_si512((__m512i *)src + 1); __m512i zmm2 = _mm512_loadu_si512((__m512i *)src + 2); __m512i zmm3 = _mm512_loadu_si512((__m512i *)src + 3); _mm512_store_si512((__m512i *)dest + 0, zmm0); 
_mm512_store_si512((__m512i *)dest + 1, zmm1); _mm512_store_si512((__m512i *)dest + 2, zmm2); _mm512_store_si512((__m512i *)dest + 3, zmm3); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); flush64b(dest + 2 * 64); flush64b(dest + 3 * 64); } static force_inline void memmove_mov2x64b(char *dest, const char *src) { __m512i zmm0 = _mm512_loadu_si512((__m512i *)src + 0); __m512i zmm1 = _mm512_loadu_si512((__m512i *)src + 1); _mm512_store_si512((__m512i *)dest + 0, zmm0); _mm512_store_si512((__m512i *)dest + 1, zmm1); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); } static force_inline void memmove_mov1x64b(char *dest, const char *src) { __m512i zmm0 = _mm512_loadu_si512((__m512i *)src + 0); _mm512_store_si512((__m512i *)dest + 0, zmm0); flush64b(dest + 0 * 64); } static force_inline void memmove_mov_avx512f_fw(char *dest, const char *src, size_t len) { size_t cnt = (uint64_t)dest & 63; if (cnt > 0) { cnt = 64 - cnt; if (cnt > len) cnt = len; memmove_small_avx512f(dest, src, cnt); dest += cnt; src += cnt; len -= cnt; } while (len >= 32 * 64) { memmove_mov32x64b(dest, src); dest += 32 * 64; src += 32 * 64; len -= 32 * 64; } if (len >= 16 * 64) { memmove_mov16x64b(dest, src); dest += 16 * 64; src += 16 * 64; len -= 16 * 64; } if (len >= 8 * 64) { memmove_mov8x64b(dest, src); dest += 8 * 64; src += 8 * 64; len -= 8 * 64; } if (len >= 4 * 64) { memmove_mov4x64b(dest, src); dest += 4 * 64; src += 4 * 64; len -= 4 * 64; } if (len >= 2 * 64) { memmove_mov2x64b(dest, src); dest += 2 * 64; src += 2 * 64; len -= 2 * 64; } if (len >= 1 * 64) { memmove_mov1x64b(dest, src); dest += 1 * 64; src += 1 * 64; len -= 1 * 64; } if (len) memmove_small_avx512f(dest, src, len); } static force_inline void memmove_mov_avx512f_bw(char *dest, const char *src, size_t len) { dest += len; src += len; size_t cnt = (uint64_t)dest & 63; if (cnt > 0) { if (cnt > len) cnt = len; dest -= cnt; src -= cnt; len -= cnt; memmove_small_avx512f(dest, src, cnt); } while (len >= 32 * 64) { dest -= 32 * 64; src 
-= 32 * 64; len -= 32 * 64; memmove_mov32x64b(dest, src); } if (len >= 16 * 64) { dest -= 16 * 64; src -= 16 * 64; len -= 16 * 64; memmove_mov16x64b(dest, src); } if (len >= 8 * 64) { dest -= 8 * 64; src -= 8 * 64; len -= 8 * 64; memmove_mov8x64b(dest, src); } if (len >= 4 * 64) { dest -= 4 * 64; src -= 4 * 64; len -= 4 * 64; memmove_mov4x64b(dest, src); } if (len >= 2 * 64) { dest -= 2 * 64; src -= 2 * 64; len -= 2 * 64; memmove_mov2x64b(dest, src); } if (len >= 1 * 64) { dest -= 1 * 64; src -= 1 * 64; len -= 1 * 64; memmove_mov1x64b(dest, src); } if (len) memmove_small_avx512f(dest - len, src - len, len); } void EXPORTED_SYMBOL(char *dest, const char *src, size_t len) { if ((uintptr_t)dest - (uintptr_t)src >= len) memmove_mov_avx512f_fw(dest, src, len); else memmove_mov_avx512f_bw(dest, src, len); avx_zeroupper(); }
12,825
30.131068
74
h
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_nt_avx_clwb.c
/* * Copyright 2017-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #define flush flush_clwb_nolog #define EXPORTED_SYMBOL memmove_movnt_avx_clwb #define maybe_barrier no_barrier_after_ntstores #include "memcpy_nt_avx.h"
1,755
46.459459
74
c
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_t_sse2_clflushopt.c
/* * Copyright 2017-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #define flush64b pmem_clflushopt #define flush flush_clflushopt_nolog #define EXPORTED_SYMBOL memmove_mov_sse2_clflushopt #include "memcpy_t_sse2.h"
1,751
46.351351
74
c
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_t_avx_empty.c
/* * Copyright 2017-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #define flush64b flush64b_empty #define flush flush_empty_nolog #define EXPORTED_SYMBOL memmove_mov_avx_empty #include "memcpy_t_avx.h"
1,738
46
74
c
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_t_avx512f_clflushopt.c
/* * Copyright 2017-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #define flush64b pmem_clflushopt #define flush flush_clflushopt_nolog #define EXPORTED_SYMBOL memmove_mov_avx512f_clflushopt #include "memcpy_t_avx512f.h"
1,757
46.513514
74
c
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_nt_avx_empty.c
/* * Copyright 2017-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #define flush flush_empty_nolog #define EXPORTED_SYMBOL memmove_movnt_avx_empty #define maybe_barrier barrier_after_ntstores #include "memcpy_nt_avx.h"
1,754
46.432432
74
c
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_t_avx_clflushopt.c
/* * Copyright 2017-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #define flush64b pmem_clflushopt #define flush flush_clflushopt_nolog #define EXPORTED_SYMBOL memmove_mov_avx_clflushopt #include "memcpy_t_avx.h"
1,749
46.297297
74
c
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_t_avx.h
/* * Copyright 2017-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <immintrin.h> #include <stddef.h> #include <stdint.h> #include "pmem.h" #include "avx.h" #include "flush.h" #include "memcpy_avx.h" #include "memcpy_memset.h" static force_inline void memmove_mov8x64b(char *dest, const char *src) { __m256i ymm0 = _mm256_loadu_si256((__m256i *)src + 0); __m256i ymm1 = _mm256_loadu_si256((__m256i *)src + 1); __m256i ymm2 = _mm256_loadu_si256((__m256i *)src + 2); __m256i ymm3 = _mm256_loadu_si256((__m256i *)src + 3); __m256i ymm4 = _mm256_loadu_si256((__m256i *)src + 4); __m256i ymm5 = _mm256_loadu_si256((__m256i *)src + 5); __m256i ymm6 = _mm256_loadu_si256((__m256i *)src + 6); __m256i ymm7 = _mm256_loadu_si256((__m256i *)src + 7); __m256i ymm8 = _mm256_loadu_si256((__m256i *)src + 8); __m256i ymm9 = _mm256_loadu_si256((__m256i *)src + 9); __m256i ymm10 = _mm256_loadu_si256((__m256i *)src + 10); __m256i ymm11 = _mm256_loadu_si256((__m256i *)src + 11); __m256i ymm12 = _mm256_loadu_si256((__m256i *)src + 12); __m256i ymm13 = _mm256_loadu_si256((__m256i *)src + 13); __m256i ymm14 = _mm256_loadu_si256((__m256i *)src + 14); __m256i ymm15 = _mm256_loadu_si256((__m256i *)src + 15); _mm256_store_si256((__m256i *)dest + 0, ymm0); _mm256_store_si256((__m256i *)dest + 1, ymm1); _mm256_store_si256((__m256i *)dest + 2, ymm2); _mm256_store_si256((__m256i *)dest + 3, ymm3); _mm256_store_si256((__m256i *)dest + 4, ymm4); _mm256_store_si256((__m256i *)dest + 5, ymm5); _mm256_store_si256((__m256i *)dest + 6, ymm6); _mm256_store_si256((__m256i *)dest + 7, ymm7); _mm256_store_si256((__m256i *)dest + 8, ymm8); _mm256_store_si256((__m256i *)dest + 9, ymm9); _mm256_store_si256((__m256i *)dest + 10, ymm10); _mm256_store_si256((__m256i *)dest + 11, ymm11); _mm256_store_si256((__m256i *)dest + 12, ymm12); _mm256_store_si256((__m256i *)dest + 13, ymm13); _mm256_store_si256((__m256i *)dest + 14, ymm14); _mm256_store_si256((__m256i *)dest + 15, ymm15); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); flush64b(dest + 2 * 64); flush64b(dest + 3 * 64); 
flush64b(dest + 4 * 64); flush64b(dest + 5 * 64); flush64b(dest + 6 * 64); flush64b(dest + 7 * 64); } static force_inline void memmove_mov4x64b(char *dest, const char *src) { __m256i ymm0 = _mm256_loadu_si256((__m256i *)src + 0); __m256i ymm1 = _mm256_loadu_si256((__m256i *)src + 1); __m256i ymm2 = _mm256_loadu_si256((__m256i *)src + 2); __m256i ymm3 = _mm256_loadu_si256((__m256i *)src + 3); __m256i ymm4 = _mm256_loadu_si256((__m256i *)src + 4); __m256i ymm5 = _mm256_loadu_si256((__m256i *)src + 5); __m256i ymm6 = _mm256_loadu_si256((__m256i *)src + 6); __m256i ymm7 = _mm256_loadu_si256((__m256i *)src + 7); _mm256_store_si256((__m256i *)dest + 0, ymm0); _mm256_store_si256((__m256i *)dest + 1, ymm1); _mm256_store_si256((__m256i *)dest + 2, ymm2); _mm256_store_si256((__m256i *)dest + 3, ymm3); _mm256_store_si256((__m256i *)dest + 4, ymm4); _mm256_store_si256((__m256i *)dest + 5, ymm5); _mm256_store_si256((__m256i *)dest + 6, ymm6); _mm256_store_si256((__m256i *)dest + 7, ymm7); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); flush64b(dest + 2 * 64); flush64b(dest + 3 * 64); } static force_inline void memmove_mov2x64b(char *dest, const char *src) { __m256i ymm0 = _mm256_loadu_si256((__m256i *)src + 0); __m256i ymm1 = _mm256_loadu_si256((__m256i *)src + 1); __m256i ymm2 = _mm256_loadu_si256((__m256i *)src + 2); __m256i ymm3 = _mm256_loadu_si256((__m256i *)src + 3); _mm256_store_si256((__m256i *)dest + 0, ymm0); _mm256_store_si256((__m256i *)dest + 1, ymm1); _mm256_store_si256((__m256i *)dest + 2, ymm2); _mm256_store_si256((__m256i *)dest + 3, ymm3); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); } static force_inline void memmove_mov1x64b(char *dest, const char *src) { __m256i ymm0 = _mm256_loadu_si256((__m256i *)src + 0); __m256i ymm1 = _mm256_loadu_si256((__m256i *)src + 1); _mm256_store_si256((__m256i *)dest + 0, ymm0); _mm256_store_si256((__m256i *)dest + 1, ymm1); flush64b(dest + 0 * 64); } static force_inline void memmove_mov_avx_fw(char *dest, const char 
*src, size_t len) { size_t cnt = (uint64_t)dest & 63; if (cnt > 0) { cnt = 64 - cnt; if (cnt > len) cnt = len; memmove_small_avx(dest, src, cnt); dest += cnt; src += cnt; len -= cnt; } while (len >= 8 * 64) { memmove_mov8x64b(dest, src); dest += 8 * 64; src += 8 * 64; len -= 8 * 64; } if (len >= 4 * 64) { memmove_mov4x64b(dest, src); dest += 4 * 64; src += 4 * 64; len -= 4 * 64; } if (len >= 2 * 64) { memmove_mov2x64b(dest, src); dest += 2 * 64; src += 2 * 64; len -= 2 * 64; } if (len >= 1 * 64) { memmove_mov1x64b(dest, src); dest += 1 * 64; src += 1 * 64; len -= 1 * 64; } if (len) memmove_small_avx(dest, src, len); } static force_inline void memmove_mov_avx_bw(char *dest, const char *src, size_t len) { dest += len; src += len; size_t cnt = (uint64_t)dest & 63; if (cnt > 0) { if (cnt > len) cnt = len; dest -= cnt; src -= cnt; len -= cnt; memmove_small_avx(dest, src, cnt); } while (len >= 8 * 64) { dest -= 8 * 64; src -= 8 * 64; len -= 8 * 64; memmove_mov8x64b(dest, src); } if (len >= 4 * 64) { dest -= 4 * 64; src -= 4 * 64; len -= 4 * 64; memmove_mov4x64b(dest, src); } if (len >= 2 * 64) { dest -= 2 * 64; src -= 2 * 64; len -= 2 * 64; memmove_mov2x64b(dest, src); } if (len >= 1 * 64) { dest -= 1 * 64; src -= 1 * 64; len -= 1 * 64; memmove_mov1x64b(dest, src); } if (len) memmove_small_avx(dest - len, src - len, len); } void EXPORTED_SYMBOL(char *dest, const char *src, size_t len) { if ((uintptr_t)dest - (uintptr_t)src >= len) memmove_mov_avx_fw(dest, src, len); else memmove_mov_avx_bw(dest, src, len); avx_zeroupper(); }
7,378
27.937255
74
h
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_t_avx512f_clflush.c
/* * Copyright 2017-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #define flush64b _mm_clflush #define flush flush_clflush_nolog #define EXPORTED_SYMBOL memmove_mov_avx512f_clflush #include "memcpy_t_avx512f.h"
1,747
46.243243
74
c
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_nt_sse2_empty.c
/* * Copyright 2017-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #define flush flush_empty_nolog #define EXPORTED_SYMBOL memmove_movnt_sse2_empty #define maybe_barrier barrier_after_ntstores #include "memcpy_nt_sse2.h"
1,756
46.486486
74
c
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_t_sse2_clflush.c
/* * Copyright 2017-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #define flush64b _mm_clflush #define flush flush_clflush_nolog #define EXPORTED_SYMBOL memmove_mov_sse2_clflush #include "memcpy_t_sse2.h"
1,741
46.081081
74
c
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_nt_sse2_clwb.c
/* * Copyright 2017-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #define flush flush_clwb_nolog #define EXPORTED_SYMBOL memmove_movnt_sse2_clwb #define maybe_barrier no_barrier_after_ntstores #include "memcpy_nt_sse2.h"
1,757
46.513514
74
c
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_avx512f.h
/* * Copyright 2017-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef PMEM_MEMCPY_AVX512F_H #define PMEM_MEMCPY_AVX512F_H #include <stddef.h> #include "memcpy_avx.h" static force_inline void memmove_small_avx512f(char *dest, const char *src, size_t len) { /* We can't do better than AVX here. */ memmove_small_avx(dest, src, len); } #endif
1,886
38.3125
74
h
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_t_sse2_empty.c
/* * Copyright 2017-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #define flush64b flush64b_empty #define flush flush_empty_nolog #define EXPORTED_SYMBOL memmove_mov_sse2_empty #include "memcpy_t_sse2.h"
1,740
46.054054
74
c
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_nt_avx512f.h
/* * Copyright 2017-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <immintrin.h> #include <stddef.h> #include <stdint.h> #include "pmem.h" #include "avx.h" #include "flush.h" #include "memcpy_avx512f.h" #include "memcpy_memset.h" #include "libpmem.h" #include "valgrind_internal.h" static force_inline void memmove_movnt32x64b(char *dest, const char *src) { __m512i zmm0 = _mm512_loadu_si512((__m512i *)src + 0); __m512i zmm1 = _mm512_loadu_si512((__m512i *)src + 1); __m512i zmm2 = _mm512_loadu_si512((__m512i *)src + 2); __m512i zmm3 = _mm512_loadu_si512((__m512i *)src + 3); __m512i zmm4 = _mm512_loadu_si512((__m512i *)src + 4); __m512i zmm5 = _mm512_loadu_si512((__m512i *)src + 5); __m512i zmm6 = _mm512_loadu_si512((__m512i *)src + 6); __m512i zmm7 = _mm512_loadu_si512((__m512i *)src + 7); __m512i zmm8 = _mm512_loadu_si512((__m512i *)src + 8); __m512i zmm9 = _mm512_loadu_si512((__m512i *)src + 9); __m512i zmm10 = _mm512_loadu_si512((__m512i *)src + 10); __m512i zmm11 = _mm512_loadu_si512((__m512i *)src + 11); __m512i zmm12 = _mm512_loadu_si512((__m512i *)src + 12); __m512i zmm13 = _mm512_loadu_si512((__m512i *)src + 13); __m512i zmm14 = _mm512_loadu_si512((__m512i *)src + 14); __m512i zmm15 = _mm512_loadu_si512((__m512i *)src + 15); __m512i zmm16 = _mm512_loadu_si512((__m512i *)src + 16); __m512i zmm17 = _mm512_loadu_si512((__m512i *)src + 17); __m512i zmm18 = _mm512_loadu_si512((__m512i *)src + 18); __m512i zmm19 = _mm512_loadu_si512((__m512i *)src + 19); __m512i zmm20 = _mm512_loadu_si512((__m512i *)src + 20); __m512i zmm21 = _mm512_loadu_si512((__m512i *)src + 21); __m512i zmm22 = _mm512_loadu_si512((__m512i *)src + 22); __m512i zmm23 = _mm512_loadu_si512((__m512i *)src + 23); __m512i zmm24 = _mm512_loadu_si512((__m512i *)src + 24); __m512i zmm25 = _mm512_loadu_si512((__m512i *)src + 25); __m512i zmm26 = _mm512_loadu_si512((__m512i *)src + 26); __m512i zmm27 = _mm512_loadu_si512((__m512i *)src + 27); __m512i zmm28 = _mm512_loadu_si512((__m512i *)src + 28); __m512i zmm29 = _mm512_loadu_si512((__m512i *)src + 29); 
__m512i zmm30 = _mm512_loadu_si512((__m512i *)src + 30); __m512i zmm31 = _mm512_loadu_si512((__m512i *)src + 31); _mm512_stream_si512((__m512i *)dest + 0, zmm0); _mm512_stream_si512((__m512i *)dest + 1, zmm1); _mm512_stream_si512((__m512i *)dest + 2, zmm2); _mm512_stream_si512((__m512i *)dest + 3, zmm3); _mm512_stream_si512((__m512i *)dest + 4, zmm4); _mm512_stream_si512((__m512i *)dest + 5, zmm5); _mm512_stream_si512((__m512i *)dest + 6, zmm6); _mm512_stream_si512((__m512i *)dest + 7, zmm7); _mm512_stream_si512((__m512i *)dest + 8, zmm8); _mm512_stream_si512((__m512i *)dest + 9, zmm9); _mm512_stream_si512((__m512i *)dest + 10, zmm10); _mm512_stream_si512((__m512i *)dest + 11, zmm11); _mm512_stream_si512((__m512i *)dest + 12, zmm12); _mm512_stream_si512((__m512i *)dest + 13, zmm13); _mm512_stream_si512((__m512i *)dest + 14, zmm14); _mm512_stream_si512((__m512i *)dest + 15, zmm15); _mm512_stream_si512((__m512i *)dest + 16, zmm16); _mm512_stream_si512((__m512i *)dest + 17, zmm17); _mm512_stream_si512((__m512i *)dest + 18, zmm18); _mm512_stream_si512((__m512i *)dest + 19, zmm19); _mm512_stream_si512((__m512i *)dest + 20, zmm20); _mm512_stream_si512((__m512i *)dest + 21, zmm21); _mm512_stream_si512((__m512i *)dest + 22, zmm22); _mm512_stream_si512((__m512i *)dest + 23, zmm23); _mm512_stream_si512((__m512i *)dest + 24, zmm24); _mm512_stream_si512((__m512i *)dest + 25, zmm25); _mm512_stream_si512((__m512i *)dest + 26, zmm26); _mm512_stream_si512((__m512i *)dest + 27, zmm27); _mm512_stream_si512((__m512i *)dest + 28, zmm28); _mm512_stream_si512((__m512i *)dest + 29, zmm29); _mm512_stream_si512((__m512i *)dest + 30, zmm30); _mm512_stream_si512((__m512i *)dest + 31, zmm31); VALGRIND_DO_FLUSH(dest, 32 * 64); } static force_inline void memmove_movnt16x64b(char *dest, const char *src) { __m512i zmm0 = _mm512_loadu_si512((__m512i *)src + 0); __m512i zmm1 = _mm512_loadu_si512((__m512i *)src + 1); __m512i zmm2 = _mm512_loadu_si512((__m512i *)src + 2); __m512i zmm3 = 
_mm512_loadu_si512((__m512i *)src + 3); __m512i zmm4 = _mm512_loadu_si512((__m512i *)src + 4); __m512i zmm5 = _mm512_loadu_si512((__m512i *)src + 5); __m512i zmm6 = _mm512_loadu_si512((__m512i *)src + 6); __m512i zmm7 = _mm512_loadu_si512((__m512i *)src + 7); __m512i zmm8 = _mm512_loadu_si512((__m512i *)src + 8); __m512i zmm9 = _mm512_loadu_si512((__m512i *)src + 9); __m512i zmm10 = _mm512_loadu_si512((__m512i *)src + 10); __m512i zmm11 = _mm512_loadu_si512((__m512i *)src + 11); __m512i zmm12 = _mm512_loadu_si512((__m512i *)src + 12); __m512i zmm13 = _mm512_loadu_si512((__m512i *)src + 13); __m512i zmm14 = _mm512_loadu_si512((__m512i *)src + 14); __m512i zmm15 = _mm512_loadu_si512((__m512i *)src + 15); _mm512_stream_si512((__m512i *)dest + 0, zmm0); _mm512_stream_si512((__m512i *)dest + 1, zmm1); _mm512_stream_si512((__m512i *)dest + 2, zmm2); _mm512_stream_si512((__m512i *)dest + 3, zmm3); _mm512_stream_si512((__m512i *)dest + 4, zmm4); _mm512_stream_si512((__m512i *)dest + 5, zmm5); _mm512_stream_si512((__m512i *)dest + 6, zmm6); _mm512_stream_si512((__m512i *)dest + 7, zmm7); _mm512_stream_si512((__m512i *)dest + 8, zmm8); _mm512_stream_si512((__m512i *)dest + 9, zmm9); _mm512_stream_si512((__m512i *)dest + 10, zmm10); _mm512_stream_si512((__m512i *)dest + 11, zmm11); _mm512_stream_si512((__m512i *)dest + 12, zmm12); _mm512_stream_si512((__m512i *)dest + 13, zmm13); _mm512_stream_si512((__m512i *)dest + 14, zmm14); _mm512_stream_si512((__m512i *)dest + 15, zmm15); VALGRIND_DO_FLUSH(dest, 16 * 64); } static force_inline void memmove_movnt8x64b(char *dest, const char *src) { __m512i zmm0 = _mm512_loadu_si512((__m512i *)src + 0); __m512i zmm1 = _mm512_loadu_si512((__m512i *)src + 1); __m512i zmm2 = _mm512_loadu_si512((__m512i *)src + 2); __m512i zmm3 = _mm512_loadu_si512((__m512i *)src + 3); __m512i zmm4 = _mm512_loadu_si512((__m512i *)src + 4); __m512i zmm5 = _mm512_loadu_si512((__m512i *)src + 5); __m512i zmm6 = _mm512_loadu_si512((__m512i *)src + 6); __m512i 
zmm7 = _mm512_loadu_si512((__m512i *)src + 7); _mm512_stream_si512((__m512i *)dest + 0, zmm0); _mm512_stream_si512((__m512i *)dest + 1, zmm1); _mm512_stream_si512((__m512i *)dest + 2, zmm2); _mm512_stream_si512((__m512i *)dest + 3, zmm3); _mm512_stream_si512((__m512i *)dest + 4, zmm4); _mm512_stream_si512((__m512i *)dest + 5, zmm5); _mm512_stream_si512((__m512i *)dest + 6, zmm6); _mm512_stream_si512((__m512i *)dest + 7, zmm7); VALGRIND_DO_FLUSH(dest, 8 * 64); } static force_inline void memmove_movnt4x64b(char *dest, const char *src) { __m512i zmm0 = _mm512_loadu_si512((__m512i *)src + 0); __m512i zmm1 = _mm512_loadu_si512((__m512i *)src + 1); __m512i zmm2 = _mm512_loadu_si512((__m512i *)src + 2); __m512i zmm3 = _mm512_loadu_si512((__m512i *)src + 3); _mm512_stream_si512((__m512i *)dest + 0, zmm0); _mm512_stream_si512((__m512i *)dest + 1, zmm1); _mm512_stream_si512((__m512i *)dest + 2, zmm2); _mm512_stream_si512((__m512i *)dest + 3, zmm3); VALGRIND_DO_FLUSH(dest, 4 * 64); } static force_inline void memmove_movnt2x64b(char *dest, const char *src) { __m512i zmm0 = _mm512_loadu_si512((__m512i *)src + 0); __m512i zmm1 = _mm512_loadu_si512((__m512i *)src + 1); _mm512_stream_si512((__m512i *)dest + 0, zmm0); _mm512_stream_si512((__m512i *)dest + 1, zmm1); VALGRIND_DO_FLUSH(dest, 2 * 64); } static force_inline void memmove_movnt1x64b(char *dest, const char *src) { __m512i zmm0 = _mm512_loadu_si512((__m512i *)src + 0); _mm512_stream_si512((__m512i *)dest + 0, zmm0); VALGRIND_DO_FLUSH(dest, 64); } static force_inline void memmove_movnt1x32b(char *dest, const char *src) { __m256i zmm0 = _mm256_loadu_si256((__m256i *)src); _mm256_stream_si256((__m256i *)dest, zmm0); VALGRIND_DO_FLUSH(dest, 32); } static force_inline void memmove_movnt1x16b(char *dest, const char *src) { __m128i ymm0 = _mm_loadu_si128((__m128i *)src); _mm_stream_si128((__m128i *)dest, ymm0); VALGRIND_DO_FLUSH(dest, 16); } static force_inline void memmove_movnt1x8b(char *dest, const char *src) { 
_mm_stream_si64((long long *)dest, *(long long *)src); VALGRIND_DO_FLUSH(dest, 8); } static force_inline void memmove_movnt1x4b(char *dest, const char *src) { _mm_stream_si32((int *)dest, *(int *)src); VALGRIND_DO_FLUSH(dest, 4); } static force_inline void memmove_movnt_avx512f_fw(char *dest, const char *src, size_t len) { size_t cnt = (uint64_t)dest & 63; if (cnt > 0) { cnt = 64 - cnt; if (cnt > len) cnt = len; memmove_small_avx512f(dest, src, cnt); dest += cnt; src += cnt; len -= cnt; } while (len >= 32 * 64) { memmove_movnt32x64b(dest, src); dest += 32 * 64; src += 32 * 64; len -= 32 * 64; } if (len >= 16 * 64) { memmove_movnt16x64b(dest, src); dest += 16 * 64; src += 16 * 64; len -= 16 * 64; } if (len >= 8 * 64) { memmove_movnt8x64b(dest, src); dest += 8 * 64; src += 8 * 64; len -= 8 * 64; } if (len >= 4 * 64) { memmove_movnt4x64b(dest, src); dest += 4 * 64; src += 4 * 64; len -= 4 * 64; } if (len >= 2 * 64) { memmove_movnt2x64b(dest, src); dest += 2 * 64; src += 2 * 64; len -= 2 * 64; } if (len >= 1 * 64) { memmove_movnt1x64b(dest, src); dest += 1 * 64; src += 1 * 64; len -= 1 * 64; } if (len == 0) goto end; /* There's no point in using more than 1 nt store for 1 cache line. 
*/ if (util_is_pow2(len)) { if (len == 32) memmove_movnt1x32b(dest, src); else if (len == 16) memmove_movnt1x16b(dest, src); else if (len == 8) memmove_movnt1x8b(dest, src); else if (len == 4) memmove_movnt1x4b(dest, src); else goto nonnt; goto end; } nonnt: memmove_small_avx512f(dest, src, len); end: avx_zeroupper(); } static force_inline void memmove_movnt_avx512f_bw(char *dest, const char *src, size_t len) { dest += len; src += len; size_t cnt = (uint64_t)dest & 63; if (cnt > 0) { if (cnt > len) cnt = len; dest -= cnt; src -= cnt; len -= cnt; memmove_small_avx512f(dest, src, cnt); } while (len >= 32 * 64) { dest -= 32 * 64; src -= 32 * 64; len -= 32 * 64; memmove_movnt32x64b(dest, src); } if (len >= 16 * 64) { dest -= 16 * 64; src -= 16 * 64; len -= 16 * 64; memmove_movnt16x64b(dest, src); } if (len >= 8 * 64) { dest -= 8 * 64; src -= 8 * 64; len -= 8 * 64; memmove_movnt8x64b(dest, src); } if (len >= 4 * 64) { dest -= 4 * 64; src -= 4 * 64; len -= 4 * 64; memmove_movnt4x64b(dest, src); } if (len >= 2 * 64) { dest -= 2 * 64; src -= 2 * 64; len -= 2 * 64; memmove_movnt2x64b(dest, src); } if (len >= 1 * 64) { dest -= 1 * 64; src -= 1 * 64; len -= 1 * 64; memmove_movnt1x64b(dest, src); } if (len == 0) goto end; /* There's no point in using more than 1 nt store for 1 cache line. */ if (util_is_pow2(len)) { if (len == 32) { dest -= 32; src -= 32; memmove_movnt1x32b(dest, src); } else if (len == 16) { dest -= 16; src -= 16; memmove_movnt1x16b(dest, src); } else if (len == 8) { dest -= 8; src -= 8; memmove_movnt1x8b(dest, src); } else if (len == 4) { dest -= 4; src -= 4; memmove_movnt1x4b(dest, src); } else { goto nonnt; } goto end; } nonnt: dest -= len; src -= len; memmove_small_avx512f(dest, src, len); end: avx_zeroupper(); } void EXPORTED_SYMBOL(char *dest, const char *src, size_t len) { if ((uintptr_t)dest - (uintptr_t)src >= len) memmove_movnt_avx512f_fw(dest, src, len); else memmove_movnt_avx512f_bw(dest, src, len); maybe_barrier(); }
13,191
28.446429
74
h
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_nt_avx512f_empty.c
/* * Copyright 2017-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #define flush flush_empty_nolog #define EXPORTED_SYMBOL memmove_movnt_avx512f_empty #define maybe_barrier barrier_after_ntstores #include "memcpy_nt_avx512f.h"
1,762
46.648649
74
c
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_nt_sse2.h
/* * Copyright 2017-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <immintrin.h> #include <stddef.h> #include <stdint.h> #include "pmem.h" #include "flush.h" #include "memcpy_memset.h" #include "memcpy_sse2.h" #include "valgrind_internal.h" static force_inline void memmove_movnt4x64b(char *dest, const char *src) { __m128i xmm0 = _mm_loadu_si128((__m128i *)src + 0); __m128i xmm1 = _mm_loadu_si128((__m128i *)src + 1); __m128i xmm2 = _mm_loadu_si128((__m128i *)src + 2); __m128i xmm3 = _mm_loadu_si128((__m128i *)src + 3); __m128i xmm4 = _mm_loadu_si128((__m128i *)src + 4); __m128i xmm5 = _mm_loadu_si128((__m128i *)src + 5); __m128i xmm6 = _mm_loadu_si128((__m128i *)src + 6); __m128i xmm7 = _mm_loadu_si128((__m128i *)src + 7); __m128i xmm8 = _mm_loadu_si128((__m128i *)src + 8); __m128i xmm9 = _mm_loadu_si128((__m128i *)src + 9); __m128i xmm10 = _mm_loadu_si128((__m128i *)src + 10); __m128i xmm11 = _mm_loadu_si128((__m128i *)src + 11); __m128i xmm12 = _mm_loadu_si128((__m128i *)src + 12); __m128i xmm13 = _mm_loadu_si128((__m128i *)src + 13); __m128i xmm14 = _mm_loadu_si128((__m128i *)src + 14); __m128i xmm15 = _mm_loadu_si128((__m128i *)src + 15); _mm_stream_si128((__m128i *)dest + 0, xmm0); _mm_stream_si128((__m128i *)dest + 1, xmm1); _mm_stream_si128((__m128i *)dest + 2, xmm2); _mm_stream_si128((__m128i *)dest + 3, xmm3); _mm_stream_si128((__m128i *)dest + 4, xmm4); _mm_stream_si128((__m128i *)dest + 5, xmm5); _mm_stream_si128((__m128i *)dest + 6, xmm6); _mm_stream_si128((__m128i *)dest + 7, xmm7); _mm_stream_si128((__m128i *)dest + 8, xmm8); _mm_stream_si128((__m128i *)dest + 9, xmm9); _mm_stream_si128((__m128i *)dest + 10, xmm10); _mm_stream_si128((__m128i *)dest + 11, xmm11); _mm_stream_si128((__m128i *)dest + 12, xmm12); _mm_stream_si128((__m128i *)dest + 13, xmm13); _mm_stream_si128((__m128i *)dest + 14, xmm14); _mm_stream_si128((__m128i *)dest + 15, xmm15); VALGRIND_DO_FLUSH(dest, 4 * 64); } static force_inline void memmove_movnt2x64b(char *dest, const char *src) { __m128i xmm0 = _mm_loadu_si128((__m128i *)src + 0); 
__m128i xmm1 = _mm_loadu_si128((__m128i *)src + 1); __m128i xmm2 = _mm_loadu_si128((__m128i *)src + 2); __m128i xmm3 = _mm_loadu_si128((__m128i *)src + 3); __m128i xmm4 = _mm_loadu_si128((__m128i *)src + 4); __m128i xmm5 = _mm_loadu_si128((__m128i *)src + 5); __m128i xmm6 = _mm_loadu_si128((__m128i *)src + 6); __m128i xmm7 = _mm_loadu_si128((__m128i *)src + 7); _mm_stream_si128((__m128i *)dest + 0, xmm0); _mm_stream_si128((__m128i *)dest + 1, xmm1); _mm_stream_si128((__m128i *)dest + 2, xmm2); _mm_stream_si128((__m128i *)dest + 3, xmm3); _mm_stream_si128((__m128i *)dest + 4, xmm4); _mm_stream_si128((__m128i *)dest + 5, xmm5); _mm_stream_si128((__m128i *)dest + 6, xmm6); _mm_stream_si128((__m128i *)dest + 7, xmm7); VALGRIND_DO_FLUSH(dest, 2 * 64); } static force_inline void memmove_movnt1x64b(char *dest, const char *src) { __m128i xmm0 = _mm_loadu_si128((__m128i *)src + 0); __m128i xmm1 = _mm_loadu_si128((__m128i *)src + 1); __m128i xmm2 = _mm_loadu_si128((__m128i *)src + 2); __m128i xmm3 = _mm_loadu_si128((__m128i *)src + 3); _mm_stream_si128((__m128i *)dest + 0, xmm0); _mm_stream_si128((__m128i *)dest + 1, xmm1); _mm_stream_si128((__m128i *)dest + 2, xmm2); _mm_stream_si128((__m128i *)dest + 3, xmm3); VALGRIND_DO_FLUSH(dest, 64); } static force_inline void memmove_movnt1x32b(char *dest, const char *src) { __m128i xmm0 = _mm_loadu_si128((__m128i *)src + 0); __m128i xmm1 = _mm_loadu_si128((__m128i *)src + 1); _mm_stream_si128((__m128i *)dest + 0, xmm0); _mm_stream_si128((__m128i *)dest + 1, xmm1); VALGRIND_DO_FLUSH(dest, 32); } static force_inline void memmove_movnt1x16b(char *dest, const char *src) { __m128i xmm0 = _mm_loadu_si128((__m128i *)src); _mm_stream_si128((__m128i *)dest, xmm0); VALGRIND_DO_FLUSH(dest, 16); } static force_inline void memmove_movnt1x8b(char *dest, const char *src) { _mm_stream_si64((long long *)dest, *(long long *)src); VALGRIND_DO_FLUSH(dest, 8); } static force_inline void memmove_movnt1x4b(char *dest, const char *src) { 
_mm_stream_si32((int *)dest, *(int *)src); VALGRIND_DO_FLUSH(dest, 4); } static force_inline void memmove_movnt_sse_fw(char *dest, const char *src, size_t len) { size_t cnt = (uint64_t)dest & 63; if (cnt > 0) { cnt = 64 - cnt; if (cnt > len) cnt = len; memmove_small_sse2(dest, src, cnt); dest += cnt; src += cnt; len -= cnt; } while (len >= 4 * 64) { memmove_movnt4x64b(dest, src); dest += 4 * 64; src += 4 * 64; len -= 4 * 64; } if (len >= 2 * 64) { memmove_movnt2x64b(dest, src); dest += 2 * 64; src += 2 * 64; len -= 2 * 64; } if (len >= 1 * 64) { memmove_movnt1x64b(dest, src); dest += 1 * 64; src += 1 * 64; len -= 1 * 64; } if (len == 0) return; /* There's no point in using more than 1 nt store for 1 cache line. */ if (util_is_pow2(len)) { if (len == 32) memmove_movnt1x32b(dest, src); else if (len == 16) memmove_movnt1x16b(dest, src); else if (len == 8) memmove_movnt1x8b(dest, src); else if (len == 4) memmove_movnt1x4b(dest, src); else goto nonnt; return; } nonnt: memmove_small_sse2(dest, src, len); } static force_inline void memmove_movnt_sse_bw(char *dest, const char *src, size_t len) { dest += len; src += len; size_t cnt = (uint64_t)dest & 63; if (cnt > 0) { if (cnt > len) cnt = len; dest -= cnt; src -= cnt; len -= cnt; memmove_small_sse2(dest, src, cnt); } while (len >= 4 * 64) { dest -= 4 * 64; src -= 4 * 64; len -= 4 * 64; memmove_movnt4x64b(dest, src); } if (len >= 2 * 64) { dest -= 2 * 64; src -= 2 * 64; len -= 2 * 64; memmove_movnt2x64b(dest, src); } if (len >= 1 * 64) { dest -= 1 * 64; src -= 1 * 64; len -= 1 * 64; memmove_movnt1x64b(dest, src); } if (len == 0) return; /* There's no point in using more than 1 nt store for 1 cache line. 
*/ if (util_is_pow2(len)) { if (len == 32) { dest -= 32; src -= 32; memmove_movnt1x32b(dest, src); } else if (len == 16) { dest -= 16; src -= 16; memmove_movnt1x16b(dest, src); } else if (len == 8) { dest -= 8; src -= 8; memmove_movnt1x8b(dest, src); } else if (len == 4) { dest -= 4; src -= 4; memmove_movnt1x4b(dest, src); } else { goto nonnt; } return; } nonnt: dest -= len; src -= len; memmove_small_sse2(dest, src, len); } void EXPORTED_SYMBOL(char *dest, const char *src, size_t len) { if ((uintptr_t)dest - (uintptr_t)src >= len) memmove_movnt_sse_fw(dest, src, len); else memmove_movnt_sse_bw(dest, src, len); maybe_barrier(); }
8,204
25.813725
74
h
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_t_sse2_clwb.c
/* * Copyright 2017-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #define flush64b pmem_clwb #define flush flush_clwb_nolog #define EXPORTED_SYMBOL memmove_mov_sse2_clwb #include "memcpy_t_sse2.h"
1,733
45.864865
74
c
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_nt_avx512f_clwb.c
/* * Copyright 2017-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #define flush flush_clwb_nolog #define EXPORTED_SYMBOL memmove_movnt_avx512f_clwb #define maybe_barrier no_barrier_after_ntstores #include "memcpy_nt_avx512f.h"
1,763
46.675676
74
c
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_nt_avx.h
/* * Copyright 2017-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <immintrin.h> #include <stddef.h> #include <stdint.h> #include "pmem.h" #include "avx.h" #include "flush.h" #include "memcpy_avx.h" #include "memcpy_memset.h" #include "valgrind_internal.h" static force_inline void memmove_movnt8x64b(char *dest, const char *src) { __m256i ymm0 = _mm256_loadu_si256((__m256i *)src + 0); __m256i ymm1 = _mm256_loadu_si256((__m256i *)src + 1); __m256i ymm2 = _mm256_loadu_si256((__m256i *)src + 2); __m256i ymm3 = _mm256_loadu_si256((__m256i *)src + 3); __m256i ymm4 = _mm256_loadu_si256((__m256i *)src + 4); __m256i ymm5 = _mm256_loadu_si256((__m256i *)src + 5); __m256i ymm6 = _mm256_loadu_si256((__m256i *)src + 6); __m256i ymm7 = _mm256_loadu_si256((__m256i *)src + 7); __m256i ymm8 = _mm256_loadu_si256((__m256i *)src + 8); __m256i ymm9 = _mm256_loadu_si256((__m256i *)src + 9); __m256i ymm10 = _mm256_loadu_si256((__m256i *)src + 10); __m256i ymm11 = _mm256_loadu_si256((__m256i *)src + 11); __m256i ymm12 = _mm256_loadu_si256((__m256i *)src + 12); __m256i ymm13 = _mm256_loadu_si256((__m256i *)src + 13); __m256i ymm14 = _mm256_loadu_si256((__m256i *)src + 14); __m256i ymm15 = _mm256_loadu_si256((__m256i *)src + 15); _mm256_stream_si256((__m256i *)dest + 0, ymm0); _mm256_stream_si256((__m256i *)dest + 1, ymm1); _mm256_stream_si256((__m256i *)dest + 2, ymm2); _mm256_stream_si256((__m256i *)dest + 3, ymm3); _mm256_stream_si256((__m256i *)dest + 4, ymm4); _mm256_stream_si256((__m256i *)dest + 5, ymm5); _mm256_stream_si256((__m256i *)dest + 6, ymm6); _mm256_stream_si256((__m256i *)dest + 7, ymm7); _mm256_stream_si256((__m256i *)dest + 8, ymm8); _mm256_stream_si256((__m256i *)dest + 9, ymm9); _mm256_stream_si256((__m256i *)dest + 10, ymm10); _mm256_stream_si256((__m256i *)dest + 11, ymm11); _mm256_stream_si256((__m256i *)dest + 12, ymm12); _mm256_stream_si256((__m256i *)dest + 13, ymm13); _mm256_stream_si256((__m256i *)dest + 14, ymm14); _mm256_stream_si256((__m256i *)dest + 15, ymm15); VALGRIND_DO_FLUSH(dest, 8 * 64); } static 
force_inline void memmove_movnt4x64b(char *dest, const char *src) { __m256i ymm0 = _mm256_loadu_si256((__m256i *)src + 0); __m256i ymm1 = _mm256_loadu_si256((__m256i *)src + 1); __m256i ymm2 = _mm256_loadu_si256((__m256i *)src + 2); __m256i ymm3 = _mm256_loadu_si256((__m256i *)src + 3); __m256i ymm4 = _mm256_loadu_si256((__m256i *)src + 4); __m256i ymm5 = _mm256_loadu_si256((__m256i *)src + 5); __m256i ymm6 = _mm256_loadu_si256((__m256i *)src + 6); __m256i ymm7 = _mm256_loadu_si256((__m256i *)src + 7); _mm256_stream_si256((__m256i *)dest + 0, ymm0); _mm256_stream_si256((__m256i *)dest + 1, ymm1); _mm256_stream_si256((__m256i *)dest + 2, ymm2); _mm256_stream_si256((__m256i *)dest + 3, ymm3); _mm256_stream_si256((__m256i *)dest + 4, ymm4); _mm256_stream_si256((__m256i *)dest + 5, ymm5); _mm256_stream_si256((__m256i *)dest + 6, ymm6); _mm256_stream_si256((__m256i *)dest + 7, ymm7); VALGRIND_DO_FLUSH(dest, 4 * 64); } static force_inline void memmove_movnt2x64b(char *dest, const char *src) { __m256i ymm0 = _mm256_loadu_si256((__m256i *)src + 0); __m256i ymm1 = _mm256_loadu_si256((__m256i *)src + 1); __m256i ymm2 = _mm256_loadu_si256((__m256i *)src + 2); __m256i ymm3 = _mm256_loadu_si256((__m256i *)src + 3); _mm256_stream_si256((__m256i *)dest + 0, ymm0); _mm256_stream_si256((__m256i *)dest + 1, ymm1); _mm256_stream_si256((__m256i *)dest + 2, ymm2); _mm256_stream_si256((__m256i *)dest + 3, ymm3); VALGRIND_DO_FLUSH(dest, 2 * 64); } static force_inline void memmove_movnt1x64b(char *dest, const char *src) { __m256i ymm0 = _mm256_loadu_si256((__m256i *)src + 0); __m256i ymm1 = _mm256_loadu_si256((__m256i *)src + 1); _mm256_stream_si256((__m256i *)dest + 0, ymm0); _mm256_stream_si256((__m256i *)dest + 1, ymm1); VALGRIND_DO_FLUSH(dest, 64); } static force_inline void memmove_movnt1x32b(char *dest, const char *src) { __m256i ymm0 = _mm256_loadu_si256((__m256i *)src); _mm256_stream_si256((__m256i *)dest, ymm0); VALGRIND_DO_FLUSH(dest, 32); } static force_inline void 
memmove_movnt1x16b(char *dest, const char *src) { __m128i xmm0 = _mm_loadu_si128((__m128i *)src); _mm_stream_si128((__m128i *)dest, xmm0); VALGRIND_DO_FLUSH(dest, 16); } static force_inline void memmove_movnt1x8b(char *dest, const char *src) { _mm_stream_si64((long long *)dest, *(long long *)src); VALGRIND_DO_FLUSH(dest, 8); } static force_inline void memmove_movnt1x4b(char *dest, const char *src) { _mm_stream_si32((int *)dest, *(int *)src); VALGRIND_DO_FLUSH(dest, 4); } static force_inline void memmove_movnt_avx_fw(char *dest, const char *src, size_t len) { size_t cnt = (uint64_t)dest & 63; if (cnt > 0) { cnt = 64 - cnt; if (cnt > len) cnt = len; memmove_small_avx(dest, src, cnt); dest += cnt; src += cnt; len -= cnt; } while (len >= 8 * 64) { memmove_movnt8x64b(dest, src); dest += 8 * 64; src += 8 * 64; len -= 8 * 64; } if (len >= 4 * 64) { memmove_movnt4x64b(dest, src); dest += 4 * 64; src += 4 * 64; len -= 4 * 64; } if (len >= 2 * 64) { memmove_movnt2x64b(dest, src); dest += 2 * 64; src += 2 * 64; len -= 2 * 64; } if (len >= 1 * 64) { memmove_movnt1x64b(dest, src); dest += 1 * 64; src += 1 * 64; len -= 1 * 64; } if (len == 0) goto end; /* There's no point in using more than 1 nt store for 1 cache line. 
*/ if (util_is_pow2(len)) { if (len == 32) memmove_movnt1x32b(dest, src); else if (len == 16) memmove_movnt1x16b(dest, src); else if (len == 8) memmove_movnt1x8b(dest, src); else if (len == 4) memmove_movnt1x4b(dest, src); else goto nonnt; goto end; } nonnt: memmove_small_avx(dest, src, len); end: avx_zeroupper(); } static force_inline void memmove_movnt_avx_bw(char *dest, const char *src, size_t len) { dest += len; src += len; size_t cnt = (uint64_t)dest & 63; if (cnt > 0) { if (cnt > len) cnt = len; dest -= cnt; src -= cnt; len -= cnt; memmove_small_avx(dest, src, cnt); } while (len >= 8 * 64) { dest -= 8 * 64; src -= 8 * 64; len -= 8 * 64; memmove_movnt8x64b(dest, src); } if (len >= 4 * 64) { dest -= 4 * 64; src -= 4 * 64; len -= 4 * 64; memmove_movnt4x64b(dest, src); } if (len >= 2 * 64) { dest -= 2 * 64; src -= 2 * 64; len -= 2 * 64; memmove_movnt2x64b(dest, src); } if (len >= 1 * 64) { dest -= 1 * 64; src -= 1 * 64; len -= 1 * 64; memmove_movnt1x64b(dest, src); } if (len == 0) goto end; /* There's no point in using more than 1 nt store for 1 cache line. */ if (util_is_pow2(len)) { if (len == 32) { dest -= 32; src -= 32; memmove_movnt1x32b(dest, src); } else if (len == 16) { dest -= 16; src -= 16; memmove_movnt1x16b(dest, src); } else if (len == 8) { dest -= 8; src -= 8; memmove_movnt1x8b(dest, src); } else if (len == 4) { dest -= 4; src -= 4; memmove_movnt1x4b(dest, src); } else { goto nonnt; } goto end; } nonnt: dest -= len; src -= len; memmove_small_avx(dest, src, len); end: avx_zeroupper(); } void EXPORTED_SYMBOL(char *dest, const char *src, size_t len) { if ((uintptr_t)dest - (uintptr_t)src >= len) memmove_movnt_avx_fw(dest, src, len); else memmove_movnt_avx_bw(dest, src, len); maybe_barrier(); }
8,883
25.519403
74
h
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/x86_64/memcpy/memcpy_t_avx_clwb.c
/* * Copyright 2017-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #define flush64b pmem_clwb #define flush flush_clwb_nolog #define EXPORTED_SYMBOL memmove_mov_avx_clwb #include "memcpy_t_avx.h"
1,731
45.810811
74
c
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/aarch64/flush.h
/* * Copyright 2014-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #ifndef ARM64_FLUSH_H #define ARM64_FLUSH_H #include <stdint.h> #include "arm_cacheops.h" #include "util.h" #define FLUSH_ALIGN ((uintptr_t)64) /* * flush_clflushopt_nolog -- flush the CPU cache, using * arm_clean_and_invalidate_va_to_poc (see arm_cacheops.h) {DC CIVAC} */ static force_inline void flush_dcache_invalidate_opt_nolog(const void *addr, size_t len) { uintptr_t uptr; arm_data_memory_barrier(); for (uptr = (uintptr_t)addr & ~(FLUSH_ALIGN - 1); uptr < (uintptr_t)addr + len; uptr += FLUSH_ALIGN) { arm_clean_and_invalidate_va_to_poc((char *)uptr); } arm_data_memory_barrier(); } /* * flush_dcache_nolog -- flush the CPU cache, using DC CVAC */ static force_inline void flush_dcache_nolog(const void *addr, size_t len) { uintptr_t uptr; /* * Loop through cache-line-size (typically 64B) aligned chunks * covering the given range. */ for (uptr = (uintptr_t)addr & ~(FLUSH_ALIGN - 1); uptr < (uintptr_t)addr + len; uptr += FLUSH_ALIGN) { arm_clean_va_to_poc((char *)uptr); } } #endif
2,631
32.74359
74
h
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/aarch64/arm_cacheops.h
/* * Copyright 2014-2017, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /* * ARM inline assembly to flush and invalidate caches * clwb => dc cvac * clflush | clflushopt => dc civac * fence => dmb ish */ #ifndef AARCH64_CACHEOPS_H #define AARCH64_CACHEOPS_H #include <stdlib.h> static inline void arm_clean_va_to_poc(void const *p __attribute__((unused))) { asm volatile("dc cvac, %0" : : "r" (p) : "memory"); } static inline void arm_data_memory_barrier(void) { asm volatile("dmb ish" : : : "memory"); } static inline void arm_clean_and_invalidate_va_to_poc(const void *addr) { asm volatile("dc civac, %0" : : "r" (addr) : "memory"); } #endif
2,185
34.258065
74
h
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libpmem/aarch64/init.c
/* * Copyright 2014-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <string.h> #include "libpmem.h" #include "flush.h" #include "os.h" #include "out.h" #include "pmem.h" #include "valgrind_internal.h" /* * memmove_nodrain_libc -- (internal) memmove to pmem without hw drain */ static void * memmove_nodrain_libc(void *pmemdest, const void *src, size_t len, unsigned flags) { LOG(15, "pmemdest %p src %p len %zu flags 0x%x", pmemdest, src, len, flags); memmove(pmemdest, src, len); pmem_flush_flags(pmemdest, len, flags); return pmemdest; } /* * memset_nodrain_libc -- (internal) memset to pmem without hw drain */ static void * memset_nodrain_libc(void *pmemdest, int c, size_t len, unsigned flags) { LOG(15, "pmemdest %p c 0x%x len %zu flags 0x%x", pmemdest, c, len, flags); memset(pmemdest, c, len); pmem_flush_flags(pmemdest, len, flags); return pmemdest; } /* * predrain_fence_empty -- (internal) issue the pre-drain fence instruction */ static void predrain_fence_empty(void) { LOG(15, NULL); VALGRIND_DO_FENCE; /* nothing to do (because CLFLUSH did it for us) */ } /* * predrain_memory_barrier -- (internal) issue the pre-drain fence instruction */ static void predrain_memory_barrier(void) { LOG(15, NULL); arm_data_memory_barrier(); } /* * flush_dcache_invalidate_opt -- (internal) flush the CPU cache, * using clflushopt for X86 and arm_clean_and_invalidate_va_to_poc * for aarch64 (see arm_cacheops.h) {DC CIVAC} */ static void flush_dcache_invalidate_opt(const void *addr, size_t len) { LOG(15, "addr %p len %zu", addr, len); flush_dcache_invalidate_opt_nolog(addr, len); } /* * flush_dcache -- (internal) flush the CPU cache, using clwb */ static void flush_dcache(const void *addr, size_t len) { LOG(15, "addr %p len %zu", addr, len); flush_dcache_nolog(addr, len); } /* * flush_empty -- (internal) do not flush the CPU cache */ static void flush_empty(const void *addr, size_t len) { LOG(15, "addr %p len %zu", addr, len); flush_empty_nolog(addr, len); } /* * pmem_init_funcs -- initialize architecture-specific list of pmem operations */ 
void pmem_init_funcs(struct pmem_funcs *funcs) { LOG(3, NULL); funcs->predrain_fence = predrain_fence_empty; funcs->deep_flush = flush_dcache_invalidate_opt; funcs->is_pmem = is_pmem_detect; funcs->memmove_nodrain = memmove_nodrain_generic; funcs->memset_nodrain = memset_nodrain_generic; char *ptr = os_getenv("PMEM_NO_GENERIC_MEMCPY"); if (ptr) { long long val = atoll(ptr); if (val) { funcs->memmove_nodrain = memmove_nodrain_libc; funcs->memset_nodrain = memset_nodrain_libc; } } int flush; char *e = os_getenv("PMEM_NO_FLUSH"); if (e && (strcmp(e, "1") == 0)) { flush = 0; LOG(3, "Forced not flushing CPU_cache"); } else if (e && (strcmp(e, "0") == 0)) { flush = 1; LOG(3, "Forced flushing CPU_cache"); } else if (pmem_has_auto_flush() == 1) { flush = 0; LOG(3, "Not flushing CPU_cache, eADR detected"); } else { flush = 1; LOG(3, "Flushing CPU cache"); } if (flush) { funcs->flush = funcs->deep_flush; } else { funcs->flush = flush_empty; funcs->predrain_fence = predrain_memory_barrier; } if (funcs->deep_flush == flush_dcache) LOG(3, "Using ARM invalidate"); else if (funcs->deep_flush == flush_dcache_invalidate_opt) LOG(3, "Synchronize VA to poc for ARM"); else FATAL("invalid deep flush function address"); if (funcs->deep_flush == flush_empty) LOG(3, "not flushing CPU cache"); else if (funcs->flush != funcs->deep_flush) FATAL("invalid flush function address"); if (funcs->memmove_nodrain == memmove_nodrain_generic) LOG(3, "using generic memmove"); else if (funcs->memmove_nodrain == memmove_nodrain_libc) LOG(3, "using libc memmove"); else FATAL("invalid memove_nodrain function address"); }
5,348
26.572165
78
c
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libvmem/libvmem_main.c
/* * Copyright 2015-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * libvmem_main.c -- entry point for libvmem.dll * * XXX - This is a placeholder. All the library initialization/cleanup * that is done in library ctors/dtors, as well as TLS initialization * should be moved here. 
*/ #include "win_mmap.h" void vmem_init(void); void vmem_fini(void); void jemalloc_constructor(void); void jemalloc_destructor(void); int APIENTRY DllMain(HINSTANCE hInstance, DWORD dwReason, LPVOID lpReserved) { switch (dwReason) { case DLL_PROCESS_ATTACH: jemalloc_constructor(); vmem_init(); win_mmap_init(); break; case DLL_THREAD_ATTACH: case DLL_THREAD_DETACH: break; case DLL_PROCESS_DETACH: win_mmap_fini(); vmem_fini(); jemalloc_destructor(); break; } return TRUE; }
2,332
31.859155
74
c
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libvmem/vmem.c
/* * Copyright 2014-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * vmem.c -- memory pool & allocation entry points for libvmem */ #include <stdbool.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/param.h> #include <errno.h> #include <stdint.h> #include <fcntl.h> #include <inttypes.h> #include <wchar.h> #include "libvmem.h" #include "jemalloc.h" #include "pmemcommon.h" #include "sys_util.h" #include "file.h" #include "vmem.h" #include "valgrind_internal.h" /* * private to this file... 
*/ static size_t Header_size; static os_mutex_t Vmem_init_lock; static os_mutex_t Pool_lock; /* guards vmem_create and vmem_delete */ /* * print_jemalloc_messages -- custom print function, for jemalloc * * Prints traces from jemalloc. All traces from jemalloc * are considered as error messages. */ static void print_jemalloc_messages(void *ignore, const char *s) { ERR("%s", s); } /* * print_jemalloc_stats -- print function, for jemalloc statistics * * Prints statistics from jemalloc. All statistics are printed with level 0. */ static void print_jemalloc_stats(void *ignore, const char *s) { LOG_NONL(0, "%s", s); } /* * vmem_construct -- initialization for vmem * * Called automatically by the run-time loader or on the first use of vmem. */ void vmem_construct(void) { static bool initialized = false; int (*je_vmem_navsnprintf) (char *, size_t, const char *, va_list) = NULL; if (initialized) return; util_mutex_lock(&Vmem_init_lock); if (!initialized) { common_init(VMEM_LOG_PREFIX, VMEM_LOG_LEVEL_VAR, VMEM_LOG_FILE_VAR, VMEM_MAJOR_VERSION, VMEM_MINOR_VERSION); out_set_vsnprintf_func(je_vmem_navsnprintf); LOG(3, NULL); Header_size = roundup(sizeof(VMEM), Pagesize); /* Set up jemalloc messages to a custom print function */ je_vmem_malloc_message = print_jemalloc_messages; initialized = true; } util_mutex_unlock(&Vmem_init_lock); } /* * vmem_init -- load-time initialization for vmem * * Called automatically by the run-time loader. */ ATTR_CONSTRUCTOR void vmem_init(void) { util_mutex_init(&Vmem_init_lock); util_mutex_init(&Pool_lock); vmem_construct(); } /* * vmem_fini -- libvmem cleanup routine * * Called automatically when the process terminates. 
*/ ATTR_DESTRUCTOR void vmem_fini(void) { LOG(3, NULL); util_mutex_destroy(&Pool_lock); util_mutex_destroy(&Vmem_init_lock); /* set up jemalloc messages back to stderr */ je_vmem_malloc_message = NULL; common_fini(); } /* * vmem_createU -- create a memory pool in a temp file */ #ifndef _WIN32 static inline #endif VMEM * vmem_createU(const char *dir, size_t size) { vmem_construct(); LOG(3, "dir \"%s\" size %zu", dir, size); if (size < VMEM_MIN_POOL) { ERR("size %zu smaller than %zu", size, VMEM_MIN_POOL); errno = EINVAL; return NULL; } enum file_type type = util_file_get_type(dir); if (type == OTHER_ERROR) return NULL; util_mutex_lock(&Pool_lock); /* silently enforce multiple of mapping alignment */ size = roundup(size, Mmap_align); void *addr; if (type == TYPE_DEVDAX) { if ((addr = util_file_map_whole(dir)) == NULL) { util_mutex_unlock(&Pool_lock); return NULL; } } else { if ((addr = util_map_tmpfile(dir, size, 4 * MEGABYTE)) == NULL) { util_mutex_unlock(&Pool_lock); return NULL; } } /* store opaque info at beginning of mapped area */ struct vmem *vmp = addr; memset(&vmp->hdr, '\0', sizeof(vmp->hdr)); memcpy(vmp->hdr.signature, VMEM_HDR_SIG, POOL_HDR_SIG_LEN); vmp->addr = addr; vmp->size = size; vmp->caller_mapped = 0; /* Prepare pool for jemalloc */ if (je_vmem_pool_create((void *)((uintptr_t)addr + Header_size), size - Header_size, /* zeroed if */ type != TYPE_DEVDAX, /* empty */ 1) == NULL) { ERR("pool creation failed"); util_unmap(vmp->addr, vmp->size); util_mutex_unlock(&Pool_lock); return NULL; } /* * If possible, turn off all permissions on the pool header page. * * The prototype PMFS doesn't allow this when large pages are in * use. It is not considered an error if this fails. 
*/ if (type != TYPE_DEVDAX) util_range_none(addr, sizeof(struct pool_hdr)); util_mutex_unlock(&Pool_lock); LOG(3, "vmp %p", vmp); return vmp; } #ifndef _WIN32 /* * vmem_create -- create a memory pool in a temp file */ VMEM * vmem_create(const char *dir, size_t size) { return vmem_createU(dir, size); } #else /* * vmem_createW -- create a memory pool in a temp file */ VMEM * vmem_createW(const wchar_t *dir, size_t size) { char *udir = util_toUTF8(dir); if (udir == NULL) return NULL; VMEM *ret = vmem_createU(udir, size); util_free_UTF8(udir); return ret; } #endif /* * vmem_create_in_region -- create a memory pool in a given range */ VMEM * vmem_create_in_region(void *addr, size_t size) { vmem_construct(); LOG(3, "addr %p size %zu", addr, size); if (((uintptr_t)addr & (Pagesize - 1)) != 0) { ERR("addr %p not aligned to pagesize %llu", addr, Pagesize); errno = EINVAL; return NULL; } if (size < VMEM_MIN_POOL) { ERR("size %zu smaller than %zu", size, VMEM_MIN_POOL); errno = EINVAL; return NULL; } /* * Initially, treat this memory region as undefined. * Once jemalloc initializes its metadata, it will also mark * registered free chunks (usable heap space) as unaddressable. */ VALGRIND_DO_MAKE_MEM_UNDEFINED(addr, size); /* store opaque info at beginning of mapped area */ struct vmem *vmp = addr; memset(&vmp->hdr, '\0', sizeof(vmp->hdr)); memcpy(vmp->hdr.signature, VMEM_HDR_SIG, POOL_HDR_SIG_LEN); vmp->addr = addr; vmp->size = size; vmp->caller_mapped = 1; util_mutex_lock(&Pool_lock); /* Prepare pool for jemalloc */ if (je_vmem_pool_create((void *)((uintptr_t)addr + Header_size), size - Header_size, 0, /* empty */ 1) == NULL) { ERR("pool creation failed"); util_mutex_unlock(&Pool_lock); return NULL; } #ifndef _WIN32 /* * If possible, turn off all permissions on the pool header page. * * The prototype PMFS doesn't allow this when large pages are in * use. It is not considered an error if this fails. 
*/ util_range_none(addr, sizeof(struct pool_hdr)); #endif util_mutex_unlock(&Pool_lock); LOG(3, "vmp %p", vmp); return vmp; } /* * vmem_delete -- delete a memory pool */ void vmem_delete(VMEM *vmp) { LOG(3, "vmp %p", vmp); util_mutex_lock(&Pool_lock); int ret = je_vmem_pool_delete((pool_t *)((uintptr_t)vmp + Header_size)); if (ret != 0) { ERR("invalid pool handle: 0x%" PRIxPTR, (uintptr_t)vmp); errno = EINVAL; util_mutex_unlock(&Pool_lock); return; } #ifndef _WIN32 util_range_rw(vmp->addr, sizeof(struct pool_hdr)); #endif if (vmp->caller_mapped == 0) { util_unmap(vmp->addr, vmp->size); } else { /* * The application cannot do any assumptions about the content * of this memory region once the pool is destroyed. */ VALGRIND_DO_MAKE_MEM_UNDEFINED(vmp->addr, vmp->size); } util_mutex_unlock(&Pool_lock); } /* * vmem_check -- memory pool consistency check */ int vmem_check(VMEM *vmp) { vmem_construct(); LOG(3, "vmp %p", vmp); util_mutex_lock(&Pool_lock); int ret = je_vmem_pool_check((pool_t *)((uintptr_t)vmp + Header_size)); util_mutex_unlock(&Pool_lock); return ret; } /* * vmem_stats_print -- spew memory allocator stats for a pool */ void vmem_stats_print(VMEM *vmp, const char *opts) { LOG(3, "vmp %p opts \"%s\"", vmp, opts ? 
opts : ""); je_vmem_pool_malloc_stats_print( (pool_t *)((uintptr_t)vmp + Header_size), print_jemalloc_stats, NULL, opts); } /* * vmem_malloc -- allocate memory */ void * vmem_malloc(VMEM *vmp, size_t size) { LOG(3, "vmp %p size %zu", vmp, size); return je_vmem_pool_malloc( (pool_t *)((uintptr_t)vmp + Header_size), size); } /* * vmem_free -- free memory */ void vmem_free(VMEM *vmp, void *ptr) { LOG(3, "vmp %p ptr %p", vmp, ptr); je_vmem_pool_free((pool_t *)((uintptr_t)vmp + Header_size), ptr); } /* * vmem_calloc -- allocate zeroed memory */ void * vmem_calloc(VMEM *vmp, size_t nmemb, size_t size) { LOG(3, "vmp %p nmemb %zu size %zu", vmp, nmemb, size); return je_vmem_pool_calloc((pool_t *)((uintptr_t)vmp + Header_size), nmemb, size); } /* * vmem_realloc -- resize a memory allocation */ void * vmem_realloc(VMEM *vmp, void *ptr, size_t size) { LOG(3, "vmp %p ptr %p size %zu", vmp, ptr, size); return je_vmem_pool_ralloc((pool_t *)((uintptr_t)vmp + Header_size), ptr, size); } /* * vmem_aligned_alloc -- allocate aligned memory */ void * vmem_aligned_alloc(VMEM *vmp, size_t alignment, size_t size) { LOG(3, "vmp %p alignment %zu size %zu", vmp, alignment, size); return je_vmem_pool_aligned_alloc( (pool_t *)((uintptr_t)vmp + Header_size), alignment, size); } /* * vmem_strdup -- allocate memory for copy of string */ char * vmem_strdup(VMEM *vmp, const char *s) { LOG(3, "vmp %p s %p", vmp, s); size_t size = strlen(s) + 1; void *retaddr = je_vmem_pool_malloc( (pool_t *)((uintptr_t)vmp + Header_size), size); if (retaddr == NULL) return NULL; return (char *)memcpy(retaddr, s, size); } /* * vmem_wcsdup -- allocate memory for copy of wide character string */ wchar_t * vmem_wcsdup(VMEM *vmp, const wchar_t *s) { LOG(3, "vmp %p s %p", vmp, s); size_t size = (wcslen(s) + 1) * sizeof(wchar_t); void *retaddr = je_vmem_pool_malloc( (pool_t *)((uintptr_t)vmp + Header_size), size); if (retaddr == NULL) return NULL; return (wchar_t *)memcpy(retaddr, s, size); } /* * vmem_malloc_usable_size 
-- get usable size of allocation */ size_t vmem_malloc_usable_size(VMEM *vmp, void *ptr) { LOG(3, "vmp %p ptr %p", vmp, ptr); return je_vmem_pool_malloc_usable_size( (pool_t *)((uintptr_t)vmp + Header_size), ptr); }
11,248
21.957143
76
c
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libvmem/vmem.h
/* * Copyright 2014-2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /* * vmem.h -- internal definitions for libvmem */ #ifndef VMEM_H #define VMEM_H 1 #include <stddef.h> #include "pool_hdr.h" #ifdef __cplusplus extern "C" { #endif #define VMEM_LOG_PREFIX "libvmem" #define VMEM_LOG_LEVEL_VAR "VMEM_LOG_LEVEL" #define VMEM_LOG_FILE_VAR "VMEM_LOG_FILE" /* attributes of the vmem memory pool format for the pool header */ #define VMEM_HDR_SIG "VMEM " /* must be 8 bytes including '\0' */ #define VMEM_FORMAT_MAJOR 1 struct vmem { struct pool_hdr hdr; /* memory pool header */ void *addr; /* mapped region */ size_t size; /* size of mapped region */ int caller_mapped; }; void vmem_construct(void); #ifdef __cplusplus } #endif #endif
2,284
31.183099
74
h
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/libvmem/libvmem.c
/* * Copyright 2014-2017, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /* * libvmem.c -- basic libvmem functions */ #include <stdio.h> #include <stdint.h> #include "libvmem.h" #include "jemalloc.h" #include "out.h" #include "vmem.h" /* * vmem_check_versionU -- see if library meets application version requirements */ #ifndef _WIN32 static inline #endif const char * vmem_check_versionU(unsigned major_required, unsigned minor_required) { vmem_construct(); LOG(3, "major_required %u minor_required %u", major_required, minor_required); if (major_required != VMEM_MAJOR_VERSION) { ERR("libvmem major version mismatch (need %u, found %u)", major_required, VMEM_MAJOR_VERSION); return out_get_errormsg(); } if (minor_required > VMEM_MINOR_VERSION) { ERR("libvmem minor version mismatch (need %u, found %u)", minor_required, VMEM_MINOR_VERSION); return out_get_errormsg(); } return NULL; } #ifndef _WIN32 /* * vmem_check_version -- see if library meets application version requirements */ const char * vmem_check_version(unsigned major_required, unsigned minor_required) { return vmem_check_versionU(major_required, minor_required); } #else /* * vmem_check_versionW -- see if library meets application version requirements */ const wchar_t * vmem_check_versionW(unsigned major_required, unsigned minor_required) { if (vmem_check_versionU(major_required, minor_required) != NULL) return out_get_errormsgW(); else return NULL; } #endif /* * vmem_set_funcs -- allow overriding libvmem's call to malloc, etc. 
*/ void vmem_set_funcs( void *(*malloc_func)(size_t size), void (*free_func)(void *ptr), void *(*realloc_func)(void *ptr, size_t size), char *(*strdup_func)(const char *s), void (*print_func)(const char *s)) { vmem_construct(); LOG(3, NULL); util_set_alloc_funcs(malloc_func, free_func, realloc_func, strdup_func); out_set_print_func(print_func); je_vmem_pool_set_alloc_funcs(malloc_func, free_func); } /* * vmem_errormsgU -- return last error message */ #ifndef _WIN32 static inline #endif const char * vmem_errormsgU(void) { return out_get_errormsg(); } #ifndef _WIN32 /* * vmem_errormsg -- return last error message */ const char * vmem_errormsg(void) { return vmem_errormsgU(); } #else /* * vmem_errormsgW -- return last error message as wchar_t */ const wchar_t * vmem_errormsgW(void) { return out_get_errormsgW(); } #endif
3,940
25.809524
79
c
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/win_mmap.c
/* * Copyright 2015-2018, Intel Corporation * Copyright (c) 2015-2017, Microsoft Corporation. All rights reserved. * Copyright (c) 2016, Hewlett Packard Enterprise Development LP * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * win_mmap.c -- memory-mapped files for Windows */ /* * XXX - The initial approach to PMDK for Windows port was to minimize the * amount of changes required in the core part of the library, and to avoid * preprocessor conditionals, if possible. 
For that reason, some of the * Linux system calls that have no equivalents on Windows have been emulated * using Windows API. * Note that it was not a goal to fully emulate POSIX-compliant behavior * of mentioned functions. They are used only internally, so current * implementation is just good enough to satisfy PMDK needs and to make it * work on Windows. * * This is a subject for change in the future. Likely, all these functions * will be replaced with "util_xxx" wrappers with OS-specific implementation * for Linux and Windows. * * Known issues: * - on Windows, mapping granularity/alignment is 64KB, not 4KB; * - mprotect() behavior and protection flag handling in mmap() is slightly * different than on Linux (see comments below). */ #include <sys/mman.h> #include "mmap.h" #include "util.h" #include "out.h" #include "win_mmap.h" /* uncomment for more debug information on mmap trackers */ /* #define MMAP_DEBUG_INFO */ NTSTATUS NtFreeVirtualMemory(_In_ HANDLE ProcessHandle, _Inout_ PVOID *BaseAddress, _Inout_ PSIZE_T RegionSize, _In_ ULONG FreeType); /* * XXX Unify the Linux and Windows code and replace this structure with * the map tracking list defined in mmap.h. 
*/ SRWLOCK FileMappingQLock = SRWLOCK_INIT; struct FMLHead FileMappingQHead = SORTEDQ_HEAD_INITIALIZER(FileMappingQHead); /* * mmap_file_mapping_comparer -- (internal) compares the two file mapping * trackers */ static LONG_PTR mmap_file_mapping_comparer(PFILE_MAPPING_TRACKER a, PFILE_MAPPING_TRACKER b) { return ((LONG_PTR)a->BaseAddress - (LONG_PTR)b->BaseAddress); } #ifdef MMAP_DEBUG_INFO /* * mmap_info -- (internal) dump info about all the mapping trackers */ static void mmap_info(void) { LOG(4, NULL); AcquireSRWLockShared(&FileMappingQLock); PFILE_MAPPING_TRACKER mt; for (mt = SORTEDQ_FIRST(&FileMappingQHead); mt != (void *)&FileMappingQHead; mt = SORTEDQ_NEXT(mt, ListEntry)) { LOG(4, "FH %08x FMH %08x AD %p-%p (%zu) " "OF %08x FL %zu AC %d F %d", mt->FileHandle, mt->FileMappingHandle, mt->BaseAddress, mt->EndAddress, (char *)mt->EndAddress - (char *)mt->BaseAddress, mt->Offset, mt->FileLen, mt->Access, mt->Flags); } ReleaseSRWLockShared(&FileMappingQLock); } #endif /* * mmap_reserve -- (internal) reserve virtual address range */ static void * mmap_reserve(void *addr, size_t len) { LOG(4, "addr %p len %zu", addr, len); ASSERTeq((uintptr_t)addr % Mmap_align, 0); ASSERTeq(len % Mmap_align, 0); void *reserved_addr = VirtualAlloc(addr, len, MEM_RESERVE, PAGE_NOACCESS); if (reserved_addr == NULL) { ERR("cannot find a contiguous region - " "addr: %p, len: %lx, gle: 0x%08x", addr, len, GetLastError()); errno = ENOMEM; return MAP_FAILED; } return reserved_addr; } /* * mmap_unreserve -- (internal) frees the range that's previously reserved */ static int mmap_unreserve(void *addr, size_t len) { LOG(4, "addr %p len %zu", addr, len); ASSERTeq((uintptr_t)addr % Mmap_align, 0); ASSERTeq(len % Mmap_align, 0); size_t bytes_returned; MEMORY_BASIC_INFORMATION basic_info; bytes_returned = VirtualQuery(addr, &basic_info, sizeof(basic_info)); if (bytes_returned != sizeof(basic_info)) { ERR("cannot query the virtual address properties of the range " "- addr: %p, len: %d", addr, 
len); errno = EINVAL; return -1; } if (basic_info.State == MEM_RESERVE) { DWORD nt_status; void *release_addr = addr; size_t release_size = len; nt_status = NtFreeVirtualMemory(GetCurrentProcess(), &release_addr, &release_size, MEM_RELEASE); if (nt_status != 0) { ERR("cannot release the reserved virtual space - " "addr: %p, len: %d, nt_status: 0x%08x", addr, len, nt_status); errno = EINVAL; return -1; } ASSERTeq(release_addr, addr); ASSERTeq(release_size, len); LOG(4, "freed reservation - addr: %p, size: %d", release_addr, release_size); } else { LOG(4, "range not reserved - addr: %p, size: %d", addr, len); } return 0; } /* * win_mmap_init -- initialization of file mapping tracker */ void win_mmap_init(void) { AcquireSRWLockExclusive(&FileMappingQLock); SORTEDQ_INIT(&FileMappingQHead); ReleaseSRWLockExclusive(&FileMappingQLock); } /* * win_mmap_fini -- file mapping tracker cleanup routine */ void win_mmap_fini(void) { /* * Let's make sure that no one is in the middle of updating the * list by grabbing the lock. 
*/ AcquireSRWLockExclusive(&FileMappingQLock); while (!SORTEDQ_EMPTY(&FileMappingQHead)) { PFILE_MAPPING_TRACKER mt; mt = (PFILE_MAPPING_TRACKER)SORTEDQ_FIRST(&FileMappingQHead); SORTEDQ_REMOVE(&FileMappingQHead, mt, ListEntry); if (mt->BaseAddress != NULL) UnmapViewOfFile(mt->BaseAddress); size_t release_size = (char *)mt->EndAddress - (char *)mt->BaseAddress; /* * Free reservation after file mapping (if reservation was * bigger than length of mapped file) */ void *release_addr = (char *)mt->BaseAddress + mt->FileLen; mmap_unreserve(release_addr, release_size - mt->FileLen); if (mt->FileMappingHandle != NULL) CloseHandle(mt->FileMappingHandle); if (mt->FileHandle != NULL) CloseHandle(mt->FileHandle); free(mt); } ReleaseSRWLockExclusive(&FileMappingQLock); } #define PROT_ALL (PROT_READ|PROT_WRITE|PROT_EXEC) /* * mmap -- map file into memory * * XXX - If read-only mapping was created initially, it is not possible * to change protection to R/W, even if the file itself was open in R/W mode. * To workaround that, we could modify mmap() to create R/W mapping first, * then change the protection to R/O. This way, it should be possible * to elevate permissions later. */ void * mmap(void *addr, size_t len, int prot, int flags, int fd, os_off_t offset) { LOG(4, "addr %p len %zu prot %d flags %d fd %d offset %ju", addr, len, prot, flags, fd, offset); if (len == 0) { ERR("invalid length: %zu", len); errno = EINVAL; return MAP_FAILED; } if ((prot & ~PROT_ALL) != 0) { ERR("invalid flags: 0x%08x", flags); /* invalid protection flags */ errno = EINVAL; return MAP_FAILED; } if (((flags & MAP_PRIVATE) && (flags & MAP_SHARED)) || ((flags & (MAP_PRIVATE | MAP_SHARED)) == 0)) { ERR("neither MAP_PRIVATE or MAP_SHARED is set, or both: 0x%08x", flags); errno = EINVAL; return MAP_FAILED; } /* XXX shall we use SEC_LARGE_PAGES flag? 
*/ DWORD protect = 0; DWORD access = 0; /* on x86, PROT_WRITE implies PROT_READ */ if (prot & PROT_WRITE) { if (flags & MAP_PRIVATE) { access = FILE_MAP_COPY; if (prot & PROT_EXEC) protect = PAGE_EXECUTE_WRITECOPY; else protect = PAGE_WRITECOPY; } else { /* FILE_MAP_ALL_ACCESS == FILE_MAP_WRITE */ access = FILE_MAP_ALL_ACCESS; if (prot & PROT_EXEC) protect = PAGE_EXECUTE_READWRITE; else protect = PAGE_READWRITE; } } else if (prot & PROT_READ) { access = FILE_MAP_READ; if (prot & PROT_EXEC) protect = PAGE_EXECUTE_READ; else protect = PAGE_READONLY; } else { /* XXX - PAGE_NOACCESS is not supported by CreateFileMapping */ ERR("PAGE_NOACCESS is not supported"); errno = ENOTSUP; return MAP_FAILED; } if (((uintptr_t)addr % Mmap_align) != 0) { if ((flags & MAP_FIXED) == 0) { /* ignore invalid hint if no MAP_FIXED flag is set */ addr = NULL; } else { ERR("hint address is not well-aligned: %p", addr); errno = EINVAL; return MAP_FAILED; } } if ((offset % Mmap_align) != 0) { ERR("offset is not well-aligned: %ju", offset); errno = EINVAL; return MAP_FAILED; } if ((flags & MAP_FIXED) != 0) { /* * Free any reservations that the caller might have, also we * have to unmap any existing mappings in this region as per * mmap's manual. * XXX - Ideally we should unmap only if the prot and flags * are similar, we are deferring it as we don't rely on it * yet. */ int ret = munmap(addr, len); if (ret != 0) { ERR("!munmap: addr %p len %zu", addr, len); return MAP_FAILED; } } size_t len_align = roundup(len, Mmap_align); size_t filelen; size_t filelen_align; HANDLE fh; if (flags & MAP_ANON) { /* * In our implementation we are choosing to ignore fd when * MAP_ANON is set, instead of failing. */ fh = INVALID_HANDLE_VALUE; /* ignore/override offset */ offset = 0; filelen = len; filelen_align = len_align; if ((flags & MAP_NORESERVE) != 0) { /* * For anonymous mappings the meaning of MAP_NORESERVE * flag is pretty much the same as SEC_RESERVE. 
*/ protect |= SEC_RESERVE; } } else { LARGE_INTEGER filesize; if (fd == -1) { ERR("invalid file descriptor: %d", fd); errno = EBADF; return MAP_FAILED; } /* * We need to keep file handle open for proper * implementation of msync() and to hold the file lock. */ if (!DuplicateHandle(GetCurrentProcess(), (HANDLE)_get_osfhandle(fd), GetCurrentProcess(), &fh, 0, FALSE, DUPLICATE_SAME_ACCESS)) { ERR("cannot duplicate handle - fd: %d, gle: 0x%08x", fd, GetLastError()); errno = ENOMEM; return MAP_FAILED; } /* * If we are asked to map more than the file size, map till the * file size and reserve the following. */ if (!GetFileSizeEx(fh, &filesize)) { ERR("cannot query the file size - fh: %d, gle: 0x%08x", fd, GetLastError()); CloseHandle(fh); return MAP_FAILED; } if (offset >= (os_off_t)filesize.QuadPart) { errno = EINVAL; ERR("offset is beyond the file size"); CloseHandle(fh); return MAP_FAILED; } /* calculate length of the mapped portion of the file */ filelen = filesize.QuadPart - offset; if (filelen > len) filelen = len; filelen_align = roundup(filelen, Mmap_align); if ((offset + len) > (size_t)filesize.QuadPart) { /* * Reserve virtual address for the rest of range we need * to map, and free a portion in the beginning for this * allocation. 
*/ void *reserved_addr = mmap_reserve(addr, len_align); if (reserved_addr == MAP_FAILED) { ERR("cannot reserve region"); CloseHandle(fh); return MAP_FAILED; } if (addr != reserved_addr && (flags & MAP_FIXED) != 0) { ERR("cannot find a contiguous region - " "addr: %p, len: %lx, gle: 0x%08x", addr, len, GetLastError()); if (mmap_unreserve(reserved_addr, len_align) != 0) { ASSERT(FALSE); ERR("cannot free reserved region"); } errno = ENOMEM; CloseHandle(fh); return MAP_FAILED; } addr = reserved_addr; if (mmap_unreserve(reserved_addr, filelen_align) != 0) { ASSERT(FALSE); ERR("cannot free reserved region"); CloseHandle(fh); return MAP_FAILED; } } } HANDLE fmh = CreateFileMapping(fh, NULL, /* security attributes */ protect, (DWORD) ((filelen + offset) >> 32), (DWORD) ((filelen + offset) & 0xFFFFFFFF), NULL); if (fmh == NULL) { DWORD gle = GetLastError(); ERR("CreateFileMapping, gle: 0x%08x", gle); if (gle == ERROR_ACCESS_DENIED) errno = EACCES; else errno = EINVAL; /* XXX */ CloseHandle(fh); return MAP_FAILED; } void *base = MapViewOfFileEx(fmh, access, (DWORD) (offset >> 32), (DWORD) (offset & 0xFFFFFFFF), filelen, addr); /* hint address */ if (base == NULL) { if (addr == NULL || (flags & MAP_FIXED) != 0) { ERR("MapViewOfFileEx, gle: 0x%08x", GetLastError()); errno = EINVAL; CloseHandle(fh); CloseHandle(fmh); return MAP_FAILED; } /* try again w/o hint */ base = MapViewOfFileEx(fmh, access, (DWORD) (offset >> 32), (DWORD) (offset & 0xFFFFFFFF), filelen, NULL); /* no hint address */ } if (base == NULL) { ERR("MapViewOfFileEx, gle: 0x%08x", GetLastError()); errno = ENOMEM; CloseHandle(fh); CloseHandle(fmh); return MAP_FAILED; } /* * We will track the file mapping handle on a lookaside list so that * we don't have to modify the fact that we only return back the base * address rather than a more elaborate structure. 
*/ PFILE_MAPPING_TRACKER mt = malloc(sizeof(struct FILE_MAPPING_TRACKER)); if (mt == NULL) { ERR("!malloc"); CloseHandle(fh); CloseHandle(fmh); return MAP_FAILED; } mt->Flags = 0; mt->FileHandle = fh; mt->FileMappingHandle = fmh; mt->BaseAddress = base; mt->EndAddress = (void *)((char *)base + len_align); mt->Access = access; mt->Offset = offset; mt->FileLen = filelen_align; /* * XXX: Use the QueryVirtualMemoryInformation when available in the new * SDK. If the file is DAX mapped say so in the FILE_MAPPING_TRACKER * Flags. */ DWORD filesystemFlags; if (fh == INVALID_HANDLE_VALUE) { LOG(4, "anonymous mapping - not DAX mapped - handle: %p", fh); } else if (GetVolumeInformationByHandleW(fh, NULL, 0, NULL, NULL, &filesystemFlags, NULL, 0)) { if (filesystemFlags & FILE_DAX_VOLUME) { mt->Flags |= FILE_MAPPING_TRACKER_FLAG_DIRECT_MAPPED; } else { LOG(4, "file is not DAX mapped - handle: %p", fh); } } else { ERR("failed to query volume information : %08x", GetLastError()); } AcquireSRWLockExclusive(&FileMappingQLock); SORTEDQ_INSERT(&FileMappingQHead, mt, ListEntry, FILE_MAPPING_TRACKER, mmap_file_mapping_comparer); ReleaseSRWLockExclusive(&FileMappingQLock); #ifdef MMAP_DEBUG_INFO mmap_info(); #endif return base; } /* * mmap_split -- (internal) replace existing mapping with another one(s) * * Unmaps the region between [begin,end]. If it's in a middle of the existing * mapping, it results in two new mappings and duplicated file/mapping handles. */ static int mmap_split(PFILE_MAPPING_TRACKER mt, void *begin, void *end) { LOG(4, "begin %p end %p", begin, end); ASSERTeq((uintptr_t)begin % Mmap_align, 0); ASSERTeq((uintptr_t)end % Mmap_align, 0); PFILE_MAPPING_TRACKER mtb = NULL; PFILE_MAPPING_TRACKER mte = NULL; HANDLE fh = mt->FileHandle; HANDLE fmh = mt->FileMappingHandle; size_t len; /* * In this routine we copy flags from mt to the two subsets that we * create. 
All flags may not be appropriate to propagate so let's * assert about the flags we know, if some one adds a new flag in the * future they would know about this copy and take appropricate action. */ C_ASSERT(FILE_MAPPING_TRACKER_FLAGS_MASK == 1); /* * 1) b e b e * xxxxxxxxxxxxx => xxx.......xxxx - mtb+mte * 2) b e b e * xxxxxxxxxxxxx => xxxxxxx....... - mtb * 3) b e b e * xxxxxxxxxxxxx => ........xxxxxx - mte * 4) b e b e * xxxxxxxxxxxxx => .............. - <none> */ if (begin > mt->BaseAddress) { /* case #1/2 */ /* new mapping at the beginning */ mtb = malloc(sizeof(struct FILE_MAPPING_TRACKER)); if (mtb == NULL) { ERR("!malloc"); goto err; } mtb->Flags = mt->Flags; mtb->FileHandle = fh; mtb->FileMappingHandle = fmh; mtb->BaseAddress = mt->BaseAddress; mtb->EndAddress = begin; mtb->Access = mt->Access; mtb->Offset = mt->Offset; len = (char *)begin - (char *)mt->BaseAddress; mtb->FileLen = len >= mt->FileLen ? mt->FileLen : len; } if (end < mt->EndAddress) { /* case #1/3 */ /* new mapping at the end */ mte = malloc(sizeof(struct FILE_MAPPING_TRACKER)); if (mte == NULL) { ERR("!malloc"); goto err; } if (!mtb) { /* case #3 */ mte->FileHandle = fh; mte->FileMappingHandle = fmh; } else { /* case #1 - need to duplicate handles */ mte->FileHandle = NULL; mte->FileMappingHandle = NULL; if (!DuplicateHandle(GetCurrentProcess(), fh, GetCurrentProcess(), &mte->FileHandle, 0, FALSE, DUPLICATE_SAME_ACCESS)) { ERR("DuplicateHandle, gle: 0x%08x", GetLastError()); goto err; } if (!DuplicateHandle(GetCurrentProcess(), fmh, GetCurrentProcess(), &mte->FileMappingHandle, 0, FALSE, DUPLICATE_SAME_ACCESS)) { ERR("DuplicateHandle, gle: 0x%08x", GetLastError()); goto err; } } mte->Flags = mt->Flags; mte->BaseAddress = end; mte->EndAddress = mt->EndAddress; mte->Access = mt->Access; mte->Offset = mt->Offset + ((char *)mte->BaseAddress - (char *)mt->BaseAddress); len = (char *)end - (char *)mt->BaseAddress; mte->FileLen = len >= mt->FileLen ? 
0 : mt->FileLen - len; } if (mt->FileLen > 0 && UnmapViewOfFile(mt->BaseAddress) == FALSE) { ERR("UnmapViewOfFile, gle: 0x%08x", GetLastError()); goto err; } len = (char *)mt->EndAddress - (char *)mt->BaseAddress; if (len > mt->FileLen) { void *addr = (char *)mt->BaseAddress + mt->FileLen; mmap_unreserve(addr, len - mt->FileLen); } if (!mtb && !mte) { /* case #4 */ CloseHandle(fmh); CloseHandle(fh); } /* * free entry for the original mapping */ SORTEDQ_REMOVE(&FileMappingQHead, mt, ListEntry); free(mt); if (mtb) { len = (char *)mtb->EndAddress - (char *)mtb->BaseAddress; if (len > mtb->FileLen) { void *addr = (char *)mtb->BaseAddress + mtb->FileLen; void *raddr = mmap_reserve(addr, len - mtb->FileLen); if (raddr == MAP_FAILED) { ERR("cannot find a contiguous region - " "addr: %p, len: %lx, gle: 0x%08x", addr, len, GetLastError()); goto err; } } if (mtb->FileLen > 0) { void *base = MapViewOfFileEx(mtb->FileMappingHandle, mtb->Access, (DWORD) (mtb->Offset >> 32), (DWORD) (mtb->Offset & 0xFFFFFFFF), mtb->FileLen, mtb->BaseAddress); /* hint address */ if (base == NULL) { ERR("MapViewOfFileEx, gle: 0x%08x", GetLastError()); goto err; } } SORTEDQ_INSERT(&FileMappingQHead, mtb, ListEntry, FILE_MAPPING_TRACKER, mmap_file_mapping_comparer); } if (mte) { len = (char *)mte->EndAddress - (char *)mte->BaseAddress; if (len > mte->FileLen) { void *addr = (char *)mte->BaseAddress + mte->FileLen; void *raddr = mmap_reserve(addr, len - mte->FileLen); if (raddr == MAP_FAILED) { ERR("cannot find a contiguous region - " "addr: %p, len: %lx, gle: 0x%08x", addr, len, GetLastError()); goto err; } } if (mte->FileLen > 0) { void *base = MapViewOfFileEx(mte->FileMappingHandle, mte->Access, (DWORD) (mte->Offset >> 32), (DWORD) (mte->Offset & 0xFFFFFFFF), mte->FileLen, mte->BaseAddress); /* hint address */ if (base == NULL) { ERR("MapViewOfFileEx, gle: 0x%08x", GetLastError()); goto err_mte; } } SORTEDQ_INSERT(&FileMappingQHead, mte, ListEntry, FILE_MAPPING_TRACKER, 
mmap_file_mapping_comparer); } return 0; err: if (mtb) { ASSERTeq(mtb->FileMappingHandle, fmh); ASSERTeq(mtb->FileHandle, fh); CloseHandle(mtb->FileMappingHandle); CloseHandle(mtb->FileHandle); len = (char *)mtb->EndAddress - (char *)mtb->BaseAddress; if (len > mtb->FileLen) { void *addr = (char *)mtb->BaseAddress + mtb->FileLen; mmap_unreserve(addr, len - mtb->FileLen); } } err_mte: if (mte) { if (mte->FileMappingHandle) CloseHandle(mte->FileMappingHandle); if (mte->FileHandle) CloseHandle(mte->FileHandle); len = (char *)mte->EndAddress - (char *)mte->BaseAddress; if (len > mte->FileLen) { void *addr = (char *)mte->BaseAddress + mte->FileLen; mmap_unreserve(addr, len - mte->FileLen); } } free(mtb); free(mte); return -1; } /* * munmap -- delete mapping */ int munmap(void *addr, size_t len) { LOG(4, "addr %p len %zu", addr, len); if (((uintptr_t)addr % Mmap_align) != 0) { ERR("address is not well-aligned: %p", addr); errno = EINVAL; return -1; } if (len == 0) { ERR("invalid length: %zu", len); errno = EINVAL; return -1; } int retval = -1; if (len > UINTPTR_MAX - (uintptr_t)addr) { /* limit len to not get beyond address space */ len = UINTPTR_MAX - (uintptr_t)addr; } void *begin = addr; void *end = (void *)((char *)addr + len); AcquireSRWLockExclusive(&FileMappingQLock); PFILE_MAPPING_TRACKER mt; PFILE_MAPPING_TRACKER next; for (mt = SORTEDQ_FIRST(&FileMappingQHead); mt != (void *)&FileMappingQHead; mt = next) { /* * Pick the next entry before we split there by delete the * this one (NOTE: mmap_spilt could delete this entry). */ next = SORTEDQ_NEXT(mt, ListEntry); if (mt->BaseAddress >= end) { LOG(4, "ignoring all mapped ranges beyond given range"); break; } if (mt->EndAddress <= begin) { LOG(4, "skipping a mapped range before given range"); continue; } void *begin2 = begin > mt->BaseAddress ? begin : mt->BaseAddress; void *end2 = end < mt->EndAddress ? 
end : mt->EndAddress; size_t len2 = (char *)end2 - (char *)begin2; void *align_end = (void *)roundup((uintptr_t)end2, Mmap_align); if (mmap_split(mt, begin2, align_end) != 0) { LOG(2, "mapping split failed"); goto err; } if (len > len2) { len -= len2; } else { len = 0; break; } } /* * If we didn't find any mapped regions in our list attempt to free * as if the entire range is reserved. * * XXX: We don't handle a range having few mapped regions and few * reserved regions. */ if (len > 0) mmap_unreserve(addr, roundup(len, Mmap_align)); retval = 0; err: ReleaseSRWLockExclusive(&FileMappingQLock); if (retval == -1) errno = EINVAL; #ifdef MMAP_DEBUG_INFO mmap_info(); #endif return retval; } #define MS_ALL (MS_SYNC|MS_ASYNC|MS_INVALIDATE) /* * msync -- synchronize a file with a memory map */ int msync(void *addr, size_t len, int flags) { LOG(4, "addr %p len %zu flags %d", addr, len, flags); if ((flags & ~MS_ALL) != 0) { ERR("invalid flags: 0x%08x", flags); errno = EINVAL; return -1; } /* * XXX - On Linux it is allowed to call msync() without MS_SYNC * nor MS_ASYNC. 
*/ if (((flags & MS_SYNC) && (flags & MS_ASYNC)) || ((flags & (MS_SYNC | MS_ASYNC)) == 0)) { ERR("neither MS_SYNC or MS_ASYNC is set, or both: 0x%08x", flags); errno = EINVAL; return -1; } if (((uintptr_t)addr % Pagesize) != 0) { ERR("address is not page-aligned: %p", addr); errno = EINVAL; return -1; } if (len == 0) { LOG(4, "zero-length region - do nothing"); return 0; /* do nothing */ } if (len > UINTPTR_MAX - (uintptr_t)addr) { /* limit len to not get beyond address space */ len = UINTPTR_MAX - (uintptr_t)addr; } int retval = -1; void *begin = addr; void *end = (void *)((char *)addr + len); AcquireSRWLockShared(&FileMappingQLock); PFILE_MAPPING_TRACKER mt; SORTEDQ_FOREACH(mt, &FileMappingQHead, ListEntry) { if (mt->BaseAddress >= end) { LOG(4, "ignoring all mapped ranges beyond given range"); break; } if (mt->EndAddress <= begin) { LOG(4, "skipping a mapped range before given range"); continue; } void *begin2 = begin > mt->BaseAddress ? begin : mt->BaseAddress; void *end2 = end < mt->EndAddress ? end : mt->EndAddress; size_t len2 = (char *)end2 - (char *)begin2; /* do nothing for anonymous mappings */ if (mt->FileHandle != INVALID_HANDLE_VALUE) { if (FlushViewOfFile(begin2, len2) == FALSE) { ERR("FlushViewOfFile, gle: 0x%08x", GetLastError()); errno = ENOMEM; goto err; } if (FlushFileBuffers(mt->FileHandle) == FALSE) { ERR("FlushFileBuffers, gle: 0x%08x", GetLastError()); errno = EINVAL; goto err; } } if (len > len2) { len -= len2; } else { len = 0; break; } } if (len > 0) { ERR("indicated memory (or part of it) was not mapped"); errno = ENOMEM; } else { retval = 0; } err: ReleaseSRWLockShared(&FileMappingQLock); return retval; } #define PROT_ALL (PROT_READ|PROT_WRITE|PROT_EXEC) /* * mprotect -- set protection on a region of memory * * XXX - If the memory range passed to mprotect() includes invalid pages, * returned status will indicate error, and errno is set to ENOMEM. 
* However, the protection change is actually applied to all the valid pages, * ignoring the rest. * This is different than on Linux, where it stops on the first invalid page. */ int mprotect(void *addr, size_t len, int prot) { LOG(4, "addr %p len %zu prot %d", addr, len, prot); if (((uintptr_t)addr % Pagesize) != 0) { ERR("address is not page-aligned: %p", addr); errno = EINVAL; return -1; } if (len == 0) { LOG(4, "zero-length region - do nothing"); return 0; /* do nothing */ } if (len > UINTPTR_MAX - (uintptr_t)addr) { len = UINTPTR_MAX - (uintptr_t)addr; LOG(4, "limit len to %zu to not get beyond address space", len); } DWORD protect = 0; if ((prot & PROT_READ) && (prot & PROT_WRITE)) { protect |= PAGE_READWRITE; if (prot & PROT_EXEC) protect |= PAGE_EXECUTE_READWRITE; } else if (prot & PROT_READ) { protect |= PAGE_READONLY; if (prot & PROT_EXEC) protect |= PAGE_EXECUTE_READ; } else { protect |= PAGE_NOACCESS; } int retval = -1; void *begin = addr; void *end = (void *)((char *)addr + len); AcquireSRWLockShared(&FileMappingQLock); PFILE_MAPPING_TRACKER mt; SORTEDQ_FOREACH(mt, &FileMappingQHead, ListEntry) { if (mt->BaseAddress >= end) { LOG(4, "ignoring all mapped ranges beyond given range"); break; } if (mt->EndAddress <= begin) { LOG(4, "skipping a mapped range before given range"); continue; } void *begin2 = begin > mt->BaseAddress ? begin : mt->BaseAddress; void *end2 = end < mt->EndAddress ? 
end : mt->EndAddress; /* * protect of region to VirtualProtection must be compatible * with the access protection specified for this region * when the view was mapped using MapViewOfFileEx */ if (mt->Access == FILE_MAP_COPY) { if (protect & PAGE_READWRITE) { protect &= ~PAGE_READWRITE; protect |= PAGE_WRITECOPY; } else if (protect & PAGE_EXECUTE_READWRITE) { protect &= ~PAGE_EXECUTE_READWRITE; protect |= PAGE_EXECUTE_WRITECOPY; } } size_t len2 = (char *)end2 - (char *)begin2; DWORD oldprot = 0; BOOL ret; ret = VirtualProtect(begin2, len2, protect, &oldprot); if (ret == FALSE) { DWORD gle = GetLastError(); ERR("VirtualProtect, gle: 0x%08x", gle); /* translate error code */ switch (gle) { case ERROR_INVALID_PARAMETER: errno = EACCES; break; case ERROR_INVALID_ADDRESS: errno = ENOMEM; break; default: errno = EINVAL; break; } goto err; } if (len > len2) { len -= len2; } else { len = 0; break; } } if (len > 0) { ERR("indicated memory (or part of it) was not mapped"); errno = ENOMEM; } else { retval = 0; } err: ReleaseSRWLockShared(&FileMappingQLock); return retval; }
28,400
24.089223
79
c
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/getopt/getopt.c
/* * *Copyright (c) 2012, Kim Gräsman * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Kim Gräsman nor the * names of contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL KIM GRÄSMAN BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "getopt.h" #include <stddef.h> #include <string.h> #include <stdio.h> char* optarg; int optopt; /* The variable optind [...] shall be initialized to 1 by the system. 
*/ int optind = 1; int opterr; static char* optcursor = NULL; static char *first = NULL; /* rotates argv array */ static void rotate(char **argv, int argc) { if (argc <= 1) return; char *tmp = argv[0]; memmove(argv, argv + 1, (argc - 1) * sizeof(char *)); argv[argc - 1] = tmp; } /* Implemented based on [1] and [2] for optional arguments. optopt is handled FreeBSD-style, per [3]. Other GNU and FreeBSD extensions are purely accidental. [1] http://pubs.opengroup.org/onlinepubs/000095399/functions/getopt.html [2] http://www.kernel.org/doc/man-pages/online/pages/man3/getopt.3.html [3] http://www.freebsd.org/cgi/man.cgi?query=getopt&sektion=3&manpath=FreeBSD+9.0-RELEASE */ int getopt(int argc, char* const argv[], const char* optstring) { int optchar = -1; const char* optdecl = NULL; optarg = NULL; opterr = 0; optopt = 0; /* Unspecified, but we need it to avoid overrunning the argv bounds. */ if (optind >= argc) goto no_more_optchars; /* If, when getopt() is called argv[optind] is a null pointer, getopt() shall return -1 without changing optind. */ if (argv[optind] == NULL) goto no_more_optchars; /* If, when getopt() is called *argv[optind] is not the character '-', permute argv to move non options to the end */ if (*argv[optind] != '-') { if (argc - optind <= 1) goto no_more_optchars; if (!first) first = argv[optind]; do { rotate((char **)(argv + optind), argc - optind); } while (*argv[optind] != '-' && argv[optind] != first); if (argv[optind] == first) goto no_more_optchars; } /* If, when getopt() is called argv[optind] points to the string "-", getopt() shall return -1 without changing optind. */ if (strcmp(argv[optind], "-") == 0) goto no_more_optchars; /* If, when getopt() is called argv[optind] points to the string "--", getopt() shall return -1 after incrementing optind. 
*/ if (strcmp(argv[optind], "--") == 0) { ++optind; if (first) { do { rotate((char **)(argv + optind), argc - optind); } while (argv[optind] != first); } goto no_more_optchars; } if (optcursor == NULL || *optcursor == '\0') optcursor = argv[optind] + 1; optchar = *optcursor; /* FreeBSD: The variable optopt saves the last known option character returned by getopt(). */ optopt = optchar; /* The getopt() function shall return the next option character (if one is found) from argv that matches a character in optstring, if there is one that matches. */ optdecl = strchr(optstring, optchar); if (optdecl) { /* [I]f a character is followed by a colon, the option takes an argument. */ if (optdecl[1] == ':') { optarg = ++optcursor; if (*optarg == '\0') { /* GNU extension: Two colons mean an option takes an optional arg; if there is text in the current argv-element (i.e., in the same word as the option name itself, for example, "-oarg"), then it is returned in optarg, otherwise optarg is set to zero. */ if (optdecl[2] != ':') { /* If the option was the last character in the string pointed to by an element of argv, then optarg shall contain the next element of argv, and optind shall be incremented by 2. If the resulting value of optind is greater than argc, this indicates a missing option-argument, and getopt() shall return an error indication. Otherwise, optarg shall point to the string following the option character in that element of argv, and optind shall be incremented by 1. */ if (++optind < argc) { optarg = argv[optind]; } else { /* If it detects a missing option-argument, it shall return the colon character ( ':' ) if the first character of optstring was a colon, or a question-mark character ( '?' ) otherwise. */ optarg = NULL; fprintf(stderr, "%s: option requires an argument -- '%c'\n", argv[0], optchar); optchar = (optstring[0] == ':') ? 
':' : '?'; } } else { optarg = NULL; } } optcursor = NULL; } } else { fprintf(stderr,"%s: invalid option -- '%c'\n", argv[0], optchar); /* If getopt() encounters an option character that is not contained in optstring, it shall return the question-mark ( '?' ) character. */ optchar = '?'; } if (optcursor == NULL || *++optcursor == '\0') ++optind; return optchar; no_more_optchars: optcursor = NULL; first = NULL; return -1; } /* Implementation based on [1]. [1] http://www.kernel.org/doc/man-pages/online/pages/man3/getopt.3.html */ int getopt_long(int argc, char* const argv[], const char* optstring, const struct option* longopts, int* longindex) { const struct option* o = longopts; const struct option* match = NULL; int num_matches = 0; size_t argument_name_length = 0; const char* current_argument = NULL; int retval = -1; optarg = NULL; optopt = 0; if (optind >= argc) return -1; /* If, when getopt() is called argv[optind] is a null pointer, getopt_long() shall return -1 without changing optind. */ if (argv[optind] == NULL) goto no_more_optchars; /* If, when getopt_long() is called *argv[optind] is not the character '-', permute argv to move non options to the end */ if (*argv[optind] != '-') { if (argc - optind <= 1) goto no_more_optchars; if (!first) first = argv[optind]; do { rotate((char **)(argv + optind), argc - optind); } while (*argv[optind] != '-' && argv[optind] != first); if (argv[optind] == first) goto no_more_optchars; } if (strlen(argv[optind]) < 3 || strncmp(argv[optind], "--", 2) != 0) return getopt(argc, argv, optstring); /* It's an option; starts with -- and is longer than two chars. 
*/ current_argument = argv[optind] + 2; argument_name_length = strcspn(current_argument, "="); for (; o->name; ++o) { if (strncmp(o->name, current_argument, argument_name_length) == 0) { match = o; ++num_matches; if (strlen(o->name) == argument_name_length) { /* found match is exactly the one which we are looking for */ num_matches = 1; break; } } } if (num_matches == 1) { /* If longindex is not NULL, it points to a variable which is set to the index of the long option relative to longopts. */ if (longindex) *longindex = (int)(match - longopts); /* If flag is NULL, then getopt_long() shall return val. Otherwise, getopt_long() returns 0, and flag shall point to a variable which shall be set to val if the option is found, but left unchanged if the option is not found. */ if (match->flag) *(match->flag) = match->val; retval = match->flag ? 0 : match->val; if (match->has_arg != no_argument) { optarg = strchr(argv[optind], '='); if (optarg != NULL) ++optarg; if (match->has_arg == required_argument) { /* Only scan the next argv for required arguments. Behavior is not specified, but has been observed with Ubuntu and Mac OSX. */ if (optarg == NULL && ++optind < argc) { optarg = argv[optind]; } if (optarg == NULL) retval = ':'; } } else if (strchr(argv[optind], '=')) { /* An argument was provided to a non-argument option. I haven't seen this specified explicitly, but both GNU and BSD-based implementations show this behavior. */ retval = '?'; } } else { /* Unknown option or ambiguous match. */ retval = '?'; if (num_matches == 0) { fprintf(stderr, "%s: unrecognized option -- '%s'\n", argv[0], argv[optind]); } else { fprintf(stderr, "%s: option '%s' is ambiguous\n", argv[0], argv[optind]); } } ++optind; return retval; no_more_optchars: first = NULL; return -1; }
9,862
32.547619
91
c
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/getopt/getopt.h
/* * *Copyright (c) 2012, Kim Gräsman * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Kim Gräsman nor the * names of contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL KIM GRÄSMAN BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #ifndef INCLUDED_GETOPT_PORT_H #define INCLUDED_GETOPT_PORT_H #if defined(__cplusplus) extern "C" { #endif #define no_argument 0 #define required_argument 1 #define optional_argument 2 extern char* optarg; extern int optind, opterr, optopt; struct option { const char* name; int has_arg; int* flag; int val; }; int getopt(int argc, char* const argv[], const char* optstring); int getopt_long(int argc, char* const argv[], const char* optstring, const struct option* longopts, int* longindex); #if defined(__cplusplus) } #endif #endif // INCLUDED_GETOPT_PORT_H
2,137
35.237288
79
h
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/jemalloc_gen/include/jemalloc/jemalloc_defs.h
/* ./../windows/jemalloc_gen/include/jemalloc/jemalloc_defs.h. Generated from jemalloc_defs.h.in by configure. */ /* Defined if __attribute__((...)) syntax is supported. */ /* #undef JEMALLOC_HAVE_ATTR */ /* Defined if alloc_size attribute is supported. */ /* #undef JEMALLOC_HAVE_ATTR_ALLOC_SIZE */ /* Defined if format(gnu_printf, ...) attribute is supported. */ /* #undef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF */ /* Defined if format(printf, ...) attribute is supported. */ /* #undef JEMALLOC_HAVE_ATTR_FORMAT_PRINTF */ /* * Define overrides for non-standard allocator-related functions if they are * present on the system. */ /* #undef JEMALLOC_OVERRIDE_MEMALIGN */ /* #undef JEMALLOC_OVERRIDE_VALLOC */ /* * At least Linux omits the "const" in: * * size_t malloc_usable_size(const void *ptr); * * Match the operating system's prototype. */ #define JEMALLOC_USABLE_SIZE_CONST const /* * If defined, specify throw() for the public function prototypes when compiling * with C++. The only justification for this is to match the prototypes that * glibc defines. */ /* #undef JEMALLOC_USE_CXX_THROW */ #ifdef _MSC_VER # ifdef _WIN64 # define LG_SIZEOF_PTR_WIN 3 # else # define LG_SIZEOF_PTR_WIN 2 # endif #endif /* sizeof(void *) == 2^LG_SIZEOF_PTR. */ #define LG_SIZEOF_PTR LG_SIZEOF_PTR_WIN
1,327
27.255319
115
h
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/jemalloc_gen/include/jemalloc/jemalloc_mangle_jet.h
/* * By default application code must explicitly refer to mangled symbol names, * so that it is possible to use jemalloc in conjunction with another allocator * in the same application. Define JEMALLOC_MANGLE in order to cause automatic * name mangling that matches the API prefixing that happened as a result of * --with-mangling and/or --with-jemalloc-prefix configuration settings. */ #ifdef JEMALLOC_MANGLE # ifndef JEMALLOC_NO_DEMANGLE # define JEMALLOC_NO_DEMANGLE # endif # define pool_create jet_pool_create # define pool_delete jet_pool_delete # define pool_malloc jet_pool_malloc # define pool_calloc jet_pool_calloc # define pool_ralloc jet_pool_ralloc # define pool_aligned_alloc jet_pool_aligned_alloc # define pool_free jet_pool_free # define pool_malloc_usable_size jet_pool_malloc_usable_size # define pool_malloc_stats_print jet_pool_malloc_stats_print # define pool_extend jet_pool_extend # define pool_set_alloc_funcs jet_pool_set_alloc_funcs # define pool_check jet_pool_check # define malloc_conf jet_malloc_conf # define malloc_message jet_malloc_message # define malloc jet_malloc # define calloc jet_calloc # define posix_memalign jet_posix_memalign # define aligned_alloc jet_aligned_alloc # define realloc jet_realloc # define free jet_free # define mallocx jet_mallocx # define rallocx jet_rallocx # define xallocx jet_xallocx # define sallocx jet_sallocx # define dallocx jet_dallocx # define nallocx jet_nallocx # define mallctl jet_mallctl # define mallctlnametomib jet_mallctlnametomib # define mallctlbymib jet_mallctlbymib # define navsnprintf jet_navsnprintf # define malloc_stats_print jet_malloc_stats_print # define malloc_usable_size jet_malloc_usable_size #endif /* * The jet_* macros can be used as stable alternative names for the * public jemalloc API if JEMALLOC_NO_DEMANGLE is defined. 
This is primarily * meant for use in jemalloc itself, but it can be used by application code to * provide isolation from the name mangling specified via --with-mangling * and/or --with-jemalloc-prefix. */ #ifndef JEMALLOC_NO_DEMANGLE # undef jet_pool_create # undef jet_pool_delete # undef jet_pool_malloc # undef jet_pool_calloc # undef jet_pool_ralloc # undef jet_pool_aligned_alloc # undef jet_pool_free # undef jet_pool_malloc_usable_size # undef jet_pool_malloc_stats_print # undef jet_pool_extend # undef jet_pool_set_alloc_funcs # undef jet_pool_check # undef jet_malloc_conf # undef jet_malloc_message # undef jet_malloc # undef jet_calloc # undef jet_posix_memalign # undef jet_aligned_alloc # undef jet_realloc # undef jet_free # undef jet_mallocx # undef jet_rallocx # undef jet_xallocx # undef jet_sallocx # undef jet_dallocx # undef jet_nallocx # undef jet_mallctl # undef jet_mallctlnametomib # undef jet_mallctlbymib # undef jet_navsnprintf # undef jet_malloc_stats_print # undef jet_malloc_usable_size #endif
2,939
32.793103
79
h
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/jemalloc_gen/include/jemalloc/jemalloc_protos_jet.h
/* * The jet_ prefix on the following public symbol declarations is an artifact * of namespace management, and should be omitted in application code unless * JEMALLOC_NO_DEMANGLE is defined (see jemalloc_mangle@install_suffix@.h). */ extern JEMALLOC_EXPORT const char *jet_malloc_conf; extern JEMALLOC_EXPORT void (*jet_malloc_message)(void *cbopaque, const char *s); typedef struct pool_s pool_t; JEMALLOC_EXPORT pool_t *jet_pool_create(void *addr, size_t size, int zeroed); JEMALLOC_EXPORT int jet_pool_delete(pool_t *pool); JEMALLOC_EXPORT size_t jet_pool_extend(pool_t *pool, void *addr, size_t size, int zeroed); JEMALLOC_EXPORT void *jet_pool_malloc(pool_t *pool, size_t size); JEMALLOC_EXPORT void *jet_pool_calloc(pool_t *pool, size_t nmemb, size_t size); JEMALLOC_EXPORT void *jet_pool_ralloc(pool_t *pool, void *ptr, size_t size); JEMALLOC_EXPORT void *jet_pool_aligned_alloc(pool_t *pool, size_t alignment, size_t size); JEMALLOC_EXPORT void jet_pool_free(pool_t *pool, void *ptr); JEMALLOC_EXPORT size_t jet_pool_malloc_usable_size(pool_t *pool, void *ptr); JEMALLOC_EXPORT void jet_pool_malloc_stats_print(pool_t *pool, void (*write_cb)(void *, const char *), void *cbopaque, const char *opts); JEMALLOC_EXPORT void jet_pool_set_alloc_funcs(void *(*malloc_func)(size_t), void (*free_func)(void *)); JEMALLOC_EXPORT int jet_pool_check(pool_t *pool); JEMALLOC_EXPORT void *jet_malloc(size_t size) JEMALLOC_ATTR(malloc); JEMALLOC_EXPORT void *jet_calloc(size_t num, size_t size) JEMALLOC_ATTR(malloc); JEMALLOC_EXPORT int jet_posix_memalign(void **memptr, size_t alignment, size_t size) JEMALLOC_ATTR(nonnull(1)); JEMALLOC_EXPORT void *jet_aligned_alloc(size_t alignment, size_t size) JEMALLOC_ATTR(malloc); JEMALLOC_EXPORT void *jet_realloc(void *ptr, size_t size); JEMALLOC_EXPORT void jet_free(void *ptr); JEMALLOC_EXPORT void *jet_mallocx(size_t size, int flags); JEMALLOC_EXPORT void *jet_rallocx(void *ptr, size_t size, int flags); JEMALLOC_EXPORT size_t jet_xallocx(void *ptr, 
size_t size, size_t extra, int flags); JEMALLOC_EXPORT size_t jet_sallocx(const void *ptr, int flags); JEMALLOC_EXPORT void jet_dallocx(void *ptr, int flags); JEMALLOC_EXPORT size_t jet_nallocx(size_t size, int flags); JEMALLOC_EXPORT int jet_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp, size_t newlen); JEMALLOC_EXPORT int jet_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp); JEMALLOC_EXPORT int jet_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen); JEMALLOC_EXPORT void jet_malloc_stats_print(void (*write_cb)(void *, const char *), void *jet_cbopaque, const char *opts); JEMALLOC_EXPORT size_t jet_malloc_usable_size( JEMALLOC_USABLE_SIZE_CONST void *ptr); JEMALLOC_EXPORT int jet_navsnprintf(char *str, size_t size, const char *format, va_list ap); #ifdef JEMALLOC_OVERRIDE_MEMALIGN JEMALLOC_EXPORT void * jet_memalign(size_t alignment, size_t size) JEMALLOC_ATTR(malloc); #endif #ifdef JEMALLOC_OVERRIDE_VALLOC JEMALLOC_EXPORT void * jet_valloc(size_t size) JEMALLOC_ATTR(malloc); #endif
3,176
45.043478
91
h
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/pmdk/src/windows/jemalloc_gen/include/jemalloc/jemalloc_rename.h
/* * Name mangling for public symbols is controlled by --with-mangling and * --with-jemalloc-prefix. With default settings the je_ prefix is stripped by * these macro definitions. */ #ifndef JEMALLOC_NO_RENAME # define je_pool_create je_vmem_pool_create # define je_pool_delete je_vmem_pool_delete # define je_pool_malloc je_vmem_pool_malloc # define je_pool_calloc je_vmem_pool_calloc # define je_pool_ralloc je_vmem_pool_ralloc # define je_pool_aligned_alloc je_vmem_pool_aligned_alloc # define je_pool_free je_vmem_pool_free # define je_pool_malloc_usable_size je_vmem_pool_malloc_usable_size # define je_pool_malloc_stats_print je_vmem_pool_malloc_stats_print # define je_pool_extend je_vmem_pool_extend # define je_pool_set_alloc_funcs je_vmem_pool_set_alloc_funcs # define je_pool_check je_vmem_pool_check # define je_malloc_conf je_vmem_malloc_conf # define je_malloc_message je_vmem_malloc_message # define je_malloc je_vmem_malloc # define je_calloc je_vmem_calloc # define je_posix_memalign je_vmem_posix_memalign # define je_aligned_alloc je_vmem_aligned_alloc # define je_realloc je_vmem_realloc # define je_free je_vmem_free # define je_mallocx je_vmem_mallocx # define je_rallocx je_vmem_rallocx # define je_xallocx je_vmem_xallocx # define je_sallocx je_vmem_sallocx # define je_dallocx je_vmem_dallocx # define je_nallocx je_vmem_nallocx # define je_mallctl je_vmem_mallctl # define je_mallctlnametomib je_vmem_mallctlnametomib # define je_mallctlbymib je_vmem_mallctlbymib # define je_navsnprintf je_vmem_navsnprintf # define je_malloc_stats_print je_vmem_malloc_stats_print # define je_malloc_usable_size je_vmem_malloc_usable_size #endif
1,694
41.375
79
h