NearPMSW-main/nearpm/shadow/pmdk-sd/src/rpmem_common/rpmem_fip_common.h

// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* rpmem_fip_common.h -- common definitions for librpmem and rpmemd
*/
#ifndef RPMEM_FIP_COMMON_H
#define RPMEM_FIP_COMMON_H 1
#include <string.h>
#include <netinet/in.h>
#include <rdma/fabric.h>
#include <rdma/fi_cm.h>
#include <rdma/fi_rma.h>
#ifdef __cplusplus
extern "C" {
#endif
#define RPMEM_FIVERSION FI_VERSION(1, 4)
#define RPMEM_FIP_CQ_WAIT_MS 100
#define min(a, b) ((a) < (b) ? (a) : (b))
#define max(a, b) ((a) > (b) ? (a) : (b))
/*
* rpmem_fip_node -- client or server node type
*/
enum rpmem_fip_node {
RPMEM_FIP_NODE_CLIENT,
RPMEM_FIP_NODE_SERVER,
MAX_RPMEM_FIP_NODE,
};
/*
* rpmem_fip_probe -- bit mask of available providers and their max WQ sizes
*/
struct rpmem_fip_probe {
unsigned providers;
size_t max_wq_size[MAX_RPMEM_PROV];
};
/*
* rpmem_fip_probe -- returns true if specified provider is available
*/
static inline int
rpmem_fip_probe(struct rpmem_fip_probe probe, enum rpmem_provider provider)
{
return (probe.providers & (1U << provider)) != 0;
}
/*
* rpmem_fip_probe_any -- returns true if any provider is available
*/
static inline int
rpmem_fip_probe_any(struct rpmem_fip_probe probe)
{
return probe.providers != 0;
}
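/*
 * Illustrative sketch (not part of the original header): typical use of the
 * probe API — rpmem_fip_probe_get() below fills the structure, the inline
 * helpers above query it. The target name is a placeholder; error handling
 * trimmed.
 *
 *	struct rpmem_fip_probe probe;
 *	if (rpmem_fip_probe_get("some-target", &probe) == 0 &&
 *			rpmem_fip_probe(probe, RPMEM_PROV_LIBFABRIC_VERBS)) {
 *		size_t wq = probe.max_wq_size[RPMEM_PROV_LIBFABRIC_VERBS];
 *		// verbs provider available; wq holds its max work queue size
 *	}
 */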
int rpmem_fip_probe_get(const char *target, struct rpmem_fip_probe *probe);
struct fi_info *rpmem_fip_get_hints(enum rpmem_provider provider);
int rpmem_fip_read_eq_check(struct fid_eq *eq, struct fi_eq_cm_entry *entry,
uint32_t exp_event, fid_t exp_fid, int timeout);
int rpmem_fip_read_eq(struct fid_eq *eq, struct fi_eq_cm_entry *entry,
uint32_t *event, int timeout);
size_t rpmem_fip_cq_size(enum rpmem_persist_method pm,
enum rpmem_fip_node node);
size_t rpmem_fip_wq_size(enum rpmem_persist_method pm,
enum rpmem_fip_node node);
size_t rpmem_fip_rx_size(enum rpmem_persist_method pm,
enum rpmem_fip_node node);
size_t rpmem_fip_max_nlanes(struct fi_info *fi);
void rpmem_fip_print_info(struct fi_info *fi);
#ifdef __cplusplus
}
#endif
#endif

NearPMSW-main/nearpm/shadow/pmdk-sd/src/rpmem_common/rpmem_fip_common.c

// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* rpmem_fip_common.c -- common definitions for librpmem and rpmemd
*/
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <stddef.h>
#include <errno.h>
#include "rpmem_common.h"
#include "rpmem_fip_common.h"
#include "rpmem_proto.h"
#include "rpmem_common_log.h"
#include "valgrind_internal.h"
#include <rdma/fi_errno.h>
/*
* rpmem_fip_get_hints -- return fabric interface information hints
*/
struct fi_info *
rpmem_fip_get_hints(enum rpmem_provider provider)
{
RPMEMC_ASSERT(provider < MAX_RPMEM_PROV);
struct fi_info *hints = fi_allocinfo();
if (!hints) {
RPMEMC_LOG(ERR, "!fi_allocinfo");
return NULL;
}
/* connection-oriented endpoint */
hints->ep_attr->type = FI_EP_MSG;
/*
* Basic memory registration mode indicates that MR attributes
* (rkey, lkey) are selected by provider.
*/
hints->domain_attr->mr_mode = FI_MR_BASIC;
/*
* FI_THREAD_SAFE indicates MT applications can access any
* resources through interface without any restrictions
*/
hints->domain_attr->threading = FI_THREAD_SAFE;
/*
* FI_MSG - SEND and RECV
* FI_RMA - WRITE and READ
*/
hints->caps = FI_MSG | FI_RMA;
/* must register locally accessed buffers */
hints->mode = FI_CONTEXT | FI_LOCAL_MR | FI_RX_CQ_DATA;
/* READ-after-WRITE and SEND-after-WRITE message ordering required */
hints->tx_attr->msg_order = FI_ORDER_RAW | FI_ORDER_SAW;
hints->addr_format = FI_SOCKADDR;
if (provider != RPMEM_PROV_UNKNOWN) {
const char *prov_name = rpmem_provider_to_str(provider);
RPMEMC_ASSERT(prov_name != NULL);
hints->fabric_attr->prov_name = strdup(prov_name);
if (!hints->fabric_attr->prov_name) {
RPMEMC_LOG(ERR, "!strdup(provider)");
goto err_strdup;
}
}
return hints;
err_strdup:
fi_freeinfo(hints);
return NULL;
}
/*
* rpmem_fip_probe_get -- return list of available providers
*/
int
rpmem_fip_probe_get(const char *target, struct rpmem_fip_probe *probe)
{
struct fi_info *hints = rpmem_fip_get_hints(RPMEM_PROV_UNKNOWN);
if (!hints)
return -1;
int ret;
struct fi_info *fi;
ret = fi_getinfo(RPMEM_FIVERSION, target, NULL, 0, hints, &fi);
if (ret) {
goto err_getinfo;
}
if (probe) {
memset(probe, 0, sizeof(*probe));
struct fi_info *prov = fi;
while (prov) {
enum rpmem_provider p = rpmem_provider_from_str(
prov->fabric_attr->prov_name);
if (p == RPMEM_PROV_UNKNOWN) {
prov = prov->next;
continue;
}
probe->providers |= (1U << p);
probe->max_wq_size[p] = prov->tx_attr->size;
prov = prov->next;
}
}
fi_freeinfo(fi);
err_getinfo:
fi_freeinfo(hints);
return ret;
}
/*
* rpmem_fip_read_eq -- read event queue entry with specified timeout
*/
int
rpmem_fip_read_eq(struct fid_eq *eq, struct fi_eq_cm_entry *entry,
uint32_t *event, int timeout)
{
int ret;
ssize_t sret;
struct fi_eq_err_entry err;
sret = fi_eq_sread(eq, event, entry, sizeof(*entry), timeout, 0);
VALGRIND_DO_MAKE_MEM_DEFINED(&sret, sizeof(sret));
if (timeout != -1 && (sret == -FI_ETIMEDOUT || sret == -FI_EAGAIN)) {
errno = ETIMEDOUT;
return 1;
}
if (sret < 0 || (size_t)sret != sizeof(*entry)) {
if (sret < 0)
ret = (int)sret;
else
ret = -1;
sret = fi_eq_readerr(eq, &err, 0);
if (sret < 0) {
errno = EIO;
RPMEMC_LOG(ERR, "error reading from event queue: "
"cannot read error from event queue: %s",
fi_strerror((int)sret));
} else if (sret > 0) {
RPMEMC_ASSERT(sret == sizeof(err));
errno = -err.prov_errno;
RPMEMC_LOG(ERR, "error reading from event queue: %s",
fi_eq_strerror(eq, err.prov_errno,
NULL, NULL, 0));
}
return ret;
}
return 0;
}
/*
* rpmem_fip_read_eq_check -- read event queue entry and expect specified event
* and fid
*
* Returns:
* 1 - timeout
* 0 - success
* otherwise - error
*/
int
rpmem_fip_read_eq_check(struct fid_eq *eq, struct fi_eq_cm_entry *entry,
uint32_t exp_event, fid_t exp_fid, int timeout)
{
uint32_t event;
int ret = rpmem_fip_read_eq(eq, entry, &event, timeout);
if (ret)
return ret;
if (event != exp_event || entry->fid != exp_fid) {
errno = EIO;
RPMEMC_LOG(ERR, "unexpected event received (%u) "
"expected (%u)%s", event, exp_event,
entry->fid != exp_fid ?
" invalid endpoint" : "");
return -1;
}
return 0;
}
/*
* rpmem_fip_lane_attr -- lane attributes
*
* This structure describes how many SQ, RQ and CQ entries are
* required for a single lane.
*
* NOTE:
* - WRITE, READ and SEND requests are placed in SQ,
* - RECV requests are placed in RQ.
*/
struct rpmem_fip_lane_attr {
size_t n_per_sq; /* number of entries per lane in send queue */
size_t n_per_rq; /* number of entries per lane in receive queue */
size_t n_per_cq; /* number of entries per lane in completion queue */
};
/* queues size required by remote persist operation methods */
static const struct rpmem_fip_lane_attr
rpmem_fip_lane_attrs[MAX_RPMEM_FIP_NODE][MAX_RPMEM_PM] = {
[RPMEM_FIP_NODE_CLIENT][RPMEM_PM_GPSPM] = {
.n_per_sq = 2, /* WRITE + SEND */
.n_per_rq = 1, /* RECV */
.n_per_cq = 3,
},
[RPMEM_FIP_NODE_CLIENT][RPMEM_PM_APM] = {
/* WRITE + READ for persist, WRITE + SEND for deep persist */
.n_per_sq = 2, /* WRITE + SEND */
.n_per_rq = 1, /* RECV */
.n_per_cq = 3,
},
[RPMEM_FIP_NODE_SERVER][RPMEM_PM_GPSPM] = {
.n_per_sq = 1, /* SEND */
.n_per_rq = 1, /* RECV */
.n_per_cq = 3,
},
[RPMEM_FIP_NODE_SERVER][RPMEM_PM_APM] = {
.n_per_sq = 1, /* SEND */
.n_per_rq = 1, /* RECV */
.n_per_cq = 3,
},
};
/*
* rpmem_fip_cq_size -- returns completion queue size based on
* persist method and node type
*/
size_t
rpmem_fip_cq_size(enum rpmem_persist_method pm, enum rpmem_fip_node node)
{
RPMEMC_ASSERT(pm < MAX_RPMEM_PM);
RPMEMC_ASSERT(node < MAX_RPMEM_FIP_NODE);
const struct rpmem_fip_lane_attr *attr =
&rpmem_fip_lane_attrs[node][pm];
return attr->n_per_cq ? : 1;
}
/*
* rpmem_fip_wq_size -- returns submission queue (transmit queue) size based
* on persist method and node type
*/
size_t
rpmem_fip_wq_size(enum rpmem_persist_method pm, enum rpmem_fip_node node)
{
RPMEMC_ASSERT(pm < MAX_RPMEM_PM);
RPMEMC_ASSERT(node < MAX_RPMEM_FIP_NODE);
const struct rpmem_fip_lane_attr *attr =
&rpmem_fip_lane_attrs[node][pm];
return attr->n_per_sq ? : 1;
}
/*
* rpmem_fip_rx_size -- returns receive queue size based
* on persist method and node type
*/
size_t
rpmem_fip_rx_size(enum rpmem_persist_method pm, enum rpmem_fip_node node)
{
RPMEMC_ASSERT(pm < MAX_RPMEM_PM);
RPMEMC_ASSERT(node < MAX_RPMEM_FIP_NODE);
const struct rpmem_fip_lane_attr *attr =
&rpmem_fip_lane_attrs[node][pm];
return attr->n_per_rq ? : 1;
}
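/*
 * Illustrative note (not part of the original source): for the lane attribute
 * table above, a client using GPSPM gets, per lane:
 *
 *	rpmem_fip_wq_size(RPMEM_PM_GPSPM, RPMEM_FIP_NODE_CLIENT) == 2
 *	rpmem_fip_rx_size(RPMEM_PM_GPSPM, RPMEM_FIP_NODE_CLIENT) == 1
 *	rpmem_fip_cq_size(RPMEM_PM_GPSPM, RPMEM_FIP_NODE_CLIENT) == 3
 *
 * Callers are expected to scale these per-lane values by the number of lanes
 * when sizing the actual queues.
 */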
/*
* rpmem_fip_max_nlanes -- returns maximum number of lanes
*/
size_t
rpmem_fip_max_nlanes(struct fi_info *fi)
{
return min(min(fi->domain_attr->tx_ctx_cnt,
fi->domain_attr->rx_ctx_cnt),
fi->domain_attr->cq_cnt);
}
/*
* rpmem_fip_print_info -- print some useful info about fabric interface
*/
void
rpmem_fip_print_info(struct fi_info *fi)
{
RPMEMC_LOG(INFO, "libfabric version: %s",
fi_tostr(fi, FI_TYPE_VERSION));
char *str = fi_tostr(fi, FI_TYPE_INFO);
char *buff = strdup(str);
if (!buff) {
RPMEMC_LOG(ERR, "!allocating string buffer for "
"libfabric interface information");
return;
}
RPMEMC_LOG(INFO, "libfabric interface info:");
char *nl;
char *last = buff;
while (last != NULL) {
nl = strchr(last, '\n');
if (nl) {
*nl = '\0';
nl++;
}
RPMEMC_LOG(INFO, "%s", last);
last = nl;
}
free(buff);
}

NearPMSW-main/nearpm/shadow/pmdk-sd/src/rpmem_common/rpmem_common_log.h

// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016, Intel Corporation */
/*
* rpmem_common_log.h -- common log macros for librpmem and rpmemd
*/
#if defined(RPMEMC_LOG_RPMEM) && defined(RPMEMC_LOG_RPMEMD)
#error Both RPMEMC_LOG_RPMEM and RPMEMC_LOG_RPMEMD defined
#elif !defined(RPMEMC_LOG_RPMEM) && !defined(RPMEMC_LOG_RPMEMD)
#define RPMEMC_LOG(level, fmt, args...) do {} while (0)
#define RPMEMC_DBG(level, fmt, args...) do {} while (0)
#define RPMEMC_FATAL(fmt, args...) do {} while (0)
#define RPMEMC_ASSERT(cond) do {} while (0)
#elif defined(RPMEMC_LOG_RPMEM)
#include "out.h"
#include "rpmem_util.h"
#define RPMEMC_LOG(level, fmt, args...) RPMEM_LOG(level, fmt, ## args)
#define RPMEMC_DBG(level, fmt, args...) RPMEM_DBG(fmt, ## args)
#define RPMEMC_FATAL(fmt, args...) RPMEM_FATAL(fmt, ## args)
#define RPMEMC_ASSERT(cond) RPMEM_ASSERT(cond)
#else
#include "rpmemd_log.h"
#define RPMEMC_LOG(level, fmt, args...) RPMEMD_LOG(level, fmt, ## args)
#define RPMEMC_DBG(level, fmt, args...) RPMEMD_DBG(fmt, ## args)
#define RPMEMC_FATAL(fmt, args...) RPMEMD_FATAL(fmt, ## args)
#define RPMEMC_ASSERT(cond) RPMEMD_ASSERT(cond)
#endif
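/*
 * Illustrative note (not part of the original header): with RPMEMC_LOG_RPMEM
 * defined, RPMEMC_LOG(ERR, "msg %d", 1) expands to RPMEM_LOG(ERR, "msg %d", 1);
 * with RPMEMC_LOG_RPMEMD it expands to RPMEMD_LOG(ERR, "msg %d", 1); with
 * neither defined, all four macros compile away to no-ops.
 */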

NearPMSW-main/nearpm/shadow/pmdk-sd/src/rpmem_common/rpmem_common.h

// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* rpmem_common.h -- common definitions for librpmem and rpmemd
*/
#ifndef RPMEM_COMMON_H
#define RPMEM_COMMON_H 1
/*
* Environment variables and default values used by librpmem and rpmemd
*/
#define RPMEM_CMD_ENV "RPMEM_CMD"
#define RPMEM_SSH_ENV "RPMEM_SSH"
#define RPMEM_DEF_CMD "rpmemd"
#define RPMEM_DEF_SSH "ssh"
#define RPMEM_PROV_SOCKET_ENV "RPMEM_ENABLE_SOCKETS"
#define RPMEM_PROV_VERBS_ENV "RPMEM_ENABLE_VERBS"
#define RPMEM_MAX_NLANES_ENV "RPMEM_MAX_NLANES"
#define RPMEM_WQ_SIZE_ENV "RPMEM_WORK_QUEUE_SIZE"
#define RPMEM_ACCEPT_TIMEOUT 30000
#define RPMEM_CONNECT_TIMEOUT 30000
#define RPMEM_MONITOR_TIMEOUT 1000
#include <stdint.h>
#include <sys/socket.h>
#include <netdb.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
* rpmem_err -- error codes
*/
enum rpmem_err {
RPMEM_SUCCESS = 0,
RPMEM_ERR_BADPROTO = 1,
RPMEM_ERR_BADNAME = 2,
RPMEM_ERR_BADSIZE = 3,
RPMEM_ERR_BADNLANES = 4,
RPMEM_ERR_BADPROVIDER = 5,
RPMEM_ERR_FATAL = 6,
RPMEM_ERR_FATAL_CONN = 7,
RPMEM_ERR_BUSY = 8,
RPMEM_ERR_EXISTS = 9,
RPMEM_ERR_PROVNOSUP = 10,
RPMEM_ERR_NOEXIST = 11,
RPMEM_ERR_NOACCESS = 12,
RPMEM_ERR_POOL_CFG = 13,
MAX_RPMEM_ERR,
};
/*
* rpmem_persist_method -- remote persist operation method
*/
enum rpmem_persist_method {
RPMEM_PM_GPSPM = 1, /* General Purpose Server Persistency Method */
RPMEM_PM_APM = 2, /* Appliance Persistency Method */
MAX_RPMEM_PM,
};
const char *rpmem_persist_method_to_str(enum rpmem_persist_method pm);
/*
* rpmem_provider -- supported providers
*/
enum rpmem_provider {
RPMEM_PROV_UNKNOWN = 0,
RPMEM_PROV_LIBFABRIC_VERBS = 1,
RPMEM_PROV_LIBFABRIC_SOCKETS = 2,
MAX_RPMEM_PROV,
};
enum rpmem_provider rpmem_provider_from_str(const char *str);
const char *rpmem_provider_to_str(enum rpmem_provider provider);
/*
* rpmem_req_attr -- arguments for open/create request
*/
struct rpmem_req_attr {
size_t pool_size;
unsigned nlanes;
size_t buff_size;
enum rpmem_provider provider;
const char *pool_desc;
};
/*
* rpmem_resp_attr -- return arguments from open/create request
*/
struct rpmem_resp_attr {
unsigned short port;
uint64_t rkey;
uint64_t raddr;
unsigned nlanes;
enum rpmem_persist_method persist_method;
};
#define RPMEM_HAS_USER 0x1
#define RPMEM_HAS_SERVICE 0x2
#define RPMEM_FLAGS_USE_IPV4 0x4
#define RPMEM_MAX_USER (32 + 1) /* see useradd(8) + 1 for '\0' */
#define RPMEM_MAX_NODE (255 + 1) /* see gethostname(2) + 1 for '\0' */
#define RPMEM_MAX_SERVICE (NI_MAXSERV + 1) /* + 1 for '\0' */
#define RPMEM_HDR_SIZE 4096
#define RPMEM_CLOSE_FLAGS_REMOVE 0x1
#define RPMEM_DEF_BUFF_SIZE 8192
struct rpmem_target_info {
char user[RPMEM_MAX_USER];
char node[RPMEM_MAX_NODE];
char service[RPMEM_MAX_SERVICE];
unsigned flags;
};
extern unsigned Rpmem_max_nlanes;
extern unsigned Rpmem_wq_size;
extern int Rpmem_fork_unsafe;
int rpmem_b64_write(int sockfd, const void *buf, size_t len, int flags);
int rpmem_b64_read(int sockfd, void *buf, size_t len, int flags);
const char *rpmem_get_ip_str(const struct sockaddr *addr);
struct rpmem_target_info *rpmem_target_parse(const char *target);
void rpmem_target_free(struct rpmem_target_info *info);
int rpmem_xwrite(int fd, const void *buf, size_t len, int flags);
int rpmem_xread(int fd, void *buf, size_t len, int flags);
char *rpmem_get_ssh_conn_addr(void);
#ifdef __cplusplus
}
#endif
#endif

NearPMSW-main/nearpm/shadow/pmdk-sd/src/rpmem_common/rpmem_proto.h

// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* rpmem_proto.h -- rpmem protocol definitions
*/
#ifndef RPMEM_PROTO_H
#define RPMEM_PROTO_H 1
#include <stdint.h>
#include <endian.h>
#include "librpmem.h"
#ifdef __cplusplus
extern "C" {
#endif
#define PACKED __attribute__((packed))
#define RPMEM_PROTO "tcp"
#define RPMEM_PROTO_MAJOR 0
#define RPMEM_PROTO_MINOR 1
#define RPMEM_SIG_SIZE 8
#define RPMEM_UUID_SIZE 16
#define RPMEM_PROV_SIZE 32
#define RPMEM_USER_SIZE 16
/*
* rpmem_msg_type -- type of messages
*/
enum rpmem_msg_type {
RPMEM_MSG_TYPE_CREATE = 1, /* create request */
RPMEM_MSG_TYPE_CREATE_RESP = 2, /* create request response */
RPMEM_MSG_TYPE_OPEN = 3, /* open request */
RPMEM_MSG_TYPE_OPEN_RESP = 4, /* open request response */
RPMEM_MSG_TYPE_CLOSE = 5, /* close request */
RPMEM_MSG_TYPE_CLOSE_RESP = 6, /* close request response */
RPMEM_MSG_TYPE_SET_ATTR = 7, /* set attributes request */
/* set attributes request response */
RPMEM_MSG_TYPE_SET_ATTR_RESP = 8,
MAX_RPMEM_MSG_TYPE,
};
/*
* rpmem_pool_attr_packed -- a packed version
*/
struct rpmem_pool_attr_packed {
char signature[RPMEM_POOL_HDR_SIG_LEN]; /* pool signature */
uint32_t major; /* format major version number */
uint32_t compat_features; /* mask: compatible "may" features */
uint32_t incompat_features; /* mask: "must support" features */
uint32_t ro_compat_features; /* mask: force RO if unsupported */
unsigned char poolset_uuid[RPMEM_POOL_HDR_UUID_LEN]; /* pool uuid */
unsigned char uuid[RPMEM_POOL_HDR_UUID_LEN]; /* first part uuid */
unsigned char next_uuid[RPMEM_POOL_HDR_UUID_LEN]; /* next pool uuid */
unsigned char prev_uuid[RPMEM_POOL_HDR_UUID_LEN]; /* prev pool uuid */
unsigned char user_flags[RPMEM_POOL_USER_FLAGS_LEN]; /* user flags */
} PACKED;
/*
* rpmem_msg_ibc_attr -- in-band connection attributes
*
* Used by create request response and open request response.
* Contains essential information to proceed with in-band connection
* initialization.
*/
struct rpmem_msg_ibc_attr {
uint32_t port; /* RDMA connection port */
uint32_t persist_method; /* persist method */
uint64_t rkey; /* remote key */
uint64_t raddr; /* remote address */
uint32_t nlanes; /* number of lanes */
} PACKED;
/*
* rpmem_msg_pool_desc -- remote pool descriptor
*/
struct rpmem_msg_pool_desc {
uint32_t size; /* size of pool descriptor */
uint8_t desc[0]; /* pool descriptor, null-terminated string */
} PACKED;
/*
* rpmem_msg_hdr -- message header which consists of type and size of message
*
* The type must be one of the rpmem_msg_type values.
*/
struct rpmem_msg_hdr {
uint32_t type; /* type of message */
uint64_t size; /* size of message */
uint8_t body[0];
} PACKED;
/*
* rpmem_msg_hdr_resp -- message response header which consists of type, size
* and status.
*
* The type must be one of the rpmem_msg_type values.
*/
struct rpmem_msg_hdr_resp {
uint32_t status; /* response status */
uint32_t type; /* type of message */
uint64_t size; /* size of message */
} PACKED;
/*
* rpmem_msg_common -- common fields for open/create messages
*/
struct rpmem_msg_common {
uint16_t major; /* protocol version major number */
uint16_t minor; /* protocol version minor number */
uint64_t pool_size; /* minimum required size of a pool */
uint32_t nlanes; /* number of lanes used by initiator */
uint32_t provider; /* provider */
uint64_t buff_size; /* buffer size for inline persist */
} PACKED;
/*
* rpmem_msg_create -- create request message
*
* The type of message must be set to RPMEM_MSG_TYPE_CREATE.
* The size of message must be set to
* sizeof(struct rpmem_msg_create) + pool_desc_size
*/
struct rpmem_msg_create {
struct rpmem_msg_hdr hdr; /* message header */
struct rpmem_msg_common c;
struct rpmem_pool_attr_packed pool_attr; /* pool attributes */
struct rpmem_msg_pool_desc pool_desc; /* pool descriptor */
} PACKED;
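/*
 * Illustrative sketch (not part of the original header): sizing and filling
 * a create request per the rule above; byte-order conversion via
 * rpmem_hton_msg_create() and error handling omitted, desc is a placeholder.
 *
 *	size_t pool_desc_size = strlen(desc) + 1;
 *	size_t msg_size = sizeof(struct rpmem_msg_create) + pool_desc_size;
 *	struct rpmem_msg_create *msg = malloc(msg_size);
 *	msg->hdr.type = RPMEM_MSG_TYPE_CREATE;
 *	msg->hdr.size = msg_size;
 *	msg->pool_desc.size = (uint32_t)pool_desc_size;
 *	memcpy(msg->pool_desc.desc, desc, pool_desc_size);
 */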
/*
* rpmem_msg_create_resp -- create request response message
*
* The type of message must be set to RPMEM_MSG_TYPE_CREATE_RESP.
* The size of message must be set to sizeof(struct rpmem_msg_create_resp).
*/
struct rpmem_msg_create_resp {
struct rpmem_msg_hdr_resp hdr; /* message header */
struct rpmem_msg_ibc_attr ibc; /* in-band connection attributes */
} PACKED;
/*
* rpmem_msg_open -- open request message
*
* The type of message must be set to RPMEM_MSG_TYPE_OPEN.
* The size of message must be set to
* sizeof(struct rpmem_msg_open) + pool_desc_size
*/
struct rpmem_msg_open {
struct rpmem_msg_hdr hdr; /* message header */
struct rpmem_msg_common c;
struct rpmem_msg_pool_desc pool_desc; /* pool descriptor */
} PACKED;
/*
* rpmem_msg_open_resp -- open request response message
*
* The type of message must be set to RPMEM_MSG_TYPE_OPEN_RESP.
* The size of message must be set to sizeof(struct rpmem_msg_open_resp)
*/
struct rpmem_msg_open_resp {
struct rpmem_msg_hdr_resp hdr; /* message header */
struct rpmem_msg_ibc_attr ibc; /* in-band connection attributes */
struct rpmem_pool_attr_packed pool_attr; /* pool attributes */
} PACKED;
/*
* rpmem_msg_close -- close request message
*
* The type of message must be set to RPMEM_MSG_TYPE_CLOSE
* The size of message must be set to sizeof(struct rpmem_msg_close)
*/
struct rpmem_msg_close {
struct rpmem_msg_hdr hdr; /* message header */
uint32_t flags; /* flags */
} PACKED;
/*
* rpmem_msg_close_resp -- close request response message
*
* The type of message must be set to RPMEM_MSG_TYPE_CLOSE_RESP
* The size of message must be set to sizeof(struct rpmem_msg_close_resp)
*/
struct rpmem_msg_close_resp {
struct rpmem_msg_hdr_resp hdr; /* message header */
/* no more fields */
} PACKED;
#define RPMEM_FLUSH_WRITE 0U /* flush / persist using RDMA WRITE */
#define RPMEM_DEEP_PERSIST 1U /* deep persist operation */
#define RPMEM_PERSIST_SEND 2U /* persist using RDMA SEND */
#define RPMEM_COMPLETION 4U /* schedule command with a completion */
/* the two least significant bits are reserved for mode of persist */
#define RPMEM_FLUSH_PERSIST_MASK 0x3U
#define RPMEM_PERSIST_MAX 2U /* maximum valid persist value */
/*
* rpmem_msg_persist -- remote persist message
*/
struct rpmem_msg_persist {
uint32_t flags; /* lane flags */
uint32_t lane; /* lane identifier */
uint64_t addr; /* remote memory address */
uint64_t size; /* remote memory size */
uint8_t data[];
};
/*
* rpmem_msg_persist_resp -- remote persist response message
*/
struct rpmem_msg_persist_resp {
uint32_t flags; /* lane flags */
uint32_t lane; /* lane identifier */
};
/*
* rpmem_msg_set_attr -- set attributes request message
*
* The type of message must be set to RPMEM_MSG_TYPE_SET_ATTR.
* The size of message must be set to sizeof(struct rpmem_msg_set_attr)
*/
struct rpmem_msg_set_attr {
struct rpmem_msg_hdr hdr; /* message header */
struct rpmem_pool_attr_packed pool_attr; /* pool attributes */
} PACKED;
/*
* rpmem_msg_set_attr_resp -- set attributes request response message
*
* The type of message must be set to RPMEM_MSG_TYPE_SET_ATTR_RESP.
* The size of message must be set to sizeof(struct rpmem_msg_set_attr_resp).
*/
struct rpmem_msg_set_attr_resp {
struct rpmem_msg_hdr_resp hdr; /* message header */
} PACKED;
/*
* XXX Begin: Suppress gcc conversion warnings for FreeBSD be*toh macros.
*/
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
/*
* rpmem_ntoh_msg_ibc_attr -- convert rpmem_msg_ibc attr to host byte order
*/
static inline void
rpmem_ntoh_msg_ibc_attr(struct rpmem_msg_ibc_attr *ibc)
{
ibc->port = be32toh(ibc->port);
ibc->persist_method = be32toh(ibc->persist_method);
ibc->rkey = be64toh(ibc->rkey);
ibc->raddr = be64toh(ibc->raddr);
}
/*
* rpmem_ntoh_msg_pool_desc -- convert rpmem_msg_pool_desc to host byte order
*/
static inline void
rpmem_ntoh_msg_pool_desc(struct rpmem_msg_pool_desc *pool_desc)
{
pool_desc->size = be32toh(pool_desc->size);
}
/*
* rpmem_ntoh_pool_attr -- convert rpmem_pool_attr to host byte order
*/
static inline void
rpmem_ntoh_pool_attr(struct rpmem_pool_attr_packed *attr)
{
attr->major = be32toh(attr->major);
attr->ro_compat_features = be32toh(attr->ro_compat_features);
attr->incompat_features = be32toh(attr->incompat_features);
attr->compat_features = be32toh(attr->compat_features);
}
/*
* rpmem_ntoh_msg_hdr -- convert rpmem_msg_hdr to host byte order
*/
static inline void
rpmem_ntoh_msg_hdr(struct rpmem_msg_hdr *hdrp)
{
hdrp->type = be32toh(hdrp->type);
hdrp->size = be64toh(hdrp->size);
}
/*
* rpmem_hton_msg_hdr -- convert rpmem_msg_hdr to network byte order
*/
static inline void
rpmem_hton_msg_hdr(struct rpmem_msg_hdr *hdrp)
{
rpmem_ntoh_msg_hdr(hdrp);
}
/*
* rpmem_ntoh_msg_hdr_resp -- convert rpmem_msg_hdr_resp to host byte order
*/
static inline void
rpmem_ntoh_msg_hdr_resp(struct rpmem_msg_hdr_resp *hdrp)
{
hdrp->status = be32toh(hdrp->status);
hdrp->type = be32toh(hdrp->type);
hdrp->size = be64toh(hdrp->size);
}
/*
* rpmem_hton_msg_hdr_resp -- convert rpmem_msg_hdr_resp to network byte order
*/
static inline void
rpmem_hton_msg_hdr_resp(struct rpmem_msg_hdr_resp *hdrp)
{
rpmem_ntoh_msg_hdr_resp(hdrp);
}
/*
* rpmem_ntoh_msg_common -- convert rpmem_msg_common to host byte order
*/
static inline void
rpmem_ntoh_msg_common(struct rpmem_msg_common *msg)
{
msg->major = be16toh(msg->major);
msg->minor = be16toh(msg->minor);
msg->pool_size = be64toh(msg->pool_size);
msg->nlanes = be32toh(msg->nlanes);
msg->provider = be32toh(msg->provider);
msg->buff_size = be64toh(msg->buff_size);
}
/*
* rpmem_hton_msg_common -- convert rpmem_msg_common to network byte order
*/
static inline void
rpmem_hton_msg_common(struct rpmem_msg_common *msg)
{
rpmem_ntoh_msg_common(msg);
}
/*
* rpmem_ntoh_msg_create -- convert rpmem_msg_create to host byte order
*/
static inline void
rpmem_ntoh_msg_create(struct rpmem_msg_create *msg)
{
rpmem_ntoh_msg_hdr(&msg->hdr);
rpmem_ntoh_msg_common(&msg->c);
rpmem_ntoh_pool_attr(&msg->pool_attr);
rpmem_ntoh_msg_pool_desc(&msg->pool_desc);
}
/*
* rpmem_hton_msg_create -- convert rpmem_msg_create to network byte order
*/
static inline void
rpmem_hton_msg_create(struct rpmem_msg_create *msg)
{
rpmem_ntoh_msg_create(msg);
}
/*
* rpmem_ntoh_msg_create_resp -- convert rpmem_msg_create_resp to host byte
* order
*/
static inline void
rpmem_ntoh_msg_create_resp(struct rpmem_msg_create_resp *msg)
{
rpmem_ntoh_msg_hdr_resp(&msg->hdr);
rpmem_ntoh_msg_ibc_attr(&msg->ibc);
}
/*
* rpmem_hton_msg_create_resp -- convert rpmem_msg_create_resp to network byte
* order
*/
static inline void
rpmem_hton_msg_create_resp(struct rpmem_msg_create_resp *msg)
{
rpmem_ntoh_msg_create_resp(msg);
}
/*
* rpmem_ntoh_msg_open -- convert rpmem_msg_open to host byte order
*/
static inline void
rpmem_ntoh_msg_open(struct rpmem_msg_open *msg)
{
rpmem_ntoh_msg_hdr(&msg->hdr);
rpmem_ntoh_msg_common(&msg->c);
rpmem_ntoh_msg_pool_desc(&msg->pool_desc);
}
/*
* XXX End: Suppress gcc conversion warnings for FreeBSD be*toh macros
*/
#pragma GCC diagnostic pop
/*
* rpmem_hton_msg_open -- convert rpmem_msg_open to network byte order
*/
static inline void
rpmem_hton_msg_open(struct rpmem_msg_open *msg)
{
rpmem_ntoh_msg_open(msg);
}
/*
* rpmem_ntoh_msg_open_resp -- convert rpmem_msg_open_resp to host byte order
*/
static inline void
rpmem_ntoh_msg_open_resp(struct rpmem_msg_open_resp *msg)
{
rpmem_ntoh_msg_hdr_resp(&msg->hdr);
rpmem_ntoh_msg_ibc_attr(&msg->ibc);
rpmem_ntoh_pool_attr(&msg->pool_attr);
}
/*
* rpmem_hton_msg_open_resp -- convert rpmem_msg_open_resp to network byte order
*/
static inline void
rpmem_hton_msg_open_resp(struct rpmem_msg_open_resp *msg)
{
rpmem_ntoh_msg_open_resp(msg);
}
/*
* rpmem_ntoh_msg_set_attr -- convert rpmem_msg_set_attr to host byte order
*/
static inline void
rpmem_ntoh_msg_set_attr(struct rpmem_msg_set_attr *msg)
{
rpmem_ntoh_msg_hdr(&msg->hdr);
rpmem_ntoh_pool_attr(&msg->pool_attr);
}
/*
* rpmem_hton_msg_set_attr -- convert rpmem_msg_set_attr to network byte order
*/
static inline void
rpmem_hton_msg_set_attr(struct rpmem_msg_set_attr *msg)
{
rpmem_ntoh_msg_set_attr(msg);
}
/*
* rpmem_ntoh_msg_set_attr_resp -- convert rpmem_msg_set_attr_resp to host byte
* order
*/
static inline void
rpmem_ntoh_msg_set_attr_resp(struct rpmem_msg_set_attr_resp *msg)
{
rpmem_ntoh_msg_hdr_resp(&msg->hdr);
}
/*
* rpmem_hton_msg_set_attr_resp -- convert rpmem_msg_set_attr_resp to network
* byte order
*/
static inline void
rpmem_hton_msg_set_attr_resp(struct rpmem_msg_set_attr_resp *msg)
{
rpmem_hton_msg_hdr_resp(&msg->hdr);
}
/*
* rpmem_ntoh_msg_close -- convert rpmem_msg_close to host byte order
*/
static inline void
rpmem_ntoh_msg_close(struct rpmem_msg_close *msg)
{
rpmem_ntoh_msg_hdr(&msg->hdr);
}
/*
* rpmem_hton_msg_close -- convert rpmem_msg_close to network byte order
*/
static inline void
rpmem_hton_msg_close(struct rpmem_msg_close *msg)
{
rpmem_ntoh_msg_close(msg);
}
/*
* rpmem_ntoh_msg_close_resp -- convert rpmem_msg_close_resp to host byte order
*/
static inline void
rpmem_ntoh_msg_close_resp(struct rpmem_msg_close_resp *msg)
{
rpmem_ntoh_msg_hdr_resp(&msg->hdr);
}
/*
* rpmem_hton_msg_close_resp -- convert rpmem_msg_close_resp to network byte
* order
*/
static inline void
rpmem_hton_msg_close_resp(struct rpmem_msg_close_resp *msg)
{
rpmem_ntoh_msg_close_resp(msg);
}
/*
* pack_rpmem_pool_attr -- copy pool attributes to a packed structure
*/
static inline void
pack_rpmem_pool_attr(const struct rpmem_pool_attr *src,
struct rpmem_pool_attr_packed *dst)
{
memcpy(dst->signature, src->signature, sizeof(src->signature));
dst->major = src->major;
dst->compat_features = src->compat_features;
dst->incompat_features = src->incompat_features;
dst->ro_compat_features = src->ro_compat_features;
memcpy(dst->poolset_uuid, src->poolset_uuid, sizeof(dst->poolset_uuid));
memcpy(dst->uuid, src->uuid, sizeof(dst->uuid));
memcpy(dst->next_uuid, src->next_uuid, sizeof(dst->next_uuid));
memcpy(dst->prev_uuid, src->prev_uuid, sizeof(dst->prev_uuid));
memcpy(dst->user_flags, src->user_flags, sizeof(dst->user_flags));
}
/*
* unpack_rpmem_pool_attr -- copy pool attributes to an unpacked structure
*/
static inline void
unpack_rpmem_pool_attr(const struct rpmem_pool_attr_packed *src,
struct rpmem_pool_attr *dst)
{
memcpy(dst->signature, src->signature, sizeof(src->signature));
dst->major = src->major;
dst->compat_features = src->compat_features;
dst->incompat_features = src->incompat_features;
dst->ro_compat_features = src->ro_compat_features;
memcpy(dst->poolset_uuid, src->poolset_uuid, sizeof(dst->poolset_uuid));
memcpy(dst->uuid, src->uuid, sizeof(dst->uuid));
memcpy(dst->next_uuid, src->next_uuid, sizeof(dst->next_uuid));
memcpy(dst->prev_uuid, src->prev_uuid, sizeof(dst->prev_uuid));
memcpy(dst->user_flags, src->user_flags, sizeof(dst->user_flags));
}
#ifdef __cplusplus
}
#endif
#endif

NearPMSW-main/nearpm/shadow/pmdk-sd/src/rpmem_common/rpmem_common.c

// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2020, Intel Corporation */
/*
* rpmem_common.c -- common definitions for librpmem and rpmemd
*/
#include <unistd.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <limits.h>
#include <netdb.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <netinet/tcp.h>
#include "util.h"
#include "rpmem_common.h"
#include "rpmem_proto.h"
#include "rpmem_common_log.h"
#include "os.h"
unsigned Rpmem_max_nlanes = UINT_MAX;
/*
* A work queue of size 50 gives the best performance for consecutive
* rpmem_flush operations with the smallest resource usage. The default
* value was obtained empirically.
*/
unsigned Rpmem_wq_size = 50;
/*
* If set, indicates libfabric does not support fork() and subsequent calls to
* rpmem_create/rpmem_open must fail.
*/
int Rpmem_fork_unsafe;
/*
* rpmem_xwrite -- send entire buffer or fail
*
* Returns 1 if send returned 0.
*/
int
rpmem_xwrite(int fd, const void *buf, size_t len, int flags)
{
size_t wr = 0;
const uint8_t *cbuf = buf;
while (wr < len) {
ssize_t sret;
if (!flags)
sret = write(fd, &cbuf[wr], len - wr);
else
sret = send(fd, &cbuf[wr], len - wr, flags);
if (sret == 0)
return 1;
if (sret < 0)
return (int)sret;
wr += (size_t)sret;
}
return 0;
}
/*
* rpmem_xread -- read entire buffer or fail
*
* Returns 1 if recv returned 0.
*/
int
rpmem_xread(int fd, void *buf, size_t len, int flags)
{
size_t rd = 0;
uint8_t *cbuf = buf;
while (rd < len) {
ssize_t sret;
if (!flags)
sret = read(fd, &cbuf[rd], len - rd);
else
sret = recv(fd, &cbuf[rd], len - rd, flags);
if (sret == 0) {
RPMEMC_DBG(ERR, "recv/read returned 0");
return 1;
}
if (sret < 0)
return (int)sret;
rd += (size_t)sret;
}
return 0;
}
static const char *pm2str[MAX_RPMEM_PM] = {
[RPMEM_PM_APM] = "Appliance Persistency Method",
[RPMEM_PM_GPSPM] = "General Purpose Server Persistency Method",
};
/*
* rpmem_persist_method_to_str -- convert enum rpmem_persist_method to string
*/
const char *
rpmem_persist_method_to_str(enum rpmem_persist_method pm)
{
if (pm >= MAX_RPMEM_PM)
return NULL;
return pm2str[pm];
}
static const char *provider2str[MAX_RPMEM_PROV] = {
[RPMEM_PROV_LIBFABRIC_VERBS] = "verbs",
[RPMEM_PROV_LIBFABRIC_SOCKETS] = "sockets",
};
/*
* rpmem_provider_from_str -- convert string to enum rpmem_provider
*
* Returns RPMEM_PROV_UNKNOWN if provider is not known.
*/
enum rpmem_provider
rpmem_provider_from_str(const char *str)
{
for (enum rpmem_provider p = 0; p < MAX_RPMEM_PROV; p++) {
if (provider2str[p] && strcmp(str, provider2str[p]) == 0)
return p;
}
return RPMEM_PROV_UNKNOWN;
}
/*
* rpmem_provider_to_str -- convert enum rpmem_provider to string
*/
const char *
rpmem_provider_to_str(enum rpmem_provider provider)
{
if (provider >= MAX_RPMEM_PROV)
return NULL;
return provider2str[provider];
}
/*
* rpmem_get_ip_str -- converts socket address to string
*/
const char *
rpmem_get_ip_str(const struct sockaddr *addr)
{
static char str[INET6_ADDRSTRLEN + NI_MAXSERV + 1];
char ip[INET6_ADDRSTRLEN];
struct sockaddr_in *in4;
struct sockaddr_in6 *in6;
switch (addr->sa_family) {
case AF_INET:
in4 = (struct sockaddr_in *)addr;
if (!inet_ntop(AF_INET, &in4->sin_addr, ip, sizeof(ip)))
return NULL;
if (util_snprintf(str, sizeof(str), "%s:%u",
ip, ntohs(in4->sin_port)) < 0)
return NULL;
break;
case AF_INET6:
in6 = (struct sockaddr_in6 *)addr;
if (!inet_ntop(AF_INET6, &in6->sin6_addr, ip, sizeof(ip)))
return NULL;
if (util_snprintf(str, sizeof(str), "%s:%u",
ip, ntohs(in6->sin6_port)) < 0)
return NULL;
break;
default:
return NULL;
}
return str;
}
/*
* rpmem_target_parse -- parse target info
*/
struct rpmem_target_info *
rpmem_target_parse(const char *target)
{
struct rpmem_target_info *info = calloc(1, sizeof(*info));
if (!info)
return NULL;
char *str = strdup(target);
if (!str)
goto err_strdup;
char *tmp = strchr(str, '@');
if (tmp) {
*tmp = '\0';
info->flags |= RPMEM_HAS_USER;
strncpy(info->user, str, sizeof(info->user) - 1);
tmp++;
} else {
tmp = str;
}
if (*tmp == '[') {
tmp++;
/* IPv6 */
char *end = strchr(tmp, ']');
if (!end) {
errno = EINVAL;
goto err_ipv6;
}
*end = '\0';
strncpy(info->node, tmp, sizeof(info->node) - 1);
tmp = end + 1;
end = strchr(tmp, ':');
if (end) {
*end = '\0';
end++;
info->flags |= RPMEM_HAS_SERVICE;
strncpy(info->service, end, sizeof(info->service) - 1);
}
} else {
char *first = strchr(tmp, ':');
char *last = strrchr(tmp, ':');
if (first == last) {
/* IPv4 - one colon */
if (first) {
*first = '\0';
first++;
info->flags |= RPMEM_HAS_SERVICE;
strncpy(info->service, first,
sizeof(info->service) - 1);
}
}
strncpy(info->node, tmp, sizeof(info->node) - 1);
}
if (*info->node == '\0') {
errno = EINVAL;
goto err_node;
}
free(str);
/* make sure that user, node and service are NULL-terminated */
info->user[sizeof(info->user) - 1] = '\0';
info->node[sizeof(info->node) - 1] = '\0';
info->service[sizeof(info->service) - 1] = '\0';
return info;
err_node:
err_ipv6:
free(str);
err_strdup:
free(info);
return NULL;
}
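/*
 * Illustrative note (not part of the original source): target formats
 * accepted by rpmem_target_parse() above:
 *
 *	"host"			-> node = "host"
 *	"host:2345"		-> node = "host", service = "2345"
 *	"user@host:2345"	-> user = "user", node = "host", service = "2345"
 *	"[::1]:2345"		-> node = "::1", service = "2345"
 */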
/*
* rpmem_target_free -- free target info
*/
void
rpmem_target_free(struct rpmem_target_info *info)
{
free(info);
}
/*
* rpmem_get_ssh_conn_addr -- returns the address on which the ssh connection
* is established
*
* This function utilizes the SSH_CONNECTION environment variable to retrieve
* the server IP address. See ssh(1) for details.
*/
char *
rpmem_get_ssh_conn_addr(void)
{
char *ssh_conn = os_getenv("SSH_CONNECTION");
if (!ssh_conn) {
RPMEMC_LOG(ERR, "SSH_CONNECTION variable is not set");
return NULL;
}
char *sp = strchr(ssh_conn, ' ');
if (!sp)
goto err_fmt;
char *addr = strchr(sp + 1, ' ');
if (!addr)
goto err_fmt;
addr++;
sp = strchr(addr, ' ');
if (!sp)
goto err_fmt;
*sp = '\0';
return addr;
err_fmt:
RPMEMC_LOG(ERR, "invalid format of SSH_CONNECTION variable");
return NULL;
}
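/*
 * Illustrative note (not part of the original source): per ssh(1),
 * SSH_CONNECTION has the form "client_ip client_port server_ip server_port",
 * so for "10.0.0.1 51234 10.0.0.2 22" the function above returns "10.0.0.2"
 * (the third token); the returned pointer references the environment's
 * storage, which the function modifies in place.
 */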

NearPMSW-main/nearpm/shadow/pmdk-sd/src/rpmem_common/rpmem_fip_lane.h

// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2017, Intel Corporation */
/*
* rpmem_fip_lane.h -- rpmem fabric provider lane definition
*/
#include <sched.h>
#include <stdint.h>
#include "sys_util.h"
/*
* rpmem_fip_lane -- basic lane structure
*
* This structure consists of a synchronization object and a return value.
* It is possible to wait on the lane for a specified event. The event can be
* signalled by another thread, which can pass the return value if required.
*
* The sync variable can store up to 64 different events, each event on
* separate bit.
*/
struct rpmem_fip_lane {
os_spinlock_t lock;
int ret;
uint64_t sync;
};
/*
* rpmem_fip_lane_init -- initialize basic lane structure
*/
static inline int
rpmem_fip_lane_init(struct rpmem_fip_lane *lanep)
{
lanep->ret = 0;
lanep->sync = 0;
return util_spin_init(&lanep->lock, PTHREAD_PROCESS_PRIVATE);
}
/*
* rpmem_fip_lane_fini -- deinitialize basic lane structure
*/
static inline void
rpmem_fip_lane_fini(struct rpmem_fip_lane *lanep)
{
util_spin_destroy(&lanep->lock);
}
/*
* rpmem_fip_lane_busy -- return true if lane has pending events
*/
static inline int
rpmem_fip_lane_busy(struct rpmem_fip_lane *lanep)
{
util_spin_lock(&lanep->lock);
int ret = lanep->sync != 0;
util_spin_unlock(&lanep->lock);
return ret;
}
/*
* rpmem_fip_lane_begin -- begin waiting for specified event(s)
*/
static inline void
rpmem_fip_lane_begin(struct rpmem_fip_lane *lanep, uint64_t sig)
{
util_spin_lock(&lanep->lock);
lanep->ret = 0;
lanep->sync |= sig;
util_spin_unlock(&lanep->lock);
}
static inline int
rpmem_fip_lane_is_busy(struct rpmem_fip_lane *lanep, uint64_t sig)
{
util_spin_lock(&lanep->lock);
int ret = (lanep->sync & sig) != 0;
util_spin_unlock(&lanep->lock);
return ret;
}
static inline int
rpmem_fip_lane_ret(struct rpmem_fip_lane *lanep)
{
util_spin_lock(&lanep->lock);
int ret = lanep->ret;
util_spin_unlock(&lanep->lock);
return ret;
}
/*
* rpmem_fip_lane_wait -- wait for specified event(s)
*/
static inline int
rpmem_fip_lane_wait(struct rpmem_fip_lane *lanep, uint64_t sig)
{
while (rpmem_fip_lane_is_busy(lanep, sig))
sched_yield();
return rpmem_fip_lane_ret(lanep);
}
/*
* rpmem_fip_lane_signal -- signal lane about specified event
*/
static inline void
rpmem_fip_lane_signal(struct rpmem_fip_lane *lanep, uint64_t sig)
{
util_spin_lock(&lanep->lock);
lanep->sync &= ~sig;
util_spin_unlock(&lanep->lock);
}
/*
* rpmem_fip_lane_sigret -- signal lane about specified event and store
* return value
*/
static inline void
rpmem_fip_lane_sigret(struct rpmem_fip_lane *lanep, uint64_t sig, int ret)
{
util_spin_lock(&lanep->lock);
lanep->ret = ret;
lanep->sync &= ~sig;
util_spin_unlock(&lanep->lock);
}
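/*
 * Illustrative sketch (not part of the original header): typical
 * issuer/completer use of a lane, with a hypothetical event bit.
 *
 *	#define EVENT_PERSIST (1ULL << 0)	// example event bit
 *
 *	// issuing thread:
 *	rpmem_fip_lane_begin(&lane, EVENT_PERSIST);
 *	// ... post the operation ...
 *	int ret = rpmem_fip_lane_wait(&lane, EVENT_PERSIST);
 *
 *	// completion thread:
 *	rpmem_fip_lane_sigret(&lane, EVENT_PERSIST, status);
 */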

NearPMSW-main/nearpm/shadow/pmdk-sd/src/rpmem_common/rpmem_fip_msg.h

// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* rpmem_fip_msg.h -- simple wrappers for fi_rma(3) and fi_msg(3) functions
*/
#ifndef RPMEM_FIP_MSG_H
#define RPMEM_FIP_MSG_H 1
#include <rdma/fi_rma.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
* rpmem_fip_rma -- helper struct for RMA operation
*/
struct rpmem_fip_rma {
struct fi_msg_rma msg; /* message structure */
struct iovec msg_iov; /* IO vector buffer */
struct fi_rma_iov rma_iov; /* RMA IO vector buffer */
void *desc; /* local memory descriptor */
uint64_t flags; /* RMA operation flags */
};
/*
* rpmem_fip_msg -- helper struct for MSG operation
*/
struct rpmem_fip_msg {
struct fi_msg msg; /* message structure */
struct iovec iov; /* IO vector buffer */
void *desc; /* local memory descriptor */
uint64_t flags; /* MSG operation flags */
};
/*
* rpmem_fip_rma_init -- initialize RMA helper struct
*/
static inline void
rpmem_fip_rma_init(struct rpmem_fip_rma *rma, void *desc,
fi_addr_t addr, uint64_t rkey, void *context, uint64_t flags)
{
memset(rma, 0, sizeof(*rma));
rma->desc = desc;
rma->flags = flags;
rma->rma_iov.key = rkey;
rma->msg.context = context;
rma->msg.addr = addr;
rma->msg.desc = &rma->desc;
rma->msg.rma_iov = &rma->rma_iov;
rma->msg.rma_iov_count = 1;
rma->msg.msg_iov = &rma->msg_iov;
rma->msg.iov_count = 1;
}
/*
* rpmem_fip_msg_init -- initialize MSG helper struct
*/
static inline void
rpmem_fip_msg_init(struct rpmem_fip_msg *msg, void *desc, fi_addr_t addr,
void *context, void *buff, size_t len, uint64_t flags)
{
memset(msg, 0, sizeof(*msg));
msg->desc = desc;
msg->flags = flags;
msg->iov.iov_base = buff;
msg->iov.iov_len = len;
msg->msg.context = context;
msg->msg.addr = addr;
msg->msg.desc = &msg->desc;
msg->msg.msg_iov = &msg->iov;
msg->msg.iov_count = 1;
}
/*
* rpmem_fip_writemsg -- wrapper for fi_writemsg
*/
static inline int
rpmem_fip_writemsg(struct fid_ep *ep, struct rpmem_fip_rma *rma,
const void *buff, size_t len, uint64_t addr)
{
rma->rma_iov.addr = addr;
rma->rma_iov.len = len;
rma->msg_iov.iov_base = (void *)buff;
rma->msg_iov.iov_len = len;
return (int)fi_writemsg(ep, &rma->msg, rma->flags);
}
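/*
 * Illustrative sketch (not part of the original header): initializing an RMA
 * helper once and reusing it for writes; ep, mr_desc, rkey, raddr, local_buf
 * and len are placeholders for objects obtained elsewhere.
 *
 *	struct rpmem_fip_rma rma;
 *	rpmem_fip_rma_init(&rma, mr_desc, FI_ADDR_UNSPEC, rkey,
 *			&context, FI_COMPLETION);
 *	int ret = rpmem_fip_writemsg(ep, &rma, local_buf, len, raddr);
 */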
/*
* rpmem_fip_readmsg -- wrapper for fi_readmsg
*/
static inline int
rpmem_fip_readmsg(struct fid_ep *ep, struct rpmem_fip_rma *rma,
void *buff, size_t len, uint64_t addr)
{
rma->rma_iov.addr = addr;
rma->rma_iov.len = len;
rma->msg_iov.iov_base = buff;
rma->msg_iov.iov_len = len;
return (int)fi_readmsg(ep, &rma->msg, rma->flags);
}
/*
* rpmem_fip_sendmsg -- wrapper for fi_sendmsg
*/
static inline int
rpmem_fip_sendmsg(struct fid_ep *ep, struct rpmem_fip_msg *msg, size_t len)
{
msg->iov.iov_len = len;
return (int)fi_sendmsg(ep, &msg->msg, msg->flags);
}
/*
* rpmem_fip_recvmsg -- wrapper for fi_recvmsg
*/
static inline int
rpmem_fip_recvmsg(struct fid_ep *ep, struct rpmem_fip_msg *msg)
{
return (int)fi_recvmsg(ep, &msg->msg, msg->flags);
}
/*
* rpmem_fip_msg_get_pmsg -- returns message buffer as a persist message
*/
static inline struct rpmem_msg_persist *
rpmem_fip_msg_get_pmsg(struct rpmem_fip_msg *msg)
{
return (struct rpmem_msg_persist *)msg->iov.iov_base;
}
/*
* rpmem_fip_msg_get_pres -- returns message buffer as a persist response
*/
static inline struct rpmem_msg_persist_resp *
rpmem_fip_msg_get_pres(struct rpmem_fip_msg *msg)
{
return (struct rpmem_msg_persist_resp *)msg->iov.iov_base;
}
#ifdef __cplusplus
}
#endif
#endif
| 3,494 | 22.77551 | 75 |
h
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmempool/libpmempool.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* libpmempool.c -- entry points for libpmempool
*/
#include <stdlib.h>
#include <stdint.h>
#include <errno.h>
#include <sys/param.h>
#include "pmemcommon.h"
#include "libpmempool.h"
#include "pmempool.h"
#include "pool.h"
#include "check.h"
#ifdef USE_RPMEM
#include "rpmem_common.h"
#include "rpmem_util.h"
#endif
#ifdef _WIN32
#define ANSWER_BUFFSIZE 256
#endif
/*
* libpmempool_init -- load-time initialization for libpmempool
*
* Called automatically by the run-time loader.
*/
ATTR_CONSTRUCTOR
void
libpmempool_init(void)
{
common_init(PMEMPOOL_LOG_PREFIX, PMEMPOOL_LOG_LEVEL_VAR,
PMEMPOOL_LOG_FILE_VAR, PMEMPOOL_MAJOR_VERSION,
PMEMPOOL_MINOR_VERSION);
LOG(3, NULL);
#ifdef USE_RPMEM
util_remote_init();
rpmem_util_cmds_init();
#endif
}
/*
* libpmempool_fini -- libpmempool cleanup routine
*
* Called automatically when the process terminates.
*/
ATTR_DESTRUCTOR
void
libpmempool_fini(void)
{
LOG(3, NULL);
#ifdef USE_RPMEM
util_remote_unload();
util_remote_fini();
rpmem_util_cmds_fini();
#endif
common_fini();
}
/*
* pmempool_check_versionU -- see if library meets application version
* requirements
*/
#ifndef _WIN32
static inline
#endif
const char *
pmempool_check_versionU(unsigned major_required, unsigned minor_required)
{
LOG(3, "major_required %u minor_required %u",
major_required, minor_required);
if (major_required != PMEMPOOL_MAJOR_VERSION) {
ERR("libpmempool major version mismatch (need %u, found %u)",
major_required, PMEMPOOL_MAJOR_VERSION);
return out_get_errormsg();
}
if (minor_required > PMEMPOOL_MINOR_VERSION) {
ERR("libpmempool minor version mismatch (need %u, found %u)",
minor_required, PMEMPOOL_MINOR_VERSION);
return out_get_errormsg();
}
return NULL;
}
#ifndef _WIN32
/*
* pmempool_check_version -- see if lib meets application version requirements
*/
const char *
pmempool_check_version(unsigned major_required, unsigned minor_required)
{
return pmempool_check_versionU(major_required, minor_required);
}
#else
/*
* pmempool_check_versionW -- see if library meets application version
* requirements as widechar
*/
const wchar_t *
pmempool_check_versionW(unsigned major_required, unsigned minor_required)
{
if (pmempool_check_versionU(major_required, minor_required) != NULL)
return out_get_errormsgW();
else
return NULL;
}
#endif
/*
* pmempool_errormsgU -- return last error message
*/
#ifndef _WIN32
static inline
#endif
const char *
pmempool_errormsgU(void)
{
return out_get_errormsg();
}
#ifndef _WIN32
/*
* pmempool_errormsg -- return last error message
*/
const char *
pmempool_errormsg(void)
{
return pmempool_errormsgU();
}
#else
/*
* pmempool_errormsgW -- return last error message as widechar
*/
const wchar_t *
pmempool_errormsgW(void)
{
return out_get_errormsgW();
}
#endif
/*
* pmempool_ppc_set_default -- (internal) set default values of check context
*/
static void
pmempool_ppc_set_default(PMEMpoolcheck *ppc)
{
/* all other fields should be zeroed */
const PMEMpoolcheck ppc_default = {
.args = {
.pool_type = PMEMPOOL_POOL_TYPE_DETECT,
},
.result = CHECK_RESULT_CONSISTENT,
};
*ppc = ppc_default;
}
/*
* pmempool_check_initU -- initialize check context
*/
#ifndef _WIN32
static inline
#endif
PMEMpoolcheck *
pmempool_check_initU(struct pmempool_check_argsU *args, size_t args_size)
{
LOG(3, "path %s backup_path %s pool_type %u flags %x", args->path,
args->backup_path, args->pool_type, args->flags);
/*
* Currently one size of args structure is supported. The version of the
* pmempool_check_args structure can be distinguished based on provided
* args_size.
*/
if (args_size < sizeof(struct pmempool_check_args)) {
ERR("provided args_size is not supported");
errno = EINVAL;
return NULL;
}
/*
* Dry run does not allow making the changes that repair might perform.
* Advanced allows more complex repairs. Questions are asked only if
* repairs are made. So dry_run, advanced and always_yes can be set only
* if repair is set.
*/
if (util_flag_isclr(args->flags, PMEMPOOL_CHECK_REPAIR) &&
util_flag_isset(args->flags, PMEMPOOL_CHECK_DRY_RUN |
PMEMPOOL_CHECK_ADVANCED | PMEMPOOL_CHECK_ALWAYS_YES)) {
ERR("dry_run, advanced and always_yes are applicable only if "
"repair is set");
errno = EINVAL;
return NULL;
}
/*
* dry run does not modify anything so performing backup is redundant
*/
if (util_flag_isset(args->flags, PMEMPOOL_CHECK_DRY_RUN) &&
args->backup_path != NULL) {
ERR("dry run does not allow one to perform backup");
errno = EINVAL;
return NULL;
}
/*
* libpmempool uses str format of communication so it must be set
*/
if (util_flag_isclr(args->flags, PMEMPOOL_CHECK_FORMAT_STR)) {
ERR("PMEMPOOL_CHECK_FORMAT_STR flag must be set");
errno = EINVAL;
return NULL;
}
PMEMpoolcheck *ppc = calloc(1, sizeof(*ppc));
if (ppc == NULL) {
ERR("!calloc");
return NULL;
}
pmempool_ppc_set_default(ppc);
memcpy(&ppc->args, args, sizeof(ppc->args));
ppc->path = strdup(args->path);
if (!ppc->path) {
ERR("!strdup");
goto error_path_malloc;
}
ppc->args.path = ppc->path;
if (args->backup_path != NULL) {
ppc->backup_path = strdup(args->backup_path);
if (!ppc->backup_path) {
ERR("!strdup");
goto error_backup_path_malloc;
}
ppc->args.backup_path = ppc->backup_path;
}
if (check_init(ppc) != 0)
goto error_check_init;
return ppc;
error_check_init:
/* in case errno not set by any of the used functions set its value */
if (errno == 0)
errno = EINVAL;
free(ppc->backup_path);
error_backup_path_malloc:
free(ppc->path);
error_path_malloc:
free(ppc);
return NULL;
}
#ifndef _WIN32
/*
* pmempool_check_init -- initialize check context
*/
PMEMpoolcheck *
pmempool_check_init(struct pmempool_check_args *args, size_t args_size)
{
return pmempool_check_initU(args, args_size);
}
#else
/*
* pmempool_check_initW -- initialize check context as widechar
*/
PMEMpoolcheck *
pmempool_check_initW(struct pmempool_check_argsW *args, size_t args_size)
{
char *upath = util_toUTF8(args->path);
if (upath == NULL)
return NULL;
char *ubackup_path = NULL;
if (args->backup_path != NULL) {
ubackup_path = util_toUTF8(args->backup_path);
if (ubackup_path == NULL) {
util_free_UTF8(upath);
return NULL;
}
}
struct pmempool_check_argsU uargs = {
.path = upath,
.backup_path = ubackup_path,
.pool_type = args->pool_type,
.flags = args->flags
};
PMEMpoolcheck *ret = pmempool_check_initU(&uargs, args_size);
util_free_UTF8(ubackup_path);
util_free_UTF8(upath);
return ret;
}
#endif
/*
* pmempool_checkU -- continue the check until a status is produced for the
* caller to consume
*/
#ifndef _WIN32
static inline
#endif
struct pmempool_check_statusU *
pmempool_checkU(PMEMpoolcheck *ppc)
{
LOG(3, NULL);
ASSERTne(ppc, NULL);
struct check_status *result;
do {
result = check_step(ppc);
if (check_is_end(ppc->data) && result == NULL)
return NULL;
} while (result == NULL);
return check_status_get(result);
}
#ifndef _WIN32
/*
* pmempool_check -- continue the check until a status is produced for the
* caller to consume
*/
struct pmempool_check_status *
pmempool_check(PMEMpoolcheck *ppc)
{
return pmempool_checkU(ppc);
}
#else
/*
* pmempool_checkW -- continue the check until a status is produced for the
* caller to consume
*/
struct pmempool_check_statusW *
pmempool_checkW(PMEMpoolcheck *ppc)
{
LOG(3, NULL);
ASSERTne(ppc, NULL);
/* check the cache and convert msg and answer */
char buf[ANSWER_BUFFSIZE];
memset(buf, 0, ANSWER_BUFFSIZE);
convert_status_cache(ppc, buf, ANSWER_BUFFSIZE);
struct check_status *uresult;
do {
uresult = check_step(ppc);
if (check_is_end(ppc->data) && uresult == NULL)
return NULL;
} while (uresult == NULL);
struct pmempool_check_statusU *uret_res = check_status_get(uresult);
const wchar_t *wmsg = util_toUTF16(uret_res->str.msg);
if (wmsg == NULL)
FATAL("!malloc");
struct pmempool_check_statusW *wret_res =
(struct pmempool_check_statusW *)uret_res;
/* pointer to old message is freed in next check step */
wret_res->str.msg = wmsg;
return wret_res;
}
#endif
/*
* pmempool_check_end -- end check and release check context
*/
enum pmempool_check_result
pmempool_check_end(PMEMpoolcheck *ppc)
{
LOG(3, NULL);
const enum check_result result = ppc->result;
const unsigned sync_required = ppc->sync_required;
check_fini(ppc);
free(ppc->path);
free(ppc->backup_path);
free(ppc);
if (sync_required) {
switch (result) {
case CHECK_RESULT_CONSISTENT:
case CHECK_RESULT_REPAIRED:
return PMEMPOOL_CHECK_RESULT_SYNC_REQ;
default:
/* other results require fixing prior to sync */
;
}
}
switch (result) {
case CHECK_RESULT_CONSISTENT:
return PMEMPOOL_CHECK_RESULT_CONSISTENT;
case CHECK_RESULT_NOT_CONSISTENT:
return PMEMPOOL_CHECK_RESULT_NOT_CONSISTENT;
case CHECK_RESULT_REPAIRED:
return PMEMPOOL_CHECK_RESULT_REPAIRED;
case CHECK_RESULT_CANNOT_REPAIR:
return PMEMPOOL_CHECK_RESULT_CANNOT_REPAIR;
default:
return PMEMPOOL_CHECK_RESULT_ERROR;
}
}
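/*
 * Illustrative sketch (not part of the original source): the consumer-side
 * loop for the check API defined above. Error handling trimmed; the path is
 * a placeholder.
 *
 *	struct pmempool_check_args args = {
 *		.path = "/path/to/pool",
 *		.backup_path = NULL,
 *		.pool_type = PMEMPOOL_POOL_TYPE_DETECT,
 *		.flags = PMEMPOOL_CHECK_FORMAT_STR | PMEMPOOL_CHECK_REPAIR,
 *	};
 *	PMEMpoolcheck *ppc = pmempool_check_init(&args, sizeof(args));
 *	struct pmempool_check_status *status;
 *	while ((status = pmempool_check(ppc)) != NULL)
 *		; // consume status->str.msg, answer questions if asked
 *	enum pmempool_check_result result = pmempool_check_end(ppc);
 */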

NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmempool/replica.h

// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2020, Intel Corporation */
/*
* replica.h -- module for synchronizing and transforming poolset
*/
#ifndef REPLICA_H
#define REPLICA_H
#include "libpmempool.h"
#include "pool.h"
#include "badblocks.h"
#ifdef __cplusplus
extern "C" {
#endif
#define UNDEF_REPLICA UINT_MAX
#define UNDEF_PART UINT_MAX
/*
* A part marked as broken does not exist or is damaged so that
* it cannot be opened and has to be recreated.
*/
#define IS_BROKEN (1U << 0)
/*
* A replica marked as inconsistent exists but has inconsistent metadata
* (e.g. inconsistent parts or replicas linkage)
*/
#define IS_INCONSISTENT (1U << 1)
/*
* A part or replica marked in this way has bad blocks inside.
*/
#define HAS_BAD_BLOCKS (1U << 2)
/*
* A part marked in this way has bad blocks in the header
*/
#define HAS_CORRUPTED_HEADER (1U << 3)
/*
* A flag which can be passed to sync_replica() to indicate that the function is
* called by pmempool_transform
*/
#define IS_TRANSFORMED (1U << 10)
/*
* Number of lanes utilized when working with remote replicas
*/
#define REMOTE_NLANES 1
/*
* Helping structures for storing part's health status
*/
struct part_health_status {
unsigned flags;
struct badblocks bbs; /* structure with bad blocks */
char *recovery_file_name; /* name of bad block recovery file */
int recovery_file_exists; /* bad block recovery file exists */
};
/*
* Helping structures for storing replica and poolset's health status
*/
struct replica_health_status {
unsigned nparts;
unsigned nhdrs;
/* a flag for the replica */
unsigned flags;
/* effective size of a pool, valid only for healthy replica */
size_t pool_size;
/* flags for each part */
struct part_health_status part[];
};
struct poolset_health_status {
unsigned nreplicas;
/* a flag for the poolset */
unsigned flags;
/* health statuses for each replica */
struct replica_health_status *replica[];
};
/* get index of the (r)th replica health status */
static inline unsigned
REP_HEALTHidx(struct poolset_health_status *set, unsigned r)
{
ASSERTne(set->nreplicas, 0);
return (set->nreplicas + r) % set->nreplicas;
}
/* get index of the (r + 1)th replica health status */
static inline unsigned
REPN_HEALTHidx(struct poolset_health_status *set, unsigned r)
{
ASSERTne(set->nreplicas, 0);
return (set->nreplicas + r + 1) % set->nreplicas;
}
/* get (p)th part health status */
static inline unsigned
PART_HEALTHidx(struct replica_health_status *rep, unsigned p)
{
ASSERTne(rep->nparts, 0);
return (rep->nparts + p) % rep->nparts;
}
/* get (r)th replica health status */
static inline struct replica_health_status *
REP_HEALTH(struct poolset_health_status *set, unsigned r)
{
return set->replica[REP_HEALTHidx(set, r)];
}
/* get (p)th part health status */
static inline unsigned
PART_HEALTH(struct replica_health_status *rep, unsigned p)
{
return rep->part[PART_HEALTHidx(rep, p)].flags;
}
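/*
 * Illustrative note (not part of the original header): the modulo in the
 * index helpers above lets callers pass an index equal to the count to wrap
 * around, e.g. REP_HEALTH(set, set->nreplicas) == set->replica[0], which
 * simplifies iterating over neighbouring replicas.
 */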
uint64_t replica_get_part_offset(struct pool_set *set,
unsigned repn, unsigned partn);
void replica_align_badblock_offset_length(size_t *offset, size_t *length,
struct pool_set *set_in, unsigned repn, unsigned partn);
size_t replica_get_part_data_len(struct pool_set *set_in, unsigned repn,
unsigned partn);
uint64_t replica_get_part_data_offset(struct pool_set *set_in, unsigned repn,
unsigned part);
/*
* is_dry_run -- (internal) check whether only verification mode is enabled
*/
static inline bool
is_dry_run(unsigned flags)
{
/*
* PMEMPOOL_SYNC_DRY_RUN and PMEMPOOL_TRANSFORM_DRY_RUN
* have to have the same value in order to use this common function.
*/
ASSERT_COMPILE_ERROR_ON(PMEMPOOL_SYNC_DRY_RUN !=
PMEMPOOL_TRANSFORM_DRY_RUN);
return flags & PMEMPOOL_SYNC_DRY_RUN;
}
/*
* fix_bad_blocks -- (internal) fix bad blocks - it causes reading or creating
* bad blocks recovery files
* (depending on if they exist or not)
*/
static inline bool
fix_bad_blocks(unsigned flags)
{
return flags & PMEMPOOL_SYNC_FIX_BAD_BLOCKS;
}
int replica_remove_all_recovery_files(struct poolset_health_status *set_hs);
int replica_remove_part(struct pool_set *set, unsigned repn, unsigned partn,
int fix_bad_blocks);
int replica_create_poolset_health_status(struct pool_set *set,
struct poolset_health_status **set_hsp);
void replica_free_poolset_health_status(struct poolset_health_status *set_s);
int replica_check_poolset_health(struct pool_set *set,
struct poolset_health_status **set_hs,
int called_from_sync, unsigned flags);
int replica_is_part_broken(unsigned repn, unsigned partn,
struct poolset_health_status *set_hs);
int replica_has_bad_blocks(unsigned repn, struct poolset_health_status *set_hs);
int replica_part_has_bad_blocks(struct part_health_status *phs);
int replica_part_has_corrupted_header(unsigned repn, unsigned partn,
struct poolset_health_status *set_hs);
unsigned replica_find_unbroken_part(unsigned repn,
struct poolset_health_status *set_hs);
int replica_is_replica_broken(unsigned repn,
struct poolset_health_status *set_hs);
int replica_is_replica_consistent(unsigned repn,
struct poolset_health_status *set_hs);
int replica_is_replica_healthy(unsigned repn,
struct poolset_health_status *set_hs);
unsigned replica_find_healthy_replica(
struct poolset_health_status *set_hs);
unsigned replica_find_replica_healthy_header(
struct poolset_health_status *set_hs);
int replica_is_poolset_healthy(struct poolset_health_status *set_hs);
int replica_is_poolset_transformed(unsigned flags);
ssize_t replica_get_pool_size(struct pool_set *set, unsigned repn);
int replica_check_part_sizes(struct pool_set *set, size_t min_size);
int replica_check_part_dirs(struct pool_set *set);
int replica_check_local_part_dir(struct pool_set *set, unsigned repn,
unsigned partn);
int replica_open_replica_part_files(struct pool_set *set, unsigned repn);
int replica_open_poolset_part_files(struct pool_set *set);
int replica_sync(struct pool_set *set_in, struct poolset_health_status *set_hs,
unsigned flags);
int replica_transform(struct pool_set *set_in, struct pool_set *set_out,
unsigned flags);
#ifdef __cplusplus
}
#endif
#endif
| 6,216 | 28.325472 | 80 |
h
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmempool/check.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* check.h -- internal definitions for logic performing check
*/
#ifndef CHECK_H
#define CHECK_H
#ifdef __cplusplus
extern "C" {
#endif
int check_init(PMEMpoolcheck *ppc);
struct check_status *check_step(PMEMpoolcheck *ppc);
void check_fini(PMEMpoolcheck *ppc);
int check_is_end(struct check_data *data);
struct pmempool_check_status *check_status_get(struct check_status *status);
#ifdef _WIN32
void convert_status_cache(PMEMpoolcheck *ppc, char *buf, size_t size);
#endif
#ifdef __cplusplus
}
#endif
#endif

NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmempool/check_blk.c

// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* check_blk.c -- check pmemblk
*/
#include <inttypes.h>
#include <sys/param.h>
#include <endian.h>
#include "out.h"
#include "btt.h"
#include "libpmempool.h"
#include "pmempool.h"
#include "pool.h"
#include "check_util.h"
enum question {
Q_BLK_BSIZE,
};
/*
* blk_get_max_bsize -- (internal) return maximum size of block for given file
* size
*/
static inline uint32_t
blk_get_max_bsize(uint64_t fsize)
{
LOG(3, NULL);
if (fsize == 0)
return 0;
/* default nfree */
uint32_t nfree = BTT_DEFAULT_NFREE;
/* number of blocks must be at least 2 * nfree */
uint32_t internal_nlba = 2 * nfree;
/* compute arena size from file size without pmemblk structure */
uint64_t arena_size = fsize - sizeof(struct pmemblk);
if (arena_size > BTT_MAX_ARENA)
arena_size = BTT_MAX_ARENA;
arena_size = btt_arena_datasize(arena_size, nfree);
/* compute maximum internal LBA size */
uint64_t internal_lbasize = (arena_size - BTT_ALIGNMENT) /
internal_nlba - BTT_MAP_ENTRY_SIZE;
ASSERT(internal_lbasize <= UINT32_MAX);
if (internal_lbasize < BTT_MIN_LBA_SIZE)
internal_lbasize = BTT_MIN_LBA_SIZE;
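/*
* Round the internal LBA size down to the required alignment
* (an already aligned value drops by one full alignment unit,
* making this a conservative floor).
*/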
internal_lbasize = roundup(internal_lbasize, BTT_INTERNAL_LBA_ALIGNMENT)
- BTT_INTERNAL_LBA_ALIGNMENT;
return (uint32_t)internal_lbasize;
}
/*
* blk_read -- (internal) read pmemblk header
*/
static int
blk_read(PMEMpoolcheck *ppc)
{
/*
* Here we want to read the pmemblk header without the pool_hdr as we've
* already done it before.
*
* Take the pointer to fields right after pool_hdr, compute the size and
* offset of remaining fields.
*/
uint8_t *ptr = (uint8_t *)&ppc->pool->hdr.blk;
ptr += sizeof(ppc->pool->hdr.blk.hdr);
size_t size = sizeof(ppc->pool->hdr.blk) -
sizeof(ppc->pool->hdr.blk.hdr);
uint64_t offset = sizeof(ppc->pool->hdr.blk.hdr);
if (pool_read(ppc->pool, ptr, size, offset)) {
return CHECK_ERR(ppc, "cannot read pmemblk structure");
}
/* endianness conversion */
ppc->pool->hdr.blk.bsize = le32toh(ppc->pool->hdr.blk.bsize);
return 0;
}
/*
* blk_bsize_valid -- (internal) check if block size is valid for given file
* size
*/
static int
blk_bsize_valid(uint32_t bsize, uint64_t fsize)
{
uint32_t max_bsize = blk_get_max_bsize(fsize);
return (bsize >= max_bsize);
}
/*
* blk_hdr_check -- (internal) check pmemblk header
*/
static int
blk_hdr_check(PMEMpoolcheck *ppc, location *loc)
{
LOG(3, NULL);
CHECK_INFO(ppc, "checking pmemblk header");
if (blk_read(ppc)) {
ppc->result = CHECK_RESULT_ERROR;
return -1;
}
/* check for valid BTT Info arena as we can take bsize from it */
if (!ppc->pool->bttc.valid)
pool_blk_get_first_valid_arena(ppc->pool, &ppc->pool->bttc);
if (ppc->pool->bttc.valid) {
const uint32_t btt_bsize =
ppc->pool->bttc.btt_info.external_lbasize;
if (ppc->pool->hdr.blk.bsize != btt_bsize) {
CHECK_ASK(ppc, Q_BLK_BSIZE,
"invalid pmemblk.bsize.|Do you want to set "
"pmemblk.bsize to %u from BTT Info?",
btt_bsize);
}
} else if (!ppc->pool->bttc.zeroed) {
if (ppc->pool->hdr.blk.bsize < BTT_MIN_LBA_SIZE ||
blk_bsize_valid(ppc->pool->hdr.blk.bsize,
ppc->pool->set_file->size)) {
ppc->result = CHECK_RESULT_CANNOT_REPAIR;
return CHECK_ERR(ppc, "invalid pmemblk.bsize");
}
}
if (ppc->result == CHECK_RESULT_CONSISTENT ||
ppc->result == CHECK_RESULT_REPAIRED)
CHECK_INFO(ppc, "pmemblk header correct");
return check_questions_sequence_validate(ppc);
}
/*
* blk_hdr_fix -- (internal) fix pmemblk header
*/
static int
blk_hdr_fix(PMEMpoolcheck *ppc, location *loc, uint32_t question, void *ctx)
{
LOG(3, NULL);
uint32_t btt_bsize;
switch (question) {
case Q_BLK_BSIZE:
/*
* check for valid BTT Info arena as we can take bsize from it
*/
if (!ppc->pool->bttc.valid)
pool_blk_get_first_valid_arena(ppc->pool,
&ppc->pool->bttc);
btt_bsize = ppc->pool->bttc.btt_info.external_lbasize;
CHECK_INFO(ppc, "setting pmemblk.b_size to 0x%x", btt_bsize);
ppc->pool->hdr.blk.bsize = btt_bsize;
break;
default:
ERR("not implemented question id: %u", question);
}
return 0;
}
struct step {
int (*check)(PMEMpoolcheck *, location *);
int (*fix)(PMEMpoolcheck *, location *, uint32_t, void *);
enum pool_type type;
};
static const struct step steps[] = {
{
.check = blk_hdr_check,
.type = POOL_TYPE_BLK
},
{
.fix = blk_hdr_fix,
.type = POOL_TYPE_BLK
},
{
.check = NULL,
.fix = NULL,
},
};
/*
* step_exe -- (internal) perform single step according to its parameters
*/
static inline int
step_exe(PMEMpoolcheck *ppc, location *loc)
{
ASSERT(loc->step < ARRAY_SIZE(steps));
ASSERTeq(ppc->pool->params.type, POOL_TYPE_BLK);
const struct step *step = &steps[loc->step++];
if (!(step->type & ppc->pool->params.type))
return 0;
if (!step->fix)
return step->check(ppc, loc);
if (blk_read(ppc)) {
ppc->result = CHECK_RESULT_ERROR;
return -1;
}
return check_answer_loop(ppc, loc, NULL, 1, step->fix);
}
/*
* check_blk -- entry point for pmemblk checks
*/
void
check_blk(PMEMpoolcheck *ppc)
{
LOG(3, NULL);
location *loc = check_get_step_data(ppc->data);
/* do all checks */
while (CHECK_NOT_COMPLETE(loc, steps)) {
if (step_exe(ppc, loc))
break;
}
}
| 5,277 | 21.176471 | 78 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmempool/check_sds.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018-2020, Intel Corporation */
/*
* check_sds.c -- shutdown state check
*/
#include <stdio.h>
#include <inttypes.h>
#include <sys/mman.h>
#include <endian.h>
#include "out.h"
#include "util_pmem.h"
#include "libpmempool.h"
#include "libpmem.h"
#include "pmempool.h"
#include "pool.h"
#include "set.h"
#include "check_util.h"
enum question {
Q_RESET_SDS,
};
#define SDS_CHECK_STR "checking shutdown state"
#define SDS_OK_STR "shutdown state correct"
#define SDS_DIRTY_STR "shutdown state is dirty"
#define ADR_FAILURE_STR \
"an ADR failure was detected - your pool might be corrupted"
#define ZERO_SDS_STR \
"Do you want to zero shutdown state?"
#define RESET_SDS_STR \
"Do you want to reset shutdown state at your own risk? " \
"If you have more then one replica you will have to " \
"synchronize your pool after this operation."
#define SDS_FAIL_MSG(hdrp) \
IGNORE_SDS(hdrp) ? SDS_DIRTY_STR : ADR_FAILURE_STR
#define SDS_REPAIR_MSG(hdrp) \
IGNORE_SDS(hdrp) \
? SDS_DIRTY_STR ".|" ZERO_SDS_STR \
: ADR_FAILURE_STR ".|" RESET_SDS_STR
/*
* sds_check_replica -- (internal) check if replica is healthy
*/
static int
sds_check_replica(location *loc)
{
LOG(3, NULL);
struct pool_replica *rep = REP(loc->set, loc->replica);
if (rep->remote)
return 0;
/* make a copy of sds as we shouldn't modify a pool */
struct shutdown_state old_sds = loc->hdr.sds;
struct shutdown_state curr_sds;
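/* when sds checking is ignored, the stored state must simply be zeroed */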
if (IGNORE_SDS(&loc->hdr))
return util_is_zeroed(&old_sds, sizeof(old_sds)) ? 0 : -1;
shutdown_state_init(&curr_sds, NULL);
/* get current shutdown state */
for (unsigned p = 0; p < rep->nparts; ++p) {
if (shutdown_state_add_part(&curr_sds,
PART(rep, p)->fd, NULL))
return -1;
}
/* compare current and old shutdown state */
return shutdown_state_check(&curr_sds, &old_sds, NULL);
}
/*
* sds_check -- (internal) check shutdown_state
*/
static int
sds_check(PMEMpoolcheck *ppc, location *loc)
{
LOG(3, NULL);
CHECK_INFO(ppc, "%s" SDS_CHECK_STR, loc->prefix);
/* shutdown state is valid */
if (!sds_check_replica(loc)) {
CHECK_INFO(ppc, "%s" SDS_OK_STR, loc->prefix);
loc->step = CHECK_STEP_COMPLETE;
return 0;
}
/* shutdown state is NOT valid and can NOT be repaired */
if (CHECK_IS_NOT(ppc, REPAIR)) {
check_end(ppc->data);
ppc->result = CHECK_RESULT_NOT_CONSISTENT;
return CHECK_ERR(ppc, "%s%s", loc->prefix,
SDS_FAIL_MSG(&loc->hdr));
}
/* shutdown state is NOT valid but can be repaired */
CHECK_ASK(ppc, Q_RESET_SDS, "%s%s", loc->prefix,
SDS_REPAIR_MSG(&loc->hdr));
return check_questions_sequence_validate(ppc);
}
/*
* sds_fix -- (internal) fix shutdown state
*/
static int
sds_fix(PMEMpoolcheck *ppc, location *loc, uint32_t question,
void *context)
{
LOG(3, NULL);
switch (question) {
case Q_RESET_SDS:
CHECK_INFO(ppc, "%sresetting pool_hdr.sds", loc->prefix);
memset(&loc->hdr.sds, 0, sizeof(loc->hdr.sds));
++loc->healthy_replicas;
break;
default:
ERR("not implemented question id: %u", question);
}
return 0;
}
struct step {
int (*check)(PMEMpoolcheck *, location *);
int (*fix)(PMEMpoolcheck *, location *, uint32_t, void *);
};
static const struct step steps[] = {
{
.check = sds_check,
},
{
.fix = sds_fix,
},
{
.check = NULL,
.fix = NULL,
},
};
/*
* step_exe -- (internal) perform single step according to its parameters
*/
static int
step_exe(PMEMpoolcheck *ppc, const struct step *steps, location *loc)
{
const struct step *step = &steps[loc->step++];
if (!step->fix)
return step->check(ppc, loc);
if (!check_has_answer(ppc->data))
return 0;
if (check_answer_loop(ppc, loc, NULL, 0 /* fail on no */, step->fix))
return -1;
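/* store the fixed header back in on-media (LE) byte order and persist it */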
util_convert2le_hdr(&loc->hdr);
memcpy(loc->hdrp, &loc->hdr, sizeof(loc->hdr));
util_persist_auto(loc->is_dev_dax, loc->hdrp, sizeof(*loc->hdrp));
util_convert2h_hdr_nocheck(&loc->hdr);
loc->pool_hdr_modified = 1;
return 0;
}
/*
* init_prefix -- prepare prefix for messages
*/
static void
init_prefix(location *loc)
{
if (loc->set->nreplicas > 1) {
int ret = util_snprintf(loc->prefix, PREFIX_MAX_SIZE,
"replica %u: ",
loc->replica);
if (ret < 0)
FATAL("!snprintf");
} else
loc->prefix[0] = '\0';
loc->step = 0;
}
/*
* init_location_data -- (internal) prepare location information
*/
static void
init_location_data(PMEMpoolcheck *ppc, location *loc)
{
ASSERTeq(loc->part, 0);
loc->set = ppc->pool->set_file->poolset;
if (ppc->result != CHECK_RESULT_PROCESS_ANSWERS)
init_prefix(loc);
struct pool_replica *rep = REP(loc->set, loc->replica);
loc->hdrp = HDR(rep, loc->part);
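/* keep a host-endian working copy of the mapped header */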
memcpy(&loc->hdr, loc->hdrp, sizeof(loc->hdr));
util_convert2h_hdr_nocheck(&loc->hdr);
loc->is_dev_dax = PART(rep, 0)->is_dev_dax;
}
/*
* sds_get_healthy_replicas_num -- (internal) get number of healthy replicas
*/
static void
sds_get_healthy_replicas_num(PMEMpoolcheck *ppc, location *loc)
{
const unsigned nreplicas = ppc->pool->set_file->poolset->nreplicas;
loc->healthy_replicas = 0;
loc->part = 0;
for (; loc->replica < nreplicas; loc->replica++) {
init_location_data(ppc, loc);
if (!sds_check_replica(loc)) {
++loc->healthy_replicas; /* healthy replica found */
}
}
loc->replica = 0; /* reset replica index */
}
/*
* check_sds -- entry point for shutdown state checks
*/
void
check_sds(PMEMpoolcheck *ppc)
{
LOG(3, NULL);
const unsigned nreplicas = ppc->pool->set_file->poolset->nreplicas;
location *loc = check_get_step_data(ppc->data);
if (!loc->init_done) {
sds_get_healthy_replicas_num(ppc, loc);
if (loc->healthy_replicas == nreplicas) {
/* all replicas have healthy shutdown state */
/* print summary */
for (; loc->replica < nreplicas; loc->replica++) {
init_prefix(loc);
CHECK_INFO(ppc, "%s" SDS_CHECK_STR,
loc->prefix);
CHECK_INFO(ppc, "%s" SDS_OK_STR, loc->prefix);
}
return;
} else if (loc->healthy_replicas > 0) {
ppc->sync_required = true;
return;
}
loc->init_done = true;
}
/* produce single healthy replica */
loc->part = 0;
for (; loc->replica < nreplicas; loc->replica++) {
init_location_data(ppc, loc);
while (CHECK_NOT_COMPLETE(loc, steps)) {
ASSERT(loc->step < ARRAY_SIZE(steps));
if (step_exe(ppc, steps, loc))
return;
}
if (loc->healthy_replicas)
break;
}
if (loc->healthy_replicas == 0) {
ppc->result = CHECK_RESULT_NOT_CONSISTENT;
CHECK_ERR(ppc, "cannot complete repair, reverting changes");
} else if (loc->healthy_replicas < nreplicas) {
ppc->sync_required = true;
}
}
| 6,571 | 21.662069 | 76 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmempool/check_log.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* check_log.c -- check pmemlog
*/
#include <inttypes.h>
#include <sys/param.h>
#include <endian.h>
#include "out.h"
#include "libpmempool.h"
#include "pmempool.h"
#include "pool.h"
#include "check_util.h"
enum question {
Q_LOG_START_OFFSET,
Q_LOG_END_OFFSET,
Q_LOG_WRITE_OFFSET,
};
/*
* log_read -- (internal) read pmemlog header
*/
static int
log_read(PMEMpoolcheck *ppc)
{
/*
* Here we want to read the pmemlog header without the pool_hdr as we've
* already done it before.
*
* Take the pointer to fields right after pool_hdr, compute the size and
* offset of remaining fields.
*/
uint8_t *ptr = (uint8_t *)&ppc->pool->hdr.log;
ptr += sizeof(ppc->pool->hdr.log.hdr);
size_t size = sizeof(ppc->pool->hdr.log) -
sizeof(ppc->pool->hdr.log.hdr);
uint64_t offset = sizeof(ppc->pool->hdr.log.hdr);
if (pool_read(ppc->pool, ptr, size, offset))
return CHECK_ERR(ppc, "cannot read pmemlog structure");
/* endianness conversion */
log_convert2h(&ppc->pool->hdr.log);
return 0;
}
/*
* log_hdr_check -- (internal) check pmemlog header
*/
static int
log_hdr_check(PMEMpoolcheck *ppc, location *loc)
{
LOG(3, NULL);
CHECK_INFO(ppc, "checking pmemlog header");
if (log_read(ppc)) {
ppc->result = CHECK_RESULT_ERROR;
return -1;
}
/* determine constant values for pmemlog */
const uint64_t d_start_offset =
roundup(sizeof(ppc->pool->hdr.log), LOG_FORMAT_DATA_ALIGN);
if (ppc->pool->hdr.log.start_offset != d_start_offset) {
if (CHECK_ASK(ppc, Q_LOG_START_OFFSET,
"invalid pmemlog.start_offset: 0x%jx.|Do you "
"want to set pmemlog.start_offset to default "
"0x%jx?",
ppc->pool->hdr.log.start_offset,
d_start_offset))
goto error;
}
if (ppc->pool->hdr.log.end_offset != ppc->pool->set_file->size) {
if (CHECK_ASK(ppc, Q_LOG_END_OFFSET,
"invalid pmemlog.end_offset: 0x%jx.|Do you "
"want to set pmemlog.end_offset to 0x%jx?",
ppc->pool->hdr.log.end_offset,
ppc->pool->set_file->size))
goto error;
}
if (ppc->pool->hdr.log.write_offset < d_start_offset ||
ppc->pool->hdr.log.write_offset > ppc->pool->set_file->size) {
if (CHECK_ASK(ppc, Q_LOG_WRITE_OFFSET,
"invalid pmemlog.write_offset: 0x%jx.|Do you "
"want to set pmemlog.write_offset to "
"pmemlog.end_offset?",
ppc->pool->hdr.log.write_offset))
goto error;
}
if (ppc->result == CHECK_RESULT_CONSISTENT ||
ppc->result == CHECK_RESULT_REPAIRED)
CHECK_INFO(ppc, "pmemlog header correct");
return check_questions_sequence_validate(ppc);
error:
ppc->result = CHECK_RESULT_NOT_CONSISTENT;
check_end(ppc->data);
return -1;
}
/*
* log_hdr_fix -- (internal) fix pmemlog header
*/
static int
log_hdr_fix(PMEMpoolcheck *ppc, location *loc, uint32_t question, void *ctx)
{
LOG(3, NULL);
uint64_t d_start_offset;
switch (question) {
case Q_LOG_START_OFFSET:
/* determine constant values for pmemlog */
d_start_offset = roundup(sizeof(ppc->pool->hdr.log),
LOG_FORMAT_DATA_ALIGN);
CHECK_INFO(ppc, "setting pmemlog.start_offset to 0x%jx",
d_start_offset);
ppc->pool->hdr.log.start_offset = d_start_offset;
break;
case Q_LOG_END_OFFSET:
CHECK_INFO(ppc, "setting pmemlog.end_offset to 0x%jx",
ppc->pool->set_file->size);
ppc->pool->hdr.log.end_offset = ppc->pool->set_file->size;
break;
case Q_LOG_WRITE_OFFSET:
CHECK_INFO(ppc, "setting pmemlog.write_offset to "
"pmemlog.end_offset");
ppc->pool->hdr.log.write_offset = ppc->pool->set_file->size;
break;
default:
ERR("not implemented question id: %u", question);
}
return 0;
}
struct step {
int (*check)(PMEMpoolcheck *, location *);
int (*fix)(PMEMpoolcheck *, location *, uint32_t, void *);
enum pool_type type;
};
static const struct step steps[] = {
{
.check = log_hdr_check,
.type = POOL_TYPE_LOG
},
{
.fix = log_hdr_fix,
.type = POOL_TYPE_LOG
},
{
.check = NULL,
.fix = NULL,
},
};
/*
* step_exe -- (internal) perform single step according to its parameters
*/
static inline int
step_exe(PMEMpoolcheck *ppc, location *loc)
{
ASSERT(loc->step < ARRAY_SIZE(steps));
ASSERTeq(ppc->pool->params.type, POOL_TYPE_LOG);
const struct step *step = &steps[loc->step++];
if (!(step->type & ppc->pool->params.type))
return 0;
if (!step->fix)
return step->check(ppc, loc);
if (log_read(ppc)) {
ppc->result = CHECK_RESULT_ERROR;
return -1;
}
return check_answer_loop(ppc, loc, NULL, 1, step->fix);
}
/*
* check_log -- entry point for pmemlog checks
*/
void
check_log(PMEMpoolcheck *ppc)
{
LOG(3, NULL);
location *loc = check_get_step_data(ppc->data);
/* do all checks */
while (CHECK_NOT_COMPLETE(loc, steps)) {
if (step_exe(ppc, loc))
break;
}
}
| 4,760 | 21.671429 | 76 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmempool/replica.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2020, Intel Corporation */
/*
* replica.c -- groups all commands for replica manipulation
*/
#include "replica.h"
#include <errno.h>
#include <sys/mman.h>
#include <errno.h>
#include <fcntl.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <libgen.h>
#include "obj.h"
#include "palloc.h"
#include "file.h"
#include "os.h"
#include "out.h"
#include "pool_hdr.h"
#include "set.h"
#include "util.h"
#include "uuid.h"
#include "shutdown_state.h"
#include "badblocks.h"
#include "set_badblocks.h"
/*
* check_flags_sync -- (internal) check if flags are supported for sync
*/
static int
check_flags_sync(unsigned flags)
{
flags &= ~(PMEMPOOL_SYNC_DRY_RUN | PMEMPOOL_SYNC_FIX_BAD_BLOCKS);
return flags > 0;
}
/*
* check_flags_transform -- (internal) check if flags are supported for
* transform
*/
static int
check_flags_transform(unsigned flags)
{
flags &= ~PMEMPOOL_TRANSFORM_DRY_RUN;
return flags > 0;
}
/*
* replica_align_badblock_offset_length -- align offset and length
* of the bad block for the given part
*/
void
replica_align_badblock_offset_length(size_t *offset, size_t *length,
struct pool_set *set_in, unsigned repn, unsigned partn)
{
LOG(3, "offset %zu, length %zu, pool_set %p, replica %u, part %u",
*offset, *length, set_in, repn, partn);
size_t alignment = set_in->replica[repn]->part[partn].alignment;
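/* widen the range so that both of its ends fall on an alignment boundary */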
size_t off = ALIGN_DOWN(*offset, alignment);
size_t len = ALIGN_UP(*length + (*offset - off), alignment);
*offset = off;
*length = len;
}
/*
* replica_get_part_data_len -- get data length for given part
*/
size_t
replica_get_part_data_len(struct pool_set *set_in, unsigned repn,
unsigned partn)
{
size_t alignment = set_in->replica[repn]->part[partn].alignment;
size_t hdrsize = (set_in->options & OPTION_SINGLEHDR) ? 0 : alignment;
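/*
* Part 0 always loses POOL_HDR_SIZE; every other part loses one
* alignment-sized header, or nothing when SINGLEHDR is set.
*/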
return ALIGN_DOWN(set_in->replica[repn]->part[partn].filesize,
alignment) - ((partn == 0) ? POOL_HDR_SIZE : hdrsize);
}
/*
* replica_get_part_offset -- get part's offset from the beginning of replica
*/
uint64_t
replica_get_part_offset(struct pool_set *set, unsigned repn, unsigned partn)
{
return (uint64_t)set->replica[repn]->part[partn].addr -
(uint64_t)set->replica[repn]->part[0].addr;
}
/*
* replica_get_part_data_offset -- get data length before given part
*/
uint64_t
replica_get_part_data_offset(struct pool_set *set, unsigned repn,
unsigned partn)
{
if (partn == 0)
return POOL_HDR_SIZE;
return (uint64_t)set->replica[repn]->part[partn].addr -
(uint64_t)set->replica[repn]->part[0].addr;
}
/*
* replica_remove_part -- unlink part from replica
*/
int
replica_remove_part(struct pool_set *set, unsigned repn, unsigned partn,
int fix_bad_blocks)
{
LOG(3, "set %p repn %u partn %u fix_bad_blocks %i",
set, repn, partn, fix_bad_blocks);
struct pool_set_part *part = PART(REP(set, repn), partn);
if (part->fd != -1) {
os_close(part->fd);
part->fd = -1;
}
int olderrno = errno;
enum file_type type = util_file_get_type(part->path);
if (type == OTHER_ERROR)
return -1;
/* if the part is a device dax, clear its bad blocks */
if (type == TYPE_DEVDAX && fix_bad_blocks &&
badblocks_clear_all(part->path)) {
ERR("clearing bad blocks in device dax failed -- '%s'",
part->path);
errno = EIO;
return -1;
}
if (type == TYPE_NORMAL && util_unlink(part->path)) {
ERR("!removing part %u from replica %u failed",
partn, repn);
return -1;
}
errno = olderrno;
LOG(4, "Removed part %s number %u from replica %u", part->path, partn,
repn);
return 0;
}
/*
* create_replica_health_status -- (internal) create helping structure for
* storing replica's health status
*/
static struct replica_health_status *
create_replica_health_status(struct pool_set *set, unsigned repn)
{
LOG(3, "set %p, repn %u", set, repn);
unsigned nparts = set->replica[repn]->nparts;
struct replica_health_status *replica_hs;
replica_hs = Zalloc(sizeof(struct replica_health_status)
+ nparts * sizeof(struct part_health_status));
if (replica_hs == NULL) {
ERR("!Zalloc for replica health status");
return NULL;
}
replica_hs->nparts = nparts;
replica_hs->nhdrs = set->replica[repn]->nhdrs;
return replica_hs;
}
/*
* replica_part_remove_recovery_file -- remove bad blocks' recovery file
*/
static int
replica_part_remove_recovery_file(struct part_health_status *phs)
{
LOG(3, "phs %p", phs);
if (phs->recovery_file_name == NULL || phs->recovery_file_exists == 0)
return 0;
if (os_unlink(phs->recovery_file_name) < 0) {
ERR("!removing the bad block recovery file failed -- '%s'",
phs->recovery_file_name);
return -1;
}
LOG(3, "bad block recovery file removed -- '%s'",
phs->recovery_file_name);
phs->recovery_file_exists = 0;
return 0;
}
/*
* replica_remove_all_recovery_files -- remove all recovery files
*/
int
replica_remove_all_recovery_files(struct poolset_health_status *set_hs)
{
LOG(3, "set_hs %p", set_hs);
int ret = 0;
for (unsigned r = 0; r < set_hs->nreplicas; ++r) {
struct replica_health_status *rhs = set_hs->replica[r];
for (unsigned p = 0; p < rhs->nparts; ++p)
ret |= replica_part_remove_recovery_file(&rhs->part[p]);
}
return ret;
}
/*
* replica_free_poolset_health_status -- free memory allocated for helping
* structure
*/
void
replica_free_poolset_health_status(struct poolset_health_status *set_hs)
{
LOG(3, "set_hs %p", set_hs);
for (unsigned r = 0; r < set_hs->nreplicas; ++r) {
struct replica_health_status *rep_hs = set_hs->replica[r];
for (unsigned p = 0; p < rep_hs->nparts; ++p) {
Free(rep_hs->part[p].recovery_file_name);
Free(rep_hs->part[p].bbs.bbv);
}
Free(set_hs->replica[r]);
}
Free(set_hs);
}
/*
* replica_create_poolset_health_status -- create helping structure for storing
* poolset's health status
*/
int
replica_create_poolset_health_status(struct pool_set *set,
struct poolset_health_status **set_hsp)
{
LOG(3, "set %p, set_hsp %p", set, set_hsp);
unsigned nreplicas = set->nreplicas;
struct poolset_health_status *set_hs;
set_hs = Zalloc(sizeof(struct poolset_health_status) +
nreplicas * sizeof(struct replica_health_status *));
if (set_hs == NULL) {
ERR("!Zalloc for poolset health state");
return -1;
}
set_hs->nreplicas = nreplicas;
for (unsigned i = 0; i < nreplicas; ++i) {
struct replica_health_status *replica_hs =
create_replica_health_status(set, i);
if (replica_hs == NULL) {
replica_free_poolset_health_status(set_hs);
return -1;
}
set_hs->replica[i] = replica_hs;
}
*set_hsp = set_hs;
return 0;
}
/*
* replica_is_part_broken -- check if part is marked as broken in the helping
* structure
*/
int
replica_is_part_broken(unsigned repn, unsigned partn,
struct poolset_health_status *set_hs)
{
struct replica_health_status *rhs = REP_HEALTH(set_hs, repn);
return (rhs->flags & IS_BROKEN) ||
(PART_HEALTH(rhs, partn) & IS_BROKEN);
}
/*
* replica_is_replica_broken -- check if the replica or any of its parts
* is marked as broken
*/
int
replica_is_replica_broken(unsigned repn, struct poolset_health_status *set_hs)
{
LOG(3, "repn %u, set_hs %p", repn, set_hs);
struct replica_health_status *r_hs = REP_HEALTH(set_hs, repn);
if (r_hs->flags & IS_BROKEN)
return 1;
for (unsigned p = 0; p < r_hs->nparts; ++p) {
if (replica_is_part_broken(repn, p, set_hs))
return 1;
}
return 0;
}
/*
* replica_is_replica_consistent -- check if replica is not marked as
* inconsistent
*/
int
replica_is_replica_consistent(unsigned repn,
struct poolset_health_status *set_hs)
{
return !(REP_HEALTH(set_hs, repn)->flags & IS_INCONSISTENT);
}
/*
* replica_has_bad_blocks -- check if replica has bad blocks
*/
int
replica_has_bad_blocks(unsigned repn, struct poolset_health_status *set_hs)
{
return REP_HEALTH(set_hs, repn)->flags & HAS_BAD_BLOCKS;
}
/*
* replica_part_has_bad_blocks -- check if replica's part has bad blocks
*/
int
replica_part_has_bad_blocks(struct part_health_status *phs)
{
return phs->flags & HAS_BAD_BLOCKS;
}
/*
* replica_part_has_corrupted_header -- check if replica's part
* has bad blocks in the header (corrupted header)
*/
int
replica_part_has_corrupted_header(unsigned repn, unsigned partn,
struct poolset_health_status *set_hs)
{
struct replica_health_status *rhs = REP_HEALTH(set_hs, repn);
return PART_HEALTH(rhs, partn) & HAS_CORRUPTED_HEADER;
}
/*
* replica_has_corrupted_header -- (internal) check if replica has bad blocks
* in the header (corrupted header)
*/
static int
replica_has_corrupted_header(unsigned repn,
struct poolset_health_status *set_hs)
{
return REP_HEALTH(set_hs, repn)->flags & HAS_CORRUPTED_HEADER;
}
/*
* replica_is_replica_healthy -- check if replica is unbroken and consistent
*/
int
replica_is_replica_healthy(unsigned repn, struct poolset_health_status *set_hs)
{
LOG(3, "repn %u, set_hs %p", repn, set_hs);
int ret = !replica_is_replica_broken(repn, set_hs) &&
replica_is_replica_consistent(repn, set_hs) &&
!replica_has_bad_blocks(repn, set_hs);
LOG(4, "return %i", ret);
return ret;
}
/*
* replica_has_healthy_header -- (internal) check if replica has healthy headers
*/
static int
replica_has_healthy_header(unsigned repn, struct poolset_health_status *set_hs)
{
LOG(3, "repn %u, set_hs %p", repn, set_hs);
int ret = !replica_is_replica_broken(repn, set_hs) &&
replica_is_replica_consistent(repn, set_hs) &&
!replica_has_corrupted_header(repn, set_hs);
LOG(4, "return %i", ret);
return ret;
}
/*
* replica_is_poolset_healthy -- check if all replicas in a poolset are not
* marked as broken nor inconsistent in the
* helping structure
*/
int
replica_is_poolset_healthy(struct poolset_health_status *set_hs)
{
LOG(3, "set_hs %p", set_hs);
for (unsigned r = 0; r < set_hs->nreplicas; ++r) {
if (!replica_is_replica_healthy(r, set_hs))
return 0;
}
return 1;
}
/*
* replica_is_poolset_transformed -- check if the flag indicating a call from
* pmempool_transform is on
*/
int
replica_is_poolset_transformed(unsigned flags)
{
return flags & IS_TRANSFORMED;
}
/*
* replica_find_unbroken_part -- find a part number in a given
* replica, which is not marked as broken in the helping structure and contains
* a pool header
*/
unsigned
replica_find_unbroken_part(unsigned repn, struct poolset_health_status *set_hs)
{
LOG(3, "repn %u, set_hs %p", repn, set_hs);
for (unsigned p = 0; p < REP_HEALTH(set_hs, repn)->nhdrs; ++p) {
if (!replica_is_part_broken(repn, p, set_hs))
return p;
}
return UNDEF_PART;
}
/*
* replica_find_healthy_replica -- find a replica which is a good source of data
*/
unsigned
replica_find_healthy_replica(struct poolset_health_status *set_hs)
{
LOG(3, "set_hs %p", set_hs);
for (unsigned r = 0; r < set_hs->nreplicas; ++r) {
if (replica_is_replica_healthy(r, set_hs)) {
LOG(4, "return %i", r);
return r;
}
}
LOG(4, "return %i", UNDEF_REPLICA);
return UNDEF_REPLICA;
}
/*
* replica_find_replica_healthy_header -- find a replica with a healthy header
*/
unsigned
replica_find_replica_healthy_header(struct poolset_health_status *set_hs)
{
LOG(3, "set_hs %p", set_hs);
for (unsigned r = 0; r < set_hs->nreplicas; ++r) {
if (replica_has_healthy_header(r, set_hs)) {
LOG(4, "return %i", r);
return r;
}
}
LOG(4, "return %i", UNDEF_REPLICA);
return UNDEF_REPLICA;
}
/*
* replica_check_store_size -- (internal) store size from pool descriptor for
* replica
*/
static int
replica_check_store_size(struct pool_set *set,
struct poolset_health_status *set_hs, unsigned repn)
{
LOG(3, "set %p, set_hs %p, repn %u", set, set_hs, repn);
struct pool_replica *rep = set->replica[repn];
struct pmemobjpool pop;
if (rep->remote) {
memcpy(&pop.hdr, rep->part[0].hdr, sizeof(pop.hdr));
void *descr = (void *)((uintptr_t)&pop + POOL_HDR_SIZE);
if (Rpmem_read(rep->remote->rpp, descr, POOL_HDR_SIZE,
sizeof(pop) - POOL_HDR_SIZE, 0)) {
return -1;
}
} else {
/* round up map size to Mmap align size */
if (util_map_part(&rep->part[0], NULL,
ALIGN_UP(sizeof(pop), rep->part[0].alignment),
0, MAP_SHARED, 1)) {
return -1;
}
memcpy(&pop, rep->part[0].addr, sizeof(pop));
util_unmap_part(&rep->part[0]);
}
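/* verify the pool descriptor checksum before trusting heap_offset/heap_size */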
void *dscp = (void *)((uintptr_t)&pop + sizeof(pop.hdr));
if (!util_checksum(dscp, OBJ_DSC_P_SIZE, &pop.checksum, 0,
0)) {
set_hs->replica[repn]->flags |= IS_BROKEN;
return 0;
}
set_hs->replica[repn]->pool_size = pop.heap_offset + pop.heap_size;
return 0;
}
/*
* check_store_all_sizes -- (internal) store sizes from pool descriptor for all
* healthy replicas
*/
static int
check_store_all_sizes(struct pool_set *set,
struct poolset_health_status *set_hs)
{
LOG(3, "set %p, set_hs %p", set, set_hs);
for (unsigned r = 0; r < set->nreplicas; ++r) {
if (!replica_has_healthy_header(r, set_hs))
continue;
if (replica_check_store_size(set, set_hs, r))
return -1;
}
return 0;
}
/*
* check_and_open_poolset_part_files -- (internal) for each part in a poolset
* check if the part files are accessible, and if not, mark it as broken
* in a helping structure; then open the part file
*/
static int
check_and_open_poolset_part_files(struct pool_set *set,
struct poolset_health_status *set_hs, unsigned flags)
{
LOG(3, "set %p, set_hs %p, flags %u", set, set_hs, flags);
for (unsigned r = 0; r < set->nreplicas; ++r) {
struct pool_replica *rep = set->replica[r];
struct replica_health_status *rep_hs = set_hs->replica[r];
if (rep->remote) {
if (util_replica_open_remote(set, r, 0)) {
LOG(1, "cannot open remote replica no %u", r);
return -1;
}
unsigned nlanes = REMOTE_NLANES;
int ret = util_poolset_remote_open(rep, r,
rep->repsize, 0,
rep->part[0].addr,
rep->resvsize, &nlanes);
if (ret) {
rep_hs->flags |= IS_BROKEN;
LOG(1, "remote replica #%u marked as BROKEN",
r);
}
continue;
}
for (unsigned p = 0; p < rep->nparts; ++p) {
const char *path = rep->part[p].path;
enum file_type type = util_file_get_type(path);
if (type < 0 || os_access(path, R_OK|W_OK) != 0) {
LOG(1, "part file %s is not accessible", path);
errno = 0;
rep_hs->part[p].flags |= IS_BROKEN;
if (is_dry_run(flags))
continue;
}
if (util_part_open(&rep->part[p], 0, 0)) {
if (type == TYPE_DEVDAX) {
LOG(1,
"opening part on Device DAX %s failed",
path);
return -1;
}
LOG(1, "opening part %s failed", path);
errno = 0;
rep_hs->part[p].flags |= IS_BROKEN;
}
}
}
return 0;
}
/*
* map_all_unbroken_headers -- (internal) map all headers in a poolset,
* skipping those marked as broken in a helping
* structure
*/
static int
map_all_unbroken_headers(struct pool_set *set,
struct poolset_health_status *set_hs)
{
LOG(3, "set %p, set_hs %p", set, set_hs);
for (unsigned r = 0; r < set->nreplicas; ++r) {
struct pool_replica *rep = set->replica[r];
struct replica_health_status *rep_hs = set_hs->replica[r];
if (rep->remote)
continue;
for (unsigned p = 0; p < rep->nhdrs; ++p) {
/* skip broken parts */
if (replica_is_part_broken(r, p, set_hs))
continue;
LOG(4, "mapping header for part %u, replica %u", p, r);
if (util_map_hdr(&rep->part[p], MAP_SHARED, 0) != 0) {
LOG(1, "header mapping failed - part #%d", p);
rep_hs->part[p].flags |= IS_BROKEN;
}
}
}
return 0;
}
/*
* unmap_all_headers -- (internal) unmap all headers in a poolset
*/
static int
unmap_all_headers(struct pool_set *set)
{
LOG(3, "set %p", set);
for (unsigned r = 0; r < set->nreplicas; ++r) {
struct pool_replica *rep = set->replica[r];
util_replica_close(set, r);
if (rep->remote && rep->remote->rpp) {
Rpmem_close(rep->remote->rpp);
rep->remote->rpp = NULL;
}
}
return 0;
}
/*
* check_checksums_and_signatures -- (internal) check if checksums
* and signatures are correct for parts
* in a given replica
*/
static int
check_checksums_and_signatures(struct pool_set *set,
struct poolset_health_status *set_hs)
{
LOG(3, "set %p, set_hs %p", set, set_hs);
for (unsigned r = 0; r < set->nreplicas; ++r) {
struct pool_replica *rep = REP(set, r);
struct replica_health_status *rep_hs = REP_HEALTH(set_hs, r);
/*
* Checksums and signatures of remote replicas are checked
* during opening them on the remote side by the rpmem daemon.
* The local version of remote headers does not contain
* such data.
*/
if (rep->remote)
continue;
for (unsigned p = 0; p < rep->nhdrs; ++p) {
/* skip broken parts */
if (replica_is_part_broken(r, p, set_hs))
continue;
/* check part's checksum */
LOG(4, "checking checksum for part %u, replica %u",
p, r);
struct pool_hdr *hdr = HDR(rep, p);
if (!util_checksum(hdr, sizeof(*hdr), &hdr->checksum, 0,
POOL_HDR_CSUM_END_OFF(hdr))) {
ERR("invalid checksum of pool header");
rep_hs->part[p].flags |= IS_BROKEN;
} else if (util_is_zeroed(hdr, sizeof(*hdr))) {
rep_hs->part[p].flags |= IS_BROKEN;
}
enum pool_type type = pool_hdr_get_type(hdr);
if (type == POOL_TYPE_UNKNOWN) {
ERR("invalid signature");
rep_hs->part[p].flags |= IS_BROKEN;
}
}
}
return 0;
}
/*
* replica_badblocks_recovery_file_save -- save bad blocks in the bad blocks
* recovery file before clearing them
*/
static int
replica_badblocks_recovery_file_save(struct part_health_status *part_hs)
{
LOG(3, "part_health_status %p", part_hs);
ASSERTeq(part_hs->recovery_file_exists, 1);
ASSERTne(part_hs->recovery_file_name, NULL);
struct badblocks *bbs = &part_hs->bbs;
char *path = part_hs->recovery_file_name;
int ret = -1;
int fd = os_open(path, O_WRONLY | O_TRUNC);
if (fd < 0) {
ERR("!opening bad block recovery file failed -- '%s'", path);
return -1;
}
FILE *recovery_file = os_fdopen(fd, "w");
if (recovery_file == NULL) {
ERR(
"!opening a file stream for bad block recovery file failed -- '%s'",
path);
os_close(fd);
return -1;
}
/* save bad blocks */
for (unsigned i = 0; i < bbs->bb_cnt; i++) {
ASSERT(bbs->bbv[i].length != 0);
fprintf(recovery_file_name, "%zu %zu\n",
bbs->bbv[i].offset, bbs->bbv[i].length);
}
if (fflush(recovery_file) == EOF) {
ERR("!flushing bad block recovery file failed -- '%s'", path);
goto exit_error;
}
if (os_fsync(fd) < 0) {
ERR("!syncing bad block recovery file failed -- '%s'", path);
goto exit_error;
}
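/*
* The bad block list was flushed and fsynced above, so the "0 0"
* terminator written below marks the file as complete -- a crash
* before this point leaves a detectably incomplete recovery file.
*/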
/* save the finish flag */
fprintf(recovery_file_name, "0 0\n");
if (fflush(recovery_file) == EOF) {
ERR("!flushing bad block recovery file failed -- '%s'", path);
goto exit_error;
}
if (os_fsync(fd) < 0) {
ERR("!syncing bad block recovery file failed -- '%s'", path);
goto exit_error;
}
LOG(3, "bad blocks saved in the recovery file -- '%s'", path);
ret = 0;
exit_error:
os_fclose(recovery_file);
return ret;
}
/*
* replica_part_badblocks_recovery_file_read -- read bad blocks
* from the bad block recovery file
* for the current part
*/
static int
replica_part_badblocks_recovery_file_read(struct part_health_status *part_hs)
{
LOG(3, "part_health_status %p", part_hs);
ASSERT(part_hs->recovery_file_exists);
ASSERTne(part_hs->recovery_file_name, NULL);
VEC(bbsvec, struct bad_block) bbv = VEC_INITIALIZER;
char *path = part_hs->recovery_file_name;
struct bad_block bb;
int ret = -1;
FILE *recovery_file = os_fopen(path, "r");
if (!recovery_file) {
ERR("!opening the recovery file for reading failed -- '%s'",
path);
return -1;
}
unsigned long long min_offset = 0; /* minimum possible offset */
do {
if (fscanf(recovery_file, "%zu %zu\n",
&bb.offset, &bb.length) < 2) {
LOG(1, "incomplete bad block recovery file -- '%s'",
path);
ret = 1;
goto error_exit;
}
if (bb.offset == 0 && bb.length == 0) {
/* finish_flag */
break;
}
/* check if bad blocks build an increasing sequence */
if (bb.offset < min_offset) {
ERR(
"wrong format of bad block recovery file (bad blocks are not sorted by the offset in ascending order) -- '%s'",
path);
errno = EINVAL;
ret = -1;
goto error_exit;
}
/* update the minimum possible offset */
min_offset = bb.offset + bb.length;
bb.nhealthy = NO_HEALTHY_REPLICA; /* unknown healthy replica */
/* add the new bad block to the vector */
if (VEC_PUSH_BACK(&bbv, bb))
goto error_exit;
} while (1);
part_hs->bbs.bbv = VEC_ARR(&bbv);
part_hs->bbs.bb_cnt = (unsigned)VEC_SIZE(&bbv);
os_fclose(recovery_file);
LOG(1, "bad blocks read from the recovery file -- '%s'", path);
return 0;
error_exit:
VEC_DELETE(&bbv);
os_fclose(recovery_file);
return ret;
}
/* status returned by the replica_badblocks_recovery_files_check() function */
enum badblocks_recovery_files_status {
RECOVERY_FILES_ERROR = -1,
RECOVERY_FILES_DO_NOT_EXIST = 0,
RECOVERY_FILES_EXIST_ALL = 1,
RECOVERY_FILES_NOT_ALL_EXIST = 2
};
/*
* replica_badblocks_recovery_files_check -- (internal) check if bad blocks
* recovery files exist
*/
static enum badblocks_recovery_files_status
replica_badblocks_recovery_files_check(struct pool_set *set,
struct poolset_health_status *set_hs)
{
LOG(3, "set %p, set_hs %p", set, set_hs);
int recovery_file_exists = 0;
int recovery_file_does_not_exist = 0;
for (unsigned r = 0; r < set->nreplicas; ++r) {
struct pool_replica *rep = set->replica[r];
struct replica_health_status *rep_hs = set_hs->replica[r];
if (rep->remote) {
/*
* Bad blocks in remote replicas currently are fixed
* during opening by removing and recreating
* the whole remote replica.
*/
continue;
}
for (unsigned p = 0; p < rep->nparts; ++p) {
const char *path = PART(rep, p)->path;
struct part_health_status *part_hs = &rep_hs->part[p];
int exists = util_file_exists(path);
if (exists < 0)
return RECOVERY_FILES_ERROR;
if (!exists) {
/* part file does not exist - skip it */
continue;
}
part_hs->recovery_file_name =
badblocks_recovery_file_alloc(set->path,
r, p);
if (part_hs->recovery_file_name == NULL) {
LOG(1,
"allocating name of bad block recovery file failed");
return RECOVERY_FILES_ERROR;
}
exists = util_file_exists(part_hs->recovery_file_name);
if (exists < 0)
return RECOVERY_FILES_ERROR;
part_hs->recovery_file_exists = exists;
if (part_hs->recovery_file_exists) {
LOG(3, "bad block recovery file exists: %s",
part_hs->recovery_file_name);
recovery_file_exists = 1;
} else {
LOG(3,
"bad block recovery file does not exist: %s",
part_hs->recovery_file_name);
recovery_file_does_not_exist = 1;
}
}
}
if (recovery_file_exists) {
if (recovery_file_does_not_exist) {
LOG(4, "return RECOVERY_FILES_NOT_ALL_EXIST");
return RECOVERY_FILES_NOT_ALL_EXIST;
} else {
LOG(4, "return RECOVERY_FILES_EXIST_ALL");
return RECOVERY_FILES_EXIST_ALL;
}
}
LOG(4, "return RECOVERY_FILES_DO_NOT_EXIST");
return RECOVERY_FILES_DO_NOT_EXIST;
}
/*
* replica_badblocks_recovery_files_read -- (internal) read bad blocks from all
* bad block recovery files for all parts
*/
static int
replica_badblocks_recovery_files_read(struct pool_set *set,
struct poolset_health_status *set_hs)
{
LOG(3, "set %p, set_hs %p", set, set_hs);
int ret;
for (unsigned r = 0; r < set->nreplicas; ++r) {
struct pool_replica *rep = set->replica[r];
struct replica_health_status *rep_hs = set_hs->replica[r];
/* XXX: not supported yet */
if (rep->remote)
continue;
for (unsigned p = 0; p < rep->nparts; ++p) {
const char *path = PART(rep, p)->path;
struct part_health_status *part_hs = &rep_hs->part[p];
int exists = util_file_exists(path);
if (exists < 0)
return -1;
if (!exists) {
/* the part does not exist */
continue;
}
LOG(1,
"reading bad blocks from the recovery file -- '%s'",
part_hs->recovery_file_name);
ret = replica_part_badblocks_recovery_file_read(
part_hs);
if (ret < 0) {
LOG(1,
"reading bad blocks from the recovery file failed -- '%s'",
part_hs->recovery_file_name);
return -1;
}
if (ret > 0) {
LOG(1,
"incomplete bad block recovery file detected -- '%s'",
part_hs->recovery_file_name);
return 1;
}
if (part_hs->bbs.bb_cnt) {
LOG(3, "part %u contains %u bad blocks -- '%s'",
p, part_hs->bbs.bb_cnt, path);
}
}
}
return 0;
}
/*
* replica_badblocks_recovery_files_create_empty -- (internal) create one empty
* bad block recovery file
* for each part file
*/
static int
replica_badblocks_recovery_files_create_empty(struct pool_set *set,
struct poolset_health_status *set_hs)
{
LOG(3, "set %p, set_hs %p", set, set_hs);
struct part_health_status *part_hs;
const char *path;
int fd;
for (unsigned r = 0; r < set->nreplicas; ++r) {
struct pool_replica *rep = set->replica[r];
struct replica_health_status *rep_hs = set_hs->replica[r];
/* XXX: not supported yet */
if (rep->remote)
continue;
for (unsigned p = 0; p < rep->nparts; ++p) {
part_hs = &rep_hs->part[p];
path = PART(rep, p)->path;
if (!part_hs->recovery_file_name)
continue;
fd = os_open(part_hs->recovery_file_name,
O_RDWR | O_CREAT | O_EXCL,
0600);
if (fd < 0) {
ERR(
"!creating an empty bad block recovery file failed -- '%s' (part file '%s')",
part_hs->recovery_file_name, path);
return -1;
}
os_close(fd);
char *file_name = Strdup(part_hs->recovery_file_name);
if (file_name == NULL) {
ERR("!Strdup");
return -1;
}
char *dir_name = dirname(file_name);
/* fsync the file's directory */
if (os_fsync_dir(dir_name) < 0) {
ERR(
"!syncing the directory of the bad block recovery file failed -- '%s' (part file '%s')",
dir_name, path);
Free(file_name);
return -1;
}
Free(file_name);
part_hs->recovery_file_exists = 1;
}
}
return 0;
}
/*
* replica_badblocks_recovery_files_save -- (internal) save bad blocks
* in the bad block recovery files
*/
static int
replica_badblocks_recovery_files_save(struct pool_set *set,
struct poolset_health_status *set_hs)
{
LOG(3, "set %p, set_hs %p", set, set_hs);
for (unsigned r = 0; r < set->nreplicas; ++r) {
struct pool_replica *rep = set->replica[r];
struct replica_health_status *rep_hs = set_hs->replica[r];
/* XXX: not supported yet */
if (rep->remote)
continue;
for (unsigned p = 0; p < rep->nparts; ++p) {
struct part_health_status *part_hs = &rep_hs->part[p];
if (!part_hs->recovery_file_name)
continue;
int ret = replica_badblocks_recovery_file_save(part_hs);
if (ret < 0) {
LOG(1,
"opening bad block recovery file failed -- '%s'",
part_hs->recovery_file_name);
return -1;
}
}
}
return 0;
}
/*
* replica_badblocks_get -- (internal) get all bad blocks and save them
* in part_hs->bbs structures.
* Returns 1 if any bad block was found, 0 otherwise.
*/
static int
replica_badblocks_get(struct pool_set *set,
struct poolset_health_status *set_hs)
{
LOG(3, "set %p, set_hs %p", set, set_hs);
int bad_blocks_found = 0;
for (unsigned r = 0; r < set->nreplicas; ++r) {
struct pool_replica *rep = set->replica[r];
struct replica_health_status *rep_hs = set_hs->replica[r];
/* XXX: not supported yet */
if (rep->remote)
continue;
for (unsigned p = 0; p < rep->nparts; ++p) {
const char *path = PART(rep, p)->path;
struct part_health_status *part_hs = &rep_hs->part[p];
int exists = util_file_exists(path);
if (exists < 0)
return -1;
if (!exists)
continue;
int ret = badblocks_get(path, &part_hs->bbs);
if (ret < 0) {
ERR(
"!checking the pool part for bad blocks failed -- '%s'",
path);
return -1;
}
if (part_hs->bbs.bb_cnt) {
LOG(3, "part %u contains %u bad blocks -- '%s'",
p, part_hs->bbs.bb_cnt, path);
bad_blocks_found = 1;
}
}
}
return bad_blocks_found;
}
/*
* check_badblocks_in_header -- (internal) check if bad blocks corrupted
* the header
*/
static int
check_badblocks_in_header(struct badblocks *bbs)
{
for (unsigned b = 0; b < bbs->bb_cnt; b++)
if (bbs->bbv[b].offset < POOL_HDR_SIZE)
return 1;
return 0;
}
/*
* replica_badblocks_clear -- (internal) clear all bad blocks
*/
static int
replica_badblocks_clear(struct pool_set *set,
struct poolset_health_status *set_hs)
{
LOG(3, "set %p, set_hs %p", set, set_hs);
int ret;
for (unsigned r = 0; r < set->nreplicas; ++r) {
struct pool_replica *rep = set->replica[r];
struct replica_health_status *rep_hs = set_hs->replica[r];
/* XXX: not supported yet */
if (rep->remote)
continue;
for (unsigned p = 0; p < rep->nparts; ++p) {
const char *path = PART(rep, p)->path;
struct part_health_status *part_hs = &rep_hs->part[p];
int exists = util_file_exists(path);
if (exists < 0)
return -1;
if (!exists) {
/* the part does not exist */
continue;
}
if (part_hs->bbs.bb_cnt == 0) {
/* no bad blocks found */
continue;
}
/* bad blocks were found */
part_hs->flags |= HAS_BAD_BLOCKS;
rep_hs->flags |= HAS_BAD_BLOCKS;
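/* a bad block below POOL_HDR_SIZE means the header itself is corrupted */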
if (check_badblocks_in_header(&part_hs->bbs)) {
part_hs->flags |= HAS_CORRUPTED_HEADER;
if (p == 0)
rep_hs->flags |= HAS_CORRUPTED_HEADER;
}
ret = badblocks_clear(path, &part_hs->bbs);
if (ret < 0) {
LOG(1,
"clearing bad blocks in replica failed -- '%s'",
path);
return -1;
}
}
}
return 0;
}
/*
* replica_badblocks_check_or_clear -- (internal) check if replica contains
* bad blocks when in dry run
* or clear them otherwise
*/
static int
replica_badblocks_check_or_clear(struct pool_set *set,
struct poolset_health_status *set_hs,
int dry_run, int called_from_sync,
int check_bad_blocks, int fix_bad_blocks)
{
LOG(3,
"set %p, set_hs %p, dry_run %i, called_from_sync %i, "
"check_bad_blocks %i, fix_bad_blocks %i",
set, set_hs, dry_run, called_from_sync,
check_bad_blocks, fix_bad_blocks);
#define ERR_MSG_BB \
" please read the manual first and use this option\n"\
" ONLY IF you are sure that you know what you are doing"
enum badblocks_recovery_files_status status;
int ret;
/* check all bad block recovery files */
status = replica_badblocks_recovery_files_check(set, set_hs);
/* phase #1 - error handling */
switch (status) {
case RECOVERY_FILES_ERROR:
LOG(1, "checking bad block recovery files failed");
return -1;
case RECOVERY_FILES_EXIST_ALL:
case RECOVERY_FILES_NOT_ALL_EXIST:
if (!called_from_sync) {
ERR(
"error: a bad block recovery file exists, run 'pmempool sync --bad-blocks' to fix bad blocks first");
return -1;
}
if (!fix_bad_blocks) {
ERR(
"error: a bad block recovery file exists, but the '--bad-blocks' option is not set\n"
ERR_MSG_BB);
return -1;
}
break;
default:
break;
};
/*
* The pool is checked for bad blocks only if:
* 1) compat feature POOL_FEAT_CHECK_BAD_BLOCKS is set
* OR:
* 2) the '--bad-blocks' option is set
*
* Bad blocks are cleared and fixed only if:
* - the '--bad-blocks' option is set
*/
if (!fix_bad_blocks && !check_bad_blocks) {
LOG(3, "skipping bad blocks checking");
return 0;
}
/* phase #2 - reading recovery files */
switch (status) {
case RECOVERY_FILES_EXIST_ALL:
/* read all bad block recovery files */
ret = replica_badblocks_recovery_files_read(set, set_hs);
if (ret < 0) {
LOG(1, "checking bad block recovery files failed");
return -1;
}
if (ret > 0) {
/* incomplete bad block recovery file was detected */
LOG(1,
"warning: incomplete bad block recovery file detected\n"
" - all recovery files will be removed");
/* changing status to RECOVERY_FILES_NOT_ALL_EXIST */
status = RECOVERY_FILES_NOT_ALL_EXIST;
}
break;
case RECOVERY_FILES_NOT_ALL_EXIST:
LOG(1,
"warning: one of bad block recovery files does not exist\n"
" - all recovery files will be removed");
break;
default:
break;
};
if (status == RECOVERY_FILES_NOT_ALL_EXIST) {
/*
* At least one of bad block recovery files does not exist,
* or an incomplete bad block recovery file was detected,
* so all recovery files have to be removed.
*/
if (!dry_run) {
LOG(1, "removing all bad block recovery files...");
ret = replica_remove_all_recovery_files(set_hs);
if (ret < 0) {
LOG(1,
"removing bad block recovery files failed");
return -1;
}
} else {
LOG(1, "all bad block recovery files would be removed");
}
/* changing status to RECOVERY_FILES_DO_NOT_EXIST */
status = RECOVERY_FILES_DO_NOT_EXIST;
}
if (status == RECOVERY_FILES_DO_NOT_EXIST) {
/*
* There are no bad block recovery files,
* so let's check bad blocks.
*/
int bad_blocks_found = replica_badblocks_get(set, set_hs);
if (bad_blocks_found < 0) {
if (errno == ENOTSUP) {
LOG(1, BB_NOT_SUPP);
return -1;
}
LOG(1, "checking bad blocks failed");
return -1;
}
if (!bad_blocks_found) {
LOG(4, "no bad blocks found");
return 0;
}
/* bad blocks were found */
if (!called_from_sync) {
ERR(
"error: bad blocks found, run 'pmempool sync --bad-blocks' to fix bad blocks first");
return -1;
}
if (!fix_bad_blocks) {
ERR(
"error: bad blocks found, but the '--bad-blocks' option is not set\n"
ERR_MSG_BB);
return -1;
}
if (dry_run) {
/* dry-run - do nothing */
LOG(1, "warning: bad blocks were found");
return 0;
}
/* create one empty recovery file for each part file */
ret = replica_badblocks_recovery_files_create_empty(set,
set_hs);
if (ret < 0) {
LOG(1,
"creating empty bad block recovery files failed");
return -1;
}
/* save bad blocks in recovery files */
ret = replica_badblocks_recovery_files_save(set, set_hs);
if (ret < 0) {
LOG(1, "saving bad block recovery files failed");
return -1;
}
}
if (dry_run) {
/* dry-run - do nothing */
LOG(1, "bad blocks would be cleared");
return 0;
}
ret = replica_badblocks_clear(set, set_hs);
if (ret < 0) {
ERR("clearing bad blocks failed");
return -1;
}
return 0;
}
/*
* check_shutdown_state -- (internal) check if replica has
* healthy shutdown_state
*/
static int
check_shutdown_state(struct pool_set *set,
struct poolset_health_status *set_hs)
{
LOG(3, "set %p, set_hs %p", set, set_hs);
for (unsigned r = 0; r < set->nreplicas; ++r) {
struct pool_replica *rep = set->replica[r];
struct replica_health_status *rep_hs = set_hs->replica[r];
struct pool_hdr *hdrp = HDR(rep, 0);
if (rep->remote)
continue;
if (hdrp == NULL) {
/* cannot verify shutdown state */
rep_hs->flags |= IS_BROKEN;
continue;
}
struct shutdown_state curr_sds;
shutdown_state_init(&curr_sds, NULL);
for (unsigned p = 0; p < rep->nparts; ++p) {
if (PART(rep, p)->fd < 0)
continue;
if (shutdown_state_add_part(&curr_sds,
PART(rep, p)->fd, NULL)) {
rep_hs->flags |= IS_BROKEN;
break;
}
}
if (rep_hs->flags & IS_BROKEN)
continue;
/* make a copy of sds as we shouldn't modify a pool */
struct shutdown_state pool_sds = hdrp->sds;
if (shutdown_state_check(&curr_sds, &pool_sds, NULL))
rep_hs->flags |= IS_BROKEN;
}
return 0;
}
/*
* check_uuids_between_parts -- (internal) check if uuids between adjacent
* parts are consistent for a given replica
*/
static int
check_uuids_between_parts(struct pool_set *set, unsigned repn,
struct poolset_health_status *set_hs)
{
LOG(3, "set %p, repn %u, set_hs %p", set, repn, set_hs);
struct pool_replica *rep = REP(set, repn);
/* check poolset_uuid consistency between replica's parts */
LOG(4, "checking consistency of poolset uuid in replica %u", repn);
uuid_t poolset_uuid;
int uuid_stored = 0;
unsigned part_stored = UNDEF_PART;
for (unsigned p = 0; p < rep->nhdrs; ++p) {
/* skip broken parts */
if (replica_is_part_broken(repn, p, set_hs))
continue;
if (!uuid_stored) {
memcpy(poolset_uuid, HDR(rep, p)->poolset_uuid,
POOL_HDR_UUID_LEN);
uuid_stored = 1;
part_stored = p;
continue;
}
if (uuidcmp(HDR(rep, p)->poolset_uuid, poolset_uuid)) {
ERR(
"different poolset uuids in parts from the same replica (repn %u, parts %u and %u) - cannot synchronize",
repn, part_stored, p);
errno = EINVAL;
return -1;
}
}
/* check if all uuids for adjacent replicas are the same across parts */
LOG(4, "checking consistency of adjacent replicas' uuids in replica %u",
repn);
unsigned unbroken_p = UNDEF_PART;
for (unsigned p = 0; p < rep->nhdrs; ++p) {
/* skip broken parts */
if (replica_is_part_broken(repn, p, set_hs))
continue;
if (unbroken_p == UNDEF_PART) {
unbroken_p = p;
continue;
}
struct pool_hdr *hdrp = HDR(rep, p);
int prev_differ = uuidcmp(HDR(rep, unbroken_p)->prev_repl_uuid,
hdrp->prev_repl_uuid);
int next_differ = uuidcmp(HDR(rep, unbroken_p)->next_repl_uuid,
hdrp->next_repl_uuid);
if (prev_differ || next_differ) {
ERR(
"different adjacent replica UUID between parts (repn %u, parts %u and %u) - cannot synchronize",
repn, unbroken_p, p);
errno = EINVAL;
return -1;
}
}
/* check parts linkage */
LOG(4, "checking parts linkage in replica %u", repn);
for (unsigned p = 0; p < rep->nhdrs; ++p) {
/* skip broken parts */
if (replica_is_part_broken(repn, p, set_hs))
continue;
struct pool_hdr *hdrp = HDR(rep, p);
struct pool_hdr *next_hdrp = HDRN(rep, p);
int next_is_broken = replica_is_part_broken(repn, p + 1,
set_hs);
if (!next_is_broken) {
int next_decoupled =
uuidcmp(next_hdrp->prev_part_uuid,
hdrp->uuid) ||
uuidcmp(hdrp->next_part_uuid, next_hdrp->uuid);
if (next_decoupled) {
ERR(
"two consecutive unbroken parts are not linked to each other (repn %u, parts %u and %u) - cannot synchronize",
repn, p, p + 1);
errno = EINVAL;
return -1;
}
}
}
return 0;
}
/*
* check_replicas_consistency -- (internal) check if all uuids within each
* replica are consistent
*/
static int
check_replicas_consistency(struct pool_set *set,
struct poolset_health_status *set_hs)
{
LOG(3, "set %p, set_hs %p", set, set_hs);
for (unsigned r = 0; r < set->nreplicas; ++r) {
if (check_uuids_between_parts(set, r, set_hs))
return -1;
}
return 0;
}
/*
* check_replica_options -- (internal) check if options are consistent in the
* replica
*/
static int
check_replica_options(struct pool_set *set, unsigned repn,
struct poolset_health_status *set_hs)
{
LOG(3, "set %p, repn %u, set_hs %p", set, repn, set_hs);
struct pool_replica *rep = REP(set, repn);
struct replica_health_status *rep_hs = REP_HEALTH(set_hs, repn);
for (unsigned p = 0; p < rep->nhdrs; ++p) {
/* skip broken parts */
if (replica_is_part_broken(repn, p, set_hs))
continue;
struct pool_hdr *hdr = HDR(rep, p);
if (((hdr->features.incompat & POOL_FEAT_SINGLEHDR) == 0) !=
((set->options & OPTION_SINGLEHDR) == 0)) {
LOG(1,
"improper options are set in part %u's header in replica %u",
p, repn);
rep_hs->part[p].flags |= IS_BROKEN;
}
}
return 0;
}
/*
* check_options -- (internal) check if options are consistent in all replicas
*/
static int
check_options(struct pool_set *set, struct poolset_health_status *set_hs)
{
LOG(3, "set %p, set_hs %p", set, set_hs);
for (unsigned r = 0; r < set->nreplicas; ++r) {
if (check_replica_options(set, r, set_hs))
return -1;
}
return 0;
}
/*
* check_replica_poolset_uuids - (internal) check if poolset_uuid fields are
* consistent among all parts of a replica;
* the replica is initially considered as
* consistent
*/
static int
check_replica_poolset_uuids(struct pool_set *set, unsigned repn,
uuid_t poolset_uuid, struct poolset_health_status *set_hs)
{
LOG(3, "set %p, repn %u, poolset_uuid %p, set_hs %p", set, repn,
poolset_uuid, set_hs);
struct pool_replica *rep = REP(set, repn);
for (unsigned p = 0; p < rep->nhdrs; ++p) {
/* skip broken parts */
if (replica_is_part_broken(repn, p, set_hs))
continue;
if (uuidcmp(HDR(rep, p)->poolset_uuid, poolset_uuid)) {
/*
* two internally consistent replicas have
* different poolset_uuid
*/
return -1;
} else {
/*
* it is sufficient to check only one part
* from internally consistent replica
*/
break;
}
}
return 0;
}
/*
* check_poolset_uuids -- (internal) check if poolset_uuid fields are consistent
* among all internally consistent replicas
*/
static int
check_poolset_uuids(struct pool_set *set,
struct poolset_health_status *set_hs)
{
LOG(3, "set %p, set_hs %p", set, set_hs);
/* find a replica with healthy header */
unsigned r_h = replica_find_replica_healthy_header(set_hs);
if (r_h == UNDEF_REPLICA) {
ERR("no healthy replica found");
return -1;
}
uuid_t poolset_uuid;
memcpy(poolset_uuid, HDR(REP(set, r_h), 0)->poolset_uuid,
POOL_HDR_UUID_LEN);
for (unsigned r = 0; r < set->nreplicas; ++r) {
/* skip inconsistent replicas */
if (!replica_is_replica_consistent(r, set_hs) || r == r_h)
continue;
if (check_replica_poolset_uuids(set, r, poolset_uuid, set_hs)) {
ERR(
"inconsistent poolset uuids between replicas %u and %u - cannot synchronize",
r_h, r);
return -1;
}
}
return 0;
}
/*
* get_replica_uuid -- (internal) get replica uuid
*/
static int
get_replica_uuid(struct pool_replica *rep, unsigned repn,
struct poolset_health_status *set_hs, uuid_t **uuidpp)
{
unsigned nhdrs = rep->nhdrs;
if (!replica_is_part_broken(repn, 0, set_hs)) {
/* the first part is not broken */
*uuidpp = &HDR(rep, 0)->uuid;
return 0;
} else if (nhdrs > 1 && !replica_is_part_broken(repn, 1, set_hs)) {
/* the second part is not broken */
*uuidpp = &HDR(rep, 1)->prev_part_uuid;
return 0;
} else if (nhdrs > 1 &&
!replica_is_part_broken(repn, nhdrs - 1, set_hs)) {
/* the last part is not broken */
*uuidpp = &HDR(rep, nhdrs - 1)->next_part_uuid;
return 0;
} else {
/* cannot get replica uuid */
return -1;
}
}
/*
* check_uuids_between_replicas -- (internal) check if uuids between internally
* consistent adjacent replicas are consistent
*/
static int
check_uuids_between_replicas(struct pool_set *set,
struct poolset_health_status *set_hs)
{
LOG(3, "set %p, set_hs %p", set, set_hs);
for (unsigned r = 0; r < set->nreplicas; ++r) {
/* skip comparing inconsistent pairs of replicas */
if (!replica_is_replica_consistent(r, set_hs) ||
!replica_is_replica_consistent(r + 1, set_hs))
continue;
struct pool_replica *rep = REP(set, r);
struct pool_replica *rep_n = REPN(set, r);
/* get uuids of the two adjacent replicas */
uuid_t *rep_uuidp = NULL;
uuid_t *rep_n_uuidp = NULL;
unsigned r_n = REPN_HEALTHidx(set_hs, r);
if (get_replica_uuid(rep, r, set_hs, &rep_uuidp))
LOG(2, "cannot get replica uuid, replica %u", r);
if (get_replica_uuid(rep_n, r_n, set_hs, &rep_n_uuidp))
LOG(2, "cannot get replica uuid, replica %u", r_n);
/*
* check if replica uuids are consistent between two adjacent
* replicas
*/
unsigned p = replica_find_unbroken_part(r, set_hs);
unsigned p_n = replica_find_unbroken_part(r_n, set_hs);
if (p_n != UNDEF_PART && rep_uuidp != NULL &&
uuidcmp(*rep_uuidp,
HDR(rep_n, p_n)->prev_repl_uuid)) {
ERR(
"inconsistent replica uuids between replicas %u and %u",
r, r_n);
return -1;
}
if (p != UNDEF_PART && rep_n_uuidp != NULL &&
uuidcmp(*rep_n_uuidp,
HDR(rep, p)->next_repl_uuid)) {
ERR(
"inconsistent replica uuids between replicas %u and %u",
r, r_n);
return -1;
}
/*
* check if replica uuids on borders of a broken replica are
* consistent
*/
unsigned r_nn = REPN_HEALTHidx(set_hs, r_n);
if (set->nreplicas > 1 && p != UNDEF_PART &&
replica_is_replica_broken(r_n, set_hs) &&
replica_is_replica_consistent(r_nn, set_hs)) {
unsigned p_nn =
replica_find_unbroken_part(r_nn, set_hs);
if (p_nn == UNDEF_PART) {
LOG(2,
"cannot compare uuids on borders of replica %u",
r);
continue;
}
struct pool_replica *rep_nn = REP(set, r_nn);
if (uuidcmp(HDR(rep, p)->next_repl_uuid,
HDR(rep_nn, p_nn)->prev_repl_uuid)) {
ERR(
"inconsistent replica uuids on borders of replica %u",
r);
return -1;
}
}
}
return 0;
}
/*
* check_replica_cycles -- (internal) check if healthy replicas form cycles
* shorter than the number of all replicas
*/
static int
check_replica_cycles(struct pool_set *set,
struct poolset_health_status *set_hs)
{
LOG(3, "set %p, set_hs %p", set, set_hs);
unsigned first_healthy;
unsigned count_healthy = 0;
for (unsigned r = 0; r < set->nreplicas; ++r) {
if (!replica_is_replica_healthy(r, set_hs)) {
count_healthy = 0;
continue;
}
if (count_healthy == 0)
first_healthy = r;
++count_healthy;
struct pool_hdr *hdrh =
PART(REP(set, first_healthy), 0)->hdr;
struct pool_hdr *hdr = PART(REP(set, r), 0)->hdr;
if (uuidcmp(hdrh->uuid, hdr->next_repl_uuid) == 0 &&
count_healthy < set->nreplicas) {
/*
* Healthy replicas form a cycle shorter than
* the number of all replicas; for the user this
* means a replica from a different poolset has
* been detected:
*/
ERR(
"alien replica found (probably coming from a different poolset)");
return -1;
}
}
return 0;
}
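/*
* Editorial example of the condition detected above: in a poolset with
* three replicas where replicas 0 and 1 are healthy and their headers
* link to each other (hdr(0)->next_repl_uuid == hdr(1)->uuid and
* hdr(1)->next_repl_uuid == hdr(0)->uuid), the healthy replicas close a
* cycle of length 2 < 3, so replica 2 cannot belong to the same chain
* and is reported as alien.
*/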
/*
* check_replica_sizes -- (internal) check if all replicas are large
* enough to hold data from a healthy replica
*/
static int
check_replica_sizes(struct pool_set *set, struct poolset_health_status *set_hs)
{
LOG(3, "set %p, set_hs %p", set, set_hs);
ssize_t pool_size = -1;
for (unsigned r = 0; r < set->nreplicas; ++r) {
/* skip broken replicas */
if (!replica_is_replica_healthy(r, set_hs))
continue;
/* get the size of a pool in the replica */
ssize_t replica_pool_size;
if (REP(set, r)->remote)
/* XXX: no way to get the size of a remote pool yet */
replica_pool_size = (ssize_t)set->poolsize;
else
replica_pool_size = replica_get_pool_size(set, r);
if (replica_pool_size < 0) {
LOG(1, "getting pool size from replica %u failed", r);
set_hs->replica[r]->flags |= IS_BROKEN;
continue;
}
/* check if the pool is bigger than minimum size */
enum pool_type type = pool_hdr_get_type(HDR(REP(set, r), 0));
if ((size_t)replica_pool_size < pool_get_min_size(type)) {
LOG(1,
"pool size from replica %u is smaller than the minimum size allowed for the pool",
r);
set_hs->replica[r]->flags |= IS_BROKEN;
continue;
}
/* check if each replica is big enough to hold the pool data */
if (set->poolsize < (size_t)replica_pool_size) {
ERR(
"some replicas are too small to hold synchronized data");
return -1;
}
if (pool_size < 0) {
pool_size = replica_pool_size;
continue;
}
/* check if pools in all healthy replicas are of equal size */
if (pool_size != replica_pool_size) {
ERR("pool sizes from different replicas differ");
return -1;
}
}
return 0;
}
/*
* replica_read_features -- (internal) read features from the header
*/
static int
replica_read_features(struct pool_set *set,
struct poolset_health_status *set_hs,
features_t *features)
{
LOG(3, "set %p set_hs %p features %p", set, set_hs, features);
ASSERTne(features, NULL);
for (unsigned r = 0; r < set->nreplicas; r++) {
struct pool_replica *rep = set->replica[r];
struct replica_health_status *rep_hs = set_hs->replica[r];
if (rep->remote) {
if (rep_hs->flags & IS_BROKEN)
continue;
struct pool_hdr *hdrp = rep->part[0].hdr;
memcpy(features, &hdrp->features, sizeof(*features));
return 0;
}
for (unsigned p = 0; p < rep->nparts; p++) {
struct pool_set_part *part = &rep->part[p];
if (part->fd == -1)
continue;
if (util_map_hdr(part, MAP_SHARED, 0) != 0) {
LOG(1, "header mapping failed");
return -1;
}
struct pool_hdr *hdrp = part->hdr;
memcpy(features, &hdrp->features, sizeof(*features));
util_unmap_hdr(part);
return 0;
}
}
/* no healthy replica/part found */
return -1;
}
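/*
* Editorial note: feature flags are required to be identical in every
* part and replica of a poolset, so the first header that can be read,
* remote or local, is taken as representative of the whole poolset.
*/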
/*
* replica_check_poolset_health -- check if a given poolset can be considered as
* healthy, and store the status in a helping structure
*/
int
replica_check_poolset_health(struct pool_set *set,
struct poolset_health_status **set_hsp,
int called_from_sync, unsigned flags)
{
LOG(3, "set %p, set_hsp %p, called_from_sync %i, flags %u",
set, set_hsp, called_from_sync, flags);
if (replica_create_poolset_health_status(set, set_hsp)) {
LOG(1, "creating poolset health status failed");
return -1;
}
struct poolset_health_status *set_hs = *set_hsp;
/* check if part files exist and are accessible */
if (check_and_open_poolset_part_files(set, set_hs, flags)) {
LOG(1, "poolset part files check failed");
goto err;
}
features_t features;
int check_bad_blks;
int fix_bad_blks = called_from_sync && fix_bad_blocks(flags);
if (fix_bad_blks) {
/*
* We will fix bad blocks, so we cannot read features here,
* as reading could fail precisely because of those bad blocks.
* We will read the features once the bad blocks are fixed.
*
* Fixing bad blocks implies checking bad blocks.
*/
check_bad_blks = 1;
} else {
/*
* We will not fix bad blocks, so we have to read features here.
*/
if (replica_read_features(set, set_hs, &features)) {
LOG(1, "reading features failed");
goto err;
}
check_bad_blks = features.compat & POOL_FEAT_CHECK_BAD_BLOCKS;
}
/* check for bad blocks when in dry run or clear them otherwise */
if (replica_badblocks_check_or_clear(set, set_hs, is_dry_run(flags),
called_from_sync, check_bad_blks, fix_bad_blks)) {
LOG(1, "replica bad_blocks check failed");
goto err;
}
/* read features after fixing bad blocks */
if (fix_bad_blks && replica_read_features(set, set_hs, &features)) {
LOG(1, "reading features failed");
goto err;
}
/* set the ignore_sds flag based on features read from the header */
set->ignore_sds = !(features.incompat & POOL_FEAT_SDS);
/* map all headers */
map_all_unbroken_headers(set, set_hs);
/*
* Check if checksums and signatures are correct for all parts
* in all replicas.
*/
check_checksums_and_signatures(set, set_hs);
/* check if option flags are consistent */
if (check_options(set, set_hs)) {
LOG(1, "flags check failed");
goto err;
}
if (!set->ignore_sds && check_shutdown_state(set, set_hs)) {
LOG(1, "replica shutdown_state check failed");
goto err;
}
/* check if uuids in parts across each replica are consistent */
if (check_replicas_consistency(set, set_hs)) {
LOG(1, "replica consistency check failed");
goto err;
}
/* check poolset_uuid values between replicas */
if (check_poolset_uuids(set, set_hs)) {
LOG(1, "poolset uuids check failed");
goto err;
}
/* check if uuids for adjacent replicas are consistent */
if (check_uuids_between_replicas(set, set_hs)) {
LOG(1, "replica uuids check failed");
goto err;
}
/* check if healthy replicas make up another poolset */
if (check_replica_cycles(set, set_hs)) {
LOG(1, "replica cycles check failed");
goto err;
}
/* check if replicas are large enough */
if (check_replica_sizes(set, set_hs)) {
LOG(1, "replica sizes check failed");
goto err;
}
if (check_store_all_sizes(set, set_hs)) {
LOG(1, "reading pool sizes failed");
goto err;
}
unmap_all_headers(set);
util_poolset_fdclose_always(set);
return 0;
err:
errno = EINVAL;
unmap_all_headers(set);
util_poolset_fdclose_always(set);
replica_free_poolset_health_status(set_hs);
return -1;
}
/*
* replica_get_pool_size -- find the effective size (mapped) of a pool based
* on metadata from given replica
*/
ssize_t
replica_get_pool_size(struct pool_set *set, unsigned repn)
{
LOG(3, "set %p, repn %u", set, repn);
struct pool_set_part *part = PART(REP(set, repn), 0);
int should_close_part = 0;
int should_unmap_part = 0;
if (part->fd == -1) {
if (util_part_open(part, 0, 0))
return -1;
should_close_part = 1;
}
if (part->addr == NULL) {
if (util_map_part(part, NULL,
ALIGN_UP(sizeof(PMEMobjpool), part->alignment), 0,
MAP_SHARED, 1)) {
util_part_fdclose(part);
return -1;
}
should_unmap_part = 1;
}
PMEMobjpool *pop = (PMEMobjpool *)part->addr;
ssize_t ret = (ssize_t)(pop->heap_offset + pop->heap_size);
if (should_unmap_part)
util_unmap_part(part);
if (should_close_part)
util_part_fdclose(part);
return ret;
}
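/*
* Editorial note on the calculation above: in the pmemobj layout the heap
* is the last component of the pool, so the effective size is simply
*
*	effective_size = pop->heap_offset + pop->heap_size
*
* and only the first ALIGN_UP(sizeof(PMEMobjpool), alignment) bytes of
* part 0 have to be mapped to read those two fields.
*/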
/*
* replica_check_part_sizes -- check if all parts are large enough
*/
int
replica_check_part_sizes(struct pool_set *set, size_t min_size)
{
LOG(3, "set %p, min_size %zu", set, min_size);
for (unsigned r = 0; r < set->nreplicas; ++r) {
struct pool_replica *rep = set->replica[r];
if (rep->remote != NULL)
/* skip remote replicas */
continue;
for (unsigned p = 0; p < rep->nparts; ++p) {
if (PART(rep, p)->filesize < min_size) {
ERR("replica %u, part %u: file is too small",
r, p);
errno = EINVAL;
return -1;
}
}
}
return 0;
}
/*
* replica_check_local_part_dir -- check if directory for the part file
* exists
*/
int
replica_check_local_part_dir(struct pool_set *set, unsigned repn,
unsigned partn)
{
LOG(3, "set %p, repn %u, partn %u", set, repn, partn);
char *path = Strdup(PART(REP(set, repn), partn)->path);
if (path == NULL) {
	ERR("!Strdup");
	return -1;
}
const char *dir = dirname(path);
os_stat_t sb;
if (os_stat(dir, &sb) != 0 || !(sb.st_mode & S_IFDIR)) {
ERR(
"directory %s for part %u in replica %u does not exist or is not accessible",
dir, partn, repn);
Free(path);
return -1;
}
Free(path);
return 0;
}
/*
* replica_check_part_dirs -- (internal) check if directories for part files
* exist
*/
int
replica_check_part_dirs(struct pool_set *set)
{
LOG(3, "set %p", set);
for (unsigned r = 0; r < set->nreplicas; ++r) {
struct pool_replica *rep = set->replica[r];
if (rep->remote != NULL)
/* skip remote replicas */
continue;
for (unsigned p = 0; p < rep->nparts; ++p) {
if (replica_check_local_part_dir(set, r, p))
return -1;
}
}
return 0;
}
/*
* replica_open_replica_part_files -- open all part files for a replica
*/
int
replica_open_replica_part_files(struct pool_set *set, unsigned repn)
{
LOG(3, "set %p, repn %u", set, repn);
struct pool_replica *rep = set->replica[repn];
for (unsigned p = 0; p < rep->nparts; ++p) {
/* skip already opened files */
if (rep->part[p].fd != -1)
continue;
if (util_part_open(&rep->part[p], 0, 0)) {
LOG(1, "part files open failed for replica %u, part %u",
repn, p);
errno = EINVAL;
goto err;
}
}
return 0;
err:
util_replica_fdclose(set->replica[repn]);
return -1;
}
/*
* replica_open_poolset_part_files -- open all part files for a poolset
*/
int
replica_open_poolset_part_files(struct pool_set *set)
{
LOG(3, "set %p", set);
for (unsigned r = 0; r < set->nreplicas; ++r) {
if (set->replica[r]->remote)
continue;
if (replica_open_replica_part_files(set, r)) {
LOG(1, "opening replica %u, part files failed", r);
goto err;
}
}
return 0;
err:
util_poolset_fdclose_always(set);
return -1;
}
/*
* pmempool_syncU -- synchronize replicas within a poolset
*/
#ifndef _WIN32
static inline
#endif
int
pmempool_syncU(const char *poolset, unsigned flags)
{
LOG(3, "poolset %s, flags %u", poolset, flags);
ASSERTne(poolset, NULL);
/* check if poolset has correct signature */
if (util_is_poolset_file(poolset) != 1) {
ERR("file is not a poolset file");
goto err;
}
/* check if flags are supported */
if (check_flags_sync(flags)) {
ERR("unsupported flags");
errno = EINVAL;
goto err;
}
/* open poolset file */
int fd = util_file_open(poolset, NULL, 0, O_RDONLY);
if (fd < 0) {
ERR("cannot open a poolset file");
goto err;
}
/* fill up pool_set structure */
struct pool_set *set = NULL;
if (util_poolset_parse(&set, poolset, fd)) {
ERR("parsing input poolset failed");
goto err_close_file;
}
if (set->nreplicas == 1) {
ERR("no replica(s) found in the pool set");
errno = EINVAL;
goto err_close_file;
}
if (set->remote && util_remote_load()) {
ERR("remote replication not available");
errno = ENOTSUP;
goto err_close_file;
}
/* sync all replicas */
if (replica_sync(set, NULL, flags)) {
LOG(1, "synchronization failed");
goto err_close_all;
}
util_poolset_close(set, DO_NOT_DELETE_PARTS);
os_close(fd);
return 0;
err_close_all:
util_poolset_close(set, DO_NOT_DELETE_PARTS);
err_close_file:
os_close(fd);
err:
if (errno == 0)
errno = EINVAL;
return -1;
}
#ifndef _WIN32
/*
* pmempool_sync -- synchronize replicas within a poolset
*/
int
pmempool_sync(const char *poolset, unsigned flags)
{
return pmempool_syncU(poolset, flags);
}
#else
/*
* pmempool_syncW -- synchronize replicas within a poolset in widechar
*/
int
pmempool_syncW(const wchar_t *poolset, unsigned flags)
{
char *path = util_toUTF8(poolset);
if (path == NULL) {
ERR("Invalid poolest file path.");
return -1;
}
int ret = pmempool_syncU(path, flags);
util_free_UTF8(path);
return ret;
}
#endif
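/*
* Illustrative usage sketch (editorial, not part of this module; the
* poolset path is an assumption supplied by the caller). A dry run first
* verifies that synchronization is possible without modifying any files:
*
*	#include <libpmempool.h>
*
*	int
*	example_sync(const char *poolset_path)
*	{
*		if (pmempool_sync(poolset_path, PMEMPOOL_SYNC_DRY_RUN)) {
*			perror("pmempool_sync (dry run)");
*			return -1;
*		}
*		return pmempool_sync(poolset_path, 0);
*	}
*/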
/*
* pmempool_transformU -- alter poolset structure
*/
#ifndef _WIN32
static inline
#endif
int
pmempool_transformU(const char *poolset_src,
const char *poolset_dst, unsigned flags)
{
LOG(3, "poolset_src %s, poolset_dst %s, flags %u", poolset_src,
poolset_dst, flags);
ASSERTne(poolset_src, NULL);
ASSERTne(poolset_dst, NULL);
/* check if the source poolset has correct signature */
if (util_is_poolset_file(poolset_src) != 1) {
ERR("source file is not a poolset file");
goto err;
}
/* check if the destination poolset has correct signature */
if (util_is_poolset_file(poolset_dst) != 1) {
ERR("destination file is not a poolset file");
goto err;
}
/* check if flags are supported */
if (check_flags_transform(flags)) {
ERR("unsupported flags");
errno = EINVAL;
goto err;
}
/* open the source poolset file */
int fd_in = util_file_open(poolset_src, NULL, 0, O_RDONLY);
if (fd_in < 0) {
ERR("cannot open source poolset file");
goto err;
}
/* parse the source poolset file */
struct pool_set *set_in = NULL;
if (util_poolset_parse(&set_in, poolset_src, fd_in)) {
ERR("parsing source poolset failed");
os_close(fd_in);
goto err;
}
os_close(fd_in);
/* open the destination poolset file */
int fd_out = util_file_open(poolset_dst, NULL, 0, O_RDONLY);
if (fd_out < 0) {
ERR("cannot open destination poolset file");
goto err;
}
enum del_parts_mode del = DO_NOT_DELETE_PARTS;
/* parse the destination poolset file */
struct pool_set *set_out = NULL;
if (util_poolset_parse(&set_out, poolset_dst, fd_out)) {
ERR("parsing destination poolset failed");
os_close(fd_out);
goto err_free_poolin;
}
os_close(fd_out);
/* check if the source poolset is of a correct type */
enum pool_type ptype = pool_set_type(set_in);
if (ptype != POOL_TYPE_OBJ) {
errno = EINVAL;
ERR("transform is not supported for given pool type: %s",
pool_get_pool_type_str(ptype));
goto err_free_poolout;
}
/* load remote library if needed */
if (set_in->remote && util_remote_load()) {
ERR("remote replication not available");
goto err_free_poolout;
}
if (set_out->remote && util_remote_load()) {
ERR("remote replication not available");
goto err_free_poolout;
}
del = is_dry_run(flags) ? DO_NOT_DELETE_PARTS : DELETE_CREATED_PARTS;
/* transform poolset */
if (replica_transform(set_in, set_out, flags)) {
LOG(1, "transformation failed");
goto err_free_poolout;
}
util_poolset_close(set_in, DO_NOT_DELETE_PARTS);
util_poolset_close(set_out, DO_NOT_DELETE_PARTS);
return 0;
err_free_poolout:
util_poolset_close(set_out, del);
err_free_poolin:
util_poolset_close(set_in, DO_NOT_DELETE_PARTS);
err:
if (errno == 0)
errno = EINVAL;
return -1;
}
#ifndef _WIN32
/*
* pmempool_transform -- alter poolset structure
*/
int
pmempool_transform(const char *poolset_src,
const char *poolset_dst, unsigned flags)
{
return pmempool_transformU(poolset_src, poolset_dst, flags);
}
#else
/*
* pmempool_transformW -- alter poolset structure in widechar
*/
int
pmempool_transformW(const wchar_t *poolset_src,
const wchar_t *poolset_dst, unsigned flags)
{
char *path_src = util_toUTF8(poolset_src);
if (path_src == NULL) {
ERR("Invalid source poolest file path.");
return -1;
}
char *path_dst = util_toUTF8(poolset_dst);
if (path_dst == NULL) {
ERR("Invalid destination poolest file path.");
Free(path_src);
return -1;
}
int ret = pmempool_transformU(path_src, path_dst, flags);
util_free_UTF8(path_src);
util_free_UTF8(path_dst);
return ret;
}
#endif
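/*
* Illustrative usage sketch (editorial, not part of this module; both
* poolset paths are assumptions supplied by the caller). The dry run
* checks whether src can be transformed into dst before any file is
* touched:
*
*	#include <libpmempool.h>
*
*	int
*	example_transform(const char *src, const char *dst)
*	{
*		unsigned dry = PMEMPOOL_TRANSFORM_DRY_RUN;
*		if (pmempool_transform(src, dst, dry)) {
*			perror("pmempool_transform (dry run)");
*			return -1;
*		}
*		return pmempool_transform(src, dst, 0);
*	}
*/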
| 62,038 | 23.775958 | 115 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmempool/transform.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* transform.c -- a module for poolset transforming
*/
#include <stdio.h>
#include <stdint.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <unistd.h>
#include <fcntl.h>
#include <limits.h>
#include <dirent.h>
#include <assert.h>
#include "replica.h"
#include "out.h"
#include "file.h"
#include "os.h"
#include "libpmem.h"
#include "util_pmem.h"
/*
* poolset_compare_status - a helping structure for gathering corresponding
* replica numbers when comparing poolsets
*/
struct poolset_compare_status
{
unsigned nreplicas;
unsigned flags;
unsigned replica[];
};
/*
* type of transform operation to be done
*/
enum transform_op {
NOT_TRANSFORMABLE,
ADD_REPLICAS,
RM_REPLICAS,
ADD_HDRS,
RM_HDRS,
};
/*
* check_if_part_used_once -- (internal) check if the part is used only once in
* the rest of the poolset
*/
static int
check_if_part_used_once(struct pool_set *set, unsigned repn, unsigned partn)
{
LOG(3, "set %p, repn %u, partn %u", set, repn, partn);
struct pool_replica *rep = REP(set, repn);
char *path = util_part_realpath(PART(rep, partn)->path);
if (path == NULL) {
LOG(1, "cannot get absolute path for %s, replica %u, part %u",
PART(rep, partn)->path, repn, partn);
errno = 0;
path = strdup(PART(rep, partn)->path);
if (path == NULL) {
ERR("!strdup");
return -1;
}
}
int ret = 0;
for (unsigned r = repn; r < set->nreplicas; ++r) {
struct pool_replica *repr = set->replica[r];
/* skip remote replicas */
if (repr->remote != NULL)
continue;
/* avoid superfluous comparisons */
unsigned i = (r == repn) ? partn + 1 : 0;
for (unsigned p = i; p < repr->nparts; ++p) {
char *pathp = util_part_realpath(PART(repr, p)->path);
if (pathp == NULL) {
if (errno != ENOENT) {
ERR("realpath failed for %s, errno %d",
PART(repr, p)->path, errno);
ret = -1;
goto out;
}
LOG(1, "cannot get absolute path for %s,"
" replica %u, part %u",
PART(rep, partn)->path, repn,
partn);
pathp = strdup(PART(repr, p)->path);
errno = 0;
}
int result = util_compare_file_inodes(path, pathp);
if (result == 0) {
/* same file used multiple times */
ERR("some part file's path is"
" used multiple times");
ret = -1;
errno = EINVAL;
free(pathp);
goto out;
} else if (result < 0) {
ERR("comparing file inodes failed for %s and"
" %s", path, pathp);
ret = -1;
free(pathp);
goto out;
}
free(pathp);
}
}
out:
free(path);
return ret;
}
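/*
* Editorial note: inode comparison (util_compare_file_inodes) is used
* above instead of a plain strcmp() because two textually different
* paths, e.g. ones going through a symlink or a hard link, may still
* name the same file, and using one file as two parts would corrupt
* the pool.
*/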
/*
* check_if_remote_replica_used_once -- (internal) check if remote replica is
* used only once in the rest of the
* poolset
*/
static int
check_if_remote_replica_used_once(struct pool_set *set, unsigned repn)
{
LOG(3, "set %p, repn %u", set, repn);
struct remote_replica *rep = REP(set, repn)->remote;
ASSERTne(rep, NULL);
for (unsigned r = repn + 1; r < set->nreplicas; ++r) {
/* skip local replicas */
if (REP(set, r)->remote == NULL)
continue;
struct remote_replica *repr = REP(set, r)->remote;
/* XXX: add comparing resolved addresses of the nodes */
if (strcmp(rep->node_addr, repr->node_addr) == 0 &&
strcmp(rep->pool_desc, repr->pool_desc) == 0) {
ERR("remote replica %u is used multiple times", repn);
errno = EINVAL;
return -1;
}
}
return 0;
}
/*
* check_paths -- (internal) check if directories for part files exist
* and if paths for part files do not repeat in the poolset
*/
static int
check_paths(struct pool_set *set)
{
LOG(3, "set %p", set);
for (unsigned r = 0; r < set->nreplicas; ++r) {
struct pool_replica *rep = set->replica[r];
if (rep->remote != NULL) {
if (check_if_remote_replica_used_once(set, r))
return -1;
} else {
for (unsigned p = 0; p < rep->nparts; ++p) {
if (replica_check_local_part_dir(set, r, p))
return -1;
if (check_if_part_used_once(set, r, p))
return -1;
}
}
}
return 0;
}
/*
* validate_args -- (internal) check whether passed arguments are valid
*/
static int
validate_args(struct pool_set *set_in, struct pool_set *set_out)
{
LOG(3, "set_in %p, set_out %p", set_in, set_out);
if (set_in->directory_based) {
ERR("transform of directory poolsets is not supported");
errno = EINVAL;
return -1;
}
/*
* check if all parts in the target poolset are large enough
* (now replication works only for pmemobj pools)
*/
if (replica_check_part_sizes(set_out, PMEMOBJ_MIN_POOL)) {
ERR("part sizes check failed");
return -1;
}
/*
* check if all directories for part files exist and if part files
* do not reoccur in the poolset
*/
if (check_paths(set_out))
return -1;
/*
* check if set_out has enough size, i.e. if the target poolset
* structure has enough capacity to accommodate the effective size of
* the source poolset
*/
ssize_t master_pool_size = replica_get_pool_size(set_in, 0);
if (master_pool_size < 0) {
ERR("getting pool size from master replica failed");
return -1;
}
if (set_out->poolsize < (size_t)master_pool_size) {
ERR("target poolset is too small");
errno = EINVAL;
return -1;
}
return 0;
}
/*
* create_poolset_compare_status -- (internal) create structure for gathering
* status of poolset comparison
*/
static int
create_poolset_compare_status(struct pool_set *set,
struct poolset_compare_status **set_sp)
{
LOG(3, "set %p, set_sp %p", set, set_sp);
struct poolset_compare_status *set_s;
set_s = Zalloc(sizeof(struct poolset_compare_status)
+ set->nreplicas * sizeof(unsigned));
if (set_s == NULL) {
ERR("!Zalloc for poolset status");
return -1;
}
for (unsigned r = 0; r < set->nreplicas; ++r)
set_s->replica[r] = UNDEF_REPLICA;
set_s->nreplicas = set->nreplicas;
*set_sp = set_s;
return 0;
}
/*
* compare_parts -- (internal) check if two parts can be considered the same
*/
static int
compare_parts(struct pool_set_part *p1, struct pool_set_part *p2)
{
LOG(3, "p1 %p, p2 %p", p1, p2);
LOG(4, "p1->path: %s, p1->filesize: %lu", p1->path, p1->filesize);
LOG(4, "p2->path: %s, p2->filesize: %lu", p2->path, p2->filesize);
return strcmp(p1->path, p2->path) || (p1->filesize != p2->filesize);
}
/*
* compare_replicas -- (internal) check if two replicas are different
*/
static int
compare_replicas(struct pool_replica *r1, struct pool_replica *r2)
{
LOG(3, "r1 %p, r2 %p", r1, r2);
LOG(4, "r1->nparts: %u, r2->nparts: %u", r1->nparts, r2->nparts);
/* both replicas are local */
if (r1->remote == NULL && r2->remote == NULL) {
if (r1->nparts != r2->nparts)
return 1;
for (unsigned p = 0; p < r1->nparts; ++p) {
if (compare_parts(&r1->part[p], &r2->part[p]))
return 1;
}
return 0;
}
/* both replicas are remote */
if (r1->remote != NULL && r2->remote != NULL) {
return strcmp(r1->remote->node_addr, r2->remote->node_addr) ||
strcmp(r1->remote->pool_desc, r2->remote->pool_desc);
}
/* a remote and a local replicas */
return 1;
}
/*
* check_compare_poolsets_status -- (internal) find different replicas between
* two poolsets; for each replica which has
* a counterpart in the other poolset store
* the other replica's number in a helping
* structure
*/
static int
check_compare_poolsets_status(struct pool_set *set_in,
struct pool_set *set_out,
struct poolset_compare_status *set_in_s,
struct poolset_compare_status *set_out_s)
{
LOG(3, "set_in %p, set_out %p, set_in_s %p, set_out_s %p", set_in,
set_out, set_in_s, set_out_s);
for (unsigned ri = 0; ri < set_in->nreplicas; ++ri) {
struct pool_replica *rep_in = REP(set_in, ri);
for (unsigned ro = 0; ro < set_out->nreplicas; ++ro) {
struct pool_replica *rep_out = REP(set_out, ro);
LOG(1, "comparing rep_in %u with rep_out %u", ri, ro);
/* skip different replicas */
if (compare_replicas(rep_in, rep_out))
continue;
if (set_in_s->replica[ri] != UNDEF_REPLICA ||
set_out_s->replica[ro]
!= UNDEF_REPLICA) {
/* there are more than one counterparts */
ERR("there are more then one corresponding"
" replicas; cannot transform");
errno = EINVAL;
return -1;
}
set_in_s->replica[ri] = ro;
set_out_s->replica[ro] = ri;
}
}
return 0;
}
/*
* check_compare_poolsets_options -- (internal) check poolset options
*/
static int
check_compare_poolsets_options(struct pool_set *set_in,
struct pool_set *set_out,
struct poolset_compare_status *set_in_s,
struct poolset_compare_status *set_out_s)
{
if (set_in->options & OPTION_SINGLEHDR)
set_in_s->flags |= OPTION_SINGLEHDR;
if (set_out->options & OPTION_SINGLEHDR)
set_out_s->flags |= OPTION_SINGLEHDR;
if ((set_in->options & OPTION_NOHDRS) ||
(set_out->options & OPTION_NOHDRS)) {
errno = EINVAL;
ERR(
"the NOHDRS poolset option is not supported in local poolset files");
return -1;
}
return 0;
}
/*
* compare_poolsets -- (internal) compare two poolsets; for each replica which
* has a counterpart in the other poolset store the other
* replica's number in a helping structure
*/
static int
compare_poolsets(struct pool_set *set_in, struct pool_set *set_out,
struct poolset_compare_status **set_in_s,
struct poolset_compare_status **set_out_s)
{
LOG(3, "set_in %p, set_out %p, set_in_s %p, set_out_s %p", set_in,
set_out, set_in_s, set_out_s);
if (create_poolset_compare_status(set_in, set_in_s))
return -1;
if (create_poolset_compare_status(set_out, set_out_s))
goto err_free_in;
if (check_compare_poolsets_status(set_in, set_out, *set_in_s,
*set_out_s))
goto err_free_out;
if (check_compare_poolsets_options(set_in, set_out, *set_in_s,
*set_out_s))
goto err_free_out;
return 0;
err_free_out:
Free(*set_out_s);
err_free_in:
Free(*set_in_s);
return -1;
}
/*
* replica_counterpart -- (internal) returns index of a counterpart replica
*/
static unsigned
replica_counterpart(unsigned repn,
struct poolset_compare_status *set_s)
{
return set_s->replica[repn];
}
/*
* identify_transform_operation -- (internal) check if poolsets can be
*                                 transformed one into the other and identify
*                                 the operation; also gather info about
*                                 replicas' health
*/
static enum transform_op
identify_transform_operation(struct poolset_compare_status *set_in_s,
struct poolset_compare_status *set_out_s,
struct poolset_health_status *set_in_hs,
struct poolset_health_status *set_out_hs)
{
LOG(3, "set_in_s %p, set_out_s %p", set_in_s, set_out_s);
int has_replica_to_keep = 0;
int is_removing_replicas = 0;
int is_adding_replicas = 0;
/* check if there are replicas to be removed */
for (unsigned r = 0; r < set_in_s->nreplicas; ++r) {
unsigned c = replica_counterpart(r, set_in_s);
if (c != UNDEF_REPLICA) {
LOG(2, "replica %u has a counterpart %u", r,
set_in_s->replica[r]);
has_replica_to_keep = 1;
REP_HEALTH(set_out_hs, c)->pool_size =
REP_HEALTH(set_in_hs, r)->pool_size;
} else {
LOG(2, "replica %u has no counterpart", r);
is_removing_replicas = 1;
}
}
/* make sure we have at least one replica to keep */
if (!has_replica_to_keep) {
ERR("there must be at least one replica left");
return NOT_TRANSFORMABLE;
}
/* check if there are replicas to be added */
for (unsigned r = 0; r < set_out_s->nreplicas; ++r) {
if (replica_counterpart(r, set_out_s) == UNDEF_REPLICA) {
LOG(2, "Replica %u from output set has no counterpart",
r);
if (is_removing_replicas) {
ERR(
"adding and removing replicas at the same time is not allowed");
return NOT_TRANSFORMABLE;
}
REP_HEALTH(set_out_hs, r)->flags |= IS_BROKEN;
is_adding_replicas = 1;
}
}
/* check if there is anything to do */
if (!is_removing_replicas && !is_adding_replicas &&
(set_in_s->flags & OPTION_SINGLEHDR) ==
(set_out_s->flags & OPTION_SINGLEHDR)) {
ERR("both poolsets are equal");
return NOT_TRANSFORMABLE;
}
/* allow changing the SINGLEHDR option only as the sole operation */
if ((is_removing_replicas || is_adding_replicas) &&
(set_in_s->flags & OPTION_SINGLEHDR) !=
(set_out_s->flags & OPTION_SINGLEHDR)) {
ERR(
"cannot add/remove replicas and change the SINGLEHDR option at the same time");
return NOT_TRANSFORMABLE;
}
if (is_removing_replicas)
return RM_REPLICAS;
if (is_adding_replicas)
return ADD_REPLICAS;
if (set_out_s->flags & OPTION_SINGLEHDR)
return RM_HDRS;
if (set_in_s->flags & OPTION_SINGLEHDR)
return ADD_HDRS;
ASSERT(0);
return NOT_TRANSFORMABLE;
}
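/*
* Editorial summary of the decision above (exactly one operation is
* performed per transform):
*
*	replicas removed                  -> RM_REPLICAS
*	replicas added                    -> ADD_REPLICAS
*	SINGLEHDR set only in the output  -> RM_HDRS
*	SINGLEHDR set only in the input   -> ADD_HDRS
*	anything else                     -> NOT_TRANSFORMABLE
*/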
/*
* do_added_parts_exist -- (internal) check if any part of the replicas that are
* to be added (marked as broken) already exists
*/
static int
do_added_parts_exist(struct pool_set *set,
struct poolset_health_status *set_hs)
{
for (unsigned r = 0; r < set->nreplicas; ++r) {
/* skip unbroken (i.e. not being added) replicas */
if (!replica_is_replica_broken(r, set_hs))
continue;
struct pool_replica *rep = REP(set, r);
/* skip remote replicas */
if (rep->remote)
continue;
for (unsigned p = 0; p < rep->nparts; ++p) {
/* check if part file exists */
int oerrno = errno;
int exists = util_file_exists(rep->part[p].path);
if (exists < 0)
return -1;
if (exists && !rep->part[p].is_dev_dax) {
LOG(1, "part file %s exists",
rep->part[p].path);
return 1;
}
errno = oerrno;
}
}
return 0;
}
/*
* delete_replicas -- (internal) delete replicas which do not have their
* counterpart set in the helping status structure
*/
static int
delete_replicas(struct pool_set *set, struct poolset_compare_status *set_s)
{
LOG(3, "set %p, set_s %p", set, set_s);
for (unsigned r = 0; r < set->nreplicas; ++r) {
struct pool_replica *rep = REP(set, r);
if (replica_counterpart(r, set_s) == UNDEF_REPLICA) {
if (!rep->remote) {
if (util_replica_close_local(rep, r,
DELETE_ALL_PARTS))
return -1;
} else {
if (util_replica_close_remote(rep, r,
DELETE_ALL_PARTS))
return -1;
}
}
}
return 0;
}
/*
* copy_replica_data_fw -- (internal) copy data between replicas of two
* poolsets, starting from the beginning of the
* second part
*/
static void
copy_replica_data_fw(struct pool_set *set_dst, struct pool_set *set_src,
unsigned repn)
{
LOG(3, "set_in %p, set_out %p, repn %u", set_src, set_dst, repn);
ssize_t pool_size = replica_get_pool_size(set_src, repn);
if (pool_size < 0) {
LOG(1, "getting pool size from replica %u failed", repn);
pool_size = (ssize_t)set_src->poolsize;
}
size_t len = (size_t)pool_size - POOL_HDR_SIZE -
replica_get_part_data_len(set_src, repn, 0);
void *src = PART(REP(set_src, repn), 1)->addr;
void *dst = PART(REP(set_dst, repn), 1)->addr;
size_t count = len / POOL_HDR_SIZE;
while (count-- > 0) {
pmem_memcpy_persist(dst, src, POOL_HDR_SIZE);
src = ADDR_SUM(src, POOL_HDR_SIZE);
dst = ADDR_SUM(dst, POOL_HDR_SIZE);
}
}
/*
* copy_replica_data_bw -- (internal) copy data between replicas of two
* poolsets, starting from the end of the pool
*/
static void
copy_replica_data_bw(struct pool_set *set_dst, struct pool_set *set_src,
unsigned repn)
{
LOG(3, "set_in %p, set_out %p, repn %u", set_src, set_dst, repn);
ssize_t pool_size = replica_get_pool_size(set_src, repn);
if (pool_size < 0) {
LOG(1, "getting pool size from replica %u failed", repn);
pool_size = (ssize_t)set_src->poolsize;
}
size_t len = (size_t)pool_size - POOL_HDR_SIZE -
replica_get_part_data_len(set_src, repn, 0);
size_t count = len / POOL_HDR_SIZE;
void *src = ADDR_SUM(PART(REP(set_src, repn), 1)->addr, len);
void *dst = ADDR_SUM(PART(REP(set_dst, repn), 1)->addr, len);
while (count-- > 0) {
src = ADDR_SUM(src, -(ssize_t)POOL_HDR_SIZE);
dst = ADDR_SUM(dst, -(ssize_t)POOL_HDR_SIZE);
pmem_memcpy_persist(dst, src, POOL_HDR_SIZE);
}
}
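/*
* Editorial note on the copy direction in the two helpers above: both
* poolsets map the same part files (the descriptors are shared via
* copy_part_fds()), so the copies overlap like a memmove(). Removing
* headers compacts the data towards lower file offsets, hence the
* forward, low-to-high copy; adding headers shifts the data towards
* higher offsets, hence the backward, high-to-low copy. Each direction
* guarantees no source chunk is overwritten before it has been read.
*/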
/*
* create_missing_headers -- (internal) create headers for all parts but the
* first one
*/
static int
create_missing_headers(struct pool_set *set, unsigned repn)
{
LOG(3, "set %p, repn %u", set, repn);
struct pool_hdr *src_hdr = HDR(REP(set, repn), 0);
for (unsigned p = 1; p < set->replica[repn]->nhdrs; ++p) {
struct pool_attr attr;
util_pool_hdr2attr(&attr, src_hdr);
attr.features.incompat &= (uint32_t)(~POOL_FEAT_SINGLEHDR);
if (util_header_create(set, repn, p, &attr, 1) != 0) {
LOG(1, "part headers create failed for"
" replica %u part %u", repn, p);
errno = EINVAL;
return -1;
}
}
return 0;
}
/*
* update_replica_header -- (internal) update field values in the first header
* in the replica
*/
static void
update_replica_header(struct pool_set *set, unsigned repn)
{
LOG(3, "set %p, repn %u", set, repn);
struct pool_replica *rep = REP(set, repn);
struct pool_set_part *part = PART(REP(set, repn), 0);
struct pool_hdr *hdr = (struct pool_hdr *)part->hdr;
if (set->options & OPTION_SINGLEHDR) {
hdr->features.incompat |= POOL_FEAT_SINGLEHDR;
memcpy(hdr->next_part_uuid, hdr->uuid, POOL_HDR_UUID_LEN);
memcpy(hdr->prev_part_uuid, hdr->uuid, POOL_HDR_UUID_LEN);
} else {
hdr->features.incompat &= (uint32_t)(~POOL_FEAT_SINGLEHDR);
}
util_checksum(hdr, sizeof(*hdr), &hdr->checksum, 1,
POOL_HDR_CSUM_END_OFF(hdr));
util_persist_auto(rep->is_pmem, hdr, sizeof(*hdr));
}
/*
* fill_replica_struct_uuids -- (internal) gather all uuids required for the
* replica in the helper structure
*/
static int
fill_replica_struct_uuids(struct pool_set *set, unsigned repn)
{
LOG(3, "set %p, repn %u", set, repn);
struct pool_replica *rep = REP(set, repn);
memcpy(PART(rep, 0)->uuid, HDR(rep, 0)->uuid, POOL_HDR_UUID_LEN);
for (unsigned p = 1; p < rep->nhdrs; ++p) {
if (util_uuid_generate(rep->part[p].uuid) < 0) {
ERR("cannot generate part UUID");
errno = EINVAL;
return -1;
}
}
return 0;
}
/*
* update_uuids -- (internal) update uuids in all headers in the replica
*/
static void
update_uuids(struct pool_set *set, unsigned repn)
{
LOG(3, "set %p, repn %u", set, repn);
struct pool_replica *rep = REP(set, repn);
struct pool_hdr *hdr0 = HDR(rep, 0);
for (unsigned p = 0; p < rep->nhdrs; ++p) {
struct pool_hdr *hdrp = HDR(rep, p);
memcpy(hdrp->next_part_uuid, PARTN(rep, p)->uuid,
POOL_HDR_UUID_LEN);
memcpy(hdrp->prev_part_uuid, PARTP(rep, p)->uuid,
POOL_HDR_UUID_LEN);
/* Avoid calling memcpy() on identical regions */
if (p != 0) {
memcpy(hdrp->next_repl_uuid, hdr0->next_repl_uuid,
POOL_HDR_UUID_LEN);
memcpy(hdrp->prev_repl_uuid, hdr0->prev_repl_uuid,
POOL_HDR_UUID_LEN);
memcpy(hdrp->poolset_uuid, hdr0->poolset_uuid,
POOL_HDR_UUID_LEN);
}
util_checksum(hdrp, sizeof(*hdrp), &hdrp->checksum, 1,
POOL_HDR_CSUM_END_OFF(hdrp));
util_persist(PART(rep, p)->is_dev_dax, hdrp, sizeof(*hdrp));
}
}
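/*
* Editorial illustration of the linkage rewritten above, for a replica
* with n headers:
*
*	hdr(p)->next_part_uuid = uuid of part p+1 (wrapping to part 0)
*	hdr(p)->prev_part_uuid = uuid of part p-1 (wrapping to part n-1)
*	hdr(p)->next_repl_uuid, prev_repl_uuid, poolset_uuid
*		= copies of the values stored in hdr(0)
*/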
/*
* copy_part_fds -- (internal) copy poolset part file descriptors between
* two poolsets
*/
static void
copy_part_fds(struct pool_set *set_dst, struct pool_set *set_src)
{
ASSERTeq(set_src->nreplicas, set_dst->nreplicas);
for (unsigned r = 0; r < set_dst->nreplicas; ++r) {
ASSERTeq(REP(set_src, r)->nparts, REP(set_dst, r)->nparts);
for (unsigned p = 0; p < REP(set_dst, r)->nparts; ++p) {
PART(REP(set_dst, r), p)->fd =
PART(REP(set_src, r), p)->fd;
}
}
}
/*
* remove_hdrs_replica -- (internal) remove headers from the replica
*/
static int
remove_hdrs_replica(struct pool_set *set_in, struct pool_set *set_out,
unsigned repn)
{
LOG(3, "set %p, repn %u", set_in, repn);
int ret = 0;
/* open all part files of the input replica */
if (replica_open_replica_part_files(set_in, repn)) {
LOG(1, "opening replica %u, part files failed", repn);
ret = -1;
goto out;
}
/* share part file descriptors between poolset structures */
copy_part_fds(set_out, set_in);
/* map the whole input replica */
if (util_replica_open(set_in, repn, MAP_SHARED)) {
LOG(1, "opening input replica failed: replica %u", repn);
ret = -1;
goto out_close;
}
/* map the whole output replica */
if (util_replica_open(set_out, repn, MAP_SHARED)) {
LOG(1, "opening output replica failed: replica %u", repn);
ret = -1;
goto out_unmap_in;
}
/* move data between the two mappings of the replica */
if (REP(set_in, repn)->nparts > 1)
copy_replica_data_fw(set_out, set_in, repn);
/* make changes to the first part's header */
update_replica_header(set_out, repn);
util_replica_close(set_out, repn);
out_unmap_in:
util_replica_close(set_in, repn);
out_close:
util_replica_fdclose(REP(set_in, repn));
out:
return ret;
}
/*
* add_hdrs_replica -- (internal) add lacking headers to the replica
*
* when the operation fails and returns -1, the replica remains untouched
*/
static int
add_hdrs_replica(struct pool_set *set_in, struct pool_set *set_out,
unsigned repn)
{
LOG(3, "set %p, repn %u", set_in, repn);
int ret = 0;
/* open all part files of the input replica */
if (replica_open_replica_part_files(set_in, repn)) {
LOG(1, "opening replica %u, part files failed", repn);
ret = -1;
goto out;
}
/* share part file descriptors between poolset structures */
copy_part_fds(set_out, set_in);
/* map the whole input replica */
if (util_replica_open(set_in, repn, MAP_SHARED)) {
LOG(1, "opening input replica failed: replica %u", repn);
ret = -1;
goto out_close;
}
/* map the whole output replica */
if (util_replica_open(set_out, repn, MAP_SHARED)) {
LOG(1, "opening output replica failed: replica %u", repn);
ret = -1;
goto out_unmap_in;
}
/* generate new uuids for lacking headers */
if (fill_replica_struct_uuids(set_out, repn)) {
LOG(1, "generating lacking uuids for parts failed: replica %u",
repn);
ret = -1;
goto out_unmap_out;
}
/* copy data between the two mappings of the replica */
if (REP(set_in, repn)->nparts > 1)
copy_replica_data_bw(set_out, set_in, repn);
/* create the missing headers */
if (create_missing_headers(set_out, repn)) {
LOG(1, "creating lacking headers failed: replica %u", repn);
/*
* copy the data back, so we could fall back to the original
* state
*/
if (REP(set_in, repn)->nparts > 1)
copy_replica_data_fw(set_in, set_out, repn);
ret = -1;
goto out_unmap_out;
}
/* make changes to the first part's header */
update_replica_header(set_out, repn);
/* store new uuids in all headers and update linkage in the replica */
update_uuids(set_out, repn);
out_unmap_out:
util_replica_close(set_out, repn);
out_unmap_in:
util_replica_close(set_in, repn);
out_close:
util_replica_fdclose(REP(set_in, repn));
out:
return ret;
}
/*
* remove_hdrs -- (internal) transform a poolset without the SINGLEHDR option
* (with headers) into a poolset with the SINGLEHDR option
* (without headers)
*/
static int
remove_hdrs(struct pool_set *set_in, struct pool_set *set_out,
struct poolset_health_status *set_in_hs, unsigned flags)
{
LOG(3, "set_in %p, set_out %p, set_in_hs %p, flags %u",
set_in, set_out, set_in_hs, flags);
for (unsigned r = 0; r < set_in->nreplicas; ++r) {
if (remove_hdrs_replica(set_in, set_out, r)) {
LOG(1, "removing headers from replica %u failed", r);
/*
* mark all previous replicas as damaged; the unsigned
* underflow of --r at r == 0 terminates the loop
*/
while (--r < set_in->nreplicas)
REP_HEALTH(set_in_hs, r)->flags |= IS_BROKEN;
return -1;
}
}
return 0;
}
/*
* add_hdrs -- (internal) transform a poolset with the SINGLEHDR option (without
* headers) into a poolset without the SINGLEHDR option (with
* headers)
*/
static int
add_hdrs(struct pool_set *set_in, struct pool_set *set_out,
struct poolset_health_status *set_in_hs,
unsigned flags)
{
LOG(3, "set_in %p, set_out %p, set_in_hs %p, flags %u",
set_in, set_out, set_in_hs, flags);
for (unsigned r = 0; r < set_in->nreplicas; ++r) {
if (add_hdrs_replica(set_in, set_out, r)) {
LOG(1, "adding headers to replica %u failed", r);
/*
* mark all previous replicas as damaged; the unsigned
* underflow of --r at r == 0 terminates the loop
*/
while (--r < set_in->nreplicas)
REP_HEALTH(set_in_hs, r)->flags |= IS_BROKEN;
return -1;
}
}
return 0;
}
/*
* replica_transform -- transform one poolset into another
*/
int
replica_transform(struct pool_set *set_in, struct pool_set *set_out,
unsigned flags)
{
LOG(3, "set_in %p, set_out %p", set_in, set_out);
int ret = 0;
/* validate user arguments */
if (validate_args(set_in, set_out))
return -1;
/* check if the source poolset is healthy */
struct poolset_health_status *set_in_hs = NULL;
if (replica_check_poolset_health(set_in, &set_in_hs,
0 /* called from transform */, flags)) {
ERR("source poolset health check failed");
return -1;
}
if (!replica_is_poolset_healthy(set_in_hs)) {
ERR("source poolset is broken");
ret = -1;
errno = EINVAL;
goto free_hs_in;
}
/* copy value of the ignore_sds flag from the input poolset */
set_out->ignore_sds = set_in->ignore_sds;
struct poolset_health_status *set_out_hs = NULL;
if (replica_create_poolset_health_status(set_out, &set_out_hs)) {
ERR("creating poolset health status failed");
ret = -1;
goto free_hs_in;
}
/* check if the poolsets are transformable */
struct poolset_compare_status *set_in_cs = NULL;
struct poolset_compare_status *set_out_cs = NULL;
if (compare_poolsets(set_in, set_out, &set_in_cs, &set_out_cs)) {
ERR("comparing poolsets failed");
ret = -1;
goto free_hs_out;
}
enum transform_op operation = identify_transform_operation(set_in_cs,
set_out_cs, set_in_hs, set_out_hs);
if (operation == NOT_TRANSFORMABLE) {
LOG(1, "poolsets are not transformable");
ret = -1;
errno = EINVAL;
goto free_cs;
}
if (operation == RM_HDRS) {
if (!is_dry_run(flags) &&
remove_hdrs(set_in, set_out, set_in_hs,
flags)) {
ERR("removing headers failed; falling back to the "
"input poolset");
if (replica_sync(set_in, set_in_hs,
flags | IS_TRANSFORMED)) {
LOG(1, "falling back to the input poolset "
"failed");
} else {
LOG(1, "falling back to the input poolset "
"succeeded");
}
ret = -1;
}
goto free_cs;
}
if (operation == ADD_HDRS) {
if (!is_dry_run(flags) &&
add_hdrs(set_in, set_out, set_in_hs, flags)) {
ERR("adding headers failed; falling back to the "
"input poolset");
if (replica_sync(set_in, set_in_hs,
flags | IS_TRANSFORMED)) {
LOG(1, "falling back to the input poolset "
"failed");
} else {
LOG(1, "falling back to the input poolset "
"succeeded");
}
ret = -1;
}
goto free_cs;
}
if (operation == ADD_REPLICAS) {
/*
* check if any of the parts that are to be added already exists
*/
if (do_added_parts_exist(set_out, set_out_hs)) {
ERR("some parts being added already exist");
ret = -1;
errno = EINVAL;
goto free_cs;
}
}
/* signal that sync is called by transform */
if (replica_sync(set_out, set_out_hs, flags | IS_TRANSFORMED)) {
ret = -1;
goto free_cs;
}
if (operation == RM_REPLICAS) {
if (!is_dry_run(flags) && delete_replicas(set_in, set_in_cs))
ret = -1;
}
free_cs:
Free(set_in_cs);
Free(set_out_cs);
free_hs_out:
replica_free_poolset_health_status(set_out_hs);
free_hs_in:
replica_free_poolset_health_status(set_in_hs);
return ret;
}
| 27,700 | 26.211198 | 81 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmempool/check_pool_hdr.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2020, Intel Corporation */
/*
* check_pool_hdr.c -- pool header check
*/
#include <stdio.h>
#include <inttypes.h>
#include <sys/mman.h>
#include <endian.h>
#include "out.h"
#include "util_pmem.h"
#include "libpmempool.h"
#include "libpmem.h"
#include "pmempool.h"
#include "pool.h"
#include "set.h"
#include "check_util.h"
#define NO_COMMON_POOLSET_UUID "%sno common pool_hdr.poolset_uuid"
#define INVALID_UUID "%sinvalid pool_hdr.uuid"
#define INVALID_CHECKSUM "%sinvalid pool_hdr.checksum"
enum question {
Q_DEFAULT_SIGNATURE,
Q_DEFAULT_MAJOR,
Q_DEFAULT_COMPAT_FEATURES,
Q_DEFAULT_INCOMPAT_FEATURES,
Q_DEFAULT_RO_COMPAT_FEATURES,
Q_ZERO_UNUSED_AREA,
Q_ARCH_FLAGS,
Q_CRTIME,
Q_CHECKSUM,
Q_POOLSET_UUID_SET,
Q_POOLSET_UUID_FROM_BTT_INFO,
Q_POOLSET_UUID_REGENERATE,
Q_UUID_SET,
Q_UUID_REGENERATE,
Q_NEXT_PART_UUID_SET,
Q_PREV_PART_UUID_SET,
Q_NEXT_REPL_UUID_SET,
Q_PREV_REPL_UUID_SET
};
/*
* pool_hdr_possible_type -- (internal) return possible type of pool
*/
static enum pool_type
pool_hdr_possible_type(PMEMpoolcheck *ppc)
{
if (pool_blk_get_first_valid_arena(ppc->pool, &ppc->pool->bttc))
return POOL_TYPE_BLK;
return POOL_TYPE_UNKNOWN;
}
/*
* pool_hdr_valid -- (internal) return true if pool header is valid
*/
static int
pool_hdr_valid(struct pool_hdr *hdrp)
{
return !util_is_zeroed((void *)hdrp, sizeof(*hdrp)) &&
util_checksum(hdrp, sizeof(*hdrp), &hdrp->checksum, 0,
POOL_HDR_CSUM_END_OFF(hdrp));
}
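/*
* Editorial note: a header is accepted above iff it is not all zeros and
* the stored checksum matches one recomputed over the region delimited
* by POOL_HDR_CSUM_END_OFF; util_checksum() with insert == 0 only
* verifies, while with insert == 1 it would store the fresh value.
*/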
/*
* pool_supported -- (internal) check if pool type is supported
*/
static int
pool_supported(enum pool_type type)
{
switch (type) {
case POOL_TYPE_LOG:
return 1;
case POOL_TYPE_BLK:
return 1;
case POOL_TYPE_OBJ:
default:
return 0;
}
}
/*
* pool_hdr_preliminary_check -- (internal) check pool header checksum and pool
* parameters
*/
static int
pool_hdr_preliminary_check(PMEMpoolcheck *ppc, location *loc)
{
LOG(3, NULL);
CHECK_INFO(ppc, "%schecking pool header", loc->prefix);
if (util_is_zeroed((void *)&loc->hdr, sizeof(loc->hdr))) {
if (CHECK_IS_NOT(ppc, REPAIR)) {
check_end(ppc->data);
ppc->result = CHECK_RESULT_NOT_CONSISTENT;
return CHECK_ERR(ppc, "%sempty pool hdr", loc->prefix);
}
} else if (loc->hdr_valid) {
enum pool_type type = pool_hdr_get_type(&loc->hdr);
if (type == POOL_TYPE_UNKNOWN) {
if (CHECK_IS_NOT(ppc, REPAIR)) {
check_end(ppc->data);
ppc->result = CHECK_RESULT_NOT_CONSISTENT;
return CHECK_ERR(ppc, "%sinvalid signature",
loc->prefix);
}
CHECK_INFO(ppc, "%sinvalid signature", loc->prefix);
} else {
/* valid check sum */
CHECK_INFO(ppc, "%spool header correct",
loc->prefix);
loc->step = CHECK_STEP_COMPLETE;
return 0;
}
} else if (CHECK_IS_NOT(ppc, REPAIR)) {
check_end(ppc->data);
ppc->result = CHECK_RESULT_NOT_CONSISTENT;
return CHECK_ERR(ppc, "%sincorrect pool header", loc->prefix);
} else {
CHECK_INFO(ppc, "%sincorrect pool header", loc->prefix);
}
ASSERT(CHECK_IS(ppc, REPAIR));
if (ppc->pool->params.type == POOL_TYPE_UNKNOWN) {
ppc->pool->params.type = pool_hdr_possible_type(ppc);
if (ppc->pool->params.type == POOL_TYPE_UNKNOWN) {
ppc->result = CHECK_RESULT_CANNOT_REPAIR;
return CHECK_ERR(ppc, "cannot determine pool type");
}
}
if (!pool_supported(ppc->pool->params.type)) {
ppc->result = CHECK_RESULT_CANNOT_REPAIR;
return CHECK_ERR(ppc, "the repair of %s pools is not supported",
pool_get_pool_type_str(ppc->pool->params.type));
}
return 0;
}
/*
* pool_hdr_default_check -- (internal) check some default values in pool header
*/
static int
pool_hdr_default_check(PMEMpoolcheck *ppc, location *loc)
{
LOG(3, NULL);
ASSERT(CHECK_IS(ppc, REPAIR));
struct pool_hdr def_hdr;
pool_hdr_default(ppc->pool->params.type, &def_hdr);
if (memcmp(loc->hdr.signature, def_hdr.signature, POOL_HDR_SIG_LEN)) {
CHECK_ASK(ppc, Q_DEFAULT_SIGNATURE,
"%spool_hdr.signature is not valid.|Do you want to set "
"it to %.8s?", loc->prefix, def_hdr.signature);
}
if (loc->hdr.major != def_hdr.major) {
CHECK_ASK(ppc, Q_DEFAULT_MAJOR,
"%spool_hdr.major is not valid.|Do you want to set it "
"to default value 0x%x?", loc->prefix, def_hdr.major);
}
features_t unknown = util_get_unknown_features(
loc->hdr.features, def_hdr.features);
if (unknown.compat) {
CHECK_ASK(ppc, Q_DEFAULT_COMPAT_FEATURES,
"%spool_hdr.features.compat is not valid.|Do you want "
"to set it to default value 0x%x?", loc->prefix,
def_hdr.features.compat);
}
if (unknown.incompat) {
CHECK_ASK(ppc, Q_DEFAULT_INCOMPAT_FEATURES,
"%spool_hdr.features.incompat is not valid.|Do you "
"want to set it to default value 0x%x?", loc->prefix,
def_hdr.features.incompat);
}
if (unknown.ro_compat) {
CHECK_ASK(ppc, Q_DEFAULT_RO_COMPAT_FEATURES,
"%spool_hdr.features.ro_compat is not valid.|Do you "
"want to set it to default value 0x%x?", loc->prefix,
def_hdr.features.ro_compat);
}
if (!util_is_zeroed(loc->hdr.unused, sizeof(loc->hdr.unused))) {
CHECK_ASK(ppc, Q_ZERO_UNUSED_AREA,
"%sunused area is not filled by zeros.|Do you want to "
"fill it up?", loc->prefix);
}
return check_questions_sequence_validate(ppc);
}
/*
* pool_hdr_default_fix -- (internal) fix some default values in pool header
*/
static int
pool_hdr_default_fix(PMEMpoolcheck *ppc, location *loc, uint32_t question,
void *context)
{
LOG(3, NULL);
ASSERTne(loc, NULL);
struct pool_hdr def_hdr;
pool_hdr_default(ppc->pool->params.type, &def_hdr);
switch (question) {
case Q_DEFAULT_SIGNATURE:
CHECK_INFO(ppc, "%ssetting pool_hdr.signature to %.8s",
loc->prefix, def_hdr.signature);
memcpy(&loc->hdr.signature, &def_hdr.signature,
POOL_HDR_SIG_LEN);
break;
case Q_DEFAULT_MAJOR:
CHECK_INFO(ppc, "%ssetting pool_hdr.major to 0x%x", loc->prefix,
def_hdr.major);
loc->hdr.major = def_hdr.major;
break;
case Q_DEFAULT_COMPAT_FEATURES:
CHECK_INFO(ppc, "%ssetting pool_hdr.features.compat to 0x%x",
loc->prefix, def_hdr.features.compat);
loc->hdr.features.compat = def_hdr.features.compat;
break;
case Q_DEFAULT_INCOMPAT_FEATURES:
CHECK_INFO(ppc, "%ssetting pool_hdr.features.incompat to 0x%x",
loc->prefix, def_hdr.features.incompat);
loc->hdr.features.incompat = def_hdr.features.incompat;
break;
case Q_DEFAULT_RO_COMPAT_FEATURES:
CHECK_INFO(ppc, "%ssetting pool_hdr.features.ro_compat to 0x%x",
loc->prefix, def_hdr.features.ro_compat);
loc->hdr.features.ro_compat = def_hdr.features.ro_compat;
break;
case Q_ZERO_UNUSED_AREA:
CHECK_INFO(ppc, "%ssetting pool_hdr.unused to zeros",
loc->prefix);
memset(loc->hdr.unused, 0, sizeof(loc->hdr.unused));
break;
default:
ERR("not implemented question id: %u", question);
}
return 0;
}
/*
* pool_hdr_quick_check -- (internal) end check if pool header is valid
*/
static int
pool_hdr_quick_check(PMEMpoolcheck *ppc, location *loc)
{
LOG(3, NULL);
if (pool_hdr_valid(loc->hdrp))
loc->step = CHECK_STEP_COMPLETE;
return 0;
}
/*
* pool_hdr_nondefault -- (internal) validate custom value fields
*/
static int
pool_hdr_nondefault(PMEMpoolcheck *ppc, location *loc)
{
LOG(3, NULL);
if (loc->hdr.crtime > (uint64_t)ppc->pool->set_file->mtime) {
const char * const error = "%spool_hdr.crtime is not valid";
if (CHECK_IS_NOT(ppc, REPAIR)) {
ppc->result = CHECK_RESULT_NOT_CONSISTENT;
return CHECK_ERR(ppc, error, loc->prefix);
} else if (CHECK_IS_NOT(ppc, ADVANCED)) {
ppc->result = CHECK_RESULT_CANNOT_REPAIR;
CHECK_INFO(ppc, "%s" REQUIRE_ADVANCED, loc->prefix);
return CHECK_ERR(ppc, error, loc->prefix);
}
CHECK_ASK(ppc, Q_CRTIME,
"%spool_hdr.crtime is not valid.|Do you want to set it "
"to file's modtime [%s]?", loc->prefix,
check_get_time_str(ppc->pool->set_file->mtime));
}
if (loc->valid_part_hdrp &&
memcmp(&loc->valid_part_hdrp->arch_flags,
&loc->hdr.arch_flags,
sizeof(struct arch_flags)) != 0) {
const char * const error = "%spool_hdr.arch_flags is not valid";
if (CHECK_IS_NOT(ppc, REPAIR)) {
ppc->result = CHECK_RESULT_NOT_CONSISTENT;
return CHECK_ERR(ppc, error, loc->prefix);
}
CHECK_ASK(ppc, Q_ARCH_FLAGS,
"%spool_hdr.arch_flags is not valid.|Do you want to "
"copy it from a valid part?", loc->prefix);
}
return check_questions_sequence_validate(ppc);
}
/*
* pool_hdr_nondefault_fix -- (internal) fix custom value fields
*/
static int
pool_hdr_nondefault_fix(PMEMpoolcheck *ppc, location *loc, uint32_t question,
void *context)
{
LOG(3, NULL);
ASSERTne(loc, NULL);
uint64_t *flags = NULL;
switch (question) {
case Q_CRTIME:
CHECK_INFO(ppc, "%ssetting pool_hdr.crtime to file's modtime: "
"%s", loc->prefix,
check_get_time_str(ppc->pool->set_file->mtime));
util_convert2h_hdr_nocheck(&loc->hdr);
loc->hdr.crtime = (uint64_t)ppc->pool->set_file->mtime;
util_convert2le_hdr(&loc->hdr);
break;
case Q_ARCH_FLAGS:
flags = (uint64_t *)&loc->valid_part_hdrp->arch_flags;
CHECK_INFO(ppc, "%ssetting pool_hdr.arch_flags to 0x%08" PRIx64
"%08" PRIx64, loc->prefix, flags[0], flags[1]);
util_convert2h_hdr_nocheck(&loc->hdr);
memcpy(&loc->hdr.arch_flags, &loc->valid_part_hdrp->arch_flags,
sizeof(struct arch_flags));
util_convert2le_hdr(&loc->hdr);
break;
default:
ERR("not implemented question id: %u", question);
}
return 0;
}
/*
* pool_hdr_poolset_uuid_find -- (internal) check poolset_uuid field
*/
static int
pool_hdr_poolset_uuid_find(PMEMpoolcheck *ppc, location *loc)
{
LOG(3, NULL);
/*
* If the pool header is valid and there are no other parts or replicas
* in the poolset, its poolset_uuid is also valid.
*/
if (loc->hdr_valid && loc->single_repl && loc->single_part)
return 0;
if (loc->replica != 0 || loc->part != 0)
goto after_lookup;
/* for blk pool we can take the UUID from BTT Info header */
if (ppc->pool->params.type == POOL_TYPE_BLK && ppc->pool->bttc.valid) {
loc->valid_puuid = &ppc->pool->bttc.btt_info.parent_uuid;
if (uuidcmp(loc->hdr.poolset_uuid, *loc->valid_puuid) != 0) {
CHECK_ASK(ppc, Q_POOLSET_UUID_FROM_BTT_INFO,
"%sinvalid pool_hdr.poolset_uuid.|Do you want "
"to set it to %s from BTT Info?", loc->prefix,
check_get_uuid_str(*loc->valid_puuid));
goto exit_question;
}
}
if (loc->single_part && loc->single_repl) {
/*
* If the pool is not a blk pool, or the BTT Info header is
* invalid, there is no other way to validate the poolset uuid.
*/
return 0;
}
/*
* if all valid poolset part files have the same poolset uuid, it is
* the valid poolset uuid; if all part files (valid or not) have the
* same poolset uuid, that one is taken as the valid poolset uuid
*/
struct pool_set *poolset = ppc->pool->set_file->poolset;
unsigned nreplicas = poolset->nreplicas;
uuid_t *common_puuid = loc->valid_puuid;
for (unsigned r = 0; r < nreplicas; r++) {
struct pool_replica *rep = REP(poolset, r);
for (unsigned p = 0; p < rep->nhdrs; p++) {
struct pool_hdr *hdr = HDR(rep, p);
/*
* find poolset uuid if it is the same for all part
* files
*/
if (common_puuid == NULL) {
if (r == 0 && p == 0) {
common_puuid = &hdr->poolset_uuid;
}
} else if (uuidcmp(*common_puuid, hdr->poolset_uuid)
!= 0) {
common_puuid = NULL;
}
if (!pool_hdr_valid(hdr))
continue;
/*
* find poolset uuid if it is the same for all valid
* part files
*/
if (loc->valid_puuid == NULL) {
loc->valid_puuid = &hdr->poolset_uuid;
} else if (uuidcmp(*loc->valid_puuid, hdr->poolset_uuid)
!= 0) {
ppc->result = CHECK_RESULT_NOT_CONSISTENT;
return CHECK_ERR(ppc, "the poolset contains "
"part files from various poolsets");
}
}
}
if (!loc->valid_puuid && common_puuid)
loc->valid_puuid = common_puuid;
if (loc->valid_puuid)
goto after_lookup;
if (CHECK_IS_NOT(ppc, REPAIR)) {
ppc->result = CHECK_RESULT_NOT_CONSISTENT;
return CHECK_ERR(ppc, NO_COMMON_POOLSET_UUID, loc->prefix);
} else if (CHECK_IS_NOT(ppc, ADVANCED)) {
ppc->result = CHECK_RESULT_CANNOT_REPAIR;
CHECK_INFO(ppc, "%s" REQUIRE_ADVANCED, loc->prefix);
return CHECK_ERR(ppc, NO_COMMON_POOLSET_UUID, loc->prefix);
} else {
CHECK_ASK(ppc, Q_POOLSET_UUID_REGENERATE, NO_COMMON_POOLSET_UUID
".|Do you want to regenerate pool_hdr.poolset_uuid?",
loc->prefix);
goto exit_question;
}
after_lookup:
if (loc->valid_puuid) {
if (uuidcmp(*loc->valid_puuid, loc->hdr.poolset_uuid) != 0) {
if (CHECK_IS_NOT(ppc, REPAIR)) {
ppc->result = CHECK_RESULT_NOT_CONSISTENT;
return CHECK_ERR(ppc, "%sinvalid "
"pool_hdr.poolset_uuid", loc->prefix);
}
CHECK_ASK(ppc, Q_POOLSET_UUID_SET, "%sinvalid "
"pool_hdr.poolset_uuid.|Do you want to set "
"it to %s from a valid part file?", loc->prefix,
check_get_uuid_str(*loc->valid_puuid));
}
}
exit_question:
return check_questions_sequence_validate(ppc);
}
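/*
* Editorial summary of the lookup above: two candidates are tracked.
* valid_puuid is the poolset uuid shared by all valid part headers (any
* disagreement among valid headers is a hard error), and common_puuid is
* the poolset uuid shared by all part headers, valid or not. The former
* wins; the latter is only a fallback when no valid header exists.
*/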
/*
* pool_hdr_poolset_uuid_fix -- (internal) fix poolset_uuid field
*/
static int
pool_hdr_poolset_uuid_fix(PMEMpoolcheck *ppc, location *loc, uint32_t question,
void *context)
{
LOG(3, NULL);
ASSERTne(loc, NULL);
switch (question) {
case Q_POOLSET_UUID_SET:
case Q_POOLSET_UUID_FROM_BTT_INFO:
CHECK_INFO(ppc, "%ssetting pool_hdr.poolset_uuid to %s",
loc->prefix, check_get_uuid_str(*loc->valid_puuid));
memcpy(loc->hdr.poolset_uuid, loc->valid_puuid,
POOL_HDR_UUID_LEN);
if (question == Q_POOLSET_UUID_SET)
ppc->pool->uuid_op = UUID_NOT_FROM_BTT;
else
ppc->pool->uuid_op = UUID_FROM_BTT;
break;
case Q_POOLSET_UUID_REGENERATE:
if (util_uuid_generate(loc->hdr.poolset_uuid) != 0) {
ppc->result = CHECK_RESULT_INTERNAL_ERROR;
return CHECK_ERR(ppc, "%suuid generation failed",
loc->prefix);
}
CHECK_INFO(ppc, "%ssetting pool_hdr.pooset_uuid to %s",
loc->prefix,
check_get_uuid_str(loc->hdr.poolset_uuid));
ppc->pool->uuid_op = UUID_NOT_FROM_BTT;
break;
default:
ERR("not implemented question id: %u", question);
}
return 0;
}
#define COMPARE_TO_FIRST_PART_ONLY 2
/*
* pool_hdr_uuid_find -- (internal) check UUID value
*/
static int
pool_hdr_uuid_find(PMEMpoolcheck *ppc, location *loc)
{
LOG(3, NULL);
/*
* If the pool header is valid and there are no other parts or replicas
* in the poolset, its uuid is also valid.
*/
if (loc->hdr_valid && loc->single_repl && loc->single_part)
return 0;
int hdrs_valid[] = {
loc->next_part_hdr_valid, loc->prev_part_hdr_valid,
loc->next_repl_hdr_valid, loc->prev_repl_hdr_valid};
uuid_t *uuids[] = {
&loc->next_part_hdrp->prev_part_uuid,
&loc->prev_part_hdrp->next_part_uuid,
&loc->next_repl_hdrp->prev_repl_uuid,
&loc->prev_repl_hdrp->next_repl_uuid
};
/*
* if all valid poolset part files have the same uuid in their links to
* this part file, it is the valid uuid; if all links have the same
* uuid and it is a single-file pool, it is also the valid uuid
*/
loc->valid_uuid = NULL;
if (loc->hdr_valid)
loc->valid_uuid = &loc->hdr.uuid;
uuid_t *common_uuid = uuids[0];
COMPILE_ERROR_ON(ARRAY_SIZE(uuids) != ARRAY_SIZE(hdrs_valid));
COMPILE_ERROR_ON(COMPARE_TO_FIRST_PART_ONLY >= ARRAY_SIZE(uuids));
for (unsigned i = 0; i < ARRAY_SIZE(uuids); ++i) {
if (i > 0 && common_uuid != NULL) {
if (uuidcmp(*common_uuid, *uuids[i]) != 0) {
common_uuid = NULL;
}
}
if (i >= COMPARE_TO_FIRST_PART_ONLY && loc->part != 0)
continue;
if (!hdrs_valid[i])
continue;
if (!loc->valid_uuid) {
loc->valid_uuid = uuids[i];
} else if (uuidcmp(*loc->valid_uuid, *uuids[i]) != 0) {
ppc->result = CHECK_RESULT_NOT_CONSISTENT;
return CHECK_ERR(ppc, "%sambiguous pool_hdr.uuid",
loc->prefix);
}
}
if (!loc->valid_uuid && common_uuid)
loc->valid_uuid = common_uuid;
if (loc->valid_uuid != NULL) {
if (uuidcmp(*loc->valid_uuid, loc->hdr.uuid) != 0) {
CHECK_ASK(ppc, Q_UUID_SET, INVALID_UUID ".|Do you want "
"to set it to %s from a valid part file?",
loc->prefix,
check_get_uuid_str(*loc->valid_uuid));
}
} else if (CHECK_IS(ppc, ADVANCED)) {
CHECK_ASK(ppc, Q_UUID_REGENERATE, INVALID_UUID ".|Do you want "
"to regenerate it?", loc->prefix);
} else if (CHECK_IS(ppc, REPAIR)) {
ppc->result = CHECK_RESULT_CANNOT_REPAIR;
CHECK_INFO(ppc, "%s" REQUIRE_ADVANCED, loc->prefix);
return CHECK_ERR(ppc, INVALID_UUID, loc->prefix);
} else {
ppc->result = CHECK_RESULT_NOT_CONSISTENT;
return CHECK_ERR(ppc, INVALID_UUID, loc->prefix);
}
return check_questions_sequence_validate(ppc);
}
/*
* pool_hdr_uuid_fix -- (internal) fix UUID value
*/
static int
pool_hdr_uuid_fix(PMEMpoolcheck *ppc, location *loc, uint32_t question,
void *context)
{
LOG(3, NULL);
ASSERTne(loc, NULL);
switch (question) {
case Q_UUID_SET:
CHECK_INFO(ppc, "%ssetting pool_hdr.uuid to %s", loc->prefix,
check_get_uuid_str(*loc->valid_uuid));
memcpy(loc->hdr.uuid, loc->valid_uuid, POOL_HDR_UUID_LEN);
break;
case Q_UUID_REGENERATE:
if (util_uuid_generate(loc->hdr.uuid) != 0) {
ppc->result = CHECK_RESULT_INTERNAL_ERROR;
return CHECK_ERR(ppc, "%suuid generation failed",
loc->prefix);
}
CHECK_INFO(ppc, "%ssetting pool_hdr.uuid to %s", loc->prefix,
check_get_uuid_str(loc->hdr.uuid));
break;
default:
ERR("not implemented question id: %u", question);
}
return 0;
}
/*
* pool_hdr_uuid_links -- (internal) check UUID links values
*/
static int
pool_hdr_uuid_links(PMEMpoolcheck *ppc, location *loc)
{
LOG(3, NULL);
	/*
	 * If the pool header is valid and there are no other parts or
	 * replicas in the poolset, its uuid links are also valid.
	 */
if (loc->hdr_valid && loc->single_repl && loc->single_part)
return 0;
uuid_t *links[] = {
&loc->hdr.next_part_uuid, &loc->hdr.prev_part_uuid,
&loc->hdr.next_repl_uuid, &loc->hdr.prev_repl_uuid};
uuid_t *uuids[] = {
&loc->next_part_hdrp->uuid, &loc->prev_part_hdrp->uuid,
&loc->next_repl_hdrp->uuid, &loc->prev_repl_hdrp->uuid
};
uint32_t questions[] = {
Q_NEXT_PART_UUID_SET, Q_PREV_PART_UUID_SET,
Q_NEXT_REPL_UUID_SET, Q_PREV_REPL_UUID_SET
};
const char *fields[] = {
"pool_hdr.next_part_uuid", "pool_hdr.prev_part_uuid",
"pool_hdr.next_repl_uuid", "pool_hdr.prev_repl_uuid"
};
COMPILE_ERROR_ON(ARRAY_SIZE(links) != ARRAY_SIZE(uuids));
COMPILE_ERROR_ON(ARRAY_SIZE(links) != ARRAY_SIZE(questions));
COMPILE_ERROR_ON(ARRAY_SIZE(links) != ARRAY_SIZE(fields));
for (uint64_t i = 0; i < ARRAY_SIZE(links); ++i) {
if (uuidcmp(*links[i], *uuids[i]) == 0)
continue;
if (CHECK_IS(ppc, REPAIR)) {
CHECK_ASK(ppc, questions[i],
"%sinvalid %s.|Do you want to set it to a "
"valid value?", loc->prefix, fields[i]);
} else {
ppc->result = CHECK_RESULT_NOT_CONSISTENT;
return CHECK_ERR(ppc, "%sinvalid %s", loc->prefix,
fields[i]);
}
}
return check_questions_sequence_validate(ppc);
}
/*
* pool_hdr_uuid_links_fix -- (internal) fix UUID links values
*/
static int
pool_hdr_uuid_links_fix(PMEMpoolcheck *ppc, location *loc, uint32_t question,
void *context)
{
LOG(3, NULL);
ASSERTne(loc, NULL);
switch (question) {
case Q_NEXT_PART_UUID_SET:
CHECK_INFO(ppc, "%ssetting pool_hdr.next_part_uuid to %s",
loc->prefix,
check_get_uuid_str(loc->next_part_hdrp->uuid));
memcpy(loc->hdr.next_part_uuid, loc->next_part_hdrp->uuid,
POOL_HDR_UUID_LEN);
break;
case Q_PREV_PART_UUID_SET:
CHECK_INFO(ppc, "%ssetting pool_hdr.prev_part_uuid to %s",
loc->prefix,
check_get_uuid_str(loc->prev_part_hdrp->uuid));
memcpy(loc->hdr.prev_part_uuid, loc->prev_part_hdrp->uuid,
POOL_HDR_UUID_LEN);
break;
case Q_NEXT_REPL_UUID_SET:
CHECK_INFO(ppc, "%ssetting pool_hdr.next_repl_uuid to %s",
loc->prefix,
check_get_uuid_str(loc->next_repl_hdrp->uuid));
memcpy(loc->hdr.next_repl_uuid, loc->next_repl_hdrp->uuid,
POOL_HDR_UUID_LEN);
break;
case Q_PREV_REPL_UUID_SET:
CHECK_INFO(ppc, "%ssetting pool_hdr.prev_repl_uuid to %s",
loc->prefix,
check_get_uuid_str(loc->prev_repl_hdrp->uuid));
memcpy(loc->hdr.prev_repl_uuid, loc->prev_repl_hdrp->uuid,
POOL_HDR_UUID_LEN);
break;
default:
ERR("not implemented question id: %u", question);
}
return 0;
}
/*
* pool_hdr_checksum -- (internal) validate checksum
*/
static int
pool_hdr_checksum(PMEMpoolcheck *ppc, location *loc)
{
LOG(3, NULL);
if (loc->hdr_valid)
return 0;
if (CHECK_IS_NOT(ppc, REPAIR)) {
ppc->result = CHECK_RESULT_NOT_CONSISTENT;
return CHECK_ERR(ppc, INVALID_CHECKSUM, loc->prefix);
} else if (CHECK_IS_NOT(ppc, ADVANCED)) {
ppc->result = CHECK_RESULT_CANNOT_REPAIR;
CHECK_INFO(ppc, "%s" REQUIRE_ADVANCED, loc->prefix);
return CHECK_ERR(ppc, INVALID_CHECKSUM, loc->prefix);
}
CHECK_ASK(ppc, Q_CHECKSUM, INVALID_CHECKSUM ".|Do you want to "
"regenerate checksum?", loc->prefix);
return check_questions_sequence_validate(ppc);
}
/*
* pool_hdr_checksum_fix -- (internal) fix checksum
*/
static int
pool_hdr_checksum_fix(PMEMpoolcheck *ppc, location *loc, uint32_t question,
void *context)
{
LOG(3, NULL);
ASSERTne(loc, NULL);
switch (question) {
case Q_CHECKSUM:
util_checksum(&loc->hdr, sizeof(loc->hdr), &loc->hdr.checksum,
1, POOL_HDR_CSUM_END_OFF(&loc->hdr));
CHECK_INFO(ppc, "%ssetting pool_hdr.checksum to 0x%jx",
loc->prefix, le64toh(loc->hdr.checksum));
break;
default:
ERR("not implemented question id: %u", question);
}
return 0;
}
struct step {
int (*check)(PMEMpoolcheck *, location *);
int (*fix)(PMEMpoolcheck *, location *, uint32_t, void *);
};
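/*
 * Each step provides a check function, a fix function, or both. A step with
 * only a fix is executed when answers are queued for the questions asked by
 * the preceding check.
 */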
static const struct step steps_initial[] = {
{
.check = pool_hdr_preliminary_check,
},
{
.check = pool_hdr_default_check,
},
{
.fix = pool_hdr_default_fix,
.check = pool_hdr_quick_check,
},
{
.check = pool_hdr_nondefault,
},
{
.fix = pool_hdr_nondefault_fix,
},
{
.check = NULL,
.fix = NULL,
},
};
static const struct step steps_uuids[] = {
{
.check = pool_hdr_poolset_uuid_find,
},
{
.fix = pool_hdr_poolset_uuid_fix,
},
{
.check = pool_hdr_uuid_find,
},
{
.fix = pool_hdr_uuid_fix,
},
{
.check = pool_hdr_uuid_links,
},
{
.fix = pool_hdr_uuid_links_fix,
},
{
.check = pool_hdr_checksum,
},
{
.fix = pool_hdr_checksum_fix,
},
{
.check = NULL,
.fix = NULL,
},
};
/*
* step_exe -- (internal) perform single step according to its parameters
*/
static int
step_exe(PMEMpoolcheck *ppc, const struct step *steps, location *loc,
struct pool_replica *rep, unsigned nreplicas)
{
const struct step *step = &steps[loc->step++];
if (!step->fix)
return step->check(ppc, loc);
if (!check_has_answer(ppc->data))
return 0;
if (check_answer_loop(ppc, loc, NULL, 1, step->fix))
return -1;
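	/* answers were applied - write the fixed header back and persist it */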
util_convert2le_hdr(&loc->hdr);
memcpy(loc->hdrp, &loc->hdr, sizeof(loc->hdr));
loc->hdr_valid = pool_hdr_valid(loc->hdrp);
util_persist_auto(rep->part[0].is_dev_dax, loc->hdrp,
sizeof(*loc->hdrp));
util_convert2h_hdr_nocheck(&loc->hdr);
loc->pool_hdr_modified = 1;
/* execute check after fix if available */
if (step->check)
return step->check(ppc, loc);
return 0;
}
/*
* init_location_data -- (internal) prepare location information
*/
static void
init_location_data(PMEMpoolcheck *ppc, location *loc)
{
/* prepare prefix for messages */
unsigned nfiles = pool_set_files_count(ppc->pool->set_file);
if (ppc->result != CHECK_RESULT_PROCESS_ANSWERS) {
if (nfiles > 1) {
int ret = util_snprintf(loc->prefix, PREFIX_MAX_SIZE,
"replica %u part %u: ",
loc->replica, loc->part);
if (ret < 0)
FATAL("!snprintf");
} else
loc->prefix[0] = '\0';
loc->step = 0;
}
/* get neighboring parts and replicas and briefly validate them */
const struct pool_set *poolset = ppc->pool->set_file->poolset;
loc->single_repl = poolset->nreplicas == 1;
loc->single_part = poolset->replica[loc->replica]->nparts == 1;
struct pool_replica *rep = REP(poolset, loc->replica);
struct pool_replica *next_rep = REPN(poolset, loc->replica);
struct pool_replica *prev_rep = REPP(poolset, loc->replica);
loc->hdrp = HDR(rep, loc->part);
memcpy(&loc->hdr, loc->hdrp, sizeof(loc->hdr));
util_convert2h_hdr_nocheck(&loc->hdr);
loc->hdr_valid = pool_hdr_valid(loc->hdrp);
loc->next_part_hdrp = HDRN(rep, loc->part);
loc->prev_part_hdrp = HDRP(rep, loc->part);
loc->next_repl_hdrp = HDR(next_rep, 0);
loc->prev_repl_hdrp = HDR(prev_rep, 0);
loc->next_part_hdr_valid = pool_hdr_valid(loc->next_part_hdrp);
loc->prev_part_hdr_valid = pool_hdr_valid(loc->prev_part_hdrp);
loc->next_repl_hdr_valid = pool_hdr_valid(loc->next_repl_hdrp);
loc->prev_repl_hdr_valid = pool_hdr_valid(loc->prev_repl_hdrp);
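	/*
	 * Cache a pointer to any valid header within the current replica;
	 * it is looked up once per replica and reused by subsequent parts.
	 */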
if (!loc->valid_part_done || loc->valid_part_replica != loc->replica) {
loc->valid_part_hdrp = NULL;
for (unsigned p = 0; p < rep->nhdrs; ++p) {
if (pool_hdr_valid(HDR(rep, p))) {
loc->valid_part_hdrp = HDR(rep, p);
break;
}
}
loc->valid_part_done = true;
}
}
/*
* check_pool_hdr -- entry point for pool header checks
*/
void
check_pool_hdr(PMEMpoolcheck *ppc)
{
LOG(3, NULL);
location *loc = check_get_step_data(ppc->data);
unsigned nreplicas = ppc->pool->set_file->poolset->nreplicas;
struct pool_set *poolset = ppc->pool->set_file->poolset;
for (; loc->replica < nreplicas; loc->replica++) {
struct pool_replica *rep = poolset->replica[loc->replica];
for (; loc->part < rep->nhdrs; loc->part++) {
init_location_data(ppc, loc);
/* do all checks */
while (CHECK_NOT_COMPLETE(loc, steps_initial)) {
ASSERT(loc->step < ARRAY_SIZE(steps_initial));
if (step_exe(ppc, steps_initial, loc, rep,
nreplicas))
return;
}
}
loc->part = 0;
}
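	/* use the (possibly fixed) header of the first part as the reference */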
memcpy(&ppc->pool->hdr.pool, poolset->replica[0]->part[0].hdr,
sizeof(struct pool_hdr));
if (loc->pool_hdr_modified) {
struct pool_hdr hdr;
memcpy(&hdr, &ppc->pool->hdr.pool, sizeof(struct pool_hdr));
util_convert2h_hdr_nocheck(&hdr);
pool_params_from_header(&ppc->pool->params, &hdr);
}
}
/*
* check_pool_hdr_uuids -- entry point for pool header links checks
*/
void
check_pool_hdr_uuids(PMEMpoolcheck *ppc)
{
LOG(3, NULL);
location *loc = check_get_step_data(ppc->data);
unsigned nreplicas = ppc->pool->set_file->poolset->nreplicas;
struct pool_set *poolset = ppc->pool->set_file->poolset;
for (; loc->replica < nreplicas; loc->replica++) {
struct pool_replica *rep = poolset->replica[loc->replica];
for (; loc->part < rep->nparts; loc->part++) {
init_location_data(ppc, loc);
/* do all checks */
while (CHECK_NOT_COMPLETE(loc, steps_uuids)) {
ASSERT(loc->step < ARRAY_SIZE(steps_uuids));
if (step_exe(ppc, steps_uuids, loc, rep,
nreplicas))
return;
}
}
loc->part = 0;
}
memcpy(&ppc->pool->hdr.pool, poolset->replica[0]->part[0].hdr,
sizeof(struct pool_hdr));
if (loc->pool_hdr_modified) {
struct pool_hdr hdr;
memcpy(&hdr, &ppc->pool->hdr.pool, sizeof(struct pool_hdr));
util_convert2h_hdr_nocheck(&hdr);
pool_params_from_header(&ppc->pool->params, &hdr);
}
}
| 26,865 | 25.573689 | 80 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmempool/check_util.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* check_util.h -- internal definitions check util
*/
#ifndef CHECK_UTIL_H
#define CHECK_UTIL_H
#include <time.h>
#include <limits.h>
#include <sys/param.h>
#ifdef __cplusplus
extern "C" {
#endif
#define CHECK_STEP_COMPLETE UINT_MAX
#define CHECK_INVALID_QUESTION UINT_MAX
#define REQUIRE_ADVANCED "the following error can be fixed using " \
"PMEMPOOL_CHECK_ADVANCED flag"
#ifndef min
#define min(a, b) ((a) < (b) ? (a) : (b))
#endif
/* check control context */
struct check_data;
struct arena;
/* queue of check statuses */
struct check_status;
/* container storing state of all check steps */
#define PREFIX_MAX_SIZE 30
typedef struct {
unsigned init_done;
unsigned step;
unsigned replica;
unsigned part;
int single_repl;
int single_part;
struct pool_set *set;
int is_dev_dax;
struct pool_hdr *hdrp;
/* copy of the pool header in host byte order */
struct pool_hdr hdr;
int hdr_valid;
	/*
	 * If the pool header has been modified, this field indicates that
	 * the pool parameters structure requires a refresh.
	 */
int pool_hdr_modified;
unsigned healthy_replicas;
struct pool_hdr *next_part_hdrp;
struct pool_hdr *prev_part_hdrp;
struct pool_hdr *next_repl_hdrp;
struct pool_hdr *prev_repl_hdrp;
int next_part_hdr_valid;
int prev_part_hdr_valid;
int next_repl_hdr_valid;
int prev_repl_hdr_valid;
/* valid poolset uuid */
uuid_t *valid_puuid;
/* valid part uuid */
uuid_t *valid_uuid;
/* valid part pool header */
struct pool_hdr *valid_part_hdrp;
int valid_part_done;
unsigned valid_part_replica;
char prefix[PREFIX_MAX_SIZE];
struct arena *arenap;
uint64_t offset;
uint32_t narena;
uint8_t *bitmap;
uint8_t *dup_bitmap;
uint8_t *fbitmap;
struct list *list_inval;
struct list *list_flog_inval;
struct list *list_unmap;
struct {
int btti_header;
int btti_backup;
} valid;
struct {
struct btt_info btti;
uint64_t btti_offset;
} pool_valid;
} location;
/* check steps */
void check_bad_blocks(PMEMpoolcheck *ppc);
void check_backup(PMEMpoolcheck *ppc);
void check_pool_hdr(PMEMpoolcheck *ppc);
void check_pool_hdr_uuids(PMEMpoolcheck *ppc);
void check_sds(PMEMpoolcheck *ppc);
void check_log(PMEMpoolcheck *ppc);
void check_blk(PMEMpoolcheck *ppc);
void check_btt_info(PMEMpoolcheck *ppc);
void check_btt_map_flog(PMEMpoolcheck *ppc);
void check_write(PMEMpoolcheck *ppc);
struct check_data *check_data_alloc(void);
void check_data_free(struct check_data *data);
uint32_t check_step_get(struct check_data *data);
void check_step_inc(struct check_data *data);
location *check_get_step_data(struct check_data *data);
void check_end(struct check_data *data);
int check_is_end_util(struct check_data *data);
int check_status_create(PMEMpoolcheck *ppc, enum pmempool_check_msg_type type,
uint32_t arg, const char *fmt, ...) FORMAT_PRINTF(4, 5);
void check_status_release(PMEMpoolcheck *ppc, struct check_status *status);
void check_clear_status_cache(struct check_data *data);
struct check_status *check_pop_question(struct check_data *data);
struct check_status *check_pop_error(struct check_data *data);
struct check_status *check_pop_info(struct check_data *data);
bool check_has_error(struct check_data *data);
bool check_has_answer(struct check_data *data);
int check_push_answer(PMEMpoolcheck *ppc);
struct pmempool_check_status *check_status_get_util(
struct check_status *status);
int check_status_is(struct check_status *status,
enum pmempool_check_msg_type type);
/* create info status */
#define CHECK_INFO(ppc, ...)\
check_status_create(ppc, PMEMPOOL_CHECK_MSG_TYPE_INFO, 0, __VA_ARGS__)
/* create info status and append error message based on errno */
#define CHECK_INFO_ERRNO(ppc, ...)\
check_status_create(ppc, PMEMPOOL_CHECK_MSG_TYPE_INFO,\
(uint32_t)errno, __VA_ARGS__)
/* create error status */
#define CHECK_ERR(ppc, ...)\
check_status_create(ppc, PMEMPOOL_CHECK_MSG_TYPE_ERROR, 0, __VA_ARGS__)
/* create question status */
#define CHECK_ASK(ppc, question, ...)\
check_status_create(ppc, PMEMPOOL_CHECK_MSG_TYPE_QUESTION, question,\
__VA_ARGS__)
#define CHECK_NOT_COMPLETE(loc, steps)\
((loc)->step != CHECK_STEP_COMPLETE &&\
((steps)[(loc)->step].check != NULL ||\
(steps)[(loc)->step].fix != NULL))
int check_answer_loop(PMEMpoolcheck *ppc, location *data,
void *ctx, int fail_on_no,
int (*callback)(PMEMpoolcheck *, location *, uint32_t, void *ctx));
int check_questions_sequence_validate(PMEMpoolcheck *ppc);
const char *check_get_time_str(time_t time);
const char *check_get_uuid_str(uuid_t uuid);
const char *check_get_pool_type_str(enum pool_type type);
void check_insert_arena(PMEMpoolcheck *ppc, struct arena *arenap);
#ifdef _WIN32
void cache_to_utf8(struct check_data *data, char *buf, size_t size);
#endif
#define CHECK_IS(ppc, flag)\
util_flag_isset((ppc)->args.flags, PMEMPOOL_CHECK_ ## flag)
#define CHECK_IS_NOT(ppc, flag)\
util_flag_isclr((ppc)->args.flags, PMEMPOOL_CHECK_ ## flag)
#define CHECK_WITHOUT_FIXING(ppc)\
	(CHECK_IS_NOT(ppc, REPAIR) || CHECK_IS(ppc, DRY_RUN))
#ifdef __cplusplus
}
#endif
#endif
| 5,143 | 25.111675 | 78 |
h
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmempool/pmempool.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* pmempool.h -- internal definitions for libpmempool
*/
#ifndef PMEMPOOL_H
#define PMEMPOOL_H
#ifdef __cplusplus
extern "C" {
#endif
#define PMEMPOOL_LOG_PREFIX "libpmempool"
#define PMEMPOOL_LOG_LEVEL_VAR "PMEMPOOL_LOG_LEVEL"
#define PMEMPOOL_LOG_FILE_VAR "PMEMPOOL_LOG_FILE"
enum check_result {
CHECK_RESULT_CONSISTENT,
CHECK_RESULT_NOT_CONSISTENT,
CHECK_RESULT_ASK_QUESTIONS,
CHECK_RESULT_PROCESS_ANSWERS,
CHECK_RESULT_REPAIRED,
CHECK_RESULT_CANNOT_REPAIR,
CHECK_RESULT_ERROR,
CHECK_RESULT_INTERNAL_ERROR
};
/*
* pmempool_check_ctx -- context and arguments for check command
*/
struct pmempool_check_ctx {
struct pmempool_check_args args;
char *path;
char *backup_path;
struct check_data *data;
struct pool_data *pool;
enum check_result result;
unsigned sync_required;
};
#ifdef __cplusplus
}
#endif
#endif
| 927 | 17.938776 | 64 |
h
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmempool/libpmempool_main.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2017, Intel Corporation */
/*
* libpmempool_main.c -- entry point for libpmempool.dll
*
* XXX - This is a placeholder. All the library initialization/cleanup
* that is done in library ctors/dtors, as well as TLS initialization
* should be moved here.
*/
#include <stdio.h>
#include <windows.h>
void libpmempool_init(void);
void libpmempool_fini(void);
int APIENTRY
DllMain(HINSTANCE hInstance, DWORD dwReason, LPVOID lpReserved)
{
switch (dwReason) {
case DLL_PROCESS_ATTACH:
libpmempool_init();
break;
case DLL_THREAD_ATTACH:
case DLL_THREAD_DETACH:
break;
case DLL_PROCESS_DETACH:
libpmempool_fini();
break;
}
return TRUE;
}
| 695 | 18.885714 | 71 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmempool/check_bad_blocks.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2020, Intel Corporation */
/*
* check_bad_blocks.c -- pre-check bad_blocks
*/
#include <stddef.h>
#include <stdint.h>
#include <unistd.h>
#include "out.h"
#include "libpmempool.h"
#include "pmempool.h"
#include "pool.h"
#include "check_util.h"
#include "set_badblocks.h"
#include "badblocks.h"
/*
* check_bad_blocks -- check poolset for bad_blocks
*/
void
check_bad_blocks(PMEMpoolcheck *ppc)
{
LOG(3, "ppc %p", ppc);
int ret;
if (!(ppc->pool->params.features.compat & POOL_FEAT_CHECK_BAD_BLOCKS)) {
/* skipping checking poolset for bad blocks */
ppc->result = CHECK_RESULT_CONSISTENT;
return;
}
if (ppc->pool->set_file->poolset) {
ret = badblocks_check_poolset(ppc->pool->set_file->poolset, 0);
} else {
ret = badblocks_check_file(ppc->pool->set_file->fname);
}
if (ret < 0) {
if (errno == ENOTSUP) {
ppc->result = CHECK_RESULT_CANNOT_REPAIR;
CHECK_ERR(ppc, BB_NOT_SUPP);
return;
}
ppc->result = CHECK_RESULT_ERROR;
CHECK_ERR(ppc, "checking poolset for bad blocks failed -- '%s'",
ppc->path);
return;
}
if (ret > 0) {
ppc->result = CHECK_RESULT_CANNOT_REPAIR;
CHECK_ERR(ppc,
"poolset contains bad blocks, use 'pmempool info --bad-blocks=yes' to print or 'pmempool sync --bad-blocks' to clear them");
}
}
| 1,329 | 20.803279 | 127 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmempool/feature.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018, Intel Corporation */
/*
* feature.c -- implementation of pmempool_feature_(enable|disable|query)()
*/
#include <stddef.h>
#include <stdint.h>
#include <unistd.h>
#include <errno.h>
#include <sys/mman.h>
#include "libpmempool.h"
#include "util_pmem.h"
#include "pool_hdr.h"
#include "pool.h"
#define RW 0
#define RDONLY 1
#define FEATURE_INCOMPAT(X) \
(features_t)FEAT_INCOMPAT(X)
static const features_t f_singlehdr = FEAT_INCOMPAT(SINGLEHDR);
static const features_t f_cksum_2k = FEAT_INCOMPAT(CKSUM_2K);
static const features_t f_sds = FEAT_INCOMPAT(SDS);
static const features_t f_chkbb = FEAT_COMPAT(CHECK_BAD_BLOCKS);
#define FEAT_INVALID \
	{UINT32_MAX, UINT32_MAX, UINT32_MAX}
static const features_t f_invalid = FEAT_INVALID;
#define FEATURE_MAXPRINT ((size_t)1024)
/*
* buff_concat -- (internal) concat formatted string to string buffer
*/
static int
buff_concat(char *buff, size_t *pos, const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
const size_t size = FEATURE_MAXPRINT - *pos - 1;
int ret = vsnprintf(buff + *pos, size, fmt, ap);
va_end(ap);
if (ret < 0) {
ERR("vsprintf");
return ret;
}
if ((size_t)ret >= size) {
ERR("buffer truncated %d >= %zu", ret, size);
return -1;
}
*pos += (size_t)ret;
return 0;
}
/*
* buff_concat_features -- (internal) concat features string to string buffer
*/
static int
buff_concat_features(char *buff, size_t *pos, features_t f)
{
return buff_concat(buff, pos,
"{compat 0x%x, incompat 0x%x, ro_compat 0x%x}",
f.compat, f.incompat, f.ro_compat);
}
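/*
 * Example output of buff_concat_features (hypothetical feature values):
 *	{compat 0x2, incompat 0x6, ro_compat 0x0}
 */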
/*
* poolset_close -- (internal) close pool set
*/
static void
poolset_close(struct pool_set *set)
{
for (unsigned r = 0; r < set->nreplicas; ++r) {
struct pool_replica *rep = REP(set, r);
ASSERT(!rep->remote);
for (unsigned p = 0; p < rep->nparts; ++p) {
util_unmap_hdr(PART(rep, p));
}
}
util_poolset_close(set, DO_NOT_DELETE_PARTS);
}
/*
* features_check -- (internal) check if features are correct
*/
static int
features_check(features_t *features, struct pool_hdr *hdrp)
{
static char msg[FEATURE_MAXPRINT];
struct pool_hdr hdr;
memcpy(&hdr, hdrp, sizeof(hdr));
util_convert2h_hdr_nocheck(&hdr);
	/* (features != f_invalid) <=> features is set */
if (!util_feature_cmp(*features, f_invalid)) {
/* features from current and previous headers have to match */
if (!util_feature_cmp(*features, hdr.features)) {
size_t pos = 0;
if (buff_concat_features(msg, &pos, hdr.features))
goto err;
if (buff_concat(msg, &pos, "%s", " != "))
goto err;
if (buff_concat_features(msg, &pos, *features))
goto err;
ERR("features mismatch detected: %s", msg);
return -1;
} else {
return 0;
}
}
features_t unknown = util_get_unknown_features(
hdr.features, (features_t)POOL_FEAT_VALID);
/* all features are known */
if (util_feature_is_zero(unknown)) {
memcpy(features, &hdr.features, sizeof(*features));
return 0;
}
/* unknown features detected - print error message */
size_t pos = 0;
if (buff_concat_features(msg, &pos, unknown))
goto err;
ERR("invalid features detected: %s", msg);
err:
return -1;
}
/*
* get_pool_open_flags -- (internal) generate pool open flags
*/
static inline unsigned
get_pool_open_flags(struct pool_set *set, int rdonly)
{
unsigned flags = 0;
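	/* copy-on-write (private) mappings are not supported on device dax */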
if (rdonly == RDONLY && !util_pool_has_device_dax(set))
flags = POOL_OPEN_COW;
flags |= POOL_OPEN_IGNORE_BAD_BLOCKS;
return flags;
}
/*
* get_mmap_flags -- (internal) generate mmap flags
*/
static inline int
get_mmap_flags(struct pool_set_part *part, int rdonly)
{
if (part->is_dev_dax)
return MAP_SHARED;
else
return rdonly ? MAP_PRIVATE : MAP_SHARED;
}
/*
* poolset_open -- (internal) open pool set
*/
static struct pool_set *
poolset_open(const char *path, int rdonly)
{
struct pool_set *set;
features_t features = FEAT_INVALID;
/* read poolset */
int ret = util_poolset_create_set(&set, path, 0, 0, true);
if (ret < 0) {
ERR("cannot open pool set -- '%s'", path);
goto err_poolset;
}
if (set->remote) {
ERR("poolsets with remote replicas are not supported");
errno = EINVAL;
goto err_open;
}
/* open a memory pool */
unsigned flags = get_pool_open_flags(set, rdonly);
if (util_pool_open_nocheck(set, flags))
goto err_open;
/* map all headers and check features */
for (unsigned r = 0; r < set->nreplicas; ++r) {
struct pool_replica *rep = REP(set, r);
ASSERT(!rep->remote);
for (unsigned p = 0; p < rep->nparts; ++p) {
struct pool_set_part *part = PART(rep, p);
int mmap_flags = get_mmap_flags(part, rdonly);
if (util_map_hdr(part, mmap_flags, rdonly)) {
part->hdr = NULL;
goto err_map_hdr;
}
if (features_check(&features, HDR(rep, p))) {
ERR(
"invalid features - replica #%d part #%d",
r, p);
goto err_open;
}
}
}
return set;
err_map_hdr:
/* unmap all headers */
for (unsigned r = 0; r < set->nreplicas; ++r) {
struct pool_replica *rep = REP(set, r);
ASSERT(!rep->remote);
for (unsigned p = 0; p < rep->nparts; ++p) {
util_unmap_hdr(PART(rep, p));
}
}
err_open:
/* close the memory pool and release pool set structure */
util_poolset_close(set, DO_NOT_DELETE_PARTS);
err_poolset:
return NULL;
}
/*
* get_hdr -- (internal) read header in host byte order
*/
static struct pool_hdr *
get_hdr(struct pool_set *set, unsigned rep, unsigned part)
{
static struct pool_hdr hdr;
/* copy header */
struct pool_hdr *hdrp = HDR(REP(set, rep), part);
memcpy(&hdr, hdrp, sizeof(hdr));
/* convert to host byte order and return */
util_convert2h_hdr_nocheck(&hdr);
return &hdr;
}
/*
* set_hdr -- (internal) convert header to little-endian, checksum and write
*/
static void
set_hdr(struct pool_set *set, unsigned rep, unsigned part, struct pool_hdr *src)
{
/* convert to little-endian and set new checksum */
const size_t skip_off = POOL_HDR_CSUM_END_OFF(src);
util_convert2le_hdr(src);
util_checksum(src, sizeof(*src), &src->checksum, 1, skip_off);
/* write header */
struct pool_replica *replica = REP(set, rep);
struct pool_hdr *dst = HDR(replica, part);
memcpy(dst, src, sizeof(*src));
util_persist_auto(PART(replica, part)->is_dev_dax, dst, sizeof(*src));
}
typedef enum {
DISABLED,
ENABLED
} fstate_t;
#define FEATURE_IS_ENABLED_STR "feature already enabled: %s"
#define FEATURE_IS_DISABLED_STR "feature already disabled: %s"
/*
* require_feature_is -- (internal) check if required feature is enabled
* (or disabled)
*/
static int
require_feature_is(struct pool_set *set, features_t feature, fstate_t req_state)
{
struct pool_hdr *hdrp = get_hdr((set), 0, 0);
fstate_t state = util_feature_is_set(hdrp->features, feature)
? ENABLED : DISABLED;
if (state == req_state)
return 1;
const char *msg = (state == ENABLED)
? FEATURE_IS_ENABLED_STR : FEATURE_IS_DISABLED_STR;
LOG(3, msg, util_feature2str(feature, NULL));
return 0;
}
#define FEATURE_IS_NOT_ENABLED_PRIOR_STR "enable %s prior to %s %s"
#define FEATURE_IS_NOT_DISABLED_PRIOR_STR "disable %s prior to %s %s"
/*
* require_other_feature_is -- (internal) check if other feature is enabled
* (or disabled) in case the other feature has to be enabled (or disabled)
* prior to the main one
*/
static int
require_other_feature_is(struct pool_set *set, features_t other,
fstate_t req_state, features_t feature, const char *cause)
{
struct pool_hdr *hdrp = get_hdr((set), 0, 0);
fstate_t state = util_feature_is_set(hdrp->features, other)
? ENABLED : DISABLED;
if (state == req_state)
return 1;
const char *msg = (req_state == ENABLED)
? FEATURE_IS_NOT_ENABLED_PRIOR_STR
: FEATURE_IS_NOT_DISABLED_PRIOR_STR;
ERR(msg, util_feature2str(other, NULL),
cause, util_feature2str(feature, NULL));
return 0;
}
/*
* feature_set -- (internal) enable (or disable) feature
*/
static void
feature_set(struct pool_set *set, features_t feature, int value)
{
for (unsigned r = 0; r < set->nreplicas; ++r) {
for (unsigned p = 0; p < REP(set, r)->nparts; ++p) {
struct pool_hdr *hdrp = get_hdr(set, r, p);
if (value == ENABLED)
util_feature_enable(&hdrp->features, feature);
else
util_feature_disable(&hdrp->features, feature);
set_hdr(set, r, p, hdrp);
}
}
}
/*
* query_feature -- (internal) query feature value
*/
static int
query_feature(const char *path, features_t feature)
{
struct pool_set *set = poolset_open(path, RDONLY);
if (!set)
goto err_open;
struct pool_hdr *hdrp = get_hdr(set, 0, 0);
const int query = util_feature_is_set(hdrp->features, feature);
poolset_close(set);
return query;
err_open:
return -1;
}
/*
* unsupported_feature -- (internal) report unsupported feature
*/
static inline int
unsupported_feature(features_t feature)
{
ERR("unsupported feature: %s", util_feature2str(feature, NULL));
errno = EINVAL;
return -1;
}
/*
* enable_singlehdr -- (internal) enable POOL_FEAT_SINGLEHDR
*/
static int
enable_singlehdr(const char *path)
{
return unsupported_feature(f_singlehdr);
}
/*
* disable_singlehdr -- (internal) disable POOL_FEAT_SINGLEHDR
*/
static int
disable_singlehdr(const char *path)
{
return unsupported_feature(f_singlehdr);
}
/*
* query_singlehdr -- (internal) query POOL_FEAT_SINGLEHDR
*/
static int
query_singlehdr(const char *path)
{
return query_feature(path, f_singlehdr);
}
/*
* enable_checksum_2k -- (internal) enable POOL_FEAT_CKSUM_2K
*/
static int
enable_checksum_2k(const char *path)
{
struct pool_set *set = poolset_open(path, RW);
if (!set)
return -1;
if (require_feature_is(set, f_cksum_2k, DISABLED))
feature_set(set, f_cksum_2k, ENABLED);
poolset_close(set);
return 0;
}
/*
* disable_checksum_2k -- (internal) disable POOL_FEAT_CKSUM_2K
*/
static int
disable_checksum_2k(const char *path)
{
struct pool_set *set = poolset_open(path, RW);
if (!set)
return -1;
int ret = 0;
if (!require_feature_is(set, f_cksum_2k, ENABLED))
goto exit;
/* check if POOL_FEAT_SDS is disabled */
if (!require_other_feature_is(set, f_sds, DISABLED,
f_cksum_2k, "disabling")) {
ret = -1;
goto exit;
}
feature_set(set, f_cksum_2k, DISABLED);
exit:
poolset_close(set);
return ret;
}
/*
* query_checksum_2k -- (internal) query POOL_FEAT_CKSUM_2K
*/
static int
query_checksum_2k(const char *path)
{
return query_feature(path, f_cksum_2k);
}
/*
* enable_shutdown_state -- (internal) enable POOL_FEAT_SDS
*/
static int
enable_shutdown_state(const char *path)
{
struct pool_set *set = poolset_open(path, RW);
if (!set)
return -1;
int ret = 0;
if (!require_feature_is(set, f_sds, DISABLED))
goto exit;
/* check if POOL_FEAT_CKSUM_2K is enabled */
if (!require_other_feature_is(set, f_cksum_2k, ENABLED,
f_sds, "enabling")) {
ret = -1;
goto exit;
}
feature_set(set, f_sds, ENABLED);
exit:
poolset_close(set);
return ret;
}
/*
* reset_shutdown_state -- zero all shutdown structures
*/
static void
reset_shutdown_state(struct pool_set *set)
{
for (unsigned rep = 0; rep < set->nreplicas; ++rep) {
for (unsigned part = 0; part < REP(set, rep)->nparts; ++part) {
struct pool_hdr *hdrp = HDR(REP(set, rep), part);
shutdown_state_init(&hdrp->sds, REP(set, rep));
}
}
}
/*
* disable_shutdown_state -- (internal) disable POOL_FEAT_SDS
*/
static int
disable_shutdown_state(const char *path)
{
struct pool_set *set = poolset_open(path, RW);
if (!set)
return -1;
if (require_feature_is(set, f_sds, ENABLED)) {
feature_set(set, f_sds, DISABLED);
reset_shutdown_state(set);
}
poolset_close(set);
return 0;
}
/*
* query_shutdown_state -- (internal) query POOL_FEAT_SDS
*/
static int
query_shutdown_state(const char *path)
{
return query_feature(path, f_sds);
}
/*
* enable_badblocks_checking -- (internal) enable POOL_FEAT_CHECK_BAD_BLOCKS
*/
static int
enable_badblocks_checking(const char *path)
{
#ifdef _WIN32
ERR("bad blocks checking is not supported on Windows");
return -1;
#else
struct pool_set *set = poolset_open(path, RW);
if (!set)
return -1;
if (require_feature_is(set, f_chkbb, DISABLED))
feature_set(set, f_chkbb, ENABLED);
poolset_close(set);
return 0;
#endif
}
/*
* disable_badblocks_checking -- (internal) disable POOL_FEAT_CHECK_BAD_BLOCKS
*/
static int
disable_badblocks_checking(const char *path)
{
struct pool_set *set = poolset_open(path, RW);
if (!set)
return -1;
int ret = 0;
if (!require_feature_is(set, f_chkbb, ENABLED))
goto exit;
feature_set(set, f_chkbb, DISABLED);
exit:
poolset_close(set);
return ret;
}
/*
* query_badblocks_checking -- (internal) query POOL_FEAT_CHECK_BAD_BLOCKS
*/
static int
query_badblocks_checking(const char *path)
{
return query_feature(path, f_chkbb);
}
struct feature_funcs {
int (*enable)(const char *);
int (*disable)(const char *);
int (*query)(const char *);
};
static struct feature_funcs features[] = {
{
.enable = enable_singlehdr,
.disable = disable_singlehdr,
.query = query_singlehdr
},
{
.enable = enable_checksum_2k,
.disable = disable_checksum_2k,
.query = query_checksum_2k
},
{
.enable = enable_shutdown_state,
.disable = disable_shutdown_state,
.query = query_shutdown_state
},
{
.enable = enable_badblocks_checking,
.disable = disable_badblocks_checking,
.query = query_badblocks_checking
},
};
#define FEATURE_FUNCS_MAX ARRAY_SIZE(features)
/*
* are_flags_valid -- (internal) check if flags are valid
*/
static inline int
are_flags_valid(unsigned flags)
{
if (flags != 0) {
ERR("invalid flags: 0x%x", flags);
errno = EINVAL;
return 0;
}
return 1;
}
/*
* is_feature_valid -- (internal) check if feature is valid
*/
static inline int
is_feature_valid(uint32_t feature)
{
if (feature >= FEATURE_FUNCS_MAX) {
ERR("invalid feature: 0x%x", feature);
errno = EINVAL;
return 0;
}
return 1;
}
/*
* pmempool_feature_enableU -- enable pool set feature
*/
#ifndef _WIN32
static inline
#endif
int
pmempool_feature_enableU(const char *path, enum pmempool_feature feature,
unsigned flags)
{
LOG(3, "path %s feature %x flags %x", path, feature, flags);
if (!is_feature_valid(feature))
return -1;
if (!are_flags_valid(flags))
return -1;
return features[feature].enable(path);
}
/*
* pmempool_feature_disableU -- disable pool set feature
*/
#ifndef _WIN32
static inline
#endif
int
pmempool_feature_disableU(const char *path, enum pmempool_feature feature,
unsigned flags)
{
LOG(3, "path %s feature %x flags %x", path, feature, flags);
if (!is_feature_valid(feature))
return -1;
if (!are_flags_valid(flags))
return -1;
return features[feature].disable(path);
}
/*
* pmempool_feature_queryU -- query pool set feature
*/
#ifndef _WIN32
static inline
#endif
int
pmempool_feature_queryU(const char *path, enum pmempool_feature feature,
unsigned flags)
{
LOG(3, "path %s feature %x flags %x", path, feature, flags);
	/*
	 * XXX: Windows does not allow function calls in constant expressions
	 */
#ifndef _WIN32
#define CHECK_INCOMPAT_MAPPING(FEAT, ENUM) \
COMPILE_ERROR_ON( \
util_feature2pmempool_feature(FEATURE_INCOMPAT(FEAT)) != ENUM)
CHECK_INCOMPAT_MAPPING(SINGLEHDR, PMEMPOOL_FEAT_SINGLEHDR);
CHECK_INCOMPAT_MAPPING(CKSUM_2K, PMEMPOOL_FEAT_CKSUM_2K);
CHECK_INCOMPAT_MAPPING(SDS, PMEMPOOL_FEAT_SHUTDOWN_STATE);
#undef CHECK_INCOMPAT_MAPPING
#endif
if (!is_feature_valid(feature))
return -1;
if (!are_flags_valid(flags))
return -1;
return features[feature].query(path);
}
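/*
 * Usage sketch (hypothetical path) - query whether the shutdown state
 * feature is enabled:
 *
 *	int ret = pmempool_feature_query("/path/to/pool.set",
 *			PMEMPOOL_FEAT_SHUTDOWN_STATE, 0);
 *
 * ret == 1 means enabled, ret == 0 means disabled, ret < 0 means an error.
 */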
#ifndef _WIN32
/*
* pmempool_feature_enable -- enable pool set feature
*/
int
pmempool_feature_enable(const char *path, enum pmempool_feature feature,
unsigned flags)
{
return pmempool_feature_enableU(path, feature, flags);
}
#else
/*
* pmempool_feature_enableW -- enable pool set feature as widechar
*/
int
pmempool_feature_enableW(const wchar_t *path, enum pmempool_feature feature,
unsigned flags)
{
char *upath = util_toUTF8(path);
if (upath == NULL) {
ERR("Invalid poolest/pool file path.");
return -1;
}
int ret = pmempool_feature_enableU(upath, feature, flags);
util_free_UTF8(upath);
return ret;
}
#endif
#ifndef _WIN32
/*
* pmempool_feature_disable -- disable pool set feature
*/
int
pmempool_feature_disable(const char *path, enum pmempool_feature feature,
unsigned flags)
{
return pmempool_feature_disableU(path, feature, flags);
}
#else
/*
* pmempool_feature_disableW -- disable pool set feature as widechar
*/
int
pmempool_feature_disableW(const wchar_t *path, enum pmempool_feature feature,
unsigned flags)
{
char *upath = util_toUTF8(path);
if (upath == NULL) {
ERR("Invalid poolest/pool file path.");
return -1;
}
int ret = pmempool_feature_disableU(upath, feature, flags);
util_free_UTF8(upath);
return ret;
}
#endif
#ifndef _WIN32
/*
* pmempool_feature_query -- query pool set feature
*/
int
pmempool_feature_query(const char *path, enum pmempool_feature feature,
unsigned flags)
{
return pmempool_feature_queryU(path, feature, flags);
}
#else
/*
* pmempool_feature_queryW -- query pool set feature as widechar
*/
int
pmempool_feature_queryW(const wchar_t *path, enum pmempool_feature feature,
unsigned flags)
{
char *upath = util_toUTF8(path);
if (upath == NULL) {
ERR("Invalid poolest/pool file path.");
return -1;
}
int ret = pmempool_feature_queryU(upath, feature, flags);
util_free_UTF8(upath);
return ret;
}
#endif
| 17,344 | 20.955696 | 80 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmempool/check.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* check.c -- functions performing checks in proper order
*/
#include <stdint.h>
#include "out.h"
#include "libpmempool.h"
#include "pmempool.h"
#include "pool.h"
#include "check.h"
#include "check_util.h"
#define CHECK_RESULT_IS_STOP(result)\
((result) == CHECK_RESULT_ERROR ||\
(result) == CHECK_RESULT_INTERNAL_ERROR ||\
((result) == CHECK_RESULT_CANNOT_REPAIR) ||\
((result) == CHECK_RESULT_NOT_CONSISTENT))
struct step {
void (*func)(PMEMpoolcheck *);
enum pool_type type;
bool part;
};
static const struct step steps[] = {
{
.type = POOL_TYPE_ANY,
.func = check_bad_blocks,
.part = true,
},
{
.type = POOL_TYPE_ANY,
.func = check_backup,
.part = true,
},
{
.type = POOL_TYPE_BLK | POOL_TYPE_LOG |
POOL_TYPE_OBJ,
.func = check_sds,
.part = true,
},
{
.type = POOL_TYPE_BLK | POOL_TYPE_LOG |
POOL_TYPE_OBJ |
POOL_TYPE_UNKNOWN,
.func = check_pool_hdr,
.part = true,
},
{
.type = POOL_TYPE_BLK | POOL_TYPE_LOG |
POOL_TYPE_OBJ |
POOL_TYPE_UNKNOWN,
.func = check_pool_hdr_uuids,
.part = true,
},
{
.type = POOL_TYPE_LOG,
.func = check_log,
.part = false,
},
{
.type = POOL_TYPE_BLK,
.func = check_blk,
.part = false,
},
{
.type = POOL_TYPE_BLK | POOL_TYPE_BTT,
.func = check_btt_info,
.part = false,
},
{
.type = POOL_TYPE_BLK | POOL_TYPE_BTT,
.func = check_btt_map_flog,
.part = false,
},
{
.type = POOL_TYPE_BLK | POOL_TYPE_LOG |
POOL_TYPE_BTT,
.func = check_write,
.part = false,
},
{
.func = NULL,
},
};
/*
* check_init -- initialize check process
*/
int
check_init(PMEMpoolcheck *ppc)
{
LOG(3, NULL);
if (!(ppc->data = check_data_alloc()))
goto error_data_malloc;
if (!(ppc->pool = pool_data_alloc(ppc)))
goto error_pool_malloc;
return 0;
error_pool_malloc:
check_data_free(ppc->data);
error_data_malloc:
return -1;
}
#ifdef _WIN32
void
convert_status_cache(PMEMpoolcheck *ppc, char *buf, size_t size)
{
cache_to_utf8(ppc->data, buf, size);
}
#endif
/*
* status_get -- (internal) get next check_status
*
* The assumed order of check_statuses is: all info messages, error or question.
*/
static struct check_status *
status_get(PMEMpoolcheck *ppc)
{
struct check_status *status = NULL;
/* clear cache if exists */
check_clear_status_cache(ppc->data);
/* return next info if exists */
if ((status = check_pop_info(ppc->data)))
return status;
/* return error if exists */
if ((status = check_pop_error(ppc->data)))
return status;
if (ppc->result == CHECK_RESULT_ASK_QUESTIONS) {
/*
* push answer for previous question and return info if answer
* is not valid
*/
if (check_push_answer(ppc))
if ((status = check_pop_info(ppc->data)))
return status;
/* if has next question ask it */
if ((status = check_pop_question(ppc->data)))
return status;
/* process answers otherwise */
ppc->result = CHECK_RESULT_PROCESS_ANSWERS;
} else if (CHECK_RESULT_IS_STOP(ppc->result))
check_end(ppc->data);
return NULL;
}
/*
* check_step -- perform single check step
*/
struct check_status *
check_step(PMEMpoolcheck *ppc)
{
LOG(3, NULL);
struct check_status *status = NULL;
/* return if we have information or questions to ask or check ended */
if ((status = status_get(ppc)) || check_is_end(ppc->data))
return status;
/* get next step and check if exists */
const struct step *step = &steps[check_step_get(ppc->data)];
if (step->func == NULL) {
check_end(ppc->data);
return status;
}
	/*
	 * The step is performed only if the pool type is one of the required
	 * pool types and, when parts are excluded from the current step, the
	 * pool is not a part file.
	 */
if (!(step->type & ppc->pool->params.type) ||
(ppc->pool->params.is_part && !step->part)) {
/* skip test */
check_step_inc(ppc->data);
return NULL;
}
/* perform step */
step->func(ppc);
/* move on to next step if no questions were generated */
if (ppc->result != CHECK_RESULT_ASK_QUESTIONS)
check_step_inc(ppc->data);
/* get current status and return */
return status_get(ppc);
}
/*
* check_fini -- stop check process
*/
void
check_fini(PMEMpoolcheck *ppc)
{
LOG(3, NULL);
pool_data_free(ppc->pool);
check_data_free(ppc->data);
}
/*
* check_is_end -- return if check has ended
*/
int
check_is_end(struct check_data *data)
{
return check_is_end_util(data);
}
/*
* check_status_get -- extract pmempool_check_status from check_status
*/
struct pmempool_check_status *
check_status_get(struct check_status *status)
{
return check_status_get_util(status);
}
| 4,651 | 18.965665 | 80 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmempool/check_btt_map_flog.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* check_btt_map_flog.c -- check BTT Map and Flog
*/
#include <stdint.h>
#include <sys/param.h>
#include <endian.h>
#include "out.h"
#include "btt.h"
#include "libpmempool.h"
#include "pmempool.h"
#include "pool.h"
#include "check_util.h"
enum questions {
Q_REPAIR_MAP,
Q_REPAIR_FLOG,
};
/*
* flog_read -- (internal) read and convert flog from file
*/
static int
flog_read(PMEMpoolcheck *ppc, struct arena *arenap)
{
uint64_t flogoff = arenap->offset + arenap->btt_info.flogoff;
arenap->flogsize = btt_flog_size(arenap->btt_info.nfree);
arenap->flog = malloc(arenap->flogsize);
if (!arenap->flog) {
ERR("!malloc");
goto error_malloc;
}
if (pool_read(ppc->pool, arenap->flog, arenap->flogsize, flogoff))
goto error_read;
uint8_t *ptr = arenap->flog;
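	/* each flog slot holds a pair of btt_flog entries - convert both */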
uint32_t i;
for (i = 0; i < arenap->btt_info.nfree; i++) {
struct btt_flog *flog = (struct btt_flog *)ptr;
btt_flog_convert2h(&flog[0]);
btt_flog_convert2h(&flog[1]);
ptr += BTT_FLOG_PAIR_ALIGN;
}
return 0;
error_read:
free(arenap->flog);
arenap->flog = NULL;
error_malloc:
return -1;
}
/*
* map_read -- (internal) read and convert map from file
*/
static int
map_read(PMEMpoolcheck *ppc, struct arena *arenap)
{
uint64_t mapoff = arenap->offset + arenap->btt_info.mapoff;
arenap->mapsize = btt_map_size(arenap->btt_info.external_nlba);
ASSERT(arenap->mapsize != 0);
arenap->map = malloc(arenap->mapsize);
if (!arenap->map) {
ERR("!malloc");
goto error_malloc;
}
if (pool_read(ppc->pool, arenap->map, arenap->mapsize, mapoff)) {
goto error_read;
}
uint32_t i;
for (i = 0; i < arenap->btt_info.external_nlba; i++)
arenap->map[i] = le32toh(arenap->map[i]);
return 0;
error_read:
free(arenap->map);
arenap->map = NULL;
error_malloc:
return -1;
}
/*
* list_item -- item for simple list
*/
struct list_item {
PMDK_LIST_ENTRY(list_item) next;
uint32_t val;
};
/*
* list -- simple list for storing numbers
*/
struct list {
PMDK_LIST_HEAD(listhead, list_item) head;
uint32_t count;
};
/*
* list_alloc -- (internal) allocate an empty list
*/
static struct list *
list_alloc(void)
{
struct list *list = malloc(sizeof(struct list));
if (!list) {
ERR("!malloc");
return NULL;
}
PMDK_LIST_INIT(&list->head);
list->count = 0;
return list;
}
/*
* list_push -- (internal) insert new element to the list
*/
static struct list_item *
list_push(struct list *list, uint32_t val)
{
struct list_item *item = malloc(sizeof(*item));
if (!item) {
ERR("!malloc");
return NULL;
}
item->val = val;
list->count++;
PMDK_LIST_INSERT_HEAD(&list->head, item, next);
return item;
}
/*
* list_pop -- (internal) pop element from list head
*/
static int
list_pop(struct list *list, uint32_t *valp)
{
if (!PMDK_LIST_EMPTY(&list->head)) {
struct list_item *i = PMDK_LIST_FIRST(&list->head);
PMDK_LIST_REMOVE(i, next);
if (valp)
*valp = i->val;
free(i);
list->count--;
return 1;
}
return 0;
}
/*
* list_free -- (internal) free the list
*/
static void
list_free(struct list *list)
{
while (list_pop(list, NULL))
;
free(list);
}
/*
* cleanup -- (internal) prepare resources for map and flog check
*/
static int
cleanup(PMEMpoolcheck *ppc, location *loc)
{
LOG(3, NULL);
if (loc->list_unmap)
list_free(loc->list_unmap);
if (loc->list_flog_inval)
list_free(loc->list_flog_inval);
if (loc->list_inval)
list_free(loc->list_inval);
if (loc->fbitmap)
free(loc->fbitmap);
if (loc->bitmap)
free(loc->bitmap);
if (loc->dup_bitmap)
free(loc->dup_bitmap);
return 0;
}
/*
* init -- (internal) initialize map and flog check
*/
static int
init(PMEMpoolcheck *ppc, location *loc)
{
LOG(3, NULL);
struct arena *arenap = loc->arenap;
/* read flog and map entries */
if (flog_read(ppc, arenap)) {
CHECK_ERR(ppc, "arena %u: cannot read BTT Flog", arenap->id);
goto error;
}
if (map_read(ppc, arenap)) {
CHECK_ERR(ppc, "arena %u: cannot read BTT Map", arenap->id);
goto error;
}
/* create bitmaps for checking duplicated blocks */
uint32_t bitmapsize = howmany(arenap->btt_info.internal_nlba, 8);
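	/* one bit per internal LBA, rounded up to whole bytes */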
loc->bitmap = calloc(bitmapsize, 1);
if (!loc->bitmap) {
ERR("!calloc");
CHECK_ERR(ppc, "arena %u: cannot allocate memory for blocks "
"bitmap", arenap->id);
goto error;
}
loc->dup_bitmap = calloc(bitmapsize, 1);
if (!loc->dup_bitmap) {
ERR("!calloc");
CHECK_ERR(ppc, "arena %u: cannot allocate memory for "
"duplicated blocks bitmap", arenap->id);
goto error;
}
loc->fbitmap = calloc(bitmapsize, 1);
if (!loc->fbitmap) {
ERR("!calloc");
CHECK_ERR(ppc, "arena %u: cannot allocate memory for BTT Flog "
"bitmap", arenap->id);
goto error;
}
/* list of invalid map entries */
loc->list_inval = list_alloc();
if (!loc->list_inval) {
CHECK_ERR(ppc,
"arena %u: cannot allocate memory for invalid BTT map "
"entries list", arenap->id);
goto error;
}
/* list of invalid flog entries */
loc->list_flog_inval = list_alloc();
if (!loc->list_flog_inval) {
CHECK_ERR(ppc,
"arena %u: cannot allocate memory for invalid BTT Flog "
"entries list", arenap->id);
goto error;
}
/* list of unmapped blocks */
loc->list_unmap = list_alloc();
if (!loc->list_unmap) {
CHECK_ERR(ppc,
"arena %u: cannot allocate memory for unmaped blocks "
"list", arenap->id);
goto error;
}
return 0;
error:
ppc->result = CHECK_RESULT_ERROR;
cleanup(ppc, loc);
return -1;
}
/*
* map_get_postmap_lba -- extract postmap LBA from map entry
*/
static inline uint32_t
map_get_postmap_lba(struct arena *arenap, uint32_t i)
{
uint32_t entry = arenap->map[i];
/* if map record is in initial state (flags == 0b00) */
if (map_entry_is_initial(entry))
return i;
/* read postmap LBA otherwise */
return entry & BTT_MAP_ENTRY_LBA_MASK;
}
/*
* map_entry_check -- (internal) check single map entry
*/
static int
map_entry_check(PMEMpoolcheck *ppc, location *loc, uint32_t i)
{
struct arena *arenap = loc->arenap;
uint32_t lba = map_get_postmap_lba(arenap, i);
/* add duplicated and invalid entries to list */
if (lba < arenap->btt_info.internal_nlba) {
if (util_isset(loc->bitmap, lba)) {
CHECK_INFO(ppc, "arena %u: BTT Map entry %u duplicated "
"at %u", arenap->id, lba, i);
util_setbit(loc->dup_bitmap, lba);
if (!list_push(loc->list_inval, i))
return -1;
} else
util_setbit(loc->bitmap, lba);
} else {
CHECK_INFO(ppc, "arena %u: invalid BTT Map entry at %u",
arenap->id, i);
if (!list_push(loc->list_inval, i))
return -1;
}
return 0;
}
/*
* flog_entry_check -- (internal) check single flog entry
*/
static int
flog_entry_check(PMEMpoolcheck *ppc, location *loc, uint32_t i,
uint8_t **ptr)
{
struct arena *arenap = loc->arenap;
/* flog entry consists of two btt_flog structures */
struct btt_flog *flog = (struct btt_flog *)*ptr;
int next;
struct btt_flog *flog_cur = btt_flog_get_valid(flog, &next);
/* insert invalid and duplicated indexes to list */
if (!flog_cur) {
CHECK_INFO(ppc, "arena %u: invalid BTT Flog entry at %u",
arenap->id, i);
if (!list_push(loc->list_flog_inval, i))
return -1;
goto next;
}
uint32_t entry = flog_cur->old_map & BTT_MAP_ENTRY_LBA_MASK;
uint32_t new_entry = flog_cur->new_map & BTT_MAP_ENTRY_LBA_MASK;
/*
	 * Check if lba is in external_nlba range, and check if both old_map and
* new_map are in internal_nlba range.
*/
if (flog_cur->lba >= arenap->btt_info.external_nlba ||
entry >= arenap->btt_info.internal_nlba ||
new_entry >= arenap->btt_info.internal_nlba) {
CHECK_INFO(ppc, "arena %u: invalid BTT Flog entry at %u",
arenap->id, i);
if (!list_push(loc->list_flog_inval, i))
return -1;
goto next;
}
if (util_isset(loc->fbitmap, entry)) {
/*
		 * here we have two flog entries which hold the same free block
*/
CHECK_INFO(ppc, "arena %u: duplicated BTT Flog entry at %u\n",
arenap->id, i);
if (!list_push(loc->list_flog_inval, i))
return -1;
} else if (util_isset(loc->bitmap, entry)) {
		/* this is probably an unfinished write */
if (util_isset(loc->bitmap, new_entry)) {
/* Both old_map and new_map are already used in map. */
CHECK_INFO(ppc, "arena %u: duplicated BTT Flog entry "
"at %u", arenap->id, i);
util_setbit(loc->dup_bitmap, new_entry);
if (!list_push(loc->list_flog_inval, i))
return -1;
} else {
			/*
			 * Unfinished write. The next time the pool is opened,
			 * the map will be updated to new_map.
			 */
util_setbit(loc->bitmap, new_entry);
util_setbit(loc->fbitmap, entry);
}
} else {
int flog_valid = 1;
/*
* Either flog entry is in its initial state:
* - current_btt_flog entry is first one in pair and
* - current_btt_flog.old_map == current_btt_flog.new_map and
* - current_btt_flog.seq == 0b01 and
* - second flog entry in pair is zeroed
* or
* current_btt_flog.old_map != current_btt_flog.new_map
*/
if (entry == new_entry)
flog_valid = (next == 1) && (flog_cur->seq == 1) &&
util_is_zeroed((const void *)&flog[1],
sizeof(flog[1]));
if (flog_valid) {
/* totally fine case */
util_setbit(loc->bitmap, entry);
util_setbit(loc->fbitmap, entry);
} else {
CHECK_INFO(ppc, "arena %u: invalid BTT Flog entry at "
"%u", arenap->id, i);
if (!list_push(loc->list_flog_inval, i))
return -1;
}
}
next:
*ptr += BTT_FLOG_PAIR_ALIGN;
return 0;
}
/*
* arena_map_flog_check -- (internal) check map and flog
*/
static int
arena_map_flog_check(PMEMpoolcheck *ppc, location *loc)
{
LOG(3, NULL);
struct arena *arenap = loc->arenap;
/* check map entries */
uint32_t i;
for (i = 0; i < arenap->btt_info.external_nlba; i++) {
if (map_entry_check(ppc, loc, i))
goto error_push;
}
/* check flog entries */
uint8_t *ptr = arenap->flog;
for (i = 0; i < arenap->btt_info.nfree; i++) {
if (flog_entry_check(ppc, loc, i, &ptr))
goto error_push;
}
/* check unmapped blocks and insert to list */
for (i = 0; i < arenap->btt_info.internal_nlba; i++) {
if (!util_isset(loc->bitmap, i)) {
CHECK_INFO(ppc, "arena %u: unmapped block %u",
arenap->id, i);
if (!list_push(loc->list_unmap, i))
goto error_push;
}
}
if (loc->list_unmap->count)
CHECK_INFO(ppc, "arena %u: number of unmapped blocks: %u",
arenap->id, loc->list_unmap->count);
if (loc->list_inval->count)
CHECK_INFO(ppc, "arena %u: number of invalid BTT Map entries: "
"%u", arenap->id, loc->list_inval->count);
if (loc->list_flog_inval->count)
CHECK_INFO(ppc, "arena %u: number of invalid BTT Flog entries: "
"%u", arenap->id, loc->list_flog_inval->count);
if (CHECK_IS_NOT(ppc, REPAIR) && loc->list_unmap->count > 0) {
ppc->result = CHECK_RESULT_NOT_CONSISTENT;
check_end(ppc->data);
goto cleanup;
}
/*
	 * We are able to repair if and only if the number of unmapped blocks
	 * equals the sum of invalid map and flog entries.
*/
if (loc->list_unmap->count != (loc->list_inval->count +
loc->list_flog_inval->count)) {
ppc->result = CHECK_RESULT_CANNOT_REPAIR;
CHECK_ERR(ppc, "arena %u: cannot repair BTT Map and Flog",
arenap->id);
goto cleanup;
}
if (CHECK_IS_NOT(ppc, ADVANCED) && loc->list_inval->count +
loc->list_flog_inval->count > 0) {
ppc->result = CHECK_RESULT_CANNOT_REPAIR;
CHECK_INFO(ppc, REQUIRE_ADVANCED);
CHECK_ERR(ppc, "BTT Map and / or BTT Flog contain invalid "
"entries");
check_end(ppc->data);
goto cleanup;
}
if (loc->list_inval->count > 0) {
CHECK_ASK(ppc, Q_REPAIR_MAP, "Do you want to repair invalid "
"BTT Map entries?");
}
if (loc->list_flog_inval->count > 0) {
CHECK_ASK(ppc, Q_REPAIR_FLOG, "Do you want to repair invalid "
"BTT Flog entries?");
}
return check_questions_sequence_validate(ppc);
error_push:
CHECK_ERR(ppc, "arena %u: cannot allocate momory for list item",
arenap->id);
ppc->result = CHECK_RESULT_ERROR;
cleanup:
cleanup(ppc, loc);
return -1;
}
/*
* arena_map_flog_fix -- (internal) fix map and flog
*/
static int
arena_map_flog_fix(PMEMpoolcheck *ppc, location *loc, uint32_t question,
void *ctx)
{
LOG(3, NULL);
ASSERTeq(ctx, NULL);
ASSERTne(loc, NULL);
struct arena *arenap = loc->arenap;
uint32_t inval;
uint32_t unmap;
switch (question) {
case Q_REPAIR_MAP:
		/*
		 * Because the first of a pair of duplicated map entries looks
		 * valid until the second one is found, we must revisit all map
		 * entries pointing to the postmap LBAs known to be duplicated
		 * and mark them with the error flag.
		 */
for (uint32_t i = 0; i < arenap->btt_info.external_nlba; i++) {
uint32_t lba = map_get_postmap_lba(arenap, i);
if (lba >= arenap->btt_info.internal_nlba)
continue;
if (!util_isset(loc->dup_bitmap, lba))
continue;
arenap->map[i] = BTT_MAP_ENTRY_ERROR | lba;
util_clrbit(loc->dup_bitmap, lba);
CHECK_INFO(ppc,
"arena %u: storing 0x%x at %u BTT Map entry",
arenap->id, arenap->map[i], i);
}
/*
* repair invalid or duplicated map entries by using unmapped
* blocks
*/
while (list_pop(loc->list_inval, &inval)) {
if (!list_pop(loc->list_unmap, &unmap)) {
ppc->result = CHECK_RESULT_ERROR;
return -1;
}
arenap->map[inval] = unmap | BTT_MAP_ENTRY_ERROR;
CHECK_INFO(ppc, "arena %u: storing 0x%x at %u BTT Map "
"entry", arenap->id, arenap->map[inval], inval);
}
break;
case Q_REPAIR_FLOG:
/* repair invalid flog entries using unmapped blocks */
while (list_pop(loc->list_flog_inval, &inval)) {
if (!list_pop(loc->list_unmap, &unmap)) {
ppc->result = CHECK_RESULT_ERROR;
return -1;
}
struct btt_flog *flog = (struct btt_flog *)
(arenap->flog + inval * BTT_FLOG_PAIR_ALIGN);
memset(&flog[1], 0, sizeof(flog[1]));
uint32_t entry = unmap | BTT_MAP_ENTRY_ERROR;
flog[0].lba = inval;
flog[0].new_map = entry;
flog[0].old_map = entry;
flog[0].seq = 1;
CHECK_INFO(ppc, "arena %u: repairing BTT Flog at %u "
"with free block entry 0x%x", loc->arenap->id,
inval, entry);
}
break;
default:
ERR("not implemented question id: %u", question);
}
return 0;
}
struct step {
int (*check)(PMEMpoolcheck *, location *);
int (*fix)(PMEMpoolcheck *, location *, uint32_t, void *);
};
static const struct step steps[] = {
{
.check = init,
},
{
.check = arena_map_flog_check,
},
{
.fix = arena_map_flog_fix,
},
{
.check = cleanup,
},
{
.check = NULL,
.fix = NULL,
},
};
/*
* step_exe -- (internal) perform single step according to its parameters
*/
static inline int
step_exe(PMEMpoolcheck *ppc, location *loc)
{
ASSERT(loc->step < ARRAY_SIZE(steps));
const struct step *step = &steps[loc->step++];
if (!step->fix)
return step->check(ppc, loc);
if (!check_answer_loop(ppc, loc, NULL, 1, step->fix))
return 0;
cleanup(ppc, loc);
return -1;
}
/*
* check_btt_map_flog -- perform check and fixing of map and flog
*/
void
check_btt_map_flog(PMEMpoolcheck *ppc)
{
LOG(3, NULL);
location *loc = check_get_step_data(ppc->data);
if (ppc->pool->blk_no_layout)
return;
/* initialize check */
if (!loc->arenap && loc->narena == 0 &&
ppc->result != CHECK_RESULT_PROCESS_ANSWERS) {
CHECK_INFO(ppc, "checking BTT Map and Flog");
loc->arenap = PMDK_TAILQ_FIRST(&ppc->pool->arenas);
loc->narena = 0;
}
while (loc->arenap != NULL) {
/* add info about checking next arena */
if (ppc->result != CHECK_RESULT_PROCESS_ANSWERS &&
loc->step == 0) {
CHECK_INFO(ppc, "arena %u: checking BTT Map and Flog",
loc->narena);
}
/* do all checks */
while (CHECK_NOT_COMPLETE(loc, steps)) {
if (step_exe(ppc, loc))
return;
}
/* jump to next arena */
loc->arenap = PMDK_TAILQ_NEXT(loc->arenap, next);
loc->narena++;
loc->step = 0;
}
}
| 15,734 | 21.937318 | 73 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmempool/rm.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* rm.c -- implementation of pmempool_rm() function
*/
#include <errno.h>
#include <fcntl.h>
#include "libpmempool.h"
#include "out.h"
#include "os.h"
#include "util.h"
#include "set.h"
#include "file.h"
#define PMEMPOOL_RM_ALL_FLAGS (\
PMEMPOOL_RM_FORCE |\
PMEMPOOL_RM_POOLSET_LOCAL |\
PMEMPOOL_RM_POOLSET_REMOTE)
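/*
 * ERR_F -- log the error as ignored when the FORCE flag is set,
 * otherwise report it as a hard error
 */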
#define ERR_F(f, ...) do {\
if (CHECK_FLAG((f), FORCE))\
LOG(2, "!(ignored) " __VA_ARGS__);\
else\
ERR(__VA_ARGS__);\
} while (0)
#define CHECK_FLAG(f, i) ((f) & PMEMPOOL_RM_##i)
struct cb_args {
unsigned flags;
int error;
};
/*
* rm_local -- (internal) remove single local file
*/
static int
rm_local(const char *path, unsigned flags, int is_part_file)
{
int ret = util_unlink_flock(path);
if (!ret) {
LOG(3, "%s: removed", path);
return 0;
}
int oerrno = errno;
os_stat_t buff;
ret = os_stat(path, &buff);
if (!ret) {
if (S_ISDIR(buff.st_mode)) {
errno = EISDIR;
if (is_part_file)
ERR("%s: removing file failed", path);
else
ERR("removing file failed");
return -1;
}
}
errno = oerrno;
if (is_part_file)
ERR_F(flags, "%s: removing file failed", path);
else
ERR_F(flags, "removing file failed");
if (CHECK_FLAG(flags, FORCE))
return 0;
return -1;
}
/*
* rm_remote -- (internal) remove remote replica
*/
static int
rm_remote(const char *node, const char *path, unsigned flags)
{
if (!Rpmem_remove) {
ERR_F(flags, "cannot remove remote replica"
" -- missing librpmem");
return -1;
}
int rpmem_flags = 0;
if (CHECK_FLAG(flags, FORCE))
rpmem_flags |= RPMEM_REMOVE_FORCE;
if (CHECK_FLAG(flags, POOLSET_REMOTE))
rpmem_flags |= RPMEM_REMOVE_POOL_SET;
int ret = Rpmem_remove(node, path, rpmem_flags);
if (ret) {
ERR_F(flags, "%s/%s removing failed", node, path);
if (CHECK_FLAG(flags, FORCE))
ret = 0;
} else {
LOG(3, "%s/%s: removed", node, path);
}
return ret;
}
/*
* rm_cb -- (internal) foreach part callback
*/
static int
rm_cb(struct part_file *pf, void *arg)
{
struct cb_args *args = (struct cb_args *)arg;
int ret;
if (pf->is_remote) {
ret = rm_remote(pf->remote->node_addr, pf->remote->pool_desc,
args->flags);
} else {
ret = rm_local(pf->part->path, args->flags, 1);
}
if (ret)
args->error = ret;
return 0;
}
/*
* pmempool_rmU -- remove pool files or poolsets
*/
#ifndef _WIN32
static inline
#endif
int
pmempool_rmU(const char *path, unsigned flags)
{
LOG(3, "path %s flags %x", path, flags);
int ret;
if (flags & ~PMEMPOOL_RM_ALL_FLAGS) {
ERR("invalid flags specified");
errno = EINVAL;
return -1;
}
int is_poolset = util_is_poolset_file(path);
if (is_poolset < 0) {
os_stat_t buff;
ret = os_stat(path, &buff);
if (!ret) {
if (S_ISDIR(buff.st_mode)) {
errno = EISDIR;
ERR("removing file failed");
return -1;
}
}
ERR_F(flags, "removing file failed");
if (CHECK_FLAG(flags, FORCE))
return 0;
return -1;
}
if (!is_poolset) {
LOG(2, "%s: not a poolset file", path);
return rm_local(path, flags, 0);
}
LOG(2, "%s: poolset file", path);
/* fill up pool_set structure */
struct pool_set *set = NULL;
int fd = os_open(path, O_RDONLY);
if (fd == -1 || util_poolset_parse(&set, path, fd)) {
ERR_F(flags, "parsing poolset file failed");
if (fd != -1)
os_close(fd);
if (CHECK_FLAG(flags, FORCE))
return 0;
return -1;
}
os_close(fd);
if (set->remote) {
/* ignore error - it will be handled in rm_remote() */
(void) util_remote_load();
}
util_poolset_free(set);
struct cb_args args;
args.flags = flags;
args.error = 0;
ret = util_poolset_foreach_part(path, rm_cb, &args);
if (ret == -1) {
ERR_F(flags, "parsing poolset file failed");
if (CHECK_FLAG(flags, FORCE))
return 0;
return ret;
}
ASSERTeq(ret, 0);
if (args.error)
return args.error;
if (CHECK_FLAG(flags, POOLSET_LOCAL)) {
ret = rm_local(path, flags, 0);
if (ret) {
ERR_F(flags, "removing pool set file failed");
} else {
LOG(3, "%s: removed", path);
}
if (CHECK_FLAG(flags, FORCE))
return 0;
return ret;
}
return 0;
}
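/*
 * Usage sketch (illustrative only, based on the public API used above):
 *
 *	#include <libpmempool.h>
 *
 *	int main(void)
 *	{
 *		unsigned flags = PMEMPOOL_RM_FORCE |
 *				PMEMPOOL_RM_POOLSET_LOCAL;
 *		return pmempool_rm("/path/to/pool.set", flags) ? 1 : 0;
 *	}
 *
 * The path is a placeholder; PMEMPOOL_RM_POOLSET_REMOTE may be added to
 * also remove remote replicas listed in the poolset file.
 */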
#ifndef _WIN32
/*
* pmempool_rm -- remove pool files or poolsets
*/
int
pmempool_rm(const char *path, unsigned flags)
{
return pmempool_rmU(path, flags);
}
#else
/*
* pmempool_rmW -- remove pool files or poolsets in widechar
*/
int
pmempool_rmW(const wchar_t *path, unsigned flags)
{
char *upath = util_toUTF8(path);
if (upath == NULL) {
ERR("Invalid poolest/pool file path.");
return -1;
}
int ret = pmempool_rmU(upath, flags);
util_free_UTF8(upath);
return ret;
}
#endif
| 4,636 | 17.400794 | 63 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmempool/check_backup.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* check_backup.c -- check backup requirements and perform the backup
*/
#include <stddef.h>
#include <stdint.h>
#include <unistd.h>
#include "out.h"
#include "file.h"
#include "os.h"
#include "libpmempool.h"
#include "pmempool.h"
#include "pool.h"
#include "check_util.h"
enum question {
Q_OVERWRITE_EXISTING_FILE,
Q_OVERWRITE_EXISTING_PARTS
};
/*
* location_release -- (internal) release poolset structure
*/
static void
location_release(location *loc)
{
if (loc->set) {
util_poolset_free(loc->set);
loc->set = NULL;
}
}
/*
* backup_nonpoolset_requirements -- (internal) check backup requirements
*/
static int
backup_nonpoolset_requirements(PMEMpoolcheck *ppc, location *loc)
{
LOG(3, "backup_path %s", ppc->backup_path);
int exists = util_file_exists(ppc->backup_path);
if (exists < 0) {
return CHECK_ERR(ppc,
"unable to access the backup destination: %s",
ppc->backup_path);
}
if (!exists) {
errno = 0;
return 0;
}
if ((size_t)util_file_get_size(ppc->backup_path) !=
ppc->pool->set_file->size) {
ppc->result = CHECK_RESULT_ERROR;
return CHECK_ERR(ppc,
"destination of the backup does not match the size of the source pool file: %s",
ppc->backup_path);
}
if (CHECK_WITHOUT_FIXING(ppc)) {
location_release(loc);
loc->step = CHECK_STEP_COMPLETE;
return 0;
}
CHECK_ASK(ppc, Q_OVERWRITE_EXISTING_FILE,
"destination of the backup already exists.|Do you want to overwrite it?");
return check_questions_sequence_validate(ppc);
}
/*
* backup_nonpoolset_overwrite -- (internal) overwrite pool
*/
static int
backup_nonpoolset_overwrite(PMEMpoolcheck *ppc, location *loc,
uint32_t question, void *context)
{
LOG(3, NULL);
ASSERTne(loc, NULL);
switch (question) {
case Q_OVERWRITE_EXISTING_FILE:
if (pool_copy(ppc->pool, ppc->backup_path, 1 /* overwrite */)) {
location_release(loc);
ppc->result = CHECK_RESULT_ERROR;
return CHECK_ERR(ppc, "cannot perform backup");
}
location_release(loc);
loc->step = CHECK_STEP_COMPLETE;
return 0;
default:
ERR("not implemented question id: %u", question);
}
return 0;
}
/*
* backup_nonpoolset_create -- (internal) create backup
*/
static int
backup_nonpoolset_create(PMEMpoolcheck *ppc, location *loc)
{
CHECK_INFO(ppc, "creating backup file: %s", ppc->backup_path);
if (pool_copy(ppc->pool, ppc->backup_path, 0)) {
location_release(loc);
ppc->result = CHECK_RESULT_ERROR;
return CHECK_ERR(ppc, "cannot perform backup");
}
location_release(loc);
loc->step = CHECK_STEP_COMPLETE;
return 0;
}
/*
* backup_poolset_requirements -- (internal) check backup requirements
*/
static int
backup_poolset_requirements(PMEMpoolcheck *ppc, location *loc)
{
LOG(3, "backup_path %s", ppc->backup_path);
if (ppc->pool->set_file->poolset->nreplicas > 1) {
CHECK_INFO(ppc,
"backup of a poolset with multiple replicas is not supported");
goto err;
}
if (pool_set_parse(&loc->set, ppc->backup_path)) {
CHECK_INFO_ERRNO(ppc, "invalid poolset backup file: %s",
ppc->backup_path);
goto err;
}
if (loc->set->nreplicas > 1) {
CHECK_INFO(ppc,
"backup to a poolset with multiple replicas is not supported");
goto err_poolset;
}
ASSERTeq(loc->set->nreplicas, 1);
struct pool_replica *srep = ppc->pool->set_file->poolset->replica[0];
struct pool_replica *drep = loc->set->replica[0];
if (srep->nparts != drep->nparts) {
CHECK_INFO(ppc,
"number of part files in the backup poolset must match number of part files in the source poolset");
goto err_poolset;
}
int overwrite_required = 0;
for (unsigned p = 0; p < srep->nparts; p++) {
int exists = util_file_exists(drep->part[p].path);
if (exists < 0) {
CHECK_INFO(ppc,
"unable to access the part of the destination poolset: %s",
ppc->backup_path);
goto err_poolset;
}
if (srep->part[p].filesize != drep->part[p].filesize) {
CHECK_INFO(ppc,
"size of the part %u of the backup poolset does not match source poolset",
p);
goto err_poolset;
}
if (!exists) {
errno = 0;
continue;
}
overwrite_required = true;
if ((size_t)util_file_get_size(drep->part[p].path) !=
srep->part[p].filesize) {
CHECK_INFO(ppc,
"destination of the backup part does not match size of the source part file: %s",
drep->part[p].path);
goto err_poolset;
}
}
if (CHECK_WITHOUT_FIXING(ppc)) {
location_release(loc);
loc->step = CHECK_STEP_COMPLETE;
return 0;
}
if (overwrite_required) {
CHECK_ASK(ppc, Q_OVERWRITE_EXISTING_PARTS,
"part files of the destination poolset of the backup already exist.|"
"Do you want to overwrite them?");
}
return check_questions_sequence_validate(ppc);
err_poolset:
location_release(loc);
err:
ppc->result = CHECK_RESULT_ERROR;
return CHECK_ERR(ppc, "unable to backup poolset");
}
/*
* backup_poolset -- (internal) backup the poolset
*/
static int
backup_poolset(PMEMpoolcheck *ppc, location *loc, int overwrite)
{
struct pool_replica *srep = ppc->pool->set_file->poolset->replica[0];
struct pool_replica *drep = loc->set->replica[0];
for (unsigned p = 0; p < srep->nparts; p++) {
if (overwrite == 0) {
CHECK_INFO(ppc, "creating backup file: %s",
drep->part[p].path);
}
if (pool_set_part_copy(&drep->part[p], &srep->part[p],
overwrite)) {
location_release(loc);
ppc->result = CHECK_RESULT_ERROR;
CHECK_INFO(ppc, "unable to create backup file");
return CHECK_ERR(ppc, "unable to backup poolset");
}
}
return 0;
}
/*
* backup_poolset_overwrite -- (internal) backup poolset with overwrite
*/
static int
backup_poolset_overwrite(PMEMpoolcheck *ppc, location *loc,
uint32_t question, void *context)
{
LOG(3, NULL);
ASSERTne(loc, NULL);
switch (question) {
case Q_OVERWRITE_EXISTING_PARTS:
if (backup_poolset(ppc, loc, 1 /* overwrite */)) {
location_release(loc);
ppc->result = CHECK_RESULT_ERROR;
return CHECK_ERR(ppc, "cannot perform backup");
}
location_release(loc);
loc->step = CHECK_STEP_COMPLETE;
return 0;
default:
ERR("not implemented question id: %u", question);
}
return 0;
}
/*
* backup_poolset_create -- (internal) backup poolset
*/
static int
backup_poolset_create(PMEMpoolcheck *ppc, location *loc)
{
if (backup_poolset(ppc, loc, 0)) {
location_release(loc);
ppc->result = CHECK_RESULT_ERROR;
return CHECK_ERR(ppc, "cannot perform backup");
}
location_release(loc);
loc->step = CHECK_STEP_COMPLETE;
return 0;
}
struct step {
int (*check)(PMEMpoolcheck *, location *);
int (*fix)(PMEMpoolcheck *, location *, uint32_t, void *);
int poolset;
};
static const struct step steps[] = {
{
.check = backup_nonpoolset_requirements,
.poolset = false,
},
{
.fix = backup_nonpoolset_overwrite,
.poolset = false,
},
{
.check = backup_nonpoolset_create,
.poolset = false
},
{
.check = backup_poolset_requirements,
.poolset = true,
},
{
.fix = backup_poolset_overwrite,
.poolset = true,
},
{
.check = backup_poolset_create,
.poolset = true
},
{
.check = NULL,
.fix = NULL,
},
};
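/*
 * Note (added for readability): step_exe() below walks this table one
 * entry per call. For a poolset pool the non-poolset entries are
 * skipped, so the effective sequence is assumed to be:
 *
 *	backup_poolset_requirements -> backup_poolset_overwrite (fix,
 *	run only when a question was answered) -> backup_poolset_create
 *
 * and the terminating {NULL, NULL} entry is what CHECK_NOT_COMPLETE()
 * uses to stop the loop in check_backup().
 */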
/*
* step_exe -- (internal) perform single step according to its parameters
*/
static int
step_exe(PMEMpoolcheck *ppc, location *loc)
{
ASSERT(loc->step < ARRAY_SIZE(steps));
const struct step *step = &steps[loc->step++];
if (step->poolset == 0 && ppc->pool->params.is_poolset == 1)
return 0;
if (!step->fix)
return step->check(ppc, loc);
if (!check_has_answer(ppc->data))
return 0;
if (check_answer_loop(ppc, loc, NULL, 1, step->fix))
return -1;
ppc->result = CHECK_RESULT_CONSISTENT;
return 0;
}
/*
* check_backup -- perform backup if requested and needed
*/
void
check_backup(PMEMpoolcheck *ppc)
{
LOG(3, "backup_path %s", ppc->backup_path);
if (ppc->backup_path == NULL)
return;
location *loc = check_get_step_data(ppc->data);
/* do all checks */
while (CHECK_NOT_COMPLETE(loc, steps)) {
if (step_exe(ppc, loc))
break;
}
}
| 7,968 | 20.654891 | 103 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmempool/sync.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2020, Intel Corporation */
/*
* sync.c -- a module for poolset synchronization
*/
#include <stdio.h>
#include <stdint.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <unistd.h>
#include <fcntl.h>
#include <limits.h>
#include "libpmem.h"
#include "replica.h"
#include "out.h"
#include "os.h"
#include "util_pmem.h"
#include "util.h"
#ifdef USE_RPMEM
#include "rpmem_common.h"
#include "rpmem_ssh.h"
#endif
#define BB_DATA_STR "offset 0x%zx, length 0x%zx, nhealthy %i"
/* defines 'struct bb_vec' - the vector of the 'struct bad_block' structures */
VEC(bb_vec, struct bad_block);
/*
* validate_args -- (internal) check whether passed arguments are valid
*/
static int
validate_args(struct pool_set *set)
{
LOG(3, "set %p", set);
ASSERTne(set, NULL);
/* the checks below help detect the use of an incorrect poolset file */
/*
* check if all parts in the poolset are large enough
* (now replication works only for pmemobj pools)
*/
if (replica_check_part_sizes(set, PMEMOBJ_MIN_POOL)) {
LOG(2, "part sizes check failed");
goto err;
}
/*
* check if all directories for part files exist
*/
if (replica_check_part_dirs(set)) {
LOG(2, "part directories check failed");
goto err;
}
return 0;
err:
if (errno == 0)
errno = EINVAL;
return -1;
}
/*
* sync_copy_data -- (internal) copy data from the healthy replica
* to the broken one
*/
static int
sync_copy_data(void *src_addr, void *dst_addr, size_t off, size_t len,
struct pool_replica *rep_h,
struct pool_replica *rep, const struct pool_set_part *part)
{
LOG(3, "src_addr %p dst_addr %p off %zu len %zu "
"rep_h %p rep %p part %p",
src_addr, dst_addr, off, len, rep_h, rep, part);
int ret;
if (rep->remote) {
LOG(10,
"copying data (offset 0x%zx length 0x%zx) to remote node -- '%s' on '%s'",
off, len,
rep->remote->pool_desc,
rep->remote->node_addr);
ret = Rpmem_persist(rep->remote->rpp, off, len, 0, 0);
if (ret) {
LOG(1,
"copying data to remote node failed -- '%s' on '%s'",
rep->remote->pool_desc,
rep->remote->node_addr);
return -1;
}
} else if (rep_h->remote) {
LOG(10,
"reading data (offset 0x%zx length 0x%zx) from remote node -- '%s' on '%s'",
off, len,
rep_h->remote->pool_desc,
rep_h->remote->node_addr);
ret = Rpmem_read(rep_h->remote->rpp, dst_addr, off, len, 0);
if (ret) {
LOG(1,
"reading data from remote node failed -- '%s' on '%s'",
rep_h->remote->pool_desc,
rep_h->remote->node_addr);
return -1;
}
} else {
LOG(10,
"copying data (offset 0x%zx length 0x%zx) from local replica -- '%s'",
off, len, rep_h->part[0].path);
/* copy all data */
memcpy(dst_addr, src_addr, len);
util_persist(part->is_dev_dax, dst_addr, len);
}
return 0;
}
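/*
 * Illustrative call (added note): fixing one bad block of a local
 * replica 'rep' from a healthy replica 'rep_h' boils down to:
 *
 *	void *src = ADDR_SUM(rep_h->part[0].addr, part_off + off);
 *	void *dst = ADDR_SUM(part->addr, off);
 *	if (sync_copy_data(src, dst, part_off + off, len,
 *			rep_h, rep, part))
 *		return -1;
 *
 * which is how sync_badblocks_data() below drives it; for remote
 * replicas the transfer happens via Rpmem_persist()/Rpmem_read() on the
 * given offset and length instead of memcpy().
 */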
/*
* sync_recreate_header -- (internal) recreate the header
*/
static int
sync_recreate_header(struct pool_set *set, unsigned r, unsigned p,
struct pool_hdr *src_hdr)
{
LOG(3, "set %p replica %u part %u src_hdr %p", set, r, p, src_hdr);
struct pool_attr attr;
util_pool_hdr2attr(&attr, src_hdr);
if (util_header_create(set, r, p, &attr, 1) != 0) {
LOG(1, "part headers create failed for replica %u part %u",
r, p);
errno = EINVAL;
return -1;
}
return 0;
}
/*
* sync_mark_replica_no_badblocks -- (internal) mark replica as not having
* bad blocks
*/
static void
sync_mark_replica_no_badblocks(unsigned repn,
struct poolset_health_status *set_hs)
{
LOG(3, "repn %u set_hs %p", repn, set_hs);
struct replica_health_status *rhs = REP_HEALTH(set_hs, repn);
if (rhs->flags & HAS_BAD_BLOCKS) {
rhs->flags &= ~HAS_BAD_BLOCKS;
LOG(4, "replica %u has no bad blocks now", repn);
}
}
/*
* sync_mark_part_no_badblocks -- (internal) mark part as not having bad blocks
*/
static void
sync_mark_part_no_badblocks(unsigned repn, unsigned partn,
struct poolset_health_status *set_hs)
{
LOG(3, "repn %u partn %u set_hs %p", repn, partn, set_hs);
struct replica_health_status *rhs = REP_HEALTH(set_hs, repn);
if (rhs->part[PART_HEALTHidx(rhs, partn)].flags & HAS_BAD_BLOCKS) {
rhs->part[PART_HEALTHidx(rhs, partn)].flags &= ~HAS_BAD_BLOCKS;
LOG(4, "replica %u part %u has no bad blocks now", repn, partn);
}
}
/*
* sync_recalc_badblocks -- (internal) recalculate offset and length
* of bad blocks to absolute ones
* (relative to the beginning of the pool)
*/
static int
sync_recalc_badblocks(struct pool_set *set,
struct poolset_health_status *set_hs)
{
LOG(3, "set %p set_hs %p", set, set_hs);
/* header size for all headers but the first one */
size_t hdrsize = (set->options & (OPTION_SINGLEHDR | OPTION_NOHDRS)) ?
0 : Mmap_align;
for (unsigned r = 0; r < set->nreplicas; ++r) {
struct pool_replica *rep = REP(set, r);
struct replica_health_status *rep_hs = set_hs->replica[r];
for (unsigned p = 0; p < rep->nparts; ++p) {
struct part_health_status *phs = &rep_hs->part[p];
if (!replica_part_has_bad_blocks(phs)) {
/* skip parts with no bad blocks */
continue;
}
ASSERTne(phs->bbs.bb_cnt, 0);
ASSERTne(phs->bbs.bbv, NULL);
LOG(10, "Replica %u part %u HAS %u bad blocks",
r, p, phs->bbs.bb_cnt);
size_t part_off = replica_get_part_offset(set, r, p);
for (unsigned i = 0; i < phs->bbs.bb_cnt; i++) {
LOG(10,
"relative bad block #%i: offset %zu, length %zu",
i,
phs->bbs.bbv[i].offset,
phs->bbs.bbv[i].length);
size_t off = phs->bbs.bbv[i].offset;
size_t len = phs->bbs.bbv[i].length;
if (len + off <= hdrsize)
continue;
/* parts #>0 are mapped without the header */
if (p > 0 && hdrsize > 0) {
if (off >= hdrsize) {
/*
* Bad block does not overlap
* with the header, so only
* adjust the offset.
*/
off -= hdrsize;
} else {
/*
* Bad block overlaps
* with the header,
* so adjust the length
* and zero the offset.
*/
len -= hdrsize - off;
off = 0;
}
}
replica_align_badblock_offset_length(&off, &len,
set, r, p);
phs->bbs.bbv[i].offset = part_off + off;
phs->bbs.bbv[i].length = (unsigned)len;
LOG(10,
"absolute bad block #%i: offset 0x%zx, length 0x%zx",
i,
phs->bbs.bbv[i].offset,
phs->bbs.bbv[i].length);
}
}
}
return 0;
}
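/*
 * Worked example (illustrative, assuming Mmap_align == 4096 and neither
 * OPTION_SINGLEHDR nor OPTION_NOHDRS): a relative bad block (offset
 * 0x800, length 0x2000) in part #1 overlaps the 4 KiB header, so it is
 * clipped to off = 0, len = 0x2000 - (0x1000 - 0x800) = 0x1800 within
 * the mapped part, aligned by replica_align_badblock_offset_length(),
 * and stored with the absolute offset part_off + off.
 */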
/*
* sync_badblocks_find_healthy_replica -- (internal) look for a healthy replica
* for each bad block
*
* This function looks for a healthy replica for each bad block. Bad blocks
* can overlap across replicas, so each bad block may have to be divided
* into smaller parts which can be fixed using different healthy replicas.
*
* Key variables:
* - bbv_all[] - array containing all (possibly divided) bad blocks
* from all previous replicas.
* - bbv_aux[] - array containing all (possibly divided) bad blocks
* from all previous parts of the current replica merged with
* those bad blocks from bbv_all[] that have offsets less than or equal
* to the greatest bad block's offset in the previous part.
*
* This function merges bad blocks from bbv_all[] with bad blocks
* from the current part and writes the outcome bad blocks to bbv_aux[].
* Only bad blocks with offsets less than or equal to the greatest bad
* block's offset in the current part will be moved from bbv_all[] to
* bbv_aux[]. The rest of them have to be moved at the end by
* sync_badblocks_move_vec().
*
* bbv_aux[] becomes new bbv_all[] and bbv_aux[] is zeroed
* before checking the next replica (bbv_all = bbv_aux; bbv_aux = 0).
*
* For example (all replicas have only one part):
* - bbv_all with rep#0: |__----___________----__|
* - merged with rep#1: |____----_______----____|
* - gives such bbv_aux: |__11--00_______00--11__|
* - merged with rep#2: |__________---__________|
* - gives such bbv_aux: |__112200__000__002211__| (all bad blocks can be fixed)
*
* where:
* '_' stands for a healthy block (no bad block)
* '-' stands for a bad block with nhealthy == NO_HEALTHY_REPLICA
* 'N' stands for a bad block with nhealthy == N (can be fixed using rep#N)
*/
static int
sync_badblocks_find_healthy_replica(struct part_health_status *phs,
int rep,
struct bb_vec *pbbv_all,
struct bb_vec *pbbv_aux,
unsigned *i_all)
{
LOG(3, "phs %p rep %i pbbv_all %p pbbv_aux %p i_all %i",
phs, rep, pbbv_all, pbbv_aux, *i_all);
struct bad_block bb_add; /* the element which is being added */
struct bad_block bb_new; /* a new element */
struct bad_block *pbb_all; /* current element of bbv_all[] */
unsigned long long beg_prev;
unsigned long long end_prev;
unsigned long long beg_new;
unsigned long long end_new;
size_t len_prev;
size_t len_new;
size_t size_all = VEC_SIZE(pbbv_all);
if (size_all == 0) {
/* there were no bad blocks so far, so fill up bbv_aux[] */
for (unsigned i = 0; i < phs->bbs.bb_cnt; i++) {
bb_add = phs->bbs.bbv[i];
if (rep > 0)
/* bad block can be fixed with replica #0 */
bb_add.nhealthy = 0;
if (VEC_PUSH_BACK(pbbv_aux, bb_add))
return -1;
LOG(10,
"added bad block (prev-empty): " BB_DATA_STR,
bb_add.offset, bb_add.length, bb_add.nhealthy);
}
} else {
if (*i_all < size_all) {
pbb_all = VEC_GET(pbbv_all, (*i_all)++);
} else {
pbb_all = NULL;
}
for (unsigned i = 0; i < phs->bbs.bb_cnt; i++) {
bb_new = phs->bbs.bbv[i];
LOG(10,
" * (%u) inserting new bad block: " BB_DATA_STR,
i + 1,
bb_new.offset, bb_new.length, bb_new.nhealthy);
if (pbb_all == NULL || pbb_all->length == 0) {
if (*i_all < size_all)
pbb_all = VEC_GET(pbbv_all, (*i_all)++);
else
pbb_all = NULL;
}
/* all from bbv_all before the bb_new */
while (pbb_all != NULL && pbb_all->offset
+ pbb_all->length - 1
< bb_new.offset) {
if (pbb_all->nhealthy == NO_HEALTHY_REPLICA)
/* can be fixed with this replica */
pbb_all->nhealthy = rep;
if (VEC_PUSH_BACK(pbbv_aux, *pbb_all))
return -1;
LOG(10,
"added bad block (prev-before): "
BB_DATA_STR,
pbb_all->offset, pbb_all->length,
pbb_all->nhealthy);
if (*i_all < size_all) {
pbb_all = VEC_GET(pbbv_all, (*i_all)++);
} else {
pbb_all = NULL;
break;
}
}
beg_new = bb_new.offset;
len_new = bb_new.length;
end_new = beg_new + len_new - 1;
/* all pbb_all overlapping with the bb_new */
while (len_new > 0 && pbb_all != NULL) {
beg_prev = pbb_all->offset;
len_prev = pbb_all->length;
end_prev = beg_prev + len_prev - 1;
/* check if new overlaps with prev */
if (end_prev < beg_new || end_new < beg_prev)
break;
/*
* 1st part: non-overlapping part
* of pbb_all or bb_new
*/
if (beg_prev < beg_new) {
/* non-overlapping part of pbb_all */
bb_add.offset = beg_prev;
bb_add.length = (unsigned)
(beg_new - beg_prev);
if (pbb_all->nhealthy !=
NO_HEALTHY_REPLICA) {
bb_add.nhealthy =
pbb_all->nhealthy;
} else {
/*
* It can be fixed with
* this replica.
*/
bb_add.nhealthy = rep;
}
if (VEC_PUSH_BACK(pbbv_aux, bb_add))
return -1;
LOG(10,
"added bad block (prev-only): "
BB_DATA_STR,
bb_add.offset, bb_add.length,
bb_add.nhealthy);
beg_prev += bb_add.length;
len_prev -= bb_add.length;
} else if (beg_new < beg_prev) {
/* non-overlapping part of bb_new */
bb_add.offset = beg_new;
bb_add.length = (unsigned)
(beg_prev - beg_new);
if (rep == 0) {
bb_add.nhealthy =
NO_HEALTHY_REPLICA;
} else {
/*
* It can be fixed with any
* previous replica, so let's
* choose replica #0.
*/
bb_add.nhealthy = 0;
}
if (VEC_PUSH_BACK(pbbv_aux, bb_add))
return -1;
LOG(10,
"added bad block (new-only): "
BB_DATA_STR,
bb_add.offset, bb_add.length,
bb_add.nhealthy);
beg_new += bb_add.length;
len_new -= bb_add.length;
}
/*
* 2nd part: overlapping part
* of pbb_all and bb_new
*/
if (len_prev <= len_new) {
bb_add.offset = beg_prev;
bb_add.length = len_prev;
beg_new += len_prev;
len_new -= len_prev;
/* whole pbb_all was added */
len_prev = 0;
} else {
bb_add.offset = beg_new;
bb_add.length = len_new;
beg_prev += len_new;
len_prev -= len_new;
/* whole bb_new was added */
len_new = 0;
}
bb_add.nhealthy = pbb_all->nhealthy;
if (VEC_PUSH_BACK(pbbv_aux, bb_add))
return -1;
LOG(10,
"added bad block (common): "
BB_DATA_STR,
bb_add.offset, bb_add.length,
bb_add.nhealthy);
/* update pbb_all */
pbb_all->offset = beg_prev;
pbb_all->length = len_prev;
if (len_prev == 0) {
if (*i_all < size_all)
pbb_all = VEC_GET(pbbv_all,
(*i_all)++);
else
pbb_all = NULL;
}
}
/* the rest of the bb_new */
if (len_new > 0) {
bb_add.offset = beg_new;
bb_add.length = len_new;
if (rep > 0)
/* it can be fixed with replica #0 */
bb_add.nhealthy = 0;
else
bb_add.nhealthy = NO_HEALTHY_REPLICA;
if (VEC_PUSH_BACK(pbbv_aux, bb_add))
return -1;
LOG(10,
"added bad block (new-rest): "
BB_DATA_STR,
bb_add.offset, bb_add.length,
bb_add.nhealthy);
}
}
if (pbb_all != NULL && pbb_all->length > 0 && *i_all > 0)
/* this pbb_all will be used again in the next part */
(*i_all)--;
}
return 0;
}
/*
* sync_badblocks_assign_healthy_replica -- (internal) assign healthy replica
* for each bad block
*/
static int
sync_badblocks_assign_healthy_replica(struct part_health_status *phs,
int rep,
struct bb_vec *pbbv_all,
unsigned *i_all)
{
LOG(3, "phs %p rep %i pbbv_all %p i_all %i",
phs, rep, pbbv_all, *i_all);
struct bad_block bb_new; /* a new element */
struct bad_block bb_old; /* an old element */
struct bad_block *pbb_all; /* current element of bbv_all[] */
size_t length_left;
struct bb_vec bbv_new = VEC_INITIALIZER;
size_t size_all = VEC_SIZE(pbbv_all);
pbb_all = VEC_GET(pbbv_all, *i_all);
for (unsigned i = 0; i < phs->bbs.bb_cnt; i++) {
bb_old = phs->bbs.bbv[i];
LOG(10,
"assigning old bad block: " BB_DATA_STR,
bb_old.offset, bb_old.length, bb_old.nhealthy);
/*
* Skip all bad blocks from bbv_all with offsets
* less than the offset of the current bb_old.
*/
while (pbb_all->offset < bb_old.offset) {
/* (*i_all) has to be less than (size_all - 1) */
ASSERT(*i_all < size_all - 1);
pbb_all = VEC_GET(pbbv_all, ++(*i_all));
}
bb_new.offset = bb_old.offset;
length_left = bb_old.length;
while (length_left > 0) {
LOG(10,
"checking saved bad block: " BB_DATA_STR,
pbb_all->offset, pbb_all->length,
pbb_all->nhealthy);
ASSERTeq(pbb_all->offset, bb_new.offset);
ASSERT(pbb_all->length <= length_left);
bb_new.length = pbb_all->length;
bb_new.nhealthy = pbb_all->nhealthy;
if (VEC_PUSH_BACK(&bbv_new, bb_new))
goto error_exit;
LOG(10,
"added new bad block: " BB_DATA_STR,
bb_new.offset, bb_new.length, bb_new.nhealthy);
bb_new.offset += bb_new.length;
length_left -= bb_new.length;
if (length_left == 0)
continue;
/* (*i_all) has to be less than (size_all - 1) */
ASSERT(*i_all < size_all - 1);
pbb_all = VEC_GET(pbbv_all, ++(*i_all));
}
}
Free(phs->bbs.bbv);
phs->bbs.bbv = VEC_ARR(&bbv_new);
phs->bbs.bb_cnt = (unsigned)VEC_SIZE(&bbv_new);
LOG(10, "added %u new bad blocks", phs->bbs.bb_cnt);
return 0;
error_exit:
VEC_DELETE(&bbv_new);
return -1;
}
/*
* sync_badblocks_move_vec -- (internal) move bad blocks from vector pbbv_all
* to vector pbbv_aux
*/
static int
sync_badblocks_move_vec(struct bb_vec *pbbv_all,
struct bb_vec *pbbv_aux,
unsigned i_all,
unsigned rep)
{
LOG(3, "pbbv_all %p pbbv_aux %p i_all %u rep %u",
pbbv_all, pbbv_aux, i_all, rep);
size_t size_all = VEC_SIZE(pbbv_all);
struct bad_block *pbb_all;
while (i_all < size_all) {
pbb_all = VEC_GET(pbbv_all, i_all++);
if (pbb_all->length == 0)
continue;
if (pbb_all->nhealthy == NO_HEALTHY_REPLICA && rep > 0)
/* it can be fixed using the last replica */
pbb_all->nhealthy = (int)rep;
if (VEC_PUSH_BACK(pbbv_aux, *pbb_all))
return -1;
LOG(10,
"added bad block (prev-after): " BB_DATA_STR,
pbb_all->offset, pbb_all->length,
pbb_all->nhealthy);
}
return 0;
}
/*
* sync_check_bad_blocks_overlap -- (internal) check if there are uncorrectable
* bad blocks (bad blocks overlapping
* in all replicas)
*/
static int
sync_check_bad_blocks_overlap(struct pool_set *set,
struct poolset_health_status *set_hs)
{
LOG(3, "set %p set_hs %p", set, set_hs);
struct bb_vec bbv_all = VEC_INITIALIZER;
struct bb_vec bbv_aux = VEC_INITIALIZER;
int ret = -1;
for (unsigned r = 0; r < set->nreplicas; ++r) {
struct pool_replica *rep = REP(set, r);
struct replica_health_status *rep_hs = set_hs->replica[r];
unsigned i_all = 0; /* index in bbv_all */
for (unsigned p = 0; p < rep->nparts; ++p) {
struct part_health_status *phs = &rep_hs->part[p];
if (!replica_part_has_bad_blocks(phs)) {
/* skip parts with no bad blocks */
continue;
}
ASSERTne(phs->bbs.bb_cnt, 0);
ASSERTne(phs->bbs.bbv, NULL);
LOG(10, "Replica %u part %u HAS %u bad blocks",
r, p, phs->bbs.bb_cnt);
/*
* This function merges bad blocks from bbv_all
* with bad blocks from the current part
* and writes the outcome bad blocks to bbv_aux.
* Only bad blocks with offsets less than or equal to
* the greatest bad block's offset in the current part
* will be moved from bbv_all to bbv_aux.
* The rest of them have to be moved at the end
* by sync_badblocks_move_vec() below.
*/
if (sync_badblocks_find_healthy_replica(phs, (int)r,
&bbv_all, &bbv_aux,
&i_all))
goto exit;
}
/*
* Move the rest of bad blocks from bbv_all to bbv_aux
* (for more details see the comment above).
* All these bad blocks can be fixed using the last replica 'r'.
*/
if (sync_badblocks_move_vec(&bbv_all, &bbv_aux, i_all, r))
goto exit; /* free both vectors before returning the error */
/* bbv_aux becomes a new bbv_all */
VEC_MOVE(&bbv_all, &bbv_aux);
i_all = 0;
}
ret = 0;
/* check if there is an uncorrectable bad block */
size_t size_all = VEC_SIZE(&bbv_all);
for (unsigned i = 0; i < size_all; i++) {
struct bad_block *pbb_all = VEC_GET(&bbv_all, i);
if (pbb_all->nhealthy == NO_HEALTHY_REPLICA) {
ret = 1; /* this bad block cannot be fixed */
LOG(1,
"uncorrectable bad block found: offset 0x%zx, length 0x%zx",
pbb_all->offset, pbb_all->length);
goto exit;
}
}
/*
* All bad blocks can be fixed,
* so assign healthy replica for each of them.
*/
for (unsigned r = 0; r < set->nreplicas; ++r) {
struct pool_replica *rep = REP(set, r);
struct replica_health_status *rep_hs = set_hs->replica[r];
if (!replica_has_bad_blocks(r, set_hs)) {
/* skip replicas with no bad blocks */
continue;
}
unsigned i_all = 0; /* index in bbv_all */
for (unsigned p = 0; p < rep->nparts; ++p) {
struct part_health_status *phs = &rep_hs->part[p];
if (!replica_part_has_bad_blocks(phs)) {
/* skip parts with no bad blocks */
continue;
}
if (sync_badblocks_assign_healthy_replica(phs, (int)r,
&bbv_all,
&i_all))
goto exit;
}
}
exit:
VEC_DELETE(&bbv_aux);
VEC_DELETE(&bbv_all);
return ret;
}
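/*
 * Illustrative outcome (added note): with two replicas, rep#0 bad in
 * [0x1000, 0x2000) and rep#1 bad in [0x3000, 0x4000), there is no
 * overlap, so rep#0's block gets nhealthy == 1, rep#1's block gets
 * nhealthy == 0 and the function returns 0. If rep#1 were bad in
 * [0x1800, 0x2800) instead, the sub-range [0x1800, 0x2000) would be bad
 * in all replicas and the function would return 1 (uncorrectable).
 */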
/*
* sync_badblocks_data -- (internal) fix bad blocks by copying data from a healthy replica
*/
static int
sync_badblocks_data(struct pool_set *set, struct poolset_health_status *set_hs)
{
LOG(3, "set %p, set_hs %p", set, set_hs);
struct pool_replica *rep_h;
for (unsigned r = 0; r < set->nreplicas; ++r) {
struct pool_replica *rep = REP(set, r);
struct replica_health_status *rep_hs = set_hs->replica[r];
for (unsigned p = 0; p < rep->nparts; ++p) {
struct part_health_status *phs = &rep_hs->part[p];
if (!replica_part_has_bad_blocks(phs)) {
/* skip parts with no bad blocks */
continue;
}
ASSERTne(phs->bbs.bb_cnt, 0);
ASSERTne(phs->bbs.bbv, NULL);
const struct pool_set_part *part = &rep->part[p];
size_t part_off = replica_get_part_offset(set, r, p);
for (unsigned i = 0; i < phs->bbs.bb_cnt; i++) {
size_t off = phs->bbs.bbv[i].offset - part_off;
size_t len = phs->bbs.bbv[i].length;
ASSERT(phs->bbs.bbv[i].nhealthy >= 0);
rep_h = REP(set,
(unsigned)phs->bbs.bbv[i].nhealthy);
void *src_addr = ADDR_SUM(rep_h->part[0].addr,
part_off + off);
void *dst_addr = ADDR_SUM(part->addr, off);
if (sync_copy_data(src_addr, dst_addr,
part_off + off, len,
rep_h, rep, part))
return -1;
}
/* free array of bad blocks */
Free(phs->bbs.bbv);
phs->bbs.bbv = NULL;
/* mark part as having no bad blocks */
sync_mark_part_no_badblocks(r, p, set_hs);
}
/* mark replica as having no bad blocks */
sync_mark_replica_no_badblocks(r, set_hs);
}
LOG(1, "all bad blocks have been fixed");
if (replica_remove_all_recovery_files(set_hs)) {
LOG(1, "removing bad block recovery files failed");
return -1;
}
return 0;
}
/*
* recreate_broken_parts -- (internal) create parts in place of the broken ones
*/
static int
recreate_broken_parts(struct pool_set *set,
struct poolset_health_status *set_hs,
int fix_bad_blocks)
{
LOG(3, "set %p set_hs %p fix_bad_blocks %i",
set, set_hs, fix_bad_blocks);
for (unsigned r = 0; r < set_hs->nreplicas; ++r) {
if (set->replica[r]->remote)
continue;
struct pool_replica *broken_r = set->replica[r];
for (unsigned p = 0; p < set_hs->replica[r]->nparts; ++p) {
/* skip unbroken parts */
if (!replica_is_part_broken(r, p, set_hs))
continue;
/* remove parts from broken replica */
if (replica_remove_part(set, r, p, fix_bad_blocks)) {
LOG(2, "cannot remove part");
return -1;
}
/* create removed part and open it */
if (util_part_open(&broken_r->part[p], 0,
1 /* create */)) {
LOG(2, "cannot open/create parts");
return -1;
}
sync_mark_part_no_badblocks(r, p, set_hs);
}
}
return 0;
}
/*
* fill_struct_part_uuids -- (internal) set part uuids in pool_set structure
*/
static void
fill_struct_part_uuids(struct pool_set *set, unsigned repn,
struct poolset_health_status *set_hs)
{
LOG(3, "set %p, repn %u, set_hs %p", set, repn, set_hs);
struct pool_replica *rep = REP(set, repn);
struct pool_hdr *hdrp;
for (unsigned p = 0; p < rep->nhdrs; ++p) {
/* skip broken parts */
if (replica_is_part_broken(repn, p, set_hs))
continue;
hdrp = HDR(rep, p);
memcpy(rep->part[p].uuid, hdrp->uuid, POOL_HDR_UUID_LEN);
}
}
/*
* is_uuid_already_used -- (internal) check if given uuid is assigned to
* any of the earlier replicas
*/
static int
is_uuid_already_used(uuid_t uuid, struct pool_set *set, unsigned repn)
{
for (unsigned r = 0; r < repn; ++r) {
if (uuidcmp(uuid, PART(REP(set, r), 0)->uuid) == 0)
return 1;
}
return 0;
}
/*
* fill_struct_broken_part_uuids -- (internal) set part uuids in pool_set
* structure
*/
static int
fill_struct_broken_part_uuids(struct pool_set *set, unsigned repn,
struct poolset_health_status *set_hs, unsigned flags)
{
LOG(3, "set %p, repn %u, set_hs %p, flags %u", set, repn, set_hs,
flags);
struct pool_replica *rep = REP(set, repn);
struct pool_hdr *hdrp;
for (unsigned p = 0; p < rep->nhdrs; ++p) {
/* skip unbroken parts */
if (!replica_is_part_broken(repn, p, set_hs))
continue;
/* check if part was damaged or was added by transform */
if (replica_is_poolset_transformed(flags)) {
/* generate new uuid for this part */
if (util_uuid_generate(rep->part[p].uuid) < 0) {
ERR("cannot generate pool set part UUID");
errno = EINVAL;
return -1;
}
continue;
}
if (!replica_is_part_broken(repn, p - 1, set_hs) &&
!(set->options & OPTION_SINGLEHDR)) {
/* try to get part uuid from the previous part */
hdrp = HDRP(rep, p);
memcpy(rep->part[p].uuid, hdrp->next_part_uuid,
POOL_HDR_UUID_LEN);
} else if (!replica_is_part_broken(repn, p + 1, set_hs) &&
!(set->options & OPTION_SINGLEHDR)) {
/* try to get part uuid from the next part */
hdrp = HDRN(rep, p);
memcpy(rep->part[p].uuid, hdrp->prev_part_uuid,
POOL_HDR_UUID_LEN);
} else if (p == 0 &&
!replica_is_part_broken(repn - 1, 0, set_hs)) {
/* try to get part uuid from the previous replica */
hdrp = HDR(REPP(set, repn), 0);
if (is_uuid_already_used(hdrp->next_repl_uuid, set,
repn)) {
ERR(
"repeated uuid - some replicas were created with a different poolset file");
errno = EINVAL;
return -1;
}
memcpy(rep->part[p].uuid, hdrp->next_repl_uuid,
POOL_HDR_UUID_LEN);
} else if (p == 0 &&
!replica_is_part_broken(repn + 1, 0, set_hs)) {
/* try to get part uuid from the next replica */
hdrp = HDR(REPN(set, repn), 0);
if (is_uuid_already_used(hdrp->prev_repl_uuid, set,
repn)) {
ERR(
"repeated uuid - some replicas were created with a different poolset file");
errno = EINVAL;
return -1;
}
memcpy(rep->part[p].uuid, hdrp->prev_repl_uuid,
POOL_HDR_UUID_LEN);
} else {
/* generate new uuid for this part */
if (util_uuid_generate(rep->part[p].uuid) < 0) {
ERR("cannot generate pool set part UUID");
errno = EINVAL;
return -1;
}
}
}
return 0;
}
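/*
 * Recovery-order sketch (added note): for a broken part uuid the code
 * above tries, in order: the next_part_uuid stored in the previous
 * part's header, the prev_part_uuid from the next part's header, the
 * first-part linkage of the neighbouring replicas (for p == 0 only),
 * and as a last resort a freshly generated uuid.
 */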
/*
* fill_struct_uuids -- (internal) fill fields in pool_set needed for further
* altering of uuids
*/
static int
fill_struct_uuids(struct pool_set *set, unsigned src_replica,
struct poolset_health_status *set_hs, unsigned flags)
{
LOG(3, "set %p, src_replica %u, set_hs %p, flags %u", set, src_replica,
set_hs, flags);
/* set poolset uuid */
struct pool_hdr *src_hdr0 = HDR(REP(set, src_replica), 0);
memcpy(set->uuid, src_hdr0->poolset_uuid, POOL_HDR_UUID_LEN);
/* set unbroken parts' uuids */
for (unsigned r = 0; r < set->nreplicas; ++r) {
fill_struct_part_uuids(set, r, set_hs);
}
/* set broken parts' uuids */
for (unsigned r = 0; r < set->nreplicas; ++r) {
if (fill_struct_broken_part_uuids(set, r, set_hs, flags))
return -1;
}
return 0;
}
/*
* create_headers_for_broken_parts -- (internal) create headers for all new
* parts created in place of the broken ones
*/
static int
create_headers_for_broken_parts(struct pool_set *set, unsigned src_replica,
struct poolset_health_status *set_hs)
{
LOG(3, "set %p, src_replica %u, set_hs %p", set, src_replica, set_hs);
struct pool_hdr *src_hdr = HDR(REP(set, src_replica), 0);
for (unsigned r = 0; r < set_hs->nreplicas; ++r) {
/* skip unbroken replicas */
if (!replica_is_replica_broken(r, set_hs) &&
!replica_has_bad_blocks(r, set_hs))
continue;
for (unsigned p = 0; p < set_hs->replica[r]->nhdrs; p++) {
/* skip unbroken parts */
if (!replica_is_part_broken(r, p, set_hs) &&
!replica_part_has_corrupted_header(r, p, set_hs))
continue;
if (sync_recreate_header(set, r, p, src_hdr))
return -1;
}
}
return 0;
}
/*
* copy_data_to_broken_parts -- (internal) copy data to all parts created
* in place of the broken ones
*/
static int
copy_data_to_broken_parts(struct pool_set *set, unsigned healthy_replica,
unsigned flags, struct poolset_health_status *set_hs)
{
LOG(3, "set %p, healthy_replica %u, flags %u, set_hs %p", set,
healthy_replica, flags, set_hs);
/* get pool size from healthy replica */
size_t poolsize = set->poolsize;
for (unsigned r = 0; r < set_hs->nreplicas; ++r) {
/* skip unbroken and consistent replicas */
if (replica_is_replica_healthy(r, set_hs))
continue;
struct pool_replica *rep = REP(set, r);
struct pool_replica *rep_h = REP(set, healthy_replica);
for (unsigned p = 0; p < rep->nparts; ++p) {
/* skip unbroken parts from consistent replicas */
if (!replica_is_part_broken(r, p, set_hs) &&
replica_is_replica_consistent(r, set_hs))
continue;
const struct pool_set_part *part = &rep->part[p];
size_t off = replica_get_part_data_offset(set, r, p);
size_t len = replica_get_part_data_len(set, r, p);
/* do not allow copying too much data */
if (off >= poolsize)
continue;
if (off + len > poolsize || rep->remote)
len = poolsize - off;
/*
* First part of replica is mapped
* with header
*/
size_t fpoff = (p == 0) ? POOL_HDR_SIZE : 0;
void *src_addr = ADDR_SUM(rep_h->part[0].addr, off);
void *dst_addr = ADDR_SUM(part->addr, fpoff);
if (sync_copy_data(src_addr, dst_addr, off, len,
rep_h, rep, part))
return -1;
}
}
return 0;
}
/*
* grant_created_parts_perm -- (internal) set RW permission rights to all
* the parts created in place of the broken ones
*/
static int
grant_created_parts_perm(struct pool_set *set, unsigned src_repn,
struct poolset_health_status *set_hs)
{
LOG(3, "set %p, src_repn %u, set_hs %p", set, src_repn, set_hs);
/* choose the default permissions */
mode_t def_mode = S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP;
/* get permissions of the first part of the source replica */
mode_t src_mode;
os_stat_t sb;
if (REP(set, src_repn)->remote) {
src_mode = def_mode;
} else if (os_stat(PART(REP(set, src_repn), 0)->path, &sb) != 0) {
ERR("cannot check file permissions of %s (replica %u, part %u)",
PART(REP(set, src_repn), 0)->path, src_repn, 0);
src_mode = def_mode;
} else {
src_mode = sb.st_mode;
}
/* set permissions to all recreated parts */
for (unsigned r = 0; r < set_hs->nreplicas; ++r) {
/* skip unbroken replicas */
if (!replica_is_replica_broken(r, set_hs))
continue;
if (set->replica[r]->remote)
continue;
for (unsigned p = 0; p < set_hs->replica[r]->nparts; p++) {
/* skip parts which were not created */
if (!PART(REP(set, r), p)->created)
continue;
LOG(4, "setting permissions for part %u, replica %u",
p, r);
/* set rights to those of existing part files */
if (os_chmod(PART(REP(set, r), p)->path, src_mode)) {
ERR(
"cannot set permission rights for created parts: replica %u, part %u",
r, p);
errno = EPERM;
return -1;
}
}
}
return 0;
}
/*
* update_parts_linkage -- (internal) set uuids linking recreated parts within
* a replica
*/
static int
update_parts_linkage(struct pool_set *set, unsigned repn,
struct poolset_health_status *set_hs)
{
LOG(3, "set %p, repn %u, set_hs %p", set, repn, set_hs);
struct pool_replica *rep = REP(set, repn);
for (unsigned p = 0; p < rep->nhdrs; ++p) {
struct pool_hdr *hdrp = HDR(rep, p);
struct pool_hdr *prev_hdrp = HDRP(rep, p);
struct pool_hdr *next_hdrp = HDRN(rep, p);
/* set uuids in the current part */
memcpy(hdrp->prev_part_uuid, PARTP(rep, p)->uuid,
POOL_HDR_UUID_LEN);
memcpy(hdrp->next_part_uuid, PARTN(rep, p)->uuid,
POOL_HDR_UUID_LEN);
util_checksum(hdrp, sizeof(*hdrp), &hdrp->checksum,
1, POOL_HDR_CSUM_END_OFF(hdrp));
/* set uuids in the previous part */
memcpy(prev_hdrp->next_part_uuid, PART(rep, p)->uuid,
POOL_HDR_UUID_LEN);
util_checksum(prev_hdrp, sizeof(*prev_hdrp),
&prev_hdrp->checksum, 1,
POOL_HDR_CSUM_END_OFF(prev_hdrp));
/* set uuids in the next part */
memcpy(next_hdrp->prev_part_uuid, PART(rep, p)->uuid,
POOL_HDR_UUID_LEN);
util_checksum(next_hdrp, sizeof(*next_hdrp),
&next_hdrp->checksum, 1,
POOL_HDR_CSUM_END_OFF(next_hdrp));
/* store pool's header */
util_persist(PART(rep, p)->is_dev_dax, hdrp, sizeof(*hdrp));
util_persist(PARTP(rep, p)->is_dev_dax, prev_hdrp,
sizeof(*prev_hdrp));
util_persist(PARTN(rep, p)->is_dev_dax, next_hdrp,
sizeof(*next_hdrp));
}
return 0;
}
/*
* update_replicas_linkage -- (internal) update uuids linking replicas
*/
static int
update_replicas_linkage(struct pool_set *set, unsigned repn)
{
LOG(3, "set %p, repn %u", set, repn);
struct pool_replica *rep = REP(set, repn);
struct pool_replica *prev_r = REPP(set, repn);
struct pool_replica *next_r = REPN(set, repn);
ASSERT(rep->nparts > 0);
ASSERT(prev_r->nparts > 0);
ASSERT(next_r->nparts > 0);
/* set uuids in the current replica */
for (unsigned p = 0; p < rep->nhdrs; ++p) {
struct pool_hdr *hdrp = HDR(rep, p);
memcpy(hdrp->prev_repl_uuid, PART(prev_r, 0)->uuid,
POOL_HDR_UUID_LEN);
memcpy(hdrp->next_repl_uuid, PART(next_r, 0)->uuid,
POOL_HDR_UUID_LEN);
util_checksum(hdrp, sizeof(*hdrp), &hdrp->checksum,
1, POOL_HDR_CSUM_END_OFF(hdrp));
/* store pool's header */
util_persist(PART(rep, p)->is_dev_dax, hdrp, sizeof(*hdrp));
}
/* set uuids in the previous replica */
for (unsigned p = 0; p < prev_r->nhdrs; ++p) {
struct pool_hdr *prev_hdrp = HDR(prev_r, p);
memcpy(prev_hdrp->next_repl_uuid, PART(rep, 0)->uuid,
POOL_HDR_UUID_LEN);
util_checksum(prev_hdrp, sizeof(*prev_hdrp),
&prev_hdrp->checksum, 1,
POOL_HDR_CSUM_END_OFF(prev_hdrp));
/* store pool's header */
util_persist(PART(prev_r, p)->is_dev_dax, prev_hdrp,
sizeof(*prev_hdrp));
}
/* set uuids in the next replica */
for (unsigned p = 0; p < next_r->nhdrs; ++p) {
struct pool_hdr *next_hdrp = HDR(next_r, p);
memcpy(next_hdrp->prev_repl_uuid, PART(rep, 0)->uuid,
POOL_HDR_UUID_LEN);
util_checksum(next_hdrp, sizeof(*next_hdrp),
&next_hdrp->checksum, 1,
POOL_HDR_CSUM_END_OFF(next_hdrp));
/* store pool's header */
util_persist(PART(next_r, p)->is_dev_dax, next_hdrp,
sizeof(*next_hdrp));
}
return 0;
}
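/*
 * Illustrative invariant (added note): replicas are linked into a ring
 * through their first-part uuids, so after this update every header
 * index p satisfies:
 *
 *	HDR(rep, p)->prev_repl_uuid == PART(prev_r, 0)->uuid
 *	HDR(rep, p)->next_repl_uuid == PART(next_r, 0)->uuid
 *	HDR(prev_r, p)->next_repl_uuid == PART(rep, 0)->uuid
 *	HDR(next_r, p)->prev_repl_uuid == PART(rep, 0)->uuid
 */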
/*
* update_poolset_uuids -- (internal) update poolset uuid in recreated parts
*/
static int
update_poolset_uuids(struct pool_set *set, unsigned repn,
struct poolset_health_status *set_hs)
{
LOG(3, "set %p, repn %u, set_hs %p", set, repn, set_hs);
struct pool_replica *rep = REP(set, repn);
for (unsigned p = 0; p < rep->nhdrs; ++p) {
struct pool_hdr *hdrp = HDR(rep, p);
memcpy(hdrp->poolset_uuid, set->uuid, POOL_HDR_UUID_LEN);
util_checksum(hdrp, sizeof(*hdrp), &hdrp->checksum,
1, POOL_HDR_CSUM_END_OFF(hdrp));
/* store pool's header */
util_persist(PART(rep, p)->is_dev_dax, hdrp, sizeof(*hdrp));
}
return 0;
}
/*
* update_remote_headers -- (internal) update headers of existing remote
* replicas
*/
static int
update_remote_headers(struct pool_set *set)
{
LOG(3, "set %p", set);
for (unsigned r = 0; r < set->nreplicas; ++r) {
/* skip local or just created replicas */
if (REP(set, r)->remote == NULL ||
PART(REP(set, r), 0)->created == 1)
continue;
if (util_update_remote_header(set, r)) {
LOG(1,
"updating header of a remote replica no. %u failed",
r);
return -1;
}
}
return 0;
}
/*
* update_uuids -- (internal) set all uuids that might have changed or be unset
* after recreating parts
*/
static int
update_uuids(struct pool_set *set, struct poolset_health_status *set_hs)
{
LOG(3, "set %p, set_hs %p", set, set_hs);
for (unsigned r = 0; r < set->nreplicas; ++r) {
if (!replica_is_replica_healthy(r, set_hs))
update_parts_linkage(set, r, set_hs);
update_replicas_linkage(set, r);
update_poolset_uuids(set, r, set_hs);
}
if (update_remote_headers(set))
return -1;
return 0;
}
/*
* remove_remote -- (internal) remove remote pool
*/
static int
remove_remote(const char *target, const char *pool_set)
{
LOG(3, "target %s, pool_set %s", target, pool_set);
#ifdef USE_RPMEM
struct rpmem_target_info *info = rpmem_target_parse(target);
if (!info)
goto err_parse;
struct rpmem_ssh *ssh = rpmem_ssh_exec(info, "--remove",
pool_set, "--force", NULL);
if (!ssh) {
goto err_ssh_exec;
}
if (rpmem_ssh_monitor(ssh, 0))
goto err_ssh_monitor;
int ret = rpmem_ssh_close(ssh);
rpmem_target_free(info);
return ret;
err_ssh_monitor:
rpmem_ssh_close(ssh);
err_ssh_exec:
rpmem_target_free(info);
err_parse:
return -1;
#else
FATAL("remote replication not supported");
return -1;
#endif
}
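/*
 * Note (assumption, for illustration): rpmem_ssh_exec() is expected to
 * run the remote-side command over ssh, so the call above roughly
 * corresponds to executing "<remote command> --remove <pool_set>
 * --force" on the target node and waiting for its exit status.
 */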
/*
* open_remote_replicas -- (internal) open all unbroken remote replicas
*/
static int
open_remote_replicas(struct pool_set *set,
struct poolset_health_status *set_hs)
{
LOG(3, "set %p, set_hs %p", set, set_hs);
for (unsigned r = 0; r < set->nreplicas; r++) {
struct pool_replica *rep = set->replica[r];
if (!rep->remote)
continue;
if (!replica_is_replica_healthy(r, set_hs))
continue;
unsigned nlanes = REMOTE_NLANES;
int ret = util_poolset_remote_replica_open(set, r,
set->poolsize, 0, &nlanes);
if (ret) {
LOG(1, "Opening '%s' on '%s' failed",
rep->remote->pool_desc,
rep->remote->node_addr);
return ret;
}
}
return 0;
}
/*
* create_remote_replicas -- (internal) recreate all broken remote replicas
*/
static int
create_remote_replicas(struct pool_set *set,
struct poolset_health_status *set_hs, unsigned flags)
{
LOG(3, "set %p, set_hs %p", set, set_hs);
for (unsigned r = 0; r < set->nreplicas; r++) {
struct pool_replica *rep = set->replica[r];
if (!rep->remote)
continue;
if (replica_is_replica_healthy(r, set_hs))
continue;
if (!replica_is_poolset_transformed(flags)) {
/* ignore errors from remove operation */
remove_remote(rep->remote->node_addr,
rep->remote->pool_desc);
}
unsigned nlanes = REMOTE_NLANES;
int ret = util_poolset_remote_replica_open(set, r,
set->poolsize, 1, &nlanes);
if (ret) {
LOG(1, "Creating '%s' on '%s' failed",
rep->remote->pool_desc,
rep->remote->node_addr);
return ret;
}
}
return 0;
}
/*
* replica_sync -- synchronize data across replicas within a poolset
*/
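/*
 * Overview (added for readability): the flow below validates the
 * poolset and checks its health, picks a replica with a healthy header,
 * stops early in dry-run mode, recreates and maps broken parts, opens
 * remote replicas, recalculates and fixes bad blocks, recreates headers
 * and remote replicas, copies data into the recreated parts, updates
 * the linking uuids and finally grants permissions to the created part
 * files.
 */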
int
replica_sync(struct pool_set *set, struct poolset_health_status *s_hs,
unsigned flags)
{
LOG(3, "set %p, flags %u", set, flags);
int ret = 0;
struct poolset_health_status *set_hs = NULL;
/* check if we already know the poolset health status */
if (s_hs == NULL) {
/* validate poolset before checking its health */
if (validate_args(set))
return -1;
/* examine poolset's health */
if (replica_check_poolset_health(set, &set_hs,
1 /* called from sync */,
flags)) {
LOG(1, "poolset health check failed");
return -1;
}
/* check if poolset is broken; if not, nothing to do */
if (replica_is_poolset_healthy(set_hs)) {
LOG(1, "poolset is healthy");
goto out;
}
} else {
set_hs = s_hs;
}
/* find a replica with healthy header; it will be the source of data */
unsigned healthy_replica = replica_find_healthy_replica(set_hs);
unsigned healthy_header = healthy_replica;
if (healthy_header == UNDEF_REPLICA) {
healthy_header = replica_find_replica_healthy_header(set_hs);
if (healthy_header == UNDEF_REPLICA) {
ERR("no healthy replica found");
errno = EINVAL;
ret = -1;
goto out;
}
}
/* in dry-run mode we can stop here */
if (is_dry_run(flags)) {
LOG(1, "Sync in dry-run mode finished successfully");
goto out;
}
/* recreate broken parts */
if (recreate_broken_parts(set, set_hs, fix_bad_blocks(flags))) {
ERR("recreating broken parts failed");
ret = -1;
goto out;
}
/* open all part files */
if (replica_open_poolset_part_files(set)) {
ERR("opening poolset part files failed");
ret = -1;
goto out;
}
/* map all replicas */
if (util_poolset_open(set)) {
ERR("opening poolset failed");
ret = -1;
goto out;
}
/* this is required for opening remote pools */
set->poolsize = set_hs->replica[healthy_header]->pool_size;
LOG(3, "setting the pool size (%zu) from replica #%u",
set->poolsize, healthy_header);
/* open all remote replicas */
if (open_remote_replicas(set, set_hs)) {
ERR("opening remote replicas failed");
ret = -1;
goto out;
}
/* recalculate offset and length of bad blocks */
if (sync_recalc_badblocks(set, set_hs)) {
LOG(1, "syncing bad blocks data failed");
ret = -1;
goto out;
}
/*
* Check if there are uncorrectable bad blocks
* (bad blocks overlapping in all replicas).
*/
int status = sync_check_bad_blocks_overlap(set, set_hs);
if (status == -1) {
LOG(1, "checking bad blocks failed");
ret = -1;
goto out;
}
if (status == 1) {
ERR(
"a part of the pool has uncorrectable errors in all replicas");
errno = EINVAL;
ret = -1;
goto out;
}
LOG(3, "bad blocks do not overlap");
/* sync data in bad blocks */
if (sync_badblocks_data(set, set_hs)) {
LOG(1, "syncing bad blocks data failed");
ret = -1;
goto out;
}
/* find one good replica; it will be the source of data */
healthy_replica = replica_find_healthy_replica(set_hs);
if (healthy_replica == UNDEF_REPLICA) {
ERR("no healthy replica found");
errno = EINVAL;
ret = -1;
goto out;
}
/* update uuid fields in the set structure with part headers */
if (fill_struct_uuids(set, healthy_replica, set_hs, flags)) {
ERR("gathering uuids failed");
ret = -1;
goto out;
}
/* create headers for broken parts */
if (create_headers_for_broken_parts(set, healthy_replica, set_hs)) {
ERR("creating headers for broken parts failed");
ret = -1;
goto out;
}
/* create all remote replicas */
if (create_remote_replicas(set, set_hs, flags)) {
ERR("creating remote replicas failed");
ret = -1;
goto out;
}
/* check and copy data if possible */
if (copy_data_to_broken_parts(set, healthy_replica,
flags, set_hs)) {
ERR("copying data to broken parts failed");
ret = -1;
goto out;
}
/* update uuids of replicas and parts */
if (update_uuids(set, set_hs)) {
ERR("updating uuids failed");
ret = -1;
goto out;
}
/* grant permissions to all created parts */
if (grant_created_parts_perm(set, healthy_replica, set_hs)) {
ERR("granting permissions to created parts failed");
ret = -1;
}
out:
if (s_hs == NULL)
replica_free_poolset_health_status(set_hs);
return ret;
}
| 42,366 | 24.72374 | 81 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmempool/check_btt_info.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* check_btt_info.c -- check BTT Info
*/
#include <stdlib.h>
#include <stdint.h>
#include <endian.h>
#include "out.h"
#include "util.h"
#include "btt.h"
#include "libpmempool.h"
#include "pmempool.h"
#include "pool.h"
#include "check_util.h"
enum question {
Q_RESTORE_FROM_BACKUP,
Q_REGENERATE,
Q_REGENERATE_CHECKSUM,
Q_RESTORE_FROM_HEADER
};
/*
* location_release -- (internal) release check_btt_info_loc allocations
*/
static void
location_release(location *loc)
{
free(loc->arenap);
loc->arenap = NULL;
}
/*
* btt_info_checksum -- (internal) check BTT Info checksum
*/
static int
btt_info_checksum(PMEMpoolcheck *ppc, location *loc)
{
LOG(3, NULL);
loc->arenap = calloc(1, sizeof(struct arena));
if (!loc->arenap) {
ERR("!calloc");
ppc->result = CHECK_RESULT_INTERNAL_ERROR;
CHECK_ERR(ppc, "cannot allocate memory for arena");
goto error_cleanup;
}
/* read the BTT Info header at well known offset */
if (pool_read(ppc->pool, &loc->arenap->btt_info,
sizeof(loc->arenap->btt_info), loc->offset)) {
CHECK_ERR(ppc, "arena %u: cannot read BTT Info header",
loc->arenap->id);
ppc->result = CHECK_RESULT_ERROR;
goto error_cleanup;
}
loc->arenap->id = ppc->pool->narenas;
/* BLK is consistent even without BTT Layout */
if (ppc->pool->params.type == POOL_TYPE_BLK) {
int is_zeroed = util_is_zeroed((const void *)
&loc->arenap->btt_info, sizeof(loc->arenap->btt_info));
if (is_zeroed) {
CHECK_INFO(ppc, "BTT Layout not written");
loc->step = CHECK_STEP_COMPLETE;
ppc->pool->blk_no_layout = 1;
location_release(loc);
check_end(ppc->data);
return 0;
}
}
/* check consistency of BTT Info */
if (pool_btt_info_valid(&loc->arenap->btt_info)) {
CHECK_INFO(ppc, "arena %u: BTT Info header checksum correct",
loc->arenap->id);
loc->valid.btti_header = 1;
} else if (CHECK_IS_NOT(ppc, REPAIR)) {
CHECK_ERR(ppc, "arena %u: BTT Info header checksum incorrect",
loc->arenap->id);
ppc->result = CHECK_RESULT_NOT_CONSISTENT;
check_end(ppc->data);
goto error_cleanup;
}
return 0;
error_cleanup:
location_release(loc);
return -1;
}
/*
* btt_info_backup -- (internal) check BTT Info backup
*/
static int
btt_info_backup(PMEMpoolcheck *ppc, location *loc)
{
LOG(3, NULL);
/* check BTT Info backup consistency */
const size_t btt_info_size = sizeof(ppc->pool->bttc.btt_info);
uint64_t btt_info_off = pool_next_arena_offset(ppc->pool, loc->offset) -
btt_info_size;
if (pool_read(ppc->pool, &ppc->pool->bttc.btt_info, btt_info_size,
btt_info_off)) {
CHECK_ERR(ppc, "arena %u: cannot read BTT Info backup",
loc->arenap->id);
goto error;
}
/* check whether this BTT Info backup is valid */
if (pool_btt_info_valid(&ppc->pool->bttc.btt_info)) {
loc->valid.btti_backup = 1;
/* restore BTT Info from backup */
if (!loc->valid.btti_header && CHECK_IS(ppc, REPAIR))
CHECK_ASK(ppc, Q_RESTORE_FROM_BACKUP, "arena %u: BTT "
"Info header checksum incorrect.|Restore BTT "
"Info from backup?", loc->arenap->id);
}
/*
* if the BTT Info backup requires repairs, it will be fixed in further steps
*/
return check_questions_sequence_validate(ppc);
error:
ppc->result = CHECK_RESULT_ERROR;
location_release(loc);
return -1;
}
/*
* btt_info_from_backup_fix -- (internal) fix BTT Info using its backup
*/
static int
btt_info_from_backup_fix(PMEMpoolcheck *ppc, location *loc, uint32_t question,
void *ctx)
{
LOG(3, NULL);
ASSERTeq(ctx, NULL);
ASSERTne(loc, NULL);
switch (question) {
case Q_RESTORE_FROM_BACKUP:
CHECK_INFO(ppc,
"arena %u: restoring BTT Info header from backup",
loc->arenap->id);
memcpy(&loc->arenap->btt_info, &ppc->pool->bttc.btt_info,
sizeof(loc->arenap->btt_info));
loc->valid.btti_header = 1;
break;
default:
ERR("not implemented question id: %u", question);
}
return 0;
}
/*
* btt_info_gen -- (internal) ask whether to regenerate BTT Info
*/
static int
btt_info_gen(PMEMpoolcheck *ppc, location *loc)
{
LOG(3, NULL);
if (loc->valid.btti_header)
return 0;
ASSERT(CHECK_IS(ppc, REPAIR));
if (!loc->pool_valid.btti_offset) {
ppc->result = CHECK_RESULT_NOT_CONSISTENT;
check_end(ppc->data);
return CHECK_ERR(ppc, "can not find any valid BTT Info");
}
CHECK_ASK(ppc, Q_REGENERATE,
"arena %u: BTT Info header checksum incorrect.|Do you want to "
"regenerate BTT Info?", loc->arenap->id);
return check_questions_sequence_validate(ppc);
}
/*
* btt_info_gen_fix -- (internal) fix by regenerating BTT Info
*/
static int
btt_info_gen_fix(PMEMpoolcheck *ppc, location *loc, uint32_t question,
void *ctx)
{
LOG(3, NULL);
ASSERTeq(ctx, NULL);
ASSERTne(loc, NULL);
switch (question) {
case Q_REGENERATE:
CHECK_INFO(ppc, "arena %u: regenerating BTT Info header",
loc->arenap->id);
/*
* We do not have a valid BTT Info backup, so we take the first valid
* BTT Info and try to calculate the BTT Info for the current arena.
*/
uint64_t arena_size = ppc->pool->set_file->size - loc->offset;
if (arena_size > BTT_MAX_ARENA)
arena_size = BTT_MAX_ARENA;
uint64_t space_left = ppc->pool->set_file->size - loc->offset -
arena_size;
struct btt_info *bttd = &loc->arenap->btt_info;
struct btt_info *btts = &loc->pool_valid.btti;
btt_info_convert2h(bttd);
/*
* all valid BTT Info structures have the same signature, UUID,
* parent UUID, flags, major, minor, external LBA size, internal
* LBA size, nfree, info size and data offset
*/
memcpy(bttd->sig, btts->sig, BTTINFO_SIG_LEN);
memcpy(bttd->uuid, btts->uuid, BTTINFO_UUID_LEN);
memcpy(bttd->parent_uuid, btts->parent_uuid, BTTINFO_UUID_LEN);
memset(bttd->unused, 0, BTTINFO_UNUSED_LEN);
bttd->flags = btts->flags;
bttd->major = btts->major;
bttd->minor = btts->minor;
/* other parameters can be calculated */
if (btt_info_set(bttd, btts->external_lbasize, btts->nfree,
arena_size, space_left)) {
CHECK_ERR(ppc, "can not restore BTT Info");
return -1;
}
ASSERTeq(bttd->external_lbasize, btts->external_lbasize);
ASSERTeq(bttd->internal_lbasize, btts->internal_lbasize);
ASSERTeq(bttd->nfree, btts->nfree);
ASSERTeq(bttd->infosize, btts->infosize);
ASSERTeq(bttd->dataoff, btts->dataoff);
return 0;
default:
ERR("not implemented question id: %u", question);
return -1;
}
}
/*
* btt_info_checksum_retry -- (internal) check BTT Info checksum
*/
static int
btt_info_checksum_retry(PMEMpoolcheck *ppc, location *loc)
{
LOG(3, NULL);
if (loc->valid.btti_header)
return 0;
btt_info_convert2le(&loc->arenap->btt_info);
/* check consistency of BTT Info */
if (pool_btt_info_valid(&loc->arenap->btt_info)) {
CHECK_INFO(ppc, "arena %u: BTT Info header checksum correct",
loc->arenap->id);
loc->valid.btti_header = 1;
return 0;
}
if (CHECK_IS_NOT(ppc, ADVANCED)) {
ppc->result = CHECK_RESULT_CANNOT_REPAIR;
CHECK_INFO(ppc, REQUIRE_ADVANCED);
CHECK_ERR(ppc, "arena %u: BTT Info header checksum incorrect",
loc->arenap->id);
check_end(ppc->data);
goto error_cleanup;
}
CHECK_ASK(ppc, Q_REGENERATE_CHECKSUM,
"arena %u: BTT Info header checksum incorrect.|Do you want to "
"regenerate BTT Info checksum?", loc->arenap->id);
return check_questions_sequence_validate(ppc);
error_cleanup:
location_release(loc);
return -1;
}
/*
* btt_info_checksum_fix -- (internal) fix by regenerating BTT Info checksum
*/
static int
btt_info_checksum_fix(PMEMpoolcheck *ppc, location *loc, uint32_t question,
void *ctx)
{
LOG(3, NULL);
ASSERTeq(ctx, NULL);
ASSERTne(loc, NULL);
switch (question) {
case Q_REGENERATE_CHECKSUM:
util_checksum(&loc->arenap->btt_info, sizeof(struct btt_info),
&loc->arenap->btt_info.checksum, 1, 0);
loc->valid.btti_header = 1;
break;
default:
ERR("not implemented question id: %u", question);
}
return 0;
}
/*
* btt_info_backup_checksum -- (internal) check BTT Info backup checksum
*/
static int
btt_info_backup_checksum(PMEMpoolcheck *ppc, location *loc)
{
LOG(3, NULL);
ASSERT(loc->valid.btti_header);
if (loc->valid.btti_backup)
return 0;
/* BTT Info backup is not valid so it must be fixed */
if (CHECK_IS_NOT(ppc, REPAIR)) {
CHECK_ERR(ppc,
"arena %u: BTT Info backup checksum incorrect",
loc->arenap->id);
ppc->result = CHECK_RESULT_NOT_CONSISTENT;
check_end(ppc->data);
goto error_cleanup;
}
CHECK_ASK(ppc, Q_RESTORE_FROM_HEADER,
"arena %u: BTT Info backup checksum incorrect.|Do you want to "
"restore it from BTT Info header?", loc->arenap->id);
return check_questions_sequence_validate(ppc);
error_cleanup:
location_release(loc);
return -1;
}
/*
* btt_info_backup_fix -- (internal) prepare to restore BTT Info backup from the header
*/
static int
btt_info_backup_fix(PMEMpoolcheck *ppc, location *loc, uint32_t question,
void *ctx)
{
LOG(3, NULL);
ASSERTeq(ctx, NULL);
ASSERTne(loc, NULL);
switch (question) {
case Q_RESTORE_FROM_HEADER:
/* BTT Info backup would be restored in check_write step */
CHECK_INFO(ppc,
"arena %u: restoring BTT Info backup from header",
loc->arenap->id);
break;
default:
ERR("not implemented question id: %u", question);
}
return 0;
}
struct step {
int (*check)(PMEMpoolcheck *, location *);
int (*fix)(PMEMpoolcheck *, location *, uint32_t, void *);
};
static const struct step steps[] = {
{
.check = btt_info_checksum,
},
{
.check = btt_info_backup,
},
{
.fix = btt_info_from_backup_fix,
},
{
.check = btt_info_gen,
},
{
.fix = btt_info_gen_fix,
},
{
.check = btt_info_checksum_retry,
},
{
.fix = btt_info_checksum_fix,
},
{
.check = btt_info_backup_checksum,
},
{
.fix = btt_info_backup_fix,
},
{
.check = NULL,
.fix = NULL,
},
};
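/*
* Each fix entry in the table processes the answers to the questions asked
* by the check entries that precede it; the terminating entry with both
* callbacks NULL marks the end of the sequence.
*/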
/*
* step_exe -- (internal) perform single step according to its parameters
*/
static inline int
step_exe(PMEMpoolcheck *ppc, location *loc)
{
ASSERT(loc->step < ARRAY_SIZE(steps));
const struct step *step = &steps[loc->step++];
if (!step->fix)
return step->check(ppc, loc);
if (!check_answer_loop(ppc, loc, NULL, 1, step->fix))
return 0;
if (check_has_error(ppc->data))
location_release(loc);
return -1;
}
/*
* check_btt_info -- entry point for btt info check
*/
void
check_btt_info(PMEMpoolcheck *ppc)
{
LOG(3, NULL);
location *loc = check_get_step_data(ppc->data);
uint64_t nextoff = 0;
/* initialize check */
if (!loc->offset) {
CHECK_INFO(ppc, "checking BTT Info headers");
loc->offset = sizeof(struct pool_hdr);
if (ppc->pool->params.type == POOL_TYPE_BLK)
loc->offset += ALIGN_UP(sizeof(struct pmemblk) -
sizeof(struct pool_hdr),
BLK_FORMAT_DATA_ALIGN);
loc->pool_valid.btti_offset = pool_get_first_valid_btt(
ppc->pool, &loc->pool_valid.btti, loc->offset, NULL);
/* Without a valid BTT Info we cannot proceed */
if (!loc->pool_valid.btti_offset) {
if (ppc->pool->params.type == POOL_TYPE_BTT) {
CHECK_ERR(ppc,
"can not find any valid BTT Info");
ppc->result = CHECK_RESULT_NOT_CONSISTENT;
check_end(ppc->data);
return;
}
} else
btt_info_convert2h(&loc->pool_valid.btti);
}
do {
/* jump to next offset */
if (ppc->result != CHECK_RESULT_PROCESS_ANSWERS) {
loc->offset += nextoff;
loc->step = 0;
loc->valid.btti_header = 0;
loc->valid.btti_backup = 0;
}
/* do all checks */
while (CHECK_NOT_COMPLETE(loc, steps)) {
if (step_exe(ppc, loc) || ppc->pool->blk_no_layout == 1)
return;
}
/* save offset and insert BTT to cache for next steps */
loc->arenap->offset = loc->offset;
loc->arenap->valid = true;
check_insert_arena(ppc, loc->arenap);
nextoff = le64toh(loc->arenap->btt_info.nextoff);
} while (nextoff > 0);
}
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmempool/check_util.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2020, Intel Corporation */
/*
* check_util.c -- check utility functions
*/
#include <stdio.h>
#include <stdint.h>
#include "out.h"
#include "libpmempool.h"
#include "pmempool.h"
#include "pool.h"
#include "check_util.h"
#define CHECK_END UINT_MAX
/* separate info part of message from question part of message */
#define MSG_SEPARATOR '|'
/* error part of message must have '.' at the end */
#define MSG_PLACE_OF_SEPARATION '.'
#define MAX_MSG_STR_SIZE 8192
#define CHECK_ANSWER_YES "yes"
#define CHECK_ANSWER_NO "no"
#define STR_MAX 256
#define TIME_STR_FMT "%a %b %d %Y %H:%M:%S"
#define UUID_STR_MAX 37
enum check_answer {
PMEMPOOL_CHECK_ANSWER_EMPTY,
PMEMPOOL_CHECK_ANSWER_YES,
PMEMPOOL_CHECK_ANSWER_NO,
PMEMPOOL_CHECK_ANSWER_DEFAULT,
};
/* queue of check statuses */
struct check_status {
PMDK_TAILQ_ENTRY(check_status) next;
struct pmempool_check_status status;
unsigned question;
enum check_answer answer;
char *msg;
};
PMDK_TAILQ_HEAD(check_status_head, check_status);
/* check control context */
struct check_data {
unsigned step;
location step_data;
struct check_status *error;
struct check_status_head infos;
struct check_status_head questions;
struct check_status_head answers;
struct check_status *check_status_cache;
};
/*
* check_data_alloc -- allocate and initialize check_data structure
*/
struct check_data *
check_data_alloc(void)
{
LOG(3, NULL);
struct check_data *data = calloc(1, sizeof(*data));
if (data == NULL) {
ERR("!calloc");
return NULL;
}
PMDK_TAILQ_INIT(&data->infos);
PMDK_TAILQ_INIT(&data->questions);
PMDK_TAILQ_INIT(&data->answers);
return data;
}
/*
* check_data_free -- clean and deallocate check_data
*/
void
check_data_free(struct check_data *data)
{
LOG(3, NULL);
if (data->error != NULL) {
free(data->error);
data->error = NULL;
}
if (data->check_status_cache != NULL) {
free(data->check_status_cache);
data->check_status_cache = NULL;
}
while (!PMDK_TAILQ_EMPTY(&data->infos)) {
struct check_status *statp = PMDK_TAILQ_FIRST(&data->infos);
PMDK_TAILQ_REMOVE(&data->infos, statp, next);
free(statp);
}
while (!PMDK_TAILQ_EMPTY(&data->questions)) {
struct check_status *statp = PMDK_TAILQ_FIRST(&data->questions);
PMDK_TAILQ_REMOVE(&data->questions, statp, next);
free(statp);
}
while (!PMDK_TAILQ_EMPTY(&data->answers)) {
struct check_status *statp = PMDK_TAILQ_FIRST(&data->answers);
PMDK_TAILQ_REMOVE(&data->answers, statp, next);
free(statp);
}
free(data);
}
/*
* check_step_get - return current check step number
*/
uint32_t
check_step_get(struct check_data *data)
{
return data->step;
}
/*
* check_step_inc -- move to next step number
*/
void
check_step_inc(struct check_data *data)
{
if (check_is_end_util(data))
return;
++data->step;
memset(&data->step_data, 0, sizeof(location));
}
/*
* check_get_step_data -- return pointer to check step data
*/
location *
check_get_step_data(struct check_data *data)
{
return &data->step_data;
}
/*
* check_end -- mark check as ended
*/
void
check_end(struct check_data *data)
{
LOG(3, NULL);
data->step = CHECK_END;
}
/*
* check_is_end_util -- return if check has ended
*/
int
check_is_end_util(struct check_data *data)
{
return data->step == CHECK_END;
}
/*
* status_alloc -- (internal) allocate and initialize check_status
*/
static inline struct check_status *
status_alloc(void)
{
struct check_status *status = malloc(sizeof(*status));
if (!status)
FATAL("!malloc");
status->msg = malloc(sizeof(char) * MAX_MSG_STR_SIZE);
if (!status->msg) {
free(status);
FATAL("!malloc");
}
status->status.str.msg = status->msg;
status->answer = PMEMPOOL_CHECK_ANSWER_EMPTY;
status->question = CHECK_INVALID_QUESTION;
return status;
}
/*
* status_release -- (internal) release check_status
*/
static void
status_release(struct check_status *status)
{
#ifdef _WIN32
/* dealloc duplicate string after conversion */
if (status->status.str.msg != status->msg)
free((void *)status->status.str.msg);
#endif
free(status->msg);
free(status);
}
/*
* status_msg_info_only -- (internal) separate info part of the message
*
* If the message is in the form "info.|question", it modifies it in place to
* "info\0|question"
*/
static inline int
status_msg_info_only(const char *msg)
{
char *sep = strchr(msg, MSG_SEPARATOR);
if (sep) {
ASSERTne(sep, msg);
--sep;
ASSERTeq(*sep, MSG_PLACE_OF_SEPARATION);
*sep = '\0';
return 0;
}
return -1;
}
/*
* status_msg_info_and_question -- (internal) join info and question
*
* If message is in form "info.|question" it will replace MSG_SEPARATOR '|' with
* space to get "info. question"
*/
static inline int
status_msg_info_and_question(const char *msg)
{
char *sep = strchr(msg, MSG_SEPARATOR);
if (sep) {
*sep = ' ';
return 0;
}
return -1;
}
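/*
* Example of both transformations above for a message created as
* "BTT Info backup checksum incorrect.|Do you want to restore it?":
* status_msg_info_only() truncates it at the final '.' of the info part,
* leaving "BTT Info backup checksum incorrect", while
* status_msg_info_and_question() turns it into
* "BTT Info backup checksum incorrect. Do you want to restore it?".
*/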
/*
* status_push -- (internal) push single status object
*/
static int
status_push(PMEMpoolcheck *ppc, struct check_status *st, uint32_t question)
{
if (st->status.type == PMEMPOOL_CHECK_MSG_TYPE_ERROR) {
ASSERTeq(ppc->data->error, NULL);
ppc->data->error = st;
return -1;
} else if (st->status.type == PMEMPOOL_CHECK_MSG_TYPE_INFO) {
if (CHECK_IS(ppc, VERBOSE))
PMDK_TAILQ_INSERT_TAIL(&ppc->data->infos, st, next);
else
check_status_release(ppc, st);
return 0;
}
/* st->status.type == PMEMPOOL_CHECK_MSG_TYPE_QUESTION */
if (CHECK_IS_NOT(ppc, REPAIR)) {
/* error status */
if (status_msg_info_only(st->msg)) {
ERR("no error message for the user");
st->msg[0] = '\0';
}
st->status.type = PMEMPOOL_CHECK_MSG_TYPE_ERROR;
return status_push(ppc, st, question);
}
if (CHECK_IS(ppc, ALWAYS_YES)) {
if (!status_msg_info_only(st->msg)) {
/* information status */
st->status.type = PMEMPOOL_CHECK_MSG_TYPE_INFO;
status_push(ppc, st, question);
st = status_alloc();
}
/* answer status */
ppc->result = CHECK_RESULT_PROCESS_ANSWERS;
st->question = question;
st->answer = PMEMPOOL_CHECK_ANSWER_YES;
st->status.type = PMEMPOOL_CHECK_MSG_TYPE_QUESTION;
PMDK_TAILQ_INSERT_TAIL(&ppc->data->answers, st, next);
} else {
/* question message */
status_msg_info_and_question(st->msg);
st->question = question;
ppc->result = CHECK_RESULT_ASK_QUESTIONS;
st->answer = PMEMPOOL_CHECK_ANSWER_EMPTY;
PMDK_TAILQ_INSERT_TAIL(&ppc->data->questions, st, next);
}
return 0;
}
/*
* check_status_create -- create single status, push it to proper queue
*
* The MSG_SEPARATOR character in fmt is treated as the message separator. If
* creating a question while the check arguments do not allow making any
* changes (so asking any question is pointless), it takes the part of the
* message before the MSG_SEPARATOR character and uses it to create an error
* message. The character just before the separator must be a
* MSG_PLACE_OF_SEPARATION character. Returns a non-zero value if an error
* status was created.
*
* The arg is an additional argument for specified type of status.
*/
int
check_status_create(PMEMpoolcheck *ppc, enum pmempool_check_msg_type type,
uint32_t arg, const char *fmt, ...)
{
if (CHECK_IS_NOT(ppc, VERBOSE) && type == PMEMPOOL_CHECK_MSG_TYPE_INFO)
return 0;
struct check_status *st = status_alloc();
ASSERT(CHECK_IS(ppc, FORMAT_STR));
va_list ap;
va_start(ap, fmt);
int p = vsnprintf(st->msg, MAX_MSG_STR_SIZE, fmt, ap);
va_end(ap);
/* append possible strerror at the end of the message */
if (type != PMEMPOOL_CHECK_MSG_TYPE_QUESTION && arg && p > 0) {
char buff[UTIL_MAX_ERR_MSG];
util_strerror((int)arg, buff, UTIL_MAX_ERR_MSG);
int ret = util_snprintf(st->msg + p,
MAX_MSG_STR_SIZE - (size_t)p, ": %s", buff);
if (ret < 0) {
ERR("!snprintf");
status_release(st);
return -1;
}
}
st->status.type = type;
return status_push(ppc, st, arg);
}
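/*
* A minimal usage sketch, assuming the CHECK_INFO()/CHECK_ASK() wrappers
* from check_util.h used by the check modules (they expand to
* check_status_create() with the corresponding message type):
*
*	CHECK_INFO(ppc, "checking BTT Info headers");
*	CHECK_ASK(ppc, Q_REGENERATE_CHECKSUM,
*		"checksum incorrect.|Do you want to regenerate it?");
*
* The INFO status is queued only in VERBOSE mode; a QUESTION status must
* end its info part with MSG_PLACE_OF_SEPARATION followed by MSG_SEPARATOR.
*/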
/*
* check_status_release -- release single status object
*/
void
check_status_release(PMEMpoolcheck *ppc, struct check_status *status)
{
if (status->status.type == PMEMPOOL_CHECK_MSG_TYPE_ERROR)
ppc->data->error = NULL;
status_release(status);
}
/*
* pop_status -- (internal) pop single message from check_status queue
*/
static struct check_status *
pop_status(struct check_data *data, struct check_status_head *queue)
{
if (!PMDK_TAILQ_EMPTY(queue)) {
ASSERTeq(data->check_status_cache, NULL);
data->check_status_cache = PMDK_TAILQ_FIRST(queue);
PMDK_TAILQ_REMOVE(queue, data->check_status_cache, next);
return data->check_status_cache;
}
return NULL;
}
/*
* check_pop_question -- pop single question from questions queue
*/
struct check_status *
check_pop_question(struct check_data *data)
{
return pop_status(data, &data->questions);
}
/*
* check_pop_info -- pop single info from information queue
*/
struct check_status *
check_pop_info(struct check_data *data)
{
return pop_status(data, &data->infos);
}
/*
* check_pop_error -- pop error from state
*/
struct check_status *
check_pop_error(struct check_data *data)
{
if (data->error) {
ASSERTeq(data->check_status_cache, NULL);
data->check_status_cache = data->error;
data->error = NULL;
return data->check_status_cache;
}
return NULL;
}
#ifdef _WIN32
void
cache_to_utf8(struct check_data *data, char *buf, size_t size)
{
if (data->check_status_cache == NULL)
return;
struct check_status *status = data->check_status_cache;
/* if it was a question, convert it and the answer to utf8 */
if (status->status.type == PMEMPOOL_CHECK_MSG_TYPE_QUESTION) {
struct pmempool_check_statusW *wstatus =
(struct pmempool_check_statusW *)&status->status;
wchar_t *wstring = (wchar_t *)wstatus->str.msg;
status->status.str.msg = util_toUTF8(wstring);
if (status->status.str.msg == NULL)
FATAL("!malloc");
util_free_UTF16(wstring);
if (util_toUTF8_buff(wstatus->str.answer, buf, size) != 0)
FATAL("Invalid answer conversion %s",
out_get_errormsg());
status->status.str.answer = buf;
}
}
#endif
/*
* check_clear_status_cache -- release check_status from cache
*/
void
check_clear_status_cache(struct check_data *data)
{
if (data->check_status_cache) {
switch (data->check_status_cache->status.type) {
case PMEMPOOL_CHECK_MSG_TYPE_INFO:
case PMEMPOOL_CHECK_MSG_TYPE_ERROR:
/*
* Info and error statuses are disposable. After showing
* them to the user we have to release them.
*/
status_release(data->check_status_cache);
data->check_status_cache = NULL;
break;
case PMEMPOOL_CHECK_MSG_TYPE_QUESTION:
/*
* A question status, after being shown to the user,
* carries the user's answer. It must be kept until the
* answer is processed, so it cannot be released from
* the cache. It has to be pushed to the answers queue,
* processed and released after that.
*/
break;
default:
ASSERT(0);
}
}
}
/*
* status_answer_push -- (internal) push single answer to answers queue
*/
static void
status_answer_push(struct check_data *data, struct check_status *st)
{
ASSERTeq(st->status.type, PMEMPOOL_CHECK_MSG_TYPE_QUESTION);
PMDK_TAILQ_INSERT_TAIL(&data->answers, st, next);
}
/*
* check_push_answer -- process answer and push it to answers queue
*/
int
check_push_answer(PMEMpoolcheck *ppc)
{
if (ppc->data->check_status_cache == NULL)
return 0;
/* check if answer is "yes" or "no" */
struct check_status *status = ppc->data->check_status_cache;
if (status->status.str.answer != NULL) {
if (strcmp(status->status.str.answer, CHECK_ANSWER_YES) == 0)
status->answer = PMEMPOOL_CHECK_ANSWER_YES;
else if (strcmp(status->status.str.answer, CHECK_ANSWER_NO)
== 0)
status->answer = PMEMPOOL_CHECK_ANSWER_NO;
}
if (status->answer == PMEMPOOL_CHECK_ANSWER_EMPTY) {
/* invalid answer provided */
status_answer_push(ppc->data, ppc->data->check_status_cache);
ppc->data->check_status_cache = NULL;
CHECK_INFO(ppc, "Answer must be either %s or %s",
CHECK_ANSWER_YES, CHECK_ANSWER_NO);
return -1;
}
/* push answer */
PMDK_TAILQ_INSERT_TAIL(&ppc->data->answers,
ppc->data->check_status_cache, next);
ppc->data->check_status_cache = NULL;
return 0;
}
/*
* check_has_error - check if error exists
*/
bool
check_has_error(struct check_data *data)
{
return data->error != NULL;
}
/*
* check_has_answer - check if any answer exists
*/
bool
check_has_answer(struct check_data *data)
{
return !PMDK_TAILQ_EMPTY(&data->answers);
}
/*
* pop_answer -- (internal) pop single answer from answers queue
*/
static struct check_status *
pop_answer(struct check_data *data)
{
struct check_status *ret = NULL;
if (!PMDK_TAILQ_EMPTY(&data->answers)) {
ret = PMDK_TAILQ_FIRST(&data->answers);
PMDK_TAILQ_REMOVE(&data->answers, ret, next);
}
return ret;
}
/*
* check_status_get_util -- extract pmempool_check_status from check_status
*/
struct pmempool_check_status *
check_status_get_util(struct check_status *status)
{
return &status->status;
}
/*
* check_answer_loop -- loop through all available answers and process them
*/
int
check_answer_loop(PMEMpoolcheck *ppc, location *data, void *ctx, int fail_on_no,
int (*callback)(PMEMpoolcheck *, location *, uint32_t, void *ctx))
{
struct check_status *answer;
while ((answer = pop_answer(ppc->data)) != NULL) {
/* if answer is "no" we cannot fix an issue */
if (answer->answer != PMEMPOOL_CHECK_ANSWER_YES) {
if (fail_on_no ||
answer->answer != PMEMPOOL_CHECK_ANSWER_NO) {
CHECK_ERR(ppc,
"cannot complete repair, reverting changes");
ppc->result = CHECK_RESULT_NOT_CONSISTENT;
goto error;
}
ppc->result = CHECK_RESULT_REPAIRED;
check_status_release(ppc, answer);
continue;
}
/* perform fix */
if (callback(ppc, data, answer->question, ctx)) {
ppc->result = CHECK_RESULT_CANNOT_REPAIR;
goto error;
}
if (ppc->result == CHECK_RESULT_ERROR)
goto error;
/* fix succeeded */
ppc->result = CHECK_RESULT_REPAIRED;
check_status_release(ppc, answer);
}
return 0;
error:
check_status_release(ppc, answer);
return -1;
}
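/*
* A sketch of the fix callback expected by check_answer_loop() (hypothetical
* names; compare btt_info_checksum_fix() in check_btt_info.c):
*
*	static int
*	example_fix(PMEMpoolcheck *ppc, location *loc, uint32_t question,
*		void *ctx)
*	{
*		switch (question) {
*		case Q_EXAMPLE:
*			... perform the repair for this question ...
*			break;
*		default:
*			ERR("not implemented question id: %u", question);
*		}
*		return 0;
*	}
*
* A non-zero return value makes check_answer_loop() set the result to
* CHECK_RESULT_CANNOT_REPAIR.
*/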
/*
* check_questions_sequence_validate -- generate return value from result
*
* A sequence of questions can result in one of the following results:
* CONSISTENT, REPAIRED, ASK_QUESTIONS or PROCESS_ANSWERS. If the result is
* ASK_QUESTIONS it returns -1 to indicate the existence of unanswered
* questions.
*/
int
check_questions_sequence_validate(PMEMpoolcheck *ppc)
{
ASSERT(ppc->result == CHECK_RESULT_CONSISTENT ||
ppc->result == CHECK_RESULT_ASK_QUESTIONS ||
ppc->result == CHECK_RESULT_PROCESS_ANSWERS ||
ppc->result == CHECK_RESULT_REPAIRED);
if (ppc->result == CHECK_RESULT_ASK_QUESTIONS) {
ASSERT(!PMDK_TAILQ_EMPTY(&ppc->data->questions));
return -1;
}
return 0;
}
/*
* check_get_time_str -- returns time in human-readable format
*/
const char *
check_get_time_str(time_t time)
{
static char str_buff[STR_MAX] = {0, };
struct tm *tm = util_localtime(&time);
if (tm)
strftime(str_buff, STR_MAX, TIME_STR_FMT, tm);
else {
int ret = util_snprintf(str_buff, STR_MAX, "unknown");
if (ret < 0) {
ERR("!snprintf");
return "";
}
}
return str_buff;
}
/*
* check_get_uuid_str -- returns uuid in human-readable format
*/
const char *
check_get_uuid_str(uuid_t uuid)
{
static char uuid_str[UUID_STR_MAX] = {0, };
int ret = util_uuid_to_string(uuid, uuid_str);
if (ret != 0) {
ERR("failed to covert uuid to string");
return "";
}
return uuid_str;
}
/*
* pmempool_check_insert_arena -- insert arena to list
*/
void
check_insert_arena(PMEMpoolcheck *ppc, struct arena *arenap)
{
PMDK_TAILQ_INSERT_TAIL(&ppc->pool->arenas, arenap, next);
ppc->pool->narenas++;
}
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmempool/pool.h
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* pool.h -- internal definitions for pool processing functions
*/
#ifndef POOL_H
#define POOL_H
#include <stdbool.h>
#include <sys/types.h>
#include "libpmemobj.h"
#include "queue.h"
#include "set.h"
#include "log.h"
#include "blk.h"
#include "btt_layout.h"
#ifdef __cplusplus
extern "C" {
#endif
#include "alloc.h"
#include "fault_injection.h"
enum pool_type {
POOL_TYPE_UNKNOWN = (1 << 0),
POOL_TYPE_LOG = (1 << 1),
POOL_TYPE_BLK = (1 << 2),
POOL_TYPE_OBJ = (1 << 3),
POOL_TYPE_BTT = (1 << 4),
POOL_TYPE_ANY = POOL_TYPE_UNKNOWN | POOL_TYPE_LOG |
POOL_TYPE_BLK | POOL_TYPE_OBJ | POOL_TYPE_BTT,
};
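/*
* The enumerators are bit flags, so type filters can be expressed with
* bitwise OR, e.g. (POOL_TYPE_BLK | POOL_TYPE_BTT) in a check step table
* matches both pool types.
*/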
struct pool_params {
enum pool_type type;
char signature[POOL_HDR_SIG_LEN];
features_t features;
size_t size;
mode_t mode;
int is_poolset;
int is_part;
int is_dev_dax;
int is_pmem;
union {
struct {
uint64_t bsize;
} blk;
struct {
char layout[PMEMOBJ_MAX_LAYOUT];
} obj;
};
};
struct pool_set_file {
int fd;
char *fname;
void *addr;
size_t size;
struct pool_set *poolset;
time_t mtime;
mode_t mode;
};
struct arena {
PMDK_TAILQ_ENTRY(arena) next;
struct btt_info btt_info;
uint32_t id;
bool valid;
bool zeroed;
uint64_t offset;
uint8_t *flog;
size_t flogsize;
uint32_t *map;
size_t mapsize;
};
struct pool_data {
struct pool_params params;
struct pool_set_file *set_file;
int blk_no_layout;
union {
struct pool_hdr pool;
struct pmemlog log;
struct pmemblk blk;
} hdr;
enum {
UUID_NOP = 0,
UUID_FROM_BTT,
UUID_NOT_FROM_BTT,
} uuid_op;
struct arena bttc;
PMDK_TAILQ_HEAD(arenashead, arena) arenas;
uint32_t narenas;
};
struct pool_data *pool_data_alloc(PMEMpoolcheck *ppc);
void pool_data_free(struct pool_data *pool);
void pool_params_from_header(struct pool_params *params,
const struct pool_hdr *hdr);
int pool_set_parse(struct pool_set **setp, const char *path);
void *pool_set_file_map(struct pool_set_file *file, uint64_t offset);
int pool_read(struct pool_data *pool, void *buff, size_t nbytes,
uint64_t off);
int pool_write(struct pool_data *pool, const void *buff, size_t nbytes,
uint64_t off);
int pool_copy(struct pool_data *pool, const char *dst_path, int overwrite);
int pool_set_part_copy(struct pool_set_part *dpart,
struct pool_set_part *spart, int overwrite);
int pool_memset(struct pool_data *pool, uint64_t off, int c, size_t count);
unsigned pool_set_files_count(struct pool_set_file *file);
int pool_set_file_map_headers(struct pool_set_file *file, int rdonly, int prv);
void pool_set_file_unmap_headers(struct pool_set_file *file);
void pool_hdr_default(enum pool_type type, struct pool_hdr *hdrp);
enum pool_type pool_hdr_get_type(const struct pool_hdr *hdrp);
enum pool_type pool_set_type(struct pool_set *set);
const char *pool_get_pool_type_str(enum pool_type type);
int pool_btt_info_valid(struct btt_info *infop);
int pool_blk_get_first_valid_arena(struct pool_data *pool,
struct arena *arenap);
int pool_blk_bsize_valid(uint32_t bsize, uint64_t fsize);
uint64_t pool_next_arena_offset(struct pool_data *pool, uint64_t header_offset);
uint64_t pool_get_first_valid_btt(struct pool_data *pool,
struct btt_info *infop, uint64_t offset, bool *zeroed);
size_t pool_get_min_size(enum pool_type);
#if FAULT_INJECTION
void
pmempool_inject_fault_at(enum pmem_allocation_type type, int nth,
const char *at);
int
pmempool_fault_injection_enabled(void);
#else
static inline void
pmempool_inject_fault_at(enum pmem_allocation_type type, int nth,
const char *at)
{
abort();
}
static inline int
pmempool_fault_injection_enabled(void)
{
return 0;
}
#endif
#ifdef __cplusplus
}
#endif
#endif
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmempool/pool.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2020, Intel Corporation */
/*
* pool.c -- pool processing functions
*/
#include <stdio.h>
#include <stdint.h>
#include <sys/mman.h>
#include <unistd.h>
#include <fcntl.h>
#include <endian.h>
#ifndef _WIN32
#include <sys/ioctl.h>
#ifdef __FreeBSD__
#include <sys/disk.h>
#define BLKGETSIZE64 DIOCGMEDIASIZE
#else
#include <linux/fs.h>
#endif
#endif
#include "libpmem.h"
#include "libpmemlog.h"
#include "libpmemblk.h"
#include "libpmempool.h"
#include "out.h"
#include "pmempool.h"
#include "pool.h"
#include "lane.h"
#include "obj.h"
#include "btt.h"
#include "file.h"
#include "os.h"
#include "set.h"
#include "check_util.h"
#include "util_pmem.h"
#include "mmap.h"
/* arbitrary size of a maximum file part being read / write at once */
#define RW_BUFFERING_SIZE (128 * 1024 * 1024)
/*
* pool_btt_lseek -- (internal) perform lseek in BTT file mode
*/
static inline os_off_t
pool_btt_lseek(struct pool_data *pool, os_off_t offset, int whence)
{
os_off_t result;
if ((result = os_lseek(pool->set_file->fd, offset, whence)) == -1)
ERR("!lseek");
return result;
}
/*
* pool_btt_read -- (internal) perform read in BTT file mode
*/
static inline ssize_t
pool_btt_read(struct pool_data *pool, void *dst, size_t count)
{
size_t total = 0;
ssize_t nread;
while (count > total &&
(nread = util_read(pool->set_file->fd, dst, count - total))) {
if (nread == -1) {
ERR("!read");
return total ? (ssize_t)total : -1;
}
dst = (void *)((ssize_t)dst + nread);
total += (size_t)nread;
}
return (ssize_t)total;
}
/*
* pool_btt_write -- (internal) perform write in BTT file mode
*/
static inline ssize_t
pool_btt_write(struct pool_data *pool, const void *src, size_t count)
{
ssize_t nwrite = 0;
size_t total = 0;
while (count > total &&
(nwrite = util_write(pool->set_file->fd, src,
count - total))) {
if (nwrite == -1) {
ERR("!write");
return total ? (ssize_t)total : -1;
}
src = (void *)((ssize_t)src + nwrite);
total += (size_t)nwrite;
}
return (ssize_t)total;
}
/*
* pool_set_read_header -- (internal) read a header of a pool set
*/
static int
pool_set_read_header(const char *fname, struct pool_hdr *hdr)
{
struct pool_set *set;
int ret = 0;
if (util_poolset_read(&set, fname)) {
return -1;
}
/* open the first part set file to read the pool header values */
const struct pool_set_part *part = PART(REP(set, 0), 0);
int fdp = util_file_open(part->path, NULL, 0, O_RDONLY);
if (fdp < 0) {
ERR("cannot open poolset part file");
ret = -1;
goto err_pool_set;
}
/* read the pool header from first pool set file */
if (pread(fdp, hdr, sizeof(*hdr), 0) != sizeof(*hdr)) {
ERR("cannot read pool header from poolset");
ret = -1;
goto err_close_part;
}
err_close_part:
os_close(fdp);
err_pool_set:
util_poolset_free(set);
return ret;
}
/*
* pool_set_map -- (internal) map poolset
*/
static int
pool_set_map(const char *fname, struct pool_set **poolset, unsigned flags)
{
ASSERTeq(util_is_poolset_file(fname), 1);
struct pool_hdr hdr;
if (pool_set_read_header(fname, &hdr))
return -1;
util_convert2h_hdr_nocheck(&hdr);
/* parse pool type from first pool set file */
enum pool_type type = pool_hdr_get_type(&hdr);
if (type == POOL_TYPE_UNKNOWN) {
ERR("cannot determine pool type from poolset");
return -1;
}
/*
* Open the poolset. The values passed to util_pool_open are read
* from the first poolset file and are then compared with the
* values from the headers of all poolset files.
*/
struct pool_attr attr;
util_pool_hdr2attr(&attr, &hdr);
if (util_pool_open(poolset, fname, 0 /* minpartsize */, &attr,
NULL, NULL, flags | POOL_OPEN_IGNORE_SDS |
POOL_OPEN_IGNORE_BAD_BLOCKS)) {
ERR("opening poolset failed");
return -1;
}
return 0;
}
/*
* pool_params_from_header -- parse pool params from pool header
*/
void
pool_params_from_header(struct pool_params *params, const struct pool_hdr *hdr)
{
memcpy(params->signature, hdr->signature, sizeof(params->signature));
memcpy(¶ms->features, &hdr->features, sizeof(params->features));
/*
* Check if the file is a part of a pool set by comparing the UUID with
* the next part UUID. If they are the same it means the pool consists
* of a single file.
*/
int uuid_eq_next = uuidcmp(hdr->uuid, hdr->next_part_uuid);
int uuid_eq_prev = uuidcmp(hdr->uuid, hdr->prev_part_uuid);
params->is_part = !params->is_poolset && (uuid_eq_next || uuid_eq_prev);
params->type = pool_hdr_get_type(hdr);
}
/*
* pool_check_type_to_pool_type -- (internal) convert check pool type to
* internal pool type value
*/
static enum pool_type
pool_check_type_to_pool_type(enum pmempool_pool_type check_pool_type)
{
switch (check_pool_type) {
case PMEMPOOL_POOL_TYPE_LOG:
return POOL_TYPE_LOG;
case PMEMPOOL_POOL_TYPE_BLK:
return POOL_TYPE_BLK;
case PMEMPOOL_POOL_TYPE_OBJ:
return POOL_TYPE_OBJ;
default:
ERR("can not convert pmempool_pool_type %u to pool_type",
check_pool_type);
return POOL_TYPE_UNKNOWN;
}
}
/*
* pool_params_parse -- parse pool type, file size and block size
*/
static int
pool_params_parse(const PMEMpoolcheck *ppc, struct pool_params *params,
int check)
{
LOG(3, NULL);
int is_btt = ppc->args.pool_type == PMEMPOOL_POOL_TYPE_BTT;
params->type = POOL_TYPE_UNKNOWN;
params->is_poolset = util_is_poolset_file(ppc->path) == 1;
int fd = util_file_open(ppc->path, NULL, 0, O_RDONLY);
if (fd < 0)
return -1;
int ret = 0;
os_stat_t stat_buf;
ret = os_fstat(fd, &stat_buf);
if (ret)
goto out_close;
ASSERT(stat_buf.st_size >= 0);
params->mode = stat_buf.st_mode;
struct pool_set *set;
void *addr;
if (params->is_poolset) {
/*
* Need to close the poolset because it will be opened with
* flock in the following instructions.
*/
os_close(fd);
fd = -1;
if (check) {
if (pool_set_map(ppc->path, &set, 0))
return -1;
} else {
ret = util_poolset_create_set(&set, ppc->path,
0, 0, true);
if (ret < 0) {
LOG(2, "cannot open pool set -- '%s'",
ppc->path);
return -1;
}
if (set->remote) {
ERR("poolsets with remote replicas are not "
"supported");
return -1;
}
if (util_pool_open_nocheck(set,
POOL_OPEN_IGNORE_BAD_BLOCKS))
return -1;
}
params->size = set->poolsize;
addr = set->replica[0]->part[0].addr;
/*
* XXX mprotect for device dax with length not aligned to its
* page granularity causes SIGBUS on the next page fault.
* The length argument of this call should be changed to
* set->poolsize once the kernel issue is solved.
*/
if (mprotect(addr, set->replica[0]->repsize,
PROT_READ) < 0) {
ERR("!mprotect");
goto out_unmap;
}
params->is_dev_dax = set->replica[0]->part[0].is_dev_dax;
params->is_pmem = set->replica[0]->is_pmem;
} else if (is_btt) {
params->size = (size_t)stat_buf.st_size;
#ifndef _WIN32
if (params->mode & S_IFBLK)
if (ioctl(fd, BLKGETSIZE64, ¶ms->size)) {
ERR("!ioctl");
goto out_close;
}
#endif
addr = NULL;
} else {
enum file_type type = util_file_get_type(ppc->path);
if (type < 0) {
ret = -1;
goto out_close;
}
ssize_t s = util_file_get_size(ppc->path);
if (s < 0) {
ret = -1;
goto out_close;
}
params->size = (size_t)s;
int map_sync;
addr = util_map(fd, 0, params->size, MAP_SHARED, 1, 0,
&map_sync);
if (addr == NULL) {
ret = -1;
goto out_close;
}
params->is_dev_dax = type == TYPE_DEVDAX;
params->is_pmem = params->is_dev_dax || map_sync ||
pmem_is_pmem(addr, params->size);
}
/* stop processing for BTT device */
if (is_btt) {
params->type = POOL_TYPE_BTT;
params->is_part = false;
goto out_close;
}
struct pool_hdr hdr;
memcpy(&hdr, addr, sizeof(hdr));
util_convert2h_hdr_nocheck(&hdr);
pool_params_from_header(params, &hdr);
if (ppc->args.pool_type != PMEMPOOL_POOL_TYPE_DETECT) {
enum pool_type declared_type =
pool_check_type_to_pool_type(ppc->args.pool_type);
if ((params->type & ~declared_type) != 0) {
ERR("declared pool type does not match");
errno = EINVAL;
ret = 1;
goto out_unmap;
}
}
if (params->type == POOL_TYPE_BLK) {
struct pmemblk pbp;
memcpy(&pbp, addr, sizeof(pbp));
params->blk.bsize = le32toh(pbp.bsize);
} else if (params->type == POOL_TYPE_OBJ) {
struct pmemobjpool *pop = addr;
memcpy(params->obj.layout, pop->layout,
PMEMOBJ_MAX_LAYOUT);
}
out_unmap:
if (params->is_poolset) {
ASSERTeq(fd, -1);
ASSERTne(addr, NULL);
util_poolset_close(set, DO_NOT_DELETE_PARTS);
} else if (!is_btt) {
ASSERTne(fd, -1);
ASSERTne(addr, NULL);
munmap(addr, params->size);
}
out_close:
if (fd != -1)
os_close(fd);
return ret;
}
/*
* pool_set_file_open -- (internal) opens pool set file or regular file
*/
static struct pool_set_file *
pool_set_file_open(const char *fname, struct pool_params *params, int rdonly)
{
LOG(3, NULL);
struct pool_set_file *file = calloc(1, sizeof(*file));
if (!file)
return NULL;
file->fname = strdup(fname);
if (!file->fname)
goto err;
const char *path = file->fname;
if (params->type != POOL_TYPE_BTT) {
int ret = util_poolset_create_set(&file->poolset, path,
0, 0, true);
if (ret < 0) {
LOG(2, "cannot open pool set -- '%s'", path);
goto err_free_fname;
}
unsigned flags = (rdonly ? POOL_OPEN_COW : 0) |
POOL_OPEN_IGNORE_BAD_BLOCKS;
if (util_pool_open_nocheck(file->poolset, flags))
goto err_free_fname;
file->size = file->poolset->poolsize;
/* get modification time from the first part of first replica */
path = file->poolset->replica[0]->part[0].path;
file->addr = file->poolset->replica[0]->part[0].addr;
} else {
int oflag = rdonly ? O_RDONLY : O_RDWR;
file->fd = util_file_open(fname, NULL, 0, oflag);
file->size = params->size;
}
os_stat_t buf;
if (os_stat(path, &buf)) {
ERR("%s", path);
goto err_close_poolset;
}
file->mtime = buf.st_mtime;
file->mode = buf.st_mode;
return file;
err_close_poolset:
if (params->type != POOL_TYPE_BTT)
util_poolset_close(file->poolset, DO_NOT_DELETE_PARTS);
else if (file->fd != -1)
os_close(file->fd);
err_free_fname:
free(file->fname);
err:
free(file);
return NULL;
}
/*
* pool_set_parse -- parse poolset file
*/
int
pool_set_parse(struct pool_set **setp, const char *path)
{
LOG(3, "setp %p path %s", setp, path);
int fd = os_open(path, O_RDONLY);
int ret = 0;
if (fd < 0)
return 1;
if (util_poolset_parse(setp, path, fd)) {
ret = 1;
goto err_close;
}
err_close:
os_close(fd);
return ret;
}
/*
* pool_data_alloc -- allocate pool data and open set_file
*/
struct pool_data *
pool_data_alloc(PMEMpoolcheck *ppc)
{
LOG(3, NULL);
struct pool_data *pool = calloc(1, sizeof(*pool));
if (!pool) {
ERR("!calloc");
return NULL;
}
PMDK_TAILQ_INIT(&pool->arenas);
pool->uuid_op = UUID_NOP;
if (pool_params_parse(ppc, &pool->params, 0))
goto error;
int rdonly = CHECK_IS_NOT(ppc, REPAIR);
int prv = CHECK_IS(ppc, DRY_RUN);
if (prv && pool->params.is_dev_dax) {
errno = ENOTSUP;
ERR("!cannot perform a dry run on dax device");
goto error;
}
pool->set_file = pool_set_file_open(ppc->path, &pool->params, prv);
if (pool->set_file == NULL)
goto error;
/*
* XXX mprotect for device dax with length not aligned to its
* page granularity causes SIGBUS on the next page fault.
* The length argument of this call should be changed to
* pool->set_file->poolsize once the kernel issue is solved.
*/
if (rdonly && mprotect(pool->set_file->addr,
pool->set_file->poolset->replica[0]->repsize,
PROT_READ) < 0)
goto error;
if (pool->params.type != POOL_TYPE_BTT) {
if (pool_set_file_map_headers(pool->set_file, rdonly, prv))
goto error;
}
return pool;
error:
pool_data_free(pool);
return NULL;
}
/*
* pool_set_file_close -- (internal) closes pool set file or regular file
*/
static void
pool_set_file_close(struct pool_set_file *file)
{
LOG(3, NULL);
if (file->poolset)
util_poolset_close(file->poolset, DO_NOT_DELETE_PARTS);
else if (file->addr) {
munmap(file->addr, file->size);
os_close(file->fd);
} else if (file->fd)
os_close(file->fd);
free(file->fname);
free(file);
}
/*
* pool_data_free -- close set_file and release pool data
*/
void
pool_data_free(struct pool_data *pool)
{
LOG(3, NULL);
if (pool->set_file) {
if (pool->params.type != POOL_TYPE_BTT)
pool_set_file_unmap_headers(pool->set_file);
pool_set_file_close(pool->set_file);
}
while (!PMDK_TAILQ_EMPTY(&pool->arenas)) {
struct arena *arenap = PMDK_TAILQ_FIRST(&pool->arenas);
if (arenap->map)
free(arenap->map);
if (arenap->flog)
free(arenap->flog);
PMDK_TAILQ_REMOVE(&pool->arenas, arenap, next);
free(arenap);
}
free(pool);
}
/*
* pool_set_file_map -- return mapped address at given offset
*/
void *
pool_set_file_map(struct pool_set_file *file, uint64_t offset)
{
if (file->addr == MAP_FAILED)
return NULL;
return (char *)file->addr + offset;
}
/*
* pool_read -- read from pool set file or regular file
*
* 'buff' has to be a buffer at least 'nbytes' long
* 'off' is an offset from the beginning of the pool
*/
int
pool_read(struct pool_data *pool, void *buff, size_t nbytes, uint64_t off)
{
if (off + nbytes > pool->set_file->size)
return -1;
if (pool->params.type != POOL_TYPE_BTT)
memcpy(buff, (char *)pool->set_file->addr + off, nbytes);
else {
if (pool_btt_lseek(pool, (os_off_t)off, SEEK_SET) == -1)
return -1;
if ((size_t)pool_btt_read(pool, buff, nbytes) != nbytes)
return -1;
}
return 0;
}
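/*
* A usage sketch (hypothetical call; the check modules follow the same
* pattern): read the pool header located at offset 0 of the pool:
*
*	struct pool_hdr hdr;
*	if (pool_read(pool, &hdr, sizeof(hdr), 0))
*		... handle the out-of-range or read error ...
*/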
/*
* pool_write -- write to pool set file or regular file
*
* 'buff' has to be a buffer at least 'nbytes' long
* 'off' is an offset from the beginning of the pool
*/
int
pool_write(struct pool_data *pool, const void *buff, size_t nbytes,
uint64_t off)
{
if (off + nbytes > pool->set_file->size)
return -1;
if (pool->params.type != POOL_TYPE_BTT) {
memcpy((char *)pool->set_file->addr + off, buff, nbytes);
util_persist_auto(pool->params.is_pmem,
(char *)pool->set_file->addr + off, nbytes);
} else {
if (pool_btt_lseek(pool, (os_off_t)off, SEEK_SET) == -1)
return -1;
if ((size_t)pool_btt_write(pool, buff, nbytes) != nbytes)
return -1;
}
return 0;
}
/*
* pool_copy -- make a copy of the pool
*/
int
pool_copy(struct pool_data *pool, const char *dst_path, int overwrite)
{
struct pool_set_file *file = pool->set_file;
int dfd;
int exists = util_file_exists(dst_path);
if (exists < 0)
return -1;
if (exists) {
if (!overwrite) {
errno = EEXIST;
return -1;
}
dfd = util_file_open(dst_path, NULL, 0, O_RDWR);
} else {
errno = 0;
dfd = util_file_create(dst_path, file->size, 0);
}
if (dfd < 0)
return -1;
int result = 0;
os_stat_t stat_buf;
if (os_stat(file->fname, &stat_buf)) {
result = -1;
goto out_close;
}
if (fchmod(dfd, stat_buf.st_mode)) {
result = -1;
goto out_close;
}
void *daddr = mmap(NULL, file->size, PROT_READ | PROT_WRITE,
MAP_SHARED, dfd, 0);
if (daddr == MAP_FAILED) {
result = -1;
goto out_close;
}
if (pool->params.type != POOL_TYPE_BTT) {
void *saddr = pool_set_file_map(file, 0);
memcpy(daddr, saddr, file->size);
goto out_unmap;
}
void *buf = malloc(RW_BUFFERING_SIZE);
if (buf == NULL) {
ERR("!malloc");
result = -1;
goto out_unmap;
}
if (pool_btt_lseek(pool, 0, SEEK_SET) == -1) {
result = -1;
goto out_free;
}
ssize_t buf_read = 0;
void *dst = daddr;
while ((buf_read = pool_btt_read(pool, buf, RW_BUFFERING_SIZE))) {
if (buf_read == -1)
break;
memcpy(dst, buf, (size_t)buf_read);
dst = (void *)((ssize_t)dst + buf_read);
}
out_free:
free(buf);
out_unmap:
munmap(daddr, file->size);
out_close:
(void) os_close(dfd);
return result;
}
/*
* pool_set_part_copy -- make a copy of the poolset part
*/
int
pool_set_part_copy(struct pool_set_part *dpart, struct pool_set_part *spart,
int overwrite)
{
LOG(3, "dpart %p spart %p", dpart, spart);
int result = 0;
os_stat_t stat_buf;
if (os_fstat(spart->fd, &stat_buf)) {
ERR("!util_stat");
return -1;
}
size_t smapped = 0;
void *saddr = pmem_map_file(spart->path, 0, 0, S_IREAD, &smapped, NULL);
if (!saddr)
return -1;
size_t dmapped = 0;
int is_pmem;
void *daddr;
int exists = util_file_exists(dpart->path);
if (exists < 0) {
result = -1;
goto out_sunmap;
}
if (exists) {
if (!overwrite) {
errno = EEXIST;
result = -1;
goto out_sunmap;
}
daddr = pmem_map_file(dpart->path, 0, 0, S_IWRITE, &dmapped,
&is_pmem);
} else {
errno = 0;
daddr = pmem_map_file(dpart->path, dpart->filesize,
PMEM_FILE_CREATE | PMEM_FILE_EXCL,
stat_buf.st_mode, &dmapped, &is_pmem);
}
if (!daddr) {
result = -1;
goto out_sunmap;
}
#ifdef DEBUG
/* provide extra logging in case of wrong dmapped/smapped value */
if (dmapped < smapped) {
LOG(1, "dmapped < smapped: dmapped = %lu, smapped = %lu",
dmapped, smapped);
ASSERT(0);
}
#endif
if (is_pmem) {
pmem_memcpy_persist(daddr, saddr, smapped);
} else {
memcpy(daddr, saddr, smapped);
pmem_msync(daddr, smapped);
}
pmem_unmap(daddr, dmapped);
out_sunmap:
pmem_unmap(saddr, smapped);
return result;
}
/*
* pool_memset -- memset pool part described by off and count
*/
int
pool_memset(struct pool_data *pool, uint64_t off, int c, size_t count)
{
int result = 0;
if (pool->params.type != POOL_TYPE_BTT)
memset((char *)off, c, count);
else {
if (pool_btt_lseek(pool, (os_off_t)off, SEEK_SET) == -1)
return -1;
size_t zero_size = min(count, RW_BUFFERING_SIZE);
void *buf = malloc(zero_size);
if (!buf) {
ERR("!malloc");
return -1;
}
memset(buf, c, zero_size);
ssize_t nwrite = 0;
do {
zero_size = min(zero_size, count);
nwrite = pool_btt_write(pool, buf, zero_size);
if (nwrite < 0) {
result = -1;
break;
}
count -= (size_t)nwrite;
} while (count > 0);
free(buf);
}
return result;
}
/*
* pool_set_files_count -- get total number of parts of all replicas
*/
unsigned
pool_set_files_count(struct pool_set_file *file)
{
unsigned ret = 0;
unsigned nreplicas = file->poolset->nreplicas;
for (unsigned r = 0; r < nreplicas; r++) {
struct pool_replica *rep = file->poolset->replica[r];
ret += rep->nparts;
}
return ret;
}
/*
* pool_set_file_map_headers -- map headers of each pool set part file
*/
int
pool_set_file_map_headers(struct pool_set_file *file, int rdonly, int prv)
{
if (!file->poolset)
return -1;
for (unsigned r = 0; r < file->poolset->nreplicas; r++) {
struct pool_replica *rep = file->poolset->replica[r];
for (unsigned p = 0; p < rep->nparts; p++) {
struct pool_set_part *part = &rep->part[p];
if (util_map_hdr(part,
prv ? MAP_PRIVATE : MAP_SHARED, rdonly)) {
part->hdr = NULL;
goto err;
}
}
}
return 0;
err:
pool_set_file_unmap_headers(file);
return -1;
}
/*
* pool_set_file_unmap_headers -- unmap headers of each pool set part file
*/
void
pool_set_file_unmap_headers(struct pool_set_file *file)
{
if (!file->poolset)
return;
for (unsigned r = 0; r < file->poolset->nreplicas; r++) {
struct pool_replica *rep = file->poolset->replica[r];
for (unsigned p = 0; p < rep->nparts; p++) {
struct pool_set_part *part = &rep->part[p];
util_unmap_hdr(part);
}
}
}
/*
* pool_get_signature -- (internal) return signature of specified pool type
*/
static const char *
pool_get_signature(enum pool_type type)
{
switch (type) {
case POOL_TYPE_LOG:
return LOG_HDR_SIG;
case POOL_TYPE_BLK:
return BLK_HDR_SIG;
case POOL_TYPE_OBJ:
return OBJ_HDR_SIG;
default:
return NULL;
}
}
/*
* pool_hdr_default -- return default pool header values
*/
void
pool_hdr_default(enum pool_type type, struct pool_hdr *hdrp)
{
memset(hdrp, 0, sizeof(*hdrp));
const char *sig = pool_get_signature(type);
ASSERTne(sig, NULL);
memcpy(hdrp->signature, sig, POOL_HDR_SIG_LEN);
switch (type) {
case POOL_TYPE_LOG:
hdrp->major = LOG_FORMAT_MAJOR;
hdrp->features = log_format_feat_default;
break;
case POOL_TYPE_BLK:
hdrp->major = BLK_FORMAT_MAJOR;
hdrp->features = blk_format_feat_default;
break;
case POOL_TYPE_OBJ:
hdrp->major = OBJ_FORMAT_MAJOR;
hdrp->features = obj_format_feat_default;
break;
default:
break;
}
}
/*
* pool_hdr_get_type -- return pool type based on pool header data
*/
enum pool_type
pool_hdr_get_type(const struct pool_hdr *hdrp)
{
if (memcmp(hdrp->signature, LOG_HDR_SIG, POOL_HDR_SIG_LEN) == 0)
return POOL_TYPE_LOG;
else if (memcmp(hdrp->signature, BLK_HDR_SIG, POOL_HDR_SIG_LEN) == 0)
return POOL_TYPE_BLK;
else if (memcmp(hdrp->signature, OBJ_HDR_SIG, POOL_HDR_SIG_LEN) == 0)
return POOL_TYPE_OBJ;
else
return POOL_TYPE_UNKNOWN;
}
/*
* pool_get_pool_type_str -- return human-readable pool type string
*/
const char *
pool_get_pool_type_str(enum pool_type type)
{
switch (type) {
case POOL_TYPE_BTT:
return "btt";
case POOL_TYPE_LOG:
return "pmemlog";
case POOL_TYPE_BLK:
return "pmemblk";
case POOL_TYPE_OBJ:
return "pmemobj";
default:
return "unknown";
}
}
/*
* pool_set_type -- get pool type of a poolset
*/
enum pool_type
pool_set_type(struct pool_set *set)
{
struct pool_hdr hdr;
/* open the first part file to read the pool header values */
const struct pool_set_part *part = PART(REP(set, 0), 0);
if (util_file_pread(part->path, &hdr, sizeof(hdr), 0) !=
sizeof(hdr)) {
ERR("cannot read pool header from poolset");
return POOL_TYPE_UNKNOWN;
}
util_convert2h_hdr_nocheck(&hdr);
enum pool_type type = pool_hdr_get_type(&hdr);
return type;
}
/*
* pool_btt_info_valid -- check consistency of BTT Info header
*/
int
pool_btt_info_valid(struct btt_info *infop)
{
if (memcmp(infop->sig, BTTINFO_SIG, BTTINFO_SIG_LEN) != 0)
return 0;
return util_checksum(infop, sizeof(*infop), &infop->checksum, 0, 0);
}
/*
* pool_blk_get_first_valid_arena -- get first valid BTT Info in arena
*/
int
pool_blk_get_first_valid_arena(struct pool_data *pool, struct arena *arenap)
{
arenap->zeroed = true;
uint64_t offset = pool_get_first_valid_btt(pool, &arenap->btt_info,
2 * BTT_ALIGNMENT, &arenap->zeroed);
if (offset != 0) {
arenap->offset = offset;
arenap->valid = true;
return 1;
}
return 0;
}
/*
* pool_next_arena_offset -- get offset of next arena
*
* The calculated offset is theoretical. The function does not check whether
* such an arena can exist.
*/
uint64_t
pool_next_arena_offset(struct pool_data *pool, uint64_t offset)
{
uint64_t lastoff = (pool->set_file->size & ~(BTT_ALIGNMENT - 1));
uint64_t nextoff = min(offset + BTT_MAX_ARENA, lastoff);
return nextoff;
}
/*
* pool_get_first_valid_btt -- return offset to first valid BTT Info
*
* - Return offset to valid BTT Info header in pool file.
* - Start looking from given offset.
* - Convert BTT Info header to host endianness.
* - Return the BTT Info header by pointer.
* - If the zeroed pointer is provided, check whether all of the checked BTT
* Info headers are zeroed, which is useful for BLK pools.
*/
uint64_t
pool_get_first_valid_btt(struct pool_data *pool, struct btt_info *infop,
uint64_t offset, bool *zeroed)
{
/* if we have valid arena get BTT Info header from it */
if (pool->narenas != 0) {
struct arena *arenap = PMDK_TAILQ_FIRST(&pool->arenas);
memcpy(infop, &arenap->btt_info, sizeof(*infop));
return arenap->offset;
}
const size_t info_size = sizeof(*infop);
/* theoretical offsets to BTT Info header and backup */
uint64_t offsets[2] = {offset, 0};
while (offsets[0] < pool->set_file->size) {
/* calculate backup offset */
offsets[1] = pool_next_arena_offset(pool, offsets[0]) -
info_size;
/* check both offsets: header and backup */
for (int i = 0; i < 2; ++i) {
if (pool_read(pool, infop, info_size, offsets[i]))
continue;
/* check if all possible BTT Info are zeroed */
if (zeroed)
*zeroed &= util_is_zeroed((const void *)infop,
info_size);
/* check if read BTT Info is valid */
if (pool_btt_info_valid(infop)) {
btt_info_convert2h(infop);
return offsets[i];
}
}
/* jump to next arena */
offsets[0] += BTT_MAX_ARENA;
}
return 0;
}
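/*
* A typical call, as in check_btt_info.c: scan for the first valid BTT Info
* header starting just past the pool header, without tracking whether the
* scanned headers were zeroed:
*
*	struct btt_info info;
*	uint64_t off = pool_get_first_valid_btt(pool, &info,
*		sizeof(struct pool_hdr), NULL);
*	if (off == 0)
*		... no valid BTT Info header was found ...
*/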
/*
* pool_get_min_size -- return the minimum pool size of a pool of a given type
*/
size_t
pool_get_min_size(enum pool_type type)
{
switch (type) {
case POOL_TYPE_LOG:
return PMEMLOG_MIN_POOL;
case POOL_TYPE_BLK:
return PMEMBLK_MIN_POOL;
case POOL_TYPE_OBJ:
return PMEMOBJ_MIN_POOL;
default:
ERR("unknown type of a pool");
return SIZE_MAX;
}
}
#if FAULT_INJECTION
void
pmempool_inject_fault_at(enum pmem_allocation_type type, int nth,
const char *at)
{
core_inject_fault_at(type, nth, at);
}
int
pmempool_fault_injection_enabled(void)
{
return core_fault_injection_enabled();
}
#endif
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmempool/check_write.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* check_write.c -- write fixed data back
*/
#include <stdint.h>
#include <endian.h>
#include "out.h"
#include "btt.h"
#include "libpmempool.h"
#include "pmempool.h"
#include "pool.h"
#include "check_util.h"
enum questions {
Q_REPAIR_MAP,
Q_REPAIR_FLOG,
};
/*
* log_write -- (internal) write all structures for log pool
*/
static int
log_write(PMEMpoolcheck *ppc, location *loc)
{
LOG(3, NULL);
if (CHECK_WITHOUT_FIXING(ppc))
return 0;
/* endianness conversion */
struct pmemlog *log = &ppc->pool->hdr.log;
log_convert2le(log);
if (pool_write(ppc->pool, log, sizeof(*log), 0)) {
ppc->result = CHECK_RESULT_CANNOT_REPAIR;
return CHECK_ERR(ppc, "writing pmemlog structure failed");
}
return 0;
}
/*
* blk_write_flog -- (internal) convert and write flog to file
*/
static int
blk_write_flog(PMEMpoolcheck *ppc, struct arena *arenap)
{
if (!arenap->flog) {
ppc->result = CHECK_RESULT_ERROR;
return CHECK_ERR(ppc, "flog is missing");
}
uint64_t flogoff = arenap->offset + arenap->btt_info.flogoff;
uint8_t *ptr = arenap->flog;
uint32_t i;
for (i = 0; i < arenap->btt_info.nfree; i++) {
struct btt_flog *flog = (struct btt_flog *)ptr;
btt_flog_convert2le(&flog[0]);
btt_flog_convert2le(&flog[1]);
ptr += BTT_FLOG_PAIR_ALIGN;
}
if (pool_write(ppc->pool, arenap->flog, arenap->flogsize, flogoff)) {
CHECK_INFO(ppc, "%s", ppc->path);
ppc->result = CHECK_RESULT_CANNOT_REPAIR;
return CHECK_ERR(ppc, "arena %u: writing BTT FLOG failed\n",
arenap->id);
}
return 0;
}
/*
* blk_write_map -- (internal) convert and write map to file
*/
static int
blk_write_map(PMEMpoolcheck *ppc, struct arena *arenap)
{
if (!arenap->map) {
ppc->result = CHECK_RESULT_ERROR;
return CHECK_ERR(ppc, "map is missing");
}
uint64_t mapoff = arenap->offset + arenap->btt_info.mapoff;
uint32_t i;
for (i = 0; i < arenap->btt_info.external_nlba; i++)
arenap->map[i] = htole32(arenap->map[i]);
if (pool_write(ppc->pool, arenap->map, arenap->mapsize, mapoff)) {
CHECK_INFO(ppc, "%s", ppc->path);
ppc->result = CHECK_RESULT_CANNOT_REPAIR;
return CHECK_ERR(ppc, "arena %u: writing BTT map failed\n",
arenap->id);
}
return 0;
}
/*
* blk_write -- (internal) write all structures for blk pool
*/
static int
blk_write(PMEMpoolcheck *ppc, location *loc)
{
LOG(3, NULL);
if (CHECK_WITHOUT_FIXING(ppc))
return 0;
/* endianness conversion */
ppc->pool->hdr.blk.bsize = htole32(ppc->pool->hdr.blk.bsize);
if (pool_write(ppc->pool, &ppc->pool->hdr.blk,
sizeof(ppc->pool->hdr.blk), 0)) {
CHECK_INFO(ppc, "%s", ppc->path);
ppc->result = CHECK_RESULT_CANNOT_REPAIR;
return CHECK_ERR(ppc, "writing pmemblk structure failed");
}
return 0;
}
/*
* btt_data_write -- (internal) write BTT data
*/
static int
btt_data_write(PMEMpoolcheck *ppc, location *loc)
{
LOG(3, NULL);
struct arena *arenap;
PMDK_TAILQ_FOREACH(arenap, &ppc->pool->arenas, next) {
if (ppc->pool->uuid_op == UUID_NOT_FROM_BTT) {
memcpy(arenap->btt_info.parent_uuid,
ppc->pool->hdr.pool.poolset_uuid,
sizeof(arenap->btt_info.parent_uuid));
util_checksum(&arenap->btt_info,
sizeof(arenap->btt_info),
&arenap->btt_info.checksum, 1, 0);
}
if (pool_write(ppc->pool, &arenap->btt_info,
sizeof(arenap->btt_info), arenap->offset)) {
CHECK_INFO(ppc, "%s", ppc->path);
CHECK_ERR(ppc, "arena %u: writing BTT Info failed",
arenap->id);
goto error;
}
if (pool_write(ppc->pool, &arenap->btt_info,
sizeof(arenap->btt_info), arenap->offset +
le64toh(arenap->btt_info.infooff))) {
CHECK_INFO(ppc, "%s", ppc->path);
CHECK_ERR(ppc,
"arena %u: writing BTT Info backup failed",
arenap->id);
goto error;
}
if (blk_write_flog(ppc, arenap))
goto error;
if (blk_write_map(ppc, arenap))
goto error;
}
return 0;
error:
ppc->result = CHECK_RESULT_CANNOT_REPAIR;
return -1;
}
struct step {
int (*func)(PMEMpoolcheck *, location *loc);
enum pool_type type;
};
static const struct step steps[] = {
{
.func = log_write,
.type = POOL_TYPE_LOG,
},
{
.func = blk_write,
.type = POOL_TYPE_BLK,
},
{
.func = btt_data_write,
.type = POOL_TYPE_BLK | POOL_TYPE_BTT,
},
{
.func = NULL,
},
};
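/*
* Each entry above runs only for pools whose type is included in the
* entry's type mask; step_exe() below skips all other entries.
*/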
/*
* step_exe -- (internal) perform single step according to its parameters
*/
static inline int
step_exe(PMEMpoolcheck *ppc, location *loc)
{
ASSERT(loc->step < ARRAY_SIZE(steps));
const struct step *step = &steps[loc->step++];
/* check step conditions */
if (!(step->type & ppc->pool->params.type))
return 0;
return step->func(ppc, loc);
}
/*
* check_write -- write fixed data back
*/
void
check_write(PMEMpoolcheck *ppc)
{
/*
* XXX: Disabling individual checks based on type should be done in the
* step structure. This, however, requires a refactor of the step
* processing code.
*/
if (CHECK_IS_NOT(ppc, REPAIR))
return;
location *loc = (location *)check_get_step_data(ppc->data);
/* do all steps */
while (loc->step != CHECK_STEP_COMPLETE &&
steps[loc->step].func != NULL) {
if (step_exe(ppc, loc))
return;
}
}
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem/pmem_posix.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2019, Intel Corporation */
/*
* pmem_posix.c -- pmem utilities with Posix implementation
*/
#include <stddef.h>
#include <sys/mman.h>
#include "pmem.h"
#include "out.h"
#include "mmap.h"
/*
* is_pmem_detect -- implement pmem_is_pmem()
*
* This function returns true only if the entire range can be confirmed
* as being direct access persistent memory. Finding that any part of the
* range is not direct access, or failing to look up the information
* because it is unmapped or because any sort of error happens, just
* results in returning false.
*/
int
is_pmem_detect(const void *addr, size_t len)
{
LOG(3, "addr %p len %zu", addr, len);
if (len == 0)
return 0;
int retval = util_range_is_pmem(addr, len);
LOG(4, "returning %d", retval);
return retval;
}
/*
* pmem_map_register -- memory map file and register mapping
*/
void *
pmem_map_register(int fd, size_t len, const char *path, int is_dev_dax)
{
LOG(3, "fd %d len %zu path %s id_dev_dax %d",
fd, len, path, is_dev_dax);
void *addr;
int map_sync;
addr = util_map(fd, 0, len, MAP_SHARED, 0, 0, &map_sync);
if (!addr)
return NULL;
enum pmem_map_type type = MAX_PMEM_TYPE;
if (is_dev_dax)
type = PMEM_DEV_DAX;
else if (map_sync)
type = PMEM_MAP_SYNC;
if (type != MAX_PMEM_TYPE) {
if (util_range_register(addr, len, path, type)) {
LOG(1, "can't track mapped region");
goto err_unmap;
}
}
return addr;
err_unmap:
util_unmap(addr, len);
return NULL;
}
/*
* pmem_os_init -- os-dependent part of pmem initialization
*/
void
pmem_os_init(is_pmem_func *func)
{
LOG(3, NULL);
*func = is_pmem_detect;
}
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem/libpmem.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2017, Intel Corporation */
/*
* libpmem.c -- pmem entry points for libpmem
*/
#include <stdio.h>
#include <stdint.h>
#include "libpmem.h"
#include "pmem.h"
#include "pmemcommon.h"
/*
* libpmem_init -- load-time initialization for libpmem
*
* Called automatically by the run-time loader.
*/
ATTR_CONSTRUCTOR
void
libpmem_init(void)
{
common_init(PMEM_LOG_PREFIX, PMEM_LOG_LEVEL_VAR, PMEM_LOG_FILE_VAR,
PMEM_MAJOR_VERSION, PMEM_MINOR_VERSION);
LOG(3, NULL);
pmem_init();
}
/*
* libpmem_fini -- libpmem cleanup routine
*
* Called automatically when the process terminates.
*/
ATTR_DESTRUCTOR
void
libpmem_fini(void)
{
LOG(3, NULL);
common_fini();
}
/*
* pmem_check_versionU -- see if library meets application version requirements
*/
#ifndef _WIN32
static inline
#endif
const char *
pmem_check_versionU(unsigned major_required, unsigned minor_required)
{
LOG(3, "major_required %u minor_required %u",
major_required, minor_required);
if (major_required != PMEM_MAJOR_VERSION) {
ERR("libpmem major version mismatch (need %u, found %u)",
major_required, PMEM_MAJOR_VERSION);
return out_get_errormsg();
}
if (minor_required > PMEM_MINOR_VERSION) {
ERR("libpmem minor version mismatch (need %u, found %u)",
minor_required, PMEM_MINOR_VERSION);
return out_get_errormsg();
}
return NULL;
}
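/*
* A minimal usage sketch: applications typically verify the library version
* once at startup, e.g.
*
*	if (pmem_check_version(PMEM_MAJOR_VERSION,
*			PMEM_MINOR_VERSION) != NULL) {
*		fprintf(stderr, "%s\n", pmem_errormsg());
*		exit(1);
*	}
*/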
#ifndef _WIN32
/*
* pmem_check_version -- see if library meets application version requirements
*/
const char *
pmem_check_version(unsigned major_required, unsigned minor_required)
{
return pmem_check_versionU(major_required, minor_required);
}
#else
/*
* pmem_check_versionW -- see if library meets application version requirements
*/
const wchar_t *
pmem_check_versionW(unsigned major_required, unsigned minor_required)
{
if (pmem_check_versionU(major_required, minor_required) != NULL)
return out_get_errormsgW();
else
return NULL;
}
#endif
/*
* pmem_errormsgU -- return last error message
*/
#ifndef _WIN32
static inline
#endif
const char *
pmem_errormsgU(void)
{
return out_get_errormsg();
}
#ifndef _WIN32
/*
* pmem_errormsg -- return last error message
*/
const char *
pmem_errormsg(void)
{
return pmem_errormsgU();
}
#else
/*
* pmem_errormsgW -- return last error message as wchar_t
*/
const wchar_t *
pmem_errormsgW(void)
{
return out_get_errormsgW();
}
#endif
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem/libpmem_main.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2018, Intel Corporation */
/*
* libpmem_main.c -- entry point for libpmem.dll
*
* XXX - This is a placeholder. All the library initialization/cleanup
* that is done in library ctors/dtors, as well as TLS initialization
* should be moved here.
*/
#include "win_mmap.h"
void libpmem_init(void);
void libpmem_fini(void);
int APIENTRY
DllMain(HINSTANCE hInstance, DWORD dwReason, LPVOID lpReserved)
{
switch (dwReason) {
case DLL_PROCESS_ATTACH:
libpmem_init();
win_mmap_init();
break;
case DLL_THREAD_ATTACH:
case DLL_THREAD_DETACH:
break;
case DLL_PROCESS_DETACH:
win_mmap_fini();
libpmem_fini();
break;
}
return TRUE;
}
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem/pmem.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2020, Intel Corporation */
/*
* pmem.c -- pmem entry points for libpmem
*
*
* PERSISTENT MEMORY INSTRUCTIONS ON X86
*
* The primary feature of this library is to provide a way to flush
* changes to persistent memory as outlined below (note that many
* of the decisions below are made at initialization time, and not
* repeated every time a flush is requested).
*
* To flush a range to pmem when CLWB is available:
*
* CLWB for each cache line in the given range.
*
* SFENCE to ensure the CLWBs above have completed.
*
* To flush a range to pmem when CLFLUSHOPT is available and CLWB is not
* (same as above but issue CLFLUSHOPT instead of CLWB):
*
* CLFLUSHOPT for each cache line in the given range.
*
* SFENCE to ensure the CLFLUSHOPTs above have completed.
*
* To flush a range to pmem when neither CLFLUSHOPT nor CLWB is available
* (same as above but fences surrounding CLFLUSH are not required):
*
* CLFLUSH for each cache line in the given range.
*
* To memcpy a range of memory to pmem when MOVNT is available:
*
* Copy any non-64-byte-aligned leading portion of the destination using MOV.
*
* Use the flush flow above without the fence for the copied portion.
*
* Copy using MOVNTDQ, up to any non-64-byte aligned end portion.
* (The MOVNT instructions bypass the cache, so no flush is required.)
*
* Copy any unaligned end portion using MOV.
*
* Use the flush flow above for the copied portion (including fence).
*
* To memcpy a range of memory to pmem when MOVNT is not available:
*
* Just pass the call to the normal memcpy() followed by pmem_persist().
*
* To memset a non-trivial sized range of memory to pmem:
*
* Same as the memcpy cases above but store the given value instead
* of reading values from the source.
*
* These features are supported for ARM AARCH64 using equivalent ARM
* assembly instructions. Please refer to arm_cacheops.h for more details.
*
* INTERFACES FOR FLUSHING TO PERSISTENT MEMORY
*
* Given the flows above, three interfaces are provided for flushing a range
* so that the caller has the ability to separate the steps when necessary,
* but otherwise leaves the detection of available instructions to the libpmem:
*
* pmem_persist(addr, len)
*
* This is the common case, which just calls the two other functions:
*
* pmem_flush(addr, len);
* pmem_drain();
*
* pmem_flush(addr, len)
*
* CLWB or CLFLUSHOPT or CLFLUSH for each cache line
*
* pmem_drain()
*
* SFENCE unless using CLFLUSH
*
*
* INTERFACES FOR COPYING/SETTING RANGES OF MEMORY
*
* Given the flows above, the following interfaces are provided for the
* memmove/memcpy/memset operations to persistent memory:
*
* pmem_memmove_nodrain()
*
* Checks for overlapped ranges to determine whether to copy from
* the beginning of the range or from the end. If MOVNT instructions
* are available, uses the memory copy flow described above, otherwise
* calls the libc memmove() followed by pmem_flush(). Since no conditional
 * compilation or architecture-specific CFLAGS are in use at the
 * moment, SSE2 (and thus MOVNT) is simply assumed to be available.
*
* pmem_memcpy_nodrain()
*
* Just calls pmem_memmove_nodrain().
*
* pmem_memset_nodrain()
*
* If MOVNT instructions are available, uses the memset flow described
* above, otherwise calls the libc memset() followed by pmem_flush().
*
* pmem_memmove_persist()
* pmem_memcpy_persist()
* pmem_memset_persist()
*
* Calls the appropriate _nodrain() function followed by pmem_drain().
*
*
* DECISIONS MADE AT INITIALIZATION TIME
*
* As much as possible, all decisions described above are made at library
* initialization time. This is achieved using function pointers that are
* setup by pmem_init() when the library loads.
*
* Func_fence is used by pmem_drain() to call one of:
* fence_empty()
* memory_barrier()
*
* Func_flush is used by pmem_flush() to call one of:
* flush_dcache()
* flush_dcache_invalidate_opt()
* flush_dcache_invalidate()
*
* Func_memmove_nodrain is used by memmove_nodrain() to call one of:
* memmove_nodrain_libc()
* memmove_nodrain_movnt()
*
* Func_memset_nodrain is used by memset_nodrain() to call one of:
* memset_nodrain_libc()
* memset_nodrain_movnt()
*
* DEBUG LOGGING
*
* Many of the functions here get called hundreds of times from loops
* iterating over ranges, making the usual LOG() calls at level 3
* impractical. The call tracing log for those functions is set at 15.
*/
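/*
 * The following sketch (guarded out, not part of libpmem itself; the file
 * name and sizes are hypothetical) illustrates the typical usage of the
 * interfaces described above.
 */
#if 0
#include <libpmem.h>
#include <string.h>

static int
example_usage(void)
{
	size_t mapped_len;
	int is_pmem;

	/* memory-map a 4 KiB file, creating it if it does not exist */
	char *addr = pmem_map_file("/mnt/pmem/example", 4096,
			PMEM_FILE_CREATE, 0666, &mapped_len, &is_pmem);
	if (addr == NULL)
		return -1;

	strcpy(addr, "hello");

	if (is_pmem)
		pmem_persist(addr, 6);	/* flush + drain, as described above */
	else
		pmem_msync(addr, 6);	/* fall back to msync for non-pmem */

	pmem_unmap(addr, mapped_len);
	return 0;
}
#endif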
#include <sys/mman.h>
#include <sys/stat.h>
#include <errno.h>
#include <fcntl.h>
#include "libpmem.h"
#include "pmem.h"
#include "pmem2_arch.h"
#include "out.h"
#include "os.h"
#include "mmap.h"
#include "file.h"
#include "valgrind_internal.h"
#include "os_deep.h"
#include "auto_flush.h"
struct pmem_funcs {
memmove_nodrain_func memmove_nodrain;
memset_nodrain_func memset_nodrain;
flush_func deep_flush;
flush_func flush;
fence_func fence;
};
static struct pmem_funcs Funcs;
static is_pmem_func Is_pmem = NULL;
/*
* pmem_has_hw_drain -- return whether or not HW drain was found
*
* Always false for x86: HW drain is done by HW with no SW involvement.
*/
int
pmem_has_hw_drain(void)
{
LOG(3, NULL);
return 0;
}
/*
* pmem_drain -- wait for any PM stores to drain from HW buffers
*/
void
pmem_drain(void)
{
LOG(15, NULL);
Funcs.fence();
}
/*
* pmem_has_auto_flush -- check if platform supports eADR
*/
int
pmem_has_auto_flush()
{
LOG(3, NULL);
return pmem2_auto_flush();
}
/*
* pmem_deep_flush -- flush processor cache for the given range
* regardless of eADR support on platform
*/
void
pmem_deep_flush(const void *addr, size_t len)
{
LOG(15, "addr %p len %zu", addr, len);
VALGRIND_DO_CHECK_MEM_IS_ADDRESSABLE(addr, len);
Funcs.deep_flush(addr, len);
}
/*
* pmem_flush -- flush processor cache for the given range
*/
void
pmem_flush(const void *addr, size_t len)
{
LOG(15, "addr %p len %zu", addr, len);
VALGRIND_DO_CHECK_MEM_IS_ADDRESSABLE(addr, len);
Funcs.flush(addr, len);
}
/*
* pmem_persist -- make any cached changes to a range of pmem persistent
*/
void
pmem_persist(const void *addr, size_t len)
{
LOG(15, "addr %p len %zu", addr, len);
pmem_flush(addr, len);
pmem_drain();
}
/*
* pmem_msync -- flush to persistence via msync
*
 * Using msync() means this routine is less optimal for pmem (though it
 * still works), but it also works for any memory-mapped file, unlike
 * pmem_persist(), which is only safe where pmem_is_pmem() returns true.
*/
int
pmem_msync(const void *addr, size_t len)
{
LOG(15, "addr %p len %zu", addr, len);
VALGRIND_DO_CHECK_MEM_IS_ADDRESSABLE(addr, len);
/*
* msync requires addr to be a multiple of pagesize but there are no
* requirements for len. Align addr down and change len so that
* [addr, addr + len) still contains initial range.
*/
/* increase len by the amount we gain when we round addr down */
len += (uintptr_t)addr & (Pagesize - 1);
/* round addr down to page boundary */
uintptr_t uptr = (uintptr_t)addr & ~((uintptr_t)Pagesize - 1);
/*
 * msync accepts addresses aligned to a page boundary, so we may sync
 * more than requested, and part of that range may have been marked as
 * undefined/inaccessible. Msyncing such memory is not a bug, so as a
 * workaround temporarily disable error reporting.
*/
VALGRIND_DO_DISABLE_ERROR_REPORTING;
int ret;
if ((ret = msync((void *)uptr, len, MS_SYNC)) < 0)
ERR("!msync");
VALGRIND_DO_ENABLE_ERROR_REPORTING;
/* full flush */
VALGRIND_DO_PERSIST(uptr, len);
return ret;
}
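/*
 * Worked example of the rounding above (hypothetical numbers): with
 * Pagesize == 4096, addr == 0x100f and len == 100, the first step adds
 * 0xf to len (len == 115) and the second aligns uptr down to 0x1000,
 * so [0x1000, 0x1000 + 115) still covers the original
 * [0x100f, 0x100f + 100) range.
 */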
/*
* is_pmem_always -- (internal) always true (for meaningful parameters) version
* of pmem_is_pmem()
*/
static int
is_pmem_always(const void *addr, size_t len)
{
LOG(3, "addr %p len %zu", addr, len);
if (len == 0)
return 0;
return 1;
}
/*
* is_pmem_never -- (internal) never true version of pmem_is_pmem()
*/
static int
is_pmem_never(const void *addr, size_t len)
{
LOG(3, "addr %p len %zu", addr, len);
return 0;
}
/*
 * pmem_is_pmem_init -- (internal) initialize the Is_pmem function pointer
 *
 * This should be done only once - on the first call to pmem_is_pmem().
 * If PMEM_IS_PMEM_FORCE is set, it overrides the default behavior
 * of pmem_is_pmem().
*/
static void
pmem_is_pmem_init(void)
{
LOG(3, NULL);
static volatile unsigned init;
while (init != 2) {
if (!util_bool_compare_and_swap32(&init, 0, 1))
continue;
/*
* For debugging/testing, allow pmem_is_pmem() to be forced
* to always true or never true using environment variable
* PMEM_IS_PMEM_FORCE values of zero or one.
*
* This isn't #ifdef DEBUG because it has a trivial performance
* impact and it may turn out to be useful as a "chicken bit"
* for systems where pmem_is_pmem() isn't correctly detecting
* true persistent memory.
*/
char *ptr = os_getenv("PMEM_IS_PMEM_FORCE");
if (ptr) {
int val = atoi(ptr);
if (val == 0)
Is_pmem = is_pmem_never;
else if (val == 1)
Is_pmem = is_pmem_always;
VALGRIND_ANNOTATE_HAPPENS_BEFORE(&Is_pmem);
LOG(4, "PMEM_IS_PMEM_FORCE=%d", val);
}
if (Funcs.deep_flush == NULL)
Is_pmem = is_pmem_never;
if (!util_bool_compare_and_swap32(&init, 1, 2))
FATAL("util_bool_compare_and_swap32");
}
}
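/*
 * Example (shell, hypothetical): force pmem_is_pmem() to always return
 * true while testing on emulated pmem:
 *
 *	$ PMEM_IS_PMEM_FORCE=1 ./my_pmem_app
 */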
/*
* pmem_is_pmem -- return true if entire range is persistent memory
*/
int
pmem_is_pmem(const void *addr, size_t len)
{
LOG(10, "addr %p len %zu", addr, len);
static int once;
/* This is not thread-safe, but pmem_is_pmem_init() is. */
if (once == 0) {
pmem_is_pmem_init();
util_fetch_and_add32(&once, 1);
}
VALGRIND_ANNOTATE_HAPPENS_AFTER(&Is_pmem);
return Is_pmem(addr, len);
}
#define PMEM_FILE_ALL_FLAGS\
(PMEM_FILE_CREATE|PMEM_FILE_EXCL|PMEM_FILE_SPARSE|PMEM_FILE_TMPFILE)
#define PMEM_DAX_VALID_FLAGS\
(PMEM_FILE_CREATE|PMEM_FILE_SPARSE)
/*
* pmem_map_fileU -- create or open the file and map it to memory
*/
#ifndef _WIN32
static inline
#endif
void *
pmem_map_fileU(const char *path, size_t len, int flags,
mode_t mode, size_t *mapped_lenp, int *is_pmemp)
{
LOG(3, "path \"%s\" size %zu flags %x mode %o mapped_lenp %p "
"is_pmemp %p", path, len, flags, mode, mapped_lenp, is_pmemp);
int oerrno;
int fd;
int open_flags = O_RDWR;
int delete_on_err = 0;
int file_type = util_file_get_type(path);
#ifdef _WIN32
open_flags |= O_BINARY;
#endif
if (file_type == OTHER_ERROR)
return NULL;
if (flags & ~(PMEM_FILE_ALL_FLAGS)) {
ERR("invalid flag specified %x", flags);
errno = EINVAL;
return NULL;
}
if (file_type == TYPE_DEVDAX) {
if (flags & ~(PMEM_DAX_VALID_FLAGS)) {
ERR("flag unsupported for Device DAX %x", flags);
errno = EINVAL;
return NULL;
} else {
/* we are ignoring all of the flags */
flags = 0;
ssize_t actual_len = util_file_get_size(path);
if (actual_len < 0) {
ERR("unable to read Device DAX size");
errno = EINVAL;
return NULL;
}
if (len != 0 && len != (size_t)actual_len) {
ERR("Device DAX length must be either 0 or "
"the exact size of the device: %zu",
actual_len);
errno = EINVAL;
return NULL;
}
len = 0;
}
}
if (flags & PMEM_FILE_CREATE) {
if ((os_off_t)len < 0) {
ERR("invalid file length %zu", len);
errno = EINVAL;
return NULL;
}
open_flags |= O_CREAT;
}
if (flags & PMEM_FILE_EXCL)
open_flags |= O_EXCL;
if ((len != 0) && !(flags & PMEM_FILE_CREATE)) {
ERR("non-zero 'len' not allowed without PMEM_FILE_CREATE");
errno = EINVAL;
return NULL;
}
if ((len == 0) && (flags & PMEM_FILE_CREATE)) {
ERR("zero 'len' not allowed with PMEM_FILE_CREATE");
errno = EINVAL;
return NULL;
}
if ((flags & PMEM_FILE_TMPFILE) && !(flags & PMEM_FILE_CREATE)) {
ERR("PMEM_FILE_TMPFILE not allowed without PMEM_FILE_CREATE");
errno = EINVAL;
return NULL;
}
if (flags & PMEM_FILE_TMPFILE) {
if ((fd = util_tmpfile(path,
OS_DIR_SEP_STR"pmem.XXXXXX",
open_flags & O_EXCL)) < 0) {
LOG(2, "failed to create temporary file at \"%s\"",
path);
return NULL;
}
} else {
if ((fd = os_open(path, open_flags, mode)) < 0) {
ERR("!open %s", path);
return NULL;
}
if ((flags & PMEM_FILE_CREATE) && (flags & PMEM_FILE_EXCL))
delete_on_err = 1;
}
if (flags & PMEM_FILE_CREATE) {
/*
* Always set length of file to 'len'.
* (May either extend or truncate existing file.)
*/
if (os_ftruncate(fd, (os_off_t)len) != 0) {
ERR("!ftruncate");
goto err;
}
if ((flags & PMEM_FILE_SPARSE) == 0) {
if ((errno = os_posix_fallocate(fd, 0,
(os_off_t)len)) != 0) {
ERR("!posix_fallocate");
goto err;
}
}
} else {
ssize_t actual_size = util_fd_get_size(fd);
if (actual_size < 0) {
ERR("stat %s: negative size", path);
errno = EINVAL;
goto err;
}
len = (size_t)actual_size;
}
void *addr = pmem_map_register(fd, len, path, file_type == TYPE_DEVDAX);
if (addr == NULL)
goto err;
if (mapped_lenp != NULL)
*mapped_lenp = len;
if (is_pmemp != NULL)
*is_pmemp = pmem_is_pmem(addr, len);
LOG(3, "returning %p", addr);
VALGRIND_REGISTER_PMEM_MAPPING(addr, len);
VALGRIND_REGISTER_PMEM_FILE(fd, addr, len, 0);
(void) os_close(fd);
return addr;
err:
oerrno = errno;
(void) os_close(fd);
if (delete_on_err)
(void) os_unlink(path);
errno = oerrno;
return NULL;
}
#ifndef _WIN32
/*
* pmem_map_file -- create or open the file and map it to memory
*/
void *
pmem_map_file(const char *path, size_t len, int flags,
mode_t mode, size_t *mapped_lenp, int *is_pmemp)
{
return pmem_map_fileU(path, len, flags, mode, mapped_lenp, is_pmemp);
}
#else
/*
* pmem_map_fileW -- create or open the file and map it to memory
*/
void *
pmem_map_fileW(const wchar_t *path, size_t len, int flags, mode_t mode,
size_t *mapped_lenp, int *is_pmemp) {
char *upath = util_toUTF8(path);
if (upath == NULL)
return NULL;
void *ret = pmem_map_fileU(upath, len, flags, mode, mapped_lenp,
is_pmemp);
util_free_UTF8(upath);
return ret;
}
#endif
/*
* pmem_unmap -- unmap the specified region
*/
int
pmem_unmap(void *addr, size_t len)
{
LOG(3, "addr %p len %zu", addr, len);
#ifndef _WIN32
util_range_unregister(addr, len);
#endif
VALGRIND_REMOVE_PMEM_MAPPING(addr, len);
return util_unmap(addr, len);
}
/*
* pmem_memmove -- memmove to pmem
*/
void *
pmem_memmove(void *pmemdest, const void *src, size_t len, unsigned flags)
{
LOG(15, "pmemdest %p src %p len %zu flags 0x%x",
pmemdest, src, len, flags);
#ifdef DEBUG
if (flags & ~PMEM_F_MEM_VALID_FLAGS)
ERR("invalid flags 0x%x", flags);
#endif
PMEM_API_START();
Funcs.memmove_nodrain(pmemdest, src, len, flags & ~PMEM_F_MEM_NODRAIN,
Funcs.flush);
if ((flags & (PMEM_F_MEM_NODRAIN | PMEM_F_MEM_NOFLUSH)) == 0)
pmem_drain();
PMEM_API_END();
return pmemdest;
}
/*
* pmem_memcpy -- memcpy to pmem
*/
void *
pmem_memcpy(void *pmemdest, const void *src, size_t len, unsigned flags)
{
LOG(15, "pmemdest %p src %p len %zu flags 0x%x",
pmemdest, src, len, flags);
#ifdef DEBUG
if (flags & ~PMEM_F_MEM_VALID_FLAGS)
ERR("invalid flags 0x%x", flags);
#endif
PMEM_API_START();
Funcs.memmove_nodrain(pmemdest, src, len, flags & ~PMEM_F_MEM_NODRAIN,
Funcs.flush);
if ((flags & (PMEM_F_MEM_NODRAIN | PMEM_F_MEM_NOFLUSH)) == 0)
pmem_drain();
PMEM_API_END();
return pmemdest;
}
/*
* pmem_memset -- memset to pmem
*/
void *
pmem_memset(void *pmemdest, int c, size_t len, unsigned flags)
{
LOG(15, "pmemdest %p c 0x%x len %zu flags 0x%x",
pmemdest, c, len, flags);
#ifdef DEBUG
if (flags & ~PMEM_F_MEM_VALID_FLAGS)
ERR("invalid flags 0x%x", flags);
#endif
PMEM_API_START();
Funcs.memset_nodrain(pmemdest, c, len, flags & ~PMEM_F_MEM_NODRAIN,
Funcs.flush);
if ((flags & (PMEM_F_MEM_NODRAIN | PMEM_F_MEM_NOFLUSH)) == 0)
pmem_drain();
PMEM_API_END();
return pmemdest;
}
/*
* pmem_memmove_nodrain -- memmove to pmem without hw drain
*/
void *
pmem_memmove_nodrain(void *pmemdest, const void *src, size_t len)
{
LOG(15, "pmemdest %p src %p len %zu", pmemdest, src, len);
PMEM_API_START();
Funcs.memmove_nodrain(pmemdest, src, len, 0, Funcs.flush);
PMEM_API_END();
return pmemdest;
}
/*
* pmem_memcpy_nodrain -- memcpy to pmem without hw drain
*/
void *
pmem_memcpy_nodrain(void *pmemdest, const void *src, size_t len)
{
LOG(15, "pmemdest %p src %p len %zu", pmemdest, src, len);
PMEM_API_START();
Funcs.memmove_nodrain(pmemdest, src, len, 0, Funcs.flush);
PMEM_API_END();
return pmemdest;
}
/*
* pmem_memmove_persist -- memmove to pmem
*/
void *
pmem_memmove_persist(void *pmemdest, const void *src, size_t len)
{
LOG(15, "pmemdest %p src %p len %zu", pmemdest, src, len);
PMEM_API_START();
Funcs.memmove_nodrain(pmemdest, src, len, 0, Funcs.flush);
pmem_drain();
PMEM_API_END();
return pmemdest;
}
/*
* pmem_memcpy_persist -- memcpy to pmem
*/
void *
pmem_memcpy_persist(void *pmemdest, const void *src, size_t len)
{
LOG(15, "pmemdest %p src %p len %zu", pmemdest, src, len);
PMEM_API_START();
Funcs.memmove_nodrain(pmemdest, src, len, 0, Funcs.flush);
pmem_drain();
PMEM_API_END();
return pmemdest;
}
/*
* pmem_memset_nodrain -- memset to pmem without hw drain
*/
void *
pmem_memset_nodrain(void *pmemdest, int c, size_t len)
{
LOG(15, "pmemdest %p c %d len %zu", pmemdest, c, len);
PMEM_API_START();
Funcs.memset_nodrain(pmemdest, c, len, 0, Funcs.flush);
PMEM_API_END();
return pmemdest;
}
/*
* pmem_memset_persist -- memset to pmem
*/
void *
pmem_memset_persist(void *pmemdest, int c, size_t len)
{
LOG(15, "pmemdest %p c %d len %zu", pmemdest, c, len);
PMEM_API_START();
Funcs.memset_nodrain(pmemdest, c, len, 0, Funcs.flush);
pmem_drain();
PMEM_API_END();
return pmemdest;
}
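/*
 * The _nodrain() variants above allow batching: a minimal sketch (guarded
 * out, names hypothetical) that issues several copies and pays for a
 * single drain at the end.
 */
#if 0
static void
copy_batch(char *pmemdest, char **bufs, size_t *lens, int n)
{
	size_t off = 0;

	for (int i = 0; i < n; i++) {
		pmem_memcpy_nodrain(pmemdest + off, bufs[i], lens[i]);
		off += lens[i];
	}
	pmem_drain();	/* a single fence covers all the copies above */
}
#endif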
/*
* memmove_nodrain_libc -- (internal) memmove to pmem using libc
*/
static void *
memmove_nodrain_libc(void *pmemdest, const void *src, size_t len,
unsigned flags, flush_func flush)
{
LOG(15, "pmemdest %p src %p len %zu flags 0x%x", pmemdest, src, len,
flags);
memmove(pmemdest, src, len);
if (!(flags & PMEM_F_MEM_NOFLUSH))
flush(pmemdest, len);
return pmemdest;
}
/*
* memset_nodrain_libc -- (internal) memset to pmem using libc
*/
static void *
memset_nodrain_libc(void *pmemdest, int c, size_t len, unsigned flags,
flush_func flush)
{
LOG(15, "pmemdest %p c 0x%x len %zu flags 0x%x", pmemdest, c, len,
flags);
memset(pmemdest, c, len);
if (!(flags & PMEM_F_MEM_NOFLUSH))
flush(pmemdest, len);
return pmemdest;
}
/*
* flush_empty -- (internal) do not flush the CPU cache
*/
static void
flush_empty(const void *addr, size_t len)
{
LOG(15, "addr %p len %zu", addr, len);
flush_empty_nolog(addr, len);
}
/*
* fence_empty -- (internal) issue the fence instruction
*/
static void
fence_empty(void)
{
LOG(15, NULL);
VALGRIND_DO_FENCE;
}
/*
* pmem_init -- load-time initialization for pmem.c
*/
void
pmem_init(void)
{
LOG(3, NULL);
struct pmem2_arch_info info;
info.memmove_nodrain = NULL;
info.memset_nodrain = NULL;
info.flush = NULL;
info.fence = NULL;
info.flush_has_builtin_fence = 0;
pmem2_arch_init(&info);
int flush;
char *e = os_getenv("PMEM_NO_FLUSH");
if (e && (strcmp(e, "1") == 0)) {
flush = 0;
LOG(3, "Forced not flushing CPU_cache");
} else if (e && (strcmp(e, "0") == 0)) {
flush = 1;
LOG(3, "Forced flushing CPU_cache");
} else if (pmem2_auto_flush() == 1) {
flush = 0;
LOG(3, "Not flushing CPU_cache, eADR detected");
} else {
flush = 1;
LOG(3, "Flushing CPU cache");
}
Funcs.deep_flush = info.flush;
if (flush) {
Funcs.flush = info.flush;
Funcs.memmove_nodrain = info.memmove_nodrain;
Funcs.memset_nodrain = info.memset_nodrain;
if (info.flush_has_builtin_fence)
Funcs.fence = fence_empty;
else
Funcs.fence = info.fence;
} else {
Funcs.memmove_nodrain = info.memmove_nodrain_eadr;
Funcs.memset_nodrain = info.memset_nodrain_eadr;
Funcs.flush = flush_empty;
Funcs.fence = info.fence;
}
char *ptr = os_getenv("PMEM_NO_GENERIC_MEMCPY");
long long no_generic = 0;
if (ptr)
no_generic = atoll(ptr);
if (info.memmove_nodrain == NULL) {
if (no_generic) {
Funcs.memmove_nodrain = memmove_nodrain_libc;
LOG(3, "using libc memmove");
} else {
Funcs.memmove_nodrain = memmove_nodrain_generic;
LOG(3, "using generic memmove");
}
} else {
Funcs.memmove_nodrain = info.memmove_nodrain;
}
if (info.memset_nodrain == NULL) {
if (no_generic) {
Funcs.memset_nodrain = memset_nodrain_libc;
LOG(3, "using libc memset");
} else {
Funcs.memset_nodrain = memset_nodrain_generic;
LOG(3, "using generic memset");
}
} else {
Funcs.memset_nodrain = info.memset_nodrain;
}
if (Funcs.flush == flush_empty)
LOG(3, "not flushing CPU cache");
else if (Funcs.flush != Funcs.deep_flush)
FATAL("invalid flush function address");
pmem_os_init(&Is_pmem);
}
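/*
 * Examples (shell, hypothetical) of the environment knobs honored above:
 *
 *	$ PMEM_NO_FLUSH=1 ./app          # behave as if eADR (skip CPU flushes)
 *	$ PMEM_NO_FLUSH=0 ./app          # force CPU cache flushing
 *	$ PMEM_NO_GENERIC_MEMCPY=1 ./app # prefer the libc memmove/memset paths
 */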
/*
* pmem_deep_persist -- perform deep persist on a memory range
*
* It merely acts as wrapper around an msync call in most cases, the only
* exception is the case of an mmap'ed DAX device on Linux.
*/
int
pmem_deep_persist(const void *addr, size_t len)
{
LOG(3, "addr %p len %zu", addr, len);
pmem_deep_flush(addr, len);
return pmem_deep_drain(addr, len);
}
/*
* pmem_deep_drain -- perform deep drain on a memory range
*/
int
pmem_deep_drain(const void *addr, size_t len)
{
LOG(3, "addr %p len %zu", addr, len);
return os_range_deep_common((uintptr_t)addr, len);
}
#if VG_PMEMCHECK_ENABLED
/*
* pmem_emit_log -- logs library and function names to pmemcheck store log
*/
void
pmem_emit_log(const char *func, int order)
{
util_emit_log("libpmem", func, order);
}
#endif
#if FAULT_INJECTION
void
pmem_inject_fault_at(enum pmem_allocation_type type, int nth,
const char *at)
{
core_inject_fault_at(type, nth, at);
}
int
pmem_fault_injection_enabled(void)
{
return core_fault_injection_enabled();
}
#endif
| 21,858 | 21.817328 | 79 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem/pmem_windows.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* Copyright (c) 2016, Microsoft Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* pmem_windows.c -- pmem utilities with OS-specific implementation
*/
#include <memoryapi.h>
#include "pmem.h"
#include "out.h"
#include "mmap.h"
#include "win_mmap.h"
#include "sys/mman.h"
#if (NTDDI_VERSION >= NTDDI_WIN10_RS1)
typedef BOOL (WINAPI *PQVM)(
HANDLE, const void *,
enum WIN32_MEMORY_INFORMATION_CLASS, PVOID,
SIZE_T, PSIZE_T);
static PQVM Func_qvmi = NULL;
#endif
/*
* is_direct_mapped -- (internal) for each page in the given region
* checks with MM, if it's direct mapped.
*/
static int
is_direct_mapped(const void *begin, const void *end)
{
LOG(3, "begin %p end %p", begin, end);
#if (NTDDI_VERSION >= NTDDI_WIN10_RS1)
int retval = 1;
WIN32_MEMORY_REGION_INFORMATION region_info;
SIZE_T bytes_returned;
if (Func_qvmi == NULL) {
LOG(4, "QueryVirtualMemoryInformation not supported, "
"assuming non-DAX.");
return 0;
}
const void *begin_aligned = (const void *)rounddown((intptr_t)begin,
Pagesize);
const void *end_aligned = (const void *)roundup((intptr_t)end,
Pagesize);
for (const void *page = begin_aligned;
page < end_aligned;
page = (const void *)((char *)page + Pagesize)) {
if (Func_qvmi(GetCurrentProcess(), page,
MemoryRegionInfo, &region_info,
sizeof(region_info), &bytes_returned)) {
retval = region_info.DirectMapped;
} else {
LOG(4, "QueryVirtualMemoryInformation failed, assuming "
"non-DAX. Last error: %08x", GetLastError());
retval = 0;
}
if (retval == 0) {
LOG(4, "page %p is not direct mapped", page);
break;
}
}
return retval;
#else
/* if the MM API is not available the safest answer is NO */
return 0;
#endif /* NTDDI_VERSION >= NTDDI_WIN10_RS1 */
}
/*
* is_pmem_detect -- implement pmem_is_pmem()
*
* This function returns true only if the entire range can be confirmed
* as being direct access persistent memory. Finding any part of the
* range is not direct access, or failing to look up the information
* because it is unmapped or because any sort of error happens, just
* results in returning false.
*/
int
is_pmem_detect(const void *addr, size_t len)
{
LOG(3, "addr %p len %zu", addr, len);
if (len == 0)
return 0;
if (len > UINTPTR_MAX - (uintptr_t)addr) {
len = UINTPTR_MAX - (uintptr_t)addr;
LOG(4, "limit len to %zu to not get beyond address space", len);
}
int retval = 1;
const void *begin = addr;
const void *end = (const void *)((char *)addr + len);
LOG(4, "begin %p end %p", begin, end);
AcquireSRWLockShared(&FileMappingQLock);
PFILE_MAPPING_TRACKER mt;
PMDK_SORTEDQ_FOREACH(mt, &FileMappingQHead, ListEntry) {
if (mt->BaseAddress >= end) {
LOG(4, "ignoring all mapped ranges beyond given range");
break;
}
if (mt->EndAddress <= begin) {
LOG(4, "skipping all mapped ranges before given range");
continue;
}
if (!(mt->Flags & FILE_MAPPING_TRACKER_FLAG_DIRECT_MAPPED)) {
LOG(4, "tracked range [%p, %p) is not direct mapped",
mt->BaseAddress, mt->EndAddress);
retval = 0;
break;
}
/*
* If there is a gap between the given region that we process
* currently and the mapped region in our tracking list, we
* need to process the gap by taking the long route of asking
* MM for each page in that range.
*/
if (begin < mt->BaseAddress &&
!is_direct_mapped(begin, mt->BaseAddress)) {
LOG(4, "untracked range [%p, %p) is not direct mapped",
begin, mt->BaseAddress);
retval = 0;
break;
}
/* push our begin to reflect what we have already processed */
begin = mt->EndAddress;
}
/*
* If we still have a range to verify, check with MM if the entire
* region is direct mapped.
*/
if (begin < end && !is_direct_mapped(begin, end)) {
LOG(4, "untracked end range [%p, %p) is not direct mapped",
begin, end);
retval = 0;
}
ReleaseSRWLockShared(&FileMappingQLock);
LOG(4, "returning %d", retval);
return retval;
}
/*
* pmem_map_register -- memory map file and register mapping
*/
void *
pmem_map_register(int fd, size_t len, const char *path, int is_dev_dax)
{
/* there is no device dax on windows */
ASSERTeq(is_dev_dax, 0);
return util_map(fd, 0, len, MAP_SHARED, 0, 0, NULL);
}
/*
* pmem_os_init -- os-dependent part of pmem initialization
*/
void
pmem_os_init(is_pmem_func *func)
{
LOG(3, NULL);
*func = is_pmem_detect;
#if NTDDI_VERSION >= NTDDI_WIN10_RS1
Func_qvmi = (PQVM)GetProcAddress(
GetModuleHandle(TEXT("KernelBase.dll")),
"QueryVirtualMemoryInformation");
#endif
}
| 6,186 | 27.643519 | 74 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem/pmem.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2020, Intel Corporation */
/*
* pmem.h -- internal definitions for libpmem
*/
#ifndef PMEM_H
#define PMEM_H
#include <stddef.h>
#include "alloc.h"
#include "fault_injection.h"
#include "util.h"
#include "valgrind_internal.h"
#ifdef __cplusplus
extern "C" {
#endif
#define PMEM_LOG_PREFIX "libpmem"
#define PMEM_LOG_LEVEL_VAR "PMEM_LOG_LEVEL"
#define PMEM_LOG_FILE_VAR "PMEM_LOG_FILE"
typedef int (*is_pmem_func)(const void *addr, size_t len);
void pmem_init(void);
void pmem_os_init(is_pmem_func *func);
int is_pmem_detect(const void *addr, size_t len);
void *pmem_map_register(int fd, size_t len, const char *path, int is_dev_dax);
#if FAULT_INJECTION
void
pmem_inject_fault_at(enum pmem_allocation_type type, int nth,
const char *at);
int
pmem_fault_injection_enabled(void);
#else
static inline void
pmem_inject_fault_at(enum pmem_allocation_type type, int nth,
const char *at)
{
abort();
}
static inline int
pmem_fault_injection_enabled(void)
{
return 0;
}
#endif
#ifdef __cplusplus
}
#endif
#endif
| 1,089 | 17.474576 | 78 |
h
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libvmem/README.md
|
This library has been moved to a
[separate repository](https://github.com/pmem/vmem).
| 86 | 28 | 52 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/auto_flush_windows.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018-2019, Intel Corporation */
#ifndef PMEM2_AUTO_FLUSH_WINDOWS_H
#define PMEM2_AUTO_FLUSH_WINDOWS_H 1
#define ACPI_SIGNATURE 0x41435049 /* hex value of ACPI signature */
#define NFIT_REV_SIGNATURE 0x5449464e /* hex value of htonl(NFIT) signature */
#define NFIT_STR_SIGNATURE "NFIT"
#define NFIT_SIGNATURE_LEN 4
#define NFIT_OEM_ID_LEN 6
#define NFIT_OEM_TABLE_ID_LEN 8
#define NFIT_MAX_STRUCTURES 8
#define PCS_RESERVED 3
#define PCS_RESERVED_2 4
#define PCS_TYPE_NUMBER 7
/* check if bit on 'bit' position in number 'num' is set */
#define CHECK_BIT(num, bit) (((num) >> (bit)) & 1)
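/* e.g. CHECK_BIT(0x4, 2) == 1 and CHECK_BIT(0x4, 0) == 0 (hypothetical values) */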
/*
* sets alignment of members of structure
*/
#pragma pack(1)
struct platform_capabilities
{
uint16_t type;
uint16_t length;
uint8_t highest_valid;
uint8_t reserved[PCS_RESERVED];
uint32_t capabilities;
uint8_t reserved2[PCS_RESERVED_2];
};
struct nfit_header
{
uint8_t signature[NFIT_SIGNATURE_LEN];
uint32_t length;
uint8_t revision;
uint8_t checksum;
uint8_t oem_id[NFIT_OEM_ID_LEN];
uint8_t oem_table_id[NFIT_OEM_TABLE_ID_LEN];
uint32_t oem_revision;
uint8_t creator_id[4];
uint32_t creator_revision;
uint32_t reserved;
};
#pragma pack()
#endif
| 1,215 | 22.843137 | 78 |
h
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/deep_flush_linux.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* deep_flush_linux.c -- deep_flush functionality
*/
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include "deep_flush.h"
#include "libpmem2.h"
#include "map.h"
#include "os.h"
#include "out.h"
#include "persist.h"
#include "pmem2_utils.h"
#include "region_namespace.h"
/*
* pmem2_deep_flush_write -- perform write to deep_flush file
* on given region_id
*/
int
pmem2_deep_flush_write(unsigned region_id)
{
LOG(3, "region_id %d", region_id);
char deep_flush_path[PATH_MAX];
int deep_flush_fd;
char rbuf[2];
if (util_snprintf(deep_flush_path, PATH_MAX,
"/sys/bus/nd/devices/region%u/deep_flush", region_id) < 0) {
ERR("!snprintf");
return PMEM2_E_ERRNO;
}
if ((deep_flush_fd = os_open(deep_flush_path, O_RDONLY)) < 0) {
LOG(1, "!os_open(\"%s\", O_RDONLY)", deep_flush_path);
return 0;
}
if (read(deep_flush_fd, rbuf, sizeof(rbuf)) != 2) {
LOG(1, "!read(%d)", deep_flush_fd);
goto end;
}
if (rbuf[0] == '0' && rbuf[1] == '\n') {
LOG(3, "Deep flushing not needed");
goto end;
}
os_close(deep_flush_fd);
if ((deep_flush_fd = os_open(deep_flush_path, O_WRONLY)) < 0) {
LOG(1, "Cannot open deep_flush file %s to write",
deep_flush_path);
return 0;
}
if (write(deep_flush_fd, "1", 1) != 1) {
LOG(1, "Cannot write to deep_flush file %d", deep_flush_fd);
goto end;
}
end:
os_close(deep_flush_fd);
return 0;
}
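/*
 * Example (values hypothetical): for region_id == 0 the file used above is
 * /sys/bus/nd/devices/region0/deep_flush. Reading "1\n" from it means a
 * deep flush is required and writing "1" triggers it; reading "0\n"
 * (handled above) means no deep flush is needed.
 */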
/*
 * pmem2_deep_flush_dax -- reads the file type of the mapping and checks
 * whether it is a device dax or a regular file; depending on the file
 * type, performs the proper flush operation
*/
int
pmem2_deep_flush_dax(struct pmem2_map *map, void *ptr, size_t size)
{
int ret;
enum pmem2_file_type type = map->source.value.ftype;
if (type == PMEM2_FTYPE_REG) {
ret = pmem2_flush_file_buffers_os(map, ptr, size, 0);
if (ret) {
LOG(1, "cannot flush buffers addr %p len %zu",
ptr, size);
return ret;
}
} else if (type == PMEM2_FTYPE_DEVDAX) {
unsigned region_id;
int ret = pmem2_get_region_id(&map->source, ®ion_id);
if (ret < 0) {
LOG(1, "cannot find region id for dev %lu",
map->source.value.st_rdev);
return ret;
}
ret = pmem2_deep_flush_write(region_id);
if (ret) {
LOG(1, "cannot write to deep_flush file for region %d",
region_id);
return ret;
}
} else {
ASSERT(0);
}
return 0;
}
| 2,395 | 20.392857 | 67 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/pmem2_utils_ndctl.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
#include <errno.h>
#include <ndctl/libndctl.h>
#include "libpmem2.h"
#include "out.h"
#include "pmem2_utils.h"
#include "region_namespace_ndctl.h"
#include "source.h"
/*
* pmem2_device_dax_alignment -- checks the alignment of a given
* dax device from given source
*/
int
pmem2_device_dax_alignment(const struct pmem2_source *src, size_t *alignment)
{
int ret = 0;
size_t size = 0;
struct ndctl_ctx *ctx;
struct ndctl_namespace *ndns;
	/* ndctl_new() returns a negated errno value on failure, 0 on success */
	errno = ndctl_new(&ctx) * (-1);
if (errno) {
ERR("!ndctl_new");
return PMEM2_E_ERRNO;
}
ret = pmem2_region_namespace(ctx, src, NULL, &ndns);
if (ret) {
LOG(1, "getting region and namespace failed");
goto end;
}
struct ndctl_dax *dax = ndctl_namespace_get_dax(ndns);
if (dax)
size = ndctl_dax_get_align(dax);
else
ret = PMEM2_E_INVALID_ALIGNMENT_FORMAT;
end:
ndctl_unref(ctx);
*alignment = size;
LOG(4, "device alignment %zu", *alignment);
return ret;
}
/*
* pmem2_device_dax_size -- checks the size of a given
* dax device from given source structure
*/
int
pmem2_device_dax_size(const struct pmem2_source *src, size_t *size)
{
int ret = 0;
struct ndctl_ctx *ctx;
struct ndctl_namespace *ndns;
	/* ndctl_new() returns a negated errno value on failure, 0 on success */
	errno = ndctl_new(&ctx) * (-1);
if (errno) {
ERR("!ndctl_new");
return PMEM2_E_ERRNO;
}
ret = pmem2_region_namespace(ctx, src, NULL, &ndns);
if (ret) {
LOG(1, "getting region and namespace failed");
goto end;
}
struct ndctl_dax *dax = ndctl_namespace_get_dax(ndns);
if (dax) {
*size = ndctl_dax_get_size(dax);
} else {
ret = PMEM2_E_DAX_REGION_NOT_FOUND;
ERR("Issue while reading Device Dax size - cannot "
"find dax region");
}
end:
ndctl_unref(ctx);
LOG(4, "device size %zu", *size);
return ret;
}
| 1,795 | 18.521739 | 77 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/pmem2_utils.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019-2020, Intel Corporation */
/*
* pmem2_utils.c -- libpmem2 utilities functions
*/
#include <errno.h>
#include "alloc.h"
#include "libpmem2.h"
#include "out.h"
#include "pmem2_utils.h"
#include "util.h"
/*
* pmem2_malloc -- allocate a buffer and handle an error
*/
void *
pmem2_malloc(size_t size, int *err)
{
void *ptr = Malloc(size);
*err = 0;
if (ptr == NULL) {
ERR("!malloc(%zu)", size);
*err = PMEM2_E_ERRNO;
}
return ptr;
}
/*
* pmem2_zalloc -- allocate a buffer, zero it and handle an error
*/
void *
pmem2_zalloc(size_t size, int *err)
{
void *ptr = Zalloc(size);
*err = 0;
if (ptr == NULL) {
ERR("!malloc(%zu)", size);
*err = PMEM2_E_ERRNO;
}
return ptr;
}
/*
* pmem2_realloc -- reallocate a buffer and handle an error
*/
void *
pmem2_realloc(void *ptr, size_t size, int *err)
{
void *newptr = Realloc(ptr, size);
*err = 0;
if (newptr == NULL) {
ERR("!realloc(%zu)", size);
*err = PMEM2_E_ERRNO;
}
return newptr;
}
int
pmem2_err_to_errno(int err)
{
if (err > 0)
FATAL("positive error code is a bug in libpmem2");
if (err == PMEM2_E_NOSUPP)
return ENOTSUP;
if (err <= PMEM2_E_UNKNOWN)
return EINVAL;
return -err;
}
#ifdef _WIN32
/*
* converts windows error codes to pmem2 error
*/
int
pmem2_lasterror_to_err()
{
int err = util_lasterror_to_errno(GetLastError());
if (err == -1)
return PMEM2_E_UNKNOWN;
return -err;
}
#endif
| 1,459 | 14.208333 | 65 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/source.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019-2020, Intel Corporation */
#include "source.h"
#include "alloc.h"
#include "libpmem2.h"
#include "out.h"
#include "pmem2.h"
#include "pmem2_utils.h"
int
pmem2_source_from_anon(struct pmem2_source **src, size_t size)
{
int ret;
struct pmem2_source *srcp = pmem2_malloc(sizeof(**src), &ret);
if (ret)
return ret;
srcp->type = PMEM2_SOURCE_ANON;
srcp->value.size = size;
*src = srcp;
return 0;
}
int
pmem2_source_delete(struct pmem2_source **src)
{
Free(*src);
*src = NULL;
return 0;
}
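/*
 * Minimal sketch (guarded out): create a 2 MiB anonymous source and
 * release it; error handling is omitted for brevity.
 */
#if 0
static void
anon_source_example(void)
{
	struct pmem2_source *src;

	if (pmem2_source_from_anon(&src, 2 << 20) == 0)
		pmem2_source_delete(&src);
}
#endif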
| 560 | 15.5 | 63 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/badblocks.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018-2020, Intel Corporation */
/*
* badblocks.c -- implementation of common bad blocks API
*/
#include "badblocks.h"
#include "alloc.h"
#include "out.h"
/*
* badblocks_new -- zalloc bad blocks structure
*/
struct badblocks *
badblocks_new(void)
{
LOG(3, " ");
struct badblocks *bbs = Zalloc(sizeof(struct badblocks));
if (bbs == NULL) {
ERR("!Zalloc");
}
return bbs;
}
/*
* badblocks_delete -- free bad blocks structure
*/
void
badblocks_delete(struct badblocks *bbs)
{
LOG(3, "badblocks %p", bbs);
if (bbs == NULL)
return;
Free(bbs->bbv);
Free(bbs);
}
| 635 | 14.142857 | 58 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/region_namespace.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* region_namespace.h -- internal definitions for libpmem2
* common region related functions
*/
#ifndef PMDK_REGION_NAMESPACE_H
#define PMDK_REGION_NAMESPACE_H 1
#include "os.h"
#include "pmem2_utils.h"
#include "source.h"
#ifdef __cplusplus
extern "C" {
#endif
int pmem2_get_region_id(const struct pmem2_source *src, unsigned *region_id);
#ifdef __cplusplus
}
#endif
#endif /* PMDK_REGION_NAMESPACE_H */
| 520 | 18.296296 | 77 |
h
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/libpmem2_main.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019, Intel Corporation */
/*
* libpmem2_main.c -- entry point for libpmem2.dll
*/
void libpmem2_init(void);
void libpmem2_fini(void);
int APIENTRY
DllMain(HINSTANCE hInstance, DWORD dwReason, LPVOID lpReserved)
{
switch (dwReason) {
case DLL_PROCESS_ATTACH:
libpmem2_init();
break;
case DLL_THREAD_ATTACH:
case DLL_THREAD_DETACH:
break;
case DLL_PROCESS_DETACH:
libpmem2_fini();
break;
}
return TRUE;
}
| 482 | 15.655172 | 63 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/config.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019-2020, Intel Corporation */
/*
* config.h -- internal definitions for pmem2_config
*/
#ifndef PMEM2_CONFIG_H
#define PMEM2_CONFIG_H
#include "libpmem2.h"
#define PMEM2_GRANULARITY_INVALID ((enum pmem2_granularity) (-1))
#define PMEM2_ADDRESS_ANY 0 /* default value of the address request type */
struct pmem2_config {
/* offset from the beginning of the file */
size_t offset;
size_t length; /* length of the mapping */
void *addr; /* address of the mapping */
int addr_request; /* address request type */
/* persistence granularity requested by user */
enum pmem2_granularity requested_max_granularity;
enum pmem2_sharing_type sharing; /* the way the file will be mapped */
unsigned protection_flag;
};
void pmem2_config_init(struct pmem2_config *cfg);
int pmem2_config_validate_length(const struct pmem2_config *cfg,
size_t file_len, size_t alignment);
int pmem2_config_validate_addr_alignment(const struct pmem2_config *cfg,
const struct pmem2_source *src);
#endif /* PMEM2_CONFIG_H */
| 1,070 | 28.75 | 75 |
h
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/auto_flush.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018-2019, Intel Corporation */
/*
* auto_flush.h -- auto flush detection functionality
*/
#ifndef PMEM2_AUTO_FLUSH_H
#define PMEM2_AUTO_FLUSH_H 1
#ifdef __cplusplus
extern "C" {
#endif
int pmem2_auto_flush(void);
#ifdef __cplusplus
}
#endif
#endif
| 311 | 13.181818 | 53 |
h
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/map.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019-2020, Intel Corporation */
/*
* map.h -- internal definitions for libpmem2
*/
#ifndef PMEM2_MAP_H
#define PMEM2_MAP_H
#include <stddef.h>
#include <stdbool.h>
#include "libpmem2.h"
#include "os.h"
#include "source.h"
#ifdef _WIN32
#include <windows.h>
#endif
#ifdef __cplusplus
extern "C" {
#endif
typedef int (*pmem2_deep_flush_fn)(struct pmem2_map *map,
void *ptr, size_t size);
struct pmem2_map {
void *addr; /* base address */
size_t reserved_length; /* length of the mapping reservation */
size_t content_length; /* length of the mapped content */
/* effective persistence granularity */
enum pmem2_granularity effective_granularity;
pmem2_persist_fn persist_fn;
pmem2_flush_fn flush_fn;
pmem2_drain_fn drain_fn;
pmem2_deep_flush_fn deep_flush_fn;
pmem2_memmove_fn memmove_fn;
pmem2_memcpy_fn memcpy_fn;
pmem2_memset_fn memset_fn;
struct pmem2_source source;
};
enum pmem2_granularity get_min_granularity(bool eADR, bool is_pmem,
enum pmem2_sharing_type sharing);
struct pmem2_map *pmem2_map_find(const void *addr, size_t len);
int pmem2_register_mapping(struct pmem2_map *map);
int pmem2_unregister_mapping(struct pmem2_map *map);
void pmem2_map_init(void);
void pmem2_map_fini(void);
int pmem2_validate_offset(const struct pmem2_config *cfg,
size_t *offset, size_t alignment);
#ifdef __cplusplus
}
#endif
#endif /* map.h */
| 1,426 | 22.016129 | 67 |
h
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/deep_flush.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* deep_flush.h -- functions for deep flush functionality
*/
#ifndef PMEM2_DEEP_FLUSH_H
#define PMEM2_DEEP_FLUSH_H 1
#include "map.h"
#ifdef __cplusplus
extern "C" {
#endif
int pmem2_deep_flush_write(unsigned region_id);
int pmem2_deep_flush_dax(struct pmem2_map *map, void *ptr, size_t size);
int pmem2_deep_flush_page(struct pmem2_map *map, void *ptr, size_t size);
int pmem2_deep_flush_cache(struct pmem2_map *map, void *ptr, size_t size);
int pmem2_deep_flush_byte(struct pmem2_map *map, void *ptr, size_t size);
#ifdef __cplusplus
}
#endif
#endif
| 644 | 22.035714 | 74 |
h
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/persist.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019-2020, Intel Corporation */
/*
* persist.c -- pmem2_get_[persist|flush|drain]_fn
*/
#include <errno.h>
#include <stdlib.h>
#include "libpmem2.h"
#include "map.h"
#include "out.h"
#include "os.h"
#include "persist.h"
#include "deep_flush.h"
#include "pmem2_arch.h"
#include "pmem2_utils.h"
#include "valgrind_internal.h"
static struct pmem2_arch_info Info;
/*
* memmove_nodrain_libc -- (internal) memmove to pmem using libc
*/
static void *
memmove_nodrain_libc(void *pmemdest, const void *src, size_t len,
unsigned flags, flush_func flush)
{
#ifdef DEBUG
if (flags & ~PMEM2_F_MEM_VALID_FLAGS)
ERR("invalid flags 0x%x", flags);
#endif
LOG(15, "pmemdest %p src %p len %zu flags 0x%x", pmemdest, src, len,
flags);
memmove(pmemdest, src, len);
if (!(flags & PMEM2_F_MEM_NOFLUSH))
flush(pmemdest, len);
return pmemdest;
}
/*
* memset_nodrain_libc -- (internal) memset to pmem using libc
*/
static void *
memset_nodrain_libc(void *pmemdest, int c, size_t len, unsigned flags,
flush_func flush)
{
#ifdef DEBUG
if (flags & ~PMEM2_F_MEM_VALID_FLAGS)
ERR("invalid flags 0x%x", flags);
#endif
LOG(15, "pmemdest %p c 0x%x len %zu flags 0x%x", pmemdest, c, len,
flags);
memset(pmemdest, c, len);
if (!(flags & PMEM2_F_MEM_NOFLUSH))
flush(pmemdest, len);
return pmemdest;
}
/*
* pmem2_persist_init -- initialize persist module
*/
void
pmem2_persist_init(void)
{
Info.memmove_nodrain = NULL;
Info.memset_nodrain = NULL;
Info.memmove_nodrain_eadr = NULL;
Info.memset_nodrain_eadr = NULL;
Info.flush = NULL;
Info.fence = NULL;
Info.flush_has_builtin_fence = 0;
pmem2_arch_init(&Info);
char *ptr = os_getenv("PMEM_NO_GENERIC_MEMCPY");
long long no_generic = 0;
if (ptr)
no_generic = atoll(ptr);
if (Info.memmove_nodrain == NULL) {
if (no_generic) {
Info.memmove_nodrain = memmove_nodrain_libc;
Info.memmove_nodrain_eadr = memmove_nodrain_libc;
LOG(3, "using libc memmove");
} else {
Info.memmove_nodrain = memmove_nodrain_generic;
Info.memmove_nodrain_eadr = memmove_nodrain_generic;
LOG(3, "using generic memmove");
}
}
if (Info.memset_nodrain == NULL) {
if (no_generic) {
Info.memset_nodrain = memset_nodrain_libc;
Info.memset_nodrain_eadr = memset_nodrain_libc;
LOG(3, "using libc memset");
} else {
Info.memset_nodrain = memset_nodrain_generic;
Info.memset_nodrain_eadr = memset_nodrain_generic;
LOG(3, "using generic memset");
}
}
}
/*
* pmem2_drain -- wait for any PM stores to drain from HW buffers
*/
static void
pmem2_drain(void)
{
LOG(15, NULL);
Info.fence();
}
/*
* pmem2_log_flush -- log the flush attempt for the given range
*/
static inline void
pmem2_log_flush(const void *addr, size_t len)
{
LOG(15, "addr %p len %zu", addr, len);
VALGRIND_DO_CHECK_MEM_IS_ADDRESSABLE(addr, len);
}
/*
* pmem2_flush_nop -- NOP version of the flush routine, used in cases where
* memory behind the mapping is already in persistence domain
*/
static void
pmem2_flush_nop(const void *addr, size_t len)
{
pmem2_log_flush(addr, len);
/* nothing more to do, other than telling pmemcheck about it */
VALGRIND_DO_FLUSH(addr, len);
}
/*
* pmem2_flush_cpu_cache -- flush processor cache for the given range
*/
static void
pmem2_flush_cpu_cache(const void *addr, size_t len)
{
pmem2_log_flush(addr, len);
Info.flush(addr, len);
}
/*
* pmem2_persist_noflush -- make all changes to a range of pmem persistent
*/
static void
pmem2_persist_noflush(const void *addr, size_t len)
{
pmem2_flush_nop(addr, len);
pmem2_drain();
}
/*
* pmem2_persist_cpu_cache -- make all changes to a range of pmem persistent
*/
static void
pmem2_persist_cpu_cache(const void *addr, size_t len)
{
pmem2_flush_cpu_cache(addr, len);
pmem2_drain();
}
/*
* pmem2_flush_file_buffers -- flush CPU and OS caches for the given range
*/
static int
pmem2_flush_file_buffers(const void *addr, size_t len, int autorestart)
{
int olderrno = errno;
pmem2_log_flush(addr, len);
/*
* Flushing using OS-provided mechanisms requires that the address
* be a multiple of the page size.
* Align address down and change len so that [addr, addr + len) still
* contains the initial range.
*/
/* round address down to page boundary */
uintptr_t new_addr = ALIGN_DOWN((uintptr_t)addr, Pagesize);
/* increase len by the amount we gain when we round addr down */
len += (uintptr_t)addr - new_addr;
addr = (const void *)new_addr;
int ret = 0;
/*
* Find all the mappings overlapping with the [addr, addr + len) range
* and flush them, one by one.
*/
do {
struct pmem2_map *map = pmem2_map_find(addr, len);
if (!map)
break;
size_t flush;
size_t remaining = map->reserved_length;
if (map->addr < addr) {
/*
* Addr is inside of the mapping, so we have to decrease
* the remaining length by an offset from the start
* of our mapping.
*/
remaining -= (uintptr_t)addr - (uintptr_t)map->addr;
} else if (map->addr == addr) {
/* perfect match, there's nothing to do in this case */
} else {
/*
* map->addr > addr, so we have to skip the hole
* between addr and map->addr.
*/
len -= (uintptr_t)map->addr - (uintptr_t)addr;
addr = map->addr;
}
if (len > remaining)
flush = remaining;
else
flush = len;
int ret1 = pmem2_flush_file_buffers_os(map, addr, flush,
autorestart);
if (ret1 != 0)
ret = ret1;
addr = ((const char *)addr) + flush;
len -= flush;
} while (len > 0);
errno = olderrno;
return ret;
}
/*
* pmem2_persist_pages -- flush processor cache for the given range
*/
static void
pmem2_persist_pages(const void *addr, size_t len)
{
/*
* Restarting on EINTR in general is a bad idea, but we don't have
* any way to communicate the failure outside.
*/
const int autorestart = 1;
int ret = pmem2_flush_file_buffers(addr, len, autorestart);
if (ret) {
/*
* 1) There's no way to propagate this error. Silently ignoring
* it would lead to data corruption.
* 2) non-pmem code path shouldn't be used in production.
*
* The only sane thing to do is to crash the application. Sorry.
*/
abort();
}
}
/*
* pmem2_drain_nop -- variant of pmem2_drain for page granularity;
* it is a NOP because the flush part has built-in drain
*/
static void
pmem2_drain_nop(void)
{
LOG(15, NULL);
}
/*
* pmem2_deep_flush_page -- do nothing - pmem2_persist_fn already did msync
*/
int
pmem2_deep_flush_page(struct pmem2_map *map, void *ptr, size_t size)
{
LOG(3, "map %p ptr %p size %zu", map, ptr, size);
return 0;
}
/*
* pmem2_deep_flush_cache -- flush buffers for fsdax or write
* to deep_flush for DevDax
*/
int
pmem2_deep_flush_cache(struct pmem2_map *map, void *ptr, size_t size)
{
LOG(3, "map %p ptr %p size %zu", map, ptr, size);
enum pmem2_file_type type = map->source.value.ftype;
/*
* XXX: this should be moved to pmem2_deep_flush_dax
* while refactoring abstraction
*/
if (type == PMEM2_FTYPE_DEVDAX)
pmem2_persist_cpu_cache(ptr, size);
int ret = pmem2_deep_flush_dax(map, ptr, size);
if (ret < 0) {
LOG(1, "cannot perform deep flush cache for map %p", map);
return ret;
}
return 0;
}
/*
* pmem2_deep_flush_byte -- flush cpu cache and perform deep flush for dax
*/
int
pmem2_deep_flush_byte(struct pmem2_map *map, void *ptr, size_t size)
{
LOG(3, "map %p ptr %p size %zu", map, ptr, size);
if (map->source.type == PMEM2_SOURCE_ANON) {
ERR("Anonymous source does not support deep flush");
return PMEM2_E_NOSUPP;
}
ASSERT(map->source.type == PMEM2_SOURCE_FD ||
map->source.type == PMEM2_SOURCE_HANDLE);
enum pmem2_file_type type = map->source.value.ftype;
/*
* XXX: this should be moved to pmem2_deep_flush_dax
* while refactoring abstraction
*/
if (type == PMEM2_FTYPE_DEVDAX)
pmem2_persist_cpu_cache(ptr, size);
int ret = pmem2_deep_flush_dax(map, ptr, size);
if (ret < 0) {
LOG(1, "cannot perform deep flush byte for map %p", map);
return ret;
}
return 0;
}
/*
* pmem2_set_flush_fns -- set function pointers related to flushing
*/
void
pmem2_set_flush_fns(struct pmem2_map *map)
{
switch (map->effective_granularity) {
case PMEM2_GRANULARITY_PAGE:
map->persist_fn = pmem2_persist_pages;
map->flush_fn = pmem2_persist_pages;
map->drain_fn = pmem2_drain_nop;
map->deep_flush_fn = pmem2_deep_flush_page;
break;
case PMEM2_GRANULARITY_CACHE_LINE:
map->persist_fn = pmem2_persist_cpu_cache;
map->flush_fn = pmem2_flush_cpu_cache;
map->drain_fn = pmem2_drain;
map->deep_flush_fn = pmem2_deep_flush_cache;
break;
case PMEM2_GRANULARITY_BYTE:
map->persist_fn = pmem2_persist_noflush;
map->flush_fn = pmem2_flush_nop;
map->drain_fn = pmem2_drain;
map->deep_flush_fn = pmem2_deep_flush_byte;
break;
default:
abort();
}
}
/*
* pmem2_get_persist_fn - return a pointer to a function responsible for
* persisting data in range owned by pmem2_map
*/
pmem2_persist_fn
pmem2_get_persist_fn(struct pmem2_map *map)
{
return map->persist_fn;
}
/*
* pmem2_get_flush_fn - return a pointer to a function responsible for
* flushing data in range owned by pmem2_map
*/
pmem2_flush_fn
pmem2_get_flush_fn(struct pmem2_map *map)
{
return map->flush_fn;
}
/*
* pmem2_get_drain_fn - return a pointer to a function responsible for
* draining flushes in range owned by pmem2_map
*/
pmem2_drain_fn
pmem2_get_drain_fn(struct pmem2_map *map)
{
return map->drain_fn;
}
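/*
 * Minimal usage sketch (guarded out; the map is assumed to be a valid,
 * existing mapping): how a caller is expected to consume the function
 * pointers returned above.
 */
#if 0
static void
persist_example(struct pmem2_map *map, void *addr, size_t len)
{
	pmem2_persist_fn persist = pmem2_get_persist_fn(map);

	/* ... modify [addr, addr + len) ... */
	persist(addr, len);	/* flush + drain appropriate for this mapping */
}
#endif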
/*
* pmem2_memmove_nonpmem -- mem[move|cpy] followed by an msync
*/
static void *
pmem2_memmove_nonpmem(void *pmemdest, const void *src, size_t len,
unsigned flags)
{
#ifdef DEBUG
if (flags & ~PMEM2_F_MEM_VALID_FLAGS)
ERR("invalid flags 0x%x", flags);
#endif
PMEM2_API_START("pmem2_memmove");
Info.memmove_nodrain(pmemdest, src, len, flags & ~PMEM2_F_MEM_NODRAIN,
Info.flush);
pmem2_persist_pages(pmemdest, len);
PMEM2_API_END("pmem2_memmove");
return pmemdest;
}
/*
* pmem2_memset_nonpmem -- memset followed by an msync
*/
static void *
pmem2_memset_nonpmem(void *pmemdest, int c, size_t len, unsigned flags)
{
#ifdef DEBUG
if (flags & ~PMEM2_F_MEM_VALID_FLAGS)
ERR("invalid flags 0x%x", flags);
#endif
PMEM2_API_START("pmem2_memset");
Info.memset_nodrain(pmemdest, c, len, flags & ~PMEM2_F_MEM_NODRAIN,
Info.flush);
pmem2_persist_pages(pmemdest, len);
PMEM2_API_END("pmem2_memset");
return pmemdest;
}
/*
* pmem2_memmove -- mem[move|cpy] to pmem
*/
static void *
pmem2_memmove(void *pmemdest, const void *src, size_t len,
unsigned flags)
{
#ifdef DEBUG
if (flags & ~PMEM2_F_MEM_VALID_FLAGS)
ERR("invalid flags 0x%x", flags);
#endif
PMEM2_API_START("pmem2_memmove");
Info.memmove_nodrain(pmemdest, src, len, flags, Info.flush);
if ((flags & (PMEM2_F_MEM_NODRAIN | PMEM2_F_MEM_NOFLUSH)) == 0)
pmem2_drain();
PMEM2_API_END("pmem2_memmove");
return pmemdest;
}
/*
* pmem2_memset -- memset to pmem
*/
static void *
pmem2_memset(void *pmemdest, int c, size_t len, unsigned flags)
{
#ifdef DEBUG
if (flags & ~PMEM2_F_MEM_VALID_FLAGS)
ERR("invalid flags 0x%x", flags);
#endif
PMEM2_API_START("pmem2_memset");
Info.memset_nodrain(pmemdest, c, len, flags, Info.flush);
if ((flags & (PMEM2_F_MEM_NODRAIN | PMEM2_F_MEM_NOFLUSH)) == 0)
pmem2_drain();
PMEM2_API_END("pmem2_memset");
return pmemdest;
}
/*
* pmem2_memmove_eadr -- mem[move|cpy] to pmem, platform supports eADR
*/
static void *
pmem2_memmove_eadr(void *pmemdest, const void *src, size_t len,
unsigned flags)
{
#ifdef DEBUG
if (flags & ~PMEM2_F_MEM_VALID_FLAGS)
ERR("invalid flags 0x%x", flags);
#endif
PMEM2_API_START("pmem2_memmove");
Info.memmove_nodrain_eadr(pmemdest, src, len, flags, Info.flush);
if ((flags & (PMEM2_F_MEM_NODRAIN | PMEM2_F_MEM_NOFLUSH)) == 0)
pmem2_drain();
PMEM2_API_END("pmem2_memmove");
return pmemdest;
}
/*
* pmem2_memset_eadr -- memset to pmem, platform supports eADR
*/
static void *
pmem2_memset_eadr(void *pmemdest, int c, size_t len, unsigned flags)
{
#ifdef DEBUG
if (flags & ~PMEM2_F_MEM_VALID_FLAGS)
ERR("invalid flags 0x%x", flags);
#endif
PMEM2_API_START("pmem2_memset");
Info.memset_nodrain_eadr(pmemdest, c, len, flags, Info.flush);
if ((flags & (PMEM2_F_MEM_NODRAIN | PMEM2_F_MEM_NOFLUSH)) == 0)
pmem2_drain();
PMEM2_API_END("pmem2_memset");
return pmemdest;
}
/*
* pmem2_set_mem_fns -- set function pointers related to mem[move|cpy|set]
*/
void
pmem2_set_mem_fns(struct pmem2_map *map)
{
switch (map->effective_granularity) {
case PMEM2_GRANULARITY_PAGE:
map->memmove_fn = pmem2_memmove_nonpmem;
map->memcpy_fn = pmem2_memmove_nonpmem;
map->memset_fn = pmem2_memset_nonpmem;
break;
case PMEM2_GRANULARITY_CACHE_LINE:
map->memmove_fn = pmem2_memmove;
map->memcpy_fn = pmem2_memmove;
map->memset_fn = pmem2_memset;
break;
case PMEM2_GRANULARITY_BYTE:
map->memmove_fn = pmem2_memmove_eadr;
map->memcpy_fn = pmem2_memmove_eadr;
map->memset_fn = pmem2_memset_eadr;
break;
default:
abort();
}
}
/*
 * pmem2_get_memmove_fn - return a pointer to the memmove function for the mapping
*/
pmem2_memmove_fn
pmem2_get_memmove_fn(struct pmem2_map *map)
{
return map->memmove_fn;
}
/*
 * pmem2_get_memcpy_fn - return a pointer to the memcpy function for the mapping
*/
pmem2_memcpy_fn
pmem2_get_memcpy_fn(struct pmem2_map *map)
{
return map->memcpy_fn;
}
/*
 * pmem2_get_memset_fn - return a pointer to the memset function for the mapping
*/
pmem2_memset_fn
pmem2_get_memset_fn(struct pmem2_map *map)
{
return map->memset_fn;
}
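/*
 * Minimal sketch (guarded out, names hypothetical): batching writes with
 * PMEM2_F_MEM_NODRAIN and a single explicit drain, using the function
 * pointers returned above.
 */
#if 0
static void
memcpy_batch(struct pmem2_map *map, char *dest, char **bufs, size_t *lens,
		int n)
{
	pmem2_memcpy_fn memcpy_fn = pmem2_get_memcpy_fn(map);
	pmem2_drain_fn drain = pmem2_get_drain_fn(map);
	size_t off = 0;

	for (int i = 0; i < n; i++) {
		memcpy_fn(dest + off, bufs[i], lens[i], PMEM2_F_MEM_NODRAIN);
		off += lens[i];
	}
	drain();
}
#endif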
#if VG_PMEMCHECK_ENABLED
/*
* pmem2_emit_log -- logs library and function names to pmemcheck store log
*/
void
pmem2_emit_log(const char *func, int order)
{
util_emit_log("libpmem2", func, order);
}
#endif
| 13,665 | 21.58843 | 76 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/region_namespace_none.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
#include "region_namespace.h"
#include "out.h"
/*
* pmem2_get_region_id -- define behavior without ndctl
*/
int
pmem2_get_region_id(const struct pmem2_source *src, unsigned *region_id)
{
LOG(3, "Cannot read region id - ndctl is not available");
return 0;
}
| 345 | 19.352941 | 72 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/persist_posix.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019-2020, Intel Corporation */
/*
* persist_posix.c -- POSIX-specific part of persist implementation
*/
#include <errno.h>
#include <stdint.h>
#include <sys/mman.h>
#include "out.h"
#include "persist.h"
#include "pmem2_utils.h"
#include "valgrind_internal.h"
/*
* pmem2_flush_file_buffers_os -- flush CPU and OS file caches for the given
* range
*/
int
pmem2_flush_file_buffers_os(struct pmem2_map *map, const void *addr, size_t len,
int autorestart)
{
/*
* msync accepts addresses aligned to the page boundary, so we may sync
* more and part of it may have been marked as undefined/inaccessible.
* Msyncing such memory is not a bug, so as a workaround temporarily
* disable error reporting.
*/
VALGRIND_DO_DISABLE_ERROR_REPORTING;
int ret;
do {
ret = msync((void *)addr, len, MS_SYNC);
if (ret < 0) {
ERR("!msync");
} else {
/* full flush */
VALGRIND_DO_PERSIST((uintptr_t)addr, len);
}
} while (autorestart && ret < 0 && errno == EINTR);
VALGRIND_DO_ENABLE_ERROR_REPORTING;
if (ret)
return PMEM2_E_ERRNO;
return 0;
}
| 1,126 | 21.098039 | 80 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/pmem2_utils_linux.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2020, Intel Corporation */
#include <errno.h>
#include <fcntl.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/stat.h>
#include <sys/sysmacros.h>
#include "libpmem2.h"
#include "out.h"
#include "pmem2_utils.h"
#include "region_namespace.h"
#include "source.h"
/*
* pmem2_get_type_from_stat -- determine type of file based on output of stat
* syscall
*/
int
pmem2_get_type_from_stat(const os_stat_t *st, enum pmem2_file_type *type)
{
if (S_ISREG(st->st_mode)) {
*type = PMEM2_FTYPE_REG;
return 0;
}
if (S_ISDIR(st->st_mode)) {
*type = PMEM2_FTYPE_DIR;
return 0;
}
if (!S_ISCHR(st->st_mode)) {
ERR("file type 0%o not supported", st->st_mode & S_IFMT);
return PMEM2_E_INVALID_FILE_TYPE;
}
char spath[PATH_MAX];
int ret = util_snprintf(spath, PATH_MAX,
"/sys/dev/char/%u:%u/subsystem",
os_major(st->st_rdev), os_minor(st->st_rdev));
if (ret < 0) {
/* impossible */
ERR("!snprintf");
ASSERTinfo(0, "snprintf failed");
return PMEM2_E_ERRNO;
}
LOG(4, "device subsystem path \"%s\"", spath);
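	/*
	 * Hypothetical example: for a char device 239:0, spath built above is
	 * "/sys/dev/char/239:0/subsystem"; for a device dax it resolves via
	 * realpath() below to a path whose basename is "dax", which is what
	 * the final check looks for.
	 */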
char npath[PATH_MAX];
char *rpath = realpath(spath, npath);
if (rpath == NULL) {
ERR("!realpath \"%s\"", spath);
return PMEM2_E_ERRNO;
}
char *basename = strrchr(rpath, '/');
if (!basename || strcmp("dax", basename + 1) != 0) {
LOG(3, "%s path does not match device dax prefix path", rpath);
return PMEM2_E_INVALID_FILE_TYPE;
}
*type = PMEM2_FTYPE_DEVDAX;
return 0;
}
| 1,507 | 20.239437 | 77 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/source_windows.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019-2020, Intel Corporation */
/*
* source_windows.c -- windows specific pmem2_source implementation
*/
#include <Windows.h>
#include "config.h"
#include "libpmem2.h"
#include "config.h"
#include "out.h"
#include "pmem2_utils.h"
#include "source.h"
#include "util.h"
/*
* pmem2_source_from_fd -- create a new data source instance
*/
int
pmem2_source_from_fd(struct pmem2_source **src, int fd)
{
*src = NULL;
if (fd < 0)
return PMEM2_E_INVALID_FILE_HANDLE;
HANDLE handle = (HANDLE)_get_osfhandle(fd);
if (handle == INVALID_HANDLE_VALUE) {
/*
* _get_osfhandle aborts in an error case, so technically
* this is dead code. But according to MSDN it is
 * setting an errno on failure, so we can return it in case some
 * "windows magic" happens and this function "accidentally"
 * does not abort.
*/
ERR("!_get_osfhandle");
if (errno == EBADF)
return PMEM2_E_INVALID_FILE_HANDLE;
return PMEM2_E_ERRNO;
}
return pmem2_source_from_handle(src, handle);
}
/*
* pmem2_win_stat -- retrieve information about handle
*/
static int
pmem2_win_stat(HANDLE handle, BY_HANDLE_FILE_INFORMATION *info)
{
if (!GetFileInformationByHandle(handle, info)) {
ERR("!!GetFileInformationByHandle");
if (GetLastError() == ERROR_INVALID_HANDLE)
return PMEM2_E_INVALID_FILE_HANDLE;
else
return pmem2_lasterror_to_err();
}
if (info->dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) {
ERR(
	"using a directory doesn't make any sense in the context of pmem2");
return PMEM2_E_INVALID_FILE_TYPE;
}
return 0;
}
/*
* pmem2_source_from_fd -- create a new data source instance
*/
int
pmem2_source_from_handle(struct pmem2_source **src, HANDLE handle)
{
*src = NULL;
int ret;
if (handle == INVALID_HANDLE_VALUE)
return PMEM2_E_INVALID_FILE_HANDLE;
BY_HANDLE_FILE_INFORMATION file_info;
ret = pmem2_win_stat(handle, &file_info);
if (ret)
return ret;
/* XXX: winapi doesn't provide option to get open flags from HANDLE */
struct pmem2_source *srcp = pmem2_malloc(sizeof(**src), &ret);
if (ret)
return ret;
ASSERTne(srcp, NULL);
srcp->type = PMEM2_SOURCE_HANDLE;
srcp->value.handle = handle;
*src = srcp;
return 0;
}
/*
* pmem2_source_size -- get the size of the file handle stored in the provided
* source
*/
int
pmem2_source_size(const struct pmem2_source *src, size_t *size)
{
LOG(3, "type %d", src->type);
int ret;
if (src->type == PMEM2_SOURCE_ANON) {
*size = src->value.size;
return 0;
}
ASSERTeq(src->type, PMEM2_SOURCE_HANDLE);
BY_HANDLE_FILE_INFORMATION info;
ret = pmem2_win_stat(src->value.handle, &info);
if (ret)
return ret;
*size = ((size_t)info.nFileSizeHigh << 32) | info.nFileSizeLow;
LOG(4, "file length %zu", *size);
return 0;
}
/*
* pmem2_source_alignment -- get alignment from the system info
*/
int
pmem2_source_alignment(const struct pmem2_source *src, size_t *alignment)
{
LOG(3, "type %d", src->type);
SYSTEM_INFO info;
GetSystemInfo(&info);
*alignment = (size_t)info.dwAllocationGranularity;
if (!util_is_pow2(*alignment)) {
ERR("alignment (%zu) has to be a power of two", *alignment);
return PMEM2_E_INVALID_ALIGNMENT_VALUE;
}
LOG(4, "alignment %zu", *alignment);
return 0;
}
| 3,248 | 20.235294 | 76 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/errormsg.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019-2020, Intel Corporation */
/*
* errormsg.c -- pmem2_errormsg* implementation
*/
#include "libpmem2.h"
#include "out.h"
/*
* pmem2_errormsgU -- return last error message
*/
#ifndef _WIN32
static inline
#endif
const char *
pmem2_errormsgU(void)
{
return out_get_errormsg();
}
#ifndef _WIN32
/*
* pmem2_errormsg -- return last error message
*/
const char *
pmem2_errormsg(void)
{
return pmem2_errormsgU();
}
#else
/*
* pmem2_errormsgW -- return last error message as wchar_t
*/
const wchar_t *
pmem2_errormsgW(void)
{
return out_get_errormsgW();
}
#endif
/*
* pmem2_perrorU -- prints a descriptive error message to stderr
*/
#ifndef _WIN32
static inline void
pmem2_perrorU(const char *format, va_list args)
{
vfprintf(stderr, format, args);
fprintf(stderr, ": %s\n", pmem2_errormsg());
}
#else
void
pmem2_perrorU(const char *format, ...)
{
va_list args;
va_start(args, format);
vfprintf(stderr, format, args);
fprintf(stderr, ": %s\n", pmem2_errormsg());
va_end(args);
}
#endif
#ifndef _WIN32
/*
* pmem2_perror -- prints a descriptive error message to stderr
*/
void
pmem2_perror(const char *format, ...)
{
va_list args;
va_start(args, format);
pmem2_perrorU(format, args);
va_end(args);
}
#else
/*
* pmem2_perrorW -- prints a descriptive error message to stderr
*/
void
pmem2_perrorW(const wchar_t *format, ...)
{
va_list args;
va_start(args, format);
vfwprintf(stderr, format, args);
fwprintf(stderr, L": %s\n", pmem2_errormsgW());
va_end(args);
}
#endif
| 1,570 | 15.195876 | 68 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/pmem2_utils_none.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
#include <errno.h>
#include "libpmem2.h"
#include "out.h"
#include "pmem2_utils.h"
#include "source.h"
/*
* pmem2_device_dax_alignment -- checks the alignment of a given
* dax device from the given source
*/
int
pmem2_device_dax_alignment(const struct pmem2_source *src, size_t *alignment)
{
ERR("Cannot read Device Dax alignment - ndctl is not available");
return PMEM2_E_NOSUPP;
}
/*
* pmem2_device_dax_size -- checks the size of a given dax device from
* the given source
*/
int
pmem2_device_dax_size(const struct pmem2_source *src, size_t *size)
{
ERR("Cannot read Device Dax size - ndctl is not available");
return PMEM2_E_NOSUPP;
}
| 727 | 20.411765 | 77 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/auto_flush_linux.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018-2020, Intel Corporation */
/*
* auto_flush_linux.c -- Linux auto flush detection
*/
#define _GNU_SOURCE
#include <inttypes.h>
#include <fcntl.h>
#include <sys/stat.h>
#include <string.h>
#include <errno.h>
#include "out.h"
#include "os.h"
#include "fs.h"
#include "auto_flush.h"
#define BUS_DEVICE_PATH "/sys/bus/nd/devices"
#define PERSISTENCE_DOMAIN "persistence_domain"
#define DOMAIN_VALUE_LEN 32
/*
* check_cpu_cache -- (internal) check if file contains "cpu_cache" entry
*/
static int
check_cpu_cache(const char *domain_path)
{
LOG(3, "domain_path: %s", domain_path);
char domain_value[DOMAIN_VALUE_LEN];
int domain_fd;
int cpu_cache = 0;
if ((domain_fd = os_open(domain_path, O_RDONLY)) < 0) {
LOG(1, "!open(\"%s\", O_RDONLY)", domain_path);
goto end;
}
ssize_t len = read(domain_fd, domain_value,
DOMAIN_VALUE_LEN);
if (len < 0) {
ERR("!read(%d, %p, %d)", domain_fd,
domain_value, DOMAIN_VALUE_LEN);
cpu_cache = -1;
goto end;
} else if (len == 0) {
errno = EIO;
ERR("read(%d, %p, %d) empty string",
domain_fd, domain_value,
DOMAIN_VALUE_LEN);
cpu_cache = -1;
goto end;
} else if (domain_value[len - 1] != '\n') {
ERR("!read(%d, %p, %d) invalid format",
domain_fd, domain_value,
DOMAIN_VALUE_LEN);
cpu_cache = -1;
goto end;
}
domain_value[len - 1] = '\0';
LOG(15, "detected persistent_domain: %s", domain_value);
if (strcmp(domain_value, "cpu_cache") == 0) {
LOG(15, "cpu_cache in persistent_domain: %s", domain_path);
cpu_cache = 1;
} else {
LOG(15, "cpu_cache not in persistent_domain: %s", domain_path);
cpu_cache = 0;
}
end:
if (domain_fd >= 0)
os_close(domain_fd);
return cpu_cache;
}
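/*
 * Example (illustrative): on an eADR-capable platform the sysfs file read
 * above is expected to contain the literal string followed by a newline:
 *
 *	$ cat /sys/bus/nd/devices/region0/persistence_domain
 *	cpu_cache
 *
 * Any other content (e.g. "memory_controller") makes check_cpu_cache()
 * return 0, i.e. explicit CPU cache flushing is still required.
 */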
/*
* check_domain_in_region -- (internal) check if region
* contains persistence_domain file
*/
static int
check_domain_in_region(const char *region_path)
{
LOG(3, "region_path: %s", region_path);
struct fs *reg = NULL;
struct fs_entry *reg_entry;
char domain_path[PATH_MAX];
int cpu_cache = 0;
reg = fs_new(region_path);
if (reg == NULL) {
ERR("!fs_new: \"%s\"", region_path);
cpu_cache = -1;
goto end;
}
while ((reg_entry = fs_read(reg)) != NULL) {
/*
* persistence_domain has to be a file type entry
* and it has to be a first-level child of the region;
* there is no need to descend into deeper levels
*/
if (reg_entry->type != FS_ENTRY_FILE ||
strcmp(reg_entry->name,
PERSISTENCE_DOMAIN) != 0 ||
reg_entry->level != 1)
continue;
int ret = util_snprintf(domain_path, PATH_MAX,
"%s/"PERSISTENCE_DOMAIN, region_path);
if (ret < 0) {
ERR("!snprintf");
cpu_cache = -1;
goto end;
}
cpu_cache = check_cpu_cache(domain_path);
}
end:
if (reg)
fs_delete(reg);
return cpu_cache;
}
/*
* pmem2_auto_flush -- check if platform supports auto flush for all regions
*
* Traverse "/sys/bus/nd/devices" path to find all the nvdimm regions,
* then for each region checks if "persistence_domain" file exists and
* contains "cpu_cache" string.
* If for any region the "persistence_domain" entry does not exist, or its
* content is not as expected, assume eADR is not available on this platform.
*/
int
pmem2_auto_flush(void)
{
LOG(15, NULL);
char *device_path;
int cpu_cache = 0;
device_path = BUS_DEVICE_PATH;
os_stat_t sdev;
if (os_stat(device_path, &sdev) != 0 ||
S_ISDIR(sdev.st_mode) == 0) {
LOG(3, "eADR not supported");
return cpu_cache;
}
struct fs *dev = fs_new(device_path);
if (dev == NULL) {
ERR("!fs_new: \"%s\"", device_path);
return -1;
}
struct fs_entry *dev_entry;
while ((dev_entry = fs_read(dev)) != NULL) {
/*
* Skip if not a symlink, because we expect that
* a region on the sysfs path is a symlink.
* Skip if depth is different from 1, because the region
* we are interested in should be a first-level
* child of the device.
*/
if ((dev_entry->type != FS_ENTRY_SYMLINK) ||
!strstr(dev_entry->name, "region") ||
dev_entry->level != 1)
continue;
LOG(15, "Start traversing region: %s", dev_entry->path);
cpu_cache = check_domain_in_region(dev_entry->path);
if (cpu_cache != 1)
goto end;
}
end:
fs_delete(dev);
return cpu_cache;
}
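/*
 * Example (illustrative only): a minimal sketch of acting on the detection
 * result. The helper name example_pick_flush_policy is hypothetical and not
 * part of libpmem2.
 */
static int
example_pick_flush_policy(void)
{
int ret = pmem2_auto_flush();
if (ret < 0)
return ret; /* detection failed */
/* ret == 1: eADR - stores persist without explicit cache flushes */
/* ret == 0: ADR only - CPU caches must be flushed explicitly */
return ret;
}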
| 4,214 | 21.783784 | 77 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/config.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019-2020, Intel Corporation */
/*
* config.c -- pmem2_config implementation
*/
#include <unistd.h>
#include "alloc.h"
#include "config.h"
#include "libpmem2.h"
#include "out.h"
#include "pmem2.h"
#include "pmem2_utils.h"
/*
* pmem2_config_init -- initialize cfg structure.
*/
void
pmem2_config_init(struct pmem2_config *cfg)
{
cfg->offset = 0;
cfg->length = 0;
cfg->addr = NULL;
cfg->addr_request = PMEM2_ADDRESS_ANY;
cfg->requested_max_granularity = PMEM2_GRANULARITY_INVALID;
cfg->sharing = PMEM2_SHARED;
cfg->protection_flag = PMEM2_PROT_READ | PMEM2_PROT_WRITE;
}
/*
* pmem2_config_new -- allocate and initialize cfg structure.
*/
int
pmem2_config_new(struct pmem2_config **cfg)
{
int ret;
*cfg = pmem2_malloc(sizeof(**cfg), &ret);
if (ret)
return ret;
ASSERTne(cfg, NULL);
pmem2_config_init(*cfg);
return 0;
}
/*
* pmem2_config_delete -- deallocate cfg structure.
*/
int
pmem2_config_delete(struct pmem2_config **cfg)
{
Free(*cfg);
*cfg = NULL;
return 0;
}
/*
* pmem2_config_set_required_store_granularity -- set granularity
* requested by user in the pmem2_config structure
*/
int
pmem2_config_set_required_store_granularity(struct pmem2_config *cfg,
enum pmem2_granularity g)
{
switch (g) {
case PMEM2_GRANULARITY_BYTE:
case PMEM2_GRANULARITY_CACHE_LINE:
case PMEM2_GRANULARITY_PAGE:
break;
default:
ERR("unknown granularity value %d", g);
return PMEM2_E_GRANULARITY_NOT_SUPPORTED;
}
cfg->requested_max_granularity = g;
return 0;
}
/*
* pmem2_config_set_offset -- set offset in the pmem2_config structure
*/
int
pmem2_config_set_offset(struct pmem2_config *cfg, size_t offset)
{
/* mmap func takes offset as a type of off_t */
if (offset > (size_t)INT64_MAX) {
ERR("offset is greater than INT64_MAX");
return PMEM2_E_OFFSET_OUT_OF_RANGE;
}
cfg->offset = offset;
return 0;
}
/*
* pmem2_config_set_length -- set length in the pmem2_config structure
*/
int
pmem2_config_set_length(struct pmem2_config *cfg, size_t length)
{
cfg->length = length;
return 0;
}
/*
* pmem2_config_validate_length -- validate that length in the pmem2_config
* structure is consistent with the file length
*/
int
pmem2_config_validate_length(const struct pmem2_config *cfg,
size_t file_len, size_t alignment)
{
ASSERTne(alignment, 0);
if (file_len == 0) {
ERR("file length is equal 0");
return PMEM2_E_SOURCE_EMPTY;
}
if (cfg->length % alignment) {
ERR("length is not a multiple of %lu", alignment);
return PMEM2_E_LENGTH_UNALIGNED;
}
/* overflow check */
const size_t end = cfg->offset + cfg->length;
if (end < cfg->offset) {
ERR("overflow of offset and length");
return PMEM2_E_MAP_RANGE;
}
/* let's align the file size */
size_t aligned_file_len = file_len;
if (file_len % alignment)
aligned_file_len = ALIGN_UP(file_len, alignment);
/* validate mapping fit into the file */
if (end > aligned_file_len) {
ERR("mapping larger than file size");
return PMEM2_E_MAP_RANGE;
}
return 0;
}
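/*
 * Worked example (illustrative): for a 10000-byte file and a 4096-byte
 * alignment, offset 4096 with length 8192 passes the checks above: the file
 * size is first aligned up to 12288 and 4096 + 8192 <= 12288. The same
 * request with length 12288 fails with PMEM2_E_MAP_RANGE.
 */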
/*
* pmem2_config_set_sharing -- set the way pmem2_map will map the file
*/
int
pmem2_config_set_sharing(struct pmem2_config *cfg, enum pmem2_sharing_type type)
{
switch (type) {
case PMEM2_SHARED:
case PMEM2_PRIVATE:
cfg->sharing = type;
break;
default:
ERR("unknown sharing value %d", type);
return PMEM2_E_INVALID_SHARING_VALUE;
}
return 0;
}
/*
* pmem2_config_validate_addr_alignment -- validate that addr in the
* pmem2_config structure is a multiple of the alignment required for
* specific cfg
*/
int
pmem2_config_validate_addr_alignment(const struct pmem2_config *cfg,
const struct pmem2_source *src)
{
/* cannot NULL % alignment, NULL is valid */
if (!cfg->addr)
return 0;
size_t alignment;
int ret = pmem2_source_alignment(src, &alignment);
if (ret)
return ret;
ASSERTne(alignment, 0);
if ((size_t)cfg->addr % alignment) {
ERR("address %p is not a multiple of %lu", cfg->addr,
alignment);
return PMEM2_E_ADDRESS_UNALIGNED;
}
return 0;
}
/*
* pmem2_config_set_address -- set addr and addr_request in the config
* struct
*/
int
pmem2_config_set_address(struct pmem2_config *cfg, void *addr,
enum pmem2_address_request_type request_type)
{
if (request_type != PMEM2_ADDRESS_FIXED_NOREPLACE) {
ERR("invalid address request_type 0x%x", request_type);
return PMEM2_E_INVALID_ADDRESS_REQUEST_TYPE;
}
if (request_type == PMEM2_ADDRESS_FIXED_NOREPLACE && !addr) {
ERR(
"cannot use address request type PMEM2_ADDRESS_FIXED_NOREPLACE with addr being NULL");
return PMEM2_E_ADDRESS_NULL;
}
cfg->addr = addr;
cfg->addr_request = (int)request_type;
return 0;
}
/*
* pmem2_config_set_vm_reservation -- set vm_reservation in the
* pmem2_config structure
*/
int
pmem2_config_set_vm_reservation(struct pmem2_config *cfg,
struct pmem2_vm_reservation *rsv, size_t offset)
{
return PMEM2_E_NOSUPP;
}
/*
* pmem2_config_clear_address -- reset addr and addr_request in the config
* to the default values
*/
void
pmem2_config_clear_address(struct pmem2_config *cfg)
{
cfg->addr = NULL;
cfg->addr_request = PMEM2_ADDRESS_ANY;
}
/*
* pmem2_config_set_protection -- set protection flags
* in the config struct
*/
int
pmem2_config_set_protection(struct pmem2_config *cfg,
unsigned prot)
{
unsigned unknown_prot = prot & ~(PMEM2_PROT_READ | PMEM2_PROT_WRITE |
PMEM2_PROT_EXEC | PMEM2_PROT_NONE);
if (unknown_prot) {
ERR("invalid flag %u", prot);
return PMEM2_E_INVALID_PROT_FLAG;
}
cfg->protection_flag = prot;
return 0;
}
| 5,603 | 20.227273 | 89 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/ravl_interval.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* ravl_interval.h -- internal definitions for ravl_interval
*/
#ifndef RAVL_INTERVAL_H
#define RAVL_INTERVAL_H
#include "libpmem2.h"
#include "os_thread.h"
#include "ravl.h"
struct ravl_interval;
struct ravl_interval_node;
typedef size_t ravl_interval_min(void *addr);
typedef size_t ravl_interval_max(void *addr);
struct ravl_interval *ravl_interval_new(ravl_interval_min *min,
ravl_interval_max *max);
void ravl_interval_delete(struct ravl_interval *ri);
int ravl_interval_insert(struct ravl_interval *ri, void *addr);
int ravl_interval_remove(struct ravl_interval *ri,
struct ravl_interval_node *rin);
struct ravl_interval_node *ravl_interval_find_equal(struct ravl_interval *ri,
void *addr);
struct ravl_interval_node *ravl_interval_find(struct ravl_interval *ri,
void *addr);
void *ravl_interval_data(struct ravl_interval_node *rin);
#endif
| 947 | 27.727273 | 77 |
h
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/memops_generic.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018-2020, Intel Corporation */
/*
* memops_generic.c -- architecture-independent memmove & memset fallback
*
* This fallback is needed to fulfill guarantee that pmem_mem[cpy|set|move]
* will use at least 8-byte stores (for 8-byte aligned buffers and sizes),
* even when an accelerated implementation is missing or disabled.
* This guarantee is needed to maintain correctness, e.g. in pmemobj.
* Libc may do the same, but this behavior is not documented, so we can't rely
* on that.
*/
#include <stddef.h>
#include "out.h"
#include "pmem2_arch.h"
#include "util.h"
/*
* pmem2_flush_flags -- internal wrapper around pmem_flush
*/
static inline void
pmem2_flush_flags(const void *addr, size_t len, unsigned flags,
flush_func flush)
{
if (!(flags & PMEM2_F_MEM_NOFLUSH))
flush(addr, len);
}
/*
* cpy128 -- (internal) copy 128 bytes from src to dst
*/
static force_inline void
cpy128(uint64_t *dst, const uint64_t *src)
{
/*
* We use atomics here just to be sure compiler will not split stores.
* Order of stores doesn't matter.
*/
uint64_t tmp[16];
util_atomic_load_explicit64(&src[0], &tmp[0], memory_order_relaxed);
util_atomic_load_explicit64(&src[1], &tmp[1], memory_order_relaxed);
util_atomic_load_explicit64(&src[2], &tmp[2], memory_order_relaxed);
util_atomic_load_explicit64(&src[3], &tmp[3], memory_order_relaxed);
util_atomic_load_explicit64(&src[4], &tmp[4], memory_order_relaxed);
util_atomic_load_explicit64(&src[5], &tmp[5], memory_order_relaxed);
util_atomic_load_explicit64(&src[6], &tmp[6], memory_order_relaxed);
util_atomic_load_explicit64(&src[7], &tmp[7], memory_order_relaxed);
util_atomic_load_explicit64(&src[8], &tmp[8], memory_order_relaxed);
util_atomic_load_explicit64(&src[9], &tmp[9], memory_order_relaxed);
util_atomic_load_explicit64(&src[10], &tmp[10], memory_order_relaxed);
util_atomic_load_explicit64(&src[11], &tmp[11], memory_order_relaxed);
util_atomic_load_explicit64(&src[12], &tmp[12], memory_order_relaxed);
util_atomic_load_explicit64(&src[13], &tmp[13], memory_order_relaxed);
util_atomic_load_explicit64(&src[14], &tmp[14], memory_order_relaxed);
util_atomic_load_explicit64(&src[15], &tmp[15], memory_order_relaxed);
util_atomic_store_explicit64(&dst[0], tmp[0], memory_order_relaxed);
util_atomic_store_explicit64(&dst[1], tmp[1], memory_order_relaxed);
util_atomic_store_explicit64(&dst[2], tmp[2], memory_order_relaxed);
util_atomic_store_explicit64(&dst[3], tmp[3], memory_order_relaxed);
util_atomic_store_explicit64(&dst[4], tmp[4], memory_order_relaxed);
util_atomic_store_explicit64(&dst[5], tmp[5], memory_order_relaxed);
util_atomic_store_explicit64(&dst[6], tmp[6], memory_order_relaxed);
util_atomic_store_explicit64(&dst[7], tmp[7], memory_order_relaxed);
util_atomic_store_explicit64(&dst[8], tmp[8], memory_order_relaxed);
util_atomic_store_explicit64(&dst[9], tmp[9], memory_order_relaxed);
util_atomic_store_explicit64(&dst[10], tmp[10], memory_order_relaxed);
util_atomic_store_explicit64(&dst[11], tmp[11], memory_order_relaxed);
util_atomic_store_explicit64(&dst[12], tmp[12], memory_order_relaxed);
util_atomic_store_explicit64(&dst[13], tmp[13], memory_order_relaxed);
util_atomic_store_explicit64(&dst[14], tmp[14], memory_order_relaxed);
util_atomic_store_explicit64(&dst[15], tmp[15], memory_order_relaxed);
}
/*
* cpy64 -- (internal) copy 64 bytes from src to dst
*/
static force_inline void
cpy64(uint64_t *dst, const uint64_t *src)
{
/*
* We use atomics here just to be sure compiler will not split stores.
* Order of stores doesn't matter.
*/
uint64_t tmp[8];
util_atomic_load_explicit64(&src[0], &tmp[0], memory_order_relaxed);
util_atomic_load_explicit64(&src[1], &tmp[1], memory_order_relaxed);
util_atomic_load_explicit64(&src[2], &tmp[2], memory_order_relaxed);
util_atomic_load_explicit64(&src[3], &tmp[3], memory_order_relaxed);
util_atomic_load_explicit64(&src[4], &tmp[4], memory_order_relaxed);
util_atomic_load_explicit64(&src[5], &tmp[5], memory_order_relaxed);
util_atomic_load_explicit64(&src[6], &tmp[6], memory_order_relaxed);
util_atomic_load_explicit64(&src[7], &tmp[7], memory_order_relaxed);
util_atomic_store_explicit64(&dst[0], tmp[0], memory_order_relaxed);
util_atomic_store_explicit64(&dst[1], tmp[1], memory_order_relaxed);
util_atomic_store_explicit64(&dst[2], tmp[2], memory_order_relaxed);
util_atomic_store_explicit64(&dst[3], tmp[3], memory_order_relaxed);
util_atomic_store_explicit64(&dst[4], tmp[4], memory_order_relaxed);
util_atomic_store_explicit64(&dst[5], tmp[5], memory_order_relaxed);
util_atomic_store_explicit64(&dst[6], tmp[6], memory_order_relaxed);
util_atomic_store_explicit64(&dst[7], tmp[7], memory_order_relaxed);
}
/*
* cpy8 -- (internal) copy 8 bytes from src to dst
*/
static force_inline void
cpy8(uint64_t *dst, const uint64_t *src)
{
uint64_t tmp;
util_atomic_load_explicit64(src, &tmp, memory_order_relaxed);
util_atomic_store_explicit64(dst, tmp, memory_order_relaxed);
}
/*
* store8 -- (internal) store 8 bytes
*/
static force_inline void
store8(uint64_t *dst, uint64_t c)
{
util_atomic_store_explicit64(dst, c, memory_order_relaxed);
}
/*
* memmove_nodrain_generic -- generic memmove to pmem without hw drain
*/
void *
memmove_nodrain_generic(void *dst, const void *src, size_t len,
unsigned flags, flush_func flush)
{
LOG(15, "pmemdest %p src %p len %zu flags 0x%x", dst, src, len,
flags);
char *cdst = dst;
const char *csrc = src;
size_t remaining;
(void) flags;
if ((uintptr_t)cdst - (uintptr_t)csrc >= len) {
size_t cnt = (uint64_t)cdst & 7;
if (cnt > 0) {
cnt = 8 - cnt;
if (cnt > len)
cnt = len;
for (size_t i = 0; i < cnt; ++i)
cdst[i] = csrc[i];
pmem2_flush_flags(cdst, cnt, flags, flush);
cdst += cnt;
csrc += cnt;
len -= cnt;
}
uint64_t *dst8 = (uint64_t *)cdst;
const uint64_t *src8 = (const uint64_t *)csrc;
while (len >= 128 && CACHELINE_SIZE == 128) {
cpy128(dst8, src8);
pmem2_flush_flags(dst8, 128, flags, flush);
len -= 128;
dst8 += 16;
src8 += 16;
}
while (len >= 64) {
cpy64(dst8, src8);
pmem2_flush_flags(dst8, 64, flags, flush);
len -= 64;
dst8 += 8;
src8 += 8;
}
remaining = len;
while (len >= 8) {
cpy8(dst8, src8);
len -= 8;
dst8++;
src8++;
}
cdst = (char *)dst8;
csrc = (const char *)src8;
for (size_t i = 0; i < len; ++i)
*cdst++ = *csrc++;
if (remaining)
pmem2_flush_flags(cdst - remaining, remaining, flags,
flush);
} else {
cdst += len;
csrc += len;
size_t cnt = (uint64_t)cdst & 7;
if (cnt > 0) {
if (cnt > len)
cnt = len;
cdst -= cnt;
csrc -= cnt;
len -= cnt;
for (size_t i = cnt; i > 0; --i)
cdst[i - 1] = csrc[i - 1];
pmem2_flush_flags(cdst, cnt, flags, flush);
}
uint64_t *dst8 = (uint64_t *)cdst;
const uint64_t *src8 = (const uint64_t *)csrc;
while (len >= 128 && CACHELINE_SIZE == 128) {
dst8 -= 16;
src8 -= 16;
cpy128(dst8, src8);
pmem2_flush_flags(dst8, 128, flags, flush);
len -= 128;
}
while (len >= 64) {
dst8 -= 8;
src8 -= 8;
cpy64(dst8, src8);
pmem2_flush_flags(dst8, 64, flags, flush);
len -= 64;
}
remaining = len;
while (len >= 8) {
--dst8;
--src8;
cpy8(dst8, src8);
len -= 8;
}
cdst = (char *)dst8;
csrc = (const char *)src8;
for (size_t i = len; i > 0; --i)
*--cdst = *--csrc;
if (remaining)
pmem2_flush_flags(cdst, remaining, flags, flush);
}
return dst;
}
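/*
 * Example (illustrative only): a minimal caller sketch. A real flush
 * callback comes from the architecture dispatch table; the no-op
 * flush_empty_nolog from pmem2_arch.h stands in for it here.
 */
static void *
example_memmove_noflush(void *dst, const void *src, size_t len)
{
/* PMEM2_F_MEM_NOFLUSH skips flushing of the destination entirely */
return memmove_nodrain_generic(dst, src, len,
PMEM2_F_MEM_NOFLUSH, flush_empty_nolog);
}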
/*
* memset_nodrain_generic -- generic memset to pmem without hw drain
*/
void *
memset_nodrain_generic(void *dst, int c, size_t len, unsigned flags,
flush_func flush)
{
LOG(15, "pmemdest %p c 0x%x len %zu flags 0x%x", dst, c, len,
flags);
(void) flags;
char *cdst = dst;
size_t cnt = (uint64_t)cdst & 7;
if (cnt > 0) {
cnt = 8 - cnt;
if (cnt > len)
cnt = len;
for (size_t i = 0; i < cnt; ++i)
cdst[i] = (char)c;
pmem2_flush_flags(cdst, cnt, flags, flush);
cdst += cnt;
len -= cnt;
}
uint64_t *dst8 = (uint64_t *)cdst;
uint64_t u = (unsigned char)c;
uint64_t tmp = (u << 56) | (u << 48) | (u << 40) | (u << 32) |
(u << 24) | (u << 16) | (u << 8) | u;
while (len >= 128 && CACHELINE_SIZE == 128) {
store8(&dst8[0], tmp);
store8(&dst8[1], tmp);
store8(&dst8[2], tmp);
store8(&dst8[3], tmp);
store8(&dst8[4], tmp);
store8(&dst8[5], tmp);
store8(&dst8[6], tmp);
store8(&dst8[7], tmp);
store8(&dst8[8], tmp);
store8(&dst8[9], tmp);
store8(&dst8[10], tmp);
store8(&dst8[11], tmp);
store8(&dst8[12], tmp);
store8(&dst8[13], tmp);
store8(&dst8[14], tmp);
store8(&dst8[15], tmp);
pmem2_flush_flags(dst8, 128, flags, flush);
len -= 128;
dst8 += 16;
}
while (len >= 64) {
store8(&dst8[0], tmp);
store8(&dst8[1], tmp);
store8(&dst8[2], tmp);
store8(&dst8[3], tmp);
store8(&dst8[4], tmp);
store8(&dst8[5], tmp);
store8(&dst8[6], tmp);
store8(&dst8[7], tmp);
pmem2_flush_flags(dst8, 64, flags, flush);
len -= 64;
dst8 += 8;
}
size_t remaining = len;
while (len >= 8) {
store8(dst8, tmp);
len -= 8;
dst8++;
}
cdst = (char *)dst8;
for (size_t i = 0; i < len; ++i)
*cdst++ = (char)c;
if (remaining)
pmem2_flush_flags(cdst - remaining, remaining, flags, flush);
return dst;
}
| 9,345 | 26.488235 | 78 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/pmem2_arch.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2020, Intel Corporation */
/*
* pmem2_arch.h -- core-arch interface
*/
#ifndef PMEM2_ARCH_H
#define PMEM2_ARCH_H
#include <stddef.h>
#include "libpmem2.h"
#include "util.h"
#include "valgrind_internal.h"
#ifdef __cplusplus
extern "C" {
#endif
struct pmem2_arch_info;
typedef void (*fence_func)(void);
typedef void (*flush_func)(const void *, size_t);
typedef void *(*memmove_nodrain_func)(void *pmemdest, const void *src,
size_t len, unsigned flags, flush_func flush);
typedef void *(*memset_nodrain_func)(void *pmemdest, int c, size_t len,
unsigned flags, flush_func flush);
struct pmem2_arch_info {
memmove_nodrain_func memmove_nodrain;
memmove_nodrain_func memmove_nodrain_eadr;
memset_nodrain_func memset_nodrain;
memset_nodrain_func memset_nodrain_eadr;
flush_func flush;
fence_func fence;
int flush_has_builtin_fence;
};
void pmem2_arch_init(struct pmem2_arch_info *info);
/*
* flush_empty_nolog -- (internal) do not flush the CPU cache
*/
static force_inline void
flush_empty_nolog(const void *addr, size_t len)
{
/* NOP, but tell pmemcheck about it */
VALGRIND_DO_FLUSH(addr, len);
}
void *memmove_nodrain_generic(void *pmemdest, const void *src, size_t len,
unsigned flags, flush_func flush);
void *memset_nodrain_generic(void *pmemdest, int c, size_t len, unsigned flags,
flush_func flush);
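/*
 * Example (illustrative only): how a platform might fill in the dispatch
 * table with the generic fallbacks declared above. Real implementations
 * install accelerated variants in pmem2_arch_init(); the fence member is
 * left NULL here on the assumption that platform code fills it in.
 */
static inline void
example_arch_info_generic(struct pmem2_arch_info *info)
{
info->memmove_nodrain = memmove_nodrain_generic;
info->memmove_nodrain_eadr = memmove_nodrain_generic;
info->memset_nodrain = memset_nodrain_generic;
info->memset_nodrain_eadr = memset_nodrain_generic;
info->flush = flush_empty_nolog;
info->fence = NULL; /* assumption: set by platform-specific code */
info->flush_has_builtin_fence = 0;
}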
#ifdef __cplusplus
}
#endif
#endif
| 1,427 | 22.8 | 79 |
h
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/libpmem2.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019-2020, Intel Corporation */
/*
* libpmem2.c -- pmem2 library constructor & destructor
*/
#include "libpmem2.h"
#include "map.h"
#include "out.h"
#include "persist.h"
#include "pmem2.h"
#include "util.h"
/*
* libpmem2_init -- load-time initialization for libpmem2
*
* Called automatically by the run-time loader.
*/
ATTR_CONSTRUCTOR
void
libpmem2_init(void)
{
util_init();
out_init(PMEM2_LOG_PREFIX, PMEM2_LOG_LEVEL_VAR, PMEM2_LOG_FILE_VAR,
PMEM2_MAJOR_VERSION, PMEM2_MINOR_VERSION);
LOG(3, NULL);
pmem2_map_init();
pmem2_persist_init();
}
/*
* libpmem2_fini -- libpmem2 cleanup routine
*
* Called automatically when the process terminates.
*/
ATTR_DESTRUCTOR
void
libpmem2_fini(void)
{
LOG(3, NULL);
pmem2_map_fini();
out_fini();
}
| 819 | 15.734694 | 68 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/region_namespace_ndctl.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* region_namespace_ndctl.c -- common ndctl functions
*/
#include <ndctl/libndctl.h>
#include <ndctl/libdaxctl.h>
#include <sys/sysmacros.h>
#include <fcntl.h>
#include "libpmem2.h"
#include "pmem2_utils.h"
#include "region_namespace_ndctl.h"
#include "region_namespace.h"
#include "out.h"
/*
* ndctl_match_devdax -- (internal) returns 0 if the devdax matches
* with the given file, 1 if it doesn't match,
* and a negative value in case of an error.
*/
static int
ndctl_match_devdax(dev_t st_rdev, const char *devname)
{
LOG(3, "st_rdev %lu devname %s", st_rdev, devname);
if (*devname == '\0')
return 1;
char path[PATH_MAX];
os_stat_t stat;
if (util_snprintf(path, PATH_MAX, "/dev/%s", devname) < 0) {
ERR("!snprintf");
return PMEM2_E_ERRNO;
}
if (os_stat(path, &stat)) {
ERR("!stat %s", path);
return PMEM2_E_ERRNO;
}
if (st_rdev != stat.st_rdev) {
LOG(10, "skipping not matching device: %s", path);
return 1;
}
LOG(4, "found matching device: %s", path);
return 0;
}
#define BUFF_LENGTH 64
/*
* ndctl_match_fsdax -- (internal) returns 0 if the device matches
* with the given file, 1 if it doesn't match,
* and a negative value in case of an error.
*/
static int
ndctl_match_fsdax(dev_t st_dev, const char *devname)
{
LOG(3, "st_dev %lu devname %s", st_dev, devname);
if (*devname == '\0')
return 1;
char path[PATH_MAX];
char dev_id[BUFF_LENGTH];
if (util_snprintf(path, PATH_MAX, "/sys/block/%s/dev", devname) < 0) {
ERR("!snprintf");
return PMEM2_E_ERRNO;
}
if (util_snprintf(dev_id, BUFF_LENGTH, "%d:%d",
major(st_dev), minor(st_dev)) < 0) {
ERR("!snprintf");
return PMEM2_E_ERRNO;
}
int fd = os_open(path, O_RDONLY);
if (fd < 0) {
ERR("!open \"%s\"", path);
return PMEM2_E_ERRNO;
}
char buff[BUFF_LENGTH];
ssize_t nread = read(fd, buff, BUFF_LENGTH);
if (nread < 0) {
ERR("!read");
int oerrno = errno; /* save the errno */
os_close(fd);
errno = oerrno;
return PMEM2_E_ERRNO;
}
os_close(fd);
if (nread == 0) {
ERR("%s is empty", path);
return PMEM2_E_INVALID_DEV_FORMAT;
}
if (buff[nread - 1] != '\n') {
ERR("%s doesn't end with new line", path);
return PMEM2_E_INVALID_DEV_FORMAT;
}
buff[nread - 1] = '\0';
if (strcmp(buff, dev_id) != 0) {
LOG(10, "skipping not matching device: %s", path);
return 1;
}
LOG(4, "found matching device: %s", path);
return 0;
}
/*
* pmem2_region_namespace -- returns the region
* (and optionally the namespace)
* where the given file is located
*/
int
pmem2_region_namespace(struct ndctl_ctx *ctx,
const struct pmem2_source *src,
struct ndctl_region **pregion,
struct ndctl_namespace **pndns)
{
LOG(3, "ctx %p src %p pregion %p pnamespace %p",
ctx, src, pregion, pndns);
struct ndctl_bus *bus;
struct ndctl_region *region;
struct ndctl_namespace *ndns;
if (pregion)
*pregion = NULL;
if (pndns)
*pndns = NULL;
if (src->value.ftype == PMEM2_FTYPE_DIR) {
ERR("cannot check region or namespace of a directory");
return PMEM2_E_INVALID_FILE_TYPE;
}
FOREACH_BUS_REGION_NAMESPACE(ctx, bus, region, ndns) {
struct ndctl_btt *btt;
struct ndctl_dax *dax = NULL;
struct ndctl_pfn *pfn;
const char *devname;
if ((dax = ndctl_namespace_get_dax(ndns))) {
if (src->value.ftype == PMEM2_FTYPE_REG)
continue;
ASSERTeq(src->value.ftype, PMEM2_FTYPE_DEVDAX);
struct daxctl_region *dax_region;
dax_region = ndctl_dax_get_daxctl_region(dax);
if (!dax_region) {
ERR("!cannot find dax region");
return PMEM2_E_DAX_REGION_NOT_FOUND;
}
struct daxctl_dev *dev;
daxctl_dev_foreach(dax_region, dev) {
devname = daxctl_dev_get_devname(dev);
int ret = ndctl_match_devdax(src->value.st_rdev,
devname);
if (ret < 0)
return ret;
if (ret == 0) {
if (pregion)
*pregion = region;
if (pndns)
*pndns = ndns;
return 0;
}
}
} else {
if (src->value.ftype == PMEM2_FTYPE_DEVDAX)
continue;
ASSERTeq(src->value.ftype, PMEM2_FTYPE_REG);
if ((btt = ndctl_namespace_get_btt(ndns))) {
devname = ndctl_btt_get_block_device(btt);
} else if ((pfn = ndctl_namespace_get_pfn(ndns))) {
devname = ndctl_pfn_get_block_device(pfn);
} else {
devname =
ndctl_namespace_get_block_device(ndns);
}
int ret = ndctl_match_fsdax(src->value.st_dev, devname);
if (ret < 0)
return ret;
if (ret == 0) {
if (pregion)
*pregion = region;
if (pndns)
*pndns = ndns;
return 0;
}
}
}
LOG(10, "did not found any matching device");
return 0;
}
/*
* pmem2_get_region_id -- returns the region id
*/
int
pmem2_get_region_id(const struct pmem2_source *src, unsigned *region_id)
{
LOG(3, "src %p region_id %p", src, region_id);
struct ndctl_region *region;
struct ndctl_namespace *ndns;
struct ndctl_ctx *ctx;
errno = ndctl_new(&ctx) * (-1);
if (errno) {
ERR("!ndctl_new");
return PMEM2_E_ERRNO;
}
int rv = pmem2_region_namespace(ctx, src, ®ion, &ndns);
if (rv) {
LOG(1, "getting region and namespace failed");
goto end;
}
if (!region) {
ERR("unknown region");
rv = PMEM2_E_DAX_REGION_NOT_FOUND;
goto end;
}
*region_id = ndctl_region_get_id(region);
end:
ndctl_unref(ctx);
return rv;
}
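/*
 * Example (illustrative only): querying the region id for an already-filled
 * pmem2_source. The helper name example_log_region is hypothetical.
 */
static void
example_log_region(const struct pmem2_source *src)
{
unsigned region_id;
if (pmem2_get_region_id(src, &region_id) == 0)
LOG(4, "file belongs to nd region%u", region_id);
}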
| 5,467 | 20.111969 | 72 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/persist.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019-2020, Intel Corporation */
/*
* persist.h -- internal definitions for libpmem2 persist module
*/
#ifndef PMEM2_PERSIST_H
#define PMEM2_PERSIST_H
#include <stddef.h>
#include "map.h"
#ifdef __cplusplus
extern "C" {
#endif
void pmem2_persist_init(void);
int pmem2_flush_file_buffers_os(struct pmem2_map *map, const void *addr,
size_t len, int autorestart);
void pmem2_set_flush_fns(struct pmem2_map *map);
void pmem2_set_mem_fns(struct pmem2_map *map);
#ifdef __cplusplus
}
#endif
#endif
| 557 | 17.6 | 72 |
h
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/source.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
#ifndef PMEM2_SOURCE_H
#define PMEM2_SOURCE_H
#include "os.h"
#define INVALID_FD (-1)
enum pmem2_file_type {
PMEM2_FTYPE_REG = 1,
PMEM2_FTYPE_DEVDAX = 2,
PMEM2_FTYPE_DIR = 3,
};
enum pmem2_source_type {
PMEM2_SOURCE_UNSPECIFIED,
PMEM2_SOURCE_ANON,
PMEM2_SOURCE_FD,
PMEM2_SOURCE_HANDLE,
MAX_PMEM2_SOURCE_TYPE
};
struct pmem2_source {
/* a source file descriptor / handle for the designed mapping */
enum pmem2_source_type type;
struct {
enum pmem2_file_type ftype;
union {
/* PMEM2_SOURCE_ANON */
size_t size;
#ifdef _WIN32
/* PMEM2_SOURCE_HANDLE */
HANDLE handle;
#else
/* PMEM2_SOURCE_FD */
struct {
int fd;
dev_t st_rdev;
dev_t st_dev;
};
#endif
};
} value;
};
#endif /* PMEM2_SOURCE_H */
| 831 | 15.64 | 65 |
h
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/pmem2_utils_other.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2020, Intel Corporation */
#include <errno.h>
#include <sys/stat.h>
#include "libpmem2.h"
#include "out.h"
#include "pmem2_utils.h"
#ifdef _WIN32
#define S_ISREG(m) (((m) & S_IFMT) == S_IFREG)
#define S_ISDIR(m) (((m) & S_IFMT) == S_IFDIR)
#endif
int
pmem2_get_type_from_stat(const os_stat_t *st, enum pmem2_file_type *type)
{
if (S_ISREG(st->st_mode)) {
*type = PMEM2_FTYPE_REG;
return 0;
}
if (S_ISDIR(st->st_mode)) {
*type = PMEM2_FTYPE_DIR;
return 0;
}
ERR("file type 0%o not supported", st->st_mode & S_IFMT);
return PMEM2_E_INVALID_FILE_TYPE;
}
/*
* pmem2_device_dax_size -- checks the size of a given
* dax device from the given source structure
*/
int
pmem2_device_dax_size(const struct pmem2_source *src, size_t *size)
{
const char *err =
"BUG: pmem2_device_dax_size should never be called on this OS";
ERR("%s", err);
ASSERTinfo(0, err);
return PMEM2_E_NOSUPP;
}
/*
* pmem2_device_dax_alignment -- checks the alignment of a given
* dax device from the given source
*/
int
pmem2_device_dax_alignment(const struct pmem2_source *src, size_t *alignment)
{
const char *err =
"BUG: pmem2_device_dax_alignment should never be called on this OS";
ERR("%s", err);
ASSERTinfo(0, err);
return PMEM2_E_NOSUPP;
}
| 1,301 | 20.7 | 77 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/deep_flush.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* deep_flush.c -- pmem2_deep_flush implementation
*/
#include <stdlib.h>
#include "libpmem2.h"
#include "deep_flush.h"
#include "out.h"
/*
* pmem2_deep_flush -- performs deep flush operation
*/
int
pmem2_deep_flush(struct pmem2_map *map, void *ptr, size_t size)
{
LOG(3, "map %p ptr %p size %zu", map, ptr, size);
uintptr_t map_addr = (uintptr_t)map->addr;
uintptr_t map_end = map_addr + map->content_length;
uintptr_t flush_addr = (uintptr_t)ptr;
uintptr_t flush_end = flush_addr + size;
if (flush_addr < map_addr || flush_end > map_end) {
ERR("requested deep flush rage ptr %p size %zu"
"exceeds map range %p", ptr, size, map);
return PMEM2_E_DEEP_FLUSH_RANGE;
}
int ret = map->deep_flush_fn(map, ptr, size);
if (ret) {
LOG(1, "cannot perform deep flush operation for map %p", map);
return ret;
}
return 0;
}
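/*
 * Example (illustrative only): deep-flushing an entire mapping. The helper
 * name example_deep_flush_all is hypothetical.
 */
static int
example_deep_flush_all(struct pmem2_map *map)
{
return pmem2_deep_flush(map, map->addr, map->content_length);
}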
| 929 | 21.682927 | 64 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/pmem2.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019, Intel Corporation */
/*
* pmem2.h -- internal definitions for libpmem2
*/
#ifndef PMEM2_H
#define PMEM2_H
#include "libpmem2.h"
#ifdef __cplusplus
extern "C" {
#endif
#define PMEM2_MAJOR_VERSION 0
#define PMEM2_MINOR_VERSION 0
#define PMEM2_LOG_PREFIX "libpmem2"
#define PMEM2_LOG_LEVEL_VAR "PMEM2_LOG_LEVEL"
#define PMEM2_LOG_FILE_VAR "PMEM2_LOG_FILE"
#ifdef __cplusplus
}
#endif
#endif
| 457 | 15.357143 | 47 |
h
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/usc_none.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* usc_none.c -- pmem2 usc function for non supported platform
*/
#include "libpmem2.h"
int
pmem2_source_device_id(const struct pmem2_source *src, char *id, size_t *len)
{
return PMEM2_E_NOSUPP;
}
int
pmem2_source_device_usc(const struct pmem2_source *src, uint64_t *usc)
{
return PMEM2_E_NOSUPP;
}
| 390 | 17.619048 | 77 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/map_posix.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019-2020, Intel Corporation */
/*
* map_posix.c -- pmem2_map (POSIX)
*/
#include <errno.h>
#include <stdbool.h>
#include <string.h>
#include <sys/mman.h>
#include "libpmem2.h"
#include "alloc.h"
#include "auto_flush.h"
#include "config.h"
#include "file.h"
#include "map.h"
#include "out.h"
#include "persist.h"
#include "pmem2_utils.h"
#include "source.h"
#include "valgrind_internal.h"
#ifndef MAP_SYNC
#define MAP_SYNC 0x80000
#endif
#ifndef MAP_SHARED_VALIDATE
#define MAP_SHARED_VALIDATE 0x03
#endif
#define MEGABYTE ((uintptr_t)1 << 20)
#define GIGABYTE ((uintptr_t)1 << 30)
/* indicates the cases in which the error cannot occur */
#define GRAN_IMPOSSIBLE "impossible"
#ifdef __linux__
/* requested CACHE_LINE, available PAGE */
#define REQ_CL_AVAIL_PG \
"requested granularity not available because fd doesn't point to DAX-enabled file " \
"or kernel doesn't support MAP_SYNC flag (Linux >= 4.15)"
/* requested BYTE, available PAGE */
#define REQ_BY_AVAIL_PG REQ_CL_AVAIL_PG
/* requested BYTE, available CACHE_LINE */
#define REQ_BY_AVAIL_CL \
"requested granularity not available because the platform doesn't support eADR"
static const char *granularity_err_msg[3][3] = {
/* requested granularity / available granularity */
/* -------------------------------------------------------------------- */
/* BYTE CACHE_LINE PAGE */
/* -------------------------------------------------------------------- */
/* BYTE */ {GRAN_IMPOSSIBLE, REQ_BY_AVAIL_CL, REQ_BY_AVAIL_PG},
/* CL */ {GRAN_IMPOSSIBLE, GRAN_IMPOSSIBLE, REQ_CL_AVAIL_PG},
/* PAGE */ {GRAN_IMPOSSIBLE, GRAN_IMPOSSIBLE, GRAN_IMPOSSIBLE}};
#else
/* requested CACHE_LINE, available PAGE */
#define REQ_CL_AVAIL_PG \
"the operating system doesn't provide a method of detecting granularity"
/* requested BYTE, available PAGE */
#define REQ_BY_AVAIL_PG \
"the operating system doesn't provide a method of detecting whether the platform supports eADR"
static const char *granularity_err_msg[3][3] = {
/* requested granularity / available granularity */
/* -------------------------------------------------------------------- */
/* BYTE CACHE_LINE PAGE */
/* -------------------------------------------------------------------- */
/* BYTE */ {GRAN_IMPOSSIBLE, GRAN_IMPOSSIBLE, REQ_BY_AVAIL_PG},
/* CL */ {GRAN_IMPOSSIBLE, GRAN_IMPOSSIBLE, REQ_CL_AVAIL_PG},
/* PAGE */ {GRAN_IMPOSSIBLE, GRAN_IMPOSSIBLE, GRAN_IMPOSSIBLE}};
#endif
/*
* get_map_alignment -- (internal) choose the desired mapping alignment
*
* The smallest supported alignment is 2 megabytes because of the object
* alignment requirements. Changing this value to 4 kilobytes constitutes a
* layout change.
*
* Use 1GB page alignment only if the mapping length is at least
* twice as big as the page size.
*/
static inline size_t
get_map_alignment(size_t len, size_t req_align)
{
size_t align = 2 * MEGABYTE;
if (req_align)
align = req_align;
else if (len >= 2 * GIGABYTE)
align = GIGABYTE;
return align;
}
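/*
 * Worked example (illustrative): with no requested alignment, a 5 GiB
 * mapping gets 1 GiB alignment (len >= 2 * GIGABYTE), while a 100 MiB
 * mapping keeps the default 2 MiB. An explicit req_align always wins.
 */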
/*
* map_reserve -- (internal) reserve an address for mmap()
*
* ASLR in the 64-bit Linux kernel uses 28 bits of randomness for mmap
* (bit positions 12-39), which means the base mapping address is randomized
* within the [0..1024GB] range, with 4KB granularity. Assuming additional
* 1GB alignment, it results in 1024 possible locations.
*/
static int
map_reserve(size_t len, size_t alignment, void **reserv, size_t *reslen,
const struct pmem2_config *cfg)
{
ASSERTne(reserv, NULL);
/* let's get addr from the cfg */
void *mmap_addr = cfg->addr;
int mmap_addr_flag = 0;
size_t dlength; /* dummy length */
/* if addr is initialized, dlength == len */
if (mmap_addr)
dlength = len;
else
dlength = len + alignment; /* dummy length */
/* "translate" pmem2 addr request type into linux flag */
if (cfg->addr_request == PMEM2_ADDRESS_FIXED_NOREPLACE) {
/*
* the Linux kernel supports this flag since version 4.17, but we can
* still imitate it even if it is not exposed by libc or supported by
* the kernel
*/
#ifdef MAP_FIXED_NOREPLACE
mmap_addr_flag = MAP_FIXED_NOREPLACE;
#else
mmap_addr_flag = 0;
#endif
}
/*
* Create dummy mapping to find an unused region of given size.
* Request for increased size for later address alignment.
* Use MAP_PRIVATE with read-only access to simulate
* zero cost for overcommit accounting. Note: MAP_NORESERVE
* flag is ignored if overcommit is disabled (mode 2).
*/
char *daddr = mmap(mmap_addr, dlength, PROT_READ,
MAP_PRIVATE | MAP_ANONYMOUS | mmap_addr_flag, -1, 0);
if (daddr == MAP_FAILED) {
if (errno == EEXIST) {
ERR("!mmap MAP_FIXED_NOREPLACE");
return PMEM2_E_MAPPING_EXISTS;
}
ERR("!mmap MAP_ANONYMOUS");
return PMEM2_E_ERRNO;
}
/*
* When kernel does not support MAP_FIXED_NOREPLACE flag we imitate it.
* If kernel does not support flag and given addr is occupied, kernel
* chooses new addr randomly and returns it. We do not want that
* behavior, so we validate it and fail when addresses do not match.
*/
if (mmap_addr && cfg->addr_request == PMEM2_ADDRESS_FIXED_NOREPLACE) {
/* mapping passed and gave different addr, while it shouldn't */
if (daddr != mmap_addr) {
munmap(daddr, dlength);
ERR("mapping exists in the given address");
return PMEM2_E_MAPPING_EXISTS;
}
}
LOG(4, "system choice %p", daddr);
*reserv = (void *)roundup((uintptr_t)daddr, alignment);
/*
* since the last part of the reservation from (reserv + reslen == end)
* will be unmapped, the 'end' address has to be page-aligned.
* 'reserv' is already page-aligned (or even aligned to multiple of page
* size) so it is enough to page-align the 'reslen' value.
*/
*reslen = roundup(len, Pagesize);
LOG(4, "hint %p", *reserv);
/*
* The placeholder mapping is divided into few parts:
*
* 1 2 3 4 5
* |......|uuuuuuuuu|rrr|.................|
*
* Addresses:
* 1 == daddr
* 2 == reserv
* 3 == reserv + len
* 4 == reserv + reslen == end (has to be page-aligned)
* 5 == daddr + dlength
*
* Key:
* - '.' is an unused part of the placeholder
* - 'u' is where the actual mapping lies
* - 'r' is what reserved as padding
*/
/* unmap the placeholder before the actual mapping */
const size_t before = (uintptr_t)(*reserv) - (uintptr_t)daddr;
if (before) {
if (munmap(daddr, before)) {
ERR("!munmap");
return PMEM2_E_ERRNO;
}
}
/* unmap the placeholder after the actual mapping */
const size_t after = dlength - *reslen - before;
void *end = (void *)((uintptr_t)(*reserv) + (uintptr_t)*reslen);
if (after)
if (munmap(end, after)) {
ERR("!munmap");
return PMEM2_E_ERRNO;
}
return 0;
}
/*
* file_map -- (internal) memory map given file into memory
* If (flags & MAP_PRIVATE) it uses just mmap. Otherwise, it tries to mmap with
* (flags | MAP_SHARED_VALIDATE | MAP_SYNC) which allows flushing from the
* user-space. If MAP_SYNC fails and the user did not request it explicitly,
* it falls back to mmap with the user-provided flags.
*/
static int
file_map(void *reserv, size_t len, int proto, int flags,
int fd, off_t offset, bool *map_sync, void **base)
{
LOG(15, "reserve %p len %zu proto %x flags %x fd %d offset %ld "
"map_sync %p", reserv, len, proto, flags, fd, offset,
map_sync);
ASSERTne(map_sync, NULL);
ASSERTne(base, NULL);
/*
* MAP_PRIVATE and MAP_SHARED are mutually exclusive, therefore mmap
* with MAP_PRIVATE is executed separately.
*/
if (flags & MAP_PRIVATE) {
*base = mmap(reserv, len, proto, flags, fd, offset);
if (*base == MAP_FAILED) {
ERR("!mmap");
return PMEM2_E_ERRNO;
}
LOG(4, "mmap with MAP_PRIVATE succeeded");
*map_sync = false;
return 0;
}
/* try to mmap with MAP_SYNC flag */
const int sync_flags = MAP_SHARED_VALIDATE | MAP_SYNC;
*base = mmap(reserv, len, proto, flags | sync_flags, fd, offset);
if (*base != MAP_FAILED) {
LOG(4, "mmap with MAP_SYNC succeeded");
*map_sync = true;
return 0;
}
/* try to mmap with MAP_SHARED flag (without MAP_SYNC) */
if (errno == EINVAL || errno == ENOTSUP) {
LOG(4, "mmap with MAP_SYNC not supported");
*base = mmap(reserv, len, proto, flags | MAP_SHARED, fd,
offset);
if (*base != MAP_FAILED) {
*map_sync = false;
return 0;
}
}
ERR("!mmap");
return PMEM2_E_ERRNO;
}
/*
* unmap -- (internal) unmap a memory range
*/
static int
unmap(void *addr, size_t len)
{
int retval = munmap(addr, len);
if (retval < 0) {
ERR("!munmap");
return PMEM2_E_ERRNO;
}
return 0;
}
/*
* pmem2_map -- map memory according to provided config
*/
int
pmem2_map(const struct pmem2_config *cfg, const struct pmem2_source *src,
struct pmem2_map **map_ptr)
{
LOG(3, "cfg %p src %p map_ptr %p", cfg, src, map_ptr);
int ret = 0;
struct pmem2_map *map;
size_t file_len;
*map_ptr = NULL;
if (cfg->requested_max_granularity == PMEM2_GRANULARITY_INVALID) {
ERR(
"please define the max granularity requested for the mapping");
return PMEM2_E_GRANULARITY_NOT_SET;
}
size_t src_alignment;
ret = pmem2_source_alignment(src, &src_alignment);
if (ret)
return ret;
/* get file size */
ret = pmem2_source_size(src, &file_len);
if (ret)
return ret;
/* get offset */
size_t effective_offset;
ret = pmem2_validate_offset(cfg, &effective_offset, src_alignment);
if (ret)
return ret;
ASSERTeq(effective_offset, cfg->offset);
if (src->type == PMEM2_SOURCE_ANON)
effective_offset = 0;
os_off_t off = (os_off_t)effective_offset;
/* map input and output variables */
bool map_sync = false;
/*
* MAP_SHARED - is required to mmap the underlying hardware directly
* MAP_FIXED - is required to mmap at the exact address pointed to by the hint
*/
int flags = MAP_FIXED;
void *addr;
/* "translate" pmem2 protection flags into linux flags */
int proto = 0;
if (cfg->protection_flag == PMEM2_PROT_NONE)
proto = PROT_NONE;
if (cfg->protection_flag & PMEM2_PROT_EXEC)
proto |= PROT_EXEC;
if (cfg->protection_flag & PMEM2_PROT_READ)
proto |= PROT_READ;
if (cfg->protection_flag & PMEM2_PROT_WRITE)
proto |= PROT_WRITE;
if (src->type == PMEM2_SOURCE_FD) {
if (src->value.ftype == PMEM2_FTYPE_DIR) {
ERR("the directory is not a supported file type");
return PMEM2_E_INVALID_FILE_TYPE;
}
ASSERT(src->value.ftype == PMEM2_FTYPE_REG ||
src->value.ftype == PMEM2_FTYPE_DEVDAX);
if (cfg->sharing == PMEM2_PRIVATE &&
src->value.ftype == PMEM2_FTYPE_DEVDAX) {
ERR(
"device DAX does not support mapping with MAP_PRIVATE");
return PMEM2_E_SRC_DEVDAX_PRIVATE;
}
}
size_t content_length, reserved_length = 0;
ret = pmem2_config_validate_length(cfg, file_len, src_alignment);
if (ret)
return ret;
/* without user-provided length, map to the end of the file */
if (cfg->length)
content_length = cfg->length;
else
content_length = file_len - effective_offset;
size_t alignment = get_map_alignment(content_length,
src_alignment);
ret = pmem2_config_validate_addr_alignment(cfg, src);
if (ret)
return ret;
/* find a hint for the mapping */
void *reserv = NULL;
ret = map_reserve(content_length, alignment, &reserv, &reserved_length,
cfg);
if (ret != 0) {
if (ret == PMEM2_E_MAPPING_EXISTS)
LOG(1, "given mapping region is already occupied");
else
LOG(1, "cannot find a contiguous region of given size");
return ret;
}
ASSERTne(reserv, NULL);
if (cfg->sharing == PMEM2_PRIVATE) {
flags |= MAP_PRIVATE;
}
int map_fd = INVALID_FD;
if (src->type == PMEM2_SOURCE_FD) {
map_fd = src->value.fd;
} else if (src->type == PMEM2_SOURCE_ANON) {
flags |= MAP_ANONYMOUS;
} else {
ASSERT(0);
}
ret = file_map(reserv, content_length, proto, flags, map_fd, off,
&map_sync, &addr);
if (ret) {
/* unmap the reservation mapping */
munmap(reserv, reserved_length);
if (ret == -EACCES)
return PMEM2_E_NO_ACCESS;
else if (ret == -ENOTSUP)
return PMEM2_E_NOSUPP;
else
return ret;
}
LOG(3, "mapped at %p", addr);
bool eADR = (pmem2_auto_flush() == 1);
enum pmem2_granularity available_min_granularity =
src->type == PMEM2_SOURCE_ANON ? PMEM2_GRANULARITY_BYTE :
get_min_granularity(eADR, map_sync, cfg->sharing);
if (available_min_granularity > cfg->requested_max_granularity) {
const char *err = granularity_err_msg
[cfg->requested_max_granularity]
[available_min_granularity];
if (strcmp(err, GRAN_IMPOSSIBLE) == 0)
FATAL(
"unhandled granularity error: available_min_granularity: %d" \
"requested_max_granularity: %d",
available_min_granularity,
cfg->requested_max_granularity);
ERR("%s", err);
ret = PMEM2_E_GRANULARITY_NOT_SUPPORTED;
goto err;
}
/* prepare pmem2_map structure */
map = (struct pmem2_map *)pmem2_malloc(sizeof(*map), &ret);
if (!map)
goto err;
map->addr = addr;
map->reserved_length = reserved_length;
map->content_length = content_length;
map->effective_granularity = available_min_granularity;
pmem2_set_flush_fns(map);
pmem2_set_mem_fns(map);
map->source = *src;
map->source.value.fd = INVALID_FD; /* fd should not be used after map */
ret = pmem2_register_mapping(map);
if (ret)
goto err_register;
*map_ptr = map;
if (src->type == PMEM2_SOURCE_FD) {
VALGRIND_REGISTER_PMEM_MAPPING(map->addr, map->content_length);
VALGRIND_REGISTER_PMEM_FILE(src->value.fd,
map->addr, map->content_length, 0);
}
return 0;
err_register:
free(map);
err:
unmap(addr, reserved_length);
return ret;
}
/*
* pmem2_unmap -- unmap the specified mapping
*/
int
pmem2_unmap(struct pmem2_map **map_ptr)
{
LOG(3, "map_ptr %p", map_ptr);
int ret = 0;
struct pmem2_map *map = *map_ptr;
ret = pmem2_unregister_mapping(map);
if (ret)
return ret;
ret = unmap(map->addr, map->reserved_length);
if (ret)
return ret;
VALGRIND_REMOVE_PMEM_MAPPING(map->addr, map->content_length);
Free(map);
*map_ptr = NULL;
return ret;
}
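/*
 * Example (illustrative only): the typical public-API sequence that drives
 * pmem2_map()/pmem2_unmap() above. Error handling is collapsed for brevity;
 * example_map_file is hypothetical and pmem2_source_delete is assumed to be
 * available from the source module.
 */
static int
example_map_file(int fd, struct pmem2_map **map)
{
struct pmem2_config *cfg;
struct pmem2_source *src;
int ret;
if ((ret = pmem2_config_new(&cfg)))
return ret;
if ((ret = pmem2_source_from_fd(&src, fd)))
goto err_cfg;
ret = pmem2_config_set_required_store_granularity(cfg,
PMEM2_GRANULARITY_PAGE);
if (ret == 0)
ret = pmem2_map(cfg, src, map);
pmem2_source_delete(&src);
err_cfg:
pmem2_config_delete(&cfg);
return ret;
}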
| 13,869 | 25.879845 | 96 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/badblocks_none.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018-2020, Intel Corporation */
/*
* badblocks_none.c -- fake bad blocks functions
*/
#include <errno.h>
#include "libpmem2.h"
#include "out.h"
/*
* pmem2_badblock_context_new -- allocate and create a new bad block context
*/
int
pmem2_badblock_context_new(const struct pmem2_source *src,
struct pmem2_badblock_context **bbctx)
{
return PMEM2_E_NOSUPP;
}
/*
* pmem2_badblock_context_delete -- delete and free the bad block context
*/
void
pmem2_badblock_context_delete(
struct pmem2_badblock_context **bbctx)
{
}
/*
* pmem2_badblock_next -- get the next bad block
*/
int
pmem2_badblock_next(struct pmem2_badblock_context *bbctx,
struct pmem2_badblock *bb)
{
return PMEM2_E_NOSUPP;
}
/*
* pmem2_badblock_clear -- clear one bad block
*/
int
pmem2_badblock_clear(struct pmem2_badblock_context *bbctx,
const struct pmem2_badblock *bb)
{
return PMEM2_E_NOSUPP;
}
| 938 | 17.411765 | 76 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/map.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019-2020, Intel Corporation */
/*
* map.c -- pmem2_map (common)
*/
#include "out.h"
#include "config.h"
#include "map.h"
#include "ravl_interval.h"
#include "os.h"
#include "os_thread.h"
#include "pmem2.h"
#include "pmem2_utils.h"
#include "ravl.h"
#include "sys_util.h"
#include <libpmem2.h>
/*
* pmem2_map_get_address -- get mapping address
*/
void *
pmem2_map_get_address(struct pmem2_map *map)
{
LOG(3, "map %p", map);
return map->addr;
}
/*
* pmem2_map_get_size -- get mapping size
*/
size_t
pmem2_map_get_size(struct pmem2_map *map)
{
LOG(3, "map %p", map);
return map->content_length;
}
/*
* pmem2_map_get_store_granularity -- returns granularity of the mapped
* file
*/
enum pmem2_granularity
pmem2_map_get_store_granularity(struct pmem2_map *map)
{
LOG(3, "map %p", map);
return map->effective_granularity;
}
/*
* parse_force_granularity -- parse PMEM2_FORCE_GRANULARITY environment variable
*/
static enum pmem2_granularity
parse_force_granularity()
{
char *ptr = os_getenv("PMEM2_FORCE_GRANULARITY");
if (ptr) {
char str[11]; /* strlen("CACHE_LINE") + 1 */
if (util_safe_strcpy(str, ptr, sizeof(str))) {
LOG(1, "Invalid value of PMEM2_FORCE_GRANULARITY");
return PMEM2_GRANULARITY_INVALID;
}
char *s = str;
while (*s) {
*s = (char)toupper((char)*s);
s++;
}
if (strcmp(str, "BYTE") == 0) {
return PMEM2_GRANULARITY_BYTE;
} else if (strcmp(str, "CACHE_LINE") == 0) {
return PMEM2_GRANULARITY_CACHE_LINE;
} else if (strcmp(str, "CACHELINE") == 0) {
return PMEM2_GRANULARITY_CACHE_LINE;
} else if (strcmp(str, "PAGE") == 0) {
return PMEM2_GRANULARITY_PAGE;
}
LOG(1, "Invalid value of PMEM2_FORCE_GRANULARITY");
}
return PMEM2_GRANULARITY_INVALID;
}
/*
* get_min_granularity -- checks min available granularity
*/
enum pmem2_granularity
get_min_granularity(bool eADR, bool is_pmem, enum pmem2_sharing_type sharing)
{
enum pmem2_granularity force = parse_force_granularity();
/* PMEM2_PRIVATE sharing does not require data flushing */
if (sharing == PMEM2_PRIVATE)
return PMEM2_GRANULARITY_BYTE;
if (force != PMEM2_GRANULARITY_INVALID)
return force;
if (!is_pmem)
return PMEM2_GRANULARITY_PAGE;
if (!eADR)
return PMEM2_GRANULARITY_CACHE_LINE;
return PMEM2_GRANULARITY_BYTE;
}
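/*
 * Worked example (illustrative): a shared mapping of a DAX file
 * (is_pmem == true) on a non-eADR platform yields
 * PMEM2_GRANULARITY_CACHE_LINE; the same mapping on an eADR platform
 * yields PMEM2_GRANULARITY_BYTE; a non-DAX file always yields
 * PMEM2_GRANULARITY_PAGE (unless PMEM2_FORCE_GRANULARITY overrides).
 */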
/*
* pmem2_validate_offset -- verify if the offset is a multiple of
* the alignment required for the config
*/
int
pmem2_validate_offset(const struct pmem2_config *cfg, size_t *offset,
size_t alignment)
{
ASSERTne(alignment, 0);
if (cfg->offset % alignment) {
ERR("offset is not a multiple of %lu", alignment);
return PMEM2_E_OFFSET_UNALIGNED;
}
*offset = cfg->offset;
return 0;
}
static struct ravl_interval *ri;
static os_rwlock_t lock;
/*
* mapping_min - return min boundary for mapping
*/
static size_t
mapping_min(void *map)
{
return (size_t)pmem2_map_get_address(map);
}
/*
* mapping_max - return max boundary for mapping
*/
static size_t
mapping_max(void *map)
{
return (size_t)pmem2_map_get_address(map) +
pmem2_map_get_size(map);
}
/*
* pmem2_map_init -- initialize the map module
*/
void
pmem2_map_init(void)
{
os_rwlock_init(&lock);
util_rwlock_wrlock(&lock);
ri = ravl_interval_new(mapping_min, mapping_max);
util_rwlock_unlock(&lock);
if (!ri)
abort();
}
/*
* pmem2_map_fini -- finalize the map module
*/
void
pmem2_map_fini(void)
{
util_rwlock_wrlock(&lock);
ravl_interval_delete(ri);
util_rwlock_unlock(&lock);
os_rwlock_destroy(&lock);
}
/*
* pmem2_register_mapping -- register mapping in the mappings tree
*/
int
pmem2_register_mapping(struct pmem2_map *map)
{
util_rwlock_wrlock(&lock);
int ret = ravl_interval_insert(ri, map);
util_rwlock_unlock(&lock);
return ret;
}
/*
* pmem2_unregister_mapping -- unregister mapping from the mappings tree
*/
int
pmem2_unregister_mapping(struct pmem2_map *map)
{
int ret = 0;
struct ravl_interval_node *node;
util_rwlock_wrlock(&lock);
node = ravl_interval_find_equal(ri, map);
if (node)
ret = ravl_interval_remove(ri, node);
else
ret = PMEM2_E_MAPPING_NOT_FOUND;
util_rwlock_unlock(&lock);
return ret;
}
/*
* pmem2_map_find -- find the earliest mapping overlapping with
* (addr, addr+size) range
*/
struct pmem2_map *
pmem2_map_find(const void *addr, size_t len)
{
struct pmem2_map map;
map.addr = (void *)addr;
map.reserved_length = len;
struct ravl_interval_node *node;
util_rwlock_rdlock(&lock);
node = ravl_interval_find(ri, &map);
util_rwlock_unlock(&lock);
if (!node)
return NULL;
return (struct pmem2_map *)ravl_interval_data(node);
}
| 4,629 | 18.535865 | 80 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/pmem2_utils.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019-2020, Intel Corporation */
/*
* pmem2_utils.h -- libpmem2 utilities functions
*/
#ifndef PMEM2_UTILS_H
#define PMEM2_UTILS_H 1
#include <errno.h>
#include "os.h"
#include "out.h"
#include "source.h"
static inline int
pmem2_assert_errno(void)
{
if (!errno) {
ERR("errno is not set");
ASSERTinfo(0, "errno is not set");
return -EINVAL;
}
return -errno;
}
#define PMEM2_E_ERRNO (pmem2_assert_errno())
void *pmem2_malloc(size_t size, int *err);
void *pmem2_zalloc(size_t size, int *err);
void *pmem2_realloc(void *ptr, size_t size, int *err);
#ifdef _WIN32
int pmem2_lasterror_to_err();
#endif
int pmem2_get_type_from_stat(const os_stat_t *st, enum pmem2_file_type *type);
int pmem2_device_dax_size(const struct pmem2_source *src, size_t *size);
int pmem2_device_dax_alignment(const struct pmem2_source *src,
size_t *alignment);
#endif /* PMEM2_UTILS_H */
| 935 | 19.8 | 78 |
h
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/extent_none.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018-2020, Intel Corporation */
/*
* extent_none.c -- fake implementation of the FS extent query API
*/
#include "libpmem2.h"
#include "out.h"
#include "extent.h"
/*
* pmem2_extents_create_get -- allocate extents structure and get extents
* of the given file
*/
int
pmem2_extents_create_get(int fd, struct extents **exts)
{
LOG(3, "fd %i extents %p", fd, exts);
return PMEM2_E_NOSUPP;
}
/*
* pmem2_extents_destroy -- free extents structure
*/
void
pmem2_extents_destroy(struct extents **exts)
{
LOG(3, "extents %p", exts);
}
| 621 | 18.4375 | 73 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/auto_flush_windows.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018-2019, Intel Corporation */
/*
* auto_flush_windows.c -- Windows auto flush detection
*/
#include <windows.h>
#include <inttypes.h>
#include "alloc.h"
#include "out.h"
#include "os.h"
#include "endian.h"
#include "auto_flush_windows.h"
/*
* is_nfit_available -- (internal) check if platform supports NFIT table.
*/
static int
is_nfit_available()
{
LOG(3, "is_nfit_available()");
DWORD signatures_size;
char *signatures = NULL;
int is_nfit = 0;
DWORD offset = 0;
signatures_size = EnumSystemFirmwareTables(ACPI_SIGNATURE, NULL, 0);
if (signatures_size == 0) {
ERR("!EnumSystemFirmwareTables");
return -1;
}
signatures = (char *)Malloc(signatures_size + 1);
if (signatures == NULL) {
ERR("!malloc");
return -1;
}
int ret = EnumSystemFirmwareTables(ACPI_SIGNATURE,
signatures, signatures_size);
signatures[signatures_size] = '\0';
if (ret != signatures_size) {
ERR("!EnumSystemFirmwareTables");
goto err;
}
	/* stop before strncmp() could read past the end of the buffer */
	while (offset + NFIT_SIGNATURE_LEN <= signatures_size) {
int nfit_sig = strncmp(signatures + offset,
NFIT_STR_SIGNATURE, NFIT_SIGNATURE_LEN);
if (nfit_sig == 0) {
is_nfit = 1;
break;
}
offset += NFIT_SIGNATURE_LEN;
}
Free(signatures);
return is_nfit;
err:
Free(signatures);
return -1;
}
/*
* is_auto_flush_cap_set -- (internal) check if specific
* capabilities bits are set.
*
* ACPI 6.2A Specification:
* Bit[0] - CPU Cache Flush to NVDIMM Durability on
* Power Loss Capable. If set to 1, indicates that platform
* ensures the entire CPU store data path is flushed to
* persistent memory on system power loss.
* Bit[1] - Memory Controller Flush to NVDIMM Durability on Power Loss Capable.
* If set to 1, indicates that platform provides mechanisms to automatically
* flush outstanding write data from the memory controller to persistent memory
* in the event of platform power loss. Note: If bit 0 is set to 1 then this bit
* shall be set to 1 as well.
*/
static int
is_auto_flush_cap_set(uint32_t capabilities)
{
LOG(3, "is_auto_flush_cap_set capabilities 0x%" PRIx32, capabilities);
int CPU_cache_flush = CHECK_BIT(capabilities, 0);
int memory_controller_flush = CHECK_BIT(capabilities, 1);
LOG(15, "CPU_cache_flush %d, memory_controller_flush %d",
CPU_cache_flush, memory_controller_flush);
if (memory_controller_flush == 1 && CPU_cache_flush == 1)
return 1;
return 0;
}
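/*
 * Worked example (illustrative): capabilities 0x3 sets both bit 0 (CPU
 * cache flush on power loss) and bit 1 (memory controller flush), so
 * the function above returns 1 (eADR available). Capabilities 0x2
 * clears bit 0, so it returns 0 despite the memory controller flush.
 */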
/*
* parse_nfit_buffer -- (internal) parse nfit buffer
* if platform_capabilities struct is available return pcs structure.
*/
static struct platform_capabilities
parse_nfit_buffer(const unsigned char *nfit_buffer, unsigned long buffer_size)
{
LOG(3, "parse_nfit_buffer nfit_buffer %s, buffer_size %lu",
nfit_buffer, buffer_size);
uint16_t type;
uint16_t length;
size_t offset = sizeof(struct nfit_header);
struct platform_capabilities pcs = {0};
while (offset < buffer_size) {
type = *(nfit_buffer + offset);
length = *(nfit_buffer + offset + 2);
if (type == PCS_TYPE_NUMBER) {
if (length == sizeof(struct platform_capabilities)) {
memmove(&pcs, nfit_buffer + offset, length);
return pcs;
}
}
offset += length;
}
return pcs;
}
/*
* pmem2_auto_flush -- check if platform supports auto flush.
*/
int
pmem2_auto_flush(void)
{
LOG(3, NULL);
DWORD nfit_buffer_size = 0;
DWORD nfit_written = 0;
PVOID nfit_buffer = NULL;
struct nfit_header *nfit_data;
struct platform_capabilities *pc = NULL;
int eADR = 0;
int is_nfit = is_nfit_available();
if (is_nfit == 0) {
LOG(15, "ACPI NFIT table not available");
return 0;
}
	if (is_nfit != 1) {
LOG(1, "!is_nfit_available");
return -1;
}
/* get the entire nfit size */
nfit_buffer_size = GetSystemFirmwareTable(
(DWORD)ACPI_SIGNATURE, (DWORD)NFIT_REV_SIGNATURE, NULL, 0);
if (nfit_buffer_size == 0) {
ERR("!GetSystemFirmwareTable");
return -1;
}
/* reserve buffer */
nfit_buffer = (unsigned char *)Malloc(nfit_buffer_size);
if (nfit_buffer == NULL) {
ERR("!malloc");
goto err;
}
/* write actual nfit to buffer */
nfit_written = GetSystemFirmwareTable(
(DWORD)ACPI_SIGNATURE, (DWORD)NFIT_REV_SIGNATURE,
nfit_buffer, nfit_buffer_size);
if (nfit_written == 0) {
ERR("!GetSystemFirmwareTable");
goto err;
}
if (nfit_buffer_size != nfit_written) {
errno = ERROR_INVALID_DATA;
ERR("!GetSystemFirmwareTable invalid data");
goto err;
}
nfit_data = (struct nfit_header *)nfit_buffer;
int nfit_sig = strncmp(nfit_data->signature,
NFIT_STR_SIGNATURE, NFIT_SIGNATURE_LEN);
if (nfit_sig != 0) {
ERR("!NFIT buffer has invalid data");
goto err;
}
struct platform_capabilities pcs = parse_nfit_buffer(
nfit_buffer, nfit_buffer_size);
eADR = is_auto_flush_cap_set(pcs.capabilities);
Free(nfit_buffer);
return eADR;
err:
Free(nfit_buffer);
return -1;
}
| 4,857 | 23.535354 | 80 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/badblocks_ndctl.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2020, Intel Corporation */
/*
* badblocks_ndctl.c -- implementation of DIMMs API based on the ndctl library
*/
#define _GNU_SOURCE
#include <sys/types.h>
#include <libgen.h>
#include <limits.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/sysmacros.h>
#include <fcntl.h>
#include <ndctl/libndctl.h>
#include <ndctl/libdaxctl.h>
#include "libpmem2.h"
#include "pmem2_utils.h"
#include "source.h"
#include "region_namespace_ndctl.h"
#include "file.h"
#include "out.h"
#include "badblocks.h"
#include "set_badblocks.h"
#include "extent.h"
typedef int pmem2_badblock_next_type(
struct pmem2_badblock_context *bbctx,
struct pmem2_badblock *bb);
typedef void *pmem2_badblock_get_next_type(
struct pmem2_badblock_context *bbctx);
struct pmem2_badblock_context {
/* file descriptor */
int fd;
/* pmem2 file type */
enum pmem2_file_type file_type;
/* ndctl context */
struct ndctl_ctx *ctx;
/*
* Function pointer to:
* - pmem2_badblock_next_namespace() or
* - pmem2_badblock_next_region()
*/
pmem2_badblock_next_type *pmem2_badblock_next_func;
/*
* Function pointer to:
* - pmem2_namespace_get_first_badblock() or
* - pmem2_namespace_get_next_badblock() or
* - pmem2_region_get_first_badblock() or
* - pmem2_region_get_next_badblock()
*/
pmem2_badblock_get_next_type *pmem2_badblock_get_next_func;
/* needed only by the ndctl namespace badblock iterator */
struct ndctl_namespace *ndns;
/* needed only by the ndctl region badblock iterator */
struct {
struct ndctl_bus *bus;
struct ndctl_region *region;
unsigned long long ns_res; /* address of the namespace */
		unsigned long long ns_beg; /* the beginning of the namespace */
unsigned long long ns_end; /* the end of the namespace */
} rgn;
/* file's extents */
struct extents *exts;
unsigned first_extent;
struct pmem2_badblock last_bb;
};
/* forward declarations */
static int pmem2_badblock_next_namespace(
struct pmem2_badblock_context *bbctx,
struct pmem2_badblock *bb);
static int pmem2_badblock_next_region(
struct pmem2_badblock_context *bbctx,
struct pmem2_badblock *bb);
static void *pmem2_namespace_get_first_badblock(
struct pmem2_badblock_context *bbctx);
static void *pmem2_region_get_first_badblock(
struct pmem2_badblock_context *bbctx);
/*
* badblocks_get_namespace_bounds -- (internal) returns the bounds
* (offset and size) of the given namespace
* relative to the beginning of its region
*/
static int
badblocks_get_namespace_bounds(struct ndctl_region *region,
struct ndctl_namespace *ndns,
unsigned long long *ns_offset,
unsigned long long *ns_size)
{
LOG(3, "region %p namespace %p ns_offset %p ns_size %p",
region, ndns, ns_offset, ns_size);
struct ndctl_pfn *pfn = ndctl_namespace_get_pfn(ndns);
struct ndctl_dax *dax = ndctl_namespace_get_dax(ndns);
ASSERTne(ns_offset, NULL);
ASSERTne(ns_size, NULL);
if (pfn) {
*ns_offset = ndctl_pfn_get_resource(pfn);
if (*ns_offset == ULLONG_MAX) {
ERR("(pfn) cannot read offset of the namespace");
return PMEM2_E_CANNOT_READ_BOUNDS;
}
*ns_size = ndctl_pfn_get_size(pfn);
if (*ns_size == ULLONG_MAX) {
ERR("(pfn) cannot read size of the namespace");
return PMEM2_E_CANNOT_READ_BOUNDS;
}
LOG(10, "(pfn) ns_offset 0x%llx ns_size %llu",
*ns_offset, *ns_size);
} else if (dax) {
*ns_offset = ndctl_dax_get_resource(dax);
if (*ns_offset == ULLONG_MAX) {
ERR("(dax) cannot read offset of the namespace");
return PMEM2_E_CANNOT_READ_BOUNDS;
}
*ns_size = ndctl_dax_get_size(dax);
if (*ns_size == ULLONG_MAX) {
ERR("(dax) cannot read size of the namespace");
return PMEM2_E_CANNOT_READ_BOUNDS;
}
LOG(10, "(dax) ns_offset 0x%llx ns_size %llu",
*ns_offset, *ns_size);
} else { /* raw or btt */
*ns_offset = ndctl_namespace_get_resource(ndns);
if (*ns_offset == ULLONG_MAX) {
ERR("(raw/btt) cannot read offset of the namespace");
return PMEM2_E_CANNOT_READ_BOUNDS;
}
*ns_size = ndctl_namespace_get_size(ndns);
if (*ns_size == ULLONG_MAX) {
ERR("(raw/btt) cannot read size of the namespace");
return PMEM2_E_CANNOT_READ_BOUNDS;
}
LOG(10, "(raw/btt) ns_offset 0x%llx ns_size %llu",
*ns_offset, *ns_size);
}
unsigned long long region_offset = ndctl_region_get_resource(region);
if (region_offset == ULLONG_MAX) {
ERR("!cannot read offset of the region");
return PMEM2_E_ERRNO;
}
LOG(10, "region_offset 0x%llx", region_offset);
*ns_offset -= region_offset;
return 0;
}
/*
* badblocks_devdax_clear_one_badblock -- (internal) clear one bad block
* in the dax device
*/
static int
badblocks_devdax_clear_one_badblock(struct ndctl_bus *bus,
unsigned long long address,
unsigned long long length)
{
LOG(3, "bus %p address 0x%llx length %llu (bytes)",
bus, address, length);
int ret;
struct ndctl_cmd *cmd_ars_cap = ndctl_bus_cmd_new_ars_cap(bus,
address, length);
if (cmd_ars_cap == NULL) {
ERR("ndctl_bus_cmd_new_ars_cap() failed (bus '%s')",
ndctl_bus_get_provider(bus));
return PMEM2_E_ERRNO;
}
ret = ndctl_cmd_submit(cmd_ars_cap);
if (ret) {
ERR("ndctl_cmd_submit() failed (bus '%s')",
ndctl_bus_get_provider(bus));
/* ndctl_cmd_submit() returns -errno */
goto out_ars_cap;
}
struct ndctl_range range;
ret = ndctl_cmd_ars_cap_get_range(cmd_ars_cap, &range);
if (ret) {
ERR("ndctl_cmd_ars_cap_get_range() failed");
/* ndctl_cmd_ars_cap_get_range() returns -errno */
goto out_ars_cap;
}
struct ndctl_cmd *cmd_clear_error = ndctl_bus_cmd_new_clear_error(
range.address, range.length, cmd_ars_cap);
ret = ndctl_cmd_submit(cmd_clear_error);
if (ret) {
ERR("ndctl_cmd_submit() failed (bus '%s')",
ndctl_bus_get_provider(bus));
/* ndctl_cmd_submit() returns -errno */
goto out_clear_error;
}
size_t cleared = ndctl_cmd_clear_error_get_cleared(cmd_clear_error);
LOG(4, "cleared %zu out of %llu bad blocks", cleared, length);
ASSERT(cleared <= length);
if (cleared < length) {
ERR("failed to clear %llu out of %llu bad blocks",
length - cleared, length);
errno = ENXIO; /* ndctl handles such error in this way */
ret = PMEM2_E_ERRNO;
} else {
ret = 0;
}
out_clear_error:
ndctl_cmd_unref(cmd_clear_error);
out_ars_cap:
ndctl_cmd_unref(cmd_ars_cap);
return ret;
}
/*
* pmem2_badblock_context_new -- allocate and create a new bad block context
*/
int
pmem2_badblock_context_new(const struct pmem2_source *src,
struct pmem2_badblock_context **bbctx)
{
LOG(3, "src %p bbctx %p", src, bbctx);
ASSERTne(bbctx, NULL);
if (src->type == PMEM2_SOURCE_ANON) {
ERR("Anonymous source does not support bad blocks");
return PMEM2_E_NOSUPP;
}
ASSERTeq(src->type, PMEM2_SOURCE_FD);
struct ndctl_ctx *ctx;
struct ndctl_region *region;
struct ndctl_namespace *ndns;
struct pmem2_badblock_context *tbbctx = NULL;
enum pmem2_file_type pmem2_type;
int ret = PMEM2_E_UNKNOWN;
*bbctx = NULL;
errno = ndctl_new(&ctx) * (-1);
if (errno) {
ERR("!ndctl_new");
return PMEM2_E_ERRNO;
}
pmem2_type = src->value.ftype;
ret = pmem2_region_namespace(ctx, src, ®ion, &ndns);
if (ret) {
LOG(1, "getting region and namespace failed");
goto exit_ndctl_unref;
}
tbbctx = pmem2_zalloc(sizeof(struct pmem2_badblock_context), &ret);
if (ret)
goto exit_ndctl_unref;
tbbctx->fd = src->value.fd;
tbbctx->file_type = pmem2_type;
tbbctx->ctx = ctx;
if (region == NULL || ndns == NULL) {
		/* did not find any matching device */
*bbctx = tbbctx;
return 0;
}
if (ndctl_namespace_get_mode(ndns) == NDCTL_NS_MODE_FSDAX) {
tbbctx->ndns = ndns;
tbbctx->pmem2_badblock_next_func =
pmem2_badblock_next_namespace;
tbbctx->pmem2_badblock_get_next_func =
pmem2_namespace_get_first_badblock;
} else {
unsigned long long ns_beg, ns_size, ns_end;
ret = badblocks_get_namespace_bounds(
region, ndns,
&ns_beg, &ns_size);
if (ret) {
LOG(1, "cannot read namespace's bounds");
goto error_free_all;
}
ns_end = ns_beg + ns_size - 1;
LOG(10,
"namespace: begin %llu, end %llu size %llu (in 512B sectors)",
B2SEC(ns_beg), B2SEC(ns_end + 1) - 1, B2SEC(ns_size));
tbbctx->rgn.bus = ndctl_region_get_bus(region);
tbbctx->rgn.region = region;
tbbctx->rgn.ns_beg = ns_beg;
tbbctx->rgn.ns_end = ns_end;
tbbctx->rgn.ns_res = ns_beg + ndctl_region_get_resource(region);
tbbctx->pmem2_badblock_next_func =
pmem2_badblock_next_region;
tbbctx->pmem2_badblock_get_next_func =
pmem2_region_get_first_badblock;
}
if (pmem2_type == PMEM2_FTYPE_REG) {
/* only regular files have extents */
ret = pmem2_extents_create_get(src->value.fd, &tbbctx->exts);
if (ret) {
LOG(1, "getting extents of fd %i failed",
src->value.fd);
goto error_free_all;
}
}
/* set the context */
*bbctx = tbbctx;
return 0;
error_free_all:
pmem2_extents_destroy(&tbbctx->exts);
Free(tbbctx);
exit_ndctl_unref:
ndctl_unref(ctx);
return ret;
}
/*
* pmem2_badblock_context_delete -- delete and free the bad block context
*/
void
pmem2_badblock_context_delete(struct pmem2_badblock_context **bbctx)
{
LOG(3, "bbctx %p", bbctx);
ASSERTne(bbctx, NULL);
if (*bbctx == NULL)
return;
struct pmem2_badblock_context *tbbctx = *bbctx;
pmem2_extents_destroy(&tbbctx->exts);
ndctl_unref(tbbctx->ctx);
Free(tbbctx);
*bbctx = NULL;
}
/*
* pmem2_namespace_get_next_badblock -- (internal) wrapper for
* ndctl_namespace_get_next_badblock
*/
static void *
pmem2_namespace_get_next_badblock(struct pmem2_badblock_context *bbctx)
{
LOG(3, "bbctx %p", bbctx);
return ndctl_namespace_get_next_badblock(bbctx->ndns);
}
/*
* pmem2_namespace_get_first_badblock -- (internal) wrapper for
* ndctl_namespace_get_first_badblock
*/
static void *
pmem2_namespace_get_first_badblock(struct pmem2_badblock_context *bbctx)
{
LOG(3, "bbctx %p", bbctx);
bbctx->pmem2_badblock_get_next_func = pmem2_namespace_get_next_badblock;
return ndctl_namespace_get_first_badblock(bbctx->ndns);
}
/*
* pmem2_region_get_next_badblock -- (internal) wrapper for
* ndctl_region_get_next_badblock
*/
static void *
pmem2_region_get_next_badblock(struct pmem2_badblock_context *bbctx)
{
LOG(3, "bbctx %p", bbctx);
return ndctl_region_get_next_badblock(bbctx->rgn.region);
}
/*
* pmem2_region_get_first_badblock -- (internal) wrapper for
* ndctl_region_get_first_badblock
*/
static void *
pmem2_region_get_first_badblock(struct pmem2_badblock_context *bbctx)
{
LOG(3, "bbctx %p", bbctx);
bbctx->pmem2_badblock_get_next_func = pmem2_region_get_next_badblock;
return ndctl_region_get_first_badblock(bbctx->rgn.region);
}
/*
* pmem2_badblock_next_namespace -- (internal) version of pmem2_badblock_next()
* called for ndctl with namespace badblock
* iterator
*
* This function works only for fsdax, but does not require any special
* permissions.
*/
static int
pmem2_badblock_next_namespace(struct pmem2_badblock_context *bbctx,
struct pmem2_badblock *bb)
{
LOG(3, "bbctx %p bb %p", bbctx, bb);
ASSERTne(bbctx, NULL);
ASSERTne(bb, NULL);
struct badblock *bbn;
bbn = bbctx->pmem2_badblock_get_next_func(bbctx);
if (bbn == NULL)
return PMEM2_E_NO_BAD_BLOCK_FOUND;
/*
* libndctl returns offset and length of a bad block
* both expressed in 512B sectors. Offset is relative
* to the beginning of the namespace.
*/
bb->offset = SEC2B(bbn->offset);
bb->length = SEC2B(bbn->len);
return 0;
}
/*
* pmem2_badblock_next_region -- (internal) version of pmem2_badblock_next()
* called for ndctl with region badblock iterator
*
* This function works for all types of namespaces, but requires read access to
* privileged device information.
*/
static int
pmem2_badblock_next_region(struct pmem2_badblock_context *bbctx,
struct pmem2_badblock *bb)
{
LOG(3, "bbctx %p bb %p", bbctx, bb);
ASSERTne(bbctx, NULL);
ASSERTne(bb, NULL);
unsigned long long bb_beg, bb_end;
unsigned long long beg, end;
struct badblock *bbn;
unsigned long long ns_beg = bbctx->rgn.ns_beg;
unsigned long long ns_end = bbctx->rgn.ns_end;
do {
bbn = bbctx->pmem2_badblock_get_next_func(bbctx);
if (bbn == NULL)
return PMEM2_E_NO_BAD_BLOCK_FOUND;
LOG(10,
"region bad block: begin %llu end %llu length %u (in 512B sectors)",
bbn->offset, bbn->offset + bbn->len - 1, bbn->len);
/*
* libndctl returns offset and length of a bad block
* both expressed in 512B sectors. Offset is relative
* to the beginning of the region.
*/
bb_beg = SEC2B(bbn->offset);
bb_end = bb_beg + SEC2B(bbn->len) - 1;
} while (bb_beg > ns_end || ns_beg > bb_end);
beg = (bb_beg > ns_beg) ? bb_beg : ns_beg;
end = (bb_end < ns_end) ? bb_end : ns_end;
/*
* Form a new bad block structure with offset and length
* expressed in bytes and offset relative to the beginning
* of the namespace.
*/
bb->offset = beg - ns_beg;
bb->length = end - beg + 1;
LOG(4,
"namespace bad block: begin %llu end %llu length %llu (in 512B sectors)",
B2SEC(beg - ns_beg), B2SEC(end - ns_beg), B2SEC(end - beg) + 1);
return 0;
}
/*
* pmem2_badblock_next -- get the next bad block
*/
int
pmem2_badblock_next(struct pmem2_badblock_context *bbctx,
struct pmem2_badblock *bb)
{
LOG(3, "bbctx %p bb %p", bbctx, bb);
ASSERTne(bbctx, NULL);
ASSERTne(bb, NULL);
struct pmem2_badblock bbn;
unsigned long long bb_beg;
unsigned long long bb_end;
unsigned long long bb_len;
unsigned long long bb_off;
unsigned long long ext_beg;
unsigned long long ext_end;
unsigned e;
int ret;
if (bbctx->rgn.region == NULL && bbctx->ndns == NULL) {
		/* did not find any matching device */
return PMEM2_E_NO_BAD_BLOCK_FOUND;
}
struct extents *exts = bbctx->exts;
/* DAX devices have no extents */
if (!exts) {
ret = bbctx->pmem2_badblock_next_func(bbctx, &bbn);
*bb = bbn;
return ret;
}
/*
* There is at least one extent.
* Loop until:
* 1) a bad block overlaps with an extent or
* 2) there are no more bad blocks.
*/
int bb_overlaps_with_extent = 0;
do {
if (bbctx->last_bb.length) {
/*
* We have saved the last bad block to check it
* with the next extent saved
* in bbctx->first_extent.
*/
ASSERTne(bbctx->first_extent, 0);
bbn = bbctx->last_bb;
bbctx->last_bb.offset = 0;
bbctx->last_bb.length = 0;
} else {
ASSERTeq(bbctx->first_extent, 0);
/* look for the next bad block */
ret = bbctx->pmem2_badblock_next_func(bbctx, &bbn);
if (ret)
return ret;
}
bb_beg = bbn.offset;
bb_end = bb_beg + bbn.length - 1;
for (e = bbctx->first_extent;
e < exts->extents_count;
e++) {
ext_beg = exts->extents[e].offset_physical;
ext_end = ext_beg + exts->extents[e].length - 1;
/* check if the bad block overlaps with the extent */
if (bb_beg <= ext_end && ext_beg <= bb_end) {
/* bad block overlaps with the extent */
bb_overlaps_with_extent = 1;
if (bb_end > ext_end &&
e + 1 < exts->extents_count) {
/*
* The bad block is longer than
* the extent and there are
* more extents.
* Save the current bad block
* to check it with the next extent.
*/
bbctx->first_extent = e + 1;
bbctx->last_bb = bbn;
} else {
/*
* All extents were checked
* with the current bad block.
*/
bbctx->first_extent = 0;
bbctx->last_bb.length = 0;
bbctx->last_bb.offset = 0;
}
break;
}
}
/* check all extents with the next bad block */
if (bb_overlaps_with_extent == 0) {
bbctx->first_extent = 0;
bbctx->last_bb.length = 0;
bbctx->last_bb.offset = 0;
}
} while (bb_overlaps_with_extent == 0);
/* bad block overlaps with an extent */
bb_beg = (bb_beg > ext_beg) ? bb_beg : ext_beg;
bb_end = (bb_end < ext_end) ? bb_end : ext_end;
bb_len = bb_end - bb_beg + 1;
bb_off = bb_beg + exts->extents[e].offset_logical
- exts->extents[e].offset_physical;
LOG(10, "bad block found: physical offset: %llu, length: %llu",
bb_beg, bb_len);
/* make sure the offset is block-aligned */
unsigned long long not_block_aligned = bb_off & (exts->blksize - 1);
if (not_block_aligned) {
bb_off -= not_block_aligned;
bb_len += not_block_aligned;
}
/* make sure the length is block-aligned */
bb_len = ALIGN_UP(bb_len, exts->blksize);
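	/*
	 * Worked example (illustrative): with blksize 4096, a bad block at
	 * logical offset 5000 of length 100 becomes offset 4096 and length
	 * 100 + 904 = 1004, which ALIGN_UP then rounds to 4096 -- the whole
	 * 4 KiB block containing the error is reported.
	 */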
LOG(4, "bad block found: logical offset: %llu, length: %llu",
bb_off, bb_len);
/*
* Return the bad block with offset and length
* expressed in bytes and offset relative
* to the beginning of the file.
*/
bb->offset = bb_off;
bb->length = bb_len;
return 0;
}
/*
* pmem2_badblock_clear_fsdax -- (internal) clear one bad block
* in a FSDAX device
*/
static int
pmem2_badblock_clear_fsdax(int fd, const struct pmem2_badblock *bb)
{
LOG(3, "fd %i badblock %p", fd, bb);
ASSERTne(bb, NULL);
LOG(10,
"clearing a bad block: fd %i logical offset %zu length %zu (in 512B sectors)",
fd, B2SEC(bb->offset), B2SEC(bb->length));
/* fallocate() takes offset as the off_t type */
if (bb->offset > (size_t)INT64_MAX) {
ERR("bad block's offset is greater than INT64_MAX");
return PMEM2_E_OFFSET_OUT_OF_RANGE;
}
/* fallocate() takes length as the off_t type */
if (bb->length > (size_t)INT64_MAX) {
ERR("bad block's length is greater than INT64_MAX");
return PMEM2_E_LENGTH_OUT_OF_RANGE;
}
off_t offset = (off_t)bb->offset;
off_t length = (off_t)bb->length;
/* deallocate bad blocks */
if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
offset, length)) {
ERR("!fallocate");
return PMEM2_E_ERRNO;
}
/* allocate new blocks */
if (fallocate(fd, FALLOC_FL_KEEP_SIZE, offset, length)) {
ERR("!fallocate");
return PMEM2_E_ERRNO;
}
return 0;
}
/*
* pmem2_badblock_clear_devdax -- (internal) clear one bad block
* in a DAX device
*/
static int
pmem2_badblock_clear_devdax(const struct pmem2_badblock_context *bbctx,
const struct pmem2_badblock *bb)
{
LOG(3, "bbctx %p bb %p", bbctx, bb);
ASSERTne(bb, NULL);
ASSERTne(bbctx, NULL);
ASSERTne(bbctx->rgn.bus, NULL);
ASSERTne(bbctx->rgn.ns_res, 0);
LOG(4,
"clearing a bad block: offset %zu length %zu (in 512B sectors)",
B2SEC(bb->offset), B2SEC(bb->length));
int ret = badblocks_devdax_clear_one_badblock(bbctx->rgn.bus,
bb->offset + bbctx->rgn.ns_res,
bb->length);
if (ret) {
LOG(1,
"failed to clear a bad block: offset %zu length %zu (in 512B sectors)",
B2SEC(bb->offset),
B2SEC(bb->length));
return ret;
}
return 0;
}
/*
* pmem2_badblock_clear -- clear one bad block
*/
int
pmem2_badblock_clear(struct pmem2_badblock_context *bbctx,
const struct pmem2_badblock *bb)
{
LOG(3, "bbctx %p badblock %p", bbctx, bb);
ASSERTne(bbctx, NULL);
ASSERTne(bb, NULL);
if (bbctx->file_type == PMEM2_FTYPE_DEVDAX)
return pmem2_badblock_clear_devdax(bbctx, bb);
ASSERTeq(bbctx->file_type, PMEM2_FTYPE_REG);
return pmem2_badblock_clear_fsdax(bbctx->fd, bb);
}
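/*
 * Illustrative sketch (not part of the original file): the intended
 * iterate-and-clear loop built from the API above; a return value of
 * PMEM2_E_NO_BAD_BLOCK_FOUND terminates the iteration cleanly.
 */
static inline int
example_clear_all_badblocks(const struct pmem2_source *src)
{
	struct pmem2_badblock_context *bbctx;
	struct pmem2_badblock bb;
	int ret = pmem2_badblock_context_new(src, &bbctx);
	if (ret)
		return ret;
	while ((ret = pmem2_badblock_next(bbctx, &bb)) == 0) {
		ret = pmem2_badblock_clear(bbctx, &bb);
		if (ret)
			break;
	}
	if (ret == PMEM2_E_NO_BAD_BLOCK_FOUND)
		ret = 0; /* running out of bad blocks is the success case */
	pmem2_badblock_context_delete(&bbctx);
	return ret;
}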
| 19,316 | 24.218016 | 80 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/usc_ndctl.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* usc_ndctl.c -- pmem2 usc function for platforms using ndctl
*/
#include <ndctl/libndctl.h>
#include <ndctl/libdaxctl.h>
#include <sys/types.h>
#include <sys/sysmacros.h>
#include <fcntl.h>
#include "config.h"
#include "file.h"
#include "libpmem2.h"
#include "os.h"
#include "out.h"
#include "pmem2_utils.h"
#include "source.h"
#include "region_namespace_ndctl.h"
int
pmem2_source_device_usc(const struct pmem2_source *src, uint64_t *usc)
{
LOG(3, "type %d, uid %p", src->type, usc);
if (src->type == PMEM2_SOURCE_ANON) {
ERR("Anonymous source does not support unsafe shutdown count");
return PMEM2_E_NOSUPP;
}
ASSERTeq(src->type, PMEM2_SOURCE_FD);
struct ndctl_ctx *ctx;
int ret = PMEM2_E_NOSUPP;
*usc = 0;
errno = ndctl_new(&ctx) * (-1);
if (errno) {
ERR("!ndctl_new");
return PMEM2_E_ERRNO;
}
struct ndctl_region *region = NULL;
ret = pmem2_region_namespace(ctx, src, ®ion, NULL);
if (ret < 0)
goto err;
ret = PMEM2_E_NOSUPP;
if (region == NULL) {
ERR(
"Unsafe shutdown count is not supported for this source");
goto err;
}
struct ndctl_dimm *dimm;
ndctl_dimm_foreach_in_region(region, dimm) {
long long dimm_usc = ndctl_dimm_get_dirty_shutdown(dimm);
if (dimm_usc < 0) {
ret = PMEM2_E_NOSUPP;
ERR(
"Unsafe shutdown count is not supported for this source");
goto err;
}
*usc += (unsigned long long)dimm_usc;
}
ret = 0;
err:
ndctl_unref(ctx);
return ret;
}
int
pmem2_source_device_id(const struct pmem2_source *src, char *id, size_t *len)
{
struct ndctl_ctx *ctx;
struct ndctl_dimm *dimm;
int ret;
struct ndctl_region *region = NULL;
const char *dimm_uid;
if (src->type == PMEM2_SOURCE_ANON) {
ERR("Anonymous source does not have device id");
return PMEM2_E_NOSUPP;
}
ASSERTeq(src->type, PMEM2_SOURCE_FD);
errno = ndctl_new(&ctx) * (-1);
if (errno) {
ERR("!ndctl_new");
return PMEM2_E_ERRNO;
}
size_t len_base = 1; /* '\0' */
ret = pmem2_region_namespace(ctx, src, ®ion, NULL);
if (ret < 0)
goto err;
if (region == NULL) {
ret = PMEM2_E_NOSUPP;
goto err;
}
if (id == NULL) {
ndctl_dimm_foreach_in_region(region, dimm) {
dimm_uid = ndctl_dimm_get_unique_id(dimm);
if (dimm_uid == NULL) {
ret = PMEM2_E_NOSUPP;
goto err;
}
			len_base += strlen(dimm_uid);
}
goto end;
}
size_t count = 1;
ndctl_dimm_foreach_in_region(region, dimm) {
dimm_uid = ndctl_dimm_get_unique_id(dimm);
if (dimm_uid == NULL) {
ret = PMEM2_E_NOSUPP;
goto err;
}
count += strlen(dimm_uid);
if (count > *len) {
ret = PMEM2_E_BUFFER_TOO_SMALL;
goto err;
}
strncat(id, dimm_uid, *len);
}
end:
ret = 0;
if (id == NULL)
*len = len_base;
err:
ndctl_unref(ctx);
return ret;
}
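/*
 * Illustrative sketch (not part of the original file): the typical
 * unsafe-shutdown detection pattern. "stored_usc" is an assumed
 * application-managed value, e.g. kept in a pool header; a mismatch
 * means an unsafe shutdown may have occurred since it was recorded.
 */
static inline int
example_usc_check(const struct pmem2_source *src, uint64_t stored_usc,
		int *unsafe_shutdown)
{
	uint64_t usc;
	int ret = pmem2_source_device_usc(src, &usc);
	if (ret)
		return ret;
	*unsafe_shutdown = (usc != stored_usc);
	return 0;
}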
| 2,831 | 18.265306 | 77 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/auto_flush_none.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019, Intel Corporation */
#include "auto_flush.h"
#include "out.h"
/*
* pmem2_auto_flush -- check if platform supports auto flush for all regions
*/
int
pmem2_auto_flush(void)
{
LOG(15, NULL);
return 0;
}
| 267 | 14.764706 | 76 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/region_namespace_ndctl.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2020, Intel Corporation */
/*
* region_namespace_ndctl.h -- internal definitions for libpmem2
* common ndctl functions
*/
#ifndef PMDK_REGION_NAMESPACE_NDCTL_H
#define PMDK_REGION_NAMESPACE_NDCTL_H 1
#include "os.h"
#ifdef __cplusplus
extern "C" {
#endif
#define FOREACH_BUS_REGION_NAMESPACE(ctx, bus, region, ndns) \
ndctl_bus_foreach(ctx, bus) \
ndctl_region_foreach(bus, region) \
ndctl_namespace_foreach(region, ndns)
int pmem2_region_namespace(struct ndctl_ctx *ctx,
const struct pmem2_source *src,
struct ndctl_region **pregion,
struct ndctl_namespace **pndns);
#ifdef __cplusplus
}
#endif
#endif /* PMDK_REGION_NAMESPACE_NDCTL_H */
| 754 | 21.878788 | 64 |
h
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/persist_windows.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019-2020, Intel Corporation */
/*
* persist_windows.c -- Windows-specific part of persist implementation
*/
#include <stdlib.h>
#include <windows.h>
#include "out.h"
#include "persist.h"
#include "pmem2_utils.h"
/*
* pmem2_flush_file_buffers_os -- flush CPU and OS file caches for the given
* range
*/
int
pmem2_flush_file_buffers_os(struct pmem2_map *map, const void *addr, size_t len,
int autorestart)
{
ASSERTeq(map->source.type, PMEM2_SOURCE_HANDLE);
if (FlushViewOfFile(addr, len) == FALSE) {
ERR("!!FlushViewOfFile");
return pmem2_lasterror_to_err();
}
if (FlushFileBuffers(map->source.value.handle) == FALSE) {
ERR("!!FlushFileBuffers");
return pmem2_lasterror_to_err();
}
return 0;
}
| 775 | 19.972973 | 80 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/vm_reservation.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* vm_reservation.c -- implementation of virtual memory allocation API
*/
#include "libpmem2.h"
/*
* pmem2_vm_reservation_new -- creates new virtual memory reservation
*/
int
pmem2_vm_reservation_new(struct pmem2_vm_reservation **rsv,
size_t size, void *address)
{
return PMEM2_E_NOSUPP;
}
/*
* pmem2_vm_reservation_delete -- deletes reservation bound to
* structure pmem2_vm_reservation
*/
int
pmem2_vm_reservation_delete(struct pmem2_vm_reservation **rsv)
{
return PMEM2_E_NOSUPP;
}
| 614 | 20.206897 | 70 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/deep_flush_windows.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
 * deep_flush_windows.c -- deep_flush functionality
*/
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include "deep_flush.h"
#include "libpmem2.h"
#include "out.h"
#include "pmem2_utils.h"
#include "persist.h"
/*
* pmem2_deep_flush_dax -- performs flush buffer operation
*/
int
pmem2_deep_flush_dax(struct pmem2_map *map, void *ptr, size_t size)
{
int ret = pmem2_flush_file_buffers_os(map, ptr, size, 0);
if (ret) {
LOG(1, "cannot flush buffers addr %p len %zu", ptr, size);
return ret;
}
return 0;
}
/*
* pmem2_deep_flush_write -- perform write to deep_flush file
* on given region_id (Device Dax only)
*/
int
pmem2_deep_flush_write(unsigned region_id)
{
const char *err =
"BUG: pmem2_deep_flush_write should never be called on this OS";
ERR("%s", err);
ASSERTinfo(0, err);
/* not supported */
return PMEM2_E_NOSUPP;
}
| 947 | 18.75 | 67 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/extent.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018-2020, Intel Corporation */
/*
* extent.h -- fs extent query API
*/
#ifndef PMDK_EXTENT_H
#define PMDK_EXTENT_H 1
#include <stdint.h>
#include <stddef.h>
#ifdef __cplusplus
extern "C" {
#endif
struct extent {
uint64_t offset_physical;
uint64_t offset_logical;
uint64_t length;
};
struct extents {
uint64_t blksize;
uint32_t extents_count;
struct extent *extents;
};
int pmem2_extents_create_get(int fd, struct extents **exts);
void pmem2_extents_destroy(struct extents **exts);
#ifdef __cplusplus
}
#endif
#endif /* PMDK_EXTENT_H */
| 608 | 15.026316 | 60 |
h
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/usc_windows.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* usc_windows.c -- pmem2 usc function for windows
*/
#include "alloc.h"
#include "source.h"
#include "out.h"
#include "libpmem2.h"
#include "pmem2_utils.h"
#define GUID_SIZE sizeof("XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX")
#define VOLUME_PATH_SIZE (sizeof("\\\\?\\Volume{}") + (GUID_SIZE - 2 /* \0 */))
/*
* get_volume_handle -- returns volume handle
*/
static int
get_volume_handle(HANDLE handle, HANDLE *volume_handle)
{
wchar_t *volume;
wchar_t tmp[10];
DWORD len =
GetFinalPathNameByHandleW(handle, tmp, 10, VOLUME_NAME_GUID);
if (len == 0) {
ERR("!!GetFinalPathNameByHandleW");
return pmem2_lasterror_to_err();
}
len *= sizeof(wchar_t);
int err;
volume = pmem2_malloc(len, &err);
if (volume == NULL)
return err;
if (!GetFinalPathNameByHandleW(handle, volume, len,
VOLUME_NAME_GUID)) {
Free(volume);
ERR("!!GetFinalPathNameByHandleW");
return pmem2_lasterror_to_err();
}
ASSERTeq(volume[VOLUME_PATH_SIZE], '\\');
volume[VOLUME_PATH_SIZE] = '\0';
*volume_handle = CreateFileW(volume, /* path to the file */
/* request access to send ioctl to the file */
FILE_READ_ATTRIBUTES,
/* do not block access to the file */
FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE,
NULL, /* security attributes */
OPEN_EXISTING, /* open only if it exists */
FILE_ATTRIBUTE_NORMAL, /* no attributes */
NULL); /* used only for new files */
Free(volume);
if (*volume_handle == INVALID_HANDLE_VALUE) {
ERR("!!CreateFileW");
return pmem2_lasterror_to_err();
}
return 0;
}
static int
get_device_guid(HANDLE handle, GUID *guid)
{
HANDLE vHandle;
int ret = get_volume_handle(handle, &vHandle);
if (vHandle == INVALID_HANDLE_VALUE)
return ret;
STORAGE_DEVICE_NUMBER_EX sdn;
sdn.DeviceNumber = -1;
DWORD dwBytesReturned = 0;
if (!DeviceIoControl(vHandle,
IOCTL_STORAGE_GET_DEVICE_NUMBER_EX,
NULL, 0,
&sdn, sizeof(sdn),
&dwBytesReturned, NULL)) {
/*
* IOCTL_STORAGE_GET_DEVICE_NUMBER_EX is not supported
* on this server
*/
ERR(
"Getting device id (IOCTL_STORAGE_GET_DEVICE_NUMBER_EX) is not supported on this system");
CloseHandle(vHandle);
return PMEM2_E_NOSUPP;
}
*guid = sdn.DeviceGuid;
CloseHandle(vHandle);
return 0;
}
int
pmem2_source_device_idW(const struct pmem2_source *src, wchar_t *id,
size_t *len)
{
if (src->type == PMEM2_SOURCE_ANON) {
ERR("Anonymous source does not have device id");
return PMEM2_E_NOSUPP;
}
ASSERTeq(src->type, PMEM2_SOURCE_HANDLE);
if (id == NULL) {
*len = GUID_SIZE * sizeof(*id);
return 0;
}
if (*len < GUID_SIZE * sizeof(*id)) {
ERR("id buffer is to small");
return PMEM2_E_BUFFER_TOO_SMALL;
}
GUID guid;
int ret = get_device_guid(src->value.handle, &guid);
if (ret)
return ret;
_snwprintf(id, GUID_SIZE,
L"%08lX-%04hX-%04hX-%02hhX%02hhX-%02hhX%02hhX%02hhX%02hhX%02hhX%02hhX",
guid.Data1, guid.Data2, guid.Data3, guid.Data4[0],
guid.Data4[1], guid.Data4[2], guid.Data4[3],
guid.Data4[4], guid.Data4[5], guid.Data4[6],
guid.Data4[7]);
return 0;
}
int
pmem2_source_device_idU(const struct pmem2_source *src, char *id, size_t *len)
{
if (src->type == PMEM2_SOURCE_ANON) {
ERR("Anonymous source does not have device id");
return PMEM2_E_NOSUPP;
}
ASSERTeq(src->type, PMEM2_SOURCE_HANDLE);
if (id == NULL) {
*len = GUID_SIZE * sizeof(*id);
return 0;
}
if (*len < GUID_SIZE * sizeof(*id)) {
ERR("id buffer is to small");
return PMEM2_E_BUFFER_TOO_SMALL;
}
GUID guid;
int ret = get_device_guid(src->value.handle, &guid);
if (ret)
return ret;
if (util_snprintf(id, GUID_SIZE,
"%08lX-%04hX-%04hX-%02hhX%02hhX-%02hhX%02hhX%02hhX%02hhX%02hhX%02hhX",
guid.Data1, guid.Data2, guid.Data3, guid.Data4[0],
guid.Data4[1], guid.Data4[2], guid.Data4[3],
guid.Data4[4], guid.Data4[5], guid.Data4[6],
guid.Data4[7]) < 0) {
ERR("!snprintf");
return PMEM2_E_ERRNO;
}
return 0;
}
int
pmem2_source_device_usc(const struct pmem2_source *src, uint64_t *usc)
{
LOG(3, "cfg %p, usc %p", src, usc);
if (src->type == PMEM2_SOURCE_ANON) {
ERR("Anonymous source does not support unsafe shutdown count");
return PMEM2_E_NOSUPP;
}
ASSERTeq(src->type, PMEM2_SOURCE_HANDLE);
*usc = 0;
HANDLE vHandle;
int err = get_volume_handle(src->value.handle, &vHandle);
if (vHandle == INVALID_HANDLE_VALUE)
return err;
STORAGE_PROPERTY_QUERY prop;
DWORD dwSize;
prop.PropertyId = StorageDeviceUnsafeShutdownCount;
prop.QueryType = PropertyExistsQuery;
prop.AdditionalParameters[0] = 0;
STORAGE_DEVICE_UNSAFE_SHUTDOWN_COUNT ret;
BOOL bResult = DeviceIoControl(vHandle,
IOCTL_STORAGE_QUERY_PROPERTY,
&prop, sizeof(prop),
&ret, sizeof(ret),
(LPDWORD)&dwSize, (LPOVERLAPPED)NULL);
if (!bResult) {
ERR(
"Getting unsafe shutdown count is not supported on this system");
CloseHandle(vHandle);
return PMEM2_E_NOSUPP;
}
prop.QueryType = PropertyStandardQuery;
bResult = DeviceIoControl(vHandle,
IOCTL_STORAGE_QUERY_PROPERTY,
&prop, sizeof(prop),
&ret, sizeof(ret),
(LPDWORD)&dwSize, (LPOVERLAPPED)NULL);
CloseHandle(vHandle);
if (!bResult) {
ERR("!!DeviceIoControl");
return pmem2_lasterror_to_err();
}
*usc = ret.UnsafeShutdownCount;
return 0;
}
| 5,261 | 22.283186 | 93 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/ravl_interval.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* ravl_interval.c -- ravl_interval implementation
*/
#include "alloc.h"
#include "map.h"
#include "ravl_interval.h"
#include "pmem2_utils.h"
#include "sys_util.h"
#include "os_thread.h"
#include "ravl.h"
/*
 * ravl_interval - structure holding the interval tree
 * and the callbacks that yield each interval's boundaries
*/
struct ravl_interval {
struct ravl *tree;
ravl_interval_min *get_min;
ravl_interval_max *get_max;
};
/*
* ravl_interval_node - structure holding min, max functions and address
*/
struct ravl_interval_node {
void *addr;
ravl_interval_min *get_min;
ravl_interval_max *get_max;
};
/*
 * ravl_interval_compare -- compare intervals by their boundaries;
 * overlapping intervals compare as equal, so no overlap is allowed
*/
static int
ravl_interval_compare(const void *lhs, const void *rhs)
{
const struct ravl_interval_node *left = lhs;
const struct ravl_interval_node *right = rhs;
if (left->get_min(left->addr) < right->get_min(right->addr) &&
left->get_max(left->addr) <= right->get_min(right->addr))
return -1;
if (left->get_min(left->addr) > right->get_min(right->addr) &&
left->get_max(left->addr) >= right->get_min(right->addr))
return 1;
return 0;
}
/*
* ravl_interval_delete - finalize the ravl interval module
*/
void
ravl_interval_delete(struct ravl_interval *ri)
{
ravl_delete(ri->tree);
ri->tree = NULL;
Free(ri);
}
/*
* ravl_interval_new -- initialize the ravl interval module
*/
struct ravl_interval *
ravl_interval_new(ravl_interval_min *get_min, ravl_interval_max *get_max)
{
int ret;
struct ravl_interval *interval = pmem2_malloc(sizeof(*interval), &ret);
if (ret)
goto ret_null;
interval->tree = ravl_new_sized(ravl_interval_compare,
sizeof(struct ravl_interval_node));
if (!(interval->tree))
goto free_alloc;
interval->get_min = get_min;
interval->get_max = get_max;
return interval;
free_alloc:
Free(interval);
ret_null:
return NULL;
}
/*
* ravl_interval_insert -- insert interval entry into the tree
*/
int
ravl_interval_insert(struct ravl_interval *ri, void *addr)
{
struct ravl_interval_node rin;
rin.addr = addr;
rin.get_min = ri->get_min;
rin.get_max = ri->get_max;
if (ravl_emplace_copy(ri->tree, &rin))
return PMEM2_E_ERRNO;
return 0;
}
/*
* ravl_interval_remove -- remove interval entry from the tree
*/
int
ravl_interval_remove(struct ravl_interval *ri, struct ravl_interval_node *rin)
{
struct ravl_node *node = ravl_find(ri->tree, rin,
RAVL_PREDICATE_EQUAL);
if (!node)
return PMEM2_E_MAPPING_NOT_FOUND;
ravl_remove(ri->tree, node);
return 0;
}
/*
* ravl_interval_find_prior_or_eq -- find overlapping interval starting prior to
* the current one or at the same place
*/
static struct ravl_interval_node *
ravl_interval_find_prior_or_eq(struct ravl *tree,
struct ravl_interval_node *rin)
{
struct ravl_node *node;
struct ravl_interval_node *cur;
node = ravl_find(tree, rin, RAVL_PREDICATE_LESS_EQUAL);
if (!node)
return NULL;
cur = ravl_data(node);
/*
	 * If the found interval ends at or before the start of the searched
	 * range, it does not overlap and is not our interval.
*/
if (cur->get_max(cur->addr) <= rin->get_min(rin->addr))
return NULL;
return cur;
}
/*
* ravl_interval_find_later -- find overlapping interval starting later than
* the current one
*/
static struct ravl_interval_node *
ravl_interval_find_later(struct ravl *tree, struct ravl_interval_node *rin)
{
struct ravl_node *node;
struct ravl_interval_node *cur;
node = ravl_find(tree, rin, RAVL_PREDICATE_GREATER);
if (!node)
return NULL;
cur = ravl_data(node);
/*
	 * If the found interval begins at or after the end of the searched
	 * range, it does not overlap and is not our interval.
*/
if (cur->get_min(cur->addr) >= rin->get_max(rin->addr))
return NULL;
return cur;
}
/*
* ravl_interval_find_equal -- find the interval with exact (min, max) range
*/
struct ravl_interval_node *
ravl_interval_find_equal(struct ravl_interval *ri, void *addr)
{
struct ravl_interval_node range;
range.addr = addr;
range.get_min = ri->get_min;
range.get_max = ri->get_max;
struct ravl_node *node;
node = ravl_find(ri->tree, &range, RAVL_PREDICATE_EQUAL);
if (!node)
return NULL;
return ravl_data(node);
}
/*
* ravl_interval_find -- find the earliest interval within (min, max) range
*/
struct ravl_interval_node *
ravl_interval_find(struct ravl_interval *ri, void *addr)
{
struct ravl_interval_node range;
range.addr = addr;
range.get_min = ri->get_min;
range.get_max = ri->get_max;
struct ravl_interval_node *cur;
cur = ravl_interval_find_prior_or_eq(ri->tree, &range);
if (!cur)
cur = ravl_interval_find_later(ri->tree, &range);
return cur;
}
/*
* ravl_interval_data -- returns the data contained within interval node
*/
void *
ravl_interval_data(struct ravl_interval_node *rin)
{
return (void *)rin->addr;
}
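/*
 * Illustrative sketch (not part of the original file): wiring the
 * interval tree to a simple range object. The callback signatures are
 * assumed to take the stored address and return a numeric boundary,
 * matching how ravl_interval_compare() uses them above.
 */
struct example_range {
	size_t start;
	size_t end;
};
static size_t
example_range_min(void *addr)
{
	return ((struct example_range *)addr)->start;
}
static size_t
example_range_max(void *addr)
{
	return ((struct example_range *)addr)->end;
}
static inline int
example_interval_usage(struct example_range *r)
{
	struct ravl_interval *ri =
		ravl_interval_new(example_range_min, example_range_max);
	if (!ri)
		return -1;
	int ret = ravl_interval_insert(ri, r);
	ravl_interval_delete(ri);
	return ret;
}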
| 4,963 | 21.26009 | 80 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/source_posix.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019-2020, Intel Corporation */
#include <errno.h>
#include <fcntl.h>
#include "os.h"
#include "source.h"
#include "alloc.h"
#include "libpmem2.h"
#include "out.h"
#include "pmem2.h"
#include "pmem2_utils.h"
#include "util.h"
/*
* pmem2_source_from_fd -- create a new data source instance
*/
int
pmem2_source_from_fd(struct pmem2_source **src, int fd)
{
*src = NULL;
if (fd < 0)
return PMEM2_E_INVALID_FILE_HANDLE;
int flags = fcntl(fd, F_GETFL);
if (flags == -1) {
ERR("!fcntl");
if (errno == EBADF)
return PMEM2_E_INVALID_FILE_HANDLE;
return PMEM2_E_ERRNO;
}
if ((flags & O_ACCMODE) == O_WRONLY) {
ERR("fd must be open with O_RDONLY or O_RDWR");
return PMEM2_E_INVALID_FILE_HANDLE;
}
/*
* XXX Files with FS_APPEND_FL attribute should also generate an error.
	 * If it were possible to filter them out, pmem2_map would not return
	 * -EACCES when trying to map them. Please update pmem2_map.3 when this
	 * is fixed. For details please see the ioctl_iflags(2) manual page.
*/
os_stat_t st;
if (os_fstat(fd, &st) < 0) {
ERR("!fstat");
if (errno == EBADF)
return PMEM2_E_INVALID_FILE_HANDLE;
return PMEM2_E_ERRNO;
}
enum pmem2_file_type ftype;
int ret = pmem2_get_type_from_stat(&st, &ftype);
if (ret != 0)
return ret;
if (ftype == PMEM2_FTYPE_DIR) {
ERR("cannot set fd to directory in pmem2_source_from_fd");
return PMEM2_E_INVALID_FILE_TYPE;
}
struct pmem2_source *srcp = pmem2_malloc(sizeof(**src), &ret);
if (ret)
return ret;
ASSERTne(srcp, NULL);
srcp->type = PMEM2_SOURCE_FD;
srcp->value.ftype = ftype;
srcp->value.fd = fd;
srcp->value.st_rdev = st.st_rdev;
srcp->value.st_dev = st.st_dev;
*src = srcp;
return 0;
}
/*
* pmem2_source_size -- get a size of the file descriptor stored in the provided
* source
*/
int
pmem2_source_size(const struct pmem2_source *src, size_t *size)
{
LOG(3, "type %d", src->type);
if (src->type == PMEM2_SOURCE_ANON) {
*size = src->value.size;
return 0;
}
ASSERT(src->type == PMEM2_SOURCE_FD);
os_stat_t st;
if (os_fstat(src->value.fd, &st) < 0) {
ERR("!fstat");
if (errno == EBADF)
return PMEM2_E_INVALID_FILE_HANDLE;
return PMEM2_E_ERRNO;
}
switch (src->value.ftype) {
case PMEM2_FTYPE_DEVDAX: {
int ret = pmem2_device_dax_size(src, size);
if (ret)
return ret;
break;
}
case PMEM2_FTYPE_REG:
if (st.st_size < 0) {
ERR(
"kernel says size of regular file is negative (%ld)",
st.st_size);
return PMEM2_E_INVALID_FILE_HANDLE;
}
*size = (size_t)st.st_size;
break;
default:
FATAL(
"BUG: unhandled file type in pmem2_source_size");
}
LOG(4, "file length %zu", *size);
return 0;
}
/*
* pmem2_source_alignment -- get alignment from the file descriptor stored in
* the provided source
*/
int
pmem2_source_alignment(const struct pmem2_source *src, size_t *alignment)
{
LOG(3, "type %d", src->type);
if (src->type == PMEM2_SOURCE_ANON) {
*alignment = Pagesize;
return 0;
}
ASSERT(src->type == PMEM2_SOURCE_FD);
switch (src->value.ftype) {
case PMEM2_FTYPE_DEVDAX: {
int ret = pmem2_device_dax_alignment(src, alignment);
if (ret)
return ret;
break;
}
case PMEM2_FTYPE_REG:
*alignment = Pagesize;
break;
default:
FATAL(
"BUG: unhandled file type in pmem2_source_alignment");
}
if (!util_is_pow2(*alignment)) {
ERR("alignment (%zu) has to be a power of two", *alignment);
return PMEM2_E_INVALID_ALIGNMENT_VALUE;
}
LOG(4, "alignment %zu", *alignment);
return 0;
}
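/*
 * Illustrative sketch (not part of the original file): creating a
 * source from an open descriptor and querying its size and alignment,
 * the usual prelude to configuring a mapping. pmem2_source_delete()
 * is assumed from the public libpmem2 API.
 */
static inline int
example_query_source(int fd, size_t *size, size_t *alignment)
{
	struct pmem2_source *src;
	int ret = pmem2_source_from_fd(&src, fd);
	if (ret)
		return ret;
	ret = pmem2_source_size(src, size);
	if (ret == 0)
		ret = pmem2_source_alignment(src, alignment);
	pmem2_source_delete(&src);
	return ret;
}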
| 3,539 | 19.581395 | 80 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/map_windows.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019-2020, Intel Corporation */
/*
* map_windows.c -- pmem2_map (Windows)
*/
#include <stdbool.h>
#include "libpmem2.h"
#include "alloc.h"
#include "auto_flush.h"
#include "config.h"
#include "map.h"
#include "out.h"
#include "persist.h"
#include "pmem2_utils.h"
#include "source.h"
#include "util.h"
#define HIDWORD(x) ((DWORD)((x) >> 32))
#define LODWORD(x) ((DWORD)((x) & 0xFFFFFFFF))
/* requested CACHE_LINE, available PAGE */
#define REQ_CL_AVAIL_PG \
"requested granularity not available because specified volume is not a direct access (DAX) volume"
/* requested BYTE, available PAGE */
#define REQ_BY_AVAIL_PG REQ_CL_AVAIL_PG
/* requested BYTE, available CACHE_LINE */
#define REQ_BY_AVAIL_CL \
"requested granularity not available because the platform doesn't support eADR"
/* indicates the cases in which the error cannot occur */
#define GRAN_IMPOSSIBLE "impossible"
static const char *granularity_err_msg[3][3] = {
/* requested granularity / available granularity */
/* -------------------------------------------------------------------- */
/* BYTE CACHE_LINE PAGE */
/* -------------------------------------------------------------------- */
/* BYTE */ {GRAN_IMPOSSIBLE, REQ_BY_AVAIL_CL, REQ_BY_AVAIL_PG},
/* CL */ {GRAN_IMPOSSIBLE, GRAN_IMPOSSIBLE, REQ_CL_AVAIL_PG},
/* PAGE */ {GRAN_IMPOSSIBLE, GRAN_IMPOSSIBLE, GRAN_IMPOSSIBLE}};
/*
* create_mapping -- creates file mapping object for a file
*/
static HANDLE
create_mapping(HANDLE hfile, size_t offset, size_t length, DWORD protect,
unsigned long *err)
{
size_t max_size = length + offset;
SetLastError(0);
HANDLE mh = CreateFileMapping(hfile,
NULL, /* security attributes */
protect,
HIDWORD(max_size),
LODWORD(max_size),
NULL);
*err = GetLastError();
if (!mh) {
ERR("!!CreateFileMapping");
return NULL;
}
if (*err == ERROR_ALREADY_EXISTS) {
ERR("!!CreateFileMapping");
CloseHandle(mh);
return NULL;
}
/* if the handle is valid the last error is undefined */
*err = 0;
return mh;
}
/*
* is_direct_access -- check if the specified volume is a
* direct access (DAX) volume
*/
static int
is_direct_access(HANDLE fh)
{
DWORD filesystemFlags;
if (!GetVolumeInformationByHandleW(fh, NULL, 0, NULL,
NULL, &filesystemFlags, NULL, 0)) {
ERR("!!GetVolumeInformationByHandleW");
/* always return a negative value */
return pmem2_lasterror_to_err();
}
if (filesystemFlags & FILE_DAX_VOLUME)
return 1;
return 0;
}
/*
* pmem2_map -- map memory according to provided config
*/
int
pmem2_map(const struct pmem2_config *cfg, const struct pmem2_source *src,
struct pmem2_map **map_ptr)
{
LOG(3, "cfg %p src %p map_ptr %p", cfg, src, map_ptr);
int ret = 0;
unsigned long err = 0;
size_t file_size;
*map_ptr = NULL;
if ((int)cfg->requested_max_granularity == PMEM2_GRANULARITY_INVALID) {
ERR(
"please define the max granularity requested for the mapping");
return PMEM2_E_GRANULARITY_NOT_SET;
}
ret = pmem2_source_size(src, &file_size);
if (ret)
return ret;
size_t src_alignment;
ret = pmem2_source_alignment(src, &src_alignment);
if (ret)
return ret;
size_t length;
ret = pmem2_config_validate_length(cfg, file_size, src_alignment);
if (ret)
return ret;
size_t effective_offset;
ret = pmem2_validate_offset(cfg, &effective_offset, src_alignment);
if (ret)
return ret;
if (src->type == PMEM2_SOURCE_ANON)
effective_offset = 0;
/* without user-provided length, map to the end of the file */
if (cfg->length)
length = cfg->length;
else
length = file_size - effective_offset;
HANDLE map_handle = INVALID_HANDLE_VALUE;
if (src->type == PMEM2_SOURCE_HANDLE) {
map_handle = src->value.handle;
} else if (src->type == PMEM2_SOURCE_ANON) {
/* no extra settings */
} else {
ASSERT(0);
}
DWORD proto = PAGE_READWRITE;
DWORD access = FILE_MAP_ALL_ACCESS;
/* Unsupported flag combinations */
if ((cfg->protection_flag == PMEM2_PROT_NONE) ||
(cfg->protection_flag == PMEM2_PROT_WRITE) ||
(cfg->protection_flag == PMEM2_PROT_EXEC) ||
(cfg->protection_flag == (PMEM2_PROT_WRITE |
PMEM2_PROT_EXEC))) {
ERR("Windows does not support "
"this protection flag combination.");
return PMEM2_E_NOSUPP;
}
/* Translate protection flags into Windows flags */
if (cfg->protection_flag & PMEM2_PROT_WRITE) {
if (cfg->protection_flag & PMEM2_PROT_EXEC) {
proto = PAGE_EXECUTE_READWRITE;
access = FILE_MAP_READ | FILE_MAP_WRITE |
FILE_MAP_EXECUTE;
} else {
/*
* Due to the already done exclusion
* of incorrect combinations, PROT_WRITE
* implies PROT_READ
*/
proto = PAGE_READWRITE;
access = FILE_MAP_READ | FILE_MAP_WRITE;
}
} else if (cfg->protection_flag & PMEM2_PROT_READ) {
if (cfg->protection_flag & PMEM2_PROT_EXEC) {
proto = PAGE_EXECUTE_READ;
access = FILE_MAP_READ | FILE_MAP_EXECUTE;
} else {
proto = PAGE_READONLY;
access = FILE_MAP_READ;
}
}
if (cfg->sharing == PMEM2_PRIVATE) {
if (cfg->protection_flag & PMEM2_PROT_EXEC) {
proto = PAGE_EXECUTE_WRITECOPY;
access = FILE_MAP_EXECUTE | FILE_MAP_COPY;
} else {
/*
* If FILE_MAP_COPY is set,
* protection is changed to read/write
*/
proto = PAGE_READONLY;
access = FILE_MAP_COPY;
}
}
/* create a file mapping handle */
HANDLE mh = create_mapping(map_handle, effective_offset, length,
proto, &err);
if (!mh) {
if (err == ERROR_ALREADY_EXISTS) {
ERR("mapping already exists");
return PMEM2_E_MAPPING_EXISTS;
} else if (err == ERROR_ACCESS_DENIED) {
return PMEM2_E_NO_ACCESS;
}
return pmem2_lasterror_to_err();
}
ret = pmem2_config_validate_addr_alignment(cfg, src);
if (ret)
return ret;
/* let's get addr from cfg struct */
LPVOID addr_hint = cfg->addr;
/* obtain a pointer to the mapping view */
void *base = MapViewOfFileEx(mh,
access,
HIDWORD(effective_offset),
LODWORD(effective_offset),
length,
addr_hint); /* hint address */
if (base == NULL) {
ERR("!!MapViewOfFileEx");
if (cfg->addr_request == PMEM2_ADDRESS_FIXED_NOREPLACE) {
DWORD ret_windows = GetLastError();
if (ret_windows == ERROR_INVALID_ADDRESS)
ret = PMEM2_E_MAPPING_EXISTS;
else
ret = pmem2_lasterror_to_err();
		} else {
			ret = pmem2_lasterror_to_err();
		}
goto err_close_mapping_handle;
}
if (!CloseHandle(mh)) {
ERR("!!CloseHandle");
ret = pmem2_lasterror_to_err();
goto err_unmap_base;
}
enum pmem2_granularity available_min_granularity =
PMEM2_GRANULARITY_PAGE;
if (src->type == PMEM2_SOURCE_HANDLE) {
int direct_access = is_direct_access(src->value.handle);
if (direct_access < 0) {
ret = direct_access;
goto err_unmap_base;
}
bool eADR = (pmem2_auto_flush() == 1);
available_min_granularity =
get_min_granularity(eADR, direct_access, cfg->sharing);
} else if (src->type == PMEM2_SOURCE_ANON) {
available_min_granularity = PMEM2_GRANULARITY_BYTE;
} else {
ASSERT(0);
}
if (available_min_granularity > cfg->requested_max_granularity) {
const char *err = granularity_err_msg
[cfg->requested_max_granularity]
[available_min_granularity];
if (strcmp(err, GRAN_IMPOSSIBLE) == 0)
FATAL(
"unhandled granularity error: available_min_granularity: %d" \
"requested_max_granularity: %d",
available_min_granularity,
cfg->requested_max_granularity);
ERR("%s", err);
ret = PMEM2_E_GRANULARITY_NOT_SUPPORTED;
goto err_unmap_base;
}
/* prepare pmem2_map structure */
struct pmem2_map *map;
map = (struct pmem2_map *)pmem2_malloc(sizeof(*map), &ret);
if (!map)
goto err_unmap_base;
map->addr = base;
/*
* XXX probably in some cases the reserved length > the content length.
* Maybe it is worth to do the research.
*/
map->reserved_length = length;
map->content_length = length;
map->effective_granularity = available_min_granularity;
map->source = *src;
pmem2_set_flush_fns(map);
pmem2_set_mem_fns(map);
ret = pmem2_register_mapping(map);
if (ret)
goto err_register;
/* return a pointer to the pmem2_map structure */
*map_ptr = map;
return ret;
err_register:
free(map);
err_unmap_base:
UnmapViewOfFile(base);
return ret;
err_close_mapping_handle:
CloseHandle(mh);
return ret;
}
/*
* pmem2_unmap -- unmap the specified region
*/
int
pmem2_unmap(struct pmem2_map **map_ptr)
{
LOG(3, "mapp %p", map_ptr);
struct pmem2_map *map = *map_ptr;
int ret = pmem2_unregister_mapping(map);
if (ret)
return ret;
if (!UnmapViewOfFile(map->addr)) {
ERR("!!UnmapViewOfFile");
return pmem2_lasterror_to_err();
}
Free(map);
*map_ptr = NULL;
return 0;
}
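/*
 * Illustrative sketch (not part of the original file): the minimal
 * config/map/unmap round trip served by the functions above. The
 * config constructor and setters are assumed from the public
 * libpmem2 API.
 */
static inline int
example_map_roundtrip(const struct pmem2_source *src)
{
	struct pmem2_config *cfg;
	struct pmem2_map *map = NULL;
	int ret = pmem2_config_new(&cfg);
	if (ret)
		return ret;
	ret = pmem2_config_set_required_store_granularity(cfg,
			PMEM2_GRANULARITY_PAGE);
	if (ret == 0)
		ret = pmem2_map(cfg, src, &map);
	if (ret == 0)
		ret = pmem2_unmap(&map);
	pmem2_config_delete(&cfg);
	return ret;
}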
| 8,611 | 23.123249 | 99 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/extent_linux.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018-2020, Intel Corporation */
/*
* extent_linux.c - implementation of the linux fs extent query API
*/
#include <string.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/fs.h>
#include <linux/fiemap.h>
#include "libpmem2.h"
#include "pmem2_utils.h"
#include "file.h"
#include "out.h"
#include "extent.h"
#include "alloc.h"
/*
* pmem2_extents_create_get -- allocate extents structure and get extents
* of the given file
*/
int
pmem2_extents_create_get(int fd, struct extents **exts)
{
LOG(3, "fd %i extents %p", fd, exts);
ASSERT(fd > 2);
ASSERTne(exts, NULL);
enum pmem2_file_type pmem2_type;
struct extents *pexts = NULL;
struct fiemap *fmap = NULL;
os_stat_t st;
if (os_fstat(fd, &st) < 0) {
ERR("!fstat %d", fd);
return PMEM2_E_ERRNO;
}
int ret = pmem2_get_type_from_stat(&st, &pmem2_type);
if (ret)
return ret;
/* directories do not have any extents */
if (pmem2_type == PMEM2_FTYPE_DIR) {
ERR(
"checking extents does not make sense in case of directories");
return PMEM2_E_INVALID_FILE_TYPE;
}
/* allocate extents structure */
pexts = pmem2_zalloc(sizeof(struct extents), &ret);
if (ret)
return ret;
/* save block size */
LOG(10, "fd %i: block size: %li", fd, (long int)st.st_blksize);
pexts->blksize = (uint64_t)st.st_blksize;
/* DAX device does not have any extents */
if (pmem2_type == PMEM2_FTYPE_DEVDAX) {
*exts = pexts;
return 0;
}
ASSERTeq(pmem2_type, PMEM2_FTYPE_REG);
fmap = pmem2_zalloc(sizeof(struct fiemap), &ret);
if (ret)
goto error_free;
fmap->fm_start = 0;
fmap->fm_length = (size_t)st.st_size;
fmap->fm_flags = 0;
fmap->fm_extent_count = 0;
fmap->fm_mapped_extents = 0;
if (ioctl(fd, FS_IOC_FIEMAP, fmap) != 0) {
ERR("!fiemap ioctl() for fd=%d failed", fd);
ret = PMEM2_E_ERRNO;
goto error_free;
}
size_t newsize = sizeof(struct fiemap) +
fmap->fm_mapped_extents * sizeof(struct fiemap_extent);
struct fiemap *newfmap = pmem2_realloc(fmap, newsize, &ret);
if (ret)
goto error_free;
fmap = newfmap;
memset(fmap->fm_extents, 0, fmap->fm_mapped_extents *
sizeof(struct fiemap_extent));
fmap->fm_extent_count = fmap->fm_mapped_extents;
fmap->fm_mapped_extents = 0;
if (ioctl(fd, FS_IOC_FIEMAP, fmap) != 0) {
ERR("!fiemap ioctl() for fd=%d failed", fd);
ret = PMEM2_E_ERRNO;
goto error_free;
}
LOG(4, "file with fd=%i has %u extents:", fd, fmap->fm_mapped_extents);
/* save number of extents */
pexts->extents_count = fmap->fm_mapped_extents;
pexts->extents = pmem2_malloc(
pexts->extents_count * sizeof(struct extent),
&ret);
if (ret)
goto error_free;
/* save extents */
unsigned e;
for (e = 0; e < fmap->fm_mapped_extents; e++) {
pexts->extents[e].offset_physical =
fmap->fm_extents[e].fe_physical;
pexts->extents[e].offset_logical =
fmap->fm_extents[e].fe_logical;
pexts->extents[e].length =
fmap->fm_extents[e].fe_length;
LOG(10, " #%u: off_phy: %lu off_log: %lu len: %lu",
e,
pexts->extents[e].offset_physical,
pexts->extents[e].offset_logical,
pexts->extents[e].length);
}
*exts = pexts;
Free(fmap);
return 0;
error_free:
Free(pexts->extents);
Free(pexts);
Free(fmap);
return ret;
}
/*
* pmem2_extents_destroy -- free extents structure
*/
void
pmem2_extents_destroy(struct extents **exts)
{
LOG(3, "extents %p", exts);
ASSERTne(exts, NULL);
if (*exts) {
Free((*exts)->extents);
Free(*exts);
*exts = NULL;
}
}
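/*
 * Illustrative sketch (not part of the original file): dumping the
 * extents of an open file using the pair of functions above.
 */
static inline int
example_print_extents(int fd)
{
	struct extents *exts;
	unsigned e;
	int ret = pmem2_extents_create_get(fd, &exts);
	if (ret)
		return ret;
	for (e = 0; e < exts->extents_count; e++)
		LOG(4, "extent #%u: off_log %lu len %lu", e,
			exts->extents[e].offset_logical,
			exts->extents[e].length);
	pmem2_extents_destroy(&exts);
	return 0;
}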
| 3,519 | 20.333333 | 73 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/deep_flush_other.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* deep_flush_other.c -- deep_flush functionality
*/
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include "deep_flush.h"
#include "libpmem2.h"
#include "out.h"
#include "pmem2_utils.h"
#include "persist.h"
/*
* pmem2_deep_flush_dax -- performs flush buffer operation
*/
int
pmem2_deep_flush_dax(struct pmem2_map *map, void *ptr, size_t size)
{
int ret = pmem2_flush_file_buffers_os(map, ptr, size, 0);
if (ret) {
LOG(1, "cannot flush buffers addr %p len %zu", ptr, size);
return ret;
}
return 0;
}
/*
* pmem2_deep_flush_write -- perform write to deep_flush file
* on given region_id (Device Dax only)
*/
int
pmem2_deep_flush_write(unsigned region_id)
{
const char *err =
"BUG: pmem2_deep_flush_write should never be called on this OS";
ERR("%s", err);
ASSERTinfo(0, err);
/* not supported */
return PMEM2_E_NOSUPP;
}
| 944 | 18.6875 | 67 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/x86_64/flush.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2020, Intel Corporation */
#ifndef X86_64_FLUSH_H
#define X86_64_FLUSH_H
#include <emmintrin.h>
#include <stddef.h>
#include <stdint.h>
#include "util.h"
#include "valgrind_internal.h"
#define FLUSH_ALIGN ((uintptr_t)64)
static force_inline void
pmem_clflush(const void *addr)
{
_mm_clflush(addr);
}
#ifdef _MSC_VER
static force_inline void
pmem_clflushopt(const void *addr)
{
_mm_clflushopt(addr);
}
static force_inline void
pmem_clwb(const void *addr)
{
_mm_clwb(addr);
}
#else
/*
* The x86 memory instructions are new enough that the compiler
* intrinsic functions are not always available. The intrinsic
* functions are defined here in terms of asm statements for now.
*/
static force_inline void
pmem_clflushopt(const void *addr)
{
asm volatile(".byte 0x66; clflush %0" : "+m" \
(*(volatile char *)(addr)));
}
static force_inline void
pmem_clwb(const void *addr)
{
asm volatile(".byte 0x66; xsaveopt %0" : "+m" \
(*(volatile char *)(addr)));
}
#endif /* _MSC_VER */
typedef void flush_fn(const void *, size_t);
/*
* flush_clflush_nolog -- flush the CPU cache, using clflush
*/
static force_inline void
flush_clflush_nolog(const void *addr, size_t len)
{
uintptr_t uptr;
/*
* Loop through cache-line-size (typically 64B) aligned chunks
* covering the given range.
*/
for (uptr = (uintptr_t)addr & ~(FLUSH_ALIGN - 1);
uptr < (uintptr_t)addr + len; uptr += FLUSH_ALIGN)
_mm_clflush((char *)uptr);
}
/*
* flush_clflushopt_nolog -- flush the CPU cache, using clflushopt
*/
static force_inline void
flush_clflushopt_nolog(const void *addr, size_t len)
{
uintptr_t uptr;
/*
* Loop through cache-line-size (typically 64B) aligned chunks
* covering the given range.
*/
for (uptr = (uintptr_t)addr & ~(FLUSH_ALIGN - 1);
uptr < (uintptr_t)addr + len; uptr += FLUSH_ALIGN) {
pmem_clflushopt((char *)uptr);
}
}
/*
* flush_clwb_nolog -- flush the CPU cache, using clwb
*/
static force_inline void
flush_clwb_nolog(const void *addr, size_t len)
{
uintptr_t uptr;
/*
* Loop through cache-line-size (typically 64B) aligned chunks
* covering the given range.
*/
for (uptr = (uintptr_t)addr & ~(FLUSH_ALIGN - 1);
uptr < (uintptr_t)addr + len; uptr += FLUSH_ALIGN) {
pmem_clwb((char *)uptr);
}
}
/*
* flush64b_empty -- (internal) do not flush the CPU cache
*/
static force_inline void
flush64b_empty(const void *addr)
{
/* NOP, but tell pmemcheck about it */
VALGRIND_DO_FLUSH(addr, 64);
}
#endif
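To make the alignment loop above concrete: a 100-byte buffer starting at offset 0x38 into a cache line touches three 64B lines, so the loop issues three flushes. Below is a minimal standalone sketch (GCC/Clang, x86-64, requires a CPU with clwb) mirroring flush_clwb_nolog() plus the fence a caller needs afterwards; the clwb wrapper repeats the asm encoding used above.

#include <emmintrin.h>	/* _mm_sfence */
#include <stddef.h>
#include <stdint.h>

#define FLUSH_ALIGN ((uintptr_t)64)

static inline void
clwb(const void *addr)
{
	asm volatile(".byte 0x66; xsaveopt %0"
		: "+m" (*(volatile char *)addr));
}

/* flush [addr, addr + len) and order the flushes before later stores */
static inline void
persist_range(const void *addr, size_t len)
{
	for (uintptr_t uptr = (uintptr_t)addr & ~(FLUSH_ALIGN - 1);
			uptr < (uintptr_t)addr + len; uptr += FLUSH_ALIGN)
		clwb((char *)uptr);

	_mm_sfence();
}

int
main(void)
{
	static char buf[100];
	persist_range(buf, sizeof(buf));	/* three clwb's + one sfence */
	return 0;
}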
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/x86_64/cpu.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* cpu.c -- CPU features detection
*/
/*
* Reference:
* http://www.intel.com/content/www/us/en/processors/
* architectures-software-developer-manuals.html
*
* https://support.amd.com/TechDocs/24594.pdf
*/
#include <string.h>
#include "out.h"
#include "cpu.h"
#define EAX_IDX 0
#define EBX_IDX 1
#define ECX_IDX 2
#define EDX_IDX 3
#if defined(__x86_64__) || defined(__amd64__)
#include <cpuid.h>
static inline void
cpuid(unsigned func, unsigned subfunc, unsigned cpuinfo[4])
{
__cpuid_count(func, subfunc, cpuinfo[EAX_IDX], cpuinfo[EBX_IDX],
cpuinfo[ECX_IDX], cpuinfo[EDX_IDX]);
}
#elif defined(_M_X64) || defined(_M_AMD64)
#include <intrin.h>
static inline void
cpuid(unsigned func, unsigned subfunc, unsigned cpuinfo[4])
{
__cpuidex(cpuinfo, func, subfunc);
}
#else
#error unsupported compiler
#endif
#ifndef bit_CLFLUSH
#define bit_CLFLUSH (1 << 19)
#endif
#ifndef bit_CLFLUSHOPT
#define bit_CLFLUSHOPT (1 << 23)
#endif
#ifndef bit_CLWB
#define bit_CLWB (1 << 24)
#endif
#ifndef bit_AVX
#define bit_AVX (1 << 28)
#endif
#ifndef bit_AVX512F
#define bit_AVX512F (1 << 16)
#endif
/*
* is_cpu_feature_present -- (internal) checks if CPU feature is supported
*/
static int
is_cpu_feature_present(unsigned func, unsigned reg, unsigned bit)
{
unsigned cpuinfo[4] = { 0 };
/* check CPUID level first */
cpuid(0x0, 0x0, cpuinfo);
if (cpuinfo[EAX_IDX] < func)
return 0;
cpuid(func, 0x0, cpuinfo);
return (cpuinfo[reg] & bit) != 0;
}
/*
* is_cpu_genuine_intel -- checks for genuine Intel CPU
*/
int
is_cpu_genuine_intel(void)
{
unsigned cpuinfo[4] = { 0 };
union {
char name[0x20];
unsigned cpuinfo[3];
} vendor;
memset(&vendor, 0, sizeof(vendor));
cpuid(0x0, 0x0, cpuinfo);
vendor.cpuinfo[0] = cpuinfo[EBX_IDX];
vendor.cpuinfo[1] = cpuinfo[EDX_IDX];
vendor.cpuinfo[2] = cpuinfo[ECX_IDX];
LOG(4, "CPU vendor: %s", vendor.name);
return (strncmp(vendor.name, "GenuineIntel",
sizeof(vendor.name))) == 0;
}
/*
* is_cpu_clflush_present -- checks if CLFLUSH instruction is supported
*/
int
is_cpu_clflush_present(void)
{
int ret = is_cpu_feature_present(0x1, EDX_IDX, bit_CLFLUSH);
LOG(4, "CLFLUSH %ssupported", ret == 0 ? "not " : "");
return ret;
}
/*
* is_cpu_clflushopt_present -- checks if CLFLUSHOPT instruction is supported
*/
int
is_cpu_clflushopt_present(void)
{
int ret = is_cpu_feature_present(0x7, EBX_IDX, bit_CLFLUSHOPT);
LOG(4, "CLFLUSHOPT %ssupported", ret == 0 ? "not " : "");
return ret;
}
/*
* is_cpu_clwb_present -- checks if CLWB instruction is supported
*/
int
is_cpu_clwb_present(void)
{
int ret = is_cpu_feature_present(0x7, EBX_IDX, bit_CLWB);
LOG(4, "CLWB %ssupported", ret == 0 ? "not " : "");
return ret;
}
/*
* is_cpu_avx_present -- checks if AVX instructions are supported
*/
int
is_cpu_avx_present(void)
{
int ret = is_cpu_feature_present(0x1, ECX_IDX, bit_AVX);
LOG(4, "AVX %ssupported", ret == 0 ? "not " : "");
return ret;
}
/*
* is_cpu_avx512f_present -- checks if AVX-512f instructions are supported
*/
int
is_cpu_avx512f_present(void)
{
int ret = is_cpu_feature_present(0x7, EBX_IDX, bit_AVX512F);
LOG(4, "AVX512f %ssupported", ret == 0 ? "not " : "");
return ret;
}
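The detectors above all reduce to a single CPUID query. As a self-contained illustration (GCC/Clang), the same bits can be read directly with __get_cpuid_count; the bit positions mirror the #defines above.

#include <cpuid.h>
#include <stdio.h>

int
main(void)
{
	unsigned eax, ebx, ecx, edx;

	/* leaf 0x7, subleaf 0: EBX bit 23 = CLFLUSHOPT, bit 24 = CLWB */
	if (!__get_cpuid_count(0x7, 0x0, &eax, &ebx, &ecx, &edx))
		return 1;	/* leaf not supported */

	printf("clflushopt: %s\n", (ebx & (1u << 23)) ? "yes" : "no");
	printf("clwb:       %s\n", (ebx & (1u << 24)) ? "yes" : "no");
	return 0;
}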
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/x86_64/cpu.h
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2020, Intel Corporation */
#ifndef PMDK_CPU_H
#define PMDK_CPU_H 1
/*
* cpu.h -- definitions for "cpu" module
*/
int is_cpu_genuine_intel(void);
int is_cpu_clflush_present(void);
int is_cpu_clflushopt_present(void);
int is_cpu_clwb_present(void);
int is_cpu_avx_present(void);
int is_cpu_avx512f_present(void);
#endif
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/x86_64/init.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2020, Intel Corporation */
#include <string.h>
#include <xmmintrin.h>
#include "auto_flush.h"
#include "cpu.h"
#include "flush.h"
#include "memcpy_memset.h"
#include "os.h"
#include "out.h"
#include "pmem2_arch.h"
#include "valgrind_internal.h"
#define MOVNT_THRESHOLD 256
size_t Movnt_threshold = MOVNT_THRESHOLD;
/*
* memory_barrier -- (internal) issue the fence instruction
*/
static void
memory_barrier(void)
{
LOG(15, NULL);
_mm_sfence(); /* ensure CLWB or CLFLUSHOPT completes */
}
/*
* flush_clflush -- (internal) flush the CPU cache, using clflush
*/
static void
flush_clflush(const void *addr, size_t len)
{
LOG(15, "addr %p len %zu", addr, len);
flush_clflush_nolog(addr, len);
}
/*
* flush_clflushopt -- (internal) flush the CPU cache, using clflushopt
*/
static void
flush_clflushopt(const void *addr, size_t len)
{
LOG(15, "addr %p len %zu", addr, len);
flush_clflushopt_nolog(addr, len);
}
/*
* flush_clwb -- (internal) flush the CPU cache, using clwb
*/
static void
flush_clwb(const void *addr, size_t len)
{
LOG(15, "addr %p len %zu", addr, len);
flush_clwb_nolog(addr, len);
}
#if SSE2_AVAILABLE || AVX_AVAILABLE || AVX512F_AVAILABLE
#define PMEM2_F_MEM_MOVNT (PMEM2_F_MEM_WC | PMEM2_F_MEM_NONTEMPORAL)
#define PMEM2_F_MEM_MOV (PMEM2_F_MEM_WB | PMEM2_F_MEM_TEMPORAL)
#define MEMCPY_TEMPLATE(isa, flush, perfbarrier) \
static void *\
memmove_nodrain_##isa##_##flush##perfbarrier(void *dest, const void *src, \
size_t len, unsigned flags, flush_func flushf)\
{\
if (len == 0 || src == dest)\
return dest;\
\
if (flags & PMEM2_F_MEM_NOFLUSH) \
memmove_mov_##isa##_noflush(dest, src, len); \
else if (flags & PMEM2_F_MEM_MOVNT)\
memmove_movnt_##isa ##_##flush##perfbarrier(dest, src, len);\
else if (flags & PMEM2_F_MEM_MOV)\
memmove_mov_##isa##_##flush(dest, src, len);\
else if (len < Movnt_threshold)\
memmove_mov_##isa##_##flush(dest, src, len);\
else\
memmove_movnt_##isa##_##flush##perfbarrier(dest, src, len);\
\
return dest;\
}
#define MEMCPY_TEMPLATE_EADR(isa, perfbarrier) \
static void *\
memmove_nodrain_##isa##_eadr##perfbarrier(void *dest, const void *src, \
size_t len, unsigned flags, flush_func flushf)\
{\
if (len == 0 || src == dest)\
return dest;\
\
if (flags & PMEM2_F_MEM_NOFLUSH)\
memmove_mov_##isa##_noflush(dest, src, len);\
else if (flags & PMEM2_F_MEM_NONTEMPORAL)\
memmove_movnt_##isa##_empty##perfbarrier(dest, src, len);\
else\
memmove_mov_##isa##_empty(dest, src, len);\
\
return dest;\
}
#define MEMSET_TEMPLATE(isa, flush, perfbarrier)\
static void *\
memset_nodrain_##isa##_##flush##perfbarrier(void *dest, int c, size_t len, \
unsigned flags, flush_func flushf)\
{\
if (len == 0)\
return dest;\
\
if (flags & PMEM2_F_MEM_NOFLUSH) \
memset_mov_##isa##_noflush(dest, c, len); \
else if (flags & PMEM2_F_MEM_MOVNT)\
memset_movnt_##isa##_##flush##perfbarrier(dest, c, len);\
else if (flags & PMEM2_F_MEM_MOV)\
memset_mov_##isa##_##flush(dest, c, len);\
else if (len < Movnt_threshold)\
memset_mov_##isa##_##flush(dest, c, len);\
else\
memset_movnt_##isa##_##flush##perfbarrier(dest, c, len);\
\
return dest;\
}
#define MEMSET_TEMPLATE_EADR(isa, perfbarrier) \
static void *\
memset_nodrain_##isa##_eadr##perfbarrier(void *dest, int c, size_t len, \
unsigned flags, flush_func flushf)\
{\
if (len == 0)\
return dest;\
\
if (flags & PMEM2_F_MEM_NOFLUSH)\
memset_mov_##isa##_noflush(dest, c, len);\
else if (flags & PMEM2_F_MEM_NONTEMPORAL)\
memset_movnt_##isa##_empty##perfbarrier(dest, c, len);\
else\
memset_mov_##isa##_empty(dest, c, len);\
\
return dest;\
}
#endif
#if SSE2_AVAILABLE
MEMCPY_TEMPLATE(sse2, clflush, _nobarrier)
MEMCPY_TEMPLATE(sse2, clflushopt, _nobarrier)
MEMCPY_TEMPLATE(sse2, clwb, _nobarrier)
MEMCPY_TEMPLATE_EADR(sse2, _nobarrier)
MEMSET_TEMPLATE(sse2, clflush, _nobarrier)
MEMSET_TEMPLATE(sse2, clflushopt, _nobarrier)
MEMSET_TEMPLATE(sse2, clwb, _nobarrier)
MEMSET_TEMPLATE_EADR(sse2, _nobarrier)
MEMCPY_TEMPLATE(sse2, clflush, _wcbarrier)
MEMCPY_TEMPLATE(sse2, clflushopt, _wcbarrier)
MEMCPY_TEMPLATE(sse2, clwb, _wcbarrier)
MEMCPY_TEMPLATE_EADR(sse2, _wcbarrier)
MEMSET_TEMPLATE(sse2, clflush, _wcbarrier)
MEMSET_TEMPLATE(sse2, clflushopt, _wcbarrier)
MEMSET_TEMPLATE(sse2, clwb, _wcbarrier)
MEMSET_TEMPLATE_EADR(sse2, _wcbarrier)
#endif
#if AVX_AVAILABLE
MEMCPY_TEMPLATE(avx, clflush, _nobarrier)
MEMCPY_TEMPLATE(avx, clflushopt, _nobarrier)
MEMCPY_TEMPLATE(avx, clwb, _nobarrier)
MEMCPY_TEMPLATE_EADR(avx, _nobarrier)
MEMSET_TEMPLATE(avx, clflush, _nobarrier)
MEMSET_TEMPLATE(avx, clflushopt, _nobarrier)
MEMSET_TEMPLATE(avx, clwb, _nobarrier)
MEMSET_TEMPLATE_EADR(avx, _nobarrier)
MEMCPY_TEMPLATE(avx, clflush, _wcbarrier)
MEMCPY_TEMPLATE(avx, clflushopt, _wcbarrier)
MEMCPY_TEMPLATE(avx, clwb, _wcbarrier)
MEMCPY_TEMPLATE_EADR(avx, _wcbarrier)
MEMSET_TEMPLATE(avx, clflush, _wcbarrier)
MEMSET_TEMPLATE(avx, clflushopt, _wcbarrier)
MEMSET_TEMPLATE(avx, clwb, _wcbarrier)
MEMSET_TEMPLATE_EADR(avx, _wcbarrier)
#endif
#if AVX512F_AVAILABLE
MEMCPY_TEMPLATE(avx512f, clflush, /* cstyle workaround: empty arg */)
MEMCPY_TEMPLATE(avx512f, clflushopt, /* */)
MEMCPY_TEMPLATE(avx512f, clwb, /* */)
MEMCPY_TEMPLATE_EADR(avx512f, /* */)
MEMSET_TEMPLATE(avx512f, clflush, /* */)
MEMSET_TEMPLATE(avx512f, clflushopt, /* */)
MEMSET_TEMPLATE(avx512f, clwb, /* */)
MEMSET_TEMPLATE_EADR(avx512f, /* */)
#endif
enum memcpy_impl {
MEMCPY_INVALID,
MEMCPY_SSE2,
MEMCPY_AVX,
MEMCPY_AVX512F
};
/*
* use_sse2_memcpy_memset -- (internal) SSE2 detected, use it if possible
*/
static void
use_sse2_memcpy_memset(struct pmem2_arch_info *info, enum memcpy_impl *impl,
int wc_workaround)
{
#if SSE2_AVAILABLE
*impl = MEMCPY_SSE2;
if (wc_workaround) {
info->memmove_nodrain_eadr =
memmove_nodrain_sse2_eadr_wcbarrier;
if (info->flush == flush_clflush)
info->memmove_nodrain =
memmove_nodrain_sse2_clflush_wcbarrier;
else if (info->flush == flush_clflushopt)
info->memmove_nodrain =
memmove_nodrain_sse2_clflushopt_wcbarrier;
else if (info->flush == flush_clwb)
info->memmove_nodrain =
memmove_nodrain_sse2_clwb_wcbarrier;
else
ASSERT(0);
info->memset_nodrain_eadr = memset_nodrain_sse2_eadr_wcbarrier;
if (info->flush == flush_clflush)
info->memset_nodrain =
memset_nodrain_sse2_clflush_wcbarrier;
else if (info->flush == flush_clflushopt)
info->memset_nodrain =
memset_nodrain_sse2_clflushopt_wcbarrier;
else if (info->flush == flush_clwb)
info->memset_nodrain =
memset_nodrain_sse2_clwb_wcbarrier;
else
ASSERT(0);
} else {
info->memmove_nodrain_eadr =
memmove_nodrain_sse2_eadr_nobarrier;
if (info->flush == flush_clflush)
info->memmove_nodrain =
memmove_nodrain_sse2_clflush_nobarrier;
else if (info->flush == flush_clflushopt)
info->memmove_nodrain =
memmove_nodrain_sse2_clflushopt_nobarrier;
else if (info->flush == flush_clwb)
info->memmove_nodrain =
memmove_nodrain_sse2_clwb_nobarrier;
else
ASSERT(0);
info->memset_nodrain_eadr =
memset_nodrain_sse2_eadr_nobarrier;
if (info->flush == flush_clflush)
info->memset_nodrain =
memset_nodrain_sse2_clflush_nobarrier;
else if (info->flush == flush_clflushopt)
info->memset_nodrain =
memset_nodrain_sse2_clflushopt_nobarrier;
else if (info->flush == flush_clwb)
info->memset_nodrain =
memset_nodrain_sse2_clwb_nobarrier;
else
ASSERT(0);
}
#else
LOG(3, "sse2 disabled at build time");
#endif
}
/*
* use_avx_memcpy_memset -- (internal) AVX detected, use it if possible
*/
static void
use_avx_memcpy_memset(struct pmem2_arch_info *info, enum memcpy_impl *impl,
int wc_workaround)
{
#if AVX_AVAILABLE
LOG(3, "avx supported");
char *e = os_getenv("PMEM_AVX");
if (e != NULL && strcmp(e, "0") == 0) {
LOG(3, "PMEM_AVX set to 0");
return;
}
LOG(3, "PMEM_AVX enabled");
*impl = MEMCPY_AVX;
if (wc_workaround) {
info->memmove_nodrain_eadr =
memmove_nodrain_avx_eadr_wcbarrier;
if (info->flush == flush_clflush)
info->memmove_nodrain =
memmove_nodrain_avx_clflush_wcbarrier;
else if (info->flush == flush_clflushopt)
info->memmove_nodrain =
memmove_nodrain_avx_clflushopt_wcbarrier;
else if (info->flush == flush_clwb)
info->memmove_nodrain =
memmove_nodrain_avx_clwb_wcbarrier;
else
ASSERT(0);
info->memset_nodrain_eadr =
memset_nodrain_avx_eadr_wcbarrier;
if (info->flush == flush_clflush)
info->memset_nodrain =
memset_nodrain_avx_clflush_wcbarrier;
else if (info->flush == flush_clflushopt)
info->memset_nodrain =
memset_nodrain_avx_clflushopt_wcbarrier;
else if (info->flush == flush_clwb)
info->memset_nodrain =
memset_nodrain_avx_clwb_wcbarrier;
else
ASSERT(0);
} else {
info->memmove_nodrain_eadr =
memmove_nodrain_avx_eadr_nobarrier;
if (info->flush == flush_clflush)
info->memmove_nodrain =
memmove_nodrain_avx_clflush_nobarrier;
else if (info->flush == flush_clflushopt)
info->memmove_nodrain =
memmove_nodrain_avx_clflushopt_nobarrier;
else if (info->flush == flush_clwb)
info->memmove_nodrain =
memmove_nodrain_avx_clwb_nobarrier;
else
ASSERT(0);
info->memset_nodrain_eadr =
memset_nodrain_avx_eadr_nobarrier;
if (info->flush == flush_clflush)
info->memset_nodrain =
memset_nodrain_avx_clflush_nobarrier;
else if (info->flush == flush_clflushopt)
info->memset_nodrain =
memset_nodrain_avx_clflushopt_nobarrier;
else if (info->flush == flush_clwb)
info->memset_nodrain =
memset_nodrain_avx_clwb_nobarrier;
else
ASSERT(0);
}
#else
LOG(3, "avx supported, but disabled at build time");
#endif
}
/*
* use_avx512f_memcpy_memset -- (internal) AVX512F detected, use it if possible
*/
static void
use_avx512f_memcpy_memset(struct pmem2_arch_info *info,
enum memcpy_impl *impl)
{
#if AVX512F_AVAILABLE
LOG(3, "avx512f supported");
char *e = os_getenv("PMEM_AVX512F");
if (e != NULL && strcmp(e, "0") == 0) {
LOG(3, "PMEM_AVX512F set to 0");
return;
}
LOG(3, "PMEM_AVX512F enabled");
*impl = MEMCPY_AVX512F;
info->memmove_nodrain_eadr = memmove_nodrain_avx512f_eadr;
if (info->flush == flush_clflush)
info->memmove_nodrain = memmove_nodrain_avx512f_clflush;
else if (info->flush == flush_clflushopt)
info->memmove_nodrain = memmove_nodrain_avx512f_clflushopt;
else if (info->flush == flush_clwb)
info->memmove_nodrain = memmove_nodrain_avx512f_clwb;
else
ASSERT(0);
info->memset_nodrain_eadr = memset_nodrain_avx512f_eadr;
if (info->flush == flush_clflush)
info->memset_nodrain = memset_nodrain_avx512f_clflush;
else if (info->flush == flush_clflushopt)
info->memset_nodrain = memset_nodrain_avx512f_clflushopt;
else if (info->flush == flush_clwb)
info->memset_nodrain = memset_nodrain_avx512f_clwb;
else
ASSERT(0);
#else
LOG(3, "avx512f supported, but disabled at build time");
#endif
}
/*
 * pmem_cpuinfo_to_funcs -- (internal) configure libpmem2 based on CPUID
*/
static void
pmem_cpuinfo_to_funcs(struct pmem2_arch_info *info, enum memcpy_impl *impl)
{
LOG(3, NULL);
if (is_cpu_clflush_present()) {
LOG(3, "clflush supported");
info->flush = flush_clflush;
info->flush_has_builtin_fence = 1;
info->fence = memory_barrier;
}
if (is_cpu_clflushopt_present()) {
LOG(3, "clflushopt supported");
char *e = os_getenv("PMEM_NO_CLFLUSHOPT");
if (e && strcmp(e, "1") == 0) {
LOG(3, "PMEM_NO_CLFLUSHOPT forced no clflushopt");
} else {
info->flush = flush_clflushopt;
info->flush_has_builtin_fence = 0;
info->fence = memory_barrier;
}
}
if (is_cpu_clwb_present()) {
LOG(3, "clwb supported");
char *e = os_getenv("PMEM_NO_CLWB");
if (e && strcmp(e, "1") == 0) {
LOG(3, "PMEM_NO_CLWB forced no clwb");
} else {
info->flush = flush_clwb;
info->flush_has_builtin_fence = 0;
info->fence = memory_barrier;
}
}
/*
	 * XXX Disable this workaround for Intel CPUs with optimized
	 * WC eviction.
*/
int wc_workaround = is_cpu_genuine_intel();
char *ptr = os_getenv("PMEM_WC_WORKAROUND");
if (ptr) {
if (strcmp(ptr, "1") == 0) {
LOG(3, "WC workaround forced to 1");
wc_workaround = 1;
} else if (strcmp(ptr, "0") == 0) {
LOG(3, "WC workaround forced to 0");
wc_workaround = 0;
} else {
LOG(3, "incorrect value of PMEM_WC_WORKAROUND (%s)",
ptr);
}
}
LOG(3, "WC workaround = %d", wc_workaround);
ptr = os_getenv("PMEM_NO_MOVNT");
if (ptr && strcmp(ptr, "1") == 0) {
LOG(3, "PMEM_NO_MOVNT forced no movnt");
} else {
use_sse2_memcpy_memset(info, impl, wc_workaround);
if (is_cpu_avx_present())
use_avx_memcpy_memset(info, impl, wc_workaround);
if (is_cpu_avx512f_present())
use_avx512f_memcpy_memset(info, impl);
}
}
/*
* pmem2_arch_init -- initialize architecture-specific list of pmem operations
*/
void
pmem2_arch_init(struct pmem2_arch_info *info)
{
LOG(3, NULL);
enum memcpy_impl impl = MEMCPY_INVALID;
pmem_cpuinfo_to_funcs(info, &impl);
/*
* For testing, allow overriding the default threshold
* for using non-temporal stores in pmem_memcpy_*(), pmem_memmove_*()
* and pmem_memset_*().
* It has no effect if movnt is not supported or disabled.
*/
const char *ptr = os_getenv("PMEM_MOVNT_THRESHOLD");
if (ptr) {
long long val = atoll(ptr);
if (val < 0) {
LOG(3, "Invalid PMEM_MOVNT_THRESHOLD");
} else {
LOG(3, "PMEM_MOVNT_THRESHOLD set to %zu", (size_t)val);
Movnt_threshold = (size_t)val;
}
}
if (info->flush == flush_clwb)
LOG(3, "using clwb");
else if (info->flush == flush_clflushopt)
LOG(3, "using clflushopt");
else if (info->flush == flush_clflush)
LOG(3, "using clflush");
else
FATAL("invalid deep flush function address");
if (impl == MEMCPY_AVX512F)
LOG(3, "using movnt AVX512F");
else if (impl == MEMCPY_AVX)
LOG(3, "using movnt AVX");
else if (impl == MEMCPY_SSE2)
LOG(3, "using movnt SSE2");
}
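The detection order in pmem_cpuinfo_to_funcs() means the strongest available primitive wins: clflush is taken as the baseline, then clflushopt and clwb each overwrite it unless vetoed by PMEM_NO_CLFLUSHOPT / PMEM_NO_CLWB. A toy model of that precedence (illustration only, not PMDK code):

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef void (*flush_t)(const void *, size_t);

static void flush_a(const void *p, size_t n) { (void)p; (void)n; puts("clflush"); }
static void flush_b(const void *p, size_t n) { (void)p; (void)n; puts("clflushopt"); }
static void flush_c(const void *p, size_t n) { (void)p; (void)n; puts("clwb"); }

static int
vetoed(const char *name)
{
	const char *e = getenv(name);
	return e && strcmp(e, "1") == 0;
}

int
main(void)
{
	flush_t flush;

	/* pretend all three features were detected, in CPUID order */
	flush = flush_a;
	if (!vetoed("PMEM_NO_CLFLUSHOPT"))
		flush = flush_b;
	if (!vetoed("PMEM_NO_CLWB"))
		flush = flush_c;

	flush(NULL, 0);	/* prints "clwb" unless vetoed via env vars */
	return 0;
}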
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/x86_64/avx.h
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2018, Intel Corporation */
#ifndef PMEM_AVX_H
#define PMEM_AVX_H
#include <immintrin.h>
#include "util.h"
/*
* avx_zeroupper -- _mm256_zeroupper wrapper
*
* _mm256_zeroupper clears upper parts of avx registers.
*
* It's needed for 2 reasons:
* - it improves performance of non-avx code after avx
* - it works around problem discovered by Valgrind
*
* In optimized builds gcc inserts VZEROUPPER automatically before
 * calling non-avx code (or at the end of the function). But in debug
 * builds it doesn't, so if we don't do this by ourselves, then when
* someone memcpy'ies uninitialized data, Valgrind complains whenever
* someone reads those registers.
*
 * One notable example is the loader, which tries to detect whether it
* needs to save whole ymm registers by looking at their current
* (possibly uninitialized) value.
*
* Valgrind complains like that:
* Conditional jump or move depends on uninitialised value(s)
* at 0x4015CC9: _dl_runtime_resolve_avx_slow
* (in /lib/x86_64-linux-gnu/ld-2.24.so)
* by 0x10B531: test_realloc_api (obj_basic_integration.c:185)
* by 0x10F1EE: main (obj_basic_integration.c:594)
*
* Note: We have to be careful to not read AVX registers after this
* intrinsic, because of this stupid gcc bug:
* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82735
*/
static force_inline void
avx_zeroupper(void)
{
_mm256_zeroupper();
}
static force_inline __m128i
m256_get16b(__m256i ymm)
{
return _mm256_extractf128_si256(ymm, 0);
}
#ifdef _MSC_VER
static force_inline uint64_t
m256_get8b(__m256i ymm)
{
return (uint64_t)_mm_extract_epi64(m256_get16b(ymm), 0);
}
static force_inline uint32_t
m256_get4b(__m256i ymm)
{
return (uint32_t)m256_get8b(ymm);
}
static force_inline uint16_t
m256_get2b(__m256i ymm)
{
return (uint16_t)m256_get8b(ymm);
}
#else
static force_inline uint64_t
m256_get8b(__m256i ymm)
{
return (uint64_t)_mm256_extract_epi64(ymm, 0);
}
static force_inline uint32_t
m256_get4b(__m256i ymm)
{
return (uint32_t)_mm256_extract_epi32(ymm, 0);
}
static force_inline uint16_t
m256_get2b(__m256i ymm)
{
return (uint16_t)_mm256_extract_epi16(ymm, 0);
}
#endif
#endif
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/x86_64/memcpy_memset.h
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2020, Intel Corporation */
#ifndef MEMCPY_MEMSET_H
#define MEMCPY_MEMSET_H
#include <stddef.h>
#include <xmmintrin.h>
#include "pmem2_arch.h"
typedef void barrier_fn(void);
typedef void flush64b_fn(const void *);
static inline void
barrier_after_ntstores(void)
{
/*
* In this configuration pmem_drain does not contain sfence, so we have
* to serialize non-temporal store instructions.
*/
_mm_sfence();
}
static inline void
no_barrier_after_ntstores(void)
{
/*
* In this configuration pmem_drain contains sfence, so we don't have
	 * to serialize non-temporal store instructions.
*/
}
static inline void
noflush(const void *addr, size_t len)
{
/* NOP, not even pmemcheck annotation */
}
static inline void
noflush64b(const void *addr)
{
/* NOP, not even pmemcheck annotation */
}
typedef void perf_barrier_fn(void);
static force_inline void
wc_barrier(void)
{
/*
* Currently, for SSE2 and AVX code paths, use of non-temporal stores
* on all generations of CPUs must be limited to the number of
* write-combining buffers (12) because otherwise, suboptimal eviction
* policy might impact performance when writing more data than WC
* buffers can simultaneously hold.
*
* The AVX512 code path is not affected, probably because we are
* overwriting whole cache lines.
*/
_mm_sfence();
}
static force_inline void
no_barrier(void)
{
}
#ifndef AVX512F_AVAILABLE
/*
 * XXX not supported by the MSVC version we currently use.
 * Enable the Windows pmem2_mem_ext tests once the MSVC
 * version we use supports AVX512F.
*/
#ifdef _MSC_VER
#define AVX512F_AVAILABLE 0
#else
#define AVX512F_AVAILABLE 1
#endif
#endif
#ifndef AVX_AVAILABLE
#define AVX_AVAILABLE 1
#endif
#ifndef SSE2_AVAILABLE
#define SSE2_AVAILABLE 1
#endif
#if SSE2_AVAILABLE
void memmove_mov_sse2_clflush(char *dest, const char *src, size_t len);
void memmove_mov_sse2_clflushopt(char *dest, const char *src, size_t len);
void memmove_mov_sse2_clwb(char *dest, const char *src, size_t len);
void memmove_mov_sse2_empty(char *dest, const char *src, size_t len);
void memmove_mov_sse2_noflush(char *dest, const char *src, size_t len);
void memmove_movnt_sse2_clflush_nobarrier(char *dest, const char *src,
size_t len);
void memmove_movnt_sse2_clflushopt_nobarrier(char *dest, const char *src,
size_t len);
void memmove_movnt_sse2_clwb_nobarrier(char *dest, const char *src,
size_t len);
void memmove_movnt_sse2_empty_nobarrier(char *dest, const char *src,
size_t len);
void memmove_movnt_sse2_noflush_nobarrier(char *dest, const char *src,
size_t len);
void memmove_movnt_sse2_clflush_wcbarrier(char *dest, const char *src,
size_t len);
void memmove_movnt_sse2_clflushopt_wcbarrier(char *dest, const char *src,
size_t len);
void memmove_movnt_sse2_clwb_wcbarrier(char *dest, const char *src,
size_t len);
void memmove_movnt_sse2_empty_wcbarrier(char *dest, const char *src,
size_t len);
void memmove_movnt_sse2_noflush_wcbarrier(char *dest, const char *src,
size_t len);
void memset_mov_sse2_clflush(char *dest, int c, size_t len);
void memset_mov_sse2_clflushopt(char *dest, int c, size_t len);
void memset_mov_sse2_clwb(char *dest, int c, size_t len);
void memset_mov_sse2_empty(char *dest, int c, size_t len);
void memset_mov_sse2_noflush(char *dest, int c, size_t len);
void memset_movnt_sse2_clflush_nobarrier(char *dest, int c, size_t len);
void memset_movnt_sse2_clflushopt_nobarrier(char *dest, int c, size_t len);
void memset_movnt_sse2_clwb_nobarrier(char *dest, int c, size_t len);
void memset_movnt_sse2_empty_nobarrier(char *dest, int c, size_t len);
void memset_movnt_sse2_noflush_nobarrier(char *dest, int c, size_t len);
void memset_movnt_sse2_clflush_wcbarrier(char *dest, int c, size_t len);
void memset_movnt_sse2_clflushopt_wcbarrier(char *dest, int c, size_t len);
void memset_movnt_sse2_clwb_wcbarrier(char *dest, int c, size_t len);
void memset_movnt_sse2_empty_wcbarrier(char *dest, int c, size_t len);
void memset_movnt_sse2_noflush_wcbarrier(char *dest, int c, size_t len);
#endif
#if AVX_AVAILABLE
void memmove_mov_avx_clflush(char *dest, const char *src, size_t len);
void memmove_mov_avx_clflushopt(char *dest, const char *src, size_t len);
void memmove_mov_avx_clwb(char *dest, const char *src, size_t len);
void memmove_mov_avx_empty(char *dest, const char *src, size_t len);
void memmove_mov_avx_noflush(char *dest, const char *src, size_t len);
void memmove_movnt_avx_clflush_nobarrier(char *dest, const char *src,
size_t len);
void memmove_movnt_avx_clflushopt_nobarrier(char *dest, const char *src,
size_t len);
void memmove_movnt_avx_clwb_nobarrier(char *dest, const char *src,
size_t len);
void memmove_movnt_avx_empty_nobarrier(char *dest, const char *src,
size_t len);
void memmove_movnt_avx_noflush_nobarrier(char *dest, const char *src,
size_t len);
void memmove_movnt_avx_clflush_wcbarrier(char *dest, const char *src,
size_t len);
void memmove_movnt_avx_clflushopt_wcbarrier(char *dest, const char *src,
size_t len);
void memmove_movnt_avx_clwb_wcbarrier(char *dest, const char *src,
size_t len);
void memmove_movnt_avx_empty_wcbarrier(char *dest, const char *src,
size_t len);
void memmove_movnt_avx_noflush_wcbarrier(char *dest, const char *src,
size_t len);
void memset_mov_avx_clflush(char *dest, int c, size_t len);
void memset_mov_avx_clflushopt(char *dest, int c, size_t len);
void memset_mov_avx_clwb(char *dest, int c, size_t len);
void memset_mov_avx_empty(char *dest, int c, size_t len);
void memset_mov_avx_noflush(char *dest, int c, size_t len);
void memset_movnt_avx_clflush_nobarrier(char *dest, int c, size_t len);
void memset_movnt_avx_clflushopt_nobarrier(char *dest, int c, size_t len);
void memset_movnt_avx_clwb_nobarrier(char *dest, int c, size_t len);
void memset_movnt_avx_empty_nobarrier(char *dest, int c, size_t len);
void memset_movnt_avx_noflush_nobarrier(char *dest, int c, size_t len);
void memset_movnt_avx_clflush_wcbarrier(char *dest, int c, size_t len);
void memset_movnt_avx_clflushopt_wcbarrier(char *dest, int c, size_t len);
void memset_movnt_avx_clwb_wcbarrier(char *dest, int c, size_t len);
void memset_movnt_avx_empty_wcbarrier(char *dest, int c, size_t len);
void memset_movnt_avx_noflush_wcbarrier(char *dest, int c, size_t len);
#endif
#if AVX512F_AVAILABLE
void memmove_mov_avx512f_clflush(char *dest, const char *src, size_t len);
void memmove_mov_avx512f_clflushopt(char *dest, const char *src, size_t len);
void memmove_mov_avx512f_clwb(char *dest, const char *src, size_t len);
void memmove_mov_avx512f_empty(char *dest, const char *src, size_t len);
void memmove_mov_avx512f_noflush(char *dest, const char *src, size_t len);
void memmove_movnt_avx512f_clflush(char *dest, const char *src, size_t len);
void memmove_movnt_avx512f_clflushopt(char *dest, const char *src, size_t len);
void memmove_movnt_avx512f_clwb(char *dest, const char *src, size_t len);
void memmove_movnt_avx512f_empty(char *dest, const char *src, size_t len);
void memmove_movnt_avx512f_noflush(char *dest, const char *src, size_t len);
void memset_mov_avx512f_clflush(char *dest, int c, size_t len);
void memset_mov_avx512f_clflushopt(char *dest, int c, size_t len);
void memset_mov_avx512f_clwb(char *dest, int c, size_t len);
void memset_mov_avx512f_empty(char *dest, int c, size_t len);
void memset_mov_avx512f_noflush(char *dest, int c, size_t len);
void memset_movnt_avx512f_clflush(char *dest, int c, size_t len);
void memset_movnt_avx512f_clflushopt(char *dest, int c, size_t len);
void memset_movnt_avx512f_clwb(char *dest, int c, size_t len);
void memset_movnt_avx512f_empty(char *dest, int c, size_t len);
void memset_movnt_avx512f_noflush(char *dest, int c, size_t len);
#endif
extern size_t Movnt_threshold;
/*
* SSE2/AVX1 only:
*
* How much data WC buffers can hold at the same time, after which sfence
* is needed to flush them.
*
* For some reason sfence affects performance of reading from DRAM, so we have
* to prefetch the source data earlier.
*/
#define PERF_BARRIER_SIZE (12 * CACHELINE_SIZE /* 768 */)
/*
* How much to prefetch initially.
* Cannot be bigger than the size of L1 (32kB) - PERF_BARRIER_SIZE.
*/
#define INI_PREFETCH_SIZE (64 * CACHELINE_SIZE /* 4096 */)
static force_inline void
prefetch(const char *addr)
{
_mm_prefetch(addr, _MM_HINT_T0);
}
static force_inline void
prefetch_ini_fw(const char *src, size_t len)
{
size_t pref = MIN(len, INI_PREFETCH_SIZE);
for (size_t i = 0; i < pref; i += CACHELINE_SIZE)
prefetch(src + i);
}
static force_inline void
prefetch_ini_bw(const char *src, size_t len)
{
size_t pref = MIN(len, INI_PREFETCH_SIZE);
for (size_t i = 0; i < pref; i += CACHELINE_SIZE)
prefetch(src - i);
}
static force_inline void
prefetch_next_fw(const char *src, const char *srcend)
{
const char *begin = src + INI_PREFETCH_SIZE;
const char *end = begin + PERF_BARRIER_SIZE;
if (end > srcend)
end = srcend;
for (const char *addr = begin; addr < end; addr += CACHELINE_SIZE)
prefetch(addr);
}
static force_inline void
prefetch_next_bw(const char *src, const char *srcbegin)
{
const char *begin = src - INI_PREFETCH_SIZE;
const char *end = begin - PERF_BARRIER_SIZE;
if (end < srcbegin)
end = srcbegin;
for (const char *addr = begin; addr >= end; addr -= CACHELINE_SIZE)
prefetch(addr);
}
#endif
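PERF_BARRIER_SIZE works out to 12 * 64 = 768 bytes: exactly what the 12 write-combining buffers can hold at once, which is why the nt-store loops drain with sfence at that cadence. A hedged skeleton of the pattern follows (the real loops live in the memcpy/memset sources; memcpy here is only a stand-in for the non-temporal 768B block):

#include <emmintrin.h>	/* _mm_sfence */
#include <stddef.h>
#include <string.h>

#define CACHELINE_SIZE 64
#define PERF_BARRIER_SIZE (12 * CACHELINE_SIZE)	/* 768B: 12 WC buffers */

/* stand-in for a 768B non-temporal copy block (real code uses movntdq) */
static void
copy_block_nt(char *dest, const char *src)
{
	memcpy(dest, src, PERF_BARRIER_SIZE);
}

static void
copy_with_wc_barriers(char *dest, const char *src, size_t len)
{
	while (len >= PERF_BARRIER_SIZE) {
		copy_block_nt(dest, src);
		dest += PERF_BARRIER_SIZE;
		src += PERF_BARRIER_SIZE;
		len -= PERF_BARRIER_SIZE;
		if (len)
			_mm_sfence();	/* drain WC buffers between rounds */
	}
	memcpy(dest, src, len);	/* tail via ordinary stores */
}

int
main(void)
{
	static char src[4096], dst[4096];	/* 5 full rounds + 256B tail */
	copy_with_wc_barriers(dst, src, sizeof(src));
	return 0;
}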
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/x86_64/memset/memset_nt_sse2.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2020, Intel Corporation */
#include <immintrin.h>
#include <stddef.h>
#include <stdint.h>
#include "pmem2_arch.h"
#include "flush.h"
#include "memcpy_memset.h"
#include "memset_sse2.h"
#include "out.h"
#include "valgrind_internal.h"
static force_inline void
mm_stream_si128(char *dest, unsigned idx, __m128i src)
{
_mm_stream_si128((__m128i *)dest + idx, src);
barrier();
}
static force_inline void
memset_movnt4x64b(char *dest, __m128i xmm)
{
mm_stream_si128(dest, 0, xmm);
mm_stream_si128(dest, 1, xmm);
mm_stream_si128(dest, 2, xmm);
mm_stream_si128(dest, 3, xmm);
mm_stream_si128(dest, 4, xmm);
mm_stream_si128(dest, 5, xmm);
mm_stream_si128(dest, 6, xmm);
mm_stream_si128(dest, 7, xmm);
mm_stream_si128(dest, 8, xmm);
mm_stream_si128(dest, 9, xmm);
mm_stream_si128(dest, 10, xmm);
mm_stream_si128(dest, 11, xmm);
mm_stream_si128(dest, 12, xmm);
mm_stream_si128(dest, 13, xmm);
mm_stream_si128(dest, 14, xmm);
mm_stream_si128(dest, 15, xmm);
}
static force_inline void
memset_movnt2x64b(char *dest, __m128i xmm)
{
mm_stream_si128(dest, 0, xmm);
mm_stream_si128(dest, 1, xmm);
mm_stream_si128(dest, 2, xmm);
mm_stream_si128(dest, 3, xmm);
mm_stream_si128(dest, 4, xmm);
mm_stream_si128(dest, 5, xmm);
mm_stream_si128(dest, 6, xmm);
mm_stream_si128(dest, 7, xmm);
}
static force_inline void
memset_movnt1x64b(char *dest, __m128i xmm)
{
mm_stream_si128(dest, 0, xmm);
mm_stream_si128(dest, 1, xmm);
mm_stream_si128(dest, 2, xmm);
mm_stream_si128(dest, 3, xmm);
}
static force_inline void
memset_movnt1x32b(char *dest, __m128i xmm)
{
mm_stream_si128(dest, 0, xmm);
mm_stream_si128(dest, 1, xmm);
}
static force_inline void
memset_movnt1x16b(char *dest, __m128i xmm)
{
_mm_stream_si128((__m128i *)dest, xmm);
}
static force_inline void
memset_movnt1x8b(char *dest, __m128i xmm)
{
uint64_t x = (uint64_t)_mm_cvtsi128_si64(xmm);
_mm_stream_si64((long long *)dest, (long long)x);
}
static force_inline void
memset_movnt1x4b(char *dest, __m128i xmm)
{
uint32_t x = (uint32_t)_mm_cvtsi128_si32(xmm);
_mm_stream_si32((int *)dest, (int)x);
}
static force_inline void
memset_movnt_sse2(char *dest, int c, size_t len, flush_fn flush,
barrier_fn barrier, perf_barrier_fn perf_barrier)
{
char *orig_dest = dest;
size_t orig_len = len;
__m128i xmm = _mm_set1_epi8((char)c);
size_t cnt = (uint64_t)dest & 63;
if (cnt > 0) {
cnt = 64 - cnt;
if (cnt > len)
cnt = len;
memset_small_sse2(dest, xmm, cnt, flush);
dest += cnt;
len -= cnt;
}
while (len >= PERF_BARRIER_SIZE) {
memset_movnt4x64b(dest, xmm);
dest += 4 * 64;
len -= 4 * 64;
memset_movnt4x64b(dest, xmm);
dest += 4 * 64;
len -= 4 * 64;
memset_movnt4x64b(dest, xmm);
dest += 4 * 64;
len -= 4 * 64;
COMPILE_ERROR_ON(PERF_BARRIER_SIZE != (4 + 4 + 4) * 64);
if (len)
perf_barrier();
}
while (len >= 4 * 64) {
memset_movnt4x64b(dest, xmm);
dest += 4 * 64;
len -= 4 * 64;
}
if (len >= 2 * 64) {
memset_movnt2x64b(dest, xmm);
dest += 2 * 64;
len -= 2 * 64;
}
if (len >= 1 * 64) {
memset_movnt1x64b(dest, xmm);
dest += 1 * 64;
len -= 1 * 64;
}
if (len == 0)
goto end;
/* There's no point in using more than 1 nt store for 1 cache line. */
if (util_is_pow2(len)) {
if (len == 32)
memset_movnt1x32b(dest, xmm);
else if (len == 16)
memset_movnt1x16b(dest, xmm);
else if (len == 8)
memset_movnt1x8b(dest, xmm);
else if (len == 4)
memset_movnt1x4b(dest, xmm);
else
goto nonnt;
goto end;
}
nonnt:
memset_small_sse2(dest, xmm, len, flush);
end:
barrier();
VALGRIND_DO_FLUSH(orig_dest, orig_len);
}
/* variants without perf_barrier */
void
memset_movnt_sse2_noflush_nobarrier(char *dest, int c, size_t len)
{
LOG(15, "dest %p c %d len %zu", dest, c, len);
memset_movnt_sse2(dest, c, len, noflush, barrier_after_ntstores,
no_barrier);
}
void
memset_movnt_sse2_empty_nobarrier(char *dest, int c, size_t len)
{
LOG(15, "dest %p c %d len %zu", dest, c, len);
memset_movnt_sse2(dest, c, len, flush_empty_nolog,
barrier_after_ntstores, no_barrier);
}
void
memset_movnt_sse2_clflush_nobarrier(char *dest, int c, size_t len)
{
LOG(15, "dest %p c %d len %zu", dest, c, len);
memset_movnt_sse2(dest, c, len, flush_clflush_nolog,
barrier_after_ntstores, no_barrier);
}
void
memset_movnt_sse2_clflushopt_nobarrier(char *dest, int c, size_t len)
{
LOG(15, "dest %p c %d len %zu", dest, c, len);
memset_movnt_sse2(dest, c, len, flush_clflushopt_nolog,
no_barrier_after_ntstores, no_barrier);
}
void
memset_movnt_sse2_clwb_nobarrier(char *dest, int c, size_t len)
{
LOG(15, "dest %p c %d len %zu", dest, c, len);
memset_movnt_sse2(dest, c, len, flush_clwb_nolog,
no_barrier_after_ntstores, no_barrier);
}
/* variants with perf_barrier */
void
memset_movnt_sse2_noflush_wcbarrier(char *dest, int c, size_t len)
{
LOG(15, "dest %p c %d len %zu", dest, c, len);
memset_movnt_sse2(dest, c, len, noflush, barrier_after_ntstores,
wc_barrier);
}
void
memset_movnt_sse2_empty_wcbarrier(char *dest, int c, size_t len)
{
LOG(15, "dest %p c %d len %zu", dest, c, len);
memset_movnt_sse2(dest, c, len, flush_empty_nolog,
barrier_after_ntstores, wc_barrier);
}
void
memset_movnt_sse2_clflush_wcbarrier(char *dest, int c, size_t len)
{
LOG(15, "dest %p c %d len %zu", dest, c, len);
memset_movnt_sse2(dest, c, len, flush_clflush_nolog,
barrier_after_ntstores, wc_barrier);
}
void
memset_movnt_sse2_clflushopt_wcbarrier(char *dest, int c, size_t len)
{
LOG(15, "dest %p c %d len %zu", dest, c, len);
memset_movnt_sse2(dest, c, len, flush_clflushopt_nolog,
no_barrier_after_ntstores, wc_barrier);
}
void
memset_movnt_sse2_clwb_wcbarrier(char *dest, int c, size_t len)
{
LOG(15, "dest %p c %d len %zu", dest, c, len);
memset_movnt_sse2(dest, c, len, flush_clwb_nolog,
no_barrier_after_ntstores, wc_barrier);
}
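These entry points are normally installed into the dispatch table by init.c rather than called directly. Purely for illustration, a hedged direct call linked against this file (the buffer is 64B-aligned, so the small-head path before the main loop is skipped; requires a CPU with clwb):

#include <stdlib.h>

void memset_movnt_sse2_clwb_nobarrier(char *dest, int c, size_t len);

int
main(void)
{
	char *buf = aligned_alloc(64, 4096);
	if (buf == NULL)
		return 1;

	memset_movnt_sse2_clwb_nobarrier(buf, 0xab, 4096);

	free(buf);
	return 0;
}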
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/x86_64/memset/memset_avx512f.h
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2020, Intel Corporation */
#ifndef PMEM2_MEMSET_AVX512F_H
#define PMEM2_MEMSET_AVX512F_H
#include <stddef.h>
#include "memset_avx.h"
static force_inline void
memset_small_avx512f(char *dest, __m256i ymm, size_t len, flush_fn flush)
{
/* We can't do better than AVX here. */
memset_small_avx(dest, ymm, len, flush);
}
#endif
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/x86_64/memset/memset_nt_avx.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2020, Intel Corporation */
#include <immintrin.h>
#include <stddef.h>
#include <stdint.h>
#include "pmem2_arch.h"
#include "avx.h"
#include "flush.h"
#include "memcpy_memset.h"
#include "memset_avx.h"
#include "out.h"
#include "valgrind_internal.h"
static force_inline void
mm256_stream_si256(char *dest, unsigned idx, __m256i src)
{
_mm256_stream_si256((__m256i *)dest + idx, src);
barrier();
}
static force_inline void
memset_movnt8x64b(char *dest, __m256i ymm)
{
mm256_stream_si256(dest, 0, ymm);
mm256_stream_si256(dest, 1, ymm);
mm256_stream_si256(dest, 2, ymm);
mm256_stream_si256(dest, 3, ymm);
mm256_stream_si256(dest, 4, ymm);
mm256_stream_si256(dest, 5, ymm);
mm256_stream_si256(dest, 6, ymm);
mm256_stream_si256(dest, 7, ymm);
mm256_stream_si256(dest, 8, ymm);
mm256_stream_si256(dest, 9, ymm);
mm256_stream_si256(dest, 10, ymm);
mm256_stream_si256(dest, 11, ymm);
mm256_stream_si256(dest, 12, ymm);
mm256_stream_si256(dest, 13, ymm);
mm256_stream_si256(dest, 14, ymm);
mm256_stream_si256(dest, 15, ymm);
}
static force_inline void
memset_movnt4x64b(char *dest, __m256i ymm)
{
mm256_stream_si256(dest, 0, ymm);
mm256_stream_si256(dest, 1, ymm);
mm256_stream_si256(dest, 2, ymm);
mm256_stream_si256(dest, 3, ymm);
mm256_stream_si256(dest, 4, ymm);
mm256_stream_si256(dest, 5, ymm);
mm256_stream_si256(dest, 6, ymm);
mm256_stream_si256(dest, 7, ymm);
}
static force_inline void
memset_movnt2x64b(char *dest, __m256i ymm)
{
mm256_stream_si256(dest, 0, ymm);
mm256_stream_si256(dest, 1, ymm);
mm256_stream_si256(dest, 2, ymm);
mm256_stream_si256(dest, 3, ymm);
}
static force_inline void
memset_movnt1x64b(char *dest, __m256i ymm)
{
mm256_stream_si256(dest, 0, ymm);
mm256_stream_si256(dest, 1, ymm);
}
static force_inline void
memset_movnt1x32b(char *dest, __m256i ymm)
{
mm256_stream_si256(dest, 0, ymm);
}
static force_inline void
memset_movnt1x16b(char *dest, __m256i ymm)
{
__m128i xmm0 = m256_get16b(ymm);
_mm_stream_si128((__m128i *)dest, xmm0);
}
static force_inline void
memset_movnt1x8b(char *dest, __m256i ymm)
{
uint64_t x = m256_get8b(ymm);
_mm_stream_si64((long long *)dest, (long long)x);
}
static force_inline void
memset_movnt1x4b(char *dest, __m256i ymm)
{
uint32_t x = m256_get4b(ymm);
_mm_stream_si32((int *)dest, (int)x);
}
static force_inline void
memset_movnt_avx(char *dest, int c, size_t len, flush_fn flush,
barrier_fn barrier, perf_barrier_fn perf_barrier)
{
char *orig_dest = dest;
size_t orig_len = len;
__m256i ymm = _mm256_set1_epi8((char)c);
size_t cnt = (uint64_t)dest & 63;
if (cnt > 0) {
cnt = 64 - cnt;
if (cnt > len)
cnt = len;
memset_small_avx(dest, ymm, cnt, flush);
dest += cnt;
len -= cnt;
}
while (len >= PERF_BARRIER_SIZE) {
memset_movnt8x64b(dest, ymm);
dest += 8 * 64;
len -= 8 * 64;
memset_movnt4x64b(dest, ymm);
dest += 4 * 64;
len -= 4 * 64;
COMPILE_ERROR_ON(PERF_BARRIER_SIZE != (8 + 4) * 64);
if (len)
perf_barrier();
}
if (len >= 8 * 64) {
memset_movnt8x64b(dest, ymm);
dest += 8 * 64;
len -= 8 * 64;
}
if (len >= 4 * 64) {
memset_movnt4x64b(dest, ymm);
dest += 4 * 64;
len -= 4 * 64;
}
if (len >= 2 * 64) {
memset_movnt2x64b(dest, ymm);
dest += 2 * 64;
len -= 2 * 64;
}
if (len >= 1 * 64) {
memset_movnt1x64b(dest, ymm);
dest += 1 * 64;
len -= 1 * 64;
}
if (len == 0)
goto end;
/* There's no point in using more than 1 nt store for 1 cache line. */
if (util_is_pow2(len)) {
if (len == 32)
memset_movnt1x32b(dest, ymm);
else if (len == 16)
memset_movnt1x16b(dest, ymm);
else if (len == 8)
memset_movnt1x8b(dest, ymm);
else if (len == 4)
memset_movnt1x4b(dest, ymm);
else
goto nonnt;
goto end;
}
nonnt:
memset_small_avx(dest, ymm, len, flush);
end:
avx_zeroupper();
barrier();
VALGRIND_DO_FLUSH(orig_dest, orig_len);
}
/* variants without perf_barrier */
void
memset_movnt_avx_noflush_nobarrier(char *dest, int c, size_t len)
{
LOG(15, "dest %p c %d len %zu", dest, c, len);
memset_movnt_avx(dest, c, len, noflush, barrier_after_ntstores,
no_barrier);
}
void
memset_movnt_avx_empty_nobarrier(char *dest, int c, size_t len)
{
LOG(15, "dest %p c %d len %zu", dest, c, len);
memset_movnt_avx(dest, c, len, flush_empty_nolog,
barrier_after_ntstores, no_barrier);
}
void
memset_movnt_avx_clflush_nobarrier(char *dest, int c, size_t len)
{
LOG(15, "dest %p c %d len %zu", dest, c, len);
memset_movnt_avx(dest, c, len, flush_clflush_nolog,
barrier_after_ntstores, no_barrier);
}
void
memset_movnt_avx_clflushopt_nobarrier(char *dest, int c, size_t len)
{
LOG(15, "dest %p c %d len %zu", dest, c, len);
memset_movnt_avx(dest, c, len, flush_clflushopt_nolog,
no_barrier_after_ntstores, no_barrier);
}
void
memset_movnt_avx_clwb_nobarrier(char *dest, int c, size_t len)
{
LOG(15, "dest %p c %d len %zu", dest, c, len);
memset_movnt_avx(dest, c, len, flush_clwb_nolog,
no_barrier_after_ntstores, no_barrier);
}
/* variants with perf_barrier */
void
memset_movnt_avx_noflush_wcbarrier(char *dest, int c, size_t len)
{
LOG(15, "dest %p c %d len %zu", dest, c, len);
memset_movnt_avx(dest, c, len, noflush, barrier_after_ntstores,
wc_barrier);
}
void
memset_movnt_avx_empty_wcbarrier(char *dest, int c, size_t len)
{
LOG(15, "dest %p c %d len %zu", dest, c, len);
memset_movnt_avx(dest, c, len, flush_empty_nolog,
barrier_after_ntstores, wc_barrier);
}
void
memset_movnt_avx_clflush_wcbarrier(char *dest, int c, size_t len)
{
LOG(15, "dest %p c %d len %zu", dest, c, len);
memset_movnt_avx(dest, c, len, flush_clflush_nolog,
barrier_after_ntstores, wc_barrier);
}
void
memset_movnt_avx_clflushopt_wcbarrier(char *dest, int c, size_t len)
{
LOG(15, "dest %p c %d len %zu", dest, c, len);
memset_movnt_avx(dest, c, len, flush_clflushopt_nolog,
no_barrier_after_ntstores, wc_barrier);
}
void
memset_movnt_avx_clwb_wcbarrier(char *dest, int c, size_t len)
{
LOG(15, "dest %p c %d len %zu", dest, c, len);
memset_movnt_avx(dest, c, len, flush_clwb_nolog,
no_barrier_after_ntstores, wc_barrier);
}