Search is not available for this dataset
repo
stringlengths 2
152
⌀ | file
stringlengths 15
239
| code
stringlengths 0
58.4M
| file_length
int64 0
58.4M
| avg_line_length
float64 0
1.81M
| max_line_length
int64 0
12.7M
| extension_type
stringclasses 364
values |
---|---|---|---|---|---|---|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/utils/docker/test_package/test_package.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018, Intel Corporation */
#include <libpmemobj.h>
#include <stdio.h>
#include <sys/stat.h>
#define LAYOUT_NAME "test"
/* Layout of the pool's root object: a single integer persisted by main(). */
struct my_root {
int foo;
};
/*
 * Smoke test for an installed PMDK package: creates a small pmemobj pool
 * at the given path, stores one persistent integer in its root object,
 * flushes it, and closes the pool.
 *
 * Returns 0 on success, 1 on usage error or any libpmemobj failure.
 */
int
main(int argc, char *argv[])
{
	/* the pool file path is the only (required) argument */
	if (argc < 2) {
		/* error output belongs on stderr, not stdout */
		fprintf(stderr, "usage: %s file-name\n", argv[0]);
		return 1;
	}

	const char *path = argv[1];

	/* create a minimally-sized pool, readable/writable by the owner only */
	PMEMobjpool *pop = pmemobj_create(path, LAYOUT_NAME,
			PMEMOBJ_MIN_POOL, S_IWUSR | S_IRUSR);
	if (pop == NULL) {
		fprintf(stderr, "failed to create pool\n");
		return 1;
	}

	/* allocate (or retrieve) the root object and obtain a direct pointer */
	PMEMoid root = pmemobj_root(pop, sizeof(struct my_root));
	struct my_root *rootp = pmemobj_direct(root);
	if (rootp == NULL) {
		/*
		 * Root allocation can fail (e.g. out of pool space); the
		 * original code dereferenced the pointer unconditionally.
		 */
		fprintf(stderr, "failed to access root object\n");
		pmemobj_close(pop);
		return 1;
	}

	rootp->foo = 10;
	/* make the store durable before closing the pool */
	pmemobj_persist(pop, &rootp->foo, sizeof(rootp->foo));

	pmemobj_close(pop);
	return 0;
}
| 735 | 16.52381 | 58 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/utils/docker/images/install-valgrind.sh
|
#!/usr/bin/env bash
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2016-2020, Intel Corporation
#
# install-valgrind.sh - installs valgrind for persistent memory
#
set -e
# Target distribution name (e.g. "fedora" or "ubuntu"); consulted only by
# install_upstream_from_distro() below.
OS=$1
# Install the distribution-packaged valgrind using the package manager that
# matches "$OS"; returns 1 for unsupported distributions.
install_upstream_from_distro() {
	if [ "$OS" = "fedora" ]; then
		dnf install -y valgrind
	elif [ "$OS" = "ubuntu" ]; then
		apt-get install -y --no-install-recommends valgrind
	else
		return 1
	fi
}
# install_upstream_3_16_1 -- build and install valgrind from the upstream
# VALGRIND_3_16_BRANCH sources, into the default prefix.
# Selected for ppc64le by the dispatch at the bottom of this script.
install_upstream_3_16_1() {
git clone git://sourceware.org/git/valgrind.git
cd valgrind
# valgrind v3.16.1 upstream
git checkout VALGRIND_3_16_BRANCH
./autogen.sh
./configure
make -j$(nproc)
make -j$(nproc) install
# leave the build tree and remove the cloned sources
cd ..
rm -rf valgrind
}
# install_custom-pmem_from_source -- build and install the pmem fork of
# valgrind (which carries the pmemcheck tool) from a pinned commit,
# into the default prefix.
install_custom-pmem_from_source() {
git clone https://github.com/pmem/valgrind.git
cd valgrind
# valgrind v3.15 with pmemcheck
# 2020.04.01 Merge pull request #78 from marcinslusarz/opt3
git checkout 759686fd66cc0105df8311cfe676b0b2f9e89196
./autogen.sh
./configure
make -j$(nproc)
make -j$(nproc) install
# leave the build tree and remove the cloned sources
cd ..
rm -rf valgrind
}
# Pick the valgrind flavor for this CPU: upstream 3.16 on ppc64le,
# the pmem fork (with pmemcheck) everywhere else.
ARCH=$(uname -m)
if [ "$ARCH" = "ppc64le" ]; then
	install_upstream_3_16_1
else
	install_custom-pmem_from_source
fi
| 1,099 | 19.754717 | 66 |
sh
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/utils/docker/images/build-image.sh
|
#!/usr/bin/env bash
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2016-2020, Intel Corporation
#
# build-image.sh <OS-VER> <ARCH> - prepares a Docker image with <OS>-based
# environment intended for the <ARCH> CPU architecture
# designed for building PMDK project, according to
# the Dockerfile.<OS-VER> file located in the same directory.
#
# The script can be run locally.
#
set -e
# Positional arguments; validated further below, after usage() is defined.
OS_VER=$1
CPU_ARCH=$2
# usage -- print the synopsis and a description of both required arguments
function usage {
echo "Usage:"
echo " build-image.sh <OS-VER> <ARCH>"
echo "where:"
echo " <OS-VER> - can be for example 'ubuntu-19.10' provided "\
"a Dockerfile named 'Dockerfile.ubuntu-19.10' "\
"exists in the current directory and"
echo " <ARCH> - is a CPU architecture, for example 'x86_64'"
}
# Check that both arguments were given ($2 is empty when either is missing)
if [[ -z "$2" ]]; then
	usage
	exit 1
fi

# Check if the file Dockerfile.OS-VER exists
if [[ ! -f "Dockerfile.$OS_VER" ]]; then
	echo "Error: Dockerfile.$OS_VER does not exist."
	echo
	usage
	exit 1
fi

if [[ -z "${DOCKERHUB_REPO}" ]]; then
	echo "Error: DOCKERHUB_REPO environment variable is not set"
	exit 1
fi

# Build a Docker image tagged with ${DOCKERHUB_REPO}:1.9-OS-VER-ARCH.
# Expansions are quoted so that unexpected whitespace in the environment
# cannot word-split the docker arguments.
tag="${DOCKERHUB_REPO}:1.9-${OS_VER}-${CPU_ARCH}"
docker build -t "$tag" \
	--build-arg http_proxy="$http_proxy" \
	--build-arg https_proxy="$https_proxy" \
	-f "Dockerfile.$OS_VER" .
| 1,373 | 24.444444 | 76 |
sh
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/utils/docker/images/install-libfabric.sh
|
#!/usr/bin/env bash
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2016-2020, Intel Corporation
#
# install-libfabric.sh - installs a customized version of libfabric
#
set -e
OS=$1
# Keep in sync with requirements in src/common.inc.
libfabric_ver=1.4.2
libfabric_url=https://github.com/ofiwg/libfabric/archive
libfabric_dir="libfabric-${libfabric_ver}"
# GitHub serves release archives as zip files under a v-prefixed tag
libfabric_tarball="v${libfabric_ver}.zip"
# Fetch and unpack the pinned release, then enter the source tree.
wget "${libfabric_url}/${libfabric_tarball}"
unzip "${libfabric_tarball}"
cd "${libfabric_dir}"
# XXX HACK HACK HACK
# Disable use of spin locks in libfabric.
#
# spinlocks do not play well (IOW at all) with cpu-constrained environments,
# like GitHub Actions, and this leads to timeouts of some PMDK's tests.
# This change speeds up pmempool_sync_remote/TEST28-31 by a factor of 20-30.
#
perl -pi -e 's/have_spinlock=1/have_spinlock=0/' configure.ac
# XXX HACK HACK HACK
# Configure, build and install into /usr with the sockets provider enabled.
./autogen.sh
./configure --prefix=/usr --enable-sockets
make -j$(nproc)
make -j$(nproc) install
# Remove the downloaded archive and the build tree.
cd ..
rm -f "${libfabric_tarball}"
rm -rf "${libfabric_dir}"
| 1,019 | 23.878049 | 76 |
sh
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/utils/docker/images/install-libndctl.sh
|
#!/usr/bin/env bash
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2017-2019, Intel Corporation
#
# install-libndctl.sh - installs libndctl
#
# Usage: install-libndctl.sh <git-ref> [<OS>]
# On fedora the library is packaged with rpmbuild and installed via rpm;
# on any other OS it is installed straight from the build tree.
set -e
OS=$2
echo "==== clone ndctl repo ===="
git clone https://github.com/pmem/ndctl.git
cd ndctl
# $1 is the git ref (tag/branch/commit) to build
git checkout $1
if [ "$OS" = "fedora" ]; then
echo "==== setup rpmbuild tree ===="
rpmdev-setuptree
RPMDIR=$HOME/rpmbuild/
VERSION=$(./git-version)
SPEC=./rhel/ndctl.spec
echo "==== create source tarball ====="
git archive --format=tar --prefix="ndctl-${VERSION}/" HEAD | gzip > "$RPMDIR/SOURCES/ndctl-${VERSION}.tar.gz"
echo "==== build ndctl ===="
./autogen.sh
./configure --disable-docs
make -j$(nproc)
echo "==== build ndctl packages ===="
rpmbuild -ba $SPEC
echo "==== install ndctl packages ===="
RPM_ARCH=$(uname -m)
rpm -i $RPMDIR/RPMS/$RPM_ARCH/*.rpm
echo "==== cleanup ===="
rm -rf $RPMDIR
else
echo "==== build ndctl ===="
./autogen.sh
./configure --disable-docs
make -j$(nproc)
echo "==== install ndctl ===="
make -j$(nproc) install
echo "==== cleanup ===="
fi
# remove the cloned sources in both cases
cd ..
rm -rf ndctl
| 1,057 | 16.344262 | 109 |
sh
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/utils/docker/images/push-image.sh
|
#!/usr/bin/env bash
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2016-2020, Intel Corporation
#
# push-image.sh - pushes the Docker image to the Docker Hub.
#
# The script utilizes $DOCKERHUB_USER and $DOCKERHUB_PASSWORD variables
# to log in to Docker Hub. The variables can be set in the Travis project's
# configuration for automated builds.
#
set -e

source $(dirname $0)/../set-ci-vars.sh

# All four variables below are required to construct and push the tag.
if [[ -z "$OS" ]]; then
	echo "OS environment variable is not set"
	exit 1
fi

if [[ -z "$OS_VER" ]]; then
	echo "OS_VER environment variable is not set"
	exit 1
fi

if [[ -z "$CI_CPU_ARCH" ]]; then
	echo "CI_CPU_ARCH environment variable is not set"
	exit 1
fi

if [[ -z "${DOCKERHUB_REPO}" ]]; then
	echo "DOCKERHUB_REPO environment variable is not set"
	exit 1
fi

TAG="1.9-${OS}-${OS_VER}-${CI_CPU_ARCH}"

# Check if the image tagged with ${DOCKERHUB_REPO}:${TAG} exists locally
if [[ ! $(docker images -a | awk -v pattern="^${DOCKERHUB_REPO}:${TAG}\$" \
	'$1":"$2 ~ pattern') ]]
then
	echo "ERROR: Docker image tagged ${DOCKERHUB_REPO}:${TAG} does not exist locally."
	exit 1
fi

# Log in to the Docker Hub
docker login -u="$DOCKERHUB_USER" -p="$DOCKERHUB_PASSWORD"

# Push the image to the repository
docker push "${DOCKERHUB_REPO}:${TAG}"
| 1,236 | 22.788462 | 84 |
sh
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/RELEASE_STEPS.md
|
# PMDK release steps
This document contains all the steps required to make a new release of PMDK.
Make a release locally:
- add an entry to ChangeLog, remember to change the day of the week in the release date
- for major releases mention compatibility with the previous release
- update reference to stable release in README.md (update `git checkout tags/$VERSION-1`)
- git rm GIT_VERSION
- echo $VERSION > VERSION
- git add VERSION
- git commit -a -S -m "common: $VERSION release"
- git tag -a -s -m "PMDK Version $VERSION" $VERSION
Make a package:
- git archive --prefix="pmdk-$VERSION/" -o pmdk-$VERSION.tar.gz $VERSION
- uncompress the created archive in a new directory and create the final package:
```
$ cd pmdk-$VERSION
$ make doc
$ touch .skip-doc
$ cd ..
$ tar czf pmdk-$VERSION.tar.gz pmdk-$VERSION/ --owner=root --group=root
```
- verify the created archive (uncompress & build one last time)
- gpg --armor --detach-sign pmdk-$VERSION.tar.gz
Undo temporary release changes:
- git cherry-pick 1a620814f6affe0535441565007c352a67f995c0
- git rm VERSION
- git commit --reset-author
Publish changes:
- for major release:
- git push upstream HEAD:master $VERSION
- create and push stable-$VERSION branch
- create PR from stable-$VERSION to master
- for minor release:
- git push upstream HEAD:stable-$VER $VERSION
- create PR from stable-$VER to next stable (or master, if release is from last stable branch)
Publish package and make it official:
- go to https://github.com/pmem/pmdk/releases/new:
- tag version: $VERSION, release title: PMDK Version $VERSION, description: copy entry from ChangeLog
- upload pmdk-$VERSION.tar.gz & pmdk-$VERSION.tar.gz.asc
- announce the release on pmem group
Later, for major release:
- bump version of Docker images (build-CI.sh, build-local.sh, build-image.sh, push-image.sh, pull-or-rebuild-image.sh) to $VERSION+1
- add new branch to valid-branches.sh
- once gh-pages contains new documentation, add $VERSION section in _data/releases_linux.yml and _data/releases_windows.yml on gh-pages branch
| 2,074 | 38.903846 | 142 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/rpmemd/rpmemd.1.md
|
---
layout: manual
Content-Style: 'text/css'
title: _MP(RPMEMD, 1)
collection: rpmemd
header: PMDK
date: rpmemd version 1.4
...
[comment]: <> (SPDX-License-Identifier: BSD-3-Clause)
[comment]: <> (Copyright 2016-2018, Intel Corporation)
[comment]: <> (rpmemd.1.md -- man page for rpmemd)
[NAME](#name)<br />
[SYNOPSIS](#synopsis)<br />
[DESCRIPTION](#description)<br />
[OPTIONS](#options)<br />
[CONFIGURATION FILES](#configuration-files)<br />
[EXAMPLE](#example)<br />
[DEFAULT CONFIGURATION](#default-configuration)<br />
[PERSISTENCY METHODS](#persistency-methods)<br />
[SEE ALSO](#see-also)<br />
# NAME #
**rpmemd** - librpmem target node process (EXPERIMENTAL)
# SYNOPSIS #
```
$ rpmemd [--help] [--version] [<args>]
```
# DESCRIPTION #
The **rpmemd** process is executed on target node by **librpmem**(7) library over
**ssh**(1) and facilitates access to persistent memory over RDMA. The **rpmemd**
should not be run manually under normal conditions.
# OPTIONS #
Command line options overwrite the default **rpmemd** configuration, the global
configuration file and the user configuration file.
`-V, --version`
Displays **rpmemd** version and exits.
`-h, --help`
Prints synopsis and list of parameters and exits.
`-c, --config <path>`
Custom configuration file location. If the custom configuration file is
provided others are omitted. See **CONFIGURATION FILES**
section for details.
All options described in **CONFIGURATION FILES** section are
common for both the configuration file and the command line - the equivalent
of the following line in the config file:
`option = value`
is
`--option value`
in the command line.
The following command line options: **--persist-apm**, **--persist-general**
and **--use-syslog** should not be followed by any value. Presence of each of them
in the command line turns on an appropriate option.
See **CONFIGURATION FILES** section for details.
`-r, --remove <poolset>`
Remove a pool described by given pool set file descriptor. It is interpreted
as a path to the pool set file relative to the pool set directory.
`-f, --force`
Ignore errors when removing a pool file using **--remove** option.
# CONFIGURATION FILES #
The **rpmemd** searches for the configuration files with following priorities:
+ The global configuration file located in **/etc/rpmemd/rpmemd.conf**.
+ The user configuration file located in the user home directory
(**$HOME/.rpmemd.conf**).
The **rpmemd** can also read configuration from the custom configuration file
provided using **--config** command line option. See **OPTIONS**
section for details.
The default configuration is described in the
**DEFAULT CONFIGURATION** section.
The configuration file is a plain text file. Each line of the configuration file
can store only one configuration option defined as a *key=value* pair. Empty
lines and lines starting with *#* are omitted.
The allowed options are:
+ `log-file = <path>` - log file location
+ `poolset-dir = <path>` - pool set files directory
+ `persist-apm = {yes|no}` - enable **The Appliance Persistency Method**. This
option must be set only if the target platform has non-allocating writes IO
enabled. See **PERSISTENCY METHODS** section for details.
+ `persist-general = {yes|no}` - enable **The General Purpose Server Persistency
Method**. See **PERSISTENCY METHODS** section for details.
+ `use-syslog = {yes|no}` - use **syslog**(3) for logging messages instead of log
file
+ `log-level = <level>` - set log level value. Accepted *\<level\>* values are:
+ **err** - error conditions
+ **warn** - warning conditions
+ **notice** - normal, but significant conditions
+ **info** - informational message
+ **debug** - debug-level message
The **$HOME** sub-string in the *poolset-dir* path is replaced with the current user
home directory.
# EXAMPLE #
Example of the configuration file:
```
# This is an example of configuration file
log-file = $HOME/.logs/rpmemd.log
poolset-dir = $HOME/poolsets/
persist-apm = yes
persist-general = no
use-syslog = no # Use log file instead of syslog
log-level = info
```
# DEFAULT CONFIGURATION #
The **rpmemd** default configuration is equivalent of the following
configuration file:
```
log-file = /var/log/rpmemd.log
poolset-dir = $HOME
persist-apm = no
persist-general = yes
use-syslog = yes
log-level = err
```
# PERSISTENCY METHODS #
The **librpmem**(7) supports two methods for making data written to remote
persistent memory durable. The difference between the use of the two mechanisms
is based on whether **librpmem**(7) will make use of non-allocating writes on the
remote node.
+ **The General Purpose Server Persistency Method** does not have any
requirements for the platform on which the target daemon runs and can be enabled
by administrator using the *persist-general* option. This method utilizes
**libpmem**(7) persistency mechanisms on remote node and requires additional
communication between initiator and remote node using the in-band connection.
+ **The Appliance Persistency Method** requires non-allocating writes enabled on
the platform and can be enabled by administrator using *persist-apm* option.
This method requires issuing an RDMA READ operation after the RDMA WRITE
operations performed on requested chunk of memory.
"Non-allocating write requests" is the Intel Integrated IO Controller mode
where all incoming PCIe writes will utilize non-allocating buffers for the write
requests. Non-allocating writes are guaranteed to bypass all of the CPU caches
and force the write requests to flow directly to the Integrated Memory
Controller without delay.
The **rpmemd** dynamically chooses the appropriate persistency method and the
flushing to persistence primitive for GPSPM for each opened pool set name
depending on available persistency methods and whether all pool set parts are
stored in the persistent memory.
If the **Appliance Persistency Method** is enabled and the pool set is stored
in the persistent memory **rpmemd** will use the **Appliance Persistency
Method**. If the pool set is NOT stored in the persistent memory it will
fall back to the **General Purpose Server Persistency Method** with
**pmem_msync**(3).
If the **General Purpose Server Persistency Method** is enabled and the pool
set is stored in the persistent memory **rpmemd** will use **pmem_persist**(3).
If the pool set is NOT stored in the persistent memory it will use
**pmem_msync**(3).
See **pmem_persist**(3) and **pmem_msync**(3) for more details.
# SEE ALSO #
**ssh**(1), **pmem_msync**(3), **pmem_persist**(3),
**syslog**(3), **libpmem**(7), **libpmemobj**(7),
**librpmem**(7) and **<https://pmem.io>**
| 6,678 | 31.2657 | 84 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/libpmemlog/pmemlog_create.3.md
|
---
layout: manual
Content-Style: 'text/css'
title: _MP(PMEMLOG_CREATE, 3)
collection: libpmemlog
header: PMDK
date: pmemlog API version 1.1
...
[comment]: <> (SPDX-License-Identifier: BSD-3-Clause)
[comment]: <> (Copyright 2017-2018, Intel Corporation)
[comment]: <> (pmemlog_create.3 -- man page for libpmemlog create, open, close and validate)
[NAME](#name)<br />
[SYNOPSIS](#synopsis)<br />
[DESCRIPTION](#description)<br />
[RETURN VALUE](#return-value)<br />
[CAVEATS](#caveats)<br />
[SEE ALSO](#see-also)<br />
# NAME #
_UW(pmemlog_create), _UW(pmemlog_open),
**pmemlog_close**(), _UW(pmemlog_check)
- create, open, close and validate persistent memory resident log file
# SYNOPSIS #
```c
#include <libpmemlog.h>
_UWFUNCR(PMEMlogpool, *pmemlog_open, *path)
_UWFUNCR1(PMEMlogpool, *pmemlog_create, *path, =q=size_t poolsize, mode_t mode=e=)
void pmemlog_close(PMEMlogpool *plp);
_UWFUNCR(int, pmemlog_check, *path)
```
_UNICODE()
# DESCRIPTION #
The _UW(pmemlog_create) function creates a log memory pool with the given
total *poolsize*. Since the transactional nature of a log memory pool requires
some space overhead in the memory pool, the resulting available log size is
less than *poolsize*, and is made available to the caller via the
**pmemlog_nbyte**(3) function. *path* specifies the name of the memory pool
file to be created. *mode* specifies the permissions to use when creating the
file as described by **creat**(2). The memory pool file is fully allocated
to the size *poolsize* using **posix_fallocate**(3).
The caller may choose to take responsibility for creating the memory pool file
by creating it before calling _UW(pmemlog_create) and then specifying
*poolsize* as zero. In this case _UW(pmemlog_create) will take the pool size
from the size of the existing file and will verify that the file appears to be
empty by searching for any non-zero data in the pool header at the beginning of
the file. The net pool size of a pool file is equal to the file size.
The minimum net pool size allowed by the library for a log pool
is defined in **\<libpmemlog.h\>** as **PMEMLOG_MIN_POOL**.
Depending on the configuration of the system, the available non-volatile
memory space may be divided into multiple memory devices.
In such case, the maximum size of the pmemlog memory pool
could be limited by the capacity of a single memory device.
**libpmemlog**(7) allows building persistent memory
resident logs spanning multiple memory devices by creation of
persistent memory pools consisting of multiple files, where each part of
such a *pool set* may be stored on a different memory device
or pmem-aware filesystem.
Creation of all the parts of the pool set can be done with _UW(pmemlog_create);
however, the recommended method for creating pool sets is with the
**pmempool**(1) utility.
When creating a pool set consisting of multiple files, the *path* argument
passed to _UW(pmemlog_create) must point to the special *set* file that defines
the pool layout and the location of all the parts of the pool set. The
*poolsize* argument must be 0. The meaning of the *mode* argument
does not change, except that the same *mode* is used for creation of all the
parts of the pool set.
The set file is a plain text file, the structure of which is described in
**poolset**(5).
The _UW(pmemlog_open) function opens an existing log memory pool.
Similar to _UW(pmemlog_create), *path* must identify either an existing
log memory pool file, or the *set* file used to create a pool set.
The application must have permission to open the file and memory map the
file or pool set with read/write permissions.
Be aware that if the pool contains bad blocks inside, opening can be aborted
by the SIGBUS signal, because currently the pool is not checked against
bad blocks during opening. It can be turned on by setting the CHECK_BAD_BLOCKS
compat feature. For details see description of this feature
in **pmempool-feature**(1).
The **pmemlog_close**() function closes the memory pool indicated by *plp*
and deletes the memory pool handle. The log memory pool itself lives on in
the file that contains it and may be re-opened at a later time using
_UW(pmemlog_open) as described above.
The _UW(pmemlog_check) function performs a consistency check of the file
indicated by *path*. _UW(pmemlog_check) opens the given *path* read-only so
it never makes any changes to the file. This function is not supported on
Device DAX.
# RETURN VALUE #
On success, _UW(pmemlog_create) returns a *PMEMlogpool\** handle to the
memory pool that is used with most of the functions from **libpmemlog**(7).
If an error prevents any of the pool set files from being
created, it returns NULL and sets *errno* appropriately.
On success, _UW(pmemlog_open) returns a *PMEMlogpool\** handle to the
memory pool that is used with most of the functions from **libpmemlog**(7).
If an error prevents the pool from being opened, or a pool set is being
opened and the actual size of any file does not match the corresponding part
size defined in the *set* file, _UW(pmemlog_open) returns NULL and sets
*errno* appropriately.
The **pmemlog_close**() function returns no value.
The _UW(pmemlog_check) function returns 1 if the persistent memory
resident log file is found to be consistent.
Any inconsistencies will cause _UW(pmemlog_check) to return 0,
in which case the use of the file with **libpmemlog** will result
in undefined behavior. The debug version of **libpmemlog** will provide
additional details on inconsistencies when **PMEMLOG_LOG_LEVEL** is at least 1,
as described in the **DEBUGGING AND ERROR HANDLING** section in
**libpmemlog**(7). _UW(pmemlog_check) will return -1 and set *errno* if it
cannot perform the consistency check due to other errors.
# CAVEATS #
Not all file systems support **posix_fallocate**(3). _UW(pmemlog_create) will
fail if the underlying file system does not support **posix_fallocate**(3).
_WINUX(=q= On Windows if _UW(pmemlog_create) is called on an existing file
with FILE_ATTRIBUTE_SPARSE_FILE and FILE_ATTRIBUTE_COMPRESSED set,
they will be removed, to physically allocate space for the pool.
This is a workaround for _chsize() performance issues. =e=)
# SEE ALSO #
**pmempool**(1), **creat**(2), **posix_fallocate**(3),
**pmemlog_nbyte**(3), **poolset**(5), **libpmemlog**(7)
and **<https://pmem.io>**
| 6,342 | 42.14966 | 93 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/libpmemlog/pmemlog_tell.3.md
|
---
layout: manual
Content-Style: 'text/css'
title: _MP(PMEMLOG_TELL, 3)
collection: libpmemlog
header: PMDK
date: pmemlog API version 1.1
...
[comment]: <> (SPDX-License-Identifier: BSD-3-Clause)
[comment]: <> (Copyright 2017-2018, Intel Corporation)
[comment]: <> (pmemlog_tell.3 -- man page for pmemlog_tell, pmemlog_rewind and pmemlog_walk functions)
[NAME](#name)<br />
[SYNOPSIS](#synopsis)<br />
[DESCRIPTION](#description)<br />
[RETURN VALUE](#return-value)<br />
[SEE ALSO](#see-also)<br />
# NAME #
**pmemlog_tell**(), **pmemlog_rewind**(),
**pmemlog_walk**() - checks current write point for the log or walks through the log
# SYNOPSIS #
```c
#include <libpmemlog.h>
long long pmemlog_tell(PMEMlogpool *plp);
void pmemlog_rewind(PMEMlogpool *plp);
void pmemlog_walk(PMEMlogpool *plp, size_t chunksize,
int (*process_chunk)(const void *buf, size_t len, void *arg),
void *arg);
```
# DESCRIPTION #
The **pmemlog_tell**() function returns the current write point for the log,
expressed as a byte offset into the usable log space in the memory pool.
This offset starts off as zero on a newly-created log,
and is incremented by each successful append operation.
This function can be used to determine how much data is currently in the log.
The **pmemlog_rewind**() function resets the current write point for the log to zero.
After this call, the next append adds to the beginning of the log.
The **pmemlog_walk**() function walks through the log *plp*, from beginning to
end, calling the callback function *process_chunk* for each *chunksize* block
of data found. The argument *arg* is also passed to the callback to help
avoid the need for global state. The *chunksize* argument is useful for logs
with fixed-length records and may be specified as 0 to cause a single call
to the callback with the entire log contents passed as the *buf* argument. The
*len* argument tells the *process_chunk* function how much data *buf* is
holding. The callback function should return 1 if **pmemlog_walk**() should
continue walking through the log, or 0 to terminate the walk. The callback
function is called while holding **libpmemlog**(7) internal locks that make
calls atomic, so the callback function must not try to append to the log itself
or deadlock will occur.
# RETURN VALUE #
On success, **pmemlog_tell**() returns the current write point for the log.
On error, it returns -1 and sets *errno* appropriately.
The **pmemlog_rewind**() and **pmemlog_walk**() functions return no value.
# SEE ALSO #
**libpmemlog**(7) and **<https://pmem.io>**
| 2,565 | 34.638889 | 102 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/libpmemlog/libpmemlog.7.md
|
---
layout: manual
Content-Style: 'text/css'
title: _MP(LIBPMEMLOG, 7)
collection: libpmemlog
header: PMDK
date: pmemlog API version 1.1
...
[comment]: <> (SPDX-License-Identifier: BSD-3-Clause)
[comment]: <> (Copyright 2016-2018, Intel Corporation)
[comment]: <> (libpmemlog.7 -- man page for libpmemlog)
[NAME](#name)<br />
[SYNOPSIS](#synopsis)<br />
[DESCRIPTION](#description)<br />
[CAVEATS](#caveats)<br />
[LIBRARY API VERSIONING](#library-api-versioning-1)<br />
[MANAGING LIBRARY BEHAVIOR](#managing-library-behavior-1)<br />
[DEBUGGING AND ERROR HANDLING](#debugging-and-error-handling)<br />
[EXAMPLE](#example)<br />
[BUGS](#bugs)<br />
[ACKNOWLEDGEMENTS](#acknowledgements)<br />
[SEE ALSO](#see-also)
# NAME #
**libpmemlog** - persistent memory resident log file
# SYNOPSIS #
```c
#include <libpmemlog.h>
cc ... -lpmemlog -lpmem
```
_UNICODE()
##### Library API versioning: #####
```c
_UWFUNC(pmemlog_check_version, =q=
unsigned major_required,
unsigned minor_required=e=)
```
##### Managing library behavior: #####
```c
void pmemlog_set_funcs(
void *(*malloc_func)(size_t size),
void (*free_func)(void *ptr),
void *(*realloc_func)(void *ptr, size_t size),
char *(*strdup_func)(const char *s));
```
##### Error handling: #####
```c
_UWFUNCR(int, pmemlog_check, *path)
```
##### Other library functions: #####
A description of other **libpmemlog** functions can be found on the following
manual pages:
**pmemlog_append**(3), **pmemlog_create**(3), **pmemlog_ctl_exec**(3),
**pmemlog_ctl_get**(3), **pmemlog_ctl_set**(3), **pmemlog_nbyte**(3),
**pmemlog_tell**(3)
# DESCRIPTION #
**libpmemlog**
provides a log file in *persistent memory* (pmem) such that
additions to the log are appended atomically. This library is intended
for applications using direct access storage (DAX), which is storage
that supports load/store access without paging blocks from a block
storage device. Some types of *non-volatile memory DIMMs* (NVDIMMs) provide
this type of byte addressable access to storage. A *persistent memory aware
file system* is typically used to expose the direct access to applications.
Memory mapping a file from this type of file system
results in the load/store, non-paged access to pmem.
**libpmemlog** builds on this type of memory mapped file.
This library is for applications that need a persistent log file
updated atomically (the updates cannot be *torn* by program interruption
such as power failures). This library builds on the low-level pmem
support provided by **libpmem**(7), handling the transactional update of
the log, flushing to persistence, and recovery for the application.
**libpmemlog** is one of a collection of persistent memory libraries available.
The others are:
+ **libpmemobj**(7), a general use persistent memory API,
providing memory allocation and transactional operations on variable-sized objects.
+ **libpmemblk**(7), providing pmem-resident arrays of fixed-sized blocks with atomic updates.
+ **libpmem**(7), low-level persistent memory support.
Under normal usage, **libpmemlog** will never print messages or intentionally
cause the process to exit. The only exception to this is the debugging
information, when enabled, as described under **DEBUGGING AND ERROR HANDLING** below.
To use the pmem-resident log file provided by **libpmemlog**, a *memory pool* is
first created. This is done with the **pmemlog_create**(3) function.
The other functions mentioned above in SYNOPSIS section
then operate on the resulting log memory pool.
Once created, the memory pool is represented by an opaque handle,
of type *PMEMlogpool\**, which is passed to most of the other
functions from **libpmemlog**. Internally, **libpmemlog** will use
either **pmem_persist**(3) or **msync**(2) when it needs to flush changes,
depending on whether the memory pool appears to be persistent memory
or a regular file (see the **pmem_is_pmem**(3) function in **libpmem**(7)
for more information). There is no need for applications to flush
changes directly when using the log memory API provided by **libpmemlog**.
# CAVEATS #
**libpmemlog** relies on the library destructor being called from the main
thread. For this reason, all functions that might trigger destruction (e.g.
**dlclose**(3)) should be called in the main thread. Otherwise some of the
resources associated with that thread might not be cleaned up properly.
# LIBRARY API VERSIONING #
This section describes how the library API is versioned,
allowing applications to work with an evolving API.
The _UW(pmemlog_check_version) function is used to determine whether the
installed **libpmemlog** supports the version of the library API required by
an application. The easiest way to do this is for the application to supply
the compile-time version information provided by defines in
**\<libpmemlog.h\>**, like this:
```c
reason = _U(pmemlog_check_version)(PMEMLOG_MAJOR_VERSION,
PMEMLOG_MINOR_VERSION);
if (reason != NULL) {
/* version check failed, reason string tells you why */
}
```
Any mismatch in the major version number is considered a failure,
but a library with a newer minor version number will pass this check
since increasing minor versions imply backwards compatibility.
An application can also check specifically for the existence of an interface
by checking for the version where that interface was introduced. These versions
are documented in this man page as follows: unless otherwise specified, all
interfaces described here are available in version 1.0 of the library. Interfaces
added after version 1.0 will contain the text *introduced
in version x.y* in the section of this manual describing the feature.
On success, _UW(pmemlog_check_version) returns NULL. Otherwise, the return
value is a static string describing the reason the version check failed. The
string returned by _UW(pmemlog_check_version) must not be modified or freed.
# MANAGING LIBRARY BEHAVIOR #
The **pmemlog_set_funcs**() function allows an application to override
memory allocation calls used internally by **libpmemlog**.
Passing in NULL for any of the handlers will cause the
**libpmemlog** default function to be used. The library does not make
heavy use of the system malloc functions, but it does
allocate approximately 4-8 kilobytes for each memory pool in use.
# DEBUGGING AND ERROR HANDLING #
The _UW(pmemlog_errormsg) function returns a pointer to a static buffer
containing the last error message logged for the current thread. If *errno*
was set, the error message may include a description of the corresponding
error code as returned by **strerror**(3). The error message buffer is
thread-local; errors encountered in one thread do not affect its value in
other threads. The buffer is never cleared by any library function; its
content is significant only when the return value of the immediately preceding
call to a **libpmemlog** function indicated an error, or if *errno* was set.
The application must not modify or free the error message string, but it may
be modified by subsequent calls to other library functions.
Two versions of **libpmemlog** are typically available on a development
system. The normal version, accessed when a program is linked using the
**-lpmemlog** option, is optimized for performance. That version skips checks
that impact performance and never logs any trace information or performs any
run-time assertions.
A second version of **libpmemlog**, accessed when a program uses the libraries
under _DEBUGLIBPATH(), contains run-time assertions and trace points. The
typical way to access the debug version is to set the environment variable
**LD_LIBRARY_PATH** to _LDLIBPATH(). Debugging output is
controlled using the following environment variables. These variables have
no effect on the non-debug version of the library.
+ **PMEMLOG_LOG_LEVEL**
The value of **PMEMLOG_LOG_LEVEL** enables trace points in the debug version
of the library, as follows:
+ **0** - This is the default level when **PMEMLOG_LOG_LEVEL** is not set.
No log messages are emitted at this level.
+ **1** - Additional details on any errors detected are logged,
in addition to returning the *errno*-based errors as usual.
The same information may be retrieved using _UW(pmemlog_errormsg).
+ **2** - A trace of basic operations is logged.
+ **3** - Enables a very verbose amount of function call tracing in the library.
+ **4** - Enables voluminous and fairly obscure tracing information
that is likely only useful to the **libpmemlog** developers.
Unless **PMEMLOG_LOG_FILE** is set, debugging output is written to *stderr*.
+ **PMEMLOG_LOG_FILE**
Specifies the name of a file name where all logging information should be
written. If the last character in the name is "-", the *PID* of the current
process will be appended to the file name when the log file is created. If
**PMEMLOG_LOG_FILE** is not set, logging output is written to *stderr*.
See also **libpmem**(7) for information about other environment
variables affecting **libpmemlog** behavior.
# EXAMPLE #
The following example illustrates how the **libpmemlog** API is used.
```c
#include <stdio.h>
#include <fcntl.h>
#include <errno.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <libpmemlog.h>
/* size of the pmemlog pool -- 1 GB */
#define POOL_SIZE ((size_t)(1 << 30))
/*
* printit -- log processing callback for use with pmemlog_walk()
*/
int
printit(const void *buf, size_t len, void *arg)
{
fwrite(buf, len, 1, stdout);
return 0;
}
int
main(int argc, char *argv[])
{
const char path[] = "/pmem-fs/myfile";
PMEMlogpool *plp;
size_t nbyte;
char *str;
/* create the pmemlog pool or open it if it already exists */
plp = _U(pmemlog_create)(path, POOL_SIZE, 0666);
if (plp == NULL)
plp = _U(pmemlog_open)(path);
if (plp == NULL) {
perror(path);
exit(1);
}
/* how many bytes does the log hold? */
nbyte = pmemlog_nbyte(plp);
printf("log holds %zu bytes", nbyte);
/* append to the log... */
str = "This is the first string appended";
if (pmemlog_append(plp, str, strlen(str)) < 0) {
perror("pmemlog_append");
exit(1);
}
str = "This is the second string appended";
if (pmemlog_append(plp, str, strlen(str)) < 0) {
perror("pmemlog_append");
exit(1);
}
/* print the log contents */
printf("log contains:");
pmemlog_walk(plp, 0, printit, NULL);
pmemlog_close(plp);
}
```
See <https://pmem.io/pmdk/libpmemlog>
for more examples using the **libpmemlog** API.
# BUGS #
Unlike **libpmemobj**(7), data replication is not supported in **libpmemlog**.
Thus, specifying replica sections in pool set files is not allowed.
# ACKNOWLEDGEMENTS #
**libpmemlog** builds on the persistent memory programming model recommended
by the SNIA NVM Programming Technical Work Group:
<https://snia.org/nvmp>
# SEE ALSO #
**msync**(2), **pmemlog_append**(3), **pmemlog_create**(3),
**pmemlog_ctl_exec**(3), **pmemlog_ctl_get**(3), **pmemlog_ctl_set**(3),
**pmemlog_nbyte**(3), **pmemlog_tell**(3), **strerror**(3),
**libpmem**(7), **libpmemblk**(7), **libpmemobj**(7)
and **<https://pmem.io>**
| 11,102 | 33.915094 | 94 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/libpmemlog/pmemlog_ctl_get.3.md
|
---
layout: manual
Content-Style: 'text/css'
title: _MP(PMEMLOG_CTL_GET, 3)
collection: libpmemlog
header: PMDK
date: pmemlog API version 1.1
...
[comment]: <> (SPDX-License-Identifier: BSD-3-Clause)
[comment]: <> (Copyright 2018-2019, Intel Corporation)
[comment]: <> (pmemlog_ctl_get.3 -- man page for libpmemlog CTL)
[NAME](#name)<br />
[SYNOPSIS](#synopsis)<br />
[DESCRIPTION](#description)<br />
[CTL NAMESPACE](#ctl-namespace)<br />
[CTL EXTERNAL CONFIGURATION](#ctl-external-configuration)<br />
[SEE ALSO](#see-also)<br />
# NAME #
_UW(pmemlog_ctl_get),
_UW(pmemlog_ctl_set),
_UW(pmemlog_ctl_exec)
- Query and modify libpmemlog internal behavior (EXPERIMENTAL)
# SYNOPSIS #
```c
#include <libpmemlog.h>
_UWFUNCR2(int, pmemlog_ctl_get, PMEMlogpool *plp, *name, void *arg,
=q= (EXPERIMENTAL)=e=)
_UWFUNCR2(int, pmemlog_ctl_set, PMEMlogpool *plp, *name, void *arg,
=q= (EXPERIMENTAL)=e=)
_UWFUNCR2(int, pmemlog_ctl_exec, PMEMlogpool *plp, *name, void *arg,
=q= (EXPERIMENTAL)=e=)
```
_UNICODE()
# DESCRIPTION #
The _UW(pmemlog_ctl_get), _UW(pmemlog_ctl_set) and _UW(pmemlog_ctl_exec)
functions provide a uniform interface for querying and modifying the internal
behavior of **libpmemlog**(7) through the control (CTL) namespace.
The *name* argument specifies an entry point as defined in the CTL namespace
specification. The entry point description specifies whether the extra *arg* is
required. Those two parameters together create a CTL query. The functions and
the entry points are thread-safe unless
indicated otherwise below. If there are special conditions for calling an entry
point, they are explicitly stated in its description. The functions propagate
the return value of the entry point. If either *name* or *arg* is invalid, -1
is returned.
If the provided ctl query is valid, the CTL functions will always return 0
on success and -1 on failure, unless otherwise specified in the entry point
description.
See more in **pmem_ctl**(5) man page.
# CTL NAMESPACE #
prefault.at_create | rw | global | int | int | - | boolean
If set, every page of the pool will be touched and written to when the pool
is created, in order to trigger page allocation and minimize the performance
impact of pagefaults. Affects only the _UW(pmemlog_create) function.
Always returns 0.
prefault.at_open | rw | global | int | int | - | boolean
If set, every page of the pool will be touched and written to when the pool
is opened, in order to trigger page allocation and minimize the performance
impact of pagefaults. Affects only the _UW(pmemlog_open) function.
Always returns 0.
sds.at_create | rw | global | int | int | - | boolean
If set, force-enables or force-disables SDS feature during pool creation.
Affects only the _UW(pmemlog_create) function. See **pmempool_feature_query**(3)
for information about SDS (SHUTDOWN_STATE) feature.
Always returns 0.
copy_on_write.at_open | rw | global | int | int | - | boolean
If set, pool is mapped in such a way that modifications don't reach the
underlying medium. From the user's perspective this means that when the pool
is closed all changes are reverted. This feature is not supported for pools
located on Device DAX.
Always returns 0.
# CTL EXTERNAL CONFIGURATION #
In addition to direct function call, each write entry point can also be set
using two alternative methods.
The first method is to load a configuration directly from the **PMEMLOG_CONF**
environment variable.
The second method of loading an external configuration is to set the
**PMEMLOG_CONF_FILE** environment variable to point to a file that contains
a sequence of ctl queries.
See more in **pmem_ctl**(5) man page.
# SEE ALSO #
**libpmemlog**(7), **pmem_ctl**(5) and **<https://pmem.io>**
| 3,739 | 30.965812 | 80 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/libpmemlog/pmemlog_nbyte.3.md
|
---
layout: manual
Content-Style: 'text/css'
title: _MP(PMEMLOG_NBYTE, 3)
collection: libpmemlog
header: PMDK
date: pmemlog API version 1.1
...
[comment]: <> (SPDX-License-Identifier: BSD-3-Clause)
[comment]: <> (Copyright 2017-2018, Intel Corporation)
[comment]: <> (pmemlog_nbyte.3 -- man page for pmemlog_nbyte function)
[NAME](#name)<br />
[SYNOPSIS](#synopsis)<br />
[DESCRIPTION](#description)<br />
[RETURN VALUE](#return-value)<br />
[SEE ALSO](#see-also)<br />
# NAME #
**pmemlog_nbyte**() - checks the amount of usable space in the log pool.
# SYNOPSIS #
```c
#include <libpmemlog.h>
size_t pmemlog_nbyte(PMEMlogpool *plp);
```
# DESCRIPTION #
The **pmemlog_nbyte**() function checks the amount of usable space in the log *plp*.
This function may be used on a log to determine how much usable space is
available after **libpmemlog**(7) has added its metadata to the memory pool.
# RETURN VALUE #
The **pmemlog_nbyte**() function returns the amount of usable space in the log *plp*.
# SEE ALSO #
**libpmemlog**(7) and **<https://pmem.io>**
| 1,063 | 22.130435 | 85 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/libpmemlog/pmemlog_append.3.md
|
---
layout: manual
Content-Style: 'text/css'
title: _MP(PMEMLOG_APPEND, 3)
collection: libpmemlog
header: PMDK
date: pmemlog API version 1.1
...
[comment]: <> (SPDX-License-Identifier: BSD-3-Clause)
[comment]: <> (Copyright 2017-2018, Intel Corporation)
[comment]: <> (pmemlog_append.3 -- man page for pmemlog_append and pmemlog_appendv functions)
[NAME](#name)<br />
[SYNOPSIS](#synopsis)<br />
[DESCRIPTION](#description)<br />
[RETURN VALUE](#return-value)<br />
[NOTES](#notes)<br />
[SEE ALSO](#see-also)<br />
# NAME #
**pmemlog_append**(), **pmemlog_appendv**() - append bytes to the persistent
memory resident log file
# SYNOPSIS #
```c
#include <libpmemlog.h>
int pmemlog_append(PMEMlogpool *plp, const void *buf, size_t count);
int pmemlog_appendv(PMEMlogpool *plp, const struct iovec *iov, int iovcnt);
```
# DESCRIPTION #
The **pmemlog_append**() function appends *count* bytes from *buf*
to the current write offset in the log memory pool *plp*.
Calling this function is analogous to appending to a file.
The append is atomic and cannot be torn by a program failure or system crash.
The **pmemlog_appendv**() function appends to the log memory pool *plp* from
the scatter/gather list *iov* in a manner
similar to **writev**(2). The entire list of buffers is appended atomically,
as if the buffers in *iov* were concatenated in order.
The append is atomic and cannot be torn by a program failure or system crash.
# RETURN VALUE #
On success, **pmemlog_append**() and **pmemlog_appendv**() return 0.
On error, they return -1 and set *errno* appropriately.
# ERRORS #
**EINVAL** The vector count *iovcnt* is less than zero.
**ENOSPC** There is no room for the data in the log file.
**EROFS** The log file is open in read-only mode.
# NOTES #
Since **libpmemlog**(7) is designed as a low-latency code path,
many of the checks routinely done by the operating system for **writev**(2)
are not practical in the library's implementation of **pmemlog_appendv**().
No attempt is made to detect NULL or incorrect pointers, for example.
# SEE ALSO #
**writev**(2), **libpmemlog**(7) and **<https://pmem.io>**
| 2,131 | 28.611111 | 93 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/librpmem/rpmem_persist.3.md
|
---
layout: manual
Content-Style: 'text/css'
title: _MP(RPMEM_PERSIST, 3)
collection: librpmem
header: PMDK
date: rpmem API version 1.3
...
[comment]: <> (SPDX-License-Identifier: BSD-3-Clause)
[comment]: <> (Copyright 2017-2018, Intel Corporation)
[comment]: <> (rpmem_persist.3 -- man page for rpmem persist, flush, drain and read functions)
[NAME](#name)<br />
[SYNOPSIS](#synopsis)<br />
[DESCRIPTION](#description)<br />
[RETURN VALUE](#return-value)<br />
[CAVEATS](#caveats)<br />
[SEE ALSO](#see-also)<br />
# NAME #
**rpmem_persist**(), **rpmem_deep_persist**(), **rpmem_flush**(),
**rpmem_drain**(), **rpmem_read**()
- functions to copy and read remote pools
# SYNOPSIS #
```c
#include <librpmem.h>
int rpmem_persist(RPMEMpool *rpp, size_t offset,
size_t length, unsigned lane, unsigned flags);
int rpmem_deep_persist(RPMEMpool *rpp, size_t offset,
size_t length, unsigned lane);
int rpmem_flush(RPMEMpool *rpp, size_t offset,
size_t length, unsigned lane, unsigned flags);
int rpmem_drain(RPMEMpool *rpp, unsigned lane, unsigned flags);
int rpmem_read(RPMEMpool *rpp, void *buff, size_t offset,
size_t length, unsigned lane);
```
# DESCRIPTION #
The **rpmem_persist**() function copies data of given *length* at given
*offset* from the associated local memory pool and makes sure the data is
persistent on the remote node before the function returns. The remote node
is identified by the *rpp* handle which must be returned from either
**rpmem_open**(3) or **rpmem_create**(3). The *offset* is relative
to the *pool_addr* specified in the **rpmem_open**(3) or **rpmem_create**(3)
call. If the remote pool was created using **rpmem_create**() with non-NULL
*create_attr* argument, *offset* has to be greater than or equal to 4096.
In that case the first 4096 bytes of the pool are used for storing the pool
metadata and cannot be overwritten.
If the pool was created with NULL *create_attr* argument, the pool metadata
is not stored with the pool and *offset* can be any nonnegative number.
The *offset* and *length* combined must not exceed the
*pool_size* passed to **rpmem_open**(3) or **rpmem_create**(3).
The **rpmem_persist**() operation is performed using the given *lane* number.
The lane must be less than the value returned by **rpmem_open**(3) or
**rpmem_create**(3) through the *nlanes* argument (so it can take a value
from 0 to *nlanes* - 1). The *flags* argument can be 0 or RPMEM_PERSIST_RELAXED
which means the persist operation will be done without any guarantees regarding
atomicity of memory transfer.
The **rpmem_deep_persist**() function works in the same way as
**rpmem_persist**(3) function, but additionally it flushes the data to the
lowest possible persistency domain available from software.
Please see **pmem_deep_persist**(3) for details.
The **rpmem_flush**() and **rpmem_drain**() functions are two halves of the
single **rpmem_persist**(). The **rpmem_persist**() copies data and makes it
persistent in the one shot, where **rpmem_flush**() and **rpmem_drain**() split
this operation into two stages. The **rpmem_flush**() copies data of given
*length* at a given *offset* from the associated local memory pool to the
remote node. The **rpmem_drain**() makes sure the data copied in all preceding
**rpmem_flush**() calls is persistent on the remote node before the function
returns. Data copied using **rpmem_flush**() can not be considered persistent
on the remote node before return from following **rpmem_drain**().
Single **rpmem_drain**() confirms persistence on the remote node of data copied
by all **rpmem_flush**() functions called before it and using the same *lane*.
The last **rpmem_flush**() + **rpmem_drain**() can be replaced with
**rpmem_persist**() at no cost.
The *flags* argument for **rpmem_flush**() can be 0 or RPMEM_FLUSH_RELAXED
which means the flush operation will be done without any guarantees regarding
atomicity of memory transfer. The *flags* argument for **rpmem_drain**() must be 0.
The **rpmem_flush**() function performance is affected by **RPMEM_WORK_QUEUE_SIZE**
environment variable (see **librpmem**(7) for more details).
The **rpmem_read**() function reads *length* bytes of data from a remote pool
at *offset* and copies it to the buffer *buff*. The operation is performed on
the specified *lane*. The lane must be less than the value returned by
**rpmem_open**(3) or **rpmem_create**(3) through the *nlanes* argument
(so it can take a value from 0 to *nlanes* - 1). The *rpp* must point to a
remote pool opened or created previously by **rpmem_open**(3) or
**rpmem_create**(3).
# RETURN VALUE #
The **rpmem_persist**() function returns 0 if the entire memory area was
made persistent on the remote node. Otherwise it returns a non-zero value
and sets *errno* appropriately.
The **rpmem_flush**() function returns 0 if duplication of the memory area to
the remote node was initialized successfully. Otherwise, it returns a non-zero
value and sets *errno* appropriately.
The **rpmem_drain**() function returns 0 if the memory areas duplicated by all
**rpmem_flush**() calls preceding the **rpmem_drain**() are made persistent
on the remote node. Otherwise, it returns a non-zero value and sets *errno*
appropriately.
The **rpmem_read**() function returns 0 if the data was read entirely.
Otherwise it returns a non-zero value and sets *errno* appropriately.
# CAVEATS #
Ordering of **rpmem_flush**() and **rpmem_persist**() operations which are using
different *lane* values is not guaranteed.
# SEE ALSO #
**rpmem_create**(3), **rpmem_open**(3), **rpmem_persist**(3),
**sysconf**(3), **limits.conf**(5), **libpmemobj**(7)
and **<https://pmem.io>**
| 5,655 | 42.175573 | 94 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/librpmem/librpmem.7.md
|
---
layout: manual
Content-Style: 'text/css'
title: _MP(LIBRPMEM, 7)
collection: librpmem
header: PMDK
date: rpmem API version 1.3
...
[comment]: <> (SPDX-License-Identifier: BSD-3-Clause)
[comment]: <> (Copyright 2016-2019, Intel Corporation)
[comment]: <> (librpmem.7 -- man page for librpmem)
[NAME](#name)<br />
[SYNOPSIS](#synopsis)<br />
[DESCRIPTION](#description)<br />
[TARGET NODE ADDRESS FORMAT](#target-node-address-format)<br />
[REMOTE POOL ATTRIBUTES](#remote-pool-attributes)<br />
[SSH](#ssh)<br />
[FORK](#fork)<br />
[CAVEATS](#caveats)<br />
[LIBRARY API VERSIONING](#library-api-versioning-1)<br />
[ENVIRONMENT](#environment)<br />
[DEBUGGING AND ERROR HANDLING](#debugging-and-error-handling)<br />
[EXAMPLE](#example)<br />
[ACKNOWLEDGEMENTS](#acknowledgements)<br />
[SEE ALSO](#see-also)
# NAME #
**librpmem** - remote persistent memory support library (EXPERIMENTAL)
# SYNOPSIS #
```c
#include <librpmem.h>
cc ... -lrpmem
```
##### Library API versioning: #####
```c
const char *rpmem_check_version(
unsigned major_required,
unsigned minor_required);
```
##### Error handling: #####
```c
const char *rpmem_errormsg(void);
```
##### Other library functions: #####
A description of other **librpmem** functions can be found on the following
manual pages:
+ **rpmem_create**(3), **rpmem_persist**(3)
# DESCRIPTION #
**librpmem** provides low-level support for remote access to
*persistent memory* (pmem) utilizing RDMA-capable RNICs. The library can be
used to remotely replicate a memory region over the RDMA protocol. It utilizes
an appropriate persistency mechanism based on the remote node's platform
capabilities. **librpmem** utilizes the **ssh**(1) client to authenticate
a user on the remote node, and for encryption of the connection's out-of-band
configuration data. See **SSH**, below, for details.
The maximum replicated memory region size can not be bigger than the maximum
locked-in-memory address space limit. See **memlock** in **limits.conf**(5)
for more details.
This library is for applications that use remote persistent memory directly,
without the help of any library-supplied transactions or memory
allocation. Higher-level libraries that build on **libpmem**(7) are
available and are recommended for most applications, see:
+ **libpmemobj**(7), a general use persistent memory API, providing memory
allocation and transactional operations on variable-sized objects.
# TARGET NODE ADDRESS FORMAT #
```
[<user>@]<hostname>[:<port>]
```
The target node address is described by the *hostname* which the client
connects to, with an optional *user* name. The user must be authorized
to authenticate to the remote machine without querying for password/passphrase.
The optional *port* number is used to establish the SSH connection. The default
port number is 22.
# REMOTE POOL ATTRIBUTES #
The *rpmem_pool_attr* structure describes a remote pool and is stored in remote
pool's metadata. This structure must be passed to the **rpmem_create**(3)
function by caller when creating a pool on remote node. When opening the pool
using **rpmem_open**(3) function the appropriate fields are read from pool's
metadata and returned back to the caller.
```c
#define RPMEM_POOL_HDR_SIG_LEN 8
#define RPMEM_POOL_HDR_UUID_LEN 16
#define RPMEM_POOL_USER_FLAGS_LEN 16
struct rpmem_pool_attr {
char signature[RPMEM_POOL_HDR_SIG_LEN];
uint32_t major;
uint32_t compat_features;
uint32_t incompat_features;
uint32_t ro_compat_features;
unsigned char poolset_uuid[RPMEM_POOL_HDR_UUID_LEN];
unsigned char uuid[RPMEM_POOL_HDR_UUID_LEN];
unsigned char next_uuid[RPMEM_POOL_HDR_UUID_LEN];
unsigned char prev_uuid[RPMEM_POOL_HDR_UUID_LEN];
unsigned char user_flags[RPMEM_POOL_USER_FLAGS_LEN];
};
```
The *signature* field is an 8-byte field which describes the pool's on-media
format.
The *major* field is a major version number of the pool's on-media format.
The *compat_features* field is a mask describing compatibility of pool's
on-media format optional features.
The *incompat_features* field is a mask describing compatibility of pool's
on-media format required features.
The *ro_compat_features* field is a mask describing compatibility of pool's
on-media format features. If these features are not available,
the pool shall be opened in read-only mode.
The *poolset_uuid* field is a UUID of the pool which the remote pool is
associated with.
The *uuid* field is a UUID of the first part of the remote pool. This field can
be used to connect the remote pool with other pools in a list.
The *next_uuid* and *prev_uuid* fields are UUIDs of the next and previous replicas
respectively. These fields can be used to connect the remote pool with other
pools in a list.
The *user_flags* field is a 16-byte field of user-defined flags.
# SSH #
**librpmem** utilizes the **ssh**(1) client to login and execute the
**rpmemd**(1) process on the remote node. By default, **ssh**(1)
is executed with the **-4** option, which forces using **IPv4** addressing.
For debugging purposes, both the ssh client and the commands executed
on the remote node may be overridden by setting the **RPMEM_SSH** and
**RPMEM_CMD** environment variables, respectively. See **ENVIRONMENT**
for details.
# FORK #
The **ssh**(1) client is executed
by **rpmem_open**(3) and **rpmem_create**(3) after forking a child process
using **fork**(2). The application must take this into account when
using **wait**(2) and **waitpid**(2), which may return the *PID* of
the **ssh**(1) process executed by **librpmem**.
If **fork**(2) support is not enabled in **libibverbs**,
**rpmem_open**(3) and **rpmem_create**(3) will fail.
By default, **fabric**(7) initializes **libibverbs** with **fork**(2) support
by calling the **ibv_fork_init**(3) function. See **fi_verbs**(7) for more
details.
# CAVEATS #
**librpmem** relies on the library destructor being called from the main thread.
For this reason, all functions that might trigger destruction (e.g.
**dlclose**(3)) should be called in the main thread. Otherwise some of the
resources associated with that thread might not be cleaned up properly.
**librpmem** registers a pool as a single memory region. A Chelsio T4 and T5
hardware can not handle a memory region greater than or equal to 8GB due to
a hardware bug. So *pool_size* value for **rpmem_create**(3) and **rpmem_open**(3)
using this hardware can not be greater than or equal to 8GB.
# LIBRARY API VERSIONING #
This section describes how the library API is versioned,
allowing applications to work with an evolving API.
The **rpmem_check_version**() function is used to see if the installed
**librpmem** supports the version of the library API required by an
application. The easiest way to do this is for the application to supply
the compile-time version information, supplied by defines in
**\<librpmem.h\>**, like this:
```c
reason = rpmem_check_version(RPMEM_MAJOR_VERSION,
RPMEM_MINOR_VERSION);
if (reason != NULL) {
/* version check failed, reason string tells you why */
}
```
Any mismatch in the major version number is considered a failure, but a
library with a newer minor version number will pass this check since
increasing minor versions imply backwards compatibility.
An application can also check specifically for the existence of an
interface by checking for the version where that interface was
introduced. These versions are documented in this man page as follows:
unless otherwise specified, all interfaces described here are available
in version 1.0 of the library. Interfaces added after version 1.0 will
contain the text *introduced in version x.y* in the section of this
manual describing the feature.
When the version check performed by **rpmem_check_version**() is
successful, the return value is NULL. Otherwise the return value is a
static string describing the reason for failing the version check. The
string returned by **rpmem_check_version**() must not be modified or
freed.
# ENVIRONMENT #
**librpmem** can change its default behavior based on the following
environment variables. These are largely intended for testing and are
not normally required.
+ **RPMEM_SSH**=*ssh_client*
Setting this environment variable overrides the default **ssh**(1) client
command name.
+ **RPMEM_CMD**=*cmd*
Setting this environment variable overrides the default command executed on
the remote node using either **ssh**(1) or the alternative remote shell command
specified by **RPMEM_SSH**.
**RPMEM_CMD** can contain multiple commands separated by a vertical bar (`|`).
Each consecutive command is executed on the remote node in order read from a
pool set file. This environment variable is read when the library is
initialized, so **RPMEM_CMD** must be set prior to application launch (or
prior to **dlopen**(3) if **librpmem** is being dynamically loaded).
+ **RPMEM_ENABLE_SOCKETS**=0\|1
Setting this variable to 1 enables using **fi_sockets**(7) provider for
in-band RDMA connection. The *sockets* provider does not support IPv6.
It is required to disable IPv6 system wide if **RPMEM_ENABLE_SOCKETS** == 1 and
*target* == localhost (or any other loopback interface address) and
**SSH_CONNECTION** variable (see **ssh**(1) for more details) contains IPv6
address after ssh to loopback interface. By default the *sockets* provider is
disabled.
+ **RPMEM_ENABLE_VERBS**=0\|1
Setting this variable to 0 disables using **fi_verbs**(7) provider for
in-band RDMA connection. The *verbs* provider is enabled by default.
+ **RPMEM_MAX_NLANES**=*num*
Limit the maximum number of lanes to *num*. See **LANES**, in **rpmem_create**(3), for details.
+ **RPMEM_WORK_QUEUE_SIZE**=*size*
Suggest the work queue size. The effective work queue size can be greater than
suggested if **librpmem** requires it or it can be smaller if underlying hardware
does not support the suggested size. The work queue size affects the performance
of communication to the remote node.
**rpmem_flush**(3) operations can be added to the work queue up to the size of
this queue. When work queue is full any subsequent call has to wait till the work
queue will be drained. **rpmem_drain**(3) and **rpmem_persist**(3) among other
things also drain the work queue.
# DEBUGGING AND ERROR HANDLING #
If an error is detected during the call to a **librpmem** function, the
application may retrieve an error message describing the reason for the failure
from **rpmem_errormsg**(). This function returns a pointer to a static buffer
containing the last error message logged for the current thread. If *errno*
was set, the error message may include a description of the corresponding
error code as returned by **strerror**(3). The error message buffer is
thread-local; errors encountered in one thread do not affect its value in
other threads. The buffer is never cleared by any library function; its
content is significant only when the return value of the immediately preceding
call to a **librpmem** function indicated an error, or if *errno* was set.
The application must not modify or free the error message string, but it may
be modified by subsequent calls to other library functions.
Two versions of **librpmem** are typically available on a development
system. The normal version, accessed when a program is linked using the
**-lrpmem** option, is optimized for performance. That version skips checks
that impact performance and never logs any trace information or performs any
run-time assertions.
A second version of **librpmem**, accessed when a program uses the libraries
under _DEBUGLIBPATH(), contains run-time assertions and trace points. The
typical way to access the debug version is to set the environment variable
**LD_LIBRARY_PATH** to _LDLIBPATH(). Debugging output is
controlled using the following environment variables. These variables have
no effect on the non-debug version of the library.
+ **RPMEM_LOG_LEVEL**
The value of **RPMEM_LOG_LEVEL** enables trace points in the debug version
of the library, as follows:
+ **0** - This is the default level when **RPMEM_LOG_LEVEL** is not set.
No log messages are emitted at this level.
+ **1** - Additional details on any errors detected are logged
(in addition to returning the *errno*-based errors as usual).
The same information may be retrieved using **rpmem_errormsg**().
+ **2** - A trace of basic operations is logged.
+ **3** - Enables a very verbose amount of function call
tracing in the library.
+ **4** - Enables voluminous and fairly obscure tracing information
that is likely only useful to the **librpmem** developers.
Unless **RPMEM_LOG_FILE** is set, debugging output is written to *stderr*.
+ **RPMEM_LOG_FILE**
Specifies the name of a file where all logging information should be written.
If the last character in the name is "-", the *PID* of the current process will
be appended to the file name when the log file is created. If
**RPMEM_LOG_FILE** is not set, logging output is written to *stderr*.
# EXAMPLE #
The following example uses **librpmem** to create a remote pool on given
target node identified by given pool set name. The associated local memory
pool is zeroed and the data is made persistent on remote node. Upon success
the remote pool is closed.
```c
#include <assert.h>
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <librpmem.h>
#define POOL_SIGNATURE "MANPAGE"
#define POOL_SIZE (32 * 1024 * 1024)
#define NLANES 4
#define DATA_OFF 4096
#define DATA_SIZE (POOL_SIZE - DATA_OFF)
static void
parse_args(int argc, char *argv[], const char **target, const char **poolset)
{
if (argc < 3) {
fprintf(stderr, "usage:\t%s <target> <poolset>\n", argv[0]);
exit(1);
}
*target = argv[1];
*poolset = argv[2];
}
static void *
alloc_memory()
{
long pagesize = sysconf(_SC_PAGESIZE);
if (pagesize < 0) {
perror("sysconf");
exit(1);
}
/* allocate a page size aligned local memory pool */
void *mem;
int ret = posix_memalign(&mem, pagesize, POOL_SIZE);
if (ret) {
fprintf(stderr, "posix_memalign: %s\n", strerror(ret));
exit(1);
}
assert(mem != NULL);
return mem;
}
int
main(int argc, char *argv[])
{
const char *target, *poolset;
parse_args(argc, argv, &target, &poolset);
unsigned nlanes = NLANES;
void *pool = alloc_memory();
int ret;
/* fill pool_attributes */
struct rpmem_pool_attr pool_attr;
memset(&pool_attr, 0, sizeof(pool_attr));
strncpy(pool_attr.signature, POOL_SIGNATURE, RPMEM_POOL_HDR_SIG_LEN);
/* create a remote pool */
RPMEMpool *rpp = rpmem_create(target, poolset, pool, POOL_SIZE,
&nlanes, &pool_attr);
if (!rpp) {
fprintf(stderr, "rpmem_create: %s\n", rpmem_errormsg());
return 1;
}
/* store data on local pool */
memset(pool, 0, POOL_SIZE);
/* make local data persistent on remote node */
ret = rpmem_persist(rpp, DATA_OFF, DATA_SIZE, 0, 0);
if (ret) {
fprintf(stderr, "rpmem_persist: %s\n", rpmem_errormsg());
return 1;
}
/* close the remote pool */
ret = rpmem_close(rpp);
if (ret) {
fprintf(stderr, "rpmem_close: %s\n", rpmem_errormsg());
return 1;
}
free(pool);
return 0;
}
```
# NOTE #
The **librpmem** API is experimental and may be subject to change in the future.
However, using the remote replication in **libpmemobj**(7) is safe and backward
compatibility will be preserved.
# ACKNOWLEDGEMENTS #
**librpmem** builds on the persistent memory programming model
recommended by the SNIA NVM Programming Technical Work Group:
<https://snia.org/nvmp>
# SEE ALSO #
**rpmemd**(1), **ssh**(1), **fork**(2), **dlclose**(3), **dlopen**(3),
**ibv_fork_init**(3), **rpmem_create**(3), **rpmem_drain**(3), **rpmem_flush**(3),
**rpmem_open**(3), **rpmem_persist**(3), **strerror**(3), **limits.conf**(5),
**fabric**(7), **fi_sockets**(7), **fi_verbs**(7), **libpmem**(7), **libpmemblk**(7),
**libpmemlog**(7), **libpmemobj**(7)
and **<https://pmem.io>**
| 15,878 | 33.822368 | 95 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/librpmem/rpmem_create.3.md
|
---
layout: manual
Content-Style: 'text/css'
title: _MP(RPMEM_CREATE, 3)
collection: librpmem
header: PMDK
date: rpmem API version 1.3
...
[comment]: <> (SPDX-License-Identifier: BSD-3-Clause)
[comment]: <> (Copyright 2017-2018, Intel Corporation)
[comment]: <> (rpmem_create.3 -- man page for most commonly used librpmem functions)
[NAME](#name)<br />
[SYNOPSIS](#synopsis)<br />
[DESCRIPTION](#description)<br />
[RETURN VALUE](#return-value)<br />
[NOTES](#notes)<br />
[SEE ALSO](#see-also)<br />
# NAME #
**rpmem_create**(), **rpmem_open**(),
**rpmem_set_attr**(), **rpmem_close**(), **rpmem_remove**()
- most commonly used functions for remote access to *persistent memory*
# SYNOPSIS #
```c
#include <librpmem.h>
RPMEMpool *rpmem_create(const char *target, const char *pool_set_name,
void *pool_addr, size_t pool_size, unsigned *nlanes,
const struct rpmem_pool_attr *create_attr);
RPMEMpool *rpmem_open(const char *target, const char *pool_set_name,
void *pool_addr, size_t pool_size, unsigned *nlanes,
struct rpmem_pool_attr *open_attr);
int rpmem_set_attr(RPMEMpool *rpp, const struct rpmem_pool_attr *attr);
int rpmem_close(RPMEMpool *rpp);
int rpmem_remove(const char *target, const char *pool_set_name, int flags);
```
# DESCRIPTION #
The **rpmem_create**() function creates a remote pool on a given *target* node,
using pool *set* file *pool_set_name* to map the remote pool. *pool_set_name*
is a relative path in the root config directory on the *target* node.
For pool set file format and options see **poolset**(5).
*pool_addr* is a pointer to the associated local memory pool with size
*pool_size*. Both *pool_addr* and *pool_size* must be aligned to the system's
page size (see **sysconf**(3)). The size of the remote pool must be at least
*pool_size*. See **REMOTE POOL SIZE**, below, for details.
*nlanes* points to the maximum number of lanes which the caller is requesting.
Upon successful creation of the remote pool, \**nlanes* is set to the
maximum number of lanes supported by both the local and remote nodes.
See **LANES**, below, for details.
The *create_attr* structure contains the attributes used for creating the
remote pool. If the *create_attr* structure is not NULL, a pool with internal
metadata is created. The metadata is stored in the first 4096
bytes of the pool and can be read when opening the remote pool with
**rpmem_open**(). To prevent the user from overwriting the pool metadata, this
region is not accessible to the user via **rpmem_persist**().
If *create_attr* is NULL or zeroed, the remote pool set file must contain
the *NOHDRS* option. In that case the remote pool is created without internal
metadata in it and the entire pool space is available to the user.
See **rpmem_persist**(3) for details.
The **rpmem_open**() function opens the existing remote pool with *set* file
*pool_set_name* on remote node *target*. *pool_set_name* is a relative path
in the root config directory on the *target* node. *pool_addr* is a pointer to
the associated local memory pool of size *pool_size*.
Both *pool_addr* and *pool_size* must be aligned to the system's page
size (see **sysconf**(3)). The size of the remote pool must be at least
*pool_size*. See **REMOTE POOL SIZE**, below, for details.
*nlanes* points to the maximum number of lanes which the caller is requesting.
Upon successful opening of the remote pool, \**nlanes* is set to the
maximum number of lanes supported by both the local and remote nodes.
See **LANES**, below, for details.
The **rpmem_set_attr**() function overwrites the pool's attributes.
The *attr* structure contains the attributes used for overwriting the remote
pool attributes that were passed to **rpmem_create**() at pool creation.
If *attr* is NULL, a zeroed structure with attributes will be used.
New attributes are stored in the pool's metadata.
The **rpmem_close**() function closes the remote pool *rpp*. All resources
are released on both the local and remote nodes. The remote pool itself
persists on the remote node and may be re-opened at a later time using
**rpmem_open**().
The **rpmem_remove**() function removes the remote pool with *set* file name
*pool_set_name* from node *target*. The *pool_set_name* is a relative path in
the root config directory on the *target* node. By default only the pool part
files are removed; the pool *set* file is left untouched. If the pool is not
consistent, the **rpmem_remove**() function fails.
The *flags* argument determines the behavior of **rpmem_remove**(). *flags* may
be either 0 or the bitwise OR of one or more of the following flags:
+ **RPMEM_REMOVE_FORCE** - Ignore errors when opening an inconsistent pool.
The pool *set* file must still be in appropriate format for the pool to be
removed.
+ **RPMEM_REMOVE_POOL_SET** - Remove the pool *set* file after removing the
pool described by this pool set.
# RETURN VALUE #
On success, **rpmem_create**() returns an opaque handle to the remote pool
for use in subsequent **librpmem** calls. If any error prevents
the remote pool from being created, **rpmem_create**() returns
NULL and sets *errno* appropriately.
On success, **rpmem_open**() returns an opaque handle to the remote
pool for subsequent **librpmem** calls. If the *open_attr* argument
is not NULL, the remote pool attributes are returned in the provided structure.
If the remote pool was created without internal metadata, zeroes are returned
in the *open_attr* structure on successful call to **rpmem_open**().
If any error prevents the remote pool from being opened, **rpmem_open**()
returns NULL and sets *errno* appropriately.
On success, **rpmem_set_attr**() returns 0. On error, it returns -1 and sets
*errno* appropriately.
On success, **rpmem_close**() returns 0. On error, it returns a non-zero value
and sets *errno* appropriately.
On success, **rpmem_remove**() returns 0. On error, it returns a non-zero value
and sets *errno* appropriately.
# NOTES #
## REMOTE POOL SIZE ##
The size of a remote pool depends on the configuration in the pool set file
on the remote node (see **poolset**(5)). If no pool set options are used in
the remote pool set file, the remote pool size is the sum of the sizes of all
part files, decreased by 4096 bytes per part file. 4096 bytes of each part file
are utilized for storing internal metadata.
If the *SINGLEHDR* option is used in the remote pool set file, the remote pool
size is the sum of sizes of all part files, decreased once by 4096 bytes.
In this case only the first part contains internal metadata.
If a remote pool set file contains the *NOHDRS* option, the remote pool size
is the sum of sizes of all its part files. In this case none of the parts
contains internal metadata. For other consequences of using the *SINGLEHDR* and
*NOHDRS* options see **rpmem_persist**(3).
**RPMEM_MIN_PART** and **RPMEM_MIN_POOL** in **\<librpmem.h\>** define
the minimum size allowed by **librpmem** for a part file and a remote pool,
respectively.
## LANES ##
The term *lane* means an isolated path of execution. The underlying hardware
utilized by both local and remote nodes may have limited resources that
restrict the maximum number of parallel **rpmem_persist**(3) operations.
The maximum number of supported lanes is returned by the **rpmem_open**() and
**rpmem_create**() function calls. The caller passes the maximum number of
lanes requested in \**nlanes*. If the pool is successfully created or opened,
\**nlanes* is updated to reflect the minimum of the number of lanes requested
by the caller and the maximum number of lanes supported by underlying hardware.
The application is obligated to use at most the returned number of
lanes in parallel.
**rpmem_persist**(3) does not provide any locking mechanism; thus any
serialization of calls must be performed by the application if required.
Each lane requires a separate connection, represented by a file descriptor.
If the system runs out of free file descriptors during **rpmem_create**() or
**rpmem_open**(), these functions will fail. See **nofile** in
**limits.conf**(5) for more details.
# SEE ALSO #
**rpmem_persist**(3), **sysconf**(3), **limits.conf**(5),
**libpmemobj**(7), **librpmem**(7) and **<https://pmem.io>**
| 8,200 | 46.132184 | 84 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/pmreorder/pmreorder.1.md
|
---
layout: manual
Content-Style: 'text/css'
title: _MP(PMREORDER, 1)
collection: pmreorder
header: PMDK
date: pmreorder version 1.5
...
[comment]: <> (SPDX-License-Identifier: BSD-3-Clause)
[comment]: <> (Copyright 2018-2020, Intel Corporation)
[comment]: <> (pmreorder.1 -- man page for pmreorder)
[NAME](#name)<br />
[SYNOPSIS](#synopsis)<br />
[DESCRIPTION](#description)<br />
[OPTIONS](#options)<br />
[ENGINES](#engines)<br />
[INSTRUMENTATION](#instrumentation)<br />
[PMEMCHECK STORE LOG](#pmemcheck-store-log)<br />
[ENVIRONMENT](#environment)<br />
[EXAMPLE](#example)<br />
[SEE ALSO](#see-also)<br />
# NAME #
**pmreorder** - performs a persistent consistency check
using a store reordering mechanism
# SYNOPSIS #
```
$ python pmreorder <options>
```
# DESCRIPTION #
The pmreorder tool is a collection of python scripts designed
to parse and replay operations logged by pmemcheck -
a persistent memory checking tool.
Pmreorder performs the store reordering between persistent
memory barriers - a sequence of flush-fence operations.
It uses a consistency checking routine provided in the
command line options to check whether files are in a
consistent state.
Considering that logging, replaying and reordering of operations
are very time consuming, it is recommended to use as few stores as
possible in test workloads.
# OPTIONS #
`-h, --help`
Prints synopsis and list of options.
`-l <store_log>, --logfile <store_log>`
The pmemcheck log file to process.
`-c <prog|lib>, --checker <prog|lib>`
Consistency checker type.
`-p <path>, --path <path>`
Path to the consistency checker. Checker function has to return 0 for consistent cases and 1 otherwise.
`-n <name>, --name <name>`
The symbol name of the consistency checking function
in the library. Valid only if the checker type is `lib`.
`-o <pmreorder_output>, --output <pmreorder_output>`
Set the logger output file.
`-e <debug|info|warning|error|critical>,`
` --output-level <debug|info|warning|error|critical>`
Set the output log level.
`-r <NoReorderNoCheck|`
` NoReorderDoCheck|`
` ReorderFull|`
` ReorderPartial|`
` ReorderAccumulative|`
` ReorderReverseAccumulative>,`
`--default-engine <NoReorderNoCheck|`
` NoReorderDoCheck|`
` ReorderFull|`
` ReorderPartial|`
` ReorderAccumulative|`
` ReorderReverseAccumulative>`
Set the initial reorder engine. Default value is `NoReorderNoCheck`.
`-x <cli_macros|config_file>, --extended-macros <cli_macros|config_file>`
Assign an engine types to the defined marker.
`-v, --version`
Prints current version of pmreorder.
# ENGINES #
By default, the **NoReorderNoCheck** engine is used,
which means that for each set of stores, the tool
will pass-through all sequences of stores not reordered
and will not run consistency checker on them.
To enable different types of the reorder engine and
begin proper reordering tests, a number of other
engines exist:
+ **NoReorderDoCheck** - pass-through of unchanged operations.
Checks correctness of the stores as they were logged.
Useful for operations that do not require fail safety.
```
Example:
input: (a, b, c)
output: (a, b, c)
```
+ **ReorderAccumulative** - checks correctness on a growing
subset of the original sequence.
```
Example:
input: (a, b, c)
output:
()
(a)
(a, b)
(a, b, c)
```
+ **ReorderReverseAccumulative** - checks correctness on a reverted growing
subset of the original sequence.
```
Example:
input: (a, b, c)
output:
()
(c)
(c, b)
(c, b, a)
```
+ **ReorderPartial** - checks consistency on 3 randomly selected sequences
from a set of 1000 combinations of the original log, without repetitions.
```
Example:
input: (a, b, c)
output:
(b, c)
(b)
(a, b, c)
```
+ **ReorderFull** - for each set of stores generates and checks consistency
of all possible store permutations.
This might prove to be very computationally expensive for most workloads.
It can be useful for critical sections of code with limited number of stores.
```
Example:
input: (a, b, c)
output:
()
(a)
(b)
(c)
(a, b)
(a, c)
(b, a)
(b, c)
(c, a)
(c, b)
(a, b, c)
(a, c, b)
(b, a, c)
(b, c, a)
(c, a, b)
(c, b, a)
```
When the engine is passed with an `-r` option, it will be used
for each logged set of stores.
Additionally, the `-x` parameter can be used to switch engines
separately for any marked code sections.
For more details about `-x` extended macros functionality see section
INSTRUMENTATION below.
# INSTRUMENTATION #
The core of **pmreorder** is based on user-provided named markers.
Sections of code can be 'marked' depending on their importance,
and the degree of reordering can be customized by the use of various
provided engines.
For this purpose, Valgrind's pmemcheck tool exposes a
generic marker macro:
+ **VALGRIND_PMC_EMIT_LOG(value)**
It emits log to *store_log* during pmemcheck processing.
*value* is a user-defined marker name.
For more details about pmemcheck execution see
PMEMCHECK STORE LOG section below.
Example:
```
main.c
.
.
.
VALGRIND_PMC_EMIT_LOG("PMREORDER_MEMSET_PERSIST.BEGIN");
pmem_memset_persist(...);
VALGRIND_PMC_EMIT_LOG("PMREORDER_MEMSET_PERSIST.END");
.
.
.
```
There are a few rules for macro creation:
+ A valid macro can have any name,
but the begin and end sections have to match -
they are case sensitive.
+ A macro must have a `.BEGIN` or `.END` suffix.
+ Macros cannot be crossed (their sections must not overlap).
Defined markers can be assigned engines types and configured
through the **pmreorder** tool using the `-x` parameter.
There are two ways to set macro options:
+ Using command line interface in format:
```
PMREORDER_MARKER_NAME1=Marker1,PMREORDER_MARKER_NAME2=Marker2
```
+ Using configuration file in .json format:
```
{
"PMREORDER_MARKER_NAME1":"Marker1",
"PMREORDER_MARKER_NAME2":"Marker2"
}
```
For more details about available
engines types, see ENGINES section above.
**libpmemobj**(7), **libpmem**(7) and **libpmem2**(7) also provide a set of macros
that allow to change reordering engine on library or function level:
`<library_name|api_function_name>`
Example of configuration on function level:
```
{
"pmemobj_open":"NoReorderNoCheck",
"pmemobj_memcpy_persist":"ReorderPartial"
}
```
Example of configuration on library level
(affecting all library functions):
```
{
"libpmemobj":"NoReorderNoCheck"
}
```
List of marked **libpmemobj**(7) API functions:
```
pmemobj_alloc
pmemobj_cancel
pmemobj_check
pmemobj_close
pmemobj_create
pmemobj_ctl_exec
pmemobj_ctl_set
pmemobj_free
pmemobj_list_insert
pmemobj_list_insert_new
pmemobj_list_move
pmemobj_list_remove
pmemobj_memcpy
pmemobj_memmove
pmemobj_memset
pmemobj_memcpy_persist
pmemobj_memset_persist
pmemobj_open
pmemobj_publish
pmemobj_realloc
pmemobj_reserve
pmemobj_root
pmemobj_root_construct
pmemobj_strdup
pmemobj_tx_abort
pmemobj_tx_add_range
pmemobj_tx_add_range_direct
pmemobj_tx_alloc
pmemobj_tx_commit
pmemobj_tx_free
pmemobj_tx_publish
pmemobj_tx_realloc
pmemobj_tx_strdup
pmemobj_tx_wcsdup
pmemobj_tx_xadd_range
pmemobj_tx_xadd_range_direct
pmemobj_tx_xalloc
pmemobj_tx_zalloc
pmemobj_tx_zrealloc
pmemobj_wcsdup
pmemobj_xalloc
pmemobj_xreserve
pmemobj_zalloc
pmemobj_zrealloc
```
List of marked **libpmem**(7) API functions:
```
pmem_memmove
pmem_memcpy
pmem_memset
pmem_memmove_nodrain
pmem_memcpy_nodrain
pmem_memset_nodrain
pmem_memmove_persist
pmem_memcpy_persist
pmem_memset_persist
```
List of **libpmem2**(7) API functions, which return marked functions:
```
pmem2_get_memcpy_fn (marker for the returned function has "pmem2_memmove" name)
pmem2_get_memmove_fn (marker for the returned function has "pmem2_memmove" name)
pmem2_get_memset_fn (marker for the returned function has "pmem2_memset" name)
```
# PMEMCHECK STORE LOG #
To generate *store_log* for **pmreorder** run pmemcheck
with additional parameters:
```
valgrind \
--tool=pmemcheck \
-q \
--log-stores=yes \
--print-summary=no \
--log-file=store_log.log \
--log-stores-stacktraces=yes \
--log-stores-stacktraces-depth=2 \
--expect-fence-after-clflush=yes \
test_binary writer_parameter
```
For further details of pmemcheck parameters see
[pmemcheck documentation](https://pmem.io/valgrind/generated/pmc-manual.html)
# ENVIRONMENT #
By default all logging from PMDK libraries is disabled.
To enable API macros logging set environment variable:
+ **PMREORDER_EMIT_LOG**=1
# EXAMPLE #
```
python pmreorder.py \
-l store_log.log \
-r NoReorderDoCheck \
-o pmreorder_out.log \
-c prog \
-x PMREORDER_MARKER_NAME=ReorderPartial \
-p checker_binary checker_parameter
```
Checker binary will be used to run consistency checks on
"store_log.log", output of pmemcheck tool. Any inconsistent
stores found during **pmreorder** analysis will be logged
to `pmreorder_out.log`.
# SEE ALSO #
**<https://pmem.io>**
| 9,282 | 21.314904 | 103 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/libpmemblk/libpmemblk.7.md
|
---
layout: manual
Content-Style: 'text/css'
title: _MP(LIBPMEMBLK, 7)
collection: libpmemblk
header: PMDK
date: pmemblk API version 1.1
...
[comment]: <> (SPDX-License-Identifier: BSD-3-Clause)
[comment]: <> (Copyright 2016-2018, Intel Corporation)
[comment]: <> (libpmemblk.7 -- man page for libpmemblk)
[NAME](#name)<br />
[SYNOPSIS](#synopsis)<br />
[DESCRIPTION](#description)<br />
[LIBRARY API VERSIONING](#library-api-versioning-1)<br />
[MANAGING LIBRARY BEHAVIOR](#managing-library-behavior-1)<br />
[DEBUGGING AND ERROR HANDLING](#debugging-and-error-handling)<br />
[EXAMPLE](#example)<br />
[BUGS](#bugs)<br />
[ACKNOWLEDGEMENTS](#acknowledgements)<br />
[SEE ALSO](#see-also)<br />
# NAME #
**libpmemblk** - persistent memory resident array of blocks
# SYNOPSIS #
```c
#include <libpmemblk.h>
cc ... -lpmemblk -lpmem
```
_UNICODE()
##### Library API versioning: #####
```c
_UWFUNC(pmemblk_check_version, =q=
unsigned major_required,
unsigned minor_required=e=)
```
##### Managing library behavior: #####
```c
void pmemblk_set_funcs(
void *(*malloc_func)(size_t size),
void (*free_func)(void *ptr),
void *(*realloc_func)(void *ptr, size_t size),
char *(*strdup_func)(const char *s));
```
##### Error handling: #####
```c
_UWFUNC(pmemblk_errormsg, void)
```
##### Other library functions: #####
A description of other **libpmemblk** functions can be found on the following
manual pages:
**pmemblk_bsize**(3), **pmemblk_create**(3),
**pmemblk_ctl_exec**(3), **pmemblk_ctl_get**(3), **pmemblk_ctl_set**(3),
**pmemblk_read**(3) and **pmemblk_set_zero**(3).
# DESCRIPTION #
**libpmemblk**
provides an array of blocks in *persistent memory* (pmem) such that updates
to a single block are atomic. This library is intended for applications
using direct access storage (DAX), which is storage that supports load/store
access without paging blocks from a block storage device. Some types of
*non-volatile memory DIMMs* (NVDIMMs) provide this type of byte addressable
access to storage. A *persistent memory aware file system* is typically used
to expose the direct access to applications. Memory mapping a file from this
type of file system results in the load/store, non-paged access to pmem.
**libpmemblk** builds on this type of memory mapped file.
This library is for applications that need a potentially large array of blocks,
all the same size, where any given block is updated atomically (the update
cannot be *torn* by program interruptions such as power failures). This library
builds on the low-level pmem support provided by **libpmem**(7), handling the
transactional update of the blocks, flushing to persistence, and recovery for
the application. **libpmemblk** is one of a collection of persistent memory
libraries available, the others are:
+ **libpmemobj**(7), a general use persistent memory API, providing memory
allocation and transactional operations on variable-sized objects.
+ **libpmemlog**(7), providing a pmem-resident log file.
+ **libpmem**(7), low-level persistent memory support.
Under normal usage, **libpmemblk** will never print messages or intentionally
cause the process to exit. The only exception to this is the debugging
information, when enabled, as described under **DEBUGGING AND ERROR HANDLING**
below.
To use the atomic block arrays supplied by **libpmemblk**, a *memory pool*
is first created using the _UW(pmemblk_create) function described
in **pmemblk_create**(3). The other **libpmemblk** functions operate on
the resulting block memory pool using the opaque handle, of type
*PMEMblkpool\**, that is returned by _UW(pmemblk_create) or _UW(pmemblk_open).
Internally, **libpmemblk** will use either
**pmem_persist**(3) or **msync**(2) when it needs to flush changes, depending
on whether the memory pool appears to be persistent memory or a regular file
(see the **pmem_is_pmem**(3) function in **libpmem**(7) for more information).
There is no need for applications to flush changes directly when using the
block memory API provided by **libpmemblk**.
# CAVEATS #
**libpmemblk** relies on the library destructor being called from the main
thread. For this reason, all functions that might trigger destruction (e.g.
**dlclose**(3)) should be called in the main thread. Otherwise some of the
resources associated with that thread might not be cleaned up properly.
# LIBRARY API VERSIONING #
This section describes how the library API is versioned,
allowing applications to work with an evolving API.
The _UW(pmemblk_check_version) function is used to determine whether the
installed **libpmemblk** supports the version of the library API required
by an application. The easiest way to do this is for the application to
supply the compile-time version information, supplied by defines in
**\<libpmemblk.h\>**, like this:
```c
reason = _U(pmemblk_check_version)(PMEMBLK_MAJOR_VERSION,
PMEMBLK_MINOR_VERSION);
if (reason != NULL) {
/* version check failed, reason string tells you why */
}
```
Any mismatch in the major version number is considered a failure, but a library
with a newer minor version number will pass this check since increasing minor
versions imply backwards compatibility.
An application can also check specifically for the existence of an interface
by checking for the version where that interface was introduced. These versions
are documented in this man page as follows: unless otherwise specified, all
interfaces described here are available in version 1.0 of the library.
Interfaces added after version 1.0 will contain the text *introduced in
version x.y* in the section of this manual describing the feature.
When the version check performed by _UW(pmemblk_check_version) is successful,
the return value is NULL. Otherwise the return value is a static string
describing the reason for failing the version check. The string returned by
_UW(pmemblk_check_version) must not be modified or freed.
# MANAGING LIBRARY BEHAVIOR #
The **pmemblk_set_funcs**() function allows an application to override memory
allocation calls used internally by **libpmemblk**.
Passing in NULL for any of the handlers will cause
the **libpmemblk** default function to be used.
The library does not make heavy use of the system malloc functions,
but it does allocate approximately 4-8 kilobytes for each memory pool in use.
# DEBUGGING AND ERROR HANDLING #
The _UW(pmemblk_errormsg) function returns a pointer to a static buffer
containing the last error message logged for the current thread. If *errno* was
set, the error message may include a description of the corresponding error
code, as returned by **strerror**(3). The error message buffer is thread-local;
errors encountered in one thread do not affect its value in other threads. The
buffer is never cleared by any library function; its content is significant
only when the return value of the immediately preceding call to a
**libpmemblk** function indicated an error, or if *errno* was set. The
application must not modify or free the error message string, but it may be
modified by subsequent calls to other library functions.
Two versions of **libpmemblk** are typically available on a development system.
The normal version, accessed when a program is linked using the **-lpmemblk**
option, is optimized for performance. That version skips checks that impact
performance and never logs any trace information or performs any run-time
assertions. If an error is detected in a call to **libpmemblk**,
the error message describing the failure may be retrieved with
_UW(pmemblk_errormsg) as described above.
A second version of **libpmemblk**, accessed when a program uses the libraries
under _DEBUGLIBPATH(), contains run-time assertions and trace points. The
typical way to access the debug version is to set the **LD_LIBRARY_PATH**
environment variable to _LDLIBPATH(). Debugging output is
controlled using the following environment variables. These variables have
no effect on the non-debug version of the library.
+ **PMEMBLK_LOG_LEVEL**
The value of **PMEMBLK_LOG_LEVEL** enables trace points in the debug version
of the library, as follows:
+ **0** - This is the default level when **PMEMBLK_LOG_LEVEL** is not set.
No log messages are emitted at this level.
+ **1** - Additional details on any errors detected are logged,
in addition to returning the *errno*-based errors as usual.
The same information may be retrieved using _UW(pmemblk_errormsg).
+ **2** - A trace of basic operations is logged.
+ **3** - Enables a very verbose amount of function call tracing
in the library.
+ **4** - Enables voluminous and fairly obscure tracing information
that is likely only useful to the **libpmemblk** developers.
Unless **PMEMBLK_LOG_FILE** is set, debugging output is written to *stderr*.
+ **PMEMBLK_LOG_FILE**
Specifies the name of a file
where all logging information should be written. If the last character in the
name is "-", the *PID* of the current process will be appended to the file name
when the log file is created. If **PMEMBLK_LOG_FILE** is not set, the logging
output is written to *stderr*.
See also **libpmem**(7) for information on other environment variables
that may affect **libpmemblk** behavior.
# EXAMPLE #
The following example illustrates how the **libpmemblk** API is used.
```c
#include <fcntl.h>
#include <errno.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <libpmemblk.h>
/* size of the pmemblk pool -- 1 GB */
#define POOL_SIZE ((size_t)(1 << 30))
/* size of each element in the pmem pool */
#define ELEMENT_SIZE 1024
int
main(int argc, char *argv[])
{
const char path[] = "/pmem-fs/myfile";
PMEMblkpool *pbp;
size_t nelements;
char buf[ELEMENT_SIZE];
/* create the pmemblk pool or open it if it already exists */
pbp = _U(pmemblk_create)(path, ELEMENT_SIZE, POOL_SIZE, 0666);
if (pbp == NULL)
pbp = _U(pmemblk_open)(path, ELEMENT_SIZE);
if (pbp == NULL) {
perror(path);
exit(1);
}
/* how many elements fit into the file? */
nelements = pmemblk_nblock(pbp);
printf("file holds %zu elements", nelements);
/* store a block at index 5 */
strcpy(buf, "hello, world");
if (pmemblk_write(pbp, buf, 5) < 0) {
perror("pmemblk_write");
exit(1);
}
/* read the block at index 10 (reads as zeros initially) */
if (pmemblk_read(pbp, buf, 10) < 0) {
perror("pmemblk_read");
exit(1);
}
/* zero out the block at index 5 */
if (pmemblk_set_zero(pbp, 5) < 0) {
perror("pmemblk_set_zero");
exit(1);
}
/* ... */
pmemblk_close(pbp);
}
```
See <https://pmem.io/pmdk/libpmemblk> for more examples using the **libpmemblk** API.
# BUGS #
Unlike **libpmemobj**(7), data replication is not supported in **libpmemblk**.
Thus, specifying replica sections in pool set files is not allowed.
# ACKNOWLEDGEMENTS #
**libpmemblk** builds on the persistent memory programming model recommended
by the SNIA NVM Programming Technical Work Group:
<https://snia.org/nvmp>
# SEE ALSO #
**msync**(2), **dlclose**(3), **pmemblk_bsize**(3), **pmemblk_create**(3),
**pmemblk_ctl_exec**(3), **pmemblk_ctl_get**(3), **pmemblk_ctl_set**(3),
**pmemblk_read**(3), **pmemblk_set_zero**(3), **pmem_is_pmem**(3),
**pmem_persist**(3), **strerror**(3), **libpmem**(7),
**libpmemlog**(7), **libpmemobj**(7) and **<https://pmem.io>**
| 11,294 | 34.857143 | 85 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/libpmemblk/pmemblk_bsize.3.md
|
---
layout: manual
Content-Style: 'text/css'
title: _MP(PMEMBLK_BSIZE, 3)
collection: libpmemblk
header: PMDK
date: pmemblk API version 1.1
...
[comment]: <> (SPDX-License-Identifier: BSD-3-Clause)
[comment]: <> (Copyright 2017-2018, Intel Corporation)
[comment]: <> (pmemblk_bsize.3 -- man page for functions that check number of available blocks or usable space in block memory pool)
[NAME](#name)<br />
[SYNOPSIS](#synopsis)<br />
[DESCRIPTION](#description)<br />
[RETURN VALUE](#return-value)<br />
[SEE ALSO](#see-also)<br />
# NAME #
**pmemblk_bsize**(), **pmemblk_nblock**() - check number of available blocks or
usable space in block memory pool
# SYNOPSIS #
```c
#include <libpmemblk.h>
size_t pmemblk_bsize(PMEMblkpool *pbp);
size_t pmemblk_nblock(PMEMblkpool *pbp);
```
# DESCRIPTION #
The **pmemblk_bsize**() function returns the block size of the specified
block memory pool, that is, the value which was passed as *bsize* to
_UW(pmemblk_create). *pbp* must be a block memory pool handle as returned by
**pmemblk_open**(3) or **pmemblk_create**(3).
The **pmemblk_nblock**() function returns the usable space in the block memory
pool. *pbp* must be a block memory pool handle as returned by
**pmemblk_open**(3) or **pmemblk_create**(3).
# RETURN VALUE #
The **pmemblk_bsize**() function returns the block size of the specified block
memory pool.
The **pmemblk_nblock**() function returns the usable space in the block memory
pool, expressed as the number of blocks available.
# SEE ALSO #
**pmemblk_create**(3), **pmemblk_open**(3),
**libpmemblk**(7) and **<https://pmem.io>**
| 1,606 | 26.706897 | 132 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/libpmemblk/pmemblk_create.3.md
|
---
layout: manual
Content-Style: 'text/css'
title: _MP(PMEMBLK_CREATE, 3)
collection: libpmemblk
header: PMDK
date: pmemblk API version 1.1
...
[comment]: <> (SPDX-License-Identifier: BSD-3-Clause)
[comment]: <> (Copyright 2017-2018, Intel Corporation)
[comment]: <> (pmemblk_create.3 -- man page for libpmemblk create, open, close and validate functions)
[NAME](#name)<br />
[SYNOPSIS](#synopsis)<br />
[DESCRIPTION](#description)<br />
[RETURN VALUE](#return-value)<br />
[SEE ALSO](#see-also)<br />
# NAME #
_UW(pmemblk_create), _UW(pmemblk_open),
**pmemblk_close**(), _UW(pmemblk_check)
- create, open, close and validate block pool
# SYNOPSIS #
```c
#include <libpmemblk.h>
_UWFUNCR1(PMEMblkpool, *pmemblk_create, *path, =q=size_t bsize,
size_t poolsize, mode_t mode=e=)
_UWFUNCR1(PMEMblkpool, *pmemblk_open, *path, size_t bsize)
void pmemblk_close(PMEMblkpool *pbp);
_UWFUNCR1(int, pmemblk_check, *path, size_t bsize)
```
_UNICODE()
# DESCRIPTION #
The _UW(pmemblk_create) function creates a block memory pool with the given
total *poolsize*, divided into as many elements of size *bsize* as will fit in
the pool. Since the transactional nature of a block memory pool requires some
space overhead in the memory pool, the resulting number of available blocks is
less than *poolsize*/*bsize*, and is made available to the caller via the
**pmemblk_nblock**(3) function. Given the specifics of the implementation, the
number of available blocks for the user cannot be less than 256. This
translates to at least 512 internal blocks. *path* specifies the name of the
memory pool file to be created. *mode* specifies the permissions to use when
creating the file, as described by **creat**(2). The memory pool file is fully
allocated to the size *poolsize* using **posix_fallocate**(3). The caller may
choose to take responsibility for creating the memory pool file by creating it
before calling _UW(pmemblk_create), and then specifying *poolsize* as zero. In
this case _UW(pmemblk_create) will take the pool size from the size of the
existing file, and will verify that the file appears to be empty by searching
for any non-zero data in the pool header at the beginning of the file. The net
pool size of a pool file is equal to the file size. The minimum net pool size
allowed by the library for a block pool is defined in **\<libpmemblk.h\>** as
**PMEMBLK_MIN_POOL**. *bsize* can be any non-zero value; however,
**libpmemblk** will silently round up
the given size to **PMEMBLK_MIN_BLK**, as defined in **\<libpmemblk.h\>**.
Depending on the configuration of the system, the available non-volatile
memory space may be divided into multiple memory devices. In such case, the
maximum size of the pmemblk memory pool could be limited by the capacity of a
single memory device. **libpmemblk**(7) allows building a persistent memory
resident array spanning multiple memory devices by creation of persistent
memory pools consisting of multiple files, where each part of such a *pool set*
may be stored on a different memory device or pmem-aware filesystem.
Creation of all the parts of the pool set can be done with _UW(pmemblk_create);
however, the recommended method for creating pool sets is by using the
**pmempool**(1) utility.
When creating a pool set consisting of multiple files, the *path* argument
passed to _UW(pmemblk_create) must point to the special *set* file that defines
the pool layout and the location of all the parts of the pool set. The
*poolsize* argument must be 0. The meaning of the *mode* argument
does not change, except that the same *mode* is used for creation of all the
parts of the pool set.
For more information on pool set format, see **poolset**(5).
The _UW(pmemblk_open) function opens an existing block memory pool.
As with _UW(pmemblk_create), *path* must identify either an existing
block memory pool file, or the *set* file used to create a pool set.
The application must have permission to open the file and memory map the
file or pool set with read/write permissions. If *bsize* is non-zero,
_UW(pmemblk_open) will verify that the given block size matches the block
size used when the pool was created. Otherwise, _UW(pmemblk_open) will open
the pool without verifying the block size. The *bsize* can be determined
using the **pmemblk_bsize**(3) function.
Be aware that if the pool contains bad blocks, opening can be aborted
by the SIGBUS signal, because currently the pool is not checked against
bad blocks during opening. This check can be turned on by setting the
CHECK_BAD_BLOCKS compat feature. For details see description of this feature
in **pmempool-feature**(1).
The **pmemblk_close**() function closes the memory pool
indicated by *pbp* and deletes the memory pool handle.
The block memory pool itself lives on in the file that contains it and may be
re-opened at a later time using _UW(pmemblk_open) as described above.
The _UW(pmemblk_check) function performs a consistency check of the file
indicated by *path*, and returns 1 if the memory pool is found to be
consistent. If the pool is found not to be consistent, further use of the
file with **libpmemblk** will result in undefined behavior. The debug version
of **libpmemblk** will provide additional details on inconsistencies when
**PMEMBLK_LOG_LEVEL** is at least 1, as described in the **DEBUGGING AND ERROR
HANDLING** section in **libpmemblk**(7). _UW(pmemblk_check) opens the given
*path* read-only so it never makes any changes to the file. This function is
not supported on Device DAX.
# RETURN VALUE #
On success, _UW(pmemblk_create) returns a *PMEMblkpool\** handle to the block
memory pool. On error, it returns NULL and sets *errno* appropriately.
On success, _UW(pmemblk_open) returns a *PMEMblkpool\** handle that can be
used with most of the functions in **libpmemblk**(7). On error, it returns
NULL and sets *errno* appropriately. Possible errors include:
+ failure to open *path*
+ *path* specifies a *set* file and any of the pool set files cannot be opened
+ *path* specifies a *set* file and the actual size of any file does not
match the corresponding part size defined in the *set* file
+ *bsize* is non-zero and does not match the block size given when the pool
was created. *errno* is set to **EINVAL** in this case.
The **pmemblk_close**() function returns no value.
_UW(pmemblk_check) returns 1 if the memory pool is found to be consistent.
If the check is successfully performed but the pool is found to be inconsistent,
_UW(pmemblk_check) returns 0. This includes the case where *bsize* is non-zero
and does not match the block size given when the pool was created. If the
consistency check cannot be performed, _UW(pmemblk_check) returns -1 and sets
*errno* appropriately.
# CAVEATS #
Not all file systems support **posix_fallocate**(3). _UW(pmemblk_create) will
fail if the underlying file system does not support **posix_fallocate**(3).
_WINUX(=q= On Windows if _UW(pmemblk_create) is called on an existing file
with FILE_ATTRIBUTE_SPARSE_FILE and FILE_ATTRIBUTE_COMPRESSED set,
they will be removed, to physically allocate space for the pool.
This is a workaround for _chsize() performance issues. =e=)
# SEE ALSO #
**pmempool**(1), **creat**(2), **pmemblk_nblock**(3),
**posix_fallocate**(3), **poolset**(5),
**libpmemblk**(7) and **<https://pmem.io>**
| 7,325 | 45.075472 | 102 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/libpmemblk/pmemblk_read.3.md
|
---
layout: manual
Content-Style: 'text/css'
title: _MP(PMEMBLK_READ, 3)
collection: libpmemblk
header: PMDK
date: pmemblk API version 1.1
...
[comment]: <> (SPDX-License-Identifier: BSD-3-Clause)
[comment]: <> (Copyright 2017-2018, Intel Corporation)
[comment]: <> (pmemblk_read.3 -- man page for libpmemblk read and write functions)
[NAME](#name)<br />
[SYNOPSIS](#synopsis)<br />
[DESCRIPTION](#description)<br />
[RETURN VALUE](#return-value)<br />
[SEE ALSO](#see-also)<br />
# NAME #
**pmemblk_read**(), **pmemblk_write**() - read or write a block from a block
memory pool
# SYNOPSIS #
```c
#include <libpmemblk.h>
int pmemblk_read(PMEMblkpool *pbp, void *buf, long long blockno);
int pmemblk_write(PMEMblkpool *pbp, const void *buf, long long blockno);
```
# DESCRIPTION #
The **pmemblk_read**() function reads the block with block number *blockno*
from memory pool *pbp* into the buffer *buf*. Reading a block that has never
been written by **pmemblk_write**() will return a block of zeroes.
The **pmemblk_write**() function writes a block from *buf* to block number
*blockno* in the memory pool *pbp*. The write is atomic with respect to other
reads and writes. In addition, the write cannot be torn by program failure or
system crash; on recovery the block is guaranteed to contain either the old
data or the new data, never a mixture of both.
# RETURN VALUE #
On success, the **pmemblk_read**() and **pmemblk_write**() functions return 0.
On error, they return -1 and set *errno* appropriately.
# SEE ALSO #
**libpmemblk**(7) and **<https://pmem.io>**
| 1,578 | 27.709091 | 82 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/libpmemblk/pmemblk_ctl_get.3.md
|
---
layout: manual
Content-Style: 'text/css'
title: _MP(PMEMBLK_CTL_GET, 3)
collection: libpmemblk
header: PMDK
date: pmemblk API version 1.1
...
[comment]: <> (SPDX-License-Identifier: BSD-3-Clause)
[comment]: <> (Copyright 2018-2019, Intel Corporation)
[comment]: <> (pmemblk_ctl_get.3 -- man page for libpmemblk CTL)
[NAME](#name)<br />
[SYNOPSIS](#synopsis)<br />
[DESCRIPTION](#description)<br />
[CTL NAMESPACE](#ctl-namespace)<br />
[CTL EXTERNAL CONFIGURATION](#ctl-external-configuration)<br />
[SEE ALSO](#see-also)<br />
# NAME #
_UW(pmemblk_ctl_get),
_UW(pmemblk_ctl_set),
_UW(pmemblk_ctl_exec)
- Query and modify libpmemblk internal behavior (EXPERIMENTAL)
# SYNOPSIS #
```c
#include <libpmemblk.h>
_UWFUNCR2(int, pmemblk_ctl_get, PMEMblkpool *pbp, *name, void *arg,
=q= (EXPERIMENTAL)=e=)
_UWFUNCR2(int, pmemblk_ctl_set, PMEMblkpool *pbp, *name, void *arg,
=q= (EXPERIMENTAL)=e=)
_UWFUNCR2(int, pmemblk_ctl_exec, PMEMblkpool *pbp, *name, void *arg,
=q= (EXPERIMENTAL)=e=)
```
_UNICODE()
# DESCRIPTION #
The _UW(pmemblk_ctl_get), _UW(pmemblk_ctl_set) and _UW(pmemblk_ctl_exec)
functions provide a uniform interface for querying and modifying the internal
behavior of **libpmemblk**(7) through the control (CTL) namespace.
The *name* argument specifies an entry point as defined in the CTL namespace
specification. The entry point description specifies whether the extra *arg* is
required. Those two parameters together create a CTL query. The functions and
the entry points are thread-safe unless
indicated otherwise below. If there are special conditions for calling an entry
point, they are explicitly stated in its description. The functions propagate
the return value of the entry point. If either *name* or *arg* is invalid, -1
is returned.
If the provided ctl query is valid, the CTL functions will always return 0
on success and -1 on failure, unless otherwise specified in the entry point
description.
See more in **pmem_ctl**(5) man page.
# CTL NAMESPACE #
prefault.at_create | rw | global | int | int | - | boolean
If set, every page of the pool will be touched and written to when the pool
is created, in order to trigger page allocation and minimize the performance
impact of pagefaults. Affects only the _UW(pmemblk_create) function.
Always returns 0.
prefault.at_open | rw | global | int | int | - | boolean
If set, every page of the pool will be touched and written to when the pool
is opened, in order to trigger page allocation and minimize the performance
impact of pagefaults. Affects only the _UW(pmemblk_open) function.
Always returns 0.
sds.at_create | rw | global | int | int | - | boolean
If set, force-enables or force-disables SDS feature during pool creation.
Affects only the _UW(pmemblk_create) function. See **pmempool_feature_query**(3)
for information about SDS (SHUTDOWN_STATE) feature.
Always returns 0.
copy_on_write.at_open | rw | global | int | int | - | boolean
If set, pool is mapped in such a way that modifications don't reach the
underlying medium. From the user's perspective this means that when the pool
is closed all changes are reverted. This feature is not supported for pools
located on Device DAX.
Always returns 0.
# CTL EXTERNAL CONFIGURATION #
In addition to direct function call, each write entry point can also be set
using two alternative methods.
The first method is to load a configuration directly from the **PMEMBLK_CONF**
environment variable.
The second method of loading an external configuration is to set the
**PMEMBLK_CONF_FILE** environment variable to point to a file that contains
a sequence of ctl queries.
See more in **pmem_ctl**(5) man page.
# SEE ALSO #
**libpmemblk**(7), **pmem_ctl**(5) and **<https://pmem.io>**
| 3,739 | 30.965812 | 80 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/libpmemblk/pmemblk_set_zero.3.md
|
---
layout: manual
Content-Style: 'text/css'
title: _MP(PMEMBLK_SET_ZERO, 3)
collection: libpmemblk
header: PMDK
date: pmemblk API version 1.1
...
[comment]: <> (SPDX-License-Identifier: BSD-3-Clause)
[comment]: <> (Copyright 2017-2018, Intel Corporation)
[comment]: <> (pmemblk_set_zero.3 -- man page for block management functions)
[NAME](#name)<br />
[SYNOPSIS](#synopsis)<br />
[DESCRIPTION](#description)<br />
[RETURN VALUE](#return-value)<br />
[SEE ALSO](#see-also)<br />
# NAME #
**pmemblk_set_zero**(), **pmemblk_set_error**() - block management functions
# SYNOPSIS #
```c
#include <libpmemblk.h>
int pmemblk_set_zero(PMEMblkpool *pbp, long long blockno);
int pmemblk_set_error(PMEMblkpool *pbp, long long blockno);
```
# DESCRIPTION #
The **pmemblk_set_zero**() function writes zeros to block number *blockno* in
persistent memory resident array of blocks *pbp*. Using this function is faster
than actually writing a block of zeros since **libpmemblk**(7) uses metadata to
indicate the block should read back as zero.
The **pmemblk_set_error**() function sets the error state for block number
*blockno* in persistent memory resident array of blocks *pbp*.
A block in the error state returns *errno* **EIO** when read.
Writing the block clears the error state and returns the block to normal use.
# RETURN VALUE #
On success, **pmemblk_set_zero**() and **pmemblk_set_error**() return 0.
On error, they return -1 and set *errno* appropriately.
# SEE ALSO #
**libpmemblk**(7) and **<https://pmem.io>**
| 1,527 | 27.296296 | 79 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/libpmemobj/pmemobj_list_insert.3.md
|
---
layout: manual
Content-Style: 'text/css'
title: _MP(PMEMOBJ_LIST_INSERT, 3)
collection: libpmemobj
header: PMDK
date: pmemobj API version 2.3
...
[comment]: <> (SPDX-License-Identifier: BSD-3-Clause)
[comment]: <> (Copyright 2017-2018, Intel Corporation)
[comment]: <> (pmemobj_list_insert.3 -- man page for non-transactional persistent atomic lists)
[NAME](#name)<br />
[SYNOPSIS](#synopsis)<br />
[DESCRIPTION](#description)<br />
[RETURN VALUE](#return-value)<br />
[SEE ALSO](#see-also)<br />
# NAME #
**pmemobj_list_insert**(), **pmemobj_list_insert_new**(),
**pmemobj_list_move**(), **pmemobj_list_remove**()
- non-transactional persistent atomic lists functions
# SYNOPSIS #
```c
#include <libpmemobj.h>
int pmemobj_list_insert(PMEMobjpool *pop, size_t pe_offset, void *head,
PMEMoid dest, int before, PMEMoid oid);
PMEMoid pmemobj_list_insert_new(PMEMobjpool *pop, size_t pe_offset,
void *head, PMEMoid dest, int before, size_t size,
	uint64_t type_num, pmemobj_constr constructor, void *arg);
int pmemobj_list_move(PMEMobjpool *pop,
size_t pe_old_offset, void *head_old,
size_t pe_new_offset, void *head_new,
PMEMoid dest, int before, PMEMoid oid);
int pmemobj_list_remove(PMEMobjpool *pop, size_t pe_offset,
void *head, PMEMoid oid, int free);
```
# DESCRIPTION #
In addition to the container operations on internal object collections
described in **pmemobj_first**(3), **libpmemobj**(7) provides
a mechanism for organizing persistent objects in user-defined, persistent,
atomic, circular, doubly-linked lists. All the routines and macros operating
on the persistent lists provide atomicity with respect to any power-fail
interruptions. If any of those operations is torn by program failure or system
crash, on recovery they are guaranteed to be entirely completed or discarded,
leaving the lists, persistent memory heap and internal object containers in a
consistent state.
The persistent atomic circular doubly linked lists support the following functionality:
+ Insertion of an object at the head of the list, or at the end of the list.
+ Insertion of an object before or after any element in the list.
+ Atomic allocation and insertion of a new object at the head of the list, or at the end of the list.
+ Atomic allocation and insertion of a new object before or after any element in the list.
+ Atomic moving of an element from one list to the specific location on another list.
+ Removal of any object in the list.
+ Atomic removal and freeing of any object in the list.
+ Forward or backward traversal through the list.
A list is headed by a *list_head* structure containing the object handle of the
first element on the list. The elements are doubly linked so that an arbitrary
element can be removed without the need to traverse the list. New elements can
be added to the list before or after an existing element, at the head of the
list, or at the tail of the list. A list may be traversed in either direction.
The user-defined structure of each element must contain a field of type
*list_entry* that holds the object handles to the previous and next element
on the list. Both the *list_head* and the *list_entry* structures are
declared in **\<libpmemobj.h\>**.
The functions below are intended to be used outside transactions - transactional
variants are described in manpages to functions mentioned at **TRANSACTIONAL OBJECT
MANIPULATION** in **libpmemobj**(7). Note that operations performed using this
non-transactional API are independent from their transactional counterparts.
If any non-transactional allocations or list manipulations are performed within
an open transaction, the changes will not be rolled back if such a transaction
is aborted or interrupted.
The list insertion and move functions use a common set of arguments to define
where an object will be inserted into the list. *dest* identifies the element
before or after which the object will be inserted, or, if *dest* is
**OID_NULL**, indicates that the object should be inserted at the head or
tail of the list. *before* determines where the object will be inserted:
+ **POBJ_LIST_DEST_BEFORE** - insert the element before the existing
element *dest*
+ **POBJ_LIST_DEST_AFTER** - insert the element after the existing element
*dest*
+ **POBJ_LIST_DEST_HEAD** - when *dest* is **OID_NULL**, insert the element
at the head of the list
+ **POBJ_LIST_DEST_TAIL** - when *dest* is **OID_NULL**, insert the element
at the tail of the list
>NOTE: Earlier versions of **libpmemobj**(7) do not define
**POBJ_LIST_DEST_BEFORE** and **POBJ_LIST_DEST_AFTER**. Use 1 for before,
and 0 for after.
The **pmemobj_list_insert**() function inserts the element represented by
object handle *oid* into the list referenced by *head*, at the location
specified by *dest* and *before* as described above. *pe_offset*
specifies the offset of the structure that connects the elements in
the list. All the handles *head*, *dest* and *oid* must point to objects
allocated from memory pool *pop*. *head* and *oid* cannot be **OID_NULL**.
The **pmemobj_list_insert_new**() function atomically allocates a new object
of given *size* and type *type_num* and inserts it into the list referenced
by *head* at the location specified by *dest* and *before* as described
above. *pe_offset* specifies the offset of the structure that connects the
elements in the list. The handles *head* and *dest* must point to objects
allocated from memory pool *pop*. Before returning,
**pmemobj_list_insert_new**() calls the *constructor* function, passing the
pool handle *pop*, the pointer to the newly allocated object *ptr*, and the
*arg* argument. It is guaranteed that the allocated object is either properly
initialized or, if the allocation is interrupted before the constructor
completes, the memory space reserved for the object is reclaimed. *head*
cannot be **OID_NULL**. The allocated object is also added to the internal
container associated with *type_num*, as described in **POBJ_FOREACH**(3).
The **pmemobj_list_move**() function moves the object represented by object
handle *oid* from the list referenced by *head_old* to the list referenced
by *head_new*, inserting it at the location specified by *dest* and *before*
as described above. *pe_old_offset* and *pe_new_offset* specify the offsets
of the structures that connect the elements in the old and new lists,
respectively. All the handles *head_old*, *head_new*, *dest* and *oid* must
point to objects allocated from memory pool *pop*. *head_old*, *head_new*
and *oid* cannot be **OID_NULL**.
The **pmemobj_list_remove**() function removes the object represented by object
handle *oid* from the list referenced by *head*. If *free* is set, it also
removes the object from the internal object container and frees the associated
memory space. *pe_offset* specifies the offset of the structure that connects
the elements in the list. Both *head* and *oid* must point to objects allocated
from memory pool *pop* and cannot be **OID_NULL**.
# RETURN VALUE #
On success, **pmemobj_list_insert**(), **pmemobj_list_remove**() and
**pmemobj_list_move**() return 0. On error, they return -1 and set
*errno* appropriately.
On success, **pmemobj_list_insert_new**() returns a handle to the newly
allocated object. If the constructor returns a non-zero value, the allocation
is canceled, -1 is returned, and *errno* is set to **ECANCELED**.
On other errors, **OID_NULL** is returned and *errno* is set appropriately.
# SEE ALSO #
**pmemobj_first**(3), **POBJ_FOREACH**(3), **libpmemobj**(7)
and **<https://pmem.io>**
| 7,567 | 45.146341 | 101 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/libpmemobj/toid_declare.3.md
|
---
layout: manual
Content-Style: 'text/css'
title: _MP(TOID_DECLARE, 3)
collection: libpmemobj
header: PMDK
date: pmemobj API version 2.3
...
[comment]: <> (SPDX-License-Identifier: BSD-3-Clause)
[comment]: <> (Copyright 2017-2018, Intel Corporation)
[comment]: <> (toid_declare.3 -- man page for obj type safety mechanism)
[NAME](#name)<br />
[SYNOPSIS](#synopsis)<br />
[DESCRIPTION](#description)<br />
[SEE ALSO](#see-also)<br />
# NAME #
**TOID_DECLARE**(), **TOID_DECLARE_ROOT**(), **TOID**(),
**TOID_TYPE_NUM**(), **TOID_TYPE_NUM_OF**(), **TOID_VALID**(),
**OID_INSTANCEOF**(), **TOID_ASSIGN**(), **TOID_IS_NULL**(),
**TOID_EQUALS**(), **TOID_TYPEOF**(), **TOID_OFFSETOF**(),
**DIRECT_RW**(), **D_RW**(), **DIRECT_RO**(),
**D_RO**() - libpmemobj type safety mechanism
# SYNOPSIS #
```c
#include <libpmemobj.h>
TOID_DECLARE(TYPE, uint64_t type_num)
TOID_DECLARE_ROOT(ROOT_TYPE)
TOID(TYPE)
TOID_TYPE_NUM(TYPE)
TOID_TYPE_NUM_OF(TOID oid)
TOID_VALID(TOID oid)
OID_INSTANCEOF(PMEMoid oid, TYPE)
TOID_ASSIGN(TOID o, VALUE)
TOID_IS_NULL(TOID o)
TOID_EQUALS(TOID lhs, TOID rhs)
TOID_TYPEOF(TOID o)
TOID_OFFSETOF(TOID o, FIELD)
DIRECT_RW(TOID oid)
D_RW(TOID oid)
DIRECT_RO(TOID oid)
D_RO(TOID oid)
```
# DESCRIPTION #
Operating on untyped object handles, as well as on direct untyped object
pointers (*void\**), may be confusing and error-prone. To facilitate
type safety, **libpmemobj**(7) defines a set of macros that provide static
type enforcement, catching potential errors at compile time. For example,
a compile-time error is generated when an attempt is made to assign a handle to
an object of one type to the object handle variable of another type of object.
The **TOID_DECLARE**() macro declares a typed *OID* of user-defined type
*TYPE* and type number *type_num*.
The **TOID_DECLARE_ROOT**() macro declares a typed *OID* of user-defined type
*ROOT_TYPE* and root object type number **POBJ_ROOT_TYPE_NUM**.
The **TOID**() macro declares a handle to an object of type *TYPE*,
where *TYPE* is the name of a user-defined structure. The typed *OID* must
be declared first using the **TOID_DECLARE**(), **TOID_DECLARE_ROOT**(),
**POBJ_LAYOUT_TOID**(3) or **POBJ_LAYOUT_ROOT**(3) macros.
The **TOID_TYPE_NUM**() macro returns the type number of the type specified
by *TYPE*.
The **TOID_TYPE_NUM_OF**() macro returns the type number of the object
specified by *oid*. The type number is read from the typed *OID*.
The **TOID_VALID**() macro validates whether the type number stored in
the object's metadata is equal to the type number read from the typed *OID*.
The **OID_INSTANCEOF**() macro checks whether the *oid* is of type *TYPE*.
The **TOID_ASSIGN**() macro assigns the object handle *VALUE* to typed *OID*
*o*.
The **TOID_IS_NULL**() macro evaluates to true if the object handle
represented by *o* is **OID_NULL**.
The **TOID_EQUALS**() macro evaluates to true if both the *lhs* and *rhs*
object handles reference the same persistent object.
The **TOID_TYPEOF**() macro returns the type of the object handle represented
by typed *OID* *o*.
The **TOID_OFFSETOF**() macro returns the offset of the *FIELD* member from
the start of the object represented by *o*.
The **DIRECT_RW**() macro and its shortened form **D_RW**() return a typed
write pointer (*TYPE\**) to an object represented by *oid*. If *oid* is
**OID_NULL**, the macro evaluates to NULL.
The **DIRECT_RO**() macro and its shortened form **D_RO**() return a typed
read-only (const) pointer (*TYPE\**) to an object represented by *oid*. If
*oid* is **OID_NULL**, the macro evaluates to NULL.
# SEE ALSO #
**OID_IS_NULL**(3), **POBJ_LAYOUT_ROOT**(3), **POBJ_LAYOUT_TOID**(3),
**libpmemobj**(7) and **<https://pmem.io>**
| 3,715 | 32.781818 | 79 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/libpmemobj/pmemobj_first.3.md
|
---
layout: manual
Content-Style: 'text/css'
title: _MP(PMEMOBJ_FIRST, 3)
collection: libpmemobj
header: PMDK
date: pmemobj API version 2.3
...
[comment]: <> (SPDX-License-Identifier: BSD-3-Clause)
[comment]: <> (Copyright 2017-2018, Intel Corporation)
[comment]: <> (pmemobj_first.3 -- man page for pmemobj container operations)
[NAME](#name)<br />
[SYNOPSIS](#synopsis)<br />
[DESCRIPTION](#description)<br />
[SEE ALSO](#see-also)<br />
# NAME #
**pmemobj_first**(), **pmemobj_next**(),
**POBJ_FIRST**(), **POBJ_FIRST_TYPE_NUM**(),
**POBJ_NEXT**(), **POBJ_NEXT_TYPE_NUM**(),
**POBJ_FOREACH**(), **POBJ_FOREACH_SAFE**(),
**POBJ_FOREACH_TYPE**(), **POBJ_FOREACH_SAFE_TYPE**()
- pmemobj container operations
# SYNOPSIS #
```c
#include <libpmemobj.h>
PMEMoid pmemobj_first(PMEMobjpool *pop);
PMEMoid pmemobj_next(PMEMoid oid);
POBJ_FIRST(PMEMobjpool *pop, TYPE)
POBJ_FIRST_TYPE_NUM(PMEMobjpool *pop, uint64_t type_num)
POBJ_NEXT(TOID oid)
POBJ_NEXT_TYPE_NUM(PMEMoid oid)
POBJ_FOREACH(PMEMobjpool *pop, PMEMoid varoid)
POBJ_FOREACH_SAFE(PMEMobjpool *pop, PMEMoid varoid, PMEMoid nvaroid)
POBJ_FOREACH_TYPE(PMEMobjpool *pop, TOID var)
POBJ_FOREACH_SAFE_TYPE(PMEMobjpool *pop, TOID var, TOID nvar)
```
# DESCRIPTION #
The **libpmemobj**(7) container operations provide a mechanism that allows
iteration through the internal object collection, either looking for a
specific object, or performing a specific operation on each object of a given
type. Software should not make any assumptions about the order of the objects
in the internal object containers.
The **pmemobj_first**() function returns the first object from the pool.
The **POBJ_FIRST**() macro returns the first object from the pool of
the type specified by *TYPE*.
The **POBJ_FIRST_TYPE_NUM**() macro returns the first object from the pool
of the type specified by *type_num*.
The **pmemobj_next**() function returns the next object from the pool.
The **POBJ_NEXT**() macro returns the next object of the same type
as the object referenced by *oid*.
The **POBJ_NEXT_TYPE_NUM**() macro returns the next object of the same type
number as the object referenced by *oid*.
The following four macros provide a more convenient way to iterate through the
internal collections, performing a specific operation on each object.
The **POBJ_FOREACH**() macro performs a specific operation on each allocated
object stored in the persistent memory pool *pop*. It traverses the internal
collection of all the objects, assigning a handle to each element in turn to
*varoid*.
The **POBJ_FOREACH_TYPE**() macro performs a specific operation on each
allocated object stored in the persistent memory pool *pop* that has the same
type as *var*. It traverses the internal collection of all the objects of the
specified type, assigning a handle to each element in turn to *var*.
The macros **POBJ_FOREACH_SAFE**() and **POBJ_FOREACH_SAFE_TYPE**() work in a
similar fashion as **POBJ_FOREACH**() and **POBJ_FOREACH_TYPE**(), except that
prior to performing the operation on the object, they preserve a handle to the
next object in the collection by assigning it to *nvaroid* or *nvar*,
respectively. This allows safe deletion of selected objects while iterating
through the collection.
# RETURN VALUE #
**pmemobj_first**() returns the first object from the pool, or, if the pool
is empty, **OID_NULL**.
**pmemobj_next**() returns the next object from the pool. If the object
referenced by *oid* is the last object in the collection, or if *oid*
is *OID_NULL*, **pmemobj_next**() returns **OID_NULL**.
# SEE ALSO #
**libpmemobj**(7) and **<https://pmem.io>**
| 3,618 | 33.798077 | 78 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/libpmemobj/pmemobj_alloc.3.md
|
---
layout: manual
Content-Style: 'text/css'
title: _MP(PMEMOBJ_ALLOC, 3)
collection: libpmemobj
header: PMDK
date: pmemobj API version 2.3
...
[comment]: <> (SPDX-License-Identifier: BSD-3-Clause)
[comment]: <> (Copyright 2017-2020, Intel Corporation)
[comment]: <> (pmemobj_alloc.3 -- man page for non-transactional atomic allocations)
[NAME](#name)<br />
[SYNOPSIS](#synopsis)<br />
[DESCRIPTION](#description)<br />
[RETURN VALUE](#return-value)<br />
[SEE ALSO](#see-also)<br />
# NAME #
**pmemobj_alloc**(), **pmemobj_xalloc**(), **pmemobj_zalloc**(),
**pmemobj_realloc**(), **pmemobj_zrealloc**(), **pmemobj_strdup**(),
**pmemobj_wcsdup**(), **pmemobj_alloc_usable_size**(), **pmemobj_defrag**(),
**POBJ_NEW**(), **POBJ_ALLOC**(), **POBJ_ZNEW**(), **POBJ_ZALLOC**(),
**POBJ_REALLOC**(), **POBJ_ZREALLOC**(), **POBJ_FREE**()
- non-transactional atomic allocations
# SYNOPSIS #
```c
#include <libpmemobj.h>
typedef int (*pmemobj_constr)(PMEMobjpool *pop, void *ptr, void *arg);
int pmemobj_alloc(PMEMobjpool *pop, PMEMoid *oidp, size_t size,
uint64_t type_num, pmemobj_constr constructor, void *arg);
int pmemobj_xalloc(PMEMobjpool *pop, PMEMoid *oidp, size_t size,
uint64_t type_num, uint64_t flags, pmemobj_constr constructor,
void *arg); (EXPERIMENTAL)
int pmemobj_zalloc(PMEMobjpool *pop, PMEMoid *oidp, size_t size,
uint64_t type_num);
void pmemobj_free(PMEMoid *oidp);
int pmemobj_realloc(PMEMobjpool *pop, PMEMoid *oidp, size_t size,
uint64_t type_num);
int pmemobj_zrealloc(PMEMobjpool *pop, PMEMoid *oidp, size_t size,
uint64_t type_num);
int pmemobj_strdup(PMEMobjpool *pop, PMEMoid *oidp, const char *s,
uint64_t type_num);
int pmemobj_wcsdup(PMEMobjpool *pop, PMEMoid *oidp, const wchar_t *s,
uint64_t type_num);
size_t pmemobj_alloc_usable_size(PMEMoid oid);
int pmemobj_defrag(PMEMobjpool *pop, PMEMoid **oidv, size_t oidcnt,
struct pobj_defrag_result *result);
POBJ_NEW(PMEMobjpool *pop, TOID *oidp, TYPE, pmemobj_constr constructor,
void *arg)
POBJ_ALLOC(PMEMobjpool *pop, TOID *oidp, TYPE, size_t size,
pmemobj_constr constructor, void *arg)
POBJ_ZNEW(PMEMobjpool *pop, TOID *oidp, TYPE)
POBJ_ZALLOC(PMEMobjpool *pop, TOID *oidp, TYPE, size_t size)
POBJ_REALLOC(PMEMobjpool *pop, TOID *oidp, TYPE, size_t size)
POBJ_ZREALLOC(PMEMobjpool *pop, TOID *oidp, TYPE, size_t size)
POBJ_FREE(TOID *oidp)
```
# DESCRIPTION #
Functions described in this document provide the mechanism to allocate,
resize and free objects from the persistent memory pool in a thread-safe
and fail-safe manner. All the routines are atomic with respect to other threads
and any power-fail interruptions. If any of these operations is torn by program
failure or system crash, on recovery they are guaranteed to be entirely completed
or discarded, leaving the persistent memory heap and internal object containers
in a consistent state.
All these functions should be used outside transactions. If executed within
an open transaction they are considered durable immediately after completion.
Changes made with these functions will not be rolled back if the transaction
is aborted or interrupted. They have no information about other changes made
by transactional API, so if the same data is modified in a single transaction
using transactional and then non-transactional API, transaction abort
will likely corrupt the data.
The allocations are always aligned to a cache-line boundary.
The *pmemobj_constr* type represents a constructor for atomic allocation
from the persistent memory heap associated with memory pool *pop*. *ptr*
is a pointer to the allocated memory area and *arg* is a user-defined
argument passed to the constructor.
The **pmemobj_alloc**() function allocates a new object from the persistent
memory heap associated with memory pool *pop*. The *PMEMoid* of the allocated
object is stored in *oidp*. If *oidp* is NULL, then the newly allocated object
may be accessed only by iterating objects in the object container associated
with the type number *type_num*, as described in **POBJ_FOREACH**(3). If *oidp*
points to a memory location from the **pmemobj** heap, *oidp* is modified
atomically. Before returning, **pmemobj_alloc**() calls the *constructor*
function, passing the pool handle *pop*, the pointer to the newly allocated
object in *ptr*, and the *arg* argument. It is guaranteed that the
allocated object is either properly initialized, or if the allocation
is interrupted before the constructor completes, the memory space reserved
for the object is reclaimed. *size* can be any non-zero value; however,
due to internal padding and object metadata, the actual size of the allocation
will differ from the requested size by at least 64 bytes. For this reason,
making allocations of a size less than 64 bytes is extremely inefficient
and discouraged. The allocated object is added to the internal container
associated with *type_num*.
**pmemobj_xalloc**() is equivalent to **pmemobj_alloc**(), but with an
additional *flags* argument that is a bitmask of the following values:
+ **POBJ_XALLOC_ZERO** - zero the allocated object (equivalent of **pmemobj_zalloc**())
+ **POBJ_CLASS_ID(class_id)** - allocate an object from the allocation class
*class_id*. The class id cannot be 0.
+ **POBJ_ARENA_ID(arena_id)** - allocate an object from the arena specified by
*arena_id*. The arena must exist, otherwise, the behavior is undefined.
If *arena_id* is equal to 0, then the arena assigned to the current thread will be used.
The **pmemobj_zalloc**() function allocates a new zeroed object from
the persistent memory heap associated with memory pool *pop*. The *PMEMoid*
of the allocated object is stored in *oidp*. If *oidp* is NULL, then
the newly allocated object may be accessed only by iterating objects in the
object container associated with the type number *type_num*, as described in
**POBJ_FOREACH**(3). If *oidp* points to a memory location from the **pmemobj**
heap, *oidp* is modified atomically. *size* can be any non-zero value;
however, due to internal padding and object metadata, the actual size
of the allocation will differ from the requested one by at least 64 bytes.
For this reason, making allocations of a size less than 64 bytes is extremely
inefficient and discouraged. The allocated object is added to the internal
container associated with *type_num*.
The **pmemobj_free**() function frees the memory space represented by *oidp*,
which must have been allocated by a previous call to **pmemobj_alloc**(),
**pmemobj_xalloc**(), **pmemobj_zalloc**(), **pmemobj_realloc**(), or
**pmemobj_zrealloc**(). **pmemobj_free**() provides the same semantics as
**free**(3), but instead of operating on the process heap supplied by the
system, it operates on the persistent memory heap. If *oidp* is **OID_NULL**,
no operation is performed. If *oidp* is NULL or if it points to the root
object's *OID*, the behavior of **pmemobj_free**() is undefined. *oidp* is
set to **OID_NULL** after the memory is freed. If *oidp* points to a memory
location from the **pmemobj** heap, *oidp* is modified atomically.
The **pmemobj_realloc**() function changes the size of the object represented
by *oidp* to *size* bytes. **pmemobj_realloc**() provides similar semantics to
**realloc**(3), but operates on the persistent memory heap associated with
memory pool *pop*. The resized object is also added or moved to the internal
container associated with type number *type_num*. The contents will be
unchanged in the range from the start of the region up to the minimum of the
old and new sizes. If the new size is larger than the old size, the added
memory will *not* be initialized. If *oidp* is *OID_NULL*, then the call is
equivalent to *pmemobj_alloc(pop, size, type_num)*. If *size* is equal to zero,
and *oidp* is not **OID_NULL**, then the call is equivalent to
*pmemobj_free(oid)*. Unless *oidp* is **OID_NULL**, it must have been allocated
by an earlier call to **pmemobj_alloc**(), **pmemobj_xalloc**(),
**pmemobj_zalloc**(), **pmemobj_realloc**(), or **pmemobj_zrealloc**(). Note
that the object handle value may change as a result of reallocation. If the
object was moved, the memory space represented by *oid* is reclaimed. If
*oidp* points to a memory location from the **pmemobj** heap, *oidp* is
modified atomically. If *oidp* is NULL or if it points to the root
object's *OID*, the behavior of **pmemobj_realloc**() is undefined.
**pmemobj_zrealloc**() is equivalent to **pmemobj_realloc**(), except that
if the new size is larger than the old size, the added memory will be zeroed.
The **pmemobj_strdup**() function stores a handle to a new object in *oidp*
which is a duplicate of the string *s*. **pmemobj_strdup**() provides the
same semantics as **strdup**(3), but operates on the persistent memory heap
associated with memory pool *pop*. If *oidp* is NULL, then the newly allocated
object may be accessed only by iterating objects in the object container
associated with type number *type_num*, as described in **POBJ_FOREACH**(3).
If *oidp* points to a memory location from the **pmemobj** heap, *oidp*
is modified atomically. The allocated string object is also added to the
internal container associated with type number *type_num*. Memory for the new
string is obtained with **pmemobj_alloc**(), on the given memory pool, and can
be freed with **pmemobj_free**() on the same memory pool.
**pmemobj_wcsdup**() is equivalent to **pmemobj_strdup**(), but operates on
a wide character string (wchar_t) rather than a standard character string.
The **pmemobj_alloc_usable_size**() function provides the same semantics as
**malloc_usable_size**(3), but instead of the process heap supplied by the
system, it operates on the persistent memory heap.
The **POBJ_NEW**() macro is a wrapper around the **pmemobj_alloc**() function.
Instead of taking a pointer to *PMEMoid*, it takes a pointer to the typed *OID*
of type name *TYPE*, and passes the size and type number from the typed *OID*
to **pmemobj_alloc**().
The **POBJ_ALLOC**() macro is equivalent to **POBJ_NEW**, except that instead
of using the size of the typed *OID*, passes *size* to **pmemobj_alloc**().
The **POBJ_ZNEW**() macro is a wrapper around the **pmemobj_zalloc**()
function. Instead of taking a pointer to *PMEMoid*, it takes a pointer to the
typed *OID* of type name *TYPE*, and passes the size and type number from
the typed *OID* to **pmemobj_zalloc**().
The **POBJ_ZALLOC**() macro is equivalent to **POBJ_ZNEW**, except that instead
of using the size of the typed *OID*, passes *size* to **pmemobj_zalloc**().
The **POBJ_REALLOC**() macro is a wrapper around the **pmemobj_realloc**()
function. Instead of taking a pointer to *PMEMoid*, it takes a pointer to the
typed *OID* of type name *TYPE*, and passes the type number from the typed
*OID* to **pmemobj_realloc**().
The **POBJ_ZREALLOC**() macro is a wrapper around the **pmemobj_zrealloc**()
function. Instead of taking a pointer to *PMEMoid*, it takes a pointer to the
typed *OID* of type name *TYPE*, and passes the type number from the typed
*OID* to **pmemobj_zrealloc**().
The **POBJ_FREE**() macro is a wrapper around the **pmemobj_free**() function
which takes a pointer to the typed *OID* instead of to *PMEMoid*.
The **pmemobj_defrag**() function performs defragmentation
on the objects provided through the array of pointers to PMEMoids *oidv*
with size *oidcnt*. If an object from the provided array is selected to be moved
to a new location in the heap, it is reallocated and all provided pointers
to that object are atomically updated.
To maintain data structure consistency, applications should always provide
all pointers for an object to the **pmemobj_defrag**() method. This ensures that,
even in the presence of failures, all pointers to the object will either point
to the old or a new location.
All objects and pointers to objects should belong to the pool *pop* or,
in case of pointers, can also reside in volatile memory.
Defragmentation across pools is not supported.
Objects in the array that are *OID_NULL* are skipped over and no operation
is performed on them. All other objects must have been allocated
by an earlier call to **pmemobj_alloc**(), **pmemobj_xalloc**(),
**pmemobj_zalloc**(), **pmemobj_realloc**(), **pmemobj_zrealloc**(),
**pmemobj_strdup**() or **pmemobj_wcsdup**().
The *result* variable is an instance of *struct pobj_defrag_result* and,
if not NULL, can be used to read *total*, the number of objects found that
were processed, and *relocated*, the number of objects that were
relocated during defragmentation. These variables are always initialized and
can be non-zero, even if the return value of **pmemobj_defrag**() indicated a
failure. This is because the failure might have occurred after some objects were
already processed.
# RETURN VALUE #
On success, **pmemobj_alloc**() and **pmemobj_xalloc**() return 0. If *oidp*
is not NULL, the *PMEMoid* of the newly allocated object is stored in *oidp*.
If the allocation fails, -1 is returned and *errno* is set appropriately. If
the constructor returns a non-zero value, the allocation is canceled, -1 is
returned, and *errno* is set to **ECANCELED**. If *size* equals 0, or the
*flags* for **pmemobj_xalloc**() are invalid, -1 is returned, *errno* is set
to **EINVAL**, and *oidp* is left untouched.
On success, **pmemobj_zalloc**() returns 0. If *oidp* is not NULL, the
*PMEMoid* of the newly allocated object is stored in *oidp*. If the allocation
fails, it returns -1 and sets *errno* appropriately. If *size* equals 0, it
returns -1, sets *errno* to **EINVAL**, and leaves *oidp* untouched.
The **pmemobj_free**() function returns no value.
On success, **pmemobj_realloc**() and **pmemobj_zrealloc**() return 0 and
update *oidp* if necessary. On error, they return -1 and set *errno*
appropriately.
On success, **pmemobj_strdup**() and **pmemobj_wcsdup**() return 0. If
*oidp* is not NULL, the *PMEMoid* of the duplicated string object is stored
in *oidp*. If *s* is NULL, they return -1, set *errno* to **EINVAL**, and
leave *oidp* untouched. On other errors, they return -1 and set *errno*
appropriately.
The **pmemobj_alloc_usable_size**() function returns the number of usable bytes
in the object represented by *oid*. If *oid* is **OID_NULL**, it returns 0.
On success, **pmemobj_defrag**() returns 0. If defragmentation was
unsuccessful or only partially successful (i.e. if it was aborted halfway
through due to lack of resources), -1 is returned.
# SEE ALSO #
**free**(3), **POBJ_FOREACH**(3), **realloc**(3),
**strdup**(3), **wcsdup**(3), **libpmemobj**(7)
and **<https://pmem.io>**
| 14,596 | 51.132143 | 87 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/libpmemobj/pmemobj_mutex_zero.3.md
|
---
layout: manual
Content-Style: 'text/css'
title: _MP(PMEMOBJ_MUTEX_ZERO, 3)
collection: libpmemobj
header: PMDK
date: pmemobj API version 2.3
...
[comment]: <> (SPDX-License-Identifier: BSD-3-Clause)
[comment]: <> (Copyright 2017-2018, Intel Corporation)
[comment]: <> (pmemobj_mutex_zero.3 -- man page for locking functions from libpmemobj library)
[NAME](#name)<br />
[SYNOPSIS](#synopsis)<br />
[DESCRIPTION](#description)<br />
[RETURN VALUE](#return-value)<br />
[SEE ALSO](#see-also)<br />
# NAME #
**pmemobj_mutex_zero**(), **pmemobj_mutex_lock**(), **pmemobj_mutex_timedlock**(),
**pmemobj_mutex_trylock**(), **pmemobj_mutex_unlock**(),
**pmemobj_rwlock_zero**(), **pmemobj_rwlock_rdlock**(), **pmemobj_rwlock_wrlock**(),
**pmemobj_rwlock_timedrdlock**(), **pmemobj_rwlock_timedwrlock**(), **pmemobj_rwlock_tryrdlock**(),
**pmemobj_rwlock_trywrlock**(), **pmemobj_rwlock_unlock**(),
**pmemobj_cond_zero**(), **pmemobj_cond_broadcast**(), **pmemobj_cond_signal**(),
**pmemobj_cond_timedwait**(), **pmemobj_cond_wait**()
- pmemobj synchronization primitives
# SYNOPSIS #
```c
#include <libpmemobj.h>
void pmemobj_mutex_zero(PMEMobjpool *pop, PMEMmutex *mutexp);
int pmemobj_mutex_lock(PMEMobjpool *pop, PMEMmutex *mutexp);
int pmemobj_mutex_timedlock(PMEMobjpool *pop, PMEMmutex *restrict mutexp,
const struct timespec *restrict abs_timeout);
int pmemobj_mutex_trylock(PMEMobjpool *pop, PMEMmutex *mutexp);
int pmemobj_mutex_unlock(PMEMobjpool *pop, PMEMmutex *mutexp);
void pmemobj_rwlock_zero(PMEMobjpool *pop, PMEMrwlock *rwlockp);
int pmemobj_rwlock_rdlock(PMEMobjpool *pop, PMEMrwlock *rwlockp);
int pmemobj_rwlock_wrlock(PMEMobjpool *pop, PMEMrwlock *rwlockp);
int pmemobj_rwlock_timedrdlock(PMEMobjpool *pop, PMEMrwlock *restrict rwlockp,
const struct timespec *restrict abs_timeout);
int pmemobj_rwlock_timedwrlock(PMEMobjpool *pop, PMEMrwlock *restrict rwlockp,
const struct timespec *restrict abs_timeout);
int pmemobj_rwlock_tryrdlock(PMEMobjpool *pop, PMEMrwlock *rwlockp);
int pmemobj_rwlock_trywrlock(PMEMobjpool *pop, PMEMrwlock *rwlockp);
int pmemobj_rwlock_unlock(PMEMobjpool *pop, PMEMrwlock *rwlockp);
void pmemobj_cond_zero(PMEMobjpool *pop, PMEMcond *condp);
int pmemobj_cond_broadcast(PMEMobjpool *pop, PMEMcond *condp);
int pmemobj_cond_signal(PMEMobjpool *pop, PMEMcond *condp);
int pmemobj_cond_timedwait(PMEMobjpool *pop, PMEMcond *restrict condp,
PMEMmutex *restrict mutexp, const struct timespec *restrict abs_timeout);
int pmemobj_cond_wait(PMEMobjpool *pop, PMEMcond *restrict condp,
PMEMmutex *restrict mutexp);
```
# DESCRIPTION #
**libpmemobj**(7) provides several types of synchronization primitives
designed to be used with persistent memory. The pmem-aware lock implementation
is based on the standard POSIX Threads Library, as described in
**pthread_mutex_init**(3), **pthread_rwlock_init**(3) and
**pthread_cond_init**(3). Pmem-aware locks provide semantics similar to
standard **pthread** locks, except that they are embedded in pmem-resident
objects and are considered initialized by zeroing them. Therefore, locks
allocated with **pmemobj_zalloc**(3) or **pmemobj_tx_zalloc**(3) do not require
another initialization step. For performance reasons, they are also padded up
to 64 bytes (cache line size).
On FreeBSD, since all **pthread** locks are dynamically
allocated, while the lock object is still padded up to 64 bytes
for consistency with Linux, only the pointer to the lock is embedded in the
pmem-resident object. **libpmemobj**(7) transparently manages freeing of the
locks when the pool is closed.
The fundamental property of pmem-aware locks is their automatic
reinitialization every time the persistent object store pool is opened. Thus,
all the pmem-aware locks may be considered initialized (unlocked) immediately
after the pool is opened, regardless of their state at the time the pool was
closed for the last time.
Pmem-aware mutexes, read/write locks and condition variables must be declared
with the *PMEMmutex*, *PMEMrwlock*, or *PMEMcond* type, respectively.
The **pmemobj_mutex_zero**() function explicitly initializes the pmem-aware
mutex *mutexp* by zeroing it. Initialization is not necessary if the object
containing the mutex has been allocated using **pmemobj_zalloc**(3) or
**pmemobj_tx_zalloc**(3).
The **pmemobj_mutex_lock**() function locks the pmem-aware mutex *mutexp*.
If the mutex is already locked, the calling thread will block until the mutex
becomes available. If this is the first use of the mutex since the opening of
the pool *pop*, the mutex is automatically reinitialized and then locked.
**pmemobj_mutex_timedlock**() performs the same action as
**pmemobj_mutex_lock**(), but will not wait beyond *abs_timeout* to obtain the
lock before returning.
The **pmemobj_mutex_trylock**() function locks pmem-aware mutex *mutexp*.
If the mutex is already locked, **pmemobj_mutex_trylock**() will not block
waiting for the mutex, but will return an error. If this is the first
use of the mutex since the opening of the pool *pop*, the mutex is
automatically reinitialized and then locked.
The **pmemobj_mutex_unlock**() function unlocks the pmem-aware mutex
*mutexp*. Undefined behavior follows if a thread tries to unlock a
mutex that has not been locked by it, or if a thread tries to release a mutex
that is already unlocked or has not been initialized.
The **pmemobj_rwlock_zero**() function is used to explicitly initialize the
pmem-aware read/write lock *rwlockp* by zeroing it. Initialization is not
necessary if the object containing the lock has been allocated using
**pmemobj_zalloc**(3) or **pmemobj_tx_zalloc**(3).
The **pmemobj_rwlock_rdlock**() function acquires a read lock on *rwlockp*,
provided that the lock is not presently held for writing and no writer threads
are presently blocked on the lock. If the read lock cannot be acquired
immediately, the calling thread blocks until it can acquire the lock. If this
is the first use of the lock since the opening of the pool *pop*, the lock is
automatically reinitialized and then acquired.
**pmemobj_rwlock_timedrdlock**() performs the same action as
**pmemobj_rwlock_rdlock**(), but will not wait beyond *abs_timeout* to obtain
the lock before returning. A thread may hold multiple concurrent read locks.
If so, **pmemobj_rwlock_unlock**() must be called once for each lock obtained.
The results of acquiring a read lock while the calling thread holds a write
lock are undefined.
The **pmemobj_rwlock_wrlock**() function blocks until a write lock can be
acquired against read/write lock *rwlockp*. If this is the first use of the
lock since the opening of the pool *pop*, the lock is automatically
reinitialized and then acquired.
**pmemobj_rwlock_timedwrlock**() performs the same action, but will not wait
beyond *abs_timeout* to obtain the lock before returning.
The **pmemobj_rwlock_tryrdlock**() function performs the same action as
**pmemobj_rwlock_rdlock**(), but does not block if the lock cannot be
immediately obtained. The results are undefined if the calling thread already
holds the lock at the time the call is made.
The **pmemobj_rwlock_trywrlock**() function performs the same action as
**pmemobj_rwlock_wrlock**(), but does not block if the lock cannot be immediately
obtained. The results are undefined if the calling thread already holds the lock
at the time the call is made.
The **pmemobj_rwlock_unlock**() function is used to release the read/write
lock previously obtained by **pmemobj_rwlock_rdlock**(),
**pmemobj_rwlock_wrlock**(), **pmemobj_rwlock_tryrdlock**(), or
**pmemobj_rwlock_trywrlock**().
The **pmemobj_cond_zero**() function explicitly initializes the pmem-aware
condition variable *condp* by zeroing it. Initialization is not necessary if
the object containing the condition variable has been allocated using
**pmemobj_zalloc**(3) or **pmemobj_tx_zalloc**(3).
The difference between **pmemobj_cond_broadcast**() and
**pmemobj_cond_signal**() is that the former unblocks all threads waiting
for the condition variable, whereas the latter blocks only one waiting thread.
If no threads are waiting on *condp*, neither function has any effect. If more
than one thread is blocked on a condition variable, the used scheduling policy
determines the order in which threads are unblocked. The same mutex used for
waiting must be held while calling either function. Although neither function
strictly enforces this requirement, undefined behavior may follow if the mutex
is not held.
The **pmemobj_cond_timedwait**() and **pmemobj_cond_wait**() functions block
on a condition variable. They must be called with mutex *mutexp* locked by
the calling thread, or undefined behavior results. These functions atomically
release mutex *mutexp* and cause the calling thread to block on the condition
variable *condp*; atomically here means "atomically with respect to access by
another thread to the mutex and then the condition variable". That is, if
another thread is able to acquire the mutex after the about-to-block thread
has released it, then a subsequent call to **pmemobj_cond_broadcast**() or
**pmemobj_cond_signal**() in that thread will behave as if it were issued
after the about-to-block thread has blocked. Upon successful return, the mutex
will be locked and owned by the calling thread.
# RETURN VALUE #
The **pmemobj_mutex_zero**(), **pmemobj_rwlock_zero**()
and **pmemobj_cond_zero**() functions return no value.
Other locking functions return 0 on success. Otherwise, an error
number will be returned to indicate the error.
# SEE ALSO #
**pmemobj_tx_zalloc**(3), **pmemobj_zalloc**(3), **pthread_cond_init**(3),
**pthread_mutex_init**(3), **pthread_rwlock_init**(3), **libpmem**(7),
**libpmemobj**(7) and **<https://pmem.io>**
| 9,761 | 47.567164 | 99 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/libpmemobj/pmemobj_root.3.md
|
---
layout: manual
Content-Style: 'text/css'
title: _MP(PMEMOBJ_ROOT, 3)
collection: libpmemobj
header: PMDK
date: pmemobj API version 2.3
...
[comment]: <> (SPDX-License-Identifier: BSD-3-Clause)
[comment]: <> (Copyright 2017-2018, Intel Corporation)
[comment]: <> (pmemobj_root.3 -- man page for root object management)
[NAME](#name)<br />
[SYNOPSIS](#synopsis)<br />
[DESCRIPTION](#description)<br />
[RETURN VALUE](#return-value)<br />
[SEE ALSO](#see-also)<br />
# NAME #
**pmemobj_root**(), **pmemobj_root_construct**(),
**POBJ_ROOT**(), **pmemobj_root_size**() - root object management
# SYNOPSIS #
```c
#include <libpmemobj.h>
PMEMoid pmemobj_root(PMEMobjpool *pop, size_t size);
PMEMoid pmemobj_root_construct(PMEMobjpool *pop, size_t size,
pmemobj_constr constructor, void *arg);
POBJ_ROOT(PMEMobjpool *pop, TYPE)
size_t pmemobj_root_size(PMEMobjpool *pop);
```
# DESCRIPTION #
The root object of a persistent memory pool is an entry point for all other
persistent objects allocated using the **libpmemobj** API. In other words,
every object stored in the persistent memory pool has the root
object at the end of its reference path. It may be assumed that for each
persistent memory pool the root object always exists, and there is exactly
one root object in each pool.
The **pmemobj_root**() function creates or resizes the root object for the
persistent memory pool *pop*. If this is the first call to **pmemobj_root**(),
the requested *size* is greater than zero and the root object does not exist,
it is implicitly allocated
in a thread-safe manner, so the function may be called by more than one
thread simultaneously (as long as all threads use the identical *size* value).
The size of the root object is guaranteed to be not less than the requested
*size*. If the requested size is larger than the current size, the root
object is automatically resized. In such case, the old data is preserved and
the extra space is zeroed. If the requested size is equal to or smaller than
the current size, the root object remains unchanged.
If the requested *size* is equal to zero, the root object is not allocated.
**pmemobj_root_construct**() performs the same actions as **pmemobj_root**(),
but instead of zeroing the newly allocated object a *constructor* function
is called to initialize the object. The constructor is also called on
reallocations.
The **POBJ_ROOT**() macro works the same way as the **pmemobj_root**() function
except it returns a typed *OID* value.
The **pmemobj_root_size**() function returns the current size of the root object
associated with the persistent memory pool *pop*.
# RETURN VALUE #
Upon success, **pmemobj_root**() returns a handle to the root object associated
with the persistent memory pool *pop*. The same root object handle is returned
in all the threads. If the requested object size is larger than the maximum
allocation size supported for the pool, or if there is not enough free
space in the pool to satisfy a reallocation request, **pmemobj_root**() returns
**OID_NULL** and sets *errno* to ENOMEM.
If the *size* was equal to zero and the root object has not been allocated,
**pmemobj_root**() returns **OID_NULL** and sets *errno* to EINVAL.
If the **pmemobj_root_construct**() constructor fails, the allocation is
canceled, **pmemobj_root_construct**() returns *OID_NULL*, and *errno* is set
to **ECANCELED**. **pmemobj_root_size**() can be used in the constructor to
check whether this is the first call to the constructor.
**POBJ_ROOT**() returns a typed *OID* of type *TYPE* instead of the
*PMEMoid* returned by **pmemobj_root**().
The **pmemobj_root_size**() function returns the current size of the root object
associated with the persistent memory pool *pop*. The returned size is the
largest value requested by any of the earlier **pmemobj_root**() calls. If the
root object has not been allocated yet, **pmemobj_root_size**() returns 0.
# SEE ALSO #
**libpmemobj**(7) and **<https://pmem.io>**
| 3,979 | 39.612245 | 80 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/libpmemobj/pmemobj_open.3.md
|
---
layout: manual
Content-Style: 'text/css'
title: _MP(PMEMOBJ_OPEN, 3)
collection: libpmemobj
header: PMDK
date: pmemobj API version 2.3
...
[comment]: <> (SPDX-License-Identifier: BSD-3-Clause)
[comment]: <> (Copyright 2017-2018, Intel Corporation)
[comment]: <> (pmemobj_open.3 -- man page for most commonly used functions from libpmemobj library)
[NAME](#name)<br />
[SYNOPSIS](#synopsis)<br />
[DESCRIPTION](#description)<br />
[RETURN VALUE](#return-value)<br />
[CAVEATS](#caveats)<br />
[SEE ALSO](#see-also)<br />
# NAME #
_UW(pmemobj_open), _UW(pmemobj_create),
**pmemobj_close**(), _UW(pmemobj_check),
**pmemobj_set_user_data**(), **pmemobj_get_user_data**()
- create, open, close and validate persistent memory transactional object store
# SYNOPSIS #
```c
#include <libpmemobj.h>
_UWFUNCR1(PMEMobjpool, *pmemobj_open, *path, const char *layout)
_UWFUNCR1(PMEMobjpool, *pmemobj_create, *path, =q=const char *layout,
size_t poolsize, mode_t mode=e=)
void pmemobj_close(PMEMobjpool *pop);
_UWFUNCR1(int, pmemobj_check, *path, const char *layout)
void pmemobj_set_user_data(PMEMobjpool *pop, void *data);
void *pmemobj_get_user_data(PMEMobjpool *pop);
```
_UNICODE()
# DESCRIPTION #
To use the pmem-resident transactional object store provided by
**libpmemobj**(7), a *memory pool* must first be created
with the _UW(pmemobj_create) function described below. Existing pools
may be opened with the _UW(pmemobj_open) function.
None of the three functions described below are thread-safe with respect
to any other **libpmemobj**(7) function. In other words, when creating,
opening or deleting a pool, nothing else in the library can happen in parallel,
and therefore these functions should be called from the main thread.
Once created, the memory pool is represented by an opaque handle,
of type *PMEMobjpool\**, which is passed to most of the other **libpmemobj**(7)
functions. Internally, **libpmemobj**(7) will use either **pmem_persist**(3)
or **msync**(2) when it needs to flush changes, depending on whether the memory
pool appears to be persistent memory or a regular file (see the
**pmem_is_pmem**(3) function in **libpmem**(7) for more information). There is
no need for applications to flush changes directly when using the object
memory API provided by **libpmemobj**(7).
The _UW(pmemobj_create) function creates a transactional object store with the
given total *poolsize*. *path* specifies the name of the memory pool file to be
created. *layout* specifies the application's layout type in the form of a
string. The layout name is not interpreted by **libpmemobj**(7), but may be
used as a check when _UW(pmemobj_open) is called. The layout name, including
the terminating null byte ('\0'), cannot be longer than **PMEMOBJ_MAX_LAYOUT**
as defined in **\<libpmemobj.h\>**. A NULL *layout* is equivalent
to using an empty string as a layout name. *mode* specifies the permissions to
use when creating the file, as described by **creat**(2). The memory pool file
is fully allocated to the size *poolsize* using **posix_fallocate**(3). The
caller may choose to take responsibility for creating the memory pool file
by creating it before calling _UW(pmemobj_create), and then specifying
*poolsize* as zero. In this case _UW(pmemobj_create) will take the pool size
from the size of the existing file and will verify that the file appears to be
empty by searching for any non-zero data in the pool header at the beginning of
the file. The minimum net pool size allowed by the library for a local
transactional object store is defined in **\<libpmemobj.h\>** as
**PMEMOBJ_MIN_POOL**. _WINUX(,=q=For remote replicas the minimum file size
is defined in **\<librpmem.h\>** as **RPMEM_MIN_PART**.=e=)
Depending on the configuration of the system, the available non-volatile
memory space may be divided into multiple memory devices.
In such case, the maximum size of the pmemobj memory pool
could be limited by the capacity of a single memory device.
**libpmemobj**(7) allows building persistent memory
resident object store spanning multiple memory devices by creation of
persistent memory pools consisting of multiple files, where each part of
such a *pool set* may be stored on a different memory device
or pmem-aware filesystem.
Creation of all the parts of the pool set can be done with _UW(pmemobj_create);
however, the recommended method for creating pool sets is with the
**pmempool**(1) utility.
When creating a pool set consisting of multiple files, the *path* argument
passed to _UW(pmemobj_create) must point to the special *set* file that defines
the pool layout and the location of all the parts of the pool set. The
*poolsize* argument must be 0. The meaning of the *layout* and *mode* arguments
does not change, except that the same *mode* is used for creation of all the
parts of the pool set.
The *set* file is a plain text file, the structure of which is described in
**poolset**(5).
The _UW(pmemobj_open) function opens an existing object store memory pool.
Similar to _UW(pmemobj_create), *path* must identify either an existing
obj memory pool file, or the *set* file used to create a pool set.
If *layout* is non-NULL, it is compared to the layout
name provided to _UW(pmemobj_create) when the pool was first created. This can
be used to verify that the layout of the pool matches what was expected.
The application must have permission to open the file and memory map it with
read/write permissions.
Be aware that if the pool contains bad blocks inside, opening can be aborted
by the SIGBUS signal, because currently the pool is not checked against
bad blocks during opening. It can be turned on by setting the CHECK_BAD_BLOCKS
compat feature. For details see description of this feature
in **pmempool-feature**(1).
The **pmemobj_close**() function closes the memory pool indicated by *pop* and
deletes the memory pool handle. The object store itself lives on in the file
that contains it and may be re-opened at a later time using
_UW(pmemobj_open) as described above.
The _UW(pmemobj_check) function performs a consistency check of the file
indicated by *path*. _UW(pmemobj_check) opens the given *path* read-only so
it never makes any changes to the file. This function is not supported on
Device DAX.
The **pmemobj_set_user_data**() function associates custom volatile state,
represented by pointer *data*, with the given pool *pop*. This state can later
be retrieved using **pmemobj_get_user_data**() function. This state does not
survive pool close. If **pmemobj_set_user_data**() was not called for a given
pool, **pmemobj_get_user_data**() will return NULL.
# RETURN VALUE #
The _UW(pmemobj_create) function returns a memory pool handle to be used with
most of the functions in **libpmemobj**(7). On error it returns NULL
and sets *errno* appropriately.
The _UW(pmemobj_open) function returns a memory pool handle to be used with
most of the functions in **libpmemobj**(7). If an error prevents the pool
from being opened, or if the given *layout* does not match the pool's layout,
_UW(pmemobj_open) returns NULL and sets *errno* appropriately.
The **pmemobj_close**() function returns no value.
The _UW(pmemobj_check) function returns 1 if the memory pool is found to be
consistent. Any inconsistencies found will cause _UW(pmemobj_check) to
return 0, in which case the use of the file with **libpmemobj**(7) will result
in undefined behavior. The debug version of **libpmemobj**(7) will provide
additional details on inconsistencies when **PMEMOBJ_LOG_LEVEL** is at least 1,
as described in the **DEBUGGING AND ERROR HANDLING** section in
**libpmemobj**(7). _UW(pmemobj_check) returns -1 and sets *errno* if it cannot
perform the consistency check due to other errors.
# CAVEATS #
Not all file systems support **posix_fallocate**(3). _UW(pmemobj_create) will
fail if the underlying file system does not support **posix_fallocate**(3).
_WINUX(=q= On Windows if _UW(pmemobj_create) is called on an existing file
with FILE_ATTRIBUTE_SPARSE_FILE and FILE_ATTRIBUTE_COMPRESSED set,
they will be removed, to physically allocate space for the pool.
This is a workaround for _chsize() performance issues. =e=)
# SEE ALSO #
**creat**(2), **msync**(2), **pmem_is_pmem**(3), **pmem_persist**(3),
**posix_fallocate**(3), **libpmem**(7), **libpmemobj**(7)
and **<https://pmem.io>**
| 8,343 | 45.614525 | 99 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/libpmemobj/pmemobj_ctl_get.3.md
|
---
layout: manual
Content-Style: 'text/css'
title: _MP(PMEMOBJ_CTL_GET, 3)
collection: libpmemobj
header: PMDK
date: pmemobj API version 2.3
...
[comment]: <> (SPDX-License-Identifier: BSD-3-Clause
[comment]: <> (Copyright 2017-2020, Intel Corporation)
[comment]: <> (pmemobj_ctl_get.3 -- man page for libpmemobj CTL)
[NAME](#name)<br />
[SYNOPSIS](#synopsis)<br />
[DESCRIPTION](#description)<br />
[CTL NAMESPACE](#ctl-namespace)<br />
[CTL EXTERNAL CONFIGURATION](#ctl-external-configuration)<br />
[SEE ALSO](#see-also)<br />
# NAME #
_UW(pmemobj_ctl_get),
_UW(pmemobj_ctl_set),
_UW(pmemobj_ctl_exec)
- Query and modify libpmemobj internal behavior (EXPERIMENTAL)
# SYNOPSIS #
```c
#include <libpmemobj.h>
_UWFUNCR2(int, pmemobj_ctl_get, PMEMobjpool *pop, *name, void *arg,
=q= (EXPERIMENTAL)=e=)
_UWFUNCR2(int, pmemobj_ctl_set, PMEMobjpool *pop, *name, void *arg,
=q= (EXPERIMENTAL)=e=)
_UWFUNCR2(int, pmemobj_ctl_exec, PMEMobjpool *pop, *name, void *arg,
=q= (EXPERIMENTAL)=e=)
```
_UNICODE()
# DESCRIPTION #
The _UW(pmemobj_ctl_get), _UW(pmemobj_ctl_set) and _UW(pmemobj_ctl_exec)
functions provide a uniform interface for querying and modifying the internal
behavior of **libpmemobj**(7) through the control (CTL) namespace.
The *name* argument specifies an entry point as defined in the CTL namespace
specification. The entry point description specifies whether the extra *arg* is
required. Those two parameters together create a CTL query. The functions and
the entry points are thread-safe unless
indicated otherwise below. If there are special conditions for calling an entry
point, they are explicitly stated in its description. The functions propagate
the return value of the entry point. If either *name* or *arg* is invalid, -1
is returned.
If the provided ctl query is valid, the CTL functions will always return 0
on success and -1 on failure, unless otherwise specified in the entry point
description.
See more in **pmem_ctl**(5) man page.
# CTL NAMESPACE #
prefault.at_create | rw | global | int | int | - | boolean
If set, every page of the pool will be touched and written to when the pool
is created, in order to trigger page allocation and minimize the performance
impact of pagefaults. Affects only the _UW(pmemobj_create) function.
prefault.at_open | rw | global | int | int | - | boolean
If set, every page of the pool will be touched and written to when the pool
is opened, in order to trigger page allocation and minimize the performance
impact of pagefaults. Affects only the _UW(pmemobj_open) function.
sds.at_create | rw | global | int | int | - | boolean
If set, force-enables or force-disables SDS feature during pool creation.
Affects only the _UW(pmemobj_create) function. See **pmempool_feature_query**(3)
for information about SDS (SHUTDOWN_STATE) feature.
copy_on_write.at_open | rw | global | int | int | - | boolean
If set, pool is mapped in such a way that modifications don't reach the
underlying medium. From the user's perspective this means that when the pool
is closed all changes are reverted. This feature is not supported for pools
located on Device DAX.
tx.debug.skip_expensive_checks | rw | - | int | int | - | boolean
Turns off some expensive checks performed by the transaction module in "debug"
builds. Ignored in "release" builds.
tx.debug.verify_user_buffers | rw | - | int | int | - | boolean
Enables verification of user buffers provided by
**pmemobj_tx_log_append_buffer**(3) API. For now the only verified aspect
is whether the same buffer is used simultaneously in 2 or more transactions
or more than once in the same transaction. This value should not be modified
at runtime if any transaction for the current pool is in progress.
tx.cache.size | rw | - | long long | long long | - | integer
Size in bytes of the transaction snapshot cache. In a larger cache the
frequency of persistent allocations is lower, but with higher fixed cost.
This should be set to roughly the sum of sizes of the snapshotted regions in
an average transaction in the pool.
This entry point is not thread safe and should not be modified if there are any
transactions currently running.
This value must be in a range between 0 and **PMEMOBJ_MAX_ALLOC_SIZE**,
otherwise this entry point will fail.
tx.cache.threshold | rw | - | long long | long long | - | integer
This entry point is deprecated.
All snapshots, regardless of the size, use the transactional cache.
tx.post_commit.queue_depth | rw | - | int | int | - | integer
This entry point is deprecated.
tx.post_commit.worker | r- | - | void * | - | - | -
This entry point is deprecated.
tx.post_commit.stop | r- | - | void * | - | - | -
This entry point is deprecated.
heap.narenas.automatic | r- | - | unsigned | - | - | -
Reads the number of arenas used in automatic scheduling of memory operations
for threads. By default, this value is equal to the number of available processors.
An arena is a memory management structure which enables concurrency by taking
exclusive ownership of parts of the heap and allowing associated threads to allocate
without contention.
heap.narenas.total | r- | - | unsigned | - | - | -
Reads the number of all created arenas. It includes automatic arenas
created by default and arenas created using heap.arena.create CTL.
heap.narenas.max | rw- | - | unsigned | unsigned | - | -
Reads or writes the maximum number of arenas that can be created.
This entry point is not thread-safe with regards to heap
operations (allocations, frees, reallocs).
heap.arena.[arena_id].size | r- | - | uint64_t | - | - | -
Reads the total amount of memory in bytes which is currently
exclusively owned by the arena. Large differences in this value between
arenas might indicate an uneven scheduling of memory resources.
The arena id cannot be 0.
heap.thread.arena_id | rw- | - | unsigned | unsigned | - | -
Reads the index of the arena assigned to the current thread or
assigns arena with specific id to the current thread.
The arena id cannot be 0.
heap.arena.create | --x | - | - | - | unsigned | -
Creates and initializes one new arena in the heap.
This entry point reads the id of the newly created arena.
Arenas newly created by this CTL are inactive, which means that
the arena will not be used in the automatic scheduling of
memory requests. To activate the new arena, use the heap.arena.[arena_id].automatic CTL.
Arena created using this CTL can be used for allocation by explicitly
specifying the *arena_id* for **POBJ_ARENA_ID(id)** flag in
**pmemobj_tx_xalloc**()/**pmemobj_xalloc**()/**pmemobj_xreserve()** functions.
By default, the number of arenas is limited to 1024.
heap.arena.[arena_id].automatic | rw- | - | boolean | boolean | - | -
Reads or modifies the state of the arena.
If set, the arena is used in automatic scheduling of memory operations for threads.
This should be set to false if the application wants to manually manage allocator
scalability through explicitly assigning arenas to threads by using heap.thread.arena_id.
The arena id cannot be 0 and at least one automatic arena must exist.
heap.alloc_class.[class_id].desc | rw | - | `struct pobj_alloc_class_desc` |
`struct pobj_alloc_class_desc` | - | integer, integer, integer, string
Describes an allocation class. Allows one to create or view the internal
data structures of the allocator.
Creating custom allocation classes can be beneficial for raw allocation
throughput, scalability and, most importantly, fragmentation. By carefully
constructing allocation classes that match the application workload,
one can entirely eliminate external and internal fragmentation. For example,
it is possible to easily construct a slab-like allocation mechanism for any
data structure.
The `[class_id]` is an index field. Only values between 0-254 are valid.
If setting an allocation class, but the `class_id` is already taken, the
function will return -1.
The values between 0-127 are reserved for the default allocation classes of the
library and can be used only for reading.
The recommended method for retrieving information about all allocation classes
is to call this entry point for all class ids between 0 and 254 and discard
those results for which the function returns an error.
This entry point takes a complex argument.
```
struct pobj_alloc_class_desc {
size_t unit_size;
size_t alignment;
unsigned units_per_block;
enum pobj_header_type header_type;
unsigned class_id;
};
```
The first field, `unit_size`, is an 8-byte unsigned integer that defines the
allocation class size. While theoretically limited only by
**PMEMOBJ_MAX_ALLOC_SIZE**, for most workloads this value should be between
8 bytes and 2 megabytes.
The `alignment` field specifies the user data alignment of objects allocated
using the class. If set, must be a power of two and an even divisor of unit
size. Alignment is limited to maximum of 2 megabytes.
All objects have default alignment of 64 bytes, but the user data alignment
is affected by the size of the chosen header.
The `units_per_block` field defines how many units a single block of memory
contains. This value will be adjusted to match the internal size of the
block (256 kilobytes or a multiple thereof). For example, given a class with
a `unit_size` of 512 bytes and a `units_per_block` of 1000, a single block of
memory for that class will have 512 kilobytes.
This is relevant because the bigger the block size, the less frequently blocks
need to be fetched, resulting in lower contention on global heap state.
If the CTL call is being done at runtime, the `units_per_block` variable of the
provided alloc class structure is modified to match the actual value.
The `header_type` field defines the header of objects from the allocation class.
There are three types:
- **POBJ_HEADER_LEGACY**, string value: `legacy`. Used for allocation classes
prior to version 1.3 of the library. Not recommended for use.
Incurs a 64 byte metadata overhead for every object.
Fully supports all features.
- **POBJ_HEADER_COMPACT**, string value: `compact`. Used as default for all
predefined allocation classes.
Incurs a 16 byte metadata overhead for every object.
Fully supports all features.
- **POBJ_HEADER_NONE**, string value: `none`. Header type that
incurs no metadata overhead beyond a single bitmap entry. Can be used
for very small allocation classes or when objects must be adjacent to
each other.
This header type does not support type numbers (type number is always
0) or allocations that span more than one unit.
The `class_id` field is an optional, runtime-only variable that allows the
user to retrieve the identifier of the class. This will be equivalent to the
provided `[class_id]`. This field cannot be set from a config file.
The allocation classes are a runtime state of the library and must be created
after every open. It is highly recommended to use the configuration file to
store the classes.
This structure is declared in the `libpmemobj/ctl.h` header file. Please refer
to this file for an in-depth explanation of the allocation classes and relevant
algorithms.
Allocation classes constructed in this way can be leveraged by explicitly
specifying the class using **POBJ_CLASS_ID(id)** flag in **pmemobj_tx_xalloc**()/**pmemobj_xalloc**()
functions.
Example of a valid alloc class query string:
```
heap.alloc_class.128.desc=500,0,1000,compact
```
This query, if executed, will create an allocation class with an id of 128 that
has a unit size of 500 bytes, has at least 1000 units per block and uses
a compact header.
For reading, the function returns 0 if successful; if the allocation class does
not exist, it sets *errno* to **ENOENT** and returns -1.
This entry point can fail if any of the parameters of the allocation class
is invalid or if exactly the same class already exists.
heap.alloc_class.new.desc | -w | - | - | `struct pobj_alloc_class_desc` | - | integer, integer, integer, string
Same as `heap.alloc_class.[class_id].desc`, but instead of requiring the user
to provide the class_id, it automatically creates the allocation class with the
first available identifier.
This should be used when it's impossible to guarantee unique allocation class
naming in the application (e.g. when writing a library that uses libpmemobj).
The required class identifier will be stored in the `class_id` field of the
`struct pobj_alloc_class_desc`.
stats.enabled | rw | - | enum pobj_stats_enabled | enum pobj_stats_enabled | - |
string
Enables or disables runtime collection of statistics. There are two types of
statistics: persistent and transient ones. Persistent statistics survive pool
restarts, whereas transient ones don't. Statistics are not recalculated after
enabling; any operations that occur between disabling and re-enabling will not
be reflected in subsequent values.
Only transient statistics are enabled by default. Enabling persistent statistics
may have non-trivial performance impact.
stats.heap.curr_allocated | r- | - | uint64_t | - | - | -
Reads the number of bytes currently allocated in the heap. If statistics were
disabled at any time in the lifetime of the heap, this value may be
inaccurate.
This is a persistent statistic.
stats.heap.run_allocated | r- | - | uint64_t | - | - | -
Reads the number of bytes currently allocated using run-based allocation
classes, i.e., huge allocations are not accounted for in this statistic.
This is useful for comparison against stats.heap.run_active to estimate the
ratio between active and allocated memory.
This is a transient statistic and is rebuilt every time the pool is opened.
stats.heap.run_active | r- | - | uint64_t | - | - | -
Reads the number of bytes currently occupied by all run memory blocks, including
both allocated and free space, i.e., this is all the space that's not
occupied by huge allocations.
This value is a sum of all allocated and free run memory. In systems where
memory is efficiently used, `run_active` should closely track
`run_allocated`, and the amount of active, but free, memory should be minimal.
A large relative difference between active memory and allocated memory is
indicative of heap fragmentation. This information can be used to make
a decision to call **pmemobj_defrag**(3) if the fragmentation looks to be high.
However, for small heaps `run_active` might be disproportionately higher than
`run_allocated` because the allocator typically activates a significantly larger
amount of memory than is required to satisfy a single request in the
anticipation of future needs. For example, the first allocation of 100 bytes
in a heap will trigger activation of 256 kilobytes of space.
This is a transient statistic and is rebuilt lazily every time the pool
is opened.
heap.size.granularity | rw- | - | uint64_t | uint64_t | - | long long
Reads or modifies the granularity with which the heap grows when it runs out of memory.
Valid only if the poolset has been defined with directories.
A granularity of 0 specifies that the pool will not grow automatically.
This entry point can fail if the granularity value is non-zero and smaller
than *PMEMOBJ_MIN_PART*.
heap.size.extend | --x | - | - | - | uint64_t | -
Extends the heap by the given size. Must be larger than *PMEMOBJ_MIN_PART*.
This entry point can fail if the pool does not support extend functionality or
if there's not enough space left on the device.
debug.heap.alloc_pattern | rw | - | int | int | - | -
Single byte pattern that is used to fill new uninitialized memory allocation.
If the value is negative, no pattern is written. This is intended for
debugging, and is disabled by default.
# CTL EXTERNAL CONFIGURATION #
In addition to direct function call, each write entry point can also be set
using two alternative methods.
The first method is to load a configuration directly from the **PMEMOBJ_CONF**
environment variable.
The second method of loading an external configuration is to set the
**PMEMOBJ_CONF_FILE** environment variable to point to a file that contains
a sequence of ctl queries.
See more in **pmem_ctl**(5) man page.
# SEE ALSO #
**libpmemobj**(7), **pmem_ctl**(5) and **<https://pmem.io>**
| 16,141 | 39.45614 | 111 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/libpmemobj/libpmemobj.7.md
|
---
layout: manual
Content-Style: 'text/css'
title: _MP(LIBPMEMOBJ, 7)
collection: libpmemobj
header: PMDK
date: pmemobj API version 2.3
...
[comment]: <> (SPDX-License-Identifier: BSD-3-Clause)
[comment]: <> (Copyright 2016-2019, Intel Corporation)
[comment]: <> (libpmemobj.7 -- man page for libpmemobj)
[NAME](#name)<br />
[SYNOPSIS](#synopsis)<br />
[DESCRIPTION](#description)<br />
[LIBRARY API VERSIONING](#library-api-versioning-1)<br />
[MANAGING LIBRARY BEHAVIOR](#managing-library-behavior)<br />
[DEBUGGING AND ERROR HANDLING](#debugging-and-error-handling)<br />
[EXAMPLE](#example)<br />
[ACKNOWLEDGEMENTS](#acknowledgements)<br />
[SEE ALSO](#see-also)<br />
# NAME #
**libpmemobj** - persistent memory transactional object store
# SYNOPSIS #
```c
#include <libpmemobj.h>
cc _WINUX(,-std=gnu99) ... -lpmemobj -lpmem
```
_UNICODE()
##### Library API versioning: #####
```c
_UWFUNC(pmemobj_check_version, =q=
unsigned major_required,
unsigned minor_required=e=)
```
##### Managing library behavior: #####
```c
void pmemobj_set_funcs(
void *(*malloc_func)(size_t size),
void (*free_func)(void *ptr),
void *(*realloc_func)(void *ptr, size_t size),
char *(*strdup_func)(const char *s));
```
##### Error handling: #####
```c
_UWFUNC(pmemobj_errormsg, void)
```
##### Other library functions: #####
A description of other **libpmemobj** functions can be found on the following
manual pages:
+ control and statistics: **pmemobj_ctl_get**(3)
+ create, open, close and validate: **pmemobj_open**(3)
+ low-level memory manipulation: **pmemobj_memcpy_persist**(3)
+ locking: **pmemobj_mutex_zero**(3)
+ persistent object identifier: **OID_IS_NULL**(3)
+ type-safety: **TOID_DECLARE**(3)
+ layout declaration: **POBJ_LAYOUT_BEGIN**(3)
+ non-transactional atomic allocations: **pmemobj_alloc**(3)
+ root object management: **pmemobj_root**(3)
+ object containers: **pmemobj_first**(3)
+ non-transactional persistent atomic circular doubly-linked list:
**pmemobj_list_insert**(3), **POBJ_LIST_HEAD**(3)
+ transactional object manipulation: **pmemobj_tx_begin**(3),
**pmemobj_tx_add_range**(3), **pmemobj_tx_alloc**(3)
+ delayed atomicity actions: **pmemobj_action**(3) (EXPERIMENTAL)
# DESCRIPTION #
**libpmemobj** provides a transactional object store in *persistent memory*
(pmem) for applications that require transactions and persistent memory
management using direct access storage (DAX), which is storage that supports
load/store access without paging blocks from a block storage device. Some types
of *non-volatile memory DIMMs* (NVDIMMs) provide this type of byte addressable
access to storage. A *persistent memory aware file system* is typically used to
expose the direct access to applications. Memory mapping a file from this type
of file system results in load/store, non-paged access to pmem. **libpmemobj**
builds on this type of memory mapped file using the low-level pmem support
provided by **libpmem**(7), handling the transactional updates, flushing
changes to persistence, and managing recovery for the application.
_WINUX(,=q=**libpmemobj** requires the **-std=gnu99** compilation flag to
build properly.=e=)
**libpmemobj** is one of a collection of persistent memory libraries available.
The others are:
+ **libpmemblk**(7), providing pmem-resident arrays of fixed-sized blocks with
atomic updates.
+ **libpmemlog**(7), providing a pmem-resident log file.
+ **libpmem**(7), low-level persistent memory support.
Under normal usage, **libpmemobj** will never print messages or intentionally
cause the process to exit. The only exception to this is the debugging
information, when enabled, as described under **DEBUGGING AND ERROR HANDLING**,
below.
# LIBRARY API VERSIONING #
This section describes how the library API is versioned,
allowing applications to work with an evolving API.
The _UW(pmemobj_check_version) function is used to see if the installed
**libpmemobj** supports the version of the library API required by an
application. The easiest way to do this is for the application to supply
the compile-time version information, supplied by defines in
**\<libpmemobj.h\>**, like this:
```c
reason = _U(pmemobj_check_version)(PMEMOBJ_MAJOR_VERSION,
PMEMOBJ_MINOR_VERSION);
if (reason != NULL) {
/* version check failed, reason string tells you why */
}
```
Any mismatch in the major version number is considered a failure, but a library
with a newer minor version number will pass this check since increasing minor
versions imply backwards compatibility.
An application can also check specifically for the existence of an interface
by checking for the version where that interface was introduced. These versions
are documented in this man page as follows: unless otherwise specified, all
interfaces described here are available in version 1.0 of the library. Interfaces
added after version 1.0 will contain the text *introduced in version x.y* in
the section of this manual describing the feature.
On success, _UW(pmemobj_check_version) returns NULL. Otherwise, the return
value is a static string describing the reason the version check failed. The
string returned by _UW(pmemobj_check_version) must not be modified or freed.
# MANAGING LIBRARY BEHAVIOR #
The **pmemobj_set_funcs**() function allows an application to override memory
allocation calls used internally by **libpmemobj**. Passing in NULL for any of
the handlers will cause the **libpmemobj** default function to be used. The
library does not make heavy use of the system malloc functions, but it does
allocate approximately 4-8 kilobytes for each memory pool in use.
By default, **libpmemobj** supports up to 1024 parallel
transactions/allocations. For debugging purposes it is possible to decrease
this value by setting the **PMEMOBJ_NLANES** environment variable to the
desired limit.
# DEBUGGING AND ERROR HANDLING #
If an error is detected during the call to a **libpmemobj** function, the
application may retrieve an error message describing the reason for the failure
from _UW(pmemobj_errormsg). This function returns a pointer to a static buffer
containing the last error message logged for the current thread. If *errno*
was set, the error message may include a description of the corresponding
error code as returned by **strerror**(3). The error message buffer is
thread-local; errors encountered in one thread do not affect its value in
other threads. The buffer is never cleared by any library function; its
content is significant only when the return value of the immediately preceding
call to a **libpmemobj** function indicated an error, or if *errno* was set.
The application must not modify or free the error message string, but it may
be modified by subsequent calls to other library functions.
Two versions of **libpmemobj** are typically available on a development
system. The normal version, accessed when a program is linked using the
**-lpmemobj** option, is optimized for performance. That version skips checks
that impact performance and never logs any trace information or performs any
run-time assertions.
A second version of **libpmemobj**, accessed when a program uses the libraries
under _DEBUGLIBPATH(), contains run-time assertions and trace points. The
typical way to access the debug version is to set the environment variable
**LD_LIBRARY_PATH** to _LDLIBPATH(). Debugging output is
controlled using the following environment variables. These variables have
no effect on the non-debug version of the library.
+ **PMEMOBJ_LOG_LEVEL**
The value of **PMEMOBJ_LOG_LEVEL** enables trace points in the debug version
of the library, as follows:
+ **0** - This is the default level when **PMEMOBJ_LOG_LEVEL** is not set.
No log messages are emitted at this level.
+ **1** - Additional details on any errors detected are logged,
in addition to returning the *errno*-based errors as usual.
The same information may be retrieved using _UW(pmemobj_errormsg).
+ **2** - A trace of basic operations is logged.
+ **3** - Enables a very verbose amount of function call
tracing in the library.
+ **4** - Enables voluminous and fairly obscure tracing information
that is likely only useful to the **libpmemobj** developers.
Unless **PMEMOBJ_LOG_FILE** is set, debugging output is written to *stderr*.
+ **PMEMOBJ_LOG_FILE**
Specifies the name of a file where all logging information should be written.
If the last character in the name is "-", the *PID* of the current process will
be appended to the file name when the log file is created. If
**PMEMOBJ_LOG_FILE** is not set, logging output is written to *stderr*.
See also **libpmem**(7) to get information
about other environment variables affecting **libpmemobj** behavior.
# EXAMPLE #
See <https://pmem.io/pmdk/libpmemobj> for examples using the **libpmemobj** API.
# ACKNOWLEDGEMENTS #
**libpmemobj** builds on the persistent memory programming model recommended
by the SNIA NVM Programming Technical Work Group:
<https://snia.org/nvmp>
# SEE ALSO #
**OID_IS_NULL**(3), **pmemobj_alloc**(3), **pmemobj_ctl_exec**(3), **pmemobj_ctl_get**(3), **pmemobj_ctl_set**(3), **pmemobj_first**(3), **pmemobj_list_insert**(3), **pmemobj_memcpy_persist**(3), **pmemobj_mutex_zero**(3), **pmemobj_open**(3), **pmemobj_root**(3), **pmemobj_tx_add_range**(3), **pmemobj_tx_alloc**(3), **pmemobj_tx_begin**(3), **POBJ_LAYOUT_BEGIN**(3), **POBJ_LIST_HEAD**(3), **strerror**(3), **TOID_DECLARE**(3), **libpmem**(7), **libpmemblk**(7), **libpmemlog**(7)
and **<https://pmem.io>**
| 9,532 | 37.595142 | 483 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/libpmemobj/oid_is_null.3.md
|
---
layout: manual
Content-Style: 'text/css'
title: _MP(OID_IS_NULL, 3)
collection: libpmemobj
header: PMDK
date: pmemobj API version 2.3
...
[comment]: <> (SPDX-License-Identifier: BSD-3-Clause)
[comment]: <> (Copyright 2017-2018, Intel Corporation)
[comment]: <> (oid_is_null.3 -- man page for persistent object identifier and functions)
[NAME](#name)<br />
[SYNOPSIS](#synopsis)<br />
[DESCRIPTION](#description)<br />
[RETURN VALUE](#return-value)<br />
_WINUX(,[NOTES](#notes)<br />)
[SEE ALSO](#see-also)<br />
# NAME #
**OID_IS_NULL**(), **OID_EQUALS**(),
**pmemobj_direct**(), **pmemobj_oid**(),
**pmemobj_type_num**(), **pmemobj_pool_by_oid**(),
**pmemobj_pool_by_ptr**() - functions that allow mapping
operations between object addresses, object handles, oids or type numbers
# SYNOPSIS #
```c
#include <libpmemobj.h>
OID_IS_NULL(PMEMoid oid)
OID_EQUALS(PMEMoid lhs, PMEMoid rhs)
void *pmemobj_direct(PMEMoid oid);
PMEMoid pmemobj_oid(const void *addr);
uint64_t pmemobj_type_num(PMEMoid oid);
PMEMobjpool *pmemobj_pool_by_oid(PMEMoid oid);
PMEMobjpool *pmemobj_pool_by_ptr(const void *addr);
void *pmemobj_volatile(PMEMobjpool *pop, struct pmemvlt *vlt,
size_t size, void *ptr,
int (*constr)(void *ptr, void *arg), void *arg); (EXPERIMENTAL)
```
# DESCRIPTION #
Each object stored in a persistent memory pool is represented by an object
handle of type *PMEMoid*. In practice, such a handle is a unique Object
IDentifier (*OID*) of global scope, which means that two objects from
different pools will never have the same *OID*. The special **OID_NULL**
macro defines a NULL-like handle that does not represent any object.
The size of a single object is limited by **PMEMOBJ_MAX_ALLOC_SIZE**.
Thus an allocation with a requested size greater than this value will fail.
An *OID* cannot be used as a direct pointer to an object. Each time
the program attempts to read or write object data, it must obtain the current
memory address of the object by converting its *OID* into a pointer.
In contrast to the memory address, the *OID* value for a given object does not
change during the life of an object (except for *realloc*), and remains
valid after closing and reopening the pool. For this reason, if an object
contains a reference to another persistent object, for example, to build
some kind of a linked data structure, the reference must be an *OID* and not
a memory address.
**pmemobj_direct**() returns a pointer to the *PMEMoid* object with
handle *oid*.
**pmemobj_oid**() returns a *PMEMoid* handle to the object pointed
to by *addr*.
**pmemobj_type_num**() returns the type number of the *PMEMoid* object with
handle *oid*.
**pmemobj_pool_by_oid**() returns a *PMEMobjpool*\* handle to the pool
containing the *PMEMoid* object with handle *oid*.
**pmemobj_pool_by_ptr**() returns a *PMEMobjpool*\* handle to the pool
containing the address *addr*.
At the time of allocation (or reallocation), each object may be assigned
a number representing its type. Such a *type number* may be used to arrange the
persistent objects based on their actual user-defined structure type, thus
facilitating implementation of a simple run-time type safety mechanism. This
also allows iterating through all the objects of a given type that are stored
in the persistent memory pool. See **pmemobj_first**(3) for more information.
The **OID_IS_NULL**() macro checks if *PMEMoid* represents a NULL object.
The **OID_EQUALS**() macro compares two *PMEMoid* objects.
For special cases where volatile (transient) variables need to be stored on
persistent memory, there's a mechanism composed of *struct pmemvlt* type and
**pmemobj_volatile()** function. To use it, the *struct pmemvlt* needs to
be placed in the neighborhood of transient data region. The *PMEMvlt* macro
can be used to construct such a region.
The *struct pmemvlt* must be zeroed prior to use. This can be easily done in
object constructor or in a transaction directly after an allocation.
When the **pmemobj_volatile()** function is called on a *struct pmemvlt*,
it will return the pointer to the data and it will ensure that the provided
constructor function is called exactly once in the current instance of the
pmemobj pool.
The constructor is called with the *ptr* pointer to the data, and this function
will return the same pointer if the constructor returns *0*, otherwise NULL is
returned. The *size* argument must accurately describe the total size of the
volatile memory region that will be accessed. Calling **pmemobj_volatile()**
on the same region with different sizes is undefined behavior.
For this mechanism to be effective, all accesses to transient variables must
go through it, otherwise there's a risk of the constructor not being called
on the first load.
Maintaining transient state on persistent memory is challenging due to
difficulties with dynamic resource acquisition and subsequent resource release.
For example, one needs to consider what happens with volatile state of an object
which is being freed inside of a transaction, especially with regards to the
possibility of an abort.
It's generally recommended to entirely separate the persistent and transient
states, and when it's not possible, to only store types which do not require
lifecycle management (i.e., primitive types) inside of volatile regions.
# RETURN VALUE #
The **pmemobj_direct**() function returns a pointer to the object represented
by *oid*. If *oid* is **OID_NULL**, **pmemobj_direct**() returns NULL.
The **pmemobj_oid**() function returns a *PMEMoid* handle to the object pointed
to by *addr*. If *addr* is not from within a pmemobj pool, **OID_NULL** is
returned. If *addr* is not the start of an object (does not point to the
beginning of a valid allocation), the resulting *PMEMoid* can be safely used
only with:
+ **pmemobj_pool_by_oid**()
+ **pmemobj_direct**()
+ **pmemobj_tx_add_range**(3)
The **pmemobj_type_num**() function returns the type number of the object
represented by *oid*.
The **pmemobj_pool_by_oid**() function returns a handle to the pool that
contains the object represented by *oid*. If the pool is not open or
*oid* is **OID_NULL**, **pmemobj_pool_by_oid**() returns NULL.
The **pmemobj_pool_by_ptr**() function returns a handle to the pool that
contains the address, or NULL if the address does not belong to any open pool.
_WINUX(,=q=
# NOTES #
For performance reasons, on Linux and FreeBSD the **pmemobj_direct**()
function is inlined by default. To use the non-inlined variant of
**pmemobj_direct**(), define **PMEMOBJ_DIRECT_NON_INLINE** prior
to the *\#include* of **\<libpmemobj.h\>**, either with *\#define* or with
the *\-D* option to the compiler.=e=)
# EXAMPLES #
The following code shows how to store transient variables on persistent memory.
```c
struct my_data {
PMEMvlt(uint64_t) foo;
uint64_t bar;
};
int
my_data_constructor(void *ptr, void *arg)
{
uint64_t *foo = ptr;
*foo = 0;
return 0;
}
PMEMobjpool *pop = ...;
struct my_data *data = D_RW(...);
uint64_t *foo = pmemobj_volatile(pop, &data->foo.vlt, &data->foo.value,
my_data_constructor, NULL);
assert(*foo == 0);
```
# SEE ALSO #
**libpmemobj**(7) and **<https://pmem.io>**
| 7,183 | 36.416667 | 88 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/libpmemobj/pmemobj_tx_add_range.3.md
|
---
layout: manual
Content-Style: 'text/css'
title: _MP(PMEMOBJ_TX_ADD_RANGE, 3)
collection: libpmemobj
header: PMDK
date: pmemobj API version 2.3
...
[comment]: <> (SPDX-License-Identifier: BSD-3-Clause)
[comment]: <> (Copyright 2017-2019, Intel Corporation)
[comment]: <> (pmemobj_tx_add_range.3 -- man page for transactional object manipulation)
[NAME](#name)<br />
[SYNOPSIS](#synopsis)<br />
[DESCRIPTION](#description)<br />
[RETURN VALUE](#return-value)<br />
[SEE ALSO](#see-also)<br />
# NAME #
**pmemobj_tx_add_range**(), **pmemobj_tx_add_range_direct**(),
**pmemobj_tx_xadd_range**(), **pmemobj_tx_xadd_range_direct**()
**TX_ADD**(), **TX_ADD_FIELD**(),
**TX_ADD_DIRECT**(), **TX_ADD_FIELD_DIRECT**(),
**TX_XADD**(), **TX_XADD_FIELD**(),
**TX_XADD_DIRECT**(), **TX_XADD_FIELD_DIRECT**(),
**TX_SET**(), **TX_SET_DIRECT**(),
**TX_MEMCPY**(), **TX_MEMSET**()
- transactional object manipulation
# SYNOPSIS #
```c
#include <libpmemobj.h>
int pmemobj_tx_add_range(PMEMoid oid, uint64_t off, size_t size);
int pmemobj_tx_add_range_direct(const void *ptr, size_t size);
int pmemobj_tx_xadd_range(PMEMoid oid, uint64_t off, size_t size, uint64_t flags);
int pmemobj_tx_xadd_range_direct(const void *ptr, size_t size, uint64_t flags);
TX_ADD(TOID o)
TX_ADD_FIELD(TOID o, FIELD)
TX_ADD_DIRECT(TYPE *p)
TX_ADD_FIELD_DIRECT(TYPE *p, FIELD)
TX_XADD(TOID o, uint64_t flags)
TX_XADD_FIELD(TOID o, FIELD, uint64_t flags)
TX_XADD_DIRECT(TYPE *p, uint64_t flags)
TX_XADD_FIELD_DIRECT(TYPE *p, FIELD, uint64_t flags)
TX_SET(TOID o, FIELD, VALUE)
TX_SET_DIRECT(TYPE *p, FIELD, VALUE)
TX_MEMCPY(void *dest, const void *src, size_t num)
TX_MEMSET(void *dest, int c, size_t num)
```
# DESCRIPTION #
**pmemobj_tx_add_range**() takes a "snapshot" of the memory block of given
*size*, located at given offset *off* in the object specified by *oid*, and
saves it to the undo log. The application is then free to directly modify the
object in that memory range. In case of a failure or abort, all the changes
within this range will be rolled back. The supplied block of memory has to be
within the pool registered in the transaction. This function must be called
during **TX_STAGE_WORK**.
The **pmemobj_tx_xadd_range**() function behaves exactly the same as
**pmemobj_tx_add_range**() when *flags* equals zero.
*flags* is a bitmask of the following values:
+ **POBJ_XADD_NO_FLUSH** - skip flush on commit (when application deals
with flushing or uses pmemobj_memcpy_persist)
+ **POBJ_XADD_NO_SNAPSHOT** - added range will not be "snapshotted", i.e. any
changes made within it during the transaction will not be rolled back after
abort
+ **POBJ_XADD_ASSUME_INITIALIZED** - added range is assumed to be initialized.
If this flag is not specified, passing uninitialized memory will result in an
error when run under Valgrind memcheck.
+ **POBJ_XADD_NO_ABORT** - if the function does not end successfully,
do not abort the transaction.
**pmemobj_tx_add_range_direct**() behaves the same as
**pmemobj_tx_add_range**() with the exception that it operates on virtual
memory addresses and not persistent memory objects. It takes a "snapshot" of
a persistent memory block of given *size*, located at the given address *ptr*
in the virtual memory space and saves it to the undo log. The application is
then free to directly modify the object in that memory range. In case of a
failure or abort, all the changes within this range will be rolled back.
The supplied block of memory has to be within the pool registered in the
transaction. This function must be called during **TX_STAGE_WORK**.
The **pmemobj_tx_xadd_range_direct**() function behaves exactly the same as
**pmemobj_tx_add_range_direct**() when *flags* equals zero. *flags* is a
bitmask of the following values:
+ **POBJ_XADD_NO_FLUSH** - skip flush on commit (when application deals
with flushing or uses pmemobj_memcpy_persist)
+ **POBJ_XADD_NO_SNAPSHOT** - added range will not be "snapshotted", i.e. any
changes made within it during the transaction will not be rolled back after
abort
+ **POBJ_XADD_ASSUME_INITIALIZED** - added range is assumed to be initialized.
If this flag is not specified, passing uninitialized memory will result in an
error when run under Valgrind memcheck.
+ **POBJ_XADD_NO_ABORT** - if the function does not end successfully,
do not abort the transaction.
Similarly to the macros controlling the transaction flow, **libpmemobj**
defines a set of macros that simplify the transactional operations on
persistent objects. Note that those macros operate on typed object handles,
thus eliminating the need to specify the size of the object, or the size and
offset of the field in the user-defined structure that is to be modified.
The **TX_ADD_FIELD**() macro saves the current value of given *FIELD* of the
object referenced by a handle *o* in the undo log. The application is then free
to directly modify the specified *FIELD*. In case of a failure or abort, the
saved value will be restored.
The **TX_XADD_FIELD**() macro works exactly like **TX_ADD_FIELD** when *flags*
equals 0. The *flags* argument is a bitmask of values described in
**pmemobj_tx_xadd_range**, above.
The **TX_ADD**() macro takes a "snapshot" of the entire object referenced by
object handle *o* and saves it in the undo log. The object size is determined
from its *TYPE*. The application is then free to directly modify the object.
In case of a failure or abort, all the changes within the object will be
rolled back.
The **TX_XADD**() macro works exactly like **TX_ADD** when *flags* equals 0.
The *flags* argument is a bitmask of values as described in
**pmemobj_tx_xadd_range**, above.
The **TX_ADD_FIELD_DIRECT**() macro saves the current value of the given
*FIELD* of the object referenced by (direct) pointer *p* in the undo log.
The application is then free to directly modify the specified *FIELD*. In case
of a failure or abort, the saved value will be restored.
The **TX_XADD_FIELD_DIRECT**() macro works exactly like **TX_ADD_FIELD_DIRECT**
when *flags* equals 0. The *flags* argument is a bitmask of values as described
in **pmemobj_tx_xadd_range_direct**, above.
The **TX_ADD_DIRECT**() macro takes a "snapshot" of the entire object
referenced by (direct) pointer *p* and saves it in the undo log. The object
size is determined from its *TYPE*. The application is then free to directly
modify the object. In case of a failure or abort, all the changes within the
object will be rolled back.
The **TX_XADD_DIRECT**() macro works exactly like **TX_ADD_DIRECT** when
*flags* equals 0. The *flags* argument is a bitmask of values as described in
**pmemobj_tx_xadd_range_direct**, above.
The **TX_SET**() macro saves the current value of the given *FIELD* of the
object referenced by handle *o* in the undo log, and then sets its new *VALUE*.
In case of a failure or abort, the saved value will be restored.
The **TX_SET_DIRECT**() macro saves in the undo log the current value of given
*FIELD* of the object referenced by (direct) pointer *p*, and then set its new
*VALUE*. In case of a failure or abort, the saved value will be restored.
The **TX_MEMCPY**() macro saves in the undo log the current content of *dest*
buffer and then overwrites the first *num* bytes of its memory area with
the data copied from the buffer pointed to by *src*. In case of a failure or abort,
the saved value will be restored.
The **TX_MEMSET**() macro saves the current content of the *dest* buffer in the
undo log and then fills the first *num* bytes of its memory area with the
constant byte *c*. In case of a failure or abort, the saved value will be
restored.
# RETURN VALUE #
On success, **pmemobj_tx_add_range**() and **pmemobj_tx_add_range_direct**()
return 0. Otherwise, the stage is changed to **TX_STAGE_ONABORT**,
**errno** is set appropriately and transaction is aborted.
On success, **pmemobj_tx_xadd_range**() and **pmemobj_tx_xadd_range_direct**()
return 0. Otherwise, the error number is returned, **errno** is set and
when flags do not contain **POBJ_XADD_NO_ABORT**, the transaction is aborted.
# SEE ALSO #
**pmemobj_tx_alloc**(3), **pmemobj_tx_begin**(3),
**libpmemobj**(7) and **<https://pmem.io>**
| 8,193 | 41.237113 | 88 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/libpmemobj/pmemobj_tx_begin.3.md
|
---
layout: manual
Content-Style: 'text/css'
title: _MP(PMEMOBJ_TX_BEGIN, 3)
collection: libpmemobj
header: PMDK
date: pmemobj API version 2.3
...
[comment]: <> (SPDX-License-Identifier: BSD-3-Clause)
[comment]: <> (Copyright 2017-2020, Intel Corporation)
[comment]: <> (pmemobj_tx_begin.3 -- man page for transactional object manipulation)
[NAME](#name)<br />
[SYNOPSIS](#synopsis)<br />
[DESCRIPTION](#description)<br />
[RETURN VALUE](#return-value)<br />
[CAVEATS](#caveats)<br />
[SEE ALSO](#see-also)<br />
# NAME #
**pmemobj_tx_stage**(),
**pmemobj_tx_begin**(), **pmemobj_tx_lock**(),
**pmemobj_tx_xlock**(), **pmemobj_tx_abort**(),
**pmemobj_tx_commit**(), **pmemobj_tx_end**(),
**pmemobj_tx_errno**(), **pmemobj_tx_process**(),
**TX_BEGIN_PARAM**(), **TX_BEGIN_CB**(),
**TX_BEGIN**(), **TX_ONABORT**,
**TX_ONCOMMIT**, **TX_FINALLY**, **TX_END**,
**pmemobj_tx_log_append_buffer**(), **pmemobj_tx_xlog_append_buffer**(),
**pmemobj_tx_log_auto_alloc**(), **pmemobj_tx_log_snapshots_max_size**(),
**pmemobj_tx_log_intents_max_size**(),
**pmemobj_tx_set_user_data**(),
**pmemobj_tx_get_user_data**(),
**pmemobj_tx_set_failure_behavior**(),
**pmemobj_tx_get_failure_behavior**()
- transactional object manipulation
# SYNOPSIS #
```c
#include <libpmemobj.h>
enum pobj_tx_stage pmemobj_tx_stage(void);
int pmemobj_tx_begin(PMEMobjpool *pop, jmp_buf *env, enum pobj_tx_param, ...);
int pmemobj_tx_lock(enum tx_lock lock_type, void *lockp);
int pmemobj_tx_xlock(enum tx_lock lock_type, void *lockp, uint64_t flags);
void pmemobj_tx_abort(int errnum);
void pmemobj_tx_commit(void);
int pmemobj_tx_end(void);
int pmemobj_tx_errno(void);
void pmemobj_tx_process(void);
TX_BEGIN_PARAM(PMEMobjpool *pop, ...)
TX_BEGIN_CB(PMEMobjpool *pop, cb, arg, ...)
TX_BEGIN(PMEMobjpool *pop)
TX_ONABORT
TX_ONCOMMIT
TX_FINALLY
TX_END
int pmemobj_tx_log_append_buffer(enum pobj_log_type type, void *addr, size_t size);
int pmemobj_tx_xlog_append_buffer(enum pobj_log_type type, void *addr, size_t size, uint64_t flags);
int pmemobj_tx_log_auto_alloc(enum pobj_log_type type, int on_off);
size_t pmemobj_tx_log_snapshots_max_size(size_t *sizes, size_t nsizes);
size_t pmemobj_tx_log_intents_max_size(size_t nintents);
void pmemobj_tx_set_user_data(void *data);
void *pmemobj_tx_get_user_data(void);
void pmemobj_tx_set_failure_behavior(enum pobj_tx_failure_behavior behavior);
enum pobj_tx_failure_behavior pmemobj_tx_get_failure_behavior(void);
```
# DESCRIPTION #
The non-transactional functions and macros described in **pmemobj_alloc**(3),
**pmemobj_list_insert**(3) and **POBJ_LIST_HEAD**(3) only guarantee the
atomicity of a single operation on an object. In case of more complex changes
involving multiple operations on an object, or allocation and modification
of multiple objects, data consistency and fail-safety may be provided only
by using *atomic transactions*.
A transaction is defined as series of operations on persistent memory
objects that either all occur, or nothing occurs. In particular,
if the execution of a transaction is interrupted by a power failure
or a system crash, it is guaranteed that after system restart,
all the changes made as a part of the uncompleted transaction
will be rolled back, restoring the consistent state of the memory
pool from the moment when the transaction was started.
Note that transactions do not provide atomicity with respect
to other threads. All the modifications performed within the transactions
are immediately visible to other threads. Therefore it is the responsibility
of the application to implement a proper thread synchronization mechanism.
Each thread may have only one transaction open at a time, but that
transaction may be nested. Nested transactions are flattened. Committing
the nested transaction does not commit the outer transaction; however, errors
in the nested transaction are propagated up to the outermost level, resulting
in the interruption of the entire transaction.
Each transaction is visible only for the thread that started it.
No other threads can add operations, commit or abort the transaction
initiated by another thread. Multiple threads may have transactions open on a
given memory pool at the same time.
Please see the **CAVEATS** section below for known limitations of the
transactional API.
The **pmemobj_tx_stage**() function returns the current *transaction stage*
for a thread. Stages are changed only by the **pmemobj_tx_\***() functions.
Transaction stages are defined as follows:
+ **TX_STAGE_NONE** - no open transaction in this thread
+ **TX_STAGE_WORK** - transaction in progress
+ **TX_STAGE_ONCOMMIT** - successfully committed
+ **TX_STAGE_ONABORT** - starting the transaction failed or transaction aborted
+ **TX_STAGE_FINALLY** - ready for clean up
The **pmemobj_tx_begin**() function starts a new transaction in the current
thread. If called within an open transaction, it starts a nested transaction.
The caller may use the *env* argument to provide a pointer to a
calling environment to be restored in case of transaction abort. This
information must be provided by the caller using the **setjmp**(3) macro.
A new transaction may be started only if the current stage is **TX_STAGE_NONE**
or **TX_STAGE_WORK**. If successful, the *transaction stage* changes to
**TX_STAGE_WORK**. Otherwise, the stage is changed to **TX_STAGE_ONABORT**.
Optionally, a list of parameters for the transaction may be provided.
Each parameter consists of a type followed by a type-specific number
of values. Currently there are 4 types:
+ **TX_PARAM_NONE**, used as a termination marker. No following value.
+ **TX_PARAM_MUTEX**, followed by one value, a pmem-resident PMEMmutex
+ **TX_PARAM_RWLOCK**, followed by one value, a pmem-resident PMEMrwlock
+ **TX_PARAM_CB**, followed by two values: a callback function
of type *pmemobj_tx_callback*, and a void pointer
Using **TX_PARAM_MUTEX** or **TX_PARAM_RWLOCK** causes the specified lock to
be acquired at the beginning of the transaction. **TX_PARAM_RWLOCK** acquires
the lock for writing. It is guaranteed that **pmemobj_tx_begin**() will acquire
all locks prior to successful completion, and they will be held by the current
thread until the outermost transaction is finished. Locks are taken in order
from left to right. To avoid deadlocks, the user is responsible for proper
lock ordering.
**TX_PARAM_CB** registers the specified callback function to be executed at
each transaction stage. For **TX_STAGE_WORK**, the callback is executed prior
to commit. For all other stages, the callback is executed as the first
operation after a stage change. It will also be called after each transaction;
in this case the *stage* parameter will be set to **TX_STAGE_NONE**.
*pmemobj_tx_callback* must be compatible with:
```
void func(PMEMobjpool *pop, enum pobj_tx_stage stage, void *arg)
```
*pop* is a pool identifier used in **pmemobj_tx_begin**(), *stage* is a current
transaction stage and *arg* is the second parameter of **TX_PARAM_CB**.
Without considering transaction nesting, this mechanism can be considered an
alternative method for executing code between stages (instead of
**TX_ONCOMMIT**, **TX_ONABORT**, etc). However, there are 2 significant
differences when nested transactions are used:
+ The registered function is executed only in the outermost transaction,
even if registered in an inner transaction.
+ There can be only one callback in the entire transaction, that is, the
callback cannot be changed in an inner transaction.
Note that **TX_PARAM_CB** does not replace the **TX_ONCOMMIT**, **TX_ONABORT**,
etc. macros. They can be used together: the callback will be executed *before*
a **TX_ONCOMMIT**, **TX_ONABORT**, etc. section.
**TX_PARAM_CB** can be used when the code dealing with transaction stage
changes is shared between multiple users or when it must be executed only
in the outer transaction. For example it can be very useful when the
application must synchronize persistent and transient state.
The **pmemobj_tx_lock**() function acquires the lock *lockp* of type
*lock_type* and adds it to the current transaction. *lock_type* may be
**TX_LOCK_MUTEX** or **TX_LOCK_RWLOCK**; *lockp* must be of type
*PMEMmutex* or *PMEMrwlock*, respectively. If *lock_type* is **TX_LOCK_RWLOCK**
the lock is acquired for writing. If the lock is not successfully
acquired, the function returns an error number. This function must be
called during **TX_STAGE_WORK**.
The **pmemobj_tx_xlock**() function behaves exactly the same as
**pmemobj_tx_lock**() when *flags* equals **POBJ_XLOCK_NO_ABORT**.
When *flags* equals 0 and if the lock is not successfully
acquired, the transaction is aborted.
*flags* is a bitmask of the following values:
+ **POBJ_XLOCK_NO_ABORT** - if the function does not end successfully,
do not abort the transaction.
**pmemobj_tx_abort**() aborts the current transaction and causes a transition
to **TX_STAGE_ONABORT**. If *errnum* is equal to 0, the transaction
error code is set to **ECANCELED**; otherwise, it is set to *errnum*.
This function must be called during **TX_STAGE_WORK**.
The **pmemobj_tx_commit**() function commits the current open transaction and
causes a transition to **TX_STAGE_ONCOMMIT**. If called in the context of the
outermost transaction, all the changes may be considered as durably written
upon successful completion. This function must be called during
**TX_STAGE_WORK**.
The **pmemobj_tx_end**() function performs a cleanup of the current
transaction. If called in the context of the outermost transaction, it releases
all the locks acquired by **pmemobj_tx_begin**() for outer and nested
transactions. If called in the context of a nested transaction, it returns
to the context of the outer transaction in **TX_STAGE_WORK**, without releasing
any locks. The **pmemobj_tx_end**() function can be called during
**TX_STAGE_NONE** if transitioned to this stage using **pmemobj_tx_process**().
If not already in **TX_STAGE_NONE**, it causes the transition to
**TX_STAGE_NONE**. **pmemobj_tx_end** must always be called for each
**pmemobj_tx_begin**(), even if starting the transaction failed. This function
must *not* be called during **TX_STAGE_WORK**.
The **pmemobj_tx_errno**() function returns the error code of the last transaction.
The **pmemobj_tx_process**() function performs the actions associated with the
current stage of the transaction, and makes the transition to the next stage.
It must be called in a transaction. The current stage must always be obtained
by a call to **pmemobj_tx_stage**(). **pmemobj_tx_process**() performs
the following transitions in the transaction stage flow:
+ **TX_STAGE_WORK** -> **TX_STAGE_ONCOMMIT**
+ **TX_STAGE_ONABORT** -> **TX_STAGE_FINALLY**
+ **TX_STAGE_ONCOMMIT** -> **TX_STAGE_FINALLY**
+ **TX_STAGE_FINALLY** -> **TX_STAGE_NONE**
+ **TX_STAGE_NONE** -> **TX_STAGE_NONE**
**pmemobj_tx_process**() must not be called after calling **pmemobj_tx_end**()
for the outermost transaction.
In addition to the above API, **libpmemobj**(7) offers a more intuitive method
of building transactions using the set of macros described below. When using
these macros, the complete transaction flow looks like this:
```c
TX_BEGIN(Pop) {
/* the actual transaction code goes here... */
} TX_ONCOMMIT {
/*
* optional - executed only if the above block
* successfully completes
*/
} TX_ONABORT {
/*
* optional - executed only if starting the transaction fails,
* or if transaction is aborted by an error or a call to
* pmemobj_tx_abort()
*/
} TX_FINALLY {
/*
* optional - if exists, it is executed after
* TX_ONCOMMIT or TX_ONABORT block
*/
} TX_END /* mandatory */
```
```c
TX_BEGIN_PARAM(PMEMobjpool *pop, ...)
TX_BEGIN_CB(PMEMobjpool *pop, cb, arg, ...)
TX_BEGIN(PMEMobjpool *pop)
```
The **TX_BEGIN_PARAM**(), **TX_BEGIN_CB**() and **TX_BEGIN**() macros start
a new transaction in the same way as **pmemobj_tx_begin**(), except that instead
of the environment buffer provided by a caller, they set up the local *jmp_buf*
buffer and use it to catch the transaction abort. The **TX_BEGIN**() macro
starts a transaction without any options. **TX_BEGIN_PARAM** may be used when
there is a need to acquire locks prior to starting a transaction (such as
for a multi-threaded program) or set up a transaction stage callback.
**TX_BEGIN_CB** is just a wrapper around **TX_BEGIN_PARAM** that validates
the callback signature. (For compatibility there is also a **TX_BEGIN_LOCK**
macro, which is an alias for **TX_BEGIN_PARAM**). Each of these macros must be
followed by a block of code with all the operations that are to be performed
atomically.
The **TX_ONABORT** macro starts a block of code that will be executed only
if starting the transaction fails due to an error in **pmemobj_tx_begin**(),
or if the transaction is aborted. This block is optional, but in practice
it should not be omitted. If it is desirable to crash the application when a
transaction aborts and there is no **TX_ONABORT** section, the application can
define the **POBJ_TX_CRASH_ON_NO_ONABORT** macro before inclusion of
**\<libpmemobj.h\>**. This provides a default **TX_ONABORT** section which
just calls **abort**(3).
The **TX_ONCOMMIT** macro starts a block of code that will be executed only
if the transaction is successfully committed, which means that the execution
of code in the **TX_BEGIN**() block has not been interrupted by an error or by
a call to **pmemobj_tx_abort**(). This block is optional.
The **TX_FINALLY** macro starts a block of code that will be executed regardless
of whether the transaction is committed or aborted. This block is optional.
The **TX_END** macro cleans up and closes the transaction started by the
**TX_BEGIN**() / **TX_BEGIN_PARAM**() / **TX_BEGIN_CB**() macros.
It is mandatory to terminate each transaction with this macro. If the transaction
was aborted, *errno* is set appropriately.
## TRANSACTION LOG TUNING ##
From libpmemobj implementation perspective there are two types of operations
in a transaction:
+ **snapshots**, where action must be persisted immediately,
+ **intents**, where action can be persisted at the transaction commit phase
**pmemobj_tx_add_range**(3) and all its variants belong to the **snapshots**
group.
**pmemobj_tx_alloc**(3) (with its variants), **pmemobj_tx_free**(3),
**pmemobj_tx_realloc**(3) (with its variants) and **pmemobj_tx_publish**(3)
belong to the **intents** group. Even though **pmemobj_tx_alloc**() allocates
memory immediately, it modifies only the runtime state and postpones persistent
memory modifications to the commit phase. **pmemobj_tx_free**(3) cannot free
the object immediately, because of possible transaction rollback, so it
postpones both the action and persistent memory modifications to the commit
phase. **pmemobj_tx_realloc**(3) is just a combination of those two.
**pmemobj_tx_publish**(3) postpones reservations and deferred frees to
the commit phase.
Those two types of operations (snapshots and intents) require that libpmemobj
builds a persistent log of operations. Intent log (also known as a "redo log")
is applied on commit and snapshot log (also known as an "undo log")
is applied on abort.
When libpmemobj transaction starts, it's not possible to predict how much
persistent memory space will be needed for those logs. This means that libpmemobj
must internally allocate this space whenever it's needed. This has two downsides:
+ when transaction snapshots a lot of memory or does a lot of allocations,
libpmemobj may need to do many internal allocations, which must be freed when
transaction ends, adding time overhead when big transactions are frequent,
+ transactions can start to fail due to not enough space for logs - this can
be especially problematic for transactions that want to **deallocate**
objects, as those might also fail
To solve both of these problems libpmemobj exposes the following functions:
+ **pmemobj_tx_log_append_buffer**(),
+ **pmemobj_tx_xlog_append_buffer**(),
+ **pmemobj_tx_log_auto_alloc**()
**pmemobj_tx_log_append_buffer**() appends a given range of memory
[*addr*, *addr* + *size*) to the log *type* of the current transaction.
*type* can be one of the two values (with meanings described above):
+ **TX_LOG_TYPE_SNAPSHOT**,
+ **TX_LOG_TYPE_INTENT**
The range of memory **must** belong to the same pool the transaction is on and
**must not** be used by more than one thread at the same time. The latter
condition can be verified with tx.debug.verify_user_buffers ctl (see
**pmemobj_ctl_get**(3)).
The **pmemobj_tx_xlog_append_buffer**() function behaves exactly the same as
**pmemobj_tx_log_append_buffer**() when *flags* equals zero.
*flags* is a bitmask of the following values:
+ **POBJ_XLOG_APPEND_BUFFER_NO_ABORT** - if the function does not end successfully,
do not abort the transaction.
**pmemobj_tx_log_snapshots_max_size** calculates the **maximum** size of
a buffer which will be able to hold *nsizes* snapshots, each of size *sizes[i]*.
Application should not expect this function to return the same value between
restarts. In future versions of libpmemobj this function can return smaller
(because of better accuracy or space optimizations) or higher (because
of higher alignment required for better performance) value. This function
is independent of transaction stage and can be called both inside and outside
of transaction. If the returned value S is greater than
**PMEMOBJ_MAX_ALLOC_SIZE**, the buffer should be split into N chunks of size
**PMEMOBJ_MAX_ALLOC_SIZE**, where N is equal to (S / **PMEMOBJ_MAX_ALLOC_SIZE**)
(rounded down) and the last chunk of size (S - (N * **PMEMOBJ_MAX_ALLOC_SIZE**)).
**pmemobj_tx_log_intents_max_size** calculates the **maximum** size of
a buffer which will be able to hold *nintents* intents.
Just like with **pmemobj_tx_log_snapshots_max_size**, application should not
expect this function to return the same value between restarts, for the same
reasons. This function is independent of transaction stage and can be called
both inside and outside of transaction.
**pmemobj_tx_log_auto_alloc**() disables (*on_off* set to 0) or enables
(*on_off* set to 1) automatic allocation of internal logs of given *type*.
It can be used to verify that the buffer set with
**pmemobj_tx_log_append_buffer**() is big enough to hold the log, without
reaching out-of-space scenario.
The **pmemobj_tx_set_user_data**() function associates custom volatile state,
represented by pointer *data*, with the current transaction. This state can
later be retrieved using **pmemobj_tx_get_user_data**() function.
If **pmemobj_tx_set_user_data**() was not called for a current transaction,
**pmemobj_tx_get_user_data**() will return NULL. These functions must be called
during **TX_STAGE_WORK** or **TX_STAGE_ONABORT** or **TX_STAGE_ONCOMMIT** or
**TX_STAGE_FINALLY**.
**pmemobj_tx_set_failure_behavior**() specifies what should happen in case of an error
within the transaction. It only affects functions which take a NO_ABORT flag.
If **pmemobj_tx_set_failure_behavior**() is called with POBJ_TX_FAILURE_RETURN a NO_ABORT
flag is implicitly passed to all functions which accept this flag. If called
with POBJ_TX_FAILURE_ABORT then all functions abort the transaction (unless NO_ABORT
flag is passed explicitly). This setting is inherited by inner transactions. It does
not affect any of the outer transactions. Aborting on failure is the default behavior.
**pmemobj_tx_get_failure_behavior**() returns failure behavior for the current transaction.
Both **pmemobj_tx_set_failure_behavior**() and **pmemobj_tx_get_failure_behavior**()
must be called during **TX_STAGE_WORK**.
# RETURN VALUE #
The **pmemobj_tx_stage**() function returns the current transaction stage
for a thread.
On success, **pmemobj_tx_begin**() returns 0. Otherwise, an error number is
returned.
The **pmemobj_tx_begin**() and **pmemobj_tx_lock**() functions return zero
if *lockp* is successfully added to the transaction. Otherwise, an error number
is returned.
The **pmemobj_tx_xlock**() function returns zero if *lockp* is successfully
added to the transaction. Otherwise, the error number is returned, **errno** is set
and when flags do not contain **POBJ_XLOCK_NO_ABORT**, the transaction is aborted.
The **pmemobj_tx_abort**() and **pmemobj_tx_commit**() functions return no value.
The **pmemobj_tx_end**() function returns 0 if the transaction was successful.
Otherwise it returns the error code set by **pmemobj_tx_abort**().
Note that **pmemobj_tx_abort**() can be called internally by the library.
The **pmemobj_tx_errno**() function returns the error code of the last transaction.
The **pmemobj_tx_process**() function returns no value.
On success, **pmemobj_tx_log_append_buffer**() returns 0. Otherwise,
the stage is changed to **TX_STAGE_ONABORT**, **errno** is set appropriately
and transaction is aborted.
On success, **pmemobj_tx_xlog_append_buffer**() returns 0. Otherwise,
the error number is returned, **errno** is set and when flags do not contain
**POBJ_XLOG_APPEND_BUFFER_NO_ABORT**, the transaction is aborted.
On success, **pmemobj_tx_log_auto_alloc**() returns 0. Otherwise,
the transaction is aborted and an error number is returned.
On success, **pmemobj_tx_log_snapshots_max_size**() returns size of the buffer.
On failure it returns *SIZE_MAX* and sets *errno* appropriately.
On success, **pmemobj_tx_log_intents_max_size**() returns size of the buffer.
On failure it returns *SIZE_MAX* and sets *errno* appropriately.
# CAVEATS #
Transaction flow control is governed by the **setjmp**(3) and **longjmp**(3)
macros, and they are used in both the macro and function flavors of the API.
The transaction will longjmp on transaction abort. This has one major drawback,
which is described in the ISO C standard subsection 7.13.2.1. It says that
**the values of objects of automatic storage duration that are local to the
function containing the setjmp invocation that do not have volatile-qualified
type and have been changed between the setjmp invocation and longjmp call are
indeterminate.**
The following example illustrates the issue described above.
```c
int *bad_example_1 = (int *)0xBAADF00D;
int *bad_example_2 = (int *)0xBAADF00D;
int *bad_example_3 = (int *)0xBAADF00D;
int * volatile good_example = (int *)0xBAADF00D;
TX_BEGIN(pop) {
bad_example_1 = malloc(sizeof(int));
bad_example_2 = malloc(sizeof(int));
bad_example_3 = malloc(sizeof(int));
good_example = malloc(sizeof(int));
/* manual or library abort called here */
pmemobj_tx_abort(EINVAL);
} TX_ONCOMMIT {
/*
* This section is longjmp-safe
*/
} TX_ONABORT {
/*
* This section is not longjmp-safe
*/
free(good_example); /* OK */
free(bad_example_1); /* undefined behavior */
} TX_FINALLY {
/*
* This section is not longjmp-safe on transaction abort only
*/
free(bad_example_2); /* undefined behavior */
} TX_END
free(bad_example_3); /* undefined behavior */
```
Objects which are not volatile-qualified, are of automatic storage duration
and have been changed between the invocations of **setjmp**(3) and
**longjmp**(3) (that also means within the work section of the transaction
after **TX_BEGIN**()) should not be used after a transaction abort, or should
be used with utmost care. This also includes code after the **TX_END** macro.
**libpmemobj**(7) is not cancellation-safe. The pool will never be corrupted
because of a canceled thread, but other threads may stall waiting on locks
taken by that thread. If the application wants to use **pthread_cancel**(3),
it must disable cancellation before calling any **libpmemobj**(7) APIs (see
**pthread_setcancelstate**(3) with **PTHREAD_CANCEL_DISABLE**), and re-enable
it afterwards. Deferring cancellation (**pthread_setcanceltype**(3) with
**PTHREAD_CANCEL_DEFERRED**) is not safe enough, because **libpmemobj**(7)
internally may call functions that are specified as cancellation points in POSIX.
**libpmemobj**(7) relies on the library destructor being called from the main
thread. For this reason, all functions that might trigger destruction (e.g.
**dlclose**(3)) should be called in the main thread. Otherwise some of the
resources associated with that thread might not be cleaned up properly.
# SEE ALSO #
**dlclose**(3), **longjmp**(3), **pmemobj_tx_add_range**(3),
**pmemobj_tx_alloc**(3), **pthread_setcancelstate**(3),
**pthread_setcanceltype**(3), **setjmp**(3),
**libpmemobj**(7) and **<https://pmem.io>**
| 24,485 | 43.682482 | 100 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/libpmemobj/pmemobj_memcpy_persist.3.md
|
---
layout: manual
Content-Style: 'text/css'
title: _MP(PMEMOBJ_MEMCPY_PERSIST, 3)
collection: libpmemobj
header: PMDK
date: pmemobj API version 2.3
...
[comment]: <> (SPDX-License-Identifier: BSD-3-Clause)
[comment]: <> (Copyright 2017-2018, Intel Corporation)
[comment]: <> (pmemobj_memcpy_persist.3 -- man page for Low-level memory manipulation)
[NAME](#name)<br />
[SYNOPSIS](#synopsis)<br />
[DESCRIPTION](#description)<br />
[RETURN VALUE](#return-value)<br />
[EXAMPLES](#examples)<br />
[SEE ALSO](#see-also)<br />
# NAME #
**pmemobj_persist**(), **pmemobj_xpersist**(), **pmemobj_flush**(),
**pmemobj_xflush**(), **pmemobj_drain**(), **pmemobj_memcpy**(),
**pmemobj_memmove**(), **pmemobj_memset**(), **pmemobj_memcpy_persist**(),
**pmemobj_memset_persist**() - low-level memory manipulation functions
# SYNOPSIS #
```c
#include <libpmemobj.h>
void pmemobj_persist(PMEMobjpool *pop, const void *addr,
size_t len);
void pmemobj_flush(PMEMobjpool *pop, const void *addr,
size_t len);
void pmemobj_drain(PMEMobjpool *pop);
int pmemobj_xpersist(PMEMobjpool *pop, const void *addr,
size_t len, unsigned flags);
int pmemobj_xflush(PMEMobjpool *pop, const void *addr,
size_t len, unsigned flags);
void *pmemobj_memcpy(PMEMobjpool *pop, void *dest,
const void *src, size_t len, unsigned flags);
void *pmemobj_memmove(PMEMobjpool *pop, void *dest,
const void *src, size_t len, unsigned flags);
void *pmemobj_memset(PMEMobjpool *pop, void *dest,
int c, size_t len, unsigned flags);
void *pmemobj_memcpy_persist(PMEMobjpool *pop, void *dest,
const void *src, size_t len);
void *pmemobj_memset_persist(PMEMobjpool *pop, void *dest,
int c, size_t len);
```
# DESCRIPTION #
The **libpmemobj**-specific low-level memory manipulation functions described
here leverage the knowledge of the additional configuration options available
for **libpmemobj**(7) pools, such as replication. They also take advantage of
the type of storage behind the pool and use appropriate flush/drain functions.
It is advised to use these functions in conjunction with **libpmemobj**(7)
objects rather than using low-level memory manipulation functions from
**libpmem**.
**pmemobj_persist**() forces any changes in the range \[*addr*, *addr*+*len*)
to be stored durably in persistent memory. Internally this may call either
**pmem_msync**(3) or **pmem_persist**(3). There are no alignment restrictions
on the range described by *addr* and *len*, but **pmemobj_persist**() may
expand the range as necessary to meet platform alignment requirements.
>WARNING:
Like **msync**(2), there is nothing atomic or transactional about this call.
Any unwritten stores in the given range will be written, but some stores may
have already been written by virtue of normal cache eviction/replacement
policies. Correctly written code must not depend on stores waiting until
**pmemobj_persist**() is called to become persistent - they can become
persistent at any time before **pmemobj_persist**() is called.
The **pmemobj_flush**() and **pmemobj_drain**() functions provide partial
versions of the **pmemobj_persist**() function described above.
These functions allow advanced programs to create their own variations of
**pmemobj_persist**().
For example, a program that needs to flush several discontiguous ranges can
call **pmemobj_flush**() for each range and then follow up by calling
**pmemobj_drain**() once. For more information on partial flushing operations,
see **pmem_flush**(3).
**pmemobj_xpersist**() is a version of **pmemobj_persist**() function with
additional *flags* argument.
It supports only the **PMEMOBJ_F_RELAXED** flag.
This flag indicates that memory transfer operation does
not require 8-byte atomicity guarantees.
**pmemobj_xflush**() is a version of **pmemobj_flush**() function with
additional *flags* argument.
It supports only the **PMEMOBJ_F_RELAXED** flag.
The **pmemobj_memmove**(), **pmemobj_memcpy**() and **pmemobj_memset**() functions
provide the same memory copying as their namesakes **memmove**(3), **memcpy**(3),
and **memset**(3), and ensure that the result has been flushed to persistence
before returning (unless **PMEMOBJ_MEM_NOFLUSH** flag was used).
Valid flags for those functions:
+ **PMEMOBJ_F_RELAXED** - This flag indicates that memory transfer operation
does not require 8-byte atomicity guarantees.
+ **PMEMOBJ_F_MEM_NOFLUSH** - Don't flush anything.
This implies **PMEMOBJ_F_MEM_NODRAIN**.
Using this flag only makes sense when it's followed by any function that
flushes data.
The remaining flags say *how* the operation should be done, and are merely hints.
+ **PMEMOBJ_F_MEM_NONTEMPORAL** - Use non-temporal instructions.
This flag is mutually exclusive with **PMEMOBJ_F_MEM_TEMPORAL**.
On x86\_64 this flag is mutually exclusive with **PMEMOBJ_F_MEM_NOFLUSH**.
+ **PMEMOBJ_F_MEM_TEMPORAL** - Use temporal instructions.
This flag is mutually exclusive with **PMEMOBJ_F_MEM_NONTEMPORAL**.
+ **PMEMOBJ_F_MEM_WC** - Use write combining mode.
This flag is mutually exclusive with **PMEMOBJ_F_MEM_WB**.
On x86\_64 this is an alias for **PMEMOBJ_F_MEM_NONTEMPORAL**.
On x86\_64 this flag is mutually exclusive with **PMEMOBJ_F_MEM_NOFLUSH**.
+ **PMEMOBJ_F_MEM_WB** - Use write back mode.
This flag is mutually exclusive with **PMEMOBJ_F_MEM_WC**.
On x86\_64 this is an alias for **PMEMOBJ_F_MEM_TEMPORAL**.
**pmemobj_memcpy_persist**() is an alias for **pmemobj_memcpy**() with flags equal to 0.
**pmemobj_memset_persist**() is an alias for **pmemobj_memset**() with flags equal to 0.
# RETURN VALUE #
**pmemobj_memmove**(), **pmemobj_memcpy**(), **pmemobj_memset**(),
**pmemobj_memcpy_persist**() and **pmemobj_memset_persist**() return destination
buffer.
**pmemobj_persist**(), **pmemobj_flush**() and **pmemobj_drain**()
do not return any value.
**pmemobj_xpersist**() and **pmemobj_xflush**() return a non-zero value and
set *errno* to **EINVAL** only if unsupported flags have been provided.
# EXAMPLES #
The following code is functionally equivalent to
**pmemobj_memcpy_persist**():
```c
void *
pmemobj_memcpy_persist(PMEMobjpool *pop, void *dest,
const void *src, size_t len)
{
void *retval = memcpy(dest, src, len);
pmemobj_persist(pop, dest, len);
return retval;
}
```
**pmemobj_persist**() can be thought of as this:
```c
void
pmemobj_persist(PMEMobjpool *pop, const void *addr, size_t len)
{
/* flush the processor caches */
pmemobj_flush(pop, addr, len);
/* wait for any pmem stores to drain from HW buffers */
pmemobj_drain(pop);
}
```
# SEE ALSO #
**memcpy**(3), **memset**(3), **pmem_msync**(3),
**pmem_persist**(3), **libpmem**(7) **libpmemobj**(7)
and **<https://pmem.io>**
| 6,675 | 34.892473 | 88 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/libpmemobj/pobj_list_head.3.md
|
---
layout: manual
Content-Style: 'text/css'
title: _MP(POBJ_LIST_HEAD, 3)
collection: libpmemobj
header: PMDK
date: pmemobj API version 2.3
...
[comment]: <> (SPDX-License-Identifier: BSD-3-Clause)
[comment]: <> (Copyright 2017-2018, Intel Corporation)
[comment]: <> (pobj_list_head.3 -- man page for type-safe non-transactional persistent atomic lists)
[NAME](#name)<br />
[SYNOPSIS](#synopsis)<br />
[DESCRIPTION](#description)<br />
[SEE ALSO](#see-also)<br />
# NAME #
**POBJ_LIST_HEAD**(),
**POBJ_LIST_ENTRY**(),
**POBJ_LIST_FIRST**(),
**POBJ_LIST_LAST**(),
**POBJ_LIST_EMPTY**(),
**POBJ_LIST_NEXT**(),
**POBJ_LIST_PREV**(),
**POBJ_LIST_FOREACH**(),
**POBJ_LIST_FOREACH_REVERSE**(),
**POBJ_LIST_INSERT_HEAD**(),
**POBJ_LIST_INSERT_TAIL**(),
**POBJ_LIST_INSERT_AFTER**(),
**POBJ_LIST_INSERT_BEFORE**(),
**POBJ_LIST_INSERT_NEW_HEAD**(),
**POBJ_LIST_INSERT_NEW_TAIL**(),
**POBJ_LIST_INSERT_NEW_AFTER**(),
**POBJ_LIST_INSERT_NEW_BEFORE**(),
**POBJ_LIST_REMOVE**(),
**POBJ_LIST_REMOVE_FREE**(),
**POBJ_LIST_MOVE_ELEMENT_HEAD**(),
**POBJ_LIST_MOVE_ELEMENT_TAIL**(),
**POBJ_LIST_MOVE_ELEMENT_AFTER**(),
**POBJ_LIST_MOVE_ELEMENT_BEFORE**()
- type-safe non-transactional persistent atomic lists
# SYNOPSIS #
```c
#include <libpmemobj.h>
POBJ_LIST_HEAD(HEADNAME, TYPE)
POBJ_LIST_ENTRY(TYPE)
POBJ_LIST_FIRST(POBJ_LIST_HEAD *head)
POBJ_LIST_LAST(POBJ_LIST_HEAD *head, POBJ_LIST_ENTRY FIELD)
POBJ_LIST_EMPTY(POBJ_LIST_HEAD *head)
POBJ_LIST_NEXT(TOID elm, POBJ_LIST_ENTRY FIELD)
POBJ_LIST_PREV(TOID elm, POBJ_LIST_ENTRY FIELD)
POBJ_LIST_FOREACH(TOID var, POBJ_LIST_HEAD *head, POBJ_LIST_ENTRY FIELD)
POBJ_LIST_FOREACH_REVERSE(TOID var, POBJ_LIST_HEAD *head, POBJ_LIST_ENTRY FIELD)
POBJ_LIST_INSERT_HEAD(PMEMobjpool *pop, POBJ_LIST_HEAD *head,
TOID elm, POBJ_LIST_ENTRY FIELD)
POBJ_LIST_INSERT_TAIL(PMEMobjpool *pop, POBJ_LIST_HEAD *head,
TOID elm, POBJ_LIST_ENTRY FIELD)
POBJ_LIST_INSERT_AFTER(PMEMobjpool *pop, POBJ_LIST_HEAD *head,
TOID listelm, TOID elm, POBJ_LIST_ENTRY FIELD)
POBJ_LIST_INSERT_BEFORE(PMEMobjpool *pop, POBJ_LIST_HEAD *head,
TOID listelm, TOID elm, POBJ_LIST_ENTRY FIELD)
POBJ_LIST_INSERT_NEW_HEAD(PMEMobjpool *pop, POBJ_LIST_HEAD *head,
POBJ_LIST_ENTRY FIELD, size_t size,
pmemobj_constr constructor, void *arg)
POBJ_LIST_INSERT_NEW_TAIL(PMEMobjpool *pop, POBJ_LIST_HEAD *head,
POBJ_LIST_ENTRY FIELD, size_t size,
pmemobj_constr constructor, void *arg)
POBJ_LIST_INSERT_NEW_AFTER(PMEMobjpool *pop, POBJ_LIST_HEAD *head,
TOID listelm, POBJ_LIST_ENTRY FIELD, size_t size,
pmemobj_constr constructor, void *arg)
POBJ_LIST_INSERT_NEW_BEFORE(PMEMobjpool *pop, POBJ_LIST_HEAD *head,
TOID listelm, POBJ_LIST_ENTRY FIELD, size_t size,
pmemobj_constr constructor, void *arg)
POBJ_LIST_REMOVE(PMEMobjpool *pop, POBJ_LIST_HEAD *head,
TOID elm, POBJ_LIST_ENTRY FIELD)
POBJ_LIST_REMOVE_FREE(PMEMobjpool *pop, POBJ_LIST_HEAD *head,
TOID elm, POBJ_LIST_ENTRY FIELD)
POBJ_LIST_MOVE_ELEMENT_HEAD(PMEMobjpool *pop, POBJ_LIST_HEAD *head,
POBJ_LIST_HEAD *head_new, TOID elm, POBJ_LIST_ENTRY FIELD,
POBJ_LIST_ENTRY field_new)
POBJ_LIST_MOVE_ELEMENT_TAIL(PMEMobjpool *pop, POBJ_LIST_HEAD *head,
POBJ_LIST_HEAD *head_new, TOID elm, POBJ_LIST_ENTRY FIELD,
POBJ_LIST_ENTRY field_new)
POBJ_LIST_MOVE_ELEMENT_AFTER(PMEMobjpool *pop, POBJ_LIST_HEAD *head,
POBJ_LIST_HEAD *head_new, TOID listelm, TOID elm,
POBJ_LIST_ENTRY FIELD, POBJ_LIST_ENTRY field_new)
POBJ_LIST_MOVE_ELEMENT_BEFORE(PMEMobjpool *pop, POBJ_LIST_HEAD *head,
POBJ_LIST_HEAD *head_new, TOID listelm, TOID elm,
POBJ_LIST_ENTRY FIELD, POBJ_LIST_ENTRY field_new)
```
# DESCRIPTION #
The following macros define and operate on a type-safe persistent
atomic circular doubly linked list data structure that consist of
a set of persistent objects of a well-known type. Unlike the functions
described in the previous section, these macros provide type enforcement
by requiring declaration of type of the objects stored in given list,
and not allowing mixing objects of different types in a single list.
The functionality and semantics of those macros is similar to circular
queues defined in **queue**(3).
The majority of the macros must specify the handle to the memory pool *pop* and
the name of the *field* in the user-defined structure, which must be of type
*POBJ_LIST_ENTRY* and is used to connect the elements in the list.
A list is headed by a structure defined by the **POBJ_LIST_HEAD**() macro.
This structure contains an object handle of the first element on the list.
The elements are doubly linked so that an arbitrary element can be removed
without a need to traverse the list. New elements can be added to the list
before or after an existing element, at the head of the list, or at the end
of the list. A list may be traversed in either direction. A *POBJ_LIST_HEAD*
structure is declared as follows:
```c
#define POBJ_LIST_HEAD(HEADNAME, TYPE)
struct HEADNAME
{
TOID(TYPE) pe_first;
PMEMmutex lock;
};
```
In the macro definitions, *TYPE* is the name of a user-defined structure,
that must contain a field of type *POBJ_LIST_ENTRY*. The argument *HEADNAME*
is the name of a user-defined structure that must be declared using the macro
*POBJ_LIST_HEAD*. See the examples below for further explanation of how these
macros are used.
The macro *POBJ_LIST_ENTRY* declares a structure that connects the elements
in the list.
```c
#define POBJ_LIST_ENTRY(TYPE)
struct
{
TOID(TYPE) pe_next;
TOID(TYPE) pe_prev;
};
```
The macro **POBJ_LIST_FIRST**() returns the first element on the list
referenced by *head*. If the list is empty **OID_NULL** is returned.
The macro **POBJ_LIST_LAST**() returns the last element on the list
referenced by *head*. If the list is empty **OID_NULL** is returned.
The macro **POBJ_LIST_EMPTY**() evaluates to 1 if the list referenced
by *head* is empty. Otherwise, 0 is returned.
The macro **POBJ_LIST_NEXT**() returns the element next to the element *elm*.
The macro **POBJ_LIST_PREV**() returns the element preceding the element *elm*.
The macro **POBJ_LIST_FOREACH**() traverses the list referenced by *head*
assigning a handle to each element in turn to *var* variable.
The macro **POBJ_LIST_FOREACH_REVERSE**() traverses the list referenced
by *head* in reverse order, assigning a handle to each element in turn to
*var* variable. The *field* argument is the name of the field of type
*POBJ_LIST_ENTRY* in the element structure.
The macro **POBJ_LIST_INSERT_HEAD**() inserts the element *elm* at the head
of the list referenced by *head*.
The macro **POBJ_LIST_INSERT_TAIL**() inserts the element *elm* at the end
of the list referenced by *head*.
The macro **POBJ_LIST_INSERT_AFTER**() inserts the element *elm* into
the list referenced by *head* after the element *listelm*. If *listelm*
value is **TOID_NULL**, the object is inserted at the end of the list.
The macro **POBJ_LIST_INSERT_BEFORE**() inserts the element *elm* into
the list referenced by *head* before the element *listelm*. If *listelm*
value is **TOID_NULL**, the object is inserted at the head of the list.
The macro **POBJ_LIST_INSERT_NEW_HEAD**() atomically allocates a new
object of size *size* and inserts it at the head of the list referenced
by *head*. The newly allocated object is also added to the internal object
container associated with a type number which is retrieved from the typed
*OID* of the first element on list.
The macro **POBJ_LIST_INSERT_NEW_TAIL**() atomically allocates a new
object of size *size* and inserts it at the tail of the list referenced
by *head*. The newly allocated object is also added to the internal object
container associated with a type number which is retrieved from
the typed *OID* of the first element on list.
The macro **POBJ_LIST_INSERT_NEW_AFTER**() atomically allocates a new
object of size *size* and inserts it into the list referenced by *head*
after the element *listelm*. If *listelm* value is **TOID_NULL**,
the object is inserted at the end of the list. The newly allocated object
is also added to the internal object container associated with a type
number which is retrieved from the typed *OID* of the first element on list.
The macro **POBJ_LIST_INSERT_NEW_BEFORE**() atomically allocates a new
object of size *size* and inserts it into the list referenced by *head*
before the element *listelm*. If *listelm* value is **TOID_NULL**,
the object is inserted at the head of the list. The newly allocated object
is also added to the internal object container associated with a type
number which is retrieved from the typed *OID* of the first element on list.
The macro **POBJ_LIST_REMOVE**() removes the element *elm* from the list
referenced by *head*.
The macro **POBJ_LIST_REMOVE_FREE**() removes the element *elm* from the list
referenced by *head* and frees the memory space represented by this element.
The macro **POBJ_LIST_MOVE_ELEMENT_HEAD**() moves the element *elm* from the
list referenced by *head* to the head of the list *head_new*. The *field* and
*field_new* arguments are the names of the fields of type *POBJ_LIST_ENTRY* in
the element structure that are used to connect the elements in both lists.
The macro **POBJ_LIST_MOVE_ELEMENT_TAIL**() moves the element *elm* from the
list referenced by *head* to the end of the list *head_new*. The *field* and
*field_new* arguments are the names of the fields of type *POBJ_LIST_ENTRY* in
the element structure that are used to connect the elements in both lists.
The macro **POBJ_LIST_MOVE_ELEMENT_AFTER**() atomically removes the element *elm*
from the list referenced by *head* and inserts it into the list referenced by
*head_new* after the element *listelm*. If *listelm* value is *TOID_NULL*,
the object is inserted at the end of the list. The *field* and *field_new*
arguments are the names of the fields of type *POBJ_LIST_ENTRY* in the element
structure that are used to connect the elements in both lists.
The macro **POBJ_LIST_MOVE_ELEMENT_BEFORE**() atomically removes the element
*elm* from the list referenced by *head* and inserts it into the list referenced
by *head_new* before the element *listelm*. If *listelm* value is **TOID_NULL**,
the object is inserted at the head of the list. The *field* and *field_new*
arguments are the names of the fields of type *POBJ_LIST_ENTRY* in the element
structure that are used to connect the elements in both lists.
# SEE ALSO #
**queue**(3), **libpmemobj**(7) and **<https://pmem.io>**
| 10,408 | 40.305556 | 100 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/libpmemobj/pmemobj_action.3.md
|
---
layout: manual
Content-Style: 'text/css'
title: _MP(PMEMOBJ_ACTION, 3)
collection: libpmemobj
header: PMDK
date: pmemobj API version 2.3
...
[comment]: <> (SPDX-License-Identifier: BSD-3-Clause)
[comment]: <> (Copyright 2017-2019, Intel Corporation)
[comment]: <> (pmemobj_action.3 -- Delayed atomicity actions)
[NAME](#name)<br />
[SYNOPSIS](#synopsis)<br />
[DESCRIPTION](#description)<br />
[RETURN VALUE](#return-value)<br />
[EXAMPLES](#examples)<br />
[SEE ALSO](#see-also)<br />
# NAME #
**pmemobj_reserve**(), **pmemobj_xreserve**(), **pmemobj_defer_free**(),
**pmemobj_set_value**(), **pmemobj_publish**(), **pmemobj_tx_publish**(),
**pmemobj_tx_xpublish**(), **pmemobj_cancel**(), **POBJ_RESERVE_NEW**(),
**POBJ_RESERVE_ALLOC**(), **POBJ_XRESERVE_NEW**(),**POBJ_XRESERVE_ALLOC**()
- Delayed atomicity actions (EXPERIMENTAL)
# SYNOPSIS #
```c
#include <libpmemobj.h>
PMEMoid pmemobj_reserve(PMEMobjpool *pop, struct pobj_action *act,
size_t size, uint64_t type_num); (EXPERIMENTAL)
PMEMoid pmemobj_xreserve(PMEMobjpool *pop, struct pobj_action *act,
size_t size, uint64_t type_num, uint64_t flags); (EXPERIMENTAL)
void pmemobj_defer_free(PMEMobjpool *pop, PMEMoid oid, struct pobj_action *act);
void pmemobj_set_value(PMEMobjpool *pop, struct pobj_action *act,
uint64_t *ptr, uint64_t value); (EXPERIMENTAL)
int pmemobj_publish(PMEMobjpool *pop, struct pobj_action *actv,
size_t actvcnt); (EXPERIMENTAL)
int pmemobj_tx_publish(struct pobj_action *actv, size_t actvcnt); (EXPERIMENTAL)
int pmemobj_tx_xpublish(struct pobj_action *actv, size_t actvcnt, uint64_t flags); (EXPERIMENTAL)
void pmemobj_cancel(PMEMobjpool *pop, struct pobj_action *actv,
size_t actvcnt); (EXPERIMENTAL)
POBJ_RESERVE_NEW(pop, t, act) (EXPERIMENTAL)
POBJ_RESERVE_ALLOC(pop, t, size, act) (EXPERIMENTAL)
POBJ_XRESERVE_NEW(pop, t, act, flags) (EXPERIMENTAL)
POBJ_XRESERVE_ALLOC(pop, t, size, act, flags) (EXPERIMENTAL)
```
# DESCRIPTION #
All of the functions described so far have an immediate effect on the persistent
state of the pool, and as such, the cost of maintaining fail-safety is paid
outright and, most importantly, in the calling thread. This behavior makes
implementing algorithms involving relaxed consistency guarantees difficult, if
not outright impossible.
The following set of functions introduce a mechanism that allows one to delay
the persistent publication of a set of prepared actions to an arbitrary moment
in time of the execution of a program.
The publication is fail-safe atomic in the scope of the entire collection of
actions. If a program exits without publishing the actions, or the actions are
canceled, any resources reserved by those actions are released and placed back in
the pool.
A single action is represented by a single `struct pobj_action`. Functions that
create actions take that structure by pointer, whereas functions that publish
actions take array of actions and the size of the array. The actions can be
created, and published, from different threads.
When creating actions, the *act* argument must be non-NULL and point to a
`struct pobj_action`, the structure will be populated by the function and must
not be modified or deallocated until after publishing.
The **pmemobj_reserve**() function performs a transient reservation of an object.
It behaves similarly to **pmemobj_alloc**(3), but performs no modification to the
persistent state.
The object returned by this function can be freely modified without worrying
about fail-safe atomicity until the object has been published. Any modifications
of the object must be manually persisted, just like in the case of the atomic API.
**pmemobj_xreserve**() is equivalent to **pmemobj_reserve**(), but with an
additional *flags* argument that is a bitmask of the following values:
+ **POBJ_XALLOC_ZERO** - zero the allocated object (and persist it)
+ **POBJ_CLASS_ID(class_id)** - allocate an object from the allocation class
*class_id*. The class id cannot be 0.
+ **POBJ_ARENA_ID(arena_id)** - allocate an object from the arena specified by
*arena_id*. The arena must exist, otherwise, the behavior is undefined.
If *arena_id* is equal 0, then arena assigned to the current thread will be used.
**pmemobj_defer_free**() function creates a deferred free action, meaning that
the provided object will be freed when the action is published. Calling this
function with a NULL OID is invalid and causes undefined behavior.
The **pmemobj_set_value**() function prepares an action that, once published, will
modify the memory location pointed to by *ptr* to *value*.
The **pmemobj_publish**() function publishes the provided set of actions. The
publication is fail-safe atomic. Once done, the persistent state will reflect
the changes contained in the actions.
The **pmemobj_tx_publish**() function moves the provided actions to the scope of
the transaction in which it is called. Only object reservations are supported
in transactional publish. Once done, the reserved objects will follow normal
transactional semantics. Can only be called during *TX_STAGE_WORK*.
The **pmemobj_tx_xpublish**() function behaves exactly the same as
**pmemobj_tx_publish**() when *flags* equals zero. *flags* is a
bitmask of the following values:
+ **POBJ_XPUBLISH_NO_ABORT** - if the function does not end successfully,
do not abort the transaction.
The **pmemobj_cancel**() function releases any resources held by the provided
set of actions and invalidates all actions.
The **POBJ_RESERVE_NEW** macro is a typed variant of **pmemobj_reserve**.
The size of the reservation is determined from the provided type *t*.
The **POBJ_RESERVE_ALLOC** macro is a typed variant of **pmemobj_reserve**.
The *size* of the reservation is user-provided.
The **POBJ_XRESERVE_NEW** and the **POBJ_XRESERVE_ALLOC** macros are equivalent
to **POBJ_RESERVE_NEW** and the **POBJ_RESERVE_ALLOC**, but with an additional
*flags* argument defined for **pmemobj_xreserve**().
# EXAMPLES #
The following code shows atomic append of two objects into a singly linked list.
```c
struct list_node {
int value;
PMEMoid next;
};
/* statically allocate the array of actions */
struct pobj_action actv[4];
/* reserve, populate and persist the first object */
PMEMoid tail = pmemobj_reserve(pop, &actv[0], sizeof(struct list_node), 0);
if (TOID_IS_NULL(tail))
return -1;
D_RW(tail)->value = 1;
D_RW(tail)->next = OID_NULL;
pmemobj_persist(pop, D_RW(tail), sizeof(struct list_node));
/* reserve, populate and persist the second object */
PMEMoid head = pmemobj_reserve(pop, &actv[1], sizeof(struct list_node), 0);
if (TOID_IS_NULL(head))
return -1;
D_RW(head)->value = 2;
D_RW(head)->next = tail;
pmemobj_persist(pop, D_RW(head), sizeof(struct list_node));
/* create actions to set the PMEMoid to the new values */
pmemobj_set_value(pop, &actv[2], &D_RO(root)->head.pool_uuid_lo, head.pool_uuid_lo);
pmemobj_set_value(pop, &actv[3], &D_RO(root)->head.off, head.off);
/* atomically publish the above actions */
pmemobj_publish(pop, actv, 4);
```
# RETURN VALUE #
On success, **pmemobj_reserve**() functions return a handle to the newly
reserved object. Otherwise an *OID_NULL* is returned.
On success, **pmemobj_tx_publish**() returns 0. Otherwise,
the transaction is aborted, the stage is changed to *TX_STAGE_ONABORT*
and *errno* is set appropriately.
On success, **pmemobj_tx_xpublish**() returns 0. Otherwise, the error number
is returned, **errno** is set and when flags do not contain **POBJ_XPUBLISH_NO_ABORT**,
the transaction is aborted.
On success, **pmemobj_publish**() returns 0. Otherwise, returns -1 and *errno*
is set appropriately.
# SEE ALSO #
**pmemobj_alloc**(3), **pmemobj_tx_alloc**(3), **libpmemobj**(7)
and **<https://pmem.io>**
| 7,718 | 39.413613 | 97 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/libpmemobj/pmemobj_tx_alloc.3.md
|
---
layout: manual
Content-Style: 'text/css'
title: _MP(PMEMOBJ_TX_ALLOC, 3)
collection: libpmemobj
header: PMDK
date: pmemobj API version 2.3
...
[comment]: <> (SPDX-License-Identifier: BSD-3-Clause)
[comment]: <> (Copyright 2017-2019, Intel Corporation)
[comment]: <> (pmemobj_tx_alloc.3 -- man page for transactional object manipulation)
[NAME](#name)<br />
[SYNOPSIS](#synopsis)<br />
[DESCRIPTION](#description)<br />
[RETURN VALUE](#return-value)<br />
[SEE ALSO](#see-also)<br />
# NAME #
**pmemobj_tx_alloc**(), **pmemobj_tx_zalloc**(),
**pmemobj_tx_xalloc**(), **pmemobj_tx_realloc**(),
**pmemobj_tx_zrealloc**(), **pmemobj_tx_strdup**(),
**pmemobj_tx_xstrdup**(), **pmemobj_tx_wcsdup**(),
**pmemobj_tx_xwcsdup**(), **pmemobj_tx_free**(),
**pmemobj_tx_xfree**()
**TX_NEW**(), **TX_ALLOC**(),
**TX_ZNEW**(), **TX_ZALLOC**(),
**TX_XALLOC**(), **TX_REALLOC**(),
**TX_ZREALLOC**(), **TX_STRDUP**(),
**TX_XSTRDUP**(), **TX_WCSDUP**(),
**TX_XWCSDUP**(), **TX_FREE**(),
**TX_XFREE**()
- transactional object manipulation
# SYNOPSIS #
```c
#include <libpmemobj.h>
PMEMoid pmemobj_tx_alloc(size_t size, uint64_t type_num);
PMEMoid pmemobj_tx_zalloc(size_t size, uint64_t type_num);
PMEMoid pmemobj_tx_xalloc(size_t size, uint64_t type_num, uint64_t flags);
PMEMoid pmemobj_tx_realloc(PMEMoid oid, size_t size, uint64_t type_num);
PMEMoid pmemobj_tx_zrealloc(PMEMoid oid, size_t size, uint64_t type_num);
PMEMoid pmemobj_tx_strdup(const char *s, uint64_t type_num);
PMEMoid pmemobj_tx_wcsdup(const wchar_t *s, uint64_t type_num);
int pmemobj_tx_free(PMEMoid oid);
int pmemobj_tx_xfree(PMEMoid oid, uint64_t flags);
TX_NEW(TYPE)
TX_ALLOC(TYPE, size_t size)
TX_ZNEW(TYPE)
TX_ZALLOC(TYPE, size_t size)
TX_XALLOC(TYPE, size_t size, uint64_t flags)
TX_REALLOC(TOID o, size_t size)
TX_ZREALLOC(TOID o, size_t size)
TX_STRDUP(const char *s, uint64_t type_num)
TX_WCSDUP(const wchar_t *s, uint64_t type_num)
TX_FREE(TOID o)
TX_XFREE(TOID o, uint64_t flags)
```
# DESCRIPTION #
The **pmemobj_tx_alloc**() function transactionally allocates a new object of
given *size* and *type_num*. In contrast to the non-transactional allocations,
the objects are added to the internal object containers of given *type_num*
only after the transaction is committed, making the objects visible to the
**POBJ_FOREACH_\***() macros. This function must be called during
**TX_STAGE_WORK**.
The **pmemobj_tx_zalloc**() function transactionally allocates a new zeroed
object of given *size* and *type_num*. This function must be called during
**TX_STAGE_WORK**.
The **pmemobj_tx_xalloc**() function transactionally allocates a new object
of given *size* and *type_num*. The *flags* argument is a bitmask of the
following values:
+ **POBJ_XALLOC_ZERO** - zero the allocated object (equivalent of pmemobj_tx_zalloc)
+ **POBJ_XALLOC_NO_FLUSH** - skip flush on commit
(when application deals with flushing or uses pmemobj_memcpy_persist)
+ **POBJ_CLASS_ID(class_id)** - allocate an object from the allocation
class with id equal to *class_id*
+ **POBJ_ARENA_ID(arena_id)** - allocate an object from the arena specified by
*arena_id*. The arena must exist, otherwise, the behavior is undefined.
If *arena_id* is equal 0, then arena assigned to the current thread will be used.
+ **POBJ_XALLOC_NO_ABORT** - if the function does not end successfully,
do not abort the transaction.
This function must be called during **TX_STAGE_WORK**.
The **pmemobj_tx_realloc**() function transactionally resizes an existing
object to the given *size* and changes its type to *type_num*. If *oid* is
**OID_NULL**, then the call is equivalent to *pmemobj_tx_alloc(pop, size,
type_num)*. If *size* is equal to zero and *oid* is not **OID_NULL**, then
the call is equivalent to *pmemobj_tx_free(oid)*. If the new size is larger
than the old size, the added memory will *not* be initialized. This function
must be called during **TX_STAGE_WORK**.
The **pmemobj_tx_zrealloc**() function transactionally resizes an existing
object to the given *size* and changes its type to *type_num*. If the new
size is larger than the old size, the extended new space is zeroed. This
function must be called during **TX_STAGE_WORK**.
The **pmemobj_tx_strdup**() function transactionally allocates a new object
containing a duplicate of the string *s* and assigns it a type *type_num*.
This function must be called during **TX_STAGE_WORK**.
The **pmemobj_tx_xstrdup**() function behaves exactly the same as
**pmemobj_tx_strdup**() when *flags* equals zero.
The *flags* argument is a bitmask of values described in **pmemobj_tx_xalloc** section.
The **pmemobj_tx_wcsdup**() function transactionally allocates a new object
containing a duplicate of the wide character string *s* and assigns it a type
*type_num*. This function must be called during **TX_STAGE_WORK**.
The **pmemobj_tx_xwcsdup**() function behaves exactly the same as
**pmemobj_tx_wcsdup**() when *flags* equals zero.
The *flags* argument is a bitmask of values described in **pmemobj_tx_xalloc** section.
The **pmemobj_tx_free**() function transactionally frees an existing object
referenced by *oid*. This function must be called during **TX_STAGE_WORK**.
The **pmemobj_tx_xfree**() function behaves exactly the same as
**pmemobj_tx_free**() when *flags* equals zero.
*flags* is a bitmask of the following value:
+ **POBJ_XFREE_NO_ABORT** - if the function does not end successfully,
do not abort the transaction.
This function must be called during **TX_STAGE_WORK**.
The **TX_NEW**() macro transactionally allocates a new object of given *TYPE*
and assigns it a type number read from the typed *OID*. The allocation size is
determined from the size of the user-defined structure *TYPE*. If successful
and called during **TX_STAGE_WORK** it returns a handle to the newly allocated
object. Otherwise, the stage is changed to **TX_STAGE_ONABORT**, **OID_NULL**
is returned, and *errno* is set appropriately.
The **TX_ALLOC**() macro transactionally allocates a new object of given *TYPE*
and assigns it a type number read from the typed *OID*. The allocation size is
passed by *size* parameter. If successful and called during **TX_STAGE_WORK**
it returns a handle to the newly allocated object. Otherwise, the stage is set
to **TX_STAGE_ONABORT**, **OID_NULL** is returned, and *errno* is set
appropriately.
The **TX_ZNEW**() macro transactionally allocates a new zeroed object of given
*TYPE* and assigns it a type number read from the typed *OID*. The allocation
size is determined from the size of the user-defined structure *TYPE*. If
successful and called during **TX_STAGE_WORK** it returns a handle to the newly
allocated object. Otherwise, the stage is changed to **TX_STAGE_ONABORT**, **OID_NULL**
is returned, and *errno* is set appropriately.
The **TX_ZALLOC**() macro transactionally allocates a new zeroed object of
given *TYPE* and assigns it a type number read from the typed *OID*. The
allocation size is passed by *size* argument. If successful and called during
**TX_STAGE_WORK** it returns a handle to the newly allocated object. Otherwise,
the stage is changed to **TX_STAGE_ONABORT**, **OID_NULL** is returned, and
*errno* is set appropriately.
The **TX_XALLOC**() macro transactionally allocates a new object of given
*TYPE* and assigns it a type number read from the typed *OID*. The allocation
size is passed by *size* argument. The *flags* argument is a bitmask of values
described in **pmemobj_tx_xalloc** section. If successful and called during
**TX_STAGE_WORK** it returns a handle to the newly allocated object. Otherwise,
the **OID_NULL** is returned, **errno** is set and when flags do not
contain **POBJ_XALLOC_NO_ABORT**, the transaction is aborted.
The **TX_REALLOC**() macro transactionally resizes an existing object
referenced by a handle *o* to the given *size*. If successful and called during
**TX_STAGE_WORK** it returns a handle to the reallocated object. Otherwise, the
stage is changed to **TX_STAGE_ONABORT**, **OID_NULL** is returned, and *errno*
is set appropriately.
The **TX_ZREALLOC**() macro transactionally resizes an existing object
referenced by a handle *o* to the given *size*. If the new size is larger than
the old size, the extended new space is zeroed. If successful and called during
**TX_STAGE_WORK** it returns a handle to the reallocated object. Otherwise, the
stage is changed to **TX_STAGE_ONABORT**, **OID_NULL** is returned, and *errno*
is set appropriately.
The **TX_STRDUP**() macro transactionally allocates a new object containing a
duplicate of the string *s* and assigns it type *type_num*. If successful
and called during **TX_STAGE_WORK** it returns a handle to the newly allocated
object. Otherwise, the stage is changed to **TX_STAGE_ONABORT**, **OID_NULL**
is returned, and *errno* is set appropriately.
The **TX_XSTRDUP**() macro transactionally allocates a new object containing a
duplicate of the string *s* and assigns it type *type_num*. The *flags* argument
is a bitmask of values described in **pmemobj_tx_xalloc** section. If successful and
called during **TX_STAGE_WORK** it returns a handle to the newly allocated
object. Otherwise, the **OID_NULL** is returned, **errno** is set and when flags
do not contain **POBJ_XALLOC_NO_ABORT**, the transaction is aborted.
The **TX_WCSDUP**() macro transactionally allocates a new object containing a
duplicate of the wide character string *s* and assigns it a type *type_num*. If
successful and called during **TX_STAGE_WORK**, it returns a handle to the
newly allocated object. Otherwise, the stage is changed to **TX_STAGE_ONABORT**,
**OID_NULL** is returned, and *errno* is set appropriately.
The **TX_XWCSDUP**() macro transactionally allocates a new object containing a
duplicate of the wide character string *s* and assigns it a type *type_num*.
The *flags* argument is a bitmask of values described in **pmemobj_tx_xalloc** section.
If successful and called during **TX_STAGE_WORK** it returns a handle to the
newly allocated object. Otherwise, the **OID_NULL** is returned, **errno** is set
and when flags do not contain **POBJ_XALLOC_NO_ABORT**, the transaction is aborted.
The **TX_FREE**() macro transactionally frees the memory space represented by
an object handle *o*. If *o* is **OID_NULL**, no operation is performed. If
successful and called during **TX_STAGE_WORK**, **TX_FREE**() returns 0.
Otherwise, the stage is changed to **TX_STAGE_ONABORT** and *errno* is set appropriately.
The **TX_XFREE**() macro transactionally frees the memory space represented by
an object handle *o*. If *o* is **OID_NULL**, no operation is performed.
The *flags* argument is a bitmask of values described in **pmemobj_tx_xfree**
section. If successful and called during **TX_STAGE_WORK**, **TX_XFREE**()
returns 0. Otherwise, the error number is returned, **errno** is set and when
flags do not contain **POBJ_XFREE_NO_ABORT**, the transaction is aborted.
# RETURN VALUE #
On success, the **pmemobj_tx_alloc**(), **pmemobj_tx_zalloc**(),
**pmemobj_tx_strdup**() and **pmemobj_tx_wcsdup**()
functions return a handle to the newly allocated object. Otherwise, the stage
is changed to **TX_STAGE_ONABORT**, **OID_NULL** is returned, and *errno* is
set appropriately. If *size* equals 0, **OID_NULL** is returned and *errno* is
set appropriately.
On success, the **pmemobj_tx_xalloc**(), **pmemobj_tx_xstrdup**() and
**pmemobj_tx_xwcsdup**() functions return a handle to the newly allocated object.
Otherwise, the **OID_NULL** is returned, **errno** is set and when flags do not
contain **POBJ_XALLOC_NO_ABORT**, the transaction is aborted.
On success, **pmemobj_tx_realloc**() and **pmemobj_tx_zrealloc**() return
a handle to the resized object. Otherwise, the stage is changed to
**TX_STAGE_ONABORT**, **OID_NULL** is returned, and *errno* is set
appropriately. Note that the object handle value may change as a result of
reallocation.
On success, **pmemobj_tx_free**() returns 0. Otherwise, the stage is changed
to **TX_STAGE_ONABORT**, **errno** is set appropriately and the transaction is aborted.
On success, **pmemobj_tx_xfree**() returns 0. Otherwise, the error number is
returned, **errno** is set and when flags do not contain **POBJ_XFREE_NO_ABORT**,
the transaction is aborted.
# SEE ALSO #
**pmemobj_tx_add_range**(3), **pmemobj_tx_begin**(3),
**libpmemobj**(7) and **<https://pmem.io>**
| 12,360 | 46.542308 | 89 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/libpmemobj/pobj_layout_begin.3.md
|
---
layout: manual
Content-Style: 'text/css'
title: _MP(POBJ_LAYOUT_BEGIN, 3)
collection: libpmemobj
header: PMDK
date: pmemobj API version 2.3
...
[comment]: <> (SPDX-License-Identifier: BSD-3-Clause)
[comment]: <> (Copyright 2017-2018, Intel Corporation)
[comment]: <> (pobj_layout_begin.3 -- man page for declaration of pool's layout)
[NAME](#name)<br />
[SYNOPSIS](#synopsis)<br />
[DESCRIPTION](#description)<br />
[EXAMPLE](#example)<br />
[SEE ALSO](#see-also)<br />
# NAME #
**POBJ_LAYOUT_BEGIN**(), **POBJ_LAYOUT_TOID**(),
**POBJ_LAYOUT_ROOT**(), **POBJ_LAYOUT_NAME**(),
**POBJ_LAYOUT_END**(), **POBJ_LAYOUT_TYPES_NUM**()
- persistent memory transactional object store layout
# SYNOPSIS #
```c
#include <libpmemobj.h>
POBJ_LAYOUT_BEGIN(layout)
POBJ_LAYOUT_TOID(layout, TYPE)
POBJ_LAYOUT_ROOT(layout, ROOT_TYPE)
POBJ_LAYOUT_NAME(layout)
POBJ_LAYOUT_END(layout)
POBJ_LAYOUT_TYPES_NUM(layout)
```
# DESCRIPTION #
**libpmemobj**(7) defines a set of macros for convenient declaration of a
pool's layout. The layout declaration consists of declarations of a number of
used types. The declared types will be assigned consecutive type numbers.
Declared types may be used in conjunction with type safety macros (see
**TOID_DECLARE**(3)). Once created, the layout declaration must not be changed
unless any new types are added at the end of the existing layout declaration.
Modifying any existing declaration may lead to changes in the type numbers of
declared types, which in consequence may cause data corruption.
The **POBJ_LAYOUT_BEGIN**() macro indicates a begin of declaration of layout.
The *LAYOUT* argument is a name of layout. This argument must be passed to all macros
related to the declaration of layout.
The **POBJ_LAYOUT_TOID**() macro declares a typed *OID* for type passed as *TYPE* argument
inside the declaration of layout. All types declared using this macro are assigned with
consecutive type numbers. This macro must be used between the **POBJ_LAYOUT_BEGIN**()
and **POBJ_LAYOUT_END**() macros, with the same name passed as *LAYOUT* argument.
The **POBJ_LAYOUT_ROOT**() macro declares a typed *OID* for type passed as *ROOT_TYPE*
argument inside the declaration of layout. The typed *OID* will be assigned with type number
for root object **POBJ_ROOT_TYPE_NUM**.
The **POBJ_LAYOUT_END**() macro ends the declaration of layout.
The **POBJ_LAYOUT_NAME**() macro returns the name of layout as a null-terminated string.
The **POBJ_LAYOUT_TYPES_NUM**() macro returns number of types declared using the **POBJ_LAYOUT_TOID**()
macro within the layout declaration.
# EXAMPLE #
This is an example of layout declaration:
```c
POBJ_LAYOUT_BEGIN(mylayout);
POBJ_LAYOUT_ROOT(mylayout, struct root);
POBJ_LAYOUT_TOID(mylayout, struct node);
POBJ_LAYOUT_TOID(mylayout, struct foo);
POBJ_LAYOUT_END(mylayout);
struct root
{
TOID(struct node) node;
};
struct node
{
TOID(struct node) next;
TOID(struct foo) foo;
};
```
The name of layout and the number of declared types can be retrieved using the following code:
```c
const char *layout_name = POBJ_LAYOUT_NAME(mylayout);
int num_of_types = POBJ_LAYOUT_TYPES_NUM(mylayout);
```
# SEE ALSO #
**TOID_DECLARE**(3), **libpmemobj**(7) and **<https://pmem.io>**
| 3,239 | 29.857143 | 103 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/libvmmalloc/README.md
|
This library has been moved to a
[separate repository](https://github.com/pmem/vmem).
| 86 | 28 | 52 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/libpmempool/pmempool_rm.3.md
|
---
layout: manual
Content-Style: 'text/css'
title: _MP(PMEMPOOL_RM, 3)
collection: libpmempool
header: PMDK
date: pmempool API version 1.3
...
[comment]: <> (SPDX-License-Identifier: BSD-3-Clause)
[comment]: <> (Copyright 2017-2018, Intel Corporation)
[comment]: <> (pmempool_rm.3 -- man page for pool set management functions)
[NAME](#name)<br />
[SYNOPSIS](#synopsis)<br />
[DESCRIPTION](#description)<br />
[RETURN VALUE](#return-value)<br />
[SEE ALSO](#see-also)<br />
# NAME #
_UW(pmempool_rm) - remove persistent memory pool
# SYNOPSIS #
```c
#include <libpmempool.h>
_UWFUNCR1(int, pmempool_rm, *path, int flags)
```
_UNICODE()
# DESCRIPTION #
The _UW(pmempool_rm) function removes the pool pointed to by *path*. The *path*
can point to a regular file, device dax or pool set file. If *path* is a pool
set file, _UW(pmempool_rm) will remove all part files from local replicas
using **unlink**(2)_WINUX(,=q=, and all remote replicas using **rpmem_remove**(3)
(see **librpmem**(7)),=e=) before removing the pool set file itself.
The *flags* argument determines the behavior of _UW(pmempool_rm).
It is either 0 or the bitwise OR of one or more of the following flags:
+ **PMEMPOOL_RM_FORCE** - Ignore all errors when removing part files from
local _WINUX(,or remote )replicas.
+ **PMEMPOOL_RM_POOLSET_LOCAL** - Also remove local pool set file.
_WINUX(,
+ **PMEMPOOL_RM_POOLSET_REMOTE** - Also remove remote pool set file.)
# RETURN VALUE #
On success, _UW(pmempool_rm) returns 0. On error, it returns -1 and sets
*errno* accordingly.
# SEE ALSO #
**rpmem_remove**(3), **unlink**(3), **libpmemlog**(7),
**libpmemobj**(7), **librpmem**(7) and **<https://pmem.io>**
| 1,688 | 25.809524 | 81 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/libpmempool/pmempool_feature_query.3.md
|
---
layout: manual
Content-Style: 'text/css'
title: _MP(PMEMPOOL_FEATURE_QUERY, 3)
collection: libpmempool
header: PMDK
date: pmempool API version 1.3
...
[comment]: <> (SPDX-License-Identifier: BSD-3-Clause)
[comment]: <> (Copyright 2018, Intel Corporation)
[comment]: <> (pmempool_feature_query.3 -- man page for toggle and query pool
set features)
[NAME](#name)<br />
[SYNOPSIS](#synopsis)<br />
[DESCRIPTION](#description)<br />
[COMPATIBILITY](#compatibility)<br />
[DISCLAIMER](#disclaimer)<br />
[RETURN VALUE](#return-value)<br />
[SEE ALSO](#see-also)<br />
# NAME #
_UW(pmempool_feature_query), _UW(pmempool_feature_enable),
_UW(pmempool_feature_disable) - toggle or query pool set features
# SYNOPSIS #
```c
#include <libpmempool.h>
_UWFUNCR1(int, pmempool_feature_query, *path, =q=enum pmempool_feature feature, unsigned flags=e=)
_UWFUNCR1(int, pmempool_feature_enable, *path, =q=enum pmempool_feature feature, unsigned flags=e=)
_UWFUNCR1(int, pmempool_feature_disable, *path, =q=enum pmempool_feature feature, unsigned flags=e=)
```
_UNICODE()
# DESCRIPTION #
The *feature* argument accepts following values:
+ **PMEMPOOL_FEAT_SINGLEHDR** - only the first part in each replica contains the
pool part internal metadata. This value can be used only with
**pmempool_feature_query**(). It can not be enabled or disabled. For details see
**poolset**(5).
+ **PMEMPOOL_FEAT_CKSUM_2K** - only the first 2KiB of pool part internal metadata
is checksummed. Other features may depend on this one to store additional metadata
in otherwise unused second 2KiB part of a header.
When **PMEMPOOL_FEAT_CKSUM_2K** is disabled whole 4KiB is checksummed.
+ **PMEMPOOL_FEAT_SHUTDOWN_STATE** - enables additional check performed during
pool open which verifies pool consistency in the presence of dirty shutdown.
**PMEMPOOL_FEAT_CKSUM_2K** has to be enabled prior to
**PMEMPOOL_FEAT_SHUTDOWN_STATE** otherwise enabling **PMEMPOOL_FEAT_SHUTDOWN_STATE** will fail.
+ **PMEMPOOL_FEAT_CHECK_BAD_BLOCKS** - enables checking bad blocks performed
during opening a pool and fixing bad blocks performed by pmempool-sync
during syncing a pool. For details see **pmempool-feature**(1).
The _UW(pmempool_feature_query) function checks the state of *feature* in the
pool set pointed to by *path*.
The _UW(pmempool_feature_enable) function enables *feature* in the pool set
pointed to by *path*.
The _UW(pmempool_feature_disable) function disables *feature* in the pool set
pointed to by *path*.
# COMPATIBILITY #
Poolsets with features not defined in this document (e.g. enabled by a newer
software version) are not supported.
# DISCLAIMER #
_UW(pmempool_feature_query), _UW(pmempool_feature_enable) and
_UW(pmempool_feature_disable) are not fail safe.
# RETURN VALUE #
On success, _UW(pmempool_feature_query) returns 0 if *feature* is disabled or
1 if it is enabled. On error, it returns -1 and sets *errno* accordingly.
On success, _UW(pmempool_feature_enable) returns 0. On error, it returns -1
and sets *errno* accordingly.
On success, _UW(pmempool_feature_disable) returns 0. On error, it returns -1
and sets *errno* accordingly.
If *path* points to a poolset with a remote replica, **errno** is set to **EINVAL**
and the function returns -1.
If non-zero *flags* are provided, **errno** is set to **EINVAL** and the function
returns -1.
# SEE ALSO #
**poolset**(5) and **<https://pmem.io>**
| 3,372 | 31.12381 | 100 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/libpmempool/pmempool_sync.3.md
|
---
layout: manual
Content-Style: 'text/css'
title: _MP(PMEMPOOL_SYNC, 3)
collection: libpmempool
header: PMDK
date: pmempool API version 1.3
...
[comment]: <> (SPDX-License-Identifier: BSD-3-Clause)
[comment]: <> (Copyright 2017-2018, Intel Corporation)
[comment]: <> (pmempool_sync.3 -- man page for pmempool sync and transform)
[NAME](#name)<br />
[SYNOPSIS](#synopsis)<br />
[DESCRIPTION](#description)<br />
[RETURN VALUE](#return-value)<br />
[ERRORS](#errors)<br />
[NOTES](#notes)<br />
[SEE ALSO](#see-also)<br />
# NAME #
_UW(pmempool_sync), _UW(pmempool_transform) - pool set synchronization and transformation
# SYNOPSIS #
```c
#include <libpmempool.h>
_UWFUNCR1(int, pmempool_sync, *poolset_file,=q=
unsigned flags=e=, =q= (EXPERIMENTAL)=e=)
_UWFUNCR12(int, pmempool_transform, *poolset_file_src,
*poolset_file_dst, unsigned flags, =q= (EXPERIMENTAL)=e=)
```
_UNICODE()
# DESCRIPTION #
The _UW(pmempool_sync) function synchronizes data between replicas within
a pool set.
_UW(pmempool_sync) accepts two arguments:
* *poolset_file* - a path to a pool set file,
* *flags* - a combination of flags (ORed) which modify how synchronization
is performed.
>NOTE: Only the pool set file used to create the pool should be used
for syncing the pool.
>NOTE: The _UW(pmempool_sync) cannot do anything useful if there
are no replicas in the pool set. In such case, it fails with an error.
>NOTE: At the moment, replication is only supported for **libpmemobj**(7)
pools, so _UW(pmempool_sync) cannot be used with other pool types
(**libpmemlog**(7), **libpmemblk**(7)).
The following flags are available:
* **PMEMPOOL_SYNC_DRY_RUN** - do not apply changes, only check for viability of
synchronization.
_UW(pmempool_sync) checks that the metadata of all replicas in
a pool set is consistent, i.e. all parts are healthy, and if any of them is
not, the corrupted or missing parts are recreated and filled with data from
one of the healthy replicas.
_WINUX(,=q=If a pool set has the option *SINGLEHDR* (see **poolset**(5)),
the internal metadata of each replica is limited to the beginning of the first
part in the replica. If the option *NOHDRS* is used, replicas contain no
internal metadata. In both cases, only the missing parts or the ones which
cannot be opened are recreated with the _UW(pmempool_sync) function.=e=)
_UW(pmempool_transform) modifies the internal structure of a pool set.
It supports the following operations:
* adding one or more replicas,
* removing one or more replicas _WINUX(.,=q=,
* adding or removing pool set options.=e=)
Only one of the above operations can be performed at a time.
_UW(pmempool_transform) accepts three arguments:
* *poolset_file_src* - pathname of the pool *set* file for the source
pool set to be changed,
* *poolset_file_dst* - pathname of the pool *set* file that defines the new
structure of the pool set,
* *flags* - a combination of flags (ORed) which modify how synchronization
is performed.
The following flags are available:
* **PMEMPOOL_TRANSFORM_DRY_RUN** - do not apply changes, only check for viability of
transformation.
_WINUX(=q=When adding or deleting replicas, the two pool set files can differ only in the
definitions of replicas which are to be added or deleted. One cannot add and
remove replicas in the same step. Only one of these operations can be performed
at a time. Reordering replicas is not supported.
Also, to add a replica it is necessary for its effective size to match or
exceed the pool size. Otherwise the whole operation fails and no changes are
applied. The effective size of a replica is the sum of sizes of all its part
files decreased by 4096 bytes per each part file. The 4096 bytes of each part
file is utilized for storing internal metadata of the pool part files.=e=)
_WINUX(,=q=When adding or deleting replicas, the two pool set files can differ
only in the definitions of replicas which are to be added or deleted. When
adding or removing pool set options (see **poolset**(5)), the rest of both pool
set files have to be of the same structure. The operation of adding/removing
a pool set option can be performed on a pool set with local replicas only. To
add/remove a pool set option to/from a pool set with remote replicas, one has
to remove the remote replicas first, then add/remove the option, and finally
recreate the remote replicas having added/removed the pool set option to/from
the remote replicas' poolset files.
To add a replica it is necessary for its effective size to match or exceed the
pool size. Otherwise the whole operation fails and no changes are applied.
If none of the pool set options is used, the effective size of a replica is the
sum of sizes of all its part files decreased by 4096 bytes per each part file.
The 4096 bytes of each part file is utilized for storing internal metadata of
the pool part files.
If the option *SINGLEHDR* is used, the effective size of a replica is the sum of
sizes of all its part files decreased once by 4096 bytes. In this case only
the first part contains internal metadata.
If the option *NOHDRS* is used, the effective size of a replica is the sum of
sizes of all its part files. In this case none of the parts contains internal
metadata.=e=)
>NOTE: At the moment, *transform* operation is only supported for
**libpmemobj**(7) pools, so _UW(pmempool_transform) cannot be used with other
pool types (**libpmemlog**(7), **libpmemblk**(7)).
# RETURN VALUE #
_UW(pmempool_sync) and _UW(pmempool_transform) return 0 on success.
Otherwise, they return -1 and set *errno* appropriately.
# ERRORS #
**EINVAL** Invalid format of the input/output pool set file.
**EINVAL** Unsupported *flags* value.
**EINVAL** There is only master replica defined in the input pool set passed
to _UW(pmempool_sync).
**EINVAL** The source pool set passed to _UW(pmempool_transform) is not a
**libpmemobj** pool.
**EINVAL** The input and output pool sets passed to _UW(pmempool_transform)
are identical.
**EINVAL** Attempt to perform more than one transform operation at a time.
**ENOTSUP** The pool set contains a remote replica, but remote replication
is not supported (**librpmem**(7) is not available).
# NOTES #
The _UW(pmempool_sync) API is experimental and it may change in future
versions of the library.
The _UW(pmempool_transform) API is experimental and it may change in future
versions of the library.
# SEE ALSO #
**libpmemlog**(7), **libpmemobj**(7) and **<https://pmem.io>**
| 6,477 | 35.59887 | 89 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/libpmempool/libpmempool.7.md
|
---
layout: manual
Content-Style: 'text/css'
title: _MP(LIBPMEMPOOL, 7)
collection: libpmempool
header: PMDK
date: pmempool API version 1.3
...
[comment]: <> (SPDX-License-Identifier: BSD-3-Clause)
[comment]: <> (Copyright 2016-2018, Intel Corporation)
[comment]: <> (libpmempool.7 -- man page for libpmempool)
[NAME](#name)<br />
[SYNOPSIS](#synopsis)<br />
[DESCRIPTION](#description)<br />
[LIBRARY API VERSIONING](#library-api-versioning-1)<br />
[DEBUGGING AND ERROR HANDLING](#debugging-and-error-handling)<br />
[EXAMPLE](#example)<br />
[ACKNOWLEDGEMENTS](#acknowledgements)<br />
[SEE ALSO](#see-also)<br />
# NAME #
**libpmempool** - persistent memory pool management library
# SYNOPSIS #
```c
#include <libpmempool.h>
cc _WINUX(,-std=gnu99) ... -lpmempool -lpmem
```
_UNICODE()
##### Library API versioning: #####
```c
_UWFUNC(pmempool_check_version, =q=
unsigned major_required,
unsigned minor_required=e=)
```
##### Error handling: #####
```c
_UWFUNC(pmempool_errormsg, void)
```
##### Other library functions: #####
A description of other **libpmempool** functions can be found on the following
manual pages:
+ health check functions: **pmempool_check_init**(3)
+ pool set synchronization and transformation: **pmempool_sync**(3)
+ pool set management functions: **pmempool_rm**(3)
+ toggle or query pool set features: **pmempool_feature_query**(3)
# DESCRIPTION #
**libpmempool**
provides a set of utilities for off-line analysis and
manipulation of a *pool*. A *pool* in this
manpage means a pmemobj pool, pmemblk pool, pmemlog pool or
BTT layout, independent of the underlying storage. Some
**libpmempool** functions are required to work without
any impact on the *pool* but some may create a new or modify
an existing *pool*.
**libpmempool**
is for applications that need high reliability or built-in
troubleshooting. It may be useful for testing and debugging
purposes also.
**libpmempool** introduces functionality of pool set health check,
synchronization, transformation and removal.
# CAVEATS #
**libpmempool** relies on the library destructor being called from the main
thread. For this reason, all functions that might trigger destruction (e.g.
**dlclose**(3)) should be called in the main thread. Otherwise some of the
resources associated with that thread might not be cleaned up properly.
_WINUX(,=q=**libpmempool** requires the **-std=gnu99** compilation flag to
build properly.=e=)
# LIBRARY API VERSIONING #
This section describes how the library API is versioned,
allowing applications to work with an evolving API.
The _UW(pmempool_check_version) function is used to see if
the installed **libpmempool** supports the version of the
library API required by an application. The easiest way to
do this for the application is to supply the compile-time
version information, supplied by defines in **\<libpmempool.h\>**, like this:
```c
reason = _U(pmempool_check_version)(PMEMPOOL_MAJOR_VERSION,
PMEMPOOL_MINOR_VERSION);
if (reason != NULL) {
/* version check failed, reason string tells you why */
}
```
Any mismatch in the major version number is considered a failure, but a
library with a newer minor version number will pass this
check since increasing minor versions imply backwards compatibility.
An application can also check specifically for the existence of an
interface by checking for the version where that interface
was introduced. These versions are documented in this man
page as follows: unless otherwise specified, all interfaces
described here are available in version 1.0 of the library.
Interfaces added after version 1.0 will contain the text
*introduced in version x.y* in the section of this manual
describing the feature.
When the version check performed by _UW(pmempool_check_version)
is successful, the return value is NULL. Otherwise the
return value is a static string describing the reason for
failing the version check. The string returned by
_UW(pmempool_check_version) must not be modified or freed.
# DEBUGGING AND ERROR HANDLING #
If an error is detected during the call to a **libpmempool** function, the
application may retrieve an error message describing the reason for the failure
from _UW(pmempool_errormsg). This function returns a pointer to a static buffer
containing the last error message logged for the current thread. If *errno*
was set, the error message may include a description of the corresponding
error code as returned by **strerror**(3). The error message buffer is
thread-local; errors encountered in one thread do not affect its value in
other threads. The buffer is never cleared by any library function; its
content is significant only when the return value of the immediately preceding
call to a **libpmempool** function indicated an error, or if *errno* was set.
The application must not modify or free the error message string, but it may
be modified by subsequent calls to other library functions.
Two versions of **libpmempool** are typically available on a development
system. The normal version, accessed when a program is linked using the
**-lpmempool** option, is optimized for performance. That version skips checks
that impact performance and never logs any trace information or performs any
run-time assertions.
A second version of **libpmempool**, accessed when a program uses the libraries
under _DEBUGLIBPATH(), contains run-time assertions and trace points. The
typical way to access the debug version is to set the environment variable
**LD_LIBRARY_PATH** to _LDLIBPATH(). Debugging output is
controlled using the following environment variables. These variables have
no effect on the non-debug version of the library.
+ **PMEMPOOL_LOG_LEVEL**
The value of **PMEMPOOL_LOG_LEVEL** enables trace points in the debug version
of the library, as follows:
+ **0** - This is the default level when **PMEMPOOL_LOG_LEVEL** is not set.
No log messages are emitted at this level.
+ **1** - Additional details on any errors detected are logged (in addition
to returning the *errno*-based errors as usual). The same information
may be retrieved using _UW(pmempool_errormsg).
+ **2** - A trace of basic operations is logged.
+ **3** - Enables a very verbose amount of function call tracing in the library.
+ **4** - Enables voluminous and fairly obscure tracing
information that is likely only useful to the **libpmempool** developers.
Unless **PMEMPOOL_LOG_FILE** is set, debugging output is written to *stderr*.
+ **PMEMPOOL_LOG_FILE**
Specifies the name of a file where
all logging information should be written. If the last character in the name
is "-", the *PID* of the current process will be appended to the file name when
the log file is created. If **PMEMPOOL_LOG_FILE** is not set, output is
written to *stderr*.
# EXAMPLE #
The following example illustrates how the **libpmempool** API is used.
The program detects the type and checks consistency of given pool.
If there are any issues detected, the pool is automatically repaired.
```c
#include <stddef.h>_WINUX(,=q=
#include <unistd.h>=e=)
#include <stdlib.h>
#include <stdio.h>
#include <libpmempool.h>
#define PATH "./pmem-fs/myfile"
#define CHECK_FLAGS (PMEMPOOL_CHECK_FORMAT_STR|PMEMPOOL_CHECK_REPAIR|\
PMEMPOOL_CHECK_VERBOSE)
int
main(int argc, char *argv[])
{
PMEMpoolcheck *ppc;
struct _U(pmempool_check_status) *status;
enum pmempool_check_result ret;
/* arguments for check */
struct _U(pmempool_check_args) args = {
.path = PATH,
.backup_path = NULL,
.pool_type = PMEMPOOL_POOL_TYPE_DETECT,
.flags = CHECK_FLAGS
};
/* initialize check context */
if ((ppc = _U(pmempool_check_init)(&args, sizeof(args))) == NULL) {
perror("_U(pmempool_check_init)");
exit(EXIT_FAILURE);
}
/* perform check and repair, answer 'yes' for each question */
while ((status = _U(pmempool_check)(ppc)) != NULL) {
switch (status->type) {
case PMEMPOOL_CHECK_MSG_TYPE_ERROR:
printf("%s\n", status->str.msg);
break;
case PMEMPOOL_CHECK_MSG_TYPE_INFO:
printf("%s\n", status->str.msg);
break;
case PMEMPOOL_CHECK_MSG_TYPE_QUESTION:
printf("%s\n", status->str.msg);
status->str.answer = "yes";
break;
default:
pmempool_check_end(ppc);
exit(EXIT_FAILURE);
}
}
/* finalize the check and get the result */
ret = pmempool_check_end(ppc);
switch (ret) {
case PMEMPOOL_CHECK_RESULT_CONSISTENT:
case PMEMPOOL_CHECK_RESULT_REPAIRED:
return 0;
default:
return 1;
}
}
```
See <https://pmem.io/pmdk/libpmempool> for more examples using the
**libpmempool** API.
# ACKNOWLEDGEMENTS #
**libpmempool** builds on the persistent memory programming model
recommended by the SNIA NVM Programming Technical Work Group:
<https://snia.org/nvmp>
# SEE ALSO #
**dlclose**(3), **pmempool_check_init**(3), **pmempool_feature_query**(3),
**pmempool_rm**(3), **pmempool_sync**(3), **strerror**(3), **libpmem**(7),
**libpmemblk**(7), **libpmemlog**(7), **libpmemobj**(7)
and **<https://pmem.io>**
| 8,999 | 32.088235 | 80 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/libpmempool/pmempool_check_init.3.md
|
---
layout: manual
Content-Style: 'text/css'
title: _MP(PMEMPOOL_CHECK_INIT, 3)
collection: libpmempool
header: PMDK
date: pmempool API version 1.3
...
[comment]: <> (SPDX-License-Identifier: BSD-3-Clause)
[comment]: <> (Copyright 2017-2018, Intel Corporation)
[comment]: <> (pmempool_check_init.3 -- man page for pmempool health check functions)
[NAME](#name)<br />
[SYNOPSIS](#synopsis)<br />
[DESCRIPTION](#description)<br />
[RETURN VALUE](#return-value)<br />
[EXAMPLE](#example)<br />
[NOTES](#notes)<br />
[SEE ALSO](#see-also)<br />
# NAME #
_UW(pmempool_check_init), _UW(pmempool_check),
**pmempool_check_end**() - checks pmempool health
# SYNOPSIS #
```c
#include <libpmempool.h>
_UWFUNCR1UW(PMEMpoolcheck, *pmempool_check_init, struct pmempool_check_args,
*args,=q=
size_t args_size=e=)
_UWFUNCRUW(struct pmempool_check_status, *pmempool_check, PMEMpoolcheck *ppc)
enum pmempool_check_result pmempool_check_end(PMEMpoolcheck *ppc);
```
_UNICODE()
# DESCRIPTION #
To perform the checks provided by **libpmempool**, a *check context*
must first be initialized using the _UW(pmempool_check_init)
function described in this section. Once initialized, the
*check context* is represented by an opaque handle of
type *PMEMpoolcheck\**, which is passed to all of the
other functions available in **libpmempool**.
To execute checks, _UW(pmempool_check) must be called iteratively.
Each call generates a new check status, represented by a
_UWS(pmempool_check_status) structure. Status messages are described
later below.
When the checks are completed, _UW(pmempool_check) returns NULL. The check
must be finalized using **pmempool_check_end**(), which returns an
*enum pmempool_check_result* describing the results of the entire check.
_UW(pmempool_check_init) initializes the check context. *args* describes
parameters of the check context. *args_size* should be equal to the size of
the _UWS(pmempool_check_args). _UWS(pmempool_check_args) is defined as follows:
_WINUX(=q=
```c
struct pmempool_check_argsU
{
/* path to the pool to check */
const char *path;
/* optional backup path */
const char *backup_path;
/* type of the pool */
enum pmempool_pool_type pool_type;
/* parameters */
int flags;
};
struct pmempool_check_argsW
{
/* path to the pool to check */
const wchar_t *path;
/* optional backup path */
const wchar_t *backup_path;
/* type of the pool */
enum pmempool_pool_type pool_type;
/* parameters */
int flags;
};
```
=e=,=q=
```c
struct pmempool_check_args
{
/* path to the pool to check */
const char *path;
/* optional backup path */
const char *backup_path;
/* type of the pool */
enum pmempool_pool_type pool_type;
/* parameters */
int flags;
};
```
=e=)
The *flags* argument accepts any combination of the following values (ORed):
+ **PMEMPOOL_CHECK_REPAIR** - perform repairs
+ **PMEMPOOL_CHECK_DRY_RUN** - emulate repairs, not supported on Device DAX
+ **PMEMPOOL_CHECK_ADVANCED** - perform hazardous repairs
+ **PMEMPOOL_CHECK_ALWAYS_YES** - do not ask before repairs
+ **PMEMPOOL_CHECK_VERBOSE** - generate info statuses
+ **PMEMPOOL_CHECK_FORMAT_STR** - generate string format statuses
*pool_type* must match the type of the *pool* being processed. Pool type
detection may be enabled by setting *pool_type* to
**PMEMPOOL_POOL_TYPE_DETECT**. A pool type detection failure ends the check.
*backup_path* may be:
+ NULL. No backup will be performed.
+ a non-existent file: *backup_path* will be created and backup will be
performed. *path* must be a single file *pool*.
+ an existing *pool set* file: Backup will be performed as defined by the
*backup_path* pool set. *path* must be a pool set, and *backup_path* must have
the same structure (the same number of parts with exactly the same size) as the
*path* pool set.
Backup is supported only if the source *pool set* has no defined replicas.
Neither *path* nor *backup_path* may specify a pool set with remote replicas.
The _UW(pmempool_check) function starts or resumes the check indicated by *ppc*.
When the next status is generated, the check is paused and _UW(pmempool_check)
returns a pointer to the _UWS(pmempool_check_status) structure:
_WINUX(=q=
```c
struct pmempool_check_statusU
{
enum pmempool_check_msg_type type; /* type of the status */
struct
{
const char *msg; /* status message string */
const char *answer; /* answer to message if applicable */
} str;
};
struct pmempool_check_statusW
{
enum pmempool_check_msg_type type; /* type of the status */
struct
{
const wchar_t *msg; /* status message string */
const wchar_t *answer; /* answer to message if applicable */
} str;
};
```
=e=,=q=
```c
struct pmempool_check_status
{
enum pmempool_check_msg_type type; /* type of the status */
struct
{
const char *msg; /* status message string */
const char *answer; /* answer to message if applicable */
} str;
};
```
=e=)
This structure can describe three types of statuses:
+ **PMEMPOOL_CHECK_MSG_TYPE_INFO** - detailed information about the check.
Generated only if a **PMEMPOOL_CHECK_VERBOSE** flag was set.
+ **PMEMPOOL_CHECK_MSG_TYPE_ERROR** - An error was encountered.
+ **PMEMPOOL_CHECK_MSG_TYPE_QUESTION** - question. Generated only if an
**PMEMPOOL_CHECK_ALWAYS_YES** flag was not set. It requires *answer* to be
set to "yes" or "no" before continuing.
After calling _UW(pmempool_check) again, the previously provided
_UWS(pmempool_check_status) pointer must be considered invalid.
The **pmempool_check_end**() function finalizes the check and releases all
related resources. *ppc* is invalid after calling **pmempool_check_end**().
# RETURN VALUE #
_UW(pmempool_check_init) returns an opaque handle of type *PMEMpoolcheck\**.
If the provided parameters are invalid or the initialization process fails,
_UW(pmempool_check_init) returns NULL and sets *errno* appropriately.
Each call to _UW(pmempool_check) returns a pointer to a
_UWS(pmempool_check_status) structure when a status is generated. When the
check completes, _UW(pmempool_check) returns NULL.
The **pmempool_check_end**() function returns an *enum pmempool_check_result*
summarizing the results of the finalized check. **pmempool_check_end**() can
return one of the following values:
+ **PMEMPOOL_CHECK_RESULT_CONSISTENT** - the *pool* is consistent
+ **PMEMPOOL_CHECK_RESULT_NOT_CONSISTENT** - the *pool* is not consistent
+ **PMEMPOOL_CHECK_RESULT_REPAIRED** - the *pool* has issues but all repair
steps completed successfully
+ **PMEMPOOL_CHECK_RESULT_CANNOT_REPAIR** - the *pool* has issues which
can not be repaired
+ **PMEMPOOL_CHECK_RESULT_ERROR** - the *pool* has errors or the check
encountered an issue
+ **PMEMPOOL_CHECK_RESULT_SYNC_REQ** - the *pool* has single healthy replica.
To fix remaining issues use **pmempool_sync**(3).
# EXAMPLE #
This is an example of a *check context* initialization:
```c
struct _U(pmempool_check_args) args =
{
.path = "/path/to/blk.pool",
.backup_path = NULL,
.pool_type = PMEMPOOL_POOL_TYPE_BLK,
.flags = PMEMPOOL_CHECK_REPAIR | PMEMPOOL_CHECK_DRY_RUN |
PMEMPOOL_CHECK_VERBOSE | PMEMPOOL_CHECK_FORMAT_STR
};
```
```c
PMEMpoolcheck *ppc = _U(pmempool_check_init)(&args, sizeof(args));
```
The check will process a *pool* of type **PMEMPOOL_POOL_TYPE_BLK**
located in the path */path/to/blk.pool*. Before the check it will
not create a backup of the *pool* (*backup_path == NULL*).
If the check finds any issues it will try to
perform repair steps (**PMEMPOOL_CHECK_REPAIR**), but it
will not make any changes to the *pool*
(**PMEMPOOL_CHECK_DRY_RUN**) and it will not perform any
dangerous repair steps (no **PMEMPOOL_CHECK_ADVANCED**).
The check will ask before performing any repair steps (no
**PMEMPOOL_CHECK_ALWAYS_YES**). It will also generate
detailed information about the check (**PMEMPOOL_CHECK_VERBOSE**).
The **PMEMPOOL_CHECK_FORMAT_STR** flag indicates string
format statuses (*struct pmempool_check_status*).
Currently this is the only supported status format so this flag is required.
# NOTES #
Currently, checking the consistency of a *pmemobj* pool is
**not** supported.
# SEE ALSO #
**libpmemlog**(7), **libpmemobj**(7) and **<https://pmem.io>**
| 8,202 | 28.401434 | 85 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/libpmem/pmem_flush.3.md
|
---
layout: manual
Content-Style: 'text/css'
title: _MP(PMEM_FLUSH, 3)
collection: libpmem
header: PMDK
date: pmem API version 1.1
...
[comment]: <> (SPDX-License-Identifier: BSD-3-Clause)
[comment]: <> (Copyright 2017-2018, Intel Corporation)
[comment]: <> (pmem_flush.3 -- man page for partial flushing operations)
[NAME](#name)<br />
[SYNOPSIS](#synopsis)<br />
[DESCRIPTION](#description)<br />
[RETURN VALUE](#return-value)<br />
[SEE ALSO](#see-also)<br />
# NAME #
**pmem_flush**(), **pmem_drain**(),
**pmem_persist**(), **pmem_msync**(),
**pmem_deep_flush**(), **pmem_deep_drain**(), **pmem_deep_persist**(),
**pmem_has_hw_drain**(), **pmem_has_auto_flush**() - check persistency,
store persistent data and delete mappings
# SYNOPSIS #
```c
#include <libpmem.h>
void pmem_persist(const void *addr, size_t len);
int pmem_msync(const void *addr, size_t len);
void pmem_flush(const void *addr, size_t len);
void pmem_deep_flush(const void *addr, size_t len); (EXPERIMENTAL)
int pmem_deep_drain(const void *addr, size_t len); (EXPERIMENTAL)
int pmem_deep_persist(const void *addr, size_t len); (EXPERIMENTAL)
void pmem_drain(void);
int pmem_has_auto_flush(void); (EXPERIMENTAL)
int pmem_has_hw_drain(void);
```
# DESCRIPTION #
The functions in this section provide access to the stages of flushing
to persistence, for the less common cases where an application needs more
control of the flushing operations than the **pmem_persist**() function.
>WARNING:
Using **pmem_persist**() on a range where **pmem_is_pmem**(3)
returns false may not do anything useful -- use **msync**(2) instead.
The **pmem_persist**() function forces any changes in the range
\[*addr*, *addr*+*len*) to be stored durably in
persistent memory. This is equivalent to calling **msync**(2)
but may be more optimal and will avoid calling into the kernel if
possible. There are no alignment restrictions on the range described by
*addr* and *len*, but **pmem_persist**() may expand the range as
necessary to meet platform alignment requirements.
>WARNING:
Like **msync**(2), there is nothing atomic or transactional
about this call. Any unwritten stores in the given range will be
written, but some stores may have already been written by virtue of
normal cache eviction/replacement policies. Correctly written code must
not depend on stores waiting until **pmem_persist**() is called to
become persistent -- they can become persistent at any time before
**pmem_persist**() is called.
The **pmem_msync**() function is like **pmem_persist**() in that it
forces any changes in the range \[*addr*, *addr*+*len*) to be stored
durably. Since it calls **msync**(), this function works on either
persistent memory or a memory mapped file on traditional storage.
**pmem_msync**() takes steps to ensure the alignment of addresses and
lengths passed to **msync**() meet the requirements of that system call.
It calls **msync**() with the **MS_SYNC** flag as described in
**msync**(2). Typically the application only checks for the existence of
persistent memory once, and then uses that result throughout the
program, for example:
```c
/* do this call once, after the pmem is memory mapped */
int is_pmem = pmem_is_pmem(rangeaddr, rangelen);
/* ... make changes to a range of pmem ... */
/* make the changes durable */
if (is_pmem)
pmem_persist(subrangeaddr, subrangelen);
else
pmem_msync(subrangeaddr, subrangelen);
/* ... */
```
_WINUX(,=q=
>WARNING:
On Linux, **pmem_msync**() and **msync**(2) have no effect on memory ranges
mapped from Device DAX. In case of memory ranges where **pmem_is_pmem**(3)
returns true use **pmem_persist**() to force the changes to be stored durably
in persistent memory.
=e=)
The **pmem_flush**() and **pmem_drain**() functions provide
partial versions of the **pmem_persist**() function.
**pmem_persist**() can be thought of as this:
```c
void
pmem_persist(const void *addr, size_t len)
{
/* flush the processor caches */
pmem_flush(addr, len);
/* wait for any pmem stores to drain from HW buffers */
pmem_drain();
}
```
These functions allow advanced programs to create their own variations
of **pmem_persist**(). For example, a program that needs to flush
several discontiguous ranges can call **pmem_flush**() for each range
and then follow up by calling **pmem_drain**() once.
The semantics of **pmem_deep_flush**() function is the same as
**pmem_flush**() function except that **pmem_deep_flush**() is indifferent to
**PMEM_NO_FLUSH** environment variable (see **ENVIRONMENT** section in **libpmem**(7))
and always flushes processor caches.
The behavior of **pmem_deep_persist**() function is the same as **pmem_persist**(),
except that it provides higher reliability by flushing persistent memory stores to
the most reliable persistence domain available to software rather than depending on
automatic WPQ (write pending queue) flush on power failure (ADR).
The **pmem_deep_flush**() and **pmem_deep_drain**() functions provide
partial versions of **pmem_deep_persist**() function.
**pmem_deep_persist**() can be thought of as this:
```
int pmem_deep_persist(const void *addr, size_t len)
{
/* flush the processor caches */
pmem_deep_flush(addr, len);
/* wait for any pmem stores to drain from HW buffers */
return pmem_deep_drain(addr, len);
}
```
Since this operation is usually much more expensive than **pmem_persist**(),
it should be used rarely. Typically the application should use this function
only to flush the most critical data, which are required to recover after
the power failure.
The **pmem_has_auto_flush**() function checks if the machine supports automatic
CPU cache flush on power failure or system crash.
Function returns true only when each NVDIMM in the system is covered by this
mechanism.
The **pmem_has_hw_drain**() function checks if the machine
supports an explicit *hardware drain*
instruction for persistent memory.
# RETURN VALUE #
The **pmem_persist**() function returns no value.
The **pmem_msync**() return value is the return value of
**msync**(), which can return -1 and set *errno* to indicate an error.
The **pmem_flush**(), **pmem_drain**() and **pmem_deep_flush**()
functions return no value.
The **pmem_deep_persist**() and **pmem_deep_drain**() return 0 on success.
Otherwise it returns -1 and sets *errno* appropriately. If *len* equals zero,
**pmem_deep_persist**() and **pmem_deep_drain**() return 0 but no flushing takes place.
The **pmem_has_auto_flush**() function returns 1 if given platform supports
processor cache flushing on a power loss event. Otherwise it returns 0.
On error it returns -1 and sets *errno* appropriately.
The **pmem_has_hw_drain**() function returns true if the machine
supports an explicit *hardware drain*
instruction for persistent memory.
On Intel processors with persistent memory,
stores to persistent memory are considered persistent
once they are flushed from the CPU caches, so this
function always returns false. Despite that, programs using
**pmem_flush**() to flush ranges of memory should still follow up by calling
**pmem_drain**() once to ensure the flushes are complete. As mentioned above,
**pmem_persist**() handles calling both **pmem_flush**() and **pmem_drain**().
# SEE ALSO #
**msync**(2), **pmem_is_pmem**(3), **libpmem**(7)
and **<https://pmem.io>**
| 7,303 | 35.703518 | 86 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/libpmem/libpmem.7.md
|
---
layout: manual
Content-Style: 'text/css'
title: _MP(LIBPMEM, 7)
collection: libpmem
header: PMDK
date: pmem API version 1.1
...
[comment]: <> (SPDX-License-Identifier: BSD-3-Clause)
[comment]: <> (Copyright 2016-2018, Intel Corporation)
[comment]: <> (libpmem.7 -- man page for libpmem)
[NAME](#name)<br />
[SYNOPSIS](#synopsis)<br />
[DESCRIPTION](#description)<br />
[CAVEATS](#caveats)<br />
[LIBRARY API VERSIONING](#library-api-versioning-1)<br />
[ENVIRONMENT](#environment)<br />
[DEBUGGING AND ERROR HANDLING](#debugging-and-error-handling)<br />
[EXAMPLE](#example)<br />
[ACKNOWLEDGEMENTS](#acknowledgements)<br />
[SEE ALSO](#see-also)
# NAME #
**libpmem** - persistent memory support library
# SYNOPSIS #
```c
#include <libpmem.h>
cc ... -lpmem
```
_UNICODE()
##### Library API versioning: #####
```c
_UWFUNC(pmem_check_version, =q=
unsigned major_required,
unsigned minor_required=e=)
```
##### Error handling: #####
```c
_UWFUNC(pmem_errormsg, void)
```
##### Other library functions: #####
A description of other **libpmem** functions can be found on the following
manual pages:
+ most commonly used functions: **pmem_is_pmem**(3)
+ partial flushing operations: **pmem_flush**(3)
+ copying to persistent memory: **pmem_memmove_persist**(3)
# DESCRIPTION #
**libpmem** provides low-level *persistent memory* (pmem) support for
applications using direct access storage (DAX), which is storage that
supports load/store access without paging blocks from a block storage
device. Some types of *non-volatile memory DIMMs* (NVDIMMs) provide this
type of byte addressable access to storage. A *persistent memory aware
file system* is typically used to expose the direct access to
applications. Memory mapping a file from this type of file system
results in the load/store, non-paged access to pmem.
This library is for applications that use persistent memory directly,
without the help of any library-supplied transactions or memory
allocation. Higher-level libraries that build on **libpmem** are
available and are recommended for most applications, see:
+ **libpmemobj**(7), a general use persistent memory API, providing memory
allocation and transactional operations on variable-sized objects.
+ **libpmemblk**(7), providing pmem-resident arrays of fixed-sized blocks
with atomic updates.
+ **libpmemlog**(7), providing a pmem-resident log file.
Under normal usage, **libpmem** will never print messages or
intentionally cause the process to exit.
The only exception to this is the debugging information, when enabled,
as described under **DEBUGGING AND ERROR HANDLING** below.
# CAVEATS #
**libpmem** relies on the library destructor being called from the main thread.
For this reason, all functions that might trigger destruction (e.g.
**dlclose**(3)) should be called in the main thread. Otherwise some of the
resources associated with that thread might not be cleaned up properly.
# LIBRARY API VERSIONING #
This section describes how the library API is versioned, allowing
applications to work with an evolving API.
The _UW(pmem_check_version) function is used to determine whether the installed
**libpmem** supports the version of the library API required by an
application. The easiest way to do this is for the application to supply
the compile-time version information, supplied by defines in
**\<libpmem.h\>**, like this:
```c
reason = _U(pmem_check_version)(PMEM_MAJOR_VERSION,
PMEM_MINOR_VERSION);
if (reason != NULL) {
/* version check failed, reason string tells you why */
}
```
Any mismatch in the major version number is considered a failure, but a
library with a newer minor version number will pass this check since
increasing minor versions imply backwards compatibility.
An application can also check specifically for the existence of an
interface by checking for the version where that interface was
introduced. These versions are documented in this man page as follows:
unless otherwise specified, all interfaces described here are available
in version 1.0 of the library. Interfaces added after version 1.0 will
contain the text *introduced in version x.y* in the section of this
manual describing the feature.
When the version check performed by _UW(pmem_check_version) is
successful, the return value is NULL. Otherwise the return value is a
static string describing the reason for failing the version check. The
string returned by _UW(pmem_check_version) must not be modified or
freed.
# ENVIRONMENT #
**libpmem** can change its default behavior based on the following
environment variables. These are largely intended for testing and are
not normally required.
+ **PMEM_IS_PMEM_FORCE**=*val*
If *val* is 0 (zero), then **pmem_is_pmem**(3) will always return
false. Setting *val* to 1 causes **pmem_is_pmem**(3) to always return
true. This variable is mostly used for testing but can be used to force
pmem behavior on a system where a range of pmem is not detectable as
pmem for some reason.
>NOTE:
Unlike the other variables, the value of
**PMEM_IS_PMEM_FORCE** is not queried (and cached) at
library initialization time, but on the first call to
**pmem_is_pmem**(3). This means that in case of
**libpmemlog**(7), **libpmemblk**(7), and **libpmemobj**(7),
**PMEM_IS_PMEM_FORCE** may still be set or modified by the program
until the first attempt to create or open the persistent
memory pool.
+ **PMEM_NO_CLWB**=1
Setting this environment variable to 1 forces **libpmem** to never issue
the **CLWB** instruction on Intel hardware, falling back to other cache
flush instructions instead (**CLFLUSHOPT** or **CLFLUSH** on Intel
hardware). Without this environment variable, **libpmem** will always
use the **CLWB** instruction for flushing processor caches on platforms
that support the instruction. This variable is intended for use during
library testing but may be required for some rare cases where using
**CLWB** has a negative impact on performance.
+ **PMEM_NO_CLFLUSHOPT**=1
Setting this environment variable to 1 forces **libpmem** to never issue
the **CLFLUSHOPT** instruction on Intel hardware, falling back to the
**CLFLUSH** instructions instead. Without this environment variable,
**libpmem** will always use the **CLFLUSHOPT** instruction for flushing
processor caches on platforms that support the instruction, but where
**CLWB** is not available. This variable is intended for use during
library testing.
+ **PMEM_NO_FLUSH**=1
Setting this environment variable to 1 forces most **libpmem** functions
to never issue any of **CLFLUSH**, **CLFLUSHOPT** or **CLWB** instructions
on Intel hardware. The only exceptions are **pmem_deep_flush**(3) and
**pmem_deep_persist**(3) functions.
+ **PMEM_NO_FLUSH**=0
Setting this environment variable to 0 forces to always flush CPU caches
using one of **CLFLUSH**, **CLFLUSHOPT** or **CLWB** instructions
even if **pmem_has_auto_flush**(3) function returns true and the platform
supports flushing the processor caches on power loss or system crash.
+ **PMEM_NO_MOVNT**=1
Setting this environment variable to 1 forces **libpmem** to never use
the *non-temporal* move instructions on Intel hardware. Without this
environment variable, **libpmem** will use the non-temporal instructions
for copying larger ranges to persistent memory on platforms that support
the instructions. This variable is intended for use during library
testing.
+ **PMEM_MOVNT_THRESHOLD**=*val*
This environment variable allows overriding the minimum length of
the **pmem_memmove_persist**(3) operations, for which **libpmem** uses
*non-temporal* move instructions. Setting this environment variable to 0
forces **libpmem** to always use the *non-temporal* move instructions if
available. It has no effect if **PMEM_NO_MOVNT** is set to 1.
This variable is intended for use during library testing.
+ **PMEM_MMAP_HINT**=*val*
This environment variable allows overriding
the hint address used by _UW(pmem_map_file). If set, it also disables
mapping address randomization. This variable is intended for use during
library testing and debugging. Setting it to some fairly large value
(i.e. 0x10000000000) will very likely result in mapping the file at the
specified address (if not used) or at the first unused region above
given address, without adding any random offset. When debugging, this
makes it easier to calculate the actual address of the persistent memory
block, based on its offset in the file. In case of **libpmemobj** it
simplifies conversion of a persistent object identifier (OID) into a
direct pointer to the object.
>NOTE:
**Setting this environment variable
affects all the PMDK libraries,** disabling mapping address randomization
and causing the specified address to be used as a hint about where to
place the mapping.
# DEBUGGING AND ERROR HANDLING #
If an error is detected during the call to a **libpmem** function, the
application may retrieve an error message describing the reason of the failure
from _UW(pmem_errormsg). This function returns a pointer to a static buffer
containing the last error message logged for the current thread. If *errno*
was set, the error message may include a description of the corresponding
error code as returned by **strerror**(3). The error message buffer is
thread-local; errors encountered in one thread do not affect its value in
other threads. The buffer is never cleared by any library function; its
content is significant only when the return value of the immediately preceding
call to a **libpmem** function indicated an error.
The application must not modify or free the error message string.
Subsequent calls to other library functions may modify the previous message.
Two versions of **libpmem** are typically available on a development
system. The normal version, accessed when a program is linked using the
**-lpmem** option, is optimized for performance. That version skips checks
that impact performance and never logs any trace information or performs any
run-time assertions.
A second version of **libpmem**, accessed when a program uses the libraries
under _DEBUGLIBPATH(), contains run-time assertions and trace points. The
typical way to access the debug version is to set the environment variable
**LD_LIBRARY_PATH** to _LDLIBPATH(). Debugging output is
controlled using the following environment variables. These variables have
no effect on the non-debug version of the library.
+ **PMEM_LOG_LEVEL**
The value of **PMEM_LOG_LEVEL** enables trace points in the debug version
of the library, as follows:
+ **0** - This is the default level when **PMEM_LOG_LEVEL** is not set.
No log messages are emitted at this level.
+ **1** - Additional details on any errors detected are logged, in addition
to returning the *errno*-based errors as usual. The same information
may be retrieved using _UW(pmem_errormsg).
+ **2** - A trace of basic operations is logged.
+ **3** - Enables a very verbose amount of function call tracing in the
library.
+ **4** - Enables voluminous and fairly obscure tracing
information that is likely only useful to the **libpmem** developers.
Unless **PMEM_LOG_FILE** is set, debugging output is written to *stderr*.
+ **PMEM_LOG_FILE**
Specifies the name of a file where
all logging information should be written. If the last character in the name
is "-", the *PID* of the current process will be appended to the file name when
the log file is created. If **PMEM_LOG_FILE** is not set, output is
written to *stderr*.
# EXAMPLE #
The following example uses **libpmem** to flush changes made to raw,
memory-mapped persistent memory.
>WARNING:
There is nothing transactional about the **pmem_persist**(3) or
**pmem_msync**(3) calls in this example. Interrupting the program may
result in a partial write to pmem. Use a transactional library such as
**libpmemobj**(7) to avoid torn updates.
```c
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <stdio.h>
#include <errno.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <libpmem.h>
/* using 4k of pmem for this example */
#define PMEM_LEN 4096
#define PATH "/pmem-fs/myfile"
int
main(int argc, char *argv[])
{
char *pmemaddr;
size_t mapped_len;
int is_pmem;
/* create a pmem file and memory map it */
if ((pmemaddr = _U(pmem_map_file)(PATH, PMEM_LEN, PMEM_FILE_CREATE,
0666, &mapped_len, &is_pmem)) == NULL) {
perror("_U(pmem_map_file)");
exit(1);
}
/* store a string to the persistent memory */
strcpy(pmemaddr, "hello, persistent memory");
/* flush above strcpy to persistence */
if (is_pmem)
pmem_persist(pmemaddr, mapped_len);
else
pmem_msync(pmemaddr, mapped_len);
/*
* Delete the mappings. The region is also
* automatically unmapped when the process is
* terminated.
*/
pmem_unmap(pmemaddr, mapped_len);
}
```
See <https://pmem.io/pmdk/libpmem>
for more examples using the **libpmem** API.
# ACKNOWLEDGEMENTS #
**libpmem** builds on the persistent memory programming model
recommended by the SNIA NVM Programming Technical Work Group:
<https://snia.org/nvmp>
# SEE ALSO #
**dlclose**(3),
**pmem_flush**(3), **pmem_is_pmem**(3), **pmem_memmove_persist**(3),
**pmem_msync**(3), **pmem_persist**(3), **strerror**(3),
**libpmemblk**(7), **libpmemlog**(7), **libpmemobj**(7)
and **<https://pmem.io>**
| 13,311 | 35.173913 | 79 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/libpmem/pmem_is_pmem.3.md
|
---
layout: manual
Content-Style: 'text/css'
title: _MP(PMEM_IS_PMEM, 3)
collection: libpmem
header: PMDK
date: pmem API version 1.1
...
[comment]: <> (SPDX-License-Identifier: BSD-3-Clause)
[comment]: <> (Copyright 2017-2019, Intel Corporation)
[comment]: <> (pmem_is_pmem.3 -- man page for libpmem persistence and mapping functions)
[NAME](#name)<br />
[SYNOPSIS](#synopsis)<br />
[DESCRIPTION](#description)<br />
[RETURN VALUE](#return-value)<br />
[NOTES](#notes)<br />
[CAVEATS](#caveats)<br />
[BUGS](#bugs)<br />
[SEE ALSO](#see-also)<br />
# NAME #
**pmem_is_pmem**(), _UW(pmem_map_file),
**pmem_unmap**() - check persistency, create and delete mappings
# SYNOPSIS #
```c
#include <libpmem.h>
int pmem_is_pmem(const void *addr, size_t len);
_UWFUNCR1(void, *pmem_map_file, *path, =q=size_t len, int flags,
mode_t mode, size_t *mapped_lenp, int *is_pmemp=e=)
int pmem_unmap(void *addr, size_t len);
```
_UNICODE()
# DESCRIPTION #
Most pmem-aware applications will take advantage of higher level
libraries that alleviate the need for the application to call into **libpmem**
directly. Application developers that wish to access raw memory mapped
persistence directly (via **mmap**(2)) and that wish to take on the
responsibility for flushing stores to persistence will find the
functions described in this section to be the most commonly used.
The **pmem_is_pmem**() function detects if the entire range
\[*addr*, *addr*+*len*) consists of persistent memory. Calling this function
with a memory range that originates from a source different than
**pmem_map_file()** is undefined. The implementation of **pmem_is_pmem**()
requires a non-trivial amount of work to determine if the given range is
entirely persistent memory. For this reason, it is better to call
**pmem_is_pmem**() once when a range of memory is first encountered,
save the result, and use the saved result to determine whether
**pmem_persist**(3) or **msync**(2) is appropriate for flushing changes to
persistence. Calling **pmem_is_pmem**() each time changes are flushed to
persistence will not perform well.
The _UW(pmem_map_file) function creates a new read/write mapping for a
file. If **PMEM_FILE_CREATE** is not specified in *flags*, the entire existing
file *path* is mapped, *len* must be zero, and *mode* is ignored. Otherwise,
*path* is opened or created as specified by *flags* and *mode*, and *len*
must be non-zero. _UW(pmem_map_file) maps the file using **mmap**(2), but it
also takes extra steps to make large page mappings more likely.
On success, _UW(pmem_map_file) returns a pointer to the mapped area. If
*mapped_lenp* is not NULL, the length of the mapping is stored into
\**mapped_lenp*. If *is_pmemp* is not NULL, a flag indicating whether the
mapped file is actual pmem, or if **msync**() must be used to flush writes
for the mapped range, is stored into \**is_pmemp*.
The *flags* argument is 0 or the bitwise OR of one or more of the
following file creation flags:
+ **PMEM_FILE_CREATE** - Create the file named *path* if it does not exist.
*len* must be non-zero and specifies the size of the file to be created.
If the file already exists, it will be extended or truncated to *len.*
The new or existing file is then fully allocated to size *len* using
**posix_fallocate**(3).
*mode* specifies the mode to use in case a new file is created (see
**creat**(2)).
The remaining flags modify the behavior of _UW(pmem_map_file) when
**PMEM_FILE_CREATE** is specified.
+ **PMEM_FILE_EXCL** - If specified in conjunction with **PMEM_FILE_CREATE**,
and *path* already exists, then _UW(pmem_map_file) will fail with **EEXIST**.
Otherwise, has the same meaning as **O_EXCL** on **open**(2), which is
generally undefined.
+ **PMEM_FILE_SPARSE** - When specified in conjunction with
**PMEM_FILE_CREATE**, create a sparse (holey) file using **ftruncate**(2)
rather than allocating it using **posix_fallocate**(3). Otherwise ignored.
+ **PMEM_FILE_TMPFILE** - Create a mapping for an unnamed temporary file.
Must be specified with **PMEM_FILE_CREATE**. *len* must be non-zero,
*mode* is ignored (the temporary file is always created with mode 0600),
and *path* must specify an existing directory name. If the underlying file
system supports **O_TMPFILE**, the unnamed temporary file is created in
the filesystem containing the directory *path*; if **PMEM_FILE_EXCL**
is also specified, the temporary file may not subsequently be linked into
the filesystem (see **open**(2)).
Otherwise, the file is created in *path* and immediately unlinked.
The *path* can point to a Device DAX. In this case only the
**PMEM_FILE_CREATE** and **PMEM_FILE_SPARSE** flags are valid, but they are
both ignored. For Device DAX mappings, *len* must be equal to
either 0 or the exact size of the device.
To delete mappings created with _UW(pmem_map_file), use **pmem_unmap**().
The **pmem_unmap**() function deletes all the mappings for the
specified address range, and causes further references to addresses
within the range to generate invalid memory references. It will use the
address specified by the parameter *addr*, where *addr* must be a
previously mapped region. **pmem_unmap**() will delete the mappings
using **munmap**(2).
# RETURN VALUE #
The **pmem_is_pmem**() function returns true only if the entire range
\[*addr*, *addr*+*len*) consists of persistent memory. A true return
from **pmem_is_pmem**() means it is safe to use **pmem_persist**(3)
and the related functions to make changes durable for that memory
range. See also **CAVEATS**.
On success, _UW(pmem_map_file) returns a pointer to the memory-mapped region
and sets \**mapped_lenp* and \**is_pmemp* if they are not NULL.
On error, it returns NULL, sets *errno* appropriately, and does not modify
\**mapped_lenp* or \**is_pmemp*.
On success, **pmem_unmap**() returns 0. On error, it returns -1 and sets
*errno* appropriately.
# NOTES #
On Linux, **pmem_is_pmem**() returns true only if the entire range
is mapped directly from Device DAX (/dev/daxX.Y) without an intervening
file system. In the future, as file systems become available that support
flushing with **pmem_persist**(3), **pmem_is_pmem**() will return true
as appropriate.
# CAVEATS #
The result of **pmem_is_pmem**() query is only valid for the mappings
created using _UW(pmem_map_file). For other memory regions, in particular
those created by a direct call to **mmap**(2), **pmem_is_pmem**() always
returns false, even if the queried range is entirely persistent memory.
Not all file systems support **posix_fallocate**(3). _UW(pmem_map_file) will
fail if **PMEM_FILE_CREATE** is specified without **PMEM_FILE_SPARSE** and
the underlying file system does not support **posix_fallocate**(3).
_WINUX(=q= On Windows if **PMEM_FILE_CREATE** is specified without
**PMEM_FILE_SPARSE** and the file exists, FILE_ATTRIBUTE_SPARSE_FILE and
FILE_ATTRIBUTE_COMPRESSED will be removed if the file has any, to physically
allocate space for the file. This is a workaround for _chsize() performance
issues. =e=)
# SEE ALSO #
**creat**(2), **ftruncate**(2), **mmap**(2), **msync**(2), **munmap**(2),
**open**(2), **pmem_persist**(3),
**posix_fallocate**(3), **libpmem**(7) and **<https://pmem.io>**
| 7,259 | 41.95858 | 88 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/libpmem/pmem_memmove_persist.3.md
|
---
layout: manual
Content-Style: 'text/css'
title: _MP(PMEM_MEMMOVE_PERSIST, 3)
collection: libpmem
header: PMDK
date: pmem API version 1.1
...
[comment]: <> (SPDX-License-Identifier: BSD-3-Clause)
[comment]: <> (Copyright 2017-2020, Intel Corporation)
[comment]: <> (pmem_memmove_persist.3 -- man page for functions that provide optimized copying to persistent memory)
[NAME](#name)<br />
[SYNOPSIS](#synopsis)<br />
[DESCRIPTION](#description)<br />
[RETURN VALUE](#return-value)<br />
[SEE ALSO](#see-also)<br />
# NAME #
**pmem_memmove**(), **pmem_memcpy**(), **pmem_memset**(),
**pmem_memmove_persist**(), **pmem_memcpy_persist**(), **pmem_memset_persist**(),
**pmem_memmove_nodrain**(), **pmem_memcpy_nodrain**(), **pmem_memset_nodrain**()
- functions that provide optimized copying to persistent memory
# SYNOPSIS #
```c
#include <libpmem.h>
void *pmem_memmove(void *pmemdest, const void *src, size_t len, unsigned flags);
void *pmem_memcpy(void *pmemdest, const void *src, size_t len, unsigned flags);
void *pmem_memset(void *pmemdest, int c, size_t len, unsigned flags);
void *pmem_memmove_persist(void *pmemdest, const void *src, size_t len);
void *pmem_memcpy_persist(void *pmemdest, const void *src, size_t len);
void *pmem_memset_persist(void *pmemdest, int c, size_t len);
void *pmem_memmove_nodrain(void *pmemdest, const void *src, size_t len);
void *pmem_memcpy_nodrain(void *pmemdest, const void *src, size_t len);
void *pmem_memset_nodrain(void *pmemdest, int c, size_t len);
```
# DESCRIPTION #
**pmem_memmove**(), **pmem_memcpy**() and **pmem_memset**() functions provide
the same memory copying as their namesakes **memmove**(3), **memcpy**(3) and
**memset**(3), and ensure that the result has been flushed to persistence before
returning (unless **PMEM_F_MEM_NOFLUSH** flag was used).
For example, the following code is functionally equivalent to **pmem_memmove**() (with flags equal to 0):
```c
memmove(dest, src, len);
pmem_persist(dest, len);
```
Calling **pmem_memmove**() may out-perform the above code, because
**libpmem**(7) implementation may take advantage of the fact that *pmemdest*
is persistent memory and use instructions such as *non-temporal* stores to
avoid the need to flush processor caches.
>WARNING:
Using these functions where **pmem_is_pmem**(3) returns false
may not do anything useful. Use libc functions in that case.
Unlike libc implementation, **libpmem** functions guarantee that if destination
buffer address and length are 8 byte aligned then all stores will be performed
using at least 8 byte store instructions. This means that a series of 8 byte
stores followed by **pmem_persist**(3) can be safely replaced by a single call
to one of the above functions.
The *flags* argument of all of the above functions has the same meaning.
It can be 0 or a bitwise OR of one or more of the following flags:
+ **PMEM_F_MEM_NODRAIN** - modifies the behavior to skip the final
**pmem_drain**() step. This allows applications to optimize cases where
several ranges are being copied to persistent memory, followed by a single
call to **pmem_drain**(). The following example illustrates how this flag
might be used to avoid multiple calls to **pmem_drain**() when copying several
ranges of memory to pmem:
```c
/* ... write several ranges to pmem ... */
pmem_memcpy(pmemdest1, src1, len1, PMEM_F_MEM_NODRAIN);
pmem_memcpy(pmemdest2, src2, len2, PMEM_F_MEM_NODRAIN);
/* ... */
/* wait for any pmem stores to drain from HW buffers */
pmem_drain();
```
+ **PMEM_F_MEM_NOFLUSH** - Don't flush anything. This implies **PMEM_F_MEM_NODRAIN**.
Using this flag only makes sense when it's followed by any function that
flushes data.
The remaining flags say *how* the operation should be done, and are merely hints.
+ **PMEM_F_MEM_NONTEMPORAL** - Use non-temporal instructions.
This flag is mutually exclusive with **PMEM_F_MEM_TEMPORAL**.
On x86\_64 this flag is mutually exclusive with **PMEM_F_MEM_NOFLUSH**.
+ **PMEM_F_MEM_TEMPORAL** - Use temporal instructions.
This flag is mutually exclusive with **PMEM_F_MEM_NONTEMPORAL**.
+ **PMEM_F_MEM_WC** - Use write combining mode.
This flag is mutually exclusive with **PMEM_F_MEM_WB**.
On x86\_64 this flag is mutually exclusive with **PMEM_F_MEM_NOFLUSH**.
+ **PMEM_F_MEM_WB** - Use write back mode.
This flag is mutually exclusive with **PMEM_F_MEM_WC**.
On x86\_64 this is an alias for **PMEM_F_MEM_TEMPORAL**.
Using an invalid combination of flags has undefined behavior.
Without any of the above flags **libpmem** will try to guess the best strategy
based on size. See **PMEM_MOVNT_THRESHOLD** description in **libpmem**(7) for
details.
**pmem_memmove_persist**() is an alias for **pmem_memmove**() with flags equal to 0.
**pmem_memcpy_persist**() is an alias for **pmem_memcpy**() with flags equal to 0.
**pmem_memset_persist**() is an alias for **pmem_memset**() with flags equal to 0.
**pmem_memmove_nodrain**() is an alias for **pmem_memmove**() with flags equal to **PMEM_F_MEM_NODRAIN**.
**pmem_memcpy_nodrain**() is an alias for **pmem_memcpy**() with flags equal to **PMEM_F_MEM_NODRAIN**.
**pmem_memset_nodrain**() is an alias for **pmem_memset**() with flags equal to **PMEM_F_MEM_NODRAIN**.
# RETURN VALUE #
All of the above functions return address of the destination buffer.
# CAVEATS #
After calling any of the functions with **PMEM_F_MEM_NODRAIN** flag you
should not expect memory to be visible to other threads before calling
**pmem_drain**(3) or any of the *\_persist* functions. This is because on
x86\_64 those functions may use non-temporal store instructions, which are
weakly ordered. See "Intel 64 and IA-32 Architectures Software Developer's Manual",
Volume 1, "Caching of Temporal vs. Non-Temporal Data" section for details.
# SEE ALSO #
**memcpy**(3), **memmove**(3), **memset**(3),
**libpmem**(7) and **<https://pmem.io>**
| 5,904 | 38.630872 | 115 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/libvmem/README.md
|
This library has been moved to a
[separate repository](https://github.com/pmem/vmem).
| 86 | 28 | 52 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/pmempool/pmempool-info.1.md
|
---
layout: manual
Content-Style: 'text/css'
title: _MP(PMEMPOOL-INFO, 1)
collection: pmempool
header: PMDK
date: pmem Tools version 1.4
...
[comment]: <> (SPDX-License-Identifier: BSD-3-Clause)
[comment]: <> (Copyright 2016-2018, Intel Corporation)
[comment]: <> (pmempool-info.1 -- man page for pmempool-info)
[NAME](#name)<br />
[SYNOPSIS](#synopsis)<br />
[DESCRIPTION](#description)<br />
[RANGE](#range)<br />
[STATISTICS](#statistics)<br />
[EXAMPLE](#example)<br />
[SEE ALSO](#see-also)<br />
# NAME #
**pmempool-info** - show information about persistent memory pool
# SYNOPSIS #
```
$ pmempool info [<options>] <file>
```
# DESCRIPTION #
The **pmempool** invoked with *info* command analyzes an existing pool
created by **PMDK** libraries provided by **file** parameter.
The **file** can be either existing pool file, a part file or a poolset file.
The main task of this command is to print all usable information from
pool headers and user data in human readable format. It automatically recognizes the pool
type by parsing and analyzing the pool header. The recognition
is done by checking the signature in the pool header. The main job of *info* command
is to present internal data structures as they are stored in file but *not* for checking
consistency. For this purpose there is the **pmempool-check**(1) command available.
The **pmempool** with *info* command analyzes pool file as long as it is possible
regarding *correctness* of internal meta-data (correct offsets, sizes etc.).
If it is *not* possible to analyze the rest of the file, **pmempool** exits
with error code and prints appropriate error message.
Currently there is lack of interprocess synchronization for pool files,
so the *info* command should be invoked off-line. Using **pmempool** on pool file which
may be modified by another process may lead to unexpected errors in pool file.
A poolset file passed to **pmempool info** may contain multiple replicas,
also remote ones, but **pmempool** currently does not read any data from remote replicas.
It prints only a remote node address and a remote replica descriptor.
**pmempool info** opens pool file in *read-only* mode so the file
will remain untouched after processing.
The *info* command may collect and print basic statistics about data usage.
The statistics are specific to the type of pool. See **STATISTICS** section for details.
Although the pool consistency is *not* checked by the *info* command,
it prints information about checksum errors and/or offsets errors.
##### Common options: #####
By default the *info* command of **pmempool** prints information about the most important
internal data structures from pool. The particular set of headers and meta-data depend on
pool type. The pool type is recognized automatically and appropriate information is displayed
in human-readable format.
To force processing specified file(s) as desired pool type use **-f** option with appropriate
name of pool type. The valid names off pool types are **blk**, **log**, **obj** or **btt**.
This option may be useful when the pool header is corrupted and automatic recognition of
pool type fails.
`-f, --force blk|log|obj|btt`
Force parsing pool as specified pool type.
>NOTE:
By default only pool headers and internal meta-data are displayed.
To display user data use **-d** option. Using **-r** option you can
specify number of blocks/bytes/data chunks or objects using special
text format. See **RANGE** section for details. The range refers to
*block numbers* in case of pmem blk pool type, to *chunk numbers*
in case of pmem log pool type and to *object numbers* in case of
pmem obj pool type.
See **EXAMPLES** section for an example of usage of these options.
`-d, --data`
Dump user data in hexadecimal format.
In case of pmem *blk* pool type data is dumped in *blocks*.
In case of pmem *log* pool type data is dumped as a whole or in *chunks* if **-w**
option is used (See **Options for PMEMLOG** section for details).
`-r, --range <range>`
Range of blocks/data chunks/objects/zone headers/chunk headers/lanes.
See **RANGE** section for details about range format.
`-n, --human`
Print sizes in human-readable format with appropriate units (e.g. 4k, 8M, 16G)
`-x, --headers-hex`
Print pool's internal data in mixed format which consists of hexadecimal dump of
header's data and parsed format displayed in human-readable format. This
allows one to see how data is stored in file.
`-s, --stats`
Print pool's statistics. See **STATISTICS** section for details.
`-k, --bad-blocks=<yes|no>`
Print bad blocks found in the pool.
`-h, --help`
Display help message and exit.
##### Options for PMEMLOG: #####
`-w, --walk <size>`
Use this option to walk through used data with fixed data chunk size.
See **pmemlog_walk**(3) in **libpmemlog**(7) for details.
##### Options for PMEMBLK: #####
By default the *info* command displays the **pmemblk** header and
BTT (Block Translation Table) Info header in case of **pmemblk** pool type.
To display BTT Map and/or BTT FLOG (Free List and Log) use **-m**
and **-g** options respectively or increase verbosity level.
In order to display BTT Info header backup use **-B** option.
`-m, --map`
Print BTT Map entries.
`-g, --flog`
Print BTT FLOG entries.
`-B, --backup`
Print BTT Info header backup.
>NOTE:
By default the *info* command displays all data blocks when **-d** options is used.
However it is possible to skip blocks marked with *zero* and/or *error* flags.
It is also possible to skip blocks which are *not* marked with any flag. Skipping
blocks has impact on blocks ranges (e.g. display 10 blocks marked with error flag
in the range from 0 to 10000) and statistics.
`-z, --skip-zeros`
Skip blocks marked with *zero* flag.
`-e, --skip-error`
Skip blocks marked with *error* flag.
`-u, --skip-no-flag`
Skip blocks *not* marked with any flag.
##### Options for PMEMOBJ: #####
By default the *info* command displays pool header and **pmemobj** pool descriptor.
In order to print information about other data structures one of the
following options may be used.
`-l, --lanes [<range>]`
Print information about lanes. If range is not specified all lanes are displayed.
The range can be specified using **-r** option right after the **-l** option.
See **RANGE** section for details about range format.
`-R, --recovery`
Print information about only those lanes which require recovery process.
This option requires **-l**, **--lanes** option.
`-O, --object-store`
Print information about all allocated objects.
`-t, --types <range>`
Print information about allocated objects only from specified range of type numbers.
If **-s**, **--stats** option is specified the objects statistics refer to objects from
specified range of type numbers. This option requires **-O**, **--object-store** or **-s**,
**--stats** options. See **RANGE** section for details about range format.
`-E, --no-empty`
Ignore empty lists of objects. This option requires **-O**, **--object-store** option.
`-o, --root`
Print information about a root object.
`-A, --alloc-header`
Print object's allocation header. This option requires **-O**, **--object-store** or **-l**,
**--lanes** or **-o**, **--root** options.
`-a, --oob-header`
Print object's out of band header. This option requires **-O**, **--object-store** or **-l**,
**--lanes** or **-o**, **--root** options.
`-H, --heap`
Print information about **pmemobj** heap. By default only a heap header is displayed.
`-Z, --zones`
If the **-H**, **--heap** option is used, print information about zones from specified range.
If the **-O**, **--object-store** option is used, print information about objects only
from specified range of zones. This option requires **-O**, **--object-store**, **-H**, **--heap**
or **-s**, **--stats** options.
The range can be specified using **-r** option right after the **-Z** option.
See **RANGE** section for details about range format.
`-C, --chunks [<range>]`
If the **-H, --heap** option is used, print information about chunks from specified range.
By default information about chunks of types *used* , *free* and *run* are displayed.
If the **-O, --object-store** option is used, print information about objects from
specified range of chunks within a zone. This option requires **-O, --object-store**,
**-H, --heap** or **-s, --stats** options.
The range can be specified using **-r** option right after the **-C** option.
See **RANGE** section for details about range format.
`-T, --chunk-type used,free,run,footer`
Print only specified type(s) of chunks.
The multiple types may be specified separated by comma.
This option requires **-H, --heap** and **-C, --chunks** options.
`-b, --bitmap`
Print bitmap of used blocks in chunks of type run.
This option requires **-H, --heap** and **-C, --chunks** options.
`-p, --replica <num>`
Print information from *\<num\>* replica. The 0 value means the master pool file.
# RANGE #
Using **-r, --range** option it is possible to dump only a range of user data.
This section describes valid format of *\<range\>* string.
You can specify multiple ranges separated by commas.
`<first>-<last>`
All blocks/bytes/data chunks from *\<first\>* to *\<last\>* will be dumped.
`-<last>`
All blocks/bytes/data chunks up to *\<last\>* will be dumped.
`<first>-`
All blocks/bytes/data chunks starting from *\<first\>* will be dumped.
`<number>`
Only *\<number\>* block/byte/data chunk will be dumped.
# STATISTICS #
Below is the description of statistical measures for specific pool types.
##### PMEMLOG #####
+ **Total** - Total space in pool.
+ **Available** - Size and percentage of available space.
+ **Used** - Size and percentage of used space.
##### PMEMBLK #####
+ **Total blocks** - Total number of blocks in pool.
+ **Zeroed blocks** - Number and percentage of blocks marked with *zero* flag.
+ **Error blocks** - Number and percentage of blocks marked with *error* flag.
+ **Blocks without any flag** - Number and percentage of blocks *not* marked with any flag.
>NOTE:
In case of pmemblk, statistics are evaluated for blocks which meet requirements regarding:
*range* of blocks (**-r** option),
*skipped* types of blocks (**-z**, **-e**, **-u** options).
##### PMEMOBJ #####
+ **Object store**
+ **Number of objects** - Total number of objects and number of objects per type number.
+ **Number of bytes** - Total number of bytes and number of bytes per type number.
+ **Heap**
+ **Number of zones** - Total number of zones in the pool.
+ **Number of used zones** - Number of used zones in the pool.
+ **Zone** The zone's statistics are presented for each zone separately and the aggregated results from all zones.
+ **Number of chunks** - Total number of chunks in the zone and number of chunks of specified type.
+ **Chunks size** - Total size of all chunks in the zone and sum of sizes of chunks of specified type.
+ **Allocation classes**
+ **Units** - Total number of units of specified class.
+ **Used units** - Number of used units of specified class.
+ **Bytes** - Total number of bytes of specified class.
+ **Used bytes** - Number of used bytes of specified class.
+ **Total bytes** - Total number of bytes of all classes.
+ **Total used bytes** - Total number of used bytes of all classes.
# EXAMPLE #
```
$ pmempool info ./pmemblk
```
Parse and print information about "pmemblk" pool file.
```
$ pmempool info -f blk ./pmempool
```
Force parsing "pmempool" file as **pmemblk** pool type.
```
$ pmempool info -d ./pmemlog
```
Print information and data in hexadecimal dump format for file "pmemlog".
```
$ pmempool info -d -r10-100 -eu ./pmemblk
```
Print information from "pmemblk" file. Dump data blocks from 10 to 100,
skip blocks marked with error flag and not marked with any flag.
# SEE ALSO #
**pmempool**(1), **libpmemblk**(7), **libpmemlog**(7),
**libpmemobj**(7) and **<https://pmem.io>**
| 11,938 | 31.980663 | 114 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/pmempool/pmempool-create.1.md
|
---
layout: manual
Content-Style: 'text/css'
title: _MP(PMEMPOOL-CREATE, 1)
collection: pmempool
header: PMDK
date: pmem Tools version 1.4
...
[comment]: <> (SPDX-License-Identifier: BSD-3-Clause)
[comment]: <> (Copyright 2016-2019, Intel Corporation)
[comment]: <> (pmempool-create.1 -- man page for pmempool-create)
[NAME](#name)<br />
[SYNOPSIS](#synopsis)<br />
[DESCRIPTION](#description)<br />
[EXAMPLE](#example)<br />
[SEE ALSO](#see-also)<br />
# NAME #
**pmempool-create** - create a persistent memory pool
# SYNOPSIS #
```
$ pmempool create [<options>] [<type>] [<bsize>] <file>
```
# DESCRIPTION #
The **pmempool** invoked with *create* command creates a pool file
of specified type. Depending on a pool type it is possible to provide more properties of pool.
Valid pool types are: **blk**, **log** and **obj** which stands for
*pmemblk*, *pmemlog* and *pmemobj* pools respectively. By default
the pool file is created with *minimum* allowed size for specified
pool type. The minimum sizes for **blk**, **log** and **obj** pool
types are **PMEMBLK_MIN_POOL**, **PMEMLOG_MIN_POOL** and **PMEMOBJ_MIN_POOL**
respectively. See **libpmemblk**(7), **libpmemlog**(7)
and **libpmemobj**(7) for details.
For *pmemblk* pool type block size *\<bsize\>* is a required argument.
In order to set custom size of pool use **-s** option, or use **-M** option
to create a pool of maximum available size on underlying file system.
The *size* argument may be passed in format that permits only the upper-case
character for byte - B as specified in IEC 80000-13, IEEE 1541 and the
Metric Interchange Format. Standards accept SI units with obligatory
B - kB, MB, GB, ... which means multiplier by 1000 and IEC units with
optional "iB" - KiB, MiB, GiB, ..., K, M, G, ... - which means multiplier by 1024.
##### Available options: #####
`-s, --size <size>`
Size of pool file.
`-M, --max-size`
Set size of pool to available space of underlying file system.
`-m, --mode <octal>`
Set permissions to <octal> (the default is 0664) when creating the files.
If the file already exists, the permissions are not changed.
`-i, --inherit <file>`
Create a new pool of the same size and other properties as *\<file\>*.
`-b, --clear-bad-blocks`
Clear bad blocks in existing files.
`-f, --force`
Remove the pool before creating.
`-v, --verbose`
Increase verbosity level.
`-h, --help`
Display help message and exit.
##### Options for PMEMBLK: #####
By default when creating a pmem **blk** pool, the **BTT** layout is *not*
written until the first *write operation* of block entry is performed.
Using **-w** option you can force writing the **BTT** layout by writing
zero data to specified block number. By default the *write operation*
is performed to block number 0. Please refer to **libpmemblk**(7) for details.
`-w, --write-layout`
Force writing the **BTT** layout by performing *write operation* to block number zero.
##### Options for PMEMOBJ: #####
By default when creating a pmem **obj** pool, the layout name provided to
the **libpmemobj** library is an empty string. Please refer to
**libpmemobj**(7) for details.
`-l, --layout <layout>`
Layout name of the **pmemobj** pool.
# EXAMPLE #
```
$ pmempool create blk 512 pool.blk
```
Create a blk pool file of minimum allowed size and block size 512 bytes
```
$ pmempool create log -M pool.log
```
Create a log pool file of maximum allowed size
```
$ pmempool create blk --size=4G --write-layout 1K pool.blk
```
Create a blk pool file of size 4G, block size 1K and write the BTT layout
```
$ pmempool create --layout my_layout obj pool.obj
```
Create an obj pool file of minimum allowed size and layout "my_layout"
```
$ pmempool create --inherit=pool.log new_pool.log
```
Create a pool file based on pool.log file
# SEE ALSO #
**pmempool**(1), **libpmemblk**(7), **libpmemlog**(7),
**libpmemobj**(7) and **<https://pmem.io>**
| 3,905 | 25.391892 | 94 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/pmempool/pmempool.1.md
|
---
layout: manual
Content-Style: 'text/css'
title: _MP(PMEMPOOL, 1)
collection: pmempool
header: PMDK
date: pmem Tools version 1.4
...
[comment]: <> (SPDX-License-Identifier: BSD-3-Clause)
[comment]: <> (Copyright 2016-2020, Intel Corporation)
[comment]: <> (pmempool.1 -- man page for pmempool)
[NAME](#name)<br />
[SYNOPSIS](#synopsis)<br />
[DESCRIPTION](#description)<br />
[OPTIONS](#options)<br />
[COMMANDS](#commands)<br />
[DEBUGGING](#debugging)<br />
[SEE ALSO](#see-also)<br />
# NAME #
**pmempool** - Persistent Memory Pool Management Tool
# SYNOPSIS #
```
$ pmempool [--help] [--version] <command> [<args>]
```
# DESCRIPTION #
The **pmempool** is a management tool for *Persistent Memory* pool files
created by **PMDK** libraries.
The main purpose of **pmempool** is to provide a user with a set of utilities
for off-line analysis and manipulation of pools created by pmem libraries.
The pmempool is a generic command which consists of subcommands for specific
purposes. Some of subcommands are required to work *without* any impact
on processed pool, but some of them *may* create a new or modify an existing one.
The **pmempool** may be useful for troubleshooting by system administrators
and for software developers who work on applications based on **PMDK**.
The latter may find these tools useful for testing and debugging purposes also.
# OPTIONS #
`-V, --version`
Prints the version of **pmempool**.
`-h, --help`
Prints synopsis and list of commands.
# COMMANDS #
Currently there is a following set of commands available:
+ **pmempool-info**(1) -
Prints information and statistics in human-readable format about specified pool.
+ **pmempool-check**(1) -
Checks pool's consistency and repairs pool if it is not consistent.
+ **pmempool-create**(1) -
Creates a pool of specified type with additional properties specific for this type of pool.
+ **pmempool-dump**(1) -
Dumps usable data from pool in hexadecimal or binary format.
+ **pmempool-rm**(1) -
Removes pool file or all pool files listed in pool set configuration file.
+ **pmempool-convert**(1) -
Updates the pool to the latest available layout version.
+ **pmempool-sync**(1) -
Synchronizes replicas within a poolset.
+ **pmempool-transform**(1) -
Modifies internal structure of a poolset.
+ **pmempool-feature**(1) -
Toggle or query a poolset features.
In order to get more information about specific *command* you can use **pmempool help <command>.**
# DEBUGGING #
The debug logs are available only in the debug version of the tool,
which is not provided by binary packages, but can be built from sources.
The **pmempool.static-debug** binary blob can be found
in the 'src/tools/pmempool/' subdirectory.
+ **PMEMPOOL_TOOL_LOG_LEVEL**
The value of **PMEMPOOL_TOOL_LOG_LEVEL** enables trace points in the debug version
of the tool, as follows:
+ **0** - This is the default level when **PMEMPOOL_TOOL_LOG_LEVEL** is not set.
No log messages are emitted at this level.
+ **1** - Additional details on any errors detected are logged (in addition
to returning the *errno*-based errors as usual).
+ **2** - A trace of basic operations is logged.
+ **3** - Enables a very verbose amount of function call tracing in the tool.
+ **4** - Enables voluminous and fairly obscure tracing
information that is likely only useful to the **pmempool** developers.
Unless **PMEMPOOL_TOOL_LOG_FILE** is set, debugging output is written to *stderr*.
+ **PMEMPOOL_TOOL_LOG_FILE**
Specifies the name of a file where all logging information should be written.
If the last character in the name is "-", the *PID* of the current process
will be appended to the file name when the log file is created.
If **PMEMPOOL_TOOL_LOG_FILE** is not set, output is written to *stderr*.
# SEE ALSO #
**libpmemblk**(7), **libpmemlog**(7), **libpmemobj**(7)
and **<https://pmem.io>**
| 3,865 | 28.968992 | 98 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/pmempool/pmempool-feature.1.md
|
---
layout: manual
Content-Style: 'text/css'
title: _MP(PMEMPOOL-FEATURE, 1)
collection: pmempool
header: PMDK
date: pmem Tools version 1.4
...
[comment]: <> (SPDX-License-Identifier: BSD-3-Clause)
[comment]: <> (Copyright 2018, Intel Corporation)
[comment]: <> (pmempool-feature.1 -- man page for pmempool-feature)
[NAME](#name)<br />
[SYNOPSIS](#synopsis)<br />
[DESCRIPTION](#description)<br />
[COMPATIBILITY](#compatibility)<br />
[DISCLAIMER](#disclaimer)<br />
[EXAMPLE](#example)<br />
[SEE ALSO](#see-also)<br />
# NAME #
**pmempool-feature** - toggle or query pool set features
# SYNOPSIS #
```
$ pmempool feature (-e|-d|-q feature-name) [options] <file>
```
# DESCRIPTION #
The **pmempool feature** command enables / disables or queries pool set features.
Available pool *feature-names* are:
+ **SINGLEHDR** - only the first part in each replica contains the pool part
internal metadata. This value can be used only with **-q**. It can not be
enabled or disabled. For details see **poolset**(5).
+ **CHECKSUM_2K** - only the first 2KiB of pool part internal metadata
is checksummed. Other features may depend on this one to store additional metadata
in otherwise unused second 2KiB part of a header.
When **CHECKSUM_2K** is disabled whole 4KiB is checksummed.
+ **SHUTDOWN_STATE** - enables additional check performed during
pool open which verifies pool consistency in the presence of dirty shutdown.
**CHECKSUM_2K** has to be enabled prior to **SHUTDOWN_STATE**
otherwise enabling **SHUTDOWN_STATE** will fail.
+ **CHECK_BAD_BLOCKS** - enables checking bad blocks performed during opening
a pool and fixing bad blocks performed by pmempool-sync during syncing a pool.
Currently (Linux kernel v4.19, libndctl v62) checking and fixing bad blocks
require read access to the following resource files (containing physical
addresses) of NVDIMM devices which only root can read by default:
```
/sys/bus/nd/devices/ndbus*/region*/resource
/sys/bus/nd/devices/ndbus*/region*/dax*/resource
/sys/bus/nd/devices/ndbus*/region*/pfn*/resource
/sys/bus/nd/devices/ndbus*/region*/namespace*/resource
```
It is possible to use poolset as *file* argument. But poolsets with remote
replicas are not supported.
##### Available options: #####
`-h, --help`
Print help message.
`-v, --verbose`
Increase verbosity level.
`-e, --enable feature-name`
Enable feature for pool set.
`-d, --disable feature-name`
Disable feature for pool set.
`-q, --query feature-name`
Print feature status.
# COMPATIBILITY #
Poolsets with features not defined in this document (e.g. enabled by the newer
software version) are not supported.
# DISCLAIMER #
```pmempool feature``` command is not fail safe.
# EXAMPLE #
```
$ pmempool feature --enable CHECKSUM_2K pool.set
```
Enables POOL_FEAT_CKSUM_2K incompat feature flag.
```
$ pmempool feature --disable CHECKSUM_2K pool.set
```
Disables POOL_FEAT_CKSUM_2K incompat feature flag.
```
$ pmempool feature --query CHECKSUM_2K pool.set
0
```
Prints POOL_FEAT_CKSUM_2K incompat feature flag value.
# SEE ALSO #
**poolset**(5) and **<https://pmem.io>**
| 3,108 | 24.072581 | 82 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/pmempool/pmempool-transform.1.md
|
---
layout: manual
Content-Style: 'text/css'
title: _MP(PMEMPOOL-TRANSFORM, 1)
collection: pmempool
header: PMDK
date: pmem Tools version 1.4
...
[comment]: <> (SPDX-License-Identifier: BSD-3-Clause)
[comment]: <> (Copyright 2016-2018, Intel Corporation)
[comment]: <> (pmempool-transform.1 -- man page for pmempool-transform)
[NAME](#name)<br />
[SYNOPSIS](#synopsis)<br />
[DESCRIPTION](#description)<br />
[EXAMPLES](#examples)<br />
[SEE ALSO](#see-also)<br />
# NAME #
**pmempool-transform** - Modify internal structure of a pool set.
# SYNOPSIS #
```
pmempool transform [options] <poolset_file_src> <poolset_file_dst>
```
# DESCRIPTION #
The **pmempool transform** command modifies internal structure of a pool set
defined by the `poolset_file_src` file, according to a structure described in
the `poolset_file_dst` file.
The following operations are supported:
* adding replicas - one or more new replicas can be added and synchronized with
other replicas in the pool set,
* removing replicas - one or more replicas can be removed from the pool set
_WINUX(.,=q=,
* adding or removing pool set options.=e=)
Only one of the above operations can be performed at a time.
Currently adding and removing replicas are allowed only for **pmemobj** pools
(see **libpmemobj**(7)).
The *poolset_file_src* argument provides the source pool set to be changed.
The *poolset_file_dst* argument points to the target pool set.
_WINUX(=q=When adding or deleting replicas, the two pool set files can differ only in the
definitions of replicas which are to be added or deleted. One cannot add and
remove replicas in the same step. Only one of these operations can be performed
at a time. Reordering replicas is not supported.
Also, to add a replica it is necessary for its effective size to match or exceed
the pool size. Otherwise the whole operation fails and no changes are applied.
Effective size of a replica is the sum of sizes of all its part files decreased
by 4096 bytes per each part file. The 4096 bytes of each part file is
utilized for storing internal metadata of the pool part files.=e=)
_WINUX(,=q=When adding or deleting replicas, the two pool set files can differ
only in the definitions of replicas which are to be added or deleted. When
adding or removing pool set options (see **poolset**(5)), the rest of both pool
set files have to be of the same structure. The operation of adding/removing
a pool set option can be performed on a pool set with local replicas only. To
add/remove a pool set option to/from a pool set with remote replicas, one has
to remove the remote replicas first, then add/remove the option, and finally
recreate the remote replicas having added/removed the pool set option to/from
the remote replicas' poolset files.
To add a replica it is necessary for its effective size to match or exceed the
pool size. Otherwise the whole operation fails and no changes are applied.
If none of the poolset options is used, the effective size of a replica is the
sum of sizes of all its part files decreased by 4096 bytes per each part file.
The 4096 bytes of each part file is utilized for storing internal metadata of
the pool part files.
If the option *SINGLEHDR* is used, the effective size of a replica is the sum of
sizes of all its part files decreased once by 4096 bytes. In this case only
the first part contains internal metadata.
If the option *NOHDRS* is used, the effective size of a replica is the sum of
sizes of all its part files. In this case none of the parts contains internal
metadata.=e=)
##### Available options: #####
`-d, --dry-run`
: Enable dry run mode. In this mode no changes are applied, only check for
viability of the operation is performed.
`-v, --verbose`
: Increase verbosity level.
`-h, --help`
: Display help message and exit.
# EXAMPLES #
##### Example 1. #####
Let files `/path/poolset_file_src` and `/path/poolset_file_dst` have the
following contents:
```
PMEMPOOLSET
20M /0/partfile1
20M /0/partfile2
25M /0/partfile3
REPLICA
40M /1/partfile1
20M /1/partfile2
```
```
PMEMPOOLSET
20M /0/partfile1
20M /0/partfile2
25M /0/partfile3
REPLICA
40M /1/partfile1
20M /1/partfile2
REPLICA
50M /2/partfile1
20M /2/partfile2
```
Then, the command
`pmempool transform /path/poolset_file_src /path/poolset_file_dst`
adds a replica to the pool set. All other replicas remain unchanged and
the size of the pool remains 60M.
##### Example 2. #####
Let files `/path/poolset_file_src` and `/path/poolset_file_dst` have the
following contents:
```
PMEMPOOLSET
20M /0/partfile1
20M /0/partfile2
25M /0/partfile3
REPLICA
40M /1/partfile1
20M /1/partfile2
```
```
PMEMPOOLSET
20M /0/partfile1
20M /0/partfile2
25M /0/partfile3
```
Then
`pmempool_transform /path/poolset_file_src /path/poolset_file_dst`
deletes the second replica from the pool set. The first replica remains
unchanged and the size of the pool is still 60M.
# SEE ALSO #
**pmempool(1)**, **libpmemblk(7)**, **libpmemlog(7)**,
**libpmempool(7)** and **<https://pmem.io>**
| 5,016 | 28.168605 | 89 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/pmempool/pmempool-rm.1.md
|
---
layout: manual
Content-Style: 'text/css'
title: _MP(PMEMPOOL-RM, 1)
collection: pmempool
header: PMDK
date: pmem Tools version 1.4
...
[comment]: <> (SPDX-License-Identifier: BSD-3-Clause)
[comment]: <> (Copyright 2016-2018, Intel Corporation)
[comment]: <> (pmempool-rm.1 -- man page for pmempool-rm)
[NAME](#name)<br />
[SYNOPSIS](#synopsis)<br />
[DESCRIPTION](#description)<br />
[EXAMPLE](#example)<br />
[SEE ALSO](#see-also)<br />
# NAME #
**pmempool-rm** - remove a persistent memory pool
# SYNOPSIS #
```
$ pmempool rm [<options>] <file>..
```
# DESCRIPTION #
The **pmempool rm** command removes each specified file. If the specified file
is a pool set file, all pool files (single-file pool or part files) and remote
replicas are removed. By default the **pmempool rm** does not remove pool set
files. All local and remote pool files are removed using **unlink**(3) call,
except the pools created on **device dax** which are zeroed instead.
If a specified file does not exist, or the remote pool is broken or not accessible,
the **pmempool rm** command terminates with an error code. By default it prompts
before removing *write-protected* local files.
See **REMOTE REPLICATION** section for more details about support for remote
pools.
See **EXAMPLES** section for example usage of the *rm* command.
##### Available options: #####
`-h, --help`
Print help message
`-v, --verbose`
Be verbose and print all removing files.
`-s, --only-pools`
Remove only pool files and do not remove pool set files (default behaviour).
`-a, --all`
Remove all pool set files - local and remote.
`-l, --local`
Remove local pool set files.
`-r, --remote`
Remove remote pool set files.
`-f, --force`
Remove all specified files, ignore nonexistent files, never prompt.
`-i, --interactive`
Prompt before removing every single file or remote pool.
# REMOTE REPLICATION #
A remote pool is removed using **rpmem_remove**(3) function if **librpmem**(7)
library is available. If a pool set file contains remote replication but
**librpmem**(7) is not available, the **pmempool rm** command terminates with
an error code, unless the **-f, --force** option is specified.
# EXAMPLE #
```
$ pmempool rm pool.obj pool.blk
```
Remove specified pool files.
```
$ pmempool rm pool.set
```
Remove all pool files from the "pool.set", do not remove *pool.set* itself.
```
$ pmempool rm -a pool.set
```
Remove all pool files from the "pool.set", remove the local pool set file and all
remote pool set files.
# SEE ALSO #
**pmempool**(1), **libpmemblk**(7), **libpmemlog**(7),
**libpmemobj**(7), **librpmem**(7) and **<https://pmem.io>**
| 2,640 | 22.792793 | 81 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/pmempool/pmempool-sync.1.md
|
---
layout: manual
Content-Style: 'text/css'
title: _MP(PMEMPOOL-SYNC, 1)
collection: pmempool
header: PMDK
date: pmem Tools version 1.4
...
[comment]: <> (SPDX-License-Identifier: BSD-3-Clause)
[comment]: <> (Copyright 2016-2018, Intel Corporation)
[comment]: <> (pmempool-sync.1 -- man page for pmempool-sync)
[NAME](#name)<br />
[SYNOPSIS](#synopsis)<br />
[DESCRIPTION](#description)<br />
[EXAMPLES](#examples)<br />
[SEE ALSO](#see-also)<br />
# NAME #
**pmempool-sync** - Synchronize replicas or their parts within a pool set.
# SYNOPSIS #
```
pmempool sync [options] <poolset_file>
```
NOTE: Only the pool set file used to create the pool should be used
for syncing the pool.
# DESCRIPTION #
The **pmempool sync** command synchronizes data between replicas within
a pool set. It checks if metadata of all replicas in a pool set
are consistent, i.e. all parts are healthy, and if any of them is not,
the corrupted or missing parts are recreated and filled with data from one of
the healthy replicas.
Currently synchronizing data is allowed only for **pmemobj** pools (see
**libpmemobj**(7)).
_WINUX(,=q=If a pool set has the option *SINGLEHDR* or *NOHDRS*
(see **poolset**(5)), **pmempool sync** command has limited capability
of checking its metadata. This is due to limited or no, respectively, internal
metadata at the beginning of pool set parts in every replica when either of the
options is used. In that cases, only missing parts or the ones which cannot
be opened are recreated.=e=)
##### Available options: #####
`-b, --bad-blocks`
: Fix bad blocks - it causes creating or reading special recovery files.
When bad blocks are detected, special recovery files have to be created
in order to fix them safely. A separate recovery file is created for each part
of the pool. The recovery files are created in the same directory
where the poolset file is located using the following name pattern:
\<poolset-file-name\> _r \<replica-number\> _p \<part-number\> _badblocks.txt
These recovery files are automatically removed if the sync operation finishes
successfully.
If the last sync operation was interrupted and not finished correctly
(eg. the application crashed) and the bad blocks fixing procedure was
in progress, the bad block recovery files may be left over. In such case
bad blocks might have been cleared and zeroed, but the correct data from these
blocks was not recovered (not copied from a healthy replica), so the recovery
files MUST NOT be deleted manually, because it would cause a data loss.
Pmempool-sync should be run again with the '-b' option set. It will finish
the previously interrupted sync operation and copy correct data to zeroed
bad blocks using the left-over bad block recovery files (the bad blocks
will be read from the saved recovery files). Pmempool will delete the recovery
files automatically at the end of the sync operation.
Using this option may have limitations depending on the operating system.
For details see description of the CHECK_BAD_BLOCKS feature
in **pmempool-feature**(1).
`-d, --dry-run`
: Enable dry run mode. In this mode no changes are applied, only check for
viability of synchronization.
`-v, --verbose`
: Increase verbosity level.
`-h, --help`
: Display help message and exit.
# SEE ALSO #
**pmempool(1)**, **libpmemblk(7)**, **libpmemlog(7)**,
**libpmempool(7)** and **<https://pmem.io>**
| 3,384 | 33.896907 | 79 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/pmempool/pmempool-check.1.md
|
---
layout: manual
Content-Style: 'text/css'
title: _MP(PMEMPOOL-CHECK, 1)
collection: pmempool
header: PMDK
date: pmem Tools version 1.4
...
[comment]: <> (SPDX-License-Identifier: BSD-3-Clause)
[comment]: <> (Copyright 2016-2018, Intel Corporation)
[comment]: <> (pmempool-check.1 -- man page for pmempool-check)
[NAME](#name)<br />
[SYNOPSIS](#synopsis)<br />
[DESCRIPTION](#description)<br />
[EXAMPLE](#example)<br />
[SEE ALSO](#see-also)<br />
# NAME #
**pmempool-check** - check and repair persistent memory pool
# SYNOPSIS #
```
$ pmempool check [<options>] <file>
```
# DESCRIPTION #
The **pmempool** invoked with *check* command checks consistency of a given pool file.
If the pool file is consistent **pmempool** exits with 0 value. If the
pool file is not consistent non-zero error code is returned.
In case of any errors, the proper message is printed. The verbosity level
may be increased using **-v** option. The output messages may be also suppressed using
**-q** option.
It is possible to try to fix encountered problems using **-r** option.
In order to be sure this will not corrupt your data you can either create backup of the
pool file using **-b** option or just print what would be fixed
without modifying original pool using **-N** option.
> NOTE:
Currently, checking the consistency of a *pmemobj* pool is **not** supported.
##### Available options: #####
`-r, --repair`
Try to repair a pool file if possible.
`-y, --yes`
Answer yes on all questions.
`-d, --dry-run`
Don't execute, just show what would be done. Not supported on Device DAX.
`-N, --no-exec`
Deprecated alias for `dry-run`.
`-b, --backup <file>`
Create backup of a pool file before executing. Terminate if it is *not*
possible to create a backup file. This option requires **-r** option.
`-a, --advanced`
Perform advanced repairs. This option enables more aggressive steps in attempts
to repair a pool. This option requires `-r, --repair`.
`-q, --quiet`
Be quiet and don't print any messages.
`-v, --verbose`
Be more verbose.
`-h, --help`
Display help message and exit.
# EXAMPLE #
```
$ pmempool check pool.bin
```
Check consistency of "pool.bin" pool file
```
$ pmempool check --repair --backup pool.bin.backup pool.bin
```
Check consistency of "pool.bin" pool file, create backup and repair
if necessary.
```
$ pmempool check -rvN pool.bin
```
Check consistency of "pool.bin" pool file, print what would be repaired with
increased verbosity level.
# SEE ALSO #
**pmempool**(1), **libpmemblk**(7), **libpmemlog**(7),
**libpmemobj**(7), **libpmempool**(7) and **<https://pmem.io>**
| 2,614 | 21.73913 | 87 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/pmempool/pmempool-convert.1.md
|
---
layout: manual
Content-Style: 'text/css'
title: _MP(PMEMPOOL-CONVERT, 1)
collection: pmempool
header: PMDK
date: pmem Tools version 1.4
...
[comment]: <> (SPDX-License-Identifier: BSD-3-Clause)
[comment]: <> (Copyright 2016-2018, Intel Corporation)
[comment]: <> (pmempool-convert.1 -- man page for pmempool-convert)
[NAME](#name)<br />
[SYNOPSIS](#synopsis)<br />
[DESCRIPTION](#description)<br />
[EXAMPLE](#example)<br />
[SEE ALSO](#see-also)<br />
# NAME #
**pmempool-convert** - this is a wrapper around the **pmdk-convert**(1) tool. More information
can be found in the **pmdk-convert**(1) man page.
# SEE ALSO #
**pmdk-convert**(1), **pmempool**(1), **libpmemblk**(7), **libpmemlog**(7),
**libpmemobj**(7), **libpmempool**(7) and **<https://pmem.io>**
| 756 | 24.233333 | 83 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/pmempool/pmempool-dump.1.md
|
---
layout: manual
Content-Style: 'text/css'
title: _MP(PMEMPOOL-DUMP, 1)
collection: pmempool
header: PMDK
date: pmem Tools version 1.4
...
[comment]: <> (SPDX-License-Identifier: BSD-3-Clause)
[comment]: <> (Copyright 2016-2018, Intel Corporation)
[comment]: <> (pmempool-dump.1 -- man page for pmempool-dump)
[NAME](#name)<br />
[SYNOPSIS](#synopsis)<br />
[DESCRIPTION](#description)<br />
[RANGE](#range)<br />
[EXAMPLE](#example)<br />
[SEE ALSO](#see-also)<br />
# NAME #
**pmempool-dump** - dump user data from persistent memory pool
# SYNOPSIS #
```
$ pmempool dump [<options>] <file>
```
# DESCRIPTION #
The **pmempool** invoked with *dump* command dumps user data from specified pool file.
The output format may be either binary or hexadecimal.
By default the output format is hexadecimal.
By default data is dumped to standard output. It is possible to dump data to other
file by specifying **-o** option. In this case data will be appended to this file.
Using **-r** option you can specify number of blocks/bytes/data chunks using
special text format. See **RANGE** section for details.
##### Available options: #####
`-b, --binary`
Dump data in binary format.
`-r, --range <range>`
Range of pool file to dump. This may be number of blocks for **blk** pool
type or either number of bytes or number of data chunks for **log** pool type.
`-c, --chunk <size>`
Size of chunk for **log** pool type. See **pmemlog_walk**(3) in **libpmemlog**(7) for details.
`-o, --output <file>`
Name of output file.
`-h, --help`
Display help message and exit.
# RANGE #
Using **-r**, **--range** option it is possible to dump only a range of user data.
This section describes valid format of *\<range\>* string.
You can specify multiple ranges separated by commas.
`<first>-<last>`
All blocks/bytes/data chunks from *\<first\>* to *\<last\>* will be dumped.
`-<last>`
All blocks/bytes/data chunks up to *\<last\>* will be dumped.
`<first>-`
All blocks/bytes/data chunks starting from *\<first\>* will be dumped.
`<number>`
Only *\<number\>* block/byte/data chunk will be dumped.
# EXAMPLE #
```
$ pmempool dump pool.bin
```
Dump user data from pool.bin file to standard output
```
$ pmempool dump -o output.bin -r1,10-100 pool_blk.bin
```
Dump block number 1 and blocks from 10 to 100 from pool_blk.bin
containing pmem blk pool to output.bin file
```
$ pmempool dump -r 1K-2K pool.bin
```
Dump data form 1K to 2K from pool.bin file.
# SEE ALSO #
**pmempool**(1), **libpmemblk**(7), **libpmemlog**(7),
**libpmemobj**(7) and **<https://pmem.io>**
| 2,580 | 21.25 | 94 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/libpmem2/pmem2_map.3.md
|
---
layout: manual
Content-Style: 'text/css'
title: _MP(PMEM2_MAP, 3)
collection: libpmem2
header: PMDK
date: pmem2 API version 1.0
...
[comment]: <> (SPDX-License-Identifier: BSD-3-Clause)
[comment]: <> (Copyright 2019-2020, Intel Corporation)
[comment]: <> (pmem2_map.3 -- man page for libpmem2 pmem2_map operation)
[NAME](#name)<br />
[SYNOPSIS](#synopsis)<br />
[DESCRIPTION](#description)<br />
[RETURN VALUE](#return-value)<br />
[SEE ALSO](#see-also)<br />
# NAME #
**pmem2_map**() - creates a mapping
# SYNOPSIS #
```c
#include <libpmem2.h>
struct pmem2_config;
struct pmem2_source;
struct pmem2_map;
int pmem2_map(const struct pmem2_config *config, const struct pmem2_source *source,
struct pmem2_map **map_ptr);
```
# DESCRIPTION #
The **pmem2_map**() function creates a new mapping in the virtual address space
of the calling process. This function requires a configuration
*config* of the mapping and the data source *source*.
For a mapping to succeed, the *config* structure must have the granularity
parameter set to the appropriate level. See **pmem2_config_set_required_store_granularity**(3)
and **libpmem2**(7) for more details.
If the **pmem2_map**() function succeeds in creating a new mapping it
instantiates a new *struct pmem2_map** object describing the mapping. The
pointer to this newly created object is stored in the user-provided variable
passed via the *map_ptr* pointer. If the mapping fails the variable pointed by
*map_ptr* will contain a NULL value and appropriate error value will be
returned. For a list of possible return values please see
[RETURN VALUE](#return-value).
All *struct pmem2_map* objects created via the **pmem2_map**() function have to
be destroyed using the **pmem2_unmap**() function. For details please see
**pmem2_unmap**(3) manual page.
# RETURN VALUE #
When **pmem2_map**() succeeds it returns 0. Otherwise, it returns
one of the following error values:
* **PMEM2_E_GRANULARITY_NOT_SET** - the store granularity for the mapping was
not set in the provided *config* structure. Please see **pmem2_config_set_required_store_granularity**(3)
and **libpmem2**(7).
* **PMEM2_E_MAP_RANGE** - *offset* + *length* is too big to represent it using
*size_t* data type
* **PMEM2_E_MAP_RANGE** - end of the mapping (*offset* + *length*) is outside
of the file. The file is too small.
* **PMEM2_E_SOURCE_EMPTY** - mapped file has size equal to 0.
* **PMEM2_E_MAPPING_EXISTS** - if the object exists before the function call.
For details please see **CreateFileMapping**() manual pages. (Windows only)
* **PMEM2_E_OFFSET_UNALIGNED** - argument unaligned, offset is not a multiple of
the alignment required for specific *\*source*. Please see
**pmem2_source_alignment**(3).
* **PMEM2_E_LENGTH_UNALIGNED** - argument unaligned, length is not a multiple of
the alignment required for specific *\*source*. Please see
**pmem2_source_alignment**(3).
* **PMEM2_E_SRC_DEVDAX_PRIVATE** - device DAX mapped with MAP_PRIVATE. (Linux only)
* **PMEM2_E_NOSUPP** - when config-provided protection flags combination is not supported.
* **PMEM2_E_NO_ACCESS** - there is a conflict between mapping protection and file opening mode.
It can also return **-EACCES**, **-EAGAIN**, **-EBADF**, **-ENFILE**,
**-ENODEV**, **-ENOMEM**, **-EPERM**, **-ETXTBSY** from the underlying
**mmap**(2) function. It is used with and without **MAP_ANONYMOUS**.
**-EACCES** may be returned only if the file descriptor points to an
append-only file.
It can also return all errors from the underlying
**pmem2_source_size**() and **pmem2_source_alignment**() functions.
# SEE ALSO #
**mmap**(2), **open**(3),
**pmem2_config_set_required_store_granularity**(3),
**pmem2_source_alignment**(3), **pmem2_source_from_fd**(3),
**pmem2_source_size**(3), **pmem2_unmap**(3),
**libpmem2**(7) and **<http://pmem.io>**
| 3,844 | 33.954545 | 104 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/libpmem2/pmem2_map_get_store_granularity.3.md
|
---
layout: manual
Content-Style: 'text/css'
title: _MP(PMEM2_MAP_GET_STORE_GRANULARITY, 3)
collection: libpmem2
header: PMDK
date: pmem2 API version 1.0
...
[comment]: <> (SPDX-License-Identifier: BSD-3-Clause)
[comment]: <> (Copyright 2019, Intel Corporation)
[comment]: <> (pmem2_map_get_store_granularity.3 -- man page for libpmem2 mapping)
[comment]: <> (operations)
[NAME](#name)<br />
[SYNOPSIS](#synopsis)<br />
[DESCRIPTION](#description)<br />
[RETURN VALUE](#return-value)<br />
[SEE ALSO](#see-also)<br />
# NAME #
**pmem2_map_get_store_granularity**() - reads effective mapping granularity
# SYNOPSIS #
```c
#include <libpmem2.h>
enum pmem2_granularity {
PMEM2_GRANULARITY_BYTE,
PMEM2_GRANULARITY_CACHE_LINE,
PMEM2_GRANULARITY_PAGE,
};
enum pmem2_granularity pmem2_map_get_store_granularity(struct pmem2_map *map);
```
# DESCRIPTION #
The **pmem2_map_get_store_granularity**() function reads granularity of the created
mapping. The *map* parameter points to the structure describing mapping created
using the **pmem2_map**(3) function. Concept of the granularity is described in
**libpmem2**(7).
# RETURN VALUE #
The **pmem2_map_get_store_granularity**() function returns a granularity of the mapped
area.
# SEE ALSO #
**pmem2_map**(3), **libpmem2**(7) and **<http://pmem.io>**
| 1,308 | 23.240741 | 86 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/libpmem2/pmem2_get_flush_fn.3.md
|
---
layout: manual
Content-Style: 'text/css'
title: _MP(PMEM2_GET_FLUSH_FN, 3)
collection: libpmem2
header: PMDK
date: pmem2 API version 1.0
...
[comment]: <> (SPDX-License-Identifier: BSD-3-Clause)
[comment]: <> (Copyright 2020, Intel Corporation)
[comment]: <> (pmem2_get_flush_fn.3 -- man page for pmem2_get_flush_fn)
[NAME](#name)<br />
[SYNOPSIS](#synopsis)<br />
[DESCRIPTION](#description)<br />
[RETURN VALUE](#return-value)<br />
[SEE ALSO](#see-also)<br />
# NAME #
**pmem2_get_flush_fn**() - get a flush function
# SYNOPSIS #
```c
#include <libpmem2.h>
typedef void (*pmem2_flush_fn)(const void *ptr, size_t size);
struct pmem2_map;
pmem2_flush_fn pmem2_get_flush_fn(struct pmem2_map *map);
```
# DESCRIPTION #
The **pmem2_get_flush_fn**() function returns a pointer to a function
responsible for efficiently flushing data in the range owned by the *map*.
Flushing data using *pmem2_flush_fn* **does not** guarantee that the data
is stored durably by the time it returns. To get this guarantee, application
should either use the persist operation (see **pmem2_get_persist_fn**(3))
or follow *pmem2_flush_fn* by a drain operation (see **pmem2_get_drain_fn**(3)).
There are no alignment restrictions on the range described by *ptr* and *size*,
but *pmem2_flush_fn* may expand the range as necessary to meet platform
alignment requirements.
There is nothing atomic or transactional about *pmem2_flush_fn*. Any
unwritten stores in the given range will be written, but some stores may have
already been written by virtue of normal cache eviction/replacement policies.
Correctly written code must not depend on stores waiting until
*pmem2_flush_fn* is called to be flushed -- they can be flushed
at any time before *pmem2_flush_fn* is called.
If two (or more) mappings share the same *pmem2_flush_fn* and they are
adjacent to each other, it is safe to call this function for a range spanning
those mappings.
# RETURN VALUE #
The **pmem2_get_flush_fn**() function never returns NULL.
**pmem2_get_flush_fn**() for the same *map* always returns the same function.
This means that it's safe to cache its return value. However, this function
is very cheap (because it returns a precomputed value), so caching may not
be necessary.
# SEE ALSO #
**pmem2_get_drain_fn**(3), **pmem2_get_persist_fn**(3), **pmem2_map**(3),
**libpmem2**(7) and **<http://pmem.io>**
| 2,379 | 30.733333 | 80 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/libpmem2/pmem2_config_set_address.3.md
|
---
layout: manual
Content-Style: 'text/css'
title: _MP(PMEM2_CONFIG_SET_ADDRESS, 3)
collection: libpmem2
header: PMDK
date: pmem2 API version 1.0
...
[comment]: <> (SPDX-License-Identifier: BSD-3-Clause)
[comment]: <> (Copyright 2020, Intel Corporation)
[comment]: <> (pmem2_config_set_address.3 -- man page for libpmem2 config API)
[NAME](#name)<br />
[SYNOPSIS](#synopsis)<br />
[DESCRIPTION](#description)<br />
[RETURN VALUE](#return-value)<br />
[SEE ALSO](#see-also)<br />
# NAME #
**pmem2_config_set_address**() - set requested address in the pmem2_config structure
# SYNOPSIS #
```c
#include <libpmem2.h>
struct pmem2_config;
enum pmem2_address_request_type {
PMEM2_ADDRESS_FIXED_REPLACE,
PMEM2_ADDRESS_FIXED_NOREPLACE,
};
int pmem2_config_set_address(struct pmem2_config *cfg, void *addr,
enum pmem2_address_request_type request_type);
```
# DESCRIPTION #
The **pmem2_config_set_address**() function sets the starting address *\*addr* which will be used
for memory mapping. If the *\*addr* is not specified in the config, the starting address
will be chosen by the operating system. The *\request_type* specifies how strictly the address
should be enforced. *\*config* should be already initialized, please see **pmem2_config_new**(3)
for details. The *\*addr* cannot be **NULL** and must be a multiple of the alignment required for the
data source which will be used for mapping alongside the config. To retrieve the alignment required
for a specific instance of **pmem2_source**, use **pmem2_source_alignment**(3). To reset *\*addr* and
*\request_type* to the default values, please use **pmem2_config_clear_address**(3).
Possible address request types are:
* **PMEM2_ADDRESS_FIXED_REPLACE** - not supported yet.
* **PMEM2_ADDRESS_FIXED_NOREPLACE** - *\*addr* cannot be **NULL**, kernel tries to place the mapping
at exactly the address which was set by user. When any part of <*\*addr*, *\*addr* + length> address
space is occupied, **pmem2_map**(3) fails with the **PMEM2_E_MAPPING_EXISTS** return code.
# RETURN VALUE #
When **pmem2_config_set_address**() succeeds it returns 0. Otherwise, it returns one of the following
error values:
* **PMEM2_E_INVALID_ADDRESS_REQUEST_TYPE** - set address request type is invalid.
* **PMEM2_E_ADDRESS_NULL** - cannot use address request type **PMEM2_ADDRESS_FIXED_NOREPLACE**
when address is **NULL**.
# SEE ALSO #
**libpmem2**(7), **pmem2_config_clear_address**(3), **pmem2_config_new**(3), **pmem2_map**(3),
**pmem2_source_alignment**(3), **sysconf**(3) and **<http://pmem.io>**
| 2,553 | 34.472222 | 101 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/libpmem2/pmem2_config_new.3.md
|
---
layout: manual
Content-Style: 'text/css'
title: _MP(PMEM2_CONFIG_NEW, 3)
collection: libpmem2
header: PMDK
date: pmem2 API version 1.0
...
[comment]: <> (SPDX-License-Identifier: BSD-3-Clause)
[comment]: <> (Copyright 2019, Intel Corporation)
[comment]: <> (pmem2_config_new.3 -- man page for pmem2_config_new and pmem2_config_delete)
[NAME](#name)<br />
[SYNOPSIS](#synopsis)<br />
[DESCRIPTION](#description)<br />
[RETURN VALUE](#return-value)<br />
[SEE ALSO](#see-also)<br />
# NAME #
**pmem2_config_new**(), **pmem2_config_delete**() - allocate and free a
configuration for a libpmem2 mapping
# SYNOPSIS #
```c
#include <libpmem2.h>
struct pmem2_config;
int pmem2_config_new(struct pmem2_config **cfg);
int pmem2_config_delete(struct pmem2_config **cfg);
```
# DESCRIPTION #
The **pmem2_config_new**() function instantiates a new (opaque) configuration structure, *pmem2_config*, which is used to define mapping parameters for a **pmem2_map**() function, and returns it through the pointer in *\*cfg*.
New configuration is always initialized with default values for all possible parameters, which are specified alongside the corresponding setter function.
The **pmem2_config_delete**() function frees *\*cfg* returned by **pmem2_config_new**() and sets *\*cfg* to NULL.
If *\*cfg* is NULL, no operation is performed.
# RETURN VALUE #
The **pmem2_config_new**() function returns 0 on success or a negative error code on failure.
**pmem2_config_new**() does set *\*cfg* to NULL on failure.
The **pmem2_config_delete**() function returns 0.
# ERRORS #
**pmem2_config_new**() can fail with the following error:
- **-ENOMEM** - out of memory
# SEE ALSO #
**errno**(3), **pmem2_map**(3), **pmem2_config_set_handle**(3),
**pmem2_config_set_fd**(3), **pmem2_config_get_file_size**(3),
**libpmem2**(7) and **<http://pmem.io>**
| 1,846 | 29.278689 | 226 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/libpmem2/pmem2_source_from_anon.3.md
|
---
layout: manual
Content-Style: 'text/css'
title: _MP(PMEM2_SOURCE_FROM_ANON, 3)
collection: libpmem2
header: PMDK
date: pmem2 API version 1.0
...
[comment]: <> (SPDX-License-Identifier: BSD-3-Clause)
[comment]: <> (Copyright 2020, Intel Corporation)
[comment]: <> (pmem2_source_from_anon.3 -- man page for pmem2_source_from_anon)
[NAME](#name)<br />
[SYNOPSIS](#synopsis)<br />
[DESCRIPTION](#description)<br />
[RETURN VALUE](#return-value)<br />
[ERRORS](#errors)<br />
[CAVEATS](#caveats)<br />
[SEE ALSO](#see-also)<br />
# NAME #
**pmem2_source_from_anon**() - creates data source backed by anonymous memory pages
# SYNOPSIS #
```c
#include <libpmem2.h>
int pmem2_source_from_anon(struct pmem2_source **src, size_t size);
```
# DESCRIPTION #
The **pmem2_source_from_anon**() function instantiates a new *struct pmem2_source*
object describing an anonymous data source. Mappings created using this function
are not backed by any file and are zero-initialized.
The *size* argument for the function defines the length in bytes of the anonymous
source, as returned by **pmem2_source_size**(3). The application should set
this value so that it's greater than or equal to the size of any mapping created
with the anonymous source.
The offset value for mapping is ignored.
# RETURN VALUE #
**pmem2_source_from_anon**() functions return 0 on success or one of the error
values listed in the next section.
# ERRORS #
The **pmem2_source_from_anon**() can return **-ENOMEM** in case of insufficient
memory to allocate an instance of *struct pmem2_source*.
# SEE ALSO #
**errno**(3), **pmem2_config_set_length**(3), **pmem2_map**(3),
**pmem2_source_size**(3), **pmem2_config_set_length**(3), **libpmem2**(7)
and **<http://pmem.io>**
| 1,745 | 27.16129 | 83 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/libpmem2/pmem2_config_set_offset.3.md
|
---
layout: manual
Content-Style: 'text/css'
title: _MP(PMEM2_CONFIG_SET_OFFSET, 3)
collection: libpmem2
header: PMDK
date: pmem2 API version 1.0
...
[comment]: <> (SPDX-License-Identifier: BSD-3-Clause)
[comment]: <> (Copyright 2019, Intel Corporation)
[comment]: <> (pmem2_config_set_offset.3 -- man page for libpmem2 config API)
[NAME](#name)<br />
[SYNOPSIS](#synopsis)<br />
[DESCRIPTION](#description)<br />
[RETURN VALUE](#return-value)<br />
[SEE ALSO](#see-also)<br />
# NAME #
**pmem2_config_set_offset**() - set offset in the pmem2_config structure
# SYNOPSIS #
```c
#include <libpmem2.h>
struct pmem2_config;
int pmem2_config_set_offset(struct pmem2_config *config, size_t offset);
```
# DESCRIPTION #
The **pmem2_config_set_offset**() function configures the offset which will be used
to map the contents from the specified location of the source. *\*config* should be
already initialized, please see **pmem2_config_new**(3) for details. The *offset*
must be a multiple of the alignment required for the config. The alignment
requirements are specific to a data source. To retrieve the alignment
required for a specific instance of *pmem2_source* use **pmem2_source_alignment**(3).
# RETURN VALUE #
The **pmem2_config_set_offset**() function returns 0 on success. Otherwise, it returns:
* **PMEM2_E_OFFSET_OUT_OF_RANGE** - argument out of range, offset is greater than
**INT64_MAX**
# SEE ALSO #
**libpmem2**(7), **pmem2_source_alignment**(3), **pmem2_config_new**(3),
**pmem2_map**(3), **sysconf**(3) and **<http://pmem.io>**
| 1,555 | 27.814815 | 87 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/libpmem2/pmem2_source_size.3.md
|
---
layout: manual
Content-Style: 'text/css'
title: _MP(PMEM2_SOURCE_SIZE, 3)
collection: libpmem2
header: PMDK
date: pmem2 API version 1.0
...
[comment]: <> (SPDX-License-Identifier: BSD-3-Clause)
[comment]: <> (Copyright 2019-2020, Intel Corporation)
[comment]: <> (pmem2_source_size.3 -- man page for pmem2_source_size)
[NAME](#name)<br />
[SYNOPSIS](#synopsis)<br />
[DESCRIPTION](#description)<br />
[RETURN VALUE](#return-value)<br />
[SEE ALSO](#see-also)<br />
# NAME #
**pmem2_source_size**() - returns the size of the data source
# SYNOPSIS #
```c
#include <libpmem2.h>
struct pmem2_source;
int pmem2_source_size(const struct pmem2_source *source, size_t *size);
```
# DESCRIPTION #
The **pmem2_source_size**() function retrieves the size of the file
in bytes pointed by file descriptor or handle stored in the *source* and puts
it in *\*size*.
This function is a portable replacement for OS-specific APIs.
On Linux, it hides the quirkiness of Device DAX size detection.
# RETURN VALUE #
The **pmem2_source_size**() function returns 0 on success.
If the function fails, the *\*size* variable is left unmodified, and one of
the following errors is returned:
On all systems:
* **PMEM2_E_INVALID_FILE_HANDLE** - source contains an invalid file handle.
On Windows:
* **PMEM2_E_INVALID_FILE_TYPE** - handle points to a resource that is not
a regular file.
On Linux:
* **PMEM2_E_INVALID_FILE_TYPE** - file descriptor points to a directory,
block device, pipe, or socket.
* **PMEM2_E_INVALID_FILE_TYPE** - file descriptor points to a character
device other than Device DAX.
* **PMEM2_E_INVALID_SIZE_FORMAT** - kernel query for Device DAX size
returned data in invalid format.
* -**errno** set by failing **fstat**(2), while trying to validate the file
descriptor.
* -**errno** set by failing **realpath**(3), while trying to determine whether
fd points to a Device DAX.
* -**errno** set by failing **open**(2), while trying to determine Device DAX's
size.
* -**errno** set by failing **read**(2), while trying to determine Device DAX's
size.
* -**errno** set by failing **strtoull**(3), while trying to determine
Device DAX's size.
On FreeBSD:
* **PMEM2_E_INVALID_FILE_TYPE** - file descriptor points to a directory,
block device, pipe, socket, or character device.
* -**errno** set by failing **fstat**(2), while trying to validate the file
descriptor.
# SEE ALSO #
**errno**(3), **fstat**(2), **realpath**(3), **open**(2), **read**(2),
**strtoull**(3), **pmem2_config_new**(3), **libpmem2**(7)
and **<http://pmem.io>**
| 2,556 | 25.360825 | 79 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/libpmem2/pmem2_source_device_id.3.md
|
---
layout: manual
Content-Style: 'text/css'
title: _MP(PMEM2_SOURCE_DEVICE_ID, 3)
collection: libpmem2
header: PMDK
date: pmem2 API version 1.0
...
[comment]: <> (SPDX-License-Identifier: BSD-3-Clause)
[comment]: <> (Copyright 2020, Intel Corporation)
[comment]: <> (pmem2_source_device_id.3 -- man page for pmem2_source_device_id)
[NAME](#name)<br />
[SYNOPSIS](#synopsis)<br />
[DESCRIPTION](#description)<br />
[RETURN VALUE](#return-value)<br />
[SEE ALSO](#see-also)<br />
# NAME #
**pmem2_source_device_id**() - returns the unique identifier of a device
# SYNOPSIS #
```c
#include <libpmem2.h>
struct pmem2_source;
int pmem2_source_device_id(const struct pmem2_source *source, char *id, size_t *len);
```
# DESCRIPTION #
The **pmem2_source_device_id**() function retrieves a unique identifier
of all NVDIMMs backing the data source. This function has two operating modes:
* if *\*id* is NULL the function calculates a buffer length required for
storing the identifier of the *\*source* device and puts this length in *\*len*.
The more hardware devices back the data source, the longer the length is.
* if *\*id* is not NULL it must point to a buffer of length *\*len* provided by
the previous call to this function.
On success, **pmem2_source_device_id**() will store a unique identifier
of all hardware devices backing the data source.
For details on how to use the unique identifier for detecting *the unsafe shutdown*
please refer to **libpmem2_unsafe_shutdown**(7) manual page.
# RETURN VALUE #
The **pmem2_source_device_id**() function returns 0 on success.
If the function fails, the *\*id* and *\*len* variables contents are left unmodified,
and one of the following errors is returned:
On all systems:
* **PMEM2_E_BUFFER_TOO_SMALL** - the provided buffer of length *\*len* is too
small to store the full identifier of the backing devices.
* **PMEM2_E_NOSUPP** - the underlying platform does not expose hardware
identification.
On Windows:
* -**errno** equivalent of return code set by failing
**GetFinalPathNameByHandleW**(), while trying to resolve the volume path from the
file handle.
* -**errno** set by failing **malloc**(3), while trying to allocate a buffer
for storing volume path.
* -**errno** equivalent of return code set by failing
**CreateFileW**(), while trying to obtain a handle to the volume.
* -**errno** equivalent of return code set by failing
**DeviceIoControl**(), while trying to obtain volume **USC** value.
On Linux:
* -**errno** set by failing **fstat**(2), while trying to validate the file
descriptor.
* -**errno** set by failing **ndctl_new**(), while trying to initiate a new
NDCTL library context.
# SEE ALSO #
**fstat**(2), **errno**(3), **malloc**(3), **libpmem2_unsafe_shutdown**(7),
and **<http://pmem.io>**
| 2,786 | 29.626374 | 85 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/libpmem2/pmem2_get_memmove_fn.3.md
|
---
layout: manual
Content-Style: 'text/css'
title: _MP(PMEM2_GET_MEMMOVE_FN, 3)
collection: libpmem2
header: PMDK
date: pmem2 API version 1.0
...
[comment]: <> (SPDX-License-Identifier: BSD-3-Clause)
[comment]: <> (Copyright 2020, Intel Corporation)
[comment]: <> (pmem2_get_memmove_fn.3 -- man page for pmem2_get_memmove_fn)
[NAME](#name)<br />
[SYNOPSIS](#synopsis)<br />
[DESCRIPTION](#description)<br />
[RETURN VALUE](#return-value)<br />
[SEE ALSO](#see-also)<br />
# NAME #
**pmem2_get_memmove_fn**(), **pmem2_get_memset_fn**(),
**pmem2_get_memcpy_fn**() - get a function that provides
optimized copying to persistent memory
# SYNOPSIS #
```c
#include <libpmem2.h>
typedef void *(*pmem2_memmove_fn)(void *pmemdest, const void *src, size_t len,
unsigned flags);
typedef void *(*pmem2_memcpy_fn)(void *pmemdest, const void *src, size_t len,
unsigned flags);
typedef void *(*pmem2_memset_fn)(void *pmemdest, int c, size_t len,
unsigned flags);
struct pmem2_map;
pmem2_memmove_fn pmem2_get_memmove_fn(struct pmem2_map *map);
pmem2_memset_fn pmem2_get_memset_fn(struct pmem2_map *map);
pmem2_memcpy_fn pmem2_get_memcpy_fn(struct pmem2_map *map);
```
# DESCRIPTION #
The **pmem2_get_memmove_fn**(), **pmem2_get_memset_fn**(),
**pmem2_get_memcpy_fn**() functions return a pointer to a function
responsible for efficient storing and flushing of data for mapping *map*.
**pmem2_memmove_fn**(), **pmem2_memset_fn**() and **pmem2_memcpy_fn**()
functions provide the same memory copying functionalities as their namesakes
**memmove**(3), **memcpy**(3) and **memset**(3), and ensure that the result has
been flushed to persistence before returning (unless **PMEM2_F_MEM_NOFLUSH** flag was used).
For example, the following code:
```c
memmove(dest, src, len);
pmem2_persist_fn persist_fn = pmem2_get_persist_fn(map);
persist_fn(dest, len);
```
is functionally equivalent to:
```c
pmem2_memmove_fn memmove_fn = pmem2_get_memmove_fn(map);
memmove_fn(dest, src, len, 0);
```
Unlike libc implementation, **libpmem2** functions guarantee that if destination
buffer address and length are 8 byte aligned then all stores will be performed
using at least 8 byte store instructions. This means that a series of 8 byte
stores followed by *persist_fn* can be safely replaced by a single *memmove_fn* call.
The *flags* argument of all of the above functions has the same meaning.
It can be 0 or a bitwise OR of one or more of the following flags:
+ **PMEM2_F_MEM_NODRAIN** - modifies the behavior to skip the final
*pmem2_drain_fn* step. This allows applications to optimize cases where
several ranges are being copied to persistent memory, followed by a single
call to *pmem2_drain_fn*. The following example illustrates how this flag
might be used to avoid multiple calls to *pmem2_drain_fn* when copying several
ranges of memory to pmem:
```c
pmem2_memcpy_fn memcpy_fn = pmem2_get_memcpy_fn(map);
pmem2_drain_fn drain_fn = pmem2_get_drain_fn(map);
/* ... write several ranges to pmem ... */
memcpy_fn(pmemdest1, src1, len1, PMEM2_F_MEM_NODRAIN);
memcpy_fn(pmemdest2, src2, len2, PMEM2_F_MEM_NODRAIN);
/* ... */
/* wait for any pmem stores to drain from HW buffers */
drain_fn();
```
+ **PMEM2_F_MEM_NOFLUSH** - Don't flush anything. This implies **PMEM2_F_MEM_NODRAIN**.
Using this flag only makes sense when it's followed by any function that
flushes data.
The remaining flags say *how* the operation should be done, and are merely hints.
+ **PMEM2_F_MEM_NONTEMPORAL** - Use non-temporal instructions.
This flag is mutually exclusive with **PMEM2_F_MEM_TEMPORAL**.
On x86\_64 this flag is mutually exclusive with **PMEM2_F_MEM_NOFLUSH**.
+ **PMEM2_F_MEM_TEMPORAL** - Use temporal instructions.
This flag is mutually exclusive with **PMEM2_F_MEM_NONTEMPORAL**.
+ **PMEM2_F_MEM_WC** - Use write combining mode.
This flag is mutually exclusive with **PMEM2_F_MEM_WB**.
On x86\_64 this flag is mutually exclusive with **PMEM2_F_MEM_NOFLUSH**.
+ **PMEM2_F_MEM_WB** - Use write back mode.
This flag is mutually exclusive with **PMEM2_F_MEM_WC**.
On x86\_64 this is an alias for **PMEM2_F_MEM_TEMPORAL**.
Using an invalid combination of flags has undefined behavior.
Without any of the above flags **libpmem2** will try to guess the best strategy
based on the data size. See **PMEM_MOVNT_THRESHOLD** description in **libpmem2**(7) for
details.
# RETURN VALUE #
The **pmem2_get_memmove_fn**(), **pmem2_get_memset_fn**(),
**pmem2_get_memcpy_fn**() functions never return NULL.
They return the same function for the same mapping.
This means that it's safe to cache their return values. However, these functions
are very cheap (because their return values are precomputed), so caching may not
be necessary.
If two (or more) mappings share the same *pmem2_memmove_fn*, *pmem2_memset_fn*,
*pmem2_memcpy_fn* and they are adjacent to each other, it is safe to call these
functions for a range spanning those mappings.
# SEE ALSO #
**memcpy**(3), **memmove**(3), **memset**(3), **pmem2_get_drain_fn**(3),
**pmem2_get_memcpy_fn**(3), **pmem2_get_memset_fn**(3), **pmem2_map**(3),
**pmem2_get_persist_fn**(3), **libpmem2**(7) and **<http://pmem.io>**
| 5,238 | 34.639456 | 92 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/libpmem2/pmem2_errormsg.3.md
|
---
layout: manual
Content-Style: 'text/css'
title: _MP(PMEM2_ERRORMSG, 3)
collection: libpmem2
header: PMDK
date: pmem2 API version 1.0
...
[comment]: <> (SPDX-License-Identifier: BSD-3-Clause)
[comment]: <> (Copyright 2019, Intel Corporation)
[comment]: <> (pmem2_errormsg.3 -- man page for error handling in libpmem2)
[NAME](#name)<br />
[SYNOPSIS](#synopsis)<br />
[DESCRIPTION](#description)<br />
[RETURN VALUE](#return-value)<br />
[SEE ALSO](#see-also)<br />
# NAME #
_UW(pmem2_errormsg) - returns last error message
# SYNOPSIS #
```c
#include <libpmem2.h>
_UWFUNC(pmem2_errormsg, void)
```
_UNICODE()
# DESCRIPTION #
If an error is detected during the call to a **libpmem2**(7) function, the
application may retrieve an error message describing the reason of the failure
from _UW(pmem2_errormsg). The error message buffer is thread-local;
errors encountered in one thread do not affect its value in
other threads. The buffer is never cleared by any library function; its
content is significant only when the return value of the immediately preceding
call to a **libpmem2**(7) function indicated an error.
The application must not modify or free the error message string.
Subsequent calls to other library functions may modify the previous message.
# RETURN VALUE #
The _UW(pmem2_errormsg) function returns a pointer to a static buffer
containing the last error message logged for the current thread. If *errno*
was set, the error message may include a description of the corresponding
error code as returned by **strerror**(3).
# SEE ALSO #
**strerror**(3), **libpmem2**(7) and **<https://pmem.io>**
| 1,624 | 27.508772 | 78 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/libpmem2/pmem2_map_get_size.3.md
|
---
layout: manual
Content-Style: 'text/css'
title: _MP(PMEM2_MAP_GET_SIZE, 3)
collection: libpmem2
header: PMDK
date: pmem2 API version 1.0
...
[comment]: <> (SPDX-License-Identifier: BSD-3-Clause)
[comment]: <> (Copyright 2019, Intel Corporation)
[comment]: <> (pmem2_map_get_size.3 -- man page for libpmem2 mapping operations)
[NAME](#name)<br />
[SYNOPSIS](#synopsis)<br />
[DESCRIPTION](#description)<br />
[RETURN VALUE](#return-value)<br />
[SEE ALSO](#see-also)<br />
# NAME #
**pmem2_map_get_size**() - reads mapping size
# SYNOPSIS #
```c
#include <libpmem2.h>
size_t pmem2_map_get_size(struct pmem2_map *map);
```
# DESCRIPTION #
The **pmem2_map_get_size**() function reads size of the created mapping.
The *map* parameter points to the structure describing mapping created using
the **pmem2_map**(3) function.
# RETURN VALUE #
The **pmem2_map_get_size**() function returns a size of the mapped area.
# SEE ALSO #
**pmem2_map**(3), **libpmem2**(7) and **<https://pmem.io>**
| 999 | 20.73913 | 80 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/libpmem2/pmem2_perror.3.md
|
---
layout: manual
Content-Style: 'text/css'
title: _MP(PMEM2_PERROR, 3)
collection: libpmem2
header: PMDK
date: pmem2 API version 1.0
...
[comment]: <> (SPDX-License-Identifier: BSD-3-Clause)
[comment]: <> (Copyright 2020, Intel Corporation)
[comment]: <> (pmem2_perror.3 -- man page for the error printing in libpmem2)
[NAME](#name)<br />
[SYNOPSIS](#synopsis)<br />
[DESCRIPTION](#description)<br />
[SEE ALSO](#see-also)<br />
# NAME #
_UW(pmem2_perror) - prints a descriptive error message to stderr
# SYNOPSIS #
```c
#include <libpmem2.h>
_UWFUNCR1(void, pmem2_perror, *format, ...)
```
_UNICODE()
# DESCRIPTION #
The _UW(pmem2_perror) function produces a message on standard error stream describing
the last error encountered during library call.
_UW(pmem2_perror) takes a variable number of arguments. First, the argument string
*format* is printed - similarly to the **printf**(3), followed by a colon and a blank.
Then an error message retrieved from the _UW(pmem2_errormsg), and a new-line. To see
how the error message is generated, please see **pmem2_errormsg**(3).
# SEE ALSO #
**libpmem2**(7), **perror**(3), **pmem2_errormsg**(3), **printf**(3) and **<http://pmem.io>**
| 1,200 | 24.553191 | 93 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/libpmem2/pmem2_source_device_usc.3.md
|
---
layout: manual
Content-Style: 'text/css'
title: _MP(PMEM2_SOURCE_DEVICE_USC, 3)
collection: libpmem2
header: PMDK
date: pmem2 API version 1.0
...
[comment]: <> (SPDX-License-Identifier: BSD-3-Clause)
[comment]: <> (Copyright 2020, Intel Corporation)
[comment]: <> (pmem2_source_device_usc.3 -- man page for pmem2_source_device_usc)
[NAME](#name)<br />
[SYNOPSIS](#synopsis)<br />
[DESCRIPTION](#description)<br />
[RETURN VALUE](#return-value)<br />
[SEE ALSO](#see-also)<br />
# NAME #
**pmem2_source_device_usc**() - returns the *unsafe shutdown counter* value of a
device
# SYNOPSIS #
```c
#include <libpmem2.h>
struct pmem2_source;
int pmem2_source_device_usc(const struct pmem2_source *source, uint64_t *usc);
```
# DESCRIPTION #
The **pmem2_source_device_usc**() function retrieves the sum of the
*unsafe shutdown count*(**USC**) values of all hardware devices backing
the data source and stores it in *\*usc*.
Please refer to **libpmem2_unsafe_shutdown**(7) for detailed description on how
to properly consume this information.
# RETURN VALUE #
The **pmem2_source_device_usc**() function returns 0 on success.
If the function fails, the *\*usc* variable content is left unmodified, and one of
the following errors is returned:
On all systems:
* **PMEM2_E_NOSUPP** - the underlying platform does not expose unsafe shutdown
count information.
On Windows:
* -**errno** equivalent of return code set by failing
**GetFinalPathNameByHandleW**(), while trying to resolve volume path from the
file handle.
* -**errno** set by failing **malloc**(3), while trying to allocate a buffer
for storing volume path.
* -**errno** equivalent of return code set by failing
**CreateFileW**(), while trying to obtain a handle to the volume.
* -**errno** equivalent of return code set by failing
**DeviceIoControl**(), while trying to obtain volume **USC** value.
On Linux:
* -**errno** set by failing **fstat**(2), while trying to validate the file
descriptor.
* -**errno** set by failing **ndctl_new**(), while trying to initiate a new
NDCTL library context.
* -**errno** set by failing **ndctl_dimm_get_dirty_shutdown**(),
while trying to obtain DIMM **USC** value.
# SEE ALSO #
**fstat**(2), **errno**(3), **malloc**(3), **libpmem2_unsafe_shutdown**(7),
and **<http://pmem.io>**
| 2,300 | 26.070588 | 82 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/libpmem2/pmem2_source_from_fd.3.md
|
---
layout: manual
Content-Style: 'text/css'
title: _MP(PMEM2_SOURCE_FROM_FD, 3)
collection: libpmem2
header: PMDK
date: pmem2 API version 1.0
...
[comment]: <> (SPDX-License-Identifier: BSD-3-Clause)
[comment]: <> (Copyright 2019-2020, Intel Corporation)
[comment]: <> (pmem2_source_from_fd.3 -- man page for pmem2_source_from_fd)
[NAME](#name)<br />
[SYNOPSIS](#synopsis)<br />
[DESCRIPTION](#description)<br />
[RETURN VALUE](#return-value)<br />
[ERRORS](#errors)<br />
[CAVEATS](#caveats)<br />
[SEE ALSO](#see-also)<br />
# NAME #
**pmem2_source_from_fd**(), **pmem2_source_from_handle**(),
**pmem2_source_delete**() - creates or deletes an instance of persistent memory
data source
# SYNOPSIS #
```c
#include <libpmem2.h>
int pmem2_source_from_fd(struct pmem2_source *src, int fd);
int pmem2_source_from_handle(struct pmem2_source *src, HANDLE handle); /* Windows only */
int pmem2_source_delete(struct pmem2_source **src);
```
# DESCRIPTION #
On Linux the **pmem2_source_from_fd**() function validates the file descriptor
and instantiates a new *struct pmem2_source** object describing the data source.
On Windows the **pmem2_source_from_fd**() function converts a file descriptor to a file handle (using **_get_osfhandle**()), and passes
it to **pmem2_source_from_handle**().
By default **_get_osfhandle**() calls abort() in case of invalid file descriptor,
but this behavior can be suppressed by **_set_abort_behavior**() and **SetErrorMode**()
functions.
Please check MSDN documentation for more information about Windows CRT error handling.
*fd* must be opened with *O_RDONLY* or *O_RDWR* mode, but on Windows it is not
validated.
If *fd* is invalid, then the function fails.
The **pmem2_source_from_handle**() function validates the handle and instantiates
a new *struct pmem2_source** object describing the data source.
If *handle* is INVALID_HANDLE_VALUE, then the function fails.
The handle has to be created with an access mode of *GENERIC_READ* or
*(GENERIC_READ | GENERIC_WRITE)*. For details please see the **CreateFile**()
documentation.
The **pmem2_source_delete**() function frees *\*src* returned by **pmem2_source_from_fd**() or **pmem2_source_from_handle**() and sets *\*src* to NULL. If *\*src* is NULL, no operation is performed.
# RETURN VALUE #
**pmem2_source_from_fd**() and **pmem2_source_from_handle**() functions return 0 on success or one of the error values listed in the next section.
# ERRORS #
The **pmem2_source_from_fd**() function can return the following errors:
* **PMEM2_E_INVALID_FILE_HANDLE** - *fd* is not an open and valid file descriptor. On Windows the function can **abort**() on this failure based on CRT's abort() behavior.
* **PMEM2_E_INVALID_FILE_HANDLE** - *fd* is opened in O_WRONLY mode.
On Linux:
* **PMEM2_E_INVALID_FILE_TYPE** - *fd* points to a directory, block device, pipe, or socket.
* **PMEM2_E_INVALID_FILE_TYPE** - *fd* points to a character device other than Device DAX.
On Windows:
* **PMEM2_E_INVALID_FILE_TYPE** - *handle* points to a resource that is not a regular file.
On Windows **pmem2_source_from_fd**() can return all errors from the underlying **pmem2_source_from_handle**() function.
The **pmem2_source_from_handle**() can return the following errors:
* **PMEM2_E_INVALID_FILE_HANDLE** - *handle* points to a resource that is not a file.
* **PMEM2_E_INVALID_FILE_TYPE** - *handle* points to a directory.
The **pmem2_source_from_fd**() and **pmem2_source_from_handle**() functions can
also return **-ENOMEM** in case of insufficient memory to
allocate an instance of *struct pmem2_source*.
# CAVEATS #
On non-DAX Windows volumes, *fd*/*handle* must remain open while the mapping
is in use.
# SEE ALSO #
**errno**(3), **pmem2_map**(3), **libpmem2**(7)
and **<http://pmem.io>**
| 3,796 | 34.820755 | 198 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/libpmem2/pmem2_map_get_address.3.md
|
---
layout: manual
Content-Style: 'text/css'
title: _MP(PMEM2_MAP_GET_ADDRESS, 3)
collection: libpmem2
header: PMDK
date: pmem2 API version 1.0
...
[comment]: <> (SPDX-License-Identifier: BSD-3-Clause)
[comment]: <> (Copyright 2019, Intel Corporation)
[comment]: <> (pmem2_map_get_address.3 -- man page for libpmem2 mapping operations)
[NAME](#name)<br />
[SYNOPSIS](#synopsis)<br />
[DESCRIPTION](#description)<br />
[RETURN VALUE](#return-value)<br />
[SEE ALSO](#see-also)<br />
# NAME #
**pmem2_map_get_address**() - reads mapping address
# SYNOPSIS #
```c
#include <libpmem2.h>
void *pmem2_map_get_address(struct pmem2_map *map);
```
# DESCRIPTION #
The **pmem2_map_get_address**() function reads address of the created mapping.
The *map* parameter points to the structure describing mapping created using
the **pmem2_map**(3) function.
# RETURN VALUE #
The **pmem2_map_get_address**() function returns a pointer to the mapped area.
# SEE ALSO #
**pmem2_map**(3), **libpmem2**(7) and **<https://pmem.io>**
| 1,025 | 21.304348 | 83 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/libpmem2/pmem2_config_set_sharing.3.md
|
---
layout: manual
Content-Style: 'text/css'
title: _MP(PMEM2_CONFIG_SET_SHARING, 3)
collection: libpmem2
header: PMDK
date: pmem2 API version 1.0
...
[comment]: <> (SPDX-License-Identifier: BSD-3-Clause)
[comment]: <> (Copyright 2020, Intel Corporation)
[comment]: <> (pmem2_config_set_sharing.3 -- man page for libpmem2 config API)
[NAME](#name)<br />
[SYNOPSIS](#synopsis)<br />
[DESCRIPTION](#description)<br />
[RETURN VALUE](#return-value)<br />
[SEE ALSO](#see-also)<br />
# NAME #
**pmem2_config_set_sharing**() - set sharing in the pmem2_config structure
# SYNOPSIS #
```c
#include <libpmem2.h>
struct pmem2_config;
enum pmem2_sharing_type {
PMEM2_SHARED,
PMEM2_PRIVATE,
};
int pmem2_config_set_sharing(struct pmem2_config *config, enum pmem2_sharing_type sharing);
```
# DESCRIPTION #
The **pmem2_config_set_sharing**() function configures the behavior and visibility
of writes to the mapping's pages. The possible values are listed below:
* **PMEM2_SHARED** - Writes are made directly to the underlying memory, making
them visible to other mappings of the same memory region. (default)
* **PMEM2_PRIVATE** - Writes do not affect the underlying memory and are
not visible to other mappings of the same memory region.
# RETURN VALUE #
The **pmem2_config_set_sharing**() function returns 0 on success. Otherwise, it
returns:
* **PMEM2_E_INVALID_SHARING_VALUE** - *sharing* value is invalid.
# SEE ALSO #
**libpmem2**(7), **pmem2_config_new**(3), **pmem2_map**(3), **sysconf**(3)
and **<http://pmem.io>**
| 1,531 | 24.533333 | 91 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/libpmem2/pmem2_deep_flush.3.md
|
---
layout: manual
Content-Style: 'text/css'
title: _MP(PMEM2_DEEP_FLUSH, 3)
collection: libpmem2
header: PMDK
date: pmem2 API version 1.0
...
[comment]: <> (SPDX-License-Identifier: BSD-3-Clause)
[comment]: <> (Copyright 2020, Intel Corporation)
[comment]: <> (pmem2_deep_flush.3 -- man page for pmem2_deep_flush)
[NAME](#name)<br />
[SYNOPSIS](#synopsis)<br />
[DESCRIPTION](#description)<br />
[RETURN VALUE](#return-value)<br />
[SEE ALSO](#see-also)<br />
# NAME #
**pmem2_deep_flush**() - highly reliable persistent memory synchronization
# SYNOPSIS #
```c
#include <libpmem2.h>
int pmem2_deep_flush(struct pmem2_map *map, void *ptr, size_t size)
```
# DESCRIPTION #
The **pmem2_deep_flush**() function forces any changes in the range \[*ptr*, *ptr*+*len*)
from the *map* to be stored durably in the most reliable persistence domain
available to software. In particular, on supported platforms, this enables
the code not to rely on automatic cache or WPQ (write pending queue) flush on power failure (ADR/eADR).
Since this operation is usually much more expensive than regular persist, it
should be used sparingly. Typically, the application should only ever use this
function as a precaution against hardware failures, e.g., in code that detects
silent data corruption caused by unsafe shutdown (see more in **libpmem2_unsafe_shutdown**(7)).
Applications should generally not assume the support for this functionality
in the platform, and not treat **PMEM2_E_NOSUPP** as a fatal error.
# RETURN VALUE #
The **pmem2_deep_flush**() returns 0 on success or one of the following
error values on failure:
* **PMEM2_E_DEEP_FLUSH_RANGE** - the provided flush range is not a
subset of the map's address space.
* **PMEM2_E_DAX_REGION_NOT_FOUND** - the underlying device region id cannot be
detected.
* -**errno** set by failing **write**(2), while trying to use the Device Dax
*deep_flush* interface.
* -**errno** set by failing **open**(2), while trying to open the Device Dax
*deep_flush* interface.
* -**errno** set by failing **msync**(2), while trying to perform a deep
flush on a regular DAX volume.
# SEE ALSO #
**msync**(2), **open**(2), **pmem2_get_drain_fn**(3), **pmem2_get_persist_fn**(3)
**pmem2_map**(3), **write**(2), **libpmem2**(7) and **<http://pmem.io>**
| 2,292 | 30.847222 | 103 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/libpmem2/pmem2_config_clear_address.3.md
|
---
layout: manual
Content-Style: 'text/css'
title: _MP(PMEM2_CONFIG_CLEAR_ADDRESS, 3)
collection: libpmem2
header: PMDK
date: pmem2 API version 1.0
...
[comment]: <> (SPDX-License-Identifier: BSD-3-Clause)
[comment]: <> (Copyright 2020, Intel Corporation)
[comment]: <> (pmem2_config_clear_address.3 -- man page for libpmem2 config API)
[NAME](#name)<br />
[SYNOPSIS](#synopsis)<br />
[DESCRIPTION](#description)<br />
[RETURN VALUE](#return-value)<br />
[SEE ALSO](#see-also)<br />
# NAME #
**pmem2_config_clear_address**() - reset addr and request_type to the default values
in the pmem2_config structure
# SYNOPSIS #
```c
#include <libpmem2.h>
struct pmem2_config;
void pmem2_config_clear_address(struct pmem2_config *cfg);
```
# DESCRIPTION #
The **pmem2_config_clear_address**() function resets *\*addr* and *\*request_type* to the default values.
The function is used to revert changes set by **pmem2_config_set_address**(3).
If the *\*addr* is default, the starting mapping address will be chosen by the operating system, for
more information please see **pmem2_map**(3).
# RETURN VALUE #
**pmem2_config_clear_address**() does not return any value.
# SEE ALSO #
**libpmem2**(7), **pmem2_config_set_address**(3), **pmem2_map**(3), and **<http://pmem.io>**
| 1,276 | 25.061224 | 104 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/libpmem2/pmem2_unmap.3.md
|
---
layout: manual
Content-Style: 'text/css'
title: _MP(PMEM2_UNMAP, 3)
collection: libpmem2
header: PMDK
date: pmem2 API version 1.0
...
[comment]: <> (SPDX-License-Identifier: BSD-3-Clause)
[comment]: <> (Copyright 2019-2020, Intel Corporation)
[comment]: <> (pmem2_unmap.3 -- man page for libpmem2 pmem2_unmap operation)
[NAME](#name)<br />
[SYNOPSIS](#synopsis)<br />
[DESCRIPTION](#description)<br />
[RETURN VALUE](#return-value)<br />
[SEE ALSO](#see-also)<br />
# NAME #
**pmem2_unmap**() - deletes a mapping
# SYNOPSIS #
```c
#include <libpmem2.h>
int pmem2_unmap(struct pmem2_map **map_ptr);
```
# DESCRIPTION #
The **pmem2_unmap**() function deletes the mapping described by the
*struct pmem2_map* object.
If **pmem2_unmap**() succeeds, deleting the mapping, it releases the
*struct pmem2_map* object describing it and writes a NULL value to *map_ptr*.
If the function fails, the *map_ptr* variable and the map object itself are left
unmodified and appropriate error value will be returned. For a list of possible
return values please see [RETURN VALUE](#return-value).
# RETURN VALUE #
When **pmem2_unmap**() succeeds it returns 0. Otherwise, it returns
one of the following error values:
* **PMEM2_E_MAPPING_NOT_FOUND** - mapping was not found (it was already
unmapped or pmem2_map state was corrupted)
On systems other than Windows it can also return **-EINVAL** from the underlying
**munmap**(2) function.
# SEE ALSO #
**pmem2_map(3)**, **libpmem2**(7) and **<http://pmem.io>**
| 1,509 | 25.034483 | 80 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/libpmem2/pmem2_vm_reservation_new.3.md
|
---
layout: manual
Content-Style: 'text/css'
title: _MP(PMEM2_VM_RESERVATION_NEW, 3)
collection: libpmem2
header: PMDK
date: pmem2 API version 1.0
...
[comment]: <> (SPDX-License-Identifier: BSD-3-Clause)
[comment]: <> (Copyright 2020, Intel Corporation)
[comment]: <> (pmem2_vm_reservation_new.3 -- man page for libpmem2 virtual memory reservation API)
[NAME](#name)<br />
[SYNOPSIS](#synopsis)<br />
[DESCRIPTION](#description)<br />
[RETURN VALUE](#return-value)<br />
[SEE ALSO](#see-also)<br />
# NAME #
**pmem2_vm_reservation_new**(), **pmem2_vm_reservation_delete**() - creates or deletes virtual memory
reservation that is made basing on the pmem2_vm_reservation structure
# SYNOPSIS #
```c
#include <libpmem2.h>
struct pmem2_vm_reservation;
int pmem2_vm_reservation_new(struct pmem2_vm_reservation **rsv,
		size_t size, void *address);
int pmem2_vm_reservation_delete(struct pmem2_vm_reservation **rsv);
```
# DESCRIPTION #
**pmem2_vm_reservation_new**() and **pmem2_vm_reservation_delete**() functions are not supported yet.
# RETURN VALUE #
**pmem2_vm_reservation_new**() returns **PMEM2_E_NOSUPP**.
**pmem2_vm_reservation_delete**() returns **PMEM2_E_NOSUPP**.
# SEE ALSO #
**libpmem2**(7), **pmem2_config_set_vm_reservation**(3) and **<http://pmem.io>**
| 1,277 | 25.081633 | 101 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/libpmem2/pmem2_get_persist_fn.3.md
|
---
layout: manual
Content-Style: 'text/css'
title: _MP(PMEM2_GET_PERSIST_FN, 3)
collection: libpmem2
header: PMDK
date: pmem2 API version 1.0
...
[comment]: <> (SPDX-License-Identifier: BSD-3-Clause)
[comment]: <> (Copyright 2020, Intel Corporation)
[comment]: <> (pmem2_get_persist_fn.3 -- man page for pmem2_get_persist_fn)
[NAME](#name)<br />
[SYNOPSIS](#synopsis)<br />
[DESCRIPTION](#description)<br />
[RETURN VALUE](#return-value)<br />
[SEE ALSO](#see-also)<br />
# NAME #
**pmem2_get_persist_fn**() - get a persist function
# SYNOPSIS #
```c
#include <libpmem2.h>
typedef void (*pmem2_persist_fn)(const void *ptr, size_t size);
struct pmem2_map;
pmem2_persist_fn pmem2_get_persist_fn(struct pmem2_map *map);
```
# DESCRIPTION #
The **pmem2_get_persist_fn**() function returns a pointer to a function
responsible for efficiently persisting data in the range owned by the *map*.
Persisting data using *pmem2_persist_fn* guarantees that the data is stored
durably by the time it returns.
There are no alignment restrictions on the range described by *ptr* and *size*,
but *pmem2_persist_fn* may expand the range as necessary to meet platform
alignment requirements.
There is nothing atomic or transactional about *pmem2_persist_fn*. Any
unwritten stores in the given range will be written, but some stores may have
already been written by virtue of normal cache eviction/replacement policies.
Correctly written code must not depend on stores waiting until
*pmem2_persist_fn* is called to become persistent -- they can become persistent
at any time before *pmem2_persist_fn* is called.
If two (or more) mappings share the same *pmem2_persist_fn* and they are
adjacent to each other, it is safe to call this function for a range spanning
those mappings.
Internally *pmem2_persist_fn* performs two operations:
- memory flush (**pmem2_get_flush_fn**(3)), which can be reordered by
the CPU with other flushes
- drain (**pmem2_get_drain_fn**(3)), which makes sure that the flushes
before this operation won't be reordered after it
So this code:
```c
pmem2_persist_fn persist_fn = pmem2_get_persist_fn(map);
persist_fn(addr, len);
```
is equivalent of:
```c
pmem2_flush_fn flush_fn = pmem2_get_flush_fn(map);
pmem2_drain_fn drain_fn = pmem2_get_drain_fn(map);
flush_fn(addr, len);
drain_fn();
```
Advanced applications may want to flush multiple discontiguous regions
and perform the drain operation only once.
# RETURN VALUE #
The **pmem2_get_persist_fn**() function never returns NULL.
**pmem2_get_persist_fn**() for the same *map* always returns the same function.
This means that it's safe to cache its return value. However, this function
is very cheap (because it returns a precomputed value), so caching may not be
necessary.
# SEE ALSO #
**pmem2_get_drain_fn**(3), **pmem2_get_flush_fn**(3), **pmem2_map**(3),
**libpmem2**(7) and **<http://pmem.io>**
| 2,887 | 27.88 | 79 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/libpmem2/libpmem2_unsafe_shutdown.7.md
|
---
layout: manual
Content-Style: 'text/css'
title: _MP(LIBPMEM2_UNSAFE_SHUTDOWN, 7)
collection: libpmem2
header: PMDK
date: pmem2 API version 1.0
...
[comment]: <> (SPDX-License-Identifier: BSD-3-Clause)
[comment]: <> (Copyright 2020, Intel Corporation)
[comment]: <> (libpmem2_unsafe_shutdown.7 -- man page for libpmem2 unsafe shutdown)
[NAME](#name)<br />
[DESCRIPTION](#description)<br />
[UNSAFE SHUTDOWN DETECTION](#unsafe-shutdown-detection)<br />
[SEE ALSO](#see-also)
# NAME #
**libpmem2_unsafe_shutdown** - libpmem2 unsafe shutdown
# DESCRIPTION #
In systems with persistent memory support, *a power-fail protected domain*
covers a set of resources from which the platform will flush data to
*a persistent medium* in case of *a power-failure*. Data stored on
*the persistent medium* is preserved across power cycles.
The hardware guarantees the feature to flush all data stored in
*the power-fail protected domain* to *the persistent medium*. However, nothing
is infallible, and Persistent Memory hardware can expose a monotonically
increasing *unsafe shutdown counter* (**USC**) that is incremented every time
a failure of the mechanism above is detected. This allows software to discover
situations where a running application was interrupted by a power failure that
led to an unsafe shutdown. Undiscovered unsafe shutdowns might cause silent data
corruption.
>Note: *The unsafe shutdown* may corrupt data stored on a device, in a file,
in a set of files, and a mapping spanning only a part of a file.
For the sake of simplicity, all of the above cases will be called *file* below.
# UNSAFE SHUTDOWN DETECTION #
Software can detect an unsafe shutdown by watching for the change between
unsafe shutdown count value across application startups. Any changes can be
indicative of unsafe shutdown occurrence.
Applications can implement a detection mechanism by storing the **USC** retrieved
from **pmem2_source_device_usc**(3) in Persistent Memory. Then, on subsequent
startups, the stored value must be compared with a newly retrieved one.
However, this detection method can result in false-positives. Moving the file to
different Persistent Memory devices with possibly different **USC** values would
lead to false unsafe shutdown detection.
Additionally, relying on **USC** value alone could result in the detection of
unsafe shutdown events that occur when such a shutdown has no chance of impacting
the data used by the application, e.g., when nothing is actively using the file.
Applications can avoid false-positives associated with moving the file by storing
device identification, obtained through **pmem2_source_device_id**(3), alongside
the **USC**. This enables the software to check if the underlying device has
changed, and reinitialize the stored **USC** in such cases.
The second behavior, detection of possibly irrelevant unsafe shutdown events,
if undesirable, can be prevented by storing a flag indicating whether the file
is in use, alongside all the rest of the relevant information.
The application should use **pmem2_deep_flush**(3) when storing any data related
to unsafe shutdown detection for higher reliability. This helps ensure that the
detection mechanism is not reliant on the correct functioning of the same hardware
features it is designed to safeguard.
General-purpose software should not assume the presence of **USC** in the platform,
and should instead appropriately handle any *PMEM2_E_NOSUPP* it encounters.
Doing otherwise might cause the software to be unnecessarily restrictive about
the hardware it supports and would prevent, e.g., testing on emulated PMEM.
# SEE ALSO #
**pmem2_deep_flush**(3), **pmem2_persist_fn**(3), **pmem2_source_device_id**(3),
**pmem2_source_device_usc**(3) and **<https://pmem.io>**
| 3,787 | 43.564706 | 83 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/libpmem2/pmem2_badblock_clear.3.md
|
---
layout: manual
Content-Style: 'text/css'
title: _MP(PMEM2_BADBLOCK_CLEAR, 3)
collection: libpmem2
header: PMDK
date: pmem2 API version 1.0
...
[comment]: <> (SPDX-License-Identifier: BSD-3-Clause)
[comment]: <> (Copyright 2020, Intel Corporation)
[comment]: <> (pmem2_badblock_clear.3 -- man page for pmem2_badblock_clear)
[NAME](#name)<br />
[SYNOPSIS](#synopsis)<br />
[DESCRIPTION](#description)<br />
[RETURN VALUE](#return-value)<br />
[SEE ALSO](#see-also)<br />
# NAME #
**pmem2_badblock_clear**() - clear the given bad block
# SYNOPSIS #
```c
#include <libpmem2.h>
struct pmem2_badblock;
struct pmem2_badblock_context;
int pmem2_badblock_clear(
struct pmem2_badblock_context *bbctx,
struct pmem2_badblock *bb);
```
# DESCRIPTION #
The **pmem2_badblock_clear**() function clears the given *\*bb* bad block.
It means that the **pmem2_badblock_clear**() function unmaps bad blocks
and maps new, healthy, blocks in place of the bad ones.
The new blocks are zeroed. The content of the bad blocks is lost.
It is not supported on Windows.
# RETURN VALUE #
The **pmem2_badblock_clear**() function clears the given *\*bb* bad block
and returns 0 on success or it returns a negative error code on failure.
# ERRORS #
**pmem2_badblock_clear**() can fail with the following errors:
* **PMEM2_E_OFFSET_OUT_OF_RANGE** - bad block's offset is greater than INT64_MAX
* **PMEM2_E_LENGTH_OUT_OF_RANGE** - bad block's length is greater than INT64_MAX
* **PMEM2_E_NOSUPP** - on Windows or when the OS does not support this functionality
* **-errno** - set by failing **fallocate**(2), while deallocating bad blocks
or allocating new blocks
* **-errno** - set by failing ndctl functions: **ndctl_bus_cmd_new_ars_cap**,
**ndctl_cmd_submit**, **ndctl_cmd_ars_cap_get_range** or
**ndctl_bus_cmd_new_clear_error** while trying to clear a bad block
in a DAX device
* **-ENXIO** - **ndctl_bus_cmd_new_clear_error** did not manage to clear
all bad blocks
# SEE ALSO #
**pmem2_badblock_context_new**(3), **pmem2_badblock_next**(3),
**libpmem2**(7) and **<http://pmem.io>**
| 2,085 | 25.74359 | 84 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/libpmem2/pmem2_badblock_next.3.md
|
---
layout: manual
Content-Style: 'text/css'
title: _MP(PMEM2_BADBLOCK_NEXT, 3)
collection: libpmem2
header: PMDK
date: pmem2 API version 1.0
...
[comment]: <> (SPDX-License-Identifier: BSD-3-Clause)
[comment]: <> (Copyright 2020, Intel Corporation)
[comment]: <> (pmem2_badblock_next.3 -- man page for pmem2_badblock_next)
[NAME](#name)<br />
[SYNOPSIS](#synopsis)<br />
[DESCRIPTION](#description)<br />
[RETURN VALUE](#return-value)<br />
[SEE ALSO](#see-also)<br />
# NAME #
**pmem2_badblock_next**() - read the next bad block for the given bad block
context *\*bbctx*.
# SYNOPSIS #
```c
#include <libpmem2.h>
struct pmem2_badblock;
struct pmem2_badblock_context;
int pmem2_badblock_next(
struct pmem2_badblock_context *bbctx,
struct pmem2_badblock *bb);
```
# DESCRIPTION #
The **pmem2_badblock_next**() function reads the next bad block for the given
bad block context *\*bbctx*.
It is not supported on Windows.
# RETURN VALUE #
The **pmem2_badblock_next**() function returns 0 and stores the next bad block
in *\*bb* on success or it returns a negative error code when there are no more
bad blocks for the given bad block context *\*bbctx*.
# ERRORS #
**pmem2_badblock_next**() can fail with the following error:
* **PMEM2_E_NO_BAD_BLOCK_FOUND** - there are no more bad blocks for the given
bad block context *\*bbctx*, *\*bb* is undefined in this case.
* **PMEM2_E_NOSUPP** - on Windows or when the OS does not support this functionality
# SEE ALSO #
**pmem2_badblock_context_new**(3), **pmem2_badblock_clear**(3),
**libpmem2**(7) and **<http://pmem.io>**
| 1,588 | 23.446154 | 84 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/libpmem2/libpmem2.7.md
|
---
layout: manual
Content-Style: 'text/css'
title: _MP(LIBPMEM2, 7)
collection: libpmem2
header: PMDK
date: pmem2 API version 1.0
...
[comment]: <> (SPDX-License-Identifier: BSD-3-Clause)
[comment]: <> (Copyright 2019-2020, Intel Corporation)
[comment]: <> (libpmem2.7 -- man page for libpmem2)
[NAME](#name)<br />
[SYNOPSIS](#synopsis)<br />
[DESCRIPTION](#description)<br />
[GRANULARITY](#granularity)<br />
[CAVEATS](#caveats)<br />
[ENVIRONMENT](#environment)<br />
[DEBUGGING](#debugging)<br />
[EXAMPLE](#example)<br />
[ACKNOWLEDGEMENTS](#acknowledgements)<br />
[SEE ALSO](#see-also)
# NAME #
**libpmem2** - persistent memory support library (EXPERIMENTAL)
# SYNOPSIS #
```c
#include <libpmem2.h>
cc ... -lpmem2
```
# DESCRIPTION #
# GRANULARITY #
The **libpmem2** library introduces the concept of granularity through which you
may easily distinguish between different types of
reaching *power-fail protected domain* by data. Data can reach this domain in
different ways depending on the platform capabilities.
Traditional block storage devices (SSD, HDD) must use system API calls such
as `msync()`, `fsync()` on Linux, or `FlushFileBuffers()`,`FlushViewOfFile()`
on Windows to write data reliably. Invoking these functions flushes the data
to the medium with page granularity. In the **libpmem2** library, this type
of flushing behavior is called **PMEM2_GRANULARITY_PAGE**.
In systems with persistent memory support, a *power-fail protected domain* may
cover different sets of resources: either the memory controller or the memory
controller and CPU caches. In this regard, **libpmem2** distinguishes two types
of granularity for persistent memory:
**PMEM2_GRANULARITY_CACHE_LINE** and **PMEM2_GRANULARITY_BYTE**.
If the *power-fail protected domain* covers only the memory controller, it is
required to flush CPU caches, so the granularity type, in this case, is called
**PMEM2_GRANULARITY_CACHE_LINE**. Depending on the architecture, there are
different types of machine instructions for flushing *cache lines*
(e.g., *CLWB*, *CLFLUSHOPT*, *CLFLUSH* for Intel x86_64 architecture). Usually,
to ensure the ordering of stores, such instructions must be followed
by a barrier (e.g., *SFENCE*).
The third type of granularity **PMEM2_GRANULARITY_BYTE** applies to platforms
where *power-fail protected domain* covers both the memory controller and
CPU caches. In such cases, cache flush instructions are no longer needed, and
the platform itself guarantees the persistence of data. But barriers might
still be required for ordering.
The library declares these granularity levels in *pmem2_granularity* enum, which
the application must set in *pmem2_config* to the appropriate level for
a mapping to succeed. The software should set this config parameter to a value
that most accurately represents the target hardware characteristics and
the storage patterns of the application. For example, a database storage engine
that operates on large logical pages that reside either on SSDs or PMEM should
set this value to **PMEM2_GRANULARITY_PAGE**.
# CAVEATS #
# ENVIRONMENT #
**libpmem2** can change its default behavior based on the following
environment variables. These are primarily intended for testing and are
generally not required.
+ **PMEM2_FORCE_GRANULARITY**=*val*
Setting this environment variable to *val* forces **libpmem2** to
use persist method specific for forced granularity and skip
granularity autodetecting mechanism. The concept of the granularity is
described in *GRANULARITY* section above.
This variable is intended for use during library testing.
The *val* argument accepts following text values:
+ **BYTE** - force byte granularity.
+ **CACHE_LINE** - force cache line granularity.
+ **PAGE** - force page granularity.
Granularity values listed above are case-insensitive.
>NOTE:
The value of **PMEM2_FORCE_GRANULARITY** is not queried (and cached)
at library initialization time, but read during each **pmem2_map**(3) call.
This means that **PMEM2_FORCE_GRANULARITY** may still be set or modified
by the program until the first attempt to map a file.
+ **PMEM_NO_CLWB**=1
Setting this environment variable to 1 forces **libpmem2** to never issue
the **CLWB** instruction on Intel hardware, falling back to other cache
flush instructions on that hardware instead (**CLFLUSHOPT** or **CLFLUSH**).
Without this setting, **libpmem2** will always use the **CLWB** instruction
for flushing processor caches on platforms that support this instruction.
This variable is intended for use during library testing, but may be required
for some rare cases when using **CLWB** has a negative impact on performance.
+ **PMEM_NO_CLFLUSHOPT**=1
Setting this environment variable to 1 forces **libpmem2** to never issue
the **CLFLUSHOPT** instruction on Intel hardware, falling back to the
**CLFLUSH** instructions instead. Without this environment variable,
**libpmem2** will always use the **CLFLUSHOPT** instruction for flushing
processor caches on platforms that support the instruction, but where
**CLWB** is not available. This variable is intended for use during
library testing.
+ **PMEM_NO_MOVNT**=1
Setting this environment variable to 1 forces **libpmem2** to never use
the *non-temporal* move instructions on Intel hardware. Without this
environment variable, **libpmem2** will use the non-temporal instructions
for copying larger ranges to persistent memory on platforms that support
these instructions. This variable is intended for use during library
testing.
+ **PMEM_MOVNT_THRESHOLD**=*val*
This environment variable allows overriding the minimum length of
the *pmem2_memmove_fn* operations, for which **libpmem2** uses
*non-temporal* move instructions. Setting this environment variable to 0
forces **libpmem2** to always use the *non-temporal* move instructions if
available. It has no effect if **PMEM_NO_MOVNT** is set to 1.
This variable is intended for use during library testing.
# DEBUGGING #
Two versions of **libpmem2** are typically available on a development
system. The normal version, accessed when a program is linked using the
**-lpmem2** option, is optimized for performance. That version skips checks
that impact performance and never logs any trace information or performs any
run-time assertions.
A second version of **libpmem2**, accessed when a program uses the libraries
under _DEBUGLIBPATH(), contains run-time assertions and trace points. The
typical way to access the debug version is to set the environment variable
**LD_LIBRARY_PATH** to _LDLIBPATH(). Debugging output is
controlled using the following environment variables. These variables have
no effect on the non-debug version of the library.
+ **PMEM2_LOG_LEVEL**
The value of **PMEM2_LOG_LEVEL** enables trace points in the debug version
of the library, as follows:
+ **0** - This is the default level when **PMEM2_LOG_LEVEL** is not set.
No log messages are emitted at this level.
+ **1** - Additional details on any errors detected are logged, in addition
to returning the *errno*-based errors as usual. The same information
may be retrieved using _UW(pmem2_errormsg).
+ **2** - A trace of basic operations is logged.
+ **3** - Enables a very verbose amount of function call tracing in the
library.
+ **4** - Enables voluminous and fairly obscure tracing
information that is likely only useful to the **libpmem2** developers.
Unless **PMEM2_LOG_FILE** is set, debugging output is written to *stderr*.
+ **PMEM2_LOG_FILE**
Specifies the name of a file where
all logging information should be written. If the last character in the name
is "-", the *PID* of the current process will be appended to the file name when
the log file is created. If **PMEM2_LOG_FILE** is not set, output is
written to *stderr*.
# EXAMPLE #
# ACKNOWLEDGEMENTS #
**libpmem2** builds on the persistent memory programming model recommended
by the SNIA NVM Programming Technical Work Group:
<https://snia.org/nvmp>
# SEE ALSO #
**FlushFileBuffers**(), **fsync**(2), **msync**(2),
**pmem2_config_set_required_store_granularity**(3),
**pmem2_get_memset_fn**(3), **pmem2_map_get_store_granularity**(3),
**libpmemblk**(7), **libpmemlog**(7), **libpmemobj**(7)
and **<https://pmem.io>**
| 8,220 | 38.334928 | 80 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/libpmem2/pmem2_config_set_protection.3.md
|
---
layout: manual
Content-Style: 'text/css'
title: _MP(PMEM2_CONFIG_SET_PROTECTION, 3)
collection: libpmem2
header: PMDK
date: pmem2 API version 1.0
...
[comment]: <> (SPDX-License-Identifier: BSD-3-Clause)
[comment]: <> (Copyright 2020, Intel Corporation)
[comment]: <> (pmem2_config_set_protection.3 -- man page for libpmem2 config API)
[NAME](#name)<br />
[SYNOPSIS](#synopsis)<br />
[DESCRIPTION](#description)<br />
[RETURN VALUE](#return-value)<br />
[SEE ALSO](#see-also)<br />
# NAME #
**pmem2_config_set_protection**() - set a protection flags in pmem2_config structure.
# SYNOPSIS #
```c
#include <libpmem2.h>
struct pmem2_config;
#define PMEM2_PROT_EXEC (1U << 29)
#define PMEM2_PROT_READ (1U << 30)
#define PMEM2_PROT_WRITE (1U << 31)
#define PMEM2_PROT_NONE 0
int pmem2_config_set_protection(struct pmem2_config *cfg,
unsigned prot);
```
# DESCRIPTION #
The **pmem2_config_set_protection**() function sets the protection flags
which will be used for memory mapping. The default value
in pmem2_config structure is **PMEM2_PROT_READ | PMEM2_PROT_WRITE**.
The *prot* argument describes the desired memory protection of the mapping.
The memory protection cannot conflict with the file opening-mode.
*\*config* should be already initialized,
please see **pmem2_config_new**(3) for details.
It is either PROT_NONE or the bitwise OR of one or more of the following flags:
* **PMEM2_PROT_EXEC** - Pages may be executed.
* **PMEM2_PROT_READ** - Pages may be read.
* **PMEM2_PROT_WRITE** - Pages may be written.
* **PMEM2_PROT_NONE** - Pages may not be accessed. On Windows this flag is not supported.
# RETURN VALUE #
When **pmem2_config_set_protection**() succeeds it returns 0.
Otherwise, it returns one of the following error value:
* **PMEM2_E_INVALID_PROT_FLAG** - some or all of the provided flags are not valid.
# SEE ALSO #
**libpmem2**(7), **pmem2_config_new**(3), **pmem2_map**(3)
and **<http://pmem.io>**
| 1,949 | 26.083333 | 89 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/libpmem2/pmem2_badblock_context_new.3.md
|
---
layout: manual
Content-Style: 'text/css'
title: _MP(PMEM2_BADBLOCK_CONTEXT_NEW, 3)
collection: libpmem2
header: PMDK
date: pmem2 API version 1.0
...
[comment]: <> (SPDX-License-Identifier: BSD-3-Clause)
[comment]: <> (Copyright 2020, Intel Corporation)
[comment]: <> (pmem2_badblock_context_new.3 -- man page for)
[comment]: <> (pmem2_badblock_context_new and pmem2_badblock_context_delete)
[NAME](#name)<br />
[SYNOPSIS](#synopsis)<br />
[DESCRIPTION](#description)<br />
[RETURN VALUE](#return-value)<br />
[SEE ALSO](#see-also)<br />
# NAME #
**pmem2_badblock_context_new**(), **pmem2_badblock_context_delete**() -
allocate and free a context for **pmem2_badblock_next**() and
**pmem2_badblock_clear**() operations
# SYNOPSIS #
```c
#include <libpmem2.h>
struct pmem2_source;
struct pmem2_badblock_context;
int pmem2_badblock_context_new(
const struct pmem2_source *src,
struct pmem2_badblock_context **bbctx);
void pmem2_badblock_context_delete(
struct pmem2_badblock_context **bbctx);
```
# DESCRIPTION #
The **pmem2_badblock_context_new**() function instantiates a new (opaque)
bad block context structure, *pmem2_badblock_context*, which is used to read
and clear bad blocks (by **pmem2_badblock_next**() and
**pmem2_badblock_clear**()). The function returns the bad block context
through the pointer in *\*bbctx*.
New bad block context structure is initialized with values read from the source
given as the first argument (*src*).
A bad block is an uncorrectable media error - a part of a storage media
that is either inaccessible or unwritable due to permanent physical damage.
In case of memory-mapped I/O, if a process tries to access (read or write)
the corrupted block, it will be terminated by the SIGBUS signal.
The **pmem2_badblock_context_delete**() function frees *\*bbctx* returned by
**pmem2_badblock_context_new**() and sets *\*bbctx* to NULL. If *\*bbctx*
is NULL, no operation is performed.
It is not supported on Windows.
# RETURN VALUE #
The **pmem2_badblock_context_new**() function returns 0 on success
or a negative error code on failure.
**pmem2_badblock_context_new**() does set *\*bbctx* to NULL on failure.
**pmem2_badblock_context_delete**() does not return any value.
# ERRORS #
**pmem2_badblock_context_new**() can fail with the following errors:
* **PMEM2_E_INVALID_FILE_TYPE** - *src* is not a regular file nor
a character device.
* **PMEM2_E_DAX_REGION_NOT_FOUND** - cannot find a DAX region for
the given *src*.
* **PMEM2_E_CANNOT_READ_BOUNDS** - cannot read offset or size of the namespace
of the given *src*.
* **PMEM2_E_NOSUPP** - on Windows or when the OS does not support this functionality
* **-ENOMEM** - out of memory
* **-errno** - set by failing **ndctl_new**, while trying to create
a new ndctl context.
* **-errno** - set by failing **fstat**(2), while trying to validate
the file descriptor of *src*.
* **-errno** - set by failing **realpath**(3), while trying to get
the canonicalized absolute sysfs pathname of DAX device given in *src*.
* **-errno** - set by failing **open**(2), while trying to open the FSDAX device
matching with the *src*.
* **-errno** - set by failing **read**(2), while trying to read from the FSDAX
device matching with the *src*.
* **-errno** - set by failing **ndctl_region_get_resource**, while reading
an offset of the region of the given *src*.
* **-errno** - set by failing **fiemap ioctl(2)**, while reading file extents
of the given *src*.
# SEE ALSO #
**pmem2_badblock_next**(3), **pmem2_badblock_clear**(3),
**libpmem2**(7) and **<http://pmem.io>**
| 3,582 | 29.887931 | 84 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/libpmem2/pmem2_config_set_vm_reservation.3.md
|
---
layout: manual
Content-Style: 'text/css'
title: _MP(PMEM2_CONFIG_SET_VM_RESERVATION, 3)
collection: libpmem2
header: PMDK
date: pmem2 API version 1.0
...
[comment]: <> (SPDX-License-Identifier: BSD-3-Clause)
[comment]: <> (Copyright 2020, Intel Corporation)
[comment]: <> (pmem2_config_set_vm_reservation.3 -- man page for libpmem2 config API)
[NAME](#name)<br />
[SYNOPSIS](#synopsis)<br />
[DESCRIPTION](#description)<br />
[RETURN VALUE](#return-value)<br />
[SEE ALSO](#see-also)<br />
# NAME #
**pmem2_config_set_vm_reservation**() - sets the pmem2_vm_reservation structure based on the
values in the pmem2_config structure
# SYNOPSIS #
```c
#include <libpmem2.h>
struct pmem2_config;
struct pmem2_vm_reservation;
int pmem2_config_set_vm_reservation(struct pmem2_config *cfg,
struct pmem2_vm_reservation *rsv, size_t offset);
```
# DESCRIPTION #
The **pmem2_config_set_vm_reservation**() function is not supported yet.
# RETURN VALUE #
**pmem2_config_set_vm_reservation**() returns **PMEM2_E_NOSUPP**.
# SEE ALSO #
**libpmem2**(7), **pmem2_vm_reservation_new**(3) and **<http://pmem.io>**
| 1,112 | 22.1875 | 93 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/libpmem2/pmem2_get_drain_fn.3.md
|
---
layout: manual
Content-Style: 'text/css'
title: _MP(PMEM2_GET_DRAIN_FN, 3)
collection: libpmem2
header: PMDK
date: pmem2 API version 1.0
...
[comment]: <> (SPDX-License-Identifier: BSD-3-Clause)
[comment]: <> (Copyright 2020, Intel Corporation)
[comment]: <> (pmem2_get_drain_fn.3 -- man page for pmem2_get_drain_fn)
[NAME](#name)<br />
[SYNOPSIS](#synopsis)<br />
[DESCRIPTION](#description)<br />
[RETURN VALUE](#return-value)<br />
[SEE ALSO](#see-also)<br />
# NAME #
**pmem2_get_drain_fn**() - get a drain function
# SYNOPSIS #
```c
#include <libpmem2.h>
typedef void (*pmem2_drain_fn)(void);
struct pmem2_map;
pmem2_drain_fn pmem2_get_drain_fn(struct pmem2_map *map);
```
# DESCRIPTION #
The **pmem2_get_drain_fn**() function returns a pointer to a function
responsible for efficiently draining flushes (see **pmem2_get_flush_fn**(3))
in the range owned by *map*. Draining, in this context, means making sure
that the flushes before this operation won't be reordered after it.
While it is not strictly true, draining can be thought of as waiting for
previous flushes to complete.
If two (or more) mappings share the same drain function, it is safe to call
this function once for all flushes belonging to those mappings.
# RETURN VALUE #
The **pmem2_get_drain_fn**() function never returns NULL.
**pmem2_get_drain_fn**() for the same *map* always returns the same function.
This means that it's safe to cache its return value. However, this function
is very cheap (because it returns a precomputed value), so caching may not
be necessary.
# SEE ALSO #
**pmem2_get_flush_fn**(3), **pmem2_get_persist_fn**(3), **pmem2_map**(3),
**libpmem2**(7) and **<http://pmem.io>**
| 1,693 | 26.322581 | 77 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/libpmem2/pmem2_source_alignment.3.md
|
---
layout: manual
Content-Style: 'text/css'
title: _MP(PMEM2_SOURCE_ALIGNMENT, 3)
collection: libpmem2
header: PMDK
date: pmem2 API version 1.0
...
[comment]: <> (SPDX-License-Identifier: BSD-3-Clause)
[comment]: <> (Copyright 2019-2020, Intel Corporation)
[comment]: <> (pmem2_source_alignment.3 -- man page for pmem2_source_alignment)
[NAME](#name)<br />
[SYNOPSIS](#synopsis)<br />
[DESCRIPTION](#description)<br />
[RETURN VALUE](#return-value)<br />
[SEE ALSO](#see-also)<br />
# NAME #
**pmem2_source_alignment**() - returns data source alignment
# SYNOPSIS #
```c
#include <libpmem2.h>
struct pmem2_source;
int pmem2_source_alignment(const struct pmem2_source *source, size_t *alignment);
```
# DESCRIPTION #
The **pmem2_source_alignment**() function retrieves the alignment of offset and
length needed for **pmem2_map**(3) to succeed. The alignment is stored in
*\*alignment*.
# RETURN VALUE #
The **pmem2_source_alignment**() function returns 0 on success.
If the function fails, the *\*alignment* variable is left unmodified, and one of
the following errors is returned:
On all systems:
* **PMEM2_E_INVALID_ALIGNMENT_VALUE** - operating system returned unexpected
alignment value (eg. it is not a power of two).
on Linux:
* **PMEM2_E_INVALID_FILE_TYPE** - file descriptor points to a character
device other than Device DAX.
* **PMEM2_E_INVALID_ALIGNMENT_FORMAT** - kernel query for Device DAX alignment
returned data in invalid format.
* -**errno** set by failing **fstat**(2), while trying to validate the file
descriptor.
* -**errno** set by failing **realpath**(3), while trying to determine whether
fd points to a Device DAX.
* -**errno** set by failing **read**(2), while trying to determine Device DAX's
alignment.
* -**errno** set by failing **strtoull**(3), while trying to determine
Device DAX's alignment.
On FreeBSD:
* **PMEM2_E_INVALID_FILE_TYPE** - file descriptor points to a directory,
block device, pipe, socket, or character device.
* -**errno** set by failing **fstat**(2), while trying to validate the file
descriptor.
# SEE ALSO #
**errno**(3), **fstat**(2), **realpath**(3), **read**(2), **strtoull**(3),
**pmem2_config_new**(3), **pmem2_source_from_handle**(3),
**pmem2_source_from_fd**(3), **libpmem2**(7) and **<http://pmem.io>**
| 2,292 | 26.297619 | 81 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/libpmem2/pmem2_config_set_length.3.md
|
---
layout: manual
Content-Style: 'text/css'
title: _MP(PMEM2_CONFIG_SET_LENGTH, 3)
collection: libpmem2
header: PMDK
date: pmem2 API version 1.0
...
[comment]: <> (SPDX-License-Identifier: BSD-3-Clause)
[comment]: <> (Copyright 2019, Intel Corporation)
[comment]: <> (pmem2_config_set_length.3 -- man page for libpmem2 config API)
[NAME](#name)<br />
[SYNOPSIS](#synopsis)<br />
[DESCRIPTION](#description)<br />
[RETURN VALUE](#return-value)<br />
[SEE ALSO](#see-also)<br />
# NAME #
**pmem2_config_set_length**() - set length in the pmem2_config structure
# SYNOPSIS #
```c
#include <libpmem2.h>
struct pmem2_config;
int pmem2_config_set_length(struct pmem2_config *config, size_t length);
```
# DESCRIPTION #
The **pmem2_config_set_length**() function configures the length which will be used
for mapping. *\*config* should be already initialized, please see **pmem2_config_new**(3)
for details. The *length* must be a multiple of the alignment required for the data
source which will be used for mapping alongside the config.
To retrieve the alignment required for specific instance of *pmem2_source** use
**pmem2_source_alignment**(3).
# RETURN VALUE #
The **pmem2_config_set_length**() function always returns 0.
# SEE ALSO #
**libpmem2**(7), **pmem2_map**(3), **pmem2_source_alignment**(3),
**pmem2_config_new**(3), **sysconf**(3) and **<http://pmem.io>**
| 1,380 | 26.078431 | 89 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/libpmem2/pmem2_config_set_required_store_granularity.3.md
|
---
layout: manual
Content-Style: 'text/css'
title: _MP(PMEM2_CONFIG_SET_REQUIRED_STORE_GRANULARITY, 3)
collection: libpmem2
header: PMDK
date: pmem2 API version 1.0
...
[comment]: <> (SPDX-License-Identifier: BSD-3-Clause)
[comment]: <> (Copyright 2019, Intel Corporation)
[comment]: <> (pmem2_config_set_required_store_granularity.3 -- man page for pmem2_config_set_required_store_granularity)
[NAME](#name)<br />
[SYNOPSIS](#synopsis)<br />
[DESCRIPTION](#description)<br />
[RETURN VALUE](#return-value)<br />
[SEE ALSO](#see-also)<br />
# NAME #
**pmem2_config_set_required_store_granularity**() - set a granularity
in pmem2_config structure.
# SYNOPSIS #
```c
#include <libpmem2.h>
enum pmem2_granularity {
PMEM2_GRANULARITY_BYTE,
PMEM2_GRANULARITY_CACHE_LINE,
PMEM2_GRANULARITY_PAGE,
};
int pmem2_config_set_required_store_granularity(struct pmem2_config *cfg,
enum pmem2_granularity g);
```
# DESCRIPTION #
The **pmem2_config_set_required_store_granularity**() sets a maximum permitted
granularity *g* requested by user in the *pmem2_config* structure.
Granularity must be one of the following values:
* **PMEM2_GRANULARITY_BYTE**
* **PMEM2_GRANULARITY_CACHE_LINE**
* **PMEM2_GRANULARITY_PAGE**
A description of the granularity concept can be found in **libpmem2**(7) manpage.
# RETURN VALUE #
**pmem2_config_set_required_store_granularity**() function returns 0 on success.
Otherwise, it returns one of the following error values:
* **PMEM2_E_GRANULARITY_NOT_SUPPORTED** - granularity *g* is not a valid value.
# SEE ALSO #
**pmem2_config_new**(3), **libpmem2**(7)
and **<http://pmem.io>**
| 1,626 | 24.030769 | 120 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/poolset/poolset.5.md
|
---
layout: manual
Content-Style: 'text/css'
title: _MP(POOLSET, 5)
collection: poolset
header: PMDK
date: poolset API version 1.0
...
[comment]: <> (SPDX-License-Identifier: BSD-3-Clause)
[comment]: <> (Copyright 2017-2018, Intel Corporation)
[comment]: <> (poolset.5 -- man page that describes format of pool set file)
[NAME](#name)<br />
[SYNOPSIS](#synopsis)<br />
[DESCRIPTION](#description)<br />
[REPLICAS](#replicas)<br />
[POOL SET OPTIONS](#pool-set-options)<br />
[NOTES](#notes)<br />
[SEE ALSO](#see-also)<br />
# NAME #
poolset - persistent memory pool configuration file format
# SYNOPSIS #
```c
mypool.set
```
# DESCRIPTION #
Depending on the configuration of the system, the available non-volatile
memory space may be divided into multiple memory devices. In such case, the
maximum size of the transactional object store could be limited by the capacity
of a single memory device. Therefore, **libpmemobj**(7), **libpmemblk**(7) and
**libpmemlog**(7) allow building object stores spanning multiple memory devices
by creation of persistent memory pools consisting of multiple files, where each
part of such a *pool set* may be stored on a different pmem-aware filesystem.
To improve reliability and eliminate single point of failure, **libpmemobj**(7)
also allows all the data written to a persistent memory pool to be copied
to local _WINUX(,or remote) pool *replicas*, thereby providing backup for the
persistent memory pool by producing a *mirrored pool set*. In practice,
the pool replicas may be considered as binary copies of the "master" pool set.
Data replication is not supported in **libpmemblk**(7) and **libpmemlog**(7).
The *set* file for each type of pool is a plain text file. Lines in the file
are formatted as follows:
+ The first line of the file must be the literal string "PMEMPOOLSET"
+ The pool parts are specified, one per line, in the format:
*size* *pathname*
+ *Replica* sections, if any, start with the literal string "REPLICA".
See **REPLICAS**, below, for further details.
+ Pool set options, if any, start with literal string *OPTION*.
See **POOL SET OPTIONS** below for details.
+ Lines starting with "#" are considered comments and are ignored.
The *size* must be compliant with the format specified in IEC 80000-13, IEEE 1541
or the Metric Interchange Format. These standards accept SI units with
obligatory B - kB, MB, GB, ... (multiplier by 1000) suffixes, and IEC units
with optional "iB" - KiB, MiB, GiB, ..., K, M, G, ... - (multiplier by 1024)
suffixes.
*pathname* must be an absolute pathname.
The *pathname* of a part can point to a Device DAX. Device DAX is the
device-centric analogue of Filesystem DAX. It allows memory ranges to be
allocated and mapped without need of an intervening file system.
Pools created on Device DAX have additional options and restrictions:
+ The *size* may be set to "AUTO", in which case the size of the device will be
automatically resolved at pool creation time.
+ To concatenate more than one Device DAX device into a single pool set, the
configured internal alignment of the devices must be 4KiB, unless the
*SINGLEHDR* or *NOHDRS* option is used in the pool set file.
See **POOL SET OPTIONS** below for details.
Please see **ndctl-create-namespace**(1) for more information on Device DAX,
including how to configure desired alignment.
The minimum file size of each part of the pool set is defined as follows:
+ For block pools, as **PMEMBLK_MIN_PART** in **\<libpmemblk.h\>**
+ For object pools, as **PMEMOBJ_MIN_PART** in **\<libpmemobj.h\>**
+ For log pools, as **PMEMLOG_MIN_PART** in **\<libpmemlog.h\>**
The net pool size of the pool set is equal to:
```
net_pool_size = sum_over_all_parts(page_aligned_part_size - 4KiB) + 4KiB
```
where
```
page_aligned_part_size = part_size & ~(page_size - 1)
```
Note that page size is OS specific. For more information please see
**sysconf**(3).
The minimum net pool size of a pool set is defined as follows:
+ For block pools, as **PMEMBLK_MIN_POOL** in **\<libpmemblk.h\>**
+ For object pools, as **PMEMOBJ_MIN_POOL** in **\<libpmemobj.h\>**
+ For log pools, as **PMEMLOG_MIN_POOL** in **\<libpmemlog.h\>**
Here is an example "mypool.set" file:
```
PMEMPOOLSET
OPTION NOHDRS
100G /mountpoint0/myfile.part0
200G /mountpoint1/myfile.part1
400G /mountpoint2/myfile.part2
```
The files in the set may be created by running one of the following commands.
To create a block pool:
```
$ pmempool create blk <bsize> mypool.set
```
To create a log pool:
```
$ pmempool create log mypool.set
```
# REPLICAS #
Sections defining replica sets are optional. There may be multiple replica
sections.
Local replica sections begin with a line containing only the literal string
"REPLICA", followed by one or more pool part lines as described above.
_WINUX(,
=q=Remote replica sections consist of the *REPLICA* keyword, followed on
the same line by the address of a remote host and a relative path to a
remote pool set file:
```
REPLICA [<user>@]<hostname> [<relative-path>/]<remote-pool-set-file>
```
+ *hostname* must be in the format recognized by the **ssh**(1) remote login
client
+ *pathname* is relative to the root config directory on the target
node - see **librpmem**(7)
There are no other lines in the remote replica section - the REPLICA line
defines a remote replica entirely.
=e=)
Here is an example "myobjpool.set" file with replicas:
```
PMEMPOOLSET
100G /mountpoint0/myfile.part0
200G /mountpoint1/myfile.part1
400G /mountpoint2/myfile.part2
# local replica
REPLICA
500G /mountpoint3/mymirror.part0
200G /mountpoint4/mymirror.part1 _WINUX(,=q=
# remote replica
REPLICA user@example.com remote-objpool.set=e=)
```
The files in the object pool set may be created by running the following command:
```
$ pmempool create --layout="mylayout" obj myobjpool.set
```
_WINUX(,
=q=Remote replica cannot have replicas, i.e. a remote pool set file cannot
define any replicas.=e=)
# POOL SET OPTIONS #
Pool set options can appear anywhere after the line with *PMEMPOOLSET* string.
Pool set file can contain several pool set options. The following options are
supported:
+ *SINGLEHDR*
+ *NOHDRS*
If the *SINGLEHDR* option is used, only the first part in each replica contains
the pool part internal metadata. In that case the effective size of a replica
is the sum of sizes of all its part files decreased once by 4096 bytes.
The *NOHDRS* option can appear only in the remote pool set file, when
**librpmem** does not serve as a means of replication for **libpmemobj** pool.
In that case none of the pool parts contains internal metadata.
The effective size of such a replica is the sum of sizes of all its part files.
Options *SINGLEHDR* and *NOHDRS* are mutually exclusive. If both are specified
in a pool set file, creating or opening the pool will fail with an error.
When using the *SINGLEHDR* or *NOHDRS* option, one can concatenate more than one
Device DAX devices with any internal alignments in one replica.
The *SINGLEHDR* option concerns only replicas that are local to the pool set
file. That is if one wants to create a pool set with the *SINGLEHDR* option
and with remote replicas, one has to add this option to the local pool set file
as well as to every single remote pool set file.
Using the *SINGLEHDR* and *NOHDRS* options has important implications for data
integrity checking and recoverability in case of a pool set damage.
See _UW(pmempool_sync) API for more information about pool set recovery.
# DIRECTORIES #
Providing a directory as a part's *pathname* allows the pool to dynamically
create files and consequently removes the user-imposed limit on the size
of the pool.
The *size* argument of a part in a directory poolset becomes the size of the
address space reservation required for the pool. In other words, the size
argument is the maximum theoretical size of the mapping. This value can be
freely increased between instances of the application, but decreasing it below
the real required space will result in an error when attempting to open the
pool.
The directory must NOT contain user created files with extension *.pmem*,
otherwise the behavior is undefined. If a file created by the library within
the directory is in any way altered (resized, renamed) the behavior is
undefined.
A directory poolset must exclusively use directories to specify paths -
combining files and directories will result in an error. A single replica can
consist of one or more directories. If there are multiple directories, the
address space reservation is equal to the sum of the sizes.
The order in which the files are created is unspecified, but the library will
try to maintain equal usage of the directories.
By default pools grow in 128 megabyte increments.
Only poolsets with the *SINGLEHDR* option can safely use directories.
# NOTES #
Creation of all the parts of the pool set and the associated replica sets can
be done with the **pmemobj_create**(3), **pmemblk_create**(3) or
**pmemlog_create**(3) function, or by using the **pmempool**(1) utility.
Restoring data from a local _WINUX(,or remote) replica can be done by using the
**pmempool-sync**(1) command or the _UW(pmempool_sync) API from the
**libpmempool**(7) library.
Modifications of a pool set file configuration can be done by using the
**pmempool-transform**(1) command or the _UW(pmempool_transform) API from the
**libpmempool**(7) library.
When creating a pool set consisting of multiple files, or when creating
a replicated pool set, the *path* argument passed to **pmemobj_create**(3),
**pmemblk_create**(3) or **pmemlog_create**(3) must point to the special *set*
file that defines the pool layout and the location of all the parts of the
pool set.
When opening a pool set consisting of multiple files, or when opening a
replicated pool set, the *path* argument passed to **pmemobj_open**(3),
**pmemblk_open**(3) or **pmemlog_open**(3) must point to the same *set* file
that was used for pool set creation.
# SEE ALSO #
**ndctl-create-namespace**(1), **pmemblk_create**(3), **pmemlog_create**(3),
**pmemobj_create**(3), **sysconf**(3), **libpmemblk**(7), **libpmemlog**(7),
**libpmemobj**(7) and **<https://pmem.io>**
| 10,215 | 33.986301 | 81 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/daxio/daxio.1.md
|
---
layout: manual
Content-Style: 'text/css'
title: _MP(DAXIO, 1)
collection: daxio
header: PMDK
date: daxio version 1.4
...
[comment]: <> (SPDX-License-Identifier: BSD-3-Clause)
[comment]: <> (Copyright 2018, Intel Corporation)
[comment]: <> (daxio.1 -- man page for daxio)
[NAME](#name)<br />
[SYNOPSIS](#synopsis)<br />
[DESCRIPTION](#description)<br />
[OPTIONS](#options)<br />
[EXAMPLE](#example)<br />
[SEE ALSO](#see-also)<br />
# NAME #
**daxio** - Perform I/O on Device DAX devices or zero a Device DAX device
# SYNOPSIS #
```
$ daxio [<options>]
```
# DESCRIPTION #
The daxio utility performs I/O on Device DAX devices or zero
a Device DAX device. Since the standard I/O APIs (read/write) cannot be used
with Device DAX, data transfer is performed on a memory-mapped device.
The **daxio** may be used to dump Device DAX data to a file, restore data from
a backup copy, move/copy data to another device or to erase data from
a device.
There must be at least one Device DAX device involved either as the input
or output. If input or output is not specified, it will default to stdin
or stdout respectively.
No length specified will default to input file/device length or to the
output file/device length, if input is a special char file or stdin.
For a Device DAX device, **daxio** will attempt to clear bad blocks within
the range of writes before performing the I/O (it can be turned off using
the '--clear-bad-blocks=no' option).
# OPTIONS #
`-i, --input`
Input device or file to read from.
`-o, --output`
Output device or file to write to.
`-z, --zero`
Zero the output device for *len* size, or the entire device if no
length was provided. The output device must be a Device DAX device.
`-b, --clear-bad-blocks=<yes|no>`
Clear bad blocks within the range of writes before performing the I/O
(default: yes).
`-l, --len`
The length in bytes to perform the I/O. To make passing in size easier
for kibi, mebi, gibi, and tebi bytes, *len* may include unit suffix.
The *len* format must be compliant with the format specified in IEC 80000-13,
IEEE 1541 or the Metric Interchange Format. These standards accept SI units
with obligatory B - kB, MB, GB, ... (multiplier by 1000) suffixes,
and IEC units with optional "iB" - KiB, MiB, GiB, ..., K, M, G, ...
(multiplier by 1024) suffixes.
`-s, --seek`
The number of bytes to skip over on the output before performing a write.
The same suffixes are accepted as for *len*.
`-k, --skip`
The number of bytes to skip over on the input before performing a read.
The same suffixes are accepted as for *len*.
`-V, --version`
Prints the version of **daxio**.
`-h, --help`
Prints synopsis and list of options.
# EXAMPLE #
```
# daxio --zero /dev/dax1.0
# daxio --input=/dev/dax1.0 --output=/home/myfile --len=2M --seek=4096
# cat /dev/zero | daxio --output=/dev/dax1.0
# daxio --input=/dev/zero --output=/dev/dax1.0 --skip=4096
```
# SEE ALSO #
**daxctl**(1), **ndctl**(1)
and **<https://pmem.io>**
| 2,978 | 26.330275 | 78 |
md
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/doc/pmem_ctl/pmem_ctl.5.md
|
---
layout: manual
Content-Style: 'text/css'
title: _MP(PMEM_CTL, 5)
collection: pmem_ctl
header: PMDK
date: pmem_ctl API version 1.4
...
[comment]: <> (SPDX-License-Identifier: BSD-3-Clause)
[comment]: <> (Copyright 2018-2019, Intel Corporation)
[comment]: <> (pmem_ctl.5 -- man page for CTL)
[NAME](#name)<br />
[DESCRIPTION](#description)<br />
[CTL EXTERNAL CONFIGURATION](#ctl-external-configuration)<br />
[SEE ALSO](#see-also)<br />
# NAME #
ctl - interface for examination and modification of the library's internal state.
# DESCRIPTION #
The CTL namespace is organized in a tree structure. Starting from the root,
each node can be either internal, containing other elements, or a leaf.
Internal nodes themselves can only contain other nodes and cannot be entry
points. There are two types of those nodes: *named* and *indexed*. Named nodes
have string identifiers. Indexed nodes represent an abstract array index and
have an associated string identifier. The index itself is provided by the user.
A collection of indexes present on the path of an entry point is provided to
the handler functions as name and index pairs.
Entry points are the leaves of the CTL namespace structure. Each entry point
can read from the internal state, write to the internal state,
exec a function or a combination of these operations.
The entry points are listed in the following format:
name | r(ead)w(rite)x(ecute) | global/- | read argument type | write argument type | exec argument type | config argument type
A description of **pmem_ctl** functions can be found on the following
manual pages:
**libpmemblk_ctl_get**(3), **libpmemlog_ctl_get**(3), **libpmemobj_ctl_get**(3)
# CTL EXTERNAL CONFIGURATION #
In addition to direct function call, each write entry point can also be set
using two alternative methods.
The first method is to load a configuration directly from the
**PMEMBLK_CONF**/ **PMEMLOG_CONF**/ **PMEMOBJ_CONF** environment variable.
A properly formatted ctl config string is a single-line
sequence of queries separated by ';':
```
query0;query1;...;queryN
```
A single query is constructed from the name of the ctl write entry point and
the argument, separated by '=':
```
entry_point=entry_point_argument
```
The entry point argument type is defined by the entry point itself, but there
are three predefined primitives:
*) integer: represented by a sequence of [0-9] characters that form
a single number.
*) boolean: represented by a single character: y/n/Y/N/0/1, each
corresponds to true or false. If the argument contains any
trailing characters, they are ignored.
*) string: a simple sequence of characters.
There are also complex argument types that are formed from the primitives
separated by a ',':
```
first_arg,second_arg
```
In summary, a full configuration sequence looks like this:
```
(first_entry_point)=(arguments, ...);...;(last_entry_point)=(arguments, ...);
```
As an example, to set both prefault at_open and at_create variables:
```
PMEMBLK_CONF="prefault.at_open=1;prefault.at_create=1"
```
The second method of loading an external configuration is to set the
**PMEMBLK_CONF_FILE**/ **PMEMLOG_CONF_FILE**/ **PMEMOBJ_CONF_FILE**
environment variable to point to a file that contains
a sequence of ctl queries. The parsing rules are all the same, but the file
can also contain white-spaces and comments.
To create a comment, simply use '#' anywhere in a line and everything
afterwards, until a new line, will be ignored.
An example configuration file:
```
#########################
# My pmemblk configuration
#########################
#
# Global settings:
prefault. # modify the behavior of pre-faulting
at_open = 1; # prefault when the pool is opened
prefault.
at_create = 0; # but don't prefault when it's created
# Per-pool settings:
# ...
```
# SEE ALSO #
**libpmemblk_ctl_get**(3), **libpmemlog_ctl_get**(3), **libpmemobj_ctl_get**(3)
and **<https://pmem.io>**
| 3,940 | 29.789063 | 126 |
md
|
null |
NearPMSW-main/nearpm/shadow/include/txopt.cc
|
#include "txopt.h"
#include <string.h>
// source: http://stackoverflow.com/questions/1919183/how-to-allocate-and-free-aligned-memory-in-c
/*
 * Return a 64-byte-aligned heap buffer of at least `size` bytes.
 *
 * Layout of the underlying malloc() block:
 *   [raw pointer slot][padding][64-byte-aligned user area]
 * The raw malloc() pointer is stashed in the word immediately before the
 * returned address, so the buffer can be released with
 * free(((void **)p)[-1]).  (No aligned_free() helper exists in this file.)
 */
void *
aligned_malloc(int size) {
	void *raw = malloc(size + 64 + sizeof(void*));
	uintptr_t user = ((uintptr_t)raw + sizeof(void*) + 64) & ~(uintptr_t)(64 - 1);
	((void **)user)[-1] = raw;
	return (void *)user;
}
// source: http://stackoverflow.com/questions/1640258/need-a-fast-random-generator-for-c
/* Marsaglia xorshift state words.  Seeded with fixed constants, so the
 * sequence is identical on every run.  Shared mutable statics: not
 * thread-safe. */
static unsigned long x=123456789, y=362436069, z=521288629;
/* Fast, non-cryptographic PRNG (Marsaglia "xorshf96").
 * NOTE(review): the "period 2^96-1" claim is inherited from the snippet
 * this was copied from (see URL above) and is not verified here. */
unsigned long xorshf96() { //period 2^96-1
	unsigned long t;
	/* xorshift scramble of x */
	x ^= x << 16;
	x ^= x >> 5;
	x ^= x << 1;
	/* rotate the three state words and fold them together */
	t = x;
	x = y;
	y = z;
	z = t ^ x ^ y;
	return z;
}
//volatile void s_fence();
// Flush the selected addresses
//volatile void metadata_cache_flush(uint64_t addr, unsigned size);
//volatile void cache_flush(uint64_t addr, unsigned size);
//volatile void flush_caches(uint64_t addr, unsigned size);
// Flush the one cacheline
//volatile inline void metadata_flush(uint64_t addr);
//volatile inline void cache_flush(uint64_t addr);
// Flush the whole caches
//volatile inline void metadata_flush();
//volatile inline void cache_flush();
//volatile void TX_OPT(uint64_t addr, unsigned size);
// Deduplication and Compression are transaparent
/*
class Dedup {
public:
};
class Compress {
public:
}
*/
/* Bump-allocator cursor for the counter-atomic region; starts at the base
 * of the reserved virtual range and only grows (see getNextAtomicAddr). */
uint64_t CounterAtomic::currAtomicAddr = COUNTER_ATOMIC_VADDR;
//uint64_t CounterAtomic::currCacheFlushAddr = CACHE_FLUSH_VADDR;
//uint64_t CounterAtomic::currCounterCacheFlushAddr = COUNTER_CACHE_FLUSH_VADDR;
/* Carve `_size` bytes out of the reserved counter-atomic virtual region
 * and hand them back as a raw pointer.  There is no corresponding free;
 * the region is bump-allocated.  Exits the process (via getNextAtomicAddr)
 * when the region is exhausted. */
void*
CounterAtomic::counter_atomic_malloc(unsigned _size) {
	uint64_t slot = getNextAtomicAddr(_size);
	return reinterpret_cast<void*>(slot);
}
/* Ask the (memory-mapped) hardware to flush every metadata-cache line
 * touched by the byte range [addr, addr + size).  Each store of a line
 * address to METADATA_CACHE_FLUSH_VADDR triggers one line flush.
 *
 * Fix: the previous line count (size / CACHE_LINE_SIZE, incremented only
 * when addr was unaligned) undercounted — e.g. an aligned 10-byte range
 * flushed zero lines, and an aligned 100-byte range flushed 1 of the 2
 * lines it spans.  The correct count is the offset of addr within its
 * cache line plus the length, rounded up to whole lines. */
volatile void
metadata_cache_flush(void* addr, unsigned size) {
	uint64_t offset = (uint64_t)addr % CACHE_LINE_SIZE;
	uint64_t num_cache_line = (offset + size + CACHE_LINE_SIZE - 1) / CACHE_LINE_SIZE;
	for (uint64_t i = 0; i < num_cache_line; ++i)
		*((volatile uint64_t*)METADATA_CACHE_FLUSH_VADDR) = (uint64_t)addr + i * CACHE_LINE_SIZE;
}
/* Ask the (memory-mapped) hardware to flush every data-cache line touched
 * by the byte range [addr, addr + size).  Each store of a line address to
 * CACHE_FLUSH_VADDR triggers one line flush.
 *
 * Fix: same counting bug as metadata_cache_flush — size / CACHE_LINE_SIZE
 * (+1 only for an unaligned addr) misses the final partial line and any
 * extra line the range spills into; derive the count from the in-line
 * offset plus the length, rounded up. */
volatile void
cache_flush(void* addr, unsigned size) {
	uint64_t offset = (uint64_t)addr % CACHE_LINE_SIZE;
	uint64_t num_cache_line = (offset + size + CACHE_LINE_SIZE - 1) / CACHE_LINE_SIZE;
	for (uint64_t i = 0; i < num_cache_line; ++i)
		*((volatile uint64_t*)CACHE_FLUSH_VADDR) = (uint64_t)addr + i * CACHE_LINE_SIZE;
}
/* Flush both the data cache and the metadata cache for the byte range
 * [addr, addr + size).  Convenience wrapper over the two single-cache
 * flush helpers above. */
volatile void
flush_caches(void* addr, unsigned size) {
	cache_flush(addr, size);
	metadata_cache_flush(addr, size);
}
// OPT with both data and addr ready
volatile void
OPT(void* opt_obj, bool reg, void* pmemaddr, void* data, unsigned size) {
// fprintf(stderr, "size: %u\n", size);
opt_packet_t opt_packet;
opt_packet.opt_obj = opt_obj;
//opt_packet.seg_id = i;
//opt_packet.pmemaddr = (void*)((uint64_t)(pmemaddr) + i * CACHE_LINE_SIZE);
opt_packet.pmemaddr = pmemaddr;
//opt_packet.data_ptr = (void*)((uint64_t)(data) + i * CACHE_LINE_SIZE);
//opt_packet.data_val = 0;
opt_packet.size = size;
opt_packet.type = (!reg ? FLAG_OPT : FLAG_OPT_REG);
//opt_packet.type = FLAG_OPT;
*((opt_packet_t*)TXOPT_VADDR) = opt_packet;
//*((opt_packet_t*)TXOPT_VADDR) = (opt_packet_t){opt_obj, pmemaddr, size, FLAG_OPT_DATA};
}
// OPT with both data (int) and addr ready
/* Issue an OPT packet for an int-sized store whose target address is known.
 * `reg` selects registration-only (FLAG_OPT_VAL_REG) vs immediate execution.
 * NOTE(review): `data_val` is currently ignored — the packet's data_val
 * field is commented out in txopt.h, so only sizeof(int) and the address
 * reach the device; confirm this matches the hardware model. */
volatile void
OPT_VAL(void* opt_obj, bool reg, void* pmemaddr, int data_val) {
	opt_packet_t opt_packet;
	opt_packet.opt_obj = opt_obj;
	opt_packet.pmemaddr = pmemaddr;
	//opt_packet.data_ptr = 0;
	//opt_packet.data_val = data_val;
	opt_packet.size = sizeof(int);
	opt_packet.type = (!reg ? FLAG_OPT_VAL : FLAG_OPT_VAL_REG);
	//opt_packet.type = FLAG_OPT;
	/* Doorbell: a single struct store to TXOPT_VADDR delivers the packet. */
	*((opt_packet_t*)TXOPT_VADDR) = opt_packet;
}
// OPT with only data ready
volatile void
OPT_DATA(void* opt_obj, bool reg, void* data, unsigned size) {
opt_packet_t opt_packet;
opt_packet.opt_obj = opt_obj;
opt_packet.pmemaddr = 0;
//opt_packet.data_ptr = (void*)((uint64_t)(data) + i * CACHE_LINE_SIZE);
//opt_packet.data_val = 0;
opt_packet.size = size;
opt_packet.type = (!reg ? FLAG_OPT_DATA : FLAG_OPT_DATA_REG);
//opt_packet.type = FLAG_OPT;
*((opt_packet_t*)TXOPT_VADDR) = opt_packet;
}
// OPT with only addr ready
volatile void
OPT_ADDR(void* opt_obj, bool reg, void* pmemaddr, unsigned size) {
opt_packet_t opt_packet;
opt_packet.opt_obj = opt_obj;
opt_packet.pmemaddr = pmemaddr;
//opt_packet.data_ptr = 0;
//opt_packet.data_val = 0;
opt_packet.size = size;
opt_packet.type = (!reg ? FLAG_OPT_ADDR : FLAG_OPT_ADDR_REG);
//opt_packet.type = FLAG_OPT;
*((opt_packet_t*)TXOPT_VADDR) = opt_packet;
}
// OPT with only data (int) ready
/* Issue an OPT packet for an int-sized payload with no target address yet
 * (pmemaddr is sent as 0).  `reg` selects registration-only
 * (FLAG_OPT_DATA_VAL_REG) vs immediate execution.
 * NOTE(review): `data_val` is currently ignored — the packet's data_val
 * field is commented out in txopt.h; confirm against the hardware model. */
volatile void
OPT_DATA_VAL(void* opt_obj, bool reg, int data_val) {
	opt_packet_t opt_packet;
	opt_packet.opt_obj = opt_obj;
	opt_packet.pmemaddr = 0;
	//opt_packet.data_ptr = 0;
	//opt_packet.data_val = data_val;
	opt_packet.size = sizeof(int);
	opt_packet.type = (!reg ? FLAG_OPT_DATA_VAL : FLAG_OPT_DATA_VAL_REG);
	//opt_packet.type = FLAG_OPT;
	/* Doorbell: a single struct store to TXOPT_VADDR delivers the packet. */
	*((opt_packet_t*)TXOPT_VADDR) = opt_packet;
}
volatile void
OPT_START(void* opt_obj) {
opt_packet_t opt_packet;
opt_packet.opt_obj = opt_obj;
opt_packet.type = FLAG_OPT_START;
}
/* Store barrier used to order persistent-memory writes.
 * NOTE(review): this issues an acquire-release fence, not a seq_cst one;
 * confirm acq_rel is strong enough for the intended "store fence"
 * semantics on the target platform. */
volatile void
s_fence() {
	std::atomic_thread_fence(std::memory_order_acq_rel);
}
/* Default-construct: reserve one cache line in the counter-atomic region;
 * the backing slot's contents are left uninitialized. */
CounterAtomic::CounterAtomic() {
	val_addr = getNextAtomicAddr(CACHE_LINE_SIZE);
}
/* Construct with an initial value: reserve the slot, then store _val
 * through it. */
CounterAtomic::CounterAtomic(uint64_t _val) {
	val_addr = getNextAtomicAddr(CACHE_LINE_SIZE);
	*((volatile uint64_t*)val_addr) = _val;
}
/* Construct with a boolean initial value (stored as 0/1).
 *
 * Fix: the original stored through val_addr BEFORE allocating the slot,
 * i.e. it wrote through the member's default value 0.  Allocate first,
 * then store — the same order as the uint64_t constructor. */
CounterAtomic::CounterAtomic(bool _val) {
	val_addr = getNextAtomicAddr(CACHE_LINE_SIZE);
	*((volatile uint64_t*)val_addr) = uint64_t(_val);
}
/* Read the current counter value through its volatile backing slot. */
uint64_t
CounterAtomic::getValue() {
	return *((volatile uint64_t*)val_addr);
}
/* Return the raw address of the backing slot in the counter-atomic region. */
uint64_t
CounterAtomic::getPtr() {
	return val_addr;
}
/* Assign a new value to the counter. */
CounterAtomic&
CounterAtomic::operator=(uint64_t _val) {
	*((volatile uint64_t*)val_addr) = _val;
	return *this;
}
/* NOTE(review): unlike the usual convention, operator+ MUTATES the counter
 * (it behaves like +=) and returns *this.  Callers relying on a
 * non-mutating `a + b` would be surprised; kept as-is for compatibility. */
CounterAtomic&
CounterAtomic::operator+(uint64_t _val) {
	*((volatile uint64_t*)val_addr) += _val;
	return *this;
}
/* Pre-increment: read, add one, write back.  The read-modify-write is NOT
 * atomic at this level; atomicity is presumably provided by the backing
 * counter-atomic region — TODO confirm. */
CounterAtomic&
CounterAtomic::operator++() {
	uint64_t val = *((volatile uint64_t*)val_addr);
	val++;
	*((volatile uint64_t*)val_addr) = val;
	return *this;
}
/* Pre-decrement: read, subtract one, write back (same non-atomic RMW note
 * as operator++). */
CounterAtomic&
CounterAtomic::operator--() {
	uint64_t val = *((volatile uint64_t*)val_addr);
	val--;
	*((volatile uint64_t*)val_addr) = val;
	return *this;
}
/* NOTE(review): like operator+, operator- MUTATES the counter (behaves
 * like -=) and returns *this. */
CounterAtomic&
CounterAtomic::operator-(uint64_t _val) {
	*((volatile uint64_t*)val_addr) -= _val;
	return *this;
}
/* Compare the stored value against _val. */
bool
CounterAtomic::operator==(uint64_t _val) {
	return *((volatile uint64_t*)val_addr) == _val;
}
/* Inequality counterpart of operator==. */
bool
CounterAtomic::operator!=(uint64_t _val) {
	return *((volatile uint64_t*)val_addr) != _val;
}
/* Bump-allocate `_size` bytes from the reserved counter-atomic virtual
 * region and return the start address of the carved-out range.  There is
 * no free; the cursor only advances.
 *
 * Fix: on exhaustion the original reported the error on stdout and exited
 * with status 0 (success); report on stderr and exit non-zero instead. */
uint64_t
CounterAtomic::getNextAtomicAddr(unsigned _size) {
	if (currAtomicAddr + _size >= COUNTER_ATOMIC_VADDR + NUM_COUNTER_ATOMIC_PAGE*4*1024) {
		fprintf(stderr, "@@not enough counter atomic space, current addr=%lu, size=%u\n", currAtomicAddr, _size);
		exit(1);
	}
	currAtomicAddr += _size;
	return (currAtomicAddr - _size);
}
/* Trigger the simulator's statistics dump by storing to the
 * STATUS_OUTPUT_VADDR doorbell; the stored value is ignored. */
volatile void
CounterAtomic::statOutput() {
	*((volatile uint64_t*) (STATUS_OUTPUT_VADDR))= 0;
}
/* (Re)initialize the hardware metadata/counter cache by storing to the
 * INIT_METADATA_CACHE_VADDR doorbell; the stored value is ignored. */
volatile void
CounterAtomic::initCounterCache() {
	*((volatile uint64_t*) (INIT_METADATA_CACHE_VADDR))= 0;
}
| 6,969 | 24.345455 | 98 |
cc
|
null |
NearPMSW-main/nearpm/shadow/include/txopt.h
|
// The starting address of the selected counter_atomic writes
#ifndef TXOPT_H
#define TXOPT_H
#define COUNTER_ATOMIC_VADDR (4096UL*1024*1024)
#define NUM_COUNTER_ATOMIC_PAGE 262144
// The starting address of the flush cache instruction
#define CACHE_FLUSH_VADDR (4096UL*1024*1024+4*NUM_COUNTER_ATOMIC_PAGE*1024)
// The starting address of the flush metadata cache instruction
#define METADATA_CACHE_FLUSH_VADDR (4096UL*1024*1024+(4*NUM_COUNTER_ATOMIC_PAGE+4)*1024)
#define STATUS_OUTPUT_VADDR (METADATA_CACHE_FLUSH_VADDR + 1024UL)
#define INIT_METADATA_CACHE_VADDR (STATUS_OUTPUT_VADDR + 1024UL)
#define TXOPT_VADDR (INIT_METADATA_CACHE_VADDR+1024UL)
#define CACHE_LINE_SIZE 64UL
#include <vector>
#include <deque>
#include <cstdlib>
#include <cstdint>
#include <atomic>
#include <stdio.h>
#include <cassert>
/* Packet type tags understood by the OPT doorbell at TXOPT_VADDR.
 * The first five execute immediately; the *_REG variants only register
 * the operation, and FLAG_OPT_START launches everything registered for
 * an opt object. */
enum opt_flag {
	FLAG_OPT,           /* data + address ready */
	FLAG_OPT_VAL,       /* int payload + address ready */
	FLAG_OPT_ADDR,      /* address ready, data pending */
	FLAG_OPT_DATA,      /* data ready, address pending */
	FLAG_OPT_DATA_VAL,  /* int payload ready, address pending */
	/* register no execute */
	FLAG_OPT_REG,
	FLAG_OPT_VAL_REG,
	FLAG_OPT_ADDR_REG,
	FLAG_OPT_DATA_REG,
	FLAG_OPT_DATA_VAL_REG,
	/* execute registered OPT */
	FLAG_OPT_START
};
/* Handle identifying an OPT object (one logical transaction). */
struct opt_t {
	//int pid;
	int obj_id;
};
// Fields in the OPT packet
// Used by both SW and HW
/* Written as a single struct store to TXOPT_VADDR; the layout is shared
 * with the hardware model, so field order matters. */
struct opt_packet_t {
	void* opt_obj;   /* which OPT object the packet belongs to */
	void* pmemaddr;  /* target persistent address, or 0 if pending */
	//void* data_ptr;
	//int seg_id;
	//int data_val;
	unsigned size;   /* payload size in bytes */
	opt_flag type;   /* one of enum opt_flag */
};
/* ---- Public API: OPT doorbell helpers and cache-flush requests ----
 * Implemented in txopt.cc; each OPT_* writes one opt_packet_t to
 * TXOPT_VADDR.  The `reg` flag selects register-only vs execute. */
// OPT with both data and addr ready
volatile void OPT(void* opt_obj, bool reg, void* pmemaddr, void* data, unsigned size);
//#define OPT(opt_obj, pmemaddr, data, size) \
// *((opt_packet_t*)TXOPT_VADDR) = (opt_packet_t){opt_obj, pmemaddr, size, FLAG_OPT_DATA};
// OPT with both data (int) and addr ready
volatile void OPT_VAL(void* opt_obj, bool reg, void* pmemaddr, int data_val);
// OPT with only data ready
volatile void OPT_DATA(void* opt_obj, bool reg, void* data, unsigned size);
// OPT with only addr ready
volatile void OPT_ADDR(void* opt_obj, bool reg, void* pmemaddr, unsigned size);
// OPT with only data (int) ready
volatile void OPT_DATA_VAL(void* opt_obj, bool reg, int data_val);
// Begin OPT operation
volatile void OPT_START(void* opt_obj);
// store barrier
volatile void s_fence();
// flush both metadata cache and data cache
volatile void flush_caches(void* addr, unsigned size);
// flush data cache only
volatile void cache_flush(void* addr, unsigned size);
// flush metadata cache only
volatile void metadata_cache_flush(void* addr, unsigned size);
// malloc that is cache-line aligned
void *aligned_malloc(int size);
/* A 64-bit counter whose storage lives in the reserved "counter atomic"
 * virtual region (see COUNTER_ATOMIC_VADDR) rather than on the normal
 * heap.  Each instance bump-allocates one cache line and accesses it
 * through a volatile pointer.  Arithmetic operators mutate the stored
 * value in place — note that operator+ / operator- behave like += / -=. */
class CounterAtomic {
public:
	/* Bump-allocate raw bytes from the counter-atomic region. */
	static void* counter_atomic_malloc(unsigned _size);
	// size is num of bytes
	/* Doorbell: ask the simulator to dump statistics. */
	static volatile void statOutput();
	/* Doorbell: (re)initialize the hardware metadata/counter cache. */
	static volatile void initCounterCache();
	uint64_t getValue();
	uint64_t getPtr();
	CounterAtomic();
	CounterAtomic(uint64_t _val);
	CounterAtomic(bool _val);
	CounterAtomic& operator=(uint64_t _val);
	CounterAtomic& operator+(uint64_t _val);   /* mutates: acts like += */
	CounterAtomic& operator++();
	CounterAtomic& operator--();
	CounterAtomic& operator-(uint64_t _val);   /* mutates: acts like -= */
	bool operator==(uint64_t _val);
	bool operator!=(uint64_t _val);
private:
	/* Declared but not defined in txopt.cc — presumably reserved. */
	void init();
	/* Advance the shared region cursor; exits on exhaustion. */
	static uint64_t getNextAtomicAddr(unsigned _size);
	static uint64_t getNextCacheFlushAddr(unsigned _size);
	//static uint64_t getNextPersistBarrierAddr(unsigned _size);
	static uint64_t getNextCounterCacheFlushAddr(unsigned _size);
	/* Shared bump-allocator cursors (only currAtomicAddr is defined). */
	static uint64_t currAtomicAddr;
	static uint64_t currCacheFlushAddr;
	//static uint64_t currPersistentBarrierAddr;
	static uint64_t currCounterCacheFlushAddr;
	/*
	static bool hasAllocateCacheFlush;
	static bool hasAllocateCounterCacheFlush;
	static bool hasAllocatePersistBarrier;
	*/
	//uint64_t val;
	/* Address of this counter's cache-line slot; 0 until a constructor
	 * allocates it. */
	uint64_t val_addr = 0;
};
#endif
| 3,665 | 26.155556 | 90 |
h
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.