NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/unittest/futils.py
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2019-2020, Intel Corporation

"""Test framework utilities"""

from os.path import join, abspath, dirname
import os
import sys

import configurator

# Constant paths to repository elements
ROOTDIR = abspath(join(dirname(__file__), '..'))

WIN_DEBUG_BUILDDIR = abspath(join(ROOTDIR, '..', 'x64', 'Debug'))
WIN_DEBUG_EXEDIR = abspath(join(WIN_DEBUG_BUILDDIR, 'tests'))

WIN_RELEASE_BUILDDIR = abspath(join(ROOTDIR, '..', 'x64', 'Release'))
WIN_RELEASE_EXEDIR = abspath(join(WIN_RELEASE_BUILDDIR, 'tests'))

if sys.platform == 'win32':
    DEBUG_LIBDIR = abspath(join(WIN_DEBUG_BUILDDIR, 'libs'))
    RELEASE_LIBDIR = abspath(join(WIN_RELEASE_BUILDDIR, 'libs'))
else:
    DEBUG_LIBDIR = abspath(join(ROOTDIR, '..', 'debug'))
    RELEASE_LIBDIR = abspath(join(ROOTDIR, '..', 'nondebug'))


def get_tool_path(ctx, name):
    if sys.platform == 'win32':
        if str(ctx.build) == 'debug':
            return abspath(join(WIN_DEBUG_BUILDDIR, 'libs', name))
        else:
            return abspath(join(WIN_RELEASE_BUILDDIR, 'libs', name))
    else:
        return abspath(join(ROOTDIR, '..', 'tools', name, name))


def get_test_tool_path(build, name):
    if sys.platform == 'win32':
        if str(build) == 'debug':
            return abspath(join(WIN_DEBUG_BUILDDIR, 'tests', name))
        else:
            return abspath(join(WIN_RELEASE_BUILDDIR, 'tests', name))
    else:
        return abspath(join(ROOTDIR, 'tools', name, name))


def get_lib_dir(ctx):
    if str(ctx.build) == 'debug':
        return DEBUG_LIBDIR
    else:
        return RELEASE_LIBDIR


def get_example_path(ctx, libname, name):
    """
    Get the path to the example binary.
    Paths to examples differ on Windows and Unix systems. On Windows,
    the example binaries have a specific name: ex_libname_name.
    On Unix systems, the example binaries are located in the directory
    "lib + libname/name" and have the same name as the .c file.
    """
    if sys.platform == 'win32':
        binname = '_'.join(['ex', libname, name])
        if str(ctx.build) == 'debug':
            return abspath(join(WIN_DEBUG_BUILDDIR, 'examples', binname))
        else:
            return abspath(join(WIN_RELEASE_BUILDDIR, 'examples', binname))
    else:
        return abspath(join(ROOTDIR, '..', 'examples', 'lib' + libname,
                            name, name))


def tail(file, n):
    """
    Replace the file content with the n last lines from the existing file.
    The original file is saved under its name with the ".old" suffix.
    """
    with open(file, 'r') as f:
        lines = f.readlines()
        last_lines = lines[-n:]
    os.rename(file, file + ".old")
    with open(file, 'w') as f:
        for line in last_lines:
            f.write(line)


def count(file, substring):
    """
    Count the number of occurrences of a string in the given file.
    """
    with open(file, 'r') as f:
        content = f.read()
    return content.count(substring)


class Color:
    """
    Set the font color. This functionality relies on ANSI escape sequences
    and is currently disabled for Windows.
    """
    if sys.platform != 'win32':
        RED = '\33[91m'
        GREEN = '\33[92m'
        END = '\33[0m'
    else:
        RED, GREEN, END = '', '', ''


class Message:
    """Simple level-based logger"""
    def __init__(self, level):
        self.level = level

    def print(self, msg):
        if self.level >= 1:
            print(msg)

    def print_verbose(self, msg):
        if self.level >= 2:
            print(msg)


class Fail(Exception):
    """Thrown when a test fails"""
    def __init__(self, msg):
        super().__init__(msg)
        self.messages = []
        self.messages.append(msg)

    def __str__(self):
        ret = '\n'.join(self.messages)
        return ret


def fail(msg, exit_code=None):
    if exit_code is not None:
        msg = '{}{}Error {}'.format(msg, os.linesep, exit_code)
    raise Fail(msg)


class Skip(Exception):
    """Thrown when a test should be skipped"""
    def __init__(self, msg):
        super().__init__(msg)
        config = configurator.Configurator().config
        if config.fail_on_skip:
            raise Fail(msg)
        self.messages = []
        self.messages.append(msg)

    def __str__(self):
        ret = '\n'.join(self.messages)
        return ret


def skip(msg):
    raise Skip(msg)


def set_kwargs_attrs(cls, kwargs):
    for k, v in kwargs.items():
        setattr(cls, '{}'.format(k), v)


def add_env_common(src, added):
    """
    A common implementation of adding an environment variable
    to the 'src' environment variables dictionary - taking into account
    that the variable may or may not be already defined.
    """
    for k, v in added.items():
        if k in src:
            src[k] = v + os.pathsep + src[k]
        else:
            src.update({k: v})


def to_list(var, *types):
    """
    Some variables may be provided by the user either as a single instance of
    a type or as a sequence of instances (e.g. a string or a list of strings).
    To be treated conveniently by the framework code, their types
    should be unified - cast to lists.
    """
    if isinstance(var, tuple(types)):
        return [var, ]
    else:
        return var
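
For illustration, a minimal sketch of how two of the helpers above behave; the dictionary contents are hypothetical, not part of the framework:

# add_env_common() prepends to an already-defined path-like variable
env = {'LD_LIBRARY_PATH': '/usr/lib'}
add_env_common(env, {'LD_LIBRARY_PATH': '/opt/lib', 'TRACE': '1'})
# on Linux: env == {'LD_LIBRARY_PATH': '/opt/lib:/usr/lib', 'TRACE': '1'}

# to_list() normalizes a scalar-or-sequence argument to a list
assert to_list('string', str) == ['string']
assert to_list(['a', 'b'], str) == ['a', 'b']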
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/unittest/basetest.py
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2019-2020, Intel Corporation

"""Base test class and its functionalities"""

import builtins
import subprocess as sp
import sys
import re
import os
from datetime import datetime
from os import path

from configurator import Configurator
import futils
import test_types
import shutil

if not hasattr(builtins, 'testcases'):
    builtins.testcases = []


def get_testcases():
    """Get the list of testcases imported from the src/test tree"""
    return builtins.testcases


def _test_string_repr(cls):
    """
    Implementation of the __str__ method for the test class. Needs to be
    available both for an initialized object (as a BaseTest instance method)
    as well as an uninitialized one (as a _TestCase metaclass method).
    """
    return '{}/{}'.format(cls.group, cls.name)


class _TestCase(type):
    """Metaclass for BaseTest that is used for registering imported tests"""

    def __init__(cls, name, bases, dct):
        type.__init__(cls, name, bases, dct)

        # globally register the class as a test case;
        # only classes whose names start with 'TEST' are meant to be run
        cls.name = cls.__name__
        if cls.__module__ == '__main__':
            cls.cwd = path.dirname(path.abspath(sys.argv[0]))
        else:
            cls.cwd = cls.__module__

        cls.group = path.basename(cls.cwd)

        if cls.name.startswith('TEST'):
            builtins.testcases.append(cls)
            try:
                cls.testnum = int(cls.name.replace('TEST', ''))
            except ValueError as e:
                print('Invalid test class name {}, should be "TEST[number]"'
                      .format(cls.name))
                raise e

            cls.tc_dirname = cls.group + '_' + str(cls.testnum)

    def __str__(cls):
        return _test_string_repr(cls)


class BaseTest(metaclass=_TestCase):
    """
    Framework base test class. Every test case needs to (directly or
    indirectly) inherit from this class. Since this class implements only
    very abstract test behaviour, particular test cases are advised
    to use the Test class inheriting from it.
    """
    enabled = True

    def __init__(self):
        self.ctx = None

    def __str__(self):
        return _test_string_repr(self)

    def _execute(self, c):
        """
        Implementation of the basic single contextualized test execution
        workflow. Called by the test runner.
        """
        self.ctx = c

        try:
            # pre-execution cleanup
            self.ctx.clean()
            self.clean()

            self.ctx.setup()
            self.setup(c)

            start_time = datetime.now()
            self.run(c)
            self.elapsed = (datetime.now() - start_time).total_seconds()

            self.ctx.check()
            self.check(c)
        except futils.Fail:
            self._on_fail()
            raise
        except futils.Skip:
            self.ctx.clean()
            self.clean()
            raise
        except sp.TimeoutExpired:
            msg = '{}: {}TIMEOUT{}\t({})'.format(self, futils.Color.RED,
                                                 futils.Color.END,
                                                 self.ctx)
            raise futils.Fail(msg)
        else:
            self.ctx.clean()
            self.clean()

    def setup(self, ctx):
        """Test setup - not implemented by BaseTest"""
        pass

    def run(self, ctx):
        """
        Main test body, run with a specific context provided through
        a Context class instance. Needs to be implemented by each test.
        """
        raise NotImplementedError('{} does not implement run() method'.format(
            self.__class__))

    def check(self, ctx):
        """Run additional test checks - not implemented by BaseTest"""
        pass

    def clean(self):
        """Test cleanup - not implemented by BaseTest"""
        pass

    def _on_fail(self):
        """Custom behaviour on test fail - not implemented by BaseTest"""
        pass


class Test(BaseTest):
    """
    Generic implementation of the BaseTest scaffolding used by particular
    test case classes as a base.
    """
    test_type = test_types.Medium
    memcheck_check_leaks = True
    match = True

    def __init__(self):
        super().__init__()
        self.config = Configurator().config
        self.msg = futils.Message(self.config.unittest_log_level)

    def _get_utenv(self):
        """Get environment variable values used by the C test framework"""
        return {
            'UNITTEST_NAME': str(self),
            'UNITTEST_LOG_LEVEL': str(self.config.unittest_log_level),
            'UNITTEST_NUM': str(self.testnum)
        }

    def get_log_files(self):
        """
        Returns the names of all log files for the given test
        """
        pattern = r'.*[a-zA-Z_]{}\.log'
        log_files = []
        files = os.scandir(self.cwd)
        for file in files:
            match = re.fullmatch(pattern.format(self.testnum), file.name)
            if match:
                log = path.abspath(path.join(self.cwd, file.name))
                log_files.append(log)
        return log_files

    def _print_log_files(self):
        """
        Prints all log files for the given test
        """
        log_files = self.get_log_files()
        for file in log_files:
            with open(file) as f:
                self.ctx.dump_n_lines(f)

    def _move_log_files(self, ctx):
        """
        Move all log files for the given test
        """
        path = "logs"
        sub_dir = str(ctx).replace(':', '')
        logs_dir = os.path.join(path, sub_dir)
        os.makedirs(logs_dir, exist_ok=True)
        log_files = self.get_log_files()
        for file in log_files:
            shutil.copy2(file, logs_dir)

    def remove_log_files(self):
        """
        Removes log files for the given test
        """
        log_files = self.get_log_files()
        for file in log_files:
            os.remove(file)

    def setup(self, ctx):
        """Test setup"""
        self.env = {}
        self.env.update(self._get_utenv())
        self.ctx.add_env(self.env)
        self.remove_log_files()

    def _on_fail(self):
        self._print_log_files()

    def check(self, ctx):
        """Run additional test checks"""
        if self.match:
            self._run_match()
        self._move_log_files(ctx)

    def _run_match(self):
        """Match log files"""
        cwd_listdir = [path.join(self.cwd, f) for f in os.listdir(self.cwd)]
        suffix = '{}.log.match'.format(self.testnum)

        def is_matchfile(f):
            """A match file ends with the specific suffix and the char
            before the suffix is not a digit"""
            before_suffix = -len(suffix) - 1
            return path.isfile(f) and f.endswith(suffix) and \
                not f[before_suffix].isdigit()

        match_files = filter(is_matchfile, cwd_listdir)
        prefix = 'perl ' if sys.platform == 'win32' else ''
        match_cmd = prefix + path.join(futils.ROOTDIR, 'match')

        for mf in match_files:
            cmd = '{} {}'.format(match_cmd, mf)
            proc = sp.run(cmd.split(), stdout=sp.PIPE, cwd=self.cwd,
                          stderr=sp.STDOUT, universal_newlines=True)
            if proc.returncode != 0:
                futils.fail(proc.stdout, exit_code=proc.returncode)
            else:
                self.msg.print_verbose(proc.stdout)
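
For illustration, a hedged sketch of how a test case file would plug into the registration machinery above. The "testframework" import alias, the Short test type, and ctx.exec() mirror how PMDK test files typically use this framework, but they are assumptions here; the binary name is hypothetical:

import testframework as t


class TEST0(t.Test):
    """hypothetical smoke test; the _TestCase metaclass registers it as
    group/TEST0 and derives testnum == 0 from the class name"""
    test_type = t.Short

    def run(self, ctx):
        # UNITTEST_* variables are exported via Test.setup() before run()
        ctx.exec('hypothetical_binary')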
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/unittest/ut_pmem2_setup_integration.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */

/*
 * ut_pmem2_setup_integration.c -- libpmem2 setup functions using the public
 * API (for integration tests)
 */

#include <libpmem2.h>

#include "ut_pmem2_config.h"
#include "ut_pmem2_setup_integration.h"
#include "ut_pmem2_source.h"
#include "unittest.h"

/*
 * ut_pmem2_prepare_config_integration -- fill pmem2_config in minimal scope
 */
void
ut_pmem2_prepare_config_integration(const char *file, int line,
    const char *func, struct pmem2_config **cfg, struct pmem2_source **src,
    int fd, enum pmem2_granularity granularity)
{
    ut_pmem2_config_new(file, line, func, cfg);
    ut_pmem2_config_set_required_store_granularity(file, line, func, *cfg,
        granularity);
    ut_pmem2_source_from_fd(file, line, func, src, fd);
}
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/unittest/ut_pmem2_source.h
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */

/*
 * ut_pmem2_source.h -- utility helper functions for libpmem2 source tests
 */

#ifndef UT_PMEM2_SOURCE_H
#define UT_PMEM2_SOURCE_H 1

#include "ut_fh.h"

/* a pmem2_source_from_fd() that can't return an error */
#define PMEM2_SOURCE_FROM_FD(src, fd) \
    ut_pmem2_source_from_fd(__FILE__, __LINE__, __func__, src, fd)

/* a pmem2_source_from_fh() that can't return an error */
#define PMEM2_SOURCE_FROM_FH(src, fh) \
    ut_pmem2_source_from_fh(__FILE__, __LINE__, __func__, src, fh)

/* a pmem2_source_alignment() that can't return an error */
#define PMEM2_SOURCE_ALIGNMENT(src, al) \
    ut_pmem2_source_alignment(__FILE__, __LINE__, __func__, src, al)

/* a pmem2_source_delete() that can't return an error */
#define PMEM2_SOURCE_DELETE(src) \
    ut_pmem2_source_delete(__FILE__, __LINE__, __func__, src)

/* a pmem2_source_size() that can't return an error */
#define PMEM2_SOURCE_SIZE(src, size) \
    ut_pmem2_source_size(__FILE__, __LINE__, __func__, src, size)

void ut_pmem2_source_from_fd(const char *file, int line, const char *func,
    struct pmem2_source **src, int fd);

void ut_pmem2_source_from_fh(const char *file, int line, const char *func,
    struct pmem2_source **src, struct FHandle *fhandle);

void ut_pmem2_source_alignment(const char *file, int line, const char *func,
    struct pmem2_source *src, size_t *alignment);

void ut_pmem2_source_delete(const char *file, int line, const char *func,
    struct pmem2_source **src);

void ut_pmem2_source_size(const char *file, int line, const char *func,
    struct pmem2_source *src, size_t *size);

#endif /* UT_PMEM2_SOURCE_H */
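
For illustration, a hedged sketch of how a test body might use the wrappers above. OPEN() and CLOSE() are assumed to be the file helpers from unittest.h, which is not shown in this excerpt:

/* hypothetical test fragment -- not part of the original header */
struct pmem2_source *src;
size_t size;

int fd = OPEN("testfile", O_RDWR);  /* assumed unittest.h helper */
PMEM2_SOURCE_FROM_FD(&src, fd);     /* fails the test instead of returning an error */
PMEM2_SOURCE_SIZE(src, &size);      /* fills 'size' or fails the test */
PMEM2_SOURCE_DELETE(&src);
CLOSE(fd);                          /* assumed unittest.h helper */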
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/unittest/ut_pmem2_setup.h
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */

/*
 * ut_pmem2_setup.h -- libpmem2 setup functions using non-public API
 * (only for unit tests)
 */

#ifndef UT_PMEM2_SETUP_H
#define UT_PMEM2_SETUP_H 1

#include "ut_fh.h"

void ut_pmem2_prepare_config(struct pmem2_config *cfg,
    struct pmem2_source **src, struct FHandle **fh,
    enum file_handle_type fh_type, const char *path, size_t length,
    size_t offset, int access);

#endif /* UT_PMEM2_SETUP_H */
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/unittest/ut_pmem2_setup.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */

/*
 * ut_pmem2_setup.c -- libpmem2 setup functions using non-public API
 * (only for unit tests)
 */

#include "../../libpmem2/config.h"
#include "ut_pmem2_source.h"
#include "ut_pmem2_setup.h"
#include "unittest.h"

/*
 * ut_pmem2_prepare_config -- fill pmem2_config; this function cannot set
 * a wrong value
 */
void
ut_pmem2_prepare_config(struct pmem2_config *cfg, struct pmem2_source **src,
    struct FHandle **fh, enum file_handle_type fh_type, const char *file,
    size_t length, size_t offset, int access)
{
    pmem2_config_init(cfg);
    cfg->offset = offset;
    cfg->length = length;
    cfg->requested_max_granularity = PMEM2_GRANULARITY_PAGE;

    *fh = UT_FH_OPEN(fh_type, file, access);
    PMEM2_SOURCE_FROM_FH(src, *fh);
}
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/unittest/valgrind.py
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2019-2020, Intel Corporation
#

"""Valgrind handling tools"""

import sys
import re
import subprocess as sp
from enum import Enum, unique
from os import path

import context as ctx
import futils

DISABLE = -1
ENABLE = 1
AUTO = 0

_IGNORED = (
    "WARNING: Serious error when reading debug info",
    "When reading debug info from ",
    "Ignoring non-Dwarf2/3/4 block in .debug_info",
    "Last block truncated in .debug_info; ignoring",
    "parse_CU_Header: is neither DWARF2 nor DWARF3 nor DWARF4",
    "brk segment overflow",
    "see section Limitations in user manual",
    "Warning: set address range perms: large range",
    "further instances of this message will not be shown",
    "get_Form_contents: DW_FORM_GNU_strp_alt used, but no alternate .debug_str"
)


@unique
class _Tool(Enum):
    MEMCHECK = 1
    PMEMCHECK = 2
    HELGRIND = 3
    DRD = 4
    NONE = 5

    def __str__(self):
        return self.name.lower()

    def __bool__(self):
        return self != self.NONE


TOOLS = tuple(t for t in _Tool if t != _Tool.NONE)

MEMCHECK = _Tool.MEMCHECK
PMEMCHECK = _Tool.PMEMCHECK
HELGRIND = _Tool.HELGRIND
DRD = _Tool.DRD
NONE = _Tool.NONE


def enabled_tool(test):
    """Get the Valgrind tool enabled by the test"""
    enabled = [t for t in TOOLS if getattr(test, t.name.lower()) == ENABLE]
    if len(enabled) > 1:
        raise ValueError('test "{}" enables more than one Valgrind tool'
                         .format(test))
    elif len(enabled) == 1:
        return enabled[0]
    else:
        return None


def disabled_tools(test):
    """Get the Valgrind tools disabled by the test"""
    disabled = [t for t in TOOLS if getattr(test, t.name.lower()) == DISABLE]
    return disabled


class Valgrind:
    """Valgrind management"""

    def __init__(self, tool, cwd, testnum):
        if sys.platform == 'win32':
            raise NotImplementedError(
                'Valgrind class should not be used on Windows')

        self.tool = NONE if tool is None else tool
        self.tool_name = self.tool.name.lower()
        self.cwd = cwd

        log_file_name = '{}{}.log'.format(self.tool.name.lower(), testnum)
        self.log_file = path.join(cwd, log_file_name)

        if self.tool == NONE:
            self.valgrind_exe = None
        else:
            self.valgrind_exe = self._get_valgrind_exe()

        if self.valgrind_exe is None:
            return

        self.verify()
        self.opts = []

        self.add_suppression('ld.supp')
        if 'freebsd' in sys.platform:
            self.add_suppression('freebsd.supp')

        if tool == MEMCHECK:
            self.add_suppression('memcheck-libunwind.supp')
            self.add_suppression('memcheck-ndctl.supp')
            self.add_suppression('memcheck-dlopen.supp')

        # Before Skylake, Intel CPUs did not have the clflushopt instruction,
        # so pmem_flush and pmem_persist both translated to clflush.
        # This means that a missing pmem_drain after pmem_flush could only be
        # detected on Skylake+ CPUs.
        # This option tells pmemcheck to expect a fence (sfence or the
        # VALGRIND_PMC_DO_FENCE client request, used by pmem_drain) after
        # clflush and makes pmemcheck output the same on pre-Skylake and
        # post-Skylake CPUs.
        elif tool == PMEMCHECK:
            self.add_opt('--expect-fence-after-clflush=yes')

        elif tool == HELGRIND:
            self.add_suppression('helgrind-log.supp')

        elif tool == DRD:
            self.add_suppression('drd-log.supp')

    def __str__(self):
        return self.tool.name.lower()

    def __bool__(self):
        return self.tool != NONE

    @classmethod
    def filter(cls, config, msg, tc):
        """
        Acquire the Valgrind tool for the test to be run based on the
        configuration and test requirements
        """
        vg_tool, kwargs = ctx.get_requirement(tc, 'enabled_valgrind', NONE)
        disabled, _ = ctx.get_requirement(tc, 'disabled_valgrind', ())

        if config.force_enable:
            if vg_tool and vg_tool != config.force_enable:
                raise futils.Skip(
                    "test enables the '{}' Valgrind tool while "
                    "execution configuration forces '{}'"
                    .format(vg_tool, config.force_enable))
            elif config.force_enable in disabled:
                raise futils.Skip(
                    "forced Valgrind tool '{}' is disabled by test"
                    .format(config.force_enable))
            else:
                vg_tool = config.force_enable

        return [cls(vg_tool, tc.cwd, tc.testnum, **kwargs), ]

    @property
    def cmd(self):
        """Get the Valgrind command with the specified arguments"""
        if self.tool == NONE:
            return []

        cmd = [self.valgrind_exe, '--tool={}'.format(self.tool_name),
               '--log-file={}'.format(self.log_file)] + self.opts
        return cmd

    def setup(self, memcheck_check_leaks=True, **kwargs):
        if self.tool == MEMCHECK and memcheck_check_leaks:
            self.add_opt('--leak-check=full')

    def check(self, **kwargs):
        self.validate_log()

    def _get_valgrind_exe(self):
        """
        On some systems "valgrind" is a shell script that calls the actual
        executable "valgrind.bin".
        The wrapper script does not work well with LD_PRELOAD, so we want
        to call Valgrind directly.
        """
        try:
            out = sp.check_output('which valgrind', shell=True,
                                  universal_newlines=True)
        except sp.CalledProcessError:
            raise futils.Skip('Valgrind not found')

        valgrind_bin = path.join(path.dirname(out), 'valgrind.bin')
        if path.isfile(valgrind_bin):
            return valgrind_bin
        return 'valgrind'

    def add_opt(self, opt):
        """Add an option to the Valgrind command"""
        self.opts.append(opt)

    def _get_version(self):
        """
        Get the Valgrind version represented as an integer, with the patch
        version ignored
        """
        out = sp.check_output('{} --version'.format(self.valgrind_exe),
                              shell=True, universal_newlines=True)
        version = out.split('valgrind-')[1]
        version_as_int = int(version.rsplit('.', 1)[0].replace('.', ''))
        return version_as_int

    def add_suppression(self, f):
        """
        Add a suppression file. The provided file path is
        relative to the tests root directory (pmdk/src/test)
        """
        self.opts.append('--suppressions={}'
                         .format(path.join(futils.ROOTDIR, f)))

    def validate_log(self):
        """
        Check the Valgrind test result based on the Valgrind log file.
        Return True if the check passed, raise Fail otherwise
        """
        if self.tool == NONE or sys.platform == 'win32':
            return True

        no_ignored = []

        # remove ignored warnings from the log file
        with open(self.log_file, 'r+') as f:
            no_ignored = [ln for ln in f if not any(w in ln for w in _IGNORED)]
            f.seek(0)
            f.writelines(no_ignored)
            f.truncate()

        if path.isfile(self.log_file + '.match'):
            # if there is a Valgrind log match file, do nothing - the log
            # file will be checked by the 'match' tool
            return

        non_zero_errors = 'ERROR SUMMARY: [^0]'
        errors_found = any(re.search(non_zero_errors, ln) for ln in no_ignored)
        if any('Bad pmempool' in ln for ln in no_ignored) or errors_found:
            raise futils.Fail('Valgrind log validation failed')

    def verify(self):
        """
        Checks that Valgrind can be used.
        """
        if self.valgrind_exe is None:
            raise futils.Skip('Valgrind not found')

        # verify the tool
        cmd = '{} --tool={} --help'.format(self.valgrind_exe, self.tool_name)
        try:
            sp.check_output(cmd, shell=True, stderr=sp.STDOUT)
        except sp.CalledProcessError:
            raise futils.Skip("Valgrind tool '{}' was not found"
                              .format(self.tool_name))


def require_valgrind_enabled(valgrind):
    def wrapped(tc):
        if sys.platform == 'win32':
            # do not run valgrind tests on Windows
            tc.enabled = False
            return tc

        tool = _require_valgrind_common(valgrind)
        ctx.add_requirement(tc, 'enabled_valgrind', tool)
        return tc
    return wrapped


def require_valgrind_disabled(*valgrind):
    def wrapped(tc):
        disabled_tools = [_require_valgrind_common(v) for v in valgrind]
        ctx.add_requirement(tc, 'disabled_valgrind', disabled_tools)
        return tc
    return wrapped


def _require_valgrind_common(v):
    valid_tool_names = [str(t) for t in TOOLS]
    if v not in valid_tool_names:
        sys.exit('used name {} not in valid valgrind tool names which are: {}'
                 .format(v, valid_tool_names))
    str_to_tool = next(t for t in TOOLS if v == str(t))
    return str_to_tool
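
For illustration, a hedged sketch of how the requirement decorators above are applied; BaseTest is the class from basetest.py shown earlier, the module names follow the file paths above, and the test bodies are placeholders:

from basetest import BaseTest
from valgrind import require_valgrind_enabled, require_valgrind_disabled


@require_valgrind_enabled('pmemcheck')
class TEST0(BaseTest):
    """hypothetical test that must run under pmemcheck"""
    def run(self, ctx):
        pass


@require_valgrind_disabled('memcheck', 'drd')
class TEST1(BaseTest):
    """hypothetical test that must not run under memcheck or drd"""
    def run(self, ctx):
        pass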
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/unittest/configurator.py
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2019-2020, Intel Corporation
#

"""Parser for user-provided test configuration"""

import argparse
import os
import string
import sys
from datetime import timedelta

import builds
import context as ctx
import granularity
import futils
import test_types
import valgrind as vg

try:
    import testconfig
except ImportError:
    sys.exit('Please add a valid testconfig.py file - see '
             'testconfig.py.example')


class _ConfigFromDict:
    """
    Class fields are created from the provided dictionary. Used for creating
    the final config object
    """
    def __init__(self, dict_):
        for k, v in dict_.items():
            setattr(self, k, v)

    # special method triggered if a class attribute was not found
    # https://docs.python.org/3.5/reference/datamodel.html#object.__getattr__
    def __getattr__(self, name):
        if name == 'page_fs_dir':
            raise futils.Skip('Configuration field "{}" not found. '
                              'No page granularity test directory '
                              'provided'.format(name))
        if name == 'cacheline_fs_dir':
            raise futils.Skip('Configuration field "{}" not found. '
                              'No cache line granularity test '
                              'directory provided'.format(name))
        if name == 'byte_fs_dir':
            raise futils.Skip('Configuration field "{}" not found. '
                              'No byte granularity test directory '
                              'provided'.format(name))

        raise AttributeError('Provided test configuration may be '
                             'invalid. No "{}" field found in '
                             'configuration.'
                             .format(name))


def _str2list(config):
    """
    Convert a string with a test sequence to the equivalent list.
    example:
        _str2list("0-3,6") --> [0, 1, 2, 3, 6]
        _str2list("1,3-5") --> [1, 3, 4, 5]
    """
    arg = config['test_sequence']
    if not arg:
        # test_sequence not set, do nothing
        return

    seq = []
    try:
        for number in arg.split(','):
            if '-' in number:
                number = number.split('-')
                begin = int(number[0])
                end = int(number[1])
                step = 1 if begin < end else -1
                seq.extend(range(begin, end + step, step))
            else:
                seq.append(int(number))
    except (ValueError, IndexError):
        print('Provided test sequence "{}" is invalid'.format(arg))
        raise

    config['test_sequence'] = seq


def _str2time(config):
    """
    Convert a string with an s, m, h or d suffix to a time format
    example:
        _str2time("5s") --> "0:00:05"
        _str2time("15m") --> "0:15:00"
    """
    string_ = config['timeout']
    try:
        timeout = int(string_[:-1])
    except ValueError as e:
        raise ValueError("invalid timeout argument: {}".format(string_)) from e
    else:
        if "d" in string_:
            timeout = timedelta(days=timeout)
        elif "m" in string_:
            timeout = timedelta(minutes=timeout)
        elif "h" in string_:
            timeout = timedelta(hours=timeout)
        elif "s" in string_:
            timeout = timedelta(seconds=timeout)

        config['timeout'] = timeout.total_seconds()


def _str2ctx(config):
    """Convert context classes from strings to actual classes"""
    def class_from_string(name, base):
        if name == 'all':
            return base.__subclasses__()

        try:
            return next(b for b in base.__subclasses__()
                        if str(b) == name.lower())
        except StopIteration:
            print('Invalid config value: "{}".'.format(name))
            raise

    def convert_internal(key, base):
        if not isinstance(config[key], list):
            config[key] = ctx.expand(class_from_string(config[key], base))
        else:
            classes = [class_from_string(cl, base) for cl in config[key]]
            config[key] = ctx.expand(*classes)

    convert_internal('build', builds.Build)
    convert_internal('test_type', test_types._TestType)
    convert_internal('granularity', granularity.Granularity)

    if config['force_enable'] is not None:
        config['force_enable'] = next(
            t for t in vg.TOOLS
            if t.name.lower() == config['force_enable'])


class Configurator():
    """Parser for user test configuration"""

    def __init__(self):
        self.config = self.parse_config()

    def parse_config(self):
        """
        Parse and return the test execution config object. The final config
        is composed from 2 config values - values from the testconfig.py
        file and values provided by command line args.
        """
        self.argparser = self._init_argparser()

        try:
            args_config = self._get_args_config()

            # The order of config addition in the 'config' initialization
            # is relevant - values from each next added config overwrite
            # values of already existing keys.
            config = {**testconfig.config, **args_config}

            self._convert_to_usable_types(config)

            # Remake the dict into a class object for convenient field access
            config = _ConfigFromDict(config)

            # device_dax_path may be either a single string with a path
            # or a sequence of paths
            if sys.platform != 'win32':
                config.device_dax_path = futils.to_list(
                    config.device_dax_path, str)

            return config
        except KeyError as e:
            print("No config field '{}' found. "
                  "testconfig.py file may be invalid.".format(e.args[0]))
            raise

    def _convert_to_usable_types(self, config):
        """
        Converts config value types as parsed from user input into
        types usable by the framework implementation
        """
        _str2ctx(config)
        _str2list(config)
        _str2time(config)

    def _get_args_config(self):
        """Return config values parsed from command line arguments"""

        # the 'group' positional argument is added only if RUNTESTS.py is
        # the execution entry point
        from_runtest = os.path.basename(sys.argv[0]) == 'RUNTESTS.py'

        if from_runtest:
            self.argparser.add_argument('group', nargs='*',
                                        help='Run only tests '
                                        'from selected groups')

        # remove possible whitespace and empty args
        sys.argv = [arg for arg in sys.argv if arg and not arg.isspace()]
        args = self.argparser.parse_args()

        if from_runtest:
            # test_sequence does not make sense if group is not set
            if args.test_sequence and not args.group:
                self.argparser.error('"--test_sequence" argument needs '
                                     'to have the "group" arg set')

            # remove possible path characters added by shell hint
            args.group = [g.strip(string.punctuation) for g in args.group]

        # make into a dict for type consistency
        return {k: v for k, v in vars(args).items() if v is not None}

    def _init_argparser(self):
        def ctx_choices(cls):
            return [str(c) for c in cls.__subclasses__()]

        parser = argparse.ArgumentParser()
        parser.add_argument('--fs_dir_force_pmem', type=int,
                            help='set PMEM_IS_PMEM_FORCE for tests run on'
                            ' pmem fs')
        parser.add_argument('-l', '--unittest_log_level', type=int,
                            help='set log level. 0 - silent, 1 - normal, '
                            '2 - verbose')
        parser.add_argument('--keep_going', type=bool,
                            help='continue execution despite test failures')
        parser.add_argument('-b', dest='build',
                            help='run only the specified build type',
                            choices=ctx_choices(builds.Build), nargs='*')
        parser.add_argument('-g', dest='granularity',
                            choices=ctx_choices(granularity.Granularity),
                            nargs='*', help='run tests on a filesystem'
                            ' with the specified granularity types.')
        parser.add_argument('-t', dest='test_type',
                            help='run only the specified test type, where '
                            'check = short + medium',
                            choices=ctx_choices(test_types._TestType),
                            nargs='*')
        parser.add_argument('-o', dest='timeout',
                            help="set timeout for test execution: an "
                            "integer with an optional suffix: 's' for "
                            "seconds, 'm' for minutes, 'h' for hours "
                            "or 'd' for days.")
        parser.add_argument('-u', dest='test_sequence',
                            help='run only tests from the specified test '
                            'sequence, e.g.: 0-2,5 will execute TEST0, '
                            'TEST1, TEST2 and TEST5',
                            default='')
        parser.add_argument('--list-testcases', dest='list_testcases',
                            action='store_const', const=True,
                            help='list testcases only')
        parser.add_argument('--fail-on-skip', dest='fail_on_skip',
                            action='store_const', const=True,
                            help='skipped tests also fail')

        tracers = parser.add_mutually_exclusive_group()
        tracers.add_argument('--tracer', dest='tracer',
                             help='run the C binary with the provided '
                             'tracer command. With this option stdout and '
                             'stderr are not redirected, enabling '
                             'interactive sessions.',
                             default='')
        tracers.add_argument('--gdb', dest='tracer', action='store_const',
                             const='gdb --args', help='run gdb as a tracer')
        tracers.add_argument('--cgdb', dest='tracer', action='store_const',
                             const='cgdb --args', help='run cgdb as a tracer')

        if sys.platform != 'win32':
            fe_choices = [str(t) for t in vg.TOOLS]
            parser.add_argument('--force-enable', choices=fe_choices,
                                default=None)

        return parser
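
For illustration, a hedged sketch of a minimal testconfig.py that would satisfy this parser. Only keys referenced by the code above are shown, and all values are placeholders; the real reference is testconfig.py.example:

# hypothetical minimal testconfig.py (values are placeholders)
config = {
    'unittest_log_level': 1,
    'build': 'all',                # converted by _str2ctx()
    'test_type': 'check',          # converted by _str2ctx()
    'granularity': 'all',          # converted by _str2ctx()
    'fail_on_skip': False,
    'keep_going': False,
    'timeout': '3m',               # converted by _str2time()
    'test_sequence': '',           # converted by _str2list()
    'force_enable': None,
    'device_dax_path': [],
    'fs_dir_force_pmem': 0,
    'tracer': '',
    'page_fs_dir': '/tmp',         # granularity test directories; optional,
    'cacheline_fs_dir': '/mnt/pmem',  # missing ones cause a Skip
    'byte_fs_dir': '/mnt/pmem',
}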
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/unittest/unittest.sh
#
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2014-2020, Intel Corporation
#
# Copyright (c) 2016, Microsoft Corporation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
set -e

# make sure we have a well-defined locale for string operations here
export LC_ALL="C"
#export LC_ALL="en_US.UTF-8"

if ! [ -f ../envconfig.sh ]; then
    echo >&2 "envconfig.sh is missing -- is the tree built?"
    exit 1
fi

. ../testconfig.sh
. ../envconfig.sh

if [ -t 1 ]; then
    IS_TERMINAL_STDOUT=YES
fi
if [ -t 2 ]; then
    IS_TERMINAL_STDERR=YES
fi

function is_terminal() {
    local fd
    fd=$1
    case $(eval "echo \${IS_TERMINAL_${fd}}") in
    YES) : ;;
    *) false ;;
    esac
}

function interactive_color() {
    local color fd
    color=$1
    fd=$2
    shift 2

    if is_terminal ${fd} && command -v tput >/dev/null; then
        echo "$(tput setaf $color || :)$*$(tput sgr0 || :)"
    else
        echo "$*"
    fi
}

function interactive_red() {
    interactive_color 1 "$@"
}

function interactive_green() {
    interactive_color 2 "$@"
}

function verbose_msg() {
    if [ "$UNITTEST_LOG_LEVEL" -ge 2 ]; then
        echo "$*"
    fi
}

function msg() {
    if [ "$UNITTEST_LOG_LEVEL" -ge 1 ]; then
        echo "$*"
    fi
}

function fatal() {
    echo "$*" >&2
    exit 1
}

if [ -z "${UNITTEST_NAME}" ]; then
    CURDIR=$(basename $(pwd))
    SCRIPTNAME=$(basename $0)

    export UNITTEST_NAME=$CURDIR/$SCRIPTNAME
    export UNITTEST_NUM=$(echo $SCRIPTNAME | sed "s/TEST//")
fi
# defaults
[ "$UNITTEST_LOG_LEVEL" ] || UNITTEST_LOG_LEVEL=2
[ "$GREP" ] || GREP="grep -a"
[ "$TEST" ] || TEST=check
[ "$FS" ] || FS=any
[ "$BUILD" ] || BUILD=debug
[ "$CHECK_TYPE" ] || CHECK_TYPE=auto
[ "$CHECK_POOL" ] || CHECK_POOL=0
[ "$VERBOSE" ] || VERBOSE=0
[ -n "${SUFFIX+x}" ] || SUFFIX="😘⠏⠍⠙⠅ɗPMDKӜ⥺🙋"
export UNITTEST_LOG_LEVEL GREP TEST FS BUILD CHECK_TYPE CHECK_POOL VERBOSE SUFFIX
TOOLS=../tools
LIB_TOOLS="../../tools"
# Paths to some useful tools
[ "$PMEMPOOL" ] || PMEMPOOL=$LIB_TOOLS/pmempool/pmempool
[ "$DAXIO" ] || DAXIO=$LIB_TOOLS/daxio/daxio
[ "$PMEMSPOIL" ] || PMEMSPOIL=$TOOLS/pmemspoil/pmemspoil.static-nondebug
[ "$BTTCREATE" ] || BTTCREATE=$TOOLS/bttcreate/bttcreate.static-nondebug
[ "$PMEMWRITE" ] || PMEMWRITE=$TOOLS/pmemwrite/pmemwrite
[ "$PMEMALLOC" ] || PMEMALLOC=$TOOLS/pmemalloc/pmemalloc
[ "$PMEMOBJCLI" ] || PMEMOBJCLI=$TOOLS/pmemobjcli/pmemobjcli
[ "$PMEMDETECT" ] || PMEMDETECT=$TOOLS/pmemdetect/pmemdetect.static-nondebug
[ "$PMREORDER" ] || PMREORDER=$LIB_TOOLS/pmreorder/pmreorder.py
[ "$FIP" ] || FIP=$TOOLS/fip/fip
[ "$DDMAP" ] || DDMAP=$TOOLS/ddmap/ddmap
[ "$CMPMAP" ] || CMPMAP=$TOOLS/cmpmap/cmpmap
[ "$EXTENTS" ] || EXTENTS=$TOOLS/extents/extents
[ "$FALLOCATE_DETECT" ] || FALLOCATE_DETECT=$TOOLS/fallocate_detect/fallocate_detect.static-nondebug
[ "$OBJ_VERIFY" ] || OBJ_VERIFY=$TOOLS/obj_verify/obj_verify
[ "$USC_PERMISSION" ] || USC_PERMISSION=$TOOLS/usc_permission_check/usc_permission_check.static-nondebug
[ "$ANONYMOUS_MMAP" ] || ANONYMOUS_MMAP=$TOOLS/anonymous_mmap/anonymous_mmap.static-nondebug
# force globs to fail if they don't match
shopt -s failglob
# number of remote nodes required in the current unit test
NODES_MAX=-1
# sizes of alignments
SIZE_4KB=4096
SIZE_2MB=2097152
readonly PAGE_SIZE=$(getconf PAGESIZE)
# PMEMOBJ limitations
PMEMOBJ_MAX_ALLOC_SIZE=17177771968
# SSH and SCP options
SSH_OPTS="-o BatchMode=yes"
SCP_OPTS="-o BatchMode=yes -r -p"
NDCTL_MIN_VERSION="63"
# list of common files to be copied to all remote nodes
DIR_SRC="../.."
FILES_COMMON_DIR="\
$DIR_SRC/test/*.supp \
$DIR_SRC/tools/rpmemd/rpmemd \
$DIR_SRC/tools/pmempool/pmempool \
$DIR_SRC/test/tools/extents/extents \
$DIR_SRC/test/tools/obj_verify/obj_verify \
$DIR_SRC/test/tools/ctrld/ctrld \
$DIR_SRC/test/tools/fip/fip"
# Portability
VALGRIND_SUPP="--suppressions=../ld.supp \
--suppressions=../memcheck-libunwind.supp \
--suppressions=../memcheck-ndctl.supp"
if [ "$(uname -s)" = "FreeBSD" ]; then
DATE="gdate"
DD="gdd"
FALLOCATE="mkfile"
VM_OVERCOMMIT="[ $(sysctl vm.overcommit | awk '{print $2}') == 0 ]"
RM_ONEFS="-x"
STAT_MODE="-f%Lp"
STAT_PERM="-f%Sp"
STAT_SIZE="-f%z"
STRACE="truss"
VALGRIND_SUPP="$VALGRIND_SUPP --suppressions=../freebsd.supp"
else
DATE="date"
DD="dd"
FALLOCATE="fallocate -l"
VM_OVERCOMMIT="[ $(cat /proc/sys/vm/overcommit_memory) != 2 ]"
RM_ONEFS="--one-file-system"
STAT_MODE="-c%a"
STAT_PERM="-c%A"
STAT_SIZE="-c%s"
STRACE="strace"
fi
# array of lists of PID files to be cleaned in case of an error
NODE_PID_FILES[0]=""
case "$BUILD"
in
debug|static-debug)
if [ -z "$PMDK_LIB_PATH_DEBUG" ]; then
PMDK_LIB_PATH=../../debug
REMOTE_PMDK_LIB_PATH=../debug
else
PMDK_LIB_PATH=$PMDK_LIB_PATH_DEBUG
REMOTE_PMDK_LIB_PATH=$PMDK_LIB_PATH_DEBUG
fi
;;
nondebug|static-nondebug)
if [ -z "$PMDK_LIB_PATH_NONDEBUG" ]; then
PMDK_LIB_PATH=../../nondebug
REMOTE_PMDK_LIB_PATH=../nondebug
else
PMDK_LIB_PATH=$PMDK_LIB_PATH_NONDEBUG
REMOTE_PMDK_LIB_PATH=$PMDK_LIB_PATH_NONDEBUG
fi
;;
esac
export LD_LIBRARY_PATH=$PMDK_LIB_PATH:$GLOBAL_LIB_PATH:$LD_LIBRARY_PATH
export REMOTE_LD_LIBRARY_PATH=$REMOTE_PMDK_LIB_PATH:$GLOBAL_LIB_PATH:\$LD_LIBRARY_PATH
export PATH=$GLOBAL_PATH:$PATH
export REMOTE_PATH=$GLOBAL_PATH:\$PATH
export PKG_CONFIG_PATH=$GLOBAL_PKG_CONFIG_PATH:$PKG_CONFIG_PATH
export REMOTE_PKG_CONFIG_PATH=$GLOBAL_PKG_CONFIG_PATH:\$PKG_CONFIG_PATH
#
# When running static binary tests, append the build type to the binary
#
case "$BUILD"
in
static-*)
EXESUFFIX=.$BUILD
;;
esac
#
# The variable DIR is constructed so the test uses that directory when
# constructing test files. DIR is chosen based on the fs-type for
# this test, and if the appropriate fs-type doesn't have a directory
# defined in testconfig.sh, the test is skipped.
#
# This behavior can be overridden by setting DIR. For example:
# DIR=/force/test/dir ./TEST0
#
curtestdir=`basename $PWD`
# just in case
if [ ! "$curtestdir" ]; then
fatal "curtestdir does not have a value"
fi
curtestdir=test_$curtestdir
if [ ! "$UNITTEST_NUM" ]; then
fatal "UNITTEST_NUM does not have a value"
fi
if [ ! "$UNITTEST_NAME" ]; then
fatal "UNITTEST_NAME does not have a value"
fi
REAL_FS=$FS
if [ "$DIR" ]; then
DIR=$DIR/$curtestdir$UNITTEST_NUM
else
case "$FS"
in
pmem)
# if a variable is set - it must point to a valid directory
if [ "$PMEM_FS_DIR" == "" ]; then
fatal "$UNITTEST_NAME: PMEM_FS_DIR is not set"
fi
DIR=$PMEM_FS_DIR/$DIRSUFFIX/$curtestdir$UNITTEST_NUM
if [ "$PMEM_FS_DIR_FORCE_PMEM" = "1" ] || [ "$PMEM_FS_DIR_FORCE_PMEM" = "2" ]; then
export PMEM_IS_PMEM_FORCE=1
fi
;;
non-pmem)
# if a variable is set - it must point to a valid directory
if [ "$NON_PMEM_FS_DIR" == "" ]; then
fatal "$UNITTEST_NAME: NON_PMEM_FS_DIR is not set"
fi
DIR=$NON_PMEM_FS_DIR/$DIRSUFFIX/$curtestdir$UNITTEST_NUM
;;
any)
if [ "$PMEM_FS_DIR" != "" ]; then
DIR=$PMEM_FS_DIR/$DIRSUFFIX/$curtestdir$UNITTEST_NUM
REAL_FS=pmem
if [ "$PMEM_FS_DIR_FORCE_PMEM" = "1" ] || [ "$PMEM_FS_DIR_FORCE_PMEM" = "2" ]; then
export PMEM_IS_PMEM_FORCE=1
fi
elif [ "$NON_PMEM_FS_DIR" != "" ]; then
DIR=$NON_PMEM_FS_DIR/$DIRSUFFIX/$curtestdir$UNITTEST_NUM
REAL_FS=non-pmem
else
fatal "$UNITTEST_NAME: fs-type=any and both env vars are empty"
fi
;;
none)
DIR=/dev/null/not_existing_dir/$DIRSUFFIX/$curtestdir$UNITTEST_NUM
;;
*)
verbose_msg "$UNITTEST_NAME: SKIP fs-type $FS (not configured)"
exit 0
;;
esac
fi
#
# The default is to turn on library logging to level 3 and save it to local files.
# Tests that don't want it on, should override these environment variables.
#
export PMEM_LOG_LEVEL=3
export PMEM_LOG_FILE=pmem$UNITTEST_NUM.log
export PMEMBLK_LOG_LEVEL=3
export PMEMBLK_LOG_FILE=pmemblk$UNITTEST_NUM.log
export PMEMLOG_LOG_LEVEL=3
export PMEMLOG_LOG_FILE=pmemlog$UNITTEST_NUM.log
export PMEMOBJ_LOG_LEVEL=3
export PMEMOBJ_LOG_FILE=pmemobj$UNITTEST_NUM.log
export PMEMPOOL_LOG_LEVEL=3
export PMEMPOOL_LOG_FILE=pmempool$UNITTEST_NUM.log
export PMREORDER_LOG_FILE=pmreorder$UNITTEST_NUM.log
export OUT_LOG_FILE=out$UNITTEST_NUM.log
export ERR_LOG_FILE=err$UNITTEST_NUM.log
export TRACE_LOG_FILE=trace$UNITTEST_NUM.log
export PREP_LOG_FILE=prep$UNITTEST_NUM.log
export VALGRIND_LOG_FILE=${CHECK_TYPE}${UNITTEST_NUM}.log
export VALIDATE_VALGRIND_LOG=1
export RPMEM_LOG_LEVEL=3
export RPMEM_LOG_FILE=rpmem$UNITTEST_NUM.log
export RPMEMD_LOG_LEVEL=info
export RPMEMD_LOG_FILE=rpmemd$UNITTEST_NUM.log
export REMOTE_VARS="
RPMEMD_LOG_FILE
RPMEMD_LOG_LEVEL
RPMEM_LOG_FILE
RPMEM_LOG_LEVEL
PMEM_LOG_FILE
PMEM_LOG_LEVEL
PMEMOBJ_LOG_FILE
PMEMOBJ_LOG_LEVEL
PMEMPOOL_LOG_FILE
PMEMPOOL_LOG_LEVEL"
[ "$UT_DUMP_LINES" ] || UT_DUMP_LINES=30
export CHECK_POOL_LOG_FILE=check_pool_${BUILD}_${UNITTEST_NUM}.log
# In case a lock is required for Device DAXes
DEVDAX_LOCK=../devdax.lock
#
# store_exit_on_error -- push onto a stack a sign that reflects the current
# state of the 'errexit' shell option
#
function store_exit_on_error() {
    if [ "${-#*e}" != "$-" ]; then
        estack+=-
    else
        estack+=+
    fi
}

#
# restore_exit_on_error -- restore the state of the 'errexit' shell option
#
function restore_exit_on_error() {
    if [ -z $estack ]; then
        fatal "error: store_exit_on_error function has to be called first"
    fi

    eval "set ${estack:${#estack}-1:1}e"
    estack=${estack%?}
}

#
# disable_exit_on_error -- store the state of the 'errexit' shell option and
# disable it
#
function disable_exit_on_error() {
    store_exit_on_error
    set +e
}
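
# For illustration, a minimal sketch of the intended calling pattern
# (the command is hypothetical; see get_files() below for a real
# in-file user):
#
#	disable_exit_on_error
#	some_command_that_may_fail   # hypothetical command
#	ret=$?
#	restore_exit_on_error        # 'errexit' is back exactly as it was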
#
# get_files -- print a list of files in the current directory matching the
# given regex to stdout
#
# This function has been implemented to work around a race condition in
# `find`, which fails if any file disappears in the middle of the operation.
#
# example, to list all *.log files in the current directory:
#	get_files ".*\.log"
#
function get_files() {
    disable_exit_on_error
    ls -1 | grep -E "^$*$"
    restore_exit_on_error
}

#
# get_executables -- print a list of executable files in the current
# directory to stdout
#
# This function has been implemented to work around a race condition in
# `find`, which fails if any file disappears in the middle of the operation.
#
function get_executables() {
    disable_exit_on_error
    for c in *
    do
        if [ -f $c -a -x $c ]
        then
            echo "$c"
        fi
    done
    restore_exit_on_error
}
#
# convert_to_bytes -- convert a string with a K, M, G or T suffix
# to bytes
#
# example:
#	"1G" --> "1073741824"
#	"2T" --> "2199023255552"
#	"3k" --> "3072"
#	"1K" --> "1024"
#	"10" --> "10"
#
function convert_to_bytes() {
    size="$(echo $1 | tr '[:upper:]' '[:lower:]')"

    if [[ $size == *kib ]]
    then size=$(($(echo $size | tr -d 'kib') * 1024))
    elif [[ $size == *mib ]]
    then size=$(($(echo $size | tr -d 'mib') * 1024 * 1024))
    elif [[ $size == *gib ]]
    then size=$(($(echo $size | tr -d 'gib') * 1024 * 1024 * 1024))
    elif [[ $size == *tib ]]
    then size=$(($(echo $size | tr -d 'tib') * 1024 * 1024 * 1024 * 1024))
    elif [[ $size == *pib ]]
    then size=$(($(echo $size | tr -d 'pib') * 1024 * 1024 * 1024 * 1024 * 1024))
    elif [[ $size == *kb ]]
    then size=$(($(echo $size | tr -d 'kb') * 1000))
    elif [[ $size == *mb ]]
    then size=$(($(echo $size | tr -d 'mb') * 1000 * 1000))
    elif [[ $size == *gb ]]
    then size=$(($(echo $size | tr -d 'gb') * 1000 * 1000 * 1000))
    elif [[ $size == *tb ]]
    then size=$(($(echo $size | tr -d 'tb') * 1000 * 1000 * 1000 * 1000))
    elif [[ $size == *pb ]]
    then size=$(($(echo $size | tr -d 'pb') * 1000 * 1000 * 1000 * 1000 * 1000))
    elif [[ $size == *b ]]
    then size=$(($(echo $size | tr -d 'b')))
    elif [[ $size == *k ]]
    then size=$(($(echo $size | tr -d 'k') * 1024))
    elif [[ $size == *m ]]
    then size=$(($(echo $size | tr -d 'm') * 1024 * 1024))
    elif [[ $size == *g ]]
    then size=$(($(echo $size | tr -d 'g') * 1024 * 1024 * 1024))
    elif [[ $size == *t ]]
    then size=$(($(echo $size | tr -d 't') * 1024 * 1024 * 1024 * 1024))
    elif [[ $size == *p ]]
    then size=$(($(echo $size | tr -d 'p') * 1024 * 1024 * 1024 * 1024 * 1024))
    fi

    echo "$size"
}
#
# create_file -- create zeroed out files of a given length
#
# example, to create two files, each 1GB in size:
#	create_file 1G testfile1 testfile2
#
function create_file() {
    size=$(convert_to_bytes $1)
    shift
    for file in $*
    do
        $DD if=/dev/zero of=$file bs=1M count=$size iflag=count_bytes status=none >> $PREP_LOG_FILE
    done
}

#
# create_nonzeroed_file -- create non-zeroed files of a given length
#
# The first part of each file, of the given size, is zeroed out.
#
# example, to create two files, each 1GB in size, with the first 4K zeroed:
#	create_nonzeroed_file 1G 4K testfile1 testfile2
#
function create_nonzeroed_file() {
    offset=$(convert_to_bytes $2)
    size=$(($(convert_to_bytes $1) - $offset))
    shift 2
    for file in $*
    do
        truncate -s ${offset} $file >> $PREP_LOG_FILE
        $DD if=/dev/zero bs=1K count=${size} iflag=count_bytes 2>>$PREP_LOG_FILE | tr '\0' '\132' >> $file
    done
}

#
# create_holey_file -- create holey files of a given length
#
# examples:
#	create_holey_file 1024k testfile1 testfile2
#	create_holey_file 2048M testfile1 testfile2
#	create_holey_file 234 testfile1
#	create_holey_file 2340b testfile1
#
# The input unit size is in bytes with optional suffixes like k, KB, M, etc.
#
function create_holey_file() {
    size=$(convert_to_bytes $1)
    shift
    for file in $*
    do
        truncate -s ${size} $file >> $PREP_LOG_FILE
    done
}
#
# create_poolset -- create a dummy pool set
#
# Creates a pool set file using the provided list of part sizes and paths.
# Optionally, it also creates the selected part files (zeroed, partially
# zeroed or non-zeroed) with the requested size and mode. The actual file
# size may be different than the part size in the pool set file.
# 'r' or 'R' on the list of arguments indicates the beginning of the next
# replica set and 'm' or 'M' the beginning of the next remote replica set.
# 'o' or 'O' indicates the next argument is a pool set option.
# A remote replica requires two parameters: a target node and a pool set
# descriptor.
#
# Each part argument has the following format:
#	psize:ppath[:cmd[:fsize[:mode]]]
#
# where:
#	psize - part size or AUTO (only for DAX device)
#	ppath - path
#	cmd   - (optional) can be:
#		x - do nothing (may be skipped if there's no 'fsize', 'mode')
#		z - create zeroed (holey) file
#		n - create non-zeroed file
#		h - create non-zeroed file, but with zeroed header (first page)
#		d - create directory
#	fsize - (optional) the actual size of the part file (if 'cmd' is not 'x')
#	mode  - (optional) same format as for the 'chmod' command
#
# Each remote replica argument has the following format:
#	node:desc
#
# where:
#	node - target node
#	desc - pool set descriptor
#
# example:
# The following command defines a pool set consisting of two parts: 16MB
# and 32MB, a local replica with only one part of 48MB and a remote replica.
# The first part file is not created, the second is zeroed. The only replica
# part is non-zeroed. Also, the last file is read-only and its size
# does not match the information from the pool set file. The next to last
# line describes a remote replica. The SINGLEHDR poolset option is set, so
# only the first part in each replica contains a pool header. The remote
# poolset also has to have the SINGLEHDR option.
#
#	create_poolset ./pool.set 16M:testfile1 32M:testfile2:z \
#		R 48M:testfile3:n:11M:0400 \
#		M remote_node:remote_pool.set \
#		O SINGLEHDR
#
function create_poolset() {
    psfile=$1
    shift 1

    echo "PMEMPOOLSET" > $psfile

    while [ "$1" ]
    do
        if [ "$1" = "M" ] || [ "$1" = "m" ] # remote replica
        then
            shift 1

            cmd=$1
            shift 1

            # extract the last ":" separated segment as the descriptor
            # and everything before the last ":" as the node address;
            # this extraction method is compatible with IPv6 and IPv4
            node=${cmd%:*}
            desc=${cmd##*:}

            echo "REPLICA $node $desc" >> $psfile
            continue
        fi

        if [ "$1" = "R" ] || [ "$1" = "r" ]
        then
            echo "REPLICA" >> $psfile
            shift 1
            continue
        fi

        if [ "$1" = "O" ] || [ "$1" = "o" ]
        then
            echo "OPTION $2" >> $psfile
            shift 2
            continue
        fi

        cmd=$1
        fparms=(${cmd//:/ })
        shift 1

        fsize=${fparms[0]}
        fpath=${fparms[1]}
        cmd=${fparms[2]}
        asize=${fparms[3]}
        mode=${fparms[4]}

        if [ ! $asize ]; then
            asize=$fsize
        fi

        if [ "$asize" != "AUTO" ]; then
            asize=$(convert_to_bytes $asize)
        fi

        case "$cmd"
        in
        x)
            # do nothing
            ;;
        z)
            # zeroed (holey) file
            truncate -s $asize $fpath >> $PREP_LOG_FILE
            ;;
        n)
            # non-zeroed file
            $DD if=/dev/zero bs=$asize count=1 2>>$PREP_LOG_FILE | tr '\0' '\132' >> $fpath
            ;;
        h)
            # non-zeroed file, except for the page-sized header
            truncate -s $PAGE_SIZE $fpath >> $PREP_LOG_FILE
            $DD if=/dev/zero bs=$asize count=1 2>>$PREP_LOG_FILE | tr '\0' '\132' >> $fpath
            truncate -s $asize $fpath >> $PREP_LOG_FILE
            ;;
        d)
            mkdir -p $fpath
            ;;
        esac

        if [ $mode ]; then
            chmod $mode $fpath
        fi

        echo "$fsize $fpath" >> $psfile
    done
}

function dump_last_n_lines() {
    if [ "$1" != "" -a -f "$1" ]; then
        ln=`wc -l < $1`
        if [ $ln -gt $UT_DUMP_LINES ]; then
            echo -e "Last $UT_DUMP_LINES lines of $1 below (whole file has $ln lines)." >&2
            ln=$UT_DUMP_LINES
        else
            echo -e "$1 below." >&2
        fi
        paste -d " " <(yes $UNITTEST_NAME $1 | head -n $ln) <(tail -n $ln $1) >&2
        echo >&2
    fi
}
# https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=810295
# https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=780173
# https://bugs.kde.org/show_bug.cgi?id=303877
#
# valgrind issues an unsuppressible warning when exceeding
# the brk segment, causing matching failures. We can safely
# ignore it because malloc() will fall back to mmap() anyway.
function valgrind_ignore_warnings() {
    cat $1 | grep -v \
        -e "WARNING: Serious error when reading debug info" \
        -e "When reading debug info from " \
        -e "Ignoring non-Dwarf2/3/4 block in .debug_info" \
        -e "Last block truncated in .debug_info; ignoring" \
        -e "parse_CU_Header: is neither DWARF2 nor DWARF3 nor DWARF4" \
        -e "brk segment overflow" \
        -e "see section Limitations in user manual" \
        -e "Warning: set address range perms: large range" \
        -e "further instances of this message will not be shown" \
        -e "get_Form_contents: DW_FORM_GNU_strp_alt used, but no alternate .debug_str" \
        > $1.tmp
    mv $1.tmp $1
}

#
# valgrind_ignore_messages -- cut off Valgrind messages that are irrelevant
# to the correctness of the test but change across Valgrind rebases
# usage: valgrind_ignore_messages <log-file>
#
function valgrind_ignore_messages() {
    if [ -e "$1.match" ]; then
        cat $1 | grep -v \
            -e "For lists of detected and suppressed errors, rerun with: -s" \
            -e "For counts of detected and suppressed errors, rerun with: -v" \
            > $1.tmp
        mv $1.tmp $1
    fi
}
#
# get_trace -- return the tracing tool command line if applicable
#	usage: get_trace <check type> <log file> [<node>]
#
function get_trace() {
    if [ "$1" == "none" ]; then
        echo "$TRACE"
        return
    fi

    local exe=$VALGRINDEXE
    local check_type=$1
    local log_file=$2
    local opts="$VALGRIND_OPTS"
    local node=-1
    [ "$#" -eq 3 ] && node=$3

    if [ "$check_type" = "memcheck" -a "$MEMCHECK_DONT_CHECK_LEAKS" != "1" ]; then
        opts="$opts --leak-check=full"
    fi
    if [ "$check_type" = "pmemcheck" ]; then
        # Before Skylake, Intel CPUs did not have the clflushopt instruction,
        # so pmem_flush and pmem_persist both translated to clflush.
        # This means that a missing pmem_drain after pmem_flush could only
        # be detected on Skylake+ CPUs.
        # This option tells pmemcheck to expect a fence (sfence or the
        # VALGRIND_PMC_DO_FENCE client request, used by pmem_drain) after
        # clflush and makes pmemcheck output the same on pre-Skylake and
        # post-Skylake CPUs.
        opts="$opts --expect-fence-after-clflush=yes"
    fi

    opts="$opts $VALGRIND_SUPP"

    if [ "$node" -ne -1 ]; then
        exe=${NODE_VALGRINDEXE[$node]}
        opts="$opts"

        case "$check_type" in
        memcheck)
            opts="$opts --suppressions=../memcheck-libibverbs.supp"
            ;;
        helgrind)
            opts="$opts --suppressions=../helgrind-cxgb4.supp"
            opts="$opts --suppressions=../helgrind-libfabric.supp"
            ;;
        drd)
            opts="$opts --suppressions=../drd-libfabric.supp"
            ;;
        esac
    fi

    echo "$exe --tool=$check_type --log-file=$log_file $opts $TRACE"
    return
}

#
# validate_valgrind_log -- validate a Valgrind log
#	usage: validate_valgrind_log <log-file>
#
function validate_valgrind_log() {
    [ "$VALIDATE_VALGRIND_LOG" != "1" ] && return

    # fail if valgrind errors are found or
    # if overlapping chunks are detected
    if [ ! -e "$1.match" ] && grep \
        -e "ERROR SUMMARY: [^0]" \
        -e "Bad mempool" \
        $1 >/dev/null ;
    then
        msg=$(interactive_red STDERR "failed")
        echo -e "$UNITTEST_NAME $msg with Valgrind. See $1. Last 20 lines below." >&2
        paste -d " " <(yes $UNITTEST_NAME $1 | head -n 20) <(tail -n 20 $1) >&2
        false
    fi
}
#
# expect_normal_exit -- run a given command, expect it to exit 0
#
# if VALGRIND_DISABLED is not empty, the valgrind tool will be omitted
#
function expect_normal_exit() {
    local VALGRIND_LOG_FILE=${CHECK_TYPE}${UNITTEST_NUM}.log
    local N=$2

    # in case of a remote execution, disable the valgrind check if valgrind
    # is not enabled on the node
    local _CHECK_TYPE=$CHECK_TYPE
    if [ "x$VALGRIND_DISABLED" != "x" ]; then
        _CHECK_TYPE=none
    fi
    if [ "$1" == "run_on_node" -o "$1" == "run_on_node_background" ]; then
        if [ -z $(is_valgrind_enabled_on_node $N) ]; then
            _CHECK_TYPE="none"
        fi
    else
        N=-1
    fi

    if [ -n "$TRACE" ]; then
        case "$1"
        in
        *_on_node*)
            msg "$UNITTEST_NAME: SKIP: TRACE is not supported if the test is executed on remote nodes"
            exit 0
        esac
    fi

    local trace=$(get_trace $_CHECK_TYPE $VALGRIND_LOG_FILE $N)

    if [ "$MEMCHECK_DONT_CHECK_LEAKS" = "1" -a "$CHECK_TYPE" = "memcheck" ]; then
        export OLD_ASAN_OPTIONS="${ASAN_OPTIONS}"
        export ASAN_OPTIONS="detect_leaks=0 ${ASAN_OPTIONS}"
    fi

    if [ "$CHECK_TYPE" = "helgrind" ]; then
        export VALGRIND_OPTS="--suppressions=../helgrind-log.supp"
    fi

    if [ "$CHECK_TYPE" = "memcheck" ]; then
        export VALGRIND_OPTS="$VALGRIND_OPTS --suppressions=../memcheck-dlopen.supp"
    fi

    local REMOTE_VALGRIND_LOG=0
    if [ "$CHECK_TYPE" != "none" ]; then
        case "$1"
        in
        run_on_node)
            REMOTE_VALGRIND_LOG=1
            trace="$1 $2 $trace"
            [ $# -ge 2 ] && shift 2 || shift $#
            ;;
        run_on_node_background)
            trace="$1 $2 $3 $trace"
            [ $# -ge 3 ] && shift 3 || shift $#
            ;;
        wait_on_node|wait_on_node_port|kill_on_node)
            [ "$1" = "wait_on_node" ] && REMOTE_VALGRIND_LOG=1
            trace="$1 $2 $3 $4"
            [ $# -ge 4 ] && shift 4 || shift $#
            ;;
        esac
    fi

    if [ "$CHECK_TYPE" = "drd" ]; then
        export VALGRIND_OPTS="$VALGRIND_OPTS --suppressions=../drd-log.supp"
    fi

    disable_exit_on_error

    eval $ECHO $trace "$*"
    ret=$?

    if [ $REMOTE_VALGRIND_LOG -eq 1 ]; then
        for node in $CHECK_NODES
        do
            local new_log_file=node\_$node\_$VALGRIND_LOG_FILE
            copy_files_from_node $node "." ${NODE_TEST_DIR[$node]}/$VALGRIND_LOG_FILE
            mv $VALGRIND_LOG_FILE $new_log_file
        done
    fi
    restore_exit_on_error

    if [ "$ret" -ne "0" ]; then
        if [ "$ret" -gt "128" ]; then
            msg="crashed (signal $(($ret - 128)))"
        else
            msg="failed with exit code $ret"
        fi
        msg=$(interactive_red STDERR $msg)

        if [ -f $ERR_LOG_FILE ]; then
            if [ "$UNITTEST_LOG_LEVEL" -ge "1" ]; then
                echo -e "$UNITTEST_NAME $msg. $ERR_LOG_FILE below." >&2
                cat $ERR_LOG_FILE >&2
            else
                echo -e "$UNITTEST_NAME $msg. $ERR_LOG_FILE above." >&2
            fi
        else
            echo -e "$UNITTEST_NAME $msg." >&2
        fi

        # ignore Ctrl-C
        if [ $ret != 130 ]; then
            for f in $(get_files ".*[a-zA-Z_]${UNITTEST_NUM}\.log"); do
                dump_last_n_lines $f
            done
        fi

        [ $NODES_MAX -ge 0 ] && clean_all_remote_nodes

        false
    fi

    if [ "$CHECK_TYPE" != "none" ]; then
        if [ $REMOTE_VALGRIND_LOG -eq 1 ]; then
            for node in $CHECK_NODES
            do
                local new_log_file=node\_$node\_$VALGRIND_LOG_FILE
                valgrind_ignore_warnings $new_log_file
                valgrind_ignore_messages $new_log_file
                validate_valgrind_log $new_log_file
            done
        else
            if [ -f $VALGRIND_LOG_FILE ]; then
                valgrind_ignore_warnings $VALGRIND_LOG_FILE
                valgrind_ignore_messages $VALGRIND_LOG_FILE
                validate_valgrind_log $VALGRIND_LOG_FILE
            fi
        fi
    fi

    if [ "$MEMCHECK_DONT_CHECK_LEAKS" = "1" -a "$CHECK_TYPE" = "memcheck" ]; then
        export ASAN_OPTIONS="${OLD_ASAN_OPTIONS}"
    fi
}
#
# expect_abnormal_exit -- run a given command, expect it to exit non-zero
#
function expect_abnormal_exit() {
    if [ -n "$TRACE" ]; then
        case "$1"
        in
        *_on_node*)
            msg "$UNITTEST_NAME: SKIP: TRACE is not supported if the test is executed on remote nodes"
            exit 0
        esac
    fi

    if [ "$CHECK_TYPE" = "drd" ]; then
        export VALGRIND_OPTS="$VALGRIND_OPTS --suppressions=../drd-log.supp"
    fi

    disable_exit_on_error
    eval $ECHO ASAN_OPTIONS="detect_leaks=0 ${ASAN_OPTIONS}" $TRACE "$*"
    ret=$?
    restore_exit_on_error

    if [ "$ret" -eq "0" ]; then
        msg=$(interactive_red STDERR "succeeded")

        echo -e "$UNITTEST_NAME command $msg unexpectedly." >&2

        [ $NODES_MAX -ge 0 ] && clean_all_remote_nodes

        false
    fi
}

#
# check_pool -- run pmempool check on the specified pool file
#
function check_pool() {
    if [ "$CHECK_POOL" == "1" ]
    then
        if [ "$VERBOSE" != "0" ]
        then
            echo "$UNITTEST_NAME: checking consistency of pool ${1}"
        fi
        ${PMEMPOOL}.static-nondebug check $1 2>&1 1>>$CHECK_POOL_LOG_FILE
    fi
}

#
# check_pools -- run pmempool check on the specified pool files
#
function check_pools() {
    if [ "$CHECK_POOL" == "1" ]
    then
        for f in $*
        do
            check_pool $f
        done
    fi
}
#
# require_unlimited_vm -- require unlimited virtual memory
#
# This implies requirements for:
# - overcommit_memory enabled (/proc/sys/vm/overcommit_memory is 0 or 1)
# - unlimited virtual memory (ulimit -v is unlimited)
#
function require_unlimited_vm() {
$VM_OVERCOMMIT && [ $(ulimit -v) = "unlimited" ] && return
msg "$UNITTEST_NAME: SKIP required: overcommit_memory enabled and unlimited virtual memory"
exit 0
}
#
# require_linked_with_ndctl -- require an executable linked with libndctl
#
# usage: require_linked_with_ndctl <executable-file>
#
function require_linked_with_ndctl() {
[ "$1" == "" -o ! -x "$1" ] && \
fatal "$UNITTEST_NAME: ERROR: require_linked_with_ndctl() requires one argument - an executable file"
local lddndctl=$(ldd $1 | $GREP -ce "libndctl")
[ "$lddndctl" == "1" ] && return
msg "$UNITTEST_NAME: SKIP required: executable $1 linked with libndctl"
exit 0
}
#
# require_sudo_allowed -- require sudo command is allowed
#
function require_sudo_allowed() {
if [ "$ENABLE_SUDO_TESTS" != "y" ]; then
msg "$UNITTEST_NAME: SKIP: tests using 'sudo' are not enabled in testconfig.sh (ENABLE_SUDO_TESTS)"
exit 0
fi
if ! sh -c "timeout --signal=SIGKILL --kill-after=3s 3s sudo date" >/dev/null 2>&1
then
msg "$UNITTEST_NAME: SKIP required: sudo allowed"
exit 0
fi
}
#
# require_sudo_allowed_node -- require sudo command on a remote node
#
# usage: require_sudo_allowed_node <node-number>
#
function require_sudo_allowed_node() {
if [ "$ENABLE_SUDO_TESTS" != "y" ]; then
msg "$UNITTEST_NAME: SKIP: tests using 'sudo' are not enabled in testconfig.sh (ENABLE_SUDO_TESTS)"
exit 0
fi
if ! run_on_node $1 "timeout --signal=SIGKILL --kill-after=3s 3s sudo date" >/dev/null 2>&1
then
msg "$UNITTEST_NAME: SKIP required: sudo allowed on node $1"
exit 0
fi
}
#
# require_no_superuser -- require user without superuser rights
#
function require_no_superuser() {
local user_id=$(id -u)
[ "$user_id" != "0" ] && return
msg "$UNITTEST_NAME: SKIP required: run without superuser rights"
exit 0
}
#
# require_no_freebsd -- Skip test on FreeBSD
#
function require_no_freebsd() {
[ "$(uname -s)" != "FreeBSD" ] && return
msg "$UNITTEST_NAME: SKIP: Not supported on FreeBSD"
exit 0
}
#
# require_procfs -- Skip test if /proc is not mounted
#
function require_procfs() {
mount | grep -q "/proc" && return
msg "$UNITTEST_NAME: SKIP: /proc not mounted"
exit 0
}
#
# require_arch -- Skip tests if the running platform does not match
# any of the input list.
#
function require_arch() {
for i in "$@"; do
[[ "$(uname -m)" == "$i" ]] && return
done
msg "$UNITTEST_NAME: SKIP: Only supported on $1"
exit 0
}
#
# exclude_arch -- Skip tests if the running platform matches
# any of the input list.
#
function exclude_arch() {
for i in "$@"; do
if [[ "$(uname -m)" == "$i" ]]; then
msg "$UNITTEST_NAME: SKIP: Not supported on $1"
exit 0
fi
done
}
#
# require_x86_64 -- Skip tests if the running platform is not x86_64
#
function require_x86_64() {
require_arch x86_64
}
#
# require_ppc64 -- Skip tests if the running platform is not ppc64 or ppc64le
#
function require_ppc64() {
require_arch "ppc64" "ppc64le" "ppc64el"
}
#
# exclude_ppc64 -- Skip tests if the running platform is ppc64 or ppc64le
#
function exclude_ppc64() {
exclude_arch "ppc64" "ppc64le" "ppc64el"
}
#
# require_test_type -- only allow script to continue for a certain test type
#
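# example, run this test only for the medium and long test types:
#	require_test_type medium long
#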
function require_test_type() {
req_test_type=1
for type in $*
do
case "$TEST"
in
all)
# "all" is a synonym of "short + medium + long"
return
;;
check)
# "check" is a synonym of "short + medium"
[ "$type" = "short" -o "$type" = "medium" ] && return
;;
*)
[ "$type" = "$TEST" ] && return
;;
esac
done
verbose_msg "$UNITTEST_NAME: SKIP test-type $TEST ($* required)"
exit 0
}
#
# require_dev_dax_region -- check if the region id file exists for dev dax
#
function require_dev_dax_region() {
local prefix="$UNITTEST_NAME: SKIP"
local cmd="$PMEMDETECT -r"
for path in ${DEVICE_DAX_PATH[@]}
do
disable_exit_on_error
out=$($cmd $path 2>&1)
ret=$?
restore_exit_on_error
if [ "$ret" == "0" ]; then
continue
elif [ "$ret" == "1" ]; then
msg "$prefix $out"
exit 0
else
fatal "$UNITTEST_NAME: pmemdetect: $out"
fi
done
DEVDAX_TO_LOCK=1
}
#
# lock_devdax -- acquire a lock on Device DAXes
#
lock_devdax() {
exec {DEVDAX_LOCK_FD}> $DEVDAX_LOCK
flock $DEVDAX_LOCK_FD
}
#
# unlock_devdax -- release a lock on Device DAXes
#
unlock_devdax() {
flock -u $DEVDAX_LOCK_FD
eval "exec ${DEVDAX_LOCK_FD}>&-"
}
#
# require_dev_dax_node -- common function for require_dax_devices and
# node_require_dax_device
#
# usage: require_dev_dax_node <N devices> [<node>]
#
function require_dev_dax_node() {
req_dax_dev=1
if [ "$req_dax_dev_align" == "1" ]; then
fatal "$UNITTEST_NAME: Do not use 'require_(node_)dax_devices' and "
"'require_(node_)dax_device_alignments' together. Use the latter instead."
fi
local min=$1
local node=$2
if [ -n "$node" ]; then
local DIR=${NODE_WORKING_DIR[$node]}/$curtestdir
local prefix="$UNITTEST_NAME: SKIP NODE $node:"
local device_dax_path=(${NODE_DEVICE_DAX_PATH[$node]})
if [ ${#device_dax_path[@]} -lt $min ]; then
msg "$prefix NODE_${node}_DEVICE_DAX_PATH does not specify enough dax devices (min: $min)"
exit 0
fi
local cmd="ssh $SSH_OPTS ${NODE[$node]} cd $DIR && LD_LIBRARY_PATH=$REMOTE_LD_LIBRARY_PATH ../pmemdetect -d"
else
local prefix="$UNITTEST_NAME: SKIP"
if [ ${#DEVICE_DAX_PATH[@]} -lt $min ]; then
msg "$prefix DEVICE_DAX_PATH does not specify enough dax devices (min: $min)"
exit 0
fi
local device_dax_path=${DEVICE_DAX_PATH[@]}
local cmd="$PMEMDETECT -d"
fi
for path in ${device_dax_path[@]}
do
disable_exit_on_error
out=$($cmd $path 2>&1)
ret=$?
restore_exit_on_error
if [ "$ret" == "0" ]; then
continue
elif [ "$ret" == "1" ]; then
msg "$prefix $out"
exit 0
else
fatal "$UNITTEST_NAME: pmemdetect: $out"
fi
done
DEVDAX_TO_LOCK=1
}
#
# require_ndctl_enable -- check NDCTL_ENABLE value and skip test if set to 'n'
#
function require_ndctl_enable() {
if ! is_ndctl_enabled $PMEMPOOL$EXE &> /dev/null ; then
msg "$UNITTEST_NAME: SKIP: ndctl is disabled - binary not compiled with libndctl"
exit 0
fi
return 0
}
#
# require_dax_devices -- only allow script to continue if there is a required
# number of Device DAX devices and ndctl is available
#
function require_dax_devices() {
require_ndctl_enable
require_pkg libndctl "$NDCTL_MIN_VERSION"
REQUIRE_DAX_DEVICES=$1
require_dev_dax_node $1
}
#
# require_node_dax_device -- only allow script to continue if specified node
# has enough Device DAX devices defined in testconfig.sh
#
function require_node_dax_device() {
validate_node_number $1
require_dev_dax_node $2 $1
}
#
# require_no_unicode -- overwrite unicode suffix to empty string
#
function require_no_unicode() {
export SUFFIX=""
}
#
# get_node_devdax_path -- get path of a Device DAX device on a node
#
# usage: get_node_devdax_path <node> <device>
#
get_node_devdax_path() {
local node=$1
local device=$2
local device_dax_path=(${NODE_DEVICE_DAX_PATH[$node]})
echo ${device_dax_path[$device]}
}
#
# dax_device_zero -- zero all local dax devices
#
dax_device_zero() {
for path in ${DEVICE_DAX_PATH[@]}
do
${PMEMPOOL}.static-debug rm -f $path
done
}
#
# node_dax_device_zero -- zero all dax devices on a node
#
node_dax_device_zero() {
local node=$1
local DIR=${NODE_WORKING_DIR[$node]}/$curtestdir
local prefix="$UNITTEST_NAME: SKIP NODE $node:"
local device_dax_path=(${NODE_DEVICE_DAX_PATH[$node]})
local cmd="ssh $SSH_OPTS ${NODE[$node]} cd $DIR && LD_LIBRARY_PATH=$REMOTE_LD_LIBRARY_PATH ../pmempool rm -f"
for path in ${device_dax_path[@]}
do
disable_exit_on_error
out=$($cmd $path 2>&1)
ret=$?
restore_exit_on_error
if [ "$ret" == "0" ]; then
continue
elif [ "$ret" == "1" ]; then
msg "$prefix $out"
exit 0
else
fatal "$UNITTEST_NAME: pmempool rm: $out"
fi
done
}
#
# get_devdax_size -- get the size of a device dax
#
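# example, read the size (in bytes) of the first configured device dax:
#	DEV_SIZE=$(get_devdax_size 0)
#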
function get_devdax_size() {
local device=$1
local path=${DEVICE_DAX_PATH[$device]}
local major_hex=$(stat -c "%t" $path)
local minor_hex=$(stat -c "%T" $path)
local major_dec=$((16#$major_hex))
local minor_dec=$((16#$minor_hex))
cat /sys/dev/char/$major_dec:$minor_dec/size
}
#
# get_node_devdax_size -- get the size of a device dax on a node
#
function get_node_devdax_size() {
local node=$1
local device=$2
local device_dax_path=(${NODE_DEVICE_DAX_PATH[$node]})
local path=${device_dax_path[$device]}
local cmd_prefix="ssh $SSH_OPTS ${NODE[$node]} "
disable_exit_on_error
out=$($cmd_prefix stat -c %t $path 2>&1)
ret=$?
restore_exit_on_error
if [ "$ret" != "0" ]; then
fatal "$UNITTEST_NAME: stat on node $node: $out"
fi
local major=$((16#$out))
disable_exit_on_error
out=$($cmd_prefix stat -c %T $path 2>&1)
ret=$?
restore_exit_on_error
if [ "$ret" != "0" ]; then
fatal "$UNITTEST_NAME: stat on node $node: $out"
fi
local minor=$((16#$out))
disable_exit_on_error
out=$($cmd_prefix "cat /sys/dev/char/$major:$minor/size" 2>&1)
ret=$?
restore_exit_on_error
if [ "$ret" != "0" ]; then
fatal "$UNITTEST_NAME: stat on node $node: $out"
fi
echo $out
}
#
# require_node_dax_device_alignments -- only allow script to continue if
# the internal Device DAX alignments on a remote node are as specified.
# If necessary, it sorts DEVICE_DAX_PATH entries to match
# the requested alignment order.
#
# usage: require_node_dax_device_alignments <node> <alignment1> [ alignment2 ... ]
#
function require_node_dax_device_alignments() {
req_dax_dev_align=1
if [ "$req_dax_dev" == "$1" ]; then
fatal "$UNITTEST_NAME: Do not use 'require_(node_)dax_devices' and "
"'require_(node_)dax_device_alignments' together. Use the latter instead."
fi
local node=$1
shift
if [ "$node" == "-1" ]; then
local device_dax_path=(${DEVICE_DAX_PATH[@]})
local cmd="$PMEMDETECT -a"
else
local device_dax_path=(${NODE_DEVICE_DAX_PATH[$node]})
local DIR=${NODE_WORKING_DIR[$node]}/$curtestdir
local cmd="ssh $SSH_OPTS ${NODE[$node]} cd $DIR && LD_LIBRARY_PATH=$REMOTE_LD_LIBRARY_PATH ../pmemdetect -a"
fi
local cnt=${#device_dax_path[@]}
local j=0
for alignment in $*
do
for (( i=j; i<cnt; i++ ))
do
path=${device_dax_path[$i]}
disable_exit_on_error
out=$($cmd $alignment $path 2>&1)
ret=$?
restore_exit_on_error
if [ "$ret" == "0" ]; then
if [ $i -ne $j ]; then
# swap device paths
tmp=${device_dax_path[$j]}
device_dax_path[$j]=$path
device_dax_path[$i]=$tmp
if [ "$node" == "-1" ]; then
DEVICE_DAX_PATH=(${device_dax_path[@]})
else
NODE_DEVICE_DAX_PATH[$node]=${device_dax_path[@]}
fi
fi
break
fi
done
if [ $i -eq $cnt ]; then
if [ "$node" == "-1" ]; then
msg "$UNITTEST_NAME: SKIP DEVICE_DAX_PATH"\
"does not specify enough dax devices or they don't have required alignments (min: $#, alignments: $*)"
else
msg "$UNITTEST_NAME: SKIP NODE $node: NODE_${node}_DEVICE_DAX_PATH"\
"does not specify enough dax devices or they don't have required alignments (min: $#, alignments: $*)"
fi
exit 0
fi
j=$(( j + 1 ))
done
}
#
# require_dax_device_alignments -- only allow script to continue if
# the internal Device DAX alignments are as specified.
# If necessary, it sorts DEVICE_DAX_PATH entries to match
# the requested alignment order.
#
# usage: require_dax_device_alignments alignment1 [ alignment2 ... ]
#
require_dax_device_alignments() {
require_node_dax_device_alignments -1 $*
}
#
# disable_eatmydata -- ensure invalid msyncs fail
#
# Distros (and people) like to use eatmydata to kill fsync-likes during builds
# and testing. This is nice for speed, but we actually rely on msync failing
# in some tests.
#
disable_eatmydata() {
export LD_PRELOAD="${LD_PRELOAD/#libeatmydata.so/}"
export LD_PRELOAD="${LD_PRELOAD/ libeatmydata.so/}"
export LD_PRELOAD="${LD_PRELOAD/:libeatmydata.so/}"
}
#
# require_fs_type -- only allow script to continue for a certain fs type
#
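# example, allow both pmem and non-pmem file systems:
#	require_fs_type pmem non-pmem
#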
function require_fs_type() {
req_fs_type=1
for type in $*
do
# treat any as either pmem or non-pmem
[ "$type" = "$FS" ] ||
([ -n "${FORCE_FS:+x}" ] && [ "$type" = "any" ] &&
[ "$FS" != "none" ]) && return
done
verbose_msg "$UNITTEST_NAME: SKIP fs-type $FS ($* required)"
exit 0
}
#
# require_native_fallocate -- verify if filesystem supports fallocate
#
function require_native_fallocate() {
require_fs_type pmem non-pmem
set +e
$FALLOCATE_DETECT $1
status=$?
set -e
if [ $status -eq 1 ]; then
msg "$UNITTEST_NAME: SKIP: filesystem does not support fallocate"
exit 0
elif [ $status -ne 0 ]; then
msg "$UNITTEST_NAME: fallocate_detect failed"
exit 1
fi
}
#
# require_usc_permission -- verify if usc can be read with current permissions
#
function require_usc_permission() {
set +e
$USC_PERMISSION $1 2> $DIR/usc_permission.txt
status=$?
set -e
# check if there were any messages printed to stderr, skip test if there were
usc_stderr=$(cat $DIR/usc_permission.txt | wc -c)
rm -f $DIR/usc_permission.txt
if [ $status -eq 1 ] || [ $usc_stderr -ne 0 ]; then
msg "$UNITTEST_NAME: SKIP: missing permissions to read usc"
exit 0
elif [ $status -ne 0 ]; then
msg "$UNITTEST_NAME: usc_permission_check failed"
exit 1
fi
}
#
# require_fs_name -- verify if the $DIR is on the required file system
#
# Must be AFTER setup() because $DIR must exist
#
function require_fs_name() {
fsname=`df $DIR -PT | awk '{if (NR == 2) print $2}'`
for name in $*
do
if [ "$name" == "$fsname" ]; then
return
fi
done
msg "$UNITTEST_NAME: SKIP file system $fsname ($* required)"
exit 0
}
#
# require_build_type -- only allow script to continue for a certain build type
#
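# example, run this test only against the shared-library builds:
#	require_build_type debug nondebug
#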
function require_build_type() {
for type in $*
do
[ "$type" = "$BUILD" ] && return
done
verbose_msg "$UNITTEST_NAME: SKIP build-type $BUILD ($* required)"
exit 0
}
#
# require_command -- only allow script to continue if specified command exists
#
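# example (hypothetical command name), skip if ndctl is not installed:
#	require_command ndctl
#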
function require_command() {
if ! which $1 >/dev/null 2>&1; then
msg "$UNITTEST_NAME: SKIP: '$1' command required"
exit 0
fi
}
#
# require_command_node -- only allow script to continue if specified command exists on a remote node
#
# usage: require_command_node <node-number>
#
function require_command_node() {
if ! run_on_node $1 "which $2 >/dev/null 2>&1"; then
msg "$UNITTEST_NAME: SKIP: node $1: '$2' command required"
exit 0
fi
}
#
# require_kernel_module -- only allow script to continue if specified kernel module exists
#
# usage: require_kernel_module <module_name> [path_to_modinfo]
#
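# example (hypothetical module name), skip unless the nfit module is available:
#	require_kernel_module nfit
#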
function require_kernel_module() {
MODULE=$1
MODINFO=$2
if [ "$MODINFO" == "" ]; then
set +e
[ "$MODINFO" == "" ] && \
MODINFO=$(which modinfo 2>/dev/null)
set -e
[ "$MODINFO" == "" ] && \
[ -x /usr/sbin/modinfo ] && MODINFO=/usr/sbin/modinfo
[ "$MODINFO" == "" ] && \
[ -x /sbin/modinfo ] && MODINFO=/sbin/modinfo
[ "$MODINFO" == "" ] && \
msg "$UNITTEST_NAME: SKIP: modinfo command required" && \
exit 0
else
[ ! -x $MODINFO ] && \
msg "$UNITTEST_NAME: SKIP: modinfo command required" && \
exit 0
fi
$MODINFO -F name $MODULE &>/dev/null && true
if [ $? -ne 0 ]; then
msg "$UNITTEST_NAME: SKIP: '$MODULE' kernel module required"
exit 0
fi
}
#
# require_kernel_module_node -- only allow script to continue if specified kernel module exists on a remote node
#
# usage: require_kernel_module_node <node> <module_name> [path_to_modinfo]
#
function require_kernel_module_node() {
NODE_N=$1
MODULE=$2
MODINFO=$3
if [ "$MODINFO" == "" ]; then
set +e
[ "$MODINFO" == "" ] && \
MODINFO=$(run_on_node $NODE_N which modinfo 2>/dev/null)
set -e
[ "$MODINFO" == "" ] && \
run_on_node $NODE_N "test -x /usr/sbin/modinfo" && MODINFO=/usr/sbin/modinfo
[ "$MODINFO" == "" ] && \
run_on_node $NODE_N "test -x /sbin/modinfo" && MODINFO=/sbin/modinfo
[ "$MODINFO" == "" ] && \
msg "$UNITTEST_NAME: SKIP: node $NODE_N: modinfo command required" && \
exit 0
else
run_on_node $NODE_N "test ! -x $MODINFO" && \
msg "$UNITTEST_NAME: SKIP: node $NODE_N: modinfo command required" && \
exit 0
fi
run_on_node $NODE_N "$MODINFO -F name $MODULE &>/dev/null" && true
if [ $? -ne 0 ]; then
msg "$UNITTEST_NAME: SKIP: node $NODE_N: '$MODULE' kernel module required"
exit 0
fi
}
#
# require_pkg -- only allow script to continue if specified package exists
# usage: require_pkg <package name> [<package minimal version>]
#
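# example (hypothetical minimal version), skip unless libndctl >= 60.1 is installed:
#	require_pkg libndctl "60.1"
#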
function require_pkg() {
if ! command -v pkg-config 1>/dev/null
then
msg "$UNITTEST_NAME: SKIP pkg-config required"
exit 0
fi
local COMMAND="pkg-config $1"
local MSG="$UNITTEST_NAME: SKIP '$1' package"
if [ "$#" -eq "2" ]; then
COMMAND="$COMMAND --atleast-version $2"
MSG="$MSG (version >= $2)"
fi
MSG="$MSG required"
if ! $COMMAND
then
msg "$MSG"
exit 0
fi
}
#
# require_node_pkg -- only allow script to continue if specified package exists
# on specified node
# usage: require_node_pkg <node> <package name> [<package minimal version>]
#
function require_node_pkg() {
validate_node_number $1
local N=$1
shift
local DIR=${NODE_WORKING_DIR[$N]}/$curtestdir
local COMMAND="${NODE_ENV[$N]}"
if [ -n "${NODE_LD_LIBRARY_PATH[$N]}" ]; then
local PKG_CONFIG_PATH=${NODE_LD_LIBRARY_PATH[$N]//:/\/pkgconfig:}/pkgconfig
COMMAND="$COMMAND PKG_CONFIG_PATH=\$PKG_CONFIG_PATH:$PKG_CONFIG_PATH"
fi
COMMAND="$COMMAND PKG_CONFIG_PATH=$REMOTE_PKG_CONFIG_PATH"
COMMAND="$COMMAND pkg-config $1"
MSG="$UNITTEST_NAME: SKIP NODE $N: '$1' package"
if [ "$#" -eq "2" ]; then
COMMAND="$COMMAND --atleast-version $2"
MSG="$MSG (version >= $2)"
fi
MSG="$MSG required"
disable_exit_on_error
run_command ssh $SSH_OPTS ${NODE[$N]} "$COMMAND" 2>&1
ret=$?
restore_exit_on_error
if [ "$ret" == 1 ]; then
msg "$MSG"
exit 0
fi
}
#
# configure_valgrind -- only allow script to continue when settings match
#
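# example, force-enable pmemcheck for this test regardless of the RUNTESTS setting:
#	configure_valgrind pmemcheck force-enable
#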
function configure_valgrind() {
case "$1"
in
memcheck|pmemcheck|helgrind|drd|force-disable)
;;
*)
usage "bad test-type: $1"
;;
esac
if [ "$CHECK_TYPE" == "none" ]; then
if [ "$1" == "force-disable" ]; then
msg "$UNITTEST_NAME: all valgrind tests disabled"
elif [ "$2" = "force-enable" ]; then
CHECK_TYPE="$1"
require_valgrind_tool $1 $3
elif [ "$2" = "force-disable" ]; then
CHECK_TYPE=none
else
fatal "invalid parameter"
fi
else
if [ "$1" == "force-disable" ]; then
msg "$UNITTEST_NAME: SKIP RUNTESTS script parameter $CHECK_TYPE tries to enable valgrind test when all valgrind tests are disabled in TEST"
exit 0
elif [ "$CHECK_TYPE" != "$1" -a "$2" == "force-enable" ]; then
msg "$UNITTEST_NAME: SKIP RUNTESTS script parameter $CHECK_TYPE tries to enable different valgrind test than one defined in TEST"
exit 0
elif [ "$CHECK_TYPE" == "$1" -a "$2" == "force-disable" ]; then
msg "$UNITTEST_NAME: SKIP RUNTESTS script parameter $CHECK_TYPE tries to enable test defined in TEST as force-disable"
exit 0
fi
require_valgrind_tool $CHECK_TYPE $3
fi
if [ "$UT_VALGRIND_SKIP_PRINT_MISMATCHED" == 1 ]; then
export UT_SKIP_PRINT_MISMATCHED=1
fi
}
#
# valgrind_version_no_check -- returns Valgrind version without checking
# for valgrind first
#
function valgrind_version_no_check() {
$VALGRINDEXE --version | sed "s/valgrind-\([0-9]*\)\.\([0-9]*\).*/\1*100+\2/" | bc
}
#
# require_valgrind -- continue script execution only if
# valgrind package is installed
#
function require_valgrind() {
# bc is used inside valgrind_version_no_check
require_command bc
require_no_asan
disable_exit_on_error
VALGRINDEXE=`which valgrind 2>/dev/null`
local ret=$?
restore_exit_on_error
if [ $ret -ne 0 ]; then
msg "$UNITTEST_NAME: SKIP valgrind required"
exit 0
fi
[ $NODES_MAX -lt 0 ] && return;
if [ ! -z "$1" ]; then
available=$(valgrind_version_no_check)
required=`echo $1 | sed "s/\([0-9]*\)\.\([0-9]*\).*/\1*100+\2/" | bc`
if [ $available -lt $required ]; then
msg "$UNITTEST_NAME: SKIP valgrind required (ver $1 or later)"
exit 0
fi
fi
for N in $NODES_SEQ; do
if [ "${NODE_VALGRINDEXE[$N]}" = "" ]; then
disable_exit_on_error
NODE_VALGRINDEXE[$N]=$(ssh $SSH_OPTS ${NODE[$N]} "which valgrind 2>/dev/null")
ret=$?
restore_exit_on_error
if [ $ret -ne 0 ]; then
msg "$UNITTEST_NAME: SKIP valgrind required on remote node #$N"
exit 0
fi
fi
done
}
#
# valgrind_version -- returns Valgrind version
#
function valgrind_version() {
require_valgrind
valgrind_version_no_check
}
#
# require_valgrind_tool -- continue script execution only if valgrind with
# specified tool is installed
#
# usage: require_valgrind_tool <tool> [<binary>]
#
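# example (hypothetical binary name), skip unless the binary was built with
# pmemcheck support:
#	require_valgrind_tool pmemcheck ./obj_basic$EXE
#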
function require_valgrind_tool() {
require_valgrind
local tool=$1
local binary=$2
local dir=.
[ -d "$2" ] && dir="$2" && binary=
pushd "$dir" > /dev/null
[ -n "$binary" ] || binary=$(get_executables)
if [ -z "$binary" ]; then
fatal "require_valgrind_tool: error: no binary found"
fi
strings ${binary} 2>&1 | \
grep -q "compiled with support for Valgrind $tool" && true
if [ $? -ne 0 ]; then
msg "$UNITTEST_NAME: SKIP not compiled with support for Valgrind $tool"
exit 0
fi
if [ "$tool" == "helgrind" ]; then
valgrind --tool=$tool --help 2>&1 | \
grep -qi "$tool is Copyright (c)" && true
if [ $? -ne 0 ]; then
msg "$UNITTEST_NAME: SKIP Valgrind with $tool required"
exit 0;
fi
fi
if [ "$tool" == "pmemcheck" ]; then
out=`valgrind --tool=$tool --help 2>&1` && true
echo "$out" | grep -qi "$tool is Copyright (c)" && true
if [ $? -ne 0 ]; then
msg "$UNITTEST_NAME: SKIP Valgrind with $tool required"
exit 0;
fi
echo "$out" | grep -qi "expect-fence-after-clflush" && true
if [ $? -ne 0 ]; then
msg "$UNITTEST_NAME: SKIP pmemcheck does not support --expect-fence-after-clflush option. Please update it to the latest version."
exit 0;
fi
fi
popd > /dev/null
return 0
}
#
# set_valgrind_exe_name -- set the actual Valgrind executable name
#
# On some systems (Ubuntu), "valgrind" is a shell script that calls
# the actual executable "valgrind.bin".
# The wrapper script doesn't work well with LD_PRELOAD, so we want
# to call Valgrind directly.
#
function set_valgrind_exe_name() {
if [ "$VALGRINDEXE" = "" ]; then
fatal "set_valgrind_exe_name: error: valgrind is not set up"
fi
local VALGRINDDIR=`dirname $VALGRINDEXE`
if [ -x $VALGRINDDIR/valgrind.bin ]; then
VALGRINDEXE=$VALGRINDDIR/valgrind.bin
fi
[ $NODES_MAX -lt 0 ] && return;
for N in $NODES_SEQ; do
local COMMAND="\
[ -x $(dirname ${NODE_VALGRINDEXE[$N]})/valgrind.bin ] && \
echo $(dirname ${NODE_VALGRINDEXE[$N]})/valgrind.bin || \
echo ${NODE_VALGRINDEXE[$N]}"
NODE_VALGRINDEXE[$N]=$(ssh $SSH_OPTS ${NODE[$N]} $COMMAND)
if [ $? -ne 0 ]; then
fatal ${NODE_VALGRINDEXE[$N]}
fi
done
}
#
# require_no_asan_for - continue script execution only if passed binary does
# NOT require libasan
#
function require_no_asan_for() {
disable_exit_on_error
nm $1 | grep -q __asan_
ASAN_ENABLED=$?
restore_exit_on_error
if [ "$ASAN_ENABLED" == "0" ]; then
msg "$UNITTEST_NAME: SKIP: ASAN enabled"
exit 0
fi
}
#
# require_cxx11 -- continue script execution only if a compiler supporting
# C++11 is installed
#
function require_cxx11() {
[ "$CXX" ] || CXX=c++
CXX11_AVAILABLE=`echo "int main(){return 0;}" |\
$CXX -std=c++11 -x c++ -o /dev/null - 2>/dev/null &&\
echo y || echo n`
if [ "$CXX11_AVAILABLE" == "n" ]; then
msg "$UNITTEST_NAME: SKIP: C++11 required"
exit 0
fi
}
#
# require_no_asan - continue script execution only if libpmem does NOT require
# libasan
#
function require_no_asan() {
case "$BUILD"
in
debug)
require_no_asan_for ../../debug/libpmem.so
;;
nondebug)
require_no_asan_for ../../nondebug/libpmem.so
;;
static-debug)
require_no_asan_for ../../debug/libpmem.a
;;
static-nondebug)
require_no_asan_for ../../nondebug/libpmem.a
;;
esac
}
#
# require_tty - continue script execution only if standard output is a terminal
#
function require_tty() {
if ! tty >/dev/null; then
msg "$UNITTEST_NAME: SKIP no terminal"
exit 0
fi
}
#
# require_binary -- continue script execution only if the binary has been compiled
#
# In case of conditional compilation, skip this test.
#
function require_binary() {
if [ -z "$1" ]; then
fatal "require_binary: error: binary not provided"
fi
if [ ! -x "$1" ]; then
msg "$UNITTEST_NAME: SKIP no binary found"
exit 0
fi
return
}
#
# require_sds -- continue script execution only if binary is compiled with
# shutdown state support
#
# usage: require_sds <binary>
#
function require_sds() {
local binary=$1
local dir=.
if [ -z "$binary" ]; then
fatal "require_sds: error: no binary found"
fi
strings ${binary} 2>&1 | \
grep -q "compiled with support for shutdown state" && true
if [ $? -ne 0 ]; then
msg "$UNITTEST_NAME: SKIP not compiled with support for shutdown state"
exit 0
fi
return 0
}
#
# require_no_sds -- continue script execution only if binary is NOT compiled with
# shutdown state support
#
# usage: require_no_sds <binary>
#
function require_no_sds() {
local binary=$1
local dir=.
if [ -z "$binary" ]; then
fatal "require_sds: error: no binary found"
fi
set +e
found=$(strings ${binary} 2>&1 | \
grep -c "compiled with support for shutdown state")
set -e
if [ "$found" -ne "0" ]; then
msg "$UNITTEST_NAME: SKIP compiled with support for shutdown state"
exit 0
fi
return 0
}
#
# is_ndctl_enabled -- check if binary is compiled with libndctl
#
# usage: is_ndctl_enabled <binary>
#
function is_ndctl_enabled() {
local binary=$1
local dir=.
if [ -z "$binary" ]; then
fatal "is_ndctl_enabled: error: no binary found"
fi
strings ${binary} 2>&1 | \
grep -q "compiled with libndctl" && true
return $?
}
#
# require_bb_enabled_by_default -- check if the binary has bad block
# checking feature enabled by default
#
# usage: require_bb_enabled_by_default <binary>
#
function require_bb_enabled_by_default() {
if ! is_ndctl_enabled $1 &> /dev/null ; then
msg "$UNITTEST_NAME: SKIP bad block checking feature disabled by default"
exit 0
fi
return 0
}
#
# require_bb_disabled_by_default -- check if the binary does not have bad
# block checking feature enabled by default
#
# usage: require_bb_disabled_by_default <binary>
#
function require_bb_disabled_by_default() {
if is_ndctl_enabled $1 &> /dev/null ; then
msg "$UNITTEST_NAME: SKIP bad block checking feature enabled by default"
exit 0
fi
return 0
}
#
# check_absolute_path -- continue script execution only if $DIR path is
# an absolute path; do not resolve symlinks
#
function check_absolute_path() {
if [ "${DIR:0:1}" != "/" ]; then
fatal "Directory \$DIR has to be an absolute path. $DIR was given."
fi
}
#
# run_command -- run a command in a verbose or quiet way
#
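# example, prints "$ rm -f ..." before executing when VERBOSE is not 0:
#	run_command rm -f $DIR/testfile
#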
function run_command()
{
local COMMAND="$*"
if [ "$VERBOSE" != "0" ]; then
echo "$ $COMMAND"
$COMMAND
else
$COMMAND
fi
}
#
# validate_node_number -- validate a node number
#
function validate_node_number() {
[ $1 -gt $NODES_MAX ] \
&& fatal "error: node number ($1) greater than maximum allowed node number ($NODES_MAX)"
return 0
}
#
# clean_remote_node -- usage: clean_remote_node <node> <list-of-pid-files>
#
function clean_remote_node() {
validate_node_number $1
local N=$1
shift
local DIR=${NODE_WORKING_DIR[$N]}/$curtestdir
# register the list of PID files to be cleaned in case of an error
NODE_PID_FILES[$N]="${NODE_PID_FILES[$N]} $*"
# clean the remote node
disable_exit_on_error
for pidfile in ${NODE_PID_FILES[$N]}; do
require_ctrld_err $N $pidfile
run_command ssh $SSH_OPTS ${NODE[$N]} "\
cd $DIR && [ -f $pidfile ] && \
../ctrld $pidfile kill SIGINT && \
../ctrld $pidfile wait 1 ; \
rm -f $pidfile"
done;
restore_exit_on_error
return 0
}
#
# clean_all_remote_nodes -- clean all remote nodes in case of an error
#
function clean_all_remote_nodes() {
msg "$UNITTEST_NAME: CLEAN (cleaning processes on remote nodes)"
local N=0
disable_exit_on_error
for N in $NODES_SEQ; do
local DIR=${NODE_WORKING_DIR[$N]}/$curtestdir
for pidfile in ${NODE_PID_FILES[$N]}; do
run_command ssh $SSH_OPTS ${NODE[$N]} "\
cd $DIR && [ -f $pidfile ] && \
../ctrld $pidfile kill SIGINT && \
../ctrld $pidfile wait 1 ; \
rm -f $pidfile"
done
done
restore_exit_on_error
return 0
}
#
# export_vars_node -- export specified variables on specified node
#
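# example (hypothetical variable), pass the local PMEM_LOG_LEVEL value to node 1:
#	PMEM_LOG_LEVEL=3
#	export_vars_node 1 PMEM_LOG_LEVEL
#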
function export_vars_node() {
local N=$1
shift
validate_node_number $N
for var in "$@"; do
NODE_ENV[$N]="${NODE_ENV[$N]} $var=${!var}"
done
}
#
# require_node_libfabric -- only allow script to continue if libfabric with
# optionally specified provider is available on
# specified node
# usage: require_nodes_libfabric <node> <provider> [<libfabric-version>]
#
function require_node_libfabric() {
validate_node_number $1
local N=$1
local provider=$2
# Minimal required version of libfabric.
# Keep in sync with requirements in src/common.inc.
local version=${3:-1.4.2}
require_pkg libfabric "$version"
# fi_info can be found in libfabric-bin
require_command fi_info
require_node_pkg $N libfabric "$version"
require_command_node $N fi_info
if [ "$RPMEM_PROVIDER" == "verbs" ]; then
if ! fi_info --list | grep -q verbs; then
msg "$UNITTEST_NAME: SKIP libfabric not compiled with verbs provider"
exit 0
fi
if ! run_on_node $N "fi_info --list | grep -q verbs"; then
msg "$UNITTEST_NAME: SKIP libfabric on node $N not compiled with verbs provider"
exit 0
fi
fi
local DIR=${NODE_WORKING_DIR[$N]}/$curtestdir
local COMMAND="$COMMAND ${NODE_ENV[$N]}"
COMMAND="$COMMAND LD_LIBRARY_PATH=${NODE_LD_LIBRARY_PATH[$N]}:$REMOTE_LD_LIBRARY_PATH"
COMMAND="$COMMAND ../fip ${NODE_ADDR[$N]} $provider"
disable_exit_on_error
fip_out=$(ssh $SSH_OPTS ${NODE[$N]} "cd $DIR && $COMMAND" 2>&1)
ret=$?
restore_exit_on_error
if [ "$ret" == "0" ]; then
return
elif [ "$ret" == "1" ]; then
msg "$UNITTEST_NAME: SKIP NODE $N: $fip_out"
exit 0
else
fatal "NODE $N: require_libfabric $provider: $fip_out"
fi
}
#
# check_if_node_is_reachable -- check if the $1 node is reachable
#
function check_if_node_is_reachable() {
disable_exit_on_error
run_command ssh $SSH_OPTS ${NODE[$1]} exit
local ret=$?
restore_exit_on_error
return $ret
}
#
# require_nodes -- only allow script to continue for a certain number
# of defined and reachable nodes
#
# Input arguments:
# NODE[] - (required) array of nodes' addresses
# NODE_WORKING_DIR[] - (required) array of nodes' working directories
#
function require_nodes() {
local N_NODES=${#NODE[@]}
local N=$1
[ -z "$N" ] \
&& fatal "require_nodes: missing reguired parameter: number of nodes"
# if it has already been called, return if the number of required nodes is not bigger than before
[ -n "$NODES_MAX" ] \
&& [ $(($N - 1)) -le $NODES_MAX ] && return
[ $N -gt $N_NODES ] \
&& msg "$UNITTEST_NAME: SKIP: requires $N node(s), but $N_NODES node(s) provided" \
&& exit 0
NODES_MAX=$(($N - 1))
NODES_SEQ=$(seq -s' ' 0 $NODES_MAX)
# check if all required nodes are reachable
for N in $NODES_SEQ; do
# validate node's address
[ "${NODE[$N]}" = "" ] \
&& msg "$UNITTEST_NAME: SKIP: address of node #$N is not provided" \
&& exit 0
# validate the working directory
[ "${NODE_WORKING_DIR[$N]}" = "" ] \
&& fatal "error: working directory for node #$N (${NODE[$N]}) is not provided"
# check if the node is reachable
check_if_node_is_reachable $N
[ $? -ne 0 ] \
&& fatal "error: node #$N (${NODE[$N]}) is unreachable"
# clear the list of PID files for each node
NODE_PID_FILES[$N]=""
NODE_TEST_DIR[$N]=${NODE_WORKING_DIR[$N]}/$curtestdir
NODE_DIR[$N]=${NODE_WORKING_DIR[$N]}/$curtestdir/data/
require_node_log_files $N $ERR_LOG_FILE $OUT_LOG_FILE $TRACE_LOG_FILE
if [ "$CHECK_TYPE" != "none" -a "${NODE_VALGRINDEXE[$N]}" = "" ]; then
disable_exit_on_error
NODE_VALGRINDEXE[$N]=$(ssh $SSH_OPTS ${NODE[$N]} "which valgrind 2>/dev/null")
local ret=$?
restore_exit_on_error
if [ $ret -ne 0 ]; then
msg "$UNITTEST_NAME: SKIP valgrind required on remote node #$N"
exit 0
fi
fi
done
# remove all log files of the current unit test from the required nodes
# and export the 'log' variables to these nodes
for N in $NODES_SEQ; do
for f in $(get_files "node_${N}.*${UNITTEST_NUM}\.log"); do
rm -f $f
done
export_vars_node $N $REMOTE_VARS
done
# register function to clean all remote nodes in case of an error or SIGINT
trap clean_all_remote_nodes ERR SIGINT
return 0
}
#
# check_files_on_node -- check if specified files exist on given node
#
function check_files_on_node() {
validate_node_number $1
local N=$1
shift
local REMOTE_DIR=${NODE_DIR[$N]}
run_command ssh $SSH_OPTS ${NODE[$N]} "for f in $*; do if [ ! -f $REMOTE_DIR/\$f ]; then echo \"Missing file \$f on node #$N\" 1>&2; exit 1; fi; done"
}
#
# check_no_files_on_node -- check if specified files do not exist on given node
#
function check_no_files_on_node() {
validate_node_number $1
local N=$1
shift
local REMOTE_DIR=${NODE_DIR[$N]}
run_command ssh $SSH_OPTS ${NODE[$N]} "for f in $*; do if [ -f $REMOTE_DIR/\$f ]; then echo \"Not deleted file \$f on node #$N\" 1>&2; exit 1; fi; done"
}
#
# copy_files_to_node -- copy all required files to the given remote node
# usage: copy_files_to_node <node> <destination dir> <file_1> [<file_2>] ...
#
function copy_files_to_node() {
validate_node_number $1
local N=$1
local DEST_DIR=$2
shift 2
[ $# -eq 0 ] &&\
fatal "error: copy_files_to_node(): no files provided"
# copy all required files
run_command scp $SCP_OPTS $@ ${NODE[$N]}:$DEST_DIR > /dev/null
return 0
}
#
# copy_files_from_node -- copy all required files from the given remote node
# usage: copy_files_from_node <node> <destination_dir> <file_1> [<file_2>] ...
#
function copy_files_from_node() {
validate_node_number $1
local N=$1
local DEST_DIR=$2
[ ! -d $DEST_DIR ] &&\
fatal "error: destination directory $DEST_DIR does not exist"
shift 2
[ $# -eq 0 ] &&\
fatal "error: copy_files_from_node(): no files provided"
# compress required files, copy and extract
local temp_file=node_${N}_temp_file.tar
files=""
dir_name=""
files=$(basename -a $@)
dir_name=$(dirname $1)
run_command ssh $SSH_OPTS ${NODE[$N]} "cd $dir_name && tar -czf $temp_file $files"
run_command scp $SCP_OPTS ${NODE[$N]}:$dir_name/$temp_file $DEST_DIR > /dev/null
cd $DEST_DIR \
&& tar -xzf $temp_file \
&& rm $temp_file \
&& cd - > /dev/null
return 0
}
#
# copy_log_files -- copy log files from remote node
#
function copy_log_files() {
local NODE_SCP_LOG_FILES[0]=""
for N in $NODES_SEQ; do
local DIR=${NODE_WORKING_DIR[$N]}/$curtestdir
for file in ${NODE_LOG_FILES[$N]}; do
NODE_SCP_LOG_FILES[$N]="${NODE_SCP_LOG_FILES[$N]} ${NODE[$N]}:$DIR/${file}"
done
[ "${NODE_SCP_LOG_FILES[$N]}" ] && run_command scp $SCP_OPTS ${NODE_SCP_LOG_FILES[$N]} . &>> $PREP_LOG_FILE
for file in ${NODE_LOG_FILES[$N]}; do
[ -f $file ] && mv $file node_${N}_${file}
done
done
}
#
# rm_files_from_node -- removes all listed files from the given remote node
# usage: rm_files_from_node <node> <file_1> [<file_2>] ...
#
function rm_files_from_node() {
validate_node_number $1
local N=$1
shift
[ $# -eq 0 ] &&\
fatal "error: rm_files_from_node(): no files provided"
run_command ssh $SSH_OPTS ${NODE[$N]} "rm -f $@"
return 0
}
#
# require_node_log_files -- store log files which must be copied from
# specified node on failure
#
function require_node_log_files() {
validate_node_number $1
local N=$1
shift
NODE_LOG_FILES[$N]="${NODE_LOG_FILES[$N]} $*"
}
#
# require_ctrld_err -- store ctrld's log files to copy from specified
# node on failure
#
function require_ctrld_err() {
local N=$1
local PID_FILE=$2
local DIR=${NODE_WORKING_DIR[$N]}/$curtestdir
for cmd in run wait kill wait_port; do
NODE_LOG_FILES[$N]="${NODE_LOG_FILES[$N]} $PID_FILE.$cmd.ctrld.log"
done
}
#
# run_on_node -- usage: run_on_node <node> <command>
#
# Run the <command> on the remote <node> and wait for it to finish.
# LD_LIBRARY_PATH for the n-th remote node can be provided
# in the array NODE_LD_LIBRARY_PATH[n]
#
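# example, list the data directory of the current test on node 0:
#	run_on_node 0 ls -l data
#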
function run_on_node() {
validate_node_number $1
local N=$1
shift
local DIR=${NODE_WORKING_DIR[$N]}/$curtestdir
local COMMAND="UNITTEST_NUM=$UNITTEST_NUM UNITTEST_NAME=$UNITTEST_NAME"
COMMAND="$COMMAND UNITTEST_LOG_LEVEL=1"
COMMAND="$COMMAND ${NODE_ENV[$N]}"
COMMAND="$COMMAND PATH=$REMOTE_PATH"
COMMAND="$COMMAND LD_LIBRARY_PATH=${NODE_LD_LIBRARY_PATH[$N]}:$REMOTE_LD_LIBRARY_PATH $*"
run_command ssh $SSH_OPTS ${NODE[$N]} "cd $DIR && $COMMAND"
ret=$?
if [ "$ret" -ne "0" ]; then
copy_log_files
fi
return $ret
}
#
# run_on_node_background -- usage:
# run_on_node_background <node> <pid-file> <command>
#
# Run the <command> in background on the remote <node>
# and create a <pid-file> for this process.
# LD_LIBRARY_PATH for the n-th remote node
# can be provided in the array NODE_LD_LIBRARY_PATH[n]
#
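# example (hypothetical server binary, pid file and port), a minimal sketch
# of the background workflow with the companion helpers below:
#	run_on_node_background 0 srv0.pid ./rpmem_srv$EXE
#	wait_on_node_port 0 srv0.pid 1234
#	kill_on_node 0 srv0.pid SIGINT
#	wait_on_node 0 srv0.pid 10
#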
function run_on_node_background() {
validate_node_number $1
local N=$1
local PID_FILE=$2
shift
shift
local DIR=${NODE_WORKING_DIR[$N]}/$curtestdir
local COMMAND="UNITTEST_NUM=$UNITTEST_NUM UNITTEST_NAME=$UNITTEST_NAME"
COMMAND="$COMMAND UNITTEST_LOG_LEVEL=1"
COMMAND="$COMMAND ${NODE_ENV[$N]}"
COMMAND="$COMMAND PATH=$REMOTE_PATH"
COMMAND="$COMMAND LD_LIBRARY_PATH=${NODE_LD_LIBRARY_PATH[$N]}:$REMOTE_LD_LIBRARY_PATH"
COMMAND="$COMMAND ../ctrld $PID_FILE run $RUNTEST_TIMEOUT $*"
# register the PID file to be cleaned in case of an error
NODE_PID_FILES[$N]="${NODE_PID_FILES[$N]} $PID_FILE"
run_command ssh $SSH_OPTS ${NODE[$N]} "cd $DIR && $COMMAND"
ret=$?
if [ "$ret" -ne "0" ]; then
copy_log_files
fi
return $ret
}
#
# wait_on_node -- usage: wait_on_node <node> <pid-file> [<timeout>]
#
# Wait until the process with the <pid-file> on the <node>
# exits or <timeout> expires.
#
function wait_on_node() {
validate_node_number $1
local N=$1
local PID_FILE=$2
local TIMEOUT=$3
local DIR=${NODE_WORKING_DIR[$N]}/$curtestdir
run_command ssh $SSH_OPTS ${NODE[$N]} "cd $DIR && ../ctrld $PID_FILE wait $TIMEOUT"
ret=$?
if [ "$ret" -ne "0" ]; then
copy_log_files
fi
return $ret
}
#
# wait_on_node_port -- usage: wait_on_node_port <node> <pid-file> <portno>
#
# Wait until the process with the <pid-file> on the <node>
# opens the port <portno>.
#
function wait_on_node_port() {
validate_node_number $1
local N=$1
local PID_FILE=$2
local PORTNO=$3
local DIR=${NODE_WORKING_DIR[$N]}/$curtestdir
run_command ssh $SSH_OPTS ${NODE[$N]} "cd $DIR && ../ctrld $PID_FILE wait_port $PORTNO"
ret=$?
if [ "$ret" -ne "0" ]; then
copy_log_files
fi
return $ret
}
#
# kill_on_node -- usage: kill_on_node <node> <pid-file> <signo>
#
# Send the <signo> signal to the process with the <pid-file>
# on the <node>.
#
function kill_on_node() {
validate_node_number $1
local N=$1
local PID_FILE=$2
local SIGNO=$3
local DIR=${NODE_WORKING_DIR[$N]}/$curtestdir
run_command ssh $SSH_OPTS ${NODE[$N]} "cd $DIR && ../ctrld $PID_FILE kill $SIGNO"
ret=$?
if [ "$ret" -ne "0" ]; then
copy_log_files
fi
return $ret
}
#
# obj_pool_desc_size -- returns the obj_pool_desc_size macro value
# in bytes which is two times the actual pagesize.
#
# This should be used to calculate the minimum zero size for pool
# creation on some tests.
#
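# example (hypothetical sizing), compute a pool size that accounts for the descriptor:
#	POOL_SIZE=$((8 * 1024 * 1024 + $(obj_pool_desc_size)))
#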
function obj_pool_desc_size() {
echo "$(expr $(getconf PAGESIZE) \* 2)"
}
#
# log_pool_desc_size -- returns the minimum size of pool header
# in bytes which is two times the actual pagesize.
#
# This should be used to calculate the minimum zero size for pool
# creation on some tests.
#
function log_pool_desc_size() {
echo "$(expr $(getconf PAGESIZE) \* 2)"
}
#
# blk_pool_desc_size -- returns the minimum size of pool header
# in bytes which is two times the actual pagesize.
#
# This should be used to calculate the minimum zero size for pool
# creation on some tests.
#
function blk_pool_desc_size() {
echo "$(expr $(getconf PAGESIZE) \* 2)"
}
#
# create_holey_file_on_node -- create holey files of a given length
# usage: create_holey_file_on_node <node> <size> <file>...
#
# example, to create two files, each 1GB in size on node 0:
# create_holey_file_on_node 0 1G testfile1 testfile2
#
# Input unit size is in bytes with optional suffixes like k, KB, M, etc.
#
function create_holey_file_on_node() {
validate_node_number $1
local N=$1
size=$(convert_to_bytes $2)
shift 2
for file in $*
do
run_on_node $N truncate -s ${size} $file >> $PREP_LOG_FILE
done
}
#
# require_mmap_under_valgrind -- only allow script to continue if mapping is
# possible under Valgrind with required length
# (sum of required DAX devices size).
# This function is being called internally in
# setup() function.
#
function require_mmap_under_valgrind() {
local FILE_MAX_DAX_DEVICES="../tools/anonymous_mmap/max_dax_devices"
if [ -z "$REQUIRE_DAX_DEVICES" ]; then
return
fi
if [ ! -f "$FILE_MAX_DAX_DEVICES" ]; then
fatal "$FILE_MAX_DAX_DEVICES not found. Run make test."
fi
if [ "$REQUIRE_DAX_DEVICES" -gt "$(< $FILE_MAX_DAX_DEVICES)" ]; then
msg "$UNITTEST_NAME: SKIP: anonymous mmap under Valgrind not possible for $REQUIRE_DAX_DEVICES DAX device(s)."
exit 0
fi
}
#
# setup -- print message that test setup is commencing
#
function setup() {
DIR=$DIR$SUFFIX
# write the test working directory to a temporary file
# so that the data location can be read after a test failure
if [ -f "$TEMP_LOC" ]; then
echo "$DIR" > $TEMP_LOC
fi
# test type must be explicitly specified
if [ "$req_test_type" != "1" ]; then
fatal "error: required test type is not specified"
fi
# fs type "none" must be explicitly enabled
if [ "$FS" = "none" -a "$req_fs_type" != "1" ]; then
exit 0
fi
# fs type "any" must be explicitly enabled
if [ "$FS" = "any" -a "$req_fs_type" != "1" ]; then
exit 0
fi
if [ "$CHECK_TYPE" != "none" ]; then
require_valgrind
# detect possible Valgrind mmap issues and skip uncertain tests
require_mmap_under_valgrind
export VALGRIND_LOG_FILE=$CHECK_TYPE${UNITTEST_NUM}.log
MCSTR="/$CHECK_TYPE"
else
MCSTR=""
fi
[ -n "$RPMEM_PROVIDER" ] && PROV="/$RPMEM_PROVIDER"
[ -n "$RPMEM_PM" ] && PM="/$RPMEM_PM"
msg "$UNITTEST_NAME: SETUP ($TEST/$REAL_FS/$BUILD$MCSTR$PROV$PM)"
for f in $(get_files ".*[a-zA-Z_]${UNITTEST_NUM}\.log"); do
rm -f $f
done
# $DIR has to be an absolute path
check_absolute_path
if [ "$FS" != "none" ]; then
if [ -d "$DIR" ]; then
rm $RM_ONEFS -rf -- $DIR
fi
mkdir -p $DIR
fi
if [ "$TM" = "1" ]; then
start_time=$($DATE +%s.%N)
fi
if [ "$DEVDAX_TO_LOCK" == 1 ]; then
lock_devdax
fi
export PMEMBLK_CONF="fallocate.at_create=0;"
export PMEMOBJ_CONF="fallocate.at_create=0;"
export PMEMLOG_CONF="fallocate.at_create=0;"
}
#
# check_log_empty -- if match file does not exist, assume log should be empty
#
function check_log_empty() {
if [ ! -f ${1}.match ] && [ $(get_size $1) -ne 0 ]; then
echo "unexpected output in $1"
dump_last_n_lines $1
exit 1
fi
}
#
# check_local -- check local test results (using .match files)
#
function check_local() {
if [ "$UT_SKIP_PRINT_MISMATCHED" == 1 ]; then
option=-q
fi
check_log_empty $ERR_LOG_FILE
FILES=$(get_files "[^0-9w]*${UNITTEST_NUM}\.log\.match")
if [ -n "$FILES" ]; then
../match $option $FILES
fi
}
#
# match -- execute match
#
function match() {
../match $@
}
#
# check -- check local or remote test results (using .match files)
#
function check() {
if [ $NODES_MAX -lt 0 ]; then
check_local
else
FILES=$(get_files "node_[0-9]+_[^0-9w]*${UNITTEST_NUM}\.log\.match")
local NODE_MATCH_FILES[0]=""
local NODE_SCP_MATCH_FILES[0]=""
for file in $FILES; do
local N=`echo $file | cut -d"_" -f2`
local DIR=${NODE_WORKING_DIR[$N]}/$curtestdir
local FILE=`echo $file | cut -d"_" -f3 | sed "s/\.match$//g"`
validate_node_number $N
NODE_MATCH_FILES[$N]="${NODE_MATCH_FILES[$N]} $FILE"
NODE_SCP_MATCH_FILES[$N]="${NODE_SCP_MATCH_FILES[$N]} ${NODE[$N]}:$DIR/$FILE"
done
for N in $NODES_SEQ; do
[ "${NODE_SCP_MATCH_FILES[$N]}" ] && run_command scp $SCP_OPTS ${NODE_SCP_MATCH_FILES[$N]} . > /dev/null
for file in ${NODE_MATCH_FILES[$N]}; do
mv $file node_${N}_${file}
done
done
if [ "$UT_SKIP_PRINT_MISMATCHED" == 1 ]; then
option=-q
fi
for N in $NODES_SEQ; do
check_log_empty node_${N}_${ERR_LOG_FILE}
done
if [ -n "$FILES" ]; then
match $option $FILES
fi
fi
# Move logs to build folder
LOG_DIR=logs/$TEST/$REAL_FS/$BUILD$MCSTR$PROV$PM
if [ ! -d $LOG_DIR ]; then mkdir --parents $LOG_DIR; fi
for f in $(get_files ".*[a-zA-Z_]${UNITTEST_NUM}\.log"); do
mv -f $f $LOG_DIR/$f
done
}
#
# pass -- print message that the test has passed
#
function pass() {
if [ "$DEVDAX_TO_LOCK" == 1 ]; then
unlock_devdax
fi
if [ "$TM" = "1" ]; then
end_time=$($DATE +%s.%N)
start_time_sec=$($DATE -d "0 $start_time sec" +%s)
end_time_sec=$($DATE -d "0 $end_time sec" +%s)
days=$(((end_time_sec - start_time_sec) / (24*3600)))
days=$(printf "%03d" $days)
tm=$($DATE -d "0 $end_time sec - $start_time sec" +%H:%M:%S.%N)
tm=$(echo "$days:$tm" | sed -e "s/^000://g" -e "s/^00://g" -e "s/^00://g" -e "s/\([0-9]*\)\.\([0-9][0-9][0-9]\).*/\1.\2/")
tm="\t\t\t[$tm s]"
else
tm=""
fi
msg=$(interactive_green STDOUT "PASS")
if [ "$UNITTEST_LOG_LEVEL" -ge 1 ]; then
echo -e "$UNITTEST_NAME: $msg$tm"
fi
if [ "$FS" != "none" ]; then
rm $RM_ONEFS -rf -- $DIR
fi
}
# Length of pool file's signature
SIG_LEN=8
# Offset and length of pmemobj layout
LAYOUT_OFFSET=$(getconf PAGE_SIZE)
LAYOUT_LEN=1024
# Length of arena's signature
ARENA_SIG_LEN=16
# Signature of BTT Arena
ARENA_SIG="BTT_ARENA_INFO"
# Offset to first arena
ARENA_OFF=$(($(getconf PAGE_SIZE) * 2))
#
# check_file -- check if file exists and print error message if not
#
check_file()
{
if [ ! -f $1 ]
then
fatal "Missing file: ${1}"
fi
}
#
# check_files -- check if files exist and print error message if not
#
check_files()
{
for file in $*
do
check_file $file
done
}
#
# check_no_file -- check if file has been deleted and print error message if not
#
check_no_file()
{
if [ -f $1 ]
then
fatal "Not deleted file: ${1}"
fi
}
#
# check_no_files -- check if files have been deleted and print error message if not
#
check_no_files()
{
for file in $*
do
check_no_file $file
done
}
#
# get_size -- return size of file (0 if file does not exist)
#
get_size()
{
if [ ! -f $1 ]; then
echo "0"
else
stat $STAT_SIZE $1
fi
}
#
# get_mode -- return mode of file
#
get_mode()
{
stat $STAT_MODE $1
}
#
# check_size -- validate file size
#
check_size()
{
local size=$1
local file=$2
local file_size=$(get_size $file)
if [[ $size != $file_size ]]
then
fatal "error: wrong size ${file_size} != ${size}"
fi
}
#
# check_mode -- validate file mode
#
check_mode()
{
local mode=$1
local file=$2
local file_mode=$(get_mode $file)
if [[ $mode != $file_mode ]]
then
fatal "error: wrong mode ${file_mode} != ${mode}"
fi
}
#
# check_signature -- check if file contains specified signature
#
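# example (hypothetical pool file), verify a pmemobj pool header signature:
#	check_signature PMEMOBJ $DIR/testfile
#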
check_signature()
{
local sig=$1
local file=$2
local file_sig=$($DD if=$file bs=1 count=$SIG_LEN 2>/dev/null | tr -d \\0)
if [[ $sig != $file_sig ]]
then
fatal "error: $file: signature doesn't match ${file_sig} != ${sig}"
fi
}
#
# check_signatures -- check if multiple files contain specified signature
#
check_signatures()
{
local sig=$1
shift 1
for file in $*
do
check_signature $sig $file
done
}
#
# check_layout -- check if pmemobj pool contains specified layout
#
check_layout()
{
local layout=$1
local file=$2
local file_layout=$($DD if=$file bs=1\
skip=$LAYOUT_OFFSET count=$LAYOUT_LEN 2>/dev/null | tr -d \\0)
if [[ $layout != $file_layout ]]
then
fatal "error: layout doesn't match ${file_layout} != ${layout}"
fi
}
#
# check_arena -- check if file contains specified arena signature
#
check_arena()
{
local file=$1
local sig=$($DD if=$file bs=1 skip=$ARENA_OFF count=$ARENA_SIG_LEN 2>/dev/null | tr -d \\0)
if [[ $sig != $ARENA_SIG ]]
then
fatal "error: can't find arena signature"
fi
}
#
# dump_pool_info -- dump selected pool metadata and/or user data
#
function dump_pool_info() {
# ignore selected header fields that differ by definition
${PMEMPOOL}.static-nondebug info $* | sed -e "/^UUID/,/^Checksum/d"
}
#
# compare_replicas -- check replicas consistency by comparing `pmempool info` output
#
function compare_replicas() {
disable_exit_on_error
diff <(dump_pool_info $1 $2) <(dump_pool_info $1 $3) -I "^path" -I "^size"
restore_exit_on_error
}
#
# get_node_dir -- returns node dir for current test
# usage: get_node_dir <node>
#
function get_node_dir() {
validate_node_number $1
echo ${NODE_WORKING_DIR[$1]}/$curtestdir
}
#
# init_rpmem_on_node -- prepare rpmem environment variables on node
# usage: init_rpmem_on_node <master-node> <slave-node-1> [<slave-node-2> ...]
#
# example:
# The following command initializes rpmem environment variables on node 1
# to perform replication to node 0, node 2 and node 3.
# Additionally:
# - on node 2 rpmemd pid will be stored in file.pid
# - on node 3 no pid file will be created (SKIP) and rpmemd will use
# file.conf config file
#
# init_rpmem_on_node 1 0 2:file.pid 3:SKIP:file.conf
#
function init_rpmem_on_node() {
local master=$1
shift
validate_node_number $master
case "$RPMEM_PM" in
APM|GPSPM)
;;
*)
msg "$UNITTEST_NAME: SKIP required: RPMEM_PM is invalid or empty"
exit 0
;;
esac
# Workaround for SIGSEGV in the infinipath-psm during abort
# The infinipath-psm registers a signal handler and does not unregister
# it when the rpmem handle is dlclosed. SIGABRT (potentially any other signal)
# would try to call the signal handler, which no longer exists after dlclose.
# The issue requires a fix in the infinipath-psm or the libfabric.
IPATH_NO_BACKTRACE=1
export_vars_node $master IPATH_NO_BACKTRACE
RPMEM_CMD=""
local SEPARATOR="|"
for slave in "$@"
do
slave=(${slave//:/ })
conf=${slave[2]}
pid=${slave[1]}
slave=${slave[0]}
validate_node_number $slave
local poolset_dir=${NODE_DIR[$slave]}
if [ -n "${RPMEM_POOLSET_DIR[$slave]}" ]; then
poolset_dir=${RPMEM_POOLSET_DIR[$slave]}
fi
local trace=
if [ -n "$(is_valgrind_enabled_on_node $slave)" ]; then
log_file=${CHECK_TYPE}${UNITTEST_NUM}.log
trace=$(get_trace $CHECK_TYPE $log_file $slave)
fi
if [ -n "$pid" -a "$pid" != "SKIP" ]; then
trace="$trace ../ctrld $pid exe"
fi
if [ -n "${UNITTEST_DO_NOT_CHECK_OPEN_FILES+x}" ]; then
export_vars_node $slave UNITTEST_DO_NOT_CHECK_OPEN_FILES
fi
if [ -n "${IPATH_NO_BACKTRACE+x}" ]; then
export_vars_node $slave IPATH_NO_BACKTRACE
fi
CMD="cd ${NODE_TEST_DIR[$slave]} && "
# Force pmem for APM. Otherwise in case of lack of a pmem rpmemd will
# silently fallback to GPSPM.
[ "$RPMEM_PM" == "APM" ] && CMD="$CMD PMEM_IS_PMEM_FORCE=1"
CMD="$CMD ${NODE_ENV[$slave]}"
CMD="$CMD PATH=$REMOTE_PATH"
CMD="$CMD LD_LIBRARY_PATH=${NODE_LD_LIBRARY_PATH[$slave]}:$REMOTE_LD_LIBRARY_PATH"
CMD="$CMD $trace ../rpmemd"
CMD="$CMD --log-file=$RPMEMD_LOG_FILE"
CMD="$CMD --log-level=$RPMEMD_LOG_LEVEL"
CMD="$CMD --poolset-dir=$poolset_dir"
if [ -n "$conf" ]; then
CMD="$CMD --config=$conf"
fi
if [ "$RPMEM_PM" == "APM" ]; then
CMD="$CMD --persist-apm"
fi
if [ "$RPMEM_CMD" ]; then
RPMEM_CMD="$RPMEM_CMD$SEPARATOR$CMD"
else
RPMEM_CMD=$CMD
fi
require_node_log_files $slave rpmemd$UNITTEST_NUM.log
done
RPMEM_CMD="\"$RPMEM_CMD\""
RPMEM_ENABLE_SOCKETS=0
RPMEM_ENABLE_VERBS=0
case "$RPMEM_PROVIDER" in
sockets)
RPMEM_ENABLE_SOCKETS=1
;;
verbs)
RPMEM_ENABLE_VERBS=1
;;
*)
msg "$UNITTEST_NAME: SKIP required: RPMEM_PROVIDER is invalid or empty"
exit 0
;;
esac
export_vars_node $master RPMEM_CMD
export_vars_node $master RPMEM_ENABLE_SOCKETS
export_vars_node $master RPMEM_ENABLE_VERBS
if [ -n "${UNITTEST_DO_NOT_CHECK_OPEN_FILES+x}" ]; then
export_vars_node $master UNITTEST_DO_NOT_CHECK_OPEN_FILES
fi
if [ -n "${PMEMOBJ_NLANES+x}" ]; then
export_vars_node $master PMEMOBJ_NLANES
fi
if [ -n "${RPMEM_MAX_NLANES+x}" ]; then
export_vars_node $master RPMEM_MAX_NLANES
fi
require_node_log_files $master rpmem$UNITTEST_NUM.log
require_node_log_files $master $PMEMOBJ_LOG_FILE
}
#
# init_valgrind_on_node -- prepare valgrind on nodes
# usage: init_valgrind_on_node <node list>
#
function init_valgrind_on_node() {
# When librpmem is preloaded libfabric does not close all opened files
# before list of opened files is checked.
local UNITTEST_DO_NOT_CHECK_OPEN_FILES=1
local LD_PRELOAD=../$BUILD/librpmem.so
CHECK_NODES=""
for node in "$@"
do
validate_node_number $node
export_vars_node $node LD_PRELOAD
export_vars_node $node UNITTEST_DO_NOT_CHECK_OPEN_FILES
CHECK_NODES="$CHECK_NODES $node"
done
}
#
# is_valgrind_enabled_on_node -- echo the node number if the node has
# initialized valgrind environment by calling
# init_valgrind_on_node
# usage: is_valgrind_enabled_on_node <node>
#
function is_valgrind_enabled_on_node() {
for node in $CHECK_NODES
do
if [ "$node" -eq "$1" ]; then
echo $1
return
fi
done
return
}
#
# pack_all_libs -- put all libraries and their links to one tarball
#
function pack_all_libs() {
local LIBS_TAR_DIR=$(pwd)/$1
cd $DIR_SRC
tar -cf $LIBS_TAR_DIR ./debug/*.so* ./nondebug/*.so*
cd - > /dev/null
}
#
# copy_common_to_remote_nodes -- copy common files to all remote nodes
#
function copy_common_to_remote_nodes() {
local NODES_ALL_MAX=$((${#NODE[@]} - 1))
local NODES_ALL_SEQ=$(seq -s' ' 0 $NODES_ALL_MAX)
DIR_SYNC=$1
if [ "$DIR_SYNC" != "" ]; then
[ ! -d $DIR_SYNC ] \
&& fatal "error: $DIR_SYNC does not exist or is not a directory"
fi
# add all libraries to the 'to-copy' list
local LIBS_TAR=libs.tar
pack_all_libs $LIBS_TAR
if [ "$DIR_SYNC" != "" -a "$(ls $DIR_SYNC)" != "" ]; then
FILES_COMMON_DIR="$DIR_SYNC/* $LIBS_TAR"
else
FILES_COMMON_DIR="$FILES_COMMON_DIR $LIBS_TAR"
fi
for N in $NODES_ALL_SEQ; do
# validate node's address
[ "${NODE[$N]}" = "" ] \
&& fatal "error: address of node #$N is not provided"
check_if_node_is_reachable $N
[ $? -ne 0 ] \
&& msg "warning: node #$N (${NODE[$N]}) is unreachable, skipping..." \
&& continue
# validate the working directory
[ "${NODE_WORKING_DIR[$N]}" = "" ] \
&& msg ": warning: working directory for node #$N (${NODE[$N]}) is not provided, skipping..." \
&& continue
# create the working dir if it does not exist
run_command ssh $SSH_OPTS ${NODE[$N]} "mkdir -p ${NODE_WORKING_DIR[$N]}"
# copy all common files
run_command scp $SCP_OPTS $FILES_COMMON_DIR ${NODE[$N]}:${NODE_WORKING_DIR[$N]} > /dev/null
# unpack libraries
run_command ssh $SSH_OPTS ${NODE[$N]} "cd ${NODE_WORKING_DIR[$N]} \
&& tar -xf $LIBS_TAR && rm -f $LIBS_TAR"
done
rm -f $LIBS_TAR
}
#
# copy_test_to_remote_nodes -- copy all unit test binaries to all remote nodes
#
function copy_test_to_remote_nodes() {
local NODES_ALL_MAX=$((${#NODE[@]} - 1))
local NODES_ALL_SEQ=$(seq -s' ' 0 $NODES_ALL_MAX)
for N in $NODES_ALL_SEQ; do
# validate node's address
[ "${NODE[$N]}" = "" ] \
&& fatal "error: address of node #$N is not provided"
check_if_node_is_reachable $N
[ $? -ne 0 ] \
&& msg "warning: node #$N (${NODE[$N]}) is unreachable, skipping..." \
&& continue
# validate the working directory
[ "${NODE_WORKING_DIR[$N]}" = "" ] \
&& msg ": warning: working directory for node #$N (${NODE[$N]}) is not provided, skipping..." \
&& continue
local DIR=${NODE_WORKING_DIR[$N]}/$curtestdir
# create a new test dir
run_command ssh $SSH_OPTS ${NODE[$N]} "rm -rf $DIR && mkdir -p $DIR"
# create the working data dir
run_command ssh $SSH_OPTS ${NODE[$N]} "mkdir -p \
${DIR}/data"
# copy all required files
[ $# -gt 0 ] && run_command scp $SCP_OPTS $* ${NODE[$N]}:$DIR > /dev/null
done
return 0
}
#
# enable_log_append -- turn on appending to the log files rather than truncating them
# It also removes all log files created by tests: out*.log, err*.log and trace*.log
#
function enable_log_append() {
rm -f $OUT_LOG_FILE
rm -f $ERR_LOG_FILE
rm -f $TRACE_LOG_FILE
export UNITTEST_LOG_APPEND=1
}
# clean data directory on all remote
# nodes if remote test failed
if [ "$CLEAN_FAILED_REMOTE" == "y" ]; then
NODES_ALL=$((${#NODE[@]} - 1))
MYPID=$$
for ((i=0;i<=$NODES_ALL;i++));
do
if [[ -z "${NODE_WORKING_DIR[$i]}" || -z "$curtestdir" ]]; then
echo "Invalid path to tests data: ${NODE_WORKING_DIR[$i]}/$curtestdir/data/"
exit 1
fi
N[$i]=${NODE_WORKING_DIR[$i]}/$curtestdir/data/
run_command ssh $SSH_OPTS ${NODE[$i]} "rm -rf ${N[$i]}; mkdir ${N[$i]}"
if [ $? -eq 0 ]; then
verbose_msg "Removed data from: ${NODE[$i]}:${N[$i]}"
fi
done
exit 0
fi
# calculate the minimum of two or more numbers
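# example: "minimum 4096 512 8192" prints 512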
minimum() {
local min=$1
shift
for val in $*; do
if [[ "$val" < "$min" ]]; then
min=$val
fi
done
echo $min
}
#
# count_lines - count number of lines that match pattern $1 in file $2
#
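# example, count pmemcheck ERROR summary lines in the Valgrind log:
#	errors=$(count_lines "ERROR SUMMARY" $VALGRIND_LOG_FILE)
#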
function count_lines() {
# grep returns 1 on no match
disable_exit_on_error
$GREP -ce "$1" $2
restore_exit_on_error
}
#
# get_pmemcheck_version() - return pmemcheck API major or minor version
# usage: get_pmemcheck_version <0|1>
#
function get_pmemcheck_version()
{
PMEMCHECK_VERSION=$($VALGRINDEXE --tool=pmemcheck true 2>&1 \
| head -n 1 | sed "s/.*-\([0-9.]*\),.*/\1/")
OIFS=$IFS
IFS="."
PMEMCHECK_MAJ_MIN=($PMEMCHECK_VERSION)
IFS=$OIFS
PMEMCHECK_VERSION_PART=${PMEMCHECK_MAJ_MIN[$1]}
echo "$PMEMCHECK_VERSION_PART"
}
#
# require_pmemcheck_version_ge - check if pmemcheck API
# version is greater or equal to required value
# usage: require_pmemcheck_version_ge <major> <minor> [binary]
#
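# example, skip unless pmemcheck implements API version 1.0 or newer:
#	require_pmemcheck_version_ge 1 0
#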
function require_pmemcheck_version_ge()
{
require_valgrind_tool pmemcheck $3
REQUIRE_MAJOR=$1
REQUIRE_MINOR=$2
PMEMCHECK_MAJOR=$(get_pmemcheck_version 0)
PMEMCHECK_MINOR=$(get_pmemcheck_version 1)
# compare MAJOR
if [ $PMEMCHECK_MAJOR -gt $REQUIRE_MAJOR ]; then
return 0
fi
# compare MINOR
if [ $PMEMCHECK_MAJOR -eq $REQUIRE_MAJOR ]; then
if [ $PMEMCHECK_MINOR -ge $REQUIRE_MINOR ]; then
return 0
fi
fi
msg "$UNITTEST_NAME: SKIP pmemcheck API version:" \
"$PMEMCHECK_MAJOR.$PMEMCHECK_MINOR" \
"is less than required" \
"$REQUIRE_MAJOR.$REQUIRE_MINOR"
exit 0
}
#
# require_pmemcheck_version_lt - check if pmemcheck API
# version is less than required value
# usage: require_pmemcheck_version_lt <major> <minor> [binary]
#
function require_pmemcheck_version_lt()
{
require_valgrind_tool pmemcheck $3
REQUIRE_MAJOR=$1
REQUIRE_MINOR=$2
PMEMCHECK_MAJOR=$(get_pmemcheck_version 0)
PMEMCHECK_MINOR=$(get_pmemcheck_version 1)
# compare MAJOR
if [ $PMEMCHECK_MAJOR -lt $REQUIRE_MAJOR ]; then
return 0
fi
# compare MINOR
if [ $PMEMCHECK_MAJOR -eq $REQUIRE_MAJOR ]; then
if [ $PMEMCHECK_MINOR -lt $REQUIRE_MINOR ]; then
return 0
fi
fi
msg "$UNITTEST_NAME: SKIP pmemcheck API version:" \
"$PMEMCHECK_MAJOR.$PMEMCHECK_MINOR" \
"is greater or equal than" \
"$REQUIRE_MAJOR.$REQUIRE_MINOR"
exit 0
}
#
# require_python_3 -- check if python3 is available
#
function require_python3()
{
if hash python3 &>/dev/null;
then
PYTHON_EXE=python3
else
PYTHON_EXE=python
fi
case "$($PYTHON_EXE --version 2>&1)" in
*" 3."*)
return
;;
*)
msg "$UNITTEST_NAME: SKIP: required python version 3"
exit 0
;;
esac
}
#
# require_pmreorder -- check all necessary conditions to run pmreorder
# usage: require_pmreorder [binary]
#
function require_pmreorder()
{
# python3 and valgrind are necessary
require_python3
# pmemcheck is required to generate store_log
configure_valgrind pmemcheck force-enable $1
# pmreorder tool does not support unicode yet
require_no_unicode
}
#
# pmreorder_run_tool -- run pmreorder with parameters and return exit status
#
# 1 - reorder engine type [nochecker|full|noreorder|partial|accumulative]
# 2 - marker-engine pairs in format: MARKER=ENGINE,MARKER1=ENGINE1 or
# config file in json format: { "MARKER":"ENGINE","MARKER1":"ENGINE1" }
# 3 - the path to the checker binary/library and remaining parameters which
# will be passed to the consistency checker binary.
# If you are using a library checker, prepend '-n funcname'
#
function pmreorder_run_tool()
{
rm -f pmreorder$UNITTEST_NUM.log
disable_exit_on_error
$PYTHON_EXE $PMREORDER \
-l store_log$UNITTEST_NUM.log \
-o pmreorder$UNITTEST_NUM.log \
-r $1 \
-x $2 \
-p "$3"
ret=$?
restore_exit_on_error
echo $ret
}
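#
# Example (illustrative; the engine, marker and binary names below are
# hypothetical):
# ret=$(pmreorder_run_tool full "PMREORDER_MARKER=partial" \
#	"./obj_checker$EXESUFFIX c testfile$UNITTEST_NUM")
#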
#
# pmreorder_expect_success -- run pmreorder with forwarded parameters,
# expect it to exit with zero
#
function pmreorder_expect_success()
{
ret=$(pmreorder_run_tool "$@" | tail -n1)
if [ "$ret" -ne "0" ]; then
msg=$(interactive_red STDERR "failed with exit code $ret")
# exit code 130 - script terminated by user (Control-C)
if [ "$ret" -ne "130" ]; then
echo -e "$UNITTEST_NAME $msg." >&2
dump_last_n_lines $PMREORDER_LOG_FILE
fi
false
fi
}
#
# pmreorder_expect_failure -- run pmreorder with forwarded parameters,
# expect it to exit non-zero
#
function pmreorder_expect_failure()
{
ret=$(pmreorder_run_tool "$@" | tail -n1)
if [ "$ret" -eq "0" ]; then
msg=$(interactive_red STDERR "succeeded")
echo -e "$UNITTEST_NAME command $msg unexpectedly." >&2
false
fi
}
#
# pmreorder_create_store_log -- run an application under pmemcheck and
# create a store log for pmreorder
#
# This function expects 2 parameters. They are in order:
# 1 - the pool file to be tested
# 2 - the application and necessary parameters to run pmemcheck logging
#
function pmreorder_create_store_log()
{
# copy the original file and perform store logging
cp $1 "$1.pmr"
rm -f store_log$UNITTEST_NUM.log
$VALGRINDEXE \
--tool=pmemcheck -q \
--log-stores=yes \
--print-summary=no \
--log-file=store_log$UNITTEST_NUM.log \
--log-stores-stacktraces=yes \
--log-stores-stacktraces-depth=2 \
--expect-fence-after-clflush=yes \
$2
# uncomment this line for debug purposes
# mv $1 "$1.bak"
mv "$1.pmr" $1
}
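#
# Typical flow (illustrative; "obj_example" and its 'g'/'c' arguments are
# hypothetical):
# pmreorder_create_store_log $DIR/testfile \
#	"./obj_example$EXESUFFIX g $DIR/testfile"
# pmreorder_expect_success full "PMREORDER_MARKER=full" \
#	"./obj_example$EXESUFFIX c $DIR/testfile"
#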
#
# require_free_space -- check if there is enough free space to run the test
# Example: checking if there is 1 GB of free space on disk:
# require_free_space 1G
#
function require_free_space() {
req_free_space=$(convert_to_bytes $1)
# actually require 5% or 8MB (whichever is higher) more, just in case
# the file system requires some space for its metadata
pct=$((5 * $req_free_space / 100))
abs=$(convert_to_bytes 8M)
if [ $pct -gt $abs ]; then
req_free_space=$(($req_free_space + $pct))
else
req_free_space=$(($req_free_space + $abs))
fi
output=$(df -k $DIR)
found=false
i=1
for elem in $(echo "$output" | head -1); do
if [ ${elem:0:5} == "Avail" ]; then
found=true
break
else
let "i+=1"
fi
done
if [ $found = true ]; then
row=$(echo "$output" | tail -1)
free_space=$(( $(echo $row | awk "{print \$$i}")*1024 ))
else
msg "$UNITTEST_NAME: SKIP: unable to check free space"
exit 0
fi
if [ $free_space -lt $req_free_space ]; then
msg "$UNITTEST_NAME: SKIP: not enough free space ($1 required)"
exit 0
fi
}
#
# require_max_devdax_size -- checks that the Device DAX size is smaller than requested
#
# usage: require_max_devdax_size <dev-dax-num> <max-size>
#
function require_max_devdax_size() {
cur_sz=$(get_devdax_size $1)
max_size=$2
if [ $cur_sz -ge $max_size ]; then
msg "$UNITTEST_NAME: SKIP: DevDAX $1 is too big for this test (max $2 required)"
exit 0
fi
}
#
# require_max_block_size -- checks that the block size is smaller than or equal to the requested value
#
# usage: require_max_block_size <file> <max-block-size>
#
function require_max_block_size() {
cur_sz=$(stat --file-system --format=%S $1)
max_size=$2
if [ $cur_sz -gt $max_size ]; then
msg "$UNITTEST_NAME: SKIP: block size of $1 is too big for this test (max $2 required)"
exit 0
fi
}
#
# require_badblock_tests_enabled - check if tests for bad block support are enabled
# Input arguments:
# 1) test device type
#
function require_badblock_tests_enabled() {
require_sudo_allowed
require_command ndctl
require_bb_enabled_by_default $PMEMPOOL$EXESUFFIX
if [ "$BADBLOCK_TEST_TYPE" == "nfit_test" ]; then
require_kernel_module nfit_test
# nfit_test dax device is created by the test and is
# used directly - no device dax path is needed to be provided by the
# user. Some tests though may use an additional filesystem for the
# pool replica - hence 'any' filesystem is required.
if [ $1 == "dax_device" ]; then
require_fs_type any
# nfit_test block device is created by the test and mounted on
# a filesystem of any type provided by the user
elif [ $1 == "block_device" ]; then
require_fs_type any
fi
elif [ "$BADBLOCK_TEST_TYPE" == "real_pmem" ]; then
if [ $1 == "dax_device" ]; then
require_fs_type any
require_dax_devices 1
require_binary $DAXIO$EXESUFFIX
elif [ $1 == "block_device" ]; then
require_fs_type pmem
fi
else
msg "$UNITTEST_NAME: SKIP: bad block tests are not enabled in testconfig.sh"
exit 0
fi
}
#
# require_badblock_tests_enabled_node - check if tests for bad block support are enabled
# on the given remote node
#
function require_badblock_tests_enabled_node() {
require_sudo_allowed_node $1
require_command_node $1 ndctl
require_bb_enabled_by_default $PMEMPOOL$EXESUFFIX
if [ "$BADBLOCK_TEST_TYPE" == "nfit_test" ]; then
require_kernel_module_node $1 nfit_test
elif [ "$BADBLOCK_TEST_TYPE" == "real_pmem" ]; then
:
else
msg "$UNITTEST_NAME: SKIP: bad block tests are not enabled in testconfig.sh"
exit 0
fi
}
#
# create_recovery_file - create bad block recovery file
#
# Usage: create_recovery_file <file> [<offset_1> <length_1> ...]
#
# Offsets and lengths are expressed in pages (units of PAGE_SIZE).
#
function create_recovery_file() {
[ $# -lt 1 ] && fatal "create_recovery_file(): not enough parameters: $*"
FILE=$1
shift
rm -f $FILE
while [ $# -ge 2 ]; do
OFFSET=$1
LENGTH=$2
shift 2
echo "$(($OFFSET * $PAGE_SIZE)) $(($LENGTH * $PAGE_SIZE))" >> $FILE
done
# write the finish flag
echo "0 0" >> $FILE
}
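#
# Example (illustrative offsets/lengths): mark pages 2-3 and page 10
# for recovery:
# create_recovery_file $DIR/recovery$UNITTEST_NUM.txt 2 2 10 1
#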
#
# zero_blocks - zero blocks in a file
#
# Usage: zero_blocks <file> <offset> <length>
#
# Offsets and lengths are expressed in pages (units of PAGE_SIZE).
#
function zero_blocks() {
[ $# -lt 3 ] && fatal "zero_blocks(): not enough parameters: $*"
FILE=$1
shift
while [ $# -ge 2 ]; do
OFFSET=$1
LENGTH=$2
shift 2
dd if=/dev/zero of=$FILE bs=$PAGE_SIZE seek=$OFFSET count=$LENGTH conv=notrunc status=none
done
}
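#
# Example (illustrative): zero out pages 2-3 of a hypothetical pool file:
# zero_blocks $DIR/testfile 2 2
#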
#
# turn_on_checking_bad_blocks -- set the compat_feature POOL_FEAT_CHECK_BAD_BLOCKS on
#
function turn_on_checking_bad_blocks()
{
FILE=$1
expect_normal_exit "$PMEMPOOL feature -e CHECK_BAD_BLOCKS $FILE &>> $PREP_LOG_FILE"
}
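#
# Example (illustrative): turn_on_checking_bad_blocks $DIR/pool.obj
#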
#
# turn_on_checking_bad_blocks_node -- set the compat_feature POOL_FEAT_CHECK_BAD_BLOCKS on
#
function turn_on_checking_bad_blocks_node()
{
FILE=$2
expect_normal_exit run_on_node $1 "../pmempool feature -e CHECK_BAD_BLOCKS $FILE &>> $PREP_LOG_FILE"
}
| 95,035 | 23.943832 | 153 |
sh
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/unittest/ut_pmem2_map.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* ut_pmem2_map.c -- utility helper functions for libpmem2 map tests
*/
#include <libpmem2.h>
#include "unittest.h"
#include "ut_pmem2_map.h"
#include "ut_pmem2_utils.h"
/*
* ut_pmem2_map -- creates a mapping (cannot fail -- asserts on error)
*/
void
ut_pmem2_map(const char *file, int line, const char *func,
struct pmem2_config *cfg, struct pmem2_source *src,
struct pmem2_map **map)
{
int ret = pmem2_map(cfg, src, map);
ut_pmem2_expect_return(file, line, func, ret, 0);
UT_ASSERTne(*map, NULL);
}
| 572 | 21.92 | 68 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/unittest/builds.py
|
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2019, Intel Corporation
#
"""Build context classes"""
import sys
import context as ctx
import futils
class Build(metaclass=ctx.CtxType):
"""Base and factory class for standard build classes"""
exesuffix = ''
def set_env_common(self):
if sys.platform == 'win32':
self.env = {'PATH': self.libdir}
else:
self.env = {'LD_LIBRARY_PATH': self.libdir}
@classmethod
def filter(cls, config, msg, tc):
req_builds, kwargs = ctx.get_requirement(tc, 'build', None)
builds = []
for b in ctx.filter_contexts(config.build, req_builds):
try:
builds.append(b(**kwargs))
except futils.Skip as s:
msg.print_verbose('{}: SKIP: {}'.format(tc, s))
return builds
class Debug(Build):
"""Set this context for a debug build"""
is_preferred = True
def __init__(self):
if sys.platform == 'win32':
self.exedir = futils.WIN_DEBUG_EXEDIR
self.libdir = futils.DEBUG_LIBDIR
self.set_env_common()
class Release(Build):
"""Set this context for a release build"""
is_preferred = True
def __init__(self):
if sys.platform == 'win32':
self.exedir = futils.WIN_RELEASE_EXEDIR
self.libdir = futils.RELEASE_LIBDIR
self.set_env_common()
# Build types not available on Windows
if sys.platform != 'win32':
class Static_Debug(Build):
"""Set this context for a static_debug build"""
def __init__(self):
self.exesuffix = '.static-debug'
self.libdir = futils.DEBUG_LIBDIR
class Static_Release(Build):
"""Set this context for a static_release build"""
def __init__(self):
self.exesuffix = '.static-nondebug'
self.libdir = futils.RELEASE_LIBDIR
def require_build(build, **kwargs):
def wrapped(tc):
builds = ctx.str_to_ctx_common(build, Build)
ctx.add_requirement(tc, 'build', builds, **kwargs)
return tc
return wrapped
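# Example usage (illustrative; the test class is hypothetical):
#
# @require_build('debug')
# class TEST0(t.Test):
#     test_type = t.Short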
| 2,104 | 24.987654 | 67 |
py
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/unittest/ut_pmem2_setup_integration.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* ut_pmem2_setup_integration.h -- libpmem2 setup functions using public API
* (for integration tests)
*/
#ifndef UT_PMEM2_SETUP_INTEGRATION_H
#define UT_PMEM2_SETUP_INTEGRATION_H 1
#include "ut_fh.h"
/* a prepare_config() that can't set wrong value */
#define PMEM2_PREPARE_CONFIG_INTEGRATION(cfg, src, fd, g) \
ut_pmem2_prepare_config_integration( \
__FILE__, __LINE__, __func__, cfg, src, fd, g)
void ut_pmem2_prepare_config_integration(const char *file, int line,
const char *func, struct pmem2_config **cfg, struct pmem2_source **src,
int fd, enum pmem2_granularity granularity);
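/*
* Example usage (illustrative, inside a test's main()):
*
* struct pmem2_config *cfg;
* struct pmem2_source *src;
* PMEM2_PREPARE_CONFIG_INTEGRATION(&cfg, &src, fd,
* PMEM2_GRANULARITY_PAGE);
*/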
#endif /* UT_PMEM2_SETUP_INTEGRATION_H */
| 728 | 29.375 | 76 |
h
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem_movnt_align/pmem_movnt_align.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* pmem_movnt_align.c -- unit test for functions with non-temporal stores
*
* usage: pmem_movnt_align [C|F|B|S] heavy=[0|1]
*
* C - pmem_memcpy_persist()
* B - pmem_memmove_persist() in backward direction
* F - pmem_memmove_persist() in forward direction
* S - pmem_memset_persist()
*/
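/*
* Example invocation (illustrative):
* ./pmem_movnt_align C 0
*/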
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include "libpmem.h"
#include "unittest.h"
#include "movnt_align_common.h"
#define N_BYTES (Ut_pagesize * 2)
static int Heavy;
static void *
pmem_memcpy_persist_wrapper(void *pmemdest, const void *src, size_t len,
unsigned flags)
{
(void) flags;
return pmem_memcpy_persist(pmemdest, src, len);
}
static void *
pmem_memcpy_nodrain_wrapper(void *pmemdest, const void *src, size_t len,
unsigned flags)
{
(void) flags;
return pmem_memcpy_nodrain(pmemdest, src, len);
}
static void *
pmem_memmove_persist_wrapper(void *pmemdest, const void *src, size_t len,
unsigned flags)
{
(void) flags;
return pmem_memmove_persist(pmemdest, src, len);
}
static void *
pmem_memmove_nodrain_wrapper(void *pmemdest, const void *src, size_t len,
unsigned flags)
{
(void) flags;
return pmem_memmove_nodrain(pmemdest, src, len);
}
static void *
pmem_memset_persist_wrapper(void *pmemdest, int c, size_t len, unsigned flags)
{
(void) flags;
return pmem_memset_persist(pmemdest, c, len);
}
static void *
pmem_memset_nodrain_wrapper(void *pmemdest, int c, size_t len, unsigned flags)
{
(void) flags;
return pmem_memset_nodrain(pmemdest, c, len);
}
static void
check_memmove_variants(size_t doff, size_t soff, size_t len)
{
check_memmove(doff, soff, len, pmem_memmove_persist_wrapper, 0);
if (!Heavy)
return;
check_memmove(doff, soff, len, pmem_memmove_nodrain_wrapper, 0);
for (int i = 0; i < ARRAY_SIZE(Flags); ++i)
check_memmove(doff, soff, len, pmem_memmove, Flags[i]);
}
static void
check_memcpy_variants(size_t doff, size_t soff, size_t len)
{
check_memcpy(doff, soff, len, pmem_memcpy_persist_wrapper, 0);
if (!Heavy)
return;
check_memcpy(doff, soff, len, pmem_memcpy_nodrain_wrapper, 0);
for (int i = 0; i < ARRAY_SIZE(Flags); ++i)
check_memcpy(doff, soff, len, pmem_memcpy, Flags[i]);
}
static void
check_memset_variants(size_t off, size_t len)
{
check_memset(off, len, pmem_memset_persist_wrapper, 0);
if (!Heavy)
return;
check_memset(off, len, pmem_memset_nodrain_wrapper, 0);
for (int i = 0; i < ARRAY_SIZE(Flags); ++i)
check_memset(off, len, pmem_memset, Flags[i]);
}
int
main(int argc, char *argv[])
{
if (argc != 3)
UT_FATAL("usage: %s type heavy=[0|1]", argv[0]);
char type = argv[1][0];
Heavy = argv[2][0] == '1';
const char *thr = os_getenv("PMEM_MOVNT_THRESHOLD");
const char *avx = os_getenv("PMEM_AVX");
const char *avx512f = os_getenv("PMEM_AVX512F");
START(argc, argv, "pmem_movnt_align %c %s %savx %savx512f", type,
thr ? thr : "default",
avx ? "" : "!",
avx512f ? "" : "!");
size_t page_size = Ut_pagesize;
size_t s;
switch (type) {
case 'C': /* memcpy */
/* mmap with guard pages */
Src = MMAP_ANON_ALIGNED(N_BYTES, 0);
Dst = MMAP_ANON_ALIGNED(N_BYTES, 0);
if (Src == NULL || Dst == NULL)
UT_FATAL("!mmap");
Scratch = MALLOC(N_BYTES);
/* check memcpy with 0 size */
check_memcpy_variants(0, 0, 0);
/* check memcpy with unaligned size */
for (s = 0; s < CACHELINE_SIZE; s++)
check_memcpy_variants(0, 0, N_BYTES - s);
/* check memcpy with unaligned begin */
for (s = 0; s < CACHELINE_SIZE; s++)
check_memcpy_variants(s, 0, N_BYTES - s);
/* check memcpy with unaligned begin and end */
for (s = 0; s < CACHELINE_SIZE; s++)
check_memcpy_variants(s, s, N_BYTES - 2 * s);
MUNMAP_ANON_ALIGNED(Src, N_BYTES);
MUNMAP_ANON_ALIGNED(Dst, N_BYTES);
FREE(Scratch);
break;
case 'B': /* memmove backward */
/* mmap with guard pages */
Src = MMAP_ANON_ALIGNED(2 * N_BYTES - page_size, 0);
Dst = Src + N_BYTES - page_size;
if (Src == NULL)
UT_FATAL("!mmap");
/* check memmove in backward direction with 0 size */
check_memmove_variants(0, 0, 0);
/* check memmove in backward direction with unaligned size */
for (s = 0; s < CACHELINE_SIZE; s++)
check_memmove_variants(0, 0, N_BYTES - s);
/* check memmove in backward direction with unaligned begin */
for (s = 0; s < CACHELINE_SIZE; s++)
check_memmove_variants(s, 0, N_BYTES - s);
/*
* check memmove in backward direction with unaligned begin
* and end
*/
for (s = 0; s < CACHELINE_SIZE; s++)
check_memmove_variants(s, s, N_BYTES - 2 * s);
MUNMAP_ANON_ALIGNED(Src, 2 * N_BYTES - page_size);
break;
case 'F': /* memmove forward */
/* mmap with guard pages */
Dst = MMAP_ANON_ALIGNED(2 * N_BYTES - page_size, 0);
Src = Dst + N_BYTES - page_size;
if (Src == NULL)
UT_FATAL("!mmap");
/* check memmove in forward direction with 0 size */
check_memmove_variants(0, 0, 0);
/* check memmove in forward direction with unaligned size */
for (s = 0; s < CACHELINE_SIZE; s++)
check_memmove_variants(0, 0, N_BYTES - s);
/* check memmove in forward direction with unaligned begin */
for (s = 0; s < CACHELINE_SIZE; s++)
check_memmove_variants(s, 0, N_BYTES - s);
/*
* check memmove in forward direction with unaligned begin
* and end
*/
for (s = 0; s < CACHELINE_SIZE; s++)
check_memmove_variants(s, s, N_BYTES - 2 * s);
MUNMAP_ANON_ALIGNED(Dst, 2 * N_BYTES - page_size);
break;
case 'S': /* memset */
/* mmap with guard pages */
Dst = MMAP_ANON_ALIGNED(N_BYTES, 0);
if (Dst == NULL)
UT_FATAL("!mmap");
Scratch = MALLOC(N_BYTES);
/* check memset with 0 size */
check_memset_variants(0, 0);
/* check memset with unaligned size */
for (s = 0; s < CACHELINE_SIZE; s++)
check_memset_variants(0, N_BYTES - s);
/* check memset with unaligned begin */
for (s = 0; s < CACHELINE_SIZE; s++)
check_memset_variants(s, N_BYTES - s);
/* check memset with unaligned begin and end */
for (s = 0; s < CACHELINE_SIZE; s++)
check_memset_variants(s, N_BYTES - 2 * s);
MUNMAP_ANON_ALIGNED(Dst, N_BYTES);
FREE(Scratch);
break;
default:
UT_FATAL("!wrong type of test");
break;
}
DONE(NULL);
}
| 6,229 | 23.92 | 78 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/libpmempool_sync/libpmempool_sync.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* libpmempool_sync -- a unittest for libpmempool sync.
*
*/
#include <stddef.h>
#include <unistd.h>
#include <stdlib.h>
#include <stdio.h>
#include "unittest.h"
int
main(int argc, char *argv[])
{
START(argc, argv, "libpmempool_sync");
if (argc != 3)
UT_FATAL("usage: %s poolset_file flags", argv[0]);
int ret = pmempool_sync(argv[1], (unsigned)strtoul(argv[2], NULL, 0));
if (ret)
UT_OUT("result: %d, errno: %d", ret, errno);
else
UT_OUT("result: %d", ret);
DONE(NULL);
}
| 581 | 18.4 | 71 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem2_granularity_detection/TESTS.py
|
#!../env.py
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2019-2020, Intel Corporation
#
import testframework as t
from testframework import granularity as g
import futils as f
@g.require_granularity(g.ANY)
class PMEM2_CONFIG(t.BaseTest):
test_type = t.Short
class TEST0(PMEM2_CONFIG):
"""test granularity detection with PMEM2_FORCE_GRANULARITY set to page"""
def run(self, ctx):
ctx.env['PMEM2_FORCE_GRANULARITY'] = "page"
ctx.exec(f.get_test_tool_path(ctx.build, "gran_detecto"),
'-p', ctx.testdir)
class TEST1(PMEM2_CONFIG):
"""test granularity detection with PMEM2_FORCE_GRANULARITY
set to cache_line"""
def run(self, ctx):
ctx.env['PMEM2_FORCE_GRANULARITY'] = "cache_line"
ctx.exec(f.get_test_tool_path(ctx.build, "gran_detecto"),
'-c', ctx.testdir)
class TEST2(PMEM2_CONFIG):
"""test granularity detection with PMEM2_FORCE_GRANULARITY set to byte"""
def run(self, ctx):
ctx.env['PMEM2_FORCE_GRANULARITY'] = "byte"
ctx.exec(f.get_test_tool_path(ctx.build, "gran_detecto"),
'-b', ctx.testdir)
class TEST3(PMEM2_CONFIG):
"""test granularity detection with PMEM2_FORCE_GRANULARITY
set to CaCHe_Line"""
def run(self, ctx):
ctx.env['PMEM2_FORCE_GRANULARITY'] = "CaCHe_Line"
ctx.exec(f.get_test_tool_path(ctx.build, "gran_detecto"),
'-c', ctx.testdir)
class TEST4(PMEM2_CONFIG):
"""test granularity detection with PMEM2_FORCE_GRANULARITY
set to CACHELINE"""
def run(self, ctx):
ctx.env['PMEM2_FORCE_GRANULARITY'] = "CACHELINE"
ctx.exec(f.get_test_tool_path(ctx.build, "gran_detecto"),
'-c', ctx.testdir)
| 1,744 | 29.086207 | 77 |
py
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_memblock/obj_memblock.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* obj_memblock.c -- unit test for memblock interface
*/
#include "memblock.h"
#include "memops.h"
#include "obj.h"
#include "unittest.h"
#include "heap.h"
#define NCHUNKS 10
static PMEMobjpool *pop;
FUNC_MOCK(operation_add_typed_entry, int, struct operation_context *ctx,
void *ptr, uint64_t value,
ulog_operation_type type, enum operation_log_type en_type)
FUNC_MOCK_RUN_DEFAULT {
uint64_t *pval = ptr;
switch (type) {
case ULOG_OPERATION_SET:
*pval = value;
break;
case ULOG_OPERATION_AND:
*pval &= value;
break;
case ULOG_OPERATION_OR:
*pval |= value;
break;
default:
UT_ASSERT(0);
}
return 0;
}
FUNC_MOCK_END
FUNC_MOCK(operation_add_entry, int, struct operation_context *ctx, void *ptr,
uint64_t value, ulog_operation_type type)
FUNC_MOCK_RUN_DEFAULT {
/* just call the mock above - the entry type doesn't matter */
return operation_add_typed_entry(ctx, ptr, value, type,
LOG_TRANSIENT);
}
FUNC_MOCK_END
static void
test_detect(void)
{
struct memory_block mhuge_used = { .chunk_id = 0, 0, 0, 0 };
struct memory_block mhuge_free = { .chunk_id = 1, 0, 0, 0 };
struct memory_block mrun = { .chunk_id = 2, 0, 0, 0 };
struct heap_layout *layout = pop->heap.layout;
layout->zone0.chunk_headers[0].size_idx = 1;
layout->zone0.chunk_headers[0].type = CHUNK_TYPE_USED;
layout->zone0.chunk_headers[1].size_idx = 1;
layout->zone0.chunk_headers[1].type = CHUNK_TYPE_FREE;
layout->zone0.chunk_headers[2].size_idx = 1;
layout->zone0.chunk_headers[2].type = CHUNK_TYPE_RUN;
memblock_rebuild_state(&pop->heap, &mhuge_used);
memblock_rebuild_state(&pop->heap, &mhuge_free);
memblock_rebuild_state(&pop->heap, &mrun);
UT_ASSERTeq(mhuge_used.type, MEMORY_BLOCK_HUGE);
UT_ASSERTeq(mhuge_free.type, MEMORY_BLOCK_HUGE);
UT_ASSERTeq(mrun.type, MEMORY_BLOCK_RUN);
}
static void
test_block_size(void)
{
struct memory_block mhuge = { .chunk_id = 0, 0, 0, 0 };
struct memory_block mrun = { .chunk_id = 1, 0, 0, 0 };
struct palloc_heap *heap = &pop->heap;
struct heap_layout *layout = heap->layout;
layout->zone0.chunk_headers[0].size_idx = 1;
layout->zone0.chunk_headers[0].type = CHUNK_TYPE_USED;
layout->zone0.chunk_headers[1].size_idx = 1;
layout->zone0.chunk_headers[1].type = CHUNK_TYPE_RUN;
struct chunk_run *run = (struct chunk_run *)
&layout->zone0.chunks[1];
run->hdr.block_size = 1234;
memblock_rebuild_state(&pop->heap, &mhuge);
memblock_rebuild_state(&pop->heap, &mrun);
UT_ASSERTne(mhuge.m_ops, NULL);
UT_ASSERTne(mrun.m_ops, NULL);
UT_ASSERTeq(mhuge.m_ops->block_size(&mhuge), CHUNKSIZE);
UT_ASSERTeq(mrun.m_ops->block_size(&mrun), 1234);
}
static void
test_prep_hdr(void)
{
struct memory_block mhuge_used = { .chunk_id = 0, 0, .size_idx = 1, 0 };
struct memory_block mhuge_free = { .chunk_id = 1, 0, .size_idx = 1, 0 };
struct memory_block mrun_used = { .chunk_id = 2, 0,
.size_idx = 4, .block_off = 0 };
struct memory_block mrun_free = { .chunk_id = 2, 0,
.size_idx = 4, .block_off = 4 };
struct memory_block mrun_large_used = { .chunk_id = 2, 0,
.size_idx = 64, .block_off = 64 };
struct memory_block mrun_large_free = { .chunk_id = 2, 0,
.size_idx = 64, .block_off = 128 };
struct palloc_heap *heap = &pop->heap;
struct heap_layout *layout = heap->layout;
layout->zone0.chunk_headers[0].size_idx = 1;
layout->zone0.chunk_headers[0].type = CHUNK_TYPE_USED;
layout->zone0.chunk_headers[1].size_idx = 1;
layout->zone0.chunk_headers[1].type = CHUNK_TYPE_FREE;
layout->zone0.chunk_headers[2].size_idx = 1;
layout->zone0.chunk_headers[2].type = CHUNK_TYPE_RUN;
struct chunk_run *run = (struct chunk_run *)&layout->zone0.chunks[2];
run->hdr.block_size = 128;
uint64_t *bitmap = (uint64_t *)run->content;
bitmap[0] = 0b1111;
bitmap[1] = ~0ULL;
bitmap[2] = 0ULL;
memblock_rebuild_state(heap, &mhuge_used);
memblock_rebuild_state(heap, &mhuge_free);
memblock_rebuild_state(heap, &mrun_used);
memblock_rebuild_state(heap, &mrun_free);
memblock_rebuild_state(heap, &mrun_large_used);
memblock_rebuild_state(heap, &mrun_large_free);
UT_ASSERTne(mhuge_used.m_ops, NULL);
mhuge_used.m_ops->prep_hdr(&mhuge_used, MEMBLOCK_FREE, NULL);
UT_ASSERTeq(layout->zone0.chunk_headers[0].type, CHUNK_TYPE_FREE);
mhuge_free.m_ops->prep_hdr(&mhuge_free, MEMBLOCK_ALLOCATED, NULL);
UT_ASSERTeq(layout->zone0.chunk_headers[1].type, CHUNK_TYPE_USED);
mrun_used.m_ops->prep_hdr(&mrun_used, MEMBLOCK_FREE, NULL);
UT_ASSERTeq(bitmap[0], 0ULL);
mrun_free.m_ops->prep_hdr(&mrun_free, MEMBLOCK_ALLOCATED, NULL);
UT_ASSERTeq(bitmap[0], 0b11110000);
mrun_large_used.m_ops->prep_hdr(&mrun_large_used, MEMBLOCK_FREE, NULL);
UT_ASSERTeq(bitmap[1], 0ULL);
mrun_large_free.m_ops->prep_hdr(&mrun_large_free,
MEMBLOCK_ALLOCATED, NULL);
UT_ASSERTeq(bitmap[2], ~0ULL);
}
static int
fake_persist(void *base, const void *addr, size_t size, unsigned flags)
{
return 0;
}
int
main(int argc, char *argv[])
{
START(argc, argv, "obj_memblock");
PMEMobjpool pool;
pop = &pool;
pop->heap.layout = ZALLOC(sizeof(struct heap_layout) +
NCHUNKS * sizeof(struct chunk));
pop->heap.p_ops.persist = fake_persist;
test_detect();
test_block_size();
test_prep_hdr();
FREE(pop->heap.layout);
DONE(NULL);
}
| 5,320 | 27.153439 | 77 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_memblock/mocks_windows.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016, Intel Corporation */
/*
* mocks_windows.h -- redefinitions of memops functions
*
* This file is Windows-specific.
*
* This file should be included (i.e. using Forced Include) by libpmemobj
* files, when compiled for the purpose of obj_memblock test.
* It would replace default implementation with mocked functions defined
* in obj_memblock.c.
*
* These defines could be also passed as preprocessor definitions.
*/
#ifndef WRAP_REAL
#define operation_add_typed_entry __wrap_operation_add_typed_entry
#define operation_add_entry __wrap_operation_add_entry
#endif
| 634 | 29.238095 | 73 |
h
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_sds/obj_sds.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2020, Intel Corporation */
/*
* util_sds.c -- unit test for shutdown status functions
*/
#include "unittest.h"
#include "shutdown_state.h"
#include <stdlib.h>
#include <libpmemobj.h>
static char **uids;
static size_t uids_size;
static size_t uid_it;
static uint64_t *uscs;
static size_t uscs_size;
static size_t usc_it;
int
main(int argc, char *argv[])
{
START(argc, argv, "obj_sds");
if (argc < 2)
UT_FATAL("usage: %s init fail file (uuid usc)...", argv[0]);
unsigned files = (unsigned)(argc - 3) / 2;
uids = MALLOC(files * sizeof(uids[0]));
uscs = MALLOC(files * sizeof(uscs[0]));
uids_size = files;
uscs_size = files;
int init = atoi(argv[1]);
int fail = atoi(argv[2]);
char *path = argv[3];
char **args = argv + 4;
for (unsigned i = 0; i < files; i++) {
uids[i] = args[i * 2];
uscs[i] = strtoull(args[i * 2 + 1], NULL, 0);
}
PMEMobjpool *pop;
if (init) {
if ((pop = pmemobj_create(path, "LAYOUT", 0, 0600)) == NULL) {
UT_FATAL("!%s: pmemobj_create", path);
}
#if !defined(_WIN32) && !NDCTL_ENABLED
pmemobj_close(pop);
pmempool_feature_enable(path, PMEMPOOL_FEAT_SHUTDOWN_STATE, 0);
if ((pop = pmemobj_open(path, "LAYOUT")) == NULL) {
UT_FATAL("!%s: pmemobj_open", path);
}
#endif
} else {
if ((pop = pmemobj_open(path, "LAYOUT")) == NULL) {
UT_FATAL("!%s: pmemobj_open", path);
}
}
if (!fail)
pmemobj_close(pop);
FREE(uids);
FREE(uscs);
if (fail)
exit(1);
DONE(NULL);
}
FUNC_MOCK(pmem2_source_device_id, int, const struct pmem2_source *src,
char *uid, size_t *len)
FUNC_MOCK_RUN_DEFAULT {
if (uid_it < uids_size) {
if (uid != NULL) {
strcpy(uid, uids[uid_it]);
uid_it++;
} else {
*len = strlen(uids[uid_it]) + 1;
}
} else {
return -1;
}
return 0;
}
FUNC_MOCK_END
FUNC_MOCK(pmem2_source_device_usc, int, const struct pmem2_source *src,
uint64_t *usc)
FUNC_MOCK_RUN_DEFAULT {
if (usc_it < uscs_size) {
*usc = uscs[usc_it];
usc_it++;
} else {
return -1;
}
return 0;
}
FUNC_MOCK_END
#ifdef _MSC_VER
/*
* Since libpmemobj is linked statically, we need to invoke its ctor/dtor.
*/
MSVC_CONSTR(libpmemobj_init)
MSVC_DESTR(libpmemobj_fini)
#endif
| 2,218 | 18.637168 | 74 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_sds/mocks_windows.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018-2020, Intel Corporation */
/*
* mocks_windows.h -- redefinitions of dimm functions
*/
#ifndef WRAP_REAL
#define pmem2_source_device_usc __wrap_pmem2_source_device_usc
#define pmem2_source_device_idU __wrap_pmem2_source_device_id
#endif
| 299 | 24 | 62 |
h
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmreorder_flushes/pmreorder_flushes.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019-2020, Intel Corporation */
/*
* pmreorder_flushes.c -- test for store reordering with flushes
* in different barriers
*
* usage: pmreorder_flushes g|c file log_file
*
* g - write data in a specific manner - some flushes
* of the stores are made in different barriers,
* c - check data consistency - stores should be applied only
* after flush - no matter in which barrier the flush will happen
*
*/
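/*
* Example invocation (illustrative):
* ./pmreorder_flushes c testfile flushes.log
*/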
#include "unittest.h"
#include "util.h"
#include "valgrind_internal.h"
#define STORE_SIZE 64
static FILE *fp;
struct stores_fields {
char A[STORE_SIZE];
char B[STORE_SIZE];
char C[STORE_SIZE];
char D[STORE_SIZE];
char E[STORE_SIZE];
};
/*
* write_consistent -- (internal) write data in a specific order
*/
static void
write_consistent(struct stores_fields *sf)
{
/*
* STORE (A)
* STORE (B)
* STORE (C)
*
* FLUSH (A, B) (no flush C)
* FENCE
*/
pmem_memset(&sf->A, -1, sizeof(sf->A), PMEM_F_MEM_NODRAIN);
pmem_memset(&sf->B, 2, sizeof(sf->B), PMEM_F_MEM_NODRAIN);
pmem_memset(&sf->C, 3, sizeof(sf->C), PMEM_F_MEM_NOFLUSH);
pmem_drain();
/*
* STORE (A)
* STORE (D)
*
* FLUSH (D) (no flush A, still no flush C)
* FENCE
*/
pmem_memset(sf->A, 1, sizeof(sf->A), PMEM_F_MEM_NOFLUSH);
pmem_memset(sf->D, 4, sizeof(sf->D), PMEM_F_MEM_NODRAIN);
pmem_drain();
/*
* There are two transitive stores now: A (which does not change
* its value) and C (which is modified).
*
* STORE (D)
* STORE (C)
*
* FLUSH (D) (still no flush A and C)
* FENCE
*/
pmem_memset(sf->D, 5, sizeof(sf->D), PMEM_F_MEM_NODRAIN);
pmem_memset(sf->C, 8, sizeof(sf->C), PMEM_F_MEM_NOFLUSH);
pmem_drain();
/*
* E is modified just to add additional step to the log.
* Values of A and C should still be -1, 2.
*
* STORE (E)
* FLUSH (E)
* FENCE
*/
pmem_memset(sf->E, 6, sizeof(sf->E), PMEM_F_MEM_NODRAIN);
pmem_drain();
/*
* FLUSH (A, C)
* FENCE
*/
pmem_flush(sf->A, sizeof(sf->A));
pmem_flush(sf->C, sizeof(sf->C));
pmem_drain();
}
/*
* check_consistency -- (internal) check if stores are made in proper manner
*/
static int
check_consistency(struct stores_fields *sf)
{
fprintf(fp, "A=%d B=%d C=%d D=%d E=%d\n",
sf->A[0], sf->B[0], sf->C[0], sf->D[0], sf->E[0]);
return 0;
}
int
main(int argc, char *argv[])
{
START(argc, argv, "pmreorder_flushes");
util_init();
if ((argc < 4) || (strchr("gc", argv[1][0]) == NULL) ||
argv[1][1] != '\0')
UT_FATAL("usage: %s g|c file log_file", argv[0]);
int fd = OPEN(argv[2], O_RDWR);
size_t size;
/* mmap and register in valgrind pmemcheck */
void *map = pmem_map_file(argv[2], 0, 0, 0, &size, NULL);
UT_ASSERTne(map, NULL);
struct stores_fields *sf = map;
char opt = argv[1][0];
/* clear the struct to get a consistent start state for writing */
if (strchr("g", opt))
pmem_memset_persist(sf, 0, sizeof(*sf));
switch (opt) {
case 'g':
write_consistent(sf);
break;
case 'c':
fp = os_fopen(argv[3], "a");
if (fp == NULL)
UT_FATAL("!fopen");
int ret;
ret = check_consistency(sf);
fclose(fp);
return ret;
default:
UT_FATAL("Unrecognized option %c", opt);
}
CLOSE(fd);
DONE(NULL);
}
| 3,207 | 20.105263 | 76 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/libpmempool_api_win/libpmempool_test_win.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017, Intel Corporation */
/*
* libpmempool_test_win -- test of libpmempool.
*
*/
#include <stddef.h>
#include <unistd.h>
#include <stdlib.h>
#include <stdio.h>
#include "unittest.h"
/*
* Exact copy of the struct pmempool_check_args from libpmempool 1.0 provided to
* test libpmempool against various pmempool_check_args structure versions.
*/
struct pmempool_check_args_1_0 {
const wchar_t *path;
const wchar_t *backup_path;
enum pmempool_pool_type pool_type;
int flags;
};
/*
* check_pool -- check given pool
*/
static void
check_pool(struct pmempool_check_argsW *args, size_t args_size)
{
const char *status2str[] = {
[PMEMPOOL_CHECK_RESULT_CONSISTENT] = "consistent",
[PMEMPOOL_CHECK_RESULT_NOT_CONSISTENT] = "not consistent",
[PMEMPOOL_CHECK_RESULT_REPAIRED] = "repaired",
[PMEMPOOL_CHECK_RESULT_CANNOT_REPAIR] = "cannot repair",
[PMEMPOOL_CHECK_RESULT_ERROR] = "fatal",
};
PMEMpoolcheck *ppc = pmempool_check_initW(args, args_size);
if (!ppc) {
char buff[UT_MAX_ERR_MSG];
ut_strerror(errno, buff, UT_MAX_ERR_MSG);
UT_OUT("Error: %s", buff);
return;
}
struct pmempool_check_statusW *status = NULL;
while ((status = pmempool_checkW(ppc)) != NULL) {
char *msg = ut_toUTF8(status->str.msg);
switch (status->type) {
case PMEMPOOL_CHECK_MSG_TYPE_ERROR:
UT_OUT("%s", msg);
break;
case PMEMPOOL_CHECK_MSG_TYPE_INFO:
UT_OUT("%s", msg);
break;
case PMEMPOOL_CHECK_MSG_TYPE_QUESTION:
UT_OUT("%s", msg);
status->str.answer = L"yes";
break;
default:
pmempool_check_end(ppc);
free(msg);
exit(EXIT_FAILURE);
}
free(msg);
}
enum pmempool_check_result ret = pmempool_check_end(ppc);
UT_OUT("status = %s", status2str[ret]);
}
/*
* print_usage -- print usage of program
*/
static void
print_usage(wchar_t *name)
{
UT_OUT("Usage: %S [-t <pool_type>] [-r <repair>] [-d <dry_run>] "
"[-y <always_yes>] [-f <flags>] [-a <advanced>] "
"[-b <backup_path>] <pool_path>", name);
}
/*
* set_flag -- parse the value and set the flag according to a obtained value
*/
static void
set_flag(const wchar_t *value, int *flags, int flag)
{
if (_wtoi(value) > 0)
*flags |= flag;
else
*flags &= ~flag;
}
int
wmain(int argc, wchar_t *argv[])
{
STARTW(argc, argv, "libpmempool_test_win");
struct pmempool_check_args_1_0 args = {
.path = NULL,
.backup_path = NULL,
.pool_type = PMEMPOOL_POOL_TYPE_LOG,
.flags = PMEMPOOL_CHECK_FORMAT_STR |
PMEMPOOL_CHECK_REPAIR | PMEMPOOL_CHECK_VERBOSE
};
size_t args_size = sizeof(struct pmempool_check_args_1_0);
for (int i = 1; i < argc - 1; i += 2) {
wchar_t *optarg = argv[i + 1];
if (wcscmp(L"-t", argv[i]) == 0) {
if (wcscmp(optarg, L"blk") == 0) {
args.pool_type = PMEMPOOL_POOL_TYPE_BLK;
} else if (wcscmp(optarg, L"log") == 0) {
args.pool_type = PMEMPOOL_POOL_TYPE_LOG;
} else if (wcscmp(optarg, L"obj") == 0) {
args.pool_type = PMEMPOOL_POOL_TYPE_OBJ;
} else if (wcscmp(optarg, L"btt") == 0) {
args.pool_type = PMEMPOOL_POOL_TYPE_BTT;
} else {
args.pool_type =
(uint32_t)wcstoul(optarg, NULL, 0);
}
} else if (wcscmp(L"-r", argv[i]) == 0) {
set_flag(optarg, &args.flags, PMEMPOOL_CHECK_REPAIR);
} else if (wcscmp(L"-d", argv[i]) == 0) {
set_flag(optarg, &args.flags, PMEMPOOL_CHECK_DRY_RUN);
} else if (wcscmp(L"-a", argv[i]) == 0) {
set_flag(optarg, &args.flags, PMEMPOOL_CHECK_ADVANCED);
} else if (wcscmp(L"-y", argv[i]) == 0) {
set_flag(optarg, &args.flags,
PMEMPOOL_CHECK_ALWAYS_YES);
} else if (wcscmp(L"-s", argv[i]) == 0) {
args_size = wcstoul(optarg, NULL, 0);
} else if (wcscmp(L"-b", argv[i]) == 0) {
args.backup_path = optarg;
} else {
print_usage(argv[0]);
UT_FATAL("unknown option: %c", argv[i][1]);
}
}
args.path = argv[argc - 1];
check_pool((struct pmempool_check_argsW *)&args, args_size);
DONEW(NULL);
}
| 3,912 | 24.743421 | 80 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem_is_pmem_windows/pmem_is_pmem_windows.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2020, Intel Corporation */
/*
* Copyright (c) 2015-2017, Microsoft Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* pmem_is_pmem_windows.c -- Windows specific unit test for is_pmem_detect()
*
* usage: pmem_is_pmem_windows file [env]
*/
#include "unittest.h"
#include "pmem.h"
#include "queue.h"
#include "win_mmap.h"
#include "util.h"
#define NTHREAD 16
static void *Addr;
static size_t Size;
static int pmem_is_pmem_force = 0;
enum test_mmap_scenarios {
TEST_MMAP_SCENARIO_UNKNOWN,
TEST_MMAP_SCENARIO_BEGIN_HOLE,
TEST_MMAP_SCENARIO_END_HOLE,
TEST_MMAP_SCENARIO_MIDDLE_HOLE,
TEST_MMAP_SCENARIO_NO_HOLE
};
static enum test_mmap_scenarios
get_mmap_scenarios(char *name)
{
if (stricmp(name, "nothing") == 0)
return TEST_MMAP_SCENARIO_NO_HOLE;
if (stricmp(name, "begin") == 0)
return TEST_MMAP_SCENARIO_BEGIN_HOLE;
if (stricmp(name, "end") == 0)
return TEST_MMAP_SCENARIO_END_HOLE;
if (stricmp(name, "middle") == 0)
return TEST_MMAP_SCENARIO_MIDDLE_HOLE;
return TEST_MMAP_SCENARIO_UNKNOWN;
}
/*
* mmap_file_mapping_comparer -- (internal) compares the two file mapping
* trackers
*/
static LONG_PTR
mmap_file_mapping_comparer(PFILE_MAPPING_TRACKER a, PFILE_MAPPING_TRACKER b)
{
return ((LONG_PTR)a->BaseAddress - (LONG_PTR)b->BaseAddress);
}
/*
* worker -- the work each thread performs
*/
static void *
worker(void *arg)
{
int *ret = (int *)arg;
/*
* We honor the force just to let the scenarios that require pmem fs
* work in the environment that forces pmem.
*
* NOTE: We can't use pmem_is_pmem instead of checking for the ENV
* variable explicitly, because we want to call is_pmem_detect that is
* defined in this test so that it will use the FileMappingQHead
* that's defined here. Because we are crafting the Q in the test.
*/
if (pmem_is_pmem_force)
*ret = 1;
else
*ret = is_pmem_detect(Addr, Size);
return NULL;
}
extern SRWLOCK FileMappingQLock;
extern struct FMLHead FileMappingQHead;
int
main(int argc, char *argv[])
{
HANDLE file_map;
SIZE_T chunk_length;
enum test_mmap_scenarios scenario;
int still_holey = 1;
int already_holey = 0;
START(argc, argv, "pmem_is_pmem_windows");
if (argc != 3)
UT_FATAL("usage: %s file {begin|end|middle|nothing}", argv[0]);
util_init(); /* to initialize Mmap_align */
char *str_pmem_is_pmem_force = os_getenv("PMEM_IS_PMEM_FORCE");
if (str_pmem_is_pmem_force && atoi(str_pmem_is_pmem_force) == 1)
pmem_is_pmem_force = 1;
scenario = get_mmap_scenarios(argv[2]);
UT_ASSERT(scenario != TEST_MMAP_SCENARIO_UNKNOWN);
int fd = OPEN(argv[1], O_RDWR);
os_stat_t stbuf;
FSTAT(fd, &stbuf);
Size = stbuf.st_size;
chunk_length = Mmap_align;
/*
* We don't support too small a file size.
*/
UT_ASSERT(Size / 8 > chunk_length);
file_map = CreateFileMapping((HANDLE)_get_osfhandle(fd), NULL,
PAGE_READONLY, 0, 0, NULL);
UT_ASSERT(file_map != NULL);
Addr = MapViewOfFile(file_map, FILE_MAP_READ, 0, 0, 0);
/*
* let's setup FileMappingQHead such that, it appears to have lot of
* DAX mapping created through our mmap. Here are our cases based
* on the input:
* - entire region in mapped through our mmap
* - there is a region at the beginning that's not mapped through our
* mmap
* - there is a region at the end that's not mapped through our mmap
* - there is a region in the middle that mapped through our mmap
*/
for (size_t offset = 0;
offset < Size;
offset += chunk_length) {
void *base_address = (void *)((char *)Addr + offset);
switch (scenario) {
case TEST_MMAP_SCENARIO_BEGIN_HOLE:
if (still_holey &&
((offset == 0) || ((rand() % 2) == 0)) &&
(offset < (Size / 2)))
continue;
else
still_holey = 0;
break;
case TEST_MMAP_SCENARIO_END_HOLE:
if ((offset > (Size / 2)) &&
(already_holey || ((rand() % 2) == 0) ||
(offset >= (Size - chunk_length)))) {
already_holey = 1;
continue;
} else
UT_ASSERT(!already_holey);
break;
case TEST_MMAP_SCENARIO_MIDDLE_HOLE:
if ((((offset > (Size / 8)) && ((rand() % 2) == 0)) ||
(offset > (Size / 8) * 6)) &&
(offset < (Size / 8) * 7))
continue;
break;
}
PFILE_MAPPING_TRACKER mt =
MALLOC(sizeof(struct FILE_MAPPING_TRACKER));
mt->Flags = FILE_MAPPING_TRACKER_FLAG_DIRECT_MAPPED;
mt->FileHandle = (HANDLE)_get_osfhandle(fd);
mt->FileMappingHandle = file_map;
mt->BaseAddress = base_address;
mt->EndAddress = (void *)((char *)base_address + chunk_length);
mt->Access = FILE_MAP_READ;
mt->Offset = offset;
AcquireSRWLockExclusive(&FileMappingQLock);
PMDK_SORTEDQ_INSERT(&FileMappingQHead, mt, ListEntry,
FILE_MAPPING_TRACKER,
mmap_file_mapping_comparer);
ReleaseSRWLockExclusive(&FileMappingQLock);
}
CloseHandle(file_map);
CLOSE(fd);
os_thread_t threads[NTHREAD];
int ret[NTHREAD];
/* kick off NTHREAD threads */
for (int i = 0; i < NTHREAD; i++)
THREAD_CREATE(&threads[i], NULL, worker, &ret[i]);
/* wait for all the threads to complete */
for (int i = 0; i < NTHREAD; i++)
THREAD_JOIN(&threads[i], NULL);
/* verify that all the threads return the same value */
for (int i = 1; i < NTHREAD; i++)
UT_ASSERTeq(ret[0], ret[i]);
UT_OUT("%d", ret[0]);
DONE(NULL);
}
/*
* Since libpmem is linked statically, we need to invoke its ctor/dtor.
*/
MSVC_CONSTR(libpmem_init)
MSVC_DESTR(libpmem_fini)
| 6,946 | 27.239837 | 76 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_fragmentation2/obj_fragmentation2.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2020, Intel Corporation */
/*
* obj_fragmentation2.c -- measures average heap external fragmentation
*
* This test is based on the workloads proposed in:
* Log-structured Memory for DRAM-based Storage
* by Stephen M. Rumble, Ankita Kejriwal, and John Ousterhout
*
* https://www.usenix.org/system/files/conference/fast14/fast14-paper_rumble.pdf
*/
#include <stdlib.h>
#include <math.h>
#include "rand.h"
#include "unittest.h"
#define LAYOUT_NAME "obj_fragmentation"
#define MEGABYTE (1ULL << 20)
#define GIGABYTE (1ULL << 30)
#define RRAND(max, min)\
((min) == (max) ? (min) : (rnd64() % ((max) - (min)) + (min)))
static PMEMoid *objects;
static size_t nobjects;
static size_t allocated_current;
#define MAX_OBJECTS (200ULL * 1000000)
#define ALLOC_TOTAL (5000ULL * MEGABYTE)
#define ALLOC_CURR (1000 * MEGABYTE)
#define FREES_P 200
#define DEFAULT_FILE_SIZE (3 * GIGABYTE)
static void
shuffle_objects(size_t start, size_t end)
{
PMEMoid tmp;
size_t dest;
for (size_t n = start; n < end; ++n) {
dest = RRAND(nobjects - 1, 0);
tmp = objects[n];
objects[n] = objects[dest];
objects[dest] = tmp;
}
}
static PMEMoid
remove_last()
{
UT_ASSERT(nobjects > 0);
PMEMoid obj = objects[--nobjects];
return obj;
}
static void
delete_objects(PMEMobjpool *pop, float pct)
{
size_t nfree = (size_t)(nobjects * pct);
PMEMoid oid = pmemobj_root(pop, 1);
shuffle_objects(0, nobjects);
while (nfree--) {
oid = remove_last();
allocated_current -= pmemobj_alloc_usable_size(oid);
pmemobj_free(&oid);
}
}
/*
* object_next_size -- generates random sizes in range with
* exponential distribution
*/
static size_t
object_next_size(size_t max, size_t min)
{
float fmax = (float)max;
float fmin = (float)min;
float n = (float)rnd64() / ((float)UINT64_MAX / 1.0f);
return (size_t)(fmin + (fmax - fmin) * (float)exp(n * - 4.0));
}
/*
* allocate_exponential -- allocates objects from a large range of sizes.
*
* This is designed to stress the recycler subsystem that will have to
* constantly look for freed/empty runs and reuse them.
*
* For small pools (single digit gigabytes), this test will show large
* fragmentation because it can use a large number of runs - which is fine.
*/
static void
allocate_exponential(PMEMobjpool *pop, size_t size_min, size_t size_max)
{
size_t allocated_total = 0;
PMEMoid oid;
while (allocated_total < ALLOC_TOTAL) {
size_t s = object_next_size(size_max, size_min);
int ret = pmemobj_alloc(pop, &oid, s, 0, NULL, NULL);
if (ret != 0) {
/* delete a random percentage of allocated objects */
float delete_pct = (float)RRAND(90, 10) / 100.0f;
delete_objects(pop, delete_pct);
continue;
}
s = pmemobj_alloc_usable_size(oid);
objects[nobjects++] = oid;
UT_ASSERT(nobjects < MAX_OBJECTS);
allocated_total += s;
allocated_current += s;
}
}
static void
allocate_objects(PMEMobjpool *pop, size_t size_min, size_t size_max)
{
size_t allocated_total = 0;
size_t sstart = 0;
PMEMoid oid;
while (allocated_total < ALLOC_TOTAL) {
size_t s = RRAND(size_max, size_min);
pmemobj_alloc(pop, &oid, s, 0, NULL, NULL);
UT_ASSERTeq(OID_IS_NULL(oid), 0);
s = pmemobj_alloc_usable_size(oid);
objects[nobjects++] = oid;
UT_ASSERT(nobjects < MAX_OBJECTS);
allocated_total += s;
allocated_current += s;
if (allocated_current > ALLOC_CURR) {
shuffle_objects(sstart, nobjects);
for (int i = 0; i < FREES_P; ++i) {
oid = remove_last();
allocated_current -=
pmemobj_alloc_usable_size(oid);
pmemobj_free(&oid);
}
sstart = nobjects;
}
}
}
typedef void workload(PMEMobjpool *pop);
static void w0(PMEMobjpool *pop) {
allocate_objects(pop, 100, 100);
}
static void w1(PMEMobjpool *pop) {
allocate_objects(pop, 100, 100);
allocate_objects(pop, 130, 130);
}
static void w2(PMEMobjpool *pop) {
allocate_objects(pop, 100, 100);
delete_objects(pop, 0.9F);
allocate_objects(pop, 130, 130);
}
static void w3(PMEMobjpool *pop) {
allocate_objects(pop, 100, 150);
allocate_objects(pop, 200, 250);
}
static void w4(PMEMobjpool *pop) {
allocate_objects(pop, 100, 150);
delete_objects(pop, 0.9F);
allocate_objects(pop, 200, 250);
}
static void w5(PMEMobjpool *pop) {
allocate_objects(pop, 100, 200);
delete_objects(pop, 0.5);
allocate_objects(pop, 1000, 2000);
}
static void w6(PMEMobjpool *pop) {
allocate_objects(pop, 1000, 2000);
delete_objects(pop, 0.9F);
allocate_objects(pop, 1500, 2500);
}
static void w7(PMEMobjpool *pop) {
allocate_objects(pop, 50, 150);
delete_objects(pop, 0.9F);
allocate_objects(pop, 5000, 15000);
}
static void w8(PMEMobjpool *pop) {
allocate_objects(pop, 2 * MEGABYTE, 2 * MEGABYTE);
}
static void w9(PMEMobjpool *pop) {
allocate_exponential(pop, 1, 5 * MEGABYTE);
}
static workload *workloads[] = {
w0, w1, w2, w3, w4, w5, w6, w7, w8, w9
};
static float workloads_target[] = {
0.01f, 0.01f, 0.01f, 0.9f, 0.8f, 0.7f, 0.3f, 0.8f, 0.73f, 3.0f
};
static float workloads_defrag_target[] = {
0.01f, 0.01f, 0.01f, 0.01f, 0.01f, 0.05f, 0.09f, 0.13f, 0.01f, 0.16f
};
/*
* Last two workloads operates mostly on huge chunks, so run
* stats are useless.
*/
static float workloads_stat_target[] = {
0.01f, 1.1f, 1.1f, 0.86f, 0.76f, 1.01f, 0.23f, 1.24f, 2100.f, 2100.f
};
static float workloads_defrag_stat_target[] = {
0.01f, 0.01f, 0.01f, 0.02f, 0.02f, 0.04f, 0.08f, 0.12f, 2100.f, 2100.f
};
int
main(int argc, char *argv[])
{
START(argc, argv, "obj_fragmentation2");
if (argc < 3)
UT_FATAL("usage: %s filename workload [seed] [defrag]",
argv[0]);
const char *path = argv[1];
PMEMobjpool *pop = pmemobj_create(path, LAYOUT_NAME, DEFAULT_FILE_SIZE,
S_IWUSR | S_IRUSR);
if (pop == NULL)
UT_FATAL("!pmemobj_create: %s", path);
int w = atoi(argv[2]);
if (argc > 3)
randomize((unsigned)atoi(argv[3]));
else
randomize(0);
int defrag = argc > 4 ? atoi(argv[4]) != 0 : 0;
objects = ZALLOC(sizeof(PMEMoid) * MAX_OBJECTS);
UT_ASSERTne(objects, NULL);
workloads[w](pop);
/* this is to trigger global recycling */
pmemobj_defrag(pop, NULL, 0, NULL);
size_t active = 0;
size_t allocated = 0;
pmemobj_ctl_get(pop, "stats.heap.run_active", &active);
pmemobj_ctl_get(pop, "stats.heap.run_allocated", &allocated);
float stat_frag = 0;
if (active != 0 && allocated != 0) {
stat_frag = ((float)active / allocated) - 1.f;
UT_ASSERT(stat_frag <= workloads_stat_target[w]);
}
if (defrag) {
PMEMoid **objectsf = ZALLOC(sizeof(PMEMoid) * nobjects);
for (size_t i = 0; i < nobjects; ++i)
objectsf[i] = &objects[i];
pmemobj_defrag(pop, objectsf, nobjects, NULL);
FREE(objectsf);
active = 0;
allocated = 0;
/* this is to trigger global recycling */
pmemobj_defrag(pop, NULL, 0, NULL);
pmemobj_ctl_get(pop, "stats.heap.run_active", &active);
pmemobj_ctl_get(pop, "stats.heap.run_allocated", &allocated);
if (active != 0 && allocated != 0) {
stat_frag = ((float)active / allocated) - 1.f;
UT_ASSERT(stat_frag <= workloads_defrag_stat_target[w]);
}
}
PMEMoid oid;
size_t remaining = 0;
size_t chunk = (100); /* calc at chunk level */
while (pmemobj_alloc(pop, &oid, chunk, 0, NULL, NULL) == 0)
remaining += pmemobj_alloc_usable_size(oid) + 16;
size_t allocated_sum = 0;
oid = pmemobj_root(pop, 1);
for (size_t n = 0; n < nobjects; ++n) {
if (OID_IS_NULL(objects[n]))
continue;
oid = objects[n];
allocated_sum += pmemobj_alloc_usable_size(oid) + 16;
}
size_t used = DEFAULT_FILE_SIZE - remaining;
float frag = ((float)used / allocated_sum) - 1.f;
UT_OUT("FRAG: %f\n", frag);
UT_ASSERT(frag <= (defrag ?
workloads_defrag_target[w] : workloads_target[w]));
pmemobj_close(pop);
FREE(objects);
DONE(NULL);
}
| 7,747 | 22.337349 | 80 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_fragmentation2/TESTS.py
|
#!../env.py
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2019-2020, Intel Corporation
#
from os import path
import testframework as t
from testframework import granularity as g
@g.require_granularity(g.CACHELINE)
@t.require_build('release')
class Base(t.Test):
test_type = t.Long
seed = '12345'
defrag = '1'
def run(self, ctx):
testfile = path.join(ctx.testdir, 'testfile')
# this test is extremely long otherwise
ctx.env['PMEM_NO_FLUSH'] = '1'
ctx.exec('obj_fragmentation2',
testfile, ctx.workload(), self.seed, self.defrag)
# These tests last too long under drd/helgrind/memcheck/pmemcheck
# Exceptions: workloads no. 6 and 8 under memcheck/pmemcheck (run with TEST1)
@t.require_valgrind_disabled('drd', 'helgrind', 'memcheck', 'pmemcheck')
@t.add_params('workload', [0, 1, 2, 3, 4, 5, 7, 9])
class TEST0(Base):
pass
@t.require_valgrind_disabled('drd', 'helgrind')
@t.add_params('workload', [6, 8])
class TEST1(Base):
pass
| 1,014 | 25.025641 | 77 |
py
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/getopt/getopt.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* getopt.c -- test for windows getopt() implementation
*/
#include <stdio.h>
#include <stdlib.h>
#include <getopt.h>
#include "unittest.h"
/*
* long_options -- command line arguments
*/
static const struct option long_options[] = {
{ "arg_a", no_argument, NULL, 'a' },
{ "arg_b", no_argument, NULL, 'b' },
{ "arg_c", no_argument, NULL, 'c' },
{ "arg_d", no_argument, NULL, 'd' },
{ "arg_e", no_argument, NULL, 'e' },
{ "arg_f", no_argument, NULL, 'f' },
{ "arg_g", no_argument, NULL, 'g' },
{ "arg_h", no_argument, NULL, 'h' },
{ "arg_A", required_argument, NULL, 'A' },
{ "arg_B", required_argument, NULL, 'B' },
{ "arg_C", required_argument, NULL, 'C' },
{ "arg_D", required_argument, NULL, 'D' },
{ "arg_E", required_argument, NULL, 'E' },
{ "arg_F", required_argument, NULL, 'F' },
{ "arg_G", required_argument, NULL, 'G' },
{ "arg_H", required_argument, NULL, 'H' },
{ "arg_1", optional_argument, NULL, '1' },
{ "arg_2", optional_argument, NULL, '2' },
{ "arg_3", optional_argument, NULL, '3' },
{ "arg_4", optional_argument, NULL, '4' },
{ "arg_5", optional_argument, NULL, '5' },
{ "arg_6", optional_argument, NULL, '6' },
{ "arg_7", optional_argument, NULL, '7' },
{ "arg_8", optional_argument, NULL, '8' },
{ NULL, 0, NULL, 0 },
};
int
main(int argc, char *argv[])
{
int opt;
int option_index;
START(argc, argv, "getopt");
while ((opt = getopt_long(argc, argv,
"abcdefghA:B:C:D:E:F:G::H1::2::3::4::5::6::7::8::",
long_options, &option_index)) != -1) {
switch (opt) {
case '?':
UT_OUT("unknown argument");
break;
case 'a':
case 'b':
case 'c':
case 'd':
case 'e':
case 'f':
case 'g':
case 'h':
UT_OUT("arg_%c", opt);
break;
case 'A':
case 'B':
case 'C':
case 'D':
case 'E':
case 'F':
case 'G':
case 'H':
case '1':
case '2':
case '3':
case '4':
case '5':
case '6':
case '7':
case '8':
UT_OUT("arg_%c=%s", opt,
optarg == NULL ? "null": optarg);
break;
}
}
while (optind < argc) {
UT_OUT("%s", argv[optind++]);
}
DONE(NULL);
}
| 2,159 | 21.736842 | 55 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/rpmemd_config/rpmemd_config_test.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* rpmemd_config_test.c -- unit tests for rpmemd_config
*/
#include <stddef.h>
#include <inttypes.h>
#include <sys/param.h>
#include <syslog.h>
#include <pwd.h>
#include "unittest.h"
#include "rpmemd_log.h"
#include "rpmemd_config.h"
static const char *config_print_fmt =
"log_file\t\t%s\n"
"poolset_dir:\t\t%s\n"
"persist_apm:\t\t%s\n"
"persist_general:\t%s\n"
"use_syslog:\t\t%s\n"
"max_lanes:\t\t%" PRIu64 "\n"
"log_level:\t\t%s";
/*
* bool_to_str -- convert bool value to a string ("yes" / "no")
*/
static inline const char *
bool_to_str(bool v)
{
return v ? "yes" : "no";
}
/*
* config_print -- print rpmemd_config to the stdout
*/
static void
config_print(struct rpmemd_config *config)
{
UT_ASSERT(config->log_level < MAX_RPD_LOG);
UT_OUT(
config_print_fmt,
config->log_file,
config->poolset_dir,
bool_to_str(config->persist_apm),
bool_to_str(config->persist_general),
bool_to_str(config->use_syslog),
config->max_lanes,
rpmemd_log_level_to_str(config->log_level));
}
/*
* parse_test_params -- parse command line options specific to the test
*
* usage: rpmemd_config [rpmemd options] [test options]
*
* Available test options:
* - print_HOME_env prints current HOME_ENV value
*/
static void
parse_test_params(int *argc, char *argv[])
{
if (*argc <= 1)
return;
if (strcmp(argv[*argc - 1], "print_HOME_env") == 0) {
char *home = os_getenv(HOME_ENV);
if (home) {
UT_OUT("$%s == %s", HOME_ENV, home);
} else {
UT_OUT("$%s is not set", HOME_ENV);
}
} else {
return;
}
*argc -= 1;
}
int
main(int argc, char *argv[])
{
/* workaround for getpwuid open fd */
getpwuid(getuid());
START(argc, argv, "rpmemd_config");
int ret = rpmemd_log_init("rpmemd_log", NULL, 0);
UT_ASSERTeq(ret, 0);
parse_test_params(&argc, argv);
struct rpmemd_config config;
ret = rpmemd_config_read(&config, argc, argv);
if (ret) {
UT_OUT("invalid config");
} else {
config_print(&config);
}
rpmemd_log_close();
if (!ret)
rpmemd_config_free(&config);
DONE(NULL);
}
| 2,111 | 17.857143 | 71 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/util_sds/util_sds.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2020, Intel Corporation */
/*
* util_sds.c -- unit test for shutdown state functions
*/
#include <stdlib.h>
#include "unittest.h"
#include "ut_pmem2.h"
#include "shutdown_state.h"
#include "set.h"
#define PMEM_LEN 4096
static char **uids;
static size_t uids_size;
static size_t uid_it;
static uint64_t *uscs;
static size_t uscs_size;
static size_t usc_it;
static pmem2_persist_fn persist;
#define FAIL(X, Y) \
if ((X) == (Y)) { \
goto out; \
}
int
main(int argc, char *argv[])
{
START(argc, argv, "util_sds");
if (argc < 2)
UT_FATAL("usage: %s init fail (file uuid usc)...", argv[0]);
unsigned files = (unsigned)(argc - 2) / 3;
char **pmemaddr = MALLOC(files * sizeof(char *));
int *fds = MALLOC(files * sizeof(fds[0]));
struct pmem2_map **maps = MALLOC(files * sizeof(maps[0]));
uids = MALLOC(files * sizeof(uids[0]));
uscs = MALLOC(files * sizeof(uscs[0]));
uids_size = files;
uscs_size = files;
int init = atoi(argv[1]);
int fail_on = atoi(argv[2]);
char **args = argv + 3;
struct pmem2_config *cfg;
PMEM2_CONFIG_NEW(&cfg);
pmem2_config_set_required_store_granularity(cfg,
PMEM2_GRANULARITY_PAGE);
for (unsigned i = 0; i < files; i++) {
fds[i] = OPEN(args[i * 3], O_CREAT | O_RDWR, 0666);
POSIX_FALLOCATE(fds[i], 0, PMEM_LEN);
struct pmem2_source *src;
PMEM2_SOURCE_FROM_FD(&src, fds[i]);
if (pmem2_map(cfg, src, &maps[i])) {
UT_FATAL("pmem2_map: %s", pmem2_errormsg());
}
pmemaddr[i] = pmem2_map_get_address(maps[i]);
uids[i] = args[i * 3 + 1];
uscs[i] = strtoull(args[i * 3 + 2], NULL, 0);
PMEM2_SOURCE_DELETE(&src);
}
persist = pmem2_get_persist_fn(maps[0]);
FAIL(fail_on, 1);
struct pool_replica *rep = MALLOC(
sizeof(*rep) + sizeof(struct pool_set_part));
memset(rep, 0, sizeof(*rep) + sizeof(struct pool_set_part));
struct shutdown_state *pool_sds = (struct shutdown_state *)pmemaddr[0];
if (init) {
/* initialize pool shutdown state */
shutdown_state_init(pool_sds, rep);
FAIL(fail_on, 2);
for (unsigned i = 0; i < files; i++) {
if (shutdown_state_add_part(pool_sds, fds[i], rep))
UT_FATAL("shutdown_state_add_part");
FAIL(fail_on, 3);
}
} else {
/* verify a shutdown state saved in the pool */
struct shutdown_state current_sds;
shutdown_state_init(¤t_sds, NULL);
FAIL(fail_on, 2);
for (unsigned i = 0; i < files; i++) {
if (shutdown_state_add_part(¤t_sds,
fds[i], NULL))
UT_FATAL("shutdown_state_add_part");
FAIL(fail_on, 3);
}
if (shutdown_state_check(¤t_sds, pool_sds, rep)) {
UT_FATAL(
"An ADR failure is detected, the pool might be corrupted");
}
}
FAIL(fail_on, 4);
shutdown_state_set_dirty(pool_sds, rep);
/* pool is open */
FAIL(fail_on, 5);
/* close pool */
shutdown_state_clear_dirty(pool_sds, rep);
FAIL(fail_on, 6);
out:
for (unsigned i = 0; i < files; i++) {
pmem2_unmap(&maps[i]);
CLOSE(fds[i]);
}
PMEM2_CONFIG_DELETE(&cfg);
FREE(pmemaddr);
FREE(uids);
FREE(uscs);
FREE(fds);
FREE(maps);
DONE(NULL);
}
FUNC_MOCK(pmem2_source_device_id, int, const struct pmem2_source *src,
char *uid, size_t *len)
FUNC_MOCK_RUN_DEFAULT {
if (uid_it < uids_size) {
if (uid != NULL) {
strcpy(uid, uids[uid_it]);
uid_it++;
} else {
*len = strlen(uids[uid_it]) + 1;
}
} else {
return -1;
}
return 0;
}
FUNC_MOCK_END
FUNC_MOCK(pmem2_source_device_usc, int, const struct pmem2_source *src,
uint64_t *usc)
FUNC_MOCK_RUN_DEFAULT {
if (usc_it < uscs_size) {
*usc = uscs[usc_it];
usc_it++;
} else {
return -1;
}
return 0;
}
FUNC_MOCK_END
int os_part_deep_common(struct pool_replica *rep, unsigned partidx, void *addr,
size_t len, int flush);
/*
* os_part_deep_common -- XXX temporary workaround until pmem2 is
* integrated with common
*/
int
os_part_deep_common(struct pool_replica *rep, unsigned partidx, void *addr,
size_t len, int flush)
{
/*
* this is a test - we don't need to deep-persist anything -
* just call the regular persist to make valgrind happy
*/
persist(addr, len);
return 0;
}
#ifdef _MSC_VER
MSVC_CONSTR(libpmem2_init)
MSVC_DESTR(libpmem2_fini)
#endif
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/util_sds/mocks_windows.h
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018-2020, Intel Corporation */
/*
* mocks_windows.h -- redefinitions of DIMM functions
*/
#ifndef WRAP_REAL
#define pmem2_source_device_usc __wrap_pmem2_source_device_usc
#define pmem2_source_device_idU __wrap_pmem2_source_device_id
#endif
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/util_file_create/util_file_create.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2018, Intel Corporation */
/*
* util_file_create.c -- unit test for util_file_create()
*
* usage: util_file_create minlen len:path [len:path]...
*/
#include "unittest.h"
#include "file.h"
int
main(int argc, char *argv[])
{
START(argc, argv, "util_file_create");
if (argc < 3)
UT_FATAL("usage: %s minlen len:path...", argv[0]);
char *fname;
size_t minsize = strtoul(argv[1], &fname, 0);
for (int arg = 2; arg < argc; arg++) {
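/* each argument is "<len>:<path>"; strtoul leaves fname at the ':' */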
size_t size = strtoul(argv[arg], &fname, 0);
if (*fname != ':')
UT_FATAL("usage: %s minlen len:path...", argv[0]);
fname++;
int fd;
if ((fd = util_file_create(fname, size, minsize)) == -1)
UT_OUT("!%s: util_file_create", fname);
else {
UT_OUT("%s: created", fname);
os_close(fd);
}
}
DONE(NULL);
}
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_tx_add_range/config.sh
#!/usr/bin/env bash
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2017, Intel Corporation
#
#
# obj_tx_add_range/config.sh -- test configuration
#
# Extend timeout for this test, as it may take a few minutes
# when run on a non-pmem file system.
CONF_GLOBAL_TIMEOUT='10m'
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_tx_add_range/TESTS.py
#!../env.py
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2019-2020, Intel Corporation
"""unit tests for pmemobj_tx_add_range and pmemobj_tx_xadd_range"""
from os import path
import testframework as t
@t.require_valgrind_disabled('memcheck', 'pmemcheck')
class TEST0(t.Test):
test_type = t.Medium
def run(self, ctx):
testfile = path.join(ctx.testdir, 'testfile0')
ctx.exec('obj_tx_add_range', testfile, '0')
@t.require_valgrind_enabled('pmemcheck')
class TEST1(t.Test):
test_type = t.Medium
def run(self, ctx):
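# the test deliberately stores to snapshotted locations more than
# once, so disable pmemcheck's multiple-stores reporting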
ctx.valgrind.add_opt('--mult-stores=no')
testfile = path.join(ctx.testdir, 'testfile1')
ctx.exec('obj_tx_add_range', testfile, '0')
@t.require_valgrind_disabled('memcheck')
class TEST2(t.Test):
test_type = t.Medium
def run(self, ctx):
testfile = path.join(ctx.testdir, 'testfile2')
ctx.exec('obj_tx_add_range', testfile, '1')
@t.require_valgrind_enabled('memcheck')
@t.require_build('debug')
class TEST3(t.Test):
test_type = t.Medium
def run(self, ctx):
testfile = path.join(ctx.testdir, 'testfile3')
ctx.exec('obj_tx_add_range', testfile, '0')
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_tx_add_range/obj_tx_add_range.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* obj_tx_add_range.c -- unit test for pmemobj_tx_add_range
*/
#include <string.h>
#include <stddef.h>
#include "tx.h"
#include "unittest.h"
#include "util.h"
#include "valgrind_internal.h"
#define LAYOUT_NAME "tx_add_range"
#define OBJ_SIZE 1024
#define OVERLAP_SIZE 100
#define ROOT_TAB_SIZE\
(TX_DEFAULT_RANGE_CACHE_SIZE / sizeof(int))
#define REOPEN_COUNT 10
enum type_number {
TYPE_OBJ,
TYPE_OBJ_ABORT,
TYPE_OBJ_WRONG_UUID,
};
TOID_DECLARE(struct object, 0);
TOID_DECLARE(struct overlap_object, 1);
TOID_DECLARE_ROOT(struct root);
struct root {
int val;
int tab[ROOT_TAB_SIZE];
};
struct object {
size_t value;
char data[OBJ_SIZE - sizeof(size_t)];
};
struct overlap_object {
uint8_t data[OVERLAP_SIZE];
};
#define VALUE_OFF (offsetof(struct object, value))
#define VALUE_SIZE (sizeof(size_t))
#define DATA_OFF (offsetof(struct object, data))
#define DATA_SIZE (OBJ_SIZE - sizeof(size_t))
#define TEST_VALUE_1 1
#define TEST_VALUE_2 2
/*
* do_tx_zalloc -- do tx allocation with specified type number
*/
static PMEMoid
do_tx_zalloc(PMEMobjpool *pop, uint64_t type_num)
{
PMEMoid ret = OID_NULL;
TX_BEGIN(pop) {
ret = pmemobj_tx_zalloc(sizeof(struct object), type_num);
} TX_END
return ret;
}
/*
* do_tx_alloc -- do tx allocation and initialize first num bytes
*/
static PMEMoid
do_tx_alloc(PMEMobjpool *pop, uint64_t type_num, uint64_t init_num)
{
PMEMoid ret = OID_NULL;
TX_BEGIN(pop) {
ret = pmemobj_tx_alloc(sizeof(struct object), type_num);
pmemobj_memset(pop, pmemobj_direct(ret), 0, init_num, 0);
} TX_END
return ret;
}
/*
* do_tx_add_range_alloc_commit -- call pmemobj_add_range on object allocated
* within the same transaction and commit the transaction
*/
static void
do_tx_add_range_alloc_commit(PMEMobjpool *pop)
{
int ret;
TOID(struct object) obj;
TX_BEGIN(pop) {
TOID_ASSIGN(obj, do_tx_zalloc(pop, TYPE_OBJ));
UT_ASSERT(!TOID_IS_NULL(obj));
ret = pmemobj_tx_add_range(obj.oid, VALUE_OFF, VALUE_SIZE);
UT_ASSERTeq(ret, 0);
D_RW(obj)->value = TEST_VALUE_1;
ret = pmemobj_tx_add_range(obj.oid, DATA_OFF, DATA_SIZE);
UT_ASSERTeq(ret, 0);
pmemobj_memset_persist(pop, D_RW(obj)->data, TEST_VALUE_2,
DATA_SIZE);
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1);
size_t i;
for (i = 0; i < DATA_SIZE; i++)
UT_ASSERTeq(D_RO(obj)->data[i], TEST_VALUE_2);
}
/*
* do_tx_add_range_alloc_abort -- call pmemobj_add_range on object allocated
* within the same transaction and abort the transaction
*/
static void
do_tx_add_range_alloc_abort(PMEMobjpool *pop)
{
int ret;
TOID(struct object) obj;
TX_BEGIN(pop) {
TOID_ASSIGN(obj, do_tx_zalloc(pop, TYPE_OBJ_ABORT));
UT_ASSERT(!TOID_IS_NULL(obj));
ret = pmemobj_tx_add_range(obj.oid, VALUE_OFF, VALUE_SIZE);
UT_ASSERTeq(ret, 0);
D_RW(obj)->value = TEST_VALUE_1;
ret = pmemobj_tx_add_range(obj.oid, DATA_OFF, DATA_SIZE);
UT_ASSERTeq(ret, 0);
pmemobj_memset_persist(pop, D_RW(obj)->data, TEST_VALUE_2,
DATA_SIZE);
pmemobj_tx_abort(-1);
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_END
TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_OBJ_ABORT));
UT_ASSERT(TOID_IS_NULL(obj));
}
/*
* do_tx_add_range_twice_commit -- call pmemobj_tx_add_range on the same
* area twice and commit the transaction
*/
static void
do_tx_add_range_twice_commit(PMEMobjpool *pop)
{
int ret;
TOID(struct object) obj;
TOID_ASSIGN(obj, do_tx_zalloc(pop, TYPE_OBJ));
UT_ASSERT(!TOID_IS_NULL(obj));
TX_BEGIN(pop) {
ret = pmemobj_tx_add_range(obj.oid, VALUE_OFF, VALUE_SIZE);
UT_ASSERTeq(ret, 0);
D_RW(obj)->value = TEST_VALUE_1;
ret = pmemobj_tx_add_range(obj.oid, VALUE_OFF, VALUE_SIZE);
UT_ASSERTeq(ret, 0);
D_RW(obj)->value = TEST_VALUE_2;
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_2);
}
/*
* do_tx_add_range_twice_abort -- call pmemobj_tx_add_range on the same
* area twice and abort the transaction
*/
static void
do_tx_add_range_twice_abort(PMEMobjpool *pop)
{
int ret;
TOID(struct object) obj;
TOID_ASSIGN(obj, do_tx_zalloc(pop, TYPE_OBJ));
UT_ASSERT(!TOID_IS_NULL(obj));
TX_BEGIN(pop) {
ret = pmemobj_tx_add_range(obj.oid, VALUE_OFF, VALUE_SIZE);
UT_ASSERTeq(ret, 0);
D_RW(obj)->value = TEST_VALUE_1;
ret = pmemobj_tx_add_range(obj.oid, VALUE_OFF, VALUE_SIZE);
UT_ASSERTeq(ret, 0);
D_RW(obj)->value = TEST_VALUE_2;
pmemobj_tx_abort(-1);
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_END
UT_ASSERTeq(D_RO(obj)->value, 0);
}
/*
* do_tx_add_range_abort_after_nested -- call pmemobj_tx_add_range in a
* nested tx, commit the nested tx and abort the outer one
*/
static void
do_tx_add_range_abort_after_nested(PMEMobjpool *pop)
{
int ret;
TOID(struct object) obj1;
TOID(struct object) obj2;
TOID_ASSIGN(obj1, do_tx_zalloc(pop, TYPE_OBJ));
TOID_ASSIGN(obj2, do_tx_zalloc(pop, TYPE_OBJ));
TX_BEGIN(pop) {
ret = pmemobj_tx_add_range(obj1.oid, VALUE_OFF, VALUE_SIZE);
UT_ASSERTeq(ret, 0);
D_RW(obj1)->value = TEST_VALUE_1;
TX_BEGIN(pop) {
ret = pmemobj_tx_add_range(obj2.oid,
DATA_OFF, DATA_SIZE);
UT_ASSERTeq(ret, 0);
pmemobj_memset_persist(pop, D_RW(obj2)->data,
TEST_VALUE_2, DATA_SIZE);
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
pmemobj_tx_abort(-1);
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_END
UT_ASSERTeq(D_RO(obj1)->value, 0);
size_t i;
for (i = 0; i < DATA_SIZE; i++)
UT_ASSERTeq(D_RO(obj2)->data[i], 0);
}
/*
* do_tx_add_range_abort_nested -- call pmemobj_tx_add_range and abort
* the nested tx
*/
static void
do_tx_add_range_abort_nested(PMEMobjpool *pop)
{
int ret;
TOID(struct object) obj1;
TOID(struct object) obj2;
TOID_ASSIGN(obj1, do_tx_zalloc(pop, TYPE_OBJ));
TOID_ASSIGN(obj2, do_tx_zalloc(pop, TYPE_OBJ));
TX_BEGIN(pop) {
ret = pmemobj_tx_add_range(obj1.oid, VALUE_OFF, VALUE_SIZE);
UT_ASSERTeq(ret, 0);
D_RW(obj1)->value = TEST_VALUE_1;
TX_BEGIN(pop) {
ret = pmemobj_tx_add_range(obj2.oid,
DATA_OFF, DATA_SIZE);
UT_ASSERTeq(ret, 0);
pmemobj_memset_persist(pop, D_RW(obj2)->data,
TEST_VALUE_2, DATA_SIZE);
pmemobj_tx_abort(-1);
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_END
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_END
UT_ASSERTeq(D_RO(obj1)->value, 0);
size_t i;
for (i = 0; i < DATA_SIZE; i++)
UT_ASSERTeq(D_RO(obj2)->data[i], 0);
}
/*
* do_tx_add_range_abort_after_commit -- call pmemobj_tx_add_range with
* non-zero data, commit first tx, and abort second tx
*
* This is the test for issue injected in commit:
* 2ab13304664b353b82730f49b78fc67eea33b25b (ulog-invalidation).
*/
static void
do_tx_add_range_abort_after_commit(PMEMobjpool *pop)
{
int ret;
size_t i;
TOID(struct object) obj;
TOID_ASSIGN(obj, do_tx_zalloc(pop, TYPE_OBJ));
/* 1. Set data to non-zero value. */
pmemobj_memset_persist(pop, D_RW(obj)->data,
TEST_VALUE_1, DATA_SIZE);
for (i = 0; i < DATA_SIZE; i++)
UT_ASSERTeq(D_RO(obj)->data[i], TEST_VALUE_1);
/* 2. Do the snapshot using non-zero value. */
TX_BEGIN(pop) {
ret = pmemobj_tx_add_range(obj.oid,
DATA_OFF, DATA_SIZE);
UT_ASSERTeq(ret, 0);
/*
* You can modify data here, but it is not necessary
* to reproduce abort/apply ulog issue.
*/
pmemobj_memset_persist(pop, D_RW(obj)->data,
TEST_VALUE_2, DATA_SIZE);
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
for (i = 0; i < DATA_SIZE; i++)
UT_ASSERTeq(D_RO(obj)->data[i], TEST_VALUE_2);
/*
* 3. Do the second snapshot and then abort the transaction.
*/
for (i = 0; i < DATA_SIZE; i++)
UT_ASSERTeq(D_RO(obj)->data[i], TEST_VALUE_2);
TX_BEGIN(pop) {
ret = pmemobj_tx_add_range(obj.oid, VALUE_OFF, VALUE_SIZE);
UT_ASSERTeq(ret, 0);
D_RW(obj)->value = TEST_VALUE_1;
pmemobj_tx_abort(-1);
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_END
/* 4. All data must be recovered after tx abort. */
UT_ASSERTeq(D_RO(obj)->value, 0);
}
/*
* do_tx_add_range_commit_nested -- call pmemobj_tx_add_range and commit the tx
*/
static void
do_tx_add_range_commit_nested(PMEMobjpool *pop)
{
int ret;
TOID(struct object) obj1;
TOID(struct object) obj2;
TOID_ASSIGN(obj1, do_tx_zalloc(pop, TYPE_OBJ));
TOID_ASSIGN(obj2, do_tx_zalloc(pop, TYPE_OBJ));
TX_BEGIN(pop) {
ret = pmemobj_tx_add_range(obj1.oid, VALUE_OFF, VALUE_SIZE);
UT_ASSERTeq(ret, 0);
D_RW(obj1)->value = TEST_VALUE_1;
TX_BEGIN(pop) {
ret = pmemobj_tx_add_range(obj2.oid,
DATA_OFF, DATA_SIZE);
UT_ASSERTeq(ret, 0);
pmemobj_memset_persist(pop, D_RW(obj2)->data,
TEST_VALUE_2, DATA_SIZE);
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
UT_ASSERTeq(D_RO(obj1)->value, TEST_VALUE_1);
size_t i;
for (i = 0; i < DATA_SIZE; i++)
UT_ASSERTeq(D_RO(obj2)->data[i], TEST_VALUE_2);
}
/*
* do_tx_add_range_abort -- call pmemobj_tx_add_range and abort the tx
*/
static void
do_tx_add_range_abort(PMEMobjpool *pop)
{
int ret;
TOID(struct object) obj;
TOID_ASSIGN(obj, do_tx_zalloc(pop, TYPE_OBJ));
TX_BEGIN(pop) {
ret = pmemobj_tx_add_range(obj.oid, VALUE_OFF, VALUE_SIZE);
UT_ASSERTeq(ret, 0);
D_RW(obj)->value = TEST_VALUE_1;
pmemobj_tx_abort(-1);
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_END
UT_ASSERTeq(D_RO(obj)->value, 0);
}
/*
* do_tx_add_huge_range_abort -- call pmemobj_tx_add_range on a huge
* range and abort the tx
*/
static void
do_tx_add_huge_range_abort(PMEMobjpool *pop)
{
int ret;
size_t snapshot_s = TX_DEFAULT_RANGE_CACHE_THRESHOLD + 1;
PMEMoid obj;
pmemobj_zalloc(pop, &obj, snapshot_s, 0);
TX_BEGIN(pop) {
ret = pmemobj_tx_add_range(obj, 0, snapshot_s);
UT_ASSERTeq(ret, 0);
memset(pmemobj_direct(obj), 0xc, snapshot_s);
pmemobj_tx_abort(-1);
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_END
UT_ASSERT(util_is_zeroed(pmemobj_direct(obj), snapshot_s));
}
/*
* do_tx_add_range_commit -- call pmemobj_tx_add_range and commit the tx
*/
static void
do_tx_add_range_commit(PMEMobjpool *pop)
{
int ret;
TOID(struct object) obj;
TOID_ASSIGN(obj, do_tx_zalloc(pop, TYPE_OBJ));
TX_BEGIN(pop) {
ret = pmemobj_tx_add_range(obj.oid, VALUE_OFF, VALUE_SIZE);
UT_ASSERTeq(ret, 0);
D_RW(obj)->value = TEST_VALUE_1;
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1);
}
/*
* do_tx_xadd_range_no_flush_commit -- call pmemobj_tx_xadd_range with
* POBJ_XADD_NO_FLUSH set and commit the tx
*/
static void
do_tx_xadd_range_no_flush_commit(PMEMobjpool *pop)
{
int ret;
TOID(struct object) obj;
TOID_ASSIGN(obj, do_tx_zalloc(pop, TYPE_OBJ));
TX_BEGIN(pop) {
ret = pmemobj_tx_xadd_range(obj.oid, VALUE_OFF, VALUE_SIZE,
POBJ_XADD_NO_FLUSH);
UT_ASSERTeq(ret, 0);
D_RW(obj)->value = TEST_VALUE_1;
/* let pmemcheck find we didn't flush it */
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1);
}
/*
* do_tx_xadd_range_no_snapshot_commit -- call pmemobj_tx_xadd_range with
* POBJ_XADD_NO_SNAPSHOT flag set and commit the tx
*/
static void
do_tx_xadd_range_no_snapshot_commit(PMEMobjpool *pop)
{
int ret;
TOID(struct object) obj;
TOID_ASSIGN(obj, do_tx_zalloc(pop, TYPE_OBJ));
TX_BEGIN(pop) {
ret = pmemobj_tx_xadd_range(obj.oid, VALUE_OFF, VALUE_SIZE,
POBJ_XADD_NO_SNAPSHOT);
UT_ASSERTeq(ret, 0);
D_RW(obj)->value = TEST_VALUE_1;
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1);
}
/*
* do_tx_xadd_range_twice_no_snapshot_abort -- call pmemobj_tx_add_range twice
* - with POBJ_XADD_NO_SNAPSHOT flag set and without it - and abort the tx
*/
static void
do_tx_xadd_range_twice_no_snapshot_abort(PMEMobjpool *pop)
{
int ret;
TOID(struct object) obj;
TOID_ASSIGN(obj, do_tx_zalloc(pop, TYPE_OBJ));
TX_BEGIN(pop) {
ret = pmemobj_tx_xadd_range(obj.oid, VALUE_OFF, VALUE_SIZE,
POBJ_XADD_NO_SNAPSHOT);
UT_ASSERTeq(ret, 0);
/* Previously set flag on this range should NOT be overridden */
ret = pmemobj_tx_add_range(obj.oid, VALUE_OFF, VALUE_SIZE);
UT_ASSERTeq(ret, 0);
D_RW(obj)->value = TEST_VALUE_1;
pmemobj_tx_abort(-1);
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_END
UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1);
}
/*
* do_tx_xadd_range_no_snapshot_abort -- call pmemobj_tx_xadd_range with
* POBJ_XADD_NO_SNAPSHOT flag, modify the value inside aborted transaction
*/
static void
do_tx_xadd_range_no_snapshot_abort(PMEMobjpool *pop)
{
int ret;
TOID(struct object) obj;
TOID_ASSIGN(obj, do_tx_zalloc(pop, TYPE_OBJ));
D_RW(obj)->value = TEST_VALUE_1;
TX_BEGIN(pop) {
ret = pmemobj_tx_xadd_range(obj.oid, VALUE_OFF, VALUE_SIZE,
POBJ_XADD_NO_SNAPSHOT);
UT_ASSERTeq(ret, 0);
D_RW(obj)->value = TEST_VALUE_2;
pmemobj_tx_abort(-1);
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_END
/*
* value added with NO_SNAPSHOT flag should NOT
* be rolled back after abort
*/
UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_2);
}
/*
* do_tx_xadd_range_no_snapshot_fields -- call pmemobj_tx_add_range
* on selected fields and NO_SNAPSHOT flag set
*/
static void
do_tx_xadd_range_no_snapshot_fields(PMEMobjpool *pop)
{
TOID(struct overlap_object) obj;
TOID_ASSIGN(obj, do_tx_zalloc(pop, 1));
char after_abort[OVERLAP_SIZE];
memcpy(after_abort, D_RO(obj)->data, OVERLAP_SIZE);
TX_BEGIN(pop) {
/*
* changes of ranges with NO_SNAPSHOT flag set
* should not be reverted after abort
*/
TX_XADD_FIELD(obj, data[1], POBJ_XADD_NO_SNAPSHOT);
D_RW(obj)->data[1] = 1;
after_abort[1] = 1;
TX_ADD_FIELD(obj, data[2]);
D_RW(obj)->data[2] = 2;
TX_XADD_FIELD(obj, data[5], POBJ_XADD_NO_SNAPSHOT);
D_RW(obj)->data[5] = 5;
after_abort[5] = 5;
TX_ADD_FIELD(obj, data[7]);
D_RW(obj)->data[7] = 7;
TX_XADD_FIELD(obj, data[8], POBJ_XADD_NO_SNAPSHOT);
D_RW(obj)->data[8] = 8;
after_abort[8] = 8;
pmemobj_tx_abort(-1);
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_END
UT_ASSERTeq(memcmp(D_RW(obj)->data, after_abort, OVERLAP_SIZE), 0);
}
/*
* do_tx_xadd_range_no_uninit_check_commit -- call pmemobj_tx_xadd_range
* for initialized memory with the POBJ_XADD_ASSUME_INITIALIZED flag set
* and commit the tx
*/
static void
do_tx_xadd_range_no_uninit_check_commit(PMEMobjpool *pop)
{
int ret;
TOID(struct object) obj;
TOID_ASSIGN(obj, do_tx_zalloc(pop, TYPE_OBJ));
TX_BEGIN(pop) {
ret = pmemobj_tx_xadd_range(obj.oid, VALUE_OFF, VALUE_SIZE,
POBJ_XADD_ASSUME_INITIALIZED);
UT_ASSERTeq(ret, 0);
D_RW(obj)->value = TEST_VALUE_1;
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1);
}
/*
* do_tx_xadd_range_no_uninit_check_commit_uninit -- call
* pmemobj_tx_xadd_range for uninitialized memory with the
* POBJ_XADD_ASSUME_INITIALIZED flag set and commit the tx
*/
static void
do_tx_xadd_range_no_uninit_check_commit_uninit(PMEMobjpool *pop)
{
int ret;
TOID(struct object) obj;
TOID_ASSIGN(obj, do_tx_alloc(pop, TYPE_OBJ, 0));
TX_BEGIN(pop) {
ret = pmemobj_tx_xadd_range(obj.oid, VALUE_OFF, VALUE_SIZE,
POBJ_XADD_ASSUME_INITIALIZED);
UT_ASSERTeq(ret, 0);
ret = pmemobj_tx_xadd_range(obj.oid, DATA_OFF, DATA_SIZE,
POBJ_XADD_ASSUME_INITIALIZED);
UT_ASSERTeq(ret, 0);
D_RW(obj)->value = TEST_VALUE_1;
D_RW(obj)->data[256] = TEST_VALUE_2;
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1);
UT_ASSERTeq(D_RO(obj)->data[256], TEST_VALUE_2);
}
/*
* do_tx_xadd_range_no_uninit_check_commit_part_uninit -- call
* pmemobj_tx_xadd_range for partially uninitialized memory with the
* POBJ_XADD_ASSUME_INITIALIZED flag set only for the uninitialized part
* and commit the tx
*/
static void
do_tx_xadd_range_no_uninit_check_commit_part_uninit(PMEMobjpool *pop)
{
int ret;
TOID(struct object) obj;
TOID_ASSIGN(obj, do_tx_alloc(pop, TYPE_OBJ, VALUE_SIZE));
TX_BEGIN(pop) {
ret = pmemobj_tx_add_range(obj.oid, VALUE_OFF, VALUE_SIZE);
UT_ASSERTeq(ret, 0);
ret = pmemobj_tx_xadd_range(obj.oid, DATA_OFF, DATA_SIZE,
POBJ_XADD_ASSUME_INITIALIZED);
UT_ASSERTeq(ret, 0);
D_RW(obj)->value = TEST_VALUE_1;
D_RW(obj)->data[256] = TEST_VALUE_2;
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1);
UT_ASSERTeq(D_RO(obj)->data[256], TEST_VALUE_2);
}
/*
* do_tx_add_range_no_uninit_check_commit_no_flag -- call
* pmemobj_tx_add_range for partially uninitialized memory
*/
static void
do_tx_add_range_no_uninit_check_commit_no_flag(PMEMobjpool *pop)
{
int ret;
TOID(struct object) obj;
TOID_ASSIGN(obj, do_tx_alloc(pop, TYPE_OBJ, VALUE_SIZE));
TX_BEGIN(pop) {
ret = pmemobj_tx_add_range(obj.oid, VALUE_OFF, VALUE_SIZE);
UT_ASSERTeq(ret, 0);
ret = pmemobj_tx_add_range(obj.oid, DATA_OFF, DATA_SIZE);
UT_ASSERTeq(ret, 0);
D_RW(obj)->value = TEST_VALUE_1;
D_RW(obj)->data[256] = TEST_VALUE_2;
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1);
UT_ASSERTeq(D_RO(obj)->data[256], TEST_VALUE_2);
}
/*
* do_tx_xadd_range_no_uninit_check_abort -- call pmemobj_tx_xadd_range
* with
* POBJ_XADD_ASSUME_INITIALIZED flag, modify the value inside aborted
* transaction
*/
static void
do_tx_xadd_range_no_uninit_check_abort(PMEMobjpool *pop)
{
int ret;
TOID(struct object) obj;
TOID_ASSIGN(obj, do_tx_alloc(pop, TYPE_OBJ, 0));
TX_BEGIN(pop) {
ret = pmemobj_tx_xadd_range(obj.oid, VALUE_OFF, VALUE_SIZE,
POBJ_XADD_ASSUME_INITIALIZED);
UT_ASSERTeq(ret, 0);
ret = pmemobj_tx_xadd_range(obj.oid, DATA_OFF, DATA_SIZE,
POBJ_XADD_ASSUME_INITIALIZED);
UT_ASSERTeq(ret, 0);
D_RW(obj)->value = TEST_VALUE_1;
D_RW(obj)->data[256] = TEST_VALUE_2;
pmemobj_tx_abort(-1);
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_END
}
/*
* do_tx_add_range_overlapping -- call pmemobj_tx_add_range with overlapping
*/
static void
do_tx_add_range_overlapping(PMEMobjpool *pop)
{
TOID(struct overlap_object) obj;
TOID_ASSIGN(obj, do_tx_zalloc(pop, 1));
/*
* -+-+-+-+-
* +++++++++
*/
TX_BEGIN(pop) {
TX_ADD_FIELD(obj, data[1]);
D_RW(obj)->data[1] = 1;
TX_ADD_FIELD(obj, data[3]);
D_RW(obj)->data[3] = 3;
TX_ADD_FIELD(obj, data[5]);
D_RW(obj)->data[5] = 5;
TX_ADD_FIELD(obj, data[7]);
D_RW(obj)->data[7] = 7;
TX_ADD(obj);
memset(D_RW(obj)->data, 0xFF, OVERLAP_SIZE);
pmemobj_tx_abort(-1);
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_END
UT_ASSERT(util_is_zeroed(D_RO(obj)->data, OVERLAP_SIZE));
/*
* ++++----++++
* --++++++++--
*/
TX_BEGIN(pop) {
pmemobj_tx_add_range(obj.oid, 0, 4);
memset(D_RW(obj)->data + 0, 1, 4);
pmemobj_tx_add_range(obj.oid, 8, 4);
memset(D_RW(obj)->data + 8, 2, 4);
pmemobj_tx_add_range(obj.oid, 2, 8);
memset(D_RW(obj)->data + 2, 3, 8);
TX_ADD(obj);
memset(D_RW(obj)->data, 0xFF, OVERLAP_SIZE);
pmemobj_tx_abort(-1);
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_END
UT_ASSERT(util_is_zeroed(D_RO(obj)->data, OVERLAP_SIZE));
/*
* ++++----++++
* ----++++----
*/
TX_BEGIN(pop) {
pmemobj_tx_add_range(obj.oid, 0, 4);
memset(D_RW(obj)->data + 0, 1, 4);
pmemobj_tx_add_range(obj.oid, 8, 4);
memset(D_RW(obj)->data + 8, 2, 4);
pmemobj_tx_add_range(obj.oid, 4, 4);
memset(D_RW(obj)->data + 4, 3, 4);
TX_ADD(obj);
memset(D_RW(obj)->data, 0xFF, OVERLAP_SIZE);
pmemobj_tx_abort(-1);
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_END
UT_ASSERT(util_is_zeroed(D_RO(obj)->data, OVERLAP_SIZE));
/*
* ++++-++-++++
* --++++++++--
*/
TX_BEGIN(pop) {
pmemobj_tx_add_range(obj.oid, 0, 4);
memset(D_RW(obj)->data + 0, 1, 4);
pmemobj_tx_add_range(obj.oid, 5, 2);
memset(D_RW(obj)->data + 5, 2, 2);
pmemobj_tx_add_range(obj.oid, 8, 4);
memset(D_RW(obj)->data + 8, 3, 4);
pmemobj_tx_add_range(obj.oid, 2, 8);
memset(D_RW(obj)->data + 2, 4, 8);
TX_ADD(obj);
memset(D_RW(obj)->data, 0xFF, OVERLAP_SIZE);
pmemobj_tx_abort(-1);
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_END
UT_ASSERT(util_is_zeroed(D_RO(obj)->data, OVERLAP_SIZE));
/*
* ++++
* ++++
*/
TX_BEGIN(pop) {
pmemobj_tx_add_range(obj.oid, 0, 4);
memset(D_RW(obj)->data, 1, 4);
pmemobj_tx_add_range(obj.oid, 0, 4);
memset(D_RW(obj)->data, 2, 4);
pmemobj_tx_abort(-1);
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_END
UT_ASSERT(util_is_zeroed(D_RO(obj)->data, OVERLAP_SIZE));
}
/*
* do_tx_add_range_flag_merge_right -- call pmemobj_tx_add_range with
* overlapping ranges, but different flags
*/
static void
do_tx_add_range_flag_merge_right(PMEMobjpool *pop)
{
TOID(struct overlap_object) obj;
TOID_ASSIGN(obj, do_tx_zalloc(pop, 1));
/*
* ++++--------
* --++++++++--
*/
TX_BEGIN(pop) {
pmemobj_tx_xadd_range(obj.oid, 0, 4, POBJ_XADD_NO_FLUSH);
memset(D_RW(obj)->data, 1, 4);
pmemobj_tx_add_range(obj.oid, 2, 8);
memset(D_RW(obj)->data + 2, 3, 8);
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
}
/*
* do_tx_add_range_flag_merge_left -- call pmemobj_tx_add_range with
* overlapping ranges, but different flags
*/
static void
do_tx_add_range_flag_merge_left(PMEMobjpool *pop)
{
TOID(struct overlap_object) obj;
TOID_ASSIGN(obj, do_tx_zalloc(pop, 1));
/*
* --------++++
* --++++++++--
*/
TX_BEGIN(pop) {
pmemobj_tx_xadd_range(obj.oid, 8, 4, POBJ_XADD_NO_FLUSH);
memset(D_RW(obj)->data + 8, 2, 4);
pmemobj_tx_add_range(obj.oid, 2, 8);
memset(D_RW(obj)->data + 2, 3, 8);
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
}
/*
* do_tx_add_range_flag_merge_middle -- call pmemobj_tx_add_range with
* three adjacent ranges, but different flags
*/
static void
do_tx_add_range_flag_merge_middle(PMEMobjpool *pop)
{
TOID(struct overlap_object) obj;
TOID_ASSIGN(obj, do_tx_zalloc(pop, 1));
/*
* ++++----++++
* ----++++----
*/
TX_BEGIN(pop) {
pmemobj_tx_xadd_range(obj.oid, 0, 4, POBJ_XADD_NO_FLUSH);
memset(D_RW(obj)->data, 1, 4);
pmemobj_tx_xadd_range(obj.oid, 8, 4, POBJ_XADD_NO_FLUSH);
memset(D_RW(obj)->data + 8, 2, 4);
pmemobj_tx_add_range(obj.oid, 4, 4);
memset(D_RW(obj)->data + 4, 3, 4);
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
}
/*
* do_tx_add_range_reopen -- check for persistent memory leak in undo log set
*/
static void
do_tx_add_range_reopen(char *path)
{
for (int i = 0; i < REOPEN_COUNT; i++) {
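/* each pass verifies the values committed by the previous pass */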
PMEMobjpool *pop = pmemobj_open(path, LAYOUT_NAME);
UT_ASSERTne(pop, NULL);
TOID(struct root) root = POBJ_ROOT(pop, struct root);
UT_ASSERT(!TOID_IS_NULL(root));
UT_ASSERTeq(D_RO(root)->val, i);
for (int j = 0; j < ROOT_TAB_SIZE; j++)
UT_ASSERTeq(D_RO(root)->tab[j], i);
TX_BEGIN(pop) {
TX_SET(root, val, i + 1);
TX_ADD_FIELD(root, tab);
for (int j = 0; j < ROOT_TAB_SIZE; j++)
D_RW(root)->tab[j] = i + 1;
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
pmemobj_close(pop);
}
}
static void
do_tx_add_range_too_large(PMEMobjpool *pop)
{
TOID(struct object) obj;
TOID_ASSIGN(obj, do_tx_zalloc(pop, TYPE_OBJ));
TX_BEGIN(pop) {
pmemobj_tx_add_range(obj.oid, 0,
PMEMOBJ_MAX_ALLOC_SIZE + 1);
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_END
UT_ASSERTne(errno, 0);
}
static void
do_tx_add_range_zero(PMEMobjpool *pop)
{
TOID(struct object) obj;
TOID_ASSIGN(obj, do_tx_zalloc(pop, TYPE_OBJ));
TX_BEGIN(pop) {
pmemobj_tx_add_range(obj.oid, 0, 0);
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
UT_ASSERTne(errno, 0);
}
/*
* do_tx_add_range_wrong_uuid -- call pmemobj_tx_xadd_range with the
* POBJ_XADD_NO_ABORT flag and a wrong uuid
*/
static void
do_tx_add_range_wrong_uuid(PMEMobjpool *pop)
{
PMEMoid oid = do_tx_alloc(pop, TYPE_OBJ_WRONG_UUID, 0);
oid.pool_uuid_lo = ~oid.pool_uuid_lo;
TX_BEGIN(pop) {
pmemobj_tx_xadd_range(oid, 0, 0, 0);
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_END
UT_ASSERTeq(errno, EINVAL);
TX_BEGIN(pop) {
pmemobj_tx_xadd_range(oid, 0, 0, POBJ_XADD_NO_ABORT);
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
UT_ASSERTeq(errno, EINVAL);
TX_BEGIN(pop) {
pmemobj_tx_set_failure_behavior(POBJ_TX_FAILURE_RETURN);
pmemobj_tx_add_range(oid, 0, 0);
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
UT_ASSERTeq(errno, EINVAL);
TX_BEGIN(pop) {
pmemobj_tx_set_failure_behavior(POBJ_TX_FAILURE_RETURN);
pmemobj_tx_xadd_range(oid, 0, 0, 0);
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
UT_ASSERTeq(errno, EINVAL);
}
int
main(int argc, char *argv[])
{
START(argc, argv, "obj_tx_add_range");
util_init();
if (argc != 3)
UT_FATAL("usage: %s [file] [0|1]", argv[0]);
int do_reopen = atoi(argv[2]);
PMEMobjpool *pop;
if ((pop = pmemobj_create(argv[1], LAYOUT_NAME, PMEMOBJ_MIN_POOL * 2,
S_IWUSR | S_IRUSR)) == NULL)
UT_FATAL("!pmemobj_create");
if (do_reopen) {
pmemobj_close(pop);
do_tx_add_range_reopen(argv[1]);
} else {
do_tx_add_range_commit(pop);
VALGRIND_WRITE_STATS;
do_tx_add_range_abort(pop);
VALGRIND_WRITE_STATS;
do_tx_add_range_commit_nested(pop);
VALGRIND_WRITE_STATS;
do_tx_add_range_abort_nested(pop);
VALGRIND_WRITE_STATS;
do_tx_add_range_abort_after_nested(pop);
VALGRIND_WRITE_STATS;
do_tx_add_range_abort_after_commit(pop);
VALGRIND_WRITE_STATS;
do_tx_add_range_twice_commit(pop);
VALGRIND_WRITE_STATS;
do_tx_add_range_twice_abort(pop);
VALGRIND_WRITE_STATS;
do_tx_add_range_alloc_commit(pop);
VALGRIND_WRITE_STATS;
do_tx_add_range_alloc_abort(pop);
VALGRIND_WRITE_STATS;
do_tx_add_range_overlapping(pop);
VALGRIND_WRITE_STATS;
do_tx_add_range_too_large(pop);
VALGRIND_WRITE_STATS;
do_tx_add_huge_range_abort(pop);
VALGRIND_WRITE_STATS;
do_tx_add_range_zero(pop);
VALGRIND_WRITE_STATS;
do_tx_xadd_range_no_snapshot_commit(pop);
VALGRIND_WRITE_STATS;
do_tx_xadd_range_no_snapshot_abort(pop);
VALGRIND_WRITE_STATS;
do_tx_xadd_range_twice_no_snapshot_abort(pop);
VALGRIND_WRITE_STATS;
do_tx_xadd_range_no_snapshot_fields(pop);
VALGRIND_WRITE_STATS;
do_tx_xadd_range_no_uninit_check_commit(pop);
VALGRIND_WRITE_STATS;
do_tx_xadd_range_no_uninit_check_commit_uninit(pop);
VALGRIND_WRITE_STATS;
do_tx_xadd_range_no_uninit_check_commit_part_uninit(pop);
VALGRIND_WRITE_STATS;
do_tx_xadd_range_no_uninit_check_abort(pop);
VALGRIND_WRITE_STATS;
do_tx_add_range_no_uninit_check_commit_no_flag(pop);
VALGRIND_WRITE_STATS;
do_tx_add_range_wrong_uuid(pop);
VALGRIND_WRITE_STATS;
do_tx_add_range_flag_merge_left(pop);
VALGRIND_WRITE_STATS;
do_tx_add_range_flag_merge_right(pop);
VALGRIND_WRITE_STATS;
do_tx_add_range_flag_merge_middle(pop);
VALGRIND_WRITE_STATS;
do_tx_xadd_range_no_flush_commit(pop);
pmemobj_close(pop);
}
DONE(NULL);
}
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_critnib/TESTS.py
#!../env.py
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2020, Intel Corporation
#
import testframework as t
from testframework import granularity as g
@g.no_testdir()
class TEST0(t.Test):
test_type = t.Medium
def run(self, ctx):
ctx.exec('obj_critnib')
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_critnib/obj_critnib.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2019, Intel Corporation */
/*
* obj_critnib.c -- unit test for critnib hash table
*/
#include <errno.h>
#include "critnib.h"
#include "unittest.h"
#include "util.h"
#include "../libpmemobj/obj.h"
#define TEST_INSERTS 100
#define TEST_VAL(x) ((void *)((uintptr_t)(x)))
static int Rcounter_malloc;
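/*
 * __wrap_malloc -- fail the critnib allocation (call 0) and the tab
 * allocation (call 2); all other calls go through to the real malloc.
 * Used by test_critnib_new_delete() to exercise the ENOMEM paths.
 */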
static void *
__wrap_malloc(size_t size)
{
switch (util_fetch_and_add32(&Rcounter_malloc, 1)) {
case 1: /* internal out_err malloc */
default:
return malloc(size);
case 2: /* tab malloc */
case 0: /* critnib malloc */
return NULL;
}
}
static void
test_critnib_new_delete(void)
{
struct critnib *c = NULL;
/* critnib malloc fail */
c = critnib_new();
UT_ASSERTeq(c, NULL);
/* first insert malloc fail */
c = critnib_new();
UT_ASSERTeq(critnib_insert(c, 0, NULL), ENOMEM);
critnib_delete(c);
/* all ok */
c = critnib_new();
UT_ASSERTne(c, NULL);
critnib_delete(c);
}
static void
test_insert_get_remove(void)
{
struct critnib *c = critnib_new();
UT_ASSERTne(c, NULL);
for (unsigned i = 0; i < TEST_INSERTS; ++i)
UT_ASSERTeq(critnib_insert(c, i, TEST_VAL(i)), 0);
for (unsigned i = 0; i < TEST_INSERTS; ++i)
UT_ASSERTeq(critnib_get(c, i), TEST_VAL(i));
for (unsigned i = 0; i < TEST_INSERTS; ++i)
UT_ASSERTeq(critnib_remove(c, i), TEST_VAL(i));
for (unsigned i = 0; i < TEST_INSERTS; ++i)
UT_ASSERTeq(critnib_remove(c, i), NULL);
for (unsigned i = 0; i < TEST_INSERTS; ++i)
UT_ASSERTeq(critnib_get(c, i), NULL);
critnib_delete(c);
}
static uint64_t
rnd15()
{
return rand() & 0x7fff; /* Windows provides only 15 bits */
}
static uint64_t
rnd64()
{
return rnd15()
| rnd15() << 15
| rnd15() << 30
| rnd15() << 45
| rnd15() << 60;
}
static void
test_smoke()
{
struct critnib *c = critnib_new();
critnib_insert(c, 123, (void *)456);
UT_ASSERTeq(critnib_get(c, 123), (void *)456);
UT_ASSERTeq(critnib_get(c, 124), 0);
critnib_delete(c);
}
static void
test_key0()
{
struct critnib *c = critnib_new();
critnib_insert(c, 1, (void *)1);
critnib_insert(c, 0, (void *)2);
critnib_insert(c, 65536, (void *)3);
UT_ASSERTeq(critnib_get(c, 1), (void *)1);
UT_ASSERTeq(critnib_remove(c, 1), (void *)1);
UT_ASSERTeq(critnib_get(c, 0), (void *)2);
UT_ASSERTeq(critnib_remove(c, 0), (void *)2);
UT_ASSERTeq(critnib_get(c, 65536), (void *)3);
UT_ASSERTeq(critnib_remove(c, 65536), (void *)3);
critnib_delete(c);
}
static void
test_1to1000()
{
struct critnib *c = critnib_new();
for (uint64_t i = 0; i < 1000; i++)
critnib_insert(c, i, (void *)i);
for (uint64_t i = 0; i < 1000; i++)
UT_ASSERTeq(critnib_get(c, i), (void *)i);
critnib_delete(c);
}
static void
test_insert_delete()
{
struct critnib *c = critnib_new();
for (uint64_t i = 0; i < 10000; i++) {
UT_ASSERTeq(critnib_get(c, i), (void *)0);
critnib_insert(c, i, (void *)i);
UT_ASSERTeq(critnib_get(c, i), (void *)i);
UT_ASSERTeq(critnib_remove(c, i), (void *)i);
UT_ASSERTeq(critnib_get(c, i), (void *)0);
}
critnib_delete(c);
}
static void
test_insert_bulk_delete()
{
struct critnib *c = critnib_new();
for (uint64_t i = 0; i < 10000; i++) {
UT_ASSERTeq(critnib_get(c, i), (void *)0);
critnib_insert(c, i, (void *)i);
UT_ASSERTeq(critnib_get(c, i), (void *)i);
}
for (uint64_t i = 0; i < 10000; i++) {
UT_ASSERTeq(critnib_get(c, i), (void *)i);
UT_ASSERTeq(critnib_remove(c, i), (void *)i);
UT_ASSERTeq(critnib_get(c, i), (void *)0);
}
critnib_delete(c);
}
static void
test_ffffffff_and_friends()
{
static uint64_t vals[] = {
0,
0x7fffffff,
0x80000000,
0xffffffff,
0x7fffffffFFFFFFFF,
0x8000000000000000,
0xFfffffffFFFFFFFF,
};
struct critnib *c = critnib_new();
for (int i = 0; i < ARRAY_SIZE(vals); i++)
critnib_insert(c, vals[i], (void *)~vals[i]);
for (int i = 0; i < ARRAY_SIZE(vals); i++)
UT_ASSERTeq(critnib_get(c, vals[i]), (void *)~vals[i]);
for (int i = 0; i < ARRAY_SIZE(vals); i++)
UT_ASSERTeq(critnib_remove(c, vals[i]), (void *)~vals[i]);
critnib_delete(c);
}
static void
test_insert_delete_random()
{
struct critnib *c = critnib_new();
for (uint64_t i = 0; i < 10000; i++) {
uint64_t v = rnd64();
critnib_insert(c, v, (void *)v);
UT_ASSERTeq(critnib_get(c, v), (void *)v);
UT_ASSERTeq(critnib_remove(c, v), (void *)v);
UT_ASSERTeq(critnib_get(c, v), 0);
}
critnib_delete(c);
}
static void
test_le_basic()
{
struct critnib *c = critnib_new();
#define INS(x) critnib_insert(c, (x), (void *)(x))
INS(1);
INS(2);
INS(3);
INS(0);
INS(4);
INS(0xf);
INS(0xe);
INS(0x11);
INS(0x12);
INS(0x20);
#define GET_SAME(x) UT_ASSERTeq(critnib_get(c, (x)), (void *)(x))
#define GET_NULL(x) UT_ASSERTeq(critnib_get(c, (x)), NULL)
GET_NULL(122);
GET_SAME(1);
GET_SAME(2);
GET_SAME(3);
GET_SAME(4);
GET_NULL(5);
GET_SAME(0x11);
GET_SAME(0x12);
#define LE(x, y) UT_ASSERTeq(critnib_find_le(c, (x)), (void *)(y))
LE(1, 1);
LE(2, 2);
LE(5, 4);
LE(6, 4);
LE(0x11, 0x11);
LE(0x15, 0x12);
LE(0xfffffff, 0x20);
#undef INS
#undef GET_SAME
#undef GET_NULL
#undef LE
critnib_delete(c);
}
/*
* Spread the bits somehow -- more than a few (4 here) children per node
* is unlikely to bring interested cases. This function leaves two bits
* per nib, producing taller trees.
*/
static uint64_t
expand_bits(int y)
{
uint64_t x = (uint64_t)y;
return (x & 0xc000) << 14 | (x & 0x3000) << 12 | (x & 0x0c00) << 10 |
(x & 0x0300) << 8 | (x & 0x00c0) << 6 | (x & 0x0030) << 4 |
(x & 0x000c) << 2 | (x & 0x0003);
}
static void
test_le_brute()
{
struct critnib *c = critnib_new();
char ws[65536] = {
0,
};
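/*
 * ws[] is a shadow map of which expanded keys are currently inserted;
 * every critnib_find_le() result is cross-checked against a linear
 * scan for the highest occupied slot not greater than w.
 */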
for (uint32_t cnt = 0; cnt < 1024; cnt++) {
int w = rand() & 0xffff;
if (ws[w]) {
critnib_remove(c, expand_bits(w));
ws[w] = 0;
} else {
critnib_insert(c, expand_bits(w),
(void *)expand_bits(w));
ws[w] = 1;
}
for (uint32_t cnt2 = 0; cnt2 < 1024; cnt2++) {
w = rand() & 0xffff;
int v;
for (v = w; v >= 0 && !ws[v]; v--)
;
uint64_t res =
(uint64_t)critnib_find_le(c, expand_bits(w));
uint64_t exp = (v >= 0) ? expand_bits(v) : 0;
UT_ASSERTeq(res, exp);
}
}
critnib_delete(c);
}
static void
test_same_only()
{
struct critnib *c = critnib_new();
critnib_insert(c, 123, (void *)456);
critnib_insert(c, 123, (void *)457);
UT_ASSERTeq(critnib_get(c, 123), (void *)456);
UT_ASSERTeq(critnib_get(c, 124), 0);
critnib_delete(c);
}
static void
test_same_two()
{
struct critnib *c = critnib_new();
critnib_insert(c, 122, (void *)111);
critnib_insert(c, 123, (void *)456);
critnib_insert(c, 123, (void *)457);
UT_ASSERTeq(critnib_get(c, 122), (void *)111);
UT_ASSERTeq(critnib_get(c, 123), (void *)456);
UT_ASSERTeq(critnib_get(c, 124), 0);
critnib_delete(c);
}
static void
test_remove_nonexist()
{
struct critnib *c = critnib_new();
/* root */
UT_ASSERTeq(critnib_remove(c, 1), NULL);
/* in a leaf node */
critnib_insert(c, 2, (void *)2);
UT_ASSERTeq(critnib_remove(c, 1), NULL);
/* in a non-leaf node */
critnib_insert(c, 3, (void *)3);
UT_ASSERTeq(critnib_remove(c, 1), NULL);
critnib_delete(c);
}
static void
test_fault_injection()
{
if (!pmemobj_fault_injection_enabled())
return;
struct critnib *c = critnib_new();
pmemobj_inject_fault_at(PMEM_MALLOC, 1, "alloc_node");
/*
* The first critnib_insert() call should succeed
* - it sets the critnib's root.
*/
int ret = critnib_insert(c, 1 /* any value */, NULL);
UT_ASSERTeq(ret, 0);
/*
* The second critnib_insert() call should fail
* in the alloc_node() function.
*/
ret = critnib_insert(c, 2 /* any value other than the previous one */,
NULL);
UT_ASSERTne(ret, 0);
UT_ASSERTeq(errno, ENOMEM);
}
int
main(int argc, char *argv[])
{
START(argc, argv, "obj_critnib");
set_func_malloc(__wrap_malloc);
test_critnib_new_delete();
test_insert_get_remove();
test_fault_injection();
test_smoke();
test_key0();
test_1to1000();
test_insert_delete();
test_insert_bulk_delete();
test_ffffffff_and_friends();
test_insert_delete_random();
test_le_basic();
test_le_brute();
test_same_only();
test_same_two();
test_remove_nonexist();
DONE(NULL);
}
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_recreate/obj_recreate.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2017, Intel Corporation */
/*
* obj_recreate.c -- recreate pool on dirty file and check consistency
*/
#include "unittest.h"
POBJ_LAYOUT_BEGIN(recreate);
POBJ_LAYOUT_ROOT(recreate, struct root);
POBJ_LAYOUT_TOID(recreate, struct foo);
POBJ_LAYOUT_END(recreate);
struct foo {
int bar;
};
struct root {
TOID(struct foo) foo;
};
#define LAYOUT_NAME "obj_recreate"
#define N PMEMOBJ_MIN_POOL
int
main(int argc, char *argv[])
{
START(argc, argv, "obj_recreate");
/* root doesn't count */
UT_COMPILE_ERROR_ON(POBJ_LAYOUT_TYPES_NUM(recreate) != 1);
if (argc < 2)
UT_FATAL("usage: %s file-name [trunc]", argv[0]);
const char *path = argv[1];
PMEMobjpool *pop = NULL;
/* create pool 2*N */
pop = pmemobj_create(path, LAYOUT_NAME, 2 * N, S_IWUSR | S_IRUSR);
if (pop == NULL)
UT_FATAL("!pmemobj_create: %s", path);
/* allocate 1.5*N */
TOID(struct root) root = (TOID(struct root))pmemobj_root(pop,
(size_t)(1.5 * N));
/* use root object for something */
POBJ_NEW(pop, &D_RW(root)->foo, struct foo, NULL, NULL);
pmemobj_close(pop);
int fd = OPEN(path, O_RDWR);
if (argc >= 3 && strcmp(argv[2], "trunc") == 0) {
UT_OUT("truncating");
/* shrink file to N */
FTRUNCATE(fd, N);
}
size_t zero_len = Ut_pagesize;
/* zero first page */
void *p = MMAP(NULL, zero_len, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
memset(p, 0, zero_len);
MUNMAP(p, zero_len);
CLOSE(fd);
/* create pool on existing file */
pop = pmemobj_create(path, LAYOUT_NAME, 0, S_IWUSR | S_IRUSR);
if (pop == NULL)
UT_FATAL("!pmemobj_create: %s", path);
/* try to allocate 0.5*N */
root = (TOID(struct root))pmemobj_root(pop, (size_t)(0.5 * N));
if (TOID_IS_NULL(root))
UT_FATAL("couldn't allocate root object");
/* validate root object is empty */
if (!TOID_IS_NULL(D_RW(root)->foo))
UT_FATAL("root object is already filled after pmemobj_create!");
pmemobj_close(pop);
DONE(NULL);
}
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/util_ctl/util_ctl.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2020, Intel Corporation */
/*
* util_ctl.c -- tests for the control module
*/
#include "unittest.h"
#include "ctl.h"
#include "out.h"
#include "pmemcommon.h"
#include "fault_injection.h"
#define LOG_PREFIX "ut"
#define LOG_LEVEL_VAR "TEST_LOG_LEVEL"
#define LOG_FILE_VAR "TEST_LOG_FILE"
#define MAJOR_VERSION 1
#define MINOR_VERSION 0
struct pool {
struct ctl *ctl;
};
static char *testconfig_path;
static int test_config_written;
static int
CTL_READ_HANDLER(test_rw)(void *ctx, enum ctl_query_source source,
void *arg, struct ctl_indexes *indexes)
{
UT_ASSERTeq(source, CTL_QUERY_PROGRAMMATIC);
int *arg_rw = arg;
*arg_rw = 0;
return 0;
}
static int
CTL_WRITE_HANDLER(test_rw)(void *ctx, enum ctl_query_source source,
void *arg, struct ctl_indexes *indexes)
{
int *arg_rw = arg;
*arg_rw = 1;
test_config_written++;
return 0;
}
static struct ctl_argument CTL_ARG(test_rw) = CTL_ARG_INT;
static int
CTL_WRITE_HANDLER(test_wo)(void *ctx, enum ctl_query_source source,
void *arg, struct ctl_indexes *indexes)
{
int *arg_wo = arg;
*arg_wo = 1;
test_config_written++;
return 0;
}
static struct ctl_argument CTL_ARG(test_wo) = CTL_ARG_INT;
#define TEST_CONFIG_VALUE "abcd"
static int
CTL_WRITE_HANDLER(test_config)(void *ctx, enum ctl_query_source source,
void *arg, struct ctl_indexes *indexes)
{
UT_ASSERTeq(source, CTL_QUERY_CONFIG_INPUT);
char *config_value = arg;
UT_ASSERTeq(strcmp(config_value, TEST_CONFIG_VALUE), 0);
test_config_written++;
return 0;
}
static struct ctl_argument CTL_ARG(test_config) = CTL_ARG_STRING(8);
struct complex_arg {
int a;
char b[5];
long long c;
int d;
};
#define COMPLEX_ARG_TEST_A 12345
#define COMPLEX_ARG_TEST_B "abcd"
#define COMPLEX_ARG_TEST_C 3147483647
#define COMPLEX_ARG_TEST_D 1
static int
CTL_WRITE_HANDLER(test_config_complex_arg)(void *ctx,
enum ctl_query_source source, void *arg,
struct ctl_indexes *indexes)
{
UT_ASSERTeq(source, CTL_QUERY_CONFIG_INPUT);
struct complex_arg *c = arg;
UT_ASSERTeq(c->a, COMPLEX_ARG_TEST_A);
UT_ASSERT(strcmp(COMPLEX_ARG_TEST_B, c->b) == 0);
UT_ASSERTeq(c->c, COMPLEX_ARG_TEST_C);
UT_ASSERTeq(c->d, COMPLEX_ARG_TEST_D);
test_config_written++;
return 0;
}
static struct ctl_argument CTL_ARG(test_config_complex_arg) = {
.dest_size = sizeof(struct complex_arg),
.parsers = {
CTL_ARG_PARSER_STRUCT(struct complex_arg, a, ctl_arg_integer),
CTL_ARG_PARSER_STRUCT(struct complex_arg, b, ctl_arg_string),
CTL_ARG_PARSER_STRUCT(struct complex_arg, c, ctl_arg_integer),
CTL_ARG_PARSER_STRUCT(struct complex_arg, d, ctl_arg_boolean),
CTL_ARG_PARSER_END
}
};
static int
CTL_READ_HANDLER(test_ro)(void *ctx, enum ctl_query_source source,
void *arg, struct ctl_indexes *indexes)
{
UT_ASSERTeq(source, CTL_QUERY_PROGRAMMATIC);
int *arg_ro = arg;
*arg_ro = 0;
return 0;
}
static int
CTL_READ_HANDLER(index_value)(void *ctx, enum ctl_query_source source,
void *arg, struct ctl_indexes *indexes)
{
UT_ASSERTeq(source, CTL_QUERY_PROGRAMMATIC);
long *index_value = arg;
struct ctl_index *idx = PMDK_SLIST_FIRST(indexes);
UT_ASSERT(strcmp(idx->name, "test_index") == 0);
*index_value = idx->value;
return 0;
}
static int
CTL_RUNNABLE_HANDLER(test_runnable)(void *ctx,
enum ctl_query_source source,
void *arg, struct ctl_indexes *indexes)
{
UT_ASSERTeq(source, CTL_QUERY_PROGRAMMATIC);
int *arg_runnable = arg;
*arg_runnable = 0;
return 0;
}
static const struct ctl_node CTL_NODE(test_index)[] = {
CTL_LEAF_RO(index_value),
CTL_NODE_END
};
static const struct ctl_node CTL_NODE(debug)[] = {
CTL_LEAF_RO(test_ro),
CTL_LEAF_WO(test_wo),
CTL_LEAF_RUNNABLE(test_runnable),
CTL_LEAF_RW(test_rw),
CTL_INDEXED(test_index),
CTL_LEAF_WO(test_config),
CTL_LEAF_WO(test_config_complex_arg),
CTL_NODE_END
};
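/*
 * the nodes above are reachable as "debug.<leaf>" queries; the indexed
 * entry is addressed as "debug.<number>.index_value", with the number
 * passed to the handler through the ctl_indexes list
 */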
static int
CTL_WRITE_HANDLER(gtest_config)(void *ctx, enum ctl_query_source source,
void *arg, struct ctl_indexes *indexes)
{
UT_ASSERTeq(source, CTL_QUERY_CONFIG_INPUT);
char *config_value = arg;
UT_ASSERTeq(strcmp(config_value, TEST_CONFIG_VALUE), 0);
test_config_written = 1;
return 0;
}
static struct ctl_argument CTL_ARG(gtest_config) = CTL_ARG_STRING(8);
static int
CTL_READ_HANDLER(gtest_ro)(void *ctx, enum ctl_query_source source,
void *arg, struct ctl_indexes *indexes)
{
UT_ASSERTeq(source, CTL_QUERY_PROGRAMMATIC);
int *arg_ro = arg;
*arg_ro = 0;
return 0;
}
static const struct ctl_node CTL_NODE(global_debug)[] = {
CTL_LEAF_RO(gtest_ro),
CTL_LEAF_WO(gtest_config),
CTL_NODE_END
};
static int
util_ctl_get(struct pool *pop, const char *name, void *arg)
{
LOG(3, "pop %p name %s arg %p", pop, name, arg);
return ctl_query(pop ? pop->ctl : NULL, pop,
CTL_QUERY_PROGRAMMATIC, name, CTL_QUERY_READ, arg);
}
static int
util_ctl_set(struct pool *pop, const char *name, void *arg)
{
LOG(3, "pop %p name %s arg %p", pop, name, arg);
return ctl_query(pop ? pop->ctl : NULL, pop,
CTL_QUERY_PROGRAMMATIC, name, CTL_QUERY_WRITE, arg);
}
static int
util_ctl_exec(struct pool *pop, const char *name, void *arg)
{
LOG(3, "pop %p name %s arg %p", pop, name, arg);
return ctl_query(pop ? pop->ctl : NULL, pop,
CTL_QUERY_PROGRAMMATIC, name, CTL_QUERY_RUNNABLE, arg);
}
static void
test_ctl_parser(struct pool *pop)
{
errno = 0;
int ret;
ret = util_ctl_get(pop, NULL, NULL);
UT_ASSERTne(ret, 0);
UT_ASSERTne(errno, 0);
errno = 0;
ret = util_ctl_get(pop, "a.b.c.d", NULL);
UT_ASSERTne(ret, 0);
UT_ASSERTne(errno, 0);
errno = 0;
ret = util_ctl_get(pop, "", NULL);
UT_ASSERTne(ret, 0);
UT_ASSERTne(errno, 0);
errno = 0;
ret = util_ctl_get(pop, "debug.", NULL);
UT_ASSERTne(ret, 0);
UT_ASSERTne(errno, 0);
errno = 0;
ret = util_ctl_get(pop, ".", NULL);
UT_ASSERTne(ret, 0);
UT_ASSERTne(errno, 0);
errno = 0;
ret = util_ctl_get(pop, "..", NULL);
UT_ASSERTne(ret, 0);
UT_ASSERTne(errno, 0);
errno = 0;
ret = util_ctl_get(pop, "1.2.3.4", NULL);
UT_ASSERTne(ret, 0);
UT_ASSERTne(errno, 0);
errno = 0;
ret = util_ctl_get(pop, "debug.1.", NULL);
UT_ASSERTne(ret, 0);
UT_ASSERTne(errno, 0);
errno = 0;
ret = util_ctl_get(pop, "debug.1.invalid", NULL);
UT_ASSERTne(ret, 0);
UT_ASSERTne(errno, 0);
/* test methods set read to 0 and write to 1 if successful */
int arg_read = 1;
int arg_write = 0;
errno = 0;
/* correct name, wrong args */
ret = util_ctl_get(pop, "debug.test_rw", NULL);
UT_ASSERTne(ret, 0);
UT_ASSERTne(errno, 0);
errno = 0;
ret = util_ctl_set(pop, "debug.test_rw", NULL);
UT_ASSERTne(ret, 0);
UT_ASSERTne(errno, 0);
errno = 0;
ret = util_ctl_get(pop, "debug.test_wo", &arg_read);
UT_ASSERTne(ret, 0);
UT_ASSERTne(errno, 0);
errno = 0;
ret = util_ctl_get(pop, "debug.test_wo", NULL);
UT_ASSERTne(ret, 0);
UT_ASSERTne(errno, 0);
errno = 0;
ret = util_ctl_set(pop, "debug.test_ro", &arg_write);
UT_ASSERTne(ret, 0);
UT_ASSERTne(errno, 0);
errno = 0;
ret = util_ctl_set(pop, "debug.test_ro", NULL);
UT_ASSERTne(ret, 0);
UT_ASSERTne(errno, 0);
errno = 0;
ret = util_ctl_get(pop, "debug.test_rw", &arg_read);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(arg_read, 0);
UT_ASSERTeq(arg_write, 0);
UT_ASSERTeq(errno, 0);
ret = util_ctl_set(pop, "debug.test_rw", &arg_write);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(arg_read, 0);
UT_ASSERTeq(arg_write, 1);
arg_read = 1;
arg_write = 0;
ret = util_ctl_get(pop, "debug.test_ro", &arg_read);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(arg_read, 0);
UT_ASSERTeq(arg_write, 0);
arg_read = 1;
arg_write = 0;
ret = util_ctl_set(pop, "debug.test_wo", &arg_write);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(arg_read, 1);
UT_ASSERTeq(arg_write, 1);
long index_value = 0;
ret = util_ctl_get(pop, "debug.5.index_value", &index_value);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(index_value, 5);
ret = util_ctl_get(pop, "debug.10.index_value", &index_value);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(index_value, 10);
arg_read = 1;
arg_write = 1;
int arg_runnable = 1;
ret = util_ctl_exec(pop, "debug.test_runnable", &arg_runnable);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(arg_read, 1);
UT_ASSERTeq(arg_write, 1);
UT_ASSERTeq(arg_runnable, 0);
}
static void
test_string_config(struct pool *pop)
{
UT_ASSERTne(pop, NULL);
int ret;
test_config_written = 0;
ret = ctl_load_config_from_string(pop->ctl, pop, "");
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(test_config_written, 0);
test_config_written = 0;
ret = ctl_load_config_from_string(pop->ctl, pop, ";;");
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(test_config_written, 0);
test_config_written = 0;
ret = ctl_load_config_from_string(pop->ctl, pop, ";=;");
UT_ASSERTeq(ret, -1);
UT_ASSERTeq(test_config_written, 0);
test_config_written = 0;
ret = ctl_load_config_from_string(pop->ctl, pop, "=");
UT_ASSERTeq(ret, -1);
UT_ASSERTeq(test_config_written, 0);
test_config_written = 0;
ret = ctl_load_config_from_string(pop->ctl, pop,
"debug.test_wo=");
UT_ASSERTeq(ret, -1);
UT_ASSERTeq(test_config_written, 0);
test_config_written = 0;
ret = ctl_load_config_from_string(pop->ctl, pop, "=b");
UT_ASSERTeq(ret, -1);
UT_ASSERTeq(test_config_written, 0);
test_config_written = 0;
ret = ctl_load_config_from_string(pop->ctl, pop,
"debug.test_wo=111=222");
UT_ASSERTeq(ret, -1);
UT_ASSERTeq(test_config_written, 0);
test_config_written = 0;
ret = ctl_load_config_from_string(pop->ctl, pop,
"debug.test_wo=333;debug.test_rw=444;");
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(test_config_written, 2);
test_config_written = 0;
ret = ctl_load_config_from_string(pop->ctl, pop,
"debug.test_config="TEST_CONFIG_VALUE";");
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(test_config_written, 1);
}
static void
config_file_create(const char *buf)
{
/* the test script will take care of removing this file for us */
FILE *f = os_fopen(testconfig_path, "w+");
fwrite(buf, sizeof(char), strlen(buf), f);
fclose(f);
}
static void
create_and_test_file_config(struct pool *pop, const char *buf, int ret,
int result)
{
config_file_create(buf);
test_config_written = 0;
int r = ctl_load_config_from_file(pop ? pop->ctl : NULL,
pop, testconfig_path);
UT_ASSERTeq(r, ret);
UT_ASSERTeq(test_config_written, result);
}
static void
test_too_large_file(struct pool *pop)
{
char *too_large_buf = calloc(1, 1 << 21);
UT_ASSERTne(too_large_buf, NULL);
memset(too_large_buf, 0xc, (1 << 21) - 1);
config_file_create(too_large_buf);
int ret = ctl_load_config_from_file(pop->ctl, pop,
testconfig_path);
UT_ASSERTne(ret, 0);
free(too_large_buf);
}
static void
test_file_config(struct pool *pop)
{
create_and_test_file_config(pop,
"debug.test_config="TEST_CONFIG_VALUE";", 0, 1);
create_and_test_file_config(pop,
"debug.test_config="TEST_CONFIG_VALUE";"
"debug.test_config="TEST_CONFIG_VALUE";", 0, 2);
create_and_test_file_config(pop,
"#this is a comment\n"
"debug.test_config="TEST_CONFIG_VALUE";", 0, 1);
create_and_test_file_config(pop,
"debug.#this is a comment\n"
"test_config#this is a comment\n"
"="TEST_CONFIG_VALUE";", 0, 1);
create_and_test_file_config(pop,
"debug.test_config="TEST_CONFIG_VALUE";#this is a comment",
0, 1);
create_and_test_file_config(pop,
"\n\n\ndebug\n.\ntest\t_\tconfig="TEST_CONFIG_VALUE";\n", 0, 1);
create_and_test_file_config(pop,
" d e b u g . t e s t _ c o n f i g = "TEST_CONFIG_VALUE";",
0, 1);
create_and_test_file_config(pop,
"#debug.test_config="TEST_CONFIG_VALUE";", 0, 0);
create_and_test_file_config(pop,
"debug.#this is a comment\n"
"test_config#this is a not properly terminated comment"
"="TEST_CONFIG_VALUE";", -1, 0);
create_and_test_file_config(pop,
"invalid", -1, 0);
create_and_test_file_config(pop,
"", 0, 0);
create_and_test_file_config(pop,
"debug.test_config_complex_arg=;", -1, 0);
create_and_test_file_config(pop,
"debug.test_config_complex_arg=1,2,3;", -1, 0);
create_and_test_file_config(pop,
"debug.test_config_complex_arg=12345,abcd,,1;", -1, 0);
create_and_test_file_config(pop,
"debug.test_config_complex_arg=12345,abcd,3147483647,1;", 0, 1);
create_and_test_file_config(NULL,
"global_debug.gtest_config="TEST_CONFIG_VALUE";", 0, 1);
create_and_test_file_config(NULL, "private.missing.query=1;"
"global_debug.gtest_config="TEST_CONFIG_VALUE";", 0, 1);
test_too_large_file(pop);
int ret = ctl_load_config_from_file(pop->ctl,
pop, "does_not_exist");
UT_ASSERTne(ret, 0);
}
static void
test_ctl_global_namespace(struct pool *pop)
{
int arg_read = 1;
int ret = util_ctl_get(pop, "global_debug.gtest_ro", &arg_read);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(arg_read, 0);
}
static void
test_ctl_arg_parsers()
{
char *input;
input = "";
int boolean = -1;
int ret = ctl_arg_boolean(input, &boolean, sizeof(int));
UT_ASSERTeq(ret, -1);
UT_ASSERTeq(boolean, -1);
input = "abcdefgh";
boolean = -1;
ret = ctl_arg_boolean(input, &boolean, sizeof(int));
UT_ASSERTeq(ret, -1);
UT_ASSERTeq(boolean, -1);
input = "-999";
boolean = -1;
ret = ctl_arg_boolean(input, &boolean, sizeof(int));
UT_ASSERTeq(ret, -1);
UT_ASSERTeq(boolean, -1);
input = "N";
boolean = -1;
ret = ctl_arg_boolean(input, &boolean, sizeof(int));
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(boolean, 0);
input = "0";
boolean = -1;
ret = ctl_arg_boolean(input, &boolean, sizeof(int));
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(boolean, 0);
input = "yes";
boolean = -1;
ret = ctl_arg_boolean(input, &boolean, sizeof(int));
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(boolean, 1);
input = "Yes";
boolean = -1;
ret = ctl_arg_boolean(input, &boolean, sizeof(int));
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(boolean, 1);
input = "1";
boolean = -1;
ret = ctl_arg_boolean(input, &boolean, sizeof(int));
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(boolean, 1);
input = "1234";
boolean = -1;
ret = ctl_arg_boolean(input, &boolean, sizeof(int));
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(boolean, 1);
input = "";
int small_int = -1;
ret = ctl_arg_integer(input, &small_int, sizeof(small_int));
UT_ASSERTeq(ret, -1);
UT_ASSERTeq(small_int, -1);
input = "abcd";
small_int = -1;
ret = ctl_arg_integer(input, &small_int, sizeof(small_int));
UT_ASSERTeq(ret, -1);
UT_ASSERTeq(small_int, -1);
input = "12345678901234567890";
small_int = -1;
ret = ctl_arg_integer(input, &small_int, sizeof(small_int));
UT_ASSERTeq(ret, -1);
UT_ASSERTeq(small_int, -1);
input = "-12345678901234567890";
small_int = -1;
ret = ctl_arg_integer(input, &small_int, sizeof(small_int));
UT_ASSERTeq(ret, -1);
UT_ASSERTeq(small_int, -1);
input = "2147483648"; /* INT_MAX + 1 */
small_int = -1;
ret = ctl_arg_integer(input, &small_int, sizeof(small_int));
UT_ASSERTeq(ret, -1);
UT_ASSERTeq(small_int, -1);
input = "-2147483649"; /* INT_MIN - 2 */
small_int = -1;
ret = ctl_arg_integer(input, &small_int, sizeof(small_int));
UT_ASSERTeq(ret, -1);
UT_ASSERTeq(small_int, -1);
input = "0";
small_int = -1;
ret = ctl_arg_integer(input, &small_int, sizeof(small_int));
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(small_int, 0);
input = "500";
small_int = -1;
ret = ctl_arg_integer(input, &small_int, sizeof(small_int));
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(small_int, 500);
input = "-500";
small_int = -1;
ret = ctl_arg_integer(input, &small_int, sizeof(small_int));
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(small_int, -500);
input = "";
long long ll_int = -1;
ret = ctl_arg_integer(input, &ll_int, sizeof(ll_int));
UT_ASSERTeq(ret, -1);
UT_ASSERTeq(ll_int, -1);
input = "12345678901234567890";
ll_int = -1;
ret = ctl_arg_integer(input, &ll_int, sizeof(ll_int));
UT_ASSERTeq(ret, -1);
UT_ASSERTeq(ll_int, -1);
input = "-12345678901234567890";
ll_int = -1;
ret = ctl_arg_integer(input, &ll_int, sizeof(ll_int));
UT_ASSERTeq(ret, -1);
UT_ASSERTeq(ll_int, -1);
input = "2147483648";
ll_int = -1;
ret = ctl_arg_integer(input, &ll_int, sizeof(ll_int));
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(ll_int, 2147483648);
input = "-2147483649";
ll_int = -1;
ret = ctl_arg_integer(input, &ll_int, sizeof(ll_int));
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(ll_int, -2147483649LL);
input = "";
char string[1000] = {0};
ret = ctl_arg_string(input, string, 0);
UT_ASSERTeq(ret, -1);
input = "abcd";
ret = ctl_arg_string(input, string, 3);
UT_ASSERTeq(ret, -1);
input = "abcdefg";
ret = ctl_arg_string(input, string, 3);
UT_ASSERTeq(ret, -1);
input = "abcd";
ret = ctl_arg_string(input, string, 4);
UT_ASSERTeq(ret, -1);
input = "abc";
ret = ctl_arg_string(input, string, 4);
UT_ASSERTeq(ret, 0);
UT_ASSERT(strcmp(input, string) == 0);
}
static void
test_fault_injection(struct pool *pop)
{
if (!core_fault_injection_enabled())
return;
UT_ASSERTne(pop, NULL);
core_inject_fault_at(PMEM_MALLOC, 1, "ctl_parse_args");
test_config_written = 0;
int ret = ctl_load_config_from_string(pop->ctl, pop,
"debug.test_wo=333;debug.test_rw=444;");
UT_ASSERTne(ret, 0);
UT_ASSERTeq(errno, ENOMEM);
}
int
main(int argc, char *argv[])
{
START(argc, argv, "util_ctl");
common_init(LOG_PREFIX, LOG_LEVEL_VAR, LOG_FILE_VAR,
MAJOR_VERSION, MINOR_VERSION);
if (argc != 2)
UT_FATAL("usage: %s testconfig", argv[0]);
testconfig_path = argv[1];
CTL_REGISTER_MODULE(NULL, global_debug);
test_ctl_global_namespace(NULL);
struct pool *pop = malloc(sizeof(*pop));
pop->ctl = ctl_new();
test_ctl_global_namespace(NULL);
CTL_REGISTER_MODULE(pop->ctl, debug);
test_ctl_global_namespace(pop);
test_fault_injection(pop);
test_ctl_parser(pop);
test_string_config(pop);
test_file_config(pop);
test_ctl_arg_parsers();
ctl_delete(pop->ctl);
free(pop);
common_fini();
DONE(NULL);
}
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/util_is_absolute/util_is_absolute.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016, Intel Corporation */
/*
* util_is_absolute.c -- unit test for testing if path is absolute
*
* usage: util_is_absolute path [path ...]
*/
#include "unittest.h"
#include "file.h"
int
main(int argc, char *argv[])
{
START(argc, argv, "util_is_absolute");
for (int i = 1; i < argc; i++) {
UT_OUT("\"%s\" - %d", argv[i],
util_is_absolute_path(argv[i]));
}
DONE(NULL);
}
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem_has_auto_flush_win/mocks_windows.h
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018, Intel Corporation */
/*
* mocks_windows.h -- redefinitions of EnumSystemFirmwareTables and
* GetSystemFirmwareTable
*
* This file is Windows-specific.
*
* This file should be included (i.e. using Forced Include) by libpmem
* files, when compiled for the purpose of pmem_has_auto_flush_win test.
* It would replace default implementation with mocked functions defined
* in mocks_windows.c
*
* This WRAP_REAL define could be also passed as preprocessor definition.
*/
#include <windows.h>
#ifndef WRAP_REAL
#define EnumSystemFirmwareTables __wrap_EnumSystemFirmwareTables
#define GetSystemFirmwareTable __wrap_GetSystemFirmwareTable
UINT
__wrap_EnumSystemFirmwareTables(DWORD FirmwareTableProviderSignature,
PVOID pFirmwareTableEnumBuffer, DWORD BufferSize);
UINT
__wrap_GetSystemFirmwareTable(DWORD FirmwareTableProviderSignature,
DWORD FirmwareTableID, PVOID pFirmwareTableBuffer, DWORD BufferSize);
#endif
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem_has_auto_flush_win/pmem_has_auto_flush_win.h
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018, Intel Corporation */
/*
* pmem_has_auto_flush_win.h -- header file for windows mocks
* for pmem_has_auto_flush_win
*/
#ifndef PMDK_HAS_AUTO_FLUSH_WIN_H
#define PMDK_HAS_AUTO_FLUSH_WIN_H 1
extern size_t Is_nfit;
extern size_t Pc_type;
extern size_t Pc_capabilities;
#endif
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem_has_auto_flush_win/mocks_windows.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018-2019, Intel Corporation */
/*
* mocks_windows.c -- mocked functions used in auto_flush_windows.c
*/
#include "util.h"
#include "unittest.h"
#include "set.h"
#include "pmemcommon.h"
#include "auto_flush_windows.h"
#include "pmem_has_auto_flush_win.h"
#include <errno.h>
extern size_t Is_nfit;
extern size_t Pc_type;
extern size_t Pc_capabilities;
FUNC_MOCK_DLLIMPORT(EnumSystemFirmwareTables, UINT,
DWORD FirmwareTableProviderSignature,
PVOID pFirmwareTableBuffer,
DWORD BufferSize)
FUNC_MOCK_RUN_DEFAULT {
if (FirmwareTableProviderSignature != ACPI_SIGNATURE)
return _FUNC_REAL(EnumSystemFirmwareTables)
(FirmwareTableProviderSignature,
pFirmwareTableBuffer, BufferSize);
if (Is_nfit == 1 && pFirmwareTableBuffer != NULL &&
BufferSize != 0) {
UT_OUT("Mock NFIT available");
strncpy(pFirmwareTableBuffer, NFIT_STR_SIGNATURE, BufferSize);
}
return NFIT_SIGNATURE_LEN + sizeof(struct nfit_header);
}
FUNC_MOCK_END
FUNC_MOCK_DLLIMPORT(GetSystemFirmwareTable, UINT,
DWORD FirmwareTableProviderSignature,
DWORD FirmwareTableID,
PVOID pFirmwareTableBuffer,
DWORD BufferSize)
FUNC_MOCK_RUN_DEFAULT {
if (FirmwareTableProviderSignature != ACPI_SIGNATURE ||
FirmwareTableID != NFIT_REV_SIGNATURE)
return _FUNC_REAL(GetSystemFirmwareTable)
(FirmwareTableProviderSignature, FirmwareTableID,
pFirmwareTableBuffer, BufferSize);
if (pFirmwareTableBuffer == NULL && BufferSize == 0) {
UT_OUT("GetSystemFirmwareTable mock");
return sizeof(struct platform_capabilities) +
sizeof(struct nfit_header);
}
struct nfit_header nfit;
struct platform_capabilities pc;
/* fill nfit */
char sig[NFIT_SIGNATURE_LEN] = NFIT_STR_SIGNATURE;
strncpy(nfit.signature, sig, NFIT_SIGNATURE_LEN);
nfit.length = sizeof(nfit);
memcpy(pFirmwareTableBuffer, &nfit, nfit.length);
/* fill platform_capabilities */
pc.length = sizeof(pc);
/* [...] 0000 0011 - proper capabilities bits combination */
pc.capabilities = (uint32_t)Pc_capabilities;
pc.type = (uint16_t)Pc_type;
memcpy((char *)pFirmwareTableBuffer + nfit.length, &pc, pc.length);
return BufferSize;
}
FUNC_MOCK_END
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem_has_auto_flush_win/pmem_has_auto_flush_win.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018-2019, Intel Corporation */
/*
* pmem_has_auto_flush_win.c -- unit test for pmem_has_auto_flush_win()
*
 * usage: pmem_has_auto_flush_win <nfit> <type> <capabilities>
 * arguments:
 * nfit: is the NFIT table available (y or n)
 * type: type number of the platform capabilities structure
 * capabilities: platform capabilities bits
*/
#include <stdbool.h>
#include <errno.h>
#include "unittest.h"
#include "pmem.h"
#include "pmemcommon.h"
#include "set.h"
#include "mocks_windows.h"
#include "pmem_has_auto_flush_win.h"
#include "util.h"
#define LOG_PREFIX "ut"
#define LOG_LEVEL_VAR "TEST_LOG_LEVEL"
#define LOG_FILE_VAR "TEST_LOG_FILE"
#define MAJOR_VERSION 1
#define MINOR_VERSION 0
size_t Is_nfit = 0;
size_t Pc_type = 0;
size_t Pc_capabilities = 3;
int
main(int argc, char *argv[])
{
START(argc, argv, "pmem_has_auto_flush_win");
common_init(LOG_PREFIX, LOG_LEVEL_VAR, LOG_FILE_VAR,
MAJOR_VERSION, MINOR_VERSION);
	if (argc < 4)
		UT_FATAL("usage: %s <nfit> <type> <capabilities>",
			argv[0]);
pmem_init();
Pc_type = (size_t)atoi(argv[2]);
Pc_capabilities = (size_t)atoi(argv[3]);
Is_nfit = argv[1][0] == 'y';
int eADR = pmem_has_auto_flush();
UT_OUT("pmem_has_auto_flush ret: %d", eADR);
common_fini();
DONE(NULL);
}
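/*
 * Example invocation (hypothetical values): "pmem_has_auto_flush_win y 7 3"
 * makes the mocks report an NFIT table whose platform capabilities
 * structure has type 7 (the ACPI NFIT Platform Capabilities Structure
 * type) and capability bits 0b11 - the "proper capabilities bits
 * combination" noted in mocks_windows.c.
 */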
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_bucket/obj_bucket.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2019, Intel Corporation */
/*
* obj_bucket.c -- unit test for bucket
*/
#include "bucket.h"
#include "container_ravl.h"
#include "util.h"
#include "unittest.h"
#include "obj.h"
#define TEST_CHUNK_ID 10
#define TEST_ZONE_ID 20
#define TEST_SIZE_IDX 30
#define TEST_BLOCK_OFF 40
struct container_test {
struct block_container super;
};
static const struct memory_block *inserted_memblock;
static int
container_test_insert(struct block_container *c,
const struct memory_block *m)
{
inserted_memblock = m;
return 0;
}
static int
container_test_get_rm_bestfit(struct block_container *c,
struct memory_block *m)
{
if (inserted_memblock == NULL)
return ENOMEM;
*m = *inserted_memblock;
inserted_memblock = NULL;
return 0;
}
static int
container_test_get_rm_exact(struct block_container *c,
const struct memory_block *m)
{
if (inserted_memblock == NULL)
return ENOMEM;
if (inserted_memblock->chunk_id == m->chunk_id) {
inserted_memblock = NULL;
return 0;
}
return ENOMEM;
}
static void
container_test_destroy(struct block_container *c)
{
FREE(c);
}
static struct block_container_ops container_test_ops = {
.insert = container_test_insert,
.get_rm_exact = container_test_get_rm_exact,
.get_rm_bestfit = container_test_get_rm_bestfit,
.is_empty = NULL,
.rm_all = NULL,
.destroy = container_test_destroy,
};
static struct block_container *
container_new_test(void)
{
struct container_test *c = MALLOC(sizeof(struct container_test));
c->super.c_ops = &container_test_ops;
return &c->super;
}
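/*
 * The ops table above is a classic C vtable: the bucket code never calls
 * the test container directly, it dispatches through block_container_ops.
 * Illustrative dispatch sequence (sketch only, not part of the test):
 */
#if 0
static void
dispatch_sketch(void)
{
	struct block_container *c = container_new_test();
	struct memory_block m = {0};
	c->c_ops->insert(c, &m);		/* container_test_insert */
	c->c_ops->get_rm_bestfit(c, &m);	/* returns what was inserted */
	c->c_ops->destroy(c);			/* frees the container */
}
#endif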
static void *
mock_get_real_data(const struct memory_block *m)
{
return NULL;
}
static size_t
mock_get_real_size(const struct memory_block *m)
{
return 0;
}
static const struct memory_block_ops mock_ops = {
.block_size = NULL,
.prep_hdr = NULL,
.get_lock = NULL,
.get_state = NULL,
.get_user_data = NULL,
.get_real_data = mock_get_real_data,
.get_user_size = NULL,
.get_real_size = mock_get_real_size,
.write_header = NULL,
.reinit_header = NULL,
.get_extra = NULL,
.get_flags = NULL,
};
static void
test_fault_injection(void)
{
if (!pmemobj_fault_injection_enabled())
return;
pmemobj_inject_fault_at(PMEM_MALLOC, 1, "bucket_new");
struct bucket *b = bucket_new(container_new_test(), NULL);
UT_ASSERTeq(b, NULL);
UT_ASSERTeq(errno, ENOMEM);
}
static void
test_bucket_insert_get(void)
{
struct bucket *b = bucket_new(container_new_test(), NULL);
UT_ASSERT(b != NULL);
struct memory_block m = {TEST_CHUNK_ID, TEST_ZONE_ID,
TEST_SIZE_IDX, TEST_BLOCK_OFF};
m.m_ops = &mock_ops;
/* get from empty */
UT_ASSERT(b->c_ops->get_rm_bestfit(b->container, &m) != 0);
UT_ASSERT(bucket_insert_block(b, &m) == 0);
UT_ASSERT(b->c_ops->get_rm_bestfit(b->container, &m) == 0);
UT_ASSERT(m.chunk_id == TEST_CHUNK_ID);
UT_ASSERT(m.zone_id == TEST_ZONE_ID);
UT_ASSERT(m.size_idx == TEST_SIZE_IDX);
UT_ASSERT(m.block_off == TEST_BLOCK_OFF);
bucket_delete(b);
}
static void
test_bucket_remove(void)
{
struct bucket *b = bucket_new(container_new_test(), NULL);
UT_ASSERT(b != NULL);
struct memory_block m = {TEST_CHUNK_ID, TEST_ZONE_ID,
TEST_SIZE_IDX, TEST_BLOCK_OFF};
m.m_ops = &mock_ops;
UT_ASSERT(bucket_insert_block(b, &m) == 0);
UT_ASSERT(b->c_ops->get_rm_exact(b->container, &m) == 0);
bucket_delete(b);
}
int
main(int argc, char *argv[])
{
START(argc, argv, "obj_bucket");
test_bucket_insert_get();
test_bucket_remove();
test_fault_injection();
DONE(NULL);
}
#ifdef _MSC_VER
/*
* Since libpmemobj is linked statically, we need to invoke its ctor/dtor.
*/
MSVC_CONSTR(libpmemobj_init)
MSVC_DESTR(libpmemobj_fini)
#endif
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_pmalloc_oom_mt/obj_pmalloc_oom_mt.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
 * obj_pmalloc_oom_mt.c -- multithreaded out-of-memory test
*
*/
#include <stddef.h>
#include "unittest.h"
#define TEST_ALLOC_SIZE (32 * 1024)
#define LAYOUT_NAME "oom_mt"
static int allocated;
static PMEMobjpool *pop;
static void *
oom_worker(void *arg)
{
allocated = 0;
while (pmemobj_alloc(pop, NULL, TEST_ALLOC_SIZE, 0, NULL, NULL) == 0)
allocated++;
PMEMoid iter, iter2;
POBJ_FOREACH_SAFE(pop, iter, iter2)
pmemobj_free(&iter);
return NULL;
}
int
main(int argc, char *argv[])
{
START(argc, argv, "obj_pmalloc_oom_mt");
if (argc != 2)
UT_FATAL("usage: %s file-name", argv[0]);
const char *path = argv[1];
if ((pop = pmemobj_create(path, LAYOUT_NAME,
PMEMOBJ_MIN_POOL, S_IWUSR | S_IRUSR)) == NULL)
UT_FATAL("!pmemobj_create: %s", path);
os_thread_t t;
THREAD_CREATE(&t, NULL, oom_worker, NULL);
THREAD_JOIN(&t, NULL);
int first_thread_allocated = allocated;
THREAD_CREATE(&t, NULL, oom_worker, NULL);
THREAD_JOIN(&t, NULL);
UT_ASSERTeq(first_thread_allocated, allocated);
pmemobj_close(pop);
DONE(NULL);
}
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/mmap/mmap.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* Copyright (c) 2016, Microsoft Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* mmap.c -- test memory mapping routines
*
* This test is intended to be used for testing Windows implementation
* of memory mapping routines - mmap(), munmap(), msync() and mprotect().
* Those functions should provide the same functionality as their Linux
* counterparts, at least with respect to the features that are used
* in PMDK libraries.
*
* Known issues and differences between Linux and Windows implementation
* are described in src/common/mmap_windows.c.
*/
#include "unittest.h"
#include <sys/mman.h>
#include <signal.h>
#include <setjmp.h>
#ifdef _WIN32
#define MMAP_ALIGN ((uintptr_t)65536)
#else
#define MMAP_ALIGN ((uintptr_t)4096)
#endif
#define PAGE_SIZE 4096
#define MMAP_SIZE MMAP_ALIGN
#define FILE_SIZE (MMAP_SIZE * 4)
#define CHECK_RO 1
#define CHECK_PRIV 2
static ut_jmp_buf_t Jmp;
/*
* signal_handler -- called on SIGSEGV
*/
static void
signal_handler(int sig)
{
ut_siglongjmp(Jmp);
}
/*
* check_access -- check access to mapped memory
*/
static void
check_access(char *addr, size_t len, int prot)
{
volatile int i;
/* arrange to catch SEGV */
struct sigaction v;
sigemptyset(&v.sa_mask);
v.sa_flags = 0;
v.sa_handler = signal_handler;
SIGACTION(SIGSEGV, &v, NULL);
char pat[PAGE_SIZE];
char buf[PAGE_SIZE];
for (i = 0; i < len / PAGE_SIZE; i++) {
/* check read access */
if (!ut_sigsetjmp(Jmp)) {
memcpy(buf, addr + PAGE_SIZE * i, PAGE_SIZE);
if ((prot & PROT_READ) == 0)
UT_FATAL("memory can be read");
} else {
if (prot & PROT_READ)
UT_FATAL("memory cannot be read");
}
}
/* fill up mapped region with new pattern */
memset(pat, 0xA5, PAGE_SIZE);
for (i = 0; i < len / PAGE_SIZE; i++) {
if (!ut_sigsetjmp(Jmp)) {
memcpy(addr + PAGE_SIZE * i, pat, PAGE_SIZE);
if ((prot & PROT_WRITE) == 0)
UT_FATAL("memory can be written");
} else {
if (prot & PROT_WRITE)
UT_FATAL("memory cannot be written");
}
}
}
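/*
 * The probe pattern used above, in isolation: install a SIGSEGV handler,
 * sigsetjmp() before touching memory, and siglongjmp() back on fault.
 * Minimal standalone sketch (illustrative, not part of this test):
 */
#if 0
static sigjmp_buf probe_env;

static void
probe_handler(int sig)
{
	siglongjmp(probe_env, 1);
}

/* returns 1 if *addr is writable, 0 if the store faults */
static int
probe_writable(volatile char *addr)
{
	struct sigaction sa;
	sigemptyset(&sa.sa_mask);
	sa.sa_flags = 0;
	sa.sa_handler = probe_handler;
	sigaction(SIGSEGV, &sa, NULL);

	if (sigsetjmp(probe_env, 1) == 0) {
		*addr = 0;	/* may fault */
		return 1;
	}
	return 0;	/* came back through the handler */
}
#endif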
/*
* check_mapping -- check access to memory mapped file
*/
static void
check_mapping(int fd, char *addr, size_t len, int prot,
int flags, os_off_t offset)
{
volatile int i;
/* arrange to catch SEGV */
struct sigaction v;
sigemptyset(&v.sa_mask);
v.sa_flags = 0;
v.sa_handler = signal_handler;
SIGACTION(SIGSEGV, &v, NULL);
char pat[PAGE_SIZE] = { 0 };
char buf[PAGE_SIZE];
if ((flags & CHECK_RO) == 0 && fd != -1) {
/* write some pattern to the file */
memset(pat, 0x5A, PAGE_SIZE);
for (i = 0; i < len / PAGE_SIZE; i++) {
LSEEK(fd, offset + PAGE_SIZE * i, SEEK_SET);
WRITE(fd, pat, PAGE_SIZE);
LSEEK(fd, offset + PAGE_SIZE * i, SEEK_SET);
if (READ(fd, buf, PAGE_SIZE) == PAGE_SIZE) {
if (memcmp(pat, buf, PAGE_SIZE))
UT_FATAL("first %d bytes do not match",
PAGE_SIZE);
}
}
}
check_access(addr, len, prot);
munmap(addr, len);
/* same memcpy from above should now fail */
for (i = 0; i < len / PAGE_SIZE; i++) {
if (!ut_sigsetjmp(Jmp)) {
memcpy(addr + PAGE_SIZE * i, pat, PAGE_SIZE);
UT_FATAL("unmap failed");
}
}
if (fd != -1) {
/* expected pattern */
if ((flags & (CHECK_PRIV | CHECK_RO)) != 0 ||
(prot & PROT_WRITE) == 0)
memset(pat, 0x5A, PAGE_SIZE);
else
memset(pat, 0xA5, PAGE_SIZE);
for (i = 0; i < len / PAGE_SIZE; i++) {
LSEEK(fd, offset + PAGE_SIZE * i, SEEK_SET);
if (READ(fd, buf, PAGE_SIZE) == PAGE_SIZE) {
if (memcmp(pat, buf, PAGE_SIZE))
UT_FATAL("first %d bytes do not match",
PAGE_SIZE);
}
}
}
}
/*
* test_mmap_flags -- test supported flags
*/
static void
test_mmap_flags(int fd)
{
char *ptr1;
/* PRIVATE + SHARED */
ptr1 = mmap(NULL, FILE_SIZE, PROT_READ|PROT_WRITE,
MAP_PRIVATE|MAP_SHARED, fd, 0);
UT_ASSERTeq(ptr1, MAP_FAILED);
/* no PRIVATE/SHARED */
ptr1 = mmap(NULL, FILE_SIZE, PROT_READ|PROT_WRITE, 0, fd, 0);
UT_ASSERTeq(ptr1, MAP_FAILED);
/* ANON but no PRIVATE/SHARED */
ptr1 = mmap(NULL, FILE_SIZE, PROT_READ|PROT_WRITE, MAP_ANON, fd, 0);
UT_ASSERTeq(ptr1, MAP_FAILED);
}
/*
* test_mmap_len -- test various lengths and offsets
*/
static void
test_mmap_len(int fd)
{
char *ptr;
/* len == 0 */
ptr = mmap(NULL, 0, PROT_READ|PROT_WRITE,
MAP_PRIVATE, fd, 0);
UT_ASSERTeq(ptr, MAP_FAILED);
/* len > file_size */
ptr = mmap(NULL, FILE_SIZE + MMAP_SIZE, PROT_READ|PROT_WRITE,
MAP_PRIVATE, fd, 0);
UT_ASSERTne(ptr, MAP_FAILED);
check_mapping(fd, ptr, FILE_SIZE, PROT_READ|PROT_WRITE, CHECK_PRIV, 0);
UT_ASSERTeq(munmap(ptr + FILE_SIZE, MMAP_SIZE), 0);
/* offset == 0 */
ptr = mmap(NULL, MMAP_SIZE, PROT_READ|PROT_WRITE,
MAP_SHARED, fd, 0);
UT_ASSERTne(ptr, MAP_FAILED);
check_mapping(fd, ptr, MMAP_SIZE, PROT_READ|PROT_WRITE, 0, 0);
/* offset == PAGE_SIZE */
ptr = mmap(NULL, MMAP_SIZE, PROT_READ|PROT_WRITE,
MAP_SHARED, fd, PAGE_SIZE);
#ifndef _WIN32
UT_ASSERTne(ptr, MAP_FAILED);
check_mapping(fd, ptr, MMAP_SIZE, PROT_READ|PROT_WRITE, 0, PAGE_SIZE);
#else
/* XXX - on Windows, offset must be aligned to allocation granularity */
UT_ASSERTeq(ptr, MAP_FAILED);
#endif
/* offset == MMAP_ALIGN */
ptr = mmap(NULL, MMAP_SIZE, PROT_READ|PROT_WRITE,
MAP_SHARED, fd, MMAP_ALIGN);
UT_ASSERTne(ptr, MAP_FAILED);
check_mapping(fd, ptr, MMAP_SIZE, PROT_READ|PROT_WRITE, 0, MMAP_ALIGN);
/* unaligned offset */
ptr = mmap(NULL, MMAP_SIZE, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 100);
UT_ASSERTeq(ptr, MAP_FAILED);
/* offset + len > file_size */
ptr = mmap(NULL, FILE_SIZE, PROT_READ|PROT_WRITE,
MAP_PRIVATE, fd, MMAP_SIZE);
UT_ASSERTne(ptr, MAP_FAILED);
check_mapping(fd, ptr, FILE_SIZE - MMAP_SIZE, PROT_READ|PROT_WRITE,
CHECK_PRIV, MMAP_SIZE);
UT_ASSERTeq(munmap(ptr + FILE_SIZE - MMAP_SIZE, MMAP_SIZE), 0);
/* offset beyond file_size */
ptr = mmap(NULL, MMAP_SIZE, PROT_READ, MAP_SHARED, fd,
FILE_SIZE + MMAP_SIZE);
#ifndef _WIN32
UT_ASSERTne(ptr, MAP_FAILED);
check_mapping(fd, ptr, MMAP_SIZE, PROT_READ, CHECK_PRIV,
FILE_SIZE + MMAP_SIZE);
#else
UT_ASSERTeq(ptr, MAP_FAILED);
#endif
}
/*
* test_mmap_hint -- test hint address
*/
static void
test_mmap_hint(int fd)
{
char *ptr1;
char *ptr2;
/* map entire file first to get unused address */
ptr1 = mmap(NULL, FILE_SIZE, PROT_READ|PROT_WRITE,
MAP_PRIVATE, fd, 0);
UT_ASSERTne(ptr1, MAP_FAILED);
check_mapping(fd, ptr1, FILE_SIZE, PROT_READ|PROT_WRITE, CHECK_PRIV, 0);
/* now try to map a part of it at specified address */
ptr2 = mmap(ptr1 + MMAP_ALIGN, MMAP_SIZE, PROT_READ|PROT_WRITE,
MAP_PRIVATE, fd, 0);
UT_ASSERTeq(ptr2, ptr1 + MMAP_ALIGN);
check_mapping(fd, ptr2, MMAP_SIZE, PROT_READ|PROT_WRITE, CHECK_PRIV, 0);
/* non-aligned hint address - should be ignored */
ptr2 = mmap(ptr1 + 100, MMAP_SIZE, PROT_READ|PROT_WRITE,
MAP_PRIVATE, fd, 0);
UT_ASSERTne(ptr2, MAP_FAILED);
UT_ASSERTne(ptr2, ptr1 + 100);
check_mapping(fd, ptr2, MMAP_SIZE, PROT_READ|PROT_WRITE, CHECK_PRIV, 0);
/* hint address is busy */
ptr1 = mmap(NULL, FILE_SIZE / 2, PROT_READ|PROT_WRITE,
MAP_PRIVATE, fd, 0);
UT_ASSERTne(ptr1, MAP_FAILED);
ptr2 = mmap(ptr1 + MMAP_SIZE, MMAP_SIZE, PROT_READ|PROT_WRITE,
MAP_PRIVATE, fd, 0);
UT_ASSERTne(ptr2, MAP_FAILED);
UT_ASSERT(ptr2 < ptr1 || ptr2 >= ptr1 + FILE_SIZE / 2);
munmap(ptr1, FILE_SIZE / 2);
check_mapping(fd, ptr2, MMAP_SIZE, PROT_READ|PROT_WRITE, CHECK_PRIV, 0);
}
/*
* test_mmap_fixed -- test MAP_FIXED flag
*/
static void
test_mmap_fixed(int fd)
{
char *ptr1;
char *ptr2;
/* map entire file first to get unused address */
ptr1 = mmap(NULL, FILE_SIZE, PROT_READ|PROT_WRITE,
MAP_PRIVATE, fd, 0);
UT_ASSERTne(ptr1, MAP_FAILED);
check_mapping(fd, ptr1, FILE_SIZE, PROT_READ|PROT_WRITE, CHECK_PRIV, 0);
/* now try to map a part of it at specified address */
ptr2 = mmap(ptr1 + MMAP_ALIGN, MMAP_SIZE, PROT_READ|PROT_WRITE,
MAP_PRIVATE|MAP_FIXED, fd, 0);
UT_ASSERTeq(ptr2, ptr1 + MMAP_ALIGN);
check_mapping(fd, ptr2, MMAP_SIZE, PROT_READ|PROT_WRITE, CHECK_PRIV, 0);
/* non-aligned hint address - should fail */
ptr2 = mmap(ptr1 + 100, MMAP_SIZE, PROT_READ|PROT_WRITE,
MAP_PRIVATE|MAP_FIXED, fd, 0);
UT_ASSERTeq(ptr2, MAP_FAILED);
/* hint address is busy */
ptr1 = mmap(NULL, MMAP_SIZE * 2, PROT_READ|PROT_WRITE,
MAP_PRIVATE, fd, 0);
UT_ASSERTne(ptr1, MAP_FAILED);
ptr2 = mmap(ptr1 + MMAP_SIZE, MMAP_SIZE, PROT_READ|PROT_WRITE,
MAP_PRIVATE|MAP_FIXED, fd, 0);
UT_ASSERTne(ptr2, MAP_FAILED);
UT_ASSERTeq(ptr2, ptr1 + MMAP_SIZE);
check_mapping(fd, ptr1, MMAP_SIZE, PROT_READ|PROT_WRITE, CHECK_PRIV, 0);
check_mapping(fd, ptr2, MMAP_SIZE, PROT_READ|PROT_WRITE, CHECK_PRIV, 0);
}
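/*
 * Summary of the two placement modes exercised above: a bare hint lets
 * the kernel relocate the mapping (an unaligned hint is simply ignored),
 * while MAP_FIXED maps at exactly the requested address - replacing any
 * existing mapping there - or fails if the address is unaligned:
 *
 *	p = mmap(hint, len, prot, MAP_PRIVATE, fd, 0);           (may move)
 *	p = mmap(hint, len, prot, MAP_PRIVATE|MAP_FIXED, fd, 0); (exact or fail)
 */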
/*
* test_mmap_anon -- test anonymous mappings
*/
static void
test_mmap_anon(int fd)
{
char *ptr1;
char *ptr2;
/* fd == -1, but no MAP_ANON - should fail */
ptr1 = mmap(NULL, FILE_SIZE, PROT_READ|PROT_WRITE,
MAP_SHARED, -1, 0);
UT_ASSERTeq(ptr1, MAP_FAILED);
/* fd should be ignored */
ptr1 = mmap(NULL, FILE_SIZE, PROT_READ|PROT_WRITE,
MAP_ANON|MAP_SHARED, fd, 0);
UT_ASSERTne(ptr1, MAP_FAILED);
check_mapping(-1, ptr1, FILE_SIZE, PROT_READ|PROT_WRITE, 0, 0);
/* offset should be ignored */
ptr1 = mmap(NULL, FILE_SIZE, PROT_READ|PROT_WRITE,
MAP_ANON|MAP_SHARED, -1, MMAP_ALIGN);
UT_ASSERTne(ptr1, MAP_FAILED);
check_mapping(-1, ptr1, FILE_SIZE, PROT_READ|PROT_WRITE, 0, 0);
/* now try to map a part of it at specified address */
ptr2 = mmap(ptr1 + MMAP_ALIGN, MMAP_SIZE, PROT_READ|PROT_WRITE,
MAP_ANON|MAP_SHARED, -1, 0);
UT_ASSERTeq(ptr2, ptr1 + MMAP_ALIGN);
check_mapping(-1, ptr2, MMAP_SIZE, PROT_READ|PROT_WRITE, 0, 0);
/* non-aligned hint address - should be ignored */
ptr2 = mmap(ptr1 + 100, MMAP_SIZE, PROT_READ|PROT_WRITE,
MAP_ANON|MAP_SHARED, -1, 0);
UT_ASSERTne(ptr2, MAP_FAILED);
UT_ASSERTne(ptr2, ptr1 + 100);
check_mapping(-1, ptr2, MMAP_SIZE, PROT_READ|PROT_WRITE, 0, 0);
/* non-aligned hint address + MAP_FIXED - should fail */
ptr2 = mmap(ptr1 + 100, MMAP_SIZE, PROT_READ|PROT_WRITE,
MAP_ANON|MAP_SHARED|MAP_FIXED, -1, 0);
UT_ASSERTeq(ptr2, MAP_FAILED);
/* hint address is busy */
ptr1 = mmap(NULL, FILE_SIZE / 2, PROT_READ|PROT_WRITE,
MAP_ANON|MAP_SHARED, -1, 0);
UT_ASSERTne(ptr1, MAP_FAILED);
ptr2 = mmap(ptr1 + MMAP_SIZE, MMAP_SIZE, PROT_READ|PROT_WRITE,
MAP_ANON|MAP_SHARED, -1, 0);
UT_ASSERTne(ptr2, MAP_FAILED);
UT_ASSERT(ptr2 < ptr1 || ptr2 >= ptr1 + FILE_SIZE / 2);
munmap(ptr1, FILE_SIZE / 2);
check_mapping(-1, ptr2, MMAP_SIZE, PROT_READ|PROT_WRITE, 0, 0);
/* hint address is busy + MAP_FIXED */
ptr1 = mmap(NULL, MMAP_SIZE * 2, PROT_READ|PROT_WRITE,
MAP_ANON|MAP_SHARED, -1, 0);
UT_ASSERTne(ptr1, MAP_FAILED);
ptr2 = mmap(ptr1 + MMAP_SIZE, MMAP_SIZE, PROT_READ|PROT_WRITE,
MAP_ANON|MAP_SHARED|MAP_FIXED, -1, 0);
UT_ASSERTne(ptr2, MAP_FAILED);
UT_ASSERTeq(ptr2, ptr1 + MMAP_SIZE);
check_mapping(-1, ptr1, MMAP_SIZE, PROT_READ|PROT_WRITE, 0, 0);
check_mapping(-1, ptr2, MMAP_SIZE, PROT_READ|PROT_WRITE, 0, 0);
}
/*
* test_mmap_prot -- test R/W protection
*/
static void
test_mmap_prot(int fd, int fd_ro)
{
char *ptr1;
/* read/write */
ptr1 = mmap(NULL, FILE_SIZE, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
UT_ASSERTne(ptr1, MAP_FAILED);
check_mapping(fd, ptr1, FILE_SIZE, PROT_READ|PROT_WRITE, 0, 0);
/* read/write on file opened in read-only mode - should fail */
errno = 0;
ptr1 = mmap(NULL, FILE_SIZE, PROT_READ|PROT_WRITE,
MAP_SHARED, fd_ro, 0);
UT_ASSERTeq(ptr1, MAP_FAILED);
UT_ASSERTeq(errno, EACCES);
/* read-only */
ptr1 = mmap(NULL, FILE_SIZE, PROT_READ, MAP_SHARED, fd, 0);
UT_ASSERTne(ptr1, MAP_FAILED);
check_mapping(fd, ptr1, FILE_SIZE, PROT_READ, 0, 0);
/* read-only on file opened in read-only mode - should succeed */
ptr1 = mmap(NULL, FILE_SIZE, PROT_READ, MAP_SHARED, fd_ro, 0);
UT_ASSERTne(ptr1, MAP_FAILED);
check_mapping(fd_ro, ptr1, FILE_SIZE, PROT_READ, CHECK_RO, 0);
/* no access */
ptr1 = mmap(NULL, FILE_SIZE, PROT_NONE, MAP_SHARED, fd, 0);
#ifndef _WIN32
UT_ASSERTne(ptr1, MAP_FAILED);
check_mapping(fd, ptr1, FILE_SIZE, PROT_NONE, 0, 0);
#else
/* XXX - PROT_NONE not supported yet */
UT_ASSERTeq(ptr1, MAP_FAILED);
#endif
/* no access on read-only file */
ptr1 = mmap(NULL, FILE_SIZE, PROT_NONE, MAP_SHARED, fd_ro, 0);
#ifndef _WIN32
UT_ASSERTne(ptr1, MAP_FAILED);
check_mapping(fd_ro, ptr1, FILE_SIZE, PROT_NONE, CHECK_RO, 0);
#else
/* XXX - PROT_NONE not supported yet */
UT_ASSERTeq(ptr1, MAP_FAILED);
#endif
}
/*
* test_mmap_prot_anon -- test R/W protection on anonymous mappings
*/
static void
test_mmap_prot_anon(void)
{
char *ptr1;
/* read/write */
ptr1 = mmap(NULL, FILE_SIZE, PROT_READ|PROT_WRITE,
MAP_SHARED|MAP_ANON, -1, 0);
UT_ASSERTne(ptr1, MAP_FAILED);
check_mapping(-1, ptr1, FILE_SIZE, PROT_READ|PROT_WRITE, 0, 0);
/* read-only */
ptr1 = mmap(NULL, FILE_SIZE, PROT_READ, MAP_SHARED|MAP_ANON, -1, 0);
UT_ASSERTne(ptr1, MAP_FAILED);
check_mapping(-1, ptr1, FILE_SIZE, PROT_READ, 0, 0);
/* no access */
ptr1 = mmap(NULL, FILE_SIZE, PROT_NONE, MAP_SHARED|MAP_ANON, -1, 0);
#ifndef _WIN32
UT_ASSERTne(ptr1, MAP_FAILED);
check_mapping(-1, ptr1, FILE_SIZE, PROT_NONE, 0, 0);
#else
/* XXX - PROT_NONE not supported yet */
UT_ASSERTeq(ptr1, MAP_FAILED);
#endif
}
/*
* test_mmap_shared -- test shared mappings
*/
static void
test_mmap_shared(int fd)
{
char *ptr1;
ptr1 = mmap(NULL, FILE_SIZE, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
UT_ASSERTne(ptr1, MAP_FAILED);
check_mapping(fd, ptr1, FILE_SIZE, PROT_READ|PROT_WRITE, 0, 0);
}
/*
* test_munmap -- test mapping deletion
*/
static void
test_munmap(int fd)
{
char *ptr1;
char *ptr2;
ptr1 = mmap(NULL, FILE_SIZE, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
UT_ASSERTne(ptr1, MAP_FAILED);
/* unaligned address - should fail */
errno = 0;
UT_ASSERTeq(munmap(ptr1 + 100, FILE_SIZE), -1);
UT_ASSERTeq(errno, EINVAL);
check_mapping(fd, ptr1, FILE_SIZE, PROT_READ|PROT_WRITE, 0, 0);
/* unaligned length - should succeed */
UT_ASSERTeq(munmap(ptr1, FILE_SIZE - 100), 0);
check_mapping(fd, ptr1, FILE_SIZE, PROT_NONE, 0, 0);
check_mapping(fd, ptr1 + FILE_SIZE - 100, 100, PROT_NONE, 0, 0);
ptr1 = mmap(NULL, FILE_SIZE, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
UT_ASSERTne(ptr1, MAP_FAILED);
/* len == 0 - should fail */
errno = 0;
UT_ASSERTne(munmap(ptr1, 0), 0);
UT_ASSERTeq(errno, EINVAL);
check_mapping(fd, ptr1, FILE_SIZE, PROT_READ|PROT_WRITE, 0, 0);
ptr1 = mmap(NULL, FILE_SIZE, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
UT_ASSERTne(ptr1, MAP_FAILED);
/* delete entire mapping (len > file_size) */
UT_ASSERTeq(munmap(ptr1, FILE_SIZE + MMAP_SIZE), 0);
check_mapping(fd, ptr1, FILE_SIZE, PROT_NONE, 0, 0);
/* delete non existing mapping - should succeed */
UT_ASSERTeq(munmap(ptr1, FILE_SIZE), 0);
ptr1 = mmap(NULL, FILE_SIZE, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
UT_ASSERTne(ptr1, MAP_FAILED);
/* partial unmap */
UT_ASSERTeq(munmap(ptr1, MMAP_SIZE), 0);
check_mapping(fd, ptr1, MMAP_SIZE, PROT_NONE, 0, 0);
check_mapping(fd, ptr1 + MMAP_SIZE, FILE_SIZE - MMAP_SIZE,
PROT_READ|PROT_WRITE, 0, MMAP_SIZE);
/* unmap pages from two adjacent mappings */
ptr1 = mmap(ptr1, MMAP_SIZE * 2, PROT_READ|PROT_WRITE,
MAP_SHARED, fd, 0);
UT_ASSERTne(ptr1, MAP_FAILED);
ptr2 = mmap(ptr1 + MMAP_SIZE * 2, MMAP_SIZE * 2, PROT_READ|PROT_WRITE,
MAP_SHARED, fd, MMAP_SIZE * 2);
UT_ASSERTeq(ptr2, ptr1 + MMAP_SIZE * 2);
UT_ASSERTeq(munmap(ptr1 + MMAP_SIZE, MMAP_SIZE * 2), 0);
check_mapping(fd, ptr1, MMAP_SIZE, PROT_READ|PROT_WRITE, 0, 0);
check_mapping(fd, ptr1 + MMAP_SIZE, MMAP_SIZE * 2,
PROT_NONE, 0, MMAP_SIZE);
check_mapping(fd, ptr1 + MMAP_SIZE * 3, MMAP_SIZE,
PROT_READ|PROT_WRITE, 0, MMAP_SIZE * 3);
}
#define MS_ALL (MS_SYNC|MS_ASYNC|MS_INVALIDATE)
/*
* test_msync -- test synchronizing a file with a memory map
*/
static void
test_msync(int fd)
{
char *ptr1;
char *ptr2;
ptr1 = mmap(NULL, FILE_SIZE, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
UT_ASSERTne(ptr1, MAP_FAILED);
ptr2 = mmap(NULL, FILE_SIZE, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
	UT_ASSERTne(ptr2, MAP_FAILED);
UT_ASSERTeq(munmap(ptr2, FILE_SIZE), 0);
/* unknown flag - should fail */
errno = 0;
UT_ASSERTne(msync(ptr1, MMAP_SIZE, MS_ALL + 1), 0);
UT_ASSERTeq(errno, EINVAL);
/* SYNC + ASYNC - should fail */
errno = 0;
UT_ASSERTne(msync(ptr1, MMAP_SIZE, MS_SYNC|MS_ASYNC), 0);
UT_ASSERTeq(errno, EINVAL);
	/* neither MS_SYNC nor MS_ASYNC - should fail according to POSIX... */
errno = 0;
#ifndef _WIN32
/* ... but it is allowed on Linux */
UT_ASSERTeq(msync(ptr1, MMAP_SIZE, 0), 0);
UT_ASSERTeq(errno, 0);
#else
UT_ASSERTne(msync(ptr1, MMAP_SIZE, 0), 0);
UT_ASSERTeq(errno, EINVAL);
#endif
/* len == 0 - should succeed */
UT_ASSERTeq(msync(ptr1, 0, MS_SYNC), 0);
/* len == SIZE_MAX - should fail */
errno = 0;
#ifndef _WIN32
/* ... but it is allowed on Linux */
UT_ASSERTeq(msync(ptr1, SIZE_MAX, MS_SYNC), 0);
UT_ASSERTeq(errno, 0);
#else
UT_ASSERTne(msync(ptr1, SIZE_MAX, MS_SYNC), 0);
UT_ASSERTeq(errno, ENOMEM);
#endif
/* unaligned pointer - should fail */
errno = 0;
UT_ASSERTne(msync(ptr1 + 100, FILE_SIZE, MS_SYNC), 0);
UT_ASSERTeq(errno, EINVAL);
/* invalid pointer - should fail */
UT_ASSERTne(msync(ptr2, FILE_SIZE, MS_SYNC), 0);
/* unaligned length - should succeed */
UT_ASSERTeq(msync(ptr1, FILE_SIZE - 100, MS_SYNC), 0);
/* len > mapping size - should fail */
UT_ASSERTeq(munmap(ptr1 + FILE_SIZE / 2, FILE_SIZE / 2), 0);
errno = 0;
UT_ASSERTne(msync(ptr1, FILE_SIZE, MS_SYNC), 0);
UT_ASSERTeq(errno, ENOMEM);
/* partial sync */
UT_ASSERTeq(msync(ptr1 + PAGE_SIZE, MMAP_SIZE, MS_SYNC), 0);
UT_ASSERTeq(munmap(ptr1, FILE_SIZE), 0);
/* range includes invalid addresses - should fail */
ptr1 = mmap(NULL, FILE_SIZE, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
UT_ASSERTne(ptr1, MAP_FAILED);
UT_ASSERTeq(munmap(ptr1 + MMAP_SIZE, MMAP_SIZE), 0);
UT_ASSERTeq(munmap(ptr1 + MMAP_SIZE * 3, MMAP_SIZE), 0);
errno = 0;
UT_ASSERTne(msync(ptr1, FILE_SIZE, MS_SYNC), 0);
UT_ASSERTeq(errno, ENOMEM);
UT_ASSERTeq(munmap(ptr1, FILE_SIZE), 0);
/* synchronize two adjacent mappings */
ptr1 = mmap(ptr1, MMAP_SIZE * 2, PROT_READ|PROT_WRITE,
MAP_SHARED, fd, 0);
UT_ASSERTne(ptr1, MAP_FAILED);
ptr2 = mmap(ptr1 + MMAP_SIZE * 2, MMAP_SIZE * 2, PROT_READ|PROT_WRITE,
MAP_SHARED, fd, MMAP_SIZE * 2);
UT_ASSERTeq(ptr2, ptr1 + MMAP_SIZE * 2);
UT_ASSERTeq(msync(ptr1 + MMAP_SIZE, MMAP_SIZE * 2, MS_SYNC), 0);
UT_ASSERTeq(munmap(ptr1, MMAP_SIZE * 4), 0);
/* anonymous mapping */
ptr1 = mmap(NULL, FILE_SIZE, PROT_READ|PROT_WRITE,
MAP_SHARED|MAP_ANON, -1, 0);
UT_ASSERTne(ptr1, MAP_FAILED);
UT_ASSERTeq(msync(ptr1, FILE_SIZE, MS_SYNC), 0);
UT_ASSERTeq(munmap(ptr1, FILE_SIZE), 0);
}
#define PROT_ALL (PROT_READ|PROT_WRITE|PROT_EXEC)
/*
* test_mprotect -- test memory protection
*/
static void
test_mprotect(int fd, int fd_ro)
{
char *ptr1;
char *ptr2;
/* unknown PROT flag - should succeed */
ptr1 = mmap(NULL, MMAP_SIZE, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
UT_ASSERTne(ptr1, MAP_FAILED);
UT_ASSERTeq(mprotect(ptr1, MMAP_SIZE, PROT_ALL + 1), 0);
check_access(ptr1, MMAP_SIZE, PROT_NONE);
UT_ASSERTeq(munmap(ptr1, MMAP_SIZE), 0);
/* len == 0 - should succeed */
ptr1 = mmap(NULL, MMAP_SIZE, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
UT_ASSERTne(ptr1, MAP_FAILED);
UT_ASSERTeq(mprotect(ptr1, 0, PROT_READ), 0);
check_access(ptr1, MMAP_SIZE, PROT_READ|PROT_WRITE);
UT_ASSERTeq(munmap(ptr1, MMAP_SIZE), 0);
/* len > mapping size - should fail */
ptr1 = mmap(NULL, FILE_SIZE, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
UT_ASSERTeq(munmap(ptr1 + FILE_SIZE / 2, FILE_SIZE / 2), 0);
errno = 0;
UT_ASSERTne(mprotect(ptr1, FILE_SIZE, PROT_READ), 0);
UT_ASSERTeq(errno, ENOMEM);
UT_ASSERTeq(munmap(ptr1, FILE_SIZE), 0);
/* change protection: R/O => R/W */
ptr1 = mmap(NULL, MMAP_SIZE, PROT_READ, MAP_SHARED, fd, 0);
UT_ASSERTne(ptr1, MAP_FAILED);
#ifndef _WIN32
UT_ASSERTeq(mprotect(ptr1, MMAP_SIZE, PROT_READ|PROT_WRITE), 0);
check_access(ptr1, MMAP_SIZE, PROT_READ|PROT_WRITE);
UT_ASSERTeq(munmap(ptr1, MMAP_SIZE), 0);
#else
/* XXX - not supported yet */
UT_ASSERTne(mprotect(ptr1, MMAP_SIZE, PROT_READ|PROT_WRITE), 0);
check_access(ptr1, MMAP_SIZE, PROT_READ);
UT_ASSERTeq(munmap(ptr1, MMAP_SIZE), 0);
#endif
/* change protection; R/W => R/O */
ptr1 = mmap(NULL, MMAP_SIZE, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
UT_ASSERTne(ptr1, MAP_FAILED);
UT_ASSERTeq(mprotect(ptr1, MMAP_SIZE, PROT_READ), 0);
check_access(ptr1, MMAP_SIZE, PROT_READ);
UT_ASSERTeq(munmap(ptr1, MMAP_SIZE), 0);
/* change protection; R/W => none */
ptr1 = mmap(NULL, MMAP_SIZE, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
UT_ASSERTne(ptr1, MAP_FAILED);
UT_ASSERTeq(mprotect(ptr1, MMAP_SIZE, PROT_NONE), 0);
check_access(ptr1, MMAP_SIZE, PROT_NONE);
UT_ASSERTeq(munmap(ptr1, MMAP_SIZE), 0);
/* unaligned pointer - should fail */
ptr1 = mmap(NULL, MMAP_SIZE, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
UT_ASSERTne(ptr1, MAP_FAILED);
errno = 0;
UT_ASSERTne(mprotect(ptr1 + 100, MMAP_SIZE, PROT_READ), 0);
UT_ASSERTeq(errno, EINVAL);
check_access(ptr1, MMAP_SIZE, PROT_READ|PROT_WRITE);
UT_ASSERTeq(munmap(ptr1, MMAP_SIZE), 0);
/* invalid pointer - should fail */
errno = 0;
UT_ASSERTne(mprotect(ptr1, MMAP_SIZE, PROT_READ), 0);
UT_ASSERTeq(errno, ENOMEM);
/* unaligned len - should succeed */
ptr1 = mmap(NULL, FILE_SIZE, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
UT_ASSERTne(ptr1, MAP_FAILED);
UT_ASSERTeq(mprotect(ptr1, PAGE_SIZE + 100, PROT_READ), 0);
check_access(ptr1, PAGE_SIZE * 2, PROT_READ);
check_access(ptr1 + PAGE_SIZE * 2, FILE_SIZE - PAGE_SIZE * 2,
PROT_READ|PROT_WRITE);
UT_ASSERTeq(munmap(ptr1, FILE_SIZE), 0);
/* partial protection change (on page boundary) */
ptr1 = mmap(NULL, FILE_SIZE, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
UT_ASSERTne(ptr1, MAP_FAILED);
UT_ASSERTeq(mprotect(ptr1 + PAGE_SIZE, PAGE_SIZE, PROT_READ), 0);
UT_ASSERTeq(mprotect(ptr1 + PAGE_SIZE * 2, PAGE_SIZE, PROT_NONE), 0);
check_access(ptr1, PAGE_SIZE, PROT_READ|PROT_WRITE);
check_access(ptr1 + PAGE_SIZE, PAGE_SIZE, PROT_READ);
check_access(ptr1 + PAGE_SIZE * 2, PAGE_SIZE, PROT_NONE);
check_access(ptr1 + PAGE_SIZE * 3, FILE_SIZE - PAGE_SIZE * 3,
PROT_READ|PROT_WRITE);
UT_ASSERTeq(munmap(ptr1, FILE_SIZE), 0);
/* range includes invalid addresses - should fail */
ptr1 = mmap(NULL, FILE_SIZE, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
UT_ASSERTne(ptr1, MAP_FAILED);
UT_ASSERTeq(munmap(ptr1 + MMAP_SIZE, MMAP_SIZE), 0);
UT_ASSERTeq(munmap(ptr1 + MMAP_SIZE * 3, MMAP_SIZE), 0);
check_access(ptr1 + MMAP_SIZE, MMAP_SIZE, PROT_NONE);
check_access(ptr1 + MMAP_SIZE * 3, MMAP_SIZE, PROT_NONE);
errno = 0;
UT_ASSERTne(mprotect(ptr1, MMAP_SIZE * 4, PROT_READ), 0);
UT_ASSERTeq(errno, ENOMEM);
#ifndef _WIN32
/* protection changed for all the pages up to the first invalid */
check_access(ptr1, MMAP_SIZE, PROT_READ);
check_access(ptr1 + MMAP_SIZE * 2, MMAP_SIZE, PROT_READ|PROT_WRITE);
#else
/* XXX - protection changed for all the valid pages */
check_access(ptr1, MMAP_SIZE, PROT_READ);
check_access(ptr1 + MMAP_SIZE * 2, MMAP_SIZE, PROT_READ);
#endif
UT_ASSERTeq(munmap(ptr1, FILE_SIZE), 0);
/* change protection on two adjacent mappings */
ptr1 = mmap(ptr1, MMAP_SIZE * 2, PROT_READ|PROT_WRITE,
MAP_SHARED, fd, 0);
UT_ASSERTne(ptr1, MAP_FAILED);
ptr2 = mmap(ptr1 + MMAP_SIZE * 2, MMAP_SIZE * 2, PROT_READ|PROT_WRITE,
MAP_SHARED, fd, MMAP_SIZE * 2);
UT_ASSERTeq(ptr2, ptr1 + MMAP_SIZE * 2);
UT_ASSERTeq(mprotect(ptr1 + MMAP_SIZE, MMAP_SIZE * 2, PROT_NONE), 0);
check_access(ptr1, MMAP_SIZE, PROT_READ|PROT_WRITE);
check_access(ptr1 + MMAP_SIZE, MMAP_SIZE * 2, PROT_NONE);
check_access(ptr1 + MMAP_SIZE * 3, MMAP_SIZE, PROT_READ|PROT_WRITE);
UT_ASSERTeq(munmap(ptr1, MMAP_SIZE * 4), 0);
/* change protection to R/W on file opened in read-only mode */
ptr1 = mmap(NULL, MMAP_SIZE, PROT_READ, MAP_SHARED, fd_ro, 0);
UT_ASSERTne(ptr1, MAP_FAILED);
errno = 0;
UT_ASSERTne(mprotect(ptr1, MMAP_SIZE, PROT_READ|PROT_WRITE), 0);
UT_ASSERTeq(errno, EACCES);
UT_ASSERTeq(munmap(ptr1, MMAP_SIZE), 0);
}
/*
* test_mprotect_anon -- test memory protection on anonymous mappings
*/
static void
test_mprotect_anon(void)
{
char *ptr1;
char *ptr2;
/* unknown PROT flag - should succeed */
ptr1 = mmap(NULL, MMAP_SIZE, PROT_READ|PROT_WRITE,
MAP_PRIVATE|MAP_ANON, -1, 0);
UT_ASSERTne(ptr1, MAP_FAILED);
UT_ASSERTeq(mprotect(ptr1, MMAP_SIZE, PROT_ALL + 1), 0);
check_access(ptr1, MMAP_SIZE, PROT_NONE);
UT_ASSERTeq(munmap(ptr1, MMAP_SIZE), 0);
/* len == 0 - should succeed */
ptr1 = mmap(NULL, MMAP_SIZE, PROT_READ|PROT_WRITE,
MAP_SHARED|MAP_ANON, -1, 0);
UT_ASSERTne(ptr1, MAP_FAILED);
UT_ASSERTeq(mprotect(ptr1, 0, PROT_READ), 0);
check_access(ptr1, MMAP_SIZE, PROT_READ|PROT_WRITE);
UT_ASSERTeq(munmap(ptr1, MMAP_SIZE), 0);
/* change protection: R/O => R/W */
ptr1 = mmap(NULL, MMAP_SIZE, PROT_READ, MAP_PRIVATE|MAP_ANON, -1, 0);
UT_ASSERTne(ptr1, MAP_FAILED);
#ifndef _WIN32
UT_ASSERTeq(mprotect(ptr1, MMAP_SIZE, PROT_READ|PROT_WRITE), 0);
check_access(ptr1, MMAP_SIZE, PROT_READ|PROT_WRITE);
UT_ASSERTeq(munmap(ptr1, MMAP_SIZE), 0);
#else
/* XXX - not supported yet */
UT_ASSERTne(mprotect(ptr1, MMAP_SIZE, PROT_READ|PROT_WRITE), 0);
check_access(ptr1, MMAP_SIZE, PROT_READ);
UT_ASSERTeq(munmap(ptr1, MMAP_SIZE), 0);
#endif
/* change protection; R/W => R/O */
ptr1 = mmap(NULL, MMAP_SIZE, PROT_READ|PROT_WRITE,
MAP_SHARED|MAP_ANON, -1, 0);
UT_ASSERTne(ptr1, MAP_FAILED);
UT_ASSERTeq(mprotect(ptr1, MMAP_SIZE, PROT_READ), 0);
check_access(ptr1, MMAP_SIZE, PROT_READ);
UT_ASSERTeq(munmap(ptr1, MMAP_SIZE), 0);
/* change protection; R/W => none */
ptr1 = mmap(NULL, MMAP_SIZE, PROT_READ|PROT_WRITE,
MAP_PRIVATE|MAP_ANON, -1, 0);
UT_ASSERTne(ptr1, MAP_FAILED);
UT_ASSERTeq(mprotect(ptr1, MMAP_SIZE, PROT_NONE), 0);
check_access(ptr1, MMAP_SIZE, PROT_NONE);
UT_ASSERTeq(munmap(ptr1, MMAP_SIZE), 0);
/* unaligned pointer - should fail */
ptr1 = mmap(NULL, MMAP_SIZE, PROT_READ|PROT_WRITE,
MAP_SHARED|MAP_ANON, -1, 0);
UT_ASSERTne(ptr1, MAP_FAILED);
errno = 0;
UT_ASSERTne(mprotect(ptr1 + 100, MMAP_SIZE, PROT_READ), 0);
UT_ASSERTeq(errno, EINVAL);
check_access(ptr1, MMAP_SIZE, PROT_READ|PROT_WRITE);
UT_ASSERTeq(munmap(ptr1, MMAP_SIZE), 0);
/* invalid pointer - should fail */
errno = 0;
UT_ASSERTne(mprotect(ptr1, MMAP_SIZE, PROT_READ), 0);
UT_ASSERTeq(errno, ENOMEM);
/* unaligned len - should succeed */
ptr1 = mmap(NULL, FILE_SIZE, PROT_READ|PROT_WRITE,
MAP_PRIVATE|MAP_ANON, -1, 0);
UT_ASSERTne(ptr1, MAP_FAILED);
UT_ASSERTeq(mprotect(ptr1, PAGE_SIZE + 100, PROT_READ), 0);
check_access(ptr1, PAGE_SIZE * 2, PROT_READ);
check_access(ptr1 + PAGE_SIZE * 2, FILE_SIZE - PAGE_SIZE * 2,
PROT_READ|PROT_WRITE);
UT_ASSERTeq(munmap(ptr1, FILE_SIZE), 0);
/* partial protection change (on page boundary) */
ptr1 = mmap(NULL, FILE_SIZE, PROT_READ|PROT_WRITE,
MAP_SHARED|MAP_ANON, -1, 0);
UT_ASSERTne(ptr1, MAP_FAILED);
UT_ASSERTeq(mprotect(ptr1 + PAGE_SIZE, PAGE_SIZE, PROT_READ), 0);
UT_ASSERTeq(mprotect(ptr1 + PAGE_SIZE * 2, PAGE_SIZE, PROT_NONE), 0);
check_access(ptr1, PAGE_SIZE, PROT_READ|PROT_WRITE);
check_access(ptr1 + PAGE_SIZE, PAGE_SIZE, PROT_READ);
check_access(ptr1 + PAGE_SIZE * 2, PAGE_SIZE, PROT_NONE);
check_access(ptr1 + PAGE_SIZE * 3, FILE_SIZE - PAGE_SIZE * 3,
PROT_READ|PROT_WRITE);
UT_ASSERTeq(munmap(ptr1, FILE_SIZE), 0);
/* range includes invalid addresses - should fail */
ptr1 = mmap(NULL, FILE_SIZE, PROT_READ|PROT_WRITE,
MAP_SHARED|MAP_ANON, -1, 0);
UT_ASSERTne(ptr1, MAP_FAILED);
UT_ASSERTeq(munmap(ptr1 + MMAP_SIZE, MMAP_SIZE), 0);
UT_ASSERTeq(munmap(ptr1 + MMAP_SIZE * 3, MMAP_SIZE), 0);
check_access(ptr1 + MMAP_SIZE, MMAP_SIZE, PROT_NONE);
check_access(ptr1 + MMAP_SIZE * 3, MMAP_SIZE, PROT_NONE);
errno = 0;
UT_ASSERTne(mprotect(ptr1, MMAP_SIZE * 4, PROT_READ), 0);
UT_ASSERTeq(errno, ENOMEM);
#ifndef _WIN32
/* protection changed for all the pages up to the first invalid */
check_access(ptr1, MMAP_SIZE, PROT_READ);
check_access(ptr1 + MMAP_SIZE * 2, MMAP_SIZE, PROT_READ|PROT_WRITE);
#else
/* XXX - protection changed for all the valid pages */
check_access(ptr1, MMAP_SIZE, PROT_READ);
check_access(ptr1 + MMAP_SIZE * 2, MMAP_SIZE, PROT_READ);
#endif
UT_ASSERTeq(munmap(ptr1, FILE_SIZE), 0);
/* change protection on two adjacent mappings */
ptr1 = mmap(ptr1, MMAP_SIZE * 2, PROT_READ|PROT_WRITE,
MAP_SHARED|MAP_ANON, -1, 0);
UT_ASSERTne(ptr1, MAP_FAILED);
ptr2 = mmap(ptr1 + MMAP_SIZE * 2, MMAP_SIZE * 2, PROT_READ|PROT_WRITE,
MAP_SHARED|MAP_ANON, -1, MMAP_SIZE * 2);
UT_ASSERTeq(ptr2, ptr1 + MMAP_SIZE * 2);
UT_ASSERTeq(mprotect(ptr1 + MMAP_SIZE, MMAP_SIZE * 2, PROT_NONE), 0);
check_access(ptr1, MMAP_SIZE, PROT_READ|PROT_WRITE);
check_access(ptr1 + MMAP_SIZE, MMAP_SIZE * 2, PROT_NONE);
check_access(ptr1 + MMAP_SIZE * 3, MMAP_SIZE, PROT_READ|PROT_WRITE);
UT_ASSERTeq(munmap(ptr1, MMAP_SIZE * 4), 0);
}
int
main(int argc, char *argv[])
{
START(argc, argv, "mmap");
if (argc != 2)
UT_FATAL("usage: %s file", argv[0]);
int fd = OPEN(argv[1], O_RDWR);
int fd_ro = OPEN(argv[1], O_RDONLY);
POSIX_FALLOCATE(fd, 0, FILE_SIZE);
test_mmap_flags(fd);
test_mmap_len(fd);
test_mmap_hint(fd);
test_mmap_fixed(fd);
test_mmap_anon(fd);
test_mmap_shared(fd);
test_mmap_prot(fd, fd_ro);
test_mmap_prot_anon();
test_munmap(fd);
test_msync(fd);
test_mprotect(fd, fd_ro);
test_mprotect_anon();
CLOSE(fd_ro);
CLOSE(fd);
DONE(NULL);
}
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/log_walker/log_walker.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2017, Intel Corporation */
/*
* log_walker.c -- unit test to verify pool's write-protection in debug mode
*
* usage: log_walker file
*
*/
#include <sys/param.h>
#include "unittest.h"
/*
* do_append -- call pmemlog_append() & print result
*/
static void
do_append(PMEMlogpool *plp)
{
const char *str[6] = {
"1st append string\n",
"2nd append string\n",
"3rd append string\n",
"4th append string\n",
"5th append string\n",
"6th append string\n"
};
for (int i = 0; i < 6; ++i) {
int rv = pmemlog_append(plp, str[i], strlen(str[i]));
switch (rv) {
case 0:
UT_OUT("append str[%i] %s", i, str[i]);
break;
case -1:
UT_OUT("!append str[%i] %s", i, str[i]);
break;
default:
UT_OUT("!append: wrong return value");
break;
}
}
}
/*
* try_to_store -- try to store to the buffer 'buf'
*
* It is a walker function for pmemlog_walk
*/
static int
try_to_store(const void *buf, size_t len, void *arg)
{
memset((void *)buf, 0, len);
return 0;
}
/*
* do_walk -- call pmemlog_walk() & print result
*/
static void
do_walk(PMEMlogpool *plp)
{
pmemlog_walk(plp, 0, try_to_store, NULL);
UT_OUT("walk all at once");
}
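/*
 * For contrast, a well-behaved walker only reads the buffer handed to it
 * by pmemlog_walk(), e.g. pmemlog_walk(plp, 0, print_chunk, NULL) with
 * the following (illustrative sketch, not part of this test):
 */
#if 0
static int
print_chunk(const void *buf, size_t len, void *arg)
{
	fwrite(buf, 1, len, stdout);	/* read-only use of the log data */
	return 1;	/* nonzero means continue walking */
}
#endif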
static ut_jmp_buf_t Jmp;
/*
* signal_handler -- called on SIGSEGV
*/
static void
signal_handler(int sig)
{
UT_OUT("signal: %s", os_strsignal(sig));
ut_siglongjmp(Jmp);
}
int
main(int argc, char *argv[])
{
PMEMlogpool *plp;
START(argc, argv, "log_walker");
if (argc != 2)
UT_FATAL("usage: %s file-name", argv[0]);
const char *path = argv[1];
int fd = OPEN(path, O_RDWR);
/* pre-allocate 2MB of persistent memory */
POSIX_FALLOCATE(fd, (os_off_t)0, (size_t)(2 * 1024 * 1024));
CLOSE(fd);
if ((plp = pmemlog_create(path, 0, S_IWUSR | S_IRUSR)) == NULL)
UT_FATAL("!pmemlog_create: %s", path);
/* append some data */
do_append(plp);
/* arrange to catch SEGV */
struct sigaction v;
sigemptyset(&v.sa_mask);
v.sa_flags = 0;
v.sa_handler = signal_handler;
SIGACTION(SIGSEGV, &v, NULL);
if (!ut_sigsetjmp(Jmp)) {
do_walk(plp);
}
pmemlog_close(plp);
DONE(NULL);
}
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_tx_alloc/obj_tx_alloc.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* obj_tx_alloc.c -- unit test for pmemobj_tx_alloc and pmemobj_tx_zalloc
*/
#include <assert.h>
#include <sys/param.h>
#include <string.h>
#include "unittest.h"
#include "libpmemobj.h"
#include "util.h"
#include "valgrind_internal.h"
#define LAYOUT_NAME "tx_alloc"
#define TEST_VALUE_1 1
#define TEST_VALUE_2 2
#define OBJ_SIZE (200 * 1024)
enum type_number {
TYPE_NO_TX,
TYPE_COMMIT,
TYPE_ABORT,
TYPE_ZEROED_COMMIT,
TYPE_ZEROED_ABORT,
TYPE_XCOMMIT,
TYPE_XABORT,
TYPE_XZEROED_COMMIT,
TYPE_XZEROED_ABORT,
TYPE_XNOFLUSHED_COMMIT,
TYPE_COMMIT_NESTED1,
TYPE_COMMIT_NESTED2,
TYPE_ABORT_NESTED1,
TYPE_ABORT_NESTED2,
TYPE_ABORT_AFTER_NESTED1,
TYPE_ABORT_AFTER_NESTED2,
TYPE_OOM,
};
TOID_DECLARE(struct object, TYPE_OOM);
struct object {
size_t value;
char data[OBJ_SIZE - sizeof(size_t)];
};
/*
* do_tx_alloc_oom -- allocates objects until OOM
*/
static void
do_tx_alloc_oom(PMEMobjpool *pop)
{
int do_alloc = 1;
size_t alloc_cnt = 0;
do {
TX_BEGIN(pop) {
TOID(struct object) obj = TX_NEW(struct object);
D_RW(obj)->value = alloc_cnt;
} TX_ONCOMMIT {
alloc_cnt++;
} TX_ONABORT {
do_alloc = 0;
} TX_END
} while (do_alloc);
size_t bitmap_size = howmany(alloc_cnt, 8);
char *bitmap = (char *)MALLOC(bitmap_size);
memset(bitmap, 0, bitmap_size);
size_t obj_cnt = 0;
TOID(struct object) i;
POBJ_FOREACH_TYPE(pop, i) {
UT_ASSERT(D_RO(i)->value < alloc_cnt);
UT_ASSERT(!isset(bitmap, D_RO(i)->value));
setbit(bitmap, D_RO(i)->value);
obj_cnt++;
}
FREE(bitmap);
UT_ASSERTeq(obj_cnt, alloc_cnt);
TOID(struct object) o = POBJ_FIRST(pop, struct object);
while (!TOID_IS_NULL(o)) {
TOID(struct object) next = POBJ_NEXT(o);
POBJ_FREE(&o);
o = next;
}
}
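/*
 * The verification above relies on the classic <sys/param.h> bitmap
 * macros: howmany(a, b) rounds a/b up, and setbit()/isset() track one
 * bit per allocated value, so a duplicate or missing counter is caught.
 * Sketch of the idiom:
 *
 *	char *bm = calloc(howmany(n, 8), 1);
 *	setbit(bm, v);
 *	...
 *	if (isset(bm, v)) - duplicate detected
 */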
/*
* do_tx_alloc_abort_after_nested -- aborts transaction after allocation
* in nested transaction
*/
static void
do_tx_alloc_abort_after_nested(PMEMobjpool *pop)
{
TOID(struct object) obj1;
TOID(struct object) obj2;
TX_BEGIN(pop) {
TOID_ASSIGN(obj1, pmemobj_tx_alloc(sizeof(struct object),
TYPE_ABORT_AFTER_NESTED1));
UT_ASSERT(!TOID_IS_NULL(obj1));
D_RW(obj1)->value = TEST_VALUE_1;
TX_BEGIN(pop) {
TOID_ASSIGN(obj2, pmemobj_tx_zalloc(
sizeof(struct object),
TYPE_ABORT_AFTER_NESTED2));
UT_ASSERT(!TOID_IS_NULL(obj2));
UT_ASSERT(util_is_zeroed(D_RO(obj2),
sizeof(struct object)));
D_RW(obj2)->value = TEST_VALUE_2;
} TX_ONCOMMIT {
UT_ASSERTeq(D_RO(obj2)->value, TEST_VALUE_2);
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
pmemobj_tx_abort(-1);
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_ONABORT {
TOID_ASSIGN(obj1, OID_NULL);
TOID_ASSIGN(obj2, OID_NULL);
} TX_END
TOID(struct object) first;
/* check the obj1 object */
UT_ASSERT(TOID_IS_NULL(obj1));
first.oid = POBJ_FIRST_TYPE_NUM(pop, TYPE_ABORT_AFTER_NESTED1);
UT_ASSERT(TOID_IS_NULL(first));
/* check the obj2 object */
UT_ASSERT(TOID_IS_NULL(obj2));
first.oid = POBJ_FIRST_TYPE_NUM(pop, TYPE_ABORT_AFTER_NESTED2);
UT_ASSERT(TOID_IS_NULL(first));
}
/*
* do_tx_alloc_abort_nested -- aborts transaction in nested transaction
*/
static void
do_tx_alloc_abort_nested(PMEMobjpool *pop)
{
TOID(struct object) obj1;
TOID(struct object) obj2;
TX_BEGIN(pop) {
TOID_ASSIGN(obj1, pmemobj_tx_alloc(sizeof(struct object),
TYPE_ABORT_NESTED1));
UT_ASSERT(!TOID_IS_NULL(obj1));
D_RW(obj1)->value = TEST_VALUE_1;
TX_BEGIN(pop) {
TOID_ASSIGN(obj2, pmemobj_tx_zalloc(
sizeof(struct object),
TYPE_ABORT_NESTED2));
UT_ASSERT(!TOID_IS_NULL(obj2));
UT_ASSERT(util_is_zeroed(D_RO(obj2),
sizeof(struct object)));
D_RW(obj2)->value = TEST_VALUE_2;
pmemobj_tx_abort(-1);
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_ONABORT {
TOID_ASSIGN(obj2, OID_NULL);
} TX_END
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_ONABORT {
TOID_ASSIGN(obj1, OID_NULL);
} TX_END
TOID(struct object) first;
/* check the obj1 object */
UT_ASSERT(TOID_IS_NULL(obj1));
first.oid = POBJ_FIRST_TYPE_NUM(pop, TYPE_ABORT_NESTED1);
UT_ASSERT(TOID_IS_NULL(first));
/* check the obj2 object */
UT_ASSERT(TOID_IS_NULL(obj2));
first.oid = POBJ_FIRST_TYPE_NUM(pop, TYPE_ABORT_NESTED2);
UT_ASSERT(TOID_IS_NULL(first));
}
/*
* do_tx_alloc_commit_nested -- allocates two objects, one in nested transaction
*/
static void
do_tx_alloc_commit_nested(PMEMobjpool *pop)
{
TOID(struct object) obj1;
TOID(struct object) obj2;
TX_BEGIN(pop) {
TOID_ASSIGN(obj1, pmemobj_tx_alloc(sizeof(struct object),
TYPE_COMMIT_NESTED1));
UT_ASSERT(!TOID_IS_NULL(obj1));
D_RW(obj1)->value = TEST_VALUE_1;
TX_BEGIN(pop) {
TOID_ASSIGN(obj2, pmemobj_tx_zalloc(
sizeof(struct object),
TYPE_COMMIT_NESTED2));
UT_ASSERT(!TOID_IS_NULL(obj2));
UT_ASSERT(util_is_zeroed(D_RO(obj2),
sizeof(struct object)));
D_RW(obj2)->value = TEST_VALUE_2;
} TX_ONCOMMIT {
UT_ASSERTeq(D_RO(obj1)->value, TEST_VALUE_1);
UT_ASSERTeq(D_RO(obj2)->value, TEST_VALUE_2);
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
} TX_ONCOMMIT {
UT_ASSERTeq(D_RO(obj1)->value, TEST_VALUE_1);
UT_ASSERTeq(D_RO(obj2)->value, TEST_VALUE_2);
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
TOID(struct object) first;
TOID(struct object) next;
/* check the obj1 object */
TOID_ASSIGN(first, POBJ_FIRST_TYPE_NUM(pop, TYPE_COMMIT_NESTED1));
UT_ASSERT(TOID_EQUALS(first, obj1));
UT_ASSERTeq(D_RO(first)->value, TEST_VALUE_1);
TOID_ASSIGN(next, POBJ_NEXT_TYPE_NUM(first.oid));
UT_ASSERT(TOID_IS_NULL(next));
/* check the obj2 object */
TOID_ASSIGN(first, POBJ_FIRST_TYPE_NUM(pop, TYPE_COMMIT_NESTED2));
UT_ASSERT(TOID_EQUALS(first, obj2));
UT_ASSERTeq(D_RO(first)->value, TEST_VALUE_2);
TOID_ASSIGN(next, POBJ_NEXT_TYPE_NUM(first.oid));
UT_ASSERT(TOID_IS_NULL(next));
}
/*
* do_tx_alloc_abort -- allocates an object and aborts the transaction
*/
static void
do_tx_alloc_abort(PMEMobjpool *pop)
{
TOID(struct object) obj;
TX_BEGIN(pop) {
TOID_ASSIGN(obj, pmemobj_tx_alloc(sizeof(struct object),
TYPE_ABORT));
UT_ASSERT(!TOID_IS_NULL(obj));
D_RW(obj)->value = TEST_VALUE_1;
pmemobj_tx_abort(-1);
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_ONABORT {
TOID_ASSIGN(obj, OID_NULL);
} TX_END
UT_ASSERT(TOID_IS_NULL(obj));
TOID(struct object) first;
TOID_ASSIGN(first, POBJ_FIRST_TYPE_NUM(pop, TYPE_ABORT));
UT_ASSERT(TOID_IS_NULL(first));
}
/*
* do_tx_alloc_zerolen -- allocates an object of zero size to trigger tx abort
*/
static void
do_tx_alloc_zerolen(PMEMobjpool *pop)
{
TOID(struct object) obj;
TX_BEGIN(pop) {
TOID_ASSIGN(obj, pmemobj_tx_alloc(0, TYPE_ABORT));
UT_ASSERT(0); /* should not get to this point */
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_ONABORT {
TOID_ASSIGN(obj, OID_NULL);
} TX_END
UT_ASSERT(TOID_IS_NULL(obj));
TOID(struct object) first;
TOID_ASSIGN(first, POBJ_FIRST_TYPE_NUM(pop, TYPE_ABORT));
UT_ASSERT(TOID_IS_NULL(first));
}
/*
* do_tx_alloc_huge -- allocates a huge object to trigger tx abort
*/
static void
do_tx_alloc_huge(PMEMobjpool *pop)
{
TOID(struct object) obj;
TX_BEGIN(pop) {
TOID_ASSIGN(obj, pmemobj_tx_alloc(PMEMOBJ_MAX_ALLOC_SIZE + 1,
TYPE_ABORT));
UT_ASSERT(0); /* should not get to this point */
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_ONABORT {
TOID_ASSIGN(obj, OID_NULL);
} TX_END
UT_ASSERT(TOID_IS_NULL(obj));
TOID(struct object) first;
TOID_ASSIGN(first, POBJ_FIRST_TYPE_NUM(pop, TYPE_ABORT));
UT_ASSERT(TOID_IS_NULL(first));
}
/*
 * do_tx_alloc_commit -- allocates an object
*/
static void
do_tx_alloc_commit(PMEMobjpool *pop)
{
TOID(struct object) obj;
TX_BEGIN(pop) {
TOID_ASSIGN(obj, pmemobj_tx_alloc(sizeof(struct object),
TYPE_COMMIT));
UT_ASSERT(!TOID_IS_NULL(obj));
D_RW(obj)->value = TEST_VALUE_1;
} TX_ONCOMMIT {
UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1);
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
TOID(struct object) first;
TOID_ASSIGN(first, POBJ_FIRST_TYPE_NUM(pop, TYPE_COMMIT));
UT_ASSERT(TOID_EQUALS(first, obj));
UT_ASSERTeq(D_RO(first)->value, D_RO(obj)->value);
TOID(struct object) next;
next = POBJ_NEXT(first);
UT_ASSERT(TOID_IS_NULL(next));
}
/*
* do_tx_zalloc_abort -- allocates a zeroed object and aborts the transaction
*/
static void
do_tx_zalloc_abort(PMEMobjpool *pop)
{
TOID(struct object) obj;
TX_BEGIN(pop) {
TOID_ASSIGN(obj, pmemobj_tx_zalloc(sizeof(struct object),
TYPE_ZEROED_ABORT));
UT_ASSERT(!TOID_IS_NULL(obj));
UT_ASSERT(util_is_zeroed(D_RO(obj), sizeof(struct object)));
D_RW(obj)->value = TEST_VALUE_1;
pmemobj_tx_abort(-1);
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_ONABORT {
TOID_ASSIGN(obj, OID_NULL);
} TX_END
UT_ASSERT(TOID_IS_NULL(obj));
TOID(struct object) first;
TOID_ASSIGN(first, POBJ_FIRST_TYPE_NUM(pop, TYPE_ZEROED_ABORT));
UT_ASSERT(TOID_IS_NULL(first));
}
/*
 * do_tx_zalloc_zerolen -- allocates an object of zero size to trigger tx abort
*/
static void
do_tx_zalloc_zerolen(PMEMobjpool *pop)
{
TOID(struct object) obj;
TX_BEGIN(pop) {
TOID_ASSIGN(obj, pmemobj_tx_zalloc(0, TYPE_ZEROED_ABORT));
UT_ASSERT(0); /* should not get to this point */
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_ONABORT {
TOID_ASSIGN(obj, OID_NULL);
} TX_END
UT_ASSERT(TOID_IS_NULL(obj));
TOID(struct object) first;
TOID_ASSIGN(first, POBJ_FIRST_TYPE_NUM(pop, TYPE_ZEROED_ABORT));
UT_ASSERT(TOID_IS_NULL(first));
}
/*
* do_tx_zalloc_huge -- allocates a huge object to trigger tx abort
*/
static void
do_tx_zalloc_huge(PMEMobjpool *pop)
{
TOID(struct object) obj;
TX_BEGIN(pop) {
TOID_ASSIGN(obj, pmemobj_tx_zalloc(PMEMOBJ_MAX_ALLOC_SIZE + 1,
TYPE_ZEROED_ABORT));
UT_ASSERT(0); /* should not get to this point */
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_ONABORT {
TOID_ASSIGN(obj, OID_NULL);
} TX_END
UT_ASSERT(TOID_IS_NULL(obj));
TOID(struct object) first;
TOID_ASSIGN(first, POBJ_FIRST_TYPE_NUM(pop, TYPE_ZEROED_ABORT));
UT_ASSERT(TOID_IS_NULL(first));
}
/*
* do_tx_zalloc_commit -- allocates zeroed object
*/
static void
do_tx_zalloc_commit(PMEMobjpool *pop)
{
TOID(struct object) obj;
TX_BEGIN(pop) {
TOID_ASSIGN(obj, pmemobj_tx_zalloc(sizeof(struct object),
TYPE_ZEROED_COMMIT));
UT_ASSERT(!TOID_IS_NULL(obj));
UT_ASSERT(util_is_zeroed(D_RO(obj), sizeof(struct object)));
D_RW(obj)->value = TEST_VALUE_1;
} TX_ONCOMMIT {
UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1);
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
TOID(struct object) first;
TOID_ASSIGN(first, POBJ_FIRST_TYPE_NUM(pop, TYPE_ZEROED_COMMIT));
UT_ASSERT(TOID_EQUALS(first, obj));
UT_ASSERTeq(D_RO(first)->value, D_RO(obj)->value);
TOID(struct object) next;
next = POBJ_NEXT(first);
UT_ASSERT(TOID_IS_NULL(next));
}
/*
 * do_tx_xalloc_abort -- allocates objects (plain and zeroed) and aborts
 * the transactions
*/
static void
do_tx_xalloc_abort(PMEMobjpool *pop)
{
/* xalloc 0 */
TOID(struct object) obj;
TX_BEGIN(pop) {
TOID_ASSIGN(obj, pmemobj_tx_xalloc(sizeof(struct object),
TYPE_XABORT, 0));
UT_ASSERT(!TOID_IS_NULL(obj));
D_RW(obj)->value = TEST_VALUE_1;
pmemobj_tx_abort(-1);
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_ONABORT {
TOID_ASSIGN(obj, OID_NULL);
} TX_END
UT_ASSERT(TOID_IS_NULL(obj));
TOID(struct object) first;
TOID_ASSIGN(first, POBJ_FIRST_TYPE_NUM(pop, TYPE_XABORT));
UT_ASSERT(TOID_IS_NULL(first));
/* xalloc ZERO */
TX_BEGIN(pop) {
TOID_ASSIGN(obj, pmemobj_tx_xalloc(sizeof(struct object),
TYPE_XZEROED_ABORT, POBJ_XALLOC_ZERO));
UT_ASSERT(!TOID_IS_NULL(obj));
UT_ASSERT(util_is_zeroed(D_RO(obj), sizeof(struct object)));
D_RW(obj)->value = TEST_VALUE_1;
pmemobj_tx_abort(-1);
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_ONABORT {
TOID_ASSIGN(obj, OID_NULL);
} TX_END
UT_ASSERT(TOID_IS_NULL(obj));
TOID_ASSIGN(first, POBJ_FIRST_TYPE_NUM(pop, TYPE_XZEROED_ABORT));
UT_ASSERT(TOID_IS_NULL(first));
}
/*
 * do_tx_xalloc_zerolen -- allocates an object of zero size to trigger tx abort
*/
static void
do_tx_xalloc_zerolen(PMEMobjpool *pop)
{
/* xalloc 0 */
TOID(struct object) obj;
TX_BEGIN(pop) {
TOID_ASSIGN(obj, pmemobj_tx_xalloc(0, TYPE_XABORT, 0));
UT_ASSERT(0); /* should not get to this point */
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_ONABORT {
TOID_ASSIGN(obj, OID_NULL);
} TX_END
UT_ASSERT(TOID_IS_NULL(obj));
/* xalloc 0 with POBJ_XALLOC_NO_ABORT flag */
TX_BEGIN(pop) {
TOID_ASSIGN(obj, pmemobj_tx_xalloc(0, TYPE_XABORT,
POBJ_XALLOC_NO_ABORT));
} TX_ONCOMMIT {
TOID_ASSIGN(obj, OID_NULL);
} TX_ONABORT {
UT_ASSERT(0); /* should not get to this point */
} TX_END
UT_ASSERT(TOID_IS_NULL(obj));
/* alloc 0 with pmemobj_tx_set_failure_behavior called */
TX_BEGIN(pop) {
pmemobj_tx_set_failure_behavior(POBJ_TX_FAILURE_RETURN);
TOID_ASSIGN(obj, pmemobj_tx_alloc(0, TYPE_XABORT));
} TX_ONCOMMIT {
TOID_ASSIGN(obj, OID_NULL);
} TX_ONABORT {
UT_ASSERT(0); /* should not get to this point */
} TX_END
UT_ASSERT(TOID_IS_NULL(obj));
/* xalloc 0 with pmemobj_tx_set_failure_behavior called */
TX_BEGIN(pop) {
pmemobj_tx_set_failure_behavior(POBJ_TX_FAILURE_RETURN);
TOID_ASSIGN(obj, pmemobj_tx_xalloc(0, TYPE_XABORT, 0));
} TX_ONCOMMIT {
TOID_ASSIGN(obj, OID_NULL);
} TX_ONABORT {
UT_ASSERT(0); /* should not get to this point */
} TX_END
UT_ASSERT(TOID_IS_NULL(obj));
/* zalloc 0 with pmemobj_tx_set_failure_behavior called */
TX_BEGIN(pop) {
pmemobj_tx_set_failure_behavior(POBJ_TX_FAILURE_RETURN);
TOID_ASSIGN(obj, pmemobj_tx_zalloc(0, TYPE_XABORT));
} TX_ONCOMMIT {
TOID_ASSIGN(obj, OID_NULL);
} TX_ONABORT {
UT_ASSERT(0); /* should not get to this point */
} TX_END
UT_ASSERT(TOID_IS_NULL(obj));
TOID(struct object) first;
TOID_ASSIGN(first, POBJ_FIRST_TYPE_NUM(pop, TYPE_XABORT));
UT_ASSERT(TOID_IS_NULL(first));
/* xalloc ZERO */
TX_BEGIN(pop) {
TOID_ASSIGN(obj, pmemobj_tx_xalloc(0, TYPE_XZEROED_ABORT,
POBJ_XALLOC_ZERO));
UT_ASSERT(0); /* should not get to this point */
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_ONABORT {
TOID_ASSIGN(obj, OID_NULL);
} TX_END
UT_ASSERT(TOID_IS_NULL(obj));
TOID_ASSIGN(first, POBJ_FIRST_TYPE_NUM(pop, TYPE_XZEROED_ABORT));
UT_ASSERT(TOID_IS_NULL(first));
}
/*
* do_tx_xalloc_huge -- allocates a huge object to trigger tx abort
*/
static void
do_tx_xalloc_huge(PMEMobjpool *pop)
{
/* xalloc 0 */
TOID(struct object) obj;
TX_BEGIN(pop) {
TOID_ASSIGN(obj, pmemobj_tx_xalloc(PMEMOBJ_MAX_ALLOC_SIZE + 1,
TYPE_XABORT, 0));
UT_ASSERT(0); /* should not get to this point */
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_ONABORT {
TOID_ASSIGN(obj, OID_NULL);
} TX_END
UT_ASSERT(TOID_IS_NULL(obj));
TOID(struct object) first;
TOID_ASSIGN(first, POBJ_FIRST_TYPE_NUM(pop, TYPE_XABORT));
UT_ASSERT(TOID_IS_NULL(first));
/* xalloc ZERO */
TX_BEGIN(pop) {
TOID_ASSIGN(obj, pmemobj_tx_xalloc(PMEMOBJ_MAX_ALLOC_SIZE + 1,
TYPE_XZEROED_ABORT, POBJ_XALLOC_ZERO));
UT_ASSERT(0); /* should not get to this point */
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_ONABORT {
TOID_ASSIGN(obj, OID_NULL);
} TX_END
UT_ASSERT(TOID_IS_NULL(obj));
TOID_ASSIGN(first, POBJ_FIRST_TYPE_NUM(pop, TYPE_XZEROED_ABORT));
UT_ASSERT(TOID_IS_NULL(first));
/*
* do xalloc until overfilled and then
* free last successful allocation
*/
	uint64_t tot_allocated = 0, alloc_size = (5 * 1024 * 1024);
int rc = 0;
PMEMoid oid, prev_oid;
POBJ_FOREACH_SAFE(pop, oid, prev_oid) {
pmemobj_free(&oid);
}
TOID_ASSIGN(first, pmemobj_first(pop));
UT_ASSERT(TOID_IS_NULL(first));
TX_BEGIN(pop) {
while (rc == 0) {
oid = pmemobj_tx_xalloc(alloc_size, 0,
POBJ_XALLOC_NO_ABORT);
if (oid.off == 0)
rc = -1;
else {
tot_allocated += alloc_size;
prev_oid = oid;
}
}
rc = pmemobj_tx_free(prev_oid);
} TX_ONCOMMIT {
UT_ASSERTeq(errno, ENOMEM);
UT_ASSERTeq(rc, 0);
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
}
/*
 * do_tx_xalloc_commit -- allocates objects (plain and zeroed) and commits
*/
static void
do_tx_xalloc_commit(PMEMobjpool *pop)
{
/* xalloc 0 */
TOID(struct object) obj;
TX_BEGIN(pop) {
TOID_ASSIGN(obj, pmemobj_tx_xalloc(sizeof(struct object),
TYPE_XCOMMIT, 0));
UT_ASSERT(!TOID_IS_NULL(obj));
D_RW(obj)->value = TEST_VALUE_1;
} TX_ONCOMMIT {
UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1);
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
TOID(struct object) first;
TOID_ASSIGN(first, POBJ_FIRST_TYPE_NUM(pop, TYPE_XCOMMIT));
UT_ASSERT(TOID_EQUALS(first, obj));
UT_ASSERTeq(D_RO(first)->value, D_RO(obj)->value);
TOID(struct object) next;
TOID_ASSIGN(next, POBJ_NEXT_TYPE_NUM(first.oid));
UT_ASSERT(TOID_IS_NULL(next));
/* xalloc ZERO */
TX_BEGIN(pop) {
TOID_ASSIGN(obj, pmemobj_tx_xalloc(sizeof(struct object),
TYPE_XZEROED_COMMIT, POBJ_XALLOC_ZERO));
UT_ASSERT(!TOID_IS_NULL(obj));
UT_ASSERT(util_is_zeroed(D_RO(obj), sizeof(struct object)));
D_RW(obj)->value = TEST_VALUE_1;
} TX_ONCOMMIT {
UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1);
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
TOID_ASSIGN(first, POBJ_FIRST_TYPE_NUM(pop, TYPE_XZEROED_COMMIT));
UT_ASSERT(TOID_EQUALS(first, obj));
UT_ASSERTeq(D_RO(first)->value, D_RO(obj)->value);
TOID_ASSIGN(next, POBJ_NEXT_TYPE_NUM(first.oid));
UT_ASSERT(TOID_IS_NULL(next));
}
/*
 * do_tx_xalloc_noflush -- allocates an object with the NO_FLUSH flag
*/
static void
do_tx_xalloc_noflush(PMEMobjpool *pop)
{
TOID(struct object) obj;
TX_BEGIN(pop) {
TOID_ASSIGN(obj, pmemobj_tx_xalloc(sizeof(struct object),
TYPE_XNOFLUSHED_COMMIT, POBJ_XALLOC_NO_FLUSH));
UT_ASSERT(!TOID_IS_NULL(obj));
D_RW(obj)->data[OBJ_SIZE - sizeof(size_t) - 1] = TEST_VALUE_1;
/* let pmemcheck find we didn't flush it */
} TX_ONCOMMIT {
UT_ASSERTeq(D_RO(obj)->data[OBJ_SIZE - sizeof(size_t) - 1],
TEST_VALUE_1);
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
TOID(struct object) first;
TOID_ASSIGN(first, POBJ_FIRST_TYPE_NUM(pop, TYPE_XNOFLUSHED_COMMIT));
UT_ASSERT(TOID_EQUALS(first, obj));
UT_ASSERTeq(D_RO(first)->data[OBJ_SIZE - sizeof(size_t) - 1],
D_RO(obj)->data[OBJ_SIZE - sizeof(size_t) - 1]);
TOID(struct object) next;
TOID_ASSIGN(next, POBJ_NEXT_TYPE_NUM(first.oid));
UT_ASSERT(TOID_IS_NULL(next));
}
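/*
 * example_noflush_flush -- a minimal sketch, not invoked by main(): with
 * POBJ_XALLOC_NO_FLUSH the caller takes over flushing. The test above
 * deliberately omits the flush so pmemcheck can flag it; a correct user
 * would flush the modified range explicitly, as assumed here.
 */
static void
example_noflush_flush(PMEMobjpool *pop)
{
	TX_BEGIN(pop) {
		PMEMoid oid = pmemobj_tx_xalloc(sizeof(struct object), 0,
				POBJ_XALLOC_NO_FLUSH);
		UT_ASSERT(!OID_IS_NULL(oid));
		struct object *o = pmemobj_direct(oid);
		o->value = TEST_VALUE_1;
		pmemobj_flush(pop, &o->value, sizeof(o->value));
	} TX_END
}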
/*
* do_tx_root -- retrieve root inside of transaction
*/
static void
do_tx_root(PMEMobjpool *pop)
{
size_t root_size = 24;
TX_BEGIN(pop) {
PMEMoid root = pmemobj_root(pop, root_size);
UT_ASSERT(!OID_IS_NULL(root));
UT_ASSERT(util_is_zeroed(pmemobj_direct(root),
root_size));
UT_ASSERTeq(root_size, pmemobj_root_size(pop));
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
}
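/*
 * example_root_resize -- a minimal sketch, not invoked by main():
 * pmemobj_root() always returns the same root object and grows it on
 * demand; a request with a larger size reallocates it, as asserted on
 * root_size above. The sizes used here are arbitrary.
 */
static void
example_root_resize(PMEMobjpool *pop)
{
	PMEMoid root = pmemobj_root(pop, 24);
	UT_ASSERT(!OID_IS_NULL(root));
	root = pmemobj_root(pop, 64); /* grows the existing root */
	UT_ASSERT(!OID_IS_NULL(root));
	UT_ASSERTeq(pmemobj_root_size(pop), 64);
}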
/*
* do_tx_alloc_many -- allocates many objects inside of a single transaction
*/
static void
do_tx_alloc_many(PMEMobjpool *pop)
{
#define TX_ALLOC_COUNT 70 /* bigger than max reservations */
PMEMoid oid, oid2;
POBJ_FOREACH_SAFE(pop, oid, oid2) {
pmemobj_free(&oid);
}
TOID(struct object) first;
TOID_ASSIGN(first, pmemobj_first(pop));
UT_ASSERT(TOID_IS_NULL(first));
PMEMoid oids[TX_ALLOC_COUNT];
TX_BEGIN(pop) {
for (int i = 0; i < TX_ALLOC_COUNT; ++i) {
oids[i] = pmemobj_tx_alloc(1, 0);
UT_ASSERT(!OID_IS_NULL(oids[i]));
}
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
TX_BEGIN(pop) {
/* empty tx to make sure there's no leftover state */
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
TX_BEGIN(pop) {
for (int i = 0; i < TX_ALLOC_COUNT; ++i) {
pmemobj_tx_free(oids[i]);
}
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
TOID_ASSIGN(first, pmemobj_first(pop));
UT_ASSERT(TOID_IS_NULL(first));
#undef TX_ALLOC_COUNT
}
int
main(int argc, char *argv[])
{
START(argc, argv, "obj_tx_alloc");
util_init();
if (argc != 2)
UT_FATAL("usage: %s [file]", argv[0]);
PMEMobjpool *pop;
if ((pop = pmemobj_create(argv[1], LAYOUT_NAME, 0,
S_IWUSR | S_IRUSR)) == NULL)
UT_FATAL("!pmemobj_create");
do_tx_root(pop);
VALGRIND_WRITE_STATS;
/* alloc */
do_tx_alloc_commit(pop);
VALGRIND_WRITE_STATS;
do_tx_alloc_abort(pop);
VALGRIND_WRITE_STATS;
do_tx_alloc_zerolen(pop);
VALGRIND_WRITE_STATS;
do_tx_alloc_huge(pop);
VALGRIND_WRITE_STATS;
/* zalloc */
do_tx_zalloc_commit(pop);
VALGRIND_WRITE_STATS;
do_tx_zalloc_abort(pop);
VALGRIND_WRITE_STATS;
do_tx_zalloc_zerolen(pop);
VALGRIND_WRITE_STATS;
do_tx_zalloc_huge(pop);
VALGRIND_WRITE_STATS;
/* xalloc */
do_tx_xalloc_commit(pop);
VALGRIND_WRITE_STATS;
do_tx_xalloc_abort(pop);
VALGRIND_WRITE_STATS;
do_tx_xalloc_zerolen(pop);
VALGRIND_WRITE_STATS;
do_tx_xalloc_huge(pop);
VALGRIND_WRITE_STATS;
/* alloc */
do_tx_alloc_commit_nested(pop);
VALGRIND_WRITE_STATS;
do_tx_alloc_abort_nested(pop);
VALGRIND_WRITE_STATS;
do_tx_alloc_abort_after_nested(pop);
VALGRIND_WRITE_STATS;
do_tx_alloc_oom(pop);
VALGRIND_WRITE_STATS;
do_tx_alloc_many(pop);
VALGRIND_WRITE_STATS;
do_tx_xalloc_noflush(pop);
pmemobj_close(pop);
DONE(NULL);
}
| 20,667 | 21.862832 | 80 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_pool_lock/obj_pool_lock.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2018, Intel Corporation */
/*
* obj_pool_lock.c -- unit test which checks whether it's possible to
* simultaneously open the same obj pool
*/
#include "unittest.h"
#define LAYOUT "layout"
static void
test_reopen(const char *path)
{
PMEMobjpool *pop1 = pmemobj_create(path, LAYOUT, PMEMOBJ_MIN_POOL,
S_IWUSR | S_IRUSR);
if (!pop1)
UT_FATAL("!create");
PMEMobjpool *pop2 = pmemobj_open(path, LAYOUT);
if (pop2)
UT_FATAL("pmemobj_open should not succeed");
if (errno != EWOULDBLOCK)
UT_FATAL("!pmemobj_open failed but for unexpected reason");
pmemobj_close(pop1);
pop2 = pmemobj_open(path, LAYOUT);
if (!pop2)
UT_FATAL("pmemobj_open should succeed after close");
pmemobj_close(pop2);
UNLINK(path);
}
#ifndef _WIN32
static void
test_open_in_different_process(int argc, char **argv, unsigned sleep)
{
pid_t pid = fork();
PMEMobjpool *pop;
char *path = argv[1];
if (pid < 0)
UT_FATAL("fork failed");
if (pid == 0) {
/* child */
if (sleep)
usleep(sleep);
while (os_access(path, R_OK))
usleep(100 * 1000);
pop = pmemobj_open(path, LAYOUT);
if (pop)
UT_FATAL("pmemobj_open after fork should not succeed");
if (errno != EWOULDBLOCK)
UT_FATAL("!pmemobj_open after fork failed but for "
"unexpected reason");
exit(0);
}
pop = pmemobj_create(path, LAYOUT, PMEMOBJ_MIN_POOL,
S_IWUSR | S_IRUSR);
if (!pop)
UT_FATAL("!create");
int status;
if (waitpid(pid, &status, 0) < 0)
UT_FATAL("!waitpid failed");
if (!WIFEXITED(status))
UT_FATAL("child process failed");
pmemobj_close(pop);
UNLINK(path);
}
#else
static void
test_open_in_different_process(int argc, char **argv, unsigned sleep)
{
PMEMobjpool *pop;
if (sleep > 0)
return;
char *path = argv[1];
/* before starting the 2nd process, create a pool */
pop = pmemobj_create(path, LAYOUT, PMEMOBJ_MIN_POOL,
S_IWUSR | S_IRUSR);
if (!pop)
UT_FATAL("!create");
/*
* "X" is pass as an additional param to the new process
* created by ut_spawnv to distinguish second process on Windows
*/
uintptr_t result = ut_spawnv(argc, argv, "X", NULL);
if (result == -1)
UT_FATAL("Create new process failed error: %d", GetLastError());
pmemobj_close(pop);
}
#endif
int
main(int argc, char *argv[])
{
START(argc, argv, "obj_pool_lock");
if (argc < 2)
UT_FATAL("usage: %s path", argv[0]);
if (argc == 2) {
test_reopen(argv[1]);
test_open_in_different_process(argc, argv, 0);
for (unsigned i = 1; i < 100000; i *= 2)
test_open_in_different_process(argc, argv, i);
} else if (argc == 3) {
PMEMobjpool *pop;
/* 2nd arg used by windows for 2 process test */
pop = pmemobj_open(argv[1], LAYOUT);
if (pop)
UT_FATAL("pmemobj_open after create process should "
"not succeed");
if (errno != EWOULDBLOCK)
UT_FATAL("!pmemobj_open after create process failed "
"but for unexpected reason");
}
DONE(NULL);
}
| 2,963 | 19.727273 | 69 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/blk_pool/blk_pool.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2019, Intel Corporation */
/*
* blk_pool.c -- unit test for pmemblk_create() and pmemblk_open()
*
* usage: blk_pool op path bsize [poolsize mode]
*
* op can be:
* c - create
* o - open
* f - do fault injection
*
* "poolsize" and "mode" arguments are ignored for "open"
*/
#include "unittest.h"
#include "../libpmemblk/blk.h"
#define MB ((size_t)1 << 20)
static void
do_fault_injection(const char *path, size_t bsize,
size_t poolsize, unsigned mode)
{
if (!pmemblk_fault_injection_enabled())
return;
pmemblk_inject_fault_at(PMEM_MALLOC, 1, "blk_runtime_init");
PMEMblkpool *pbp = pmemblk_create(path, bsize, poolsize, mode);
UT_ASSERTeq(pbp, NULL);
UT_ASSERTeq(errno, ENOMEM);
}
static void
pool_create(const char *path, size_t bsize, size_t poolsize, unsigned mode)
{
PMEMblkpool *pbp = pmemblk_create(path, bsize, poolsize, mode);
if (pbp == NULL)
UT_OUT("!%s: pmemblk_create", path);
else {
os_stat_t stbuf;
STAT(path, &stbuf);
UT_OUT("%s: file size %zu usable blocks %zu mode 0%o",
path, stbuf.st_size,
pmemblk_nblock(pbp),
stbuf.st_mode & 0777);
pmemblk_close(pbp);
int result = pmemblk_check(path, bsize);
if (result < 0)
UT_OUT("!%s: pmemblk_check", path);
else if (result == 0)
UT_OUT("%s: pmemblk_check: not consistent", path);
else
UT_ASSERTeq(pmemblk_check(path, bsize * 2), -1);
}
}
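/*
 * example_check_result -- a minimal sketch, not invoked by main():
 * pmemblk_check() is tri-state -- a negative value means the check
 * itself failed (errno is set), 0 means the pool is inconsistent, and
 * a positive value means it is consistent -- which is why pool_create()
 * above handles all three cases.
 */
static int
example_check_result(const char *path, size_t bsize)
{
	int result = pmemblk_check(path, bsize);
	if (result < 0)
		UT_OUT("!%s: pmemblk_check", path);
	return result > 0; /* 1 when consistent */
}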
static void
pool_open(const char *path, size_t bsize)
{
PMEMblkpool *pbp = pmemblk_open(path, bsize);
if (pbp == NULL)
UT_OUT("!%s: pmemblk_open", path);
else {
UT_OUT("%s: pmemblk_open: Success", path);
pmemblk_close(pbp);
}
}
int
main(int argc, char *argv[])
{
START(argc, argv, "blk_pool");
if (argc < 4)
UT_FATAL("usage: %s op path bsize [poolsize mode]", argv[0]);
size_t bsize = strtoul(argv[3], NULL, 0);
size_t poolsize;
unsigned mode;
switch (argv[1][0]) {
case 'c':
poolsize = strtoul(argv[4], NULL, 0) * MB; /* in megabytes */
mode = strtoul(argv[5], NULL, 8);
pool_create(argv[2], bsize, poolsize, mode);
break;
case 'o':
pool_open(argv[2], bsize);
break;
case 'f':
poolsize = strtoul(argv[4], NULL, 0) * MB; /* in megabytes */
mode = strtoul(argv[5], NULL, 8);
do_fault_injection(argv[2], bsize, poolsize, mode);
break;
default:
UT_FATAL("unknown operation");
}
DONE(NULL);
}
| 2,377 | 20.423423 | 75 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem_memcpy/pmem_memcpy.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* pmem_memcpy.c -- unit test for doing a memcpy
*
* usage: pmem_memcpy file destoff srcoff length
*
*/
#include "unittest.h"
#include "util_pmem.h"
#include "file.h"
#include "memcpy_common.h"
static void *
pmem_memcpy_persist_wrapper(void *pmemdest, const void *src, size_t len,
unsigned flags)
{
(void) flags;
return pmem_memcpy_persist(pmemdest, src, len);
}
static void *
pmem_memcpy_nodrain_wrapper(void *pmemdest, const void *src, size_t len,
unsigned flags)
{
(void) flags;
return pmem_memcpy_nodrain(pmemdest, src, len);
}
static void
do_persist_ddax(const void *ptr, size_t size)
{
util_persist_auto(1, ptr, size);
}
static void
do_persist(const void *ptr, size_t size)
{
util_persist_auto(0, ptr, size);
}
/*
 * swap_mappings - swap the two given mapped regions.
*
* Try swapping src and dest by unmapping src, mapping a new dest with
* the original src address as a hint. If successful, unmap original dest.
* Map a new src with the original dest as a hint.
*/
static void
swap_mappings(char **dest, char **src, size_t size, int fd)
{
char *d = *dest;
char *s = *src;
char *td, *ts;
MUNMAP(*src, size);
/* mmap destination using src addr as a hint */
td = MMAP(s, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
MUNMAP(*dest, size);
*dest = td;
/* mmap src using original destination addr as a hint */
ts = MMAP(d, size, PROT_READ|PROT_WRITE, MAP_SHARED|MAP_ANONYMOUS,
-1, 0);
*src = ts;
}
/*
* do_memcpy_variants -- do_memcpy wrapper that tests multiple variants
* of memcpy functions
*/
static void
do_memcpy_variants(int fd, char *dest, int dest_off, char *src, int src_off,
size_t bytes, size_t mapped_len, const char *file_name,
persist_fn p)
{
do_memcpy(fd, dest, dest_off, src, src_off, bytes, mapped_len,
file_name, pmem_memcpy_persist_wrapper, 0, p);
do_memcpy(fd, dest, dest_off, src, src_off, bytes, mapped_len,
file_name, pmem_memcpy_nodrain_wrapper, 0, p);
for (int i = 0; i < ARRAY_SIZE(Flags); ++i) {
do_memcpy(fd, dest, dest_off, src, src_off, bytes, mapped_len,
file_name, pmem_memcpy, Flags[i], p);
}
}
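/*
 * example_memcpy_flags -- a minimal sketch, not invoked by main():
 * pmem_memcpy() takes a flags argument which the wrappers above pin to
 * their defaults; PMEM_F_MEM_NONTEMPORAL and PMEM_F_MEM_NODRAIN are
 * among the documented hints. Combining them here is an assumption made
 * for illustration.
 */
static void *
example_memcpy_flags(void *pmemdest, const void *src, size_t len)
{
	return pmem_memcpy(pmemdest, src, len,
			PMEM_F_MEM_NONTEMPORAL | PMEM_F_MEM_NODRAIN);
}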
int
main(int argc, char *argv[])
{
int fd;
char *dest;
char *src;
char *dest_orig;
char *src_orig;
size_t mapped_len;
if (argc != 5)
UT_FATAL("usage: %s file srcoff destoff length", argv[0]);
const char *thr = os_getenv("PMEM_MOVNT_THRESHOLD");
const char *avx = os_getenv("PMEM_AVX");
const char *avx512f = os_getenv("PMEM_AVX512F");
START(argc, argv, "pmem_memcpy %s %s %s %s %savx %savx512f",
argv[2], argv[3], argv[4], thr ? thr : "default",
avx ? "" : "!",
avx512f ? "" : "!");
fd = OPEN(argv[1], O_RDWR);
int dest_off = atoi(argv[2]);
int src_off = atoi(argv[3]);
size_t bytes = strtoul(argv[4], NULL, 0);
/* src > dst */
dest_orig = dest = pmem_map_file(argv[1], 0, 0, 0, &mapped_len, NULL);
if (dest == NULL)
UT_FATAL("!could not map file: %s", argv[1]);
src_orig = src = MMAP(dest + mapped_len, mapped_len,
PROT_READ|PROT_WRITE, MAP_SHARED|MAP_ANONYMOUS, -1, 0);
/*
 * It's very unlikely that src would not be > dest. pmem_map_file
 * chooses the first unused address >= 1TB, large
 * enough to hold the given range, and 1GB aligned. If the
 * addresses did not get swapped to make src > dst, the test
 * fails.
*/
if (src <= dest) {
swap_mappings(&dest, &src, mapped_len, fd);
if (src <= dest)
UT_FATAL("cannot map files in memory order");
}
enum file_type type = util_fd_get_type(fd);
if (type < 0)
UT_FATAL("cannot check type of file with fd %d", fd);
persist_fn persist;
persist = type == TYPE_DEVDAX ? do_persist_ddax : do_persist;
memset(dest, 0, (2 * bytes));
persist(dest, 2 * bytes);
memset(src, 0, (2 * bytes));
do_memcpy_variants(fd, dest, dest_off, src, src_off,
bytes, 0, argv[1], persist);
/* dest > src */
swap_mappings(&dest, &src, mapped_len, fd);
if (dest <= src)
UT_FATAL("cannot map files in memory order");
do_memcpy_variants(fd, dest, dest_off, src, src_off,
bytes, 0, argv[1], persist);
int ret = pmem_unmap(dest_orig, mapped_len);
UT_ASSERTeq(ret, 0);
MUNMAP(src_orig, mapped_len);
CLOSE(fd);
DONE(NULL);
}
| 4,249 | 23.853801 | 76 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem_memcpy/TESTS.py
|
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2020, Intel Corporation
from collections import namedtuple
import testframework as t
TC = namedtuple('TC', ['dest', 'src', 'length'])
class PmemMemcpy(t.Test):
test_type = t.Short
filesize = 4 * t.MiB
envs0 = ()
envs1 = ()
test_cases = (
# aligned everything
TC(dest=0, src=0, length=4096),
# unaligned dest
TC(dest=7, src=0, length=4096),
# unaligned dest, unaligned src
TC(dest=7, src=9, length=4096),
# aligned dest, unaligned src
TC(dest=0, src=9, length=4096)
)
def run(self, ctx):
for env in self.envs0:
ctx.env[env] = '0'
for env in self.envs1:
ctx.env[env] = '1'
for tc in self.test_cases:
            filepath = ctx.create_holey_file(self.filesize, 'testfile')
ctx.exec('pmem_memcpy', filepath, tc.dest, tc.src, tc.length)
class TEST0(PmemMemcpy):
pass
@t.require_architectures('x86_64')
class TEST1(PmemMemcpy):
envs0 = ("PMEM_AVX512F",)
@t.require_architectures('x86_64')
class TEST2(PmemMemcpy):
envs0 = ("PMEM_AVX512F", "PMEM_AVX",)
class TEST3(PmemMemcpy):
envs1 = ("PMEM_NO_MOVNT",)
class TEST4(PmemMemcpy):
envs1 = ("PMEM_NO_MOVNT", "PMEM_NO_GENERIC_MEMCPY")
| 1,320 | 20.655738 | 73 |
py
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/ex_librpmem_fibonacci/config.sh
|
#!/usr/bin/env bash
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2019, Intel Corporation
#
#
# ex_librpmem_fibonacci/config.sh -- test configuration
#
# Filesystem-DAX cannot be used for RDMA
# since it is missing support in Linux kernel
CONF_GLOBAL_FS_TYPE=non-pmem
CONF_GLOBAL_BUILD_TYPE="debug nondebug"
CONF_GLOBAL_TEST_TYPE=medium
CONF_GLOBAL_RPMEM_PROVIDER=all
CONF_GLOBAL_RPMEM_PMETHOD=all
CONF_TEST_TYPE[2]=long
| 431 | 20.6 | 55 |
sh
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem_movnt/pmem_movnt.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2019, Intel Corporation */
/*
* pmem_movnt.c -- unit test for MOVNT threshold
*
* usage: pmem_movnt
*
*/
#include "unittest.h"
int
main(int argc, char *argv[])
{
char *dst;
char *src;
const char *thr = os_getenv("PMEM_MOVNT_THRESHOLD");
const char *avx = os_getenv("PMEM_AVX");
const char *avx512f = os_getenv("PMEM_AVX512F");
START(argc, argv, "pmem_movnt %s %savx %savx512f",
thr ? thr : "default",
avx ? "" : "!",
avx512f ? "" : "!");
src = MEMALIGN(64, 8192);
dst = MEMALIGN(64, 8192);
memset(src, 0x88, 8192);
memset(dst, 0, 8192);
for (size_t size = 1; size <= 4096; size *= 2) {
memset(dst, 0, 4096);
pmem_memcpy_nodrain(dst, src, size);
UT_ASSERTeq(memcmp(src, dst, size), 0);
UT_ASSERTeq(dst[size], 0);
}
for (size_t size = 1; size <= 4096; size *= 2) {
memset(dst, 0, 4096);
pmem_memmove_nodrain(dst, src, size);
UT_ASSERTeq(memcmp(src, dst, size), 0);
UT_ASSERTeq(dst[size], 0);
}
for (size_t size = 1; size <= 4096; size *= 2) {
memset(dst, 0, 4096);
pmem_memset_nodrain(dst, 0x77, size);
UT_ASSERTeq(dst[0], 0x77);
UT_ASSERTeq(dst[size - 1], 0x77);
UT_ASSERTeq(dst[size], 0);
}
ALIGNED_FREE(dst);
ALIGNED_FREE(src);
DONE(NULL);
}
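/*
 * example_nodrain_batch -- a minimal sketch, not invoked by main(): the
 * *_nodrain variants tested above skip the final drain, so a caller
 * batching several copies is expected to order them once with
 * pmem_drain() at the end.
 */
static void
example_nodrain_batch(char *dst, const char *src, size_t n)
{
	pmem_memcpy_nodrain(dst, src, n);
	pmem_memcpy_nodrain(dst + n, src + n, n);
	pmem_drain(); /* a single drain covers both copies */
}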
| 1,273 | 19.885246 | 53 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_memcheck_register/obj_memcheck_register.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018, Intel Corporation */
/*
 * obj_memcheck_register.c - test that verifies that objects are registered
* correctly in memcheck
*/
#include "unittest.h"
static void
test_create(const char *path)
{
PMEMobjpool *pop = NULL;
if ((pop = pmemobj_create(path, "register",
PMEMOBJ_MIN_POOL, S_IWUSR | S_IRUSR)) == NULL)
UT_FATAL("!pmemobj_create: %s", path);
PMEMoid oid = pmemobj_root(pop, 1024);
TX_BEGIN(pop) {
pmemobj_tx_alloc(1024, 0);
pmemobj_tx_add_range(oid, 0, 10);
} TX_END
pmemobj_close(pop);
}
static void
test_open(const char *path)
{
PMEMobjpool *pop = NULL;
if ((pop = pmemobj_open(path, "register")) == NULL)
UT_FATAL("!pmemobj_open: %s", path);
PMEMoid oid = pmemobj_root(pop, 1024);
TX_BEGIN(pop) {
pmemobj_tx_add_range(oid, 0, 10);
} TX_END
pmemobj_close(pop);
}
int
main(int argc, char *argv[])
{
START(argc, argv, "obj_memcheck_register");
if (argc != 3)
UT_FATAL("usage: %s [c|o] file-name", argv[0]);
switch (argv[1][0]) {
case 'c':
test_create(argv[2]);
break;
case 'o':
test_open(argv[2]);
break;
default:
UT_FATAL("usage: %s [c|o] file-name", argv[0]);
break;
}
DONE(NULL);
}
| 1,216 | 16.637681 | 76 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_reorder_basic/obj_reorder_basic.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018-2020, Intel Corporation */
/*
* obj_reorder_basic.c -- a simple unit test for store reordering
*
* usage: obj_reorder_basic file w|c
* w - write data
* c - check data consistency
*
*/
#include "unittest.h"
#include "util.h"
#include "valgrind_internal.h"
#define LAYOUT_NAME "intro_1"
#define MAX_BUF_LEN 10
#define BUF_VALUE 'a'
struct my_root {
size_t len;
char buf[MAX_BUF_LEN];
};
/*
* write_consistent -- (internal) write data in a consistent manner
*/
static void
write_consistent(struct pmemobjpool *pop)
{
PMEMoid root = pmemobj_root(pop, sizeof(struct my_root));
struct my_root *rootp = pmemobj_direct(root);
char buf[MAX_BUF_LEN];
memset(buf, BUF_VALUE, sizeof(buf));
buf[MAX_BUF_LEN - 1] = '\0';
rootp->len = strlen(buf);
pmemobj_persist(pop, &rootp->len, sizeof(rootp->len));
pmemobj_memcpy_persist(pop, rootp->buf, buf, rootp->len);
}
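/*
 * write_inconsistent -- a sketch, not invoked by main(): the
 * counter-example to write_consistent() above. Persisting len and buf
 * with a single call leaves a window in which the two stores may become
 * durable in either order -- exactly the kind of bug pmreorder and
 * check_consistency() are meant to expose.
 */
static void
write_inconsistent(struct pmemobjpool *pop)
{
	PMEMoid root = pmemobj_root(pop, sizeof(struct my_root));
	struct my_root *rootp = pmemobj_direct(root);
	memset(rootp->buf, BUF_VALUE, MAX_BUF_LEN - 1);
	rootp->buf[MAX_BUF_LEN - 1] = '\0';
	rootp->len = strlen(rootp->buf);
	/* one persist call: no ordering between len and buf */
	pmemobj_persist(pop, rootp, sizeof(*rootp));
}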
/*
* check_consistency -- (internal) check buf consistency
*/
static int
check_consistency(struct pmemobjpool *pop)
{
PMEMoid root = pmemobj_root(pop, sizeof(struct my_root));
struct my_root *rootp = pmemobj_direct(root);
if (rootp->len == strlen(rootp->buf) && rootp->len != 0)
for (int i = 0; i < MAX_BUF_LEN - 1; ++i)
if (rootp->buf[i] != BUF_VALUE)
return 1;
return 0;
}
int
main(int argc, char *argv[])
{
START(argc, argv, "obj_reorder_basic");
util_init();
if (argc != 3 || strchr("wc", argv[1][0]) == 0 || argv[1][1] != '\0')
UT_FATAL("usage: %s w|c file", argv[0]);
char opt = argv[1][0];
if (opt == 'c') {
int y = 1;
pmemobj_ctl_set(NULL, "copy_on_write.at_open", &y);
}
PMEMobjpool *pop = pmemobj_open(argv[2], LAYOUT_NAME);
UT_ASSERT(pop != NULL);
VALGRIND_EMIT_LOG("PMREORDER_MARKER_WRITE.BEGIN");
switch (opt) {
case 'w':
{
write_consistent(pop);
break;
}
case 'c':
{
int ret = check_consistency(pop);
pmemobj_close(pop);
END(ret);
}
default:
UT_FATAL("Unrecognized option %c", opt);
}
VALGRIND_EMIT_LOG("PMREORDER_MARKER_WRITE.END");
pmemobj_close(pop);
DONE(NULL);
}
| 2,094 | 19.339806 | 70 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_heap_interrupt/mocks_windows.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* mocks_windows.h -- redefinitions of memops functions
*
* This file is Windows-specific.
*
* This file should be included (i.e. using Forced Include) by libpmemobj
* files, when compiled for the purpose of obj_heap_interrupt test.
* It would replace default implementation with mocked functions defined
* in obj_heap_interrupt.c.
*
* These defines could be also passed as preprocessor definitions.
*/
#ifndef WRAP_REAL
#define operation_finish __wrap_operation_finish
#endif
| 578 | 27.95 | 73 |
h
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_heap_interrupt/obj_heap_interrupt.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* obj_heap_interrupt.c -- unit test for pool heap interruption
*/
#include "heap_layout.h"
#include "memops.h"
#include "unittest.h"
POBJ_LAYOUT_BEGIN(heap_interrupt);
POBJ_LAYOUT_END(heap_interrupt);
static int exit_on_finish = 0;
FUNC_MOCK(operation_finish, void, struct operation_context *ctx,
unsigned flags)
FUNC_MOCK_RUN_DEFAULT {
if (exit_on_finish)
exit(0);
else
_FUNC_REAL(operation_finish)(ctx, flags);
}
FUNC_MOCK_END
static void
sc0_create(PMEMobjpool *pop)
{
PMEMoid oids[3];
TX_BEGIN(pop) {
oids[0] = pmemobj_tx_alloc(CHUNKSIZE - 100, 0);
oids[1] = pmemobj_tx_alloc(CHUNKSIZE - 100, 0);
oids[2] = pmemobj_tx_alloc(CHUNKSIZE - 100, 0);
} TX_END
pmemobj_free(&oids[0]);
exit_on_finish = 1;
pmemobj_free(&oids[1]);
}
/*
* noop_verify -- used in cases in which a successful open means that the test
 * has passed successfully.
*/
static void
noop_verify(PMEMobjpool *pop)
{
}
typedef void (*scenario_func)(PMEMobjpool *pop);
static struct {
scenario_func create;
scenario_func verify;
} scenarios[] = {
{sc0_create, noop_verify},
};
int
main(int argc, char *argv[])
{
START(argc, argv, "heap_interrupt");
UT_COMPILE_ERROR_ON(POBJ_LAYOUT_TYPES_NUM(heap_interrupt) != 0);
if (argc != 4)
UT_FATAL("usage: %s file [cmd: c/o] [scenario]", argv[0]);
const char *path = argv[1];
PMEMobjpool *pop = NULL;
int exists = argv[2][0] == 'o';
int scenario = atoi(argv[3]);
if (!exists) {
if ((pop = pmemobj_create(path,
POBJ_LAYOUT_NAME(heap_interrupt),
0, S_IWUSR | S_IRUSR)) == NULL) {
UT_FATAL("failed to create pool\n");
}
scenarios[scenario].create(pop);
/* if we get here, something is wrong with function mocking */
UT_ASSERT(0);
} else {
if ((pop = pmemobj_open(path,
POBJ_LAYOUT_NAME(heap_interrupt)))
== NULL) {
UT_FATAL("failed to open pool\n");
}
scenarios[scenario].verify(pop);
}
pmemobj_close(pop);
DONE(NULL);
}
#ifdef _MSC_VER
/*
* Since libpmemobj is linked statically, we need to invoke its ctor/dtor.
*/
MSVC_CONSTR(libpmemobj_init)
MSVC_DESTR(libpmemobj_fini)
#endif
| 2,178 | 19.556604 | 78 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_pmalloc_mt/obj_pmalloc_mt.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* obj_pmalloc_mt.c -- multithreaded test of allocator
*/
#include <stdint.h>
#include "file.h"
#include "obj.h"
#include "pmalloc.h"
#include "sys_util.h"
#include "unittest.h"
#define MAX_THREADS 32
#define MAX_OPS_PER_THREAD 1000
#define ALLOC_SIZE 104
#define REALLOC_SIZE (ALLOC_SIZE * 3)
#define MIX_RERUNS 2
#define CHUNKSIZE (1 << 18)
#define CHUNKS_PER_THREAD 3
static unsigned Threads;
static unsigned Ops_per_thread;
static unsigned Tx_per_thread;
struct action {
struct pobj_action pact;
os_mutex_t lock;
os_cond_t cond;
};
struct root {
uint64_t offs[MAX_THREADS][MAX_OPS_PER_THREAD];
struct action actions[MAX_THREADS][MAX_OPS_PER_THREAD];
};
struct worker_args {
PMEMobjpool *pop;
struct root *r;
unsigned idx;
};
static void *
alloc_worker(void *arg)
{
struct worker_args *a = arg;
for (unsigned i = 0; i < Ops_per_thread; ++i) {
pmalloc(a->pop, &a->r->offs[a->idx][i], ALLOC_SIZE, 0, 0);
UT_ASSERTne(a->r->offs[a->idx][i], 0);
}
return NULL;
}
static void *
realloc_worker(void *arg)
{
struct worker_args *a = arg;
for (unsigned i = 0; i < Ops_per_thread; ++i) {
prealloc(a->pop, &a->r->offs[a->idx][i], REALLOC_SIZE, 0, 0);
UT_ASSERTne(a->r->offs[a->idx][i], 0);
}
return NULL;
}
static void *
free_worker(void *arg)
{
struct worker_args *a = arg;
for (unsigned i = 0; i < Ops_per_thread; ++i) {
pfree(a->pop, &a->r->offs[a->idx][i]);
UT_ASSERTeq(a->r->offs[a->idx][i], 0);
}
return NULL;
}
static void *
mix_worker(void *arg)
{
struct worker_args *a = arg;
/*
* The mix scenario is ran twice to increase the chances of run
* contention.
*/
for (unsigned j = 0; j < MIX_RERUNS; ++j) {
for (unsigned i = 0; i < Ops_per_thread; ++i) {
pmalloc(a->pop, &a->r->offs[a->idx][i],
ALLOC_SIZE, 0, 0);
UT_ASSERTne(a->r->offs[a->idx][i], 0);
}
for (unsigned i = 0; i < Ops_per_thread; ++i) {
pfree(a->pop, &a->r->offs[a->idx][i]);
UT_ASSERTeq(a->r->offs[a->idx][i], 0);
}
}
return NULL;
}
static void *
tx_worker(void *arg)
{
struct worker_args *a = arg;
/*
	 * Allocate objects until exhaustion; once that happens, the transaction
* will automatically abort and all of the objects will be freed.
*/
TX_BEGIN(a->pop) {
for (unsigned n = 0; ; ++n) { /* this is NOT an infinite loop */
pmemobj_tx_alloc(ALLOC_SIZE, a->idx);
if (Ops_per_thread != MAX_OPS_PER_THREAD &&
n == Ops_per_thread) {
pmemobj_tx_abort(0);
}
}
} TX_END
return NULL;
}
static void *
tx3_worker(void *arg)
{
struct worker_args *a = arg;
/*
* Allocate N objects, abort, repeat M times. Should reveal issues in
* transaction abort handling.
*/
for (unsigned n = 0; n < Tx_per_thread; ++n) {
TX_BEGIN(a->pop) {
for (unsigned i = 0; i < Ops_per_thread; ++i) {
pmemobj_tx_alloc(ALLOC_SIZE, a->idx);
}
pmemobj_tx_abort(EINVAL);
} TX_END
}
return NULL;
}
static void *
alloc_free_worker(void *arg)
{
struct worker_args *a = arg;
PMEMoid oid;
for (unsigned i = 0; i < Ops_per_thread; ++i) {
int err = pmemobj_alloc(a->pop, &oid, ALLOC_SIZE,
0, NULL, NULL);
UT_ASSERTeq(err, 0);
pmemobj_free(&oid);
}
return NULL;
}
#define OPS_PER_TX 10
#define STEP 8
#define TEST_LANES 4
static void *
tx2_worker(void *arg)
{
struct worker_args *a = arg;
for (unsigned n = 0; n < Tx_per_thread; ++n) {
PMEMoid oids[OPS_PER_TX];
TX_BEGIN(a->pop) {
for (int i = 0; i < OPS_PER_TX; ++i) {
oids[i] = pmemobj_tx_alloc(ALLOC_SIZE, a->idx);
for (unsigned j = 0; j < ALLOC_SIZE;
j += STEP) {
pmemobj_tx_add_range(oids[i], j, STEP);
}
}
} TX_END
TX_BEGIN(a->pop) {
for (int i = 0; i < OPS_PER_TX; ++i)
pmemobj_tx_free(oids[i]);
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
}
return NULL;
}
static void *
action_cancel_worker(void *arg)
{
struct worker_args *a = arg;
PMEMoid oid;
for (unsigned i = 0; i < Ops_per_thread; ++i) {
unsigned arr_id = a->idx / 2;
struct action *act = &a->r->actions[arr_id][i];
if (a->idx % 2 == 0) {
os_mutex_lock(&act->lock);
oid = pmemobj_reserve(a->pop,
&act->pact, ALLOC_SIZE, 0);
UT_ASSERT(!OID_IS_NULL(oid));
os_cond_signal(&act->cond);
os_mutex_unlock(&act->lock);
} else {
os_mutex_lock(&act->lock);
while (act->pact.heap.offset == 0)
os_cond_wait(&act->cond, &act->lock);
pmemobj_cancel(a->pop, &act->pact, 1);
os_mutex_unlock(&act->lock);
}
}
return NULL;
}
static void *
action_publish_worker(void *arg)
{
struct worker_args *a = arg;
PMEMoid oid;
for (unsigned i = 0; i < Ops_per_thread; ++i) {
unsigned arr_id = a->idx / 2;
struct action *act = &a->r->actions[arr_id][i];
if (a->idx % 2 == 0) {
os_mutex_lock(&act->lock);
oid = pmemobj_reserve(a->pop,
&act->pact, ALLOC_SIZE, 0);
UT_ASSERT(!OID_IS_NULL(oid));
os_cond_signal(&act->cond);
os_mutex_unlock(&act->lock);
} else {
os_mutex_lock(&act->lock);
while (act->pact.heap.offset == 0)
os_cond_wait(&act->cond, &act->lock);
pmemobj_publish(a->pop, &act->pact, 1);
os_mutex_unlock(&act->lock);
}
}
return NULL;
}
static void *
action_mix_worker(void *arg)
{
struct worker_args *a = arg;
PMEMoid oid;
for (unsigned i = 0; i < Ops_per_thread; ++i) {
unsigned arr_id = a->idx / 2;
unsigned publish = i % 2;
struct action *act = &a->r->actions[arr_id][i];
if (a->idx % 2 == 0) {
os_mutex_lock(&act->lock);
oid = pmemobj_reserve(a->pop,
&act->pact, ALLOC_SIZE, 0);
UT_ASSERT(!OID_IS_NULL(oid));
os_cond_signal(&act->cond);
os_mutex_unlock(&act->lock);
} else {
os_mutex_lock(&act->lock);
while (act->pact.heap.offset == 0)
os_cond_wait(&act->cond, &act->lock);
if (publish)
pmemobj_publish(a->pop, &act->pact, 1);
else
pmemobj_cancel(a->pop, &act->pact, 1);
os_mutex_unlock(&act->lock);
}
pmemobj_persist(a->pop, act, sizeof(*act));
}
return NULL;
}
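/*
 * example_reserve_publish -- a minimal sketch, not used by the workers:
 * the reserve/publish pair from above without the cross-thread
 * handshake. pmemobj_reserve() hands out memory that becomes reachable
 * only once pmemobj_publish() succeeds (or is reclaimed again by
 * pmemobj_cancel()).
 */
static void
example_reserve_publish(PMEMobjpool *pop)
{
	struct pobj_action act;
	PMEMoid oid = pmemobj_reserve(pop, &act, ALLOC_SIZE, 0);
	UT_ASSERT(!OID_IS_NULL(oid));
	int ret = pmemobj_publish(pop, &act, 1);
	UT_ASSERTeq(ret, 0);
}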
static void
actions_clear(PMEMobjpool *pop, struct root *r)
{
for (unsigned i = 0; i < Threads; ++i) {
for (unsigned j = 0; j < Ops_per_thread; ++j) {
struct action *a = &r->actions[i][j];
util_mutex_destroy(&a->lock);
util_mutex_init(&a->lock);
util_cond_destroy(&a->cond);
util_cond_init(&a->cond);
memset(&a->pact, 0, sizeof(a->pact));
pmemobj_persist(pop, a, sizeof(*a));
}
}
}
static void
run_worker(void *(worker_func)(void *arg), struct worker_args args[])
{
os_thread_t t[MAX_THREADS];
for (unsigned i = 0; i < Threads; ++i)
THREAD_CREATE(&t[i], NULL, worker_func, &args[i]);
for (unsigned i = 0; i < Threads; ++i)
THREAD_JOIN(&t[i], NULL);
}
int
main(int argc, char *argv[])
{
START(argc, argv, "obj_pmalloc_mt");
if (argc != 5)
UT_FATAL("usage: %s <threads> <ops/t> <tx/t> [file]", argv[0]);
PMEMobjpool *pop;
Threads = ATOU(argv[1]);
if (Threads > MAX_THREADS)
UT_FATAL("Threads %d > %d", Threads, MAX_THREADS);
Ops_per_thread = ATOU(argv[2]);
if (Ops_per_thread > MAX_OPS_PER_THREAD)
UT_FATAL("Ops per thread %d > %d", Threads, MAX_THREADS);
Tx_per_thread = ATOU(argv[3]);
int exists = util_file_exists(argv[4]);
if (exists < 0)
UT_FATAL("!util_file_exists");
if (!exists) {
pop = pmemobj_create(argv[4], "TEST", (PMEMOBJ_MIN_POOL) +
(MAX_THREADS * CHUNKSIZE * CHUNKS_PER_THREAD),
0666);
if (pop == NULL)
UT_FATAL("!pmemobj_create");
} else {
pop = pmemobj_open(argv[4], "TEST");
if (pop == NULL)
UT_FATAL("!pmemobj_open");
}
PMEMoid oid = pmemobj_root(pop, sizeof(struct root));
struct root *r = pmemobj_direct(oid);
UT_ASSERTne(r, NULL);
struct worker_args args[MAX_THREADS];
for (unsigned i = 0; i < Threads; ++i) {
args[i].pop = pop;
args[i].r = r;
args[i].idx = i;
for (unsigned j = 0; j < Ops_per_thread; ++j) {
struct action *a = &r->actions[i][j];
util_mutex_init(&a->lock);
util_cond_init(&a->cond);
}
}
run_worker(alloc_worker, args);
run_worker(realloc_worker, args);
run_worker(free_worker, args);
run_worker(mix_worker, args);
run_worker(alloc_free_worker, args);
run_worker(action_cancel_worker, args);
actions_clear(pop, r);
run_worker(action_publish_worker, args);
actions_clear(pop, r);
run_worker(action_mix_worker, args);
/*
* Reduce the number of lanes to a value smaller than the number of
* threads. This will ensure that at least some of the state of the lane
* will be shared between threads. Doing this might reveal bugs related
* to runtime race detection instrumentation.
*/
unsigned old_nlanes = pop->lanes_desc.runtime_nlanes;
pop->lanes_desc.runtime_nlanes = TEST_LANES;
run_worker(tx2_worker, args);
pop->lanes_desc.runtime_nlanes = old_nlanes;
/*
	 * This workload might create many allocation classes due to pvector;
* keep it last.
*/
if (Threads == MAX_THREADS) /* don't run for short tests */
run_worker(tx_worker, args);
run_worker(tx3_worker, args);
pmemobj_close(pop);
DONE(NULL);
}
#ifdef _MSC_VER
/*
* Since libpmemobj is linked statically, we need to invoke its ctor/dtor.
*/
MSVC_CONSTR(libpmemobj_init)
MSVC_DESTR(libpmemobj_fini)
#endif
| 9,123 | 21.09201 | 74 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_ctl_alignment/obj_ctl_alignment.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018-2020, Intel Corporation */
/*
* obj_ctl_alignment.c -- tests for the alloc class alignment
*/
#include "unittest.h"
#define LAYOUT "obj_ctl_alignment"
static PMEMobjpool *pop;
static void
test_fail(void)
{
struct pobj_alloc_class_desc ac;
ac.header_type = POBJ_HEADER_NONE;
ac.unit_size = 1024 - 1;
ac.units_per_block = 100;
ac.alignment = 512;
int ret = pmemobj_ctl_set(pop, "heap.alloc_class.new.desc", &ac);
UT_ASSERTeq(ret, -1); /* unit_size must be multiple of alignment */
}
static void
test_aligned_allocs(size_t size, size_t alignment, enum pobj_header_type htype)
{
struct pobj_alloc_class_desc ac;
ac.header_type = htype;
ac.unit_size = size;
ac.units_per_block = 100;
ac.alignment = alignment;
int ret = pmemobj_ctl_set(pop, "heap.alloc_class.new.desc", &ac);
UT_ASSERTeq(ret, 0);
PMEMoid oid;
ret = pmemobj_xalloc(pop, &oid, 1, 0,
POBJ_CLASS_ID(ac.class_id), NULL, NULL);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(oid.off % alignment, 0);
UT_ASSERTeq((uintptr_t)pmemobj_direct(oid) % alignment, 0);
ret = pmemobj_xalloc(pop, &oid, 1, 0,
POBJ_CLASS_ID(ac.class_id), NULL, NULL);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(oid.off % alignment, 0);
UT_ASSERTeq((uintptr_t)pmemobj_direct(oid) % alignment, 0);
char query[1024];
SNPRINTF(query, 1024, "heap.alloc_class.%u.desc", ac.class_id);
struct pobj_alloc_class_desc read_ac;
ret = pmemobj_ctl_get(pop, query, &read_ac);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(ac.alignment, read_ac.alignment);
}
int
main(int argc, char *argv[])
{
START(argc, argv, "obj_ctl_alignment");
if (argc != 2)
UT_FATAL("usage: %s file-name", argv[0]);
const char *path = argv[1];
if ((pop = pmemobj_create(path, LAYOUT, PMEMOBJ_MIN_POOL * 10,
S_IWUSR | S_IRUSR)) == NULL)
UT_FATAL("!pmemobj_create: %s", path);
test_fail();
test_aligned_allocs(1024, 512, POBJ_HEADER_NONE);
test_aligned_allocs(1024, 512, POBJ_HEADER_COMPACT);
test_aligned_allocs(64, 64, POBJ_HEADER_COMPACT);
pmemobj_close(pop);
DONE(NULL);
}
| 2,055 | 23.47619 | 79 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_list/mocks_windows.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* mocks_windows.h -- redefinitions of obj list functions
*
* This file is Windows-specific.
*
* This file should be included (i.e. using Forced Include) by libpmemobj
* files, when compiled for the purpose of obj_list test.
* It would replace default implementation with mocked functions defined
* in obj_list.c.
*
* These defines could be also passed as preprocessor definitions.
*/
#if defined(__cplusplus)
extern "C" {
#endif
#ifdef WRAP_REAL
#define WRAP_REAL_PMALLOC
#define WRAP_REAL_ULOG
#define WRAP_REAL_LANE
#define WRAP_REAL_HEAP
#define WRAP_REAL_PMEMOBJ
#endif
#ifndef WRAP_REAL_PMALLOC
#define pmalloc __wrap_pmalloc
#define pfree __wrap_pfree
#define pmalloc_construct __wrap_pmalloc_construct
#define prealloc __wrap_prealloc
#define prealloc_construct __wrap_prealloc_construct
#define palloc_usable_size __wrap_palloc_usable_size
#define palloc_reserve __wrap_palloc_reserve
#define palloc_publish __wrap_palloc_publish
#define palloc_defer_free __wrap_palloc_defer_free
#endif
#ifndef WRAP_REAL_ULOG
#define ulog_store __wrap_ulog_store
#define ulog_process __wrap_ulog_process
#endif
#ifndef WRAP_REAL_LANE
#define lane_hold __wrap_lane_hold
#define lane_release __wrap_lane_release
#define lane_recover_and_section_boot __wrap_lane_recover_and_section_boot
#define lane_section_cleanup __wrap_lane_section_cleanup
#endif
#ifndef WRAP_REAL_HEAP
#define heap_boot __wrap_heap_boot
#endif
#ifndef WRAP_REAL_PMEMOBJ
#define pmemobj_alloc __wrap_pmemobj_alloc
#define pmemobj_alloc_usable_size __wrap_pmemobj_alloc_usable_size
#define pmemobj_openU __wrap_pmemobj_open
#define pmemobj_close __wrap_pmemobj_close
#define pmemobj_direct __wrap_pmemobj_direct
#define pmemobj_pool_by_oid __wrap_pmemobj_pool_by_oid
#define pmemobj_pool_by_ptr __wrap_pmemobj_pool_by_ptr
#endif
#if defined(__cplusplus)
}
#endif
| 1,933 | 26.628571 | 74 |
h
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_list/obj_list.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2018, Intel Corporation */
/*
* obj_list.h -- unit tests for list module
*/
#include <stddef.h>
#include <sys/param.h>
#include "list.h"
#include "obj.h"
#include "lane.h"
#include "unittest.h"
#include "util.h"
/* offset to "in band" item */
#define OOB_OFF (sizeof(struct oob_header))
/* pmemobj initial heap offset */
#define HEAP_OFFSET 8192
TOID_DECLARE(struct item, 0);
TOID_DECLARE(struct list, 1);
TOID_DECLARE(struct oob_list, 2);
TOID_DECLARE(struct oob_item, 3);
struct item {
int id;
POBJ_LIST_ENTRY(struct item) next;
};
struct oob_header {
char data[48];
};
struct oob_item {
struct oob_header oob;
struct item item;
};
struct oob_list {
struct list_head head;
};
struct list {
POBJ_LIST_HEAD(listhead, struct item) head;
};
enum ulog_fail
{
/* don't fail at all */
NO_FAIL,
/* fail after ulog_store */
FAIL_AFTER_FINISH,
/* fail before ulog_store */
FAIL_BEFORE_FINISH,
/* fail after process */
FAIL_AFTER_PROCESS
};
/* global handle to pmemobj pool */
extern PMEMobjpool *Pop;
/* pointer to heap offset */
extern uint64_t *Heap_offset;
/* list lane section */
extern struct lane Lane;
/* actual item id */
extern int *Id;
/* fail event */
extern enum ulog_fail Ulog_fail;
/* global "in band" lists */
extern TOID(struct list) List;
extern TOID(struct list) List_sec;
/* global "out of band" lists */
extern TOID(struct oob_list) List_oob;
extern TOID(struct oob_list) List_oob_sec;
extern TOID(struct oob_item) *Item;
/* usage macros */
#define FATAL_USAGE()\
UT_FATAL("usage: obj_list <file> [PRnifr]")
#define FATAL_USAGE_PRINT()\
UT_FATAL("usage: obj_list <file> P:<list>")
#define FATAL_USAGE_PRINT_REVERSE()\
UT_FATAL("usage: obj_list <file> R:<list>")
#define FATAL_USAGE_INSERT()\
UT_FATAL("usage: obj_list <file> i:<where>:<num>")
#define FATAL_USAGE_INSERT_NEW()\
UT_FATAL("usage: obj_list <file> n:<where>:<num>:<value>")
#define FATAL_USAGE_REMOVE_FREE()\
UT_FATAL("usage: obj_list <file> f:<list>:<num>:<from>")
#define FATAL_USAGE_REMOVE()\
UT_FATAL("usage: obj_list <file> r:<num>")
#define FATAL_USAGE_MOVE()\
UT_FATAL("usage: obj_list <file> m:<num>:<where>:<num>")
#define FATAL_USAGE_FAIL()\
UT_FATAL("usage: obj_list <file> "\
"F:<after_finish|before_finish|after_process>")
| 2,314 | 21.475728 | 59 |
h
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_list/obj_list_mocks.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* obj_list_mocks.c -- mocks for redo/lane/heap/obj modules
*/
#include <inttypes.h>
#include "valgrind_internal.h"
#include "obj_list.h"
#include "set.h"
/*
* pmem_drain_nop -- no operation for drain on non-pmem memory
*/
static void
pmem_drain_nop(void)
{
/* NOP */
}
/*
* obj_persist -- pmemobj version of pmem_persist w/o replication
*/
static int
obj_persist(void *ctx, const void *addr, size_t len, unsigned flags)
{
PMEMobjpool *pop = (PMEMobjpool *)ctx;
pop->persist_local(addr, len);
return 0;
}
/*
* obj_flush -- pmemobj version of pmem_flush w/o replication
*/
static int
obj_flush(void *ctx, const void *addr, size_t len, unsigned flags)
{
PMEMobjpool *pop = (PMEMobjpool *)ctx;
pop->flush_local(addr, len);
return 0;
}
static uintptr_t Pool_addr;
static size_t Pool_size;
static void
obj_msync_nofail(const void *addr, size_t size)
{
uintptr_t addr_ptrt = (uintptr_t)addr;
/*
* Verify msynced range is in the last mapped file range. Useful for
* catching errors which normally would be caught only on Windows by
* win_mmap.c.
*/
if (addr_ptrt < Pool_addr || addr_ptrt >= Pool_addr + Pool_size ||
addr_ptrt + size >= Pool_addr + Pool_size)
UT_FATAL("<0x%" PRIxPTR ",0x%" PRIxPTR "> "
"not in <0x%" PRIxPTR ",0x%" PRIxPTR "> range",
addr_ptrt, addr_ptrt + size, Pool_addr,
Pool_addr + Pool_size);
if (pmem_msync(addr, size))
UT_FATAL("!pmem_msync");
}
/*
* obj_drain -- pmemobj version of pmem_drain w/o replication
*/
static void
obj_drain(void *ctx)
{
PMEMobjpool *pop = (PMEMobjpool *)ctx;
pop->drain_local();
}
static void *
obj_memcpy(void *ctx, void *dest, const void *src, size_t len,
unsigned flags)
{
return pmem_memcpy(dest, src, len, flags);
}
static void *
obj_memset(void *ctx, void *ptr, int c, size_t sz, unsigned flags)
{
return pmem_memset(ptr, c, sz, flags);
}
/*
* linear_alloc -- allocates `size` bytes (rounded up to 8 bytes) and returns
 * the offset of the allocated object
*/
static uint64_t
linear_alloc(uint64_t *cur_offset, size_t size)
{
uint64_t ret = *cur_offset;
*cur_offset += roundup(size, sizeof(uint64_t));
return ret;
}
/*
* pmemobj_open -- pmemobj_open mock
*
 * This function initializes the pmemobj pool for the purposes of this
 * unit test.
*/
FUNC_MOCK(pmemobj_open, PMEMobjpool *, const char *fname, const char *layout)
FUNC_MOCK_RUN_DEFAULT
{
size_t size;
int is_pmem;
void *addr = pmem_map_file(fname, 0, 0, 0, &size, &is_pmem);
if (!addr) {
UT_OUT("!%s: pmem_map_file", fname);
return NULL;
}
Pool_addr = (uintptr_t)addr;
Pool_size = size;
Pop = (PMEMobjpool *)addr;
Pop->addr = Pop;
Pop->is_pmem = is_pmem;
Pop->rdonly = 0;
Pop->uuid_lo = 0x12345678;
VALGRIND_REMOVE_PMEM_MAPPING(&Pop->mutex_head,
sizeof(Pop->mutex_head));
VALGRIND_REMOVE_PMEM_MAPPING(&Pop->rwlock_head,
sizeof(Pop->rwlock_head));
VALGRIND_REMOVE_PMEM_MAPPING(&Pop->cond_head,
sizeof(Pop->cond_head));
Pop->mutex_head = NULL;
Pop->rwlock_head = NULL;
Pop->cond_head = NULL;
if (Pop->is_pmem) {
Pop->persist_local = pmem_persist;
Pop->flush_local = pmem_flush;
Pop->drain_local = pmem_drain;
Pop->memcpy_local = pmem_memcpy;
Pop->memset_local = pmem_memset;
} else {
Pop->persist_local = obj_msync_nofail;
Pop->flush_local = obj_msync_nofail;
Pop->drain_local = pmem_drain_nop;
Pop->memcpy_local = pmem_memcpy;
Pop->memset_local = pmem_memset;
}
Pop->p_ops.persist = obj_persist;
Pop->p_ops.flush = obj_flush;
Pop->p_ops.drain = obj_drain;
Pop->p_ops.memcpy = obj_memcpy;
Pop->p_ops.memset = obj_memset;
Pop->p_ops.base = Pop;
struct pmem_ops *p_ops = &Pop->p_ops;
Pop->heap_offset = HEAP_OFFSET;
Pop->heap_size = size - Pop->heap_offset;
uint64_t heap_offset = HEAP_OFFSET;
Heap_offset = (uint64_t *)((uintptr_t)Pop +
linear_alloc(&heap_offset, sizeof(*Heap_offset)));
Id = (int *)((uintptr_t)Pop + linear_alloc(&heap_offset, sizeof(*Id)));
/* Alloc lane layout */
Lane.layout = (struct lane_layout *)((uintptr_t)Pop +
linear_alloc(&heap_offset, LANE_TOTAL_SIZE));
/* Alloc in band lists */
List.oid.pool_uuid_lo = Pop->uuid_lo;
List.oid.off = linear_alloc(&heap_offset, sizeof(struct list));
List_sec.oid.pool_uuid_lo = Pop->uuid_lo;
List_sec.oid.off = linear_alloc(&heap_offset, sizeof(struct list));
/* Alloc out of band lists */
List_oob.oid.pool_uuid_lo = Pop->uuid_lo;
List_oob.oid.off = linear_alloc(&heap_offset, sizeof(struct oob_list));
List_oob_sec.oid.pool_uuid_lo = Pop->uuid_lo;
List_oob_sec.oid.off =
linear_alloc(&heap_offset, sizeof(struct oob_list));
Item = (union oob_item_toid *)((uintptr_t)Pop +
linear_alloc(&heap_offset, sizeof(*Item)));
Item->oid.pool_uuid_lo = Pop->uuid_lo;
Item->oid.off = linear_alloc(&heap_offset, sizeof(struct oob_item));
pmemops_persist(p_ops, Item, sizeof(*Item));
if (*Heap_offset == 0) {
*Heap_offset = heap_offset;
pmemops_persist(p_ops, Heap_offset, sizeof(*Heap_offset));
}
pmemops_persist(p_ops, Pop, HEAP_OFFSET);
Pop->run_id += 2;
pmemops_persist(p_ops, &Pop->run_id, sizeof(Pop->run_id));
Lane.external = operation_new((struct ulog *)&Lane.layout->external,
LANE_REDO_EXTERNAL_SIZE, NULL, NULL, p_ops, LOG_TYPE_REDO);
return Pop;
}
FUNC_MOCK_END
/*
* pmemobj_close -- pmemobj_close mock
*
 * Just unmaps the mapped area.
*/
FUNC_MOCK(pmemobj_close, void, PMEMobjpool *pop)
FUNC_MOCK_RUN_DEFAULT {
operation_delete(Lane.external);
UT_ASSERTeq(pmem_unmap(Pop,
Pop->heap_size + Pop->heap_offset), 0);
Pop = NULL;
Pool_addr = 0;
Pool_size = 0;
}
FUNC_MOCK_END
/*
* pmemobj_pool_by_ptr -- pmemobj_pool_by_ptr mock
*
* Just return Pop.
*/
FUNC_MOCK_RET_ALWAYS(pmemobj_pool_by_ptr, PMEMobjpool *, Pop, const void *ptr);
/*
* pmemobj_direct -- pmemobj_direct mock
*/
FUNC_MOCK(pmemobj_direct, void *, PMEMoid oid)
FUNC_MOCK_RUN_DEFAULT {
return (void *)((uintptr_t)Pop + oid.off);
}
FUNC_MOCK_END
FUNC_MOCK_RET_ALWAYS(pmemobj_pool_by_oid, PMEMobjpool *, Pop, PMEMoid oid);
/*
* pmemobj_alloc_usable_size -- pmemobj_alloc_usable_size mock
*/
FUNC_MOCK(pmemobj_alloc_usable_size, size_t, PMEMoid oid)
FUNC_MOCK_RUN_DEFAULT {
size_t size = palloc_usable_size(
&Pop->heap, oid.off - OOB_OFF);
return size - OOB_OFF;
}
FUNC_MOCK_END
/*
* pmemobj_alloc -- pmemobj_alloc mock
*
 * Allocates an object using pmalloc and returns a PMEMoid.
*/
FUNC_MOCK(pmemobj_alloc, int, PMEMobjpool *pop, PMEMoid *oidp,
size_t size, uint64_t type_num,
pmemobj_constr constructor, void *arg)
FUNC_MOCK_RUN_DEFAULT {
PMEMoid oid = {0, 0};
oid.pool_uuid_lo = 0;
pmalloc(pop, &oid.off, size, 0, 0);
if (oidp) {
*oidp = oid;
if (OBJ_PTR_FROM_POOL(pop, oidp))
pmemops_persist(&Pop->p_ops, oidp,
sizeof(*oidp));
}
return 0;
}
FUNC_MOCK_END
/*
* lane_hold -- lane_hold mock
*
* Returns pointer to list lane section.
*/
FUNC_MOCK(lane_hold, unsigned, PMEMobjpool *pop, struct lane **lane)
FUNC_MOCK_RUN_DEFAULT {
*lane = &Lane;
return 0;
}
FUNC_MOCK_END
/*
* lane_release -- lane_release mock
*
* Always returns success.
*/
FUNC_MOCK_RET_ALWAYS_VOID(lane_release, PMEMobjpool *pop);
/*
* lane_recover_and_section_boot -- lane_recover_and_section_boot mock
*/
FUNC_MOCK(lane_recover_and_section_boot, int, PMEMobjpool *pop)
FUNC_MOCK_RUN_DEFAULT {
ulog_recover((struct ulog *)&Lane.layout->external,
OBJ_OFF_IS_VALID_FROM_CTX, &pop->p_ops);
return 0;
}
FUNC_MOCK_END
/*
* lane_section_cleanup -- lane_section_cleanup mock
*/
FUNC_MOCK(lane_section_cleanup, int, PMEMobjpool *pop)
FUNC_MOCK_RUN_DEFAULT {
return 0;
}
FUNC_MOCK_END
/*
 * ulog_store -- ulog_store mock
*/
FUNC_MOCK(ulog_store, void,
struct ulog *dest,
struct ulog *src, size_t nbytes, size_t redo_base_nbytes,
size_t ulog_base_capacity,
struct ulog_next *next, const struct pmem_ops *p_ops)
FUNC_MOCK_RUN_DEFAULT {
switch (Ulog_fail) {
case FAIL_AFTER_FINISH:
_FUNC_REAL(ulog_store)(dest, src,
nbytes, redo_base_nbytes,
ulog_base_capacity,
next, p_ops);
DONEW(NULL);
break;
case FAIL_BEFORE_FINISH:
DONEW(NULL);
break;
default:
_FUNC_REAL(ulog_store)(dest, src,
nbytes, redo_base_nbytes,
ulog_base_capacity,
next, p_ops);
break;
}
}
FUNC_MOCK_END
/*
* ulog_process -- ulog_process mock
*/
FUNC_MOCK(ulog_process, void, struct ulog *ulog,
ulog_check_offset_fn check, const struct pmem_ops *p_ops)
FUNC_MOCK_RUN_DEFAULT {
_FUNC_REAL(ulog_process)(ulog, check, p_ops);
if (Ulog_fail == FAIL_AFTER_PROCESS) {
DONEW(NULL);
}
}
FUNC_MOCK_END
/*
* heap_boot -- heap_boot mock
*
* Always returns success.
*/
FUNC_MOCK_RET_ALWAYS(heap_boot, int, 0, PMEMobjpool *pop);
| 8,765 | 22.691892 | 79 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_list/obj_list_mocks_palloc.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2019, Intel Corporation */
/*
* obj_list_mocks_palloc.c -- mocks for palloc/pmalloc modules
*/
#include "obj_list.h"
/*
* pmalloc -- pmalloc mock
*
* Allocates the memory using linear allocator.
* Prints the id of allocated struct oob_item for tracking purposes.
*/
FUNC_MOCK(pmalloc, int, PMEMobjpool *pop, uint64_t *ptr,
size_t size, uint64_t extra_field, uint16_t flags)
FUNC_MOCK_RUN_DEFAULT {
struct pmem_ops *p_ops = &Pop->p_ops;
size = size + OOB_OFF + sizeof(uint64_t) * 2;
uint64_t *alloc_size = (uint64_t *)((uintptr_t)Pop
+ *Heap_offset);
*alloc_size = size;
pmemops_persist(p_ops, alloc_size, sizeof(*alloc_size));
*ptr = *Heap_offset + sizeof(uint64_t);
if (OBJ_PTR_FROM_POOL(pop, ptr))
pmemops_persist(p_ops, ptr, sizeof(*ptr));
struct oob_item *item =
(struct oob_item *)((uintptr_t)Pop + *ptr);
*ptr += OOB_OFF;
if (OBJ_PTR_FROM_POOL(pop, ptr))
pmemops_persist(p_ops, ptr, sizeof(*ptr));
item->item.id = *Id;
pmemops_persist(p_ops, &item->item.id, sizeof(item->item.id));
(*Id)++;
pmemops_persist(p_ops, Id, sizeof(*Id));
*Heap_offset = *Heap_offset + sizeof(uint64_t) +
size + OOB_OFF;
pmemops_persist(p_ops, Heap_offset, sizeof(*Heap_offset));
UT_OUT("pmalloc(id = %d)", item->item.id);
return 0;
}
FUNC_MOCK_END
/*
* pfree -- pfree mock
*
 * Just prints the freed struct oob_item's id. Doesn't free the memory.
*/
FUNC_MOCK(pfree, void, PMEMobjpool *pop, uint64_t *ptr)
FUNC_MOCK_RUN_DEFAULT {
struct oob_item *item =
(struct oob_item *)((uintptr_t)Pop + *ptr - OOB_OFF);
UT_OUT("pfree(id = %d)", item->item.id);
*ptr = 0;
if (OBJ_PTR_FROM_POOL(pop, ptr))
pmemops_persist(&Pop->p_ops, ptr, sizeof(*ptr));
return;
}
FUNC_MOCK_END
/*
* pmalloc_construct -- pmalloc_construct mock
*
* Allocates the memory using linear allocator and invokes the constructor.
* Prints the id of allocated struct oob_item for tracking purposes.
*/
FUNC_MOCK(pmalloc_construct, int, PMEMobjpool *pop, uint64_t *off,
size_t size, palloc_constr constructor, void *arg,
uint64_t extra_field, uint16_t flags, uint16_t class_id)
FUNC_MOCK_RUN_DEFAULT {
struct pmem_ops *p_ops = &Pop->p_ops;
size = size + OOB_OFF + sizeof(uint64_t) * 2;
uint64_t *alloc_size = (uint64_t *)((uintptr_t)Pop +
*Heap_offset);
*alloc_size = size;
pmemops_persist(p_ops, alloc_size, sizeof(*alloc_size));
*off = *Heap_offset + sizeof(uint64_t) + OOB_OFF;
if (OBJ_PTR_FROM_POOL(pop, off))
pmemops_persist(p_ops, off, sizeof(*off));
*Heap_offset = *Heap_offset + sizeof(uint64_t) + size;
pmemops_persist(p_ops, Heap_offset, sizeof(*Heap_offset));
void *ptr = (void *)((uintptr_t)Pop + *off);
constructor(pop, ptr, size, arg);
return 0;
}
FUNC_MOCK_END
/*
* prealloc -- prealloc mock
*/
FUNC_MOCK(prealloc, int, PMEMobjpool *pop, uint64_t *off, size_t size,
uint64_t extra_field, uint16_t flags)
FUNC_MOCK_RUN_DEFAULT {
uint64_t *alloc_size = (uint64_t *)((uintptr_t)Pop +
*off - sizeof(uint64_t));
struct item *item = (struct item *)((uintptr_t)Pop +
*off + OOB_OFF);
if (*alloc_size >= size) {
*alloc_size = size;
pmemops_persist(&Pop->p_ops, alloc_size,
sizeof(*alloc_size));
UT_OUT("prealloc(id = %d, size = %zu) = true",
item->id,
(size - OOB_OFF) / sizeof(struct item));
return 0;
} else {
UT_OUT("prealloc(id = %d, size = %zu) = false",
item->id,
(size - OOB_OFF) / sizeof(struct item));
return -1;
}
}
FUNC_MOCK_END
/*
* prealloc_construct -- prealloc_construct mock
*/
FUNC_MOCK(prealloc_construct, int, PMEMobjpool *pop, uint64_t *off,
size_t size, palloc_constr constructor, void *arg,
uint64_t extra_field, uint16_t flags, uint16_t class_id)
FUNC_MOCK_RUN_DEFAULT {
int ret = __wrap_prealloc(pop, off, size, 0, 0);
if (!ret) {
void *ptr = (void *)((uintptr_t)Pop + *off + OOB_OFF);
constructor(pop, ptr, size, arg);
}
return ret;
}
FUNC_MOCK_END
/*
* palloc_reserve -- palloc_reserve mock
*/
FUNC_MOCK(palloc_reserve, int, struct palloc_heap *heap, size_t size,
palloc_constr constructor, void *arg,
uint64_t extra_field, uint16_t object_flags, uint16_t class_id,
uint16_t arena_id, struct pobj_action *act)
FUNC_MOCK_RUN_DEFAULT {
struct pmem_ops *p_ops = &Pop->p_ops;
size = size + OOB_OFF + sizeof(uint64_t) * 2;
uint64_t *alloc_size = (uint64_t *)((uintptr_t)Pop
+ *Heap_offset);
*alloc_size = size;
pmemops_persist(p_ops, alloc_size, sizeof(*alloc_size));
act->heap.offset = *Heap_offset + sizeof(uint64_t);
struct oob_item *item =
(struct oob_item *)((uintptr_t)Pop + act->heap.offset);
act->heap.offset += OOB_OFF;
item->item.id = *Id;
pmemops_persist(p_ops, &item->item.id, sizeof(item->item.id));
(*Id)++;
pmemops_persist(p_ops, Id, sizeof(*Id));
*Heap_offset += sizeof(uint64_t) + size + OOB_OFF;
pmemops_persist(p_ops, Heap_offset, sizeof(*Heap_offset));
UT_OUT("pmalloc(id = %d)", item->item.id);
return 0;
}
FUNC_MOCK_END
/*
 * palloc_publish -- mock publish; must process the operation
*/
FUNC_MOCK(palloc_publish, void, struct palloc_heap *heap,
struct pobj_action *actv, size_t actvcnt,
struct operation_context *ctx)
FUNC_MOCK_RUN_DEFAULT {
operation_process(ctx);
operation_finish(ctx, 0);
}
FUNC_MOCK_END
/*
* palloc_defer_free -- pfree mock
*
 * Just prints the freed struct oob_item's id. Doesn't free the memory.
*/
FUNC_MOCK(palloc_defer_free, void, struct palloc_heap *heap, uint64_t off,
struct pobj_action *act)
FUNC_MOCK_RUN_DEFAULT {
struct oob_item *item =
(struct oob_item *)((uintptr_t)Pop + off - OOB_OFF);
UT_OUT("pfree(id = %d)", item->item.id);
act->heap.offset = off;
return;
}
FUNC_MOCK_END
/*
 * palloc_usable_size -- palloc_usable_size mock
*/
FUNC_MOCK(palloc_usable_size, size_t, struct palloc_heap *heap, uint64_t off)
FUNC_MOCK_RUN_DEFAULT {
uint64_t *alloc_size = (uint64_t *)((uintptr_t)Pop +
off - sizeof(uint64_t));
return (size_t)*alloc_size;
}
FUNC_MOCK_END
| 6,050 | 26.756881 | 77 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_list/obj_list.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2018, Intel Corporation */
/*
* obj_list.c -- unit tests for list module
*/
/*
* XXX - On VC++, this file must be compiled as C++ to have __typeof__ defined.
* However, the rest of the files (libpmemobj source) are still compiled as C.
 * To avoid issues with 'C' linkage, the entire file is in an 'extern "C"' block.
*/
#if defined(__cplusplus) && defined(_MSC_VER)
extern "C" {
/*
* XXX - Templates cannot be used with 'C' linkage, so for the purpose
 * of this test, we override the template-based definition of __typeof__ with
* a simple alias to decltype.
*/
#define __typeof__(p) decltype(p)
#endif
#include "obj_list.h"
/* global handle to pmemobj pool */
PMEMobjpool *Pop;
/* pointer to heap offset */
uint64_t *Heap_offset;
/* list lane section */
struct lane Lane;
/* actual item id */
int *Id;
/* fail event */
enum ulog_fail Ulog_fail = NO_FAIL;
/* global "in band" lists */
TOID(struct list) List;
TOID(struct list) List_sec;
/* global "out of band" lists */
TOID(struct oob_list) List_oob;
TOID(struct oob_list) List_oob_sec;
TOID(struct oob_item) *Item;
/*
* for each element on list in normal order
*/
#define PLIST_FOREACH(item, list, head, field)\
for ((item) = \
D_RW((list))->head.pe_first;\
!TOID_IS_NULL((item));\
TOID_ASSIGN((item),\
TOID_EQUALS((item),\
D_RW(D_RW((list))->head.pe_first)->field.pe_prev) ?\
OID_NULL : \
D_RW(item)->field.pe_next.oid))
/*
* for each element on list in reverse order
*/
#define PLIST_FOREACH_REVERSE(item, list, head, field)\
for ((item) = \
TOID_IS_NULL(D_RW((list))->head.pe_first) ? D_RW(list)->head.pe_first :\
D_RW(D_RW(list)->head.pe_first)->field.pe_prev;\
!TOID_IS_NULL((item));\
TOID_ASSIGN((item),\
TOID_EQUALS((item),\
D_RW((list))->head.pe_first) ?\
OID_NULL :\
D_RW(item)->field.pe_prev.oid))
/*
* get_item_list -- get nth item from list
*/
static PMEMoid
get_item_list(PMEMoid head, int n)
{
TOID(struct list) list;
TOID_ASSIGN(list, head);
TOID(struct item) item;
if (n >= 0) {
PLIST_FOREACH(item, list, head, next) {
if (n == 0)
return item.oid;
n--;
}
} else {
PLIST_FOREACH_REVERSE(item, list, head, next) {
n++;
if (n == 0)
return item.oid;
}
}
return OID_NULL;
}
/*
* do_print -- print list elements in normal order
*/
static void
do_print(PMEMobjpool *pop, const char *arg)
{
int L; /* which list */
if (sscanf(arg, "P:%d", &L) != 1)
FATAL_USAGE_PRINT();
if (L == 2) {
TOID(struct item) item;
UT_OUT("list:");
PLIST_FOREACH(item, List, head, next) {
UT_OUT("id = %d", D_RO(item)->id);
}
} else if (L == 4) {
TOID(struct item) item;
UT_OUT("list sec:");
PLIST_FOREACH(item, List_sec, head, next) {
UT_OUT("id = %d", D_RO(item)->id);
}
} else {
FATAL_USAGE_PRINT();
}
}
/*
* do_print_reverse -- print list elements in reverse order
*/
static void
do_print_reverse(PMEMobjpool *pop, const char *arg)
{
int L; /* which list */
if (sscanf(arg, "R:%d", &L) != 1)
FATAL_USAGE_PRINT_REVERSE();
if (L == 2) {
TOID(struct item) item;
UT_OUT("list reverse:");
PLIST_FOREACH_REVERSE(item, List, head, next) {
UT_OUT("id = %d", D_RO(item)->id);
}
} else if (L == 4) {
TOID(struct item) item;
UT_OUT("list sec reverse:");
PLIST_FOREACH_REVERSE(item, List_sec, head, next) {
UT_OUT("id = %d", D_RO(item)->id);
}
} else {
FATAL_USAGE_PRINT_REVERSE();
}
}
/*
* item_constructor -- constructor which sets the item's id to
 * the given value
*/
static int
item_constructor(void *ctx, void *ptr, size_t usable_size, void *arg)
{
PMEMobjpool *pop = (PMEMobjpool *)ctx;
int id = *(int *)arg;
struct item *item = (struct item *)ptr;
item->id = id;
pmemops_persist(&pop->p_ops, &item->id, sizeof(item->id));
UT_OUT("constructor(id = %d)", id);
return 0;
}
struct realloc_arg {
void *ptr;
size_t new_size;
size_t old_size;
};
/*
* do_insert_new -- insert new element to list
*/
static void
do_insert_new(PMEMobjpool *pop, const char *arg)
{
int n; /* which element on List */
int before;
int id;
int ret = sscanf(arg, "n:%d:%d:%d", &before, &n, &id);
if (ret == 3) {
ret = list_insert_new_user(pop,
offsetof(struct item, next),
(struct list_head *)&D_RW(List)->head,
get_item_list(List.oid, n),
before,
sizeof(struct item),
TOID_TYPE_NUM(struct item),
item_constructor,
&id, (PMEMoid *)Item);
if (ret)
UT_FATAL("list_insert_new(List, List_oob) failed");
} else if (ret == 2) {
ret = list_insert_new_user(pop,
offsetof(struct item, next),
(struct list_head *)&D_RW(List)->head,
get_item_list(List.oid, n),
before,
sizeof(struct item),
TOID_TYPE_NUM(struct item),
NULL, NULL, (PMEMoid *)Item);
if (ret)
UT_FATAL("list_insert_new(List, List_oob) failed");
} else {
FATAL_USAGE_INSERT_NEW();
}
}
/*
* do_insert -- insert element to list
*/
static void
do_insert(PMEMobjpool *pop, const char *arg)
{
int before;
int n; /* which element */
if (sscanf(arg, "i:%d:%d",
&before, &n) != 2)
FATAL_USAGE_INSERT();
PMEMoid it;
pmemobj_alloc(pop, &it,
sizeof(struct oob_item), 0, NULL, NULL);
if (list_insert(pop,
offsetof(struct item, next),
(struct list_head *)&D_RW(List)->head,
get_item_list(List.oid, n),
before,
it)) {
UT_FATAL("list_insert(List) failed");
}
}
/*
* do_remove_free -- remove and free element from list
*/
static void
do_remove_free(PMEMobjpool *pop, const char *arg)
{
int L; /* which list */
int n; /* which element */
int N; /* remove from single/both lists */
if (sscanf(arg, "f:%d:%d:%d", &L, &n, &N) != 3)
FATAL_USAGE_REMOVE_FREE();
PMEMoid oid;
if (L == 2) {
oid = get_item_list(List.oid, n);
} else {
FATAL_USAGE_REMOVE_FREE();
}
if (N == 1) {
if (list_remove_free_user(pop,
0,
NULL,
&oid)) {
UT_FATAL("list_remove_free(List_oob) failed");
}
} else if (N == 2) {
if (list_remove_free_user(pop,
offsetof(struct item, next),
(struct list_head *)&D_RW(List)->head,
&oid)) {
UT_FATAL("list_remove_free(List_oob, List) failed");
}
} else {
FATAL_USAGE_REMOVE_FREE();
}
}
/*
* do_remove -- remove element from list
*/
static void
do_remove(PMEMobjpool *pop, const char *arg)
{
int n; /* which element */
if (sscanf(arg, "r:%d", &n) != 1)
FATAL_USAGE_REMOVE();
if (list_remove(pop,
offsetof(struct item, next),
(struct list_head *)&D_RW(List)->head,
get_item_list(List.oid, n))) {
UT_FATAL("list_remove(List) failed");
}
}
/*
* do_move -- move element from one list to another
*/
static void
do_move(PMEMobjpool *pop, const char *arg)
{
int n;
int d;
int before;
if (sscanf(arg, "m:%d:%d:%d", &n, &before, &d) != 3)
FATAL_USAGE_MOVE();
if (list_move(pop,
offsetof(struct item, next),
(struct list_head *)&D_RW(List)->head,
offsetof(struct item, next),
(struct list_head *)&D_RW(List_sec)->head,
get_item_list(List_sec.oid, d),
before,
get_item_list(List.oid, n))) {
UT_FATAL("list_move(List, List_sec) failed");
}
}
/*
* do_move_one_list -- move element within one list
*/
static void
do_move_one_list(PMEMobjpool *pop, const char *arg)
{
int n;
int d;
int before;
if (sscanf(arg, "M:%d:%d:%d", &n, &before, &d) != 3)
FATAL_USAGE_MOVE();
if (list_move(pop,
offsetof(struct item, next),
(struct list_head *)&D_RW(List)->head,
offsetof(struct item, next),
(struct list_head *)&D_RW(List)->head,
get_item_list(List.oid, d),
before,
get_item_list(List.oid, n))) {
UT_FATAL("list_move(List, List) failed");
}
}
/*
* do_fail -- fail after specified event
*/
static void
do_fail(PMEMobjpool *pop, const char *arg)
{
if (strcmp(arg, "F:before_finish") == 0) {
Ulog_fail = FAIL_BEFORE_FINISH;
} else if (strcmp(arg, "F:after_finish") == 0) {
Ulog_fail = FAIL_AFTER_FINISH;
} else if (strcmp(arg, "F:after_process") == 0) {
Ulog_fail = FAIL_AFTER_PROCESS;
} else {
FATAL_USAGE_FAIL();
}
}
int
main(int argc, char *argv[])
{
START(argc, argv, "obj_list");
if (argc < 2)
FATAL_USAGE();
const char *path = argv[1];
util_init(); /* to initialize On_valgrind flag */
UT_COMPILE_ERROR_ON(OOB_OFF != 48);
PMEMobjpool *pop = pmemobj_open(path, NULL);
UT_ASSERTne(pop, NULL);
UT_ASSERT(!TOID_IS_NULL(List));
UT_ASSERT(!TOID_IS_NULL(List_oob));
int i;
for (i = 2; i < argc; i++) {
switch (argv[i][0]) {
case 'P':
do_print(pop, argv[i]);
break;
case 'R':
do_print_reverse(pop, argv[i]);
break;
case 'n':
do_insert_new(pop, argv[i]);
break;
case 'i':
do_insert(pop, argv[i]);
break;
case 'f':
do_remove_free(pop, argv[i]);
break;
case 'r':
do_remove(pop, argv[i]);
break;
case 'm':
do_move(pop, argv[i]);
break;
case 'M':
do_move_one_list(pop, argv[i]);
break;
case 'V':
lane_recover_and_section_boot(pop);
break;
case 'F':
do_fail(pop, argv[i]);
break;
default:
FATAL_USAGE();
}
}
pmemobj_close(pop);
DONE(NULL);
}
#if defined(__cplusplus) && defined(_MSC_VER)
}
#endif
| 8,987 | 19.902326 | 79 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/blk_rw_mt/config.sh
|
#!/usr/bin/env bash
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2017, Intel Corporation
#
#
# blk_rw_mt/config.sh -- test configuration
#
# Extend timeout for this test, as it may take a few minutes
# when run on a non-pmem file system.
CONF_GLOBAL_TIMEOUT='10m'
| 274 | 18.642857 | 60 |
sh
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/blk_rw_mt/blk_rw_mt.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2020, Intel Corporation */
/*
* Copyright (c) 2016, Microsoft Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* blk_rw_mt.c -- unit test for multi-threaded random I/O
*
* usage: blk_rw_mt bsize file seed nthread nops
*
*/
#include "unittest.h"
#include "rand.h"
static size_t Bsize;
/* all I/O below this LBA (increases collisions) */
static const unsigned Nblock = 100;
static unsigned Seed;
static unsigned Nthread;
static unsigned Nops;
static PMEMblkpool *Handle;
/*
* construct -- build a buffer for writing
*/
static void
construct(int *ordp, unsigned char *buf)
{
for (size_t i = 0; i < Bsize; i++)
buf[i] = *ordp;
(*ordp)++;
if (*ordp > 255)
*ordp = 1;
}
/*
* check -- check for torn buffers
*/
static void
check(unsigned char *buf)
{
unsigned val = *buf;
for (size_t i = 1; i < Bsize; i++)
if (buf[i] != val) {
UT_OUT("{%u} TORN at byte %zu", val, i);
break;
}
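/*
* Torn-write detection scheme (as implied by construct() and check()):
* every write fills the whole block with a single byte value, so a
* block containing two different byte values must have been torn by a
* non-atomic write.
*/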
}
/*
* worker -- the work each thread performs
*/
static void *
worker(void *arg)
{
uintptr_t mytid = (uintptr_t)arg;
unsigned char *buf = MALLOC(Bsize);
int ord = 1;
rng_t rng;
randomize_r(&rng, Seed + mytid);
for (unsigned i = 0; i < Nops; i++) {
os_off_t lba = (os_off_t)(rnd64_r(&rng) % Nblock);
if (rnd64_r(&rng) % 2) {
/* read */
if (pmemblk_read(Handle, buf, lba) < 0)
UT_OUT("!read lba %zu", lba);
else
check(buf);
} else {
/* write */
construct(&ord, buf);
if (pmemblk_write(Handle, buf, lba) < 0)
UT_OUT("!write lba %zu", lba);
}
}
FREE(buf);
return NULL;
}
int
main(int argc, char *argv[])
{
START(argc, argv, "blk_rw_mt");
if (argc != 6)
UT_FATAL("usage: %s bsize file seed nthread nops", argv[0]);
Bsize = strtoul(argv[1], NULL, 0);
const char *path = argv[2];
if ((Handle = pmemblk_create(path, Bsize, 0,
S_IWUSR | S_IRUSR)) == NULL)
UT_FATAL("!%s: pmemblk_create", path);
Seed = strtoul(argv[3], NULL, 0);
Nthread = strtoul(argv[4], NULL, 0);
Nops = strtoul(argv[5], NULL, 0);
UT_OUT("%s block size %zu usable blocks %u", argv[1], Bsize, Nblock);
os_thread_t *threads = MALLOC(Nthread * sizeof(os_thread_t));
/* kick off nthread threads */
for (unsigned i = 0; i < Nthread; i++)
THREAD_CREATE(&threads[i], NULL, worker, (void *)(intptr_t)i);
/* wait for all the threads to complete */
for (unsigned i = 0; i < Nthread; i++)
THREAD_JOIN(&threads[i], NULL);
FREE(threads);
pmemblk_close(Handle);
/* XXX not ready to pass this part of the test yet */
int result = pmemblk_check(path, Bsize);
if (result < 0)
UT_OUT("!%s: pmemblk_check", path);
else if (result == 0)
UT_OUT("%s: pmemblk_check: not consistent", path);
DONE(NULL);
}
| 4,260 | 25.302469 | 74 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/set_funcs/set_funcs.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2019, Intel Corporation */
/*
* set_funcs.c -- unit test for pmem*_set_funcs()
*/
#include "unittest.h"
#define EXISTING_FILE "/root"
#define NON_ZERO_POOL_SIZE 1
#define GUARD 0x2BEE5AFEULL
#define EXTRA sizeof(GUARD)
#define OBJ 0
#define BLK 1
#define LOG 2
static struct counters {
int mallocs;
int frees;
int reallocs;
int reallocs_null;
int strdups;
} cnt[5];
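/*
* Guard layout used by the wrappers below (sketch): each allocation is
* oversized by EXTRA bytes and prefixed with the GUARD word; the
* pointer handed to the caller points just past it:
*
*	[ GUARD (8 bytes) | user data (size bytes) ]
*	^ malloc'ed block  ^ pointer returned to the caller
*/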
static void *
test_malloc(size_t size)
{
unsigned long long *p = malloc(size + EXTRA);
UT_ASSERTne(p, NULL);
*p = GUARD;
return ++p;
}
static void
test_free(void *ptr)
{
if (ptr == NULL)
return;
unsigned long long *p = ptr;
--p;
UT_ASSERTeq(*p, GUARD);
free(p);
}
static void *
test_realloc(void *ptr, size_t size)
{
unsigned long long *p;
if (ptr != NULL) {
p = ptr;
--p;
UT_ASSERTeq(*p, GUARD);
p = realloc(p, size + EXTRA);
} else {
p = malloc(size + EXTRA);
}
UT_ASSERTne(p, NULL);
*p = GUARD;
return ++p;
}
static char *
test_strdup(const char *s)
{
if (s == NULL)
return NULL;
size_t size = strlen(s) + 1;
unsigned long long *p = malloc(size + EXTRA);
UT_ASSERTne(p, NULL);
*p = GUARD;
++p;
strcpy((char *)p, s);
return (char *)p;
}
static void *
obj_malloc(size_t size)
{
cnt[OBJ].mallocs++;
return test_malloc(size);
}
static void
obj_free(void *ptr)
{
if (ptr)
cnt[OBJ].frees++;
test_free(ptr);
}
static void *
obj_realloc(void *ptr, size_t size)
{
if (ptr == NULL)
cnt[OBJ].reallocs_null++;
else
cnt[OBJ].reallocs++;
return test_realloc(ptr, size);
}
static char *
obj_strdup(const char *s)
{
cnt[OBJ].strdups++;
return test_strdup(s);
}
static void *
blk_malloc(size_t size)
{
cnt[BLK].mallocs++;
return test_malloc(size);
}
static void
blk_free(void *ptr)
{
if (ptr)
cnt[BLK].frees++;
test_free(ptr);
}
static void *
blk_realloc(void *ptr, size_t size)
{
if (ptr == NULL)
cnt[BLK].reallocs_null++;
else
cnt[BLK].reallocs++;
return test_realloc(ptr, size);
}
static char *
blk_strdup(const char *s)
{
cnt[BLK].strdups++;
return test_strdup(s);
}
static void *
log_malloc(size_t size)
{
cnt[LOG].mallocs++;
return test_malloc(size);
}
static void
log_free(void *ptr)
{
if (ptr)
cnt[LOG].frees++;
test_free(ptr);
}
static void *
log_realloc(void *ptr, size_t size)
{
if (ptr == NULL)
cnt[LOG].reallocs_null++;
else
cnt[LOG].reallocs++;
return test_realloc(ptr, size);
}
static char *
log_strdup(const char *s)
{
cnt[LOG].strdups++;
return test_strdup(s);
}
/*
* There are a few allocations made at first call to pmemobj_open() or
* pmemobj_create(). They are related to some global structures
* holding a list of all open pools. These allocation are not released on
* pmemobj_close(), but in the library destructor. So, we need to take them
* into account when detecting memory leaks.
*
* obj_init/obj_pool_init:
* critnib_new - Malloc + Zalloc
* ctree_new - Malloc
* lane_info_ht_boot/lane_info_create:
* critnib_new - Malloc + Zalloc
*/
#define OBJ_EXTRA_NALLOC 6
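/*
* Leak-check balance used in the tests below (derived from the
* counters): every allocating call must be matched by a free, except
* for the global allocations listed above, e.g. for OBJ:
*
*	mallocs + strdups + reallocs_null == frees + OBJ_EXTRA_NALLOC
*/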
static void
test_obj(const char *path)
{
pmemobj_set_funcs(obj_malloc, obj_free, obj_realloc, obj_strdup);
/*
* Generate ERR() call, that calls malloc() once,
* but only when it is called for the first time
* (free() is called in the destructor of the library).
*/
pmemobj_create(EXISTING_FILE, "", NON_ZERO_POOL_SIZE, 0);
memset(cnt, 0, sizeof(cnt));
PMEMobjpool *pop;
pop = pmemobj_create(path, NULL, PMEMOBJ_MIN_POOL, 0600);
PMEMoid oid;
if (pmemobj_alloc(pop, &oid, 10, 0, NULL, NULL))
UT_FATAL("!alloc");
if (pmemobj_realloc(pop, &oid, 100, 0))
UT_FATAL("!realloc");
pmemobj_free(&oid);
pmemobj_close(pop);
UT_OUT("obj_mallocs: %d", cnt[OBJ].mallocs);
UT_OUT("obj_frees: %d", cnt[OBJ].frees);
UT_OUT("obj_reallocs: %d", cnt[OBJ].reallocs);
UT_OUT("obj_reallocs_null: %d", cnt[OBJ].reallocs_null);
UT_OUT("obj_strdups: %d", cnt[OBJ].strdups);
if (cnt[OBJ].mallocs == 0 || cnt[OBJ].frees == 0)
UT_FATAL("OBJ mallocs: %d, frees: %d", cnt[OBJ].mallocs,
cnt[OBJ].frees);
for (int i = 0; i < 5; ++i) {
if (i == OBJ)
continue;
if (cnt[i].mallocs || cnt[i].frees)
UT_FATAL("OBJ allocation used %d functions", i);
}
if (cnt[OBJ].mallocs + cnt[OBJ].strdups + cnt[OBJ].reallocs_null !=
cnt[OBJ].frees + OBJ_EXTRA_NALLOC)
UT_FATAL("OBJ memory leak");
UNLINK(path);
}
static void
test_blk(const char *path)
{
pmemblk_set_funcs(blk_malloc, blk_free, blk_realloc, blk_strdup);
/*
* Generate ERR() call, that calls malloc() once,
* but only when it is called for the first time
* (free() is called in the destructor of the library).
*/
pmemblk_create(EXISTING_FILE, 0, NON_ZERO_POOL_SIZE, 0);
memset(cnt, 0, sizeof(cnt));
PMEMblkpool *blk = pmemblk_create(path, 512, PMEMBLK_MIN_POOL, 0600);
pmemblk_close(blk);
UT_OUT("blk_mallocs: %d", cnt[BLK].mallocs);
UT_OUT("blk_frees: %d", cnt[BLK].frees);
UT_OUT("blk_reallocs: %d", cnt[BLK].reallocs);
UT_OUT("blk_reallocs_null: %d", cnt[BLK].reallocs_null);
UT_OUT("blk_strdups: %d", cnt[BLK].strdups);
if (cnt[BLK].mallocs == 0 || cnt[BLK].frees == 0)
UT_FATAL("BLK mallocs: %d, frees: %d", cnt[BLK].mallocs,
cnt[BLK].frees);
for (int i = 0; i < 5; ++i) {
if (i == BLK)
continue;
if (cnt[i].mallocs || cnt[i].frees)
UT_FATAL("BLK allocation used %d functions", i);
}
if (cnt[BLK].mallocs + cnt[BLK].strdups + cnt[BLK].reallocs_null
!= cnt[BLK].frees)
UT_FATAL("BLK memory leak");
UNLINK(path);
}
static void
test_log(const char *path)
{
pmemlog_set_funcs(log_malloc, log_free, log_realloc, log_strdup);
/*
* Generate ERR() call, that calls malloc() once,
* but only when it is called for the first time
* (free() is called in the destructor of the library).
*/
pmemlog_create(EXISTING_FILE, NON_ZERO_POOL_SIZE, 0);
memset(cnt, 0, sizeof(cnt));
PMEMlogpool *log = pmemlog_create(path, PMEMLOG_MIN_POOL, 0600);
pmemlog_close(log);
UT_OUT("log_mallocs: %d", cnt[LOG].mallocs);
UT_OUT("log_frees: %d", cnt[LOG].frees);
UT_OUT("log_reallocs: %d", cnt[LOG].reallocs);
UT_OUT("log_reallocs_null: %d", cnt[LOG].reallocs_null);
UT_OUT("log_strdups: %d", cnt[LOG].strdups);
if (cnt[LOG].mallocs == 0 || cnt[LOG].frees == 0)
UT_FATAL("LOG mallocs: %d, frees: %d", cnt[LOG].mallocs,
cnt[LOG].frees);
for (int i = 0; i < 5; ++i) {
if (i == LOG)
continue;
if (cnt[i].mallocs || cnt[i].frees)
UT_FATAL("LOG allocation used %d functions", i);
}
if (cnt[LOG].mallocs + cnt[LOG].strdups + cnt[LOG].reallocs_null
!= cnt[LOG].frees)
UT_FATAL("LOG memory leak");
UNLINK(path);
}
int
main(int argc, char *argv[])
{
START(argc, argv, "set_funcs");
if (argc < 3)
UT_FATAL("usage: %s file dir", argv[0]);
test_obj(argv[1]);
test_blk(argv[1]);
test_log(argv[1]);
DONE(NULL);
}
| 6,820 | 19.002933 | 76 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_ctl_stats/obj_ctl_stats.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2020, Intel Corporation */
/*
* obj_ctl_stats.c -- tests for the libpmemobj statistics module
*/
#include "unittest.h"
int
main(int argc, char *argv[])
{
START(argc, argv, "obj_ctl_stats");
if (argc != 2)
UT_FATAL("usage: %s file-name", argv[0]);
const char *path = argv[1];
PMEMobjpool *pop;
if ((pop = pmemobj_create(path, "ctl", PMEMOBJ_MIN_POOL,
S_IWUSR | S_IRUSR)) == NULL)
UT_FATAL("!pmemobj_create: %s", path);
int enabled;
int ret = pmemobj_ctl_get(pop, "stats.enabled", &enabled);
UT_ASSERTeq(enabled, 0);
UT_ASSERTeq(ret, 0);
ret = pmemobj_alloc(pop, NULL, 1, 0, NULL, NULL);
UT_ASSERTeq(ret, 0);
size_t allocated;
ret = pmemobj_ctl_get(pop, "stats.heap.curr_allocated", &allocated);
UT_ASSERTeq(allocated, 0);
enabled = 1;
ret = pmemobj_ctl_set(pop, "stats.enabled", &enabled);
UT_ASSERTeq(ret, 0);
PMEMoid oid;
ret = pmemobj_alloc(pop, &oid, 1, 0, NULL, NULL);
UT_ASSERTeq(ret, 0);
/* usable size plus the 16-byte compact allocation header */
size_t oid_size = pmemobj_alloc_usable_size(oid) + 16;
ret = pmemobj_ctl_get(pop, "stats.heap.curr_allocated", &allocated);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(allocated, oid_size);
size_t run_allocated = 0;
ret = pmemobj_ctl_get(pop, "stats.heap.run_allocated", &run_allocated);
UT_ASSERTeq(ret, 0);
UT_ASSERT(run_allocated /* 2 allocs */ > allocated /* 1 alloc */);
pmemobj_free(&oid);
ret = pmemobj_ctl_get(pop, "stats.heap.curr_allocated", &allocated);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(allocated, 0);
ret = pmemobj_ctl_get(pop, "stats.heap.run_allocated", &run_allocated);
UT_ASSERTeq(ret, 0);
UT_ASSERT(run_allocated /* 2 allocs */ > allocated /* 1 alloc */);
TX_BEGIN(pop) {
oid = pmemobj_tx_alloc(1, 0);
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
oid_size = pmemobj_alloc_usable_size(oid) + 16;
ret = pmemobj_ctl_get(pop, "stats.heap.curr_allocated", &allocated);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(allocated, oid_size);
enum pobj_stats_enabled enum_enabled;
ret = pmemobj_ctl_get(pop, "stats.enabled", &enum_enabled);
UT_ASSERTeq(enum_enabled, POBJ_STATS_ENABLED_BOTH);
UT_ASSERTeq(ret, 0);
run_allocated = 0;
ret = pmemobj_ctl_get(pop, "stats.heap.run_allocated", &run_allocated);
UT_ASSERTeq(ret, 0);
enum_enabled = POBJ_STATS_ENABLED_PERSISTENT; /* transient disabled */
ret = pmemobj_ctl_set(pop, "stats.enabled", &enum_enabled);
UT_ASSERTeq(ret, 0);
ret = pmemobj_alloc(pop, &oid, 1, 0, NULL, NULL);
UT_ASSERTeq(ret, 0);
size_t tmp = 0;
ret = pmemobj_ctl_get(pop, "stats.heap.run_allocated", &tmp);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(tmp, run_allocated); /* shouldn't change */
/* the deallocated object shouldn't be reflected in rebuilt stats */
pmemobj_free(&oid);
pmemobj_close(pop);
pop = pmemobj_open(path, "ctl");
UT_ASSERTne(pop, NULL);
/* stats are rebuilt lazily, so initially this should be 0 */
tmp = 0;
ret = pmemobj_ctl_get(pop, "stats.heap.run_allocated", &tmp);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(tmp, 0);
ret = pmemobj_alloc(pop, NULL, 1, 0, NULL, NULL);
UT_ASSERTeq(ret, 0);
/* after first alloc, the previously allocated object will be found */
tmp = 0;
ret = pmemobj_ctl_get(pop, "stats.heap.run_allocated", &tmp);
UT_ASSERTeq(ret, 0);
UT_ASSERTeq(tmp, run_allocated + oid_size);
pmemobj_close(pop);
DONE(NULL);
}
| 3,299 | 25.829268 | 72 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/traces_custom_function/traces_custom_function.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2017, Intel Corporation */
/*
* traces_custom_function.c -- unit test for traces with custom print or
* vsnprintf functions
*
* usage: traces_custom_function [v|p]
*
*/
#define LOG_PREFIX "trace_func"
#define LOG_LEVEL_VAR "TRACE_LOG_LEVEL"
#define LOG_FILE_VAR "TRACE_LOG_FILE"
#define MAJOR_VERSION 1
#define MINOR_VERSION 0
#include <sys/types.h>
#include <stdarg.h>
#include "pmemcommon.h"
#include "unittest.h"
/*
* print_custom_function -- Custom function to handle output
*
* This is called from the library to print text instead of output to stderr.
*/
static void
print_custom_function(const char *s)
{
if (s) {
UT_OUT("CUSTOM_PRINT: %s", s);
} else {
UT_OUT("CUSTOM_PRINT(NULL)");
}
}
/*
* vsnprintf_custom_function -- Custom vsnprintf implementation
*
* It modifies format by adding @@ in front of each conversion specification.
*/
static int
vsnprintf_custom_function(char *str, size_t size, const char *format,
va_list ap)
{
/* worst case: every character expands to three, plus the terminator */
char *format2 = MALLOC(strlen(format) * 3 + 1);
int i = 0;
int ret_val;
while (*format != '\0') {
if (*format == '%') {
format2[i++] = '@';
format2[i++] = '@';
}
format2[i++] = *format++;
}
format2[i++] = '\0';
ret_val = vsnprintf(str, size, format2, ap);
FREE(format2);
return ret_val;
}
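/*
* Example of the rewrite itself (illustrative): given the format
* "number: %u" with argument 5, the function formats with
* "number: @@%u", producing "number: @@5".
*/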
int
main(int argc, char *argv[])
{
START(argc, argv, "traces_custom_function");
if (argc != 2)
UT_FATAL("usage: %s [v|p]", argv[0]);
out_set_print_func(print_custom_function);
common_init(LOG_PREFIX, LOG_LEVEL_VAR, LOG_FILE_VAR,
MAJOR_VERSION, MINOR_VERSION);
switch (argv[1][0]) {
case 'p': {
LOG(0, "Log level NONE");
LOG(1, "Log level ERROR");
LOG(2, "Log level WARNING");
LOG(3, "Log level INFO");
LOG(4, "Log level DEBUG");
}
break;
case 'v':
out_set_vsnprintf_func(vsnprintf_custom_function);
LOG(0, "no format");
LOG(0, "pointer: %p", (void *)0x12345678);
LOG(0, "string: %s", "Hello world!");
LOG(0, "number: %u", 12345678);
errno = EINVAL;
LOG(0, "!error");
break;
default:
UT_FATAL("usage: %s [v|p]", argv[0]);
}
/* Cleanup */
common_fini();
DONE(NULL);
}
| 2,156 | 19.158879 | 77 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/ex_librpmem_hello/config.sh
|
#!/usr/bin/env bash
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2019, Intel Corporation
#
#
# ex_librpmem_hello/config.sh -- test configuration
#
# Filesystem-DAX cannot be used for RDMA
# since it is missing support in Linux kernel
CONF_GLOBAL_FS_TYPE=non-pmem
CONF_GLOBAL_BUILD_TYPE="debug nondebug"
CONF_GLOBAL_TEST_TYPE=short
CONF_GLOBAL_RPMEM_PROVIDER=all
CONF_GLOBAL_RPMEM_PMETHOD=all
| 402 | 21.388889 | 51 |
sh
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/util_poolset_foreach/util_poolset_foreach.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* util_poolset_foreach.c -- unit test for util_poolset_foreach_part()
*
* usage: util_poolset_foreach file...
*/
#include "unittest.h"
#include "set.h"
#include "pmemcommon.h"
#include <errno.h>
#define LOG_PREFIX "ut"
#define LOG_LEVEL_VAR "TEST_LOG_LEVEL"
#define LOG_FILE_VAR "TEST_LOG_FILE"
#define MAJOR_VERSION 1
#define MINOR_VERSION 0
static int
cb(struct part_file *pf, void *arg)
{
if (pf->is_remote) {
/* remote replica */
const char *node_addr = pf->remote->node_addr;
const char *pool_desc = pf->remote->pool_desc;
char *set_name = (char *)arg;
UT_OUT("%s: %s %s", set_name, node_addr, pool_desc);
} else {
const char *name = pf->part->path;
char *set_name = (char *)arg;
UT_OUT("%s: %s", set_name, name);
}
return 0;
}
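/*
* Example output (illustrative values only): for a local part the
* callback prints "pool.set: /path/part1"; for a remote replica it
* prints "pool.set: [node address] [pool descriptor]".
*/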
int
main(int argc, char *argv[])
{
START(argc, argv, "util_poolset_foreach");
common_init(LOG_PREFIX, LOG_LEVEL_VAR, LOG_FILE_VAR,
MAJOR_VERSION, MINOR_VERSION);
if (argc < 2)
UT_FATAL("usage: %s file...",
argv[0]);
for (int i = 1; i < argc; i++) {
char *fname = argv[i];
int ret = util_poolset_foreach_part(fname, cb, fname);
UT_OUT("util_poolset_foreach_part(%s): %d", fname, ret);
}
common_fini();
DONE(NULL);
}
| 1,293 | 20.213115 | 70 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem2_map_prot/pmem2_map_prot.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */
/*
* pmem2_map_prot.c -- pmem2_map_prot unit tests
*/
#include <stdbool.h>
#include <signal.h>
#include <setjmp.h>
#include "config.h"
#include "source.h"
#include "map.h"
#include "out.h"
#include "pmem2.h"
#include "unittest.h"
#include "ut_pmem2.h"
#include "ut_pmem2_setup.h"
#include "ut_fh.h"
struct res {
struct FHandle *fh;
struct pmem2_config cfg;
struct pmem2_source *src;
};
/*
* res_prepare -- set access mode and protection flags
*/
static void
res_prepare(const char *file, struct res *res, int access, unsigned proto)
{
#ifdef _WIN32
enum file_handle_type fh_type = FH_HANDLE;
#else
enum file_handle_type fh_type = FH_FD;
#endif
ut_pmem2_prepare_config(&res->cfg, &res->src, &res->fh, fh_type, file,
0, 0, access);
pmem2_config_set_protection(&res->cfg, proto);
}
/*
* res_cleanup -- free resources
*/
static void
res_cleanup(struct res *res)
{
PMEM2_SOURCE_DELETE(&res->src);
UT_FH_CLOSE(res->fh);
}
static const char *word1 = "Persistent or nonpersistent: this is the question.";
static ut_jmp_buf_t Jmp;
/*
* signal_handler -- called on SIGSEGV
*/
static void
signal_handler(int sig)
{
ut_siglongjmp(Jmp);
}
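/*
* Negative-test pattern used throughout this file: arm Jmp with
* ut_sigsetjmp() right before a forbidden access; the SIGSEGV handler
* longjmps back, so falling through past the access means the page
* protection did not work and the test fails with UT_FATAL().
*/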
/*
* test_rw_mode_rw_prot -- test R/W protection
* pmem2_map() - should succeed
* memcpy() - should succeed
*/
static int
test_rw_mode_rw_prot(const struct test_case *tc,
int argc, char *argv[])
{
if (argc < 1)
UT_FATAL("usage: test_rw_mode_rw_prot <file>");
struct res res;
/* read/write on file opened in read/write mode - should succeed */
res_prepare(argv[0], &res, FH_RDWR,
PMEM2_PROT_READ | PMEM2_PROT_WRITE);
struct pmem2_map *map;
int ret = pmem2_map(&res.cfg, res.src, &map);
UT_ASSERTeq(ret, 0);
pmem2_memcpy_fn memcpy_fn = pmem2_get_memcpy_fn(map);
void *addr_map = pmem2_map_get_address(map);
memcpy_fn(addr_map, word1, strlen(word1), 0);
UT_ASSERTeq(memcmp(addr_map, word1, strlen(word1)), 0);
pmem2_unmap(&map);
res_cleanup(&res);
return 1;
}
/*
* template_mode_prot_mismatch - try to map file with mutually exclusive FD
* access and map protection
*/
static void
template_mode_prot_mismatch(char *file, int access, unsigned prot)
{
struct res res;
/* read/write on file opened in read-only mode - should fail */
res_prepare(file, &res, access, prot);
struct pmem2_map *map;
int ret = pmem2_map(&res.cfg, res.src, &map);
UT_PMEM2_EXPECT_RETURN(ret, PMEM2_E_NO_ACCESS);
res_cleanup(&res);
}
/*
* test_r_mode_rw_prot -- test R/W protection
* pmem2_map() - should fail
*/
static int
test_r_mode_rw_prot(const struct test_case *tc,
int argc, char *argv[])
{
if (argc < 1)
UT_FATAL("usage: test_r_mode_rw_prot <file>");
char *file = argv[0];
template_mode_prot_mismatch(file, FH_READ,
PMEM2_PROT_WRITE | PMEM2_PROT_READ);
return 1;
}
/*
* test_rw_modex_rwx_prot - test R/W/X protection on R/W file
* pmem2_map() - should fail
*/
static int
test_rw_modex_rwx_prot(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 1)
UT_FATAL("usage: test_rw_modex_rwx_prot <file>");
char *file = argv[0];
template_mode_prot_mismatch(file, FH_RDWR,
PMEM2_PROT_EXEC | PMEM2_PROT_WRITE | PMEM2_PROT_READ);
return 1;
}
/*
* test_rw_modex_rx_prot - test R/X protection on R/W file
* pmem2_map() - should fail
*/
static int
test_rw_modex_rx_prot(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 1)
UT_FATAL("usage: test_rw_modex_rx_prot <file>");
char *file = argv[0];
template_mode_prot_mismatch(file, FH_RDWR,
PMEM2_PROT_EXEC | PMEM2_PROT_READ);
return 1;
}
/*
* test_rw_mode_r_prot -- test READ protection on a read/write file
* pmem2_map() - should succeed
* memcpy() - should fail
*/
static int
test_rw_mode_r_prot(const struct test_case *tc,
int argc, char *argv[])
{
if (argc < 1)
UT_FATAL("usage: test_rw_mode_r_prot <file>");
/* arrange to catch SIGSEGV */
struct sigaction v;
sigemptyset(&v.sa_mask);
v.sa_flags = 0;
v.sa_handler = signal_handler;
SIGACTION(SIGSEGV, &v, NULL);
struct res res;
/* read-only on file opened in read/write mode - should succeed */
res_prepare(argv[0], &res, FH_RDWR, PMEM2_PROT_READ);
struct pmem2_map *map;
int ret = pmem2_map(&res.cfg, res.src, &map);
UT_ASSERTeq(ret, 0);
pmem2_memcpy_fn memcpy_fn = pmem2_get_memcpy_fn(map);
void *addr_map = pmem2_map_get_address(map);
if (!ut_sigsetjmp(Jmp)) {
/* memcpy should now fail */
memcpy_fn(addr_map, word1, strlen(word1), 0);
UT_FATAL("memcpy successful");
}
pmem2_unmap(&map);
res_cleanup(&res);
signal(SIGSEGV, SIG_DFL);
return 1;
}
/*
* test_r_mode_r_prot -- test READ protection on a read-only file
* pmem2_map() - should succeed
* memcpy() - should fail
*/
static int
test_r_mode_r_prot(const struct test_case *tc,
int argc, char *argv[])
{
if (argc < 1)
UT_FATAL("usage: test_r_mode_r_prot <file>");
/* arrange to catch SIGSEGV */
struct sigaction v;
sigemptyset(&v.sa_mask);
v.sa_flags = 0;
v.sa_handler = signal_handler;
SIGACTION(SIGSEGV, &v, NULL);
struct res res;
/* read-only on file opened in read-only mode - should succeed */
res_prepare(argv[0], &res, FH_READ, PMEM2_PROT_READ);
struct pmem2_map *map;
int ret = pmem2_map(&res.cfg, res.src, &map);
UT_ASSERTeq(ret, 0);
pmem2_memcpy_fn memcpy_fn = pmem2_get_memcpy_fn(map);
void *addr_map = pmem2_map_get_address(map);
if (!ut_sigsetjmp(Jmp)) {
/* memcpy should now fail */
memcpy_fn(addr_map, word1, strlen(word1), 0);
UT_FATAL("memcpy successful");
}
pmem2_unmap(&map);
res_cleanup(&res);
signal(SIGSEGV, SIG_DFL);
return 1;
}
/*
* test_rw_mode_none_prot -- test NONE protection
* pmem2_map() - should succeed
* memcpy() - should fail
*/
static int
test_rw_mode_none_prot(const struct test_case *tc,
int argc, char *argv[])
{
if (argc < 1)
UT_FATAL("usage: test_rw_mode_none_prot <file>");
/* arrange to catch SIGSEGV */
struct sigaction v;
sigemptyset(&v.sa_mask);
v.sa_flags = 0;
v.sa_handler = signal_handler;
SIGACTION(SIGSEGV, &v, NULL);
struct res res;
/* none on file opened in read-only mode - should succeed */
res_prepare(argv[0], &res, FH_READ, PMEM2_PROT_NONE);
struct pmem2_map *map;
int ret = pmem2_map(&res.cfg, res.src, &map);
UT_ASSERTeq(ret, 0);
pmem2_memcpy_fn memcpy_fn = pmem2_get_memcpy_fn(map);
void *addr_map = pmem2_map_get_address(map);
if (!ut_sigsetjmp(Jmp)) {
/* memcpy should now fail */
memcpy_fn(addr_map, word1, strlen(word1), 0);
UT_FATAL("memcpy successful");
}
pmem2_unmap(&map);
res_cleanup(&res);
signal(SIGSEGV, SIG_DFL);
return 1;
}
/*
* sum_asm[] --> simple program in assembly which calculates '2 + 2' and
* returns the result
*/
static unsigned char sum_asm[] = {
0x55, /* push %rbp */
0x48, 0x89, 0xe5, /* mov %rsp,%rbp */
0xc7, 0x45, 0xf8, 0x02, 0x00, 0x00, 0x00, /* movl $0x2,-0x8(%rbp) */
0x8b, 0x45, 0xf8, /* mov -0x8(%rbp),%eax */
0x01, 0xc0, /* add %eax,%eax */
0x89, 0x45, 0xfc, /* mov %eax,-0x4(%rbp) */
0x8b, 0x45, 0xfc, /* mov -0x4(%rbp),%eax */
0x5d, /* pop %rbp */
0xc3, /* retq */
};
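/*
* Equivalent C for the machine code above (reconstructed from the
* disassembly comments; returns 4):
*
*	int sum(void) { int a = 2; return a + a; }
*/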
typedef int (*sum_fn)(void);
/*
* test_rx_mode_rx_prot_do_execute -- copy the program into mapped
* memory, execute it and verify the result
*/
static int
test_rx_mode_rx_prot_do_execute(const struct test_case *tc, int argc,
char *argv[])
{
if (argc < 1)
UT_FATAL("usage: test_rx_mode_rx_prot_do_execute <file>");
char *file = argv[0];
struct res res;
/* Windows does not support PMEM2_PROT_WRITE combination */
res_prepare(file, &res, FH_EXEC | FH_RDWR,
PMEM2_PROT_WRITE | PMEM2_PROT_READ);
struct pmem2_map *map;
int ret = pmem2_map(&res.cfg, res.src, &map);
UT_ASSERTeq(ret, 0);
char *addr_map = pmem2_map_get_address(map);
map->memcpy_fn(addr_map, sum_asm, sizeof(sum_asm), 0);
pmem2_unmap(&map);
/* Windows does not support PMEM2_PROT_EXEC combination */
pmem2_config_set_protection(&res.cfg,
PMEM2_PROT_READ | PMEM2_PROT_EXEC);
ret = pmem2_map(&res.cfg, res.src, &map);
UT_ASSERTeq(ret, 0);
sum_fn sum = (sum_fn)addr_map;
int sum_result = sum();
UT_ASSERTeq(sum_result, 4);
pmem2_unmap(&map);
res_cleanup(&res);
return 1;
}
/*
* test_rwx_mode_rx_prot_do_write -- try to copy the string into mapped memory,
* expect failure
*/
static int
test_rwx_mode_rx_prot_do_write(const struct test_case *tc,
int argc, char *argv[])
{
if (argc < 2)
UT_FATAL(
"usage: test_rwx_mode_rx_prot_do_write <file> <if_sharing>");
struct sigaction v;
sigemptyset(&v.sa_mask);
v.sa_flags = 0;
v.sa_handler = signal_handler;
SIGACTION(SIGSEGV, &v, NULL);
char *file = argv[0];
unsigned if_sharing = ATOU(argv[1]);
struct res res;
/* Windows does not support PMEM2_PROT_EXEC combination */
res_prepare(file, &res, FH_EXEC | FH_RDWR,
PMEM2_PROT_READ | PMEM2_PROT_EXEC);
if (if_sharing)
pmem2_config_set_sharing(&res.cfg, PMEM2_PRIVATE);
struct pmem2_map *map;
int ret = pmem2_map(&res.cfg, res.src, &map);
UT_ASSERTeq(ret, 0);
char *addr_map = pmem2_map_get_address(map);
if (!ut_sigsetjmp(Jmp)) {
/* memcpy_fn should fail */
map->memcpy_fn(addr_map, sum_asm, sizeof(sum_asm), 0);
}
pmem2_unmap(&map);
res_cleanup(&res);
signal(SIGSEGV, SIG_DFL);
return 2;
}
/*
* test_rwx_mode_rwx_prot_do_execute -- copy the program into mapped
* memory, execute it and verify the result
*/
static int
test_rwx_mode_rwx_prot_do_execute(const struct test_case *tc,
int argc, char *argv[])
{
if (argc < 2)
UT_FATAL(
"usage: test_rwx_mode_rwx_prot_do_execute <file> <if_sharing>");
char *file = argv[0];
unsigned if_sharing = ATOU(argv[1]);
struct res res;
res_prepare(file, &res, FH_EXEC | FH_RDWR,
PMEM2_PROT_EXEC | PMEM2_PROT_WRITE | PMEM2_PROT_READ);
if (if_sharing)
pmem2_config_set_sharing(&res.cfg, PMEM2_PRIVATE);
struct pmem2_map *map;
int ret = pmem2_map(&res.cfg, res.src, &map);
UT_ASSERTeq(ret, 0);
char *addr_map = pmem2_map_get_address(map);
map->memcpy_fn(addr_map, sum_asm, sizeof(sum_asm), 0);
sum_fn sum = (sum_fn)addr_map;
int sum_result = sum();
UT_ASSERTeq(sum_result, 4);
pmem2_unmap(&map);
res_cleanup(&res);
signal(SIGSEGV, SIG_DFL);
return 2;
}
/*
* test_rw_mode_rw_prot_do_execute -- copy the program into mapped
* memory and try to execute it - should fail
*/
static int
test_rw_mode_rw_prot_do_execute(const struct test_case *tc,
int argc, char *argv[])
{
if (argc < 2)
UT_FATAL(
"usage: test_rw_mode_rwx_prot_do_execute <file> <if_sharing>");
struct sigaction v;
sigemptyset(&v.sa_mask);
v.sa_flags = 0;
v.sa_handler = signal_handler;
SIGACTION(SIGSEGV, &v, NULL);
char *file = argv[0];
unsigned if_sharing = ATOU(argv[1]);
struct res res;
res_prepare(file, &res, FH_RDWR, PMEM2_PROT_WRITE | PMEM2_PROT_READ);
if (if_sharing)
pmem2_config_set_sharing(&res.cfg, PMEM2_PRIVATE);
struct pmem2_map *map;
int ret = pmem2_map(&res.cfg, res.src, &map);
UT_ASSERTeq(ret, 0);
void *addr_map = pmem2_map_get_address(map);
map->memcpy_fn(addr_map, sum_asm, sizeof(sum_asm), 0);
sum_fn sum = (sum_fn)addr_map;
if (!ut_sigsetjmp(Jmp)) {
sum(); /* sum function should now fail */
}
pmem2_unmap(&map);
res_cleanup(&res);
return 2;
}
static const char *initial_state = "No code.";
/*
* test_rwx_prot_map_priv_do_execute -- copy the program into memory
* mapped with MAP_PRIVATE, execute it and verify that the underlying
* file is left unchanged
*/
static int
test_rwx_prot_map_priv_do_execute(const struct test_case *tc,
int argc, char *argv[])
{
if (argc < 1)
UT_FATAL(
"usage: test_rwx_prot_map_priv_do_execute <file> <if_sharing>");
char *file = argv[0];
struct res res;
res_prepare(file, &res, FH_RDWR, PMEM2_PROT_WRITE | PMEM2_PROT_READ);
struct pmem2_map *map;
int ret = pmem2_map(&res.cfg, res.src, &map);
UT_ASSERTeq(ret, 0);
char *addr_map = pmem2_map_get_address(map);
/* copy the string itself, not the size of the pointer */
map->memcpy_fn(addr_map, initial_state, strlen(initial_state) + 1, 0);
pmem2_unmap(&map);
res_cleanup(&res);
res_prepare(file, &res, FH_READ | FH_EXEC,
PMEM2_PROT_EXEC | PMEM2_PROT_WRITE | PMEM2_PROT_READ);
pmem2_config_set_sharing(&res.cfg, PMEM2_PRIVATE);
ret = pmem2_map(&res.cfg, res.src, &map);
UT_ASSERTeq(ret, 0);
addr_map = pmem2_map_get_address(map);
map->memcpy_fn(addr_map, sum_asm, sizeof(sum_asm), 0);
sum_fn sum = (sum_fn)addr_map;
int sum_result = sum();
UT_ASSERTeq(sum_result, 4);
pmem2_unmap(&map);
ret = pmem2_map(&res.cfg, res.src, &map);
UT_ASSERTeq(ret, 0);
addr_map = pmem2_map_get_address(map);
/* check if changes in private mapping affect initial state */
UT_ASSERTeq(memcmp(addr_map, initial_state, strlen(initial_state)), 0);
pmem2_unmap(&map);
res_cleanup(&res);
return 1;
}
/*
* test_cases -- available test cases
*/
static struct test_case test_cases[] = {
TEST_CASE(test_rw_mode_rw_prot),
TEST_CASE(test_r_mode_rw_prot),
TEST_CASE(test_rw_modex_rwx_prot),
TEST_CASE(test_rw_modex_rx_prot),
TEST_CASE(test_rw_mode_r_prot),
TEST_CASE(test_r_mode_r_prot),
TEST_CASE(test_rw_mode_none_prot),
TEST_CASE(test_rx_mode_rx_prot_do_execute),
TEST_CASE(test_rwx_mode_rx_prot_do_write),
TEST_CASE(test_rwx_mode_rwx_prot_do_execute),
TEST_CASE(test_rw_mode_rw_prot_do_execute),
TEST_CASE(test_rwx_prot_map_priv_do_execute),
};
#define NTESTS (sizeof(test_cases) / sizeof(test_cases[0]))
int
main(int argc, char *argv[])
{
START(argc, argv, "pmem2_map_prot");
util_init();
out_init("pmem2_map_prot", "TEST_LOG_LEVEL", "TEST_LOG_FILE", 0, 0);
TEST_CASE_PROCESS(argc, argv, test_cases, NTESTS);
out_fini();
DONE(NULL);
}
#ifdef _MSC_VER
MSVC_CONSTR(libpmem2_init)
MSVC_DESTR(libpmem2_fini)
#endif
| 13,698 | 22.537801 | 80 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem2_map_prot/TESTS.py
|
#!../env.py
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2020, Intel Corporation
#
import testframework as t
class PMEM2_MAP_PROT(t.Test):
test_type = t.Short
filesize = 16 * t.MiB
def run(self, ctx):
filepath = ctx.create_holey_file(self.filesize, 'testfile')
ctx.exec('pmem2_map_prot', self.test_case, filepath)
class TEST0(PMEM2_MAP_PROT):
"""
READ/WRITE protection on file opened in read-write mode - should succeed
"""
test_case = "test_rw_mode_rw_prot"
class TEST1(PMEM2_MAP_PROT):
"""
READ/WRITE protection on file opened in read-only mode - should fail
"""
test_case = "test_r_mode_rw_prot"
@t.windows_only
class TEST2(PMEM2_MAP_PROT):
"""
READ/WRITE/EXEC protection on file opened in read-write mode - should fail
"""
test_case = "test_rw_modex_rwx_prot"
@t.windows_only
class TEST3(PMEM2_MAP_PROT):
"""
READ/EXEC protection on file opened in read-write mode - should fail
"""
test_case = "test_rw_modex_rx_prot"
class TEST4(PMEM2_MAP_PROT):
"""
READ protection on file opened in read-write mode - should succeed
"""
test_case = "test_rw_mode_r_prot"
class TEST5(PMEM2_MAP_PROT):
"""
READ protection on file opened in read-only mode - should succeed
"""
test_case = "test_r_mode_r_prot"
# PMEM2_PROT_NONE flag is not supported by the CreateFileMapping function.
# This test on purpose performs an "Invalid write"
# which causes Memcheck to fail.
@t.windows_exclude
@t.require_valgrind_disabled('memcheck')
class TEST6(PMEM2_MAP_PROT):
"""
NONE protection on file opened in read-write mode - should succeed
"""
test_case = "test_rw_mode_none_prot"
@t.require_architectures('x86_64')
class TEST7(PMEM2_MAP_PROT):
"""
READ|EXEC protection on file opened in read|write|exec mode; test runs
the program, which is put in mapped memory - should succeed
"""
test_case = "test_rx_mode_rx_prot_do_execute"
class PMEM2_PROT_EXEC(t.Test):
test_type = t.Short
filesize = 16 * t.MiB
is_map_private = 0
def run(self, ctx):
filepath = ctx.create_holey_file(self.filesize, 'testfile')
ctx.exec('pmem2_map_prot', self.test_case,
filepath, self.is_map_private)
class TEST8(PMEM2_PROT_EXEC):
"""
READ|EXEC protection on file opened in read|write|exec mode; test writes
data to mapped memory - should fail
"""
test_case = "test_rwx_mode_rx_prot_do_write"
class TEST9(PMEM2_PROT_EXEC):
"""
READ|EXEC protection on file opened in read|write|exec mode; test writes
data to mapped memory with MAP_PRIVATE - should failed
"""
test_case = "test_rwx_mode_rx_prot_do_write"
is_map_private = 1
@t.require_architectures('x86_64')
class TEST10(PMEM2_PROT_EXEC):
"""
READ|WRITE|EXEC protection on file opened in read|write|exec mode; test
runs the program, which is put in mapped memory - should succeed
"""
test_case = "test_rwx_mode_rwx_prot_do_execute"
@t.require_architectures('x86_64')
class TEST11(PMEM2_PROT_EXEC):
"""
READ|WRITE|EXEC protection on file opened in read|write|exec mode; test
runs the program, which is put in mapped memory with MAP_PRIVATE -
should succeed
"""
test_case = "test_rwx_mode_rwx_prot_do_execute"
is_map_private = 1
@t.require_architectures('x86_64')
class TEST12(PMEM2_PROT_EXEC):
"""
READ|WRITE protection on file opened in read|write mode; test runs
the program, which is put in mapped memory - should fail
"""
test_case = "test_rw_mode_rw_prot_do_execute"
@t.require_architectures('x86_64')
class TEST13(PMEM2_PROT_EXEC):
"""
READ|WRITE protection on file opened in read|write mode; test runs
the program, which is put in mapped memory with MAP_PRIVATE - should fail
"""
test_case = "test_rw_mode_rw_prot_do_execute"
is_map_private = 1
@t.require_architectures('x86_64')
class TEST14(PMEM2_MAP_PROT):
"""
READ|EXEC protection on file opened in read|exec|write mode; test
runs the program, which is put in mapped memory with MAP_PRIVATE -
should succeed
"""
test_case = "test_rwx_prot_map_priv_do_execute"
@t.windows_exclude
@t.require_devdax(t.DevDax('devdax1'))
class PMEM2_MAP_DEVDAX(t.Test):
test_type = t.Short
def run(self, ctx):
dd = ctx.devdaxes.devdax1
ctx.exec('pmem2_map_prot', self.test_case, dd.path)
class TEST15(PMEM2_MAP_DEVDAX):
"""
READ/WRITE protection on device DAX opened in read-write
mode - should succeed
"""
test_case = "test_rw_mode_rw_prot"
# XXX: we need to add support to test framework and create a tool-helper
# to skip this test when exec access is not allowed for specific device
# @t.require_architectures('x86_64')
# class TEST16(PMEM2_MAP_DEVDAX):
# """
# READ|EXEC protection on device DAX opened in read|write|exec mode; test
# runs the program, which is put in mapped memory - should succeed
# """
# test_case = "test_rx_mode_rx_prot_do_execute"
| 5,066 | 26.241935 | 79 |
py
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/ex_librpmem_manpage/config.sh
|
#!/usr/bin/env bash
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2019, Intel Corporation
#
#
# ex_librpmem_manpage/config.sh -- test configuration
#
# Filesystem-DAX cannot be used for RDMA
# since it is missing support in Linux kernel
CONF_GLOBAL_FS_TYPE=non-pmem
CONF_GLOBAL_BUILD_TYPE="debug nondebug"
CONF_GLOBAL_TEST_TYPE=short
CONF_GLOBAL_RPMEM_PROVIDER=all
CONF_GLOBAL_RPMEM_PMETHOD=all
| 404 | 21.5 | 53 |
sh
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_layout/obj_layout.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2019, Intel Corporation */
/*
* obj_layout.c -- unit test for layout
*
* This test should be modified after every layout change. It's here to prevent
* any accidental layout changes.
*/
#include "util.h"
#include "unittest.h"
#include "sync.h"
#include "heap_layout.h"
#include "lane.h"
#include "tx.h"
#include "ulog.h"
#include "list.h"
#define SIZEOF_CHUNK_HEADER_V3 (8)
#define MAX_CHUNK_V3 (65535 - 7)
#define SIZEOF_CHUNK_V3 (1024ULL * 256)
#define SIZEOF_CHUNK_RUN_HEADER_V3 (16)
#define SIZEOF_ZONE_HEADER_V3 (64)
#define SIZEOF_ZONE_METADATA_V3 (SIZEOF_ZONE_HEADER_V3 +\
SIZEOF_CHUNK_HEADER_V3 * MAX_CHUNK_V3)
#define SIZEOF_HEAP_HDR_V3 (1024)
#define SIZEOF_LEGACY_ALLOCATION_HEADER_V3 (64)
#define SIZEOF_COMPACT_ALLOCATION_HEADER_V3 (16)
#define SIZEOF_LOCK_V3 (64)
#define SIZEOF_PMEMOID_V3 (16)
#define SIZEOF_LIST_ENTRY_V3 (SIZEOF_PMEMOID_V3 * 2)
#define SIZEOF_LIST_HEAD_V3 (SIZEOF_PMEMOID_V3 + SIZEOF_LOCK_V3)
#define SIZEOF_LANE_SECTION_V3 (1024)
#define SIZEOF_LANE_V3 (3 * SIZEOF_LANE_SECTION_V3)
#define SIZEOF_ULOG_V4 (CACHELINE_SIZE)
#define SIZEOF_ULOG_BASE_ENTRY_V4 (8)
#define SIZEOF_ULOG_VAL_ENTRY_V4 (16)
#define SIZEOF_ULOG_BUF_ENTRY_V4 (24)
#if CACHELINE_SIZE == 128
#define SIZEOF_LANE_UNDO_SIZE (1920)
#define SIZEOF_LANE_REDO_EXTERNAL_SIZE (640)
#define SIZEOF_LANE_REDO_INTERNAL_SIZE (128)
#elif CACHELINE_SIZE == 64
#define SIZEOF_LANE_UNDO_SIZE (2048)
#define SIZEOF_LANE_REDO_EXTERNAL_SIZE (640)
#define SIZEOF_LANE_REDO_INTERNAL_SIZE (192)
#else
#error "Unknown cacheline size"
#endif
POBJ_LAYOUT_BEGIN(layout);
POBJ_LAYOUT_ROOT(layout, struct foo);
POBJ_LAYOUT_END(layout);
struct foo {
POBJ_LIST_ENTRY(struct foo) f;
};
POBJ_LIST_HEAD(foo_head, struct foo);
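/*
* Sanity fixture for the list-layout checks below: a struct holding a
* single POBJ_LIST_ENTRY must match SIZEOF_LIST_ENTRY_V3, i.e. two
* PMEMoids (pe_prev and pe_next):
*
*	sizeof(struct foo) == 2 * SIZEOF_PMEMOID_V3 == 32
*/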
int
main(int argc, char *argv[])
{
START(argc, argv, "obj_layout");
UT_COMPILE_ERROR_ON(CHUNKSIZE != SIZEOF_CHUNK_V3);
ASSERT_ALIGNED_BEGIN(struct chunk);
ASSERT_ALIGNED_FIELD(struct chunk, data);
ASSERT_ALIGNED_CHECK(struct chunk);
UT_COMPILE_ERROR_ON(sizeof(struct chunk_run) != SIZEOF_CHUNK_V3);
ASSERT_ALIGNED_BEGIN(struct chunk_run_header);
ASSERT_ALIGNED_FIELD(struct chunk_run_header, block_size);
ASSERT_ALIGNED_FIELD(struct chunk_run_header, alignment);
ASSERT_ALIGNED_CHECK(struct chunk_run_header);
UT_COMPILE_ERROR_ON(sizeof(struct chunk_run_header) !=
SIZEOF_CHUNK_RUN_HEADER_V3);
ASSERT_ALIGNED_BEGIN(struct chunk_run);
ASSERT_ALIGNED_FIELD(struct chunk_run, hdr);
ASSERT_ALIGNED_FIELD(struct chunk_run, content);
ASSERT_ALIGNED_CHECK(struct chunk_run);
UT_COMPILE_ERROR_ON(sizeof(struct chunk_run) != SIZEOF_CHUNK_V3);
ASSERT_ALIGNED_BEGIN(struct chunk_header);
ASSERT_ALIGNED_FIELD(struct chunk_header, type);
ASSERT_ALIGNED_FIELD(struct chunk_header, flags);
ASSERT_ALIGNED_FIELD(struct chunk_header, size_idx);
ASSERT_ALIGNED_CHECK(struct chunk_header);
UT_COMPILE_ERROR_ON(sizeof(struct chunk_header) !=
SIZEOF_CHUNK_HEADER_V3);
ASSERT_ALIGNED_BEGIN(struct zone_header);
ASSERT_ALIGNED_FIELD(struct zone_header, magic);
ASSERT_ALIGNED_FIELD(struct zone_header, size_idx);
ASSERT_ALIGNED_FIELD(struct zone_header, reserved);
ASSERT_ALIGNED_CHECK(struct zone_header);
UT_COMPILE_ERROR_ON(sizeof(struct zone_header) !=
SIZEOF_ZONE_HEADER_V3);
ASSERT_ALIGNED_BEGIN(struct zone);
ASSERT_ALIGNED_FIELD(struct zone, header);
ASSERT_ALIGNED_FIELD(struct zone, chunk_headers);
ASSERT_ALIGNED_CHECK(struct zone);
UT_COMPILE_ERROR_ON(sizeof(struct zone) !=
SIZEOF_ZONE_METADATA_V3);
ASSERT_ALIGNED_BEGIN(struct heap_header);
ASSERT_ALIGNED_FIELD(struct heap_header, signature);
ASSERT_ALIGNED_FIELD(struct heap_header, major);
ASSERT_ALIGNED_FIELD(struct heap_header, minor);
ASSERT_ALIGNED_FIELD(struct heap_header, unused);
ASSERT_ALIGNED_FIELD(struct heap_header, chunksize);
ASSERT_ALIGNED_FIELD(struct heap_header, chunks_per_zone);
ASSERT_ALIGNED_FIELD(struct heap_header, reserved);
ASSERT_ALIGNED_FIELD(struct heap_header, checksum);
ASSERT_ALIGNED_CHECK(struct heap_header);
UT_COMPILE_ERROR_ON(sizeof(struct heap_header) !=
SIZEOF_HEAP_HDR_V3);
ASSERT_ALIGNED_BEGIN(struct allocation_header_legacy);
ASSERT_ALIGNED_FIELD(struct allocation_header_legacy, unused);
ASSERT_ALIGNED_FIELD(struct allocation_header_legacy, size);
ASSERT_ALIGNED_FIELD(struct allocation_header_legacy, unused2);
ASSERT_ALIGNED_FIELD(struct allocation_header_legacy, root_size);
ASSERT_ALIGNED_FIELD(struct allocation_header_legacy, type_num);
ASSERT_ALIGNED_CHECK(struct allocation_header_legacy);
UT_COMPILE_ERROR_ON(sizeof(struct allocation_header_legacy) !=
SIZEOF_LEGACY_ALLOCATION_HEADER_V3);
ASSERT_ALIGNED_BEGIN(struct allocation_header_compact);
ASSERT_ALIGNED_FIELD(struct allocation_header_compact, size);
ASSERT_ALIGNED_FIELD(struct allocation_header_compact, extra);
ASSERT_ALIGNED_CHECK(struct allocation_header_compact);
UT_COMPILE_ERROR_ON(sizeof(struct allocation_header_compact) !=
SIZEOF_COMPACT_ALLOCATION_HEADER_V3);
ASSERT_ALIGNED_BEGIN(struct ulog);
ASSERT_ALIGNED_FIELD(struct ulog, checksum);
ASSERT_ALIGNED_FIELD(struct ulog, next);
ASSERT_ALIGNED_FIELD(struct ulog, capacity);
ASSERT_ALIGNED_FIELD(struct ulog, gen_num);
ASSERT_ALIGNED_FIELD(struct ulog, flags);
ASSERT_ALIGNED_FIELD(struct ulog, unused);
ASSERT_ALIGNED_CHECK(struct ulog);
UT_COMPILE_ERROR_ON(sizeof(struct ulog) !=
SIZEOF_ULOG_V4);
ASSERT_ALIGNED_BEGIN(struct ulog_entry_base);
ASSERT_ALIGNED_FIELD(struct ulog_entry_base, offset);
ASSERT_ALIGNED_CHECK(struct ulog_entry_base);
UT_COMPILE_ERROR_ON(sizeof(struct ulog_entry_base) !=
SIZEOF_ULOG_BASE_ENTRY_V4);
ASSERT_ALIGNED_BEGIN(struct ulog_entry_val);
ASSERT_ALIGNED_FIELD(struct ulog_entry_val, base);
ASSERT_ALIGNED_FIELD(struct ulog_entry_val, value);
ASSERT_ALIGNED_CHECK(struct ulog_entry_val);
UT_COMPILE_ERROR_ON(sizeof(struct ulog_entry_val) !=
SIZEOF_ULOG_VAL_ENTRY_V4);
ASSERT_ALIGNED_BEGIN(struct ulog_entry_buf);
ASSERT_ALIGNED_FIELD(struct ulog_entry_buf, base);
ASSERT_ALIGNED_FIELD(struct ulog_entry_buf, checksum);
ASSERT_ALIGNED_FIELD(struct ulog_entry_buf, size);
ASSERT_ALIGNED_CHECK(struct ulog_entry_buf);
UT_COMPILE_ERROR_ON(sizeof(struct ulog_entry_buf) !=
SIZEOF_ULOG_BUF_ENTRY_V4);
ASSERT_ALIGNED_BEGIN(PMEMoid);
ASSERT_ALIGNED_FIELD(PMEMoid, pool_uuid_lo);
ASSERT_ALIGNED_FIELD(PMEMoid, off);
ASSERT_ALIGNED_CHECK(PMEMoid);
UT_COMPILE_ERROR_ON(sizeof(PMEMoid) !=
SIZEOF_PMEMOID_V3);
UT_COMPILE_ERROR_ON(sizeof(PMEMmutex) != SIZEOF_LOCK_V3);
UT_COMPILE_ERROR_ON(sizeof(PMEMmutex) != sizeof(PMEMmutex_internal));
UT_COMPILE_ERROR_ON(util_alignof(PMEMmutex) !=
util_alignof(PMEMmutex_internal));
UT_COMPILE_ERROR_ON(util_alignof(PMEMmutex) !=
util_alignof(os_mutex_t));
UT_COMPILE_ERROR_ON(util_alignof(PMEMmutex) !=
util_alignof(uint64_t));
UT_COMPILE_ERROR_ON(sizeof(PMEMrwlock) != SIZEOF_LOCK_V3);
UT_COMPILE_ERROR_ON(util_alignof(PMEMrwlock) !=
util_alignof(PMEMrwlock_internal));
UT_COMPILE_ERROR_ON(util_alignof(PMEMrwlock) !=
util_alignof(os_rwlock_t));
UT_COMPILE_ERROR_ON(util_alignof(PMEMrwlock) !=
util_alignof(uint64_t));
UT_COMPILE_ERROR_ON(sizeof(PMEMcond) != SIZEOF_LOCK_V3);
UT_COMPILE_ERROR_ON(util_alignof(PMEMcond) !=
util_alignof(PMEMcond_internal));
UT_COMPILE_ERROR_ON(util_alignof(PMEMcond) !=
util_alignof(os_cond_t));
UT_COMPILE_ERROR_ON(util_alignof(PMEMcond) !=
util_alignof(uint64_t));
UT_COMPILE_ERROR_ON(sizeof(struct foo) != SIZEOF_LIST_ENTRY_V3);
UT_COMPILE_ERROR_ON(sizeof(struct list_entry) != SIZEOF_LIST_ENTRY_V3);
UT_COMPILE_ERROR_ON(sizeof(struct foo_head) != SIZEOF_LIST_HEAD_V3);
UT_COMPILE_ERROR_ON(sizeof(struct list_head) != SIZEOF_LIST_HEAD_V3);
ASSERT_ALIGNED_BEGIN(struct lane_layout);
ASSERT_ALIGNED_FIELD(struct lane_layout, internal);
ASSERT_ALIGNED_FIELD(struct lane_layout, external);
ASSERT_ALIGNED_FIELD(struct lane_layout, undo);
ASSERT_ALIGNED_CHECK(struct lane_layout);
UT_COMPILE_ERROR_ON(sizeof(struct lane_layout) !=
SIZEOF_LANE_V3);
UT_COMPILE_ERROR_ON(LANE_UNDO_SIZE != SIZEOF_LANE_UNDO_SIZE);
UT_COMPILE_ERROR_ON(LANE_REDO_EXTERNAL_SIZE !=
SIZEOF_LANE_REDO_EXTERNAL_SIZE);
UT_COMPILE_ERROR_ON(LANE_REDO_INTERNAL_SIZE !=
SIZEOF_LANE_REDO_INTERNAL_SIZE);
DONE(NULL);
}
#ifdef _MSC_VER
/*
* Since libpmemobj is linked statically, we need to invoke its ctor/dtor.
*/
MSVC_CONSTR(libpmemobj_init)
MSVC_DESTR(libpmemobj_fini)
#endif
| 8,411 | 35.103004 | 79 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_tx_add_range_direct/obj_tx_add_range_direct.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* obj_tx_add_range_direct.c -- unit test for pmemobj_tx_add_range_direct
*/
#include <string.h>
#include <stddef.h>
#include "tx.h"
#include "unittest.h"
#include "util.h"
#include "valgrind_internal.h"
#define LAYOUT_NAME "tx_add_range_direct"
#define OBJ_SIZE 1024
enum type_number {
TYPE_OBJ,
TYPE_OBJ_ABORT,
};
TOID_DECLARE(struct object, 0);
struct object {
size_t value;
unsigned char data[OBJ_SIZE - sizeof(size_t)];
};
#define VALUE_OFF (offsetof(struct object, value))
#define VALUE_SIZE (sizeof(size_t))
#define DATA_OFF (offsetof(struct object, data))
#define DATA_SIZE (OBJ_SIZE - sizeof(size_t))
#define TEST_VALUE_1 1
#define TEST_VALUE_2 2
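/*
* On-media layout of struct object (derived from the offsets above):
*
*	[ value (sizeof(size_t) bytes) | data (OBJ_SIZE - sizeof(size_t)) ]
*	^ VALUE_OFF                     ^ DATA_OFF
*/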
/*
* do_tx_zalloc -- do tx allocation with specified type number
*/
static PMEMoid
do_tx_zalloc(PMEMobjpool *pop, unsigned type_num)
{
PMEMoid ret = OID_NULL;
TX_BEGIN(pop) {
ret = pmemobj_tx_zalloc(sizeof(struct object), type_num);
} TX_END
return ret;
}
/*
* do_tx_alloc -- do tx allocation and initialize first num bytes
*/
static PMEMoid
do_tx_alloc(PMEMobjpool *pop, uint64_t type_num, uint64_t init_num)
{
PMEMoid ret = OID_NULL;
TX_BEGIN(pop) {
ret = pmemobj_tx_alloc(sizeof(struct object), type_num);
pmemobj_memset(pop, pmemobj_direct(ret), 0, init_num, 0);
} TX_END
return ret;
}
/*
* do_tx_add_range_alloc_commit -- call add_range_direct on object allocated
* within the same transaction and commit the transaction
*/
static void
do_tx_add_range_alloc_commit(PMEMobjpool *pop)
{
int ret;
TOID(struct object) obj;
TX_BEGIN(pop) {
TOID_ASSIGN(obj, do_tx_zalloc(pop, TYPE_OBJ));
UT_ASSERT(!TOID_IS_NULL(obj));
char *ptr = (char *)pmemobj_direct(obj.oid);
ret = pmemobj_tx_add_range_direct(ptr + VALUE_OFF,
VALUE_SIZE);
UT_ASSERTeq(ret, 0);
D_RW(obj)->value = TEST_VALUE_1;
ret = pmemobj_tx_add_range_direct(ptr + DATA_OFF,
DATA_SIZE);
UT_ASSERTeq(ret, 0);
pmemobj_memset_persist(pop, D_RW(obj)->data, TEST_VALUE_2,
DATA_SIZE);
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1);
size_t i;
for (i = 0; i < DATA_SIZE; i++)
UT_ASSERTeq(D_RO(obj)->data[i], TEST_VALUE_2);
}
/*
* do_tx_add_range_alloc_abort -- call add_range_direct on object allocated
* within the same transaction and abort the transaction
*/
static void
do_tx_add_range_alloc_abort(PMEMobjpool *pop)
{
int ret;
TOID(struct object) obj;
TX_BEGIN(pop) {
TOID_ASSIGN(obj, do_tx_zalloc(pop, TYPE_OBJ_ABORT));
UT_ASSERT(!TOID_IS_NULL(obj));
char *ptr = (char *)pmemobj_direct(obj.oid);
ret = pmemobj_tx_add_range_direct(ptr + VALUE_OFF,
VALUE_SIZE);
UT_ASSERTeq(ret, 0);
D_RW(obj)->value = TEST_VALUE_1;
ret = pmemobj_tx_add_range_direct(ptr + DATA_OFF,
DATA_SIZE);
UT_ASSERTeq(ret, 0);
pmemobj_memset_persist(pop, D_RW(obj)->data, TEST_VALUE_2,
DATA_SIZE);
pmemobj_tx_abort(-1);
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_END
TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_OBJ_ABORT));
UT_ASSERT(TOID_IS_NULL(obj));
}
/*
* do_tx_add_range_twice_commit -- call add_range_direct on the same area
* twice and commit the transaction
*/
static void
do_tx_add_range_twice_commit(PMEMobjpool *pop)
{
int ret;
TOID(struct object) obj;
TOID_ASSIGN(obj, do_tx_zalloc(pop, TYPE_OBJ));
UT_ASSERT(!TOID_IS_NULL(obj));
TX_BEGIN(pop) {
char *ptr = (char *)pmemobj_direct(obj.oid);
ret = pmemobj_tx_add_range_direct(ptr + VALUE_OFF,
VALUE_SIZE);
UT_ASSERTeq(ret, 0);
D_RW(obj)->value = TEST_VALUE_1;
ret = pmemobj_tx_add_range_direct(ptr + VALUE_OFF,
VALUE_SIZE);
UT_ASSERTeq(ret, 0);
D_RW(obj)->value = TEST_VALUE_2;
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_2);
}
/*
* do_tx_add_range_twice_abort -- call add_range_direct on the same area
* twice and abort the transaction
*/
static void
do_tx_add_range_twice_abort(PMEMobjpool *pop)
{
int ret;
TOID(struct object) obj;
TOID_ASSIGN(obj, do_tx_zalloc(pop, TYPE_OBJ));
UT_ASSERT(!TOID_IS_NULL(obj));
TX_BEGIN(pop) {
char *ptr = (char *)pmemobj_direct(obj.oid);
ret = pmemobj_tx_add_range_direct(ptr + VALUE_OFF,
VALUE_SIZE);
UT_ASSERTeq(ret, 0);
D_RW(obj)->value = TEST_VALUE_1;
ret = pmemobj_tx_add_range_direct(ptr + VALUE_OFF,
VALUE_SIZE);
UT_ASSERTeq(ret, 0);
D_RW(obj)->value = TEST_VALUE_2;
pmemobj_tx_abort(-1);
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_END
UT_ASSERTeq(D_RO(obj)->value, 0);
}
/*
* do_tx_add_range_abort_after_nested -- call add_range_direct in a nested
* tx, commit the nested tx, then abort the outer tx
*/
static void
do_tx_add_range_abort_after_nested(PMEMobjpool *pop)
{
int ret;
TOID(struct object) obj1;
TOID(struct object) obj2;
TOID_ASSIGN(obj1, do_tx_zalloc(pop, TYPE_OBJ));
TOID_ASSIGN(obj2, do_tx_zalloc(pop, TYPE_OBJ));
TX_BEGIN(pop) {
char *ptr1 = (char *)pmemobj_direct(obj1.oid);
ret = pmemobj_tx_add_range_direct(ptr1 + VALUE_OFF,
VALUE_SIZE);
UT_ASSERTeq(ret, 0);
D_RW(obj1)->value = TEST_VALUE_1;
TX_BEGIN(pop) {
char *ptr2 = (char *)pmemobj_direct(obj2.oid);
ret = pmemobj_tx_add_range_direct(ptr2 + DATA_OFF,
DATA_SIZE);
UT_ASSERTeq(ret, 0);
pmemobj_memset_persist(pop, D_RW(obj2)->data,
TEST_VALUE_2, DATA_SIZE);
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
pmemobj_tx_abort(-1);
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_END
UT_ASSERTeq(D_RO(obj1)->value, 0);
size_t i;
for (i = 0; i < DATA_SIZE; i++)
UT_ASSERTeq(D_RO(obj2)->data[i], 0);
}
/*
* do_tx_add_range_abort_nested -- call add_range_direct and abort the
* nested tx
*/
static void
do_tx_add_range_abort_nested(PMEMobjpool *pop)
{
int ret;
TOID(struct object) obj1;
TOID(struct object) obj2;
TOID_ASSIGN(obj1, do_tx_zalloc(pop, TYPE_OBJ));
TOID_ASSIGN(obj2, do_tx_zalloc(pop, TYPE_OBJ));
TX_BEGIN(pop) {
char *ptr1 = (char *)pmemobj_direct(obj1.oid);
ret = pmemobj_tx_add_range_direct(ptr1 + VALUE_OFF,
VALUE_SIZE);
UT_ASSERTeq(ret, 0);
D_RW(obj1)->value = TEST_VALUE_1;
TX_BEGIN(pop) {
char *ptr2 = (char *)pmemobj_direct(obj2.oid);
ret = pmemobj_tx_add_range_direct(ptr2 + DATA_OFF,
DATA_SIZE);
UT_ASSERTeq(ret, 0);
pmemobj_memset_persist(pop, D_RW(obj2)->data,
TEST_VALUE_2, DATA_SIZE);
pmemobj_tx_abort(-1);
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_END
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_END
UT_ASSERTeq(D_RO(obj1)->value, 0);
size_t i;
for (i = 0; i < DATA_SIZE; i++)
UT_ASSERTeq(D_RO(obj2)->data[i], 0);
}
/*
* do_tx_add_range_commit_nested -- call add_range_direct in a nested tx
* and commit both transactions
*/
static void
do_tx_add_range_commit_nested(PMEMobjpool *pop)
{
int ret;
TOID(struct object) obj1;
TOID(struct object) obj2;
TOID_ASSIGN(obj1, do_tx_zalloc(pop, TYPE_OBJ));
TOID_ASSIGN(obj2, do_tx_zalloc(pop, TYPE_OBJ));
TX_BEGIN(pop) {
char *ptr1 = (char *)pmemobj_direct(obj1.oid);
ret = pmemobj_tx_add_range_direct(ptr1 + VALUE_OFF,
VALUE_SIZE);
UT_ASSERTeq(ret, 0);
D_RW(obj1)->value = TEST_VALUE_1;
TX_BEGIN(pop) {
char *ptr2 = (char *)pmemobj_direct(obj2.oid);
ret = pmemobj_tx_add_range_direct(ptr2 + DATA_OFF,
DATA_SIZE);
UT_ASSERTeq(ret, 0);
pmemobj_memset_persist(pop, D_RW(obj2)->data,
TEST_VALUE_2, DATA_SIZE);
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
UT_ASSERTeq(D_RO(obj1)->value, TEST_VALUE_1);
size_t i;
for (i = 0; i < DATA_SIZE; i++)
UT_ASSERTeq(D_RO(obj2)->data[i], TEST_VALUE_2);
}
/*
* do_tx_add_range_abort -- call add_range_direct and abort the tx
*/
static void
do_tx_add_range_abort(PMEMobjpool *pop)
{
int ret;
TOID(struct object) obj;
TOID_ASSIGN(obj, do_tx_zalloc(pop, TYPE_OBJ));
TX_BEGIN(pop) {
char *ptr = (char *)pmemobj_direct(obj.oid);
ret = pmemobj_tx_add_range_direct(ptr + VALUE_OFF,
VALUE_SIZE);
UT_ASSERTeq(ret, 0);
D_RW(obj)->value = TEST_VALUE_1;
pmemobj_tx_abort(-1);
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_END
UT_ASSERTeq(D_RO(obj)->value, 0);
}
/*
* do_tx_add_range_commit -- call add_range_direct and commit tx
*/
static void
do_tx_add_range_commit(PMEMobjpool *pop)
{
int ret;
TOID(struct object) obj;
TOID_ASSIGN(obj, do_tx_zalloc(pop, TYPE_OBJ));
TX_BEGIN(pop) {
char *ptr = (char *)pmemobj_direct(obj.oid);
ret = pmemobj_tx_add_range_direct(ptr + VALUE_OFF,
VALUE_SIZE);
UT_ASSERTeq(ret, 0);
D_RW(obj)->value = TEST_VALUE_1;
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1);
}
/*
* do_tx_xadd_range_no_flush_commit -- call xadd_range_direct with
* POBJ_XADD_NO_FLUSH flag set and commit tx
*/
static void
do_tx_xadd_range_no_flush_commit(PMEMobjpool *pop)
{
int ret;
TOID(struct object) obj;
TOID_ASSIGN(obj, do_tx_zalloc(pop, TYPE_OBJ));
TX_BEGIN(pop) {
char *ptr = (char *)pmemobj_direct(obj.oid);
ret = pmemobj_tx_xadd_range_direct(ptr + VALUE_OFF,
VALUE_SIZE, POBJ_XADD_NO_FLUSH);
UT_ASSERTeq(ret, 0);
D_RW(obj)->value = TEST_VALUE_1;
/* let pmemcheck find we didn't flush it */
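/*
 * Illustrative note: with POBJ_XADD_NO_FLUSH the range is still
 * snapshotted, but libpmemobj will not flush it on commit. A real
 * caller would be expected to flush it explicitly, e.g. with
 * pmemobj_flush(pop, ptr + VALUE_OFF, VALUE_SIZE); the flush is
 * omitted here on purpose so that pmemcheck can report it.
 */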
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1);
}
/*
* do_tx_xadd_range_no_snapshot_commit -- call xadd_range_direct with
* POBJ_XADD_NO_SNAPSHOT flag, commit the transaction
*/
static void
do_tx_xadd_range_no_snapshot_commit(PMEMobjpool *pop)
{
int ret;
TOID(struct object) obj;
TOID_ASSIGN(obj, do_tx_zalloc(pop, TYPE_OBJ));
TX_BEGIN(pop) {
char *ptr = (char *)pmemobj_direct(obj.oid);
ret = pmemobj_tx_xadd_range_direct(ptr + VALUE_OFF,
VALUE_SIZE, POBJ_XADD_NO_SNAPSHOT);
UT_ASSERTeq(ret, 0);
D_RW(obj)->value = TEST_VALUE_1;
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1);
}
/*
* do_tx_xadd_range_no_snapshot_abort -- call xadd_range_direct with
* POBJ_XADD_NO_SNAPSHOT flag, modify the value, abort the transaction
*/
static void
do_tx_xadd_range_no_snapshot_abort(PMEMobjpool *pop)
{
int ret;
TOID(struct object) obj;
TOID_ASSIGN(obj, do_tx_zalloc(pop, TYPE_OBJ));
D_RW(obj)->value = TEST_VALUE_1;
TX_BEGIN(pop) {
char *ptr = (char *)pmemobj_direct(obj.oid);
ret = pmemobj_tx_xadd_range_direct(ptr + VALUE_OFF, VALUE_SIZE,
POBJ_XADD_NO_SNAPSHOT);
UT_ASSERTeq(ret, 0);
D_RW(obj)->value = TEST_VALUE_2;
pmemobj_tx_abort(-1);
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_END
/*
* value added with NO_SNAPSHOT flag should NOT be rolled back
* after abort
*/
UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_2);
}
/*
* do_tx_xadd_range_no_uninit_check_commit -- call xadd_range_direct for
* initialized memory with POBJ_XADD_ASSUME_INITIALIZED flag set and commit the
* tx
*/
static void
do_tx_xadd_range_no_uninit_check_commit(PMEMobjpool *pop)
{
int ret;
TOID(struct object) obj;
TOID_ASSIGN(obj, do_tx_zalloc(pop, TYPE_OBJ));
TX_BEGIN(pop) {
char *ptr = (char *)pmemobj_direct(obj.oid);
ret = pmemobj_tx_xadd_range_direct(ptr + VALUE_OFF, VALUE_SIZE,
POBJ_XADD_ASSUME_INITIALIZED);
UT_ASSERTeq(ret, 0);
D_RW(obj)->value = TEST_VALUE_1;
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1);
}
/*
* do_tx_xadd_range_no_uninit_check_commit_uninit -- call xadd_range_direct for
* uninitialized memory with POBJ_XADD_ASSUME_INITIALIZED flag set and commit
* the tx
*/
static void
do_tx_xadd_range_no_uninit_check_commit_uninit(PMEMobjpool *pop)
{
int ret;
TOID(struct object) obj;
TOID_ASSIGN(obj, do_tx_alloc(pop, TYPE_OBJ, 0));
TX_BEGIN(pop) {
char *ptr = (char *)pmemobj_direct(obj.oid);
ret = pmemobj_tx_xadd_range_direct(ptr + VALUE_OFF, VALUE_SIZE,
POBJ_XADD_ASSUME_INITIALIZED);
UT_ASSERTeq(ret, 0);
ret = pmemobj_tx_xadd_range_direct(ptr + DATA_OFF, DATA_SIZE,
POBJ_XADD_ASSUME_INITIALIZED);
UT_ASSERTeq(ret, 0);
D_RW(obj)->value = TEST_VALUE_1;
D_RW(obj)->data[256] = TEST_VALUE_2;
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1);
UT_ASSERTeq(D_RO(obj)->data[256], TEST_VALUE_2);
}
/*
* do_tx_xadd_range_no_uninit_check_commit_part_uninit -- call
* xadd_range_direct for partially uninitialized memory with
* POBJ_XADD_ASSUME_INITIALIZED flag set only for the uninitialized part and
* commit the tx
*/
static void
do_tx_xadd_range_no_uninit_check_commit_part_uninit(PMEMobjpool *pop)
{
int ret;
TOID(struct object) obj;
TOID_ASSIGN(obj, do_tx_alloc(pop, TYPE_OBJ, VALUE_SIZE));
TX_BEGIN(pop) {
char *ptr = (char *)pmemobj_direct(obj.oid);
ret = pmemobj_tx_add_range_direct(ptr + VALUE_OFF, VALUE_SIZE);
UT_ASSERTeq(ret, 0);
ret = pmemobj_tx_xadd_range_direct(ptr + DATA_OFF, DATA_SIZE,
POBJ_XADD_ASSUME_INITIALIZED);
UT_ASSERTeq(ret, 0);
D_RW(obj)->value = TEST_VALUE_1;
D_RW(obj)->data[256] = TEST_VALUE_2;
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1);
UT_ASSERTeq(D_RO(obj)->data[256], TEST_VALUE_2);
}
/*
* do_tx_add_range_no_uninit_check_commit_no_flag -- call add_range_direct
* for partially uninitialized memory without any flags.
*/
static void
do_tx_add_range_no_uninit_check_commit_no_flag(PMEMobjpool *pop)
{
int ret;
TOID(struct object) obj;
TOID_ASSIGN(obj, do_tx_alloc(pop, TYPE_OBJ, VALUE_SIZE));
TX_BEGIN(pop) {
char *ptr = (char *)pmemobj_direct(obj.oid);
ret = pmemobj_tx_add_range_direct(ptr + VALUE_OFF, VALUE_SIZE);
UT_ASSERTeq(ret, 0);
ret = pmemobj_tx_add_range_direct(ptr + DATA_OFF, DATA_SIZE);
UT_ASSERTeq(ret, 0);
D_RW(obj)->value = TEST_VALUE_1;
D_RW(obj)->data[256] = TEST_VALUE_2;
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1);
UT_ASSERTeq(D_RO(obj)->data[256], TEST_VALUE_2);
}
/*
* do_tx_xadd_range_no_uninit_check_abort -- call xadd_range_direct with
* POBJ_XADD_ASSUME_INITIALIZED flag, modify the values inside an aborted
* transaction
*/
static void
do_tx_xadd_range_no_uninit_check_abort(PMEMobjpool *pop)
{
int ret;
TOID(struct object) obj;
TOID_ASSIGN(obj, do_tx_alloc(pop, TYPE_OBJ, 0));
TX_BEGIN(pop) {
char *ptr = (char *)pmemobj_direct(obj.oid);
ret = pmemobj_tx_xadd_range_direct(ptr + VALUE_OFF, VALUE_SIZE,
POBJ_XADD_ASSUME_INITIALIZED);
UT_ASSERTeq(ret, 0);
ret = pmemobj_tx_xadd_range_direct(ptr + DATA_OFF, DATA_SIZE,
POBJ_XADD_ASSUME_INITIALIZED);
UT_ASSERTeq(ret, 0);
D_RW(obj)->value = TEST_VALUE_1;
D_RW(obj)->data[256] = TEST_VALUE_2;
pmemobj_tx_abort(-1);
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_END
}
/*
* do_tx_commit_and_abort -- use range cache, commit and then abort to make
* sure that it won't affect previously modified data.
*/
static void
do_tx_commit_and_abort(PMEMobjpool *pop)
{
TOID(struct object) obj;
TOID_ASSIGN(obj, do_tx_zalloc(pop, TYPE_OBJ));
TX_BEGIN(pop) {
TX_SET(obj, value, TEST_VALUE_1); /* this will land in cache */
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
TX_BEGIN(pop) {
pmemobj_tx_abort(-1);
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_END
UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1);
}
/*
* test_add_direct_macros -- test TX_ADD_DIRECT, TX_ADD_FIELD_DIRECT and
* TX_SET_DIRECT
*/
static void
test_add_direct_macros(PMEMobjpool *pop)
{
TOID(struct object) obj;
TOID_ASSIGN(obj, do_tx_zalloc(pop, TYPE_OBJ));
TX_BEGIN(pop) {
struct object *o = D_RW(obj);
TX_SET_DIRECT(o, value, TEST_VALUE_1);
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1);
TX_BEGIN(pop) {
struct object *o = D_RW(obj);
TX_ADD_DIRECT(o);
o->value = TEST_VALUE_2;
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_2);
TX_BEGIN(pop) {
struct object *o = D_RW(obj);
TX_ADD_FIELD_DIRECT(o, value);
o->value = TEST_VALUE_1;
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1);
}
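/*
 * For reference, the direct macros exercised above are thin wrappers
 * around the function API (shown as an approximation, not the exact
 * library definitions):
 * TX_ADD_DIRECT(p) ~ pmemobj_tx_add_range_direct(p, sizeof(*(p)))
 * TX_ADD_FIELD_DIRECT(p, f) ~ pmemobj_tx_add_range_direct(&(p)->f,
 * sizeof((p)->f))
 * TX_SET_DIRECT(p, f, v) ~ TX_ADD_FIELD_DIRECT(p, f) followed by the
 * assignment (p)->f = (v)
 */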
#define MAX_CACHED_RANGES 100
/*
* test_tx_corruption_bug -- test whether tx_adds for small objects from one
* transaction do NOT leak to the next transaction
*/
static void
test_tx_corruption_bug(PMEMobjpool *pop)
{
TOID(struct object) obj;
TOID_ASSIGN(obj, do_tx_zalloc(pop, TYPE_OBJ));
struct object *o = D_RW(obj);
unsigned char i;
UT_COMPILE_ERROR_ON(1.5 * MAX_CACHED_RANGES > 255);
TX_BEGIN(pop) {
for (i = 0; i < 1.5 * MAX_CACHED_RANGES; ++i) {
TX_ADD_DIRECT(&o->data[i]);
o->data[i] = i;
}
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
for (i = 0; i < 1.5 * MAX_CACHED_RANGES; ++i)
UT_ASSERTeq((unsigned char)o->data[i], i);
TX_BEGIN(pop) {
for (i = 0; i < 0.1 * MAX_CACHED_RANGES; ++i) {
TX_ADD_DIRECT(&o->data[i]);
o->data[i] = i + 10;
}
pmemobj_tx_abort(EINVAL);
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_END
for (i = 0; i < 1.5 * MAX_CACHED_RANGES; ++i)
UT_ASSERTeq((unsigned char)o->data[i], i);
pmemobj_free(&obj.oid);
}
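/*
 * do_tx_add_range_too_large -- try to add a range larger than
 * PMEMOBJ_MAX_ALLOC_SIZE and exercise the no-abort failure modes
 */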
static void
do_tx_add_range_too_large(PMEMobjpool *pop)
{
TOID(struct object) obj;
TOID_ASSIGN(obj, do_tx_zalloc(pop, TYPE_OBJ));
int ret = 0;
TX_BEGIN(pop) {
ret = pmemobj_tx_add_range_direct(pmemobj_direct(obj.oid),
PMEMOBJ_MAX_ALLOC_SIZE + 1);
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_ONABORT {
UT_ASSERTeq(errno, EINVAL);
UT_ASSERTeq(ret, 0);
} TX_END
errno = 0;
ret = 0;
TX_BEGIN(pop) {
ret = pmemobj_tx_xadd_range_direct(pmemobj_direct(obj.oid),
PMEMOBJ_MAX_ALLOC_SIZE + 1, POBJ_XADD_NO_ABORT);
} TX_ONCOMMIT {
UT_ASSERTeq(errno, EINVAL);
UT_ASSERTeq(ret, EINVAL);
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
errno = 0;
ret = 0;
TX_BEGIN(pop) {
pmemobj_tx_set_failure_behavior(POBJ_TX_FAILURE_RETURN);
ret = pmemobj_tx_add_range_direct(pmemobj_direct(obj.oid),
PMEMOBJ_MAX_ALLOC_SIZE + 1);
} TX_ONCOMMIT {
UT_ASSERTeq(errno, EINVAL);
UT_ASSERTeq(ret, EINVAL);
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
errno = 0;
ret = 0;
TX_BEGIN(pop) {
pmemobj_tx_set_failure_behavior(POBJ_TX_FAILURE_RETURN);
ret = pmemobj_tx_xadd_range_direct(pmemobj_direct(obj.oid),
PMEMOBJ_MAX_ALLOC_SIZE + 1, 0);
} TX_ONCOMMIT {
UT_ASSERTeq(errno, EINVAL);
UT_ASSERTeq(ret, EINVAL);
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
errno = 0;
}
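/*
 * do_tx_add_range_lots_of_small_snapshots -- add many small adjacent
 * snapshots, enough to fill more than one range cache
 */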
static void
do_tx_add_range_lots_of_small_snapshots(PMEMobjpool *pop)
{
size_t s = TX_DEFAULT_RANGE_CACHE_SIZE * 2;
size_t snapshot_s = 8;
PMEMoid obj;
int ret = pmemobj_zalloc(pop, &obj, s, 0);
UT_ASSERTeq(ret, 0);
TX_BEGIN(pop) {
for (size_t n = 0; n < s; n += snapshot_s) {
void *addr = (void *)((size_t)pmemobj_direct(obj) + n);
pmemobj_tx_add_range_direct(addr, snapshot_s);
}
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
}
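/*
 * do_tx_add_cache_overflowing_range -- add ranges that overflow a single
 * range cache and verify that an abort rolls all of them back
 */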
static void
do_tx_add_cache_overflowing_range(PMEMobjpool *pop)
{
/*
* This test adds snapshot to the cache, but in way that results in
* one of the add_range being split into two caches.
*/
size_t s = TX_DEFAULT_RANGE_CACHE_SIZE * 2;
size_t snapshot_s = TX_DEFAULT_RANGE_CACHE_THRESHOLD - 8;
PMEMoid obj;
int ret = pmemobj_zalloc(pop, &obj, s, 0);
UT_ASSERTeq(ret, 0);
TX_BEGIN(pop) {
size_t n = 0;
while (n != s) {
if (n + snapshot_s > s)
snapshot_s = s - n;
void *addr = (void *)((size_t)pmemobj_direct(obj) + n);
pmemobj_tx_add_range_direct(addr, snapshot_s);
memset(addr, 0xc, snapshot_s);
n += snapshot_s;
}
pmemobj_tx_abort(0);
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_END
UT_ASSERT(util_is_zeroed(pmemobj_direct(obj), s));
UT_ASSERTne(errno, 0);
errno = 0;
pmemobj_free(&obj);
}
int
main(int argc, char *argv[])
{
START(argc, argv, "obj_tx_add_range_direct");
util_init();
if (argc != 2)
UT_FATAL("usage: %s [file]", argv[0]);
PMEMobjpool *pop;
if ((pop = pmemobj_create(argv[1], LAYOUT_NAME, PMEMOBJ_MIN_POOL * 4,
S_IWUSR | S_IRUSR)) == NULL)
UT_FATAL("!pmemobj_create");
do_tx_add_range_commit(pop);
VALGRIND_WRITE_STATS;
do_tx_add_range_abort(pop);
VALGRIND_WRITE_STATS;
do_tx_add_range_commit_nested(pop);
VALGRIND_WRITE_STATS;
do_tx_add_range_abort_nested(pop);
VALGRIND_WRITE_STATS;
do_tx_add_range_abort_after_nested(pop);
VALGRIND_WRITE_STATS;
do_tx_add_range_twice_commit(pop);
VALGRIND_WRITE_STATS;
do_tx_add_range_twice_abort(pop);
VALGRIND_WRITE_STATS;
do_tx_add_range_alloc_commit(pop);
VALGRIND_WRITE_STATS;
do_tx_add_range_alloc_abort(pop);
VALGRIND_WRITE_STATS;
do_tx_commit_and_abort(pop);
VALGRIND_WRITE_STATS;
test_add_direct_macros(pop);
VALGRIND_WRITE_STATS;
test_tx_corruption_bug(pop);
VALGRIND_WRITE_STATS;
do_tx_add_range_too_large(pop);
VALGRIND_WRITE_STATS;
do_tx_add_range_lots_of_small_snapshots(pop);
VALGRIND_WRITE_STATS;
do_tx_add_cache_overflowing_range(pop);
VALGRIND_WRITE_STATS;
do_tx_xadd_range_no_snapshot_commit(pop);
VALGRIND_WRITE_STATS;
do_tx_xadd_range_no_snapshot_abort(pop);
VALGRIND_WRITE_STATS;
do_tx_xadd_range_no_uninit_check_commit(pop);
VALGRIND_WRITE_STATS;
do_tx_xadd_range_no_uninit_check_commit_uninit(pop);
VALGRIND_WRITE_STATS;
do_tx_xadd_range_no_uninit_check_commit_part_uninit(pop);
VALGRIND_WRITE_STATS;
do_tx_xadd_range_no_uninit_check_abort(pop);
VALGRIND_WRITE_STATS;
do_tx_add_range_no_uninit_check_commit_no_flag(pop);
VALGRIND_WRITE_STATS;
do_tx_xadd_range_no_flush_commit(pop);
pmemobj_close(pop);
DONE(NULL);
}
| 20,975 | 22.177901 | 79 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_tx_add_range_direct/TESTS.py
|
#!../env.py
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2019-2020, Intel Corporation
"""
unit tests for pmemobj_tx_add_range_direct
and pmemobj_tx_xadd_range_direct
"""
from os import path
import testframework as t
@t.require_valgrind_disabled('pmemcheck', 'memcheck')
class TEST0(t.Test):
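"""run the test binary with pmemcheck and memcheck disabled"""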
test_type = t.Medium
def run(self, ctx):
testfile = path.join(ctx.testdir, 'testfile0')
ctx.exec('obj_tx_add_range_direct', testfile)
@t.require_valgrind_enabled('pmemcheck')
class TEST1(t.Test):
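"""run the test binary under pmemcheck"""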
test_type = t.Medium
def run(self, ctx):
ctx.valgrind.add_opt('--mult-stores=no')
testfile = path.join(ctx.testdir, 'testfile1')
ctx.exec('obj_tx_add_range_direct', testfile)
@t.require_valgrind_enabled('memcheck')
@t.require_build('debug')
class TEST2(t.Test):
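"""run the test binary under memcheck (debug build only)"""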
test_type = t.Medium
def run(self, ctx):
testfile = path.join(ctx.testdir, 'testfile2')
ctx.exec('obj_tx_add_range_direct', testfile)
| 983 | 21.883721 | 54 |
py
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_many_size_allocs/obj_many_size_allocs.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2018, Intel Corporation */
/*
* obj_many_size_allocs.c -- allocation of many objects with different sizes
*
*/
#include <stddef.h>
#include "unittest.h"
#include "heap.h"
#define LAYOUT_NAME "many_size_allocs"
#define TEST_ALLOC_SIZE 2048
#define LAZY_LOAD_SIZE 10
#define LAZY_LOAD_BIG_SIZE 150
struct cargs {
size_t size;
};
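/*
 * test_constructor -- object constructor that fills the allocation
 * with a size-dependent pattern
 */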
static int
test_constructor(PMEMobjpool *pop, void *addr, void *args)
{
struct cargs *a = args;
/* do not use pmem_memset_persist() here */
pmemobj_memset_persist(pop, addr, a->size % 256, a->size);
return 0;
}
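/*
 * test_allocs -- allocate objects of every size up to TEST_ALLOC_SIZE,
 * reopen the pool and free them all
 */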
static PMEMobjpool *
test_allocs(PMEMobjpool *pop, const char *path)
{
PMEMoid *oid = MALLOC(sizeof(PMEMoid) * TEST_ALLOC_SIZE);
if (pmemobj_alloc(pop, &oid[0], 0, 0, NULL, NULL) == 0)
UT_FATAL("pmemobj_alloc(0) succeeded");
for (unsigned i = 1; i < TEST_ALLOC_SIZE; ++i) {
struct cargs args = { i };
if (pmemobj_alloc(pop, &oid[i], i, 0,
test_constructor, &args) != 0)
UT_FATAL("!pmemobj_alloc");
UT_ASSERT(!OID_IS_NULL(oid[i]));
}
pmemobj_close(pop);
UT_ASSERT(pmemobj_check(path, LAYOUT_NAME) == 1);
UT_ASSERT((pop = pmemobj_open(path, LAYOUT_NAME)) != NULL);
for (int i = 1; i < TEST_ALLOC_SIZE; ++i) {
pmemobj_free(&oid[i]);
UT_ASSERT(OID_IS_NULL(oid[i]));
}
FREE(oid);
return pop;
}
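/*
 * test_lazy_load -- allocate a few small objects, reopen the pool, then
 * free one of them and reallocate it with a bigger size
 */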
static PMEMobjpool *
test_lazy_load(PMEMobjpool *pop, const char *path)
{
PMEMoid oid[3];
int ret = pmemobj_alloc(pop, &oid[0], LAZY_LOAD_SIZE, 0, NULL, NULL);
UT_ASSERTeq(ret, 0);
ret = pmemobj_alloc(pop, &oid[1], LAZY_LOAD_SIZE, 0, NULL, NULL);
UT_ASSERTeq(ret, 0);
ret = pmemobj_alloc(pop, &oid[2], LAZY_LOAD_SIZE, 0, NULL, NULL);
UT_ASSERTeq(ret, 0);
pmemobj_close(pop);
UT_ASSERT((pop = pmemobj_open(path, LAYOUT_NAME)) != NULL);
pmemobj_free(&oid[1]);
ret = pmemobj_alloc(pop, &oid[1], LAZY_LOAD_BIG_SIZE, 0, NULL, NULL);
UT_ASSERTeq(ret, 0);
return pop;
}
#define ALLOC_BLOCK_SIZE 64
#define MAX_BUCKET_MAP_ENTRIES (RUN_DEFAULT_SIZE / ALLOC_BLOCK_SIZE)
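/*
 * test_all_classes -- for every block size up to RUN_DEFAULT_SIZE,
 * allocate until the pool is exhausted, then free all objects
 */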
static void
test_all_classes(PMEMobjpool *pop)
{
for (unsigned i = 1; i <= MAX_BUCKET_MAP_ENTRIES; ++i) {
int err;
int nallocs = 0;
while ((err = pmemobj_alloc(pop, NULL, i * ALLOC_BLOCK_SIZE, 0,
NULL, NULL)) == 0) {
nallocs++;
}
UT_ASSERT(nallocs > 0);
PMEMoid iter, niter;
POBJ_FOREACH_SAFE(pop, iter, niter) {
pmemobj_free(&iter);
}
}
}
int
main(int argc, char *argv[])
{
START(argc, argv, "obj_many_size_allocs");
if (argc != 2)
UT_FATAL("usage: %s file-name", argv[0]);
const char *path = argv[1];
PMEMobjpool *pop = NULL;
if ((pop = pmemobj_create(path, LAYOUT_NAME,
0, S_IWUSR | S_IRUSR)) == NULL)
UT_FATAL("!pmemobj_create: %s", path);
pop = test_lazy_load(pop, path);
pop = test_allocs(pop, path);
test_all_classes(pop);
pmemobj_close(pop);
DONE(NULL);
}
| 2,837 | 20.179104 | 76 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem2_integration/pmem2_integration.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019-2020, Intel Corporation */
/*
* pmem2_integration.c -- pmem2 integration tests
*/
#include "libpmem2.h"
#include "unittest.h"
#include "rand.h"
#include "ut_pmem2.h"
#include "ut_pmem2_setup_integration.h"
#define N_GRANULARITIES 3 /* BYTE, CACHE_LINE, PAGE */
/*
* map_invalid -- try to map memory with an invalid config
*/
static void
map_invalid(struct pmem2_config *cfg, struct pmem2_source *src, int result)
{
struct pmem2_map *map = (struct pmem2_map *)0x7;
int ret = pmem2_map(cfg, src, &map);
UT_PMEM2_EXPECT_RETURN(ret, result);
UT_ASSERTeq(map, NULL);
}
/*
* map_valid -- return valid mapped pmem2_map and validate mapped memory length
*/
static struct pmem2_map *
map_valid(struct pmem2_config *cfg, struct pmem2_source *src, size_t size)
{
struct pmem2_map *map = NULL;
int ret = pmem2_map(cfg, src, &map);
UT_PMEM2_EXPECT_RETURN(ret, 0);
UT_ASSERTne(map, NULL);
UT_ASSERTeq(pmem2_map_get_size(map), size);
return map;
}
/*
* test_reuse_cfg -- map pmem2_map twice using the same pmem2_config
*/
static int
test_reuse_cfg(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 1)
UT_FATAL("usage: test_reuse_cfg <file>");
char *file = argv[0];
int fd = OPEN(file, O_RDWR);
struct pmem2_config *cfg;
struct pmem2_source *src;
PMEM2_PREPARE_CONFIG_INTEGRATION(&cfg, &src, fd,
PMEM2_GRANULARITY_PAGE);
size_t size;
UT_ASSERTeq(pmem2_source_size(src, &size), 0);
struct pmem2_map *map1 = map_valid(cfg, src, size);
struct pmem2_map *map2 = map_valid(cfg, src, size);
/* cleanup after the test */
pmem2_unmap(&map2);
pmem2_unmap(&map1);
pmem2_config_delete(&cfg);
pmem2_source_delete(&src);
CLOSE(fd);
return 1;
}
/*
* test_reuse_cfg_with_diff_fd -- map pmem2_map using the same pmem2_config
* with changed file descriptor
*/
static int
test_reuse_cfg_with_diff_fd(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 2)
UT_FATAL("usage: test_reuse_cfg_with_diff_fd <file> <file2>");
char *file1 = argv[0];
int fd1 = OPEN(file1, O_RDWR);
struct pmem2_config *cfg;
struct pmem2_source *src;
PMEM2_PREPARE_CONFIG_INTEGRATION(&cfg, &src, fd1,
PMEM2_GRANULARITY_PAGE);
size_t size1;
UT_ASSERTeq(pmem2_source_size(src, &size1), 0);
struct pmem2_map *map1 = map_valid(cfg, src, size1);
char *file2 = argv[1];
int fd2 = OPEN(file2, O_RDWR);
/* set another valid file descriptor in source */
struct pmem2_source *src2;
UT_ASSERTeq(pmem2_source_from_fd(&src2, fd2), 0);
size_t size2;
UT_ASSERTeq(pmem2_source_size(src2, &size2), 0);
struct pmem2_map *map2 = map_valid(cfg, src2, size2);
/* cleanup after the test */
pmem2_unmap(&map2);
CLOSE(fd2);
pmem2_unmap(&map1);
pmem2_config_delete(&cfg);
pmem2_source_delete(&src);
pmem2_source_delete(&src2);
CLOSE(fd1);
return 2;
}
/*
* test_register_pmem -- map, use and unmap memory
*/
static int
test_register_pmem(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 1)
UT_FATAL("usage: test_register_pmem <file>");
char *file = argv[0];
int fd = OPEN(file, O_RDWR);
char *word = "XXXXXXXX";
struct pmem2_config *cfg;
struct pmem2_source *src;
PMEM2_PREPARE_CONFIG_INTEGRATION(&cfg, &src, fd,
PMEM2_GRANULARITY_PAGE);
size_t size;
UT_ASSERTeq(pmem2_source_size(src, &size), 0);
struct pmem2_map *map = map_valid(cfg, src, size);
char *addr = pmem2_map_get_address(map);
size_t length = strlen(word);
/* write some data in mapped memory without persisting data */
memcpy(addr, word, length);
/* cleanup after the test */
pmem2_unmap(&map);
pmem2_config_delete(&cfg);
pmem2_source_delete(&src);
CLOSE(fd);
return 1;
}
/*
* test_use_misc_lens_and_offsets -- test with multiple offsets and lengths
*/
static int
test_use_misc_lens_and_offsets(const struct test_case *tc,
int argc, char *argv[])
{
if (argc < 1)
UT_FATAL("usage: test_use_misc_lens_and_offsets <file>");
char *file = argv[0];
int fd = OPEN(file, O_RDWR);
struct pmem2_config *cfg;
struct pmem2_source *src;
PMEM2_PREPARE_CONFIG_INTEGRATION(&cfg, &src, fd,
PMEM2_GRANULARITY_PAGE);
size_t len;
UT_ASSERTeq(pmem2_source_size(src, &len), 0);
struct pmem2_map *map = map_valid(cfg, src, len);
char *base = pmem2_map_get_address(map);
pmem2_persist_fn persist_fn = pmem2_get_persist_fn(map);
rng_t rng;
randomize_r(&rng, 13); /* arbitrarily chosen value */
for (size_t i = 0; i < len; i++)
base[i] = (char)rnd64_r(&rng);
persist_fn(base, len);
UT_ASSERTeq(len % Ut_mmap_align, 0);
for (size_t l = len; l > 0; l -= Ut_mmap_align) {
for (size_t off = 0; off < l; off += Ut_mmap_align) {
size_t len2 = l - off;
int ret = pmem2_config_set_length(cfg, len2);
UT_PMEM2_EXPECT_RETURN(ret, 0);
ret = pmem2_config_set_offset(cfg, off);
UT_PMEM2_EXPECT_RETURN(ret, 0);
struct pmem2_map *map2 = map_valid(cfg, src, len2);
char *ptr = pmem2_map_get_address(map2);
UT_ASSERTeq(ret = memcmp(base + off, ptr, len2), 0);
pmem2_unmap(&map2);
}
}
pmem2_unmap(&map);
pmem2_config_delete(&cfg);
pmem2_source_delete(&src);
CLOSE(fd);
return 1;
}
struct gran_test_ctx;
typedef void(*map_func)(struct pmem2_config *cfg,
struct pmem2_source *src, struct gran_test_ctx *ctx);
/*
* gran_test_ctx -- essential parameters used by granularity test
*/
struct gran_test_ctx {
map_func map_with_expected_gran;
enum pmem2_granularity expected_granularity;
};
/*
* map_with_avail_gran -- map the range with valid granularity,
* includes cleanup
*/
static void
map_with_avail_gran(struct pmem2_config *cfg,
struct pmem2_source *src, struct gran_test_ctx *ctx)
{
struct pmem2_map *map;
int ret = pmem2_map(cfg, src, &map);
UT_PMEM2_EXPECT_RETURN(ret, 0);
UT_ASSERTne(map, NULL);
UT_ASSERTeq(ctx->expected_granularity,
pmem2_map_get_store_granularity(map));
/* cleanup after the test */
pmem2_unmap(&map);
}
/*
* map_with_unavail_gran -- map the range with invalid granularity
* (unsuccessful)
*/
static void
map_with_unavail_gran(struct pmem2_config *cfg,
struct pmem2_source *src, struct gran_test_ctx *unused)
{
struct pmem2_map *map;
int ret = pmem2_map(cfg, src, &map);
UT_PMEM2_EXPECT_RETURN(ret, PMEM2_E_GRANULARITY_NOT_SUPPORTED);
UT_ERR("%s", pmem2_errormsg());
UT_ASSERTeq(map, NULL);
}
static const map_func map_with_gran[N_GRANULARITIES][N_GRANULARITIES] = {
/* requested granularity / available granularity */
/* -------------------------------------------------------------------- */
/* BYTE CACHE_LINE PAGE */
/* -------------------------------------------------------------------- */
/* BYTE */ {map_with_avail_gran, map_with_unavail_gran, map_with_unavail_gran},
/* CL */ {map_with_avail_gran, map_with_avail_gran, map_with_unavail_gran},
/* PAGE */ {map_with_avail_gran, map_with_avail_gran, map_with_avail_gran}};
static const enum pmem2_granularity gran_id2granularity[N_GRANULARITIES] = {
PMEM2_GRANULARITY_BYTE,
PMEM2_GRANULARITY_CACHE_LINE,
PMEM2_GRANULARITY_PAGE};
/*
* str2gran_id -- reads granularity id from the provided string
*/
static int
str2gran_id(const char *in)
{
int gran = atoi(in);
UT_ASSERT(gran >= 0 && gran < N_GRANULARITIES);
return gran;
}
/*
* test_granularity -- performs pmem2_map with certain expected granularity
* in context of certain available granularity
*/
static int
test_granularity(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 3)
UT_FATAL(
"usage: test_granularity <file>"
" <available_granularity> <requested_granularity>");
struct gran_test_ctx ctx;
int avail_gran_id = str2gran_id(argv[1]);
int req_gran_id = str2gran_id(argv[2]);
ctx.expected_granularity = gran_id2granularity[avail_gran_id];
ctx.map_with_expected_gran = map_with_gran[req_gran_id][avail_gran_id];
char *file = argv[0];
int fd = OPEN(file, O_RDWR);
struct pmem2_config *cfg;
struct pmem2_source *src;
PMEM2_PREPARE_CONFIG_INTEGRATION(&cfg, &src, fd,
gran_id2granularity[req_gran_id]);
ctx.map_with_expected_gran(cfg, src, &ctx);
pmem2_config_delete(&cfg);
pmem2_source_delete(&src);
CLOSE(fd);
return 3;
}
/*
* test_len_not_aligned -- try to use unaligned length
*/
static int
test_len_not_aligned(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 1)
UT_FATAL("usage: test_len_not_aligned <file>");
char *file = argv[0];
int fd = OPEN(file, O_RDWR);
struct pmem2_config *cfg;
struct pmem2_source *src;
PMEM2_PREPARE_CONFIG_INTEGRATION(&cfg, &src, fd,
PMEM2_GRANULARITY_PAGE);
size_t len, alignment;
int ret = pmem2_source_size(src, &len);
UT_PMEM2_EXPECT_RETURN(ret, 0);
PMEM2_SOURCE_ALIGNMENT(src, &alignment);
UT_ASSERT(len > alignment);
size_t aligned_len = ALIGN_DOWN(len, alignment);
size_t unaligned_len = aligned_len - 1;
ret = pmem2_config_set_length(cfg, unaligned_len);
UT_PMEM2_EXPECT_RETURN(ret, 0);
map_invalid(cfg, src, PMEM2_E_LENGTH_UNALIGNED);
pmem2_config_delete(&cfg);
pmem2_source_delete(&src);
CLOSE(fd);
return 1;
}
/*
* test_len_aligned -- try to use aligned length
*/
static int
test_len_aligned(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 1)
UT_FATAL("usage: test_len_aligned <file>");
char *file = argv[0];
int fd = OPEN(file, O_RDWR);
struct pmem2_config *cfg;
struct pmem2_source *src;
PMEM2_PREPARE_CONFIG_INTEGRATION(&cfg, &src, fd,
PMEM2_GRANULARITY_PAGE);
size_t len, alignment;
int ret = pmem2_source_size(src, &len);
UT_PMEM2_EXPECT_RETURN(ret, 0);
PMEM2_SOURCE_ALIGNMENT(src, &alignment);
UT_ASSERT(len > alignment);
size_t aligned_len = ALIGN_DOWN(len, alignment);
ret = pmem2_config_set_length(cfg, aligned_len);
UT_PMEM2_EXPECT_RETURN(ret, 0);
struct pmem2_map *map = map_valid(cfg, src, aligned_len);
pmem2_unmap(&map);
pmem2_config_delete(&cfg);
pmem2_source_delete(&src);
CLOSE(fd);
return 1;
}
/*
* test_offset_not_aligned -- try to map with unaligned offset
*/
static int
test_offset_not_aligned(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 1)
UT_FATAL("usage: test_offset_not_aligned <file>");
char *file = argv[0];
int fd = OPEN(file, O_RDWR);
struct pmem2_config *cfg;
struct pmem2_source *src;
PMEM2_PREPARE_CONFIG_INTEGRATION(&cfg, &src, fd,
PMEM2_GRANULARITY_PAGE);
size_t len, alignment;
int ret = pmem2_source_size(src, &len);
UT_PMEM2_EXPECT_RETURN(ret, 0);
PMEM2_SOURCE_ALIGNMENT(src, &alignment);
/* break the offset */
size_t offset = alignment - 1;
ret = pmem2_config_set_offset(cfg, offset);
UT_PMEM2_EXPECT_RETURN(ret, 0);
UT_ASSERT(len > alignment);
/* in this case len has to be aligned, only offset will be unaligned */
size_t aligned_len = ALIGN_DOWN(len, alignment);
ret = pmem2_config_set_length(cfg, aligned_len - alignment);
UT_PMEM2_EXPECT_RETURN(ret, 0);
map_invalid(cfg, src, PMEM2_E_OFFSET_UNALIGNED);
pmem2_config_delete(&cfg);
pmem2_source_delete(&src);
CLOSE(fd);
return 1;
}
/*
* test_offset_aligned -- try to map with aligned offset
*/
static int
test_offset_aligned(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 1)
UT_FATAL("usage: test_offset_aligned <file>");
char *file = argv[0];
int fd = OPEN(file, O_RDWR);
struct pmem2_config *cfg;
struct pmem2_source *src;
PMEM2_PREPARE_CONFIG_INTEGRATION(&cfg, &src, fd,
PMEM2_GRANULARITY_PAGE);
size_t len, alignment;
int ret = pmem2_source_size(src, &len);
UT_PMEM2_EXPECT_RETURN(ret, 0);
PMEM2_SOURCE_ALIGNMENT(src, &alignment);
/* set the aligned offset */
size_t offset = alignment;
ret = pmem2_config_set_offset(cfg, offset);
UT_PMEM2_EXPECT_RETURN(ret, 0);
UT_ASSERT(len > alignment * 2);
/* set the aligned len */
size_t map_len = ALIGN_DOWN(len / 2, alignment);
ret = pmem2_config_set_length(cfg, map_len);
UT_PMEM2_EXPECT_RETURN(ret, 0);
struct pmem2_map *map = map_valid(cfg, src, map_len);
pmem2_unmap(&map);
pmem2_config_delete(&cfg);
pmem2_source_delete(&src);
CLOSE(fd);
return 1;
}
/*
* test_mem_move_cpy_set_with_map_private -- map O_RDONLY file and do
* pmem2_[cpy|set|move]_fns with PMEM2_PRIVATE sharing
*/
static int
test_mem_move_cpy_set_with_map_private(const struct test_case *tc, int argc,
char *argv[])
{
if (argc < 1)
UT_FATAL(
"usage: test_mem_move_cpy_set_with_map_private <file>");
char *file = argv[0];
int fd = OPEN(file, O_RDONLY);
const char *word1 = "Persistent memory...";
const char *word2 = "Nonpersistent memory";
const char *word3 = "XXXXXXXXXXXXXXXXXXXX";
struct pmem2_config *cfg;
struct pmem2_source *src;
PMEM2_PREPARE_CONFIG_INTEGRATION(&cfg, &src, fd,
PMEM2_GRANULARITY_PAGE);
pmem2_config_set_sharing(cfg, PMEM2_PRIVATE);
size_t size = 0;
UT_ASSERTeq(pmem2_source_size(src, &size), 0);
struct pmem2_map *map = map_valid(cfg, src, size);
char *addr = pmem2_map_get_address(map);
/* copy the initial state */
char *initial_state = MALLOC(size);
memcpy(initial_state, addr, size);
pmem2_memmove_fn memmove_fn = pmem2_get_memmove_fn(map);
pmem2_memcpy_fn memcpy_fn = pmem2_get_memcpy_fn(map);
pmem2_memset_fn memset_fn = pmem2_get_memset_fn(map);
memcpy_fn(addr, word1, strlen(word1), 0);
UT_ASSERTeq(strcmp(addr, word1), 0);
memmove_fn(addr, word2, strlen(word2), 0);
UT_ASSERTeq(strcmp(addr, word2), 0);
memset_fn(addr, 'X', strlen(word3), 0);
UT_ASSERTeq(strcmp(addr, word3), 0);
/* remap memory, and check that the data has not been saved */
pmem2_unmap(&map);
map = map_valid(cfg, src, size);
addr = pmem2_map_get_address(map);
UT_ASSERTeq(strcmp(addr, initial_state), 0);
/* cleanup after the test */
pmem2_unmap(&map);
FREE(initial_state);
pmem2_config_delete(&cfg);
pmem2_source_delete(&src);
CLOSE(fd);
return 1;
}
/*
* test_deep_flush_valid -- perform valid deep_flush for whole map
*/
static int
test_deep_flush_valid(const struct test_case *tc, int argc, char *argv[])
{
char *file = argv[0];
int fd = OPEN(file, O_RDWR);
struct pmem2_config *cfg;
struct pmem2_source *src;
PMEM2_PREPARE_CONFIG_INTEGRATION(&cfg, &src, fd,
PMEM2_GRANULARITY_PAGE);
size_t len;
PMEM2_SOURCE_SIZE(src, &len);
struct pmem2_map *map = map_valid(cfg, src, len);
char *addr = pmem2_map_get_address(map);
pmem2_persist_fn persist_fn = pmem2_get_persist_fn(map);
memset(addr, 0, len);
persist_fn(addr, len);
int ret = pmem2_deep_flush(map, addr, len);
UT_PMEM2_EXPECT_RETURN(ret, 0);
pmem2_unmap(&map);
PMEM2_CONFIG_DELETE(&cfg);
PMEM2_SOURCE_DELETE(&src);
CLOSE(fd);
return 1;
}
/*
* test_deep_flush_e_range_behind -- try deep_flush for range behind a map
*/
static int
test_deep_flush_e_range_behind(const struct test_case *tc,
int argc, char *argv[])
{
char *file = argv[0];
int fd = OPEN(file, O_RDWR);
struct pmem2_config *cfg;
struct pmem2_source *src;
PMEM2_PREPARE_CONFIG_INTEGRATION(&cfg, &src, fd,
PMEM2_GRANULARITY_PAGE);
size_t len;
PMEM2_SOURCE_SIZE(src, &len);
struct pmem2_map *map = map_valid(cfg, src, len);
size_t map_size = pmem2_map_get_size(map);
char *addr = pmem2_map_get_address(map);
pmem2_persist_fn persist_fn = pmem2_get_persist_fn(map);
memset(addr, 0, len);
persist_fn(addr, len);
int ret = pmem2_deep_flush(map, addr + map_size + 1, 64);
UT_PMEM2_EXPECT_RETURN(ret, PMEM2_E_DEEP_FLUSH_RANGE);
pmem2_unmap(&map);
PMEM2_CONFIG_DELETE(&cfg);
PMEM2_SOURCE_DELETE(&src);
CLOSE(fd);
return 1;
}
/*
* test_deep_flush_e_range_before -- try deep_flush for range before a map
*/
static int
test_deep_flush_e_range_before(const struct test_case *tc,
int argc, char *argv[])
{
char *file = argv[0];
int fd = OPEN(file, O_RDWR);
struct pmem2_config *cfg;
struct pmem2_source *src;
PMEM2_PREPARE_CONFIG_INTEGRATION(&cfg, &src, fd,
PMEM2_GRANULARITY_PAGE);
size_t len;
PMEM2_SOURCE_SIZE(src, &len);
struct pmem2_map *map = map_valid(cfg, src, len);
size_t map_size = pmem2_map_get_size(map);
char *addr = pmem2_map_get_address(map);
pmem2_persist_fn persist_fn = pmem2_get_persist_fn(map);
memset(addr, 0, len);
persist_fn(addr, len);
int ret = pmem2_deep_flush(map, addr - map_size, 64);
UT_PMEM2_EXPECT_RETURN(ret, PMEM2_E_DEEP_FLUSH_RANGE);
pmem2_unmap(&map);
PMEM2_CONFIG_DELETE(&cfg);
PMEM2_SOURCE_DELETE(&src);
CLOSE(fd);
return 1;
}
/*
* test_deep_flush_slice -- try deep_flush for slice of a map
*/
static int
test_deep_flush_slice(const struct test_case *tc, int argc, char *argv[])
{
char *file = argv[0];
int fd = OPEN(file, O_RDWR);
struct pmem2_config *cfg;
struct pmem2_source *src;
PMEM2_PREPARE_CONFIG_INTEGRATION(&cfg, &src, fd,
PMEM2_GRANULARITY_PAGE);
size_t len;
PMEM2_SOURCE_SIZE(src, &len);
struct pmem2_map *map = map_valid(cfg, src, len);
size_t map_size = pmem2_map_get_size(map);
size_t map_part = map_size / 4;
char *addr = pmem2_map_get_address(map);
pmem2_persist_fn persist_fn = pmem2_get_persist_fn(map);
memset(addr, 0, map_part);
persist_fn(addr, map_part);
int ret = pmem2_deep_flush(map, addr + map_part, map_part);
UT_PMEM2_EXPECT_RETURN(ret, 0);
pmem2_unmap(&map);
PMEM2_CONFIG_DELETE(&cfg);
PMEM2_SOURCE_DELETE(&src);
CLOSE(fd);
return 1;
}
/*
* test_deep_flush_overlap -- try deep_flush for a range overlapping the map
*/
static int
test_deep_flush_overlap(const struct test_case *tc, int argc, char *argv[])
{
char *file = argv[0];
int fd = OPEN(file, O_RDWR);
struct pmem2_config *cfg;
struct pmem2_source *src;
PMEM2_PREPARE_CONFIG_INTEGRATION(&cfg, &src, fd,
PMEM2_GRANULARITY_PAGE);
size_t len;
PMEM2_SOURCE_SIZE(src, &len);
struct pmem2_map *map = map_valid(cfg, src, len);
size_t map_size = pmem2_map_get_size(map);
char *addr = pmem2_map_get_address(map);
pmem2_persist_fn persist_fn = pmem2_get_persist_fn(map);
memset(addr, 0, len);
persist_fn(addr, len);
int ret = pmem2_deep_flush(map, addr + 1024, map_size);
UT_PMEM2_EXPECT_RETURN(ret, PMEM2_E_DEEP_FLUSH_RANGE);
pmem2_unmap(&map);
PMEM2_CONFIG_DELETE(&cfg);
PMEM2_SOURCE_DELETE(&src);
CLOSE(fd);
return 1;
}
/*
* test_source_anon -- tests map/config/source functions in combination
* with anonymous source.
*/
static int
test_source_anon(enum pmem2_sharing_type sharing,
enum pmem2_granularity granularity,
size_t source_len, size_t map_len)
{
int ret = 0;
struct pmem2_config *cfg;
struct pmem2_source *src;
struct pmem2_map *map;
struct pmem2_badblock_context *bbctx;
UT_ASSERTeq(pmem2_source_from_anon(&src, source_len), 0);
UT_ASSERTeq(pmem2_source_device_id(src, NULL, NULL), PMEM2_E_NOSUPP);
UT_ASSERTeq(pmem2_source_device_usc(src, NULL), PMEM2_E_NOSUPP);
UT_ASSERTeq(pmem2_badblock_context_new(src, &bbctx), PMEM2_E_NOSUPP);
size_t alignment;
UT_ASSERTeq(pmem2_source_alignment(src, &alignment), 0);
UT_ASSERT(alignment >= Ut_pagesize);
size_t size;
UT_ASSERTeq(pmem2_source_size(src, &size), 0);
UT_ASSERTeq(size, source_len);
PMEM2_CONFIG_NEW(&cfg);
UT_ASSERTeq(pmem2_config_set_length(cfg, map_len), 0);
UT_ASSERTeq(pmem2_config_set_offset(cfg, alignment), 0); /* ignored */
UT_ASSERTeq(pmem2_config_set_required_store_granularity(cfg,
granularity), 0);
UT_ASSERTeq(pmem2_config_set_sharing(cfg, sharing), 0);
if ((ret = pmem2_map(cfg, src, &map)) != 0)
goto map_fail;
void *addr = pmem2_map_get_address(map);
UT_ASSERTne(addr, NULL);
UT_ASSERTeq(pmem2_map_get_size(map), map_len ? map_len : source_len);
UT_ASSERTeq(pmem2_map_get_store_granularity(map),
PMEM2_GRANULARITY_BYTE);
UT_ASSERTeq(pmem2_deep_flush(map, addr, alignment), PMEM2_E_NOSUPP);
UT_ASSERTeq(pmem2_unmap(&map), 0);
map_fail:
PMEM2_CONFIG_DELETE(&cfg);
pmem2_source_delete(&src);
return ret;
}
/*
* test_source_anon_private -- valid config /w private flag
*/
static int
test_source_anon_private(const struct test_case *tc, int argc, char *argv[])
{
int ret = test_source_anon(PMEM2_PRIVATE, PMEM2_GRANULARITY_BYTE,
1 << 30ULL, 1 << 20ULL);
UT_ASSERTeq(ret, 0);
return 1;
}
/*
* test_source_anon_shared -- valid config /w shared flag
*/
static int
test_source_anon_shared(const struct test_case *tc, int argc, char *argv[])
{
int ret = test_source_anon(PMEM2_SHARED, PMEM2_GRANULARITY_BYTE,
1 << 30ULL, 1 << 20ULL);
UT_ASSERTeq(ret, 0);
return 1;
}
/*
* test_source_anon_page -- valid config /w page granularity
*/
static int
test_source_anon_page(const struct test_case *tc, int argc, char *argv[])
{
int ret = test_source_anon(PMEM2_SHARED, PMEM2_GRANULARITY_PAGE,
1 << 30ULL, 1 << 20ULL);
UT_ASSERTeq(ret, 0);
return 1;
}
/*
* test_source_anon_zero_len -- valid config /w zero (src inherited) map length
*/
static int
test_source_anon_zero_len(const struct test_case *tc, int argc, char *argv[])
{
int ret = test_source_anon(PMEM2_SHARED, PMEM2_GRANULARITY_BYTE,
1 << 30ULL, 0);
UT_ASSERTeq(ret, 0);
return 1;
}
/*
* test_source_anon_too_small -- config /w a mapping length too small to map
*/
static int
test_source_anon_too_small(const struct test_case *tc, int argc, char *argv[])
{
int ret = test_source_anon(PMEM2_SHARED, PMEM2_GRANULARITY_BYTE,
1 << 30ULL, 1 << 10ULL);
UT_ASSERTne(ret, 0);
return 1;
}
/*
* test_cases -- available test cases
*/
static struct test_case test_cases[] = {
TEST_CASE(test_reuse_cfg),
TEST_CASE(test_reuse_cfg_with_diff_fd),
TEST_CASE(test_register_pmem),
TEST_CASE(test_use_misc_lens_and_offsets),
TEST_CASE(test_granularity),
TEST_CASE(test_len_not_aligned),
TEST_CASE(test_len_aligned),
TEST_CASE(test_offset_not_aligned),
TEST_CASE(test_offset_aligned),
TEST_CASE(test_mem_move_cpy_set_with_map_private),
TEST_CASE(test_deep_flush_valid),
TEST_CASE(test_deep_flush_e_range_behind),
TEST_CASE(test_deep_flush_e_range_before),
TEST_CASE(test_deep_flush_slice),
TEST_CASE(test_deep_flush_overlap),
TEST_CASE(test_source_anon_private),
TEST_CASE(test_source_anon_shared),
TEST_CASE(test_source_anon_page),
TEST_CASE(test_source_anon_too_small),
TEST_CASE(test_source_anon_zero_len),
};
#define NTESTS (sizeof(test_cases) / sizeof(test_cases[0]))
int
main(int argc, char *argv[])
{
START(argc, argv, "pmem2_integration");
TEST_CASE_PROCESS(argc, argv, test_cases, NTESTS);
DONE(NULL);
}
| 22,113 | 23.736018 | 79 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem2_integration/TESTS.py
|
#!../env.py
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2019-2020, Intel Corporation
#
import testframework as t
from testframework import granularity as g
class Granularity(str):
BYTE = '0'
CACHE_LINE = '1'
PAGE = '2'
class PMEM2_INTEGRATION(t.Test):
test_type = t.Medium
def run(self, ctx):
filepath = ctx.create_holey_file(16 * t.MiB, 'testfile')
ctx.exec('pmem2_integration', self.test_case, filepath)
@t.require_devdax(t.DevDax('devdax'))
class PMEM2_INTEGRATION_DEV_DAXES(t.Test):
test_type = t.Medium
def run(self, ctx):
dd = ctx.devdaxes.devdax
ctx.exec('pmem2_integration', self.test_case, dd.path)
class PMEM2_GRANULARITY(t.Test):
test_type = t.Medium
test_case = 'test_granularity'
available_granularity = None
requested_granularity = None
def run(self, ctx):
filepath = ctx.create_holey_file(16 * t.MiB, 'testfile')
ctx.exec('pmem2_integration', self.test_case, filepath,
self.available_granularity, self.requested_granularity)
class TEST0(PMEM2_INTEGRATION):
"""map twice using the same config"""
test_case = "test_reuse_cfg"
class TEST1(PMEM2_INTEGRATION):
"""map using the same config with changed file descriptor"""
test_case = "test_reuse_cfg_with_diff_fd"
def run(self, ctx):
filepath1 = ctx.create_holey_file(16 * t.MiB, 'testfile1')
filepath2 = ctx.create_holey_file(16 * t.MiB, 'testfile2')
ctx.exec('pmem2_integration', self.test_case, filepath1, filepath2)
@t.require_valgrind_enabled('pmemcheck')
class TEST2(PMEM2_INTEGRATION):
"""check if Valgrind registers data writing on pmem"""
test_case = "test_register_pmem"
@t.require_valgrind_enabled('pmemcheck')
@t.windows_exclude
class TEST3(PMEM2_INTEGRATION_DEV_DAXES):
"""check if Valgrind registers data writing on DevDax"""
test_case = "test_register_pmem"
class TEST4(PMEM2_INTEGRATION):
"""create multiple mappings with different offsets and lengths"""
test_case = "test_use_misc_lens_and_offsets"
def run(self, ctx):
filepath = ctx.create_holey_file(1 * t.MiB, 'testfile1')
ctx.exec('pmem2_integration', self.test_case, filepath)
@g.require_granularity(g.PAGE)
class TEST5(PMEM2_GRANULARITY):
"""test granularity with available page granularity and expected page
granularity"""
available_granularity = Granularity.PAGE
requested_granularity = Granularity.PAGE
@g.require_granularity(g.PAGE)
class TEST6(PMEM2_GRANULARITY):
"""test granularity with available page granularity and expected cache
line granularity"""
available_granularity = Granularity.PAGE
requested_granularity = Granularity.CACHE_LINE
@g.require_granularity(g.PAGE)
class TEST7(PMEM2_GRANULARITY):
"""test granularity with available page granularity and expected byte
granularity"""
available_granularity = Granularity.PAGE
requested_granularity = Granularity.BYTE
@g.require_granularity(g.CACHELINE)
class TEST8(PMEM2_GRANULARITY):
"""test granularity with available cache line granularity and expected
page granularity"""
available_granularity = Granularity.CACHE_LINE
requested_granularity = Granularity.PAGE
@g.require_granularity(g.CACHELINE)
class TEST9(PMEM2_GRANULARITY):
"""test granularity with available cache line granularity and expected
cache line granularity"""
available_granularity = Granularity.CACHE_LINE
requested_granularity = Granularity.CACHE_LINE
@g.require_granularity(g.CACHELINE)
class TEST10(PMEM2_GRANULARITY):
"""test granularity with available cache line granularity and expected
byte granularity"""
available_granularity = Granularity.CACHE_LINE
requested_granularity = Granularity.BYTE
@g.require_granularity(g.BYTE)
class TEST11(PMEM2_GRANULARITY):
"""test granularity with available byte granularity and expected page
granularity"""
available_granularity = Granularity.BYTE
requested_granularity = Granularity.PAGE
@g.require_granularity(g.BYTE)
class TEST12(PMEM2_GRANULARITY):
"""test granularity with available byte granularity and expected cache
line granularity"""
available_granularity = Granularity.BYTE
requested_granularity = Granularity.CACHE_LINE
@g.require_granularity(g.BYTE)
class TEST13(PMEM2_GRANULARITY):
"""test granularity with available byte granularity and expected byte
granularity"""
available_granularity = Granularity.BYTE
requested_granularity = Granularity.BYTE
class TEST14(PMEM2_INTEGRATION):
"""test not aligned length"""
test_case = "test_len_not_aligned"
@t.windows_exclude
class TEST15(PMEM2_INTEGRATION_DEV_DAXES):
"""test not aligned length on DevDax"""
test_case = "test_len_not_aligned"
class TEST16(PMEM2_INTEGRATION):
"""test aligned length"""
test_case = "test_len_aligned"
@t.windows_exclude
class TEST17(PMEM2_INTEGRATION_DEV_DAXES):
"""test aligned length on DevDax"""
test_case = "test_len_aligned"
class TEST18(PMEM2_INTEGRATION):
"""test unaligned offset"""
test_case = "test_offset_not_aligned"
@t.windows_exclude
class TEST19(PMEM2_INTEGRATION_DEV_DAXES):
"""test unaligned offset"""
test_case = "test_offset_not_aligned"
class TEST20(PMEM2_INTEGRATION):
"""test unaligned offset"""
test_case = "test_offset_aligned"
@t.windows_exclude
class TEST21(PMEM2_INTEGRATION_DEV_DAXES):
"""test unaligned offset"""
test_case = "test_offset_aligned"
class TEST22(PMEM2_INTEGRATION):
"""
map O_RDONLY file and test pmem2_[cpy|set|move]_fns with
PMEM2_PRIVATE sharing
"""
test_case = "test_mem_move_cpy_set_with_map_private"
class TEST23(PMEM2_INTEGRATION):
"""test valid case of pmem2_deep_sflush"""
test_case = "test_deep_flush_valid"
class TEST24(PMEM2_INTEGRATION):
"""test deep flush with range out of map"""
test_case = "test_deep_flush_e_range_behind"
class TEST25(PMEM2_INTEGRATION):
"""test deep flush with range out of map"""
test_case = "test_deep_flush_e_range_before"
class TEST26(PMEM2_INTEGRATION):
"""test deep flush with part of map"""
test_case = "test_deep_flush_slice"
class TEST27(PMEM2_INTEGRATION):
"""test deep flush with overlaping part"""
test_case = "test_deep_flush_overlap"
class TEST28(PMEM2_INTEGRATION):
"""test for anonymous mappings"""
test_case = "test_source_anon_private"
class TEST29(PMEM2_INTEGRATION):
"""test for anonymous mappings"""
test_case = "test_source_anon_shared"
class TEST30(PMEM2_INTEGRATION):
"""test for anonymous mappings"""
test_case = "test_source_anon_page"
class TEST31(PMEM2_INTEGRATION):
"""test for anonymous mappings"""
test_case = "test_source_anon_too_small"
class TEST32(PMEM2_INTEGRATION):
"""test for anonymous mappings"""
test_case = "test_source_anon_zero_len"
# XXX: add test cases with:
# @t.require_devdax(t.DevDax('devdax', deep_flush=True))
# @t.require_devdax(t.DevDax('devdax', deep_flush=False))
# if deep_flush == 1 then expected return code 0
# if deep_flush == 0 then expected return code PMEM2_E_NOSUPP
| 7,203 | 26.601533 | 75 |
py
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_ctl_alloc_class_config/obj_ctl_alloc_class_config.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017, Intel Corporation */
/*
* obj_ctl_alloc_class_config.c -- tests for the ctl alloc class config
*/
#include "unittest.h"
#define LAYOUT "obj_ctl_alloc_class_config"
int
main(int argc, char *argv[])
{
START(argc, argv, "obj_ctl_alloc_class_config");
if (argc != 2)
UT_FATAL("usage: %s file-name", argv[0]);
const char *path = argv[1];
PMEMobjpool *pop;
if ((pop = pmemobj_create(path, LAYOUT, PMEMOBJ_MIN_POOL,
S_IWUSR | S_IRUSR)) == NULL)
UT_FATAL("!pmemobj_create: %s", path);
struct pobj_alloc_class_desc alloc_class;
int ret;
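/*
 * Note: the allocation classes 128-130 queried below are presumably
 * registered via the pool's ctl config (e.g. the PMEMOBJ_CONF
 * environment variable) set up by the accompanying test script.
 */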
ret = pmemobj_ctl_get(pop, "heap.alloc_class.128.desc", &alloc_class);
UT_ASSERTeq(ret, 0);
UT_OUT("%d %lu %d", alloc_class.header_type, alloc_class.unit_size,
alloc_class.units_per_block);
ret = pmemobj_ctl_get(pop, "heap.alloc_class.129.desc", &alloc_class);
UT_ASSERTeq(ret, 0);
UT_OUT("%d %lu %d", alloc_class.header_type, alloc_class.unit_size,
alloc_class.units_per_block);
ret = pmemobj_ctl_get(pop, "heap.alloc_class.130.desc", &alloc_class);
UT_ASSERTeq(ret, 0);
UT_OUT("%d %lu %d", alloc_class.header_type, alloc_class.unit_size,
alloc_class.units_per_block);
pmemobj_close(pop);
DONE(NULL);
}
| 1,242 | 22.45283 | 71 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_action/obj_action.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2019, Intel Corporation */
/*
* obj_action.c -- test the action API
*/
#include <stdlib.h>
#include "unittest.h"
#define LAYOUT_NAME "obj_action"
struct macro_reserve_s {
PMEMoid oid;
uint64_t value;
};
TOID_DECLARE(struct macro_reserve_s, 1);
struct foo {
int bar;
};
struct root {
struct {
PMEMoid oid;
uint64_t value;
} reserved;
struct {
PMEMoid oid;
uint64_t value;
} published;
struct {
PMEMoid oid;
} tx_reserved;
struct {
PMEMoid oid;
} tx_reserved_fulfilled;
struct {
PMEMoid oid;
} tx_published;
};
#define HUGE_ALLOC_SIZE ((1 << 20) * 3)
#define MAX_ACTS 10
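/*
 * test_resv_cancel_huge -- reserve huge objects until the pool is
 * exhausted, cancel the reservations and verify the same number can be
 * reserved again
 */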
static void
test_resv_cancel_huge(PMEMobjpool *pop)
{
PMEMoid oid;
unsigned nallocs = 0;
struct pobj_action *act = (struct pobj_action *)
ZALLOC(sizeof(struct pobj_action) * MAX_ACTS);
do {
oid = pmemobj_reserve(pop, &act[nallocs++], HUGE_ALLOC_SIZE, 0);
} while (!OID_IS_NULL(oid));
pmemobj_cancel(pop, act, nallocs - 1);
unsigned nallocs2 = 0;
do {
oid = pmemobj_reserve(pop, &act[nallocs2++],
HUGE_ALLOC_SIZE, 0);
} while (!OID_IS_NULL(oid));
pmemobj_cancel(pop, act, nallocs2 - 1);
UT_ASSERTeq(nallocs, nallocs2);
FREE(act);
}
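/*
 * test_defer_free -- verify that a published defer_free invalidates the
 * object while a canceled one leaves it usable
 */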
static void
test_defer_free(PMEMobjpool *pop)
{
PMEMoid oid;
int ret = pmemobj_alloc(pop, &oid, sizeof(struct foo), 0, NULL, NULL);
UT_ASSERTeq(ret, 0);
struct pobj_action act;
pmemobj_defer_free(pop, oid, &act);
pmemobj_publish(pop, &act, 1);
struct foo *f = (struct foo *)pmemobj_direct(oid);
f->bar = 5; /* should trigger memcheck error */
ret = pmemobj_alloc(pop, &oid, sizeof(struct foo), 0, NULL, NULL);
UT_ASSERTeq(ret, 0);
pmemobj_defer_free(pop, oid, &act);
pmemobj_cancel(pop, &act, 1);
f = (struct foo *)pmemobj_direct(oid);
f->bar = 5; /* should NOT trigger memcheck error */
}
/*
* test_api_macros -- test if the macros included in the action.h API
* compile and allocate memory.
*/
static void
test_api_macros(PMEMobjpool *pop)
{
struct pobj_action macro_reserve_act[1];
TOID(struct macro_reserve_s) macro_reserve_p = POBJ_RESERVE_NEW(pop,
struct macro_reserve_s, ¯o_reserve_act[0]);
UT_ASSERT(!OID_IS_NULL(macro_reserve_p.oid));
pmemobj_publish(pop, macro_reserve_act, 1);
POBJ_FREE(¯o_reserve_p);
macro_reserve_p = POBJ_RESERVE_ALLOC(pop, struct macro_reserve_s,
sizeof(struct macro_reserve_s), ¯o_reserve_act[0]);
UT_ASSERT(!OID_IS_NULL(macro_reserve_p.oid));
pmemobj_publish(pop, macro_reserve_act, 1);
POBJ_FREE(¯o_reserve_p);
macro_reserve_p = POBJ_XRESERVE_NEW(pop, struct macro_reserve_s,
¯o_reserve_act[0], 0);
UT_ASSERT(!OID_IS_NULL(macro_reserve_p.oid));
pmemobj_publish(pop, macro_reserve_act, 1);
POBJ_FREE(¯o_reserve_p);
macro_reserve_p = POBJ_XRESERVE_ALLOC(pop, struct macro_reserve_s,
sizeof(struct macro_reserve_s), ¯o_reserve_act[0], 0);
UT_ASSERT(!OID_IS_NULL(macro_reserve_p.oid));
pmemobj_publish(pop, macro_reserve_act, 1);
POBJ_FREE(¯o_reserve_p);
}
#define POBJ_MAX_ACTIONS 60
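/*
 * test_many -- publish n reservations at once, then defer_free and
 * publish them all again
 */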
static void
test_many(PMEMobjpool *pop, size_t n)
{
struct pobj_action *act = (struct pobj_action *)
MALLOC(sizeof(struct pobj_action) * n);
PMEMoid *oid = (PMEMoid *)
MALLOC(sizeof(PMEMoid) * n);
for (size_t i = 0; i < n; ++i) {
oid[i] = pmemobj_reserve(pop, &act[i], 1, 0);
UT_ASSERT(!OID_IS_NULL(oid[i]));
}
UT_ASSERTeq(pmemobj_publish(pop, act, n), 0);
for (size_t i = 0; i < n; ++i) {
pmemobj_defer_free(pop, oid[i], &act[i]);
}
UT_ASSERTeq(pmemobj_publish(pop, act, n), 0);
FREE(oid);
FREE(act);
}
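/*
 * test_duplicate -- cancel multiple reservations made from the same
 * custom allocation class
 */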
static void
test_duplicate(PMEMobjpool *pop)
{
struct pobj_alloc_class_desc alloc_class_128;
alloc_class_128.header_type = POBJ_HEADER_COMPACT;
alloc_class_128.unit_size = 1024 * 100;
alloc_class_128.units_per_block = 1;
alloc_class_128.alignment = 0;
int ret = pmemobj_ctl_set(pop, "heap.alloc_class.128.desc",
&alloc_class_128);
UT_ASSERTeq(ret, 0);
struct pobj_action a[10];
PMEMoid oid[10];
oid[0] = pmemobj_xreserve(pop, &a[0], 1, 0, POBJ_CLASS_ID(128));
UT_ASSERT(!OID_IS_NULL(oid[0]));
pmemobj_cancel(pop, a, 1);
oid[0] = pmemobj_xreserve(pop, &a[0], 1, 0, POBJ_CLASS_ID(128));
UT_ASSERT(!OID_IS_NULL(oid[0]));
oid[0] = pmemobj_xreserve(pop, &a[1], 1, 0, POBJ_CLASS_ID(128));
UT_ASSERT(!OID_IS_NULL(oid[0]));
oid[0] = pmemobj_xreserve(pop, &a[2], 1, 0, POBJ_CLASS_ID(128));
UT_ASSERT(!OID_IS_NULL(oid[0]));
pmemobj_cancel(pop, a, 3);
oid[0] = pmemobj_xreserve(pop, &a[0], 1, 0, POBJ_CLASS_ID(128));
UT_ASSERT(!OID_IS_NULL(oid[0]));
oid[0] = pmemobj_xreserve(pop, &a[1], 1, 0, POBJ_CLASS_ID(128));
UT_ASSERT(!OID_IS_NULL(oid[0]));
oid[0] = pmemobj_xreserve(pop, &a[2], 1, 0, POBJ_CLASS_ID(128));
UT_ASSERT(!OID_IS_NULL(oid[0]));
oid[0] = pmemobj_xreserve(pop, &a[3], 1, 0, POBJ_CLASS_ID(128));
UT_ASSERT(!OID_IS_NULL(oid[0]));
oid[0] = pmemobj_xreserve(pop, &a[4], 1, 0, POBJ_CLASS_ID(128));
UT_ASSERT(!OID_IS_NULL(oid[0]));
pmemobj_cancel(pop, a, 5);
}
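/*
 * test_many_sets -- publish n pmemobj_set_value actions at once and
 * verify the stored values
 */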
static void
test_many_sets(PMEMobjpool *pop, size_t n)
{
struct pobj_action *act = (struct pobj_action *)
MALLOC(sizeof(struct pobj_action) * n);
PMEMoid oid;
pmemobj_alloc(pop, &oid, sizeof(uint64_t) * n, 0, NULL, NULL);
UT_ASSERT(!OID_IS_NULL(oid));
uint64_t *values = (uint64_t *)pmemobj_direct(oid);
for (uint64_t i = 0; i < n; ++i)
pmemobj_set_value(pop, &act[i], values + i, i);
UT_ASSERTeq(pmemobj_publish(pop, act, n), 0);
for (uint64_t i = 0; i < n; ++i)
UT_ASSERTeq(*(values + i), i);
pmemobj_free(&oid);
FREE(act);
}
int
main(int argc, char *argv[])
{
START(argc, argv, "obj_action");
if (argc < 2)
UT_FATAL("usage: %s filename", argv[0]);
const char *path = argv[1];
PMEMobjpool *pop = pmemobj_create(path, LAYOUT_NAME, PMEMOBJ_MIN_POOL,
S_IWUSR | S_IRUSR);
if (pop == NULL)
UT_FATAL("!pmemobj_create: %s", path);
PMEMoid root = pmemobj_root(pop, sizeof(struct root));
struct root *rootp = (struct root *)pmemobj_direct(root);
struct pobj_action reserved[2];
struct pobj_action published[2];
struct pobj_action tx_reserved;
struct pobj_action tx_reserved_fulfilled;
struct pobj_action tx_published;
rootp->reserved.oid =
pmemobj_reserve(pop, &reserved[0], sizeof(struct foo), 0);
pmemobj_set_value(pop, &reserved[1], &rootp->reserved.value, 1);
rootp->tx_reserved.oid =
pmemobj_reserve(pop, &tx_reserved, sizeof(struct foo), 0);
rootp->tx_reserved_fulfilled.oid =
pmemobj_reserve(pop,
&tx_reserved_fulfilled, sizeof(struct foo), 0);
rootp->tx_published.oid =
pmemobj_reserve(pop, &tx_published, sizeof(struct foo), 0);
rootp->published.oid =
pmemobj_reserve(pop, &published[0], sizeof(struct foo), 0);
TX_BEGIN(pop) {
pmemobj_tx_publish(&tx_reserved, 1);
pmemobj_tx_abort(EINVAL);
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_END
TX_BEGIN(pop) {
pmemobj_tx_publish(&tx_reserved_fulfilled, 1);
pmemobj_tx_publish(NULL, 0); /* this is to force resv fulfill */
pmemobj_tx_abort(EINVAL);
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_END
pmemobj_set_value(pop, &published[1], &rootp->published.value, 1);
pmemobj_publish(pop, published, 2);
TX_BEGIN(pop) {
pmemobj_tx_publish(&tx_published, 1);
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
pmemobj_persist(pop, rootp, sizeof(*rootp));
pmemobj_close(pop);
UT_ASSERTeq(pmemobj_check(path, LAYOUT_NAME), 1);
UT_ASSERTne(pop = pmemobj_open(path, LAYOUT_NAME), NULL);
root = pmemobj_root(pop, sizeof(struct root));
rootp = (struct root *)pmemobj_direct(root);
struct foo *reserved_foop =
(struct foo *)pmemobj_direct(rootp->reserved.oid);
reserved_foop->bar = 1; /* should trigger memcheck error */
UT_ASSERTeq(rootp->reserved.value, 0);
struct foo *published_foop =
(struct foo *)pmemobj_direct(rootp->published.oid);
published_foop->bar = 1; /* should NOT trigger memcheck error */
UT_ASSERTeq(rootp->published.value, 1);
struct foo *tx_reserved_foop =
(struct foo *)pmemobj_direct(rootp->tx_reserved.oid);
tx_reserved_foop->bar = 1; /* should trigger memcheck error */
struct foo *tx_reserved_fulfilled_foop =
(struct foo *)pmemobj_direct(rootp->tx_reserved_fulfilled.oid);
tx_reserved_fulfilled_foop->bar = 1; /* should trigger memcheck error */
struct foo *tx_published_foop =
(struct foo *)pmemobj_direct(rootp->tx_published.oid);
tx_published_foop->bar = 1; /* should NOT trigger memcheck error */
test_resv_cancel_huge(pop);
test_defer_free(pop);
test_api_macros(pop);
test_many(pop, POBJ_MAX_ACTIONS * 2);
test_many_sets(pop, POBJ_MAX_ACTIONS * 2);
test_duplicate(pop);
pmemobj_close(pop);
DONE(NULL);
}
| 8,548 | 23.286932 | 73 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem2_source_size/pmem2_source_size.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019-2020, Intel Corporation */
/*
* pmem2_source_size.c -- pmem2_source_size unittests
*/
#include <stdint.h>
#include "fault_injection.h"
#include "unittest.h"
#include "ut_pmem2.h"
#include "ut_fh.h"
#include "config.h"
#include "out.h"
typedef void (*test_fun)(const char *path, os_off_t size);
/*
* test_normal_file - tests normal file (common)
*/
static void
test_normal_file(const char *path, os_off_t expected_size,
enum file_handle_type type)
{
struct FHandle *fh = UT_FH_OPEN(type, path, FH_RDWR);
struct pmem2_source *src;
PMEM2_SOURCE_FROM_FH(&src, fh);
size_t size;
int ret = pmem2_source_size(src, &size);
UT_PMEM2_EXPECT_RETURN(ret, 0);
UT_ASSERTeq(size, expected_size);
PMEM2_SOURCE_DELETE(&src);
UT_FH_CLOSE(fh);
}
/*
* test_normal_file_fd - tests normal file using a file descriptor
*/
static int
test_normal_file_fd(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 2)
UT_FATAL("usage: test_normal_file_fd <file> <expected_size>");
char *path = argv[0];
os_off_t expected_size = ATOLL(argv[1]);
test_normal_file(path, expected_size, FH_FD);
return 2;
}
/*
* test_normal_file_handle - tests normal file using a HANDLE
*/
static int
test_normal_file_handle(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 2)
UT_FATAL("usage: test_normal_file_handle"
" <file> <expected_size>");
char *path = argv[0];
os_off_t expected_size = ATOLL(argv[1]);
test_normal_file(path, expected_size, FH_HANDLE);
return 2;
}
/*
* test_tmpfile - tests temporary file
*/
static void
test_tmpfile(const char *dir, os_off_t requested_size,
enum file_handle_type type)
{
struct FHandle *fh = UT_FH_OPEN(type, dir, FH_RDWR | FH_TMPFILE);
UT_FH_TRUNCATE(fh, requested_size);
struct pmem2_source *src;
PMEM2_SOURCE_FROM_FH(&src, fh);
size_t size = SIZE_MAX;
int ret = pmem2_source_size(src, &size);
UT_PMEM2_EXPECT_RETURN(ret, 0);
UT_ASSERTeq(size, requested_size);
PMEM2_SOURCE_DELETE(&src);
UT_FH_CLOSE(fh);
}
/*
* test_tmpfile_fd - tests temporary file using file descriptor interface
*/
static int
test_tmpfile_fd(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 2)
UT_FATAL("usage: test_tmpfile_fd <file> <requested_size>");
char *dir = argv[0];
os_off_t requested_size = ATOLL(argv[1]);
test_tmpfile(dir, requested_size, FH_FD);
return 2;
}
/*
* test_tmpfile_handle - tests temporary file using file handle interface
*/
static int
test_tmpfile_handle(const struct test_case *tc, int argc, char *argv[])
{
if (argc < 2)
UT_FATAL("usage: test_tmpfile_handle <file> <requested_size>");
char *dir = argv[0];
os_off_t requested_size = ATOLL(argv[1]);
test_tmpfile(dir, requested_size, FH_HANDLE);
return 2;
}
/*
* test_cases -- available test cases
*/
static struct test_case test_cases[] = {
TEST_CASE(test_normal_file_fd),
TEST_CASE(test_normal_file_handle),
TEST_CASE(test_tmpfile_fd),
TEST_CASE(test_tmpfile_handle),
};
#define NTESTS (sizeof(test_cases) / sizeof(test_cases[0]))
int
main(int argc, char **argv)
{
START(argc, argv, "pmem2_source_size");
util_init();
out_init("pmem2_source_size", "TEST_LOG_LEVEL", "TEST_LOG_FILE", 0, 0);
TEST_CASE_PROCESS(argc, argv, test_cases, NTESTS);
out_fini();
DONE(NULL);
}
| 3,326 | 20.191083 | 75 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem2_source_size/TESTS.py
|
#!../env.py
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2019-2020, Intel Corporation
#
import testframework as t
class NormalFile(t.Test):
test_type = t.Short
def run(self, ctx):
filepath = ctx.create_holey_file(self.size, 'testfile')
ctx.exec('pmem2_source_size', self.test_case,
filepath, self.size)
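# Each TESTn class below only parameterizes NormalFile with a test case
# name and a size; create_holey_file creates a sparse file of exactly
# that size, which the C binary then compares against pmem2_source_size.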
class TEST0(NormalFile):
test_case = 'test_normal_file_fd'
size = 0
@t.windows_only
class TEST1(NormalFile):
test_case = 'test_normal_file_handle'
size = 0
class TEST2(NormalFile):
test_case = 'test_normal_file_fd'
size = 16 * t.MiB
@t.windows_only
class TEST3(NormalFile):
test_case = 'test_normal_file_handle'
size = 16 * t.MiB
# On Windows fd interface doesn't support temporary files
# FreeBSD doesn't support O_TMPFILE
@t.linux_only
class TEST4(t.Test):
test_type = t.Short
def run(self, ctx):
ctx.exec('pmem2_source_size', 'test_tmpfile_fd',
ctx.testdir, 16 * t.MiB)
# XXX doesn't work
# @t.windows_only
# class TEST5(t.Test):
# test_type = t.Short
#
# def run(self, ctx):
# ctx.exec('pmem2_source_size', 'tmp_file_handle',
# ctx.testdir, str(16 * t.MiB))
@t.windows_exclude
@t.require_devdax(t.DevDax('devdax1'))
class TEST6(t.Test):
test_type = t.Short
def run(self, ctx):
dd = ctx.devdaxes.devdax1
ctx.exec('pmem2_source_size',
'test_normal_file_fd', dd.path, dd.size)
| 1,485 | 19.929577 | 63 |
py
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/util_vecq/util_vecq.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018, Intel Corporation */
/*
* util_vecq.c -- unit test for vecq implementation
*/
#include "unittest.h"
#include "vecq.h"
struct test {
int foo;
int bar;
};
static void
vecq_test(void)
{
VECQ(testvec, struct test) v;
VECQ_INIT(&v);
struct test t = {5, 10};
struct test t2 = {10, 15};
int ret;
ret = VECQ_ENQUEUE(&v, t);
UT_ASSERTeq(ret, 0);
ret = VECQ_ENQUEUE(&v, t2);
UT_ASSERTeq(ret, 0);
struct test res = VECQ_FRONT(&v);
UT_ASSERTeq(res.bar, t.bar);
size_t s = VECQ_SIZE(&v);
UT_ASSERTeq(s, 2);
size_t c = VECQ_CAPACITY(&v);
UT_ASSERTeq(c, 64);
res = VECQ_DEQUEUE(&v);
UT_ASSERTeq(res.bar, t.bar);
res = VECQ_DEQUEUE(&v);
UT_ASSERTeq(res.bar, t2.bar);
VECQ_DELETE(&v);
}
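/*
 * A minimal usage sketch of the FIFO semantics exercised above (note
 * that the VECQ_CAPACITY assertion relies on the queue's initial
 * allocation being 64 slots):
 *
 *	VECQ(intq, int) q;
 *	VECQ_INIT(&q);
 *	if (VECQ_ENQUEUE(&q, 42) == 0) {
 *		int head = VECQ_FRONT(&q);	(peek -- returns 42)
 *		head = VECQ_DEQUEUE(&q);	(pop -- returns 42)
 *	}
 *	VECQ_DELETE(&q);
 */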
static void
vecq_test_grow(void)
{
VECQ(testvec, int) v;
VECQ_INIT(&v);
for (int j = 0; j < 100; ++j) {
int n = j * 100;
for (int i = 1; i < n; ++i) {
int ret = VECQ_ENQUEUE(&v, i);
UT_ASSERTeq(ret, 0);
}
for (int i = 1; i < n; ++i) {
int res = VECQ_DEQUEUE(&v);
UT_ASSERTeq(res, i);
}
}
VECQ_DELETE(&v);
}
int
main(int argc, char *argv[])
{
START(argc, argv, "util_vecq");
vecq_test();
vecq_test_grow();
DONE(NULL);
}
| 1,215 | 14.012346 | 51 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/ex_linkedlist/ex_linkedlist.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2017, Intel Corporation */
/*
 * ex_linkedlist.c -- test of linkedlist example
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include "pmemobj_list.h"
#include "unittest.h"
#define ELEMENT_NO 10
#define PRINT_RES(res, struct_name) do {\
if ((res) == 0) {\
UT_OUT("Outcome for " #struct_name " is correct!");\
} else {\
UT_ERR("Outcome for " #struct_name\
" does not match expected result!!!");\
}\
} while (0)
POBJ_LAYOUT_BEGIN(list);
POBJ_LAYOUT_ROOT(list, struct base);
POBJ_LAYOUT_TOID(list, struct tqueuehead);
POBJ_LAYOUT_TOID(list, struct slisthead);
POBJ_LAYOUT_TOID(list, struct tqnode);
POBJ_LAYOUT_TOID(list, struct snode);
POBJ_LAYOUT_END(list);
POBJ_TAILQ_HEAD(tqueuehead, struct tqnode);
struct tqnode {
int data;
POBJ_TAILQ_ENTRY(struct tqnode) tnd;
};
POBJ_SLIST_HEAD(slisthead, struct snode);
struct snode {
int data;
POBJ_SLIST_ENTRY(struct snode) snd;
};
struct base {
struct tqueuehead tqueue;
struct slisthead slist;
};
static const int expectedResTQ[] = { 111, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 222 };
static const int expectedResSL[] = { 111, 8, 222, 6, 5, 4, 3, 2, 1, 0, 333 };
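/*
 * The two arrays above encode the exact list contents expected after
 * the insert/move/remove sequences performed by init_tqueue() and
 * init_slist() below; the FOREACH loops in main() compare both lists
 * against them element by element.
 */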
/*
* dump_tq -- dumps list on standard output
*/
static void
dump_tq(struct tqueuehead *head, const char *str)
{
TOID(struct tqnode) var;
UT_OUT("%s start", str);
POBJ_TAILQ_FOREACH(var, head, tnd)
UT_OUT("%d", D_RW(var)->data);
UT_OUT("%s end", str);
}
/*
* init_tqueue -- initialize tail queue
*/
static void
init_tqueue(PMEMobjpool *pop, struct tqueuehead *head)
{
if (!POBJ_TAILQ_EMPTY(head))
return;
TOID(struct tqnode) node;
TOID(struct tqnode) middleNode;
TOID(struct tqnode) node888;
TOID(struct tqnode) tempNode;
int i = 0;
TX_BEGIN(pop) {
POBJ_TAILQ_INIT(head);
dump_tq(head, "after init");
for (i = 0; i < ELEMENT_NO; ++i) {
node = TX_NEW(struct tqnode);
D_RW(node)->data = i;
if (0 == i) {
middleNode = node;
}
POBJ_TAILQ_INSERT_HEAD(head, node, tnd);
node = TX_NEW(struct tqnode);
D_RW(node)->data = i;
POBJ_TAILQ_INSERT_TAIL(head, node, tnd);
}
dump_tq(head, "after insert[head|tail]");
node = TX_NEW(struct tqnode);
D_RW(node)->data = 666;
POBJ_TAILQ_INSERT_AFTER(middleNode, node, tnd);
dump_tq(head, "after insert_after1");
middleNode = POBJ_TAILQ_NEXT(middleNode, tnd);
node = TX_NEW(struct tqnode);
D_RW(node)->data = 888;
node888 = node;
POBJ_TAILQ_INSERT_BEFORE(middleNode, node, tnd);
dump_tq(head, "after insert_before1");
node = TX_NEW(struct tqnode);
D_RW(node)->data = 555;
POBJ_TAILQ_INSERT_BEFORE(middleNode, node, tnd);
dump_tq(head, "after insert_before2");
node = TX_NEW(struct tqnode);
D_RW(node)->data = 111;
tempNode = POBJ_TAILQ_FIRST(head);
POBJ_TAILQ_INSERT_BEFORE(tempNode, node, tnd);
dump_tq(head, "after insert_before3");
node = TX_NEW(struct tqnode);
D_RW(node)->data = 222;
tempNode = POBJ_TAILQ_LAST(head);
POBJ_TAILQ_INSERT_AFTER(tempNode, node, tnd);
dump_tq(head, "after insert_after2");
tempNode = middleNode;
middleNode = POBJ_TAILQ_PREV(tempNode, tnd);
POBJ_TAILQ_MOVE_ELEMENT_TAIL(head, middleNode, tnd);
dump_tq(head, "after move_element_tail");
POBJ_TAILQ_MOVE_ELEMENT_HEAD(head, tempNode, tnd);
dump_tq(head, "after move_element_head");
tempNode = POBJ_TAILQ_FIRST(head);
POBJ_TAILQ_REMOVE(head, tempNode, tnd);
dump_tq(head, "after remove1");
tempNode = POBJ_TAILQ_LAST(head);
POBJ_TAILQ_REMOVE(head, tempNode, tnd);
dump_tq(head, "after remove2");
POBJ_TAILQ_REMOVE(head, node888, tnd);
dump_tq(head, "after remove3");
} TX_ONABORT {
abort();
} TX_END
}
/*
* dump_sl -- dumps list on standard output
*/
static void
dump_sl(struct slisthead *head, const char *str)
{
TOID(struct snode) var;
UT_OUT("%s start", str);
POBJ_SLIST_FOREACH(var, head, snd)
UT_OUT("%d", D_RW(var)->data);
UT_OUT("%s end", str);
}
/*
* init_slist -- initialize SLIST
*/
static void
init_slist(PMEMobjpool *pop, struct slisthead *head)
{
if (!POBJ_SLIST_EMPTY(head))
return;
TOID(struct snode) node;
TOID(struct snode) tempNode;
int i = 0;
TX_BEGIN(pop) {
POBJ_SLIST_INIT(head);
dump_sl(head, "after init");
for (i = 0; i < ELEMENT_NO; ++i) {
node = TX_NEW(struct snode);
D_RW(node)->data = i;
POBJ_SLIST_INSERT_HEAD(head, node, snd);
}
dump_sl(head, "after insert_head");
tempNode = POBJ_SLIST_FIRST(head);
node = TX_NEW(struct snode);
D_RW(node)->data = 111;
POBJ_SLIST_INSERT_AFTER(tempNode, node, snd);
dump_sl(head, "after insert_after1");
tempNode = POBJ_SLIST_NEXT(node, snd);
node = TX_NEW(struct snode);
D_RW(node)->data = 222;
POBJ_SLIST_INSERT_AFTER(tempNode, node, snd);
dump_sl(head, "after insert_after2");
tempNode = POBJ_SLIST_NEXT(node, snd);
POBJ_SLIST_REMOVE_FREE(head, tempNode, snd);
dump_sl(head, "after remove_free1");
POBJ_SLIST_REMOVE_HEAD(head, snd);
dump_sl(head, "after remove_head");
TOID(struct snode) element = POBJ_SLIST_FIRST(head);
while (!TOID_IS_NULL(D_RO(element)->snd.pe_next)) {
element = D_RO(element)->snd.pe_next;
}
node = TX_NEW(struct snode);
D_RW(node)->data = 333;
POBJ_SLIST_INSERT_AFTER(element, node, snd);
dump_sl(head, "after insert_after3");
element = node;
node = TX_NEW(struct snode);
D_RW(node)->data = 123;
POBJ_SLIST_INSERT_AFTER(element, node, snd);
dump_sl(head, "after insert_after4");
tempNode = POBJ_SLIST_NEXT(node, snd);
POBJ_SLIST_REMOVE_FREE(head, node, snd);
dump_sl(head, "after remove_free2");
} TX_ONABORT {
abort();
} TX_END
}
int
main(int argc, char *argv[])
{
unsigned res = 0;
PMEMobjpool *pop;
const char *path;
START(argc, argv, "ex_linkedlist");
/* root doesn't count */
UT_COMPILE_ERROR_ON(POBJ_LAYOUT_TYPES_NUM(list) != 4);
if (argc != 2) {
UT_FATAL("usage: %s file-name", argv[0]);
}
path = argv[1];
if (os_access(path, F_OK) != 0) {
if ((pop = pmemobj_create(path, POBJ_LAYOUT_NAME(list),
PMEMOBJ_MIN_POOL, 0666)) == NULL) {
UT_FATAL("!pmemobj_create: %s", path);
}
} else {
if ((pop = pmemobj_open(path,
POBJ_LAYOUT_NAME(list))) == NULL) {
UT_FATAL("!pmemobj_open: %s", path);
}
}
TOID(struct base) base = POBJ_ROOT(pop, struct base);
struct tqueuehead *tqhead = &D_RW(base)->tqueue;
struct slisthead *slhead = &D_RW(base)->slist;
init_tqueue(pop, tqhead);
init_slist(pop, slhead);
int i = 0;
TOID(struct tqnode) tqelement;
POBJ_TAILQ_FOREACH(tqelement, tqhead, tnd) {
if (D_RO(tqelement)->data != expectedResTQ[i]) {
res = 1;
break;
}
i++;
}
PRINT_RES(res, tail queue);
i = 0;
res = 0;
TOID(struct snode) slelement;
POBJ_SLIST_FOREACH(slelement, slhead, snd) {
if (D_RO(slelement)->data != expectedResSL[i]) {
res = 1;
break;
}
i++;
}
PRINT_RES(res, singly linked list);
pmemobj_close(pop);
DONE(NULL);
}
| 6,919 | 22.862069 | 77 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_persist_count/mocks_windows.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* mocks_windows.h -- redefinitions of pmem functions
*
* This file is Windows-specific.
*
* This file should be included (i.e. using Forced Include) by libpmemobj
* files, when compiled for the purpose of obj_persist_count test.
* It would replace default implementation with mocked functions defined
* in obj_persist_count.c.
*
* These defines could be also passed as preprocessor definitions.
*/
#ifndef WRAP_REAL
#define pmem_persist __wrap_pmem_persist
#define pmem_flush __wrap_pmem_flush
#define pmem_drain __wrap_pmem_drain
#define pmem_msync __wrap_pmem_msync
#define pmem_memcpy_persist __wrap_pmem_memcpy_persist
#define pmem_memcpy_nodrain __wrap_pmem_memcpy_nodrain
#define pmem_memcpy __wrap_pmem_memcpy
#define pmem_memmove_persist __wrap_pmem_memmove_persist
#define pmem_memmove_nodrain __wrap_pmem_memmove_nodrain
#define pmem_memmove __wrap_pmem_memmove
#define pmem_memset_persist __wrap_pmem_memset_persist
#define pmem_memset_nodrain __wrap_pmem_memset_nodrain
#define pmem_memset __wrap_pmem_memset
#endif
| 1,130 | 34.34375 | 73 |
h
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_persist_count/obj_persist_count.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2019, Intel Corporation */
/*
* obj_persist_count.c -- counting number of persists
*/
#define _GNU_SOURCE
#include "obj.h"
#include "pmalloc.h"
#include "unittest.h"
struct ops_counter {
unsigned n_cl_stores;
unsigned n_drain;
unsigned n_pmem_persist;
unsigned n_pmem_msync;
unsigned n_pmem_flush;
unsigned n_pmem_drain;
unsigned n_flush_from_pmem_memcpy;
unsigned n_flush_from_pmem_memset;
unsigned n_drain_from_pmem_memcpy;
unsigned n_drain_from_pmem_memset;
unsigned n_pot_cache_misses;
};
static struct ops_counter ops_counter;
static struct ops_counter tx_counter;
#define FLUSH_ALIGN ((uintptr_t)64)
#define MOVNT_THRESHOLD 256
static unsigned
cl_flushed(const void *addr, size_t len, uintptr_t alignment)
{
uintptr_t start = (uintptr_t)addr & ~(alignment - 1);
uintptr_t end = ((uintptr_t)addr + len + alignment - 1) &
~(alignment - 1);
return (unsigned)(end - start) / FLUSH_ALIGN;
}
#define PMEM_F_MEM_MOVNT (PMEM_F_MEM_WC | PMEM_F_MEM_NONTEMPORAL)
#define PMEM_F_MEM_MOV (PMEM_F_MEM_WB | PMEM_F_MEM_TEMPORAL)
static unsigned
bulk_cl_changed(const void *addr, size_t len, unsigned flags)
{
uintptr_t start = (uintptr_t)addr & ~(FLUSH_ALIGN - 1);
uintptr_t end = ((uintptr_t)addr + len + FLUSH_ALIGN - 1) &
~(FLUSH_ALIGN - 1);
unsigned cl_changed = (unsigned)(end - start) / FLUSH_ALIGN;
int wc; /* write combining */
if (flags & PMEM_F_MEM_NOFLUSH)
wc = 0; /* NOFLUSH always uses temporal instructions */
else if (flags & PMEM_F_MEM_MOVNT)
wc = 1;
else if (flags & PMEM_F_MEM_MOV)
wc = 0;
else if (len < MOVNT_THRESHOLD)
wc = 0;
else
wc = 1;
/* count number of potential cache misses */
if (!wc) {
/*
* When we don't use write combining, it means all
* cache lines may be missing.
*/
ops_counter.n_pot_cache_misses += cl_changed;
} else {
/*
* When we use write combining there won't be any cache misses,
* with an exception of unaligned beginning or end.
*/
if (start != (uintptr_t)addr)
ops_counter.n_pot_cache_misses++;
if (end != ((uintptr_t)addr + len) &&
start + FLUSH_ALIGN != end)
ops_counter.n_pot_cache_misses++;
}
return cl_changed;
}
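/*
 * Worked example (no flags set): a 256-byte copy whose start is offset
 * 16 into a cache line spans 5 cache lines once start is rounded down
 * and end is rounded up. Since len >= MOVNT_THRESHOLD, write combining
 * is assumed, so only the unaligned head and tail count as potential
 * cache misses -- 2 in total.
 */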
static void
flush_cl(const void *addr, size_t len)
{
unsigned flushed = cl_flushed(addr, len, FLUSH_ALIGN);
ops_counter.n_cl_stores += flushed;
ops_counter.n_pot_cache_misses += flushed;
}
static void
flush_msync(const void *addr, size_t len)
{
unsigned flushed = cl_flushed(addr, len, Pagesize);
ops_counter.n_cl_stores += flushed;
ops_counter.n_pot_cache_misses += flushed;
}
FUNC_MOCK(pmem_persist, void, const void *addr, size_t len)
FUNC_MOCK_RUN_DEFAULT {
ops_counter.n_pmem_persist++;
flush_cl(addr, len);
ops_counter.n_drain++;
_FUNC_REAL(pmem_persist)(addr, len);
}
FUNC_MOCK_END
FUNC_MOCK(pmem_msync, int, const void *addr, size_t len)
FUNC_MOCK_RUN_DEFAULT {
ops_counter.n_pmem_msync++;
flush_msync(addr, len);
ops_counter.n_drain++;
return _FUNC_REAL(pmem_msync)(addr, len);
}
FUNC_MOCK_END
FUNC_MOCK(pmem_flush, void, const void *addr, size_t len)
FUNC_MOCK_RUN_DEFAULT {
ops_counter.n_pmem_flush++;
flush_cl(addr, len);
_FUNC_REAL(pmem_flush)(addr, len);
}
FUNC_MOCK_END
FUNC_MOCK(pmem_drain, void, void)
FUNC_MOCK_RUN_DEFAULT {
ops_counter.n_pmem_drain++;
ops_counter.n_drain++;
_FUNC_REAL(pmem_drain)();
}
FUNC_MOCK_END
static void
memcpy_nodrain_count(void *dest, const void *src, size_t len, unsigned flags)
{
unsigned cl_stores = bulk_cl_changed(dest, len, flags);
if (!(flags & PMEM_F_MEM_NOFLUSH))
ops_counter.n_flush_from_pmem_memcpy += cl_stores;
ops_counter.n_cl_stores += cl_stores;
}
static void
memcpy_persist_count(void *dest, const void *src, size_t len, unsigned flags)
{
memcpy_nodrain_count(dest, src, len, flags);
ops_counter.n_drain_from_pmem_memcpy++;
ops_counter.n_drain++;
}
FUNC_MOCK(pmem_memcpy_persist, void *, void *dest, const void *src, size_t len)
FUNC_MOCK_RUN_DEFAULT {
memcpy_persist_count(dest, src, len, 0);
return _FUNC_REAL(pmem_memcpy_persist)(dest, src, len);
}
FUNC_MOCK_END
FUNC_MOCK(pmem_memcpy_nodrain, void *, void *dest, const void *src, size_t len)
FUNC_MOCK_RUN_DEFAULT {
memcpy_nodrain_count(dest, src, len, 0);
return _FUNC_REAL(pmem_memcpy_nodrain)(dest, src, len);
}
FUNC_MOCK_END
static unsigned
sanitize_flags(unsigned flags)
{
if (flags & PMEM_F_MEM_NOFLUSH) {
/* NOFLUSH implies NODRAIN */
flags |= PMEM_F_MEM_NODRAIN;
}
return flags;
}
FUNC_MOCK(pmem_memcpy, void *, void *dest, const void *src, size_t len,
unsigned flags)
FUNC_MOCK_RUN_DEFAULT {
flags = sanitize_flags(flags);
if (flags & PMEM_F_MEM_NODRAIN)
memcpy_nodrain_count(dest, src, len, flags);
else
memcpy_persist_count(dest, src, len, flags);
return _FUNC_REAL(pmem_memcpy)(dest, src, len, flags);
}
FUNC_MOCK_END
FUNC_MOCK(pmem_memmove_persist, void *, void *dest, const void *src, size_t len)
FUNC_MOCK_RUN_DEFAULT {
memcpy_persist_count(dest, src, len, 0);
return _FUNC_REAL(pmem_memmove_persist)(dest, src, len);
}
FUNC_MOCK_END
FUNC_MOCK(pmem_memmove_nodrain, void *, void *dest, const void *src, size_t len)
FUNC_MOCK_RUN_DEFAULT {
memcpy_nodrain_count(dest, src, len, 0);
return _FUNC_REAL(pmem_memmove_nodrain)(dest, src, len);
}
FUNC_MOCK_END
FUNC_MOCK(pmem_memmove, void *, void *dest, const void *src, size_t len,
unsigned flags)
FUNC_MOCK_RUN_DEFAULT {
flags = sanitize_flags(flags);
if (flags & PMEM_F_MEM_NODRAIN)
memcpy_nodrain_count(dest, src, len, flags);
else
memcpy_persist_count(dest, src, len, flags);
return _FUNC_REAL(pmem_memmove)(dest, src, len, flags);
}
FUNC_MOCK_END
static void
memset_nodrain_count(void *dest, size_t len, unsigned flags)
{
unsigned cl_set = bulk_cl_changed(dest, len, flags);
if (!(flags & PMEM_F_MEM_NOFLUSH))
ops_counter.n_flush_from_pmem_memset += cl_set;
ops_counter.n_cl_stores += cl_set;
}
static void
memset_persist_count(void *dest, size_t len, unsigned flags)
{
memset_nodrain_count(dest, len, flags);
ops_counter.n_drain_from_pmem_memset++;
ops_counter.n_drain++;
}
FUNC_MOCK(pmem_memset_persist, void *, void *dest, int c, size_t len)
FUNC_MOCK_RUN_DEFAULT {
memset_persist_count(dest, len, 0);
return _FUNC_REAL(pmem_memset_persist)(dest, c, len);
}
FUNC_MOCK_END
FUNC_MOCK(pmem_memset_nodrain, void *, void *dest, int c, size_t len)
FUNC_MOCK_RUN_DEFAULT {
memset_nodrain_count(dest, len, 0);
return _FUNC_REAL(pmem_memset_nodrain)(dest, c, len);
}
FUNC_MOCK_END
FUNC_MOCK(pmem_memset, void *, void *dest, int c, size_t len, unsigned flags)
FUNC_MOCK_RUN_DEFAULT {
flags = sanitize_flags(flags);
if (flags & PMEM_F_MEM_NODRAIN)
memset_nodrain_count(dest, len, flags);
else
memset_persist_count(dest, len, flags);
return _FUNC_REAL(pmem_memset)(dest, c, len, flags);
}
FUNC_MOCK_END
/*
* reset_counters -- zero all counters
*/
static void
reset_counters(void)
{
memset(&ops_counter, 0, sizeof(ops_counter));
}
/*
* print_reset_counters -- print and then zero all counters
*/
static void
print_reset_counters(const char *task, unsigned tx)
{
#define CNT(name) (ops_counter.name - tx * tx_counter.name)
UT_OUT(
"%-14s %-7d %-10d %-12d %-10d %-10d %-10d %-15d %-17d %-15d %-17d %-23d",
task,
CNT(n_cl_stores),
CNT(n_drain),
CNT(n_pmem_persist),
CNT(n_pmem_msync),
CNT(n_pmem_flush),
CNT(n_pmem_drain),
CNT(n_flush_from_pmem_memcpy),
CNT(n_drain_from_pmem_memcpy),
CNT(n_flush_from_pmem_memset),
CNT(n_drain_from_pmem_memset),
CNT(n_pot_cache_misses));
#undef CNT
reset_counters();
}
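/*
 * The tx parameter subtracts the bare transaction begin/end overhead
 * (captured in tx_counter by the empty transaction in main()) tx times,
 * so each printed row reports only the cost of the operation itself.
 */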
#define LARGE_SNAPSHOT ((1 << 10) * 10)
struct foo_large {
uint8_t snapshot[LARGE_SNAPSHOT];
};
struct foo {
int val;
uint64_t dest;
PMEMoid bar;
PMEMoid bar2;
};
int
main(int argc, char *argv[])
{
START(argc, argv, "obj_persist_count");
if (argc != 2)
UT_FATAL("usage: %s file-name", argv[0]);
const char *path = argv[1];
PMEMobjpool *pop;
if ((pop = pmemobj_create(path, "persist_count",
PMEMOBJ_MIN_POOL, S_IWUSR | S_IRUSR)) == NULL)
UT_FATAL("!pmemobj_create: %s", path);
UT_OUT(
"%-14s %-7s %-10s %-12s %-10s %-10s %-10s %-15s %-17s %-15s %-17s %-23s",
"task",
"cl(all)",
"drain(all)",
"pmem_persist",
"pmem_msync",
"pmem_flush",
"pmem_drain",
"pmem_memcpy_cls",
"pmem_memcpy_drain",
"pmem_memset_cls",
"pmem_memset_drain",
"potential_cache_misses");
print_reset_counters("pool_create", 0);
/* allocate one structure to create a run */
pmemobj_alloc(pop, NULL, sizeof(struct foo), 0, NULL, NULL);
reset_counters();
PMEMoid root = pmemobj_root(pop, sizeof(struct foo));
UT_ASSERT(!OID_IS_NULL(root));
print_reset_counters("root_alloc", 0);
PMEMoid oid;
int ret = pmemobj_alloc(pop, &oid, sizeof(struct foo), 0, NULL, NULL);
UT_ASSERTeq(ret, 0);
print_reset_counters("atomic_alloc", 0);
pmemobj_free(&oid);
print_reset_counters("atomic_free", 0);
struct foo *f = pmemobj_direct(root);
TX_BEGIN(pop) {
} TX_END
memcpy(&tx_counter, &ops_counter, sizeof(ops_counter));
print_reset_counters("tx_begin_end", 0);
TX_BEGIN(pop) {
f->bar = pmemobj_tx_alloc(sizeof(struct foo), 0);
UT_ASSERT(!OID_IS_NULL(f->bar));
} TX_END
print_reset_counters("tx_alloc", 1);
TX_BEGIN(pop) {
f->bar2 = pmemobj_tx_alloc(sizeof(struct foo), 0);
UT_ASSERT(!OID_IS_NULL(f->bar2));
} TX_END
print_reset_counters("tx_alloc_next", 1);
TX_BEGIN(pop) {
pmemobj_tx_free(f->bar);
} TX_END
print_reset_counters("tx_free", 1);
TX_BEGIN(pop) {
pmemobj_tx_free(f->bar2);
} TX_END
print_reset_counters("tx_free_next", 1);
TX_BEGIN(pop) {
pmemobj_tx_xadd_range_direct(&f->val, sizeof(f->val),
POBJ_XADD_NO_FLUSH);
} TX_END
print_reset_counters("tx_add", 1);
TX_BEGIN(pop) {
pmemobj_tx_xadd_range_direct(&f->val, sizeof(f->val),
POBJ_XADD_NO_FLUSH);
} TX_END
print_reset_counters("tx_add_next", 1);
PMEMoid large_foo;
pmemobj_zalloc(pop, &large_foo, sizeof(struct foo_large), 0);
UT_ASSERT(!OID_IS_NULL(large_foo));
reset_counters();
struct foo_large *flarge = pmemobj_direct(large_foo);
TX_BEGIN(pop) {
pmemobj_tx_xadd_range_direct(&flarge->snapshot,
sizeof(flarge->snapshot),
POBJ_XADD_NO_FLUSH);
} TX_END
print_reset_counters("tx_add_large", 1);
TX_BEGIN(pop) {
pmemobj_tx_xadd_range_direct(&flarge->snapshot,
sizeof(flarge->snapshot),
POBJ_XADD_NO_FLUSH);
} TX_END
print_reset_counters("tx_add_lnext", 1);
pmalloc(pop, &f->dest, sizeof(f->val), 0, 0);
print_reset_counters("pmalloc", 0);
pfree(pop, &f->dest);
print_reset_counters("pfree", 0);
uint64_t stack_var;
pmalloc(pop, &stack_var, sizeof(f->val), 0, 0);
print_reset_counters("pmalloc_stack", 0);
pfree(pop, &stack_var);
print_reset_counters("pfree_stack", 0);
pmemobj_close(pop);
DONE(NULL);
}
#ifdef _MSC_VER
/*
* Since libpmemobj is linked statically, we need to invoke its ctor/dtor.
*/
MSVC_CONSTR(libpmemobj_init)
MSVC_DESTR(libpmemobj_fini)
#endif
| 10,962 | 22.832609 | 80 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/rpmem_proto/rpmem_proto.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */
/*
* rpmem_proto.c -- unit test for rpmem_proto header
*
 * The purpose of this test is to make sure that the structures which
 * describe rpmem protocol messages do not contain any padding.
*/
#include "unittest.h"
#include "librpmem.h"
#include "rpmem_proto.h"
int
main(int argc, char *argv[])
{
START(argc, argv, "rpmem_proto");
ASSERT_ALIGNED_BEGIN(struct rpmem_msg_hdr);
ASSERT_ALIGNED_FIELD(struct rpmem_msg_hdr, type);
ASSERT_ALIGNED_FIELD(struct rpmem_msg_hdr, size);
ASSERT_ALIGNED_CHECK(struct rpmem_msg_hdr);
ASSERT_ALIGNED_BEGIN(struct rpmem_msg_hdr_resp);
ASSERT_ALIGNED_FIELD(struct rpmem_msg_hdr_resp, status);
ASSERT_ALIGNED_FIELD(struct rpmem_msg_hdr_resp, type);
ASSERT_ALIGNED_FIELD(struct rpmem_msg_hdr_resp, size);
ASSERT_ALIGNED_CHECK(struct rpmem_msg_hdr_resp);
ASSERT_ALIGNED_BEGIN(struct rpmem_pool_attr);
ASSERT_ALIGNED_FIELD(struct rpmem_pool_attr, signature);
ASSERT_ALIGNED_FIELD(struct rpmem_pool_attr, major);
ASSERT_ALIGNED_FIELD(struct rpmem_pool_attr, compat_features);
ASSERT_ALIGNED_FIELD(struct rpmem_pool_attr, incompat_features);
ASSERT_ALIGNED_FIELD(struct rpmem_pool_attr, ro_compat_features);
ASSERT_ALIGNED_FIELD(struct rpmem_pool_attr, poolset_uuid);
ASSERT_ALIGNED_FIELD(struct rpmem_pool_attr, uuid);
ASSERT_ALIGNED_FIELD(struct rpmem_pool_attr, next_uuid);
ASSERT_ALIGNED_FIELD(struct rpmem_pool_attr, prev_uuid);
ASSERT_ALIGNED_FIELD(struct rpmem_pool_attr, user_flags);
ASSERT_ALIGNED_CHECK(struct rpmem_pool_attr);
ASSERT_ALIGNED_BEGIN(struct rpmem_pool_attr_packed);
ASSERT_ALIGNED_FIELD(struct rpmem_pool_attr_packed, signature);
ASSERT_ALIGNED_FIELD(struct rpmem_pool_attr_packed, major);
ASSERT_ALIGNED_FIELD(struct rpmem_pool_attr_packed, compat_features);
ASSERT_ALIGNED_FIELD(struct rpmem_pool_attr_packed, incompat_features);
ASSERT_ALIGNED_FIELD(struct rpmem_pool_attr_packed, ro_compat_features);
ASSERT_ALIGNED_FIELD(struct rpmem_pool_attr_packed, poolset_uuid);
ASSERT_ALIGNED_FIELD(struct rpmem_pool_attr_packed, uuid);
ASSERT_ALIGNED_FIELD(struct rpmem_pool_attr_packed, next_uuid);
ASSERT_ALIGNED_FIELD(struct rpmem_pool_attr_packed, prev_uuid);
ASSERT_ALIGNED_FIELD(struct rpmem_pool_attr_packed, user_flags);
ASSERT_ALIGNED_CHECK(struct rpmem_pool_attr_packed);
ASSERT_ALIGNED_BEGIN(struct rpmem_msg_ibc_attr);
ASSERT_ALIGNED_FIELD(struct rpmem_msg_ibc_attr, port);
ASSERT_ALIGNED_FIELD(struct rpmem_msg_ibc_attr, persist_method);
ASSERT_ALIGNED_FIELD(struct rpmem_msg_ibc_attr, rkey);
ASSERT_ALIGNED_FIELD(struct rpmem_msg_ibc_attr, raddr);
ASSERT_ALIGNED_FIELD(struct rpmem_msg_ibc_attr, nlanes);
ASSERT_ALIGNED_CHECK(struct rpmem_msg_ibc_attr);
ASSERT_ALIGNED_BEGIN(struct rpmem_msg_common);
ASSERT_ALIGNED_FIELD(struct rpmem_msg_common, major);
ASSERT_ALIGNED_FIELD(struct rpmem_msg_common, minor);
ASSERT_ALIGNED_FIELD(struct rpmem_msg_common, pool_size);
ASSERT_ALIGNED_FIELD(struct rpmem_msg_common, nlanes);
ASSERT_ALIGNED_FIELD(struct rpmem_msg_common, provider);
ASSERT_ALIGNED_FIELD(struct rpmem_msg_common, buff_size);
ASSERT_ALIGNED_CHECK(struct rpmem_msg_common);
ASSERT_ALIGNED_BEGIN(struct rpmem_msg_pool_desc);
ASSERT_ALIGNED_FIELD(struct rpmem_msg_pool_desc, size);
ASSERT_ALIGNED_CHECK(struct rpmem_msg_pool_desc);
ASSERT_ALIGNED_BEGIN(struct rpmem_msg_create);
ASSERT_ALIGNED_FIELD(struct rpmem_msg_create, hdr);
ASSERT_ALIGNED_FIELD(struct rpmem_msg_create, c);
ASSERT_ALIGNED_FIELD(struct rpmem_msg_create, pool_attr);
ASSERT_ALIGNED_FIELD(struct rpmem_msg_create, pool_desc);
ASSERT_ALIGNED_CHECK(struct rpmem_msg_create);
ASSERT_ALIGNED_BEGIN(struct rpmem_msg_create_resp);
ASSERT_ALIGNED_FIELD(struct rpmem_msg_create_resp, hdr);
ASSERT_ALIGNED_FIELD(struct rpmem_msg_create_resp, ibc);
ASSERT_ALIGNED_CHECK(struct rpmem_msg_create_resp);
ASSERT_ALIGNED_BEGIN(struct rpmem_msg_open);
ASSERT_ALIGNED_FIELD(struct rpmem_msg_open, hdr);
ASSERT_ALIGNED_FIELD(struct rpmem_msg_open, c);
ASSERT_ALIGNED_FIELD(struct rpmem_msg_open, pool_desc);
ASSERT_ALIGNED_CHECK(struct rpmem_msg_open);
ASSERT_ALIGNED_BEGIN(struct rpmem_msg_open_resp);
ASSERT_ALIGNED_FIELD(struct rpmem_msg_open_resp, hdr);
ASSERT_ALIGNED_FIELD(struct rpmem_msg_open_resp, ibc);
ASSERT_ALIGNED_FIELD(struct rpmem_msg_open_resp, pool_attr);
ASSERT_ALIGNED_CHECK(struct rpmem_msg_open_resp);
ASSERT_ALIGNED_BEGIN(struct rpmem_msg_close);
ASSERT_ALIGNED_FIELD(struct rpmem_msg_close, hdr);
ASSERT_ALIGNED_FIELD(struct rpmem_msg_close, flags);
ASSERT_ALIGNED_CHECK(struct rpmem_msg_close);
ASSERT_ALIGNED_BEGIN(struct rpmem_msg_close_resp);
ASSERT_ALIGNED_FIELD(struct rpmem_msg_close_resp, hdr);
ASSERT_ALIGNED_CHECK(struct rpmem_msg_close_resp);
ASSERT_ALIGNED_BEGIN(struct rpmem_msg_persist);
ASSERT_ALIGNED_FIELD(struct rpmem_msg_persist, flags);
ASSERT_ALIGNED_FIELD(struct rpmem_msg_persist, lane);
ASSERT_ALIGNED_FIELD(struct rpmem_msg_persist, addr);
ASSERT_ALIGNED_FIELD(struct rpmem_msg_persist, size);
ASSERT_ALIGNED_CHECK(struct rpmem_msg_persist);
ASSERT_ALIGNED_BEGIN(struct rpmem_msg_persist_resp);
ASSERT_ALIGNED_FIELD(struct rpmem_msg_persist_resp, flags);
ASSERT_ALIGNED_FIELD(struct rpmem_msg_persist_resp, lane);
ASSERT_ALIGNED_CHECK(struct rpmem_msg_persist_resp);
ASSERT_ALIGNED_BEGIN(struct rpmem_msg_set_attr);
ASSERT_ALIGNED_FIELD(struct rpmem_msg_set_attr, hdr);
ASSERT_ALIGNED_FIELD(struct rpmem_msg_set_attr, pool_attr);
ASSERT_ALIGNED_CHECK(struct rpmem_msg_set_attr);
ASSERT_ALIGNED_BEGIN(struct rpmem_msg_set_attr_resp);
ASSERT_ALIGNED_FIELD(struct rpmem_msg_set_attr_resp, hdr);
ASSERT_ALIGNED_CHECK(struct rpmem_msg_set_attr_resp);
DONE(NULL);
}
| 5,733 | 41.474074 | 73 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/signal_handle/signal_handle.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017, Intel Corporation */
/*
* signal_handle.c -- unit test for signal_handle
 *
* operations are: 's', 'a', 'a', 'i', 'v'
* s: testing SIGSEGV with signal_handler_2
* a: testing SIGABRT with signal_handler_1
* a: testing second occurrence of SIGABRT with signal_handler_1
* i: testing SIGILL with signal_handler_2
 * v: testing third occurrence of SIGABRT with a different handler
 * (signal_handler_3)
*
*/
#include "unittest.h"
ut_jmp_buf_t Jmp;
static void
signal_handler_1(int sig)
{
UT_OUT("\tsignal_handler_1: %s", os_strsignal(sig));
ut_siglongjmp(Jmp);
}
static void
signal_handler_2(int sig)
{
UT_OUT("\tsignal_handler_2: %s", os_strsignal(sig));
ut_siglongjmp(Jmp);
}
static void
signal_handler_3(int sig)
{
UT_OUT("\tsignal_handler_3: %s", os_strsignal(sig));
ut_siglongjmp(Jmp);
}
int
main(int argc, char *argv[])
{
START(argc, argv, "signal_handle");
if (argc < 2)
UT_FATAL("usage: %s op:s|a|a|i|v", argv[0]);
struct sigaction v1, v2, v3;
sigemptyset(&v1.sa_mask);
v1.sa_flags = 0;
v1.sa_handler = signal_handler_1;
sigemptyset(&v2.sa_mask);
v2.sa_flags = 0;
v2.sa_handler = signal_handler_2;
SIGACTION(SIGSEGV, &v2, NULL);
SIGACTION(SIGABRT, &v1, NULL);
SIGACTION(SIGABRT, &v2, NULL);
SIGACTION(SIGABRT, &v1, NULL);
SIGACTION(SIGILL, &v2, NULL);
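/*
 * Note: SIGABRT is deliberately registered three times above; the
 * intermediate registrations exercise handler replacement, and only
 * the last one installed (signal_handler_1) remains in effect when
 * the test loop below runs.
 */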
for (int arg = 1; arg < argc; arg++) {
if (strchr("sabiv", argv[arg][0]) == NULL ||
argv[arg][1] != '\0')
UT_FATAL("op must be one of: s, a, a, i, v");
switch (argv[arg][0]) {
case 's':
UT_OUT("Testing SIGSEGV...");
if (!ut_sigsetjmp(Jmp)) {
if (!raise(SIGSEGV)) {
UT_OUT("\t SIGSEGV occurrence");
} else {
UT_OUT("\t Issue with SIGSEGV raise");
}
}
break;
case 'a':
UT_OUT("Testing SIGABRT...");
if (!ut_sigsetjmp(Jmp)) {
if (!raise(SIGABRT)) {
UT_OUT("\t SIGABRT occurrence");
} else {
UT_OUT("\t Issue with SIGABRT raise");
}
}
break;
case 'i':
UT_OUT("Testing SIGILL...");
if (!ut_sigsetjmp(Jmp)) {
if (!raise(SIGILL)) {
UT_OUT("\t SIGILL occurrence");
} else {
UT_OUT("\t Issue with SIGILL raise");
}
}
break;
case 'v':
if (!ut_sigsetjmp(Jmp)) {
sigemptyset(&v3.sa_mask);
v3.sa_flags = 0;
v3.sa_handler = signal_handler_3;
UT_OUT("Testing SIGABRT...");
SIGACTION(SIGABRT, &v3, NULL);
if (!raise(SIGABRT)) {
UT_OUT("\t SIGABRT occurrence");
} else {
UT_OUT("\t Issue with SIGABRT raise");
}
}
break;
}
}
DONE(NULL);
}
| 2,549 | 19.238095 | 69 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_toid/obj_toid.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2017, Intel Corporation */
/*
* obj_toid.c -- unit test for TOID_VALID, DIRECT_RO, DIRECT_RW macros
*/
#include <sys/param.h>
#include "unittest.h"
#define LAYOUT_NAME "toid"
#define TEST_NUM 5
TOID_DECLARE(struct obj, 0);
struct obj {
int id;
};
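/*
 * TOID_DECLARE(struct obj, 0) binds type number 0 to this typed OID;
 * TOID_VALID() compares that declared number against the type number
 * recorded in the object's metadata at allocation time, which is what
 * the two checks below exercise.
 */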
/*
 * do_toid_valid -- checks that TOID_VALID succeeds when the declared type
 * number matches the one stored in the object's metadata
*/
static void
do_toid_valid(PMEMobjpool *pop)
{
TOID(struct obj) obj;
POBJ_NEW(pop, &obj, struct obj, NULL, NULL);
UT_ASSERT(!TOID_IS_NULL(obj));
UT_ASSERT(TOID_VALID(obj));
POBJ_FREE(&obj);
}
/*
 * do_toid_no_valid -- checks that TOID_VALID fails when the object was
 * allocated with a type number different from the declared one
*/
static void
do_toid_no_valid(PMEMobjpool *pop)
{
TOID(struct obj) obj;
int ret = pmemobj_alloc(pop, &obj.oid, sizeof(struct obj), TEST_NUM,
NULL, NULL);
UT_ASSERTeq(ret, 0);
UT_ASSERT(!TOID_VALID(obj));
POBJ_FREE(&obj);
}
/*
* do_direct_simple - checks if DIRECT_RW and DIRECT_RO macros correctly
* write and read from member of structure represented by TOID
*/
static void
do_direct_simple(PMEMobjpool *pop)
{
TOID(struct obj) obj;
POBJ_NEW(pop, &obj, struct obj, NULL, NULL);
D_RW(obj)->id = TEST_NUM;
pmemobj_persist(pop, &D_RW(obj)->id, sizeof(D_RW(obj)->id));
UT_ASSERTeq(D_RO(obj)->id, TEST_NUM);
POBJ_FREE(&obj);
}
int
main(int argc, char *argv[])
{
START(argc, argv, "obj_toid");
if (argc != 2)
UT_FATAL("usage: %s [file]", argv[0]);
PMEMobjpool *pop;
if ((pop = pmemobj_create(argv[1], LAYOUT_NAME, PMEMOBJ_MIN_POOL,
S_IWUSR | S_IRUSR)) == NULL)
UT_FATAL("!pmemobj_create");
do_toid_valid(pop);
do_toid_no_valid(pop);
do_direct_simple(pop);
pmemobj_close(pop);
DONE(NULL);
}
| 1,716 | 19.686747 | 74 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_check/obj_check.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2018, Intel Corporation */
/*
* obj_check.c -- unit tests for pmemobj_check
*/
#include <stddef.h>
#include "unittest.h"
#include "libpmemobj.h"
int
main(int argc, char *argv[])
{
START(argc, argv, "obj_check");
if (argc < 2 || argc > 5)
UT_FATAL("usage: obj_check <file> [-l <layout>] [-o]");
const char *path = argv[1];
const char *layout = NULL;
PMEMobjpool *pop = NULL;
int open = 0;
for (int i = 2; i < argc; ++i) {
if (strcmp(argv[i], "-o") == 0)
open = 1;
else if (strcmp(argv[i], "-l") == 0) {
layout = argv[i + 1];
i++;
} else
UT_FATAL("Unrecognized argument: %s", argv[i]);
}
if (open) {
pop = pmemobj_open(path, layout);
if (pop == NULL)
UT_OUT("!%s: pmemobj_open", path);
else
UT_OUT("%s: pmemobj_open: Success", path);
}
int ret = pmemobj_check(path, layout);
switch (ret) {
case 1:
UT_OUT("consistent");
break;
case 0:
UT_OUT("not consistent: %s", pmemobj_errormsg());
break;
default:
UT_OUT("error: %s", pmemobj_errormsg());
break;
}
if (pop != NULL)
pmemobj_close(pop);
DONE(NULL);
}
| 1,136 | 17.33871 | 57 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/compat_incompat_features/common.sh
|
#!/usr/bin/env bash
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2017-2019, Intel Corporation
#
#
# compat_incompat_features/common.sh -- common stuff for compat/incompat feature
# flags tests
#
ERR=err${UNITTEST_NUM}.log
ERR_TEMP=err${UNITTEST_NUM}_part.log
LOG=out${UNITTEST_NUM}.log
LOG_TEMP=out${UNITTEST_NUM}_part.log
rm -f $LOG && touch $LOG
rm -f $LOG_TEMP && touch $LOG_TEMP
rm -f $ERR && touch $ERR
rm -f $ERR_TEMP && touch $ERR_TEMP
LAYOUT=OBJ_LAYOUT$SUFFIX
POOLSET=$DIR/pool.set
POOL_TYPES=(obj blk log)
# pmempool create arguments:
declare -A create_args
create_args[obj]="obj $POOLSET"
create_args[blk]="blk 512 $POOLSET"
create_args[log]="log $POOLSET"
# Known compat flags: (none)
# Known incompat flags:
let "POOL_FEAT_SINGLEHDR = 0x0001"
let "POOL_FEAT_CKSUM_2K = 0x0002"
let "POOL_FEAT_SDS = 0x0004"
# Unknown compat flags:
UNKNOWN_COMPAT=(2 4 8 1024)
# Unknown incompat flags:
UNKNOWN_INCOMPAT=(8 15 1111)
# set compat flags in header
set_compat() {
local part=$1
local flag=$2
expect_normal_exit $PMEMSPOIL $part pool_hdr.features.compat=$flag \
"pool_hdr.f:checksum_gen"
}
# set incompat flags in header
set_incompat() {
local part=$1
local flag=$2
expect_normal_exit $PMEMSPOIL $part pool_hdr.features.incompat=$flag \
"pool_hdr.f:checksum_gen"
}
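# Example (sketch -- the actual part path depends on the poolset built by
# the calling test): set_incompat $DIR/testfile.part0 8
# stamps an unknown incompat flag into that part's header and regenerates
# the header checksum via pmemspoil.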
| 1,326 | 22.280702 | 80 |
sh
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/compat_incompat_features/pool_open.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2018, Intel Corporation */
/*
* pool_open.c -- a tool for verifying that an obj/blk/log pool opens correctly
*
 * usage: pool_open <obj|blk|log> <path>
*/
#include "unittest.h"
int
main(int argc, char *argv[])
{
START(argc, argv, "compat_incompat_features");
if (argc < 3)
UT_FATAL("usage: %s <obj|blk|log> <path>", argv[0]);
char *type = argv[1];
char *path = argv[2];
if (strcmp(type, "obj") == 0) {
PMEMobjpool *pop = pmemobj_open(path, "");
if (pop == NULL) {
UT_FATAL("!%s: pmemobj_open failed", path);
} else {
UT_OUT("%s: pmemobj_open succeeded", path);
pmemobj_close(pop);
}
} else if (strcmp(type, "blk") == 0) {
PMEMblkpool *pop = pmemblk_open(path, 0);
if (pop == NULL) {
UT_FATAL("!%s: pmemblk_open failed", path);
} else {
UT_OUT("%s: pmemblk_open succeeded", path);
pmemblk_close(pop);
}
} else if (strcmp(type, "log") == 0) {
PMEMlogpool *pop = pmemlog_open(path);
if (pop == NULL) {
UT_FATAL("!%s: pmemlog_open failed", path);
} else {
UT_OUT("%s: pmemlog_open succeeded", path);
pmemlog_close(pop);
}
} else {
UT_FATAL("usage: %s <obj|blk|log> <path>", argv[0]);
}
DONE(NULL);
}
| 1,237 | 23.27451 | 79 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/util_poolset/mocks_windows.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2017, Intel Corporation */
/*
* mocks_windows.h -- redefinitions of libc functions used in util_poolset
*
* This file is Windows-specific.
*
* This file should be included (i.e. using Forced Include) by libpmem
* files, when compiled for the purpose of util_poolset test.
* It would replace default implementation with mocked functions defined
* in util_poolset.c.
*
* These defines could be also passed as preprocessor definitions.
*/
#ifndef WRAP_REAL_OPEN
#define os_open __wrap_os_open
#endif
#ifndef WRAP_REAL_FALLOCATE
#define os_posix_fallocate __wrap_os_posix_fallocate
#endif
#ifndef WRAP_REAL_PMEM
#define pmem_is_pmem __wrap_pmem_is_pmem
#endif
| 730 | 25.107143 | 74 |
h
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/util_poolset/util_poolset.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/*
* util_poolset.c -- unit test for util_pool_create() / util_pool_open()
*
* usage: util_poolset cmd minlen hdrsize [mockopts] setfile ...
*/
#include <stdbool.h>
#include "unittest.h"
#include "pmemcommon.h"
#include "set.h"
#include <errno.h>
#include "mocks.h"
#include "fault_injection.h"
#define LOG_PREFIX "ut"
#define LOG_LEVEL_VAR "TEST_LOG_LEVEL"
#define LOG_FILE_VAR "TEST_LOG_FILE"
#define MAJOR_VERSION 1
#define MINOR_VERSION 0
#define SIG "PMEMXXX"
#define MIN_PART ((size_t)(1024 * 1024 * 2)) /* 2 MiB */
#define TEST_FORMAT_INCOMPAT_DEFAULT POOL_FEAT_CKSUM_2K
#define TEST_FORMAT_INCOMPAT_CHECK POOL_FEAT_INCOMPAT_VALID
static size_t Extend_size = MIN_PART * 2;
const char *Open_path = "";
os_off_t Fallocate_len = -1;
size_t Is_pmem_len = 0;
/*
* poolset_info -- (internal) dumps poolset info and checks its integrity
*
* Performs the following checks:
* - part_size[i] == rounddown(file_size - pool_hdr_size, Mmap_align)
* - replica_size == sum(part_size)
* - pool_size == min(replica_size)
*/
static void
poolset_info(const char *fname, struct pool_set *set, int o)
{
if (o)
UT_OUT("%s: opened: nreps %d poolsize %zu rdonly %d",
fname, set->nreplicas, set->poolsize,
set->rdonly);
else
UT_OUT("%s: created: nreps %d poolsize %zu zeroed %d",
fname, set->nreplicas, set->poolsize,
set->zeroed);
size_t poolsize = SIZE_MAX;
for (unsigned r = 0; r < set->nreplicas; r++) {
struct pool_replica *rep = set->replica[r];
size_t repsize = 0;
UT_OUT(" replica[%d]: nparts %d nhdrs %d repsize %zu "
"is_pmem %d",
r, rep->nparts, rep->nhdrs, rep->repsize, rep->is_pmem);
for (unsigned i = 0; i < rep->nparts; i++) {
struct pool_set_part *part = &rep->part[i];
UT_OUT(" part[%d] path %s filesize %zu size %zu",
i, part->path, part->filesize, part->size);
size_t partsize =
(part->filesize & ~(Ut_mmap_align - 1));
repsize += partsize;
if (i > 0 && (set->options & OPTION_SINGLEHDR) == 0)
UT_ASSERTeq(part->size,
partsize - Ut_mmap_align); /* XXX */
}
repsize -= (rep->nhdrs - 1) * Ut_mmap_align;
UT_ASSERTeq(rep->repsize, repsize);
UT_ASSERT(rep->resvsize >= repsize);
if (rep->repsize < poolsize)
poolsize = rep->repsize;
}
UT_ASSERTeq(set->poolsize, poolsize);
}
/*
* mock_options -- (internal) parse mock options and enable mocked functions
*/
static int
mock_options(const char *arg)
{
/* reset to defaults */
Open_path = "";
Fallocate_len = -1;
Is_pmem_len = 0;
if (arg[0] != '-' || arg[1] != 'm')
return 0;
switch (arg[2]) {
case 'n':
/* do nothing */
break;
case 'o':
/* open */
Open_path = &arg[4];
break;
case 'f':
/* fallocate */
Fallocate_len = ATOLL(&arg[4]);
break;
case 'p':
/* is_pmem */
Is_pmem_len = ATOULL(&arg[4]);
break;
default:
UT_FATAL("unknown mock option: %c", arg[2]);
}
return 1;
}
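/*
 * Mock options are expected in the form "-mX:value": arg[2] selects the
 * mocked function and everything from arg[4] on is the value. E.g.
 * "-mo:<path>" makes opening that path fail, "-mf:<len>" makes
 * posix_fallocate of that length report ENOSPC, and "-mp:<len>" makes
 * pmem_is_pmem claim a range of that length is pmem.
 */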
int
main(int argc, char *argv[])
{
START(argc, argv, "util_poolset");
common_init(LOG_PREFIX, LOG_LEVEL_VAR, LOG_FILE_VAR,
MAJOR_VERSION, MINOR_VERSION);
if (argc < 3)
UT_FATAL("usage: %s cmd minsize [mockopts] "
"setfile ...", argv[0]);
char *fname;
struct pool_set *set;
int ret;
size_t minsize = strtoul(argv[2], &fname, 0);
for (int arg = 3; arg < argc; arg++) {
arg += mock_options(argv[arg]);
fname = argv[arg];
struct pool_attr attr;
memset(&attr, 0, sizeof(attr));
memcpy(attr.signature, SIG, sizeof(SIG));
attr.major = 1;
switch (argv[1][0]) {
case 'c':
attr.features.incompat = TEST_FORMAT_INCOMPAT_DEFAULT;
ret = util_pool_create(&set, fname, 0, minsize,
MIN_PART, &attr, NULL, REPLICAS_ENABLED);
if (ret == -1)
UT_OUT("!%s: util_pool_create", fname);
else {
/*
* XXX: On Windows pool files are created with
* R/W permissions, so no need for chmod().
*/
#ifndef _WIN32
util_poolset_chmod(set, S_IWUSR | S_IRUSR);
#endif
poolset_info(fname, set, 0);
util_poolset_close(set, DO_NOT_DELETE_PARTS);
}
break;
case 'o':
attr.features.incompat = TEST_FORMAT_INCOMPAT_CHECK;
ret = util_pool_open(&set, fname, MIN_PART, &attr,
NULL, NULL, 0 /* flags */);
if (ret == -1)
UT_OUT("!%s: util_pool_open", fname);
else {
poolset_info(fname, set, 1);
util_poolset_close(set, DO_NOT_DELETE_PARTS);
}
break;
case 'e':
attr.features.incompat = TEST_FORMAT_INCOMPAT_CHECK;
ret = util_pool_open(&set, fname, MIN_PART, &attr,
NULL, NULL, 0 /* flags */);
UT_ASSERTeq(ret, 0);
size_t esize = Extend_size;
void *nptr = util_pool_extend(set, &esize, MIN_PART);
if (nptr == NULL)
UT_OUT("!%s: util_pool_extend", fname);
else {
poolset_info(fname, set, 1);
}
util_poolset_close(set, DO_NOT_DELETE_PARTS);
break;
case 'f':
if (!core_fault_injection_enabled())
break;
attr.features.incompat = TEST_FORMAT_INCOMPAT_CHECK;
ret = util_pool_open(&set, fname, MIN_PART, &attr,
NULL, NULL, 0 /* flags */);
UT_ASSERTeq(ret, 0);
size_t fsize = Extend_size;
core_inject_fault_at(PMEM_MALLOC, 2,
"util_poolset_append_new_part");
void *fnptr = util_pool_extend(set, &fsize, MIN_PART);
UT_ASSERTeq(fnptr, NULL);
UT_ASSERTeq(errno, ENOMEM);
util_poolset_close(set, DO_NOT_DELETE_PARTS);
break;
}
}
common_fini();
DONE(NULL);
}
| 5,390 | 23.843318 | 76 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/util_poolset/mocks.h
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2017, Intel Corporation */
#ifndef MOCKS_H
#define MOCKS_H
extern const char *Open_path;
extern os_off_t Fallocate_len;
extern size_t Is_pmem_len;
#endif
| 216 | 17.083333 | 44 |
h
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/util_poolset/mocks_windows.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2020, Intel Corporation */
/*
* mocks_windows.c -- mocked functions used in util_poolset.c
*/
#include "pmem.h"
#include "util.h"
#include "unittest.h"
extern const char *Open_path;
extern os_off_t Fallocate_len;
extern size_t Is_pmem_len;
/*
* os_open -- open mock
*
 * Due to differences in function mocking between platforms, on Windows
 * we wrap os_open(), while on Linux the open() syscall itself is wrapped
 * (see mocks_posix.c).
*/
FUNC_MOCK(os_open, int, const char *path, int flags, ...)
FUNC_MOCK_RUN_DEFAULT {
if (strcmp(Open_path, path) == 0) {
UT_OUT("mocked open: %s", path);
errno = EACCES;
return -1;
}
va_list ap;
va_start(ap, flags);
int mode = va_arg(ap, int);
va_end(ap);
return _FUNC_REAL(os_open)(path, flags, mode);
}
FUNC_MOCK_END
/*
* posix_fallocate -- posix_fallocate mock
*/
FUNC_MOCK(os_posix_fallocate, int, int fd, os_off_t offset, os_off_t len)
FUNC_MOCK_RUN_DEFAULT {
if (Fallocate_len == len) {
UT_OUT("mocked fallocate: %ju", len);
return ENOSPC;
}
return _FUNC_REAL(os_posix_fallocate)(fd, offset, len);
}
FUNC_MOCK_END
/*
* pmem_is_pmem -- pmem_is_pmem mock
*/
FUNC_MOCK(pmem_is_pmem, int, const void *addr, size_t len)
FUNC_MOCK_RUN_DEFAULT {
if (Is_pmem_len == len) {
UT_OUT("mocked pmem_is_pmem: %zu", len);
return 1;
}
return _FUNC_REAL(pmem_is_pmem)(addr, len);
}
FUNC_MOCK_END
/*
* On Windows libpmem is statically linked to util_poolset test, but we
* don't want its ctor to initialize 'out' module.
*/
/*
* libpmem_init -- load-time initialization for libpmem
*
* Called automatically by the run-time loader.
*/
CONSTRUCTOR(libpmem_init)
void
libpmem_init(void)
{
pmem_init();
}
| 1,686 | 19.82716 | 74 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/util_poolset/mocks_posix.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2018, Intel Corporation */
/*
* mocks_posix.c -- mocked functions used in util_poolset.c (Posix version)
*/
#include "unittest.h"
extern const char *Open_path;
extern os_off_t Fallocate_len;
extern size_t Is_pmem_len;
/*
* open -- open mock
*/
FUNC_MOCK(open, int, const char *path, int flags, ...)
FUNC_MOCK_RUN_DEFAULT {
if (strcmp(Open_path, path) == 0) {
UT_OUT("mocked open: %s", path);
errno = EACCES;
return -1;
}
va_list ap;
va_start(ap, flags);
int mode = va_arg(ap, int);
va_end(ap);
return _FUNC_REAL(open)(path, flags, mode);
}
FUNC_MOCK_END
/*
* posix_fallocate -- posix_fallocate mock
*/
FUNC_MOCK(posix_fallocate, int, int fd, os_off_t offset, off_t len)
FUNC_MOCK_RUN_DEFAULT {
if (Fallocate_len == len) {
UT_OUT("mocked fallocate: %ju", len);
return ENOSPC;
}
return _FUNC_REAL(posix_fallocate)(fd, offset, len);
}
FUNC_MOCK_END
/*
* pmem_is_pmem -- pmem_is_pmem mock
*/
FUNC_MOCK(pmem_is_pmem, int, const void *addr, size_t len)
FUNC_MOCK_RUN_DEFAULT {
if (Is_pmem_len == len) {
UT_OUT("mocked pmem_is_pmem: %zu", len);
return 1;
}
return _FUNC_REAL(pmem_is_pmem)(addr, len);
}
FUNC_MOCK_END
| 1,212 | 19.559322 | 75 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_tx_user_data/obj_tx_user_data.c
|
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019-2020, Intel Corporation */
/*
* obj_tx_user_data.c -- unit test for pmemobj_tx_(get/set)_user_data
*/
#include "unittest.h"
#define LAYOUT_NAME "tx_user_data"
#define USER_DATA_V1 (void *) 123456789ULL
#define USER_DATA_V2 (void *) 987654321ULL
/*
* do_tx_set_get_user_data_nested -- do set and verify user data in a tx
*/
static void
do_tx_set_get_user_data_nested(PMEMobjpool *pop)
{
TX_BEGIN(pop) {
pmemobj_tx_set_user_data(USER_DATA_V1);
UT_ASSERTeq(USER_DATA_V1, pmemobj_tx_get_user_data());
TX_BEGIN(pop) {
UT_ASSERTeq(USER_DATA_V1, pmemobj_tx_get_user_data());
pmemobj_tx_set_user_data(USER_DATA_V2);
UT_ASSERTeq(USER_DATA_V2, pmemobj_tx_get_user_data());
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
} TX_ONCOMMIT {
UT_ASSERTeq(USER_DATA_V2, pmemobj_tx_get_user_data());
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
TX_BEGIN(pop) {
UT_ASSERTeq(NULL, pmemobj_tx_get_user_data());
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
}
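/*
 * The semantics demonstrated above: a nested transaction inherits the
 * outer transaction's user data, a value set in the nested scope stays
 * visible after the inner commit, and the data does not leak into a
 * subsequent, unrelated transaction.
 */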
/*
* do_tx_set_get_user_data_abort -- do set and verify user data in a tx after
* tx abort
*/
static void
do_tx_set_get_user_data_abort(PMEMobjpool *pop)
{
TX_BEGIN(pop) {
pmemobj_tx_set_user_data(USER_DATA_V1);
UT_ASSERTeq(USER_DATA_V1, pmemobj_tx_get_user_data());
pmemobj_tx_abort(-1);
} TX_ONCOMMIT {
UT_ASSERT(0);
} TX_ONABORT {
UT_ASSERTeq(USER_DATA_V1, pmemobj_tx_get_user_data());
} TX_END
TX_BEGIN(pop) {
UT_ASSERTeq(NULL, pmemobj_tx_get_user_data());
} TX_ONABORT {
UT_ASSERT(0);
} TX_END
}
int
main(int argc, char *argv[])
{
START(argc, argv, "obj_tx_user_data");
if (argc != 2)
UT_FATAL("usage: %s [file]", argv[0]);
PMEMobjpool *pop;
if ((pop = pmemobj_create(argv[1], LAYOUT_NAME, PMEMOBJ_MIN_POOL,
S_IWUSR | S_IRUSR)) == NULL)
UT_FATAL("!pmemobj_create");
do_tx_set_get_user_data_nested(pop);
do_tx_set_get_user_data_abort(pop);
pmemobj_close(pop);
DONE(NULL);
}
| 1,948 | 20.655556 | 77 |
c
|
null |
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_tx_user_data/TESTS.py
|
#!../env.py
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2019-2020, Intel Corporation
#
import testframework as t
class TEST0(t.BaseTest):
test_type = t.Short
def run(self, ctx):
testfile = t.path.join(ctx.testdir, 'testfile')
ctx.exec('obj_tx_user_data', testfile)
| 302 | 17.9375 | 55 |
py
|