
Merge branch 'disable-hugepages' into 'master'

Disable hugepages

See merge request acs/public/villas/VILLASnode!55
This commit is contained in:
Steffen Vogel 2019-10-26 14:15:06 +02:00
commit a56d875bf0
32 changed files with 119 additions and 128 deletions
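
The signature change that runs through most of these files is the same: the allocation functions now take the memory type as their last parameter, and that parameter defaults to the new memory_default global. A minimal before/after sketch of a call site, based only on the signatures in this diff (error handling omitted):

    /* Before this commit: the memory type came first and was mandatory. */
    void *a = memory_alloc(&memory_hugepage, 4096);

    /* After: the type comes last and defaults to memory_default, which
     * memory_init() points at memory_mmap_hugetlb or memory_mmap. */
    void *b = memory_alloc(4096);                  /* uses memory_default */
    void *c = memory_alloc(4096, &memory_heap);    /* explicit override   */

    memory_free(c);
    memory_free(b);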

View file

@@ -6,8 +6,8 @@ variables:
CRITERION_OPTS: --ignore-warnings
DOCKER_FILE: packaging/docker/Dockerfile.dev
DOCKER_TAG: ${CI_COMMIT_REF_NAME}
DOCKER_IMAGE: villas/node
DOCKER_IMAGE_DEV: villas/node-dev
DOCKER_IMAGE: registry.git.rwth-aachen.de/acs/public/villas/villasnode/node
DOCKER_IMAGE_DEV: registry.git.rwth-aachen.de/acs/public/villas/villasnode/node-dev
MAKE_OPTS: -j32
RELEASEVER: 29
@@ -170,6 +170,8 @@ test:integration:
packaging:docker:
stage: packaging
before_script:
- docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY
script:
- docker build
--build-arg BUILDER_IMAGE=${DOCKER_IMAGE_DEV}:${DOCKER_TAG}

View file

@@ -73,9 +73,9 @@ int memory_lock(size_t lock);
* @retval nullptr If allocation failed.
* @retval <>0 If allocation was successful.
*/
void * memory_alloc(struct memory_type *m, size_t len);
void * memory_alloc(size_t len, struct memory_type *m = memory_default);
void * memory_alloc_aligned(struct memory_type *m, size_t len, size_t alignment);
void * memory_alloc_aligned(size_t len, size_t alignment, struct memory_type *m = memory_default);
int memory_free(void *ptr);

View file

@@ -26,8 +26,8 @@
#include <villas/node.h>
struct memory_ib {
struct ibv_pd *pd;
struct memory_type *parent;
struct ibv_pd *pd;
struct memory_type *parent;
};
struct ibv_mr * memory_ib_get_mr(void *ptr);

View file

@@ -30,8 +30,8 @@
struct memory_type;
struct node;
typedef struct memory_allocation * (*memory_allocator_t)(struct memory_type *mem, size_t len, size_t alignment);
typedef int (*memory_deallocator_t)(struct memory_type *mem, struct memory_allocation * ma);
typedef struct memory_allocation * (*memory_allocator_t)(size_t len, size_t alignment, struct memory_type *mem);
typedef int (*memory_deallocator_t)(struct memory_allocation * ma, struct memory_type *mem);
enum class MemoryFlags {
MMAP = (1 << 0),
@@ -53,12 +53,11 @@ struct memory_type {
};
extern struct memory_type memory_heap;
extern struct memory_type memory_hugepage;
extern struct memory_type memory_mmap;
extern struct memory_type memory_mmap_hugetlb;
extern struct memory_type *memory_default;
struct memory_type * memory_ib(struct node *n, struct memory_type *parent);
struct memory_type * memory_managed(void *ptr, size_t len);
int memory_hugepage_init(int hugepages);
struct memory_type * memory_type_lookup(enum MemoryFlags flags);
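
Because the callback typedefs above were reordered as well, out-of-tree allocators have to follow suit. A sketch of a custom memory_type against the new typedefs; my_alloc and my_free are hypothetical names, not part of this commit:

    static struct memory_allocation * my_alloc(size_t len, size_t alignment, struct memory_type *mem);
    static int my_free(struct memory_allocation *ma, struct memory_type *mem);

    struct memory_type my_type = {
        .name = "my_type",
        .flags = (int) MemoryFlags::MMAP,
        .alignment = 12,
        .alloc = my_alloc,
        .free = my_free
    };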

View file

@@ -193,7 +193,7 @@ struct node_type * node_type(struct node *n)
return n->_vt;
}
struct memory_type * node_memory_type(struct node *n, struct memory_type *parent);
struct memory_type * node_memory_type(struct node *n);
bool node_is_valid_name(const char *name);

View file

@@ -57,7 +57,7 @@ struct pool {
* @retval 0 The pool has been successfully initialized.
* @retval <>0 There was an error during the pool initialization.
*/
int pool_init(struct pool *p, size_t cnt, size_t blocksz, struct memory_type *mem);
int pool_init(struct pool *p, size_t cnt, size_t blocksz, struct memory_type *mem = memory_default);
/** Destroy and release memory used by pool. */
int pool_destroy(struct pool *p);

View file

@@ -41,9 +41,7 @@
#include <villas/common.h>
#include <villas/config.h>
/* Forward declarations */
struct memory_type;
#include <villas/memory_type.h>
typedef char cacheline_pad_t[CACHELINE_SIZE];
@@ -73,7 +71,7 @@ struct queue {
};
/** Initialize MPMC queue */
int queue_init(struct queue *q, size_t size, struct memory_type *mem);
int queue_init(struct queue *q, size_t size, struct memory_type *mem = memory_default);
/** Desroy MPMC queue and release memory */
int queue_destroy(struct queue *q);

View file

@@ -64,7 +64,7 @@ struct queue_signalled {
#define queue_signalled_available(q) queue_available(&((q)->queue))
int queue_signalled_init(struct queue_signalled *qs, size_t size, struct memory_type *mem, enum QueueSignalledMode mode = QueueSignalledMode::AUTO, int flags = 0);
int queue_signalled_init(struct queue_signalled *qs, size_t size, struct memory_type *mem = memory_default, enum QueueSignalledMode mode = QueueSignalledMode::AUTO, int flags = 0);
int queue_signalled_destroy(struct queue_signalled *qs);

View file

@@ -45,8 +45,8 @@ set(LIB_SRC
mapping.cpp
memory.cpp
memory/heap.cpp
memory/hugepage.cpp
memory/managed.cpp
memory/mmap.cpp
node_direction.cpp
node_type.cpp
node.cpp

View file

@@ -44,9 +44,18 @@ int memory_init(int hugepages)
info("Initialize memory sub-system: #hugepages=%d", hugepages);
ret = memory_hugepage_init(hugepages);
if (ret)
return ret;
if (hugepages > 0) {
ret = memory_hugepage_init(hugepages);
if (ret)
return ret;
memory_default = &memory_mmap_hugetlb;
}
else {
memory_default = &memory_mmap;
warning("Hugepage allocator disabled.");
}
size_t lock = kernel_get_hugepage_size() * hugepages;
@@ -105,16 +114,16 @@ int memory_lock(size_t lock)
return 0;
}
void * memory_alloc(struct memory_type *m, size_t len)
void * memory_alloc(size_t len, struct memory_type *m)
{
return memory_alloc_aligned(m, len, sizeof(void *));
return memory_alloc_aligned(len, sizeof(void *), m);
}
void * memory_alloc_aligned(struct memory_type *m, size_t len, size_t alignment)
void * memory_alloc_aligned(size_t len, size_t alignment, struct memory_type *m)
{
struct memory_allocation *ma = m->alloc(m, len, alignment);
struct memory_allocation *ma = m->alloc(len, alignment, m);
if (ma == nullptr) {
warning("Memory allocation of type %s failed. reason=%s", m->name, strerror(errno) );
warning("Memory allocation of type %s failed. reason=%s", m->name, strerror(errno));
return nullptr;
}
@@ -136,16 +145,16 @@ int memory_free(void *ptr)
debug(LOG_MEM | 5, "Releasing %#zx bytes of %s memory: %p", ma->length, ma->type->name, ma->address);
ret = ma->type->free(ma->type, ma);
ret = ma->type->free(ma, ma->type);
if (ret)
return ret;
/* Remove allocation entry */
auto iter = allocations.find(ptr);
if (iter == allocations.end())
if (iter == allocations.end())
return -1;
allocations.erase(iter);
allocations.erase(iter);
free(ma);
return 0;
@@ -156,12 +165,4 @@ struct memory_allocation * memory_get_allocation(void *ptr)
return allocations[ptr];
}
struct memory_type * memory_type_lookup(enum MemoryFlags flags)
{
if ((int) flags & (int) MemoryFlags::HUGEPAGE)
return &memory_hugepage;
else if ((int) flags & (int) MemoryFlags::HEAP)
return &memory_heap;
else
return nullptr;
}
struct memory_type *memory_default = nullptr;
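
The effect of the new branch in memory_init() above is that the process-wide default is chosen exactly once, at startup: a positive hugepage count installs memory_mmap_hugetlb, while zero selects memory_mmap and logs "Hugepage allocator disabled." instead of retrying inside the allocator. A sketch of the two startup paths:

    /* hugepages > 0: memory_default = &memory_mmap_hugetlb */
    int ret = memory_init(DEFAULT_NR_HUGEPAGES);
    if (ret)
        throw RuntimeError("Failed to initialize memory");

    /* memory_init(0) would instead set memory_default = &memory_mmap,
     * emit the warning, and skip hugepage setup entirely. */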

View file

@@ -27,7 +27,7 @@
using namespace villas::utils;
static struct memory_allocation * memory_heap_alloc(struct memory_type *m, size_t len, size_t alignment)
static struct memory_allocation * memory_heap_alloc(size_t len, size_t alignment, struct memory_type *m)
{
int ret;
@@ -51,7 +51,7 @@ static struct memory_allocation * memory_heap_alloc(struct memory_type *m, size_
return ma;
}
static int memory_heap_free(struct memory_type *m, struct memory_allocation *ma)
static int memory_heap_free(struct memory_allocation *ma, struct memory_type *m)
{
free(ma->address);

View file

@@ -40,7 +40,7 @@ struct ibv_mr * memory_ib_get_mr(void *ptr)
return mr;
}
static struct memory_allocation * memory_ib_alloc(struct memory_type *m, size_t len, size_t alignment)
static struct memory_allocation * memory_ib_alloc(size_t len, size_t alignment, struct memory_type *m)
{
struct memory_ib *mi = (struct memory_ib *) m->_vd;
@@ -52,7 +52,7 @@ static struct memory_allocation * memory_ib_alloc(struct memory_type *m, size_t
ma->length = len;
ma->alignment = alignment;
ma->parent = mi->parent->alloc(mi->parent, len + sizeof(struct ibv_mr *), alignment);
ma->parent = mi->parent->alloc(len + sizeof(struct ibv_mr *), alignment, mi->parent);
ma->address = ma->parent->address;
if (!mi->pd)
@@ -60,7 +60,7 @@ static struct memory_allocation * memory_ib_alloc(struct memory_type *m, size_t
ma->ib.mr = ibv_reg_mr(mi->pd, ma->address, ma->length, IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_WRITE);
if (!ma->ib.mr) {
mi->parent->free(mi->parent, ma->parent);
mi->parent->free(ma->parent, mi->parent);
free(ma);
return nullptr;
}
@@ -68,14 +68,14 @@ static struct memory_allocation * memory_ib_alloc(struct memory_type *m, size_t
return ma;
}
static int memory_ib_free(struct memory_type *m, struct memory_allocation *ma)
static int memory_ib_free(struct memory_allocation *ma, struct memory_type *m)
{
int ret;
struct memory_ib *mi = (struct memory_ib *) m->_vd;
ibv_dereg_mr(ma->ib.mr);
ret = mi->parent->free(mi->parent, ma->parent);
ret = mi->parent->free(ma->parent, mi->parent);
if (ret)
return ret;

View file

@@ -36,7 +36,7 @@
using namespace villas::utils;
static struct memory_allocation * memory_managed_alloc(struct memory_type *m, size_t len, size_t alignment)
static struct memory_allocation * memory_managed_alloc(size_t len, size_t alignment, struct memory_type *m)
{
/* Simple first-fit allocation */
struct memory_block *first = (struct memory_block *) m->_vd;
@@ -124,7 +124,7 @@ static struct memory_allocation * memory_managed_alloc(struct memory_type *m, si
return nullptr;
}
static int memory_managed_free(struct memory_type *m, struct memory_allocation *ma)
static int memory_managed_free(struct memory_allocation *ma, struct memory_type *m)
{
struct memory_block *block = ma->managed.block;

View file

@@ -1,4 +1,4 @@
/** Hugepage memory allocator.
/** mmap memory allocator.
*
* @author Steffen Vogel <stvogel@eonerc.rwth-aachen.de>
* @copyright 2014-2019, Institute for Automation of Complex Power Systems, EONERC
@@ -74,11 +74,9 @@ int memory_hugepage_init(int hugepages)
return 0;
}
/** Allocate memory backed by hugepages with malloc() like interface */
static struct memory_allocation * memory_hugepage_alloc(struct memory_type *m, size_t len, size_t alignment)
/** Allocate memory backed by mmaps with malloc() like interface */
static struct memory_allocation * memory_mmap_alloc(size_t len, size_t alignment, struct memory_type *m)
{
static bool use_huge = true;
int flags, fd;
size_t sz;
@@ -86,7 +84,7 @@ static struct memory_allocation * memory_hugepage_alloc(struct memory_type *m, s
if (!ma)
return nullptr;
retry: if (use_huge) {
if (m->flags & (int) MemoryFlags::HUGEPAGE) {
#ifdef __linux__
flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB;
#else
@@ -99,15 +97,19 @@ retry: if (use_huge) {
fd = -1;
#endif
sz = hugepgsz;
info("allocate %#zx bytes mmap_hugetlb memory", len);
}
else {
flags = MAP_PRIVATE | MAP_ANONYMOUS;
fd = -1;
sz = pgsz;
info("allocate %#zx bytes mmap memory", len);
}
/** We must make sure that len is a multiple of the (huge)page size
/** We must make sure that len is a multiple of the page size
*
* See: https://lkml.org/lkml/2014/10/22/925
*/
@@ -117,21 +119,14 @@ retry: if (use_huge) {
ma->address = mmap(nullptr, ma->length, PROT_READ | PROT_WRITE, flags, fd, 0);
if (ma->address == MAP_FAILED) {
if (use_huge) {
warning("Failed to map hugepages, try with normal pages instead!");
use_huge = false;
goto retry;
}
else {
free(ma);
return nullptr;
}
free(ma);
return nullptr;
}
return ma;
}
static int memory_hugepage_free(struct memory_type *m, struct memory_allocation *ma)
static int memory_mmap_free(struct memory_allocation *ma, struct memory_type *m)
{
int ret;
@@ -142,10 +137,18 @@ static int memory_hugepage_free(struct memory_type *m, struct memory_allocation
return 0;
}
struct memory_type memory_hugepage = {
.name = "mmap_hugepages",
struct memory_type memory_mmap = {
.name = "mmap",
.flags = (int) MemoryFlags::MMAP,
.alignment = 12, /* 4k page */
.alloc = memory_mmap_alloc,
.free = memory_mmap_free
};
struct memory_type memory_mmap_hugetlb = {
.name = "mmap",
.flags = (int) MemoryFlags::MMAP | (int) MemoryFlags::HUGEPAGE,
.alignment = 21, /* 2 MiB hugepage */
.alloc = memory_hugepage_alloc,
.free = memory_hugepage_free
.alloc = memory_mmap_alloc,
.free = memory_mmap_free
};
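
Both memory types now share a single code path, memory_mmap_alloc(), which derives MAP_HUGETLB from the type's flags instead of the old static use_huge retry loop. Going by the inline comments, the alignment field encodes log2 of the page size; under that reading the two constants work out as:

    size_t page_sz = (size_t) 1 << memory_mmap.alignment;          /* 1 << 12 = 4 KiB */
    size_t huge_sz = (size_t) 1 << memory_mmap_hugetlb.alignment;  /* 1 << 21 = 2 MiB */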

View file

@@ -542,9 +542,9 @@ int node_netem_fds(struct node *n, int fds[])
return node_type(n)->netem_fds ? node_type(n)->netem_fds(n, fds) : -1;
}
struct memory_type * node_memory_type(struct node *n, struct memory_type *parent)
struct memory_type * node_memory_type(struct node *n)
{
return node_type(n)->memory_type ? node_type(n)->memory_type(n, parent) : &memory_hugepage;
return node_type(n)->memory_type ? node_type(n)->memory_type(n, memory_default) : memory_default;
}
int node_list_parse(struct vlist *list, json_t *cfg, struct vlist *all)

View file

@@ -328,11 +328,11 @@ int iec61850_sv_start(struct node *n)
SVReceiver_addSubscriber(i->in.receiver, i->in.subscriber);
/* Initialize pool and queue to pass samples between threads */
ret = pool_init(&i->in.pool, 1024, SAMPLE_LENGTH(vlist_length(&n->in.signals)), &memory_hugepage);
ret = pool_init(&i->in.pool, 1024, SAMPLE_LENGTH(vlist_length(&n->in.signals)));
if (ret)
return ret;
ret = queue_signalled_init(&i->in.queue, 1024, &memory_hugepage);
ret = queue_signalled_init(&i->in.queue, 1024);
if (ret)
return ret;

View file

@@ -81,11 +81,11 @@ int loopback_start(struct node *n)
vlist_length(&n->out.signals)
);
ret = pool_init(&l->pool, l->queuelen, SAMPLE_LENGTH(len), &memory_hugepage);
ret = pool_init(&l->pool, l->queuelen, SAMPLE_LENGTH(len));
if (ret)
return ret;
return queue_signalled_init(&l->queue, l->queuelen, &memory_hugepage, l->mode);
return queue_signalled_init(&l->queue, l->queuelen, memory_default, l->mode);
}
int loopback_stop(struct node *n)
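
loopback_start() above shows the one wrinkle the default arguments introduce: C++ fills defaults from the right, so a caller that passes a later parameter (here the signalling mode) must still write out memory_default. A sketch against the queue_signalled_init() signature from this merge:

    struct queue_signalled qs;

    /* All defaults: memory_default and QueueSignalledMode::AUTO. */
    queue_signalled_init(&qs, 1024);

    /* Passing a mode forces the memory type to be spelled out again. */
    queue_signalled_init(&qs, 1024, memory_default, QueueSignalledMode::AUTO);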

View file

@@ -361,11 +361,11 @@ int mqtt_start(struct node *n)
if (ret)
return ret;
ret = pool_init(&m->pool, 1024, SAMPLE_LENGTH(vlist_length(&n->in.signals)), &memory_hugepage);
ret = pool_init(&m->pool, 1024, SAMPLE_LENGTH(vlist_length(&n->in.signals)));
if (ret)
return ret;
ret = queue_signalled_init(&m->queue, 1024, &memory_hugepage);
ret = queue_signalled_init(&m->queue, 1024);
if (ret)
return ret;

View file

@@ -86,7 +86,7 @@ static int websocket_connection_init(struct websocket_connection *c)
c->_name = nullptr;
ret = queue_init(&c->queue, DEFAULT_QUEUE_LENGTH, &memory_hugepage);
ret = queue_init(&c->queue, DEFAULT_QUEUE_LENGTH);
if (ret)
return ret;
@@ -391,11 +391,11 @@ int websocket_start(struct node *n)
int ret;
struct websocket *w = (struct websocket *) n->_vd;
ret = pool_init(&w->pool, DEFAULT_WEBSOCKET_QUEUE_LENGTH, SAMPLE_LENGTH(DEFAULT_WEBSOCKET_SAMPLE_LENGTH), &memory_hugepage);
ret = pool_init(&w->pool, DEFAULT_WEBSOCKET_QUEUE_LENGTH, SAMPLE_LENGTH(DEFAULT_WEBSOCKET_SAMPLE_LENGTH));
if (ret)
return ret;
ret = queue_signalled_init(&w->queue, DEFAULT_WEBSOCKET_QUEUE_LENGTH, &memory_hugepage);
ret = queue_signalled_init(&w->queue, DEFAULT_WEBSOCKET_QUEUE_LENGTH);
if (ret)
return ret;

View file

@@ -226,7 +226,7 @@ int path_prepare(struct path *p)
assert(p->state == State::CHECKED);
/* Initialize destinations */
struct memory_type *pool_mt = &memory_hugepage;
struct memory_type *pool_mt = memory_default;
unsigned pool_size = MAX(1UL, vlist_length(&p->destinations)) * p->queuelen;
for (size_t i = 0; i < vlist_length(&p->destinations); i++) {
@@ -236,7 +236,7 @@ int path_prepare(struct path *p)
pool_size = node_type(pd->node)->pool_size;
if (node_type(pd->node)->memory_type)
pool_mt = node_memory_type(pd->node, &memory_hugepage);
pool_mt = node_memory_type(pd->node);
ret = path_destination_init(pd, p->queuelen);
if (ret)

View file

@@ -31,7 +31,7 @@ int path_destination_init(struct path_destination *pd, int queuelen)
{
int ret;
ret = queue_init(&pd->queue, queuelen, &memory_hugepage);
ret = queue_init(&pd->queue, queuelen);
if (ret)
return ret;

View file

@@ -37,7 +37,7 @@ int path_source_init(struct path_source *ps)
if (ps->node->_vt->pool_size)
pool_size = ps->node->_vt->pool_size;
ret = pool_init(&ps->pool, pool_size, SAMPLE_LENGTH(vlist_length(&ps->node->in.signals)), node_memory_type(ps->node, &memory_hugepage));
ret = pool_init(&ps->pool, pool_size, SAMPLE_LENGTH(vlist_length(&ps->node->in.signals)), node_memory_type(ps->node));
if (ret)
return ret;

View file

@@ -36,7 +36,7 @@ int pool_init(struct pool *p, size_t cnt, size_t blocksz, struct memory_type *m)
p->blocksz = p->alignment * CEIL(blocksz, p->alignment);
p->len = cnt * p->blocksz;
void *buffer = memory_alloc_aligned(m, p->len, p->alignment);
void *buffer = memory_alloc_aligned(p->len, p->alignment, m);
if (!buffer)
serror("Failed to allocate memory for memory pool");
else

View file

@@ -48,7 +48,7 @@ int queue_init(struct queue *q, size_t size, struct memory_type *m)
}
q->buffer_mask = size - 1;
struct queue_cell *buffer = (struct queue_cell *) memory_alloc(m, sizeof(struct queue_cell) * size);
struct queue_cell *buffer = (struct queue_cell *) memory_alloc(sizeof(struct queue_cell) * size, m);
if (!buffer)
return -2;

View file

@@ -94,7 +94,7 @@ retry: fd = shm_open(wname, O_RDWR|O_CREAT|O_EXCL, 0600);
close(fd);
manager = memory_managed(base, len);
shared = (struct shmem_shared *) memory_alloc(manager, sizeof(struct shmem_shared));
shared = (struct shmem_shared *) memory_alloc(sizeof(struct shmem_shared), manager);
if (!shared) {
errno = ENOMEM;
return -5;

View file

@@ -201,7 +201,7 @@ check: if (optarg == endptr)
smps = new struct sample*[cnt];
ret = pool_init(&p, 10 * cnt, SAMPLE_LENGTH(DEFAULT_SAMPLE_LENGTH), &memory_hugepage);
ret = pool_init(&p, 10 * cnt, SAMPLE_LENGTH(DEFAULT_SAMPLE_LENGTH));
if (ret)
throw RuntimeError("Failed to initilize memory pool");

View file

@@ -64,13 +64,7 @@ class Node : public Tool {
public:
Node(int argc, char *argv[]) :
Tool(argc, argv, "node")
{
int ret;
ret = memory_init(DEFAULT_NR_HUGEPAGES);
if (ret)
throw RuntimeError("Failed to initialize memory");
}
{ }
protected:
SuperNode sn;

View file

@@ -81,7 +81,7 @@ public:
unsigned vec = LOG2_CEIL(MAX(node->out.vectorize, node->in.vectorize));
unsigned pool_size = node_type(node)->pool_size ? node_type(node)->pool_size : vec;
int ret = pool_init(&pool, pool_size, SAMPLE_LENGTH(DEFAULT_SAMPLE_LENGTH), node_memory_type(node, &memory_hugepage));
int ret = pool_init(&pool, pool_size, SAMPLE_LENGTH(DEFAULT_SAMPLE_LENGTH), node_memory_type(node));
if (ret < 0)
throw RuntimeError("Failed to allocate memory for pool.");
}

View file

@@ -227,7 +227,7 @@ ParameterizedTest(struct param *p, io, lowlevel, .init = init_memory)
struct sample *smps[p->cnt];
struct sample *smpt[p->cnt];
ret = pool_init(&pool, 2 * p->cnt, SAMPLE_LENGTH(NUM_VALUES), &memory_hugepage);
ret = pool_init(&pool, 2 * p->cnt, SAMPLE_LENGTH(NUM_VALUES));
cr_assert_eq(ret, 0);
vlist_init(&signals);
@@ -297,7 +297,7 @@ ParameterizedTest(struct param *p, io, highlevel, .init = init_memory)
struct sample *smps[p->cnt];
struct sample *smpt[p->cnt];
ret = pool_init(&pool, 2 * p->cnt, SAMPLE_LENGTH(NUM_VALUES), &memory_hugepage);
ret = pool_init(&pool, 2 * p->cnt, SAMPLE_LENGTH(NUM_VALUES));
cr_assert_eq(ret, 0);
ret = sample_alloc_many(&pool, smps, p->cnt);

View file

@@ -37,22 +37,20 @@ extern void init_memory();
TheoryDataPoints(memory, aligned) = {
DataPoints(size_t, 1, 32, 55, 1 << 10, PAGESIZE, HUGEPAGESIZE),
DataPoints(size_t, 1, 8, PAGESIZE, PAGESIZE),
DataPoints(enum MemoryFlags, MemoryFlags::HEAP, MemoryFlags::HUGEPAGE, MemoryFlags::HUGEPAGE)
DataPoints(struct memory_type *, &memory_heap, &memory_mmap_hugetlb, &memory_mmap_hugetlb)
};
Theory((size_t len, size_t align, enum MemoryFlags memory_type), memory, aligned, .init = init_memory) {
Theory((size_t len, size_t align, struct memory_type *mt), memory, aligned, .init = init_memory) {
int ret;
void *ptr;
struct memory_type *mt = memory_type_lookup(memory_type);
ptr = memory_alloc_aligned(mt, len, align);
ptr = memory_alloc_aligned(len, align, mt);
cr_assert_not_null(ptr, "Failed to allocate memory");
cr_assert(IS_ALIGNED(ptr, align), "Memory at %p is not alligned to %#zx byte bounary", ptr, align);
#ifndef __APPLE__
if (mt == &memory_hugepage) {
if (mt == &memory_mmap_hugetlb) {
cr_assert(IS_ALIGNED(ptr, HUGEPAGESIZE), "Memory at %p is not alligned to %#x byte bounary", ptr, HUGEPAGESIZE);
}
#endif
@@ -72,26 +70,26 @@ Test(memory, manager, .init = init_memory) {
total_size = 1 << 10;
max_block = total_size - sizeof(struct memory_type) - sizeof(struct memory_block);
p = memory_alloc(&memory_heap, total_size);
p = memory_alloc(total_size, &memory_heap);
cr_assert_not_null(p);
m = memory_managed(p, total_size);
cr_assert_not_null(m);
p1 = memory_alloc(m, 16);
p1 = memory_alloc(16, m);
cr_assert_not_null(p1);
p2 = memory_alloc(m, 32);
p2 = memory_alloc(32, m);
cr_assert_not_null(p2);
ret = memory_free(p1);
cr_assert(ret == 0);
p1 = memory_alloc_aligned(m, 128, 128);
p1 = memory_alloc_aligned(128, 128, m);
cr_assert_not_null(p1);
cr_assert(IS_ALIGNED(p1, 128));
p3 = memory_alloc_aligned(m, 128, 256);
p3 = memory_alloc_aligned(128, 256, m);
cr_assert(p3);
cr_assert(IS_ALIGNED(p3, 256));
@@ -104,7 +102,7 @@ Test(memory, manager, .init = init_memory) {
ret = memory_free(p3);
cr_assert(ret == 0);
p1 = memory_alloc(m, max_block);
p1 = memory_alloc(max_block, m);
cr_assert_not_null(p1);
ret = memory_free(p1);

View file

@@ -34,16 +34,16 @@ struct param {
int thread_count;
int pool_size;
size_t block_size;
enum MemoryFlags memory_type;
struct memory_type *mt;
};
ParameterizedTestParameters(pool, basic)
{
static struct param params[] = {
{ 1, 4096, 150, MemoryFlags::HEAP },
{ 1, 128, 8, MemoryFlags::HUGEPAGE },
{ 1, 4, 8192, MemoryFlags::HUGEPAGE },
{ 1, 1 << 13, 4, MemoryFlags::HUGEPAGE }
{ 1, 4096, 150, &memory_heap },
{ 1, 128, 8, &memory_mmap_hugetlb },
{ 1, 4, 8192, &memory_mmap_hugetlb },
{ 1, 1 << 13, 4, &memory_mmap_hugetlb }
};
return cr_make_param_array(struct param, params, ARRAY_LEN(params));
@@ -56,9 +56,7 @@ ParameterizedTest(struct param *p, pool, basic, .init = init_memory)
void *ptr, *ptrs[p->pool_size];
struct memory_type *mt = memory_type_lookup(p->memory_type);
ret = pool_init(&pool, p->pool_size, p->block_size, mt);
ret = pool_init(&pool, p->pool_size, p->block_size, p->mt);
cr_assert_eq(ret, 0, "Failed to create pool");
ptr = pool_get(&pool);

View file

@@ -54,7 +54,7 @@ struct param {
int thread_count;
bool many;
int batch_size;
enum MemoryFlags memory_type;
struct memory_type *mt;
volatile int start;
struct queue queue;
};
@@ -274,35 +274,35 @@ ParameterizedTestParameters(queue, multi_threaded)
.thread_count = 32,
.many = true,
.batch_size = 10,
.memory_type = MemoryFlags::HEAP
.mt = &memory_heap
}, {
.iter_count = 1 << 8,
.queue_size = 1 << 9,
.thread_count = 4,
.many = true,
.batch_size = 100,
.memory_type = MemoryFlags::HEAP
.mt = &memory_heap
}, {
.iter_count = 1 << 16,
.queue_size = 1 << 14,
.thread_count = 16,
.many = true,
.batch_size = 100,
.memory_type = MemoryFlags::HEAP
.mt = &memory_heap
}, {
.iter_count = 1 << 8,
.queue_size = 1 << 9,
.thread_count = 4,
.many = true,
.batch_size = 10,
.memory_type = MemoryFlags::HEAP
.mt = &memory_heap
}, {
.iter_count = 1 << 16,
.queue_size = 1 << 9,
.thread_count = 16,
.many = false,
.batch_size = 10,
.memory_type = MemoryFlags::HUGEPAGE
.mt = &memory_mmap_hugetlb
}
};
@@ -321,9 +321,7 @@ ParameterizedTest(struct param *p, queue, multi_threaded, .timeout = 20, .init =
p->start = 0;
p->queue.state = ATOMIC_VAR_INIT(State::DESTROYED);
struct memory_type *mt = memory_type_lookup(p->memory_type);
ret = queue_init(&p->queue, p->queue_size, mt);
ret = queue_init(&p->queue, p->queue_size, p->mt);
cr_assert_eq(ret, 0, "Failed to create queue");
uint64_t start_tsc_time, end_tsc_time;