1
0
Fork 0
mirror of https://git.rwth-aachen.de/acs/public/villas/node/ synced 2025-03-09 00:00:00 +01:00

Replaced the lstack-backed memory pool with the new MPMC queue as the underlying data structure

This commit is contained in:
Steffen Vogel 2016-08-28 23:55:51 -04:00
parent d0dc7e216e
commit 2648c1f57c
4 changed files with 41 additions and 266 deletions

View file

@ -1,83 +0,0 @@
/** Lock-less LIFO stack
*
* Based on a single-linked list and double word compare exchange (DWCAS)
* to solve the ABA problem.
*
* Based on https://github.com/skeeto/lstack
*
* @file
* @author Steffen Vogel <stvogel@eonerc.rwth-aachen.de>
* @copyright 2014-2016, Institute for Automation of Complex Power Systems, EONERC
* This file is part of VILLASnode. All Rights Reserved. Proprietary and confidential.
* Unauthorized copying of this file, via any medium is strictly prohibited.
*********************************************************************************/
#ifndef _LSTACK_H_
#define _LSTACK_H_
#include <stdlib.h>
#include <stdint.h>
#include <stdatomic.h>
/** A single stack slot, linking a stored value to its successor. */
struct lstack_node {
void *value; /**< The user-supplied pointer held in this slot */
struct lstack_node *next; /**< Next node further down the (free or used) list */
};
/** List head tagged with an ABA counter.
 *
 * Both fields must be swapped as one unit via a double-word
 * compare-exchange (DWCAS / cmpexch16): the counter is bumped on every
 * update so a recycled node pointer is not mistaken for the old head
 * (the ABA problem). */
struct lstack_head {
uintptr_t aba; /**< Modification counter, incremented on every successful CAS */
struct lstack_node *node; /**< Topmost node, or NULL if the list is empty */
};
/** A lock-free LIFO stack with fixed, pre-allocated capacity. */
struct lstack {
struct lstack_node *node_buffer; /**< Backing storage for all nodes, allocated by lstack_init() */
_Atomic struct lstack_head head; /**< List of stack elements */
_Atomic struct lstack_head free; /**< List of unused elements */
_Atomic size_t size, avail; /**< Approximate used / free slot counts (racy snapshots) */
};
/** Initialize a lock-less stack which can hold up to maxsz values.
 *
 * Note: this function is not thread-safe.
 */
int lstack_init(struct lstack *lstack, size_t maxsz);
/** Pop cnt values from the stack and place them in the array values.
 *
 * @return The number of values actually popped (cnt or 0). */
ssize_t lstack_pop_many(struct lstack *lstack, void *values[], size_t cnt);
/** Push cnt values which are given by the array values to the stack.
 *
 * @return The number of values actually pushed (cnt or 0). */
ssize_t lstack_push_many(struct lstack *lstack, void *values[], size_t cnt);
/** Push a single value onto the stack.
 *
 * @retval 0 The value was pushed.
 * @retval -1 The stack is full.
 */
static inline __attribute__((unused)) int lstack_push(struct lstack *lstack, void *value)
{
	ssize_t pushed = lstack_push_many(lstack, &value, 1);

	return (pushed == 1) ? 0 : -1;
}
/** Pop a single element from the stack and return it.
 *
 * @return The popped value, or NULL if the stack is empty.
 */
static inline __attribute__((unused)) void * lstack_pop(struct lstack *lstack)
{
	/* Fixed: lstack_pop_many() leaves values[] untouched when the stack is
	 * empty, so an uninitialized local was returned (undefined behavior). */
	void *value = NULL;

	lstack_pop_many(lstack, &value, 1);

	return value;
}
/** Return the approximate number of elements currently on the stack.
 *
 * The value is only a snapshot: concurrent pushes/pops may change it
 * before the caller can act on it. */
static inline __attribute__((unused)) size_t lstack_size(struct lstack *lstack)
{
	size_t snapshot = atomic_load(&lstack->size);

	return snapshot;
}
/** Release memory used by the stack.
 *
 * Note: this function is not thread-safe.
 */
static inline __attribute__((unused)) void lstack_destroy(struct lstack *lstack)
{
	free(lstack->node_buffer);
	lstack->node_buffer = NULL; /* defend against double-destroy / stale use */
}
#endif /* _LSTACK_H_ */

View file

@ -12,51 +12,48 @@
#ifndef _POOL_H_
#define _POOL_H_
#include "lstack.h"
struct pool_block;

/** A thread-safe memory pool.
 *
 * NOTE(review): reconstructed — the diff view had interleaved the removed
 * lstack-based members (duplicate blocksz/alignment, `struct lstack stack`)
 * with the new queue-based ones; the duplicates would not compile. */
struct pool {
	void *buffer;		/**< Address of the underlying memory area */
	size_t len;		/**< Length of the underlying memory area */

	size_t blocksz;		/**< Length of a block in bytes */
	size_t alignment;	/**< Alignment of a block in bytes */

	struct mpmc_queue queue; /**< The queue which is used to keep track of free blocks */
};
/* NOTE(review): garbled diff view — declarations from the old lstack-based
 * pool and the new mpmc_queue-based pool are interleaved below. A valid
 * header keeps exactly one pool_init() and one pool_destroy(); confirm
 * against the post-commit tree which pair survives. */
/** Initialize a pool (old signature: caller-provided buffer) */
int pool_init(struct pool *p, size_t blocksz, size_t alignment, void *buf, size_t len);
/* Shorthand for inline helpers that may be unused in some translation units */
#define INLINE static inline __attribute__((unused))
/** Allocate hugepages for the pool and initialize it */
int pool_init_mmap(struct pool *p, size_t blocksz, size_t cnt);
/** Initialize a pool (new signature: allocates from a memtype allocator) */
int pool_init(struct pool *p, size_t blocksz, size_t alignment, const struct memtype *mem);
/** Destroy and release memory used by pool (old inline variant). */
static inline __attribute__((unused)) void pool_destroy(struct pool *p)
{
lstack_destroy(&p->stack);
}
/** Destroy and release memory used by pool (new out-of-line variant). */
int pool_destroy(struct pool *p);
/** Get up to cnt free blocks from the pool and place them in the array blocks.
 *
 * Fixed: the diff view interleaved the removed lstack-based signature/body
 * with the new queue-based one (double signature, unreachable second
 * return); this keeps the post-commit queue version.
 *
 * @return The number of blocks actually retrieved.
 */
INLINE ssize_t pool_get_many(struct pool *p, void *blocks[], size_t cnt)
{
	return mpmc_queue_pull_many(&p->queue, blocks, cnt);
}
/** Release cnt blocks, given by the array blocks, back to the pool.
 *
 * Fixed: the diff view interleaved the removed lstack-based signature/body
 * with the new queue-based one; this keeps the post-commit queue version.
 *
 * @return The number of blocks actually returned to the pool.
 */
INLINE ssize_t pool_put_many(struct pool *p, void *blocks[], size_t cnt)
{
	return mpmc_queue_push_many(&p->queue, blocks, cnt);
}
/** Get a free memory block from pool. */
static inline __attribute__((unused)) void * pool_get(struct pool *p)
INLINE void * pool_get(struct pool *p)
{
return lstack_pop(&p->stack);
void *ptr;
return mpmc_queue_pull(&p->queue, &ptr) == 1 ? ptr : NULL;
}
/** Release a memory block back to the pool. */
static inline __attribute__((unused)) int pool_put(struct pool *p, void *buf)
INLINE int pool_put(struct pool *p, void *buf)
{
return lstack_push(&p->stack, buf);
return mpmc_queue_push(&p->queue, buf);
}
#endif /* _POOL_H_ */

View file

@ -1,130 +0,0 @@
/** Lock-less LIFO stack
*
* Based on a single-linked list and double word compare exchange (DWCAS)
* to solve the ABA problem.
*
* Based on https://github.com/skeeto/lstack
*
* @author Steffen Vogel <stvogel@eonerc.rwth-aachen.de>
* @copyright 2014-2016, Institute for Automation of Complex Power Systems, EONERC
* This file is part of VILLASnode. All Rights Reserved. Proprietary and confidential.
* Unauthorized copying of this file, via any medium is strictly prohibited.
*********************************************************************************/
#include <stdlib.h>
#include <errno.h>
#include "lstack.h"
#include "utils.h"
/** Atomically unlink cnt nodes from the list at head and return the first.
 *
 * Spins on a double-word compare-exchange (node pointer + ABA counter)
 * until the head can be swapped past the cnt-th node in a single step.
 *
 * @return The first of the cnt removed nodes (still chained via ->next),
 *         or NULL if the list holds fewer than cnt nodes.
 *
 * NOTE(review): the traversal dereferences ->next of nodes a concurrent
 * pop may already have removed and recycled; the ABA counter makes the
 * final CAS fail in that case, but the read itself races — confirm this
 * is tolerated on the target platforms.
 */
static struct lstack_node * pop_many(_Atomic struct lstack_head *head, size_t cnt)
{
size_t i;
struct lstack_head next, orig = atomic_load(head);
do {
/* Tag every update so a recycled head pointer differs from the snapshot */
next.aba = orig.aba + 1;
/* Advance next.node by cnt links; it becomes the new list head */
for (i = 0, next.node = orig.node;
i < cnt && next.node;
i++, next.node = next.node->next) {
// debug(3, "pop_many next.node %p next.node->next %p", next.node, next.node->next);
}
/* Ran out of nodes before unlinking cnt of them: fail, list unchanged */
if (i != cnt)
return NULL;
} while (!atomic_compare_exchange_weak(head, &orig, next));
return orig.node;
}
/** Atomically splice a chain of cnt nodes (starting at node) onto head.
 *
 * The chain must already be linked via ->next; only the last node's ->next
 * is rewritten to point at the previous list head.
 *
 * @return Always 0.
 */
static int push_many(_Atomic struct lstack_head *head, struct lstack_node *node, size_t cnt)
{
size_t i;
struct lstack_head next, orig = atomic_load(head);
struct lstack_node *last = node;
/* Find last node which will be pushed */
for (i = 1; i < cnt; i++) {
// debug(3, "push_many node %p node->next %p", last, last->next);
last = last->next;
}
do {
/* Bump the ABA tag and put the chain in front of the current head */
next.aba = orig.aba + 1;
next.node = node;
last->next = orig.node;
} while (!atomic_compare_exchange_weak(head, &orig, next));
return 0;
}
/** Initialize a lock-less stack which can hold up to maxsz values.
 *
 * Note: this function is not thread-safe.
 *
 * @return 0 on success, -1 if maxsz is 0.
 */
int lstack_init(struct lstack *lstack, size_t maxsz)
{
	/* Fixed: maxsz == 0 would index node_buffer[maxsz - 1] out of bounds */
	if (maxsz == 0)
		return -1;

	/* Pre-allocate all nodes. */
	lstack->node_buffer = alloc(maxsz * sizeof(struct lstack_node));

	/* Chain every node into the initial free list */
	for (size_t i = 1; i < maxsz; i++)
		lstack->node_buffer[i-1].next = &lstack->node_buffer[i];
	lstack->node_buffer[maxsz - 1].next = NULL;

	lstack->free = ATOMIC_VAR_INIT(((struct lstack_head) { 0, lstack->node_buffer }));
	lstack->head = ATOMIC_VAR_INIT(((struct lstack_head) { 0, NULL }));
	lstack->size = ATOMIC_VAR_INIT(0);
	lstack->avail = ATOMIC_VAR_INIT(maxsz);

	return 0;
}
/** Push cnt values which are given by the array values to the stack.
 *
 * All-or-nothing: either cnt spare nodes are taken from the free list,
 * filled, and spliced onto the head list, or nothing happens.
 *
 * @return cnt on success, 0 if fewer than cnt free slots were available.
 */
ssize_t lstack_push_many(struct lstack *lstack, void *values[], size_t cnt)
{
size_t i;
struct lstack_node *nodes, *node;
if (cnt == 0)
return 0;
/* Take cnt spare nodes off the free list (fails as a whole if short) */
nodes = pop_many(&lstack->free, cnt);
if (!nodes)
return 0;
atomic_fetch_sub(&lstack->avail, cnt);
/* Store one value per node before publishing the chain */
for (i = 0, node = nodes;
i < cnt && node;
i++, node = node->next)
node->value = values[i];
push_many(&lstack->head, nodes, cnt);
atomic_fetch_add(&lstack->size, cnt);
return i;
}
/** Pop cnt values from the stack and place them in the array values.
 *
 * All-or-nothing: either cnt nodes are taken from the head list, read out,
 * and recycled onto the free list, or nothing happens.
 *
 * @return cnt on success, 0 if the stack holds fewer than cnt values.
 */
ssize_t lstack_pop_many(struct lstack *lstack, void *values[], size_t cnt)
{
size_t i;
struct lstack_node *nodes, *node;
if (cnt == 0)
return 0;
/* Take cnt nodes off the head list (fails as a whole if short) */
nodes = pop_many(&lstack->head, cnt);
if (!nodes)
return 0;
atomic_fetch_sub(&lstack->size, cnt);
/* Copy out one value per node before recycling the chain */
for (i = 0, node = nodes;
i < cnt && node;
i++, node = node->next)
values[i] = node->value;
push_many(&lstack->free, nodes, cnt);
atomic_fetch_add(&lstack->avail, cnt);
return i;
}

View file

@ -6,52 +6,43 @@
* Unauthorized copying of this file, via any medium is strictly prohibited.
*/
#include <sys/mman.h>
#include "utils.h"
#include "pool.h"
#include "memory.h"
#include "kernel/kernel.h"
int pool_init_mmap(struct pool *p, size_t blocksz, size_t cnt)
int pool_init(struct pool *p, size_t blocksz, size_t cnt, const struct memtype *m)
{
void *addr;
int flags;
size_t len, alignedsz, align;
int flags, prot;
size_t len, alignedsz, alignment;
align = kernel_get_cacheline_size();
alignedsz = blocksz * CEIL(blocksz, align);
/* Make sure that we use a block size that is aligned to the size of a cache line */
alignment = kernel_get_cacheline_size();
alignedsz = blocksz * CEIL(blocksz, );
len = cnt * alignedsz;
debug(DBG_POOL | 4, "Allocating %#zx bytes for memory pool", len);
flags = MAP_LOCKED | MAP_PRIVATE | MAP_ANONYMOUS; // MAP_HUGETLB
/** @todo Use hugepages */
/* addr is allways aligned to pagesize boundary */
addr = mmap(NULL, len, PROT_READ | PROT_WRITE, flags, -1, 0);
if (addr == MAP_FAILED)
serror("Failed to allocate memory for sample pool");
return pool_init(p, blocksz, align, addr, len);
}
int pool_init(struct pool *p, size_t blocksz, size_t align, void *buf, size_t len)
{
size_t alignedsz, cnt;
assert(IS_ALIGNED(buf, align)); /* buf has to be aligned */
addr = memory_alloc_align(m, len, aligment);
if (!addr)
serror("Failed to allocate memory for memory pool");
else
debug(DBG_POOL | 4, "Allocated %#zx bytes for memory pool", len);
p->blocksz = blocksz;
p->alignment = align;
p->alignment = alignment;
alignedsz = blocksz * CEIL(blocksz, align);
cnt = len / alignedsz;
lstack_init(&p->stack, cnt);
mpmc_queue_init(&p->queue, cnt, m);
for (int i = 0; i < cnt; i++)
lstack_push(&p->stack, buf + i * alignedsz);
return 0;
}
/** Destroy the pool and release the backing memory area.
 *
 * Note: this function is not thread-safe; no other thread may use the
 * pool concurrently.
 *
 * @return 0 on success.
 */
int pool_destroy(struct pool *p)
{
	mpmc_queue_destroy(&p->queue);
	memory_dealloc(p->buffer, p->len);

	/* Fixed: non-void function was missing a return value (UB for callers
	 * that inspect the result). */
	return 0;
}