mirror of https://git.rwth-aachen.de/acs/public/villas/node/
synced 2025-03-09 00:00:00 +01:00

memory: refactored memory subsystem

This commit is contained in:
parent 93750a2bcd
commit bb70be0b2c

35 changed files with 496 additions and 349 deletions
@@ -26,35 +26,14 @@
 #include <stddef.h>
-#include <stdint.h>
+
+#include <villas/memory_type.h>
 
 #ifdef __cplusplus
 extern "C" {
 #endif
 
-#define HUGEPAGESIZE (1 << 21)
-
-struct memtype;
-
-typedef void *(*memzone_allocator_t)(struct memtype *mem, size_t len, size_t alignment);
-typedef int (*memzone_deallocator_t)(struct memtype *mem, void *ptr, size_t len);
-
-enum memtype_flags {
-	MEMORY_MMAP = (1 << 0),
-	MEMORY_DMA = (1 << 1),
-	MEMORY_HUGEPAGE = (1 << 2),
-	MEMORY_HEAP = (1 << 3)
-};
-
-struct memtype {
-	const char *name;
-	int flags;
-
-	size_t alignment;
-
-	memzone_allocator_t alloc;
-	memzone_deallocator_t free;
-
-	void *_vd; /**< Virtual data for possible state */
-};
+/* Forward declarations */
+struct node;
 
 enum memblock_flags {
 	MEMBLOCK_USED = 1,
@@ -71,7 +50,7 @@ struct memblock {
 
 /** @todo Unused for now */
 struct memzone {
-	struct memtype *const type;
+	struct memory_type *const type;
 
 	void *addr;
 	uintptr_t physaddr;
@@ -86,16 +65,11 @@ int memory_init(int hugepages);
  * @retval NULL If allocation failed.
  * @retval <>0 If allocation was successful.
  */
-void * memory_alloc(struct memtype *m, size_t len);
+void * memory_alloc(struct memory_type *m, size_t len);
 
-void * memory_alloc_aligned(struct memtype *m, size_t len, size_t alignment);
+void * memory_alloc_aligned(struct memory_type *m, size_t len, size_t alignment);
 
-int memory_free(struct memtype *m, void *ptr, size_t len);
-
-struct memtype * memtype_managed_init(void *ptr, size_t len);
-
-extern struct memtype memtype_heap;
-extern struct memtype memtype_hugepage;
+int memory_free(struct memory_type *m, void *ptr, size_t len);
 
 #ifdef __cplusplus
 }
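The public allocation API keeps its shape across the refactoring; only the type changes from struct memtype to struct memory_type. A minimal usage sketch against the refactored header (illustrative only; the hugepage count passed to memory_init() is an arbitrary assumption):

	#include <villas/memory.h>

	int example(void)
	{
		/* One-time setup: reserve a number of hugepages (100 is arbitrary) */
		if (memory_init(100))
			return -1;

		/* Allocate 4 KiB from the plain heap allocator */
		void *buf = memory_alloc(&memory_type_heap, 4096);
		if (!buf)
			return -1;

		/* The allocator wants the length again on release */
		return memory_free(&memory_type_heap, buf, 4096);
	}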
69	include/villas/memory_type.h	Normal file
@@ -0,0 +1,69 @@
+/** Memory allocators.
+ *
+ * @file
+ * @author Steffen Vogel <stvogel@eonerc.rwth-aachen.de>
+ * @copyright 2017, Institute for Automation of Complex Power Systems, EONERC
+ * @license GNU General Public License (version 3)
+ *
+ * VILLASnode
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *********************************************************************************/
+
+#pragma once
+
+#include <stddef.h>
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct memory_type;
+
+typedef void *(*memzone_allocator_t)(struct memory_type *mem, size_t len, size_t alignment);
+typedef int (*memzone_deallocator_t)(struct memory_type *mem, void *ptr, size_t len);
+
+enum memory_type_flags {
+	MEMORY_MMAP = (1 << 0),
+	MEMORY_DMA = (1 << 1),
+	MEMORY_HUGEPAGE = (1 << 2),
+	MEMORY_HEAP = (1 << 3)
+};
+
+struct memory_type {
+	const char *name;
+	int flags;
+
+	size_t alignment;
+
+	memzone_allocator_t alloc;
+	memzone_deallocator_t free;
+
+	void *_vd; /**< Virtual data for internal state */
+};
+
+extern struct memory_type memory_type_heap;
+extern struct memory_type memory_hugepage;
+
+struct ibv_mr * memory_type_ib_mr(void *ptr);
+
+struct node;
+
+struct memory_type * memory_ib(struct node *n, struct memory_type *parent);
+struct memory_type * memory_managed(void *ptr, size_t len);
+
+#ifdef __cplusplus
+}
+#endif
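struct memory_type is now a small vtable: a name, flags, a minimum alignment, and alloc/free callbacks, with _vd holding per-instance state. New allocators are plain global instances of this struct. A hedged sketch of a custom type (a tracing wrapper; all trace_* names are invented for illustration):

	#include <stdio.h>
	#include <stdlib.h>

	#include <villas/memory_type.h>

	static void * trace_alloc(struct memory_type *m, size_t len, size_t alignment)
	{
		void *ptr;

		if (alignment < sizeof(void *))
			alignment = sizeof(void *);

		if (posix_memalign(&ptr, alignment, len))
			return NULL;

		fprintf(stderr, "%s: alloc %zu bytes\n", m->name, len);
		return ptr;
	}

	static int trace_free(struct memory_type *m, void *ptr, size_t len)
	{
		fprintf(stderr, "%s: free %zu bytes\n", m->name, len);
		free(ptr);

		return 0;
	}

	struct memory_type memory_type_trace = {
		.name = "trace",
		.flags = MEMORY_HEAP,
		.alignment = 1,
		.alloc = trace_alloc,
		.free = trace_free
	};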
@@ -162,7 +162,7 @@ int node_write(struct node *n, struct sample *smps[], unsigned cnt);
 
 int node_fd(struct node *n);
 
-struct memtype * node_memtype(struct node *n, struct memtype *parent);
+struct memory_type * node_memory_type(struct node *n, struct memory_type *parent);
 
 #ifdef __cplusplus
 }
@@ -164,7 +164,7 @@ struct node_type {
 	int (*fd)(struct node *n);
 
 	/** */
-	struct memtype * (*memtype)(struct node *n, struct memtype *parent);
+	struct memory_type * (*memory_type)(struct node *n, struct memory_type *parent);
 };
 
 /** Initialize all registered node type subsystems.
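A node type can now advertise its preferred memory type through the memory_type callback in struct node_type; node_memory_type() (see the node.h hunk above) falls back to memory_hugepage when a node type provides none. A hedged sketch of wiring it up (the example_* names are invented):

	#include <villas/node.h>

	/* Hypothetical node type that pins its sample pools to hugepages.
	 * Leaving .memory_type unset makes node_memory_type() fall back to
	 * memory_hugepage anyway; the callback exists for types like the
	 * InfiniBand node that need specially registered memory. */
	static struct memory_type * example_memory_type(struct node *n, struct memory_type *parent)
	{
		return &memory_hugepage;
	}

	static struct node_type example_vt = {
		/* ... read/write/fd callbacks ... */
		.memory_type = example_memory_type
	};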
@@ -39,7 +39,7 @@ extern "C" {
 /** A thread-safe memory pool */
 struct pool {
 	off_t buffer_off; /**< Offset from the struct address to the underlying memory area */
-	struct memtype *mem;
+	struct memory_type *mem;
 
 	enum state state;
 
@@ -62,7 +62,7 @@ struct pool {
  * @retval 0 The pool has been successfully initialized.
  * @retval <>0 There was an error during the pool initialization.
  */
-int pool_init(struct pool *p, size_t cnt, size_t blocksz, struct memtype *mem);
+int pool_init(struct pool *p, size_t cnt, size_t blocksz, struct memory_type *mem);
 
 /** Destroy and release memory used by pool. */
 int pool_destroy(struct pool *p);
@ -45,7 +45,7 @@ extern "C"{
|
|||
#endif
|
||||
|
||||
/* Forward declarations */
|
||||
struct memtype;
|
||||
struct memory_type;
|
||||
|
||||
#define CACHELINE_SIZE 64
|
||||
typedef char cacheline_pad_t[CACHELINE_SIZE];
|
||||
|
@ -61,7 +61,7 @@ struct queue {
|
|||
|
||||
atomic_state state;
|
||||
|
||||
struct memtype *mem;
|
||||
struct memory_type *mem;
|
||||
size_t buffer_mask;
|
||||
off_t buffer_off; /**< Relative pointer to struct queue_cell[] */
|
||||
|
||||
|
@ -77,7 +77,7 @@ struct queue {
|
|||
};
|
||||
|
||||
/** Initialize MPMC queue */
|
||||
int queue_init(struct queue *q, size_t size, struct memtype *mem);
|
||||
int queue_init(struct queue *q, size_t size, struct memory_type *mem);
|
||||
|
||||
/** Desroy MPMC queue and release memory */
|
||||
int queue_destroy(struct queue *q);
|
||||
|
|
|
@@ -68,7 +68,7 @@ struct queue_signalled {
 
 #define queue_signalled_available(q) queue_available(&((q)->queue))
 
-int queue_signalled_init(struct queue_signalled *qs, size_t size, struct memtype *mem, int flags);
+int queue_signalled_init(struct queue_signalled *qs, size_t size, struct memory_type *mem, int flags);
 
 int queue_signalled_destroy(struct queue_signalled *qs);
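Pools and queues take the backing memory type as an explicit parameter, so each data structure picks its own backing store. A minimal sketch under the new names (sizes are arbitrary; queue_init() asserts the queue starts out destroyed, cf. the lib/queue.c hunk further below):

	#include <villas/memory.h>
	#include <villas/pool.h>
	#include <villas/queue.h>

	int setup(struct pool *p, struct queue *q)
	{
		int ret;

		/* Sample pool backed by 2 MiB hugepages */
		ret = pool_init(p, 1024, 256, &memory_hugepage);
		if (ret)
			return ret;

		/* MPMC queue on the ordinary heap; *q must be in the
		 * STATE_DESTROYED state before initialization */
		return queue_init(q, 1024, &memory_type_heap);
	}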
@@ -26,9 +26,10 @@ LIB = $(BUILDDIR)/$(LIB_NAME).so.$(LIB_ABI_VERSION)
 
 # Object files for libvillas
 LIB_SRCS += $(addprefix lib/kernel/, kernel.c rt.c) \
+            $(addprefix lib/memory/, heap.c hugepage.c ib.c managed.c) \
             $(addprefix lib/, sample.c path.c node.c hook.c log.c log_config.c \
                utils.c super_node.c hist.c timing.c pool.c list.c queue.c \
-               queue_signalled.c memory.c memory_ib.c advio.c plugin.c node_type.c stats.c \
+               queue_signalled.c memory.c advio.c plugin.c node_type.c stats.c \
                mapping.c shmem.c config_helper.c crypt.c compat.c \
                log_helper.c task.c buffer.c table.c bitset.c signal.c \
                hash_table.c \
@@ -272,7 +272,7 @@ int api_init(struct api *a, struct super_node *sn)
 	if (ret)
 		return ret;
 
-	ret = queue_signalled_init(&a->pending, 1024, &memtype_heap, 0);
+	ret = queue_signalled_init(&a->pending, 1024, &memory_type_heap, 0);
 	if (ret)
 		return ret;
 
@@ -40,11 +40,11 @@ int api_session_init(struct api_session *s, enum api_mode m)
 	if (ret)
 		return ret;
 
-	ret = queue_init(&s->request.queue, 128, &memtype_heap);
+	ret = queue_init(&s->request.queue, 128, &memory_type_heap);
 	if (ret)
 		return ret;
 
-	ret = queue_init(&s->response.queue, 128, &memtype_heap);
+	ret = queue_init(&s->response.queue, 128, &memory_type_heap);
 	if (ret)
 		return ret;
 
245	lib/memory.c
@@ -25,21 +25,13 @@
 #include <errno.h>
 #include <strings.h>
 
-#include <sys/mman.h>
-#include <sys/time.h>
-#include <sys/resource.h>
-#include <sys/types.h>
-
-/* Required to allocate hugepages on Apple OS X */
-#ifdef __MACH__
-  #include <mach/vm_statistics.h>
-#elif defined(__linux__)
-  #include <villas/kernel/kernel.h>
-#endif
-
 #include <villas/log.h>
 #include <villas/memory.h>
 #include <villas/utils.h>
+#include <villas/kernel/kernel.h>
 
 int memory_init(int hugepages)
 {
@@ -77,7 +69,7 @@ int memory_init(int hugepages)
 	return 0;
 }
 
-void * memory_alloc(struct memtype *m, size_t len)
+void * memory_alloc(struct memory_type *m, size_t len)
 {
 	void *ptr = m->alloc(m, len, sizeof(void *));
 
@@ -86,7 +78,7 @@ void * memory_alloc(struct memtype *m, size_t len)
 	return ptr;
 }
 
-void * memory_alloc_aligned(struct memtype *m, size_t len, size_t alignment)
+void * memory_alloc_aligned(struct memory_type *m, size_t len, size_t alignment)
 {
 	void *ptr = m->alloc(m, len, alignment);
 
@@ -95,238 +87,9 @@ void * memory_alloc_aligned(struct memtype *m, size_t len, size_t alignment)
 	return ptr;
 }
 
-int memory_free(struct memtype *m, void *ptr, size_t len)
+int memory_free(struct memory_type *m, void *ptr, size_t len)
 {
 	debug(LOG_MEM | 5, "Releasing %#zx bytes of %s memory", len, m->name);
 
 	return m->free(m, ptr, len);
 }
-
-static void * memory_heap_alloc(struct memtype *m, size_t len, size_t alignment)
-{
-	void *ptr;
-	int ret;
-
-	if (alignment < sizeof(void *))
-		alignment = sizeof(void *);
-
-	ret = posix_memalign(&ptr, alignment, len);
-
-	return ret ? NULL : ptr;
-}
-
-int memory_heap_free(struct memtype *m, void *ptr, size_t len)
-{
-	free(ptr);
-
-	return 0;
-}
-
-/** Allocate memory backed by hugepages with malloc() like interface */
-static void * memory_hugepage_alloc(struct memtype *m, size_t len, size_t alignment)
-{
-	void *ret;
-	int prot = PROT_READ | PROT_WRITE;
-	int flags = MAP_PRIVATE | MAP_ANONYMOUS;
-
-#ifdef __MACH__
-	flags |= VM_FLAGS_SUPERPAGE_SIZE_2MB;
-#elif defined(__linux__)
-	flags |= MAP_HUGETLB;
-
-	if (getuid() == 0)
-		flags |= MAP_LOCKED;
-#endif
-
-	ret = mmap(NULL, len, prot, flags, -1, 0);
-	if (ret == MAP_FAILED)
-		return NULL;
-
-	return ret;
-}
-
-static int memory_hugepage_free(struct memtype *m, void *ptr, size_t len)
-{
-	/** We must make sure that len is a multiple of the hugepage size
-	 *
-	 * See: https://lkml.org/lkml/2014/10/22/925
-	 */
-	len = ALIGN(len, HUGEPAGESIZE);
-
-	return munmap(ptr, len);
-}
-
-void* memory_managed_alloc(struct memtype *m, size_t len, size_t alignment)
-{
-	/* Simple first-fit allocation */
-	struct memblock *first = (struct memblock *) m->_vd;
-	struct memblock *block;
-
-	for (block = first; block != NULL; block = block->next) {
-		if (block->flags & MEMBLOCK_USED)
-			continue;
-
-		char* cptr = (char *) block + sizeof(struct memblock);
-		size_t avail = block->len;
-		uintptr_t uptr = (uintptr_t) cptr;
-
-		/* Check alignment first; leave a gap at start of block to assure
-		 * alignment if necessary */
-		uintptr_t rem = uptr % alignment;
-		uintptr_t gap = 0;
-		if (rem != 0) {
-			gap = alignment - rem;
-			if (gap > avail)
-				continue; /* Next aligned address isn't in this block anymore */
-
-			cptr += gap;
-			avail -= gap;
-		}
-
-		if (avail >= len) {
-			if (gap > sizeof(struct memblock)) {
-				/* The alignment gap is big enough to fit another block.
-				 * The original block descriptor is already at the correct
-				 * position, so we just change its len and create a new block
-				 * descriptor for the actual block we're handling. */
-				block->len = gap - sizeof(struct memblock);
-				struct memblock *newblock = (struct memblock *) (cptr - sizeof(struct memblock));
-				newblock->prev = block;
-				newblock->next = block->next;
-				block->next = newblock;
-				newblock->flags = 0;
-				newblock->len = len;
-				block = newblock;
-			}
-			else {
-				/* The gap is too small to fit another block descriptor, so we
-				 * must account for the gap length in the block length. */
-				block->len = len + gap;
-			}
-
-			if (avail > len + sizeof(struct memblock)) {
-				/* Imperfect fit, so create another block for the remaining part */
-				struct memblock *newblock = (struct memblock *) (cptr + len);
-				newblock->prev = block;
-				newblock->next = block->next;
-				block->next = newblock;
-				if (newblock->next)
-					newblock->next->prev = newblock;
-				newblock->flags = 0;
-				newblock->len = avail - len - sizeof(struct memblock);
-			}
-			else {
-				/* If this block was larger than the requested length, but only
-				 * by less than sizeof(struct memblock), we may have wasted
-				 * memory by previous assignments to block->len. */
-				block->len = avail;
-			}
-
-			block->flags |= MEMBLOCK_USED;
-
-			return (void *) cptr;
-		}
-	}
-
-	/* No suitable block found */
-	return NULL;
-}
-
-int memory_managed_free(struct memtype *m, void *ptr, size_t len)
-{
-	struct memblock *first = (struct memblock *) m->_vd;
-	struct memblock *block;
-	char *cptr = ptr;
-
-	for (block = first; block != NULL; block = block->next) {
-		if (!(block->flags & MEMBLOCK_USED))
-			continue;
-
-		/* Since we may waste some memory at the start of a block to ensure
-		 * alignment, ptr may not actually be the start of the block */
-		if ((char *) block + sizeof(struct memblock) <= cptr &&
-		    cptr < (char *) block + sizeof(struct memblock) + block->len) {
-			/* Try to merge it with neighbouring free blocks */
-			if (block->prev && !(block->prev->flags & MEMBLOCK_USED) &&
-			    block->next && !(block->next->flags & MEMBLOCK_USED)) {
-				/* Special case first: both previous and next block are unused */
-				block->prev->len += block->len + block->next->len + 2 * sizeof(struct memblock);
-				block->prev->next = block->next->next;
-				if (block->next->next)
-					block->next->next->prev = block->prev;
-			}
-			else if (block->prev && !(block->prev->flags & MEMBLOCK_USED)) {
-				block->prev->len += block->len + sizeof(struct memblock);
-				block->prev->next = block->next;
-				if (block->next)
-					block->next->prev = block->prev;
-			}
-			else if (block->next && !(block->next->flags & MEMBLOCK_USED)) {
-				block->len += block->next->len + sizeof(struct memblock);
-				block->next = block->next->next;
-				if (block->next)
-					block->next->prev = block;
-			}
-			else {
-				/* no neighbouring free block, so just mark it as free */
-				block->flags &= ~MEMBLOCK_USED;
-			}
-
-			return 0;
-		}
-	}
-
-	return -1;
-}
-
-struct memtype * memtype_managed_init(void *ptr, size_t len)
-{
-	struct memtype *mt = ptr;
-	struct memblock *mb;
-	char *cptr = ptr;
-
-	if (len < sizeof(struct memtype) + sizeof(struct memblock)) {
-		info("memtype_managed_init: passed region too small");
-		return NULL;
-	}
-
-	/* Initialize memtype */
-	mt->name = "managed";
-	mt->flags = 0;
-	mt->alloc = memory_managed_alloc;
-	mt->free = memory_managed_free;
-	mt->alignment = 1;
-
-	cptr += ALIGN(sizeof(struct memtype), sizeof(void *));
-
-	/* Initialize first free memblock */
-	mb = (struct memblock *) cptr;
-	mb->prev = NULL;
-	mb->next = NULL;
-	mb->flags = 0;
-
-	cptr += ALIGN(sizeof(struct memblock), sizeof(void *));
-
-	mb->len = len - (cptr - (char *) ptr);
-
-	mt->_vd = (void *) mb;
-
-	return mt;
-}
-
-/* List of available memory types */
-struct memtype memtype_heap = {
-	.name = "heap",
-	.flags = MEMORY_HEAP,
-	.alloc = memory_heap_alloc,
-	.free = memory_heap_free,
-	.alignment = 1
-};
-
-struct memtype memtype_hugepage = {
-	.name = "mmap_hugepages",
-	.flags = MEMORY_MMAP | MEMORY_HUGEPAGE,
-	.alloc = memory_hugepage_alloc,
-	.free = memory_hugepage_free,
-	.alignment = 21 /* 2 MiB hugepage */
-};
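memory_alloc() passes sizeof(void *) as the alignment to the type's alloc callback, so plain allocations are merely pointer-aligned; callers with stricter needs use memory_alloc_aligned(). The distinction in two lines (sizes arbitrary):

	/* Pointer-aligned (alignment == sizeof(void *)) */
	void *a = memory_alloc(&memory_type_heap, 256);

	/* Cache-line aligned, e.g. for per-thread queue buffers */
	void *b = memory_alloc_aligned(&memory_type_heap, 256, 64);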
54	lib/memory/heap.c	Normal file
@@ -0,0 +1,54 @@
+/** Memory allocators.
+ *
+ * @author Steffen Vogel <stvogel@eonerc.rwth-aachen.de>
+ * @copyright 2017, Institute for Automation of Complex Power Systems, EONERC
+ * @license GNU General Public License (version 3)
+ *
+ * VILLASnode
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *********************************************************************************/
+
+#include <stdlib.h>
+
+#include <villas/memory_type.h>
+
+static void * memory_heap_alloc(struct memory_type *m, size_t len, size_t alignment)
+{
+	void *ptr;
+	int ret;
+
+	if (alignment < sizeof(void *))
+		alignment = sizeof(void *);
+
+	ret = posix_memalign(&ptr, alignment, len);
+
+	return ret ? NULL : ptr;
+}
+
+int memory_heap_free(struct memory_type *m, void *ptr, size_t len)
+{
+	free(ptr);
+
+	return 0;
+}
+
+/* List of available memory types */
+struct memory_type memory_type_heap = {
+	.name = "heap",
+	.flags = MEMORY_HEAP,
+	.alloc = memory_heap_alloc,
+	.free = memory_heap_free,
+	.alignment = 1
+};
86	lib/memory/hugepage.c	Normal file
@@ -0,0 +1,86 @@
+/** Hugepage memory allocator.
+ *
+ * @author Steffen Vogel <stvogel@eonerc.rwth-aachen.de>
+ * @copyright 2017, Institute for Automation of Complex Power Systems, EONERC
+ * @license GNU General Public License (version 3)
+ *
+ * VILLASnode
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *********************************************************************************/
+
+#include <stdlib.h>
+#include <unistd.h>
+#include <errno.h>
+#include <strings.h>
+
+#include <sys/mman.h>
+#include <sys/time.h>
+#include <sys/resource.h>
+#include <sys/types.h>
+
+/* Required to allocate hugepages on Apple OS X */
+#ifdef __MACH__
+  #include <mach/vm_statistics.h>
+#elif defined(__linux__)
+  #include <villas/kernel/kernel.h>
+#endif
+
+#include <villas/log.h>
+#include <villas/memory_type.h>
+#include <villas/utils.h>
+
+#define HUGEPAGESIZE (1 << 21) /* 2 MiB */
+
+/** Allocate memory backed by hugepages with malloc() like interface */
+static void * memory_hugepage_alloc(struct memory_type *m, size_t len, size_t alignment)
+{
+	void *ret;
+	int prot = PROT_READ | PROT_WRITE;
+	int flags = MAP_PRIVATE | MAP_ANONYMOUS;
+
+#ifdef __MACH__
+	flags |= VM_FLAGS_SUPERPAGE_SIZE_2MB;
+#elif defined(__linux__)
+	flags |= MAP_HUGETLB;
+
+	if (getuid() == 0)
+		flags |= MAP_LOCKED;
+#endif
+
+	ret = mmap(NULL, len, prot, flags, -1, 0);
+	if (ret == MAP_FAILED)
+		return NULL;
+
+	return ret;
+}
+
+static int memory_hugepage_free(struct memory_type *m, void *ptr, size_t len)
+{
+	/** We must make sure that len is a multiple of the hugepage size
+	 *
+	 * See: https://lkml.org/lkml/2014/10/22/925
+	 */
+	len = ALIGN(len, HUGEPAGESIZE);
+
+	return munmap(ptr, len);
+}
+
+struct memory_type memory_hugepage = {
+	.name = "mmap_hugepages",
+	.flags = MEMORY_MMAP | MEMORY_HUGEPAGE,
+	.alloc = memory_hugepage_alloc,
+	.free = memory_hugepage_free,
+	.alignment = 21 /* 2 MiB hugepage */
+};
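memory_hugepage_free() rounds len up to a whole number of hugepages before calling munmap(), because mmap() reserved whole hugepages in the first place and the kernel unmaps hugetlb mappings only in hugepage-sized units (see the LKML link in the comment). Assuming the usual round-up semantics of the ALIGN() macro from villas/utils.h, the arithmetic works out as follows:

	/* Sketch only; assumes ALIGN(x, a) == (((x) + (a) - 1) & ~((a) - 1)) */
	#define HUGEPAGESIZE (1 << 21)	/* 2 MiB */

	size_t len = 3000000;	/* length the caller allocated */
	size_t mapped = (len + HUGEPAGESIZE - 1) & ~((size_t) HUGEPAGESIZE - 1);
	/* mapped == 4194304 (4 MiB): two whole hugepages are released by munmap() */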
@@ -21,9 +21,14 @@
 *********************************************************************************/
 
 #include <villas/nodes/infiniband.h>
-#include <villas/memory_ib.h>
+#include <villas/memory.h>
 #include <rdma/rdma_cma.h>
 
+struct memory_ib {
+	struct ibv_pd *pd;
+	struct memory_type *parent;
+};
+
 struct ibv_mr * memory_ib_mr(void *ptr)
 {
 	struct ibv_mr *mr = (struct ibv_mr *) ptr;
@@ -31,7 +36,7 @@ struct ibv_mr * memory_ib_mr(void *ptr)
 	return (mr - 1);
 }
 
-void * memory_ib_alloc(struct memtype *m, size_t len, size_t alignment)
+void * memory_ib_alloc(struct memory_type *m, size_t len, size_t alignment)
 {
 	struct memory_ib *mi = (struct memory_ib *) m->_vd;
 
@@ -47,7 +52,7 @@ void * memory_ib_alloc(struct memtype *m, size_t len, size_t alignment)
 	return ptr;
 }
 
-int memory_ib_free(struct memtype *m, void *ptr, size_t len)
+int memory_ib_free(struct memory_type *m, void *ptr, size_t len)
 {
 	struct memory_ib *mi = (struct memory_ib *) m->_vd;
 	struct ibv_mr *mr = memory_ib_mr(ptr);
@@ -62,10 +67,10 @@ int memory_ib_free(struct memtype *m, void *ptr, size_t len)
 	return 0;
 }
 
-struct memtype * ib_memtype(struct node *n, struct memtype *parent)
+struct memory_type * memory_ib(struct node *n, struct memory_type *parent)
 {
 	struct infiniband *i = (struct infiniband *) n->_vd;
-	struct memtype *mt = malloc(sizeof(struct memtype));
+	struct memory_type *mt = malloc(sizeof(struct memory_type));
 
 	mt->name = "ib";
 	mt->flags = 0;
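The (mr - 1) in memory_ib_mr() implies a header layout: the allocator reserves room for the registration record directly in front of the buffer it hands out, so the free path can recover the record from the bare pointer. A speculative sketch of that layout (the alloc-side details are outside the visible hunks):

	/*  low addresses                        high addresses
	 *  +--------------------+---------------------------------+
	 *  | struct ibv_mr      | user buffer                     |
	 *  +--------------------+---------------------------------+
	 *                       ^-- pointer returned by memory_ib_alloc()
	 */
	struct ibv_mr * memory_ib_mr_sketch(void *ptr)
	{
		return ((struct ibv_mr *) ptr) - 1;	/* step back over the header */
	}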
193	lib/memory/managed.c	Normal file
@@ -0,0 +1,193 @@
+/** Memory allocators.
+ *
+ * @author Steffen Vogel <stvogel@eonerc.rwth-aachen.de>
+ * @copyright 2017, Institute for Automation of Complex Power Systems, EONERC
+ * @license GNU General Public License (version 3)
+ *
+ * VILLASnode
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *********************************************************************************/
+
+#include <stdlib.h>
+#include <unistd.h>
+#include <errno.h>
+#include <strings.h>
+
+#include <sys/mman.h>
+#include <sys/time.h>
+#include <sys/resource.h>
+#include <sys/types.h>
+
+#include <villas/log.h>
+#include <villas/memory.h>
+#include <villas/utils.h>
+
+void* memory_managed_alloc(struct memory_type *m, size_t len, size_t alignment)
+{
+	/* Simple first-fit allocation */
+	struct memblock *first = (struct memblock *) m->_vd;
+	struct memblock *block;
+
+	for (block = first; block != NULL; block = block->next) {
+		if (block->flags & MEMBLOCK_USED)
+			continue;
+
+		char* cptr = (char *) block + sizeof(struct memblock);
+		size_t avail = block->len;
+		uintptr_t uptr = (uintptr_t) cptr;
+
+		/* Check alignment first; leave a gap at start of block to assure
+		 * alignment if necessary */
+		uintptr_t rem = uptr % alignment;
+		uintptr_t gap = 0;
+		if (rem != 0) {
+			gap = alignment - rem;
+			if (gap > avail)
+				continue; /* Next aligned address isn't in this block anymore */
+
+			cptr += gap;
+			avail -= gap;
+		}
+
+		if (avail >= len) {
+			if (gap > sizeof(struct memblock)) {
+				/* The alignment gap is big enough to fit another block.
+				 * The original block descriptor is already at the correct
+				 * position, so we just change its len and create a new block
+				 * descriptor for the actual block we're handling. */
+				block->len = gap - sizeof(struct memblock);
+				struct memblock *newblock = (struct memblock *) (cptr - sizeof(struct memblock));
+				newblock->prev = block;
+				newblock->next = block->next;
+				block->next = newblock;
+				newblock->flags = 0;
+				newblock->len = len;
+				block = newblock;
+			}
+			else {
+				/* The gap is too small to fit another block descriptor, so we
+				 * must account for the gap length in the block length. */
+				block->len = len + gap;
+			}
+
+			if (avail > len + sizeof(struct memblock)) {
+				/* Imperfect fit, so create another block for the remaining part */
+				struct memblock *newblock = (struct memblock *) (cptr + len);
+				newblock->prev = block;
+				newblock->next = block->next;
+				block->next = newblock;
+				if (newblock->next)
+					newblock->next->prev = newblock;
+				newblock->flags = 0;
+				newblock->len = avail - len - sizeof(struct memblock);
+			}
+			else {
+				/* If this block was larger than the requested length, but only
+				 * by less than sizeof(struct memblock), we may have wasted
+				 * memory by previous assignments to block->len. */
+				block->len = avail;
+			}
+
+			block->flags |= MEMBLOCK_USED;
+
+			return (void *) cptr;
+		}
+	}
+
+	/* No suitable block found */
+	return NULL;
+}
+
+int memory_managed_free(struct memory_type *m, void *ptr, size_t len)
+{
+	struct memblock *first = (struct memblock *) m->_vd;
+	struct memblock *block;
+	char *cptr = ptr;
+
+	for (block = first; block != NULL; block = block->next) {
+		if (!(block->flags & MEMBLOCK_USED))
+			continue;
+
+		/* Since we may waste some memory at the start of a block to ensure
+		 * alignment, ptr may not actually be the start of the block */
+		if ((char *) block + sizeof(struct memblock) <= cptr &&
+		    cptr < (char *) block + sizeof(struct memblock) + block->len) {
+			/* Try to merge it with neighbouring free blocks */
+			if (block->prev && !(block->prev->flags & MEMBLOCK_USED) &&
+			    block->next && !(block->next->flags & MEMBLOCK_USED)) {
+				/* Special case first: both previous and next block are unused */
+				block->prev->len += block->len + block->next->len + 2 * sizeof(struct memblock);
+				block->prev->next = block->next->next;
+				if (block->next->next)
+					block->next->next->prev = block->prev;
+			}
+			else if (block->prev && !(block->prev->flags & MEMBLOCK_USED)) {
+				block->prev->len += block->len + sizeof(struct memblock);
+				block->prev->next = block->next;
+				if (block->next)
+					block->next->prev = block->prev;
+			}
+			else if (block->next && !(block->next->flags & MEMBLOCK_USED)) {
+				block->len += block->next->len + sizeof(struct memblock);
+				block->next = block->next->next;
+				if (block->next)
+					block->next->prev = block;
+			}
+			else {
+				/* no neighbouring free block, so just mark it as free */
+				block->flags &= ~MEMBLOCK_USED;
+			}
+
+			return 0;
+		}
+	}
+
+	return -1;
+}
+
+struct memory_type * memory_managed(void *ptr, size_t len)
+{
+	struct memory_type *mt = ptr;
+	struct memblock *mb;
+	char *cptr = ptr;
+
+	if (len < sizeof(struct memory_type) + sizeof(struct memblock)) {
+		info("memory_managed: passed region too small");
+		return NULL;
+	}
+
+	/* Initialize memory_type */
+	mt->name = "managed";
+	mt->flags = 0;
+	mt->alloc = memory_managed_alloc;
+	mt->free = memory_managed_free;
+	mt->alignment = 1;
+
+	cptr += ALIGN(sizeof(struct memory_type), sizeof(void *));
+
+	/* Initialize first free memblock */
+	mb = (struct memblock *) cptr;
+	mb->prev = NULL;
+	mb->next = NULL;
+	mb->flags = 0;
+
+	cptr += ALIGN(sizeof(struct memblock), sizeof(void *));
+
+	mb->len = len - (cptr - (char *) ptr);
+
+	mt->_vd = (void *) mb;
+
+	return mt;
+}
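The first-fit search above leaves an alignment gap at the head of a block whenever the byte following the block descriptor is not aligned as requested. A small standalone example of that gap computation (hypothetical address; mirrors the rem/gap lines above):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uintptr_t uptr = 0x1010;	/* usable bytes start here (made-up value) */
		uintptr_t alignment = 64;	/* requested alignment */

		uintptr_t rem = uptr % alignment;		/* 16 */
		uintptr_t gap = rem ? alignment - rem : 0;	/* 48 bytes skipped */

		printf("gap = %ju, aligned address = %#jx\n",
		       (uintmax_t) gap, (uintmax_t) (uptr + gap));	/* 0x1040 */
		return 0;
	}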
@@ -549,9 +549,9 @@ int node_fd(struct node *n)
 	return n->_vt->fd ? n->_vt->fd(n) : -1;
 }
 
-struct memtype * node_memtype(struct node *n, struct memtype *parent)
+struct memory_type * node_memory_type(struct node *n, struct memory_type *parent)
 {
-	return n->_vt->memtype(n, parent) ? n->_vt->memtype(n, parent) : &memtype_hugepage;
+	return n->_vt->memory_type ? n->_vt->memory_type(n, parent) : &memory_hugepage;
 }
 
 int node_parse_list(struct list *list, json_t *cfg, struct list *all)
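Besides the rename, this hunk fixes a latent bug: the old line called n->_vt->memtype() unconditionally in order to test its result, so a node type that left the callback NULL would crash; the new line tests the function pointer itself and only then calls it, falling back to memory_hugepage. The guard pattern in isolation:

	/* Old (buggy): invokes the callback even when it is NULL */
	/* return n->_vt->memtype(n, parent) ? n->_vt->memtype(n, parent) : &memtype_hugepage; */

	/* New: test the pointer first, then call it */
	return n->_vt->memory_type ? n->_vt->memory_type(n, parent) : &memory_hugepage;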
@@ -294,11 +294,11 @@ int iec61850_sv_start(struct node *n)
 	SVReceiver_addSubscriber(i->subscriber.receiver, i->subscriber.subscriber);
 
 	/* Initialize pool and queue to pass samples between threads */
-	ret = pool_init(&i->subscriber.pool, 1024, SAMPLE_LEN(n->samplelen), &memtype_hugepage);
+	ret = pool_init(&i->subscriber.pool, 1024, SAMPLE_LEN(n->samplelen), &memory_hugepage);
 	if (ret)
 		return ret;
 
-	ret = queue_signalled_init(&i->subscriber.queue, 1024, &memtype_hugepage, 0);
+	ret = queue_signalled_init(&i->subscriber.queue, 1024, &memory_hugepage, 0);
 	if (ret)
 		return ret;
 }
@@ -29,7 +29,7 @@
 #include <villas/format_type.h>
 #include <villas/memory.h>
 #include <villas/pool.h>
-#include <villas/memory_ib.h>
+#include <villas/memory.h>
 
 #include <rdma/rdma_cma.h>
 
@@ -240,7 +240,7 @@ static void ib_build_ibv(struct node *n)
 	pool_init(&ib->mem.p_recv,
 		ib->qp_init.cap.max_recv_wr,
 		64*sizeof(double),
-		&memtype_heap);
+		&memory_type_heap);
 	if(ret)
 	{
 		error("Failed to init recv memory pool of node %s: %s",
@@ -271,7 +271,7 @@ static void ib_build_ibv(struct node *n)
 	pool_init(&ib->mem.p_send,
 		ib->qp_init.cap.max_send_wr,
 		sizeof(double),
-		&memtype_heap);
+		&memory_type_heap);
 	if(ret)
 	{
 		error("Failed to init send memory of node %s: %s",
@@ -839,7 +839,7 @@ static struct plugin p = {
 		.read = ib_read,
 		.write = ib_write,
 		.fd = ib_fd,
-		.memtype = ib_memtype
+		.memory_type = memory_ib
 	}
 };
 
@@ -50,11 +50,11 @@ int loopback_open(struct node *n)
 	int ret;
 	struct loopback *l = (struct loopback *) n->_vd;
 
-	ret = pool_init(&l->pool, l->queuelen, SAMPLE_LEN(n->samplelen), &memtype_hugepage);
+	ret = pool_init(&l->pool, l->queuelen, SAMPLE_LEN(n->samplelen), &memory_hugepage);
 	if (ret)
 		return ret;
 
-	return queue_signalled_init(&l->queue, l->queuelen, &memtype_hugepage, QUEUE_SIGNALLED_EVENTFD);
+	return queue_signalled_init(&l->queue, l->queuelen, &memory_hugepage, QUEUE_SIGNALLED_EVENTFD);
 }
 
 int loopback_close(struct node *n)
@@ -301,11 +301,11 @@ int mqtt_start(struct node *n)
 	if (ret)
 		return ret;
 
-	ret = pool_init(&m->pool, 1024, SAMPLE_LEN(n->samplelen), &memtype_hugepage);
+	ret = pool_init(&m->pool, 1024, SAMPLE_LEN(n->samplelen), &memory_hugepage);
 	if (ret)
 		return ret;
 
-	ret = queue_signalled_init(&m->queue, 1024, &memtype_hugepage, 0);
+	ret = queue_signalled_init(&m->queue, 1024, &memory_hugepage, 0);
 	if (ret)
 		return ret;
 
@@ -81,7 +81,7 @@ static int websocket_connection_init(struct websocket_connection *c)
 
 	c->_name = NULL;
 
-	ret = queue_init(&c->queue, DEFAULT_QUEUELEN, &memtype_hugepage);
+	ret = queue_init(&c->queue, DEFAULT_QUEUELEN, &memory_hugepage);
 	if (ret)
 		return ret;
 
@@ -400,11 +400,11 @@ int websocket_start(struct node *n)
 	int ret;
 	struct websocket *w = (struct websocket *) n->_vd;
 
-	ret = pool_init(&w->pool, DEFAULT_WEBSOCKET_QUEUELEN, SAMPLE_LEN(DEFAULT_WEBSOCKET_SAMPLELEN), &memtype_hugepage);
+	ret = pool_init(&w->pool, DEFAULT_WEBSOCKET_QUEUELEN, SAMPLE_LEN(DEFAULT_WEBSOCKET_SAMPLELEN), &memory_hugepage);
 	if (ret)
 		return ret;
 
-	ret = queue_signalled_init(&w->queue, DEFAULT_WEBSOCKET_QUEUELEN, &memtype_hugepage, 0);
+	ret = queue_signalled_init(&w->queue, DEFAULT_WEBSOCKET_QUEUELEN, &memory_hugepage, 0);
 	if (ret)
 		return ret;
 
@@ -46,7 +46,7 @@ static int path_source_init(struct path_source *ps)
 {
 	int ret;
 
-	ret = pool_init(&ps->pool, MAX(DEFAULT_QUEUELEN, ps->node->in.vectorize), SAMPLE_LEN(ps->node->samplelen), &memtype_hugepage);
+	ret = pool_init(&ps->pool, MAX(DEFAULT_QUEUELEN, ps->node->in.vectorize), SAMPLE_LEN(ps->node->samplelen), &memory_hugepage);
 	if (ret)
 		return ret;
 
@@ -148,7 +148,7 @@ static int path_destination_init(struct path_destination *pd, int queuelen)
 {
 	int ret;
 
-	ret = queue_init(&pd->queue, queuelen, &memtype_hugepage);
+	ret = queue_init(&pd->queue, queuelen, &memory_hugepage);
 	if (ret)
 		return ret;
 
@@ -430,7 +430,7 @@ int path_init2(struct path *p)
 	if (!p->samplelen)
 		p->samplelen = DEFAULT_SAMPLELEN;
 
-	ret = pool_init(&p->pool, MAX(1, list_length(&p->destinations)) * p->queuelen, SAMPLE_LEN(p->samplelen), &memtype_hugepage);
+	ret = pool_init(&p->pool, MAX(1, list_length(&p->destinations)) * p->queuelen, SAMPLE_LEN(p->samplelen), &memory_hugepage);
 	if (ret)
 		return ret;
 
@@ -25,7 +25,7 @@
 #include <villas/memory.h>
 #include <villas/kernel/kernel.h>
 
-int pool_init(struct pool *p, size_t cnt, size_t blocksz, struct memtype *m)
+int pool_init(struct pool *p, size_t cnt, size_t blocksz, struct memory_type *m)
 {
 	int ret;
 
@@ -36,7 +36,7 @@
 #include <villas/memory.h>
 
 /** Initialize MPMC queue */
-int queue_init(struct queue *q, size_t size, struct memtype *mem)
+int queue_init(struct queue *q, size_t size, struct memory_type *mem)
 {
 	assert(q->state == STATE_DESTROYED);
 
@@ -36,7 +36,7 @@ static void queue_signalled_cleanup(void *p)
 	pthread_mutex_unlock(&qs->pthread.mutex);
 }
 
-int queue_signalled_init(struct queue_signalled *qs, size_t size, struct memtype *mem, int flags)
+int queue_signalled_init(struct queue_signalled *qs, size_t size, struct memory_type *mem, int flags)
 {
 	int ret;
 
10	lib/shmem.c
@@ -35,8 +35,8 @@
 
 size_t shmem_total_size(int queuelen, int samplelen)
 {
-	/* We have the constant const of the memtype header */
-	return sizeof(struct memtype)
+	/* We have the constant cost of the memory_type header */
+	return sizeof(struct memory_type)
 	/* and the shared struct itself */
 	+ sizeof(struct shmem_shared)
 	/* the size of the actual queue and the queue for the pool */
@@ -55,7 +55,7 @@ int shmem_int_open(const char *wname, const char* rname, struct shmem_int *shm,
 	int fd, ret;
 	size_t len;
 	void *base;
-	struct memtype *manager;
+	struct memory_type *manager;
 	struct shmem_shared *shared;
 	struct stat stat_buf;
 	sem_t *sem_own, *sem_other;
@@ -92,7 +92,7 @@ retry: fd = shm_open(wname, O_RDWR|O_CREAT|O_EXCL, 0600);
 
 	close(fd);
 
-	manager = memtype_managed_init(base, len);
+	manager = memory_managed(base, len);
 	shared = memory_alloc(manager, sizeof(struct shmem_shared));
 	if (!shared) {
 		errno = ENOMEM;
@@ -144,7 +144,7 @@ retry: fd = shm_open(wname, O_RDWR|O_CREAT|O_EXCL, 0600);
 	if (base == MAP_FAILED)
 		return -10;
 
-	cptr = (char *) base + sizeof(struct memtype) + sizeof(struct memblock);
+	cptr = (char *) base + sizeof(struct memory_type) + sizeof(struct memblock);
 	shared = (struct shmem_shared *) cptr;
 	shm->read.base = base;
 	shm->read.name = rname;
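Both sides of the shared-memory region agree on one layout: a struct memory_type header placed at the base by memory_managed(), followed by the first memblock descriptor, followed by struct shmem_shared. A sketch condensing the two hunks above (writer allocates through the managed type, reader recovers the pointer arithmetically):

	#include <villas/memory.h>
	#include <villas/shmem.h>

	static struct shmem_shared * open_shared(void *base, size_t len, int writer)
	{
		if (writer) {
			/* Let the managed allocator carve up the mapping */
			struct memory_type *manager = memory_managed(base, len);
			return memory_alloc(manager, sizeof(struct shmem_shared));
		}
		else {
			/* Same layout, recovered by plain pointer arithmetic */
			char *cptr = (char *) base + sizeof(struct memory_type) + sizeof(struct memblock);
			return (struct shmem_shared *) cptr;
		}
	}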
@@ -182,7 +182,7 @@ check: if (optarg == endptr)
 
 	smps = alloc(cnt * sizeof(struct sample *));
 
-	ret = pool_init(&q, 10 * cnt, SAMPLE_LEN(DEFAULT_SAMPLELEN), &memtype_hugepage);
+	ret = pool_init(&q, 10 * cnt, SAMPLE_LEN(DEFAULT_SAMPLELEN), &memory_hugepage);
 	if (ret)
 		error("Failed to initilize memory pool");
 
@@ -132,7 +132,7 @@ static void * send_loop(void *ctx)
 	struct sample *smps[node->out.vectorize];
 
 	/* Initialize memory */
-	ret = pool_init(&sendd.pool, LOG2_CEIL(node->out.vectorize), SAMPLE_LEN(DEFAULT_SAMPLELEN), node_memtype(node, &memtype_hugepage));
+	ret = pool_init(&sendd.pool, LOG2_CEIL(node->out.vectorize), SAMPLE_LEN(DEFAULT_SAMPLELEN), node_memory_type(node, &memory_hugepage));
 	if (ret < 0)
 		error("Failed to allocate memory for receive pool.");
 
@@ -196,7 +196,7 @@ static void * recv_loop(void *ctx)
 	struct sample *smps[node->in.vectorize];
 
 	/* Initialize memory */
-	ret = pool_init(&recvv.pool, LOG2_CEIL(node->in.vectorize), SAMPLE_LEN(DEFAULT_SAMPLELEN), node_memtype(node, &memtype_hugepage));
+	ret = pool_init(&recvv.pool, LOG2_CEIL(node->in.vectorize), SAMPLE_LEN(DEFAULT_SAMPLELEN), node_memory_type(node, &memory_hugepage));
 	if (ret < 0)
 		error("Failed to allocate memory for receive pool.");
 
@@ -155,7 +155,7 @@ int main(int argc, char *argv[])
 	if (ret)
 		error("Failed to verify node configuration");
 
-	ret = pool_init(&q, 16, SAMPLE_LEN(n.samplelen), &memtype_heap);
+	ret = pool_init(&q, 16, SAMPLE_LEN(n.samplelen), &memory_type_heap);
 	if (ret)
 		error("Failed to initialize pool");
 
@@ -122,7 +122,7 @@ check: if (optarg == endptr)
 	int n = argc - optind; /* The number of files which we compare */
 	struct side s[n];
 
-	ret = pool_init(&pool, n, SAMPLE_LEN(DEFAULT_SAMPLELEN), &memtype_heap);
+	ret = pool_init(&pool, n, SAMPLE_LEN(DEFAULT_SAMPLELEN), &memory_type_heap);
 	if (ret)
 		error("Failed to initialize pool");
 
@@ -185,7 +185,7 @@ ParameterizedTest(char *fmt, io, lowlevel)
 	struct sample *smps[NUM_SAMPLES];
 	struct sample *smpt[NUM_SAMPLES];
 
-	ret = pool_init(&p, 2 * NUM_SAMPLES, SAMPLE_LEN(NUM_VALUES), &memtype_hugepage);
+	ret = pool_init(&p, 2 * NUM_SAMPLES, SAMPLE_LEN(NUM_VALUES), &memory_hugepage);
 	cr_assert_eq(ret, 0);
 
 	info("Running test for format = %s", fmt);
@@ -232,7 +232,7 @@ ParameterizedTest(char *fmt, io, highlevel)
 
 	info("Running test for format = %s", fmt);
 
-	ret = pool_init(&p, 2 * NUM_SAMPLES, SAMPLE_LEN(NUM_VALUES), &memtype_hugepage);
+	ret = pool_init(&p, 2 * NUM_SAMPLES, SAMPLE_LEN(NUM_VALUES), &memory_hugepage);
 	cr_assert_eq(ret, 0);
 
 	generate_samples(&p, smps, smpt, NUM_SAMPLES, NUM_VALUES);
@@ -28,13 +28,15 @@
 #include <villas/memory.h>
 #include <villas/utils.h>
 
+#define HUGEPAGESIZE (1<<22)
+
 TheoryDataPoints(memory, aligned) = {
 	DataPoints(size_t, 1, 32, 55, 1 << 10, 1 << 20),
 	DataPoints(size_t, 1, 8, 1 << 12),
-	DataPoints(struct memtype *, &memtype_heap, &memtype_hugepage)
+	DataPoints(struct memory_type *, &memory_type_heap, &memory_hugepage)
 };
 
-Theory((size_t len, size_t align, struct memtype *m), memory, aligned) {
+Theory((size_t len, size_t align, struct memory_type *m), memory, aligned) {
 	int ret;
 	void *ptr;
 
@@ -43,7 +45,7 @@ Theory((size_t len, size_t align, struct memtype *m), memory, aligned) {
 
 	cr_assert(IS_ALIGNED(ptr, align));
 
-	if (m == &memtype_hugepage) {
+	if (m == &memory_hugepage) {
 		cr_assert(IS_ALIGNED(ptr, HUGEPAGESIZE));
 	}
 
@@ -57,15 +59,15 @@ Test(memory, manager) {
 
 	int ret;
 	void *p, *p1, *p2, *p3;
-	struct memtype *m;
+	struct memory_type *m;
 
 	total_size = 1 << 10;
-	max_block = total_size - sizeof(struct memtype) - sizeof(struct memblock);
+	max_block = total_size - sizeof(struct memory_type) - sizeof(struct memblock);
 
-	p = memory_alloc(&memtype_heap, total_size);
+	p = memory_alloc(&memory_type_heap, total_size);
 	cr_assert_not_null(p);
 
-	m = memtype_managed_init(p, total_size);
+	m = memory_managed(p, total_size);
 	cr_assert_not_null(m);
 
 	p1 = memory_alloc(m, 16);
@@ -100,6 +102,6 @@ Test(memory, manager) {
 	ret = memory_free(m, p1, max_block);
 	cr_assert(ret == 0);
 
-	ret = memory_free(&memtype_heap, p, total_size);
+	ret = memory_free(&memory_type_heap, p, total_size);
 	cr_assert(ret == 0);
 }
@@ -32,16 +32,16 @@ struct param {
 	int thread_count;
 	int pool_size;
 	size_t block_size;
-	struct memtype *memtype;
+	struct memory_type *memory_type;
 };
 
 ParameterizedTestParameters(pool, basic)
 {
 	static struct param params[] = {
-		{ 1, 4096, 150, &memtype_heap },
-		{ 1, 128, 8, &memtype_hugepage },
-		{ 1, 4, 8192, &memtype_hugepage },
-		{ 1, 1 << 13, 4, &memtype_heap }
+		{ 1, 4096, 150, &memory_type_heap },
+		{ 1, 128, 8, &memory_hugepage },
+		{ 1, 4, 8192, &memory_hugepage },
+		{ 1, 1 << 13, 4, &memory_type_heap }
 	};
 
 	return cr_make_param_array(struct param, params, ARRAY_LEN(params));
@@ -54,7 +54,7 @@ ParameterizedTest(struct param *p, pool, basic)
 
 	void *ptr, *ptrs[p->pool_size];
 
-	ret = pool_init(&pool, p->pool_size, p->block_size, p->memtype);
+	ret = pool_init(&pool, p->pool_size, p->block_size, p->memory_type);
 	cr_assert_eq(ret, 0, "Failed to create pool");
 
 	ptr = pool_get(&pool);
@@ -51,7 +51,7 @@ struct param {
 	int batch_size;
 	void * (*thread_func)(void *);
 	struct queue queue;
-	const struct memtype *memtype;
+	const struct memory_type *memory_type;
 };
 
 /** Get thread id as integer
@@ -243,7 +243,7 @@ Test(queue, single_threaded)
 		.start = 1 /* we start immeadiatly */
 	};
 
-	ret = queue_init(&p.queue, p.queue_size, &memtype_heap);
+	ret = queue_init(&p.queue, p.queue_size, &memory_type_heap);
 	cr_assert_eq(ret, 0, "Failed to create queue");
 
 	producer(&p);
@@ -265,35 +265,35 @@ ParameterizedTestParameters(queue, multi_threaded)
 			.thread_count = 32,
 			.thread_func = producer_consumer_many,
 			.batch_size = 10,
-			.memtype = &memtype_heap
+			.memory_type = &memory_type_heap
 		}, {
 			.iter_count = 1 << 8,
 			.queue_size = 1 << 9,
 			.thread_count = 4,
 			.thread_func = producer_consumer_many,
 			.batch_size = 100,
-			.memtype = &memtype_heap
+			.memory_type = &memory_type_heap
 		}, {
 			.iter_count = 1 << 16,
 			.queue_size = 1 << 14,
 			.thread_count = 16,
 			.thread_func = producer_consumer_many,
 			.batch_size = 100,
-			.memtype = &memtype_heap
+			.memory_type = &memory_type_heap
 		}, {
 			.iter_count = 1 << 8,
 			.queue_size = 1 << 9,
 			.thread_count = 4,
 			.thread_func = producer_consumer_many,
 			.batch_size = 10,
-			.memtype = &memtype_heap
+			.memory_type = &memory_type_heap
 		}, {
 			.iter_count = 1 << 16,
 			.queue_size = 1 << 9,
 			.thread_count = 16,
 			.thread_func = producer_consumer,
 			.batch_size = 10,
-			.memtype = &memtype_hugepage
+			.memory_type = &memory_hugepage
 		}
 	};
 
@@ -308,7 +308,7 @@ ParameterizedTest(struct param *p, queue, multi_threaded, .timeout = 20)
 
 	p->start = 0;
 
-	ret = queue_init(&p->queue, p->queue_size, &memtype_heap);
+	ret = queue_init(&p->queue, p->queue_size, &memory_type_heap);
 	cr_assert_eq(ret, 0, "Failed to create queue");
 
 	uint64_t start_tsc_time, end_tsc_time;
@@ -350,7 +350,7 @@ Test(queue, init_destroy)
 	int ret;
 	struct queue q = { .state = STATE_DESTROYED };
 
-	ret = queue_init(&q, 1024, &memtype_heap);
+	ret = queue_init(&q, 1024, &memory_type_heap);
 	cr_assert_eq(ret, 0); /* Should succeed */
 
 	ret = queue_destroy(&q);
@@ -132,7 +132,7 @@ ParameterizedTest(struct param *param, queue_signalled, simple, .timeout = 5)
 
 	pthread_t t1, t2;
 
-	ret = queue_signalled_init(&q, LOG2_CEIL(NUM_ELEM), &memtype_heap, param->flags);
+	ret = queue_signalled_init(&q, LOG2_CEIL(NUM_ELEM), &memory_type_heap, param->flags);
 	cr_assert_eq(ret, 0, "Failed to initialize queue: flags=%#x, ret=%d", param->flags, ret);
 
 	ret = pthread_create(&t1, NULL, producer, &q);