Mirror of https://git.rwth-aachen.de/acs/public/villas/node/ (synced 2025-03-09 00:00:00 +01:00)
Merge branch 'memtype-managed' into develop

Commit a256417417 · 8 changed files with 253 additions and 49 deletions
memory.h

@@ -12,8 +12,10 @@
 #define HUGEPAGESIZE (1 << 21)
 
-typedef void *(*memzone_allocator_t)(size_t len, size_t alignment);
-typedef int (*memzone_deallocator_t)(void *ptr, size_t len);
+struct memtype;
+
+typedef void *(*memzone_allocator_t)(struct memtype *mem, size_t len, size_t alignment);
+typedef int (*memzone_deallocator_t)(struct memtype *mem, void *ptr, size_t len);
 
 enum memtype_flags {
 	MEMORY_MMAP = (1 << 0),
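Both callback types now receive the memtype descriptor itself, so an allocator can reach per-instance state (see the _vd member added below). A minimal sketch of callbacks matching the new signatures — not part of this patch, all names hypothetical:

    #include <stdlib.h>

    struct memtype; /* forward declaration, as above */

    /* A trivial, stateless backend under the new signatures */
    static void * example_alloc(struct memtype *mem, size_t len, size_t alignment)
    {
    	(void) mem;       /* a stateless backend may ignore its descriptor */
    	(void) alignment; /* malloc() alignment is assumed to suffice here */

    	return malloc(len);
    }

    static int example_free(struct memtype *mem, void *ptr, size_t len)
    {
    	(void) mem;
    	(void) len;

    	free(ptr);

    	return 0;
    }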
@@ -30,6 +32,21 @@ struct memtype {
 	memzone_allocator_t alloc;
 	memzone_deallocator_t free;
+
+	void *_vd; /**< Virtual data for possible state */
 };
 
+enum memblock_flags {
+	MEMBLOCK_USED = 1,
+};
+
+/** Descriptor of a memory block. Associated block always starts at
+ * &m + sizeof(struct memblock). */
+struct memblock {
+	struct memblock *prev;
+	struct memblock *next;
+	size_t len; /**< Length of the block; doesn't include the descriptor itself */
+	int flags;
+};
+
 /** @todo Unused for now */
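Each managed allocation is thus a memblock descriptor immediately followed by its payload, with the descriptors forming a doubly linked list through the region. Two hypothetical helpers (not in the patch) make the layout explicit:

    /* Uses struct memblock as defined above */

    /* Payload begins right after the descriptor */
    static inline void * memblock_data(struct memblock *blk)
    {
    	return (char *) blk + sizeof(struct memblock);
    }

    /* Inverse mapping; only valid when no alignment gap precedes ptr */
    static inline struct memblock * memblock_from_data(void *ptr)
    {
    	return (struct memblock *) ((char *) ptr - sizeof(struct memblock));
    }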
@@ -49,11 +66,13 @@ int memory_init(int hugepages);
  * @retval NULL If allocation failed.
  * @retval <>0 If allocation was successful.
  */
-void * memory_alloc(const struct memtype *m, size_t len);
+void * memory_alloc(struct memtype *m, size_t len);
 
-void * memory_alloc_aligned(const struct memtype *m, size_t len, size_t alignment);
+void * memory_alloc_aligned(struct memtype *m, size_t len, size_t alignment);
 
-int memory_free(const struct memtype *m, void *ptr, size_t len);
+int memory_free(struct memtype *m, void *ptr, size_t len);
 
-extern const struct memtype memtype_heap;
-extern const struct memtype memtype_hugepage;
+struct memtype * memtype_managed_init(void *ptr, size_t len);
+
+extern struct memtype memtype_heap;
+extern struct memtype memtype_hugepage;
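The managed memtype turns an arbitrary memory region into a sub-allocator; a typical use is carving small objects out of one hugepage-backed region. A minimal usage sketch, assuming only the declarations above:

    #include "memory.h"

    void example_managed(void)
    {
    	size_t len = 1 << 21; /* one 2 MiB hugepage */

    	void *region = memory_alloc(&memtype_hugepage, len);
    	if (!region)
    		return;

    	struct memtype *mm = memtype_managed_init(region, len);

    	void *obj = memory_alloc(mm, 64);
    	/* ... use obj ... */
    	memory_free(mm, obj, 64);

    	memory_free(&memtype_hugepage, region, len);
    }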
pool.h

@@ -19,7 +19,7 @@
 /** A thread-safe memory pool */
 struct pool {
 	void *buffer; /**< Address of the underlying memory area */
-	const struct memtype *mem;
+	struct memtype *mem;
 
 	enum state state;
@@ -42,7 +42,7 @@ struct pool {
  * @retval 0 The pool has been successfully initialized.
  * @retval <>0 There was an error during the pool initialization.
  */
-int pool_init(struct pool *p, size_t cnt, size_t blocksz, const struct memtype *mem);
+int pool_init(struct pool *p, size_t cnt, size_t blocksz, struct memtype *mem);
 
 /** Destroy and release memory used by pool. */
 int pool_destroy(struct pool *p);
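For context, a usage sketch of the pool API; pool_get() and pool_put() are assumed to be the block accessors from pool.h:

    #include "pool.h"

    void example_pool(void)
    {
    	struct pool p = { .state = STATE_DESTROYED };

    	/* 64 blocks of 128 bytes each, drawn from plain heap memory */
    	if (pool_init(&p, 64, 128, &memtype_heap))
    		return;

    	void *blk = pool_get(&p); /* take one fixed-size block */
    	/* ... use blk ... */
    	pool_put(&p, blk);        /* hand it back */

    	pool_destroy(&p);
    }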
queue.h

@@ -51,7 +51,7 @@ struct queue {
 	enum state state;
 
-	struct memtype const * mem;
+	struct memtype * mem;
 	size_t buffer_mask;
 	struct queue_cell {
 		atomic_size_t sequence;
@@ -70,7 +70,7 @@ struct queue {
 };
 
 /** Initialize MPMC queue */
-int queue_init(struct queue *q, size_t size, const struct memtype *mem);
+int queue_init(struct queue *q, size_t size, struct memtype *mem);
 
 /** Destroy MPMC queue and release memory */
 int queue_destroy(struct queue *q);
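A corresponding sketch for the queue; the single-element queue_push() and queue_pull() variants are assumed alongside the *_many() calls shown below:

    #include "queue.h"

    void example_queue(void)
    {
    	struct queue q = { .state = STATE_DESTROYED };
    	int dummy = 42;
    	void *out;

    	/* The ring size should be a power of two (see buffer_mask above) */
    	if (queue_init(&q, 1 << 10, &memtype_heap))
    		return;

    	queue_push(&q, &dummy);
    	queue_pull(&q, &out); /* out == &dummy */

    	queue_destroy(&q);
    }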
@@ -99,4 +99,4 @@ int queue_push_many(struct queue *q, void *ptr[], size_t cnt);
  * This number can be smaller than \p cnt in case the queue contained less than
  * \p cnt elements.
  */
-int queue_pull_many(struct queue *q, void *ptr[], size_t cnt);
+int queue_pull_many(struct queue *q, void *ptr[], size_t cnt);
(The old and new lines appear identical here; the change is apparently whitespace-only.)
lib/memory.c (211 changed lines)
@@ -33,81 +33,240 @@ int memory_init(int hugepages)
 	return 0;
 }
 
-void * memory_alloc(const struct memtype *m, size_t len)
+void * memory_alloc(struct memtype *m, size_t len)
 {
-	void *ptr = m->alloc(len, sizeof(void *));
+	void *ptr = m->alloc(m, len, sizeof(void *));
 
 	debug(LOG_MEM | 2, "Allocated %#zx bytes of %s memory: %p", len, m->name, ptr);
 
 	return ptr;
 }
 
-void * memory_alloc_aligned(const struct memtype *m, size_t len, size_t alignment)
+void * memory_alloc_aligned(struct memtype *m, size_t len, size_t alignment)
 {
-	void *ptr = m->alloc(len, alignment);
+	void *ptr = m->alloc(m, len, alignment);
 
 	debug(LOG_MEM | 2, "Allocated %#zx bytes of %#zx-byte-aligned %s memory: %p", len, alignment, m->name, ptr);
 
 	return ptr;
 }
 
-int memory_free(const struct memtype *m, void *ptr, size_t len)
+int memory_free(struct memtype *m, void *ptr, size_t len)
 {
 	debug(LOG_MEM | 2, "Releasing %#zx bytes of %s memory", len, m->name);
-	return m->free(ptr, len);
+
+	return m->free(m, ptr, len);
 }
 
-static void * memory_heap_alloc(size_t len, size_t alignment)
+static void * memory_heap_alloc(struct memtype *m, size_t len, size_t alignment)
 {
 	void *ptr;
 	int ret;
 
 	if (alignment < sizeof(void *))
 		alignment = sizeof(void *);
 
 	ret = posix_memalign(&ptr, alignment, len);
 
 	return ret ? NULL : ptr;
 }
 
-int memory_heap_free(void *ptr, size_t len)
+int memory_heap_free(struct memtype *m, void *ptr, size_t len)
 {
 	free(ptr);
 
 	return 0;
 }
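For reference, the call sequence against the heap memtype; note that memory_heap_alloc() raises small alignments to sizeof(void *), which posix_memalign() requires as a minimum (alignment must be a power-of-two multiple of sizeof(void *)). A short sketch:

    #include <string.h>
    #include "memory.h"

    void example_heap(void)
    {
    	/* 4 KiB, aligned to a 64-byte cache line */
    	void *buf = memory_alloc_aligned(&memtype_heap, 4096, 64);
    	if (!buf)
    		return;

    	memset(buf, 0, 4096);

    	memory_free(&memtype_heap, buf, 4096);
    }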
 /** Allocate memory backed by hugepages with a malloc()-like interface */
-static void * memory_hugepage_alloc(size_t len, size_t alignment)
+static void * memory_hugepage_alloc(struct memtype *m, size_t len, size_t alignment)
 {
 	int prot = PROT_READ | PROT_WRITE;
 	int flags = MAP_PRIVATE | MAP_ANONYMOUS;
 
 #ifdef __MACH__
 	flags |= VM_FLAGS_SUPERPAGE_SIZE_2MB;
 #elif defined(__linux__)
 	flags |= MAP_HUGETLB | MAP_LOCKED;
 #endif
 
 	void *ret = mmap(NULL, len, prot, flags, -1, 0);
 
 	if (ret == MAP_FAILED) {
 		info("Failed to allocate huge pages: Check https://www.kernel.org/doc/Documentation/vm/hugetlbpage.txt");
 		return NULL;
 	}
 
 	return ret;
 }
 
-static int memory_hugepage_free(void *ptr, size_t len)
+static int memory_hugepage_free(struct memtype *m, void *ptr, size_t len)
 {
 	len = ALIGN(len, HUGEPAGESIZE); /* ugly, see: https://lkml.org/lkml/2015/3/27/171 */
 
 	return munmap(ptr, len);
 }
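On Linux, MAP_HUGETLB fails unless the kernel has hugepages reserved (the vm.nr_hugepages sysctl), hence the info() hint above. A hypothetical variant, not part of this patch, that degrades to regular pages instead of failing:

    #define _GNU_SOURCE /* for MAP_ANONYMOUS / MAP_HUGETLB (Linux) */
    #include <sys/mman.h>
    #include <stddef.h>

    static void * hugepage_alloc_with_fallback(size_t len)
    {
    	int prot = PROT_READ | PROT_WRITE;
    	int flags = MAP_PRIVATE | MAP_ANONYMOUS;

    	/* Try 2 MiB hugepages first ... */
    	void *ptr = mmap(NULL, len, prot, flags | MAP_HUGETLB, -1, 0);
    	if (ptr == MAP_FAILED)
    		ptr = mmap(NULL, len, prot, flags, -1, 0); /* ... fall back to 4 KiB pages */

    	return ptr == MAP_FAILED ? NULL : ptr;
    }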
+void * memory_managed_alloc(struct memtype *m, size_t len, size_t alignment)
+{
+	/* Simple first-fit allocation */
+	struct memblock *first = m->_vd;
+	struct memblock *block;
+
+	for (block = first; block != NULL; block = block->next) {
+		if (block->flags & MEMBLOCK_USED)
+			continue;
+
+		char *cptr = (char *) block + sizeof(struct memblock);
+		size_t avail = block->len;
+		uintptr_t uptr = (uintptr_t) cptr;
+
+		/* Check alignment first; leave a gap at start of block to assure
+		 * alignment if necessary */
+		uintptr_t rem = uptr % alignment;
+		uintptr_t gap = 0;
+		if (rem != 0) {
+			gap = alignment - rem;
+			if (gap > avail)
+				continue; /* Next aligned address isn't in this block anymore */
+
+			cptr += gap;
+			avail -= gap;
+		}
+
+		if (avail >= len) {
+			if (gap > sizeof(struct memblock)) {
+				/* The alignment gap is big enough to fit another block.
+				 * The original block descriptor is already at the correct
+				 * position, so we just change its len and create a new block
+				 * descriptor for the actual block we're handling. */
+				block->len = gap - sizeof(struct memblock);
+				struct memblock *newblock = (struct memblock *) (cptr - sizeof(struct memblock));
+				newblock->prev = block;
+				newblock->next = block->next;
+				block->next = newblock;
+				newblock->flags = 0;
+				newblock->len = len;
+				block = newblock;
+			}
+			else {
+				/* The gap is too small to fit another block descriptor, so we
+				 * must account for the gap length in the block length. */
+				block->len = len + gap;
+			}
+
+			if (avail > len + sizeof(struct memblock)) {
+				/* Imperfect fit, so create another block for the remaining part */
+				struct memblock *newblock = (struct memblock *) (cptr + len);
+				newblock->prev = block;
+				newblock->next = block->next;
+				block->next = newblock;
+				if (newblock->next)
+					newblock->next->prev = newblock;
+				newblock->flags = 0;
+				newblock->len = avail - len - sizeof(struct memblock);
+			}
+			else {
+				/* If this block was larger than the requested length, but only
+				 * by less than sizeof(struct memblock), we may have wasted
+				 * memory by previous assignments to block->len. */
+				block->len = avail;
+			}
+
+			block->flags |= MEMBLOCK_USED;
+
+			return (void *) cptr;
+		}
+	}
+
+	/* No suitable block found */
+	return NULL;
+}
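The first-fit policy means a freed block near the head of the list is reused before untouched tail space. A small self-checking sketch of that behaviour on a freshly initialized manager, using only the public API (the reuse assert assumes the default sizeof(void *) alignment used by memory_alloc()):

    #include <assert.h>
    #include "memory.h"

    void first_fit_demo(struct memtype *mm) /* mm: fresh managed memtype */
    {
    	void *p1 = memory_alloc(mm, 64);
    	void *p2 = memory_alloc(mm, 64);

    	memory_free(mm, p1, 64);

    	/* First fit: the freed head block satisfies the next small request */
    	void *p3 = memory_alloc(mm, 32);
    	assert(p3 == p1);

    	memory_free(mm, p2, 64);
    	memory_free(mm, p3, 32);
    }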
+int memory_managed_free(struct memtype *m, void *ptr, size_t len)
+{
+	struct memblock *first = m->_vd;
+	struct memblock *block;
+	char *cptr = ptr;
+
+	for (block = first; block != NULL; block = block->next) {
+		if (!(block->flags & MEMBLOCK_USED))
+			continue;
+
+		/* Since we may waste some memory at the start of a block to ensure
+		 * alignment, ptr may not actually be the start of the block */
+		if ((char *) block + sizeof(struct memblock) <= cptr &&
+		    cptr < (char *) block + sizeof(struct memblock) + block->len) {
+			/* Try to merge it with neighbouring free blocks */
+			if (block->prev && !(block->prev->flags & MEMBLOCK_USED) &&
+			    block->next && !(block->next->flags & MEMBLOCK_USED)) {
+				/* Special case first: both previous and next block are unused */
+				block->prev->len += block->len + block->next->len + 2 * sizeof(struct memblock);
+				block->prev->next = block->next->next;
+				if (block->next->next)
+					block->next->next->prev = block->prev;
+			}
+			else if (block->prev && !(block->prev->flags & MEMBLOCK_USED)) {
+				block->prev->len += block->len + sizeof(struct memblock);
+				block->prev->next = block->next;
+				if (block->next)
+					block->next->prev = block->prev;
+			}
+			else if (block->next && !(block->next->flags & MEMBLOCK_USED)) {
+				block->len += block->next->len + sizeof(struct memblock);
+				block->next = block->next->next;
+				if (block->next)
+					block->next->prev = block;
+			}
+			else {
+				/* No neighbouring free block, so just mark it as free */
+				block->flags &= ~MEMBLOCK_USED;
+			}
+
+			return 0;
+		}
+	}
+
+	return -1;
+}
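Freeing walks the same list and coalesces with free neighbours, so interleaved frees still restore one contiguous block. A sketch of the triple-merge case on a fresh manager (max_block as defined in the test further below):

    #include <assert.h>
    #include "memory.h"

    void coalesce_demo(struct memtype *mm, size_t max_block)
    {
    	void *a = memory_alloc(mm, 64);
    	void *b = memory_alloc(mm, 64);
    	void *c = memory_alloc(mm, 64);

    	memory_free(mm, a, 64);
    	memory_free(mm, c, 64);
    	memory_free(mm, b, 64); /* prev and next both free: triple merge */

    	/* The region is a single free block again */
    	void *all = memory_alloc(mm, max_block);
    	assert(all);
    	memory_free(mm, all, max_block);
    }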
+struct memtype * memtype_managed_init(void *ptr, size_t len)
+{
+	struct memtype *mt = ptr;
+	struct memblock *mb;
+	char *cptr = ptr;
+
+	if (len < sizeof(struct memtype) + sizeof(struct memblock)) {
+		info("memtype_managed_init: passed region too small");
+		return NULL;
+	}
+
+	/* Initialize memtype */
+	mt->name = "managed";
+	mt->flags = 0;
+	mt->alloc = memory_managed_alloc;
+	mt->free = memory_managed_free;
+	mt->alignment = 1;
+
+	cptr += ALIGN(sizeof(struct memtype), sizeof(void *));
+
+	/* Initialize first free memblock */
+	mb = (struct memblock *) cptr;
+	mb->prev = NULL;
+	mb->next = NULL;
+	mb->flags = 0;
+
+	cptr += ALIGN(sizeof(struct memblock), sizeof(void *));
+
+	mb->len = len - (cptr - (char *) ptr);
+
+	mt->_vd = (void *) mb;
+
+	return mt;
+}
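After initialization the usable capacity is the region length minus the two pointer-aligned headers. A hypothetical helper mirroring that arithmetic:

    /* Largest single request a fresh managed region can satisfy
     * (matches mb->len as computed in memtype_managed_init() above) */
    static size_t managed_max_block(size_t len)
    {
    	return len - ALIGN(sizeof(struct memtype), sizeof(void *))
    	           - ALIGN(sizeof(struct memblock), sizeof(void *));
    }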
 /* List of available memory types */
-const struct memtype memtype_heap = {
+struct memtype memtype_heap = {
 	.name = "heap",
 	.flags = MEMORY_HEAP,
 	.alloc = memory_heap_alloc,

@@ -115,18 +274,10 @@ const struct memtype memtype_heap = {
 	.alignment = 1
 };
 
-const struct memtype memtype_hugepage = {
+struct memtype memtype_hugepage = {
 	.name = "mmap_hugepages",
 	.flags = MEMORY_MMAP | MEMORY_HUGEPAGE,
 	.alloc = memory_hugepage_alloc,
 	.free = memory_hugepage_free,
 	.alignment = 21 /* 2 MiB hugepage */
 };
-
-/** @todo */
-const struct memtype memtype_dma = {
-	.name = "dma",
-	.flags = MEMORY_DMA | MEMORY_MMAP,
-	.alloc = NULL, .free = NULL,
-	.alignment = 12
-};
pool.c

@@ -10,7 +10,7 @@
 #include "memory.h"
 #include "kernel/kernel.h"
 
-int pool_init(struct pool *p, size_t cnt, size_t blocksz, const struct memtype *m)
+int pool_init(struct pool *p, size_t cnt, size_t blocksz, struct memtype *m)
 {
 	int ret;
queue.c

@@ -36,7 +36,7 @@
 #include "memory.h"
 
 /** Initialize MPMC queue */
-int queue_init(struct queue *q, size_t size, const struct memtype *mem)
+int queue_init(struct queue *q, size_t size, struct memtype *mem)
 {
 	assert(q->state == STATE_DESTROYED);
tests (memory)

@@ -15,10 +15,10 @@
 TheoryDataPoints(memory, aligned) = {
 	DataPoints(size_t, 1, 32, 55, 1 << 10, 1 << 20),
 	DataPoints(size_t, 1, 8, 1 << 12),
-	DataPoints(const struct memtype *, &memtype_heap, &memtype_hugepage)
+	DataPoints(struct memtype *, &memtype_heap, &memtype_hugepage)
 };
 
-Theory((size_t len, size_t align, const struct memtype *m), memory, aligned) {
+Theory((size_t len, size_t align, struct memtype *m), memory, aligned) {
 	int ret;
 	void *ptr;
@@ -33,4 +33,38 @@ Theory((size_t len, size_t align, const struct memtype *m), memory, aligned) {
 	ret = memory_free(m, ptr, len);
 	cr_assert_eq(ret, 0, "Failed to release memory: ret=%d, ptr=%p, len=%zu: %s", ret, ptr, len, strerror(errno));
 }
+
+Test(memory, manager) {
+	size_t total_size = 1 << 10;
+	size_t max_block = total_size - sizeof(struct memtype) - sizeof(struct memblock);
+
+	void *p = memory_alloc(&memtype_heap, total_size);
+	struct memtype *manager = memtype_managed_init(p, total_size);
+
+	void *p1, *p2, *p3;
+	p1 = memory_alloc(manager, 16);
+	cr_assert(p1);
+
+	p2 = memory_alloc(manager, 32);
+	cr_assert(p2);
+
+	cr_assert(memory_free(manager, p1, 16) == 0);
+
+	p1 = memory_alloc_aligned(manager, 128, 128);
+	cr_assert(p1);
+	cr_assert(IS_ALIGNED(p1, 128));
+
+	p3 = memory_alloc_aligned(manager, 128, 256);
+	cr_assert(p3);
+	cr_assert(IS_ALIGNED(p3, 256));
+
+	cr_assert(memory_free(manager, p2, 32) == 0);
+	cr_assert(memory_free(manager, p1, 128) == 0);
+	cr_assert(memory_free(manager, p3, 128) == 0);
+
+	p1 = memory_alloc(manager, max_block);
+	cr_assert(p1);
+	cr_assert(memory_free(manager, p1, max_block) == 0);
+
+	memory_free(&memtype_heap, p, total_size);
+}
tests (pool)

@@ -16,7 +16,7 @@ struct param {
 	int thread_count;
 	int pool_size;
 	size_t block_size;
-	const struct memtype *memtype;
+	struct memtype *memtype;
 };
 
 ParameterizedTestParameters(pool, basic)
@@ -63,4 +63,4 @@ ParameterizedTest(struct param *p, pool, basic)
 	ret = pool_destroy(&pool);
 	cr_assert_eq(ret, 0, "Failed to destroy pool");
 
-}
+}
(The old and new lines appear identical here; the change is apparently whitespace-only.)