
implement managed memtype

Georg Reinke 2017-03-27 13:22:54 +02:00
parent b89b30ffe1
commit fee29436b0
6 changed files with 144 additions and 29 deletions

View file

@@ -13,8 +13,10 @@
#define HUGEPAGESIZE (1 << 21)
-typedef void *(*memzone_allocator_t)(size_t len, size_t alignment);
-typedef int (*memzone_deallocator_t)(void *ptr, size_t len);
+struct memtype;
+
+typedef void *(*memzone_allocator_t)(struct memtype *mem, size_t len, size_t alignment);
+typedef int (*memzone_deallocator_t)(struct memtype *mem, void *ptr, size_t len);
enum memtype_flags {
MEMORY_MMAP = (1 << 0),
@@ -33,6 +35,26 @@ struct memtype {
memzone_deallocator_t free;
};
+enum memblock_flags {
+    MEMBLOCK_USED = 1,
+};
+
+/** Descriptor of a memory block. The data area of the block starts
+ * directly after the descriptor, at (char *) block + sizeof(struct memblock). */
+struct memblock {
+    struct memblock *prev;
+    struct memblock *next;
+    size_t len;
+    int flags;
+};
+
+struct memtype_managed {
+    struct memtype mt;
+    void *base;
+    size_t len;
+    struct memblock *first;
+};
/** @todo Unused for now */
struct memzone {
struct memtype * const type;
@@ -50,13 +72,15 @@ int memory_init();
* @retval NULL If allocation failed.
* @retval <>0 If allocation was successful.
*/
-void * memory_alloc(const struct memtype *m, size_t len);
+void * memory_alloc(struct memtype *m, size_t len);
-void * memory_alloc_aligned(const struct memtype *m, size_t len, size_t alignment);
+void * memory_alloc_aligned(struct memtype *m, size_t len, size_t alignment);
-int memory_free(const struct memtype *m, void *ptr, size_t len);
+int memory_free(struct memtype *m, void *ptr, size_t len);
-extern const struct memtype memtype_heap;
-extern const struct memtype memtype_hugepage;
+struct memtype * memtype_managed_init(void *ptr, size_t len);
-#endif /* _MEMORY_H_ */
+extern struct memtype memtype_heap;
+extern struct memtype memtype_hugepage;
+#endif /* _MEMORY_H_ */
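
The key change in this header is that the allocator and deallocator callbacks now receive the struct memtype they belong to, which is what allows a memtype to carry per-instance state. Below is a minimal sketch of what the new callback signatures permit, assuming the header is included as "memory.h" as elsewhere in this patch; the counting wrapper (struct memtype_counting, counting_alloc, counting_free) is purely illustrative and not part of this commit.

#include <stdlib.h>

#include "memory.h"

/* Hypothetical stateful memtype: a heap allocator that counts its
 * outstanding allocations. Illustrative only. */
struct memtype_counting {
    struct memtype mt;      /* must be the first member so the casts below are valid */
    size_t outstanding;
};

static void * counting_alloc(struct memtype *m, size_t len, size_t alignment)
{
    struct memtype_counting *cnt = (struct memtype_counting *) m;
    void *ptr = NULL;

    if (alignment < sizeof(void *))
        alignment = sizeof(void *);
    if (posix_memalign(&ptr, alignment, len))
        return NULL;

    cnt->outstanding++;
    return ptr;
}

static int counting_free(struct memtype *m, void *ptr, size_t len)
{
    struct memtype_counting *cnt = (struct memtype_counting *) m;

    (void) len; /* length is not needed for a plain free() */
    free(ptr);
    cnt->outstanding--;
    return 0;
}

struct memtype_counting memtype_counting_heap = {
    .mt = {
        .name = "counting_heap",
        .flags = MEMORY_HEAP,
        .alloc = counting_alloc,
        .free = counting_free,
        .alignment = 1,
    },
    .outstanding = 0,
};

Passing &memtype_counting_heap.mt to memory_alloc() / memory_free() would then update the counter through the m argument that the callbacks now receive.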

View file

@@ -18,7 +18,7 @@
/** A thread-safe memory pool */
struct pool {
void *buffer; /**< Address of the underlying memory area */
-const struct memtype *mem;
+struct memtype *mem;
size_t len; /**< Length of the underlying memory area */
@@ -31,7 +31,7 @@ struct pool {
#define INLINE static inline __attribute__((unused))
/** Initialize a pool */
-int pool_init(struct pool *p, size_t cnt, size_t blocksz, const struct memtype *mem);
+int pool_init(struct pool *p, size_t cnt, size_t blocksz, struct memtype *mem);
/** Destroy and release memory used by pool. */
int pool_destroy(struct pool *p);
@@ -61,4 +61,4 @@ INLINE int pool_put(struct pool *p, void *buf)
return queue_push(&p->queue, buf);
}
-#endif /* _POOL_H_ */
+#endif /* _POOL_H_ */
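
Since pool_init() now takes a non-const struct memtype *, a pool can be backed by a stateful memtype such as the managed one added in this commit. A rough sketch, assuming the usual pool_get() counterpart to the pool_put() shown above and plain "memory.h" / "pool.h" include paths; sizes are arbitrary.

#include <stdlib.h>

#include "memory.h"
#include "pool.h"

/* Sketch: back a pool with a managed region instead of one of the
 * global memtypes. */
int pool_on_managed_region_example(void)
{
    size_t region_len = 1 << 20;            /* 1 MiB scratch region */
    void *region = malloc(region_len);
    if (!region)
        return -1;

    struct memtype *mm = memtype_managed_init(region, region_len);
    if (!mm)
        return -1;

    struct pool p;
    if (pool_init(&p, 64, 256, mm))         /* 64 blocks of 256 bytes each */
        return -1;

    void *blk = pool_get(&p);               /* blocks come out of the managed region */
    if (blk)
        pool_put(&p, blk);

    pool_destroy(&p);                       /* releases the buffer back to the region */
    free(region);
    return 0;
}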

View file

@@ -45,7 +45,7 @@ typedef char cacheline_pad_t[CACHELINE_SIZE];
struct queue {
cacheline_pad_t _pad0; /**< Shared area: all threads read */
-struct memtype const * mem;
+struct memtype * mem;
size_t buffer_mask;
struct queue_cell {
atomic_size_t sequence;
@@ -64,7 +64,7 @@ struct queue {
};
/** Initialize MPMC queue */
-int queue_init(struct queue *q, size_t size, const struct memtype *mem);
+int queue_init(struct queue *q, size_t size, struct memtype *mem);
/** Destroy MPMC queue and release memory */
int queue_destroy(struct queue *q);
@@ -84,4 +84,4 @@ int queue_push_many(struct queue *q, void *ptr[], size_t cnt);
int queue_pull_many(struct queue *q, void *ptr[], size_t cnt);
-#endif /* _MPMC_QUEUE_H_ */
+#endif /* _MPMC_QUEUE_H_ */

View file

@@ -30,31 +30,31 @@ int memory_init()
return 0;
}
-void * memory_alloc(const struct memtype *m, size_t len)
+void * memory_alloc(struct memtype *m, size_t len)
{
-    void *ptr = m->alloc(len, sizeof(void *));
+    void *ptr = m->alloc(m, len, sizeof(void *));
debug(DBG_MEM | 2, "Allocated %#zx bytes of %s memory: %p", len, m->name, ptr);
return ptr;
}
-void * memory_alloc_aligned(const struct memtype *m, size_t len, size_t alignment)
+void * memory_alloc_aligned(struct memtype *m, size_t len, size_t alignment)
{
-    void *ptr = m->alloc(len, alignment);
+    void *ptr = m->alloc(m, len, alignment);
debug(DBG_MEM | 2, "Allocated %#zx bytes of %#zx-byte-aligned %s memory: %p", len, alignment, m->name, ptr);
return ptr;
}
-int memory_free(const struct memtype *m, void *ptr, size_t len)
+int memory_free(struct memtype *m, void *ptr, size_t len)
{
    debug(DBG_MEM | 2, "Releasing %#zx bytes of %s memory", len, m->name);
-    return m->free(ptr, len);
+    return m->free(m, ptr, len);
}
-static void * memory_heap_alloc(size_t len, size_t alignment)
+static void * memory_heap_alloc(struct memtype *m, size_t len, size_t alignment)
{
void *ptr;
int ret;
@@ -67,7 +67,7 @@ static void * memory_heap_alloc(size_t len, size_t alignment)
return ret ? NULL : ptr;
}
-int memory_heap_free(void *ptr, size_t len)
+int memory_heap_free(struct memtype *m, void *ptr, size_t len)
{
free(ptr);
@@ -75,7 +75,7 @@ int memory_heap_free(void *ptr, size_t len)
}
/** Allocate memory backed by hugepages with malloc() like interface */
-static void * memory_hugepage_alloc(size_t len, size_t alignment)
+static void * memory_hugepage_alloc(struct memtype *m, size_t len, size_t alignment)
{
int prot = PROT_READ | PROT_WRITE;
int flags = MAP_PRIVATE | MAP_ANONYMOUS;
@@ -96,15 +96,106 @@ static void * memory_hugepage_alloc(size_t len, size_t alignment)
return ret;
}
-static int memory_hugepage_free(void *ptr, size_t len)
+static int memory_hugepage_free(struct memtype *m, void *ptr, size_t len)
{
len = ALIGN(len, HUGEPAGESIZE); /* ugly see: https://lkml.org/lkml/2015/3/27/171 */
return munmap(ptr, len);
}
+void * memory_managed_alloc(struct memtype *m, size_t len, size_t alignment)
+{
+    /* Simple first-fit allocation */
+    struct memtype_managed *man = (struct memtype_managed *) m;
+    struct memblock *block;
+
+    for (block = man->first; block != NULL; block = block->next) {
+        if (block->flags & MEMBLOCK_USED)
+            continue;
+
+        char *cptr = (char *) block + sizeof(struct memblock);
+        size_t avail = block->len;
+        uintptr_t uptr = (uintptr_t) cptr;
+
+        /* Check alignment first; skip this block if the gap needed for
+         * alignment already uses up its available space. */
+        uintptr_t rem = uptr % alignment;
+        size_t gap = 0;
+        if (rem != 0) {
+            gap = alignment - rem;
+            if (gap >= avail)
+                continue;
+
+            cptr += gap;
+            avail -= gap;
+        }
+
+        if (avail < len)
+            continue;
+
+        /* TODO: if alignment is large, we may waste a lot of memory here. */
+        if (avail > len + sizeof(struct memblock)) {
+            /* Large enough to split: create a new free block for the remainder. */
+            struct memblock *newblock = (struct memblock *) (cptr + len);
+            newblock->prev = block;
+            newblock->next = block->next;
+            block->next = newblock;
+            if (newblock->next)
+                newblock->next->prev = newblock;
+            newblock->flags = 0;
+            newblock->len = avail - len - sizeof(struct memblock);
+
+            /* Shrink the allocated block to the span it actually covers. */
+            block->len = gap + len;
+        }
+
+        block->flags |= MEMBLOCK_USED;
+        return (void *) cptr;
+    }
+
+    /* No suitable block found */
+    return NULL;
+}
+int memory_managed_free(struct memtype *m, void *ptr, size_t len)
+{
+    struct memtype_managed *man = (struct memtype_managed *) m;
+    char *cptr = (char *) ptr;
+    struct memblock *block;
+
+    for (block = man->first; block != NULL; block = block->next) {
+        if (!(block->flags & MEMBLOCK_USED))
+            continue;
+
+        /* Since we may waste some memory at the start of a block to ensure
+         * alignment, ptr may not be the exact start of the block's data area. */
+        if ((char *) block + sizeof(struct memblock) <= cptr &&
+            cptr < (char *) block + sizeof(struct memblock) + block->len) {
+            /* Mark the block as free, then try to merge it with
+             * neighbouring free blocks. */
+            block->flags &= ~MEMBLOCK_USED;
+
+            if (block->next && !(block->next->flags & MEMBLOCK_USED)) {
+                block->len += block->next->len + sizeof(struct memblock);
+                block->next = block->next->next;
+                if (block->next)
+                    block->next->prev = block;
+            }
+
+            if (block->prev && !(block->prev->flags & MEMBLOCK_USED)) {
+                block->prev->len += block->len + sizeof(struct memblock);
+                block->prev->next = block->next;
+                if (block->next)
+                    block->next->prev = block->prev;
+            }
+
+            return 0;
+        }
+    }
+
+    return -1;
+}
+struct memtype * memtype_managed_init(void *ptr, size_t len)
+{
+    if (len < sizeof(struct memtype_managed) + sizeof(struct memblock)) {
+        info("memtype_managed: passed region too small");
+        return NULL;
+    }
+
+    struct memtype_managed *man = (struct memtype_managed *) ptr;
+    man->mt.name = "managed";
+    man->mt.flags = 0; /* TODO */
+    man->mt.alloc = memory_managed_alloc;
+    man->mt.free = memory_managed_free;
+    man->mt.alignment = 1;
+    man->base = ptr;
+    man->len = len;
+
+    /* The first memblock descriptor follows the memtype_managed bookkeeping;
+     * its data area covers the remainder of the region. */
+    char *cptr = (char *) ptr;
+    cptr += ALIGN(sizeof(struct memtype_managed), sizeof(void *));
+    man->first = (struct memblock *) ((void *) cptr);
+    man->first->prev = NULL;
+    man->first->next = NULL;
+    cptr += ALIGN(sizeof(struct memblock), sizeof(void *));
+    man->first->len = len - (cptr - (char *) ptr);
+    man->first->flags = 0;
+
+    return (struct memtype *) man;
+}
/* List of available memory types */
-const struct memtype memtype_heap = {
+struct memtype memtype_heap = {
.name = "heap",
.flags = MEMORY_HEAP,
.alloc = memory_heap_alloc,
@@ -112,7 +203,7 @@ const struct memtype memtype_heap = {
.alignment = 1
};
-const struct memtype memtype_hugepage = {
+struct memtype memtype_hugepage = {
.name = "mmap_hugepages",
.flags = MEMORY_MMAP | MEMORY_HUGEPAGE,
.alloc = memory_hugepage_alloc,
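
For context, this is roughly how the new managed memtype is meant to be driven end to end. A sketch under the assumption that the backing region is an ordinary heap buffer (a hugepage mapping would work the same way) and that the header is included as "memory.h".

#include <stdio.h>
#include <stdlib.h>

#include "memory.h"

int main(void)
{
    /* Any large region works as backing store; a plain heap buffer is used here. */
    size_t region_len = 1 << 20;
    void *region = malloc(region_len);
    if (!region)
        return 1;

    /* Places struct memtype_managed and the first free memblock at the start
     * of the region, so the usable capacity is slightly below region_len. */
    struct memtype *mm = memtype_managed_init(region, region_len);
    if (!mm)
        return 1;

    /* First-fit allocations carved out of the region. */
    void *a = memory_alloc(mm, 1024);
    void *b = memory_alloc_aligned(mm, 4096, 64);
    printf("a=%p b=%p\n", a, b);

    /* Freeing marks the blocks unused and merges them with neighbouring
     * free blocks. */
    memory_free(mm, a, 1024);
    memory_free(mm, b, 4096);

    free(region);
    return 0;
}

Note that memory_free() takes the same length that was requested from memory_alloc(); the managed memtype only uses it for the debug output and locates the block by pointer.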

View file

@@ -10,7 +10,7 @@
#include "memory.h"
#include "kernel/kernel.h"
-int pool_init(struct pool *p, size_t cnt, size_t blocksz, const struct memtype *m)
+int pool_init(struct pool *p, size_t cnt, size_t blocksz, struct memtype *m)
{
int ret;
@@ -39,4 +39,4 @@ int pool_destroy(struct pool *p)
queue_destroy(&p->queue);
return memory_free(p->mem, p->buffer, p->len);
-}
+}

View file

@@ -35,7 +35,7 @@
#include "utils.h"
/** Initialize MPMC queue */
-int queue_init(struct queue *q, size_t size, const struct memtype *mem)
+int queue_init(struct queue *q, size_t size, struct memtype *mem)
{
/* Queue size must be a power of two */