diff --git a/include/villas/memory.h b/include/villas/memory.h
index 9a3084544..bcf96a5a1 100644
--- a/include/villas/memory.h
+++ b/include/villas/memory.h
@@ -13,8 +13,10 @@
 
 #define HUGEPAGESIZE (1 << 21)
 
-typedef void *(*memzone_allocator_t)(size_t len, size_t alignment);
-typedef int (*memzone_deallocator_t)(void *ptr, size_t len);
+struct memtype;
+
+typedef void *(*memzone_allocator_t)(struct memtype *mem, size_t len, size_t alignment);
+typedef int (*memzone_deallocator_t)(struct memtype *mem, void *ptr, size_t len);
 
 enum memtype_flags {
 	MEMORY_MMAP = (1 << 0),
@@ -33,6 +35,26 @@ struct memtype {
 	memzone_deallocator_t free;
 };
 
+enum memblock_flags {
+	MEMBLOCK_USED = 1,
+};
+
+// Descriptor of a memory block. Associated block always starts at
+// &m + sizeof(struct memblock).
+struct memblock {
+	struct memblock* prev;
+	struct memblock* next;
+	size_t len;
+	int flags;
+};
+
+struct memtype_managed {
+	struct memtype mt;
+	void *base;
+	size_t len;
+	struct memblock *first;
+};
+
 /** @todo Unused for now */
 struct memzone {
 	struct memtype * const type;
@@ -50,13 +72,15 @@ int memory_init();
  * @retval NULL If allocation failed.
  * @retval <>0 If allocation was successful.
  */
-void * memory_alloc(const struct memtype *m, size_t len);
+void * memory_alloc(struct memtype *m, size_t len);
 
-void * memory_alloc_aligned(const struct memtype *m, size_t len, size_t alignment);
+void * memory_alloc_aligned(struct memtype *m, size_t len, size_t alignment);
 
-int memory_free(const struct memtype *m, void *ptr, size_t len);
+int memory_free(struct memtype *m, void *ptr, size_t len);
 
-extern const struct memtype memtype_heap;
-extern const struct memtype memtype_hugepage;
+struct memtype* memtype_managed_init(void *ptr, size_t len);
 
-#endif /* _MEMORY_H_ */
\ No newline at end of file
+extern struct memtype memtype_heap;
+extern struct memtype memtype_hugepage;
+
+#endif /* _MEMORY_H_ */
diff --git a/include/villas/pool.h b/include/villas/pool.h
index 38b017a7f..13f0e5d7b 100644
--- a/include/villas/pool.h
+++ b/include/villas/pool.h
@@ -18,7 +18,7 @@
 /** A thread-safe memory pool */
 struct pool {
 	void *buffer;		/**< Address of the underlying memory area */
-	const struct memtype *mem;
+	struct memtype *mem;
 
 	size_t len;		/**< Length of the underlying memory area */
 
@@ -31,7 +31,7 @@ struct pool {
 #define INLINE static inline __attribute__((unused))
 
 /** Initiazlize a pool */
-int pool_init(struct pool *p, size_t cnt, size_t blocksz, const struct memtype *mem);
+int pool_init(struct pool *p, size_t cnt, size_t blocksz, struct memtype *mem);
 
 /** Destroy and release memory used by pool. */
 int pool_destroy(struct pool *p);
@@ -61,4 +61,4 @@ INLINE int pool_put(struct pool *p, void *buf)
 	return queue_push(&p->queue, buf);
 }
 
-#endif /* _POOL_H_ */
\ No newline at end of file
+#endif /* _POOL_H_ */
diff --git a/include/villas/queue.h b/include/villas/queue.h
index 50fa5aff9..17a76724b 100644
--- a/include/villas/queue.h
+++ b/include/villas/queue.h
@@ -45,7 +45,7 @@ typedef char cacheline_pad_t[CACHELINE_SIZE];
 
 struct queue {
 	cacheline_pad_t _pad0;	/**< Shared area: all threads read */
-	struct memtype const * mem;
+	struct memtype * mem;
 	size_t buffer_mask;
 	struct queue_cell {
 		atomic_size_t sequence;
@@ -64,7 +64,7 @@ struct queue {
 };
 
 /** Initialize MPMC queue */
-int queue_init(struct queue *q, size_t size, const struct memtype *mem);
+int queue_init(struct queue *q, size_t size, struct memtype *mem);
 
 /** Desroy MPMC queue and release memory */
 int queue_destroy(struct queue *q);
@@ -84,4 +84,4 @@ int queue_push_many(struct queue *q, void *ptr[], size_t cnt);
 
 int queue_pull_many(struct queue *q, void *ptr[], size_t cnt);
 
-#endif /* _MPMC_QUEUE_H_ */
\ No newline at end of file
+#endif /* _MPMC_QUEUE_H_ */
diff --git a/lib/memory.c b/lib/memory.c
index f8f2ebe8a..a8b9d708a 100644
--- a/lib/memory.c
+++ b/lib/memory.c
@@ -30,31 +30,31 @@ int memory_init()
 	return 0;
 }
 
-void * memory_alloc(const struct memtype *m, size_t len)
+void * memory_alloc(struct memtype *m, size_t len)
 {
-	void *ptr = m->alloc(len, sizeof(void *));
+	void *ptr = m->alloc(m, len, sizeof(void *));
 
 	debug(DBG_MEM | 2, "Allocated %#zx bytes of %s memory: %p", len, m->name, ptr);
 
 	return ptr;
 }
 
-void * memory_alloc_aligned(const struct memtype *m, size_t len, size_t alignment)
+void * memory_alloc_aligned(struct memtype *m, size_t len, size_t alignment)
 {
-	void *ptr = m->alloc(len, alignment);
+	void *ptr = m->alloc(m, len, alignment);
 
 	debug(DBG_MEM | 2, "Allocated %#zx bytes of %#zx-byte-aligned %s memory: %p", len, alignment, m->name, ptr);
 
 	return ptr;
 }
 
-int memory_free(const struct memtype *m, void *ptr, size_t len)
+int memory_free(struct memtype *m, void *ptr, size_t len)
 {
 	debug(DBG_MEM | 2, "Releasing %#zx bytes of %s memory", len, m->name);
-	return m->free(ptr, len);
+	return m->free(m, ptr, len);
 }
 
-static void * memory_heap_alloc(size_t len, size_t alignment)
+static void * memory_heap_alloc(struct memtype *m, size_t len, size_t alignment)
 {
 	void *ptr;
 	int ret;
@@ -67,7 +67,7 @@ static void * memory_heap_alloc(size_t len, size_t alignment)
 	return ret ? NULL : ptr;
 }
 
-int memory_heap_free(void *ptr, size_t len)
+int memory_heap_free(struct memtype *m, void *ptr, size_t len)
 {
 	free(ptr);
 
@@ -75,7 +75,7 @@ int memory_heap_free(void *ptr, size_t len)
 }
 
 /** Allocate memory backed by hugepages with malloc() like interface */
-static void * memory_hugepage_alloc(size_t len, size_t alignment)
+static void * memory_hugepage_alloc(struct memtype *m, size_t len, size_t alignment)
 {
 	int prot = PROT_READ | PROT_WRITE;
 	int flags = MAP_PRIVATE | MAP_ANONYMOUS;
@@ -96,15 +96,106 @@ static void * memory_hugepage_alloc(size_t len, size_t alignment)
 	return ret;
 }
 
-static int memory_hugepage_free(void *ptr, size_t len)
+static int memory_hugepage_free(struct memtype *m, void *ptr, size_t len)
 {
 	len = ALIGN(len, HUGEPAGESIZE); /* ugly see: https://lkml.org/lkml/2015/3/27/171 */
 
 	return munmap(ptr, len);
 }
 
+void* memory_managed_alloc(struct memtype *m, size_t len, size_t alignment) {
+	// Simple first-fit allocation.
+	struct memtype_managed* man = (struct memtype_managed*) m;
+	struct memblock* block;
+	for (block = man->first; block != NULL; block = block->next) {
+		if (block->flags & MEMBLOCK_USED)
+			continue;
+		char* cptr = (char*) block + sizeof(struct memblock);
+		size_t avail = block->len;
+		uintptr_t uptr = (uintptr_t) cptr;
+		// check alignment first
+		uintptr_t rem = uptr % alignment;
+		if (rem != 0) {
+			cptr += alignment - rem;
+			avail -= alignment - rem;
+		}
+		// TODO: if alignment is large, we may waste a lot of memory here.
+		if (avail > len + sizeof(struct memblock)) {
+			struct memblock *newblock = (struct memblock*) (cptr + len);
+			newblock->prev = block;
+			newblock->next = block->next;
+			block->next = newblock;
+			if (newblock->next)
+				newblock->next->prev = newblock;
+			newblock->flags = 0;
+			newblock->len = (char*) block + block->len - cptr - sizeof(struct memblock);
+			block->flags |= MEMBLOCK_USED;
+			return (void*) cptr;
+		}
+	}
+	// no suitable block found
+	return NULL;
+}
+
+int memory_managed_free(struct memtype *m, void *ptr, size_t len) {
+	struct memtype_managed* man = (struct memtype_managed*) m;
+	char* cptr = (char*) ptr;
+	struct memblock* block;
+	for (block = man->first; block != NULL; block = block->next) {
+		if (!(block->flags & MEMBLOCK_USED))
+			continue;
+		// since we may waste some memory at the start of a block to ensure
+		// alignment, ptr may not actually be the start of the block
+		if ((char*) block + sizeof(struct memblock) <= cptr &&
+		    cptr < (char*) block + block->len) {
+			// try to merge it with a neighbouring free block
+			if (block->prev && !(block->prev->flags & MEMBLOCK_USED)) {
+				block->prev->len += block->len + sizeof(struct memblock);
+				block->prev->next = block->next;
+				if (block->next)
+					block->next->prev = block->prev;
+			} else if (block->next && !(block->next->flags & MEMBLOCK_USED)) {
+				block->len += block->next->len + sizeof(struct memblock);
+				block->next = block->next->next;
+				if (block->next)
+					block->next->prev = block;
+			} else {
+				block->flags &= (~MEMBLOCK_USED);
+			}
+			return 0;
+		}
+	}
+	return -1;
+}
+
+struct memtype* memtype_managed_init(void *ptr, size_t len) {
+	if (len < sizeof(struct memtype_managed) + sizeof(struct memblock)) {
+		info("memtype_managed: passed region too small");
+		return NULL;
+	}
+	struct memtype_managed *man = (struct memtype_managed*) ptr;
+	man->mt.name = "managed";
+	man->mt.flags = 0; // TODO
+	man->mt.alloc = memory_managed_alloc;
+	man->mt.free = memory_managed_free;
+	man->mt.alignment = 1;
+	man->base = ptr;
+	man->len = len;
+
+	char *cptr = (char*) ptr;
+	cptr += ALIGN(sizeof(struct memtype_managed), sizeof(void*));
+	man->first = (struct memblock*) ((void*) cptr);
+	man->first->prev = NULL;
+	man->first->next = NULL;
+	cptr += ALIGN(sizeof(struct memblock), sizeof(void*));
+	man->first->len = len - (cptr - (char*) ptr);
+	man->first->flags = 0;
+
+	return (struct memtype*) man;
+}
+
 /* List of available memory types */
-const struct memtype memtype_heap = {
+struct memtype memtype_heap = {
 	.name = "heap",
 	.flags = MEMORY_HEAP,
 	.alloc = memory_heap_alloc,
@@ -112,7 +203,7 @@
 	.alignment = 1
 };
 
-const struct memtype memtype_hugepage = {
+struct memtype memtype_hugepage = {
 	.name = "mmap_hugepages",
 	.flags = MEMORY_MMAP | MEMORY_HUGEPAGE,
 	.alloc = memory_hugepage_alloc,
diff --git a/lib/pool.c b/lib/pool.c
index e2dc3f9c9..b9fe4482b 100644
--- a/lib/pool.c
+++ b/lib/pool.c
@@ -10,7 +10,7 @@
 #include "memory.h"
 #include "kernel/kernel.h"
 
-int pool_init(struct pool *p, size_t cnt, size_t blocksz, const struct memtype *m)
+int pool_init(struct pool *p, size_t cnt, size_t blocksz, struct memtype *m)
 {
 	int ret;
 
@@ -39,4 +39,4 @@ int pool_destroy(struct pool *p)
 	queue_destroy(&p->queue);
 
 	return memory_free(p->mem, p->buffer, p->len);
-}
\ No newline at end of file
+}
diff --git a/lib/queue.c b/lib/queue.c
index 20a7a6710..fdd1c09f3 100644
--- a/lib/queue.c
+++ b/lib/queue.c
@@ -35,7 +35,7 @@
 #include "utils.h"
 
 /** Initialize MPMC queue */
-int queue_init(struct queue *q, size_t size, const struct memtype *mem)
+int queue_init(struct queue *q, size_t size, struct memtype *mem)
 {
 	/* Queue size must be 2 exponent */
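
A minimal usage sketch of the new managed memtype follows (added for illustration, not part of the patch). It assumes the public header is reachable as <villas/memory.h> and simply takes the backing region from the heap memtype; in practice a hugepage or shared-memory mapping obtained through memtype_hugepage is the more likely backing store.

#include <villas/memory.h>

int main()
{
	int ret;
	size_t len = 1 << 20; /* 1 MiB backing region, assumed large enough for the descriptors */

	ret = memory_init();
	if (ret)
		return ret;

	/* Any contiguous buffer works as backing region; here it comes from the heap memtype. */
	void *region = memory_alloc(&memtype_heap, len);
	if (!region)
		return -1;

	/* Place the managed memtype and its first free block inside the region. */
	struct memtype *man = memtype_managed_init(region, len);
	if (!man)
		return -1;

	/* Blocks are now handed out by the first-fit allocator through the usual API. */
	void *a = memory_alloc(man, 1024);
	void *b = memory_alloc_aligned(man, 4096, 64);

	memory_free(man, a, 1024);
	memory_free(man, b, 4096);

	/* The backing region itself is released through its original memtype. */
	return memory_free(&memtype_heap, region, len);
}

Because alloc() and free() now receive the struct memtype itself, the pointer returned by memtype_managed_init() can equally be handed to pool_init() and queue_init(); that is what dropping the const qualifiers across pool.h, queue.h and their implementations enables, since the managed allocator mutates its block list through the memtype it is given.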