
Merge branch 'fix-memory-init-for-zero-hugepages' into 'master'

memory: init page size for mmap even if number of hugepages is equal to 0

See merge request acs/public/villas/node!57
commit d2af9fedbb
Author: Steffen Vogel
Date:   2019-10-29 10:03:54 +01:00
3 changed files with 28 additions and 34 deletions
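
In short: before this change, memory_init() set up the mmap page size only inside the hugepage initialization path, so starting with hugepages = 0 left the plain mmap allocator without a valid page size. A minimal sketch of the fixed flow, using hypothetical stand-ins for the kernel helpers (not the project's actual code):

/* Sketch of the fixed initialization flow. sysconf() stands in
 * for kernel_get_page_size(); the hugepage size is assumed. */
#include <stdio.h>
#include <unistd.h>

static long pgsz = -1;
static long hugepgsz = -1;

int memory_mmap_init_sketch(int hugepages)
{
	/* Always initialize the regular page size first. */
	pgsz = sysconf(_SC_PAGESIZE);
	if (pgsz < 0)
		return -1;

	/* Only touch hugepage state when hugepages were requested. */
	if (hugepages > 0)
		hugepgsz = 2L << 20; /* assumption: 2 MiB hugepages */
	else
		fprintf(stderr, "Hugepage allocator disabled.\n");

	return 0;
}

int main(void)
{
	/* With zero hugepages this now succeeds and pgsz is valid. */
	if (memory_mmap_init_sketch(0))
		return 1;
	printf("page size: %ld bytes\n", pgsz);
	return 0;
}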

Changed file 1 of 3:

@@ -60,4 +60,4 @@ extern struct memory_type *memory_default;
 struct memory_type * memory_ib(struct node *n, struct memory_type *parent);
 struct memory_type * memory_managed(void *ptr, size_t len);
 
-int memory_hugepage_init(int hugepages);
+int memory_mmap_init(int hugepages);

Changed file 2 of 3:

@@ -44,18 +44,9 @@ int memory_init(int hugepages)
 	info("Initialize memory sub-system: #hugepages=%d", hugepages);
 
-	if (hugepages > 0) {
-		ret = memory_hugepage_init(hugepages);
-		if (ret)
-			return ret;
-
-		memory_default = &memory_mmap_hugetlb;
-	}
-	else {
-		memory_default = &memory_mmap;
-		warning("Hugepage allocator disabled.");
-	}
+	ret = memory_mmap_init(hugepages);
+	if (ret < 0)
+		return ret;
 
 	size_t lock = kernel_get_hugepage_size() * hugepages;
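
Side note on the line above: the memlock budget scales linearly with the requested hugepage count. A quick worked example, assuming the common 2 MiB hugepage size (the actual size comes from kernel_get_hugepage_size()):

#include <stdio.h>

int main(void)
{
	size_t hugepgsz = 2UL << 20;	/* assumption: 2 MiB hugepages */
	int hugepages = 100;		/* hypothetical request */

	/* Mirrors: size_t lock = kernel_get_hugepage_size() * hugepages; */
	size_t lock = hugepgsz * (size_t) hugepages;

	printf("lock = %zu bytes (%zu MiB)\n", lock, lock >> 20); /* 200 MiB */
	return 0;
}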

Changed file 3 of 3:

@@ -45,31 +45,38 @@ using namespace villas::utils;
 static size_t pgsz = -1;
 static size_t hugepgsz = -1;
 
-int memory_hugepage_init(int hugepages)
+int memory_mmap_init(int hugepages)
 {
 	pgsz = kernel_get_page_size();
 	if (pgsz < 0)
 		return -1;
 
-	hugepgsz = kernel_get_hugepage_size();
-	if (hugepgsz < 0)
-		return -1;
+	if (hugepages > 0) {
+		hugepgsz = kernel_get_hugepage_size();
+		if (hugepgsz < 0)
+			return -1;
 
 #if defined(__linux__) && defined(__x86_64__)
-	int pagecnt;
+		int pagecnt;
 
-	pagecnt = kernel_get_nr_hugepages();
-	if (pagecnt < hugepages) {
-		if (getuid() == 0) {
-			kernel_set_nr_hugepages(hugepages);
-			debug(LOG_MEM | 2, "Increased number of reserved hugepages from %d to %d", pagecnt, hugepages);
-		}
-		else {
-			warning("Failed to reserve hugepages. Please reserve them manually by running as root:");
-			warning("   $ echo %d > /proc/sys/vm/nr_hugepages", hugepages);
-		}
-	}
+		pagecnt = kernel_get_nr_hugepages();
+		if (pagecnt < hugepages) {
+			if (getuid() == 0) {
+				kernel_set_nr_hugepages(hugepages);
+				debug(LOG_MEM | 2, "Increased number of reserved hugepages from %d to %d", pagecnt, hugepages);
+			}
+			else {
+				warning("Failed to reserve hugepages. Please reserve them manually by running as root:");
+				warning("   $ echo %d > /proc/sys/vm/nr_hugepages", hugepages);
+			}
+		}
 #endif
+		memory_default = &memory_mmap_hugetlb;
+	}
+	else {
+		warning("Hugepage allocator disabled.");
+		memory_default = &memory_mmap;
+	}
 
 	return 0;
 }
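
The hunk above probes and, if running as root, raises the reserved hugepage count. For illustration, a plausible stand-in for kernel_get_nr_hugepages() that reads the standard Linux procfs knob mentioned in the warning (the project's real helper may differ):

#include <stdio.h>

/* Read the number of reserved hugepages from procfs; returns -1 on error. */
static int get_nr_hugepages(void)
{
	int n;
	FILE *f = fopen("/proc/sys/vm/nr_hugepages", "r");

	if (!f)
		return -1;
	if (fscanf(f, "%d", &n) != 1)
		n = -1;

	fclose(f);
	return n;
}

int main(void)
{
	printf("reserved hugepages: %d\n", get_nr_hugepages());
	return 0;
}
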
@@ -97,16 +104,12 @@ static struct memory_allocation * memory_mmap_alloc(size_t len, size_t alignment
 		fd = -1;
 #endif
 		sz = hugepgsz;
-
-		info("allocate %#zx bytes mmap_hugetlb memory", len);
 	}
 	else {
 		flags = MAP_PRIVATE | MAP_ANONYMOUS;
 		fd = -1;
 		sz = pgsz;
-
-		info("allocate %#zx bytes mmap memory", len);
 	}
 
 	/** We must make sure that len is a multiple of the page size
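
The truncated comment at the end of this hunk refers to rounding the requested length up to the selected page size. A generic power-of-two round-up looks like this (a sketch, not necessarily the project's exact code):

#include <stddef.h>

/* Round len up to the next multiple of sz; assumes sz is a power
 * of two, which holds for both 4 KiB pages and 2 MiB hugepages. */
static size_t align_up(size_t len, size_t sz)
{
	return (len + sz - 1) & ~(sz - 1);
}
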
@@ -146,7 +149,7 @@ struct memory_type memory_mmap = {
 };
 
 struct memory_type memory_mmap_hugetlb = {
-	.name = "mmap",
+	.name = "mmap_hugetlb",
 	.flags = (int) MemoryFlags::MMAP | (int) MemoryFlags::HUGEPAGE,
 	.alignment = 21, /* 2 MiB hugepage */
 	.alloc = memory_mmap_alloc,
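
The alignment field appears to be stored as a power-of-two exponent, as the inline comment suggests. A quick check of the arithmetic:

#include <stdio.h>

int main(void)
{
	/* .alignment = 21 read as an exponent: 1 << 21 bytes */
	size_t align = (size_t) 1 << 21;
	printf("%zu bytes = %zu MiB\n", align, align >> 20); /* 2 MiB */
	return 0;
}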