1
0
Fork 0
mirror of https://git.rwth-aachen.de/acs/public/villas/node/ synced 2025-03-09 00:00:00 +01:00
VILLASnode/lib/memory/mmap.cpp

179 lines
4.3 KiB
C++
Raw Permalink Normal View History

2019-10-26 13:35:40 +02:00
/** mmap memory allocator.
2018-07-02 14:17:50 +02:00
*
* @author Steffen Vogel <stvogel@eonerc.rwth-aachen.de>
2020-01-20 17:17:00 +01:00
* @copyright 2014-2020, Institute for Automation of Complex Power Systems, EONERC
2018-07-02 14:17:50 +02:00
* @license GNU General Public License (version 3)
*
* VILLASnode
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*********************************************************************************/
#include <cerrno>
#include <cstdlib>
#include <new>

#include <unistd.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/time.h>
#include <sys/types.h>

/* Required to allocate hugepages on Apple OS X */
#ifdef __MACH__
#include <mach/vm_statistics.h>
#endif /* __MACH__ */

#include <villas/exceptions.hpp>
#include <villas/kernel/kernel.hpp>
#include <villas/log.hpp>
#include <villas/memory.h>
#include <villas/utils.hpp>

using namespace villas;
using namespace villas::utils;
/* Cached regular and hugepage sizes, filled in by memory_mmap_init().
 * NOTE(review): both are size_t, so the -1 "uninitialized" sentinel
 * wraps to SIZE_MAX — any signed `< 0` test on these is always false. */
static size_t pgsz = -1;
static size_t hugepgsz = -1;

/* Module-local logger, created in memory_mmap_init(). */
static Logger logger;
int memory_mmap_init(int hugepages)
{
2021-02-16 14:15:14 +01:00
logger = logging.get("memory:mmap");
2021-05-20 06:21:33 -04:00
pgsz = kernel::getPageSize();
if (pgsz < 0)
return -1;
if (hugepages > 0) {
2021-05-20 06:21:33 -04:00
hugepgsz = kernel::getHugePageSize();
if (hugepgsz < 0) {
2021-02-16 14:15:14 +01:00
logger->warn("Failed to determine hugepage size.");
memory_default = &memory_mmap;
return 0;
}
#if defined(__linux__) && defined(__x86_64__)
int ret, pagecnt;
2021-05-20 06:21:33 -04:00
pagecnt = kernel::getNrHugepages();
if (pagecnt < hugepages) {
if (getuid() == 0) {
2021-05-20 06:21:33 -04:00
ret = kernel::setNrHugepages(hugepages);
if (ret) {
2021-02-16 14:15:14 +01:00
logger->warn("Failed to increase number of reserved hugepages");
memory_default = &memory_mmap;
}
else {
logger->debug("Increased number of reserved hugepages from {} to {}", pagecnt, hugepages);
memory_default = &memory_mmap_hugetlb;
}
2019-10-29 09:20:57 +01:00
}
else {
2021-02-16 14:15:14 +01:00
logger->warn("Failed to reserved hugepages. Please reserve manually by running as root:");
logger->warn(" $ echo {} > /proc/sys/vm/nr_hugepages", hugepages);
memory_default = &memory_mmap;
}
}
else
memory_default = &memory_mmap_hugetlb;
#else
memory_default = &memory_mmap;
#endif
2019-10-29 09:20:57 +01:00
}
else {
2021-02-16 14:15:14 +01:00
logger->warn("Hugepage allocator disabled.");
memory_default = &memory_mmap;
}
2019-10-29 09:20:57 +01:00
return 0;
}
2019-10-26 13:35:40 +02:00
/** Allocate memory backed by mmaps with malloc() like interface */
static struct memory_allocation * memory_mmap_alloc(size_t len, size_t alignment, struct memory_type *m)
2018-07-02 14:17:50 +02:00
{
int flags, fd;
size_t sz;
auto *ma = new struct memory_allocation;
if (!ma)
throw MemoryAllocationError();
2019-10-26 13:35:40 +02:00
if (m->flags & (int) MemoryFlags::HUGEPAGE) {
#ifdef __linux__
flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB;
#else
flags = MAP_PRIVATE | MAP_ANONYMOUS;
#endif
2018-07-02 14:17:50 +02:00
#ifdef __MACH__
fd = VM_FLAGS_SUPERPAGE_SIZE_2MB;
#else
fd = -1;
2018-07-02 14:17:50 +02:00
#endif
sz = hugepgsz;
}
else {
flags = MAP_PRIVATE | MAP_ANONYMOUS;
fd = -1;
2018-07-02 14:17:50 +02:00
sz = pgsz;
}
2018-07-02 14:17:50 +02:00
2019-10-26 13:35:40 +02:00
/** We must make sure that len is a multiple of the page size
2018-07-02 14:17:50 +02:00
*
* See: https://lkml.org/lkml/2014/10/22/925
*/
ma->length = ALIGN(len, sz);
ma->alignment = ALIGN(alignment, sz);
ma->type = m;
2019-04-07 15:13:40 +02:00
ma->address = mmap(nullptr, ma->length, PROT_READ | PROT_WRITE, flags, fd, 0);
if (ma->address == MAP_FAILED) {
delete ma;
2019-10-26 13:35:40 +02:00
return nullptr;
}
2018-08-13 15:26:24 +02:00
return ma;
}
2019-10-26 13:35:40 +02:00
/** Release a mapping previously created by memory_mmap_alloc().
 *
 * @param ma Allocation descriptor holding the mapping's address and length.
 * @param m  Memory type descriptor (unused here).
 * @return 0 on success, the non-zero munmap() result on failure.
 */
static int memory_mmap_free(struct memory_allocation *ma, struct memory_type *m)
{
	/* munmap() already yields 0 on success and -1 on error,
	 * so its result can be passed straight through. */
	return munmap(ma->address, ma->length);
}
2019-10-26 13:35:40 +02:00
/* Allocator descriptor: mmap()-backed memory using regular pages.
 * alignment is expressed as a power of two (2^12 = 4 KiB). */
struct memory_type memory_mmap = {
	.name = "mmap",
	.flags = (int) MemoryFlags::MMAP,
	.alignment = 12, /* 4k page */
	.alloc = memory_mmap_alloc,
	.free = memory_mmap_free
};
/* Allocator descriptor: mmap()-backed memory using hugepages.
 * alignment is expressed as a power of two (2^21 = 2 MiB). */
struct memory_type memory_mmap_hugetlb = {
	.name = "mmap_hugetlb",
	.flags = (int) MemoryFlags::MMAP | (int) MemoryFlags::HUGEPAGE,
	.alignment = 21, /* 2 MiB hugepage */
	.alloc = memory_mmap_alloc,
	.free = memory_mmap_free
};