/** Shared-memory interface: The interface functions that the external program should use.
 *
 * @file
 * @author Georg Martin Reinke <georg.reinke@rwth-aachen.de>
 * @copyright 2014-2020, Institute for Automation of Complex Power Systems, EONERC
 * @license GNU General Public License (version 3)
 *
 * VILLASnode
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *********************************************************************************/
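
/* Typical usage from an external program (a sketch: the shmem object names and
 * configuration values below are illustrative, and error handling is omitted):
 *
 * @code
 * struct shmem_conf conf;
 * conf.polling = 0;
 * conf.queuelen = 64;
 * conf.samplelen = 16;
 *
 * struct shmem_int shm;
 * struct sample *smps[8];
 *
 * shmem_int_open("/villas-out", "/villas-in", &shm, &conf); // blocks until the peer has opened too
 * shmem_int_alloc(&shm, smps, 8);                           // take samples from the shared pool
 * // ... fill the samples ...
 * shmem_int_write(&shm, smps, 8);                           // hand them to the peer
 * shmem_int_read(&shm, smps, 8);                            // receive samples from the peer
 * shmem_int_close(&shm);
 * @endcode
 */
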
#include <cerrno>
#include <fcntl.h>
#include <semaphore.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

#include <villas/kernel/kernel.hpp>
#include <villas/memory.h>
#include <villas/utils.hpp>
#include <villas/sample.h>
#include <villas/shmem.h>

using namespace villas;

size_t shmem_total_size(int queuelen, int samplelen)
{
    /* We have the constant cost of the memory_type header */
    return sizeof(struct memory_type)
        /* and the shared struct itself */
        + sizeof(struct shmem_shared)
        /* the size of the actual queue and the queue for the pool */
        + queuelen * (2 * sizeof(struct queue_cell))
        /* the size of the pool */
        + queuelen * kernel::getCachelineSize() * CEIL(SAMPLE_LENGTH(samplelen), kernel::getCachelineSize())
        /* a memblock for each allocation (1 shmem_shared, 2 queues, 1 pool) */
        + 4 * sizeof(struct memory_block)
        /* and some extra buffer for alignment */
        + 1024;
}
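
/* For illustration: with queuelen = 64, samplelen = 16 and 64-byte cachelines,
 * the pool term above reserves 64 * 64 * CEIL(SAMPLE_LENGTH(16), 64) bytes,
 * i.e. one cacheline-aligned slot per queue entry, while the two queues add
 * 64 * 2 * sizeof(struct queue_cell) bytes on top of the fixed headers. */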

int shmem_int_open(const char *wname, const char *rname, struct shmem_int *shm, struct shmem_conf *conf)
{
    char *cptr;
    int fd, ret;
    size_t len;
    void *base;
    struct memory_type *manager;
    struct shmem_shared *shared;
    struct stat stat_buf;
    sem_t *sem_own, *sem_other;

    /* Ensure both semaphores exist */
    sem_own = sem_open(wname, O_CREAT, 0600, 0);
    if (sem_own == SEM_FAILED)
        return -1;

    sem_other = sem_open(rname, O_CREAT, 0600, 0);
    if (sem_other == SEM_FAILED)
        return -2;

    /* Open and initialize the shared region for the output queue */
retry:  fd = shm_open(wname, O_RDWR|O_CREAT|O_EXCL, 0600);
    if (fd < 0) {
        if (errno == EEXIST) {
            ret = shm_unlink(wname);
            if (ret)
                return -12;

            goto retry;
        }

        return -3;
    }

    len = shmem_total_size(conf->queuelen, conf->samplelen);
    if (ftruncate(fd, len) < 0)
        return -1;

    base = mmap(nullptr, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    if (base == MAP_FAILED)
        return -4;

    close(fd);

    manager = memory_managed(base, len);
    shared = (struct shmem_shared *) memory_alloc(sizeof(struct shmem_shared), manager);
    if (!shared) {
        errno = ENOMEM;
        return -5;
    }

    shared->polling = conf->polling;

    int flags = (int) QueueSignalledFlags::PROCESS_SHARED;
    enum QueueSignalledMode mode = conf->polling
        ? QueueSignalledMode::POLLING
        : QueueSignalledMode::PTHREAD;

    ret = queue_signalled_init(&shared->queue, conf->queuelen, manager, mode, flags);
    if (ret) {
        errno = ENOMEM;
        return -6;
    }

    ret = pool_init(&shared->pool, conf->queuelen, SAMPLE_LENGTH(conf->samplelen), manager);
    if (ret) {
        errno = ENOMEM;
        return -7;
    }

    shm->write.base = base;
    shm->write.name = wname;
    shm->write.len = len;
    shm->write.shared = shared;

    /* Post own semaphore and wait on the other one, so both processes know that
     * both regions are initialized */
    sem_post(sem_own);
    sem_wait(sem_other);
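
    /* Both processes call this function with wname and rname swapped: each one
     * posts the semaphore for the region it just initialized and then blocks on
     * the peer's semaphore, so neither side maps the other region below before
     * the peer has finished setting it up. */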

    /* Open and map the other region */
    fd = shm_open(rname, O_RDWR, 0);
    if (fd < 0)
        return -8;

    if (fstat(fd, &stat_buf) < 0)
        return -9;

    len = stat_buf.st_size;

    base = mmap(nullptr, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    if (base == MAP_FAILED)
        return -10;

    cptr = (char *) base + sizeof(struct memory_type) + sizeof(struct memory_block);
    shared = (struct shmem_shared *) cptr;
    shm->read.base = base;
    shm->read.name = rname;
    shm->read.len = len;
    shm->read.shared = shared;

    shm->readers = 0;
    shm->writers = 0;
    shm->closed = 0;

    /* Unlink our own semaphore; the other process unlinks the one for its name,
     * so neither is needed anymore afterwards */
    sem_unlink(wname);

    return 0;
}
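
/* Mark the interface as closed, wake up readers blocked on the outgoing queue
 * and unlink the write region; the mappings themselves are released here only
 * if no reader/writer is currently active (otherwise the last active one
 * unmaps in shmem_int_read()/shmem_int_write()). */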
int shmem_int_close(struct shmem_int *shm)
{
    int ret;

    atomic_store(&shm->closed, 1);

    ret = queue_signalled_close(&shm->write.shared->queue);
    if (ret)
        return ret;

    shm_unlink(shm->write.name);

    if (atomic_load(&shm->readers) == 0)
        munmap(shm->read.base, shm->read.len);

    if (atomic_load(&shm->writers) == 0)
        munmap(shm->write.base, shm->write.len);

    return 0;
}
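
/* Pull up to cnt samples from the incoming queue; the return value is passed
 * through from queue_signalled_pull_many(). The reader count ensures that the
 * last active reader unmaps the region once the interface has been closed. */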
int shmem_int_read(struct shmem_int *shm, struct sample * const smps[], unsigned cnt)
{
    int ret;

    atomic_fetch_add(&shm->readers, 1);

    ret = queue_signalled_pull_many(&shm->read.shared->queue, (void **) smps, cnt);

    if (atomic_fetch_sub(&shm->readers, 1) == 1 && atomic_load(&shm->closed) == 1)
        munmap(shm->read.base, shm->read.len);

    return ret;
}
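
/* Push up to cnt samples into the outgoing queue; the return value is passed
 * through from queue_signalled_push_many(). The samples should live in the
 * shared write region, e.g. obtained via shmem_int_alloc(). */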
int shmem_int_write(struct shmem_int *shm, const struct sample * const smps[], unsigned cnt)
{
    int ret;

    atomic_fetch_add(&shm->writers, 1);

    ret = queue_signalled_push_many(&shm->write.shared->queue, (void **) smps, cnt);

    if (atomic_fetch_sub(&shm->writers, 1) == 1 && atomic_load(&shm->closed) == 1)
        munmap(shm->write.base, shm->write.len);

    return ret;
}
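
/* Allocate cnt samples from the pool of the write region, so they can be
 * filled by the caller and passed to shmem_int_write(). */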
int shmem_int_alloc(struct shmem_int *shm, struct sample *smps[], unsigned cnt)
{
    return sample_alloc_many(&shm->write.shared->pool, smps, cnt);
}