mirror of https://github.com/hermitcore/libhermit.git

move architecture dependent part to a subdirectory of arch

Stefan Lankes 2017-03-06 23:32:42 +01:00
parent a6d4208e45
commit 8f9430f3fe
8 changed files with 23 additions and 77 deletions

View file

@@ -36,6 +36,7 @@
 #include <hermit/stddef.h>
 #include <hermit/stdlib.h>
+#include <asm/processor.h>
 #ifndef __PAGE_H__
 #define __PAGE_H__
@@ -144,6 +145,8 @@ static inline size_t sign_extend(ssize_t addr, int bits)
 /// Disable execution for this page
 #define PG_XD (1L << 63)
+#define PG_NX (has_nx() ? PG_XD : 0)
 /** @brief Converts a virtual address to a physical
  *
  * A non mapped virtual address causes a pagefault!
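The new PG_NX macro folds the runtime NX-support check into the page-flag constants themselves, which is why the header now pulls in <asm/processor.h> for has_nx(). A minimal stand-alone sketch of the idea (the PG_RW/PG_GLOBAL values and the stubbed has_nx() are illustrative, not the HermitCore definitions):

    #include <stdint.h>
    #include <stdio.h>

    /* stub: the real has_nx() queries the CPU for NX support */
    static int has_nx(void) { return 1; }

    #define PG_RW     (1ULL << 1)                /* illustrative x86 R/W bit */
    #define PG_GLOBAL (1ULL << 8)                /* illustrative x86 global bit */
    #define PG_XD     (1ULL << 63)               /* disable execution for this page */
    #define PG_NX     (has_nx() ? PG_XD : 0)     /* PG_XD only if the CPU supports NX */

    int main(void)
    {
        /* callers can drop the explicit if (has_nx()) branch and simply OR in PG_NX */
        uint64_t bits = PG_RW | PG_GLOBAL | PG_NX;
        printf("page bits: 0x%016llx\n", (unsigned long long)bits);
        return 0;
    }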

View file

@@ -1,4 +1,4 @@
-C_source := page.c
+C_source := memory.c vma.c page.c
 MODULE := arch_x86_mm
 include $(TOPDIR)/Makefile.inc

View file

@@ -96,7 +96,17 @@ typedef struct vma {
  */
 int vma_init(void);
+/** @brief Initalize the kernelspace VMA list
+ *
+ * Reserves several architecture-relevant virtual memory regions
+ *
+ * @return
+ * - 0 on success
+ * - <0 on failure
+ */
+int vma_arch_init(void);
 /** @brief Add a new virtual memory area to the list of VMAs
  *
  * @param start Start address of the new area
  * @param end End address of the new area
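The x86 implementation of vma_arch_init() itself is not among the hunks shown here. Judging from the multiboot handling that this commit removes from mm/vma.c (last file below), it presumably re-registers the multiboot info page and the command-line page on the arch side. A hypothetical sketch, reconstructed from that removed code rather than taken from the actual arch/x86 file:

    /* Hypothetical reconstruction; the identifiers (mb_info, vma_add, PAGE_MASK,
     * PAGE_SIZE, VMA_READ, VMA_WRITE, BUILTIN_EXPECT) are the ones used in mm/vma.c. */
    int vma_arch_init(void)
    {
        int ret = 0;

        if (mb_info) {
            /* reserve the page that holds the multiboot info structure */
            ret = vma_add((size_t)mb_info & PAGE_MASK,
                          ((size_t)mb_info & PAGE_MASK) + PAGE_SIZE,
                          VMA_READ|VMA_WRITE);
            if (BUILTIN_EXPECT(ret, 0))
                goto out;

            /* reserve the page that holds the kernel command line,
             * if it lives on a different page than mb_info */
            if ((mb_info->cmdline & PAGE_MASK) != ((size_t) mb_info & PAGE_MASK))
                ret = vma_add((size_t)mb_info->cmdline & PAGE_MASK,
                              ((size_t)mb_info->cmdline & PAGE_MASK) + PAGE_SIZE,
                              VMA_READ|VMA_WRITE);
        }

    out:
        return ret;
    }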

View file

@@ -1,4 +1,4 @@
-C_source := memory.c malloc.c vma.c shm.c hbmemory.c
+C_source := malloc.c vma.c hbmemory.c
 MODULE := mm
 include $(TOPDIR)/Makefile.inc

View file

@@ -152,10 +152,7 @@ void* palloc(size_t sz, uint32_t flags)
     }
     //TODO: interpretation of from (vma) flags is missing
-    bits = PG_RW|PG_GLOBAL;
-    // protect heap by the NX flag
-    if (has_nx())
-        bits |= PG_XD;
+    bits = PG_RW|PG_GLOBAL|PG_NX;
     // map physical pages to VMA
     err = page_map(viraddr, phyaddr, npages, bits);
@@ -191,10 +188,7 @@ void* create_stack(size_t sz)
         return NULL;
     }
-    bits = PG_RW|PG_GLOBAL;
-    // protect heap by the NX flag
-    if (has_nx())
-        bits |= PG_XD;
+    bits = PG_RW|PG_GLOBAL|PG_NX;
     // map physical pages to VMA
     err = page_map(viraddr+PAGE_SIZE, phyaddr, npages, bits);
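For clarity, the replacement is behaviorally equivalent to the removed branch: PG_NX (defined in the page.h hunk above) expands to PG_XD only when has_nx() reports NX support, so both call sites still set the execute-disable bit conditionally, just without spelling out the check:

    /* before: explicit runtime branch at every call site */
    bits = PG_RW|PG_GLOBAL;
    if (has_nx())
        bits |= PG_XD;

    /* after: the branch is hidden inside the PG_NX macro */
    bits = PG_RW|PG_GLOBAL|PG_NX;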

View file

@@ -1,50 +0,0 @@
/*
* Copyright (c) 2015, Stefan Lankes, RWTH Aachen University
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/shm.h>
#if 0
int shmget(key_t key, size_t size, int shmflg)
{
return 0;
}
void *shmat(int shmid, const void *shmaddr, int shmflg)
{
return NULL;
}
int shmdt(const void *shmaddr)
{
return 0;
}
int shmctl(int shmid, int cmd, struct shmid_ds *buf)
{
return 0;
}
#endif

View file

@@ -32,9 +32,8 @@
 #include <hermit/spinlock.h>
 #include <hermit/errno.h>
 #include <hermit/logging.h>
-#include <asm/multiboot.h>
 /*
  * Note that linker symbols are not variables, they have no memory allocated for
  * maintaining a value, rather their address is their value.
  */
@@ -43,7 +42,7 @@ extern const void kernel_end;
 /*
  * Kernel space VMA list and lock
  *
  * For bootstrapping we initialize the VMA list with one empty VMA
  * (start == end) and expand this VMA by calls to vma_alloc()
  */
@@ -51,8 +50,6 @@ static vma_t vma_boot = { VMA_MIN, VMA_MIN, VMA_HEAP };
 static vma_t* vma_list = &vma_boot;
 static spinlock_irqsave_t vma_lock = SPINLOCK_IRQSAVE_INIT;
-// TODO: we might move the architecture specific VMA regions to a
-// seperate function arch_vma_init()
 int vma_init(void)
 {
     int ret;
@@ -73,17 +70,9 @@ int vma_init(void)
     if (BUILTIN_EXPECT(ret, 0))
         goto out;
-    if (mb_info) {
-        ret = vma_add((size_t)mb_info & PAGE_MASK, ((size_t)mb_info & PAGE_MASK) + PAGE_SIZE, VMA_READ|VMA_WRITE);
-        if (BUILTIN_EXPECT(ret, 0))
-            goto out;
-        if ((mb_info->cmdline & PAGE_MASK) != ((size_t) mb_info & PAGE_MASK)) {
-            ret = vma_add((size_t)mb_info->cmdline & PAGE_MASK, ((size_t)mb_info->cmdline & PAGE_MASK) + PAGE_SIZE, VMA_READ|VMA_WRITE);
-            if (BUILTIN_EXPECT(ret, 0))
-                goto out;
-        }
-    }
+    // we might move the architecture specific VMA regions to a
+    // seperate function vma_arch_init()
+    ret = vma_arch_init();
 out:
     return ret;