Compare commits
No commits in common. "vma_kernel" and "master" have entirely different histories.
vma_kernel ... master
37 changed files with 1183 additions and 1640 deletions
105  Makefile.example
@@ -1,12 +1,8 @@
NAME = metalsvm

# For 64bit support, you have define BIT as 64
# Note: do not forget to 'make veryclean' after changing BIT!!!
BIT=64
ARCH = x86
SMP=1

TOPDIR = $(shell pwd)
ARCH = x86
# For 64bit support, you have define BIT as 64
BIT=32
NAME = metalsvm
LWIPDIRS = lwip/src/arch lwip/src/api lwip/src/core lwip/src/core/ipv4 lwip/src/netif
DRIVERDIRS = drivers/net drivers/char
KERNDIRS = libkern kernel mm fs apps arch/$(ARCH)/kernel arch/$(ARCH)/mm arch/$(ARCH)/scc $(LWIPDIRS) $(DRIVERDIRS)
@@ -34,56 +30,35 @@ RANLIB_FOR_TARGET = $(CROSSCOMPREFIX)ranlib
STRIP_FOR_TARGET = $(CROSSCOMPREFIX)strip
READELF_FOR_TARGET = $(CROSSCOMPREFIX)readelf

# Tools
MAKE = make
RM = rm -rf
NASM = nasm
# For 64bit code, you have to use qemu-system-x86_64
QEMU = qemu-system-i386
GDB = gdb

ifeq ($(BIT), 32)
QEMU = qemu-system-i386
else ifeq ($(BIT), 64)
QEMU = qemu-system-x86_64
endif

INCLUDE = -I$(TOPDIR)/include \
	-I$(TOPDIR)/arch/$(ARCH)/include \
	-I$(TOPDIR)/lwip/src/include \
	-I$(TOPDIR)/lwip/src/include/ipv4 \
	-I$(TOPDIR)/drivers

# For 64bit support, you have to define -felf64 instead of -felf32
NASMFLAGS = -felf32 -g -i$(TOPDIR)/include/metalsvm/
INCLUDE = -I$(TOPDIR)/include -I$(TOPDIR)/arch/$(ARCH)/include -I$(TOPDIR)/lwip/src/include -I$(TOPDIR)/lwip/src/include/ipv4 -I$(TOPDIR)/drivers
# For 64bit support, you have to define "-m64 -mno-red-zone" instead of "-m32 -march=i586"
# Compiler options for final code
CFLAGS = -g -O2 -m$(BIT) -Wall -fomit-frame-pointer -ffreestanding -fstrength-reduce -finline-functions $(INCLUDE) $(STACKPROT)

CFLAGS = -g -m32 -march=i586 -Wall -O2 -fstrength-reduce -fomit-frame-pointer -finline-functions -ffreestanding $(INCLUDE) $(STACKPROT)
# Compiler options for debuging
#CFLAGS = -g -O -m$(BIT) -Wall -fomit-frame-pointer -ffreestanding $(INCLUDE) $(STACKPROT)

NASMFLAGS = -felf$(BIT) -g -i$(TOPDIR)/include/metalsvm/
#CFLAGS = -g -O -m32 -march=i586 -Wall -fomit-frame-pointer -ffreestanding $(INCLUDE) $(STACKPROT)
ARFLAGS = rsv
LDFLAGS = -T link$(BIT).ld -z max-page-size=4096 --defsym __BUILD_DATE=$(shell date +'%Y%m%d') --defsym __BUILD_TIME=$(shell date +'%H%M%S')

STRIP_DEBUG = --strip-debug
KEEP_DEBUG = --only-keep-debug

# Do not change to elf64!
# The Multiboot spec can only boot elf32 binaries
OUTPUT_FORMAT = -O elf32-i386

CFLAGS_FOR_NEWLIB = -m$(BIT) -O2 $(STACKPROT)
LDFLAGS_FOR_NEWLIB = -m$(BIT)
CFLAGS_FOR_TOOLS = -m$(BIT) -O2 -Wall
# For 64bit support, you have to define -m64 instead of "-m32 -march=i586"
CFLAGS_FOR_NEWLIB = -m32 -march=i586 -O2 $(STACKPROT)
# For 64bit support, you have to define -m64 instead of "-m32 -march=i586"
LDFLAGS_FOR_NEWLIB = -m32 -march=i586
# For 64bit support, you have to define -m64 instead of "-m32"
CFLAGS_FOR_TOOLS = -m32 -O2 -Wall
LDFLAGS_FOR_TOOLS =
NASMFLAGS_FOR_NEWLIB = -felf$(BIT)

ifeq ($(BIT), 32)
CFLAGS += -march=i586
CFLAGS_FOR_NEWLIB += -march=i586
LDFLAGS_FOR_NEWLIB += -march=i586
else ifeq ($(BIT), 64)
CFLAGS += -mno-red-zone
endif
# For 64bit support, you have to define -felf64 instead of -felf32
NASMFLAGS_FOR_NEWLIB = -felf32

# Prettify output
V = 0
@@ -93,15 +68,11 @@ ifeq ($V,0)
endif

default: all

all: newlib tools $(NAME).elf

newlib:
	$(MAKE) ARCH=$(ARCH) BIT=$(BIT) \
	LDFLAGS="$(LDFLAGS_FOR_NEWLIB)" \
	CFLAGS="$(CFLAGS_FOR_NEWLIB)" \
	NASMFLAGS="$(NASMFLAGS_FOR_NEWLIB)" \
	CC_FOR_TARGET=$(CC_FOR_TARGET) \
	$(MAKE) ARCH=$(ARCH) BIT=$(BIT) LDFLAGS="$(LDFLAGS_FOR_NEWLIB)" CFLAGS="$(CFLAGS_FOR_NEWLIB)" NASMFLAGS="$(NASMFLAGS_FOR_NEWLIB)" CC_FOR_TARGET=$(CC_FOR_TARGET) \
	CXX_FOR_TARGET=$(CXX_FOR_TARGET) \
	GCC_FOR_TARGET=$(GCC_FOR_TARGET) \
	AR_FOR_TARGET=$(AR_FOR_TARGET) \
@@ -125,23 +96,14 @@ $(NAME).elf:
	$Q$(OBJCOPY_FOR_TARGET) $(STRIP_DEBUG) $(OUTPUT_FORMAT) $(NAME).elf

qemu: newlib tools $(NAME).elf
	$(QEMU) -monitor stdio -serial tcp::12346,server,nowait -smp $(SMP) -net nic,model=rtl8139 -net user,hostfwd=tcp::12345-:4711 -kernel metalsvm.elf -initrd tools/initrd.img
	$(QEMU) -monitor stdio -smp 2 -net nic,model=rtl8139 -net user,hostfwd=tcp::12345-:4711 -net dump -kernel metalsvm.elf -initrd tools/initrd.img

qemudbg: newlib tools $(NAME).elf
	$(QEMU) -s -S -nographic -monitor stdio -serial tcp::12346,server -smp $(SMP) -net nic,model=rtl8139 -net user,hostfwd=tcp::12345-:4711 -kernel metalsvm.elf -initrd tools/initrd.img
	$(QEMU) -s -S -smp 2 -net nic,model=rtl8139 -net user,hostfwd=tcp::12345-:4711 -net dump -kernel metalsvm.elf -initrd tools/initrd.img

gdb: $(NAME).elf
	$(GDB) -q -x script.gdb

debug: newlib tools $(NAME).elf
	killall $(QEMU) || true
	killall $(GDB) || true
	sleep 1
	gnome-terminal --working-directory=$(TOPDIR) \
	--tab --title=Shell --command="bash -c 'sleep 1 && telnet localhost 12345'" \
	--tab --title=QEmu --command="make qemudbg" \
	--tab --title=GDB --command="make gdb" \
	--tab --title=Debug --command="bash -c 'sleep 1 && telnet localhost 12346'"
	make qemudbg > /dev/null &
	$(GDB) -x script.gdb

clean:
	$Q$(RM) $(NAME).elf $(NAME).sym *~
@@ -150,7 +112,7 @@ clean:

veryclean: clean
	$Q$(MAKE) -C newlib veryclean
	@echo Very cleaned.
	@echo Very cleaned

#depend:
#	for i in $(SUBDIRS); do $(MAKE) -k -C $$i depend; done
@@ -162,15 +124,16 @@ veryclean: clean
	$Q$(CPP_FOR_TARGET) -MF $*.dep -MT $*.o -MM -D__KERNEL__ $(CFLAGS) $<

include/metalsvm/config.inc: include/metalsvm/config.h
	@echo "; This file is generated automatically from the config.h file." > $@
	@echo "; Before editing this, you should consider editing config.h." >> $@
	@sed -nre 's/^[\t ]*#define[\t ]+([a-z_0-9]+)([\t ]+.*)*/%define \1/ip' $< >> $@
	@sed -nre 's/^[\t ]*#define[\t ]+([a-z_0-9]+)[\t ]+([a-z_0-9.]+)([\t ]+.*)*/%define \1 \2/ip' $< >> $@
	@echo "; This file is generated automatically from the config.h file." > include/metalsvm/config.inc
	@echo "; Before editing this, you should consider editing config.h." >> include/metalsvm/config.inc
	@awk '/^#define MAX_CORES/{ print "%define MAX_CORES", $$3 }' include/metalsvm/config.h >> include/metalsvm/config.inc
	@awk '/^#define KERNEL_STACK_SIZE/{ print "%define KERNEL_STACK_SIZE", $$3 }' include/metalsvm/config.h >> include/metalsvm/config.inc
	@awk '/^#define CONFIG_VGA/{ print "%define CONFIG_VGA", $$3 }' include/metalsvm/config.h >> include/metalsvm/config.inc

%.o : %.asm include/metalsvm/config.inc
	@echo [ASM] $@
	$Q$(NASM) $(NASMFLAGS) -o $@ $<

.PHONY: default all clean qemu qemudbg gdb debug newlib tools
.PHONY: default all clean emu gdb newlib tools

include $(addsuffix /Makefile,$(SUBDIRS))
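(Aside: both variants of this rule translate C preprocessor constants into NASM syntax. Assuming config.h contained a line such as "#define MAX_CORES 2" (the value 2 is only an illustration, it does not appear in this diff), the sed rule on the one side and the awk rule on the other would both append "%define MAX_CORES 2" to config.inc.)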
@@ -1,4 +1,4 @@
C_source := tests.c echo.c netio.c jacobi.c laplace.c gfx_client.c gfx_generic.c paging.c
C_source := tests.c echo.c netio.c jacobi.c laplace.c gfx_client.c gfx_generic.c
MODULE := apps

include $(TOPDIR)/Makefile.inc
227  apps/paging.c
@@ -1,227 +0,0 @@
/*
 * Copyright 2011 Steffen Vogel, Chair for Operating Systems,
 * RWTH Aachen University
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * This file is part of MetalSVM.
 */

#include <metalsvm/stdlib.h>
#include <metalsvm/stdio.h>
#include <metalsvm/stdarg.h>
#include <metalsvm/mmu.h>
#include <metalsvm/time.h>
#include <metalsvm/tasks.h>
#include <metalsvm/vma.h>

#include <asm/page.h>
#include <asm/processor.h>

#define PAGE_COUNT 10
#define SIZE (PAGE_COUNT*PAGE_SIZE)
#define VIRT_FROM_ADDR 0x100000000000
#define VIRT_TO_ADDR 0x200000000000

/** @brief Simple helper to format our test results */
static void test(size_t expr, char *fmt, ...)
{
	void _putchar(int c, void *arg) { kputchar(c); } // for kvprintf

	static int c = 1;

	va_list ap;
	va_start(ap, fmt);

	kprintf("%s #%u:\t", (expr) ? "PASSED" : "FAILED", c++);
	kvprintf(fmt, _putchar, NULL, 10, ap);
	kputs("\n");

	va_end(ap);

	if (!expr)
		abort();
}

/** @brief BSD sum algorithm ('sum' Unix command) and used by QEmu */
uint16_t checksum(size_t start, size_t end) {
	size_t addr;
	uint16_t sum;

	for(addr = start, sum = 0; addr < end; addr++) {
		uint8_t val = *((uint8_t *) addr);
		sum = (sum >> 1) | (sum << 15);
		sum += val;
	}

	return sum;
}
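(Aside: the loop above is the classic BSD 16-bit checksum: rotate the running sum right by one bit, then add the next byte. A minimal standalone restatement in plain C, using a hypothetical buffer instead of a raw kernel address range:)

#include <stddef.h>
#include <stdint.h>

/* Same rotate-and-add scheme as checksum() above, over an ordinary buffer. */
static uint16_t bsd_sum(const uint8_t *buf, size_t len)
{
	uint16_t sum = 0;
	size_t i;

	for (i = 0; i < len; i++) {
		sum = (sum >> 1) | (sum << 15); /* rotate right by one bit */
		sum += buf[i];                  /* add next byte, wraps mod 2^16 */
	}
	return sum;
}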

static int paging_stage2(void *arg) {
	size_t old, new;

	kprintf("PAGING: entering stage 2...\n");

	old = *((size_t *) arg);
	kprintf("old sum: %lu\n", old);

	new = checksum(VIRT_FROM_ADDR, VIRT_FROM_ADDR + PAGE_COUNT*PAGE_SIZE);
	test(old == new, "checksum(%p, %p) = %lu", VIRT_FROM_ADDR, VIRT_FROM_ADDR + PAGE_COUNT*PAGE_SIZE, new);

	size_t cr3 = read_cr3();
	kprintf("cr3 new = %x\n", cr3);

	return 0;
}

/** @brief Test of the paging subsystem
 *
 * We will map a single physical memory region to two virtual regions.
 * When writing to the first one, we should be able to read the same contents
 * from the second one.
 */
static void paging(void)
{
	size_t c, sum;
	size_t *p1, *p2;
	size_t virt_from, virt_to, virt_alloc;
	size_t phys;

	// allocate physical page frames
	phys = get_pages(PAGE_COUNT);
	test(phys, "get_pages(%lu) = 0x%lx", PAGE_COUNT, phys);

	// create first mapping
	virt_from = map_region(VIRT_FROM_ADDR, phys, PAGE_COUNT, 0);
	test(virt_from, "map_region(0x%lx, 0x%lx, %lu, 0x%x) = 0x%lx", VIRT_FROM_ADDR, phys, PAGE_COUNT, 0, virt_from);

	// check address translation
	phys = virt_to_phys(virt_from);
	test(phys, "virt_to_phys(0x%lx) = 0x%lx", virt_from, phys);

	// write test data
	p1 = (size_t *) virt_from;
	for (c = 0; c < PAGE_COUNT*PAGE_SIZE/sizeof(size_t); c++) {
		p1[c] = c;
	}

	// create second mapping pointing to the same page frames
	virt_to = map_region(VIRT_TO_ADDR, phys, PAGE_COUNT, 0);
	test(virt_to, "map_region(0x%lx, 0x%lx, %lu, 0x%x) = 0x%lx", VIRT_TO_ADDR, phys, PAGE_COUNT, 0, virt_to);

	// check address translation
	phys = virt_to_phys(virt_to);
	test(phys, "virt_to_phys(0x%lx) = 0x%lx", virt_to, phys);

	// check if both mapped areas are equal
	p2 = (size_t *) virt_to;
	for (c = 0; c < PAGE_COUNT*PAGE_SIZE/sizeof(size_t); c++) {
		if (p1[c] != p2[c])
			test(0, "data mismatch: *(%p) != *(%p)", &p1[c], &p2[c]);
	}
	test(1, "data is equal");

	// try to remap without MAP_REMAP
	virt_to = map_region(VIRT_TO_ADDR, phys+PAGE_SIZE, PAGE_COUNT, 0);
	test(!virt_to, "map_region(0x%lx, 0x%lx, %lu, 0x%x) = 0x%lx (without MAP_REMAP flag)", VIRT_TO_ADDR, phys+PAGE_SIZE, PAGE_COUNT, 0, virt_to);

	// try to remap with MAP_REMAP
	virt_to = map_region(VIRT_TO_ADDR, phys+PAGE_SIZE, PAGE_COUNT, MAP_REMAP);
	test(virt_to, "map_region(0x%lx, 0x%lx, %lu, 0x%x) = 0x%lx (with MAP_REMAP flag)", VIRT_TO_ADDR, phys+PAGE_SIZE, PAGE_COUNT, MAP_REMAP, virt_to);

	// check if data is not equal anymore (we remapped with 1 page offset)
	p2 = (size_t *) virt_to;
	for (c = 0; c < PAGE_COUNT*PAGE_SIZE/sizeof(size_t); c++) {
		if (p1[c] == p2[c])
			test(0, "data match at *(%p) != *(%p)", &p1[c], &p2[c]);
	}
	test(1, "data is unequal");

	// test vma_alloc
	virt_alloc = map_region(0, phys, PAGE_COUNT, 0);
	test(virt_alloc, "map_region(0x%lx, 0x%lx, %lu, 0x%x) = 0x%lx", 0, phys, PAGE_COUNT, 0, virt_alloc);

	// data should match against new vm addr
	p2 = (size_t *) virt_alloc;
	for (c = 0; c < PAGE_COUNT*PAGE_SIZE/sizeof(size_t); c++) {
		if (p1[c] != p2[c])
			test(0, "data mismatch at *(%p) != *(%p)", &p1[c], &p2[c]);
	}
	test(1, "data is equal");

	// calc checksum
	sum = checksum(virt_alloc, virt_alloc + PAGE_COUNT*PAGE_SIZE);
	test(sum, "checksum(%p, %p) = %lu", virt_alloc, virt_alloc + PAGE_COUNT*PAGE_SIZE, sum);

	size_t cr3 = read_cr3();
	kprintf("cr3 old = %x\n", cr3);

	//create_kernel_task(0, paging_stage2, &sum, NORMAL_PRIO);
	//sleep(3);
}

/** @brief Test of the VMA allocator */
static void vma(void)
{
	int ret;

	// vma_alloc
	size_t a1 = vma_alloc(SIZE, VMA_HEAP);
	test(a1, "vma_alloc(0x%x, 0x%x) = 0x%lx", SIZE, VMA_HEAP, a1);
	vma_dump();

	size_t a2 = vma_alloc(SIZE, VMA_HEAP|VMA_USER);
	test(a2 != 0, "vma_alloc(0x%x, 0x%x) = 0x%lx", SIZE, VMA_HEAP|VMA_USER, a2);
	vma_dump();

	// vma_add
	ret = vma_add(VIRT_FROM_ADDR, VIRT_FROM_ADDR+SIZE, VMA_HEAP|VMA_USER);
	test(ret >= 0, "vma_add(0x%lx, 0x%lx, 0x%x) = %u", VIRT_FROM_ADDR, VIRT_FROM_ADDR+SIZE, VMA_HEAP|VMA_USER, ret);
	vma_dump();

	ret = vma_add(VIRT_FROM_ADDR+SIZE, VIRT_FROM_ADDR+2*SIZE, VMA_HEAP|VMA_USER);
	test(ret >= 0, "vma_add(0x%lx, 0x%lx, 0x%x) = %u", VIRT_FROM_ADDR+SIZE, VIRT_FROM_ADDR+2*SIZE, VMA_HEAP|VMA_USER, ret);
	vma_dump();

	ret = vma_add(VIRT_FROM_ADDR-SIZE, VIRT_FROM_ADDR, VMA_HEAP|VMA_USER);
	test(ret >= 0, "vma_add(0x%lx, 0x%lx, 0x%x) = %u", VIRT_FROM_ADDR-SIZE, VIRT_FROM_ADDR, VMA_HEAP|VMA_USER, ret);
	vma_dump();

	// vma_free
	ret = vma_free(VIRT_FROM_ADDR-SIZE, VIRT_FROM_ADDR);
	test(ret >= 0, "vma_free(0x%lx, 0x%lx) = %u", VIRT_FROM_ADDR-SIZE, VIRT_FROM_ADDR, ret);
	vma_dump();

	ret = vma_free(VIRT_FROM_ADDR+SIZE, VIRT_FROM_ADDR+2*SIZE);
	test(ret >= 0, "vma_free(0x%lx, 0x%lx) = %u", VIRT_FROM_ADDR+SIZE, VIRT_FROM_ADDR+2*SIZE, ret);
	vma_dump();

	ret = vma_free(VIRT_FROM_ADDR, VIRT_FROM_ADDR+SIZE);
	test(ret >= 0, "vma_free(0x%lx, 0x%lx) = %u", VIRT_FROM_ADDR, VIRT_FROM_ADDR+SIZE, ret);
	vma_dump();
}

/** @brief This is a simple procedure to test memory management subsystem */
int memory(void* arg)
{
	kprintf("======== PAGING: test started...\n");
	paging();
	kprintf("======== VMA: test started...\n");
	vma();

	kprintf("======== All tests finished successfull...\n");

	return 0;
}
@@ -43,7 +43,6 @@

int laplace(void* arg);
int jacobi(void* arg);
int memory(void* arg);
void echo_init(void);
void netio_init(void);

@@ -745,7 +744,8 @@ int test_init(void)
	create_user_task(NULL, "/bin/jacobi", jacobi_argv);
	//create_user_task_on_core(NULL, "/bin/jacobi", jacobi_argv, 1);
#endif
#if defined(START_MMNIF_TEST) && defined(CONFIG_LWIP) && LWIP_SOCKET
#ifdef START_MMNIF_TEST
#if defined(CONFIG_LWIP) && LWIP_SOCKET
	if (RCCE_IAM == 0) {
		kprintf("Start /bin/server...\n");
		create_user_task(NULL, "/bin/server", server_argv);

@@ -755,8 +755,6 @@ int test_init(void)
		create_user_task(NULL, "/bin/client", client_argv);
	}
#endif
#ifdef START_MEMORY
	create_kernel_task(NULL, memory, NULL, NORMAL_PRIO);
#endif

	return 0;
@@ -46,7 +46,6 @@
//#define START_HELLO
//#define START_TESTS
//#define START_JACOBI
//#define START_MEMORY

//#define START_CHIEFTEST
@@ -34,7 +34,7 @@
// ____ _ _
// / ___| _ _ _ __ ___ | |__ ___ | |___
// \___ \| | | | '_ ` _ \| '_ \ / _ \| / __|
// ___) | |_| | | | | | | |_) | (_) | \__
// ___) | |_| | | | | | | |_) | (_) | \__ \
// |____/ \__, |_| |_| |_|_.__/ \___/|_|___/
// |___/
//

@@ -253,7 +253,7 @@
// _____ _ _
// | ___| _ _ __ ___| |_(_) ___ _ __ ___
// | |_ | | | | '_ \ / __| __| |/ _ \| '_ \/ __|
// | _|| |_| | | | | (__| |_| | (_) | | | \__
// | _|| |_| | | | | (__| |_| | (_) | | | \__ \
// |_| \__,_|_| |_|\___|\__|_|\___/|_| |_|___/
//
// #########################################################################################
@@ -102,7 +102,7 @@ inline static void outportl(unsigned short _port, unsigned int _data)

inline static void uart_putchar(unsigned char _data)
{
	outportb(UART_PORT, _data);
	outportb(0x2F8, _data);
}

/**
@@ -35,11 +35,9 @@

#ifdef CONFIG_MULTIBOOT

/// Does the bootloader provide mem_* fields?
#define MULTIBOOT_INFO_MEM 0x00000001
/// Does the bootloader provide a list of modules?
/* are there modules to do something with? */
#define MULTIBOOT_INFO_MODS 0x00000008
/// Does the bootloader provide a full memory map?
/* is there a full memory map? */
#define MULTIBOOT_INFO_MEM_MAP 0x00000040

typedef uint16_t multiboot_uint16_t;
@@ -116,6 +114,7 @@ struct multiboot_info
	multiboot_uint16_t vbe_interface_off;
	multiboot_uint16_t vbe_interface_len;
};

typedef struct multiboot_info multiboot_info_t;

struct multiboot_mmap_entry
@@ -31,62 +31,49 @@
#include <metalsvm/stddef.h>
#include <metalsvm/stdlib.h>

// 4KB pages
#define PAGE_SHIFT 12

#ifdef CONFIG_X86_32
#define PAGE_MAP_LEVELS 2
#define PAGE_MAP_SHIFT 10
#elif defined(CONFIG_X86_64)
#define PAGE_MAP_LEVELS 4
#define PAGE_MAP_SHIFT 9
#endif

// base addresses of page map structures
#ifdef CONFIG_X86_32
#define PAGE_PGD 0xFFFFF000
#define PAGE_PGT 0xFFC00000
#elif defined(CONFIG_X86_64)
#define PAGE_PML4 0xFFFFFFFFFFFFF000
#define PAGE_PDPT 0xFFFFFFFFFFE00000
#define PAGE_PGD 0xFFFFFFFFC0000000
#define PAGE_PGT 0xFFFFFF8000000000
#endif

#define PAGE_MAP_ENTRIES (1 << PAGE_MAP_SHIFT)
#define PAGE_SIZE (1 << PAGE_SHIFT)
#define PAGE_MASK ~(PAGE_SIZE - 1)
#define PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) & PAGE_MASK)
#define _PAGE_BIT_PRESENT 0 /* is present */
#define _PAGE_BIT_RW 1 /* writeable */
#define _PAGE_BIT_USER 2 /* userspace addressable */
#define _PAGE_BIT_PWT 3 /* page write through */
#define _PAGE_BIT_PCD 4 /* page cache disabled */
#define _PAGE_BIT_ACCESSED 5 /* was accessed (raised by CPU) */
#define _PAGE_BIT_DIRTY 6 /* was written to (raised by CPU) */
#define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
#define _PAGE_BIT_PAT 7 /* on 4KB pages */
#define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
#define _PAGE_BIT_SVM_STRONG 9 /* mark a virtual address range as used by the SVM system */
#define _PAGE_BIT_SVM_LAZYRELEASE 10 /* mark a virtual address range as used by the SVM system */
#define _PAGE_BIT_SVM_INIT 11 /* mark if the MBP proxy is used */

/// Page is present
#define PG_PRESENT (1 << 0)
#define PG_PRESENT (1 << _PAGE_BIT_PRESENT)
/// Page is read- and writable
#define PG_RW (1 << 1)
#define PG_RW (1 << _PAGE_BIT_RW)
/// Page is addressable from userspace
#define PG_USER (1 << 2)
#define PG_USER (1 << _PAGE_BIT_USER)
/// Page write through is activated
#define PG_PWT (1 << 3)
#define PG_PWT (1 << _PAGE_BIT_PWT)
/// Page cache is disabled
#define PG_PCD (1 << 4)
#define PG_PCD (1 << _PAGE_BIT_PCD)
/// Page was recently accessed (set by CPU)
#define PG_ACCESSED (1 << 5)
#define PG_ACCESSED (1 << _PAGE_BIT_ACCESSED)
/// Page is dirty due to recentwrite-access (set by CPU)
#define PG_DIRTY (1 << 6)
#define PG_DIRTY (1 << _PAGE_BIT_DIRTY)
/// Big page: 4MB (or 2MB)
#define PG_PSE (1 << 7)
#define PG_PSE (1 << _PAGE_BIT_PSE)
/// Page is part of the MPB (SCC specific entry)
#define PG_MPE PG_PSE
/// Global TLB entry (Pentium Pro and later)
#define PG_GLOBAL (1 << 8)
#define PG_GLOBAL (1 << _PAGE_BIT_GLOBAL)
/// Pattern flag
#define PG_PAT (1 << 7)
#define PG_PAT (1 << _PAGE_BIT_PAT)
/// This virtual address range is used by SVM system as marked
#define PG_SVM (1 << 9)
#define PG_SVM_STRONG PG_SVM_STRONG
#define PG_SVM PG_SVM_STRONG
#define PG_SVM_STRONG (1 << _PAGE_BIT_SVM_STRONG)
/// This virtual address range is used by SVM system as marked
#define PG_SVM_LAZYRELEASE (1 << 10)
#define PG_SVM_LAZYRELEASE (1 << _PAGE_BIT_SVM_LAZYRELEASE)
/// Currently, no page frame is behind this page (only the MBP proxy)
#define PG_SVM_INIT (1 << 11)
#define PG_SVM_INIT (1 << _PAGE_BIT_SVM_INIT)

/// This is a whole set of flags (PRESENT,RW,ACCESSED,DIRTY) for kernelspace tables
#define KERN_TABLE (PG_PRESENT|PG_RW|PG_ACCESSED|PG_DIRTY)
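(Aside: a quick sanity check of the macros above, assuming PAGE_SHIFT = 12, hence PAGE_SIZE = 4096 and PAGE_MASK = ~0xFFF:)

/* PAGE_ALIGN rounds an address up to the next page boundary: */
/* PAGE_ALIGN(0x1234) == (0x1234 + 0xFFF) & ~0xFFF == 0x2000 */
/* PAGE_ALIGN(0x2000) == (0x2000 + 0xFFF) & ~0xFFF == 0x2000, already aligned */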
@@ -97,14 +84,33 @@
/// This is a whole set of flags (PRESENT,RW,USER) for userspace pages
#define USER_PAGE (PG_PRESENT|PG_RW|PG_USER)

/** @brief General page map structure
#if __SIZEOF_POINTER__ == 4
#define PGT_ENTRIES 1024
#elif __SIZEOF_POINTER__ == 8
#define PGT_ENTRIES 512
#endif

/** @brief Page table structure
 *
 * This page map structure is a general type for all indirecton levels.\n
 * As all page map levels containing the same amount of entries.
 * This structure keeps page table entries.\n
 * On a 32bit system, a page table consists normally of 1024 entries.
 */
typedef struct page_map {
	size_t entries[PAGE_MAP_ENTRIES];
} __attribute__ ((aligned (4096))) page_map_t;
typedef struct page_table
{
	/// Page table entries are unsigned 32bit integers.
	size_t entries[PGT_ENTRIES];
} page_table_t __attribute__ ((aligned (4096)));

/** @brief Page directory structure
 *
 * This structure keeps page directory entries.\
 * On a 32bit system, a page directory consists normally of 1024 entries.
 */
typedef struct page_dir
{
	/// Page dir entries are unsigned 32bit integers.
	size_t entries[PGT_ENTRIES];
} page_dir_t __attribute__ ((aligned (4096)));

/** @brief Converts a virtual address to a physical
 *
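(Aside on the sizes involved: with 4 KB pages and 1024-entry tables, one page directory spans 1024 * 1024 * 4096 bytes = 4 GiB, the whole 32bit address space; the four levels of 512-entry maps on the 64bit side span 512^4 * 4096 bytes = 256 TiB.)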
@@ -186,7 +192,7 @@ int arch_paging_init(void);
 *
 * @return Returns the address of the boot task's page dir array.
 */
page_map_t* get_boot_page_map(void);
page_dir_t* get_boot_pgd(void);

/** @brief Setup a new page directory for a new user-level task
 *
@@ -197,18 +203,18 @@ page_map_t* get_boot_page_map(void);
 * - counter of allocated page tables
 * - -ENOMEM (-12) on failure
 */
int create_page_map(task_t* task, int copy);
int create_pgd(task_t* task, int copy);

/** @brief Delete all page map structures of the current task
/** @brief Delete page directory and its page tables
 *
 * Puts PML4, PDPT, PGD, PGT tables back to buffer and
 * sets the task's page map pointer to NULL
 * Puts page tables and page directory back to buffer and
 * sets the task's page directory pointer to NULL
 *
 * @return
 * - 0 on success
 * - -EINVAL (-22) on failure (in case PGD is still the boot-pgd).
 */
int drop_page_map(void);
int drop_pgd(void);

/** @brief Change the page permission in the page tables of the current task
 *
@@ -273,7 +273,7 @@ int ipi_tlb_flush(void);
/** @brief Flush a specific page entry in TLB
 * @param addr The (virtual) address of the page to flush
 */
static inline void tlb_flush_one_page(size_t addr)
static inline void tlb_flush_one_page(uint32_t addr)
{
	asm volatile("invlpg (%0)" : : "r"(addr) : "memory");
#if MAX_CORES > 1

@@ -293,7 +293,7 @@ static inline void tlb_flush_one_page(size_t addr)
 */
static inline void tlb_flush(void)
{
	size_t val = read_cr3();
	uint32_t val = read_cr3();

	if (val)
		write_cr3(val);
@@ -387,14 +387,12 @@ void smp_start(uint32_t id)

	kprintf("Application processor %d is entering its idle task\n", apic_cpu_id());

#ifdef CONFIG_X86_32
	// initialization for x86_64 is done in smp_entry()
	// initialize default cpu features
	cpu_init();
#endif

	// use the same gdt like the boot processors
	gdt_flush();

	// install IDT
	idt_install();
@@ -29,6 +29,7 @@
SECTION .mboot
global start
start:
	mov byte [msg], 'H'
	jmp stublet

; This part MUST be 4byte aligned, so we solve that issue using 'ALIGN 4'

@@ -37,10 +38,10 @@ mboot:
; Multiboot macros to make a few lines more readable later
MULTIBOOT_PAGE_ALIGN equ 1<<0
MULTIBOOT_MEMORY_INFO equ 1<<1
; MULTIBOOT_AOUT_KLUDGE equ 1<<16
; MULTIBOOT_AOUT_KLUDGE equ 1<<16
MULTIBOOT_HEADER_MAGIC equ 0x1BADB002
MULTIBOOT_HEADER_FLAGS equ MULTIBOOT_PAGE_ALIGN | MULTIBOOT_MEMORY_INFO ; | MULTIBOOT_AOUT_KLUDGE
MULTIBOOT_CHECKSUM equ -(MULTIBOOT_HEADER_MAGIC + MULTIBOOT_HEADER_FLAGS)
MULTIBOOT_CHECKSUM equ -(MULTIBOOT_HEADER_MAGIC + MULTIBOOT_HEADER_FLAGS)
EXTERN code, bss, end

; This is the GRUB Multiboot header. A boot signature

@@ -48,6 +49,8 @@ mboot:
	dd MULTIBOOT_HEADER_FLAGS
	dd MULTIBOOT_CHECKSUM

msg db "?ello from MetalSVM kernel!!", 0

SECTION .text
ALIGN 4
stublet:

@@ -67,7 +70,7 @@ stublet:
	; jump to the boot processors's C code
	extern main
	call main
	jmp $ ; infinitive loop
	jmp $

global cpu_init
cpu_init:

@@ -109,7 +112,7 @@ global read_ip
read_ip:
	mov eax, [esp+4]
	pop DWORD [eax] ; Get the return address
	add esp, 4 ; Dirty Hack! read_ip cleanup the stack
	add esp, 4 ; Dirty Hack! read_ip cleanup the stacl
	jmp [eax] ; Return. Can't use RET because return
	; address popped off the stack.
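(Aside: the invariant behind MULTIBOOT_CHECKSUM is that the three header dwords sum to zero modulo 2^32. A small host-side C check, using only the flag values defined above:)

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t magic = 0x1BADB002;
	uint32_t flags = (1u << 0) | (1u << 1);   /* PAGE_ALIGN | MEMORY_INFO */
	uint32_t checksum = 0u - (magic + flags); /* two's complement negation */

	assert(magic + flags + checksum == 0);    /* what the bootloader verifies */
	return 0;
}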
@@ -30,7 +30,7 @@ extern kernel_end
extern apic_mp

; We use a special name to map this section at the begin of our kernel
; => Multiboot needs its magic number at the beginning of the kernel
; => Multiboot needs its magic number at the begin of the kernel
SECTION .mboot
global start
start:

@@ -42,19 +42,19 @@ mboot:
; Multiboot macros to make a few lines more readable later
MULTIBOOT_PAGE_ALIGN equ 1<<0
MULTIBOOT_MEMORY_INFO equ 1<<1
; MULTIBOOT_AOUT_KLUDGE equ 1<<16
; MULTIBOOT_AOUT_KLUDGE equ 1<<16
MULTIBOOT_HEADER_MAGIC equ 0x1BADB002
MULTIBOOT_HEADER_FLAGS equ MULTIBOOT_PAGE_ALIGN | MULTIBOOT_MEMORY_INFO ; | MULTIBOOT_AOUT_KLUDGE
MULTIBOOT_CHECKSUM equ -(MULTIBOOT_HEADER_MAGIC + MULTIBOOT_HEADER_FLAGS)
MULTIBOOT_CHECKSUM equ -(MULTIBOOT_HEADER_MAGIC + MULTIBOOT_HEADER_FLAGS)
EXTERN code, bss, end

; This is the GRUB Multiboot header. A boot signature
	dd MULTIBOOT_HEADER_MAGIC
	dd MULTIBOOT_HEADER_FLAGS
	dd MULTIBOOT_CHECKSUM

ALIGN 4
; we need already a valid GDT to switch in the 64bit mode
; we need already a valid GDT to switch in the 64bit modus
GDT64: ; Global Descriptor Table (64-bit).
.Null: equ $ - GDT64 ; The null descriptor.
	dw 0 ; Limit (low).
@@ -81,39 +81,47 @@ GDT64: ; Global Descriptor Table (64-bit).
	dw $ - GDT64 - 1 ; Limit.
	dq GDT64 ; Base.

times 256 DD 0 ; stack for booting
times 256 DD 0
startup_stack:

SECTION .data
; create default page tables for the 64bit kernel
global boot_pml4
global boot_pgd ; aka PML4
ALIGN 4096 ; of course, the page tables have to be page aligned

PAGE_MAP_ENTRIES equ (1<<9)
PAGE_SIZE equ (1<<12)

boot_pml4 times PAGE_MAP_ENTRIES DQ 0
boot_pdpt times PAGE_MAP_ENTRIES DQ 0
boot_pgd times PAGE_MAP_ENTRIES DQ 0
boot_pgt times (KERNEL_SPACE/PAGE_SIZE) DQ 0
NOPTS equ 512
boot_pgd times 512 DQ 0
boot_pdpt times 512 DQ 0
boot_pd times 512 DQ 0
boot_pt times (NOPTS*512) DQ 0

SECTION .text
ALIGN 8
%if MAX_CORES > 1
global smp_entry
smp_entry:
	; initialize cpu features
	call cpu_init
	; initialize cr3 register
	mov edi, boot_pml4
	; enable caching, disable paging and fpu emulation
	and eax, 0x1ffffffb
	; ...and turn on FPU exceptions
	or eax, 0x22
	mov cr0, eax
	; clears the current pgd entry
	xor eax, eax
	mov cr3, eax
	; at this stage, we disable the SSE support
	mov eax, cr4
	and eax, 0xfffbf9ff
	mov cr4, eax

	; initialize page table
	mov edi, boot_pgd
	mov cr3, edi

	; enable PAE
	; we need to enable PAE modus
	mov eax, cr4
	or eax, 1 << 5
	mov cr4, eax

	; enable longmode (compatibility mode)
	; switch to the compatibility mode (which is part of long mode)
	mov ecx, 0xC0000080
	rdmsr
	or eax, 1 << 8
@@ -121,10 +129,9 @@ smp_entry:

	; enable paging
	mov eax, cr0
	or eax, 1 << 31 | 1 << 0 ; Set the PG-bit, which is the 31nd bit, and the PE-bit, which is the 0th bit.
	mov cr0, eax ; According to the multiboot spec the PE-bit has to be set by bootloader already!
	or eax, 1 << 31 | 1 << 0 ; Set the PG-bit, which is the 31nd bit, and the PM-bit, which is the 0th bit.
	mov cr0, eax

	; jump to 64-bit longmode
	mov edi, [esp+4] ; set argumet for smp_start
	lgdt [GDT64.Pointer] ; Load the 64-bit global descriptor table.
	jmp GDT64.Code:smp_start64 ; Set the code segment and enter 64-bit long mode.
@@ -132,90 +139,40 @@ smp_entry:
	jmp $ ; endless loop
%endif

; search MP Floating Pointer Structure
search_mps:
search_apic:
	push ebp
	mov ebp, esp
	push ecx

	xor eax, eax
	mov ecx, [ebp+8]
.l1:
L1:
	cmp [ecx], DWORD 0x5f504d5f ; MP_FLT_SIGNATURE
	jne .l2
	jne L2
	mov al, BYTE [ecx+9]
	cmp eax, 4
	ja .l2
	ja L2
	mov al, BYTE [ecx+11]
	cmp eax, 0
	jne .l2
	jne L2
	mov eax, ecx
	jmp .l3
	jmp L3

.l2:
L2:
	add ecx, 4
	cmp ecx, [ebp+12]
	jb .l1
	jb L1
	xor eax, eax

.l3:
L3:
	pop ecx
	pop ebp
	ret

check_longmode:
	; check for cpuid instruction
	pushfd
	pop eax
	mov ecx, eax
	xor eax, 1 << 21
	push eax
	popfd
	pushfd
	pop eax
	push ecx
	popfd
	xor eax, ecx
	jz .unsupported
	; check for extended cpu features (cpuid > 0x80000000)
	mov eax, 0x80000000
	cpuid
	cmp eax, 0x80000001
	jb .unsupported ; It is less, there is no long mode.
	; check if longmode is supported
	mov eax, 0x80000001
	cpuid
	test edx, 1 << 29 ; Test if the LM-bit, which is bit 29, is set in the D-register.
	jz .unsupported ; They aren't, there is no long mode.
	ret
.unsupported:
	jmp $

check_lapic:
	push eax
	push ebx
	push ecx
	push edx
	mov eax, 1
	cpuid
	and edx, 0x200
	cmp edx, 0
	je .unsupported
	; map lapic at 0xFEE00000 below the kernel
	mov edi, kernel_start - 0x1000
	shr edi, 9 ; (edi >> 12) * 8
	add edi, boot_pgt
	mov ebx, 0xFEE00000 ; LAPIC base address
	or ebx, 0x00000013
	mov DWORD [edi], ebx
.unsupported:
	pop edx
	pop ecx
	pop ebx
	pop eax
	ret

cpu_init:
ALIGN 4
stublet:
	mov esp, startup_stack-4
	push ebx ; save pointer to the multiboot structure
	mov eax, cr0
	; enable caching, disable paging and fpu emulation
	and eax, 0x1ffffffb
@@ -229,120 +186,155 @@ cpu_init:
	mov eax, cr4
	and eax, 0xfffbf9ff
	mov cr4, eax
	ret
	; do we have the instruction cpuid?
	pushfd
	pop eax
	mov ecx, eax
	xor eax, 1 << 21
	push eax
	popfd
	pushfd
	pop eax
	push ecx
	popfd
	xor eax, ecx
	jz Linvalid
	; cpuid > 0x80000000?
	mov eax, 0x80000000
	cpuid
	cmp eax, 0x80000001
	jb Linvalid ; It is less, there is no long mode.
	; do we have a long mode?
	mov eax, 0x80000001
	cpuid
	test edx, 1 << 29 ; Test if the LM-bit, which is bit 29, is set in the D-register.
	jz Linvalid ; They aren't, there is no long mode.

; identity map a single page at address eax
identity_page:
	push edi
	push ebx
	; initialize page table
	mov edi, boot_pgd
	mov cr3, edi

	and eax, 0xFFFFF000
	mov edi, eax
	shr edi, 9 ; (edi >> 12) * 8 (index for boot_pgt)
	add edi, boot_pgt
	mov ebx, eax
	or ebx, 0x13 ; set present, writable and cache disable bits
	; So lets make PML4T[0] point to the PDPT and so on:
	mov DWORD [edi], boot_pdpt ; Set the double word at the destination index to pdpt.
	or DWORD [edi], 0x00000003 ; Set present and writeable bit
	mov edi, boot_pdpt
	mov DWORD [edi], boot_pd ; Set the double word at the destination index to pd.
	or DWORD [edi], 0x00000003 ; Set present and writeable bit
	mov edi, boot_pd
	mov ebx, boot_pt
	mov ecx, NOPTS
L0:
	mov DWORD [edi], ebx ; Set the double word at the destination index to pt.
	or DWORD [edi], 0x00000003 ; Set present and writeable bit
	add edi, 8
	add ebx, 0x1000
	loop L0

%ifdef CONFIG_VGA
	; map the VGA address into the virtual address space
	mov edi, 0xB8000
	shr edi, 9 ; (edi >> 12) * 8
	add edi, boot_pt
	mov ebx, 0xB8000
	or ebx, 0x00000013
	mov DWORD [edi], ebx
%endif

	; map multiboot structure into the virtual address space
	mov edi, [esp]
	and edi, 0xFFFFF000
	shr edi, 9 ; (edi >> 12) * 8
	add edi, boot_pt
	mov ebx, [esp]
	and ebx, 0xFFFFF000
	or ebx, 0x00000003
	mov DWORD [edi], ebx

	pop ebx
	pop edi
	ret

ALIGN 4
stublet:
	mov esp, startup_stack-4
	; save pointer to the Multiboot structure
	; check if lapic is available
	push eax
	push ebx
	; initialize cpu features
	call cpu_init
	; check if longmode is supported
	call check_longmode
	; check if lapic is available
	call check_lapic
	push ecx
	push edx
	mov eax, 1
	cpuid
	and edx, 0x200
	cmp edx, 0
	je no_lapic
	; map lapic at 0xFEE00000 below the kernel
	mov edi, kernel_start - 0x1000
	shr edi, 9 ; (edi >> 12) * 8
	add edi, boot_pt
	mov ebx, 0xFEE00000
	or ebx, 0x00000013
	mov DWORD [edi], ebx
no_lapic:
	pop edx
	pop ecx
	pop ebx
	pop eax

	; find MP Floating Pointer structure
	; search APIC
	push DWORD 0x100000
	push DWORD 0xF0000
	call search_mps
	call search_apic
	add esp, 8

	cmp eax, 0
	jne map_mps
	jne La

	push DWORD 0xA0000
	push DWORD 0x9F000
	call search_mps
	call search_apic
	add esp, 8

	cmp eax, 0
	je map_kernel
	je Lb

map_mps:
	; map MP Floating Pointer structure
La:
	; map MP Floating Pointer Structure
	mov DWORD [apic_mp], eax
	call identity_page
	mov edi, eax
	and edi, 0xFFFFF000
	shr edi, 9 ; (edi >> 12) * 8
	add edi, boot_pt
	mov ebx, eax
	and ebx, 0xFFFFF000
	or ebx, 0x00000013
	mov DWORD [edi], ebx

	; map MP Configuration table
	mov eax, [apic_mp+4]
	call identity_page
	; map mp_config
	mov edi, [eax+4]
	and edi, 0xFFFFF000
	shr edi, 9 ; (edi >> 12) * 8
	add edi, boot_pt
	mov ebx, [eax+4]
	and ebx, 0xFFFFF000
	or ebx, 0x00000013
	mov DWORD [edi], ebx

%ifdef CONFIG_VGA
	; map VGA textmode plane
	mov eax, 0xB8000
	call identity_page
%endif

	; map Multiboot structure
	mov eax, [esp] ; pointer is still on the stack
	call identity_page

map_kernel:
Lb:
	mov edi, kernel_start
	shr edi, 9 ; (edi >> 12) * 8 (index for boot_pgt)
	add edi, boot_pgt
	shr edi, 9 ; (kernel_start >> 12) * 8
	add edi, boot_pt
	mov ebx, kernel_start
	or ebx, 0x00000003 ; set present and writable flags
	or ebx, 0x00000003
	mov ecx, kernel_end ; determine kernel size in number of pages
	sub ecx, kernel_start
	shr ecx, 12
	inc ecx
.l1:
	mov DWORD [edi], ebx

Lc:
	mov DWORD [edi], ebx ; Set the double word at the destination index to the B-register.
	add edi, 8
	add ebx, 0x1000
	loop .l1
	loop Lc

init_paging:
	mov edi, boot_pml4
	mov cr3, edi

	mov DWORD [edi], boot_pdpt
	or DWORD [edi], 0x07 ; set present, user and writable flags

	add edi, (PAGE_MAP_ENTRIES-1)*8 ; setup recursive paging
	mov DWORD [edi], boot_pml4 ; boot_pml4[511] -> boot_pml4
	or DWORD [edi], 0x03 ; set present and writable flags

	mov edi, boot_pdpt
	mov DWORD [edi], boot_pgd
	or DWORD [edi], 0x03 ; set present and writable flags

	mov edi, boot_pgd
	mov ebx, boot_pgt
	mov ecx, PAGE_MAP_ENTRIES ; map all boot_pgt to the kernel space
.l1:
	mov DWORD [edi], ebx
	or DWORD [edi], 0x03 ; set present and writable flags
	add edi, 8
	add ebx, 0x1000
	loop .l1

	; enable PAE
	; we need to enable PAE modus
	mov eax, cr4
	or eax, 1 << 5
	mov cr4, eax

	; enable longmode (compatibility mode)
	; switch to the compatibility mode (which is part of long mode)
	mov ecx, 0xC0000080
	rdmsr
	or eax, 1 << 8
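(Aside: the point of the self-referencing slot is that once boot_pml4[511] points back at boot_pml4, all paging structures become visible at fixed virtual addresses. A sketch of the address arithmetic in standalone C, for reasoning only, not kernel code:)

#include <stdint.h>

/* Compose a canonical x86_64 virtual address from its four table indices. */
static uint64_t make_va(uint64_t pml4, uint64_t pdpt, uint64_t pgd, uint64_t pgt)
{
	uint64_t addr = (pml4 << 39) | (pdpt << 30) | (pgd << 21) | (pgt << 12);

	if (addr & (1ULL << 47)) /* sign-extend bit 47 to get a canonical address */
		addr |= 0xFFFF000000000000ULL;
	return addr;
}

/* make_va(511, 511, 511, 511) == 0xFFFFFFFFFFFFF000 and
 * make_va(511, 0, 0, 0) == 0xFFFFFF8000000000, which match the PAGE_PML4
 * and PAGE_PGT constants declared in the page.h hunk earlier in this diff. */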
@@ -350,14 +342,16 @@ init_paging:

	; enable paging
	mov eax, cr0
	or eax, 1 << 31 | 1 << 0 ; Set the PG-bit, which is the 31nd bit, and the PE-bit, which is the 0th bit.
	or eax, 1 << 31 | 1 << 0 ; Set the PG-bit, which is the 31nd bit, and the PM-bit, which is the 0th bit.
	mov cr0, eax

	; jump to 64-bit longmode
	pop ebx ; restore pointer to multiboot structure
	lgdt [GDT64.Pointer] ; Load the 64-bit global descriptor table.
	jmp GDT64.Code:start64 ; Set the code segment and enter 64-bit long mode.

Linvalid:
	jmp $

[BITS 64]
start64:
	; initialize segment registers
@@ -395,6 +389,23 @@ smp_start64:
	jmp $
%endif

global cpu_init
cpu_init:
;	mov eax, cr0
	; enable caching, disable paging and fpu emulation
;	and eax, 0x1ffffffb
	; ...and turn on FPU exceptions
;	or eax, 0x22
;	mov cr0, eax
	; clears the current pgd entry
;	xor eax, eax
;	mov cr3, eax
	; at this stage, we disable the SSE support
;	mov eax, cr4
;	and eax, 0xfffbf9ff
;	mov cr4, eax
;	ret

; This will set up our new segment registers and is declared in
; C as 'extern void gdt_flush();'
global gdt_flush
@@ -50,7 +50,7 @@ size_t* get_current_stack(void)
#endif

	// use new page table
	write_cr3(virt_to_phys((size_t)curr_task->page_map));
	write_cr3(virt_to_phys((size_t)curr_task->pgd));

	return curr_task->last_stack_pointer;
}
@@ -42,4 +42,38 @@ L3:
	pop rax
	ret

%if 0
; The following function is derived from JamesM's kernel development tutorials
; (http://www.jamesmolloy.co.uk/tutorial_html/)
global copy_page_physical
copy_page_physical:
	push esi ; According to __cdecl, we must preserve the contents of ESI
	push edi ; and EDI.
	pushf ; push EFLAGS, so we can pop it and reenable interrupts
	; later, if they were enabled anyway.
	cli ; Disable interrupts, so we aren't interrupted.
	; Load these in BEFORE we disable paging!

	mov edi, [esp+12+4] ; Destination address
	mov esi, [esp+12+8] ; Source address

	mov edx, cr0 ; Get the control register...
	and edx, 0x7fffffff ; and...
	mov cr0, edx ; Disable paging.

	cld
	mov ecx, 0x400 ; 1024*4bytes = 4096 bytes = page size
	rep movsd ; copy page

	mov edx, cr0 ; Get the control register again
	or edx, 0x80000000 ; and...
	mov cr0, edx ; Enable paging.

	popf ; Pop EFLAGS back.
	pop edi ; Get the original value of EDI
	pop esi ; and ESI back.
	ret

%endif

SECTION .note.GNU-stack noalloc noexec nowrite progbits
@@ -46,7 +46,7 @@
 * 0x00100000 - 0x0DEADFFF: Kernel (size depends on the configuration) (221MB)
 * 0x0DEAE000 - 0x3FFFEFFF: Kernel heap (801MB)
 * 0x3FFFF000 - 0x3FFFFFFF: Page Tables are mapped in this region (4KB)
 * (The first 256 entries belongs to kernel space)
 * (The last 256 entries belongs to kernel space)
 */

/*
@@ -57,14 +57,13 @@ extern const void kernel_start;
extern const void kernel_end;

// boot task's page directory and page directory lock
static page_map_t boot_pgd = {{[0 ... MAP_ENTRIES-1] = 0}};
static page_map_t boot_pgt[KERNEL_SPACE/(MAP_ENTRIES*PAGE_SIZE)];
static page_map_t pgt_container = {{[0 ... MAP_ENTRIES-1] = 0}};

static page_dir_t boot_pgd = {{[0 ... PGT_ENTRIES-1] = 0}};
static page_table_t pgt_container = {{[0 ... PGT_ENTRIES-1] = 0}};
static page_table_t boot_pgt[KERNEL_SPACE/(1024*PAGE_SIZE)];
static spinlock_t kslock = SPINLOCK_INIT;
static int paging_enabled = 0;

page_map_t* get_boot_page_map(void)
page_dir_t* get_boot_pgd(void)
{
	return &boot_pgd;
}
@@ -72,26 +71,26 @@ page_map_t* get_boot_page_map(void)
/*
 * TODO: We create a full copy of the current task. Copy-On-Access will be the better solution.
 *
 * No PGD locking is needed because only create_page_map use this function and holds already the
 * No PGD locking is needed because onls create_pgd use this function and holds already the
 * PGD lock.
 */
inline static size_t copy_page_table(task_t* task, uint32_t pgd_index, page_map_t* pgt, int* counter)
inline static size_t copy_page_table(task_t* task, uint32_t pgd_index, page_table_t* pgt, int* counter)
{
	uint32_t i;
	page_map_t* new_pgt;
	page_table_t* new_pgt;
	size_t phyaddr;

	if (BUILTIN_EXPECT(!pgt, 0))
		return 0;

	new_pgt = kmalloc(sizeof(page_map_t));
	new_pgt = kmalloc(sizeof(page_table_t));
	if (!new_pgt)
		return 0;
	memset(new_pgt, 0x00, sizeof(page_map_t));
	memset(new_pgt, 0x00, sizeof(page_table_t));
	if (counter)
		(*counter)++;

	for(i=0; i<MAP_ENTRIES; i++) {
	for(i=0; i<PGT_ENTRIES; i++) {
		if (pgt->entries[i] & PAGE_MASK) {
			if (!(pgt->entries[i] & PG_USER)) {
				// Kernel page => copy only page entries
@@ -118,11 +117,11 @@ inline static size_t copy_page_table(task_t* task, uint32_t pgd_index, page_map_
	return phyaddr;
}

int create_page_map(task_t* task, int copy)
int create_pgd(task_t* task, int copy)
{
	page_map_t* pgd;
	page_map_t* pgt;
	page_map_t* pgt_container;
	page_dir_t* pgd;
	page_table_t* pgt;
	page_table_t* pgt_container;
	uint32_t i;
	uint32_t index1, index2;
	size_t viraddr, phyaddr;

@@ -134,26 +133,25 @@ int create_page_map(task_t* task, int copy)

	// we already know the virtual address of the "page table container"
	// (see file header)
	pgt_container = (page_map_t*) ((KERNEL_SPACE - PAGE_SIZE) & PAGE_MASK);
	pgt_container = (page_table_t*) ((KERNEL_SPACE - PAGE_SIZE) & PAGE_MASK);

	// create new page directory for the new task
	pgd = kmalloc(sizeof(page_map_t));
	pgd = kmalloc(sizeof(page_dir_t));
	if (!pgd)
		return -ENOMEM;
	memset(pgd, 0x00, sizeof(page_map_t));
	memset(pgd, 0x00, sizeof(page_dir_t));

	// create a new "page table container" for the new task
	pgt = kmalloc(sizeof(page_map_t));
	pgt = kmalloc(sizeof(page_table_t));
	if (!pgt) {
		kfree(pgd, sizeof(page_map_t));
		kfree(pgd, sizeof(page_dir_t));
		return -ENOMEM;
	}
	memset(pgt, 0x00, sizeof(page_map_t));
	memset(pgt, 0x00, sizeof(page_table_t));

	// copy kernel tables
	spinlock_lock(&kslock);

	for(i=0; i<MAP_ENTRIES; i++) {
	for(i=0; i<PGT_ENTRIES; i++) {
		pgd->entries[i] = boot_pgd.entries[i];
		// only kernel entries will be copied
		if (pgd->entries[i] && !(pgd->entries[i] & PG_USER))
@@ -171,33 +169,36 @@ int create_page_map(task_t* task, int copy)
	pgd->entries[index1] = ((size_t) virt_to_phys((size_t) pgt) & PAGE_MASK)|KERN_TABLE;
	pgt->entries[index2] = ((size_t) virt_to_phys((size_t) pgt) & PAGE_MASK)|KERN_PAGE;

	task->page_map = pgd;
	task->pgd = pgd;

	if (copy) {
		spinlock_irqsave_lock(&curr_task->page_lock);
		spinlock_irqsave_lock(&curr_task->pgd_lock);

		for (i=KERNEL_SPACE/(1024*PAGE_SIZE); i<1024; i++) {
			if (!(curr_task->page_map->entries[i]))
			if (!(curr_task->pgd->entries[i]))
				continue;
			if (!(curr_task->page_map->entries[i] & PG_USER))
			if (!(curr_task->pgd->entries[i] & PG_USER))
				continue;

			phyaddr = copy_page_table(task, i, (page_map_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + i*PAGE_SIZE) & PAGE_MASK), &counter);
			phyaddr = copy_page_table(task, i, (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + i*PAGE_SIZE) & PAGE_MASK), &counter);
			if (phyaddr) {
				pgd->entries[i] = (phyaddr & PAGE_MASK) | (curr_task->page_map->entries[i] & 0xFFF);
				pgd->entries[i] = (phyaddr & PAGE_MASK) | (curr_task->pgd->entries[i] & 0xFFF);
				pgt->entries[i] = (phyaddr & PAGE_MASK) | KERN_PAGE;
			}
		}

		spinlock_irqsave_unlock(&curr_task->page_lock);
		spinlock_irqsave_unlock(&curr_task->pgd_lock);
	}

	return counter;
}

int drop_page_map(void)
/*
 * drops all page frames and the PGD of a user task
 */
int drop_pgd(void)
{
	page_map_t* pgd = per_core(current_task)->page_map;
	page_dir_t* pgd = per_core(current_task)->pgd;
	size_t phy_pgd = virt_to_phys((size_t) pgd);
	task_t* task = per_core(current_task);
	uint32_t i;

@@ -205,9 +206,9 @@ int drop_page_map(void)
	if (BUILTIN_EXPECT(pgd == &boot_pgd, 0))
		return -EINVAL;

	spinlock_irqsave_lock(&task->page_lock);
	spinlock_irqsave_lock(&task->pgd_lock);

	for(i=0; i<MAP_ENTRIES; i++) {
	for(i=0; i<PGT_ENTRIES; i++) {
		if (pgd->entries[i] & PG_USER) {
			put_page(pgd->entries[i] & PAGE_MASK);
			pgd->entries[i] = 0;

@@ -217,9 +218,9 @@ int drop_page_map(void)
	// freeing the page directory
	put_page(phy_pgd);

	task->page_map = NULL;
	task->pgd = NULL;

	spinlock_irqsave_unlock(&task->page_lock);
	spinlock_irqsave_unlock(&task->pgd_lock);

	return 0;
}
@@ -228,24 +229,24 @@ size_t virt_to_phys(size_t viraddr)
{
	task_t* task = per_core(current_task);
	uint32_t index1, index2;
	page_map_t* pgt;
	page_table_t* pgt;
	size_t ret = 0;

	if (!paging_enabled)
		return viraddr;

	if (BUILTIN_EXPECT(!task || !task->page_map, 0))
	if (BUILTIN_EXPECT(!task || !task->pgd, 0))
		return 0;

	spinlock_irqsave_lock(&task->page_lock);
	spinlock_irqsave_lock(&task->pgd_lock);

	index1 = viraddr >> 22;
	index2 = (viraddr >> 12) & 0x3FF;

	if (!(task->page_map->entries[index1] & PAGE_MASK))
	if (!(task->pgd->entries[index1] & PAGE_MASK))
		goto out;

	pgt = (page_map_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & PAGE_MASK);
	pgt = (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & PAGE_MASK);
	if (!pgt || !(pgt->entries[index2]))
		goto out;

@@ -254,7 +255,7 @@ size_t virt_to_phys(size_t viraddr)
out:
	//kprintf("vir %p to phy %p\n", viraddr, ret);

	spinlock_irqsave_unlock(&task->page_lock);
	spinlock_irqsave_unlock(&task->pgd_lock);

	return ret;
}
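(Aside: the 10/10/12 split used throughout this file in one concrete case: for viraddr = 0xC0100000, index1 = 0xC0100000 >> 22 = 0x300, i.e. page directory slot 768, and index2 = (0xC0100000 >> 12) & 0x3FF = 0x100, i.e. page table slot 256; the low 12 bits remain the byte offset inside the 4 KB page.)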
@@ -262,11 +263,11 @@ out:
size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flags)
{
	task_t* task = per_core(current_task);
	page_map_t* pgt;
	page_table_t* pgt;
	size_t index, i;
	size_t ret;

	if (BUILTIN_EXPECT(!task || !task->page_map, 0))
	if (BUILTIN_EXPECT(!task || !task->pgd, 0))
		return 0;

	if (BUILTIN_EXPECT(!paging_enabled && (viraddr != phyaddr), 0))

@@ -275,7 +276,7 @@ size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flag
	if (flags & MAP_KERNEL_SPACE)
		spinlock_lock(&kslock);
	else
		spinlock_irqsave_lock(&task->page_lock);
		spinlock_irqsave_lock(&task->pgd_lock);

	if (!viraddr) {
		viraddr = vm_alloc(npages, flags);

@@ -291,10 +292,10 @@ size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flag
	for(i=0; i<npages; i++, viraddr+=PAGE_SIZE, phyaddr+=PAGE_SIZE) {
		index = viraddr >> 22;

		if (!(task->page_map->entries[index])) {
			page_map_t* pgt_container;
		if (!(task->pgd->entries[index])) {
			page_table_t* pgt_container;

			pgt = (page_map_t*) get_page();
			pgt = (page_table_t*) get_pages(1);
			if (BUILTIN_EXPECT(!pgt, 0)) {
				kputs("map_address: out of memory\n");
				ret = 0;

@@ -303,17 +304,17 @@ size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flag

			// set the new page table into the directory
			if (flags & MAP_USER_SPACE)
				task->page_map->entries[index] = (uint32_t)pgt|USER_TABLE;
				task->pgd->entries[index] = (uint32_t)pgt|USER_TABLE;
			else
				task->page_map->entries[index] = (uint32_t)pgt|KERN_TABLE;
				task->pgd->entries[index] = (uint32_t)pgt|KERN_TABLE;

			// if paging is already enabled, we need to use the virtual address
			if (paging_enabled)
				// we already know the virtual address of the "page table container"
				// (see file header)
				pgt_container = (page_map_t*) ((KERNEL_SPACE - PAGE_SIZE) & PAGE_MASK);
				pgt_container = (page_table_t*) ((KERNEL_SPACE - PAGE_SIZE) & PAGE_MASK);
			else
				pgt_container = (page_map_t*) (task->page_map->entries[(KERNEL_SPACE - PAGE_SIZE) >> 22] & PAGE_MASK);
				pgt_container = (page_table_t*) (task->pgd->entries[(KERNEL_SPACE - PAGE_SIZE) >> 22] & PAGE_MASK);

			if (BUILTIN_EXPECT(!pgt_container, 0)) {
				kputs("map_address: internal error\n");

@@ -329,11 +330,11 @@ size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flag
				memset((void*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index*PAGE_SIZE) & PAGE_MASK), 0x00, PAGE_SIZE);
			else
				memset(pgt, 0x00, PAGE_SIZE);
		} else pgt = (page_map_t*) (task->page_map->entries[index] & PAGE_MASK);
		} else pgt = (page_table_t*) (task->pgd->entries[index] & PAGE_MASK);

		/* convert physical address to virtual */
		if (paging_enabled)
			pgt = (page_map_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index*PAGE_SIZE) & PAGE_MASK);
			pgt = (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index*PAGE_SIZE) & PAGE_MASK);

		index = (viraddr >> 12) & 0x3FF;
		if (pgt->entries[index] && !(flags & MAP_REMAP)) {

@@ -381,7 +382,7 @@ out:
	if (flags & MAP_KERNEL_SPACE)
		spinlock_unlock(&kslock);
	else
		spinlock_irqsave_unlock(&task->page_lock);
		spinlock_irqsave_unlock(&task->pgd_lock);

	return ret;
}
|
@ -391,18 +392,18 @@ int change_page_permissions(size_t start, size_t end, uint32_t flags)
|
|||
uint32_t index1, index2, newflags;
|
||||
size_t viraddr = start & 0xFFFFF000;
|
||||
size_t phyaddr;
|
||||
page_map_t* pgt;
|
||||
page_map_t* pgd;
|
||||
page_table_t* pgt;
|
||||
page_dir_t* pgd;
|
||||
task_t* task = per_core(current_task);
|
||||
|
||||
if (BUILTIN_EXPECT(!paging_enabled, 0))
|
||||
return -EINVAL;
|
||||
|
||||
pgd = per_core(current_task)->page_map;
|
||||
pgd = per_core(current_task)->pgd;
|
||||
if (BUILTIN_EXPECT(!pgd, 0))
|
||||
return -EINVAL;
|
||||
|
||||
spinlock_irqsave_lock(&task->page_lock);
|
||||
spinlock_irqsave_lock(&task->pgd_lock);
|
||||
|
||||
while (viraddr < end)
|
||||
{
|
||||
|
@ -410,7 +411,7 @@ int change_page_permissions(size_t start, size_t end, uint32_t flags)
|
|||
index2 = (viraddr >> 12) & 0x3FF;
|
||||
|
||||
while ((viraddr < end) && (index2 < 1024)) {
|
||||
pgt = (page_map_t*) (page_map_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & PAGE_MASK);
|
||||
pgt = (page_table_t*) (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & PAGE_MASK);
|
||||
if (pgt && pgt->entries[index2]) {
|
||||
phyaddr = pgt->entries[index2] & PAGE_MASK;
|
||||
newflags = pgt->entries[index2] & 0xFFF; // get old flags
|
||||
|
@ -447,7 +448,7 @@ int change_page_permissions(size_t start, size_t end, uint32_t flags)
|
|||
}
|
||||
}
|
||||
|
||||
spinlock_irqsave_unlock(&task->page_lock);
|
||||
spinlock_irqsave_unlock(&task->pgd_lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -463,9 +464,9 @@ size_t vm_alloc(uint32_t npages, uint32_t flags)
|
|||
uint32_t index1, index2, j;
|
||||
size_t viraddr, i, ret = 0;
|
||||
size_t start, end;
|
||||
page_map_t* pgt;
|
||||
page_table_t* pgt;
|
||||
|
||||
if (BUILTIN_EXPECT(!task || !task->page_map || !paging_enabled, 0))
|
||||
if (BUILTIN_EXPECT(!task || !task->pgd || !paging_enabled, 0))
|
||||
return 0;
|
||||
|
||||
if (flags & MAP_KERNEL_SPACE) {
|
||||
|
@ -482,7 +483,7 @@ size_t vm_alloc(uint32_t npages, uint32_t flags)
|
|||
if (flags & MAP_KERNEL_SPACE)
|
||||
spinlock_lock(&kslock);
|
||||
else
|
||||
spinlock_irqsave_lock(&task->page_lock);
|
||||
spinlock_irqsave_lock(&task->pgd_lock);
|
||||
|
||||
viraddr = i = start;
|
||||
j = 0;
|
||||
|
@ -490,7 +491,7 @@ size_t vm_alloc(uint32_t npages, uint32_t flags)
|
|||
index1 = i >> 22;
|
||||
index2 = (i >> 12) & 0x3FF;
|
||||
|
||||
pgt = (page_map_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & PAGE_MASK);
|
||||
pgt = (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & PAGE_MASK);
|
||||
if (!pgt || !(pgt->entries[index2])) {
|
||||
i+=PAGE_SIZE;
|
||||
j++;
|
||||
|
@@ -508,7 +509,7 @@ size_t vm_alloc(uint32_t npages, uint32_t flags)
 	if (flags & MAP_KERNEL_SPACE)
 		spinlock_unlock(&kslock);
 	else
-		spinlock_irqsave_unlock(&task->page_lock);
+		spinlock_irqsave_unlock(&task->pgd_lock);
 
 	return ret;
 }
@@ -518,22 +519,22 @@ int unmap_region(size_t viraddr, uint32_t npages)
 	task_t* task = per_core(current_task);
 	uint32_t i;
 	uint32_t index1, index2;
-	page_map_t* pgt;
+	page_table_t* pgt;
 
-	if (BUILTIN_EXPECT(!task || !task->page_map || !paging_enabled, 0))
+	if (BUILTIN_EXPECT(!task || !task->pgd || !paging_enabled, 0))
 		return -EINVAL;
 
 	if (viraddr <= KERNEL_SPACE)
 		spinlock_lock(&kslock);
 	else
-		spinlock_irqsave_lock(&task->page_lock);
+		spinlock_irqsave_lock(&task->pgd_lock);
 
 	for(i=0; i<npages; i++, viraddr+=PAGE_SIZE)
 	{
 		index1 = viraddr >> 22;
 		index2 = (viraddr >> 12) & 0x3FF;
 
-		pgt = (page_map_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & PAGE_MASK);
+		pgt = (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & PAGE_MASK);
 		if (!pgt)
 			continue;
 		pgt->entries[index2] &= ~PG_PRESENT;
@@ -547,7 +548,7 @@ int unmap_region(size_t viraddr, uint32_t npages)
 	if (viraddr <= KERNEL_SPACE)
 		spinlock_unlock(&kslock);
 	else
-		spinlock_irqsave_unlock(&task->page_lock);
+		spinlock_irqsave_unlock(&task->pgd_lock);
 
 	return 0;
 }
@@ -557,22 +558,22 @@ int vm_free(size_t viraddr, uint32_t npages)
 	task_t* task = per_core(current_task);
 	uint32_t i;
 	uint32_t index1, index2;
-	page_map_t* pgt;
+	page_table_t* pgt;
 
-	if (BUILTIN_EXPECT(!task || !task->page_map || !paging_enabled, 0))
+	if (BUILTIN_EXPECT(!task || !task->pgd || !paging_enabled, 0))
 		return -EINVAL;
 
 	if (viraddr <= KERNEL_SPACE)
 		spinlock_lock(&kslock);
 	else
-		spinlock_irqsave_lock(&task->page_lock);
+		spinlock_irqsave_lock(&task->pgd_lock);
 
 	for(i=0; i<npages; i++, viraddr+=PAGE_SIZE)
 	{
 		index1 = viraddr >> 22;
 		index2 = (viraddr >> 12) & 0x3FF;
 
-		pgt = (page_map_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & PAGE_MASK);
+		pgt = (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & PAGE_MASK);
 		if (!pgt)
 			continue;
 		pgt->entries[index2] = 0;
@@ -583,7 +584,7 @@ int vm_free(size_t viraddr, uint32_t npages)
 	if (viraddr <= KERNEL_SPACE)
 		spinlock_unlock(&kslock);
 	else
-		spinlock_irqsave_unlock(&task->page_lock);
+		spinlock_irqsave_unlock(&task->pgd_lock);
 
 	return 0;
 }
@@ -592,8 +593,8 @@ int print_paging_tree(size_t viraddr)
 {
 	task_t* task = per_core(current_task);
 	uint32_t index1, index2;
-	page_map_t* pgd = NULL;
-	page_map_t* pgt = NULL;
+	page_dir_t* pgd = NULL;
+	page_table_t* pgt = NULL;
 
 	if (BUILTIN_EXPECT(!viraddr, 0))
 		return -EINVAL;
@@ -601,20 +602,20 @@ int print_paging_tree(size_t viraddr)
 	index1 = viraddr >> 22;
 	index2 = (viraddr >> 12) & 0x3FF;
 
-	spinlock_irqsave_lock(&task->page_lock);
+	spinlock_irqsave_lock(&task->pgd_lock);
 
 	kprintf("Paging dump of address 0x%x\n", viraddr);
-	pgd = task->page_map;
+	pgd = task->pgd;
 	kprintf("\tPage directory entry %u: ", index1);
 	if (pgd) {
 		kprintf("0x%0x\n", pgd->entries[index1]);
-		pgt = (page_map_t*) (pgd->entries[index1] & PAGE_MASK);
+		pgt = (page_table_t*) (pgd->entries[index1] & PAGE_MASK);
 	} else
 		kputs("invalid page directory\n");
 
-	// convert physical address to virtual
+	/* convert physical address to virtual */
 	if (paging_enabled && pgt)
-		pgt = (page_map_t*) (KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE);
+		pgt = (page_table_t*) (KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE);
 
 	kprintf("\tPage table entry %u: ", index2);
 	if (pgt)
@@ -622,7 +623,7 @@ int print_paging_tree(size_t viraddr)
 	else
 		kputs("invalid page table\n");
 
-	spinlock_irqsave_unlock(&task->page_lock);
+	spinlock_irqsave_unlock(&task->pgd_lock);
 
 	return 0;
 }
@@ -630,12 +631,12 @@ int print_paging_tree(size_t viraddr)
 static void pagefault_handler(struct state *s)
 {
 	task_t* task = per_core(current_task);
+	page_dir_t* pgd = task->pgd;
+	page_table_t* pgt = NULL;
 	size_t viraddr = read_cr2();
 	size_t phyaddr;
 #ifdef CONFIG_ROCKCREEK
 	uint32_t index1, index2;
-	page_map_t* pgd = task->page_map;
-	page_map_t* pgt = NULL;
 #endif
 
 	if ((viraddr >= task->start_heap) && (viraddr <= task->end_heap) && (viraddr > KERNEL_SPACE)) {
@@ -649,7 +650,7 @@ static void pagefault_handler(struct state *s)
 			memset((void*) viraddr, 0x00, PAGE_SIZE);
 			return;
 		}
 
 		kprintf("Could not map 0x%x at 0x%x\n", phyaddr, viraddr);
 		put_page(phyaddr);
 	}
@@ -660,7 +661,7 @@ static void pagefault_handler(struct state *s)
 	index2 = (viraddr >> 12) & 0x3FF;
 	if (!pgd || !(pgd->entries[index1] & PAGE_MASK))
 		goto default_handler;
-	pgt = (page_map_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & PAGE_MASK);
+	pgt = (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & PAGE_MASK);
 	if (!pgt || !(pgt->entries[index2]))
 		goto default_handler;
 	if (pgt->entries[index2] & PG_SVM_INIT) {
@@ -686,14 +687,14 @@ default_handler:
 int arch_paging_init(void)
 {
 	uint32_t i, npages, index1, index2;
-	page_map_t* pgt;
+	page_table_t* pgt;
 	size_t viraddr;
 
-	// replace default pagefault handler
+	// uninstall default handler and install our own
 	irq_uninstall_handler(14);
 	irq_install_handler(14, pagefault_handler);
 
-	// create a page table to reference to the other page tables
+	// Create a page table to reference to the other page tables
 	pgt = &pgt_container;
 
 	// map this table at the end of the kernel space
@@ -702,21 +703,21 @@ int arch_paging_init(void)
 	index2 = (viraddr >> 12) & 0x3FF;
 
 	// now, we create a self reference
-	per_core(current_task)->page_map->entries[index1] = (((size_t) pgt) & PAGE_MASK)|KERN_TABLE;
-	pgt->entries[index2] = ((size_t) pgt & PAGE_MASK)|KERN_PAGE;
+	per_core(current_task)->pgd->entries[index1] = (((size_t) pgt) & PAGE_MASK)|KERN_TABLE;
+	pgt->entries[index2] = ((size_t) pgt & 0xFFFFF000)|KERN_PAGE;
 
 	// create the other PGTs for the kernel space
 	for(i=0; i<KERNEL_SPACE/(1024*PAGE_SIZE)-1; i++) {
 		size_t phyaddr = boot_pgt+i;
 
-		memset((void*) phyaddr, 0x00, sizeof(page_map_t));
-		per_core(current_task)->page_map->entries[i] = (phyaddr & PAGE_MASK)|KERN_TABLE;
+		memset((void*) phyaddr, 0x00, sizeof(page_table_t));
+		per_core(current_task)->pgd->entries[i] = (phyaddr & PAGE_MASK)|KERN_TABLE;
 		pgt->entries[i] = (phyaddr & PAGE_MASK)|KERN_PAGE;
 	}
 
 	/*
-	 * Set the page table and page directory entries for the kernel.
-	 * We map the kernel's physical address to the same virtual address.
+	 * Set the page table and page directory entries for the kernel. We map the kernel's physical address
+	 * to the same virtual address.
 	 */
 	npages = ((size_t) &kernel_end - (size_t) &kernel_start) >> PAGE_SHIFT;
 	if ((size_t)&kernel_end & (PAGE_SIZE-1))
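Editor's note: the self reference created above is what makes expressions such as `(KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & PAGE_MASK` throughout this file work: the "page table container" maps every page table into a fixed 4 MB window just below KERNEL_SPACE. A hedged sketch of that address computation, assuming the 1 GB kernel space from the config header:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE    4096U
#define KERNEL_SPACE (1U*1024*1024*1024) /* 1 GB, as in the config header */

/* Sketch: the page table covering page-directory slot index1 is visible
 * at a fixed virtual address inside a 1024-page window at the end of
 * the kernel space. Illustrative only. */
static uint32_t pgt_window_addr(uint32_t index1)
{
	return (KERNEL_SPACE - 1024U*PAGE_SIZE) + index1*PAGE_SIZE;
}

int main(void)
{
	/* the directory slot for virtual address 0xC0000000 is 0x300 */
	printf("pgt for slot 0x300 at 0x%x\n",
	       (unsigned) pgt_window_addr(0xC0000000u >> 22));
	return 0;
}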
@@ -724,7 +725,7 @@ int arch_paging_init(void)
 	map_region((size_t)&kernel_start, (size_t)&kernel_start, npages, MAP_KERNEL_SPACE);
 
 #if MAX_CORES > 1
-	// reserve page for smp boot code
+	// Reserve page for smp boot code
 	if (!map_region(SMP_SETUP_ADDR, SMP_SETUP_ADDR, 1, MAP_KERNEL_SPACE|MAP_NO_CACHE)) {
 		kputs("could not reserve page for smp boot code\n");
 		return -ENOMEM;
@@ -737,12 +738,16 @@ int arch_paging_init(void)
 #endif
 
 #ifdef CONFIG_MULTIBOOT
-	// map mb_info into the kernel space
+	/*
+	 * of course, mb_info has to map into the kernel space
+	 */
 	if (mb_info)
 		map_region((size_t) mb_info & PAGE_MASK, (size_t) mb_info & PAGE_MASK, 1, MAP_KERNEL_SPACE);
 
 #if 0
-	// map reserved memory regions into the kernel space
+	/*
+	 * Map reserved memory regions into the kernel space
+	 */
 	if (mb_info && (mb_info->flags & MULTIBOOT_INFO_MEM_MAP)) {
 		multiboot_memory_map_t* mmap = (multiboot_memory_map_t*) mb_info->mmap_addr;
 		multiboot_memory_map_t* mmap_end = (void*) ((size_t) mb_info->mmap_addr + mb_info->mmap_length);
@@ -800,7 +805,7 @@ int arch_paging_init(void)
 	kprintf("Map FPGA regsiters at 0x%x\n", viraddr);
 #endif
 
-	// enable paging
+	/* enable paging */
 	write_cr3((uint32_t) &boot_pgd);
 	i = read_cr0();
 	i = i | (1 << 31);
@@ -817,7 +822,10 @@ int arch_paging_init(void)
 	bootinfo->addr = viraddr;
 #endif
 
-	// we turned on paging => now, we are able to register our task
+	/*
+	 * we turned on paging
+	 * => now, we are able to register our task
+	 */
 	register_task();
 
 	// APIC registers into the kernel address space
 
@@ -31,15 +31,21 @@
 #include <asm/irq.h>
 #include <asm/multiboot.h>
 #include <asm/apic.h>
 #ifdef CONFIG_ROCKCREEK
 #include <asm/RCCE_lib.h>
 #include <asm/SCC_API.h>
 #include <asm/svm.h>
 #include <asm/icc.h>
 #endif
 
 /*
  * Virtual Memory Layout of the standard configuration
  * (1 GB kernel space)
  *
- * 0x000000000000 - 0x0000000FFFFF:   reserved for IO devices (16MB)
- * 0x000000100000 - 0x00000DEADFFF:   Kernel (size depends on the configuration) (221MB)
- * 0x00000DEAE000 - 0x00003FFFFFFF:   Kernel heap
- * 0xFF8000000000 - 0xFFFFFFFFFFFF:   Paging structures are mapped in this region (max 512GB)
+ * 0x00000000 - 0x000FFFFF:   reserved for IO devices (16MB)
+ * 0x00100000 - 0x0DEADFFF:   Kernel (size depends on the configuration) (221MB)
+ * 0x0DEAE000 - 0x3FFFFFFF:   Kernel heap
+ *
 */
 
 /*
@@ -49,248 +55,127 @@
 extern const void kernel_start;
 extern const void kernel_end;
 
-// boot task's page map and page map lock
-extern page_map_t boot_pml4;
+// boot task's page directory and page directory lock
+extern page_dir_t boot_pgd;
 static spinlock_t kslock = SPINLOCK_INIT;
 static int paging_enabled = 0;
 
-page_map_t* get_boot_page_map(void)
+page_dir_t* get_boot_pgd(void)
 {
-	return &boot_pml4;
+	return &boot_pgd;
 }
 
-/** @brief Copy a single page frame
- *
- * @param src virtual address of source page frame
- * @return physical addr to copied page frame
- */
-static size_t copy_page_frame(size_t *src)
+int create_pgd(task_t* task, int copy)
 {
-	kprintf("copy_page_frame(%p)\n", src);
-#if 1 // TODO: untested
-	size_t phyaddr, viraddr;
+	// Currently, we support only kernel tasks
+	// => all tasks are able to use the same pgd
 
-	// allocate and map an empty page
-	phyaddr = get_page();
-	if (BUILTIN_EXPECT(!phyaddr, 0))
-		return 0;
-
-	viraddr = vma_alloc(PAGE_SIZE, VMA_HEAP);
-	if (BUILTIN_EXPECT(!viraddr, 0))
-		return 0;
-
-	viraddr = map_region(viraddr, phyaddr, 1, MAP_KERNEL_SPACE);
-	if (BUILTIN_EXPECT(!viraddr, 0))
-		return 0;
-
-	// copy the whole page
-	strncpy((void*) viraddr, (void*) src, PAGE_SIZE);
-
-	// unmap and free page
-	unmap_region(viraddr, 1);
-	vma_free(viraddr, viraddr+PAGE_SIZE);
-
-	return phyaddr;
-#else
-	kprintf("TODO: copy_page_frame(%lx)\n", source);
-	return 0;
-#endif
-}
-
-static inline size_t canonicalize(size_t addr)
-{
-	if (addr & (1UL<<47))
-		return addr;
-	else
-		return addr & ((1UL<<48) - 1);
-}
-
-static inline int map_to_level(size_t addr)
-{
-	if (addr >= PAGE_PML4)
-		return 4;
-	else if (addr >= PAGE_PDPT)
-		return 3;
-	else if (addr >= PAGE_PGD)
-		return 2;
-	else if (addr >= PAGE_PGT)
-		return 1;
-	else
+	if (BUILTIN_EXPECT(!paging_enabled, 0))
+		return -EINVAL;
+
+	task->pgd = get_boot_pgd();
+
 	return 0;
 }
 
-static inline const char * map_to_lvlname(size_t addr)
-{
-	const char* names[] = {"(none)", "PGT", "PGD", "PDPT", "PML4"};
-	return names[map_to_level(addr)];
-}
-
-static inline size_t map_to_virt(size_t addr)
-{
-	return canonicalize(addr << (map_to_level(addr) * PAGE_MAP_SHIFT));
-}
-
 /*
- * Copy page maps using recursion
- *
- * @param from pointer to virtual address of source page tables
- * @param to pointer to virtual address of destination page tables
- * @param copy flags what should be copied (see #define COPY_*)
- * @return number of new allocated page frames (for tables only)
+ * drops all page frames and the PGD of a user task
 */
-static int copy_page_map(page_map_t *src, page_map_t *dest, int copy)
+int drop_pgd(void)
 {
-	page_map_t* next_src, * next_dest;
-	int ret = 0;
-	uint32_t i;
-
-	for(i=0; i<PAGE_MAP_ENTRIES; i++) {
-		if (!(src->entries[i] & PG_PRESENT))
-			// skip empty entries
-			dest->entries[i] = 0;
-		else if (src->entries[i] & PG_USER) {
-			size_t phys;
-			kprintf("d:%p (%s: 0x%012lx) -> %p\n", &src->entries[i], map_to_lvlname((size_t) &src->entries[i]), map_to_virt((size_t) &src->entries[i]), &dest->entries[i]);
-
-			// deep copy user tables
-			if ((size_t) src >= PAGE_PGT) {
-				phys = get_page();
-				if (BUILTIN_EXPECT(!phys, 0))
-					return -ENOMEM;
-
-				dest->entries[i] = phys|(src->entries[i] & ~PAGE_MASK);
-
-				// reuse pointers to next lower page map tables
-				next_src = (page_map_t*) ((size_t) &src->entries[i] << 9);
-				next_dest = (page_map_t*) ((size_t) &dest->entries[i] << 9);
-
-				ret += 1 + copy_page_map(next_src, next_dest, copy);
-			}
-			// deep copy page frame
-			else {
-				if (copy) {
-					phys = copy_page_frame((size_t*) src->entries[i]);
-					dest->entries[i] = phys|(src->entries[i] & ~PAGE_MASK);
-				}
-				kprintf("c: %p (%lx)\n", &src->entries[i], src->entries[i]);
-			}
-		}
-		// shallow copy kernel only tables
-		else {
-			kprintf("s:%p (%s: 0x%012lx) -> %p\n", &src->entries[i], map_to_lvlname((size_t) &src->entries[i]), map_to_virt((size_t) &src->entries[i]), &dest->entries[i]);
-			dest->entries[i] = src->entries[i];
-		}
-	}
-
-	kputs("r\n");
-	return ret;
-}
-
-int create_page_map(task_t* task, int copy)
-{
-	size_t phys;
-	uint32_t ret;
-
-	// fixed mapping for paging structures
-	page_map_t *current = (page_map_t*) PAGE_PML4;
-	page_map_t *new = (page_map_t*) (PAGE_PML4 - 0x1000);
-
-	// get new pml4 table
-	phys = get_page();
-	if (!phys) return -ENOMEM;
-
-	current->entries[PAGE_MAP_ENTRIES-2] = phys|KERN_TABLE;
-	new->entries[PAGE_MAP_ENTRIES-1] = phys|KERN_TABLE;
-
-	tlb_flush(); // ouch :(
-
-	spinlock_lock(&kslock);
-	ret = copy_page_map(current, new, copy);
-	spinlock_unlock(&kslock);
-
-	new->entries[PAGE_MAP_ENTRIES-1] = phys|KERN_TABLE;
-	current->entries[PAGE_MAP_ENTRIES-2] = 0;
-
-	task->page_map = (page_map_t*) phys;
-
-	kprintf("create_page_map: allocated %u page tables\n", ret);
-	return ret;
-}
-
-int drop_page_map(void)
-{
-#if 1
-	kprintf("TODO: test drop_page_map()\n");
-	return -EINVAL; // TODO
-#else
+#if 0
+	page_dir_t* pgd = per_core(current_task)->pgd;
+	size_t phy_pgd = virt_to_phys((size_t) pgd);
 	task_t* task = per_core(current_task);
-	page_map_t* pml4, * pdpt, * pgd, * pgt;
-	size_t phys;
-	uint32_t i, j, k, l;
+	uint32_t i;
 
-	pml4 = task->page_map;
-
-	if (BUILTIN_EXPECT(pml4 == &boot_pml4, 0))
+	if (BUILTIN_EXPECT(pgd == &boot_pgd, 0))
 		return -EINVAL;
 
-	spinlock_lock(&task->page_lock);
-
-	// delete all user pages and tables
-	for(i=0; i<PAGE_MAP_ENTRIES; i++) { // pml4
-		if (pml4->entries[i] & PG_USER) {
-			for(j=0; j<PAGE_MAP_ENTRIES; j++) { // pdpt
-				if (pdpt->entries[j] & PG_USER) {
-					for(k=0; k<PAGE_MAP_ENTRIES; k++) { // pgd
-						if (pgd->entries[k] & PG_USER) {
-							for(l=0; l<PAGE_MAP_ENTRIES; l++) { // pgt
-								if (pgt->entries[l] & PG_USER)
-									put_page(pgt->entries[l] & PAGE_MASK);
-							}
-							// TODO: put pgt
-						}
-					}
-					// TODO: put pgd
-				}
-			}
-			// TODO: put pdpt
+	spinlock_lock(&task->pgd_lock);
+
+	for(i=0; i<1024; i++) {
+		if (pgd->entries[i] & PG_USER) {
+			put_page(pgd->entries[i] & PAGE_MASK);
+			pgd->entries[i] = 0;
 		}
 	}
 
-	put_page(virt_to_phys((size_t) pml4));
-	task->page_map = NULL;
+	// freeing the page directory
+	put_page(phy_pgd);
 
-	spinlock_unlock(&task->page_lock);
+	task->pgd = NULL;
+
+	spinlock_unlock(&task->pgd_lock);
+#endif
 
 	return 0;
-#endif
 }
 
 size_t virt_to_phys(size_t viraddr)
 {
 	task_t* task = per_core(current_task);
-	size_t phyaddr;
-	size_t* pte;
+	uint16_t idx_pd4 = (viraddr >> 39) & 0x1FF;
+	uint16_t idx_dirp = (viraddr >> 30) & 0x1FF;
+	uint16_t idx_dir = (viraddr >> 21) & 0x1FF;
+	uint16_t idx_table = (viraddr >> 12) & 0x1FF;
+	page_table_t* pgt;
+	size_t ret = 0;
 
-	spinlock_irqsave_lock(&task->page_lock);
+	if (!paging_enabled)
+		return viraddr;
 
-	pte = (size_t *) (PAGE_PGT | (viraddr >> 9));
-	phyaddr = (*pte & PAGE_MASK) | (viraddr & ~PAGE_MASK);
+	if (BUILTIN_EXPECT(!task || !task->pgd, 0))
+		return 0;
 
-	spinlock_irqsave_unlock(&task->page_lock);
+	spinlock_irqsave_lock(&task->pgd_lock);
 
-	return phyaddr;
+	// Currently, we allocate pages only in kernel space.
+	// => physical address of the page table is identical of the virtual address
+	pgt = (page_table_t*) (task->pgd->entries[idx_pd4] & PAGE_MASK);
+	if (!pgt)
+		goto out;
+
+	pgt = (page_table_t*) (pgt->entries[idx_dirp] & PAGE_MASK);
+	if (!pgt)
+		goto out;
+
+	pgt = (page_table_t*) (pgt->entries[idx_dir] & PAGE_MASK);
+	if (!pgt)
+		goto out;
+
+	ret = (size_t) (pgt->entries[idx_table] & PAGE_MASK);
+	if (!ret)
+		goto out;
+
+	ret = ret | (viraddr & 0xFFF); // add page offset
+out:
+	//kprintf("vir %p to phy %p\n", viraddr, ret);
+
+	spinlock_irqsave_unlock(&task->pgd_lock);
+
+	return ret;
 }
 
 size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flags)
 {
 	task_t* task = per_core(current_task);
+	page_table_t* pgt;
 	size_t i, ret;
 
-	if (BUILTIN_EXPECT(!task || !task->page_map, 0))
+	if (BUILTIN_EXPECT(!task || !task->pgd, 0))
 		return 0;
 
 	if (BUILTIN_EXPECT(!paging_enabled && (viraddr != phyaddr), 0))
 		return 0;
 
+	if (flags & MAP_KERNEL_SPACE)
+		spinlock_lock(&kslock);
+	else
+		spinlock_irqsave_lock(&task->pgd_lock);
+
 	if (!viraddr) {
-		kputs("map_region: deprecated vma_alloc() call from within map_region\n");
-		viraddr = vma_alloc(npages*PAGE_SIZE, VMA_HEAP);
+		viraddr = vm_alloc(npages, flags);
 		if (BUILTIN_EXPECT(!viraddr, 0)) {
 			kputs("map_region: found no valid virtual address\n");
 			ret = 0;
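Editor's note: the canonicalize() helper removed above relates to x86-64's canonical-address rule, under which bits 63..48 must equal bit 47. The branch's version leaves upper-half addresses untouched; a standalone sketch of the full textbook sign-extension, for comparison:

#include <stdint.h>
#include <stdio.h>

/* Sketch: sign-extend bit 47 so a 48-bit address becomes canonical on
 * x86-64. This is the usual rule; the removed kernel helper returned
 * upper-half addresses unchanged instead of setting bits 63..48. */
static uint64_t make_canonical(uint64_t addr)
{
	if (addr & (1ULL << 47))
		return addr | ~((1ULL << 48) - 1); /* set bits 63..48 */
	else
		return addr & ((1ULL << 48) - 1);  /* clear bits 63..48 */
}

int main(void)
{
	/* 0x0000800000000000 -> 0xFFFF800000000000 */
	printf("0x%llx\n",
	       (unsigned long long) make_canonical(0x0000800000000000ULL));
	return 0;
}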
@@ -298,40 +183,59 @@ size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flag
 		}
 	}
 
 	// correct alignment
 	phyaddr &= PAGE_MASK;
 	viraddr &= PAGE_MASK;
 	ret = viraddr;
 
-	if (flags & MAP_KERNEL_SPACE)
-		spinlock_lock(&kslock);
-	else
-		spinlock_irqsave_lock(&task->page_lock);
-
-	kprintf("map_region: map %u pages from 0x%lx to 0x%lx with flags: 0x%x\n", npages, viraddr, phyaddr, flags);
 	for(i=0; i<npages; i++, viraddr+=PAGE_SIZE, phyaddr+=PAGE_SIZE) {
-		// page table entry
-		size_t* pte = (size_t *) (PAGE_PGT|(viraddr >> 9));
+		uint16_t idx_pd4 = (viraddr >> 39) & 0x1FF;
+		uint16_t idx_dirp = (viraddr >> 30) & 0x1FF;
+		uint16_t idx_dir = (viraddr >> 21) & 0x1FF;
+		uint16_t idx_table = (viraddr >> 12) & 0x1FF;
 
-		if (*pte && !(flags & MAP_REMAP)) {
-			kprintf("map_region: 0x%lx is already mapped\n", viraddr);
+		pgt = (page_table_t*) (task->pgd->entries[idx_pd4] & PAGE_MASK);
+		if (!pgt) {
+			kputs("map_region: out of memory\n");
 			ret = 0;
 			goto out;
 		}
 
+		pgt = (page_table_t*) (pgt->entries[idx_dirp] & PAGE_MASK);
+		if (!pgt) {
+			kputs("map_region: out of memory\n");
+			ret = 0;
+			goto out;
+		}
+
+		pgt = (page_table_t*) (pgt->entries[idx_dir] & PAGE_MASK);
+		if (!pgt) {
+			kputs("map_region: out of memory\n");
+			ret = 0;
+			goto out;
+		}
+
+		/* convert physical address to virtual */
+		// Currently, we allocate pages only in kernel space.
+		// => physical address of the page table is identical of the virtual address
+		//if (paging_enabled)
+		//	pgt = (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index*PAGE_SIZE) & PAGE_MASK);
+
+		if (pgt->entries[idx_table] && !(flags & MAP_REMAP)) {
+			kprintf("0x%x is already mapped\n", viraddr);
+			ret = 0;
+			goto out;
+		}
+
 		if (flags & MAP_USER_SPACE)
-			*pte = phyaddr|USER_PAGE;
+			pgt->entries[idx_table] = USER_PAGE|(phyaddr & PAGE_MASK);
 		else
-			*pte = phyaddr|KERN_PAGE;
+			pgt->entries[idx_table] = KERN_PAGE|(phyaddr & PAGE_MASK);
 
 		if (flags & MAP_NO_CACHE)
-			*pte |= PG_PCD;
+			pgt->entries[idx_table] |= PG_PCD;
 
 		if (flags & MAP_NO_ACCESS)
-			*pte &= ~PG_PRESENT;
+			pgt->entries[idx_table] &= ~PG_PRESENT;
 
 		if (flags & MAP_WT)
-			*pte |= PG_PWT;
+			pgt->entries[idx_table] |= PG_PWT;
 
 		if (flags & MAP_USER_SPACE)
 			atomic_int32_inc(&task->user_usage);
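Editor's note: the idx_* computations above generalize the 32-bit split seen earlier: each x86-64 paging level consumes 9 bits above the 12-bit page offset, giving 512 entries per table. A standalone sketch of the same extraction:

#include <stdint.h>
#include <stdio.h>

/* Sketch: x86-64 4-level paging indices, 9 bits per level, 512 entries
 * per table, 12-bit page offset. Mirrors the idx_* lines above. */
int main(void)
{
	uint64_t viraddr  = 0x00007F1234567000ULL;
	unsigned idx_pml4 = (viraddr >> 39) & 0x1FF;
	unsigned idx_pdpt = (viraddr >> 30) & 0x1FF;
	unsigned idx_pgd  = (viraddr >> 21) & 0x1FF;
	unsigned idx_pgt  = (viraddr >> 12) & 0x1FF;

	printf("pml4=%u pdpt=%u pgd=%u pgt=%u off=0x%llx\n",
	       idx_pml4, idx_pdpt, idx_pgd, idx_pgt,
	       (unsigned long long)(viraddr & 0xFFF));
	return 0;
}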
@@ -343,7 +247,7 @@ out:
 	if (flags & MAP_KERNEL_SPACE)
 		spinlock_unlock(&kslock);
 	else
-		spinlock_irqsave_unlock(&task->page_lock);
+		spinlock_irqsave_unlock(&task->pgd_lock);
 
 	return ret;
 }
@@ -354,15 +258,18 @@ int change_page_permissions(size_t start, size_t end, uint32_t flags)
 	uint32_t index1, index2, newflags;
 	size_t viraddr = start & PAGE_MASK;
 	size_t phyaddr;
-	page_map_t* pgt;
-	page_map_t* pgd;
+	page_table_t* pgt;
+	page_dir_t* pgd;
 	task_t* task = per_core(current_task);
 
-	pgd = per_core(current_task)->page_map;
+	if (BUILTIN_EXPECT(!paging_enabled, 0))
+		return -EINVAL;
+
+	pgd = per_core(current_task)->pgd;
 	if (BUILTIN_EXPECT(!pgd, 0))
 		return -EINVAL;
 
-	spinlock_lock(&task->page_lock);
+	spinlock_lock(&task->pgd_lock);
 
 	while (viraddr < end)
 	{
@@ -370,7 +277,7 @@ int change_page_permissions(size_t start, size_t end, uint32_t flags)
 		index2 = (viraddr >> 12) & 0x3FF;
 
 		while ((viraddr < end) && (index2 < 1024)) {
-			pgt = (page_map_t*) (page_map_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & PAGE_MASK);
+			pgt = (page_table_t*) (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & PAGE_MASK);
 			if (pgt && pgt->entries[index2]) {
 				phyaddr = pgt->entries[index2] & PAGE_MASK;
 				newflags = pgt->entries[index2] & 0xFFF; // get old flags
@@ -385,8 +292,16 @@ int change_page_permissions(size_t start, size_t end, uint32_t flags)
 				// update flags
 				if (!(flags & VMA_WRITE)) {
 					newflags &= ~PG_RW;
+#ifdef CONFIG_ROCKCREEK
+					if (newflags & (PG_SVM_STRONG|PG_SVM_LAZYRELEASE))
+						newflags &= ~PG_MPE;
+#endif
 				} else {
 					newflags |= PG_RW;
+#ifdef CONFIG_ROCKCREEK
+					if (newflags & (PG_SVM_STRONG|PG_SVM_LAZYRELEASE))
+						newflags |= PG_MPE;
+#endif
 				}
 
 				pgt->entries[index2] = (newflags & 0xFFF) | (phyaddr & PAGE_MASK);
@@ -399,66 +314,149 @@ int change_page_permissions(size_t start, size_t end, uint32_t flags)
 		}
 	}
 
-	spinlock_unlock(&task->page_lock);
+	spinlock_unlock(&task->pgd_lock);
 #endif
 
 	return -EINVAL;
 }
 
 /*
  * Use the first fit algorithm to find a valid address range
  *
  * TODO: O(n) => bad performance, we need a better approach
 */
 size_t vm_alloc(uint32_t npages, uint32_t flags)
 {
 	task_t* task = per_core(current_task);
 	size_t viraddr, i, j, ret = 0;
 	size_t start, end;
 	page_table_t* pgt;
 
 	if (BUILTIN_EXPECT(!task || !task->pgd || !paging_enabled, 0))
 		return 0;
 
 	if (flags & MAP_KERNEL_SPACE) {
 		start = (((size_t) &kernel_end) + 10*PAGE_SIZE) & PAGE_MASK;
 		end = (KERNEL_SPACE - PAGE_SIZE) & PAGE_MASK;
 	} else {
 		start = KERNEL_SPACE & PAGE_MASK;
 		end = PAGE_MASK;
 	}
 
 	if (BUILTIN_EXPECT(!npages, 0))
 		return 0;
 
 	if (flags & MAP_KERNEL_SPACE)
 		spinlock_lock(&kslock);
 	else
 		spinlock_irqsave_lock(&task->pgd_lock);
 
 	viraddr = i = start;
 	j = 0;
 	do {
 		uint16_t idx_pd4 = (viraddr >> 39) & 0x1FF;
 		uint16_t idx_dirp = (viraddr >> 30) & 0x1FF;
 		uint16_t idx_dir = (viraddr >> 21) & 0x1FF;
 		uint16_t idx_table = (viraddr >> 12) & 0x1FF;
 
 		// Currently, we allocate pages only in kernel space.
 		// => physical address of the page table is identical of the virtual address
 		pgt = (page_table_t*) (task->pgd->entries[idx_pd4] & PAGE_MASK);
 		if (!pgt) {
 			i += (size_t)PGT_ENTRIES*PGT_ENTRIES*PGT_ENTRIES*PAGE_SIZE;
 			j += PGT_ENTRIES*PGT_ENTRIES*PGT_ENTRIES;
 			continue;
 		}
 
 		pgt = (page_table_t*) (pgt->entries[idx_dirp] & PAGE_MASK);
 		if (!pgt) {
 			i += PGT_ENTRIES*PGT_ENTRIES*PAGE_SIZE;
 			j += PGT_ENTRIES*PGT_ENTRIES;
 			continue;
 		}
 
 		pgt = (page_table_t*) (pgt->entries[idx_dir] & PAGE_MASK);
 		if (!pgt) {
 			i += PGT_ENTRIES*PAGE_SIZE;
 			j += PGT_ENTRIES;
 			continue;
 		}
 
 		if (!(pgt->entries[idx_table])) {
 			i += PAGE_SIZE;
 			j++;
 		} else {
 			// restart search
 			j = 0;
 			viraddr = i + PAGE_SIZE;
 			i = i + PAGE_SIZE;
 		}
 	} while((j < npages) && (i<=end));
 
 	if ((j >= npages) && (viraddr < end))
 		ret = viraddr;
 
 	if (flags & MAP_KERNEL_SPACE)
 		spinlock_unlock(&kslock);
 	else
 		spinlock_irqsave_unlock(&task->pgd_lock);
 
 	return ret;
 }
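Editor's note: when a whole upper-level table is absent, the first-fit scan above skips the full span that table would cover instead of probing page by page. The skip sizes, spelled out as a standalone check (PGT_ENTRIES assumed to be 512, as in 4-level paging):

#include <stdint.h>
#include <stdio.h>

#define PGT_ENTRIES 512ULL  /* assumption: 512 entries per level */
#define PAGE_SIZE   4096ULL

int main(void)
{
	/* virtual address space covered by each missing paging structure */
	printf("missing page table: %llu MiB\n",
	       PGT_ENTRIES*PAGE_SIZE >> 20);                       /* 2 MiB */
	printf("missing directory : %llu GiB\n",
	       PGT_ENTRIES*PGT_ENTRIES*PAGE_SIZE >> 30);           /* 1 GiB */
	printf("missing PDPT      : %llu GiB\n",
	       PGT_ENTRIES*PGT_ENTRIES*PGT_ENTRIES*PAGE_SIZE >> 30); /* 512 GiB */
	return 0;
}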
 int unmap_region(size_t viraddr, uint32_t npages)
 {
 	task_t* task = per_core(current_task);
-	page_map_t* pdpt, * pgd, * pgt;
+	page_table_t* pgt;
 	size_t i;
-	uint16_t index_pml4, index_pdpt;
-	uint16_t index_pgd, index_pgt;
+	uint16_t idx_pd4, idx_dirp;
+	uint16_t idx_dir, idx_table;
 
-	if (BUILTIN_EXPECT(!task || !task->page_map, 0))
+	if (BUILTIN_EXPECT(!task || !task->pgd || !paging_enabled, 0))
 		return -EINVAL;
 
 	if (viraddr <= KERNEL_SPACE)
 		spinlock_lock(&kslock);
 	else
-		spinlock_irqsave_lock(&task->page_lock);
+		spinlock_irqsave_lock(&task->pgd_lock);
 
 	i = 0;
 	while(i<npages)
 	{
-		index_pml4 = (viraddr >> 39) & 0x1FF;
-		index_pdpt = (viraddr >> 30) & 0x1FF;
-		index_pgd = (viraddr >> 21) & 0x1FF;
-		index_pgt = (viraddr >> 12) & 0x1FF;
+		idx_pd4 = (viraddr >> 39) & 0x1FF;
+		idx_dirp = (viraddr >> 30) & 0x1FF;
+		idx_dir = (viraddr >> 21) & 0x1FF;
+		idx_table = (viraddr >> 12) & 0x1FF;
 
-		// currently, we allocate pages only in kernel space.
+		// Currently, we allocate pages only in kernel space.
 		// => physical address of the page table is identical of the virtual address
-		pdpt = (page_map_t*) (task->page_map->entries[index_pml4] & PAGE_MASK);
-		if (!pdpt) {
-			viraddr += (size_t) PAGE_MAP_ENTRIES*PAGE_MAP_ENTRIES*PAGE_MAP_ENTRIES*PAGE_SIZE;
-			i += PAGE_MAP_ENTRIES*PAGE_MAP_ENTRIES*PAGE_MAP_ENTRIES;
-			continue;
-		}
-
-		pgd = (page_map_t*) (pdpt->entries[index_pdpt] & PAGE_MASK);
-		if (!pgd) {
-			viraddr += PAGE_MAP_ENTRIES*PAGE_MAP_ENTRIES*PAGE_SIZE;
-			i += PAGE_MAP_ENTRIES*PAGE_MAP_ENTRIES;
-			continue;
-		}
-
-		pgt = (page_map_t*) (pgd->entries[index_pgd] & PAGE_MASK);
+		pgt = (page_table_t*) (task->pgd->entries[idx_pd4] & PAGE_MASK);
 		if (!pgt) {
-			viraddr += PAGE_MAP_ENTRIES*PAGE_SIZE;
-			i += PAGE_MAP_ENTRIES;
+			viraddr += (size_t) PGT_ENTRIES*PGT_ENTRIES*PGT_ENTRIES*PAGE_SIZE;
+			i += PGT_ENTRIES*PGT_ENTRIES*PGT_ENTRIES;
 			continue;
 		}
 
+		pgt = (page_table_t*) (pgt->entries[idx_dirp] & PAGE_MASK);
+		if (!pgt) {
+			viraddr += PGT_ENTRIES*PGT_ENTRIES*PAGE_SIZE;
+			i += PGT_ENTRIES*PGT_ENTRIES;
+			continue;
+		}
+
+		pgt = (page_table_t*) (pgt->entries[idx_dir] & PAGE_MASK);
+		if (!pgt) {
+			viraddr += PGT_ENTRIES*PAGE_SIZE;
+			i += PGT_ENTRIES;
+			continue;
+		}
+
-		if (pgt->entries[index_pgt])
-			pgt->entries[index_pgt] &= ~PG_PRESENT;
+		if (pgt->entries[idx_table])
+			pgt->entries[idx_table] &= ~PG_PRESENT;
 
 		viraddr +=PAGE_SIZE;
 		i++;
 
 		if (viraddr > KERNEL_SPACE)
 			atomic_int32_dec(&task->user_usage);
 
@@ -468,7 +466,71 @@ int unmap_region(size_t viraddr, uint32_t npages)
 	if (viraddr <= KERNEL_SPACE)
 		spinlock_unlock(&kslock);
 	else
-		spinlock_irqsave_unlock(&task->page_lock);
+		spinlock_irqsave_unlock(&task->pgd_lock);
 
 	return 0;
 }
 
+int vm_free(size_t viraddr, uint32_t npages)
+{
+	task_t* task = per_core(current_task);
+	page_table_t* pgt;
+	size_t i;
+	uint16_t idx_pd4, idx_dirp;
+	uint16_t idx_dir, idx_table;
+
+	if (BUILTIN_EXPECT(!task || !task->pgd || !paging_enabled, 0))
+		return -EINVAL;
+
+	if (viraddr <= KERNEL_SPACE)
+		spinlock_lock(&kslock);
+	else
+		spinlock_irqsave_lock(&task->pgd_lock);
+
+	i = 0;
+	while(i<npages)
+	{
+		idx_pd4 = (viraddr >> 39) & 0x1FF;
+		idx_dirp = (viraddr >> 30) & 0x1FF;
+		idx_dir = (viraddr >> 21) & 0x1FF;
+		idx_table = (viraddr >> 12) & 0x1FF;
+
+		// Currently, we allocate pages only in kernel space.
+		// => physical address of the page table is identical of the virtual address
+		pgt = (page_table_t*) (task->pgd->entries[idx_pd4] & PAGE_MASK);
+		if (!pgt) {
+			viraddr += (size_t) PGT_ENTRIES*PGT_ENTRIES*PGT_ENTRIES*PAGE_SIZE;
+			i += PGT_ENTRIES*PGT_ENTRIES*PGT_ENTRIES;
+			continue;
+		}
+
+		pgt = (page_table_t*) (pgt->entries[idx_dirp] & PAGE_MASK);
+		if (!pgt) {
+			viraddr += PGT_ENTRIES*PGT_ENTRIES*PAGE_SIZE;
+			i += PGT_ENTRIES*PGT_ENTRIES;
+			continue;
+		}
+
+		pgt = (page_table_t*) (pgt->entries[idx_dir] & PAGE_MASK);
+		if (!pgt) {
+			viraddr += PGT_ENTRIES*PAGE_SIZE;
+			i += PGT_ENTRIES;
+			continue;
+		}
+
+		if (pgt->entries[idx_table])
+			pgt->entries[idx_table] = 0;
+
+		viraddr +=PAGE_SIZE;
+		i++;
+
+		tlb_flush_one_page(viraddr);
+	}
+
+	if (viraddr <= KERNEL_SPACE)
+		spinlock_unlock(&kslock);
+	else
+		spinlock_irqsave_unlock(&task->pgd_lock);
+
+	return 0;
+}
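Editor's note: vm_free() above clears the entry and then calls tlb_flush_one_page(), because the CPU may still cache the stale translation. That function's body is not part of this diff; on x86/x86-64 a single-page flush is conventionally an invlpg, roughly as follows (an assumption about the implementation, not the tree's code):

#include <stddef.h>

/* Sketch: invalidate one TLB entry on x86/x86-64. The kernel's actual
 * tlb_flush_one_page() is not shown in this diff; this is the standard
 * single-page idiom. */
static inline void tlb_flush_one_page_sketch(size_t addr)
{
	__asm__ volatile ("invlpg (%0)" :: "r"(addr) : "memory");
}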
@@ -476,8 +538,10 @@ int unmap_region(size_t viraddr, uint32_t npages)
 static void pagefault_handler(struct state *s)
 {
 	task_t* task = per_core(current_task);
+	//page_dir_t* pgd = task->pgd;
+	//page_table_t* pgt = NULL;
 	size_t viraddr = read_cr2();
-	size_t phyaddr;
+	//size_t phyaddr;
 
+#if 0
 	if ((viraddr >= task->start_heap) && (viraddr <= task->end_heap) && (viraddr > KERNEL_SPACE)) {
@@ -485,49 +549,24 @@ static void pagefault_handler(struct state *s)
 
 		phyaddr = get_page();
 		if (BUILTIN_EXPECT(!phyaddr, 0))
-			goto oom;
+			goto default_handler;
 
 		if (map_region(viraddr, phyaddr, 1, MAP_USER_SPACE) == viraddr) {
 			memset((void*) viraddr, 0x00, PAGE_SIZE);
 			return;
 		}
 
 		kprintf("Could not map 0x%x at 0x%x\n", phyaddr, viraddr);
 		put_page(phyaddr);
 	}
-	/*
-	 * handle missing paging structures for userspace
-	 * all kernel space paging structures have been initialized in entry64.asm
-	 */
-	else if (viraddr >= PAGE_PGT) {
-		kprintf("map_region: missing paging structure at: 0x%lx (%s)\n", viraddr, map_to_lvlname(viraddr));
-
-		phyaddr = get_page();
-		if (BUILTIN_EXPECT(!phyaddr, 0))
-			goto oom;
-
-		// TODO: initialize with zeros
-		// TODO: check that we are in userspace
-
-		// get pointer to parent page level entry
-		size_t *entry = (size_t *) ((int64_t) viraddr >> 9 & ~0x07);
-
-		// update entry
-		*entry = phyaddr|USER_TABLE;
-
-		return;
-	}
+#endif
 
+//default_handler:
 	kprintf("PAGE FAULT: Task %u got page fault at %p (irq %llu, cs:rip 0x%llx:0x%llx)\n", task->id, viraddr, s->int_no, s->cs, s->rip);
 	kprintf("Register state: rax = 0x%llx, rbx = 0x%llx, rcx = 0x%llx, rdx = 0x%llx, rdi = 0x%llx, rsi = 0x%llx, rbp = 0x%llx, rsp = 0x%llx\n",
 		s->rax, s->rbx, s->rcx, s->rdx, s->rdi, s->rsi, s->rbp, s->rsp);
 
-	irq_enable();
-	abort();
-
-oom:
-	kputs("map_region: out of memory\n");
-	while(1);
+	irq_enable();
+	abort();
 }
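Editor's note: the removed fault path above exploits the recursive PML4 mapping: shifting a faulting structure address right by 9 while keeping 8-byte alignment lands on the entry that maps it, one paging level up. A hedged sketch of just that pointer arithmetic (illustrative, assuming the vma_kernel recursive layout):

#include <stdint.h>

/* Sketch: with a self-referencing PML4 slot, every paging structure is
 * itself visible through the recursive window; dividing its address by
 * 512 (>> 9) and clearing the low three bits yields the address of the
 * parent-level entry. The arithmetic shift on int64_t keeps the result
 * canonical, as in the removed hunk. */
static inline uint64_t *parent_entry(uint64_t viraddr)
{
	return (uint64_t *)(uintptr_t)(((int64_t) viraddr >> 9) & ~0x07LL);
}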
@@ -536,17 +575,15 @@ int arch_paging_init(void)
 {
 	uint32_t i, npages;
 
-	// replace default pagefault handler
+	// uninstall default handler and install our own
 	irq_uninstall_handler(14);
 	irq_install_handler(14, pagefault_handler);
 
-	/*
-	 * In longmode the kernel is already maped into the kernel space (see entry64.asm)
-	 * this includes .data, .bss, .text, VGA, the multiboot & multiprocessing (APIC) structures
-	 */
+	// kernel is already maped into the kernel space (see entry64.asm)
+	// this includes .data, .bss, .text, video memory and the multiboot structure
 
 #if MAX_CORES > 1
-	// reserve page for smp boot code
+	// Reserve page for smp boot code
 	if (!map_region(SMP_SETUP_ADDR, SMP_SETUP_ADDR, 1, MAP_KERNEL_SPACE|MAP_NO_CACHE)) {
 		kputs("could not reserve page for smp boot code\n");
 		return -ENOMEM;
@@ -555,7 +592,9 @@ int arch_paging_init(void)
 
 #ifdef CONFIG_MULTIBOOT
 #if 0
-	// map reserved memory regions into the kernel space
+	/*
+	 * Map reserved memory regions into the kernel space
+	 */
 	if (mb_info && (mb_info->flags & MULTIBOOT_INFO_MEM_MAP)) {
 		multiboot_memory_map_t* mmap = (multiboot_memory_map_t*) mb_info->mmap_addr;
 		multiboot_memory_map_t* mmap_end = (void*) ((size_t) mb_info->mmap_addr + mb_info->mmap_length);
@@ -574,7 +613,7 @@ int arch_paging_init(void)
 
 	/*
 	 * Modules like the init ram disk are already loaded.
-	 * Therefore, we map these modules into the kernel space.
+	 * Therefore, we map these moduels into the kernel space.
 	 */
 	if (mb_info && (mb_info->flags & MULTIBOOT_INFO_MODS)) {
 		multiboot_module_t* mmodule = (multiboot_module_t*) ((size_t) mb_info->mods_addr);
@@ -595,7 +634,13 @@ int arch_paging_init(void)
 	}
 #endif
 
-	// we turned on paging => now, we are able to register our task
+	/* signalize that we are able to use paging */
+	paging_enabled = 1;
+
+	/*
+	 * we turned on paging
+	 * => now, we are able to register our task
+	 */
 	register_task();
 
 	// APIC registers into the kernel address space
 
@@ -86,12 +86,11 @@ static ssize_t stdio_write(fildes_t* file, uint8_t* buffer, size_t size)
 	for (i = 0; i<size; i++, buffer++) {
 #ifdef CONFIG_VGA
 		vga_putchar(*buffer);
-#endif
-#ifdef CONFIG_UART
+#elif defined(CONFIG_UART)
 		uart_putchar(*buffer);
-#endif
-
+#else
+		kputchar(*buffer);
+#endif
 	}
 
 	file->offset += size;
 
@@ -253,7 +253,7 @@ static int initrd_open(fildes_t* file, const char* name)
 	/* opendir was called: */
 	if (name[0] == '\0')
 		return 0;
 
 	/* open file was called: */
 	if (!(file->flags & O_CREAT))
 		return -ENOENT;
@@ -264,11 +264,11 @@ static int initrd_open(fildes_t* file, const char* name)
 	vfs_node_t* new_node = kmalloc(sizeof(vfs_node_t));
 	if (BUILTIN_EXPECT(!new_node, 0))
 		return -EINVAL;
 
 	blist = &file->node->block_list;
 	dir_block_t* dir_block;
 	dirent_t* dirent;
 
 	memset(new_node, 0x00, sizeof(vfs_node_t));
 	new_node->type = FS_FILE;
 	new_node->read = &initrd_read;
@@ -286,7 +286,7 @@ static int initrd_open(fildes_t* file, const char* name)
 			if (!dirent->vfs_node) {
 				dirent->vfs_node = new_node;
 				strncpy(dirent->name, (char*) name, MAX_FNAME);
-				goto exit_create_file; // TODO: there might be a better Solution
+				goto exit_create_file; // there might be a better Solution ***************
 			}
 		}
 	}
 
@@ -34,14 +34,14 @@ extern "C" {
 #define PAGE_SHIFT          12
 #define CACHE_LINE          64
 #define MAILBOX_SIZE        32
-#define TIMER_FREQ          100 // in HZ
-#define CLOCK_TICK_RATE     1193182 // 8254 chip's internal oscillator frequency
+#define TIMER_FREQ          100 /* in HZ */
+#define CLOCK_TICK_RATE     1193182 /* 8254 chip's internal oscillator frequency */
 #define INT_SYSCALL         0x80
 #define KERNEL_SPACE        (1*1024*1024*1024)
-#define VIDEO_MEM_ADDR      0xB8000 // the video memory address
+#define VIDEO_MEM_ADDR      0xB8000 // the video memora address
 #define SMP_SETUP_ADDR      0x07000
-#define UART_PORT           0x3F8 // 0x2F8 for SCC
-#define BYTE_ORDER          LITTLE_ENDIAN
+
+#define BYTE_ORDER          LITTLE_ENDIAN
 
 /*
  * address space / (page_size * sizeof(uint8_t))
@@ -52,7 +52,7 @@ extern "C" {
 #define CONFIG_PCI
 #define CONFIG_LWIP
 #define CONFIG_VGA
-#define CONFIG_UART
+//#define CONFIG_UART
 #define CONFIG_KEYBOARD
 #define CONFIG_MULTIBOOT
 //#define CONFIG_ROCKCREEK
@@ -72,7 +72,7 @@ extern "C" {
 //#define SHMADD
 #define SHMDBG
 //#define SHMADD_CACHEABLE
-#define SCC_BOOTINFO 0x80000
+#define SCC_BOOTINFO	0x80000
 
 #define BUILTIN_EXPECT(exp, b) __builtin_expect((exp), (b))
 //#define BUILTIN_EXPECT(exp, b) (exp)
 
@@ -31,6 +31,7 @@
 #include <metalsvm/stddef.h>
 #include <asm/atomic.h>
+//#include <asm/mmu.h>
 
 #ifdef __cplusplus
 extern "C" {
@@ -49,39 +50,33 @@
 */
 int mmu_init(void);
 
-/** @brief Get continuous pages
+/** @brief get continuous pages
  *
- * Use first fit algorithm to find a suitable, continous physical memory region
+ * This function finds a continuous page region (first fit algorithm)
  *
- * @param npages Desired number of pages
+ * @param no_pages Desired number of pages
+ *
  * @return
  * - physical address on success
  * - 0 on failure
 */
-size_t get_pages(uint32_t npages);
+size_t get_pages(uint32_t no_pages);
 
-/** @brief Get a single page
+/** @brief get a single page
  *
  * Convenience function: uses get_pages(1);
 */
 static inline size_t get_page(void) { return get_pages(1); }
 
-/** @brief Put back a sequence of continous pages
+/** @brief Put back a page after use
  *
- * @param phyaddr Physical address of the first page
- * @param npages Number of pages
+ * @param phyaddr Physical address to put back
  *
  * @return
  * - 0 on success
  * - -EINVAL (-22) on failure
 */
-int put_pages(size_t phyaddr, size_t npages);
-
-/** @brief Put a single page
- *
- * Convenience function: uses put_pages(1);
-*/
-static inline int put_page(size_t phyaddr) { return put_pages(phyaddr, 1); }
+int put_page(size_t phyaddr);
 
 #ifdef __cplusplus
 }
|
|||
#include <metalsvm/stddef.h>
|
||||
#include <asm/page.h>
|
||||
|
||||
/** @brief Sets up the environment, page directories etc and enables paging. */
|
||||
/**
|
||||
* Sets up the environment, page directories etc and
|
||||
* enables paging.
|
||||
*/
|
||||
static inline int paging_init(void) { return arch_paging_init(); }
|
||||
|
||||
#endif
|
||||
|
|
|
@ -28,10 +28,14 @@
|
|||
extern "C" {
|
||||
#endif
|
||||
|
||||
#define NULL ((void*) 0)
|
||||
#define NULL ((void*) 0)
|
||||
|
||||
typedef unsigned int tid_t;
|
||||
|
||||
#define PAGE_SIZE (1 << PAGE_SHIFT)
|
||||
#define PAGE_MASK ~(PAGE_SIZE - 1)
|
||||
#define PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) & PAGE_MASK)
|
||||
|
||||
#if MAX_CORES == 1
|
||||
#define per_core(name) name
|
||||
#define DECLARE_PER_CORE(type, name) extern type name;
|
||||
|
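Editor's note: PAGE_ALIGN above rounds an address up to the next page boundary by adding PAGE_SIZE-1 before masking. A standalone check of that arithmetic:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE (1 << PAGE_SHIFT)
#define PAGE_MASK ~(PAGE_SIZE - 1)
#define PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
	/* one byte past a boundary rounds up to the next page */
	printf("0x%x -> 0x%x\n", 0x1001, PAGE_ALIGN(0x1001)); /* 0x2000 */
	/* an aligned address is left unchanged */
	printf("0x%x -> 0x%x\n", 0x2000, PAGE_ALIGN(0x2000)); /* 0x2000 */
	return 0;
}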
@@ -62,10 +66,10 @@ typedef unsigned int tid_t;
 		irq_nested_enable(flags);\
 		return ret; \
 	}
-#define CORE_ID smp_id()
+#define CORE_ID	smp_id()
 #endif
 
-// needed to find the task, which is currently running on this core
+/* needed to find the task, which is currently running on this core */
 struct task;
 DECLARE_PER_CORE(struct task*, current_task);
 
@@ -113,7 +113,7 @@ unsigned long strtoul(const char* nptr, char** endptr, int base);
 */
 static inline int atoi(const char *str)
 {
-	return (int)strtol(str, (char **) NULL, 10);
+	return (int)strtol(str, (char **)NULL, 10);
 }
 
 #ifdef __cplusplus
 
@@ -147,7 +147,9 @@ tid_t wait(int32_t* result);
 */
 void update_load(void);
 
-/** @brief Print the current cpu load */
+/** @brief Print the current cpu load
+ *
+ */
 void dump_load(void);
 
 #if MAX_CORES > 1
@@ -199,7 +201,9 @@ int block_current_task(void);
 */
 int set_timer(uint64_t deadline);
 
-/** @brief check is a timer is expired */
+/** @brief check is a timer is expired
+ *
+ */
 void check_timers(void);
 
 /** @brief Abort current task */
 
@@ -62,7 +62,7 @@ extern "C" {
 #define TASK_L2 (1 << 3)
 
 typedef int (*entry_point_t)(void*);
-typedef struct page_map page_map_t;
+struct page_dir;
 
 /** @brief The task_t structure */
 typedef struct task {
@@ -88,10 +88,10 @@ typedef struct task {
 	uint32_t last_core;
 	/// usage in number of pages
 	atomic_int32_t user_usage;
-	/// avoids concurrent access to the page map structures
-	spinlock_irqsave_t page_lock;
-	/// pointer to page directory (32bit) or page map level 4 (64bit) table respectively
-	page_map_t* page_map;
+	/// avoids concurrent access to the page directory
+	spinlock_irqsave_t pgd_lock;
+	/// pointer to the page directory
+	struct page_dir* pgd;
 	/// lock for the VMA_list
 	spinlock_t vma_lock;
 	/// list of VMAs
 
@@ -27,102 +27,56 @@
 #define __VMA_H__
 
 #include <metalsvm/stddef.h>
-#include <asm/page.h>
 
 #ifdef __cplusplus
 extern "C" {
 #endif
 
 /// Read access to this VMA is allowed
 #define VMA_READ	(1 << 0)
 /// Write access to this VMA is allowed
 #define VMA_WRITE	(1 << 1)
 /// Instructions fetches in this VMA are allowed
 #define VMA_EXECUTE	(1 << 2)
 /// This VMA is cacheable
 #define VMA_CACHEABLE	(1 << 3)
-/// This VMA is not accessable
-#define VMA_NO_ACCESS	(1 << 4)
-/// This VMA should be part of the userspace
-#define VMA_USER	(1 << 5)
-/// A collection of flags used for the kernel heap (kmalloc)
-#define VMA_HEAP	(VMA_READ|VMA_WRITE|VMA_CACHEABLE)
-
-// boundaries for VAS allocation
-extern const void kernel_end;
-//#define VMA_KERN_MIN	(((size_t) &kernel_end + PAGE_SIZE) & PAGE_MASK)
-#define VMA_KERN_MAX	KERNEL_SPACE
-#define VMA_USER_MAX	(1UL << 47) // TODO
+#define VMA_NOACCESS	(1 << 4)
 
 struct vma;
 
-/** @brief VMA structure definition
- *
- * Each item in this linked list marks a used part of the virtual address space.
- * Its used by vm_alloc() to find holes between them.
- */
+/** @brief VMA structure definition */
 typedef struct vma {
 	/// Start address of the memory area
 	size_t start;
 	/// End address of the memory area
 	size_t end;
 	/// Type flags field
-	uint32_t flags;
+	uint32_t type;
 	/// Pointer of next VMA element in the list
 	struct vma* next;
 	/// Pointer to previous VMA element in the list
 	struct vma* prev;
 } vma_t;
 
-/** @brief Add a new virtual memory area to the list of VMAs
+/** @brief Add a new virtual memory region to the list of VMAs
  *
- * @param start Start address of the new area
- * @param end End address of the new area
- * @param flags Type flags the new area shall have
+ * @param task Pointer to the task_t structure of the task
+ * @param start Start address of the new region
+ * @param end End address of the new region
+ * @param type Type flags the new region shall have
  *
  * @return
  * - 0 on success
  * - -EINVAL (-22) or -EINVAL (-12) on failure
 */
-int vma_add(size_t start, size_t end, uint32_t flags);
+int vma_add(struct task* task, size_t start, size_t end, uint32_t type);
 
-/** @brief Search for a free memory area
- *
- * @param size Size of requestes VMA in bytes
- * @param flags
- * @return Type flags the new area shall have
- * - 0 on failure
- * - the start address of a free area
- */
-size_t vma_alloc(size_t size, uint32_t flags);
-
-/** @brief Free an allocated memory area
- *
- * @param start Start address of the area to be freed
- * @param end End address of the to be freed
- * @return
- * - 0 on success
- * - -EINVAL (-22) on failure
- */
-int vma_free(size_t start, size_t end);
-
-/** @brief Free all virtual memory areas
- *
- * @return
- * - 0 on success
- */
-int drop_vma_list();
-
-/** @brief Copy the VMA list of the current task to task
+/** @brief Dump information about this task's VMAs into the terminal.
  *
- * @param task The task where the list should be copied to
+ * This will print out Start, end and flags for each VMA in the task's list
+ *
+ * @param task The task's task_t structure
  * @return
  * - 0 on success
 */
-int copy_vma_list(struct task* task);
-
-/** @brief Dump information about this task's VMAs into the terminal. */
-void vma_dump();
+int vma_dump(struct task* task);
 
 #ifdef __cplusplus
 }
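Editor's note: the two branches disagree on this header's interface; vma_kernel drops the task argument and adds vma_alloc()/vma_free()/drop_vma_list()/copy_vma_list(). A hedged kernel-context sketch against the vma_kernel signatures shown above:

/* Sketch (vma_kernel API): reserve a free 3-page area in the virtual
 * address space, use it, then release it again. */
size_t start = vma_alloc(3*PAGE_SIZE, VMA_HEAP);  /* first-fit search */
if (BUILTIN_EXPECT(!start, 0))
	return -ENOMEM;

/* ... map physical pages into [start, start + 3*PAGE_SIZE) ... */

vma_free(start, start + 3*PAGE_SIZE);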
@@ -63,7 +63,7 @@ extern const void bss_end;
 int lowlevel_init(void)
 {
 	// initialize .bss section
-	memset(&bss_start, 0x00, (char*) &bss_end - (char*) &bss_start);
+	memset((void*)&bss_start, 0x00, ((size_t) &bss_end - (size_t) &bss_start));
 
 	koutput_init();
 
|
|||
#include <metalsvm/fs.h>
|
||||
#include <asm/irq.h>
|
||||
#include <asm/irqflags.h>
|
||||
#include <asm/page.h>
|
||||
#include <asm/kb.h>
|
||||
#ifdef CONFIG_ROCKCREEK
|
||||
#include <asm/icc.h>
|
||||
|
@ -74,7 +73,6 @@ int main(void)
|
|||
kprintf("This is MetalSVM %s Build %u, %u\n",
|
||||
METALSVM_VERSION, &__BUILD_DATE, &__BUILD_TIME);
|
||||
popbg();
|
||||
|
||||
system_init();
|
||||
irq_init();
|
||||
timer_init();
|
||||
|
@ -87,7 +85,7 @@ int main(void)
|
|||
icc_init();
|
||||
svm_init();
|
||||
#endif
|
||||
initrd_init();
|
||||
initrd_init();
|
||||
|
||||
irq_enable();
|
||||
|
||||
|
@ -103,7 +101,7 @@ int main(void)
|
|||
disable_timer_irq();
|
||||
#endif
|
||||
|
||||
sleep(2);
|
||||
sleep(5);
|
||||
create_kernel_task(&id, initd, NULL, NORMAL_PRIO);
|
||||
kprintf("Create initd with id %u\n", id);
|
||||
reschedule();
|
||||
|
|
|
@@ -109,7 +109,7 @@ static int sys_open(const char* name, int flags, int mode)
 			curr_task->fildes_table[fd] = NULL;
 			return check;
 		}
 
 	return fd;
 }
@@ -403,7 +403,6 @@ static int sys_sbrk(int incr)
 
 	spinlock_lock(&task->vma_lock);
 
-	// search vma containing the heap
 	tmp = task->vma_list;
 	while(tmp && !((task->end_heap >= tmp->start) && (task->end_heap <= tmp->end)))
 		tmp = tmp->next;
@@ -412,16 +411,11 @@ static int sys_sbrk(int incr)
 	task->end_heap += incr;
 	if (task->end_heap < task->start_heap)
 		task->end_heap = task->start_heap;
 
 	// resize virtual memory area
 	if (tmp && (tmp->end <= task->end_heap))
 		tmp->end = task->end_heap;
 
-	// allocation and mapping of new pages for the heap
-	// is catched by the pagefault handler
-
-	//kprintf("sys_sbrk: tid=%d, start_heap=%8x, end_heap=%8x, incr=%4x\n", task->id, task->start_heap, task->end_heap, incr);
-
 	spinlock_unlock(&task->vma_lock);
 
 	return ret;
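Editor's note: sys_sbrk() above only moves end_heap and stretches the owning VMA; physical pages arrive later, one page fault at a time (the fault handler maps and zeroes each page on first touch). A userspace-visible sketch of that contract, using a hypothetical sbrk wrapper rather than anything from this tree:

#include <stddef.h>

/* Sketch: grow the heap by one page. The kernel's sys_sbrk() returns
 * the old break; the new page is only mapped lazily by the page-fault
 * handler when it is first touched. */
void *grow_heap_one_page(void)
{
	extern void *sbrk(int incr);      /* assumed libc/syscall wrapper */
	char *old_brk = sbrk(4096);
	if (old_brk == (void *) -1)
		return NULL;
	old_brk[0] = 0;  /* first touch faults the page in, zero-filled */
	return old_brk;
}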
110
kernel/tasks.c
@ -78,7 +78,6 @@ DEFINE_PER_CORE(task_t*, current_task, task_table+0);
|
|||
extern const void boot_stack;
|
||||
|
||||
/** @brief helper function for the assembly code to determine the current task
|
||||
*
|
||||
* @return Pointer to the task_t structure of current task
|
||||
*/
|
||||
task_t* get_current_task(void) {
|
||||
|
@ -105,7 +104,7 @@ int multitasking_init(void) {
|
|||
|
||||
mailbox_wait_msg_init(&task_table[0].inbox);
|
||||
memset(task_table[0].outbox, 0x00, sizeof(mailbox_wait_msg_t*)*MAX_TASKS);
|
||||
task_table[0].page_map = get_boot_page_map();
|
||||
task_table[0].pgd = get_boot_pgd();
|
||||
task_table[0].flags = TASK_DEFAULT_FLAGS;
|
||||
task_table[0].prio = IDLE_PRIO;
|
||||
task_table[0].stack = (void*) &boot_stack;
|
||||
|
@ -129,7 +128,7 @@ size_t get_idle_task(uint32_t id)
|
|||
atomic_int32_set(&task_table[id].user_usage, 0);
|
||||
mailbox_wait_msg_init(&task_table[id].inbox);
|
||||
memset(task_table[id].outbox, 0x00, sizeof(mailbox_wait_msg_t*)*MAX_TASKS);
|
||||
task_table[id].page_map = get_boot_page_map();
|
||||
task_table[id].pgd = get_boot_pgd();
|
||||
current_task[id].var = task_table+id;
|
||||
runqueues[id].idle = task_table+id;
|
||||
|
||||
|
@ -194,8 +193,10 @@ static void wakeup_blocked_tasks(int result)
|
|||
spinlock_irqsave_unlock(&table_lock);
|
||||
}
|
||||
|
||||
/** @brief A procedure to be called by procedures which are called by exiting tasks. */
|
||||
/** @brief A procedure to be called by
|
||||
* procedures which are called by exiting tasks. */
|
||||
static void NORETURN do_exit(int arg) {
|
||||
vma_t* tmp;
|
||||
task_t* curr_task = per_core(current_task);
|
||||
uint32_t flags, core_id, fd, status;
|
||||
|
||||
|
@@ -203,14 +204,14 @@ static void NORETURN do_exit(int arg) {

    for (fd = 0; fd < NR_OPEN; fd++) {
        if(curr_task->fildes_table[fd] != NULL) {
            /*
             * Delete a descriptor from the per-process object
             * reference table. If this is not the last reference to the underlying
             * object, the object will be ignored.
             */
            if (curr_task->fildes_table[fd]->count == 1) {
                // try to close the file
                /* try to close the file */
                status = close_fs(curr_task->fildes_table[fd]);
                // close command failed -> return check = errno
                /* close command failed -> return check = errno */
                if (BUILTIN_EXPECT(status < 0, 0))
                    kprintf("Task %u was not able to close file descriptor %i. close_fs returned %d", curr_task->id, fd, -status);
                kfree(curr_task->fildes_table[fd], sizeof(fildes_t));

@@ -229,13 +230,24 @@ static void NORETURN do_exit(int arg) {

    wakeup_blocked_tasks(arg);

    drop_vma_list(); // kfree virtual memory areas and the vma_list
    drop_page_map(); // delete page directory and its page tables
    //vma_dump(curr_task);
    spinlock_lock(&curr_task->vma_lock);

    // remove memory regions
    while((tmp = curr_task->vma_list) != NULL) {
        kfree((void*) tmp->start, tmp->end - tmp->start + 1);
        curr_task->vma_list = tmp->next;
        kfree((void*) tmp, sizeof(vma_t));
    }

    spinlock_unlock(&curr_task->vma_lock);

    drop_pgd(); // delete page directory and its page tables

#if 0
    if (atomic_int32_read(&curr_task->user_usage))
        kprintf("Memory leak! Task %d did not release %d pages\n",
            curr_task->id, atomic_int32_read(&curr_task->user_usage));
#endif
    curr_task->status = TASK_FINISHED;

@@ -250,7 +262,9 @@ static void NORETURN do_exit(int arg) {

    reschedule();

    kprintf("Kernel panic: scheduler on core %d found no valid task\n", CORE_ID);
    while(1) HALT;
    while(1) {
        HALT;
    }
}

/** @brief A procedure to be called by kernel tasks */
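The descriptor loop above only shows the last-reference case; the pattern it implements is plain reference counting. An illustrative helper (put_fildes is a made-up name, the fields and calls match the code above):

    static void put_fildes(task_t* task, int fd)
    {
        fildes_t* file = task->fildes_table[fd];

        if (!file)
            return;
        if (file->count == 1) {         // last owner: really close
            close_fs(file);
            kfree(file, sizeof(fildes_t));
        } else {
            file->count--;              // other tasks still use it
        }
        task->fildes_table[fd] = NULL;
    }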
@@ -313,10 +327,10 @@ static int create_task(tid_t* id, entry_point_t ep, void* arg, uint8_t prio, uin

    if (task_table[i].status == TASK_INVALID) {
        atomic_int32_set(&task_table[i].user_usage, 0);

        ret = create_page_map(task_table+i, 0);
        ret = create_pgd(task_table+i, 0);
        if (ret < 0) {
            ret = -ENOMEM;
            goto out;
            goto create_task_out;
        }

        task_table[i].id = i;

@@ -362,7 +376,7 @@ static int create_task(tid_t* id, entry_point_t ep, void* arg, uint8_t prio, uin

        }
    }

out:
create_task_out:
    spinlock_irqsave_unlock(&table_lock);

    return ret;
@@ -373,7 +387,11 @@ int sys_fork(void)

    int ret = -ENOMEM;
    unsigned int i, core_id, fd_i;
    task_t* parent_task = per_core(current_task);
    vma_t** child;
    vma_t* parent;
    vma_t* tmp;

    spinlock_lock(&parent_task->vma_lock);
    spinlock_irqsave_lock(&table_lock);

    core_id = CORE_ID;

@@ -382,29 +400,46 @@ int sys_fork(void)

    if (task_table[i].status == TASK_INVALID) {
        atomic_int32_set(&task_table[i].user_usage, 0);

        ret = create_page_map(task_table+i, 1);
        ret = create_pgd(task_table+i, 1);
        if (ret < 0) {
            ret = -ENOMEM;
            goto out;
        }

        ret = copy_vma_list(child_task);
        if (BUILTIN_EXPECT(!ret, 0)) {
            ret = -ENOMEM;
            goto out;
            goto create_task_out;
        }

        task_table[i].id = i;
        task_table[i].last_stack_pointer = NULL;
        task_table[i].stack = create_stack();

        // init fildes_table
        spinlock_init(&task_table[i].vma_lock);

        // copy VMA list
        child = &task_table[i].vma_list;
        parent = parent_task->vma_list;
        tmp = NULL;

        while(parent) {
            *child = (vma_t*) kmalloc(sizeof(vma_t));
            if (BUILTIN_EXPECT(!*child, 0))
                break;

            (*child)->start = parent->start;
            (*child)->end = parent->end;
            (*child)->type = parent->type;
            (*child)->prev = tmp;
            (*child)->next = NULL;

            parent = parent->next;
            tmp = *child;
            child = &((*child)->next);
        }

        /* init fildes_table */
        task_table[i].fildes_table = kmalloc(sizeof(filp_t)*NR_OPEN);
        memcpy(task_table[i].fildes_table, parent_task->fildes_table, sizeof(filp_t)*NR_OPEN);
        for (fd_i = 0; fd_i < NR_OPEN; fd_i++) {
        for (fd_i = 0; fd_i < NR_OPEN; fd_i++)
            if ((task_table[i].fildes_table[fd_i]) != NULL)
                task_table[i].fildes_table[fd_i]->count++;
        }

        mailbox_wait_msg_init(&task_table[i].inbox);
        memset(task_table[i].outbox, 0x00, sizeof(mailbox_wait_msg_t*)*MAX_TASKS);

@@ -452,8 +487,9 @@ int sys_fork(void)

        }
    }

out:
create_task_out:
    spinlock_irqsave_unlock(&table_lock);
    spinlock_unlock(&parent_task->vma_lock);

    return ret;
}
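The fork path above copies the parent's VMA list with a pointer-to-pointer cursor, which avoids a special case for the first node. The same idiom in isolation, with illustrative types:

    typedef struct node { int val; struct node* next; } node_t;

    static node_t* list_copy(const node_t* src)
    {
        node_t* head = NULL;
        node_t** dst = &head;       // always points at the link to fill

        while (src) {
            *dst = kmalloc(sizeof(node_t));
            if (BUILTIN_EXPECT(!*dst, 0))
                break;              // partial copy on OOM
            (*dst)->val = src->val;
            (*dst)->next = NULL;
            dst = &((*dst)->next);
            src = src->next;
        }

        return head;
    }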
@@ -643,7 +679,7 @@ static int load_task(load_args_t* largs)

    flags |= VMA_WRITE;
    if (prog_header.flags & PF_X)
        flags |= VMA_EXECUTE;
    vma_add(prog_header.virt_addr, prog_header.virt_addr+npages*PAGE_SIZE-1, flags);
    vma_add(curr_task, prog_header.virt_addr, prog_header.virt_addr+npages*PAGE_SIZE-1, flags);

    if (!(prog_header.flags & PF_W))
        change_page_permissions(prog_header.virt_addr, prog_header.virt_addr+npages*PAGE_SIZE-1, flags);

@@ -672,7 +708,7 @@ static int load_task(load_args_t* largs)

    flags |= VMA_WRITE;
    if (prog_header.flags & PF_X)
        flags |= VMA_EXECUTE;
    vma_add(stack, stack+npages*PAGE_SIZE-1, flags);
    vma_add(curr_task, stack, stack+npages*PAGE_SIZE-1, flags);
    break;
    }
}
@@ -835,11 +871,13 @@ int create_user_task_on_core(tid_t* id, const char* fname, char** argv, uint32_t

int sys_execve(const char* fname, char** argv, char** env)
{
    vfs_node_t* node;
    vma_t* tmp;
    size_t i, buffer_size = 0;
    load_args_t* load_args = NULL;
    char *dest, *src;
    int ret, argc = 0;
    int envc = 0;
    task_t* curr_task = per_core(current_task);

    node = findnode_fs((char*) fname);
    if (!node || !(node->type == FS_FILE))

@@ -882,8 +920,16 @@ int sys_execve(const char* fname, char** argv, char** env)

        while ((*dest++ = *src++) != 0);
    }

    spinlock_lock(&curr_task->vma_lock);

    // remove old program
    drop_vma_list();
    while((tmp = curr_task->vma_list) != NULL) {
        kfree((void*) tmp->start, tmp->end - tmp->start + 1);
        curr_task->vma_list = tmp->next;
        kfree((void*) tmp, sizeof(vma_t));
    }

    spinlock_unlock(&curr_task->vma_lock);

    /*
     * we use a trap gate to enter the kernel
@@ -34,7 +34,13 @@

#define VGA_EARLY_PRINT 1
#define UART_EARLY_PRINT 2

#ifdef CONFIG_VGA
static uint32_t early_print = VGA_EARLY_PRINT;
#elif defined(CONFIG_UART)
static uint32_t early_print = UART_EARLY_PRINT;
#else
static uint32_t early_print = NO_EARLY_PRINT;
#endif
static spinlock_irqsave_t olock = SPINLOCK_IRQSAVE_INIT;
static atomic_int32_t kmsg_counter = ATOMIC_INIT(0);
static unsigned char kmessages[KMSG_SIZE] __attribute__ ((section(".kmsg"))) = {[0 ... KMSG_SIZE-1] = 0x00};

@@ -139,10 +145,6 @@ int koutput_init(void)

{
#ifdef CONFIG_VGA
    vga_init();
    early_print |= VGA_EARLY_PRINT;
#endif
#ifdef CONFIG_UART
    early_print |= UART_EARLY_PRINT;
#endif

    return 0;

@@ -159,11 +161,11 @@ int kputchar(int c)

    kmessages[pos % KMSG_SIZE] = (unsigned char) c;

#ifdef CONFIG_VGA
    if (early_print & VGA_EARLY_PRINT)
    if (early_print == VGA_EARLY_PRINT)
        vga_putchar(c);
#endif
#ifdef CONFIG_UART
    if (early_print & UART_EARLY_PRINT)
    if (early_print == UART_EARLY_PRINT)
        uart_putchar(c);
#endif

@@ -184,11 +186,11 @@ int kputs(const char *str)

    pos = atomic_int32_inc(&kmsg_counter);
    kmessages[pos % KMSG_SIZE] = str[i];
#ifdef CONFIG_VGA
    if (early_print & VGA_EARLY_PRINT)
    if (early_print == VGA_EARLY_PRINT)
        vga_putchar(str[i]);
#endif
#ifdef CONFIG_UART
    if (early_print & UART_EARLY_PRINT)
    if (early_print == UART_EARLY_PRINT)
        uart_putchar(str[i]);
#endif
}
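The change from & to == is not cosmetic: with the bitmask test, koutput_init() can OR several sinks into early_print and write to all of them, while the equality test lets at most one branch ever match. A small demonstration:

    uint32_t early_print = VGA_EARLY_PRINT | UART_EARLY_PRINT; // value 3

    if (early_print & VGA_EARLY_PRINT)  { /* taken */ }
    if (early_print & UART_EARLY_PRINT) { /* taken */ }
    if (early_print == VGA_EARLY_PRINT) { /* not taken: 3 != 1 */ }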
344 mm/memory.c
@@ -37,15 +37,17 @@

#endif

/*
 * Set whole address space as occupied:
 * 0 => free, 1 => occupied
 * 0 => free
 * 1 => occupied
 *
 * Set whole address space as occupied
 */
static uint8_t bitmap[BITMAP_SIZE] = {[0 ... BITMAP_SIZE-1] = 0xFF};
static spinlock_t bitmap_lock = SPINLOCK_INIT;

atomic_int32_t total_pages = ATOMIC_INIT(0);
atomic_int32_t total_allocated_pages = ATOMIC_INIT(0);
atomic_int32_t total_available_pages = ATOMIC_INIT(0);
static uint8_t bitmap[BITMAP_SIZE]; // = {[0 ... BITMAP_SIZE-1] = 0xFF};
static spinlock_t bitmap_lock = SPINLOCK_INIT;
static size_t alloc_start;
atomic_int32_t total_pages = ATOMIC_INIT(0);
atomic_int32_t total_allocated_pages = ATOMIC_INIT(0);
atomic_int32_t total_available_pages = ATOMIC_INIT(0);

/*
 * Note that linker symbols are not variables, they have no memory allocated for

@@ -72,8 +74,8 @@ inline static void page_set_mark(size_t i)

    size_t index = i >> 3;
    size_t mod = i & 0x7;

    if (page_marked(i))
        kprintf("page_set_mark(%u): already marked\n", i);
    //if (page_marked(i))
    //  kprintf("page %u is already marked\n", i);

    bitmap[index] = bitmap[index] | (1 << mod);
}

@@ -84,155 +86,56 @@ inline static void page_clear_mark(size_t i)

    size_t mod = i % 8;

    if (page_unmarked(i))
        kprintf("page_clear_mark(%u): already unmarked\n", i);
        kprintf("page %u is already unmarked\n", i);

    bitmap[index] = bitmap[index] & ~(1 << mod);
}
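A worked example of the bitmap arithmetic in page_set_mark()/page_clear_mark(), as a tiny self-contained test:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint8_t bitmap[2] = { 0, 0 };
        size_t i = 13;                          // page frame number 13

        bitmap[i >> 3] |= 1 << (i & 0x7);       // page_set_mark(13)
        assert(bitmap[1] == 0x20);              // index 1, bit 5

        bitmap[i >> 3] &= ~(1 << (i & 0x7));    // page_clear_mark(13)
        assert(bitmap[1] == 0x00);
        return 0;
    }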
size_t get_pages(uint32_t npages)
{
    // skip first page
    static size_t start = 1;

    uint32_t i, j, l;
    uint32_t k = 0;
    size_t ret = 0;

    if (BUILTIN_EXPECT(!npages, 0))
        return ret;

    if (BUILTIN_EXPECT(npages > atomic_int32_read(&total_available_pages), 0))
        return ret;

    spinlock_lock(&bitmap_lock);
    i = start;
next_try:
    while((k < BITMAP_SIZE) && page_marked(i)) {
        k++;
        i = (i+1) & (BITMAP_SIZE-1);
    }

    if (k >= BITMAP_SIZE)
        goto oom;

    for(j=1; (j<npages) && (i+j < BITMAP_SIZE) && (k < BITMAP_SIZE); j++, k++) {
        if (page_marked(i+j)) {
            i = (i+j) & (BITMAP_SIZE-1);
            goto next_try;
        }
    }

    if (i+j >= BITMAP_SIZE) {
        i = 1;
        goto next_try;
    }

    if (k >= BITMAP_SIZE)
        goto oom;

    ret = i*PAGE_SIZE;
    kprintf("get_pages: ret 0x%x, i = %d, j = %d, npages = %d\n", ret, i, j, npages); // TODO: remove
    for(l=i; l<i+j; l++)
        page_set_mark(l);

    start = i+j;
    spinlock_unlock(&bitmap_lock);

    atomic_int32_add(&total_allocated_pages, npages);
    atomic_int32_sub(&total_available_pages, npages);

    return ret;

oom:
    spinlock_unlock(&bitmap_lock);

    return ret;
}

int put_pages(size_t phyaddr, size_t npages)
{
    if (BUILTIN_EXPECT(!phyaddr || !npages, 0))
        return -EINVAL;

    uint32_t index;
    uint32_t base = phyaddr >> PAGE_SHIFT;

    spinlock_lock(&bitmap_lock);
    for (index=0; index<npages; index++)
        page_clear_mark(base+index);
    spinlock_unlock(&bitmap_lock);

    atomic_int32_sub(&total_allocated_pages, npages);
    atomic_int32_add(&total_available_pages, npages);

    return 0;
}
int mmu_init(void)
{
    size_t kernel_size;
    unsigned int i;
    size_t addr;
    int ret = 0;

    // at first, set default value of the bitmap
    memset(bitmap, 0xFF, sizeof(uint8_t)*BITMAP_SIZE);

#ifdef CONFIG_MULTIBOOT
    if (mb_info) {
        if (mb_info->flags & MULTIBOOT_INFO_MEM_MAP) {
            multiboot_memory_map_t* mmap = (multiboot_memory_map_t*) ((size_t) mb_info->mmap_addr);
            multiboot_memory_map_t* mmap_end = (void*) ((size_t) mb_info->mmap_addr + mb_info->mmap_length);
    if (mb_info && (mb_info->flags & MULTIBOOT_INFO_MEM_MAP)) {
        size_t end_addr;
        multiboot_memory_map_t* mmap = (multiboot_memory_map_t*) ((size_t) mb_info->mmap_addr);
        multiboot_memory_map_t* mmap_end = (void*) ((size_t) mb_info->mmap_addr + mb_info->mmap_length);

        // mark available memory as free
        while (mmap < mmap_end) {
            if (mmap->type == MULTIBOOT_MEMORY_AVAILABLE) {
                for (addr=mmap->addr; addr < mmap->addr + mmap->len; addr += PAGE_SIZE) {
                    page_clear_mark(addr >> PAGE_SHIFT);
                    atomic_int32_inc(&total_pages);
                    atomic_int32_inc(&total_available_pages);
                }
        while (mmap < mmap_end) {
            if (mmap->type == MULTIBOOT_MEMORY_AVAILABLE) {
                /* set the available memory as "unused" */
                addr = mmap->addr;
                end_addr = addr + mmap->len;

                while (addr < end_addr) {
                    page_clear_mark(addr >> PAGE_SHIFT);
                    addr += PAGE_SIZE;
                    atomic_int32_inc(&total_pages);
                    atomic_int32_inc(&total_available_pages);
                }
                mmap++;
            }
            mmap++;
        }
        else if (mb_info->flags & MULTIBOOT_INFO_MEM) {
            size_t page;
            size_t pages_lower = mb_info->mem_lower >> 2;
            size_t pages_upper = mb_info->mem_upper >> 2;

            for (page=0; page<pages_lower; page++)
                page_clear_mark(page);

            for (page=0x100000; page<pages_upper+0x100000; page++)
                page_clear_mark(page);

            atomic_int32_add(&total_pages, pages_lower + pages_upper);
            atomic_int32_add(&total_available_pages, pages_lower + pages_upper);
        }
        else {
            kputs("Unable to initialize the memory management subsystem\n");
            while (1) HALT;
        }

        // mark mb_info as used
        page_set_mark((size_t) mb_info >> PAGE_SHIFT);
        atomic_int32_inc(&total_allocated_pages);
        atomic_int32_dec(&total_available_pages);

        // mark modules list as used
        if (mb_info->flags & MULTIBOOT_INFO_MODS) {
            for(addr=mb_info->mods_addr; addr<mb_info->mods_addr+mb_info->mods_count*sizeof(multiboot_module_t); addr+=PAGE_SIZE) {
                page_set_mark(addr >> PAGE_SHIFT);
                atomic_int32_inc(&total_allocated_pages);
                atomic_int32_dec(&total_available_pages);
            }
    } else {
        kputs("Unable to initialize the memory management subsystem\n");
        while(1) {
            HALT;
        }
    }
#elif defined(CONFIG_ROCKCREEK)
    // of course, the first slots belong to the private memory
    /* of course, the first slots belong to the private memory */
    for(addr=0x00; addr<1*0x1000000; addr+=PAGE_SIZE) {
        page_clear_mark(addr >> PAGE_SHIFT);
        if (addr > addr + PAGE_SIZE)
            break;
        atomic_int32_inc(&total_pages);
        atomic_int32_inc(&total_available_pages);
    }

    // Note: The last slot always belongs to the private memory.

@@ -244,78 +147,71 @@ int mmu_init(void)

    atomic_int32_inc(&total_available_pages);
}

    // mark the bootinfo as used.
    /*
     * Mark the bootinfo as used.
     */
    page_set_mark((size_t)bootinfo >> PAGE_SHIFT);
    atomic_int32_inc(&total_allocated_pages);
    atomic_int32_dec(&total_available_pages);

#else
#error Currently, MetalSVM supports only the Multiboot specification or the RockCreek processor!
#endif

    // mark kernel as used
    for(addr=(size_t) &kernel_start; addr<(size_t) &kernel_end; addr+=PAGE_SIZE) {
        page_set_mark(addr >> PAGE_SHIFT);
        atomic_int32_inc(&total_allocated_pages);
        atomic_int32_dec(&total_available_pages);
    }
    kernel_size = (size_t) &kernel_end - (size_t) &kernel_start;
    if (kernel_size & (PAGE_SIZE-1))
        kernel_size += PAGE_SIZE - (kernel_size & (PAGE_SIZE-1));
    atomic_int32_add(&total_allocated_pages, kernel_size >> PAGE_SHIFT);
    atomic_int32_sub(&total_available_pages, kernel_size >> PAGE_SHIFT);

    /* set kernel space as used */
    for(i=(size_t) &kernel_start >> PAGE_SHIFT; i < (size_t) &kernel_end >> PAGE_SHIFT; i++)
        page_set_mark(i);
    if ((size_t) &kernel_end & (PAGE_SIZE-1))
        page_set_mark(i);

    alloc_start = (size_t) &kernel_end >> PAGE_SHIFT;
    if ((size_t) &kernel_end & (PAGE_SIZE-1))
        alloc_start++;

#if MAX_CORES > 1
    // reserve physical page for SMP boot code
    page_set_mark(SMP_SETUP_ADDR >> PAGE_SHIFT);
    atomic_int32_inc(&total_allocated_pages);
    atomic_int32_dec(&total_available_pages);
    atomic_int32_add(&total_allocated_pages, 1);
    atomic_int32_sub(&total_available_pages, 1);
#endif

    // enable paging and map SMP, VGA, Multiboot modules etc.
    ret = paging_init();
    if (ret) {
        kprintf("Failed to initialize paging: %d\n", ret);
        return ret;
    }

    // add kernel to VMA list
    vma_add((size_t) &kernel_start & PAGE_MASK,
        PAGE_ALIGN((size_t) &kernel_end),
        VMA_READ|VMA_WRITE|VMA_EXECUTE|VMA_CACHEABLE);

    // add LAPIC to VMA list
    vma_add((size_t) &kernel_start - PAGE_SIZE,
        (size_t) &kernel_start,
        VMA_READ|VMA_WRITE);

#if MAX_CORES > 1
    // reserve page for SMP boot code
    vma_add(SMP_SETUP_ADDR & PAGE_MASK,
        PAGE_ALIGN(SMP_SETUP_ADDR + PAGE_SIZE),
        VMA_READ|VMA_WRITE|VMA_EXECUTE|VMA_CACHEABLE);
#endif

#ifdef CONFIG_MULTIBOOT
    /*
     * Modules like the init ram disk are already loaded.
     * Therefore, we set these pages as used.
     */
    if (mb_info) {
        vma_add((size_t) mb_info & PAGE_MASK,
            PAGE_ALIGN((size_t) mb_info + sizeof(multiboot_info_t)),
            VMA_READ|VMA_CACHEABLE);
    if (mb_info && (mb_info->flags & MULTIBOOT_INFO_MODS)) {
        multiboot_module_t* mmodule = (multiboot_module_t*) ((size_t) mb_info->mods_addr);

        if (mb_info->flags & MULTIBOOT_INFO_MODS) {
            multiboot_module_t* mmodule = (multiboot_module_t*) ((size_t) mb_info->mods_addr);
        /*
         * Mark the mb_info as used.
         */
        page_set_mark((size_t)mb_info >> PAGE_SHIFT);
        atomic_int32_inc(&total_allocated_pages);
        atomic_int32_dec(&total_available_pages);

            vma_add((size_t) mb_info->mods_addr & PAGE_MASK,
                PAGE_ALIGN((size_t) mb_info->mods_addr + mb_info->mods_count*sizeof(multiboot_module_t)),
                VMA_READ|VMA_CACHEABLE);
        for(addr = mb_info->mods_addr; addr < mb_info->mods_addr + mb_info->mods_count * sizeof(multiboot_module_t); addr += PAGE_SIZE) {
            page_set_mark(addr >> PAGE_SHIFT);
            atomic_int32_inc(&total_allocated_pages);
            atomic_int32_dec(&total_available_pages);
        }

            for(i=0; i<mb_info->mods_count; i++) {
                vma_add(PAGE_ALIGN(mmodule[i].mod_start),
                    PAGE_ALIGN(mmodule[i].mod_end),
                    VMA_READ|VMA_WRITE|VMA_CACHEABLE);

                for(addr=mmodule[i].mod_start; addr<mmodule[i].mod_end; addr+=PAGE_SIZE) {
                    page_set_mark(addr >> PAGE_SHIFT);
                    atomic_int32_inc(&total_allocated_pages);
                    atomic_int32_dec(&total_available_pages);
                }
        for(i=0; i<mb_info->mods_count; i++, mmodule++) {
            for(addr=mmodule->mod_start; addr<mmodule->mod_end; addr+=PAGE_SIZE) {
                page_set_mark(addr >> PAGE_SHIFT);
                atomic_int32_inc(&total_allocated_pages);
                atomic_int32_dec(&total_available_pages);
            }
        }
    }

@@ -343,8 +239,8 @@ int mmu_init(void)

     * The init ram disk is already loaded.
     * Therefore, we set these pages as used.
     */
    for(addr=bootinfo->addr; addr<bootinfo->addr+bootinfo->size; addr+=PAGE_SIZE) {
        // this area is already mapped, so we need to virt_to_phys() these addresses.
    for(addr=bootinfo->addr; addr < bootinfo->addr+bootinfo->size; addr+=PAGE_SIZE) {
        // This area is already mapped, so we need to virt_to_phys() these addresses.
        page_set_mark(virt_to_phys(addr) >> PAGE_SHIFT);
        atomic_int32_inc(&total_allocated_pages);
        atomic_int32_dec(&total_available_pages);

@@ -354,6 +250,83 @@ int mmu_init(void)

    return ret;
}
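The kernel_size rounding in mmu_init() is the usual power-of-two round-up; it can also be written as a single mask expression:

    /* equivalent, since PAGE_SIZE is a power of two (0x1000 here):
     *   kernel_size = (kernel_size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
     * e.g. 0x1234 -> 0x2000, while 0x2000 stays 0x2000 */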
/*
 * Use first fit algorithm to find a suitable physical memory region
 */
size_t get_pages(uint32_t npages)
{
    uint32_t i, j, l;
    uint32_t k = 0;
    size_t ret = 0;

    if (BUILTIN_EXPECT(!npages, 0))
        return ret;

    if (BUILTIN_EXPECT(npages > atomic_int32_read(&total_available_pages), 0))
        return ret;

    spinlock_lock(&bitmap_lock);
    i = alloc_start;
next_try:
    while((k < BITMAP_SIZE) && page_marked(i)) {
        k++;
        i = (i+1) & (BITMAP_SIZE-1);
    }

    if (k >= BITMAP_SIZE)
        goto oom;

    for(j=1; (j<npages) && (i+j < BITMAP_SIZE) && (k < BITMAP_SIZE); j++, k++) {
        if (page_marked(i+j)) {
            i = (i+j) & (BITMAP_SIZE-1);
            goto next_try;
        }
    }

    if (i+j >= BITMAP_SIZE) {
        i = 0;
        goto next_try;
    }

    if (k >= BITMAP_SIZE)
        goto oom;

    ret = i*PAGE_SIZE;
    //kprintf("alloc: ret 0x%x, i = %d, j = %d, npages = %d\n", ret, i, j, npages);
    for(l=i; l<i+j; l++)
        page_set_mark(l);

    alloc_start = i+j;
    spinlock_unlock(&bitmap_lock);

    atomic_int32_add(&total_allocated_pages, npages);
    atomic_int32_sub(&total_available_pages, npages);

    return ret;

oom:
    spinlock_unlock(&bitmap_lock);

    return ret;
}

int put_page(size_t phyaddr)
{
    uint32_t index = phyaddr >> PAGE_SHIFT;

    if (BUILTIN_EXPECT(!phyaddr, 0))
        return -EINVAL;

    spinlock_lock(&bitmap_lock);
    page_clear_mark(index);
    spinlock_unlock(&bitmap_lock);

    atomic_int32_sub(&total_allocated_pages, 1);
    atomic_int32_add(&total_available_pages, 1);

    return 0;
}
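A minimal usage sketch for the pair above (illustrative, kernel context assumed); get_pages() returns 0 on failure, so 0 doubles as the error value:

    size_t phyaddr = get_pages(1);
    if (phyaddr) {
        /* ... map the frame and use it ... */
        put_page(phyaddr);  // return the frame to the bitmap
    }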
void* mem_allocation(size_t sz, uint32_t flags)
{
    size_t phyaddr, viraddr;

@@ -398,6 +371,7 @@ void kfree(void* addr, size_t sz)

        index = phyaddr >> PAGE_SHIFT;
        page_clear_mark(index);

    }
    spinlock_unlock(&bitmap_lock);
353 mm/vma.c
@@ -1,5 +1,5 @@

/*
 * Copyright 2011 Steffen Vogel, Chair for Operating Systems,
 * Copyright 2011 Stefan Lankes, Chair for Operating Systems,
 * RWTH Aachen University
 *
 * Licensed under the Apache License, Version 2.0 (the "License");

@@ -17,322 +17,85 @@

 * This file is part of MetalSVM.
 */

#include <metalsvm/vma.h>
#include <metalsvm/stdlib.h>
#include <metalsvm/stdio.h>
#include <metalsvm/tasks_types.h>
#include <metalsvm/spinlock.h>
#include <metalsvm/vma.h>
#include <metalsvm/errno.h>

/*
 * Kernel space VMA list and lock
 *
 * For bootstrapping we initialize the VMA list with one empty VMA
 * (start == end) and expand this VMA by calls to vma_alloc()
 * add a new virtual memory region to the list of VMAs
 */
static vma_t vma_boot = { VMA_KERN_MAX, VMA_KERN_MAX, VMA_HEAP };
static vma_t* vma_list = &vma_boot;
static spinlock_t vma_lock = SPINLOCK_INIT;
size_t vma_alloc(size_t size, uint32_t flags)
int vma_add(task_t* task, size_t start, size_t end, uint32_t type)
{
    task_t* task = per_core(current_task);
    spinlock_t* lock;
    vma_t** list;
    size_t ret = 0;

    kprintf("vma_alloc(0x%lx, 0x%x)\n", size, flags);

    size_t base, limit; // boundaries for search
    size_t start, end;

    if (BUILTIN_EXPECT(!size, 0))
        return 0;

    if (flags & VMA_USER) {
        base = VMA_KERN_MAX;
        limit = VMA_USER_MAX;
        list = &task->vma_list;
        lock = &task->vma_lock;
    }
    else {
        base = 0;
        limit = VMA_KERN_MAX;
        list = &vma_list;
        lock = &vma_lock;
    }

    spinlock_lock(lock);

    // "last" fit search for free memory area
    vma_t* pred = *list; // vma before current gap
    vma_t* succ = NULL;  // vma after current gap
    do {
        start = (pred) ? pred->end : base;
        end = (succ) ? succ->start : limit;

        if (end > start && end - start > size)
            break; // we found a gap

        succ = pred;
        pred = (pred) ? pred->prev : NULL;
    } while (pred || succ);

    if (BUILTIN_EXPECT(end > limit || end < start || end - start < size, 0)) {
        spinlock_unlock(lock);
        return 0;
    }

    // resize existing vma
    if (succ && succ->flags == flags) {
        succ->start -= size;
        ret = succ->start;
    }
    // insert new vma
    else {
        vma_t* new = kmalloc(sizeof(vma_t));
        if (BUILTIN_EXPECT(!new, 0))
            return 0;

        new->start = end-size;
        new->end = end;
        new->flags = flags;
        new->next = succ;
        new->prev = pred;

        if (pred)
            pred->next = new;
        if (succ)
            succ->prev = new;
        else
            *list = new;

        ret = new->start;
    }

    spinlock_unlock(lock);
    return ret;
}
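A worked example of the "last" fit search in vma_alloc(), with assumed addresses: given kernel VMAs [0x1000,0x2000) and [0x5000,0x6000) (the latter being the list head) and limit 0x8000, the first gap inspected is (0x6000,0x8000). A request for 0x1000 bytes therefore lands at the top of that gap, i.e. [0x7000,0x8000), either by growing a matching successor VMA downwards or by inserting a new vma_t there.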
int vma_free(size_t start, size_t end)
{
    task_t* task = per_core(current_task);
    spinlock_t* lock;
    vma_t* vma;
    vma_t** list;

    if (BUILTIN_EXPECT(start >= end, 0))
    vma_t* new_vma;

    if (BUILTIN_EXPECT(!task || start > end, 0))
        return -EINVAL;

    if (end <= VMA_KERN_MAX) {
        lock = &vma_lock;
        list = &vma_list;
    }
    else if (start >= VMA_KERN_MAX) {
        lock = &task->vma_lock;
        list = &task->vma_list;
    }
    else
        return -EINVAL;

    if (BUILTIN_EXPECT(!*list, 0))
        return -EINVAL;

    spinlock_lock(lock);

    // search vma
    vma = *list;
    while (vma) {
        if (start >= vma->start && end <= vma->end) break;
        vma = vma->prev;
    }

    if (BUILTIN_EXPECT(!vma, 0)) {
        spinlock_unlock(lock);
        return -EINVAL;
    }

    // free/resize vma
    if (start == vma->start && end == vma->end) {
        if (vma == *list)
            *list = vma->next; // update list head
        if (vma->prev)
            vma->prev->next = vma->next;
        if (vma->next)
            vma->next->prev = vma->prev;
        kfree(vma);
    }
    else if (start == vma->start)
        vma->start = end;
    else if (end == vma->end)
        vma->end = start;
    else {
        vma_t* new = kmalloc(sizeof(vma_t));
        if (BUILTIN_EXPECT(!new, 0)) {
            spinlock_unlock(lock);
            return -ENOMEM;
        }

        new->start = end;
        new->end = vma->end;
        vma->end = start;

        new->next = vma->next;
        new->prev = vma;
        vma->next = new;
    }

    spinlock_unlock(lock);

    return 0;
}
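The four cases vma_free() distinguishes, shown on an existing VMA [0x1000,0x5000) (addresses illustrative):

    vma_free(0x1000, 0x5000);   // exact match: unlink and kfree the VMA
    vma_free(0x1000, 0x2000);   // shrink from the front -> [0x2000,0x5000)
    vma_free(0x4000, 0x5000);   // shrink from the back  -> [0x1000,0x4000)
    vma_free(0x2000, 0x3000);   // split -> [0x1000,0x2000) and [0x3000,0x5000)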
int vma_add(size_t start, size_t end, uint32_t flags)
{
    task_t* task = per_core(current_task);
    spinlock_t* lock;
    vma_t** list;

    kprintf("vma_add(0x%lx, 0x%lx, 0x%x)\n", start, end, flags);

    if (BUILTIN_EXPECT(start >= end, 0))
        return -EINVAL;

    if (flags & VMA_USER) {
        list = &task->vma_list;
        lock = &task->vma_lock;

        // check if address is in userspace
        if (BUILTIN_EXPECT(start < VMA_KERN_MAX, 0))
            return -EINVAL;
    }
    else {
        list = &vma_list;
        lock = &vma_lock;

        // check if address is in kernelspace
        if (BUILTIN_EXPECT(end > VMA_KERN_MAX, 0))
            return -EINVAL;
    }

    spinlock_lock(lock);

    // search gap
    vma_t* pred = *list;
    vma_t* succ = NULL;
    while (pred) {
        if ((!pred || pred->end <= start) &&
            (!succ || succ->start >= end))
            break;

        succ = pred;
        pred = pred->prev;
    }

    // resize existing vma
    if (pred && pred->end == start && pred->flags == flags)
        pred->end = end;
    else if (succ && succ->start == end && succ->flags == flags)
        succ->start = start;
    // insert new vma
    else {
        vma_t* new = kmalloc(sizeof(vma_t));
        if (BUILTIN_EXPECT(!new, 0))
            return 0;

        new->start = start;
        new->end = end;
        new->flags = flags;
        new->next = succ;
        new->prev = pred;

        if (pred)
            pred->next = new;
        if (succ)
            succ->prev = new;
        else
            *list = new;
    }

    spinlock_unlock(lock);

    return 0;
}
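Note the merge behaviour of this vma_add(): a region that starts exactly where an existing VMA with identical flags ends simply extends that VMA instead of allocating a second vma_t. For example (illustrative addresses), with [0x1000,0x2000) already in the list:

    vma_add(0x2000, 0x3000, flags); // extends the VMA to [0x1000,0x3000)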
int copy_vma_list(task_t* task)
{
    task_t* parent_task = per_core(current_task);

    spinlock_init(&task->vma_lock);
    spinlock_lock(&parent_task->vma_lock);
    spinlock_lock(&task->vma_lock);

    int ret = 0;
    vma_t* last = NULL;
    vma_t* parent = parent_task->vma_list;

    while (parent) {
        vma_t *new = kmalloc(sizeof(vma_t));
        if (BUILTIN_EXPECT(!new, 0)) {
            ret = -ENOMEM;
            goto out;
        }

        new->start = parent->start;
        new->end = parent->end;
        new->flags = parent->flags;
        new->prev = last;
        new->next = NULL;

        if (last)
            last->next = new;
        else
            task->vma_list = new;

        last = new;
        parent = parent->next;
    }

out:
    spinlock_unlock(&task->vma_lock);
    spinlock_unlock(&parent_task->vma_lock);
    return ret;
}
int drop_vma_list()
{
    task_t* task = per_core(current_task);
    new_vma = kmalloc(sizeof(vma_t));
    if (!new_vma)
        return -ENOMEM;

    spinlock_lock(&task->vma_lock);

    while(task->vma_list)
        pfree((void*) task->vma_list->start, task->vma_list->end - task->vma_list->start);
    new_vma->start = start;
    new_vma->end = end;
    new_vma->type = type;

    if (!(task->vma_list)) {
        new_vma->next = new_vma->prev = NULL;
        task->vma_list = new_vma;
    } else {
        vma_t* tmp = task->vma_list;

        while (tmp->next && tmp->start < start)
            tmp = tmp->next;

        new_vma->next = tmp->next;
        new_vma->prev = tmp;
        tmp->next = new_vma;
    }

    spinlock_unlock(&task->vma_lock);

    return 0;
}
void vma_dump()
int vma_dump(task_t* task)
{
    void print_vma(vma_t *vma) {
        while (vma) {
            kprintf("0x%lx - 0x%lx: size=%x, flags=%c%c%c\n", vma->start, vma->end, vma->end - vma->start,
                (vma->flags & VMA_READ) ? 'r' : '-',
                (vma->flags & VMA_WRITE) ? 'w' : '-',
                (vma->flags & VMA_EXECUTE) ? 'x' : '-');
            vma = vma->prev;
        }
    vma_t* tmp;

    if (BUILTIN_EXPECT(!task, 0))
        return -EINVAL;

    spinlock_lock(&task->vma_lock);

    tmp = task->vma_list;
    while (tmp) {
        kprintf("%8x - %8x: ", tmp->start, tmp->end);

        if (tmp->type & VMA_READ)
            kputs("r");
        else
            kputs("-");

        if (tmp->type & VMA_WRITE)
            kputs("w");
        else
            kputs("-");

        if (tmp->type & VMA_EXECUTE)
            kputs("x");
        else
            kputs("-");
        kputs("\n");

        tmp = tmp->next;
    }

    task_t* task = per_core(current_task);

    kputs("Kernelspace VMAs:\n");
    spinlock_lock(&vma_lock);
    print_vma(vma_list);
    spinlock_unlock(&vma_lock);

    kputs("Userspace VMAs:\n");
    spinlock_lock(&task->vma_lock);
    print_vma(task->vma_list);
    spinlock_unlock(&task->vma_lock);

    return 0;
}
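Sample output of the master-side vma_dump() for two regions (addresses illustrative, format taken from the kprintf/kputs calls above):

     8048000 -  8049fff: r-x
     804a000 -  804bfff: rw-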
@@ -68,7 +68,7 @@ int main(int argc, char** argv)

        exit(1);
    }
    testdirent = readdir(testdir);
    printf("1. Dirent: %s\n", testdirent->d_name);
    printf("1. Dirent: %s", testdirent->d_name);
    closedir(testdir);

    return errno;
@@ -20,64 +20,41 @@

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <errno.h>
#include <dirent.h>

int print_usage() {
    printf("usage: size mb/kb/b [chunks]\n");
    printf("usage: [size mb/kb/b]");
    exit(0);
}

int main(int argc, char** argv)
{
    int multp = 0;
    int size = 0;
    int chunks = 1;

    void **test;

    if (argc <= 2 || argc > 4)
    int m = 0;
    uint32_t size = 0;
    if(argc <= 2)
        print_usage();

    if(argc == 3) {
        if(!strcmp(argv[2], "mb"))
            m = 1024*1024;
        else if(!strcmp(argv[2], "kb"))
            m = 1024;
        else if(!strcmp(argv[2], "b"))
            m = 1;
        else
            print_usage();
    }
    if(argc > 3)
        print_usage();

    size = atoi(argv[1]);
    if (size <= 0)
    if(size <= 0)
        print_usage();

    if (!strcasecmp(argv[2], "mb"))
        multp = 1024*1024;
    else if (!strcasecmp(argv[2], "kb"))
        multp = 1024;
    else if (!strcasecmp(argv[2], "b"))
        multp = 1;
    else
        print_usage();
    size *= multp;

    if (argc == 4)
        chunks = atoi(argv[3]);

    test = malloc(chunks * sizeof(void *));
    if (!test)
        printf("malloc(%d) - FAILED!\n", chunks * sizeof(void *));

    // allocate...
    int i;
    for (i = 0; i < chunks; i++) {
        test[i] = malloc(size);
        if (test[i])
            printf("malloc(%d)\tCHUNK: %d START: %p END: %p\n", size, i, test[i], test[i] + size);
        else {
            printf("malloc(%d)\tFAILED! Abort allocation, start with freeing memory\n", size);
            break;
        }
    }

    // and release again
    for (i = 0; i < chunks; i++) {
        if (test[i]) {
            free(test[i]);
            printf("free(%p)\tCHUNK: %d\n", test[i], i);
        }
    }

    free(test);
    size *= m;
    uint8_t* test = malloc(size);
    printf("malloc(%d) - START: %p END: %p \n", size, test, test + size);
    return 0;
}
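A possible invocation of the vma_kernel-side variant of this test; the binary name memtest is assumed here, it is not part of the diff:

    $ memtest 4 kb 8
    malloc(4096)    CHUNK: 0 START: 0x... END: 0x...
    ...
    free(0x...)     CHUNK: 0
    ...

Eight 4 KiB chunks are allocated, reported, and released again.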
11 script.gdb
@@ -1,14 +1,7 @@

# Constant part of the script
set disassembly-flavor intel
symbol-file metalsvm.sym
target remote localhost:1234

# Debugging 32bit code
#set architecture i386
#break stublet
#continue

# Debugging 64bit code
#set architecture i386:x86-64
#break main
# Configure breakpoints and everything as you wish here.
break main
continue
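Typical use of this script, assuming the kernel was started under QEMU with the gdbstub flags that match the "target remote localhost:1234" line above (the ELF file name metalsvm.elf is an assumption):

    $ qemu-system-i386 -s -S -kernel metalsvm.elf &
    $ gdb -x script.gdb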