Compare commits
master ... buddy_mall (73 commits)

SHA1:
60f8f53169
7a3e77c82d
9018781eee
0153fb538d
954ccf1379
1e98d0e410
a972efe288
1fea8eb13b
acc6e2124e
de33962e9d
71f55f0a89
76e52aa473
af5fa15d8d
aa1730919e
707d7132c8
79c4f2703e
3cd5a5853b
421e7ec66e
df99b4dfff
0d7aa3d0ca
06877ff108
fa07bdee53
ec171dfcce
892154c9f1
bbb8c5c186
92b2badf71
cdcd9e7d20
14938ef7e1
4b485f5733
9441d21d89
90d884ec8d
fca96e9851
143de82f3d
cd57f5ec28
d59676dbf5
3e73d6384e
3be25b99d2
403c529e8b
9b47b3ef45
e290d41149
f361783f4b
6826e0374d
3ee658d008
5ab075df9b
db21f7cf05
dac9b20c18
40e5d83217
2e230a609e
d275c0a00a
b0749fc448
2f2dd1d3c7
2f02db8dc0
9621509e78
6b7b70903e
2e62ee2966
7ffaec04f2
38eb3d5167
ce66d261b5
e757ac5c08
e731d60256
5424397b47
3c8de24349
1fc3e40c4e
ae1af7a053
16c65de934
b3fa94b0e0
feec2b7de8
e766295d68
9c85f88333
264146a7e1
0376d06594
030ba0d75f
8159ad78d7
42 changed files with 2033 additions and 1292 deletions

Makefile.example (105 changed lines)
@@ -1,8 +1,12 @@
-TOPDIR = $(shell pwd)
-ARCH = x86
-# For 64bit support, you have define BIT as 64
-BIT=32
 NAME = metalsvm
+
+# For 64bit support, you have define BIT as 64
+# Note: do not forget to 'make veryclean' after changing BIT!!!
+BIT=64
+ARCH = x86
+SMP=1
+
+TOPDIR = $(shell pwd)
 LWIPDIRS = lwip/src/arch lwip/src/api lwip/src/core lwip/src/core/ipv4 lwip/src/netif
 DRIVERDIRS = drivers/net drivers/char
 KERNDIRS = libkern kernel mm fs apps arch/$(ARCH)/kernel arch/$(ARCH)/mm arch/$(ARCH)/scc $(LWIPDIRS) $(DRIVERDIRS)

@@ -30,35 +34,56 @@ RANLIB_FOR_TARGET = $(CROSSCOMPREFIX)ranlib
 STRIP_FOR_TARGET = $(CROSSCOMPREFIX)strip
 READELF_FOR_TARGET = $(CROSSCOMPREFIX)readelf

 # Tools
 MAKE = make
 RM = rm -rf
 NASM = nasm
-# For 64bit code, you have to use qemu-system-x86_64
-QEMU = qemu-system-i386
 GDB = gdb

-# For 64bit support, you have to define -felf64 instead of -felf32
-NASMFLAGS = -felf32 -g -i$(TOPDIR)/include/metalsvm/
-INCLUDE = -I$(TOPDIR)/include -I$(TOPDIR)/arch/$(ARCH)/include -I$(TOPDIR)/lwip/src/include -I$(TOPDIR)/lwip/src/include/ipv4 -I$(TOPDIR)/drivers
-# For 64bit support, you have to define "-m64 -mno-red-zone" instead of "-m32 -march=i586"
+ifeq ($(BIT), 32)
+QEMU = qemu-system-i386
+else ifeq ($(BIT), 64)
+QEMU = qemu-system-x86_64
+endif
+
+INCLUDE = -I$(TOPDIR)/include \
+	-I$(TOPDIR)/arch/$(ARCH)/include \
+	-I$(TOPDIR)/lwip/src/include \
+	-I$(TOPDIR)/lwip/src/include/ipv4 \
+	-I$(TOPDIR)/drivers
+
 # Compiler options for final code
-CFLAGS = -g -m32 -march=i586 -Wall -O2 -fstrength-reduce -fomit-frame-pointer -finline-functions -ffreestanding $(INCLUDE) $(STACKPROT)
+CFLAGS = -g -O2 -m$(BIT) -Wall -fomit-frame-pointer -ffreestanding -fstrength-reduce -finline-functions $(INCLUDE) $(STACKPROT)

 # Compiler options for debuging
-#CFLAGS = -g -O -m32 -march=i586 -Wall -fomit-frame-pointer -ffreestanding $(INCLUDE) $(STACKPROT)
+#CFLAGS = -g -O -m$(BIT) -Wall -fomit-frame-pointer -ffreestanding $(INCLUDE) $(STACKPROT)

+NASMFLAGS = -felf$(BIT) -g -i$(TOPDIR)/include/metalsvm/
 ARFLAGS = rsv
 LDFLAGS = -T link$(BIT).ld -z max-page-size=4096 --defsym __BUILD_DATE=$(shell date +'%Y%m%d') --defsym __BUILD_TIME=$(shell date +'%H%M%S')

 STRIP_DEBUG = --strip-debug
 KEEP_DEBUG = --only-keep-debug

 # Do not change to elf64!
 # The Multiboot spec can only boot elf32 binaries
 OUTPUT_FORMAT = -O elf32-i386
-# For 64bit support, you have to define -m64 instead of "-m32 -march=i586"
-CFLAGS_FOR_NEWLIB = -m32 -march=i586 -O2 $(STACKPROT)
-# For 64bit support, you have to define -m64 instead of "-m32 -march=i586"
-LDFLAGS_FOR_NEWLIB = -m32 -march=i586
-# For 64bit support, you have to define -m64 instead of "-m32"
-CFLAGS_FOR_TOOLS = -m32 -O2 -Wall
+
+CFLAGS_FOR_NEWLIB = -m$(BIT) -O2 $(STACKPROT)
+LDFLAGS_FOR_NEWLIB = -m$(BIT)
+CFLAGS_FOR_TOOLS = -m$(BIT) -O2 -Wall
 LDFLAGS_FOR_TOOLS =
-# For 64bit support, you have to define -felf64 instead of -felf32
-NASMFLAGS_FOR_NEWLIB = -felf32
+NASMFLAGS_FOR_NEWLIB = -felf$(BIT)
+
+ifeq ($(BIT), 32)
+CFLAGS += -march=i586
+CFLAGS_FOR_NEWLIB += -march=i586
+LDFLAGS_FOR_NEWLIB += -march=i586
+else ifeq ($(BIT), 64)
+CFLAGS += -mno-red-zone
+endif

 # Prettify output
 V = 0

@@ -68,11 +93,15 @@ ifeq ($V,0)
 endif

 default: all

 all: newlib tools $(NAME).elf

 newlib:
-	$(MAKE) ARCH=$(ARCH) BIT=$(BIT) LDFLAGS="$(LDFLAGS_FOR_NEWLIB)" CFLAGS="$(CFLAGS_FOR_NEWLIB)" NASMFLAGS="$(NASMFLAGS_FOR_NEWLIB)" CC_FOR_TARGET=$(CC_FOR_TARGET) \
+	$(MAKE) ARCH=$(ARCH) BIT=$(BIT) \
+		LDFLAGS="$(LDFLAGS_FOR_NEWLIB)" \
+		CFLAGS="$(CFLAGS_FOR_NEWLIB)" \
+		NASMFLAGS="$(NASMFLAGS_FOR_NEWLIB)" \
+		CC_FOR_TARGET=$(CC_FOR_TARGET) \
 		CXX_FOR_TARGET=$(CXX_FOR_TARGET) \
 		GCC_FOR_TARGET=$(GCC_FOR_TARGET) \
 		AR_FOR_TARGET=$(AR_FOR_TARGET) \

@@ -96,14 +125,23 @@ $(NAME).elf:
 	$Q$(OBJCOPY_FOR_TARGET) $(STRIP_DEBUG) $(OUTPUT_FORMAT) $(NAME).elf

 qemu: newlib tools $(NAME).elf
-	$(QEMU) -monitor stdio -smp 2 -net nic,model=rtl8139 -net user,hostfwd=tcp::12345-:4711 -net dump -kernel metalsvm.elf -initrd tools/initrd.img
+	$(QEMU) -monitor stdio -serial tcp::12346,server,nowait -smp $(SMP) -net nic,model=rtl8139 -net user,hostfwd=tcp::12345-:4711 -kernel metalsvm.elf -initrd tools/initrd.img

 qemudbg: newlib tools $(NAME).elf
-	$(QEMU) -s -S -smp 2 -net nic,model=rtl8139 -net user,hostfwd=tcp::12345-:4711 -net dump -kernel metalsvm.elf -initrd tools/initrd.img
+	$(QEMU) -s -S -nographic -monitor stdio -serial tcp::12346,server -smp $(SMP) -net nic,model=rtl8139 -net user,hostfwd=tcp::12345-:4711 -kernel metalsvm.elf -initrd tools/initrd.img

 gdb: $(NAME).elf
-	make qemudbg > /dev/null &
-	$(GDB) -x script.gdb
+	$(GDB) -q -x script.gdb
+
+debug: newlib tools $(NAME).elf
+	killall $(QEMU) || true
+	killall $(GDB) || true
+	sleep 1
+	gnome-terminal --working-directory=$(TOPDIR) \
+		--tab --title=Shell --command="bash -c 'sleep 1 && telnet localhost 12345'" \
+		--tab --title=QEmu --command="make qemudbg" \
+		--tab --title=GDB --command="make gdb" \
+		--tab --title=Debug --command="bash -c 'sleep 1 && telnet localhost 12346'"

 clean:
 	$Q$(RM) $(NAME).elf $(NAME).sym *~

@@ -112,7 +150,7 @@ clean:

 veryclean: clean
 	$Q$(MAKE) -C newlib veryclean
-	@echo Very cleaned
+	@echo Very cleaned.

 #depend:
 #	for i in $(SUBDIRS); do $(MAKE) -k -C $$i depend; done

@@ -124,16 +162,15 @@ veryclean: clean
 	$Q$(CPP_FOR_TARGET) -MF $*.dep -MT $*.o -MM -D__KERNEL__ $(CFLAGS) $<

 include/metalsvm/config.inc: include/metalsvm/config.h
-	@echo "; This file is generated automatically from the config.h file." > include/metalsvm/config.inc
-	@echo "; Before editing this, you should consider editing config.h." >> include/metalsvm/config.inc
-	@awk '/^#define MAX_CORES/{ print "%define MAX_CORES", $$3 }' include/metalsvm/config.h >> include/metalsvm/config.inc
-	@awk '/^#define KERNEL_STACK_SIZE/{ print "%define KERNEL_STACK_SIZE", $$3 }' include/metalsvm/config.h >> include/metalsvm/config.inc
-	@awk '/^#define CONFIG_VGA/{ print "%define CONFIG_VGA", $$3 }' include/metalsvm/config.h >> include/metalsvm/config.inc
+	@echo "; This file is generated automatically from the config.h file." > $@
+	@echo "; Before editing this, you should consider editing config.h." >> $@
+	@sed -nre 's/^[\t ]*#define[\t ]+([a-z_0-9]+)([\t ]+.*)*/%define \1/ip' $< >> $@
+	@sed -nre 's/^[\t ]*#define[\t ]+([a-z_0-9]+)[\t ]+([a-z_0-9.]+)([\t ]+.*)*/%define \1 \2/ip' $< >> $@

 %.o : %.asm include/metalsvm/config.inc
 	@echo [ASM] $@
 	$Q$(NASM) $(NASMFLAGS) -o $@ $<

-.PHONY: default all clean emu gdb newlib tools
+.PHONY: default all clean qemu qemudbg gdb debug newlib tools

 include $(addsuffix /Makefile,$(SUBDIRS))
@@ -1,4 +1,4 @@
-C_source := tests.c echo.c netio.c jacobi.c laplace.c gfx_client.c gfx_generic.c
+C_source := tests.c echo.c netio.c jacobi.c laplace.c gfx_client.c gfx_generic.c paging.c
 MODULE := apps

 include $(TOPDIR)/Makefile.inc
apps/paging.c (new file, 284 lines)
@@ -0,0 +1,284 @@
/*
 * Copyright 2011 Steffen Vogel, Chair for Operating Systems,
 *                               RWTH Aachen University
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * This file is part of MetalSVM.
 */

#include <metalsvm/stdlib.h>
#include <metalsvm/stdio.h>
#include <metalsvm/stdarg.h>
#include <metalsvm/mmu.h>
#include <metalsvm/time.h>
#include <metalsvm/tasks.h>
#include <metalsvm/vma.h>
#include <metalsvm/malloc.h>

#include <asm/page.h>
#include <asm/processor.h>

#define PAGE_COUNT	10
#define SIZE		(PAGE_COUNT*PAGE_SIZE)
#define VIRT_FROM_ADDR	0x100000000000
#define VIRT_TO_ADDR	0x200000000000

/** @brief Simple helper to format our test results */
static void test(size_t expr, char *fmt, ...)
{
	void _putchar(int c, void *arg) { kputchar(c); } // for kvprintf

	static int c = 1;

	va_list ap;
	va_start(ap, fmt);

	kprintf("%s #%u:\t", (expr) ? "PASSED" : "FAILED", c++);
	kvprintf(fmt, _putchar, NULL, 10, ap);
	kputs("\n");

	va_end(ap);

	if (!expr)
		abort();
}

/** @brief Linear feedback shift register PRNG */
static uint16_t rand()
{
	static uint16_t lfsr = 0xACE1u;
	static uint16_t bit;

	bit = ((lfsr >> 0) ^ (lfsr >> 2) ^ (lfsr >> 3) ^ (lfsr >> 5) ) & 1;
	return lfsr = (lfsr >> 1) | (bit << 15);
}

/** @brief BSD sum algorithm ('sum' Unix command) and used by QEmu */
uint16_t checksum(size_t start, size_t end) {
	size_t addr;
	uint16_t sum;

	for(addr = start, sum = 0; addr < end; addr++) {
		uint8_t val = *((uint8_t *) addr);
		sum = (sum >> 1) | (sum << 15);
		sum += val;
	}

	return sum;
}

static int paging_stage2(void *arg) {
	size_t old, new;

	kprintf("PAGING: entering stage 2...\n");

	old = *((size_t *) arg);
	kprintf("old sum: %lu\n", old);

	new = checksum(VIRT_FROM_ADDR, VIRT_FROM_ADDR + PAGE_COUNT*PAGE_SIZE);
	test(old == new, "checksum(%p, %p) = %lu", VIRT_FROM_ADDR, VIRT_FROM_ADDR + PAGE_COUNT*PAGE_SIZE, new);

	size_t cr3 = read_cr3();
	kprintf("cr3 new = %x\n", cr3);

	return 0;
}

/** @brief Test of the paging subsystem
 *
 * We will map a single physical memory region to two virtual regions.
 * When writing to the first one, we should be able to read the same contents
 * from the second one.
 */
static void paging(void)
{
	size_t c, sum;
	size_t *p1, *p2;
	size_t virt_from, virt_to, virt_alloc;
	size_t phys;

	// allocate physical page frames
	phys = get_pages(PAGE_COUNT);
	test(phys, "get_pages(%lu) = 0x%lx", PAGE_COUNT, phys);

	// create first mapping
	virt_from = map_region(VIRT_FROM_ADDR, phys, PAGE_COUNT, 0);
	test(virt_from, "map_region(0x%lx, 0x%lx, %lu, 0x%x) = 0x%lx", VIRT_FROM_ADDR, phys, PAGE_COUNT, 0, virt_from);

	// check address translation
	phys = virt_to_phys(virt_from);
	test(phys, "virt_to_phys(0x%lx) = 0x%lx", virt_from, phys);

	// write test data
	p1 = (size_t *) virt_from;
	for (c = 0; c < PAGE_COUNT*PAGE_SIZE/sizeof(size_t); c++) {
		p1[c] = c;
	}

	// create second mapping pointing to the same page frames
	virt_to = map_region(VIRT_TO_ADDR, phys, PAGE_COUNT, 0);
	test(virt_to, "map_region(0x%lx, 0x%lx, %lu, 0x%x) = 0x%lx", VIRT_TO_ADDR, phys, PAGE_COUNT, 0, virt_to);

	// check address translation
	phys = virt_to_phys(virt_to);
	test(phys, "virt_to_phys(0x%lx) = 0x%lx", virt_to, phys);

	// check if both mapped areas are equal
	p2 = (size_t *) virt_to;
	for (c = 0; c < PAGE_COUNT*PAGE_SIZE/sizeof(size_t); c++) {
		if (p1[c] != p2[c])
			test(0, "data mismatch: *(%p) != *(%p)", &p1[c], &p2[c]);
	}
	test(1, "data is equal");

	// try to remap without MAP_REMAP
	virt_to = map_region(VIRT_TO_ADDR, phys+PAGE_SIZE, PAGE_COUNT, 0);
	test(!virt_to, "map_region(0x%lx, 0x%lx, %lu, 0x%x) = 0x%lx (without MAP_REMAP flag)", VIRT_TO_ADDR, phys+PAGE_SIZE, PAGE_COUNT, 0, virt_to);

	// try to remap with MAP_REMAP
	virt_to = map_region(VIRT_TO_ADDR, phys+PAGE_SIZE, PAGE_COUNT, MAP_REMAP);
	test(virt_to, "map_region(0x%lx, 0x%lx, %lu, 0x%x) = 0x%lx (with MAP_REMAP flag)", VIRT_TO_ADDR, phys+PAGE_SIZE, PAGE_COUNT, MAP_REMAP, virt_to);

	// check if data is not equal anymore (we remapped with 1 page offset)
	p2 = (size_t *) virt_to;
	for (c = 0; c < PAGE_COUNT*PAGE_SIZE/sizeof(size_t); c++) {
		if (p1[c] == p2[c])
			test(0, "data match at *(%p) != *(%p)", &p1[c], &p2[c]);
	}
	test(1, "data is unequal");

	// test vma_alloc
	virt_alloc = map_region(0, phys, PAGE_COUNT, 0);
	test(virt_alloc, "map_region(0x%lx, 0x%lx, %lu, 0x%x) = 0x%lx", 0, phys, PAGE_COUNT, 0, virt_alloc);

	// data should match against new vm addr
	p2 = (size_t *) virt_alloc;
	for (c = 0; c < PAGE_COUNT*PAGE_SIZE/sizeof(size_t); c++) {
		if (p1[c] != p2[c])
			test(0, "data mismatch at *(%p) != *(%p)", &p1[c], &p2[c]);
	}
	test(1, "data is equal");

	// calc checksum
	sum = checksum(virt_alloc, virt_alloc + PAGE_COUNT*PAGE_SIZE);
	test(sum, "checksum(%p, %p) = %lu", virt_alloc, virt_alloc + PAGE_COUNT*PAGE_SIZE, sum);

	size_t cr3 = read_cr3();
	kprintf("cr3 old = %x\n", cr3);

	//create_kernel_task(0, paging_stage2, &sum, NORMAL_PRIO);
	//sleep(3);
}

/** @brief Test of the VMA allocator */
static void vma(void)
{
	int ret;

	// vma_alloc
	size_t a1 = vma_alloc(SIZE, VMA_HEAP);
	test(a1, "vma_alloc(0x%x, 0x%x) = 0x%lx", SIZE, VMA_HEAP, a1);
	vma_dump();

	size_t a2 = vma_alloc(SIZE, VMA_HEAP|VMA_USER);
	test(a2 != 0, "vma_alloc(0x%x, 0x%x) = 0x%lx", SIZE, VMA_HEAP|VMA_USER, a2);
	vma_dump();

	// vma_add
	ret = vma_add(VIRT_FROM_ADDR, VIRT_FROM_ADDR+SIZE, VMA_HEAP|VMA_USER);
	test(ret >= 0, "vma_add(0x%lx, 0x%lx, 0x%x) = %u", VIRT_FROM_ADDR, VIRT_FROM_ADDR+SIZE, VMA_HEAP|VMA_USER, ret);
	vma_dump();

	ret = vma_add(VIRT_FROM_ADDR+SIZE, VIRT_FROM_ADDR+2*SIZE, VMA_HEAP|VMA_USER);
	test(ret >= 0, "vma_add(0x%lx, 0x%lx, 0x%x) = %u", VIRT_FROM_ADDR+SIZE, VIRT_FROM_ADDR+2*SIZE, VMA_HEAP|VMA_USER, ret);
	vma_dump();

	ret = vma_add(VIRT_FROM_ADDR-SIZE, VIRT_FROM_ADDR, VMA_HEAP|VMA_USER);
	test(ret >= 0, "vma_add(0x%lx, 0x%lx, 0x%x) = %u", VIRT_FROM_ADDR-SIZE, VIRT_FROM_ADDR, VMA_HEAP|VMA_USER, ret);
	vma_dump();

	// vma_free
	ret = vma_free(VIRT_FROM_ADDR-SIZE, VIRT_FROM_ADDR);
	test(ret >= 0, "vma_free(0x%lx, 0x%lx) = %u", VIRT_FROM_ADDR-SIZE, VIRT_FROM_ADDR, ret);
	vma_dump();

	ret = vma_free(VIRT_FROM_ADDR+SIZE, VIRT_FROM_ADDR+2*SIZE);
	test(ret >= 0, "vma_free(0x%lx, 0x%lx) = %u", VIRT_FROM_ADDR+SIZE, VIRT_FROM_ADDR+2*SIZE, ret);
	vma_dump();

	ret = vma_free(VIRT_FROM_ADDR, VIRT_FROM_ADDR+SIZE);
	test(ret >= 0, "vma_free(0x%lx, 0x%lx) = %u", VIRT_FROM_ADDR, VIRT_FROM_ADDR+SIZE, ret);
	vma_dump();
}

/** @brief Test of the kernel malloc allocator */
static void malloc(void)
{
	int i;
	int* p[20];
	int* a;

	// kmalloc() test
	buddy_dump();
	a = kmalloc(SIZE);
	test(a != NULL, "kmalloc(%lu) = %p", SIZE, a);
	buddy_dump();

	// simple write/read test
	for (i=0; i<SIZE/sizeof(int); i++)
		a[i] = i;

	for (i=0; i<SIZE/sizeof(int); i++) {
		if (a[i] != i)
			test(0, "data mismatch: *(%p) != %lu", &a[i], i);
	}
	test(1, "data is equal");

	// kfree() test
	kfree(a);
	test(1, "kfree(%p)", a);
	buddy_dump();

	// some random malloc/free patterns to stress the buddy system
	for (i=0; i<20; i++) {
		uint16_t sz = rand();
		p[i] = kmalloc(sz);
		test(p[i], "kmalloc(%u) = %p", sz, p[i]);
	}
	buddy_dump();

	for (i=0; i<20; i++) {
		kfree(p[i]);
		test(1, "kfree(%p)", p[i]);
	}
	buddy_dump();
}

/** @brief This is a simple procedure to test memory management subsystem */
int memory(void* arg)
{
	kprintf("======== PAGING: test started...\n");
	paging();

	kprintf("======== VMA: test started...\n");
	vma();

	kprintf("======== MALLOC: test started...\n");
	malloc();

	kprintf("======== All tests finished successfull...\n");

	return 0;
}
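Note: the rand() in the new test file above is the classic 16-bit Fibonacci LFSR (taps 16, 14, 13, 11), and checksum() is the rotate-and-add BSD sum. Below is a minimal host-side sketch of both, with the kernel's kprintf replaced by stdio; the harness itself is hypothetical and not part of the commit.

```c
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Same taps as apps/paging.c: bits 0, 2, 3, 5 of the current state,
 * i.e. the polynomial x^16 + x^14 + x^13 + x^11 + 1 in Fibonacci form. */
static uint16_t lfsr_next(uint16_t *lfsr)
{
	uint16_t bit = ((*lfsr >> 0) ^ (*lfsr >> 2) ^ (*lfsr >> 3) ^ (*lfsr >> 5)) & 1;
	return *lfsr = (*lfsr >> 1) | (bit << 15);
}

/* BSD sum over a buffer instead of a raw kernel address range. */
static uint16_t bsd_sum(const uint8_t *buf, size_t len)
{
	uint16_t sum = 0;
	for (size_t i = 0; i < len; i++) {
		sum = (sum >> 1) | (sum << 15); /* rotate right by one bit */
		sum += buf[i];
	}
	return sum;
}

int main(void)
{
	uint16_t state = 0xACE1u; /* same seed as the kernel test */
	for (int i = 0; i < 4; i++)
		printf("rand() = %u\n", (unsigned) lfsr_next(&state));

	uint8_t page[4096] = { 1, 2, 3 };
	printf("bsd sum = %u\n", (unsigned) bsd_sum(page, sizeof(page)));
	return 0;
}
```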
@@ -43,6 +43,7 @@

 int laplace(void* arg);
 int jacobi(void* arg);
+int memory(void* arg);
 void echo_init(void);
 void netio_init(void);

@@ -744,8 +745,7 @@ int test_init(void)
 	create_user_task(NULL, "/bin/jacobi", jacobi_argv);
 	//create_user_task_on_core(NULL, "/bin/jacobi", jacobi_argv, 1);
 #endif
-#ifdef START_MMNIF_TEST
-#if defined(CONFIG_LWIP) && LWIP_SOCKET
+#if defined(START_MMNIF_TEST) && defined(CONFIG_LWIP) && LWIP_SOCKET
 	if (RCCE_IAM == 0) {
 		kprintf("Start /bin/server...\n");
 		create_user_task(NULL, "/bin/server", server_argv);

@@ -755,6 +755,8 @@ int test_init(void)
 		create_user_task(NULL, "/bin/client", client_argv);
 	}
 #endif
+#ifdef START_MEMORY
+	create_kernel_task(NULL, memory, NULL, NORMAL_PRIO);
+#endif

 	return 0;
@@ -46,6 +46,7 @@
 //#define START_HELLO
 //#define START_TESTS
 //#define START_JACOBI
+//#define START_MEMORY

 //#define START_CHIEFTEST
@@ -34,7 +34,7 @@
 //  ____                  _           _
 // / ___| _   _ _ __ ___ | |__   ___ | |___
 // \___ \| | | | '_ ` _ \| '_ \ / _ \| / __|
-//  ___) | |_| | | | | | | |_) | (_) | \__ \
+//  ___) | |_| | | | | | | |_) | (_) | \__
 // |____/ \__, |_| |_| |_|_.__/ \___/|_|___/
 //        |___/
 //

@@ -253,7 +253,7 @@
 //  _____                 _   _
 // |  ___|   _ _ __   ___| |_(_) ___  _ __  ___
 // | |_ | | | | '_ \ / __| __| |/ _ \| '_ \/ __|
-// |  _|| |_| | | | | (__| |_| | (_) | | | \__ \
+// |  _|| |_| | | | | (__| |_| | (_) | | | \__
 // |_|   \__,_|_| |_|\___|\__|_|\___/|_| |_|___/
 //
 // #########################################################################################
@@ -102,7 +102,7 @@ inline static void outportl(unsigned short _port, unsigned int _data)

 inline static void uart_putchar(unsigned char _data)
 {
-	outportb(0x2F8, _data);
+	outportb(UART_PORT, _data);
 }

 /**
@@ -35,9 +35,11 @@

 #ifdef CONFIG_MULTIBOOT

-/* are there modules to do something with? */
+/// Does the bootloader provide mem_* fields?
+#define MULTIBOOT_INFO_MEM	0x00000001
+/// Does the bootloader provide a list of modules?
 #define MULTIBOOT_INFO_MODS	0x00000008
-/* is there a full memory map? */
+/// Does the bootloader provide a full memory map?
 #define MULTIBOOT_INFO_MEM_MAP	0x00000040

 typedef uint16_t multiboot_uint16_t;

@@ -114,7 +116,6 @@ struct multiboot_info
 	multiboot_uint16_t vbe_interface_off;
 	multiboot_uint16_t vbe_interface_len;
 };
-
 typedef struct multiboot_info multiboot_info_t;

 struct multiboot_mmap_entry
@@ -31,49 +31,62 @@
 #include <metalsvm/stddef.h>
 #include <metalsvm/stdlib.h>

-#define _PAGE_BIT_PRESENT	0	/* is present */
-#define _PAGE_BIT_RW		1	/* writeable */
-#define _PAGE_BIT_USER		2	/* userspace addressable */
-#define _PAGE_BIT_PWT		3	/* page write through */
-#define _PAGE_BIT_PCD		4	/* page cache disabled */
-#define _PAGE_BIT_ACCESSED	5	/* was accessed (raised by CPU) */
-#define _PAGE_BIT_DIRTY		6	/* was written to (raised by CPU) */
-#define _PAGE_BIT_PSE		7	/* 4 MB (or 2MB) page */
-#define _PAGE_BIT_PAT		7	/* on 4KB pages */
-#define _PAGE_BIT_GLOBAL	8	/* Global TLB entry PPro+ */
-#define _PAGE_BIT_SVM_STRONG	9	/* mark a virtual address range as used by the SVM system */
-#define _PAGE_BIT_SVM_LAZYRELEASE	10	/* mark a virtual address range as used by the SVM system */
-#define _PAGE_BIT_SVM_INIT	11	/* mark if the MBP proxy is used */
+// 4KB pages
+#define PAGE_SHIFT		12
+
+#ifdef CONFIG_X86_32
+#define PAGE_MAP_LEVELS		2
+#define PAGE_MAP_SHIFT		10
+#elif defined(CONFIG_X86_64)
+#define PAGE_MAP_LEVELS		4
+#define PAGE_MAP_SHIFT		9
+#endif
+
+// base addresses of page map structures
+#ifdef CONFIG_X86_32
+#define PAGE_PGD		0xFFFFF000
+#define PAGE_PGT		0xFFC00000
+#elif defined(CONFIG_X86_64)
+#define PAGE_PML4		0xFFFFFFFFFFFFF000
+#define PAGE_PDPT		0xFFFFFFFFFFE00000
+#define PAGE_PGD		0xFFFFFFFFC0000000
+#define PAGE_PGT		0xFFFFFF8000000000
+#endif
+
+#define PAGE_MAP_ENTRIES	(1 << PAGE_MAP_SHIFT)
+#define PAGE_SIZE		(1 << PAGE_SHIFT)
+#define PAGE_MASK		~(PAGE_SIZE - 1)
+#define PAGE_ALIGN(addr)	(((addr) + PAGE_SIZE - 1) & PAGE_MASK)

 /// Page is present
-#define PG_PRESENT	(1 << _PAGE_BIT_PRESENT)
+#define PG_PRESENT	(1 << 0)
 /// Page is read- and writable
-#define PG_RW		(1 << _PAGE_BIT_RW)
+#define PG_RW		(1 << 1)
 /// Page is addressable from userspace
-#define PG_USER		(1 << _PAGE_BIT_USER)
+#define PG_USER		(1 << 2)
 /// Page write through is activated
-#define PG_PWT		(1 << _PAGE_BIT_PWT)
+#define PG_PWT		(1 << 3)
 /// Page cache is disabled
-#define PG_PCD		(1 << _PAGE_BIT_PCD)
+#define PG_PCD		(1 << 4)
 /// Page was recently accessed (set by CPU)
-#define PG_ACCESSED	(1 << _PAGE_BIT_ACCESSED)
+#define PG_ACCESSED	(1 << 5)
 /// Page is dirty due to recentwrite-access (set by CPU)
-#define PG_DIRTY	(1 << _PAGE_BIT_DIRTY)
+#define PG_DIRTY	(1 << 6)
 /// Big page: 4MB (or 2MB)
-#define PG_PSE		(1 << _PAGE_BIT_PSE)
+#define PG_PSE		(1 << 7)
 /// Page is part of the MPB (SCC specific entry)
 #define PG_MPE		PG_PSE
 /// Global TLB entry (Pentium Pro and later)
-#define PG_GLOBAL	(1 << _PAGE_BIT_GLOBAL)
+#define PG_GLOBAL	(1 << 8)
 /// Pattern flag
-#define PG_PAT		(1 << _PAGE_BIT_PAT)
+#define PG_PAT		(1 << 7)
 /// This virtual address range is used by SVM system as marked
-#define PG_SVM		PG_SVM_STRONG
-#define PG_SVM_STRONG	(1 << _PAGE_BIT_SVM_STRONG)
+#define PG_SVM		(1 << 9)
+#define PG_SVM_STRONG	PG_SVM_STRONG
 /// This virtual address range is used by SVM system as marked
-#define PG_SVM_LAZYRELEASE	(1 << _PAGE_BIT_SVM_LAZYRELEASE)
+#define PG_SVM_LAZYRELEASE	(1 << 10)
 /// Currently, no page frame is behind this page (only the MBP proxy)
-#define PG_SVM_INIT	(1 << _PAGE_BIT_SVM_INIT)
+#define PG_SVM_INIT	(1 << 11)

 /// This is a whole set of flags (PRESENT,RW,ACCESSED,DIRTY) for kernelspace tables
 #define KERN_TABLE	(PG_PRESENT|PG_RW|PG_ACCESSED|PG_DIRTY)

@@ -84,33 +97,14 @@
 /// This is a whole set of flags (PRESENT,RW,USER) for userspace pages
 #define USER_PAGE	(PG_PRESENT|PG_RW|PG_USER)

-#if __SIZEOF_POINTER__ == 4
-#define PGT_ENTRIES	1024
-#elif __SIZEOF_POINTER__ == 8
-#define PGT_ENTRIES	512
-#endif
-
-/** @brief Page table structure
+/** @brief General page map structure
  *
- * This structure keeps page table entries.\n
- * On a 32bit system, a page table consists normally of 1024 entries.
+ * This page map structure is a general type for all indirecton levels.\n
+ * As all page map levels containing the same amount of entries.
  */
-typedef struct page_table
-{
-	/// Page table entries are unsigned 32bit integers.
-	size_t entries[PGT_ENTRIES];
-} page_table_t __attribute__ ((aligned (4096)));
-
-/** @brief Page directory structure
- *
- * This structure keeps page directory entries.\
- * On a 32bit system, a page directory consists normally of 1024 entries.
- */
-typedef struct page_dir
-{
-	/// Page dir entries are unsigned 32bit integers.
-	size_t entries[PGT_ENTRIES];
-} page_dir_t __attribute__ ((aligned (4096)));
+typedef struct page_map {
+	size_t entries[PAGE_MAP_ENTRIES];
+} __attribute__ ((aligned (4096))) page_map_t;

 /** @brief Converts a virtual address to a physical
  *

@@ -192,7 +186,7 @@ int arch_paging_init(void);
 *
 * @return Returns the address of the boot task's page dir array.
 */
-page_dir_t* get_boot_pgd(void);
+page_map_t* get_boot_page_map(void);

/** @brief Setup a new page directory for a new user-level task
 *

@@ -203,18 +197,18 @@ page_dir_t* get_boot_pgd(void);
 * - counter of allocated page tables
 * - -ENOMEM (-12) on failure
 */
-int create_pgd(task_t* task, int copy);
+int create_page_map(task_t* task, int copy);

-/** @brief Delete page directory and its page tables
+/** @brief Delete all page map structures of the current task
 *
- * Puts page tables and page directory back to buffer and
- * sets the task's page directory pointer to NULL
+ * Puts PML4, PDPT, PGD, PGT tables back to buffer and
+ * sets the task's page map pointer to NULL
 *
 * @return
 * - 0 on success
 * - -EINVAL (-22) on failure (in case PGD is still the boot-pgd).
 */
-int drop_pgd(void);
+int drop_page_map(void);

/** @brief Change the page permission in the page tables of the current task
 *
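Note: with the unified page_map_t, one index-extraction rule covers every paging level. A small standalone C sketch (illustration only, not from the patch) of how PAGE_SHIFT and PAGE_MAP_SHIFT decompose a 64-bit virtual address:

```c
#include <stdint.h>
#include <stdio.h>

/* Mirrors the new constants from page.h for the x86_64 case. */
#define PAGE_SHIFT        12
#define PAGE_MAP_SHIFT    9
#define PAGE_MAP_LEVELS   4
#define PAGE_MAP_ENTRIES  (1 << PAGE_MAP_SHIFT)

/* Index into the page map at a given level (level 3 = PML4 ... level 0 = PGT). */
static unsigned map_index(uint64_t vaddr, int level)
{
	return (unsigned) ((vaddr >> (PAGE_SHIFT + level * PAGE_MAP_SHIFT))
	                   & (PAGE_MAP_ENTRIES - 1));
}

int main(void)
{
	uint64_t vaddr = 0x100000000000ULL; /* VIRT_FROM_ADDR from apps/paging.c */
	for (int level = PAGE_MAP_LEVELS - 1; level >= 0; level--)
		printf("level %d index: %u\n", level, map_index(vaddr, level));
	return 0;
}
```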
@@ -273,7 +273,7 @@ int ipi_tlb_flush(void);
 /** @brief Flush a specific page entry in TLB
  * @param addr The (virtual) address of the page to flush
  */
-static inline void tlb_flush_one_page(uint32_t addr)
+static inline void tlb_flush_one_page(size_t addr)
 {
 	asm volatile("invlpg (%0)" : : "r"(addr) : "memory");
 #if MAX_CORES > 1

@@ -293,7 +293,7 @@ static inline void tlb_flush_one_page(uint32_t addr)
  */
 static inline void tlb_flush(void)
 {
-	uint32_t val = read_cr3();
+	size_t val = read_cr3();

 	if (val)
 		write_cr3(val);
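For reference, the uint32_t to size_t change matters because on x86_64 a 64-bit address passed through a 32-bit parameter is silently truncated, so invlpg would flush the wrong page. A hypothetical standalone demonstration of the truncation:

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t vaddr = 0x100000000000ULL;   /* VIRT_FROM_ADDR from the new test */
	uint32_t truncated = (uint32_t) vaddr; /* what the old prototype would see */
	printf("full: %#llx, truncated: %#x\n",
	       (unsigned long long) vaddr, truncated); /* truncated prints 0 */
	return 0;
}
```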
@@ -387,12 +387,14 @@ void smp_start(uint32_t id)
 	kprintf("Application processor %d is entering its idle task\n", apic_cpu_id());

-	// initialize default cpu features
+#ifdef CONFIG_X86_32
+	// initialization for x86_64 is done in smp_entry()
 	cpu_init();
+#endif

 	// use the same gdt like the boot processors
 	gdt_flush();

 	// install IDT
 	idt_install();
@@ -29,7 +29,6 @@
 SECTION .mboot
 global start
 start:
-    mov byte [msg], 'H'
     jmp stublet

 ; This part MUST be 4byte aligned, so we solve that issue using 'ALIGN 4'

@@ -38,10 +37,10 @@ mboot:
 ; Multiboot macros to make a few lines more readable later
 MULTIBOOT_PAGE_ALIGN equ 1<<0
 MULTIBOOT_MEMORY_INFO equ 1<<1
-; MULTIBOOT_AOUT_KLUDGE equ 1<<16
+; MULTIBOOT_AOUT_KLUDGE equ 1<<16
 MULTIBOOT_HEADER_MAGIC equ 0x1BADB002
 MULTIBOOT_HEADER_FLAGS equ MULTIBOOT_PAGE_ALIGN | MULTIBOOT_MEMORY_INFO ; | MULTIBOOT_AOUT_KLUDGE
-MULTIBOOT_CHECKSUM equ -(MULTIBOOT_HEADER_MAGIC + MULTIBOOT_HEADER_FLAGS)
+MULTIBOOT_CHECKSUM equ -(MULTIBOOT_HEADER_MAGIC + MULTIBOOT_HEADER_FLAGS)
 EXTERN code, bss, end

 ; This is the GRUB Multiboot header. A boot signature

@@ -49,8 +48,6 @@ mboot:
     dd MULTIBOOT_HEADER_FLAGS
     dd MULTIBOOT_CHECKSUM

-msg db "?ello from MetalSVM kernel!!", 0
-
 SECTION .text
 ALIGN 4
 stublet:

@@ -70,7 +67,7 @@ stublet:
     ; jump to the boot processors's C code
     extern main
     call main
-    jmp $
+    jmp $ ; infinitive loop

 global cpu_init
 cpu_init:

@@ -112,7 +109,7 @@ global read_ip
 read_ip:
     mov eax, [esp+4]
    pop DWORD [eax] ; Get the return address
-    add esp, 4 ; Dirty Hack! read_ip cleanup the stacl
+    add esp, 4 ; Dirty Hack! read_ip cleanup the stack
     jmp [eax] ; Return. Can't use RET because return
               ; address popped off the stack.
@@ -30,7 +30,7 @@ extern kernel_end
 extern apic_mp

 ; We use a special name to map this section at the begin of our kernel
-; => Multiboot needs its magic number at the begin of the kernel
+; => Multiboot needs its magic number at the beginning of the kernel
 SECTION .mboot
 global start
 start:
@@ -42,19 +42,19 @@ mboot:
 ; Multiboot macros to make a few lines more readable later
 MULTIBOOT_PAGE_ALIGN equ 1<<0
 MULTIBOOT_MEMORY_INFO equ 1<<1
-; MULTIBOOT_AOUT_KLUDGE equ 1<<16
+; MULTIBOOT_AOUT_KLUDGE equ 1<<16
 MULTIBOOT_HEADER_MAGIC equ 0x1BADB002
 MULTIBOOT_HEADER_FLAGS equ MULTIBOOT_PAGE_ALIGN | MULTIBOOT_MEMORY_INFO ; | MULTIBOOT_AOUT_KLUDGE
-MULTIBOOT_CHECKSUM equ -(MULTIBOOT_HEADER_MAGIC + MULTIBOOT_HEADER_FLAGS)
+MULTIBOOT_CHECKSUM equ -(MULTIBOOT_HEADER_MAGIC + MULTIBOOT_HEADER_FLAGS)
 EXTERN code, bss, end

 ; This is the GRUB Multiboot header. A boot signature
 dd MULTIBOOT_HEADER_MAGIC
 dd MULTIBOOT_HEADER_FLAGS
 dd MULTIBOOT_CHECKSUM

 ALIGN 4
-; we need already a valid GDT to switch in the 64bit modus
+; we need already a valid GDT to switch in the 64bit mode
 GDT64:                  ; Global Descriptor Table (64-bit).
 .Null: equ $ - GDT64    ; The null descriptor.
     dw 0                ; Limit (low).
@@ -81,47 +81,39 @@ GDT64: ; Global Descriptor Table (64-bit).
     dw $ - GDT64 - 1 ; Limit.
     dq GDT64         ; Base.

-times 256 DD 0
+times 256 DD 0 ; stack for booting
 startup_stack:

 SECTION .data
 ; create default page tables for the 64bit kernel
-global boot_pgd ; aka PML4
+global boot_pml4
 ALIGN 4096 ; of course, the page tables have to be page aligned
-NOPTS equ 512
-boot_pgd times 512 DQ 0
-boot_pdpt times 512 DQ 0
-boot_pd times 512 DQ 0
-boot_pt times (NOPTS*512) DQ 0
+PAGE_MAP_ENTRIES equ (1<<9)
+PAGE_SIZE equ (1<<12)
+
+boot_pml4 times PAGE_MAP_ENTRIES DQ 0
+boot_pdpt times PAGE_MAP_ENTRIES DQ 0
+boot_pgd times PAGE_MAP_ENTRIES DQ 0
+boot_pgt times (KERNEL_SPACE/PAGE_SIZE) DQ 0

 SECTION .text
 ALIGN 8
 %if MAX_CORES > 1
 global smp_entry
 smp_entry:
-    ; enable caching, disable paging and fpu emulation
-    and eax, 0x1ffffffb
-    ; ...and turn on FPU exceptions
-    or eax, 0x22
-    mov cr0, eax
-    ; clears the current pgd entry
-    xor eax, eax
-    mov cr3, eax
-    ; at this stage, we disable the SSE support
-    mov eax, cr4
-    and eax, 0xfffbf9ff
-    mov cr4, eax
-
-    ; initialize page table
-    mov edi, boot_pgd
+    ; initialize cpu features
+    call cpu_init
+    ; initialize cr3 register
+    mov edi, boot_pml4
     mov cr3, edi

-    ; we need to enable PAE modus
+    ; enable PAE
     mov eax, cr4
     or eax, 1 << 5
     mov cr4, eax

-    ; switch to the compatibility mode (which is part of long mode)
+    ; enable longmode (compatibility mode)
     mov ecx, 0xC0000080
     rdmsr
     or eax, 1 << 8
@@ -129,9 +121,10 @@ smp_entry:
     ; enable paging
     mov eax, cr0
-    or eax, 1 << 31 | 1 << 0 ; Set the PG-bit, which is the 31nd bit, and the PM-bit, which is the 0th bit.
-    mov cr0, eax
+    or eax, 1 << 31 | 1 << 0 ; Set the PG-bit, which is the 31nd bit, and the PE-bit, which is the 0th bit.
+    mov cr0, eax ; According to the multiboot spec the PE-bit has to be set by bootloader already!

     ; jump to 64-bit longmode
     mov edi, [esp+4] ; set argumet for smp_start
     lgdt [GDT64.Pointer] ; Load the 64-bit global descriptor table.
     jmp GDT64.Code:smp_start64 ; Set the code segment and enter 64-bit long mode.
@@ -139,54 +132,39 @@ smp_entry:
     jmp $ ; endless loop
 %endif

-search_apic:
+; search MP Floating Pointer Structure
+search_mps:
     push ebp
     mov ebp, esp
     push ecx

     xor eax, eax
     mov ecx, [ebp+8]
-L1:
+.l1:
     cmp [ecx], DWORD 0x5f504d5f ; MP_FLT_SIGNATURE
-    jne L2
+    jne .l2
     mov al, BYTE [ecx+9]
     cmp eax, 4
-    ja L2
+    ja .l2
     mov al, BYTE [ecx+11]
     cmp eax, 0
-    jne L2
+    jne .l2
     mov eax, ecx
-    jmp L3
+    jmp .l3

-L2:
+.l2:
     add ecx, 4
     cmp ecx, [ebp+12]
-    jb L1
+    jb .l1
     xor eax, eax

-L3:
+.l3:
     pop ecx
     pop ebp
     ret

-ALIGN 4
-stublet:
-    mov esp, startup_stack-4
-    push ebx ; save pointer to the multiboot structure
-    mov eax, cr0
-    ; enable caching, disable paging and fpu emulation
-    and eax, 0x1ffffffb
-    ; ...and turn on FPU exceptions
-    or eax, 0x22
-    mov cr0, eax
-    ; clears the current pgd entry
-    xor eax, eax
-    mov cr3, eax
-    ; at this stage, we disable the SSE support
-    mov eax, cr4
-    and eax, 0xfffbf9ff
-    mov cr4, eax
-    ; do we have the instruction cpuid?
+check_longmode:
+    ; check for cpuid instruction
     pushfd
     pop eax
     mov ecx, eax
@@ -198,59 +176,22 @@ stublet:
     push ecx
     popfd
     xor eax, ecx
-    jz Linvalid
-    ; cpuid > 0x80000000?
+    jz .unsupported
+    ; check for extended cpu features (cpuid > 0x80000000)
     mov eax, 0x80000000
     cpuid
     cmp eax, 0x80000001
-    jb Linvalid ; It is less, there is no long mode.
-    ; do we have a long mode?
+    jb .unsupported ; It is less, there is no long mode.
+    ; check if longmode is supported
     mov eax, 0x80000001
     cpuid
     test edx, 1 << 29 ; Test if the LM-bit, which is bit 29, is set in the D-register.
-    jz Linvalid ; They aren't, there is no long mode.
-
-    ; initialize page table
-    mov edi, boot_pgd
-    mov cr3, edi
-
-    ; So lets make PML4T[0] point to the PDPT and so on:
-    mov DWORD [edi], boot_pdpt ; Set the double word at the destination index to pdpt.
-    or DWORD [edi], 0x00000003 ; Set present and writeable bit
-    mov edi, boot_pdpt
-    mov DWORD [edi], boot_pd ; Set the double word at the destination index to pd.
-    or DWORD [edi], 0x00000003 ; Set present and writeable bit
-    mov edi, boot_pd
-    mov ebx, boot_pt
-    mov ecx, NOPTS
-L0:
-    mov DWORD [edi], ebx ; Set the double word at the destination index to pt.
-    or DWORD [edi], 0x00000003 ; Set present and writeable bit
-    add edi, 8
-    add ebx, 0x1000
-    loop L0
-
-%ifdef CONFIG_VGA
-    ; map the VGA address into the virtual address space
-    mov edi, 0xB8000
-    shr edi, 9 ; (edi >> 12) * 8
-    add edi, boot_pt
-    mov ebx, 0xB8000
-    or ebx, 0x00000013
-    mov DWORD [edi], ebx
-%endif
-
-    ; map multiboot structure into the virtual address space
-    mov edi, [esp]
-    and edi, 0xFFFFF000
-    shr edi, 9 ; (edi >> 12) * 8
-    add edi, boot_pt
-    mov ebx, [esp]
-    and ebx, 0xFFFFF000
-    or ebx, 0x00000003
-    mov DWORD [edi], ebx
+    jz .unsupported ; They aren't, there is no long mode.
+    ret
+.unsupported:
+    jmp $

+; check if lapic is available
+check_lapic:
     push eax
     push ebx
     push ecx
@@ -259,82 +200,149 @@ L0:
     cpuid
     and edx, 0x200
     cmp edx, 0
-    je no_lapic
-    ; map lapic at 0xFEE00000 below the kernel
+    je .unsupported
+    ; map lapic at 0xFEE00000 below the kernel
     mov edi, kernel_start - 0x1000
     shr edi, 9 ; (edi >> 12) * 8
-    add edi, boot_pt
-    mov ebx, 0xFEE00000
+    add edi, boot_pgt
+    mov ebx, 0xFEE00000 ; LAPIC base address
     or ebx, 0x00000013
     mov DWORD [edi], ebx
-no_lapic:
+.unsupported:
     pop edx
     pop ecx
     pop ebx
     pop eax
     ret

-; search APIC
+cpu_init:
+    mov eax, cr0
+    ; enable caching, disable paging and fpu emulation
+    and eax, 0x1ffffffb
+    ; ...and turn on FPU exceptions
+    or eax, 0x22
+    mov cr0, eax
+    ; clears the current pgd entry
+    xor eax, eax
+    mov cr3, eax
+    ; at this stage, we disable the SSE support
+    mov eax, cr4
+    and eax, 0xfffbf9ff
+    mov cr4, eax
+    ret
+
+; identity map a single page at address eax
+identity_page:
+    push edi
+    push ebx
+
+    and eax, 0xFFFFF000
+    mov edi, eax
+    shr edi, 9 ; (edi >> 12) * 8 (index for boot_pgt)
+    add edi, boot_pgt
+    mov ebx, eax
+    or ebx, 0x13 ; set present, writable and cache disable bits
+    mov DWORD [edi], ebx
+
+    pop ebx
+    pop edi
+    ret

+ALIGN 4
+stublet:
+    mov esp, startup_stack-4
+    ; save pointer to the Multiboot structure
+    push ebx
+    ; initialize cpu features
+    call cpu_init
+    ; check if longmode is supported
+    call check_longmode
+    ; check if lapic is available
+    call check_lapic

+    ; find MP Floating Pointer structure
     push DWORD 0x100000
     push DWORD 0xF0000
-    call search_apic
+    call search_mps
     add esp, 8

     cmp eax, 0
-    jne La
+    jne map_mps

     push DWORD 0xA0000
     push DWORD 0x9F000
-    call search_apic
+    call search_mps
     add esp, 8

     cmp eax, 0
-    je Lb
+    je map_kernel

-La:
+; map MP Floating Pointer Structure
+map_mps:
+    ; map MP Floating Pointer structure
     mov DWORD [apic_mp], eax
-    mov edi, eax
-    and edi, 0xFFFFF000
-    shr edi, 9 ; (edi >> 12) * 8
-    add edi, boot_pt
-    mov ebx, eax
-    and ebx, 0xFFFFF000
-    or ebx, 0x00000013
-    mov DWORD [edi], ebx
+    call identity_page

-    ; map mp_config
-    mov edi, [eax+4]
-    and edi, 0xFFFFF000
-    shr edi, 9 ; (edi >> 12) * 8
-    add edi, boot_pt
-    mov ebx, [eax+4]
-    and ebx, 0xFFFFF000
-    or ebx, 0x00000013
-    mov DWORD [edi], ebx
+    ; map MP Configuration table
+    mov eax, [apic_mp+4]
+    call identity_page

-Lb:
+%ifdef CONFIG_VGA
+    ; map VGA textmode plane
+    mov eax, 0xB8000
+    call identity_page
+%endif

+    ; map Multiboot structure
+    mov eax, [esp] ; pointer is still on the stack
+    call identity_page
+
+map_kernel:
     mov edi, kernel_start
-    shr edi, 9 ; (kernel_start >> 12) * 8
-    add edi, boot_pt
+    shr edi, 9 ; (edi >> 12) * 8 (index for boot_pgt)
+    add edi, boot_pgt
     mov ebx, kernel_start
-    or ebx, 0x00000003
+    or ebx, 0x00000003 ; set present and writable flags
     mov ecx, kernel_end ; determine kernel size in number of pages
     sub ecx, kernel_start
     shr ecx, 12
     inc ecx

-Lc:
-    mov DWORD [edi], ebx ; Set the double word at the destination index to the B-register.
+.l1:
+    mov DWORD [edi], ebx
     add edi, 8
     add ebx, 0x1000
-    loop Lc
+    loop .l1

-    ; we need to enable PAE modus
+init_paging:
+    mov edi, boot_pml4
+    mov cr3, edi
+
+    mov DWORD [edi], boot_pdpt
+    or DWORD [edi], 0x07 ; set present, user and writable flags
+
+    add edi, (PAGE_MAP_ENTRIES-1)*8 ; setup recursive paging
+    mov DWORD [edi], boot_pml4 ; boot_pml4[511] -> boot_pml4
+    or DWORD [edi], 0x03 ; set present and writable flags
+
+    mov edi, boot_pdpt
+    mov DWORD [edi], boot_pgd
+    or DWORD [edi], 0x03 ; set present and writable flags
+
+    mov edi, boot_pgd
+    mov ebx, boot_pgt
+    mov ecx, PAGE_MAP_ENTRIES ; map all boot_pgt to the kernel space
+.l1:
+    mov DWORD [edi], ebx
+    or DWORD [edi], 0x03 ; set present and writable flags
+    add edi, 8
+    add ebx, 0x1000
+    loop .l1
+
+    ; enable PAE
     mov eax, cr4
     or eax, 1 << 5
     mov cr4, eax

-    ; switch to the compatibility mode (which is part of long mode)
+    ; enable longmode (compatibility mode)
     mov ecx, 0xC0000080
     rdmsr
     or eax, 1 << 8
@@ -342,16 +350,14 @@ Lc:
     ; enable paging
     mov eax, cr0
-    or eax, 1 << 31 | 1 << 0 ; Set the PG-bit, which is the 31nd bit, and the PM-bit, which is the 0th bit.
+    or eax, 1 << 31 | 1 << 0 ; Set the PG-bit, which is the 31nd bit, and the PE-bit, which is the 0th bit.
     mov cr0, eax

     ; jump to 64-bit longmode
+    pop ebx ; restore pointer to multiboot structure
     lgdt [GDT64.Pointer] ; Load the 64-bit global descriptor table.
     jmp GDT64.Code:start64 ; Set the code segment and enter 64-bit long mode.

-Linvalid:
-    jmp $
-
 [BITS 64]
 start64:
     ; initialize segment registers
@@ -389,23 +395,6 @@ smp_start64:
     jmp $
 %endif

-global cpu_init
-cpu_init:
-;    mov eax, cr0
-;    ; enable caching, disable paging and fpu emulation
-;    and eax, 0x1ffffffb
-;    ; ...and turn on FPU exceptions
-;    or eax, 0x22
-;    mov cr0, eax
-;    ; clears the current pgd entry
-;    xor eax, eax
-;    mov cr3, eax
-;    ; at this stage, we disable the SSE support
-;    mov eax, cr4
-;    and eax, 0xfffbf9ff
-;    mov cr4, eax
-;    ret
-
 ; This will set up our new segment registers and is declared in
 ; C as 'extern void gdt_flush();'
 global gdt_flush
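Note: the `boot_pml4[511] -> boot_pml4` entry set up in init_paging above establishes recursive paging, which is what makes the fixed PAGE_PML4/PAGE_PGT base addresses in page.h work: one pass through the self-referencing slot maps all page tables starting at PAGE_PGT. A hypothetical C sketch of the resulting address arithmetic; it uses the same (addr >> 12) * 8 scaling as the `shr edi, 9` trick in identity_page:

```c
#include <stdint.h>
#include <stdio.h>

/* Self-map base addresses from the new page.h (PML4 slot 511 points at itself). */
#define PAGE_PML4 0xFFFFFFFFFFFFF000ULL
#define PAGE_PGT  0xFFFFFF8000000000ULL

/* Virtual address of the PGT entry that maps vaddr: the 36 table-index bits
 * of vaddr, shifted down by one level and scaled by the 8-byte entry size,
 * become an offset below the PAGE_PGT window. */
static uint64_t pgt_entry_vaddr(uint64_t vaddr)
{
	return PAGE_PGT + (((vaddr >> 12) & 0xFFFFFFFFFULL) * 8);
}

int main(void)
{
	printf("PGT entry for 0x100000000000 at %#llx\n",
	       (unsigned long long) pgt_entry_vaddr(0x100000000000ULL));
	return 0;
}
```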
@@ -50,7 +50,7 @@ size_t* get_current_stack(void)
 #endif

 	// use new page table
-	write_cr3(virt_to_phys((size_t)curr_task->pgd));
+	write_cr3(virt_to_phys((size_t)curr_task->page_map));

 	return curr_task->last_stack_pointer;
 }
@@ -37,7 +37,7 @@ void kb_init(size_t size, tid_t tid) {
 }

 void kb_finish(void) {
-	kfree(kb_buffer.buffer, (kb_buffer.maxsize * sizeof(char)));
+	kfree(kb_buffer.buffer);
 	kb_buffer.buffer = NULL;
 	kb_buffer.size = 0;
 	kb_buffer.maxsize = 0;
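Note: kfree() losing its size argument fits the switch to the buddy allocator this branch is named after; the allocator can now recover a block's size itself, so callers no longer pass it. One common way to do that is a small header in front of each returned block. A toy illustration of the idea (an assumption about the internals, not code from this branch):

```c
#include <stdlib.h>

/* Hypothetical sketch: stash the allocation size in a header just before the
 * block handed to the caller, so free() needs only the pointer. */
typedef struct { size_t size; } alloc_header_t;

static void *toy_malloc(size_t size)
{
	alloc_header_t *hdr = malloc(sizeof(*hdr) + size);
	if (!hdr)
		return NULL;
	hdr->size = size;      /* recorded for the matching free */
	return hdr + 1;        /* caller sees only the payload */
}

static void toy_free(void *p)
{
	if (p)
		free((alloc_header_t *) p - 1); /* size is recoverable from the header */
}
```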
@@ -42,38 +42,4 @@ L3:
     pop rax
     ret

-%if 0
-; The following function is derived from JamesM's kernel development tutorials
-; (http://www.jamesmolloy.co.uk/tutorial_html/)
-global copy_page_physical
-copy_page_physical:
-    push esi ; According to __cdecl, we must preserve the contents of ESI
-    push edi ; and EDI.
-    pushf    ; push EFLAGS, so we can pop it and reenable interrupts
-             ; later, if they were enabled anyway.
-    cli      ; Disable interrupts, so we aren't interrupted.
-             ; Load these in BEFORE we disable paging!
-
-    mov edi, [esp+12+4] ; Destination address
-    mov esi, [esp+12+8] ; Source address
-
-    mov edx, cr0 ; Get the control register...
-    and edx, 0x7fffffff ; and...
-    mov cr0, edx ; Disable paging.
-
-    cld
-    mov ecx, 0x400 ; 1024*4bytes = 4096 bytes = page size
-    rep movsd ; copy page
-
-    mov edx, cr0 ; Get the control register again
-    or edx, 0x80000000 ; and...
-    mov cr0, edx ; Enable paging.
-
-    popf ; Pop EFLAGS back.
-    pop edi ; Get the original value of EDI
-    pop esi ; and ESI back.
-    ret
-
-%endif
-
 SECTION .note.GNU-stack noalloc noexec nowrite progbits
@ -46,7 +46,7 @@
|
|||
* 0x00100000 - 0x0DEADFFF: Kernel (size depends on the configuration) (221MB)
|
||||
* 0x0DEAE000 - 0x3FFFEFFF: Kernel heap (801MB)
|
||||
* 0x3FFFF000 - 0x3FFFFFFF: Page Tables are mapped in this region (4KB)
|
||||
* (The last 256 entries belongs to kernel space)
|
||||
* (The first 256 entries belongs to kernel space)
|
||||
*/
|
||||
|
||||
/*
|
||||
|
@ -57,13 +57,14 @@ extern const void kernel_start;
|
|||
extern const void kernel_end;
|
||||
|
||||
// boot task's page directory and page directory lock
|
||||
static page_dir_t boot_pgd = {{[0 ... PGT_ENTRIES-1] = 0}};
|
||||
static page_table_t pgt_container = {{[0 ... PGT_ENTRIES-1] = 0}};
|
||||
static page_table_t boot_pgt[KERNEL_SPACE/(1024*PAGE_SIZE)];
|
||||
static page_map_t boot_pgd = {{[0 ... MAP_ENTRIES-1] = 0}};
|
||||
static page_map_t boot_pgt[KERNEL_SPACE/(MAP_ENTRIES*PAGE_SIZE)];
|
||||
static page_map_t pgt_container = {{[0 ... MAP_ENTRIES-1] = 0}};
|
||||
|
||||
static spinlock_t kslock = SPINLOCK_INIT;
|
||||
static int paging_enabled = 0;
|
||||
|
||||
page_dir_t* get_boot_pgd(void)
|
||||
page_map_t* get_boot_page_map(void)
|
||||
{
|
||||
return &boot_pgd;
|
||||
}
|
||||
|
@@ -71,26 +72,26 @@ page_dir_t* get_boot_pgd(void)
 /*
  * TODO: We create a full copy of the current task. Copy-On-Access will be the better solution.
  *
- * No PGD locking is needed because onls create_pgd use this function and holds already the
+ * No PGD locking is needed because only create_page_map use this function and holds already the
  * PGD lock.
  */
-inline static size_t copy_page_table(task_t* task, uint32_t pgd_index, page_table_t* pgt, int* counter)
+inline static size_t copy_page_table(task_t* task, uint32_t pgd_index, page_map_t* pgt, int* counter)
 {
 	uint32_t i;
-	page_table_t* new_pgt;
+	page_map_t* new_pgt;
 	size_t phyaddr;

 	if (BUILTIN_EXPECT(!pgt, 0))
 		return 0;

-	new_pgt = kmalloc(sizeof(page_table_t));
+	new_pgt = kmalloc(sizeof(page_map_t));
 	if (!new_pgt)
 		return 0;
-	memset(new_pgt, 0x00, sizeof(page_table_t));
+	memset(new_pgt, 0x00, sizeof(page_map_t));
 	if (counter)
 		(*counter)++;

-	for(i=0; i<PGT_ENTRIES; i++) {
+	for(i=0; i<MAP_ENTRIES; i++) {
 		if (pgt->entries[i] & PAGE_MASK) {
 			if (!(pgt->entries[i] & PG_USER)) {
 				// Kernel page => copy only page entries

@@ -117,11 +118,11 @@ inline static size_t copy_page_table(task_t* task, uint32_t pgd_index, page_tabl
 	return phyaddr;
 }

-int create_pgd(task_t* task, int copy)
+int create_page_map(task_t* task, int copy)
 {
-	page_dir_t* pgd;
-	page_table_t* pgt;
-	page_table_t* pgt_container;
+	page_map_t* pgd;
+	page_map_t* pgt;
+	page_map_t* pgt_container;
 	uint32_t i;
 	uint32_t index1, index2;
 	size_t viraddr, phyaddr;
@@ -133,25 +134,26 @@ int create_pgd(task_t* task, int copy)

 	// we already know the virtual address of the "page table container"
 	// (see file header)
-	pgt_container = (page_table_t*) ((KERNEL_SPACE - PAGE_SIZE) & PAGE_MASK);
+	pgt_container = (page_map_t*) ((KERNEL_SPACE - PAGE_SIZE) & PAGE_MASK);

 	// create new page directory for the new task
-	pgd = kmalloc(sizeof(page_dir_t));
+	pgd = kmalloc(sizeof(page_map_t));
 	if (!pgd)
 		return -ENOMEM;
-	memset(pgd, 0x00, sizeof(page_dir_t));
+	memset(pgd, 0x00, sizeof(page_map_t));

 	// create a new "page table container" for the new task
-	pgt = kmalloc(sizeof(page_table_t));
+	pgt = kmalloc(sizeof(page_map_t));
 	if (!pgt) {
-		kfree(pgd, sizeof(page_dir_t));
+		kfree(pgd, sizeof(page_map_t));
 		return -ENOMEM;
 	}
-	memset(pgt, 0x00, sizeof(page_table_t));
+	memset(pgt, 0x00, sizeof(page_map_t));

 	// copy kernel tables
 	spinlock_lock(&kslock);

-	for(i=0; i<PGT_ENTRIES; i++) {
+	for(i=0; i<MAP_ENTRIES; i++) {
 		pgd->entries[i] = boot_pgd.entries[i];
 		// only kernel entries will be copied
 		if (pgd->entries[i] && !(pgd->entries[i] & PG_USER))

@@ -169,36 +171,33 @@ int create_pgd(task_t* task, int copy)
 	pgd->entries[index1] = ((size_t) virt_to_phys((size_t) pgt) & PAGE_MASK)|KERN_TABLE;
 	pgt->entries[index2] = ((size_t) virt_to_phys((size_t) pgt) & PAGE_MASK)|KERN_PAGE;

-	task->pgd = pgd;
+	task->page_map = pgd;

 	if (copy) {
-		spinlock_irqsave_lock(&curr_task->pgd_lock);
+		spinlock_irqsave_lock(&curr_task->page_lock);

 		for (i=KERNEL_SPACE/(1024*PAGE_SIZE); i<1024; i++) {
-			if (!(curr_task->pgd->entries[i]))
+			if (!(curr_task->page_map->entries[i]))
 				continue;
-			if (!(curr_task->pgd->entries[i] & PG_USER))
+			if (!(curr_task->page_map->entries[i] & PG_USER))
 				continue;

-			phyaddr = copy_page_table(task, i, (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + i*PAGE_SIZE) & PAGE_MASK), &counter);
+			phyaddr = copy_page_table(task, i, (page_map_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + i*PAGE_SIZE) & PAGE_MASK), &counter);
 			if (phyaddr) {
-				pgd->entries[i] = (phyaddr & PAGE_MASK) | (curr_task->pgd->entries[i] & 0xFFF);
+				pgd->entries[i] = (phyaddr & PAGE_MASK) | (curr_task->page_map->entries[i] & 0xFFF);
 				pgt->entries[i] = (phyaddr & PAGE_MASK) | KERN_PAGE;
 			}
 		}

-		spinlock_irqsave_unlock(&curr_task->pgd_lock);
+		spinlock_irqsave_unlock(&curr_task->page_lock);
 	}

 	return counter;
 }

 /*
  * drops all page frames and the PGD of a user task
  */
-int drop_pgd(void)
+int drop_page_map(void)
 {
-	page_dir_t* pgd = per_core(current_task)->pgd;
+	page_map_t* pgd = per_core(current_task)->page_map;
 	size_t phy_pgd = virt_to_phys((size_t) pgd);
 	task_t* task = per_core(current_task);
 	uint32_t i;
@@ -206,9 +205,9 @@ int drop_pgd(void)
 	if (BUILTIN_EXPECT(pgd == &boot_pgd, 0))
 		return -EINVAL;

-	spinlock_irqsave_lock(&task->pgd_lock);
+	spinlock_irqsave_lock(&task->page_lock);

-	for(i=0; i<PGT_ENTRIES; i++) {
+	for(i=0; i<MAP_ENTRIES; i++) {
 		if (pgd->entries[i] & PG_USER) {
 			put_page(pgd->entries[i] & PAGE_MASK);
 			pgd->entries[i] = 0;

@@ -218,9 +217,9 @@ int drop_pgd(void)
 	// freeing the page directory
 	put_page(phy_pgd);

-	task->pgd = NULL;
+	task->page_map = NULL;

-	spinlock_irqsave_unlock(&task->pgd_lock);
+	spinlock_irqsave_unlock(&task->page_lock);

 	return 0;
 }

@@ -229,24 +228,24 @@ size_t virt_to_phys(size_t viraddr)
 {
 	task_t* task = per_core(current_task);
 	uint32_t index1, index2;
-	page_table_t* pgt;
+	page_map_t* pgt;
 	size_t ret = 0;

 	if (!paging_enabled)
 		return viraddr;

-	if (BUILTIN_EXPECT(!task || !task->pgd, 0))
+	if (BUILTIN_EXPECT(!task || !task->page_map, 0))
 		return 0;

-	spinlock_irqsave_lock(&task->pgd_lock);
+	spinlock_irqsave_lock(&task->page_lock);

 	index1 = viraddr >> 22;
 	index2 = (viraddr >> 12) & 0x3FF;

-	if (!(task->pgd->entries[index1] & PAGE_MASK))
+	if (!(task->page_map->entries[index1] & PAGE_MASK))
 		goto out;

-	pgt = (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & PAGE_MASK);
+	pgt = (page_map_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & PAGE_MASK);
 	if (!pgt || !(pgt->entries[index2]))
 		goto out;
@@ -255,7 +254,7 @@ size_t virt_to_phys(size_t viraddr)
 out:
 	//kprintf("vir %p to phy %p\n", viraddr, ret);

-	spinlock_irqsave_unlock(&task->pgd_lock);
+	spinlock_irqsave_unlock(&task->page_lock);

 	return ret;
 }

@@ -263,11 +262,11 @@ out:
 size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flags)
 {
 	task_t* task = per_core(current_task);
-	page_table_t* pgt;
+	page_map_t* pgt;
 	size_t index, i;
 	size_t ret;

-	if (BUILTIN_EXPECT(!task || !task->pgd, 0))
+	if (BUILTIN_EXPECT(!task || !task->page_map, 0))
 		return 0;

 	if (BUILTIN_EXPECT(!paging_enabled && (viraddr != phyaddr), 0))

@@ -276,7 +275,7 @@ size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flag
 	if (flags & MAP_KERNEL_SPACE)
 		spinlock_lock(&kslock);
 	else
-		spinlock_irqsave_lock(&task->pgd_lock);
+		spinlock_irqsave_lock(&task->page_lock);

 	if (!viraddr) {
 		viraddr = vm_alloc(npages, flags);

@@ -292,10 +291,10 @@ size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flag
 	for(i=0; i<npages; i++, viraddr+=PAGE_SIZE, phyaddr+=PAGE_SIZE) {
 		index = viraddr >> 22;

-		if (!(task->pgd->entries[index])) {
-			page_table_t* pgt_container;
+		if (!(task->page_map->entries[index])) {
+			page_map_t* pgt_container;

-			pgt = (page_table_t*) get_pages(1);
+			pgt = (page_map_t*) get_page();
 			if (BUILTIN_EXPECT(!pgt, 0)) {
 				kputs("map_address: out of memory\n");
 				ret = 0;

@@ -304,17 +303,17 @@ size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flag

 			// set the new page table into the directory
 			if (flags & MAP_USER_SPACE)
-				task->pgd->entries[index] = (uint32_t)pgt|USER_TABLE;
+				task->page_map->entries[index] = (uint32_t)pgt|USER_TABLE;
 			else
-				task->pgd->entries[index] = (uint32_t)pgt|KERN_TABLE;
+				task->page_map->entries[index] = (uint32_t)pgt|KERN_TABLE;

 			// if paging is already enabled, we need to use the virtual address
 			if (paging_enabled)
 				// we already know the virtual address of the "page table container"
 				// (see file header)
-				pgt_container = (page_table_t*) ((KERNEL_SPACE - PAGE_SIZE) & PAGE_MASK);
+				pgt_container = (page_map_t*) ((KERNEL_SPACE - PAGE_SIZE) & PAGE_MASK);
 			else
-				pgt_container = (page_table_t*) (task->pgd->entries[(KERNEL_SPACE - PAGE_SIZE) >> 22] & PAGE_MASK);
+				pgt_container = (page_map_t*) (task->page_map->entries[(KERNEL_SPACE - PAGE_SIZE) >> 22] & PAGE_MASK);

 			if (BUILTIN_EXPECT(!pgt_container, 0)) {
 				kputs("map_address: internal error\n");
@ -330,11 +329,11 @@ size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flag
|
|||
memset((void*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index*PAGE_SIZE) & PAGE_MASK), 0x00, PAGE_SIZE);
|
||||
else
|
||||
memset(pgt, 0x00, PAGE_SIZE);
|
||||
} else pgt = (page_table_t*) (task->pgd->entries[index] & PAGE_MASK);
|
||||
} else pgt = (page_map_t*) (task->page_map->entries[index] & PAGE_MASK);
|
||||
|
||||
/* convert physical address to virtual */
|
||||
if (paging_enabled)
|
||||
pgt = (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index*PAGE_SIZE) & PAGE_MASK);
|
||||
pgt = (page_map_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index*PAGE_SIZE) & PAGE_MASK);
|
||||
|
||||
index = (viraddr >> 12) & 0x3FF;
|
||||
if (pgt->entries[index] && !(flags & MAP_REMAP)) {
|
||||
|
@ -382,7 +381,7 @@ out:
|
|||
if (flags & MAP_KERNEL_SPACE)
|
||||
spinlock_unlock(&kslock);
|
||||
else
|
||||
spinlock_irqsave_unlock(&task->pgd_lock);
|
||||
spinlock_irqsave_unlock(&task->page_lock);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
@ -392,18 +391,18 @@ int change_page_permissions(size_t start, size_t end, uint32_t flags)
|
|||
uint32_t index1, index2, newflags;
|
||||
size_t viraddr = start & 0xFFFFF000;
|
||||
size_t phyaddr;
|
||||
page_table_t* pgt;
|
||||
page_dir_t* pgd;
|
||||
page_map_t* pgt;
|
||||
page_map_t* pgd;
|
||||
task_t* task = per_core(current_task);
|
||||
|
||||
if (BUILTIN_EXPECT(!paging_enabled, 0))
|
||||
return -EINVAL;
|
||||
|
||||
pgd = per_core(current_task)->pgd;
|
||||
pgd = per_core(current_task)->page_map;
|
||||
if (BUILTIN_EXPECT(!pgd, 0))
|
||||
return -EINVAL;
|
||||
|
||||
spinlock_irqsave_lock(&task->pgd_lock);
|
||||
spinlock_irqsave_lock(&task->page_lock);
|
||||
|
||||
while (viraddr < end)
|
||||
{
|
||||
|
@ -411,7 +410,7 @@ int change_page_permissions(size_t start, size_t end, uint32_t flags)
|
|||
index2 = (viraddr >> 12) & 0x3FF;
|
||||
|
||||
while ((viraddr < end) && (index2 < 1024)) {
|
||||
pgt = (page_table_t*) (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & PAGE_MASK);
|
||||
pgt = (page_map_t*) (page_map_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & PAGE_MASK);
|
||||
if (pgt && pgt->entries[index2]) {
|
||||
phyaddr = pgt->entries[index2] & PAGE_MASK;
|
||||
newflags = pgt->entries[index2] & 0xFFF; // get old flags
|
||||
|
@ -448,7 +447,7 @@ int change_page_permissions(size_t start, size_t end, uint32_t flags)
|
|||
}
|
||||
}
|
||||
|
||||
spinlock_irqsave_unlock(&task->pgd_lock);
|
||||
spinlock_irqsave_unlock(&task->page_lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -464,9 +463,9 @@ size_t vm_alloc(uint32_t npages, uint32_t flags)
|
|||
uint32_t index1, index2, j;
|
||||
size_t viraddr, i, ret = 0;
|
||||
size_t start, end;
|
||||
page_table_t* pgt;
|
||||
page_map_t* pgt;
|
||||
|
||||
if (BUILTIN_EXPECT(!task || !task->pgd || !paging_enabled, 0))
|
||||
if (BUILTIN_EXPECT(!task || !task->page_map || !paging_enabled, 0))
|
||||
return 0;
|
||||
|
||||
if (flags & MAP_KERNEL_SPACE) {
|
||||
|
@ -483,7 +482,7 @@ size_t vm_alloc(uint32_t npages, uint32_t flags)
|
|||
if (flags & MAP_KERNEL_SPACE)
|
||||
spinlock_lock(&kslock);
|
||||
else
|
||||
spinlock_irqsave_lock(&task->pgd_lock);
|
||||
spinlock_irqsave_lock(&task->page_lock);
|
||||
|
||||
viraddr = i = start;
|
||||
j = 0;
|
||||
|
@ -491,7 +490,7 @@ size_t vm_alloc(uint32_t npages, uint32_t flags)
|
|||
index1 = i >> 22;
|
||||
index2 = (i >> 12) & 0x3FF;
|
||||
|
||||
pgt = (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & PAGE_MASK);
|
||||
pgt = (page_map_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & PAGE_MASK);
|
||||
if (!pgt || !(pgt->entries[index2])) {
|
||||
i+=PAGE_SIZE;
|
||||
j++;
|
||||
|
@ -509,7 +508,7 @@ size_t vm_alloc(uint32_t npages, uint32_t flags)
|
|||
if (flags & MAP_KERNEL_SPACE)
|
||||
spinlock_unlock(&kslock);
|
||||
else
|
||||
spinlock_irqsave_unlock(&task->pgd_lock);
|
||||
spinlock_irqsave_unlock(&task->page_lock);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
@ -519,22 +518,22 @@ int unmap_region(size_t viraddr, uint32_t npages)
|
|||
task_t* task = per_core(current_task);
|
||||
uint32_t i;
|
||||
uint32_t index1, index2;
|
||||
page_table_t* pgt;
|
||||
page_map_t* pgt;
|
||||
|
||||
if (BUILTIN_EXPECT(!task || !task->pgd || !paging_enabled, 0))
|
||||
if (BUILTIN_EXPECT(!task || !task->page_map || !paging_enabled, 0))
|
||||
return -EINVAL;
|
||||
|
||||
if (viraddr <= KERNEL_SPACE)
|
||||
spinlock_lock(&kslock);
|
||||
else
|
||||
spinlock_irqsave_lock(&task->pgd_lock);
|
||||
spinlock_irqsave_lock(&task->page_lock);
|
||||
|
||||
for(i=0; i<npages; i++, viraddr+=PAGE_SIZE)
|
||||
{
|
||||
index1 = viraddr >> 22;
|
||||
index2 = (viraddr >> 12) & 0x3FF;
|
||||
|
||||
pgt = (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & PAGE_MASK);
|
||||
pgt = (page_map_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & PAGE_MASK);
|
||||
if (!pgt)
|
||||
continue;
|
||||
pgt->entries[index2] &= ~PG_PRESENT;
|
||||
|
@ -548,7 +547,7 @@ int unmap_region(size_t viraddr, uint32_t npages)
|
|||
if (viraddr <= KERNEL_SPACE)
|
||||
spinlock_unlock(&kslock);
|
||||
else
|
||||
spinlock_irqsave_unlock(&task->pgd_lock);
|
||||
spinlock_irqsave_unlock(&task->page_lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -558,22 +557,22 @@ int vm_free(size_t viraddr, uint32_t npages)
|
|||
task_t* task = per_core(current_task);
|
||||
uint32_t i;
|
||||
uint32_t index1, index2;
|
||||
page_table_t* pgt;
|
||||
page_map_t* pgt;
|
||||
|
||||
if (BUILTIN_EXPECT(!task || !task->pgd || !paging_enabled, 0))
|
||||
if (BUILTIN_EXPECT(!task || !task->page_map || !paging_enabled, 0))
|
||||
return -EINVAL;
|
||||
|
||||
if (viraddr <= KERNEL_SPACE)
|
||||
spinlock_lock(&kslock);
|
||||
else
|
||||
spinlock_irqsave_lock(&task->pgd_lock);
|
||||
spinlock_irqsave_lock(&task->page_lock);
|
||||
|
||||
for(i=0; i<npages; i++, viraddr+=PAGE_SIZE)
|
||||
{
|
||||
index1 = viraddr >> 22;
|
||||
index2 = (viraddr >> 12) & 0x3FF;
|
||||
|
||||
pgt = (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & PAGE_MASK);
|
||||
pgt = (page_map_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & PAGE_MASK);
|
||||
if (!pgt)
|
||||
continue;
|
||||
pgt->entries[index2] = 0;
|
||||
|
@ -584,7 +583,7 @@ int vm_free(size_t viraddr, uint32_t npages)
|
|||
if (viraddr <= KERNEL_SPACE)
|
||||
spinlock_unlock(&kslock);
|
||||
else
|
||||
spinlock_irqsave_unlock(&task->pgd_lock);
|
||||
spinlock_irqsave_unlock(&task->page_lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -593,8 +592,8 @@ int print_paging_tree(size_t viraddr)
|
|||
{
|
||||
task_t* task = per_core(current_task);
|
||||
uint32_t index1, index2;
|
||||
page_dir_t* pgd = NULL;
|
||||
page_table_t* pgt = NULL;
|
||||
page_map_t* pgd = NULL;
|
||||
page_map_t* pgt = NULL;
|
||||
|
||||
if (BUILTIN_EXPECT(!viraddr, 0))
|
||||
return -EINVAL;
|
||||
|
@ -602,20 +601,20 @@ int print_paging_tree(size_t viraddr)
|
|||
index1 = viraddr >> 22;
|
||||
index2 = (viraddr >> 12) & 0x3FF;
|
||||
|
||||
spinlock_irqsave_lock(&task->pgd_lock);
|
||||
spinlock_irqsave_lock(&task->page_lock);
|
||||
|
||||
kprintf("Paging dump of address 0x%x\n", viraddr);
|
||||
pgd = task->pgd;
|
||||
pgd = task->page_map;
|
||||
kprintf("\tPage directory entry %u: ", index1);
|
||||
if (pgd) {
|
||||
kprintf("0x%0x\n", pgd->entries[index1]);
|
||||
pgt = (page_table_t*) (pgd->entries[index1] & PAGE_MASK);
|
||||
pgt = (page_map_t*) (pgd->entries[index1] & PAGE_MASK);
|
||||
} else
|
||||
kputs("invalid page directory\n");
|
||||
|
||||
/* convert physical address to virtual */
|
||||
// convert physical address to virtual
|
||||
if (paging_enabled && pgt)
|
||||
pgt = (page_table_t*) (KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE);
|
||||
pgt = (page_map_t*) (KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE);
|
||||
|
||||
kprintf("\tPage table entry %u: ", index2);
|
||||
if (pgt)
|
||||
|
@ -623,7 +622,7 @@ int print_paging_tree(size_t viraddr)
|
|||
else
|
||||
kputs("invalid page table\n");
|
||||
|
||||
spinlock_irqsave_unlock(&task->pgd_lock);
|
||||
spinlock_irqsave_unlock(&task->page_lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -631,12 +630,12 @@ int print_paging_tree(size_t viraddr)
|
|||
static void pagefault_handler(struct state *s)
|
||||
{
|
||||
task_t* task = per_core(current_task);
|
||||
page_dir_t* pgd = task->pgd;
|
||||
page_table_t* pgt = NULL;
|
||||
size_t viraddr = read_cr2();
|
||||
size_t phyaddr;
|
||||
#ifdef CONFIG_ROCKCREEK
|
||||
uint32_t index1, index2;
|
||||
page_map_t* pgd = task->page_map;
|
||||
page_map_t* pgt = NULL;
|
||||
#endif
|
||||
|
||||
if ((viraddr >= task->start_heap) && (viraddr <= task->end_heap) && (viraddr > KERNEL_SPACE)) {
|
||||
|
@ -650,7 +649,7 @@ static void pagefault_handler(struct state *s)
|
|||
memset((void*) viraddr, 0x00, PAGE_SIZE);
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
kprintf("Could not map 0x%x at 0x%x\n", phyaddr, viraddr);
|
||||
put_page(phyaddr);
|
||||
}
|
||||
|
@ -661,7 +660,7 @@ static void pagefault_handler(struct state *s)
|
|||
index2 = (viraddr >> 12) & 0x3FF;
|
||||
if (!pgd || !(pgd->entries[index1] & PAGE_MASK))
|
||||
goto default_handler;
|
||||
pgt = (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & PAGE_MASK);
|
||||
pgt = (page_map_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & PAGE_MASK);
|
||||
if (!pgt || !(pgt->entries[index2]))
|
||||
goto default_handler;
|
||||
if (pgt->entries[index2] & PG_SVM_INIT) {
|
||||
|
@ -687,14 +686,14 @@ default_handler:
|
|||
int arch_paging_init(void)
|
||||
{
|
||||
uint32_t i, npages, index1, index2;
|
||||
page_table_t* pgt;
|
||||
page_map_t* pgt;
|
||||
size_t viraddr;
|
||||
|
||||
// uninstall default handler and install our own
|
||||
// replace default pagefault handler
|
||||
irq_uninstall_handler(14);
|
||||
irq_install_handler(14, pagefault_handler);
|
||||
|
||||
// Create a page table to reference to the other page tables
|
||||
// create a page table to reference to the other page tables
|
||||
pgt = &pgt_container;
|
||||
|
||||
// map this table at the end of the kernel space
|
||||
|
@ -703,21 +702,21 @@ int arch_paging_init(void)
|
|||
index2 = (viraddr >> 12) & 0x3FF;
|
||||
|
||||
// now, we create a self reference
|
||||
per_core(current_task)->pgd->entries[index1] = (((size_t) pgt) & PAGE_MASK)|KERN_TABLE;
|
||||
pgt->entries[index2] = ((size_t) pgt & 0xFFFFF000)|KERN_PAGE;
|
||||
per_core(current_task)->page_map->entries[index1] = (((size_t) pgt) & PAGE_MASK)|KERN_TABLE;
|
||||
pgt->entries[index2] = ((size_t) pgt & PAGE_MASK)|KERN_PAGE;
|
||||
|
||||
// create the other PGTs for the kernel space
|
||||
for(i=0; i<KERNEL_SPACE/(1024*PAGE_SIZE)-1; i++) {
|
||||
size_t phyaddr = boot_pgt+i;
|
||||
|
||||
memset((void*) phyaddr, 0x00, sizeof(page_table_t));
|
||||
per_core(current_task)->pgd->entries[i] = (phyaddr & PAGE_MASK)|KERN_TABLE;
|
||||
memset((void*) phyaddr, 0x00, sizeof(page_map_t));
|
||||
per_core(current_task)->page_map->entries[i] = (phyaddr & PAGE_MASK)|KERN_TABLE;
|
||||
pgt->entries[i] = (phyaddr & PAGE_MASK)|KERN_PAGE;
|
||||
}
|
||||
|
||||
/*
|
||||
* Set the page table and page directory entries for the kernel. We map the kernel's physical address
|
||||
* to the same virtual address.
|
||||
* Set the page table and page directory entries for the kernel.
|
||||
* We map the kernel's physical address to the same virtual address.
|
||||
*/
|
||||
npages = ((size_t) &kernel_end - (size_t) &kernel_start) >> PAGE_SHIFT;
|
||||
if ((size_t)&kernel_end & (PAGE_SIZE-1))
|
||||
|
@ -725,7 +724,7 @@ int arch_paging_init(void)
|
|||
map_region((size_t)&kernel_start, (size_t)&kernel_start, npages, MAP_KERNEL_SPACE);
|
||||
|
||||
#if MAX_CORES > 1
|
||||
// Reserve page for smp boot code
|
||||
// reserve page for smp boot code
|
||||
if (!map_region(SMP_SETUP_ADDR, SMP_SETUP_ADDR, 1, MAP_KERNEL_SPACE|MAP_NO_CACHE)) {
|
||||
kputs("could not reserve page for smp boot code\n");
|
||||
return -ENOMEM;
|
||||
|
@ -738,16 +737,12 @@ int arch_paging_init(void)
|
|||
#endif
|
||||
|
||||
#ifdef CONFIG_MULTIBOOT
|
||||
/*
|
||||
* of course, mb_info has to map into the kernel space
|
||||
*/
|
||||
// map mb_info into the kernel space
|
||||
if (mb_info)
|
||||
map_region((size_t) mb_info & PAGE_MASK, (size_t) mb_info & PAGE_MASK, 1, MAP_KERNEL_SPACE);
|
||||
|
||||
#if 0
|
||||
/*
|
||||
* Map reserved memory regions into the kernel space
|
||||
*/
|
||||
// map reserved memory regions into the kernel space
|
||||
if (mb_info && (mb_info->flags & MULTIBOOT_INFO_MEM_MAP)) {
|
||||
multiboot_memory_map_t* mmap = (multiboot_memory_map_t*) mb_info->mmap_addr;
|
||||
multiboot_memory_map_t* mmap_end = (void*) ((size_t) mb_info->mmap_addr + mb_info->mmap_length);
|
||||
|
@ -805,7 +800,7 @@ int arch_paging_init(void)
|
|||
kprintf("Map FPGA regsiters at 0x%x\n", viraddr);
|
||||
#endif
|
||||
|
||||
/* enable paging */
|
||||
// enable paging
|
||||
write_cr3((uint32_t) &boot_pgd);
|
||||
i = read_cr0();
|
||||
i = i | (1 << 31);
|
||||
|
@ -822,10 +817,7 @@ int arch_paging_init(void)
|
|||
bootinfo->addr = viraddr;
|
||||
#endif
|
||||
|
||||
/*
|
||||
* we turned on paging
|
||||
* => now, we are able to register our task
|
||||
*/
|
||||
// we turned on paging => now, we are able to register our task
|
||||
register_task();
|
||||
|
||||
// APIC registers into the kernel address space
|
||||
|
|
|
@@ -31,21 +31,15 @@
#include <asm/irq.h>
#include <asm/multiboot.h>
#include <asm/apic.h>
#ifdef CONFIG_ROCKCREEK
#include <asm/RCCE_lib.h>
#include <asm/SCC_API.h>
#include <asm/svm.h>
#include <asm/icc.h>
#endif

/*
 * Virtual Memory Layout of the standard configuration
 * (1 GB kernel space)
 *
- * 0x00000000 - 0x000FFFFF: reserved for IO devices (16MB)
- * 0x00100000 - 0x0DEADFFF: Kernel (size depends on the configuration) (221MB)
- * 0x0DEAE000 - 0x3FFFFFFF: Kernel heap
- *
+ * 0x000000000000 - 0x0000000FFFFF: reserved for IO devices (16MB)
+ * 0x000000100000 - 0x00000DEADFFF: Kernel (size depends on the configuration) (221MB)
+ * 0x00000DEAE000 - 0x00003FFFFFFF: Kernel heap
+ * 0xFF8000000000 - 0xFFFFFFFFFFFF: Paging structures are mapped in this region (max 512GB)
 */

/*
@@ -55,127 +49,248 @@
extern const void kernel_start;
extern const void kernel_end;

-// boot task's page directory and page directory lock
-extern page_dir_t boot_pgd;
+// boot task's page map and page map lock
+extern page_map_t boot_pml4;
static spinlock_t kslock = SPINLOCK_INIT;
static int paging_enabled = 0;

-page_dir_t* get_boot_pgd(void)
+page_map_t* get_boot_page_map(void)
{
-	return &boot_pgd;
+	return &boot_pml4;
}

-int create_pgd(task_t* task, int copy)
-{
-	// Currently, we support only kernel tasks
-	// => all tasks are able to use the same pgd
-
-	if (BUILTIN_EXPECT(!paging_enabled, 0))
-		return -EINVAL;
-
-	task->pgd = get_boot_pgd();
-
-	return 0;
-}
-
-/*
- * drops all page frames and the PGD of a user task
- */
-int drop_pgd(void)
-{
-#if 0
-	page_dir_t* pgd = per_core(current_task)->pgd;
-	size_t phy_pgd = virt_to_phys((size_t) pgd);
-	task_t* task = per_core(current_task);
-	uint32_t i;
-
-	if (BUILTIN_EXPECT(pgd == &boot_pgd, 0))

/** @brief Copy a single page frame
 *
 * @param src virtual address of source page frame
 * @return physical addr to copied page frame
 */
static size_t copy_page_frame(size_t *src)
{
	kprintf("copy_page_frame(%p)\n", src);
#if 1 // TODO: untested
	size_t phyaddr, viraddr;

	// allocate and map an empty page
	phyaddr = get_page();
	if (BUILTIN_EXPECT(!phyaddr, 0))
		return 0;

	viraddr = vma_alloc(PAGE_SIZE, VMA_HEAP);
	if (BUILTIN_EXPECT(!viraddr, 0))
		return 0;

	viraddr = map_region(viraddr, phyaddr, 1, MAP_KERNEL_SPACE);
	if (BUILTIN_EXPECT(!viraddr, 0))
		return 0;

	// copy the whole page
	strncpy((void*) viraddr, (void*) src, PAGE_SIZE);

	// unmap and free page
	unmap_region(viraddr, 1);
	vma_free(viraddr, viraddr+PAGE_SIZE);

	return phyaddr;
#else
	kprintf("TODO: copy_page_frame(%lx)\n", source);
	return 0;
#endif
}
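One thing to watch in copy_page_frame() above: strncpy() stops propagating source bytes at the first NUL and zero-fills the remainder, so a byte-exact page copy only works by accident. Raw page copies are conventionally done with memcpy(); a hedged sketch of that variant (our suggestion, not the committed code):

    #include <string.h>

    /* Sketch: byte-exact page copy. memcpy() copies all page_size bytes
     * regardless of content, unlike the string-oriented strncpy(). */
    static void copy_page_bytes(void* dst, const void* src, size_t page_size)
    {
    	memcpy(dst, src, page_size);	// copies every byte, NULs included
    }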
static inline size_t canonicalize(size_t addr)
{
	if (addr & (1UL<<47))
		return addr;
	else
		return addr & ((1UL<<48) - 1);
}

static inline int map_to_level(size_t addr)
{
	if (addr >= PAGE_PML4)
		return 4;
	else if (addr >= PAGE_PDPT)
		return 3;
	else if (addr >= PAGE_PGD)
		return 2;
	else if (addr >= PAGE_PGT)
		return 1;
	else
		return -EINVAL;
}

-	spinlock_lock(&task->pgd_lock);
-
-	for(i=0; i<1024; i++) {
-		if (pgd->entries[i] & PG_USER) {
-			put_page(pgd->entries[i] & PAGE_MASK);
-			pgd->entries[i] = 0;

static inline const char * map_to_lvlname(size_t addr)
{
	const char* names[] = {"(none)", "PGT", "PGD", "PDPT", "PML4"};
	return names[map_to_level(addr)];
}

static inline size_t map_to_virt(size_t addr)
{
	return canonicalize(addr << (map_to_level(addr) * PAGE_MAP_SHIFT));
}
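A note on canonicalize() as committed: x86-64 canonical addresses replicate bit 47 into bits 48-63, so the usual helper sign-extends when bit 47 is set and masks when it is clear, whereas the version above returns the address unchanged in the bit-47-set case. A hedged sketch of the conventional form (our suggestion, not the committed code):

    #include <stdint.h>

    /* Sketch: conventional x86-64 canonicalization, which sign-extends
     * bit 47 into the upper 16 bits instead of leaving them untouched. */
    static inline uint64_t canonicalize_signext(uint64_t addr)
    {
    	if (addr & (1ULL << 47))
    		return addr | ~((1ULL << 48) - 1);	// set bits 63..48
    	else
    		return addr & ((1ULL << 48) - 1);	// clear bits 63..48
    }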
/*
 * Copy page maps using recursion
 *
 * @param from pointer to virtual address of source page tables
 * @param to pointer to virtual address of destination page tables
 * @param copy flags what should be copied (see #define COPY_*)
 * @return number of new allocated page frames (for tables only)
 */
static int copy_page_map(page_map_t *src, page_map_t *dest, int copy)
{
	page_map_t* next_src, * next_dest;
	int ret = 0;
	uint32_t i;

	for(i=0; i<PAGE_MAP_ENTRIES; i++) {
		if (!(src->entries[i] & PG_PRESENT))
			// skip empty entries
			dest->entries[i] = 0;
		else if (src->entries[i] & PG_USER) {
			size_t phys;
			kprintf("d:%p (%s: 0x%012lx) -> %p\n", &src->entries[i], map_to_lvlname((size_t) &src->entries[i]), map_to_virt((size_t) &src->entries[i]), &dest->entries[i]);

			// deep copy user tables
			if ((size_t) src >= PAGE_PGT) {
				phys = get_page();
				if (BUILTIN_EXPECT(!phys, 0))
					return -ENOMEM;

				dest->entries[i] = phys|(src->entries[i] & ~PAGE_MASK);

				// reuse pointers to next lower page map tables
				next_src = (page_map_t*) ((size_t) &src->entries[i] << 9);
				next_dest = (page_map_t*) ((size_t) &dest->entries[i] << 9);

				ret += 1 + copy_page_map(next_src, next_dest, copy);
			}
			// deep copy page frame
			else {
				if (copy) {
					phys = copy_page_frame((size_t*) src->entries[i]);
					dest->entries[i] = phys|(src->entries[i] & ~PAGE_MASK);
				}
				kprintf("c: %p (%lx)\n", &src->entries[i], src->entries[i]);
			}
		}
		// shallow copy kernel only tables
		else {
			kprintf("s:%p (%s: 0x%012lx) -> %p\n", &src->entries[i], map_to_lvlname((size_t) &src->entries[i]), map_to_virt((size_t) &src->entries[i]), &dest->entries[i]);
			dest->entries[i] = src->entries[i];
		}
	}

-	// freeing the page directory
-	put_page(phy_pgd);
+	kputs("r\n");
+	return ret;
+}

-	task->pgd = NULL;
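The `<< 9` in copy_page_map() is the recursive-mapping walk in reverse: with all paging structures self-mapped at the top of the address space, shifting the virtual address of a table *entry* left by 9 bits yields the virtual address of the table that entry points to. A small worked example under those assumptions (the self-map slot and constants are illustrative, not taken from the patch):

    #include <stdint.h>
    #include <stdio.h>

    /* Assumes a recursive page-map slot as in the patch: every paging
     * structure is reachable through a fixed self-referencing PML4 entry
     * (index 511 here), so the PML4 itself sits at 0xFFFFFFFFFFFFF000. */
    int main(void)
    {
    	// virtual address of PML4 entry number 42 (8 bytes per entry)
    	uint64_t entry_addr = 0xFFFFFFFFFFFFF000ULL + 8*42;

    	// one level down: address of the PDPT that entry 42 maps
    	uint64_t next_table = entry_addr << 9;	// 0xFFFFFFFFFFE2A000

    	printf("entry at %#llx -> table at %#llx\n",
    	       (unsigned long long) entry_addr,
    	       (unsigned long long) next_table);
    	return 0;
    }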
int create_page_map(task_t* task, int copy)
{
	size_t phys;
	uint32_t ret;

-	spinlock_unlock(&task->pgd_lock);
-#endif

	// fixed mapping for paging structures
	page_map_t *current = (page_map_t*) PAGE_PML4;
	page_map_t *new = (page_map_t*) (PAGE_PML4 - 0x1000);

	// get new pml4 table
	phys = get_page();
	if (!phys) return -ENOMEM;

	current->entries[PAGE_MAP_ENTRIES-2] = phys|KERN_TABLE;
	new->entries[PAGE_MAP_ENTRIES-1] = phys|KERN_TABLE;

	tlb_flush(); // ouch :(

	spinlock_lock(&kslock);
	ret = copy_page_map(current, new, copy);
	spinlock_unlock(&kslock);

	new->entries[PAGE_MAP_ENTRIES-1] = phys|KERN_TABLE;
	current->entries[PAGE_MAP_ENTRIES-2] = 0;

	task->page_map = (page_map_t*) phys;

	kprintf("create_page_map: allocated %u page tables\n", ret);
	return ret;
}
int drop_page_map(void)
{
#if 1
	kprintf("TODO: test drop_page_map()\n");
	return -EINVAL; // TODO
#else
	task_t* task = per_core(current_task);
	page_map_t* pml4, * pdpt, * pgd, * pgt;
	size_t phys;
	uint32_t i, j, k, l;

	pml4 = task->page_map;

	if (BUILTIN_EXPECT(pml4 == &boot_pml4, 0))
		return -EINVAL;

	spinlock_lock(&task->page_lock);

	// delete all user pages and tables
	for(i=0; i<PAGE_MAP_ENTRIES; i++) { // pml4
		if (pml4->entries[i] & PG_USER) {
			for(j=0; j<PAGE_MAP_ENTRIES; j++) { // pdpt
				if (pdpt->entries[j] & PG_USER) {
					for(k=0; k<PAGE_MAP_ENTRIES; k++) { // pgd
						if (pgd->entries[k] & PG_USER) {
							for(l=0; l<PAGE_MAP_ENTRIES; l++) { // pgt
								if (pgt->entries[l] & PG_USER)
									put_page(pgt->entries[l] & PAGE_MASK);
							}
							// TODO: put pgt
						}
					}
					// TODO: put pgd
				}
			}
			// TODO: put pdpt
		}
	}

	put_page(virt_to_phys((size_t) pml4));
	task->page_map = NULL;

	spinlock_unlock(&task->page_lock);

	return 0;
#endif
}
size_t virt_to_phys(size_t viraddr)
{
	task_t* task = per_core(current_task);
-	uint16_t idx_pd4 = (viraddr >> 39) & 0x1FF;
-	uint16_t idx_dirp = (viraddr >> 30) & 0x1FF;
-	uint16_t idx_dir = (viraddr >> 21) & 0x1FF;
-	uint16_t idx_table = (viraddr >> 12) & 0x1FF;
-	page_table_t* pgt;
-	size_t ret = 0;
	size_t phyaddr;
	size_t* pte;

-	if (!paging_enabled)
-		return viraddr;
-
-	if (BUILTIN_EXPECT(!task || !task->pgd, 0))
-		return 0;
-
	spinlock_irqsave_lock(&task->page_lock);

	pte = (size_t *) (PAGE_PGT | (viraddr >> 9));
	phyaddr = (*pte & PAGE_MASK) | (viraddr & ~PAGE_MASK);

	spinlock_irqsave_unlock(&task->page_lock);

-	// Currently, we allocate pages only in kernel space.
-	// => physical address of the page table is identical of the virtual address
-	pgt = (page_table_t*) (task->pgd->entries[idx_pd4] & PAGE_MASK);
-	if (!pgt)
-		goto out;
-
-	pgt = (page_table_t*) (pgt->entries[idx_dirp] & PAGE_MASK);
-	if (!pgt)
-		goto out;
-
-	pgt = (page_table_t*) (pgt->entries[idx_dir] & PAGE_MASK);
-	if (!pgt)
-		goto out;
-
-	ret = (size_t) (pgt->entries[idx_table] & PAGE_MASK);
-	if (!ret)
-		goto out;
-
-	ret = ret | (viraddr & 0xFFF); // add page offset
-out:
-	//kprintf("vir %p to phy %p\n", viraddr, ret);
-
-	spinlock_irqsave_unlock(&task->pgd_lock);
-
-	return ret;
+	return phyaddr;
}
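The new virt_to_phys() relies on the recursive mapping described in the header comment: `PAGE_PGT | (viraddr >> 9)` lands on the page-table entry that maps viraddr. Note that the low three bits of `viraddr >> 9` carry page-offset bits, so the committed expression only yields an 8-byte-aligned entry pointer for page-aligned inputs. A hedged sketch that masks them explicitly (our variant, not the committed code):

    /* Sketch: bits >= 12 of viraddr become the byte offset inside the
     * recursively mapped page table; masking the sub-entry bits keeps the
     * pointer 8-byte aligned even for unaligned viraddr values. */
    size_t* pte = (size_t*) ((PAGE_PGT | (viraddr >> 9)) & ~0x7UL);
    size_t phyaddr = (*pte & PAGE_MASK) | (viraddr & ~PAGE_MASK);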
size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flags)
{
	task_t* task = per_core(current_task);
-	page_table_t* pgt;
	size_t i, ret;

-	if (BUILTIN_EXPECT(!task || !task->pgd, 0))
+	if (BUILTIN_EXPECT(!task || !task->page_map, 0))
		return 0;

	if (BUILTIN_EXPECT(!paging_enabled && (viraddr != phyaddr), 0))
		return 0;

-	if (flags & MAP_KERNEL_SPACE)
-		spinlock_lock(&kslock);
-	else
-		spinlock_irqsave_lock(&task->pgd_lock);
-
	if (!viraddr) {
-		viraddr = vm_alloc(npages, flags);
+		kputs("map_region: deprecated vma_alloc() call from within map_region\n");
+		viraddr = vma_alloc(npages*PAGE_SIZE, VMA_HEAP);
		if (BUILTIN_EXPECT(!viraddr, 0)) {
			kputs("map_region: found no valid virtual address\n");
			ret = 0;
@@ -183,59 +298,40 @@ size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flags)
		}
	}

	// correct alignment
	phyaddr &= PAGE_MASK;
	viraddr &= PAGE_MASK;
	ret = viraddr;

	if (flags & MAP_KERNEL_SPACE)
		spinlock_lock(&kslock);
	else
		spinlock_irqsave_lock(&task->page_lock);

	kprintf("map_region: map %u pages from 0x%lx to 0x%lx with flags: 0x%x\n", npages, viraddr, phyaddr, flags);
	for(i=0; i<npages; i++, viraddr+=PAGE_SIZE, phyaddr+=PAGE_SIZE) {
-		uint16_t idx_pd4 = (viraddr >> 39) & 0x1FF;
-		uint16_t idx_dirp = (viraddr >> 30) & 0x1FF;
-		uint16_t idx_dir = (viraddr >> 21) & 0x1FF;
-		uint16_t idx_table = (viraddr >> 12) & 0x1FF;
+		// page table entry
+		size_t* pte = (size_t *) (PAGE_PGT|(viraddr >> 9));

-		pgt = (page_table_t*) (task->pgd->entries[idx_pd4] & PAGE_MASK);
-		if (!pgt) {
-			kputs("map_region: out of memory\n");
-			ret = 0;
-			goto out;
-		}
-
-		pgt = (page_table_t*) (pgt->entries[idx_dirp] & PAGE_MASK);
-		if (!pgt) {
-			kputs("map_region: out of memory\n");
-			ret = 0;
-			goto out;
-		}
-
-		pgt = (page_table_t*) (pgt->entries[idx_dir] & PAGE_MASK);
-		if (!pgt) {
-			kputs("map_region: out of memory\n");
-			ret = 0;
-			goto out;
-		}
-
-		/* convert physical address to virtual */
-		// Currently, we allocate pages only in kernel space.
-		// => physical address of the page table is identical of the virtual address
-		//if (paging_enabled)
-		//	pgt = (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index*PAGE_SIZE) & PAGE_MASK);
-
-		if (pgt->entries[idx_table] && !(flags & MAP_REMAP)) {
-			kprintf("0x%x is already mapped\n", viraddr);
+		if (*pte && !(flags & MAP_REMAP)) {
+			kprintf("map_region: 0x%lx is already mapped\n", viraddr);
			ret = 0;
			goto out;
		}

		if (flags & MAP_USER_SPACE)
-			pgt->entries[idx_table] = USER_PAGE|(phyaddr & PAGE_MASK);
+			*pte = phyaddr|USER_PAGE;
		else
-			pgt->entries[idx_table] = KERN_PAGE|(phyaddr & PAGE_MASK);
+			*pte = phyaddr|KERN_PAGE;

		if (flags & MAP_NO_CACHE)
-			pgt->entries[idx_table] |= PG_PCD;
+			*pte |= PG_PCD;

		if (flags & MAP_NO_ACCESS)
-			pgt->entries[idx_table] &= ~PG_PRESENT;
+			*pte &= ~PG_PRESENT;

		if (flags & MAP_WT)
-			pgt->entries[idx_table] |= PG_PWT;
+			*pte |= PG_PWT;

		if (flags & MAP_USER_SPACE)
			atomic_int32_inc(&task->user_usage);
@@ -247,7 +343,7 @@ out:
	if (flags & MAP_KERNEL_SPACE)
		spinlock_unlock(&kslock);
	else
-		spinlock_irqsave_unlock(&task->pgd_lock);
+		spinlock_irqsave_unlock(&task->page_lock);

	return ret;
}
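For orientation, a typical map_region() call maps a device region uncached into kernel space; the SMP-setup mapping later in this diff follows the same pattern. A hedged usage sketch (the MMIO address is illustrative):

    /* Sketch: identity-map one uncached page of MMIO registers into the
     * kernel space; map_region() returns the mapped virtual address or 0. */
    static void map_mmio_example(void)
    {
    	size_t mmio = 0xFEE00000;	// illustrative device address

    	if (!map_region(mmio, mmio, 1, MAP_KERNEL_SPACE|MAP_NO_CACHE))
    		kputs("could not map MMIO region\n");
    }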
@@ -258,18 +354,15 @@ int change_page_permissions(size_t start, size_t end, uint32_t flags)
	uint32_t index1, index2, newflags;
	size_t viraddr = start & PAGE_MASK;
	size_t phyaddr;
-	page_table_t* pgt;
-	page_dir_t* pgd;
+	page_map_t* pgt;
+	page_map_t* pgd;
	task_t* task = per_core(current_task);

	if (BUILTIN_EXPECT(!paging_enabled, 0))
		return -EINVAL;

-	pgd = per_core(current_task)->pgd;
+	pgd = per_core(current_task)->page_map;
	if (BUILTIN_EXPECT(!pgd, 0))
		return -EINVAL;

-	spinlock_lock(&task->pgd_lock);
+	spinlock_lock(&task->page_lock);

	while (viraddr < end)
	{

@@ -277,7 +370,7 @@ int change_page_permissions(size_t start, size_t end, uint32_t flags)
		index2 = (viraddr >> 12) & 0x3FF;

		while ((viraddr < end) && (index2 < 1024)) {
-			pgt = (page_table_t*) (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & PAGE_MASK);
+			pgt = (page_map_t*) (page_map_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & PAGE_MASK);
			if (pgt && pgt->entries[index2]) {
				phyaddr = pgt->entries[index2] & PAGE_MASK;
				newflags = pgt->entries[index2] & 0xFFF; // get old flags

@@ -292,16 +385,8 @@ int change_page_permissions(size_t start, size_t end, uint32_t flags)
				// update flags
				if (!(flags & VMA_WRITE)) {
					newflags &= ~PG_RW;
-#ifdef CONFIG_ROCKCREEK
-					if (newflags & (PG_SVM_STRONG|PG_SVM_LAZYRELEASE))
-						newflags &= ~PG_MPE;
-#endif
				} else {
					newflags |= PG_RW;
-#ifdef CONFIG_ROCKCREEK
-					if (newflags & (PG_SVM_STRONG|PG_SVM_LAZYRELEASE))
-						newflags |= PG_MPE;
-#endif
				}

				pgt->entries[index2] = (newflags & 0xFFF) | (phyaddr & PAGE_MASK);

@@ -314,149 +399,66 @@ int change_page_permissions(size_t start, size_t end, uint32_t flags)
		}
	}

-	spinlock_unlock(&task->pgd_lock);
+	spinlock_unlock(&task->page_lock);
#endif

	return -EINVAL;
}

-/*
- * Use the first fit algorithm to find a valid address range
- *
- * TODO: O(n) => bad performance, we need a better approach
- */
-size_t vm_alloc(uint32_t npages, uint32_t flags)
-{
-	task_t* task = per_core(current_task);
-	size_t viraddr, i, j, ret = 0;
-	size_t start, end;
-	page_table_t* pgt;
-
-	if (BUILTIN_EXPECT(!task || !task->pgd || !paging_enabled, 0))
-		return 0;
-
-	if (flags & MAP_KERNEL_SPACE) {
-		start = (((size_t) &kernel_end) + 10*PAGE_SIZE) & PAGE_MASK;
-		end = (KERNEL_SPACE - PAGE_SIZE) & PAGE_MASK;
-	} else {
-		start = KERNEL_SPACE & PAGE_MASK;
-		end = PAGE_MASK;
-	}
-
-	if (BUILTIN_EXPECT(!npages, 0))
-		return 0;
-
-	if (flags & MAP_KERNEL_SPACE)
-		spinlock_lock(&kslock);
-	else
-		spinlock_irqsave_lock(&task->pgd_lock);
-
-	viraddr = i = start;
-	j = 0;
-	do {
-		uint16_t idx_pd4 = (viraddr >> 39) & 0x1FF;
-		uint16_t idx_dirp = (viraddr >> 30) & 0x1FF;
-		uint16_t idx_dir = (viraddr >> 21) & 0x1FF;
-		uint16_t idx_table = (viraddr >> 12) & 0x1FF;
-
-		// Currently, we allocate pages only in kernel space.
-		// => physical address of the page table is identical of the virtual address
-		pgt = (page_table_t*) (task->pgd->entries[idx_pd4] & PAGE_MASK);
-		if (!pgt) {
-			i += (size_t)PGT_ENTRIES*PGT_ENTRIES*PGT_ENTRIES*PAGE_SIZE;
-			j += PGT_ENTRIES*PGT_ENTRIES*PGT_ENTRIES;
-			continue;
-		}
-
-		pgt = (page_table_t*) (pgt->entries[idx_dirp] & PAGE_MASK);
-		if (!pgt) {
-			i += PGT_ENTRIES*PGT_ENTRIES*PAGE_SIZE;
-			j += PGT_ENTRIES*PGT_ENTRIES;
-			continue;
-		}
-
-		pgt = (page_table_t*) (pgt->entries[idx_dir] & PAGE_MASK);
-		if (!pgt) {
-			i += PGT_ENTRIES*PAGE_SIZE;
-			j += PGT_ENTRIES;
-			continue;
-		}
-
-		if (!(pgt->entries[idx_table])) {
-			i += PAGE_SIZE;
-			j++;
-		} else {
-			// restart search
-			j = 0;
-			viraddr = i + PAGE_SIZE;
-			i = i + PAGE_SIZE;
-		}
-	} while((j < npages) && (i<=end));
-
-	if ((j >= npages) && (viraddr < end))
-		ret = viraddr;
-
-	if (flags & MAP_KERNEL_SPACE)
-		spinlock_unlock(&kslock);
-	else
-		spinlock_irqsave_unlock(&task->pgd_lock);
-
-	return ret;
-}
-
int unmap_region(size_t viraddr, uint32_t npages)
{
	task_t* task = per_core(current_task);
-	page_table_t* pgt;
+	page_map_t* pdpt, * pgd, * pgt;
	size_t i;
-	uint16_t idx_pd4, idx_dirp;
-	uint16_t idx_dir, idx_table;
+	uint16_t index_pml4, index_pdpt;
+	uint16_t index_pgd, index_pgt;

-	if (BUILTIN_EXPECT(!task || !task->pgd || !paging_enabled, 0))
+	if (BUILTIN_EXPECT(!task || !task->page_map, 0))
		return -EINVAL;

	if (viraddr <= KERNEL_SPACE)
		spinlock_lock(&kslock);
	else
-		spinlock_irqsave_lock(&task->pgd_lock);
+		spinlock_irqsave_lock(&task->page_lock);

	i = 0;
	while(i<npages)
	{
-		idx_pd4 = (viraddr >> 39) & 0x1FF;
-		idx_dirp = (viraddr >> 30) & 0x1FF;
-		idx_dir = (viraddr >> 21) & 0x1FF;
-		idx_table = (viraddr >> 12) & 0x1FF;
+		index_pml4 = (viraddr >> 39) & 0x1FF;
+		index_pdpt = (viraddr >> 30) & 0x1FF;
+		index_pgd = (viraddr >> 21) & 0x1FF;
+		index_pgt = (viraddr >> 12) & 0x1FF;

-		// Currently, we allocate pages only in kernel space.
+		// currently, we allocate pages only in kernel space.
		// => physical address of the page table is identical of the virtual address
-		pgt = (page_table_t*) (task->pgd->entries[idx_pd4] & PAGE_MASK);
-		if (!pgt) {
-			viraddr += (size_t) PGT_ENTRIES*PGT_ENTRIES*PGT_ENTRIES*PAGE_SIZE;
-			i += PGT_ENTRIES*PGT_ENTRIES*PGT_ENTRIES;
+		pdpt = (page_map_t*) (task->page_map->entries[index_pml4] & PAGE_MASK);
+		if (!pdpt) {
+			viraddr += (size_t) PAGE_MAP_ENTRIES*PAGE_MAP_ENTRIES*PAGE_MAP_ENTRIES*PAGE_SIZE;
+			i += PAGE_MAP_ENTRIES*PAGE_MAP_ENTRIES*PAGE_MAP_ENTRIES;
			continue;
		}

-		pgt = (page_table_t*) (pgt->entries[idx_dirp] & PAGE_MASK);
-		if (!pgt) {
-			viraddr += PGT_ENTRIES*PGT_ENTRIES*PAGE_SIZE;
-			i += PGT_ENTRIES*PGT_ENTRIES;
+		pgd = (page_map_t*) (pdpt->entries[index_pdpt] & PAGE_MASK);
+		if (!pgd) {
+			viraddr += PAGE_MAP_ENTRIES*PAGE_MAP_ENTRIES*PAGE_SIZE;
+			i += PAGE_MAP_ENTRIES*PAGE_MAP_ENTRIES;
			continue;
		}

-		pgt = (page_table_t*) (pgt->entries[idx_dir] & PAGE_MASK);
+		pgt = (page_map_t*) (pgd->entries[index_pgd] & PAGE_MASK);
		if (!pgt) {
-			viraddr += PGT_ENTRIES*PAGE_SIZE;
-			i += PGT_ENTRIES;
+			viraddr += PAGE_MAP_ENTRIES*PAGE_SIZE;
+			i += PAGE_MAP_ENTRIES;
			continue;
		}

-		if (pgt->entries[idx_table])
-			pgt->entries[idx_table] &= ~PG_PRESENT;
+		if (pgt->entries[index_pgt])
+			pgt->entries[index_pgt] &= ~PG_PRESENT;

		viraddr +=PAGE_SIZE;
		i++;

		if (viraddr > KERNEL_SPACE)
			atomic_int32_dec(&task->user_usage);

@@ -466,71 +468,7 @@ int unmap_region(size_t viraddr, uint32_t npages)
	if (viraddr <= KERNEL_SPACE)
		spinlock_unlock(&kslock);
	else
-		spinlock_irqsave_unlock(&task->pgd_lock);
-
-	return 0;
-}
-
-int vm_free(size_t viraddr, uint32_t npages)
-{
-	task_t* task = per_core(current_task);
-	page_table_t* pgt;
-	size_t i;
-	uint16_t idx_pd4, idx_dirp;
-	uint16_t idx_dir, idx_table;
-
-	if (BUILTIN_EXPECT(!task || !task->pgd || !paging_enabled, 0))
-		return -EINVAL;
-
-	if (viraddr <= KERNEL_SPACE)
-		spinlock_lock(&kslock);
-	else
-		spinlock_irqsave_lock(&task->pgd_lock);
-
-	i = 0;
-	while(i<npages)
-	{
-		idx_pd4 = (viraddr >> 39) & 0x1FF;
-		idx_dirp = (viraddr >> 30) & 0x1FF;
-		idx_dir = (viraddr >> 21) & 0x1FF;
-		idx_table = (viraddr >> 12) & 0x1FF;
-
-		// Currently, we allocate pages only in kernel space.
-		// => physical address of the page table is identical of the virtual address
-		pgt = (page_table_t*) (task->pgd->entries[idx_pd4] & PAGE_MASK);
-		if (!pgt) {
-			viraddr += (size_t) PGT_ENTRIES*PGT_ENTRIES*PGT_ENTRIES*PAGE_SIZE;
-			i += PGT_ENTRIES*PGT_ENTRIES*PGT_ENTRIES;
-			continue;
-		}
-
-		pgt = (page_table_t*) (pgt->entries[idx_dirp] & PAGE_MASK);
-		if (!pgt) {
-			viraddr += PGT_ENTRIES*PGT_ENTRIES*PAGE_SIZE;
-			i += PGT_ENTRIES*PGT_ENTRIES;
-			continue;
-		}
-
-		pgt = (page_table_t*) (pgt->entries[idx_dir] & PAGE_MASK);
-		if (!pgt) {
-			viraddr += PGT_ENTRIES*PAGE_SIZE;
-			i += PGT_ENTRIES;
-			continue;
-		}
-
-		if (pgt->entries[idx_table])
-			pgt->entries[idx_table] = 0;
-
-		viraddr +=PAGE_SIZE;
-		i++;
-
-		tlb_flush_one_page(viraddr);
-	}
-
-	if (viraddr <= KERNEL_SPACE)
-		spinlock_unlock(&kslock);
-	else
-		spinlock_irqsave_unlock(&task->pgd_lock);
+		spinlock_irqsave_unlock(&task->page_lock);

	return 0;
}
@@ -538,10 +476,8 @@ int vm_free(size_t viraddr, uint32_t npages)
static void pagefault_handler(struct state *s)
{
	task_t* task = per_core(current_task);
-	//page_dir_t* pgd = task->pgd;
-	//page_table_t* pgt = NULL;
	size_t viraddr = read_cr2();
-	//size_t phyaddr;
+	size_t phyaddr;

#if 0
	if ((viraddr >= task->start_heap) && (viraddr <= task->end_heap) && (viraddr > KERNEL_SPACE)) {

@@ -549,24 +485,49 @@ static void pagefault_handler(struct state *s)

		phyaddr = get_page();
		if (BUILTIN_EXPECT(!phyaddr, 0))
-			goto default_handler;
+			goto oom;

		if (map_region(viraddr, phyaddr, 1, MAP_USER_SPACE) == viraddr) {
			memset((void*) viraddr, 0x00, PAGE_SIZE);
			return;
		}

		kprintf("Could not map 0x%x at 0x%x\n", phyaddr, viraddr);
		put_page(phyaddr);
	}
+	/*
+	 * handle missing paging structures for userspace
+	 * all kernel space paging structures have been initialized in entry64.asm
+	 */
+	else if (viraddr >= PAGE_PGT) {
+		kprintf("map_region: missing paging structure at: 0x%lx (%s)\n", viraddr, map_to_lvlname(viraddr));
+
+		phyaddr = get_page();
+		if (BUILTIN_EXPECT(!phyaddr, 0))
+			goto oom;
+
+		// TODO: initialize with zeros
+		// TODO: check that we are in userspace
+
+		// get pointer to parent page level entry
+		size_t *entry = (size_t *) ((int64_t) viraddr >> 9 & ~0x07);
+
+		// update entry
+		*entry = phyaddr|USER_TABLE;
+
+		return;
+	}
#endif

+	//default_handler:
	kprintf("PAGE FAULT: Task %u got page fault at %p (irq %llu, cs:rip 0x%llx:0x%llx)\n", task->id, viraddr, s->int_no, s->cs, s->rip);
	kprintf("Register state: rax = 0x%llx, rbx = 0x%llx, rcx = 0x%llx, rdx = 0x%llx, rdi = 0x%llx, rsi = 0x%llx, rbp = 0x%llx, rsp = 0x%llx\n",
		s->rax, s->rbx, s->rcx, s->rdx, s->rdi, s->rsi, s->rbp, s->rsp);

-	while(1);
+	irq_enable();
+	abort();

+oom:
+	kputs("map_region: out of memory\n");
+	irq_enable();
+	abort();
}
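The fault fixup above uses the inverse of the `<< 9` walk from copy_page_map(): shifting the faulting table address right by 9 (and clearing the low three bits) yields the virtual address of the entry in the parent level that should map it. A small hedged illustration (constants ours; the arithmetic right shift mirrors the patch's `(int64_t) viraddr >> 9` and keeps the address canonical):

    #include <stdint.h>
    #include <stdio.h>

    /* Assumes the recursive paging-structure window from this patch. */
    int main(void)
    {
    	int64_t table = (int64_t) 0xFFFFFFFFFFE2A000LL; // illustrative PDPT address
    	int64_t entry = (table >> 9) & ~0x07LL;         // its PML4 entry address

    	printf("table %#llx is mapped by entry %#llx\n",
    	       (unsigned long long) table, (unsigned long long) entry);
    	return 0;
    }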
@@ -575,15 +536,17 @@ int arch_paging_init(void)
{
	uint32_t i, npages;

-	// uninstall default handler and install our own
+	// replace default pagefault handler
	irq_uninstall_handler(14);
	irq_install_handler(14, pagefault_handler);

-	// kernel is already maped into the kernel space (see entry64.asm)
-	// this includes .data, .bss, .text, video memory and the multiboot structure
+	/*
+	 * In longmode the kernel is already maped into the kernel space (see entry64.asm)
+	 * this includes .data, .bss, .text, VGA, the multiboot & multiprocessing (APIC) structures
+	 */

#if MAX_CORES > 1
-	// Reserve page for smp boot code
+	// reserve page for smp boot code
	if (!map_region(SMP_SETUP_ADDR, SMP_SETUP_ADDR, 1, MAP_KERNEL_SPACE|MAP_NO_CACHE)) {
		kputs("could not reserve page for smp boot code\n");
		return -ENOMEM;

@@ -592,9 +555,7 @@ int arch_paging_init(void)

#ifdef CONFIG_MULTIBOOT
#if 0
-	/*
-	 * Map reserved memory regions into the kernel space
-	 */
+	// map reserved memory regions into the kernel space
	if (mb_info && (mb_info->flags & MULTIBOOT_INFO_MEM_MAP)) {
		multiboot_memory_map_t* mmap = (multiboot_memory_map_t*) mb_info->mmap_addr;
		multiboot_memory_map_t* mmap_end = (void*) ((size_t) mb_info->mmap_addr + mb_info->mmap_length);

@@ -613,7 +574,7 @@ int arch_paging_init(void)

	/*
	 * Modules like the init ram disk are already loaded.
-	 * Therefore, we map these moduels into the kernel space.
+	 * Therefore, we map these modules into the kernel space.
	 */
	if (mb_info && (mb_info->flags & MULTIBOOT_INFO_MODS)) {
		multiboot_module_t* mmodule = (multiboot_module_t*) ((size_t) mb_info->mods_addr);

@@ -634,13 +595,7 @@ int arch_paging_init(void)
	}
#endif

-	/* signalize that we are able to use paging */
-	paging_enabled = 1;
-
-	/*
-	 * we turned on paging
-	 * => now, we are able to register our task
-	 */
+	// we turned on paging => now, we are able to register our task
	register_task();

	// APIC registers into the kernel address space
@@ -70,7 +70,7 @@ static ssize_t socket_write(fildes_t* file, uint8_t* buffer, size_t size)
		return -ENOMEM;
	memcpy(tmp, buffer, size);
	ret = lwip_write(file->offset, tmp, size);
-	kfree(tmp, size);
+	kfree(tmp);
#endif
	if (ret < 0)
		ret = -errno;

@@ -147,7 +147,7 @@ int socket_init(vfs_node_t* node, const char* name)

	} while(blist);

-	kfree(new_node, sizeof(vfs_node_t));
+	kfree(new_node);

	return -ENOMEM;
}
@@ -86,11 +86,12 @@ static ssize_t stdio_write(fildes_t* file, uint8_t* buffer, size_t size)
	for (i = 0; i<size; i++, buffer++) {
#ifdef CONFIG_VGA
		vga_putchar(*buffer);
-#elif defined(CONFIG_UART)
-		uart_putchar(*buffer);
-#else
-		kputchar(*buffer);
#endif
+#ifdef CONFIG_UART
+		uart_putchar(*buffer);
+#endif
+
+		kputchar(*buffer);
	}

	file->offset += size;

@@ -152,7 +153,7 @@ int null_init(vfs_node_t* node, const char* name)

	} while(blist);

-	kfree(new_node, sizeof(vfs_node_t));
+	kfree(new_node);

	return -ENOMEM;
}

@@ -211,7 +212,7 @@ int stdin_init(vfs_node_t* node, const char* name)

	} while(blist);

-	kfree(new_node, sizeof(vfs_node_t));
+	kfree(new_node);

	return -ENOMEM;
}

@@ -270,7 +271,7 @@ int stdout_init(vfs_node_t* node, const char* name)

	} while(blist);

-	kfree(new_node, sizeof(vfs_node_t));
+	kfree(new_node);

	return -ENOMEM;
}

@@ -329,7 +330,7 @@ int stderr_init(vfs_node_t* node, const char* name)

	} while(blist);

-	kfree(new_node, sizeof(vfs_node_t));
+	kfree(new_node);

	return -ENOMEM;
}
21 fs/initrd.c
@@ -210,7 +210,7 @@ static int initrd_open(fildes_t* file, const char* name)
	if (file->node->type == FS_FILE) {
		if ((file->flags & O_CREAT) && (file->flags & O_EXCL))
			return -EEXIST;

		/* in the case of O_TRUNC kfree all the nodes */
		if (file->flags & O_TRUNC) {
			uint32_t i;

@@ -221,8 +221,7 @@ static int initrd_open(fildes_t* file, const char* name)
			/* the first blist pointer have do remain valid. */
			for(i=0; i<MAX_DATABLOCKS && !data; i++) {
				if (blist->data[i]) {
-					kfree(blist->data[i],
-						sizeof(data_block_t));
+					kfree(blist->data[i]);
				}
			}
			if (blist->next) {

@@ -234,12 +233,12 @@ static int initrd_open(fildes_t* file, const char* name)
				do {
					for(i=0; i<MAX_DATABLOCKS && !data; i++) {
						if (blist->data[i]) {
-							kfree(blist->data[i], sizeof(data_block_t));
+							kfree(blist->data[i]);
						}
					}
					lastblist = blist;
					blist = blist->next;
-					kfree(lastblist, sizeof(block_list_t));
+					kfree(lastblist);
				} while(blist);
			}

@@ -253,7 +252,7 @@ static int initrd_open(fildes_t* file, const char* name)
	/* opendir was called: */
	if (name[0] == '\0')
		return 0;

	/* open file was called: */
	if (!(file->flags & O_CREAT))
		return -ENOENT;

@@ -264,11 +263,11 @@ static int initrd_open(fildes_t* file, const char* name)
	vfs_node_t* new_node = kmalloc(sizeof(vfs_node_t));
	if (BUILTIN_EXPECT(!new_node, 0))
		return -EINVAL;

	blist = &file->node->block_list;
	dir_block_t* dir_block;
	dirent_t* dirent;

	memset(new_node, 0x00, sizeof(vfs_node_t));
	new_node->type = FS_FILE;
	new_node->read = &initrd_read;

@@ -286,7 +285,7 @@ static int initrd_open(fildes_t* file, const char* name)
			if (!dirent->vfs_node) {
				dirent->vfs_node = new_node;
				strncpy(dirent->name, (char*) name, MAX_FNAME);
-				goto exit_create_file; // there might be a better Solution ***************
+				goto exit_create_file; // TODO: there might be a better Solution
			}
		}
	}

@@ -425,9 +424,9 @@ static vfs_node_t* initrd_mkdir(vfs_node_t* node, const char* name)
		blist = blist->next;
	} while(blist);

-	kfree(dir_block, sizeof(dir_block_t));
+	kfree(dir_block);
out:
-	kfree(new_node, sizeof(vfs_node_t));
+	kfree(new_node);

	return NULL;
}
@@ -34,14 +34,14 @@ extern "C" {
#define PAGE_SHIFT 12
#define CACHE_LINE 64
#define MAILBOX_SIZE 32
-#define TIMER_FREQ 100 /* in HZ */
-#define CLOCK_TICK_RATE 1193182 /* 8254 chip's internal oscillator frequency */
+#define TIMER_FREQ 100 // in HZ
+#define CLOCK_TICK_RATE 1193182 // 8254 chip's internal oscillator frequency
#define INT_SYSCALL 0x80
#define KERNEL_SPACE (1*1024*1024*1024)
-#define VIDEO_MEM_ADDR 0xB8000 // the video memora address
+#define VIDEO_MEM_ADDR 0xB8000 // the video memory address
#define SMP_SETUP_ADDR 0x07000

-#define BYTE_ORDER LITTLE_ENDIAN
+#define UART_PORT 0x3F8 // 0x2F8 for SCC
+#define BYTE_ORDER LITTLE_ENDIAN

/*
 * address space / (page_size * sizeof(uint8_t))

@@ -52,7 +52,7 @@ extern "C" {
#define CONFIG_PCI
#define CONFIG_LWIP
#define CONFIG_VGA
-//#define CONFIG_UART
+#define CONFIG_UART
#define CONFIG_KEYBOARD
#define CONFIG_MULTIBOOT
//#define CONFIG_ROCKCREEK

@@ -72,7 +72,7 @@ extern "C" {
//#define SHMADD
#define SHMDBG
//#define SHMADD_CACHEABLE
-#define SCC_BOOTINFO	0x80000
+#define SCC_BOOTINFO 0x80000

#define BUILTIN_EXPECT(exp, b) __builtin_expect((exp), (b))
//#define BUILTIN_EXPECT(exp, b) (exp)
72 include/metalsvm/malloc.h (new file)
@@ -0,0 +1,72 @@
/*
 * Copyright 2010 Steffen Vogel, Chair for Operating Systems,
 *                RWTH Aachen University
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * This file is part of MetalSVM.
 */

#ifndef __MALLOC_H__
#define __MALLOC_H__

#include <metalsvm/stddef.h>

#ifdef __cplusplus
extern "C" {
#endif

/// Binary exponent of maximal size for kmalloc()
#define BUDDY_MAX 32 // 4 GB
/// Binary exponent of minimal buddy size
#define BUDDY_MIN 4 // 16 Byte >= sizeof(buddy_prefix_t)
/// Binary exponent of the size which we allocate at least in one call to buddy_fill();
#define BUDDY_ALLOC 17 // 128 KByte >= PAGE_SHIFT, TODO: add Huge Page support?

#define BUDDY_LISTS (BUDDY_MAX-BUDDY_MIN+1)
#define BUDDY_MAGIC 0xBABE

union buddy;

/** @brief Buddy
 *
 * Every free memory block is stored in a linked list according to its size.
 * We can use this free memory to store this buddy_t union, which represents
 * the block (the buddy_t union is aligned to the front).
 * Therefore the address of the buddy_t union is equal to the address
 * of the underlying free memory block.
 *
 * Every allocated memory block is prefixed with its binary size exponent and
 * a known magic number. This prefix is hidden from the user because it is located
 * before the actual memory address returned by kmalloc()
 */
typedef union buddy {
	/// Pointer to the next buddy in the linked list.
	union buddy* next;
	struct {
		/// The binary exponent of the block size
		uint8_t exponent;
		/// Must be equal to BUDDY_MAGIC for a valid memory block
		uint16_t magic;
	} prefix;
} buddy_t;

/** @brief Dump free buddies */
void buddy_dump(void);

#ifdef __cplusplus
}
#endif

#endif
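To make the prefix scheme concrete: an allocator built on this union hands out `addr = block + sizeof(prefix)` and can validate a pointer on free by stepping back over the prefix. A hedged sketch of that lookup (the helper name and layout details are ours; the actual kmalloc()/kfree() implementation lives in the corresponding .c file, not in this header):

    #include <metalsvm/malloc.h>

    /* Sketch: recover the hidden prefix in front of a kmalloc()-style block.
     * Returns the binary size exponent, or -1 if the magic does not match. */
    static int buddy_exponent_of(void* addr)
    {
    	buddy_t* buddy = (buddy_t*) ((char*) addr - sizeof(buddy->prefix));

    	if (buddy->prefix.magic != BUDDY_MAGIC)
    		return -1;	// not a valid buddy block

    	return buddy->prefix.exponent;	// block size is 1 << exponent
    }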
@@ -31,7 +31,6 @@
#include <metalsvm/stddef.h>
#include <asm/atomic.h>
-//#include <asm/mmu.h>

#ifdef __cplusplus
extern "C" {
@@ -50,33 +49,39 @@ extern atomic_int32_t total_available_pages;
 */
int mmu_init(void);

-/** @brief get continuous pages
+/** @brief Get continuous pages
 *
- * This function finds a continuous page region (first fit algorithm)
- *
- * @param no_pages Desired number of pages
+ * Use first fit algorithm to find a suitable, continous physical memory region
+ *
+ * @param npages Desired number of pages
 * @return
 * - physical address on success
 * - 0 on failure
 */
-size_t get_pages(uint32_t no_pages);
+size_t get_pages(uint32_t npages);

-/** @brief get a single page
+/** @brief Get a single page
 *
 * Convenience function: uses get_pages(1);
 */
static inline size_t get_page(void) { return get_pages(1); }

-/** @brief Put back a page after use
+/** @brief Put back a sequence of continous pages
 *
- * @param phyaddr Physical address to put back
+ * @param phyaddr Physical address of the first page
+ * @param npages Number of pages
 *
 * @return
 * - 0 on success
 * - -EINVAL (-22) on failure
 */
-int put_page(size_t phyaddr);
+int put_pages(size_t phyaddr, size_t npages);

+/** @brief Put a single page
+ *
+ * Convenience function: uses put_pages(1);
+ */
+static inline int put_page(size_t phyaddr) { return put_pages(phyaddr, 1); }

#ifdef __cplusplus
}
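A short usage sketch of the reworked physical-page interface (error handling trimmed; the call pattern follows the declarations above):

    #include <metalsvm/mmu.h>

    /* Sketch: grab four physically continuous pages, use them, return them. */
    void pages_roundtrip(void)
    {
    	size_t phyaddr = get_pages(4);	// first-fit search, 0 on failure
    	if (!phyaddr)
    		return;

    	/* ... map and use the region ... */

    	put_pages(phyaddr, 4);		// release all four pages again
    }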
@@ -29,10 +29,7 @@
#include <metalsvm/stddef.h>
#include <asm/page.h>

-/**
- * Sets up the environment, page directories etc and
- * enables paging.
- */
+/** @brief Sets up the environment, page directories etc and enables paging. */
static inline int paging_init(void) { return arch_paging_init(); }

#endif
@@ -28,14 +28,10 @@ extern "C" {
extern "C" {
#endif

-#define NULL		((void*) 0)
+#define NULL ((void*) 0)

typedef unsigned int tid_t;

-#define PAGE_SIZE (1 << PAGE_SHIFT)
-#define PAGE_MASK ~(PAGE_SIZE - 1)
-#define PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) & PAGE_MASK)
-
#if MAX_CORES == 1
#define per_core(name) name
#define DECLARE_PER_CORE(type, name) extern type name;

@@ -66,10 +62,10 @@ typedef unsigned int tid_t;
	irq_nested_enable(flags);\
	return ret; \
}
-#define CORE_ID	smp_id()
+#define CORE_ID smp_id()
#endif

-/* needed to find the task, which is currently running on this core */
+// needed to find the task, which is currently running on this core
struct task;
DECLARE_PER_CORE(struct task*, current_task);
@@ -29,9 +29,7 @@
#ifndef __STDLIB_H__
#define __STDLIB_H__

#include <metalsvm/config.h>
-#include <metalsvm/tasks_types.h>
-#include <asm/stddef.h>
+#include <metalsvm/stddef.h>

#ifdef __cplusplus
extern "C" {
@@ -55,46 +53,42 @@

void NORETURN abort(void);

-/** @brief Kernel's memory allocator function.
+/** @brief General page allocator function
 *
- * This will just call mem_allocation with
- * the flags MAP_KERNEL_SPACE and MAP_HEAP.
- *
- * @return Pointer to the new memory range
- */
-void* kmalloc(size_t);
-
-/** @brief Kernel's more general memory allocator function.
- *
- * This function lets you choose flags for the newly allocated memory.
+ * This function allocates and maps whole pages.
+ * To avoid fragmentation you should use kmalloc() and kfree()!
 *
 * @param sz Desired size of the new memory
- * @param flags Flags to specify
+ * @param flags Flags to for map_region(), vma_add()
 *
 * @return Pointer to the new memory range
 */
-void* mem_allocation(size_t sz, uint32_t flags);
+void* palloc(size_t sz, uint32_t flags);

-/** @brief Free memory
+/** @brief Free general kernel memory
 *
- * The kernel malloc doesn't track how
- * much memory was allocated for which pointer,
+ * The pmalloc() doesn't track how much memory was allocated for which pointer,
 * so you have to specify how much memory shall be freed.
- */
-void kfree(void*, size_t);
-
-/** @brief Create a new stack for a new task
 *
- * @return start address of the new stack
+ * @param sz The size which should freed
 */
-void* create_stack(void);
+void pfree(void* addr, size_t sz);

-/** @brief Delete stack of a finished task
+/** @brief The memory allocator function
 *
- * @param addr Pointer to the stack
- * @return 0 on success
+ * This allocator uses a buddy system to manage free memory.
+ *
+ * @return Pointer to the new memory range
 */
-int destroy_stack(task_t* addr);
+void* kmalloc(size_t sz);

+/** @brief The memory free function
+ *
+ * Releases memory allocated by malloc()
+ *
+ * @param addr The address to the memory block allocated by malloc()
+ */
+void kfree(void* addr);

/** @brief String to long
 *
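The split is: palloc()/pfree() deal in whole mapped pages and need the size on free, while the buddy-based kmalloc()/kfree() track block sizes via the hidden prefix declared in malloc.h. A hedged usage sketch (assumes the usual kernel headers; flag choice follows the doc comments above):

    #include <metalsvm/stdlib.h>

    /* Sketch: the two allocator levels side by side. */
    void allocator_demo(void)
    {
    	// page-granular: the caller must remember the size for pfree()
    	void* pages = palloc(2*PAGE_SIZE, MAP_KERNEL_SPACE);
    	if (pages)
    		pfree(pages, 2*PAGE_SIZE);

    	// byte-granular: the buddy allocator stores the size itself
    	void* buf = kmalloc(100);
    	if (buf)
    		kfree(buf);
    }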
@ -113,7 +107,7 @@ unsigned long strtoul(const char* nptr, char** endptr, int base);
|
|||
*/
|
||||
static inline int atoi(const char *str)
|
||||
{
|
||||
return (int)strtol(str, (char **)NULL, 10);
|
||||
return (int)strtol(str, (char **) NULL, 10);
|
||||
}
|
||||
|
||||
#ifdef __cplusplus
|
||||
|
|
|
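Taken together, the header now exposes two allocation layers. A short usage sketch (not part of the header) of how they are meant to be combined:

	/* page-granular: the caller has to remember the size for pfree() */
	void* pages = palloc(3*PAGE_SIZE, MAP_KERNEL_SPACE);
	pfree(pages, 3*PAGE_SIZE);

	/* byte-granular: the buddy allocator stores the size in a hidden
	 * prefix, which is why kfree() lost its size parameter */
	char* buf = kmalloc(42);
	kfree(buf);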
@@ -147,9 +147,7 @@ tid_t wait(int32_t* result);
  */
 void update_load(void);

-/** @brief Print the current cpu load
- *
- */
+/** @brief Print the current cpu load */
 void dump_load(void);

 #if MAX_CORES > 1

@@ -201,9 +199,7 @@ int block_current_task(void);
  */
 int set_timer(uint64_t deadline);

-/** @brief Check if a timer is expired
- *
- */
+/** @brief Check if a timer is expired */
 void check_timers(void);

 /** @brief Abort current task */
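A hypothetical use of the deadline API above; get_clock_tick() and TIMER_FREQ are assumptions, not quoted from this header:

	/* block until roughly three seconds from now (names are assumptions) */
	set_timer(get_clock_tick() + 3*TIMER_FREQ);
	reschedule();

	/* the timer interrupt then periodically calls check_timers(),
	 * which wakes every task whose deadline has passed */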
@@ -62,7 +62,7 @@ extern "C" {
 #define TASK_L2 (1 << 3)

 typedef int (*entry_point_t)(void*);
-struct page_dir;
+typedef struct page_map page_map_t;

 /** @brief The task_t structure */
 typedef struct task {

@@ -88,10 +88,10 @@ typedef struct task {
 	uint32_t last_core;
 	/// usage in number of pages
 	atomic_int32_t user_usage;
-	/// avoids concurrent access to the page directory
-	spinlock_irqsave_t pgd_lock;
-	/// pointer to the page directory
-	struct page_dir* pgd;
+	/// avoids concurrent access to the page map structures
+	spinlock_irqsave_t page_lock;
+	/// pointer to page directory (32bit) or page map level 4 (64bit) table respectively
+	page_map_t* page_map;
 	/// lock for the VMA_list
 	spinlock_t vma_lock;
 	/// list of VMAs
@@ -27,56 +27,102 @@
 #define __VMA_H__

 #include <metalsvm/stddef.h>
 #include <asm/page.h>

 #ifdef __cplusplus
 extern "C" {
 #endif

 /// Read access to this VMA is allowed
 #define VMA_READ (1 << 0)
 /// Write access to this VMA is allowed
 #define VMA_WRITE (1 << 1)
 /// Instruction fetches in this VMA are allowed
 #define VMA_EXECUTE (1 << 2)
 /// This VMA is cacheable
 #define VMA_CACHEABLE (1 << 3)
-#define VMA_NOACCESS (1 << 4)
+/// This VMA is not accessible
+#define VMA_NO_ACCESS (1 << 4)
+/// This VMA should be part of the userspace
+#define VMA_USER (1 << 5)
+/// A collection of flags used for the kernel heap (kmalloc)
+#define VMA_HEAP (VMA_READ|VMA_WRITE|VMA_CACHEABLE)

+// boundaries for VAS allocation
+extern const void kernel_end;
+//#define VMA_KERN_MIN (((size_t) &kernel_end + PAGE_SIZE) & PAGE_MASK)
+#define VMA_KERN_MAX KERNEL_SPACE
+#define VMA_USER_MAX (1UL << 47) // TODO

 struct vma;

-/** @brief VMA structure definition */
+/** @brief VMA structure definition
+ *
+ * Each item in this linked list marks a used part of the virtual address space.
+ * It is used by vma_alloc() to find holes between the areas.
+ */
 typedef struct vma {
 	/// Start address of the memory area
 	size_t start;
 	/// End address of the memory area
 	size_t end;
 	/// Type flags field
-	uint32_t type;
+	uint32_t flags;
 	/// Pointer to the next VMA element in the list
 	struct vma* next;
 	/// Pointer to the previous VMA element in the list
 	struct vma* prev;
 } vma_t;

-/** @brief Add a new virtual memory region to the list of VMAs
+/** @brief Add a new virtual memory area to the list of VMAs
  *
- * @param task Pointer to the task_t structure of the task
- * @param start Start address of the new region
- * @param end End address of the new region
- * @param type Type flags the new region shall have
+ * @param start Start address of the new area
+ * @param end End address of the new area
+ * @param flags Type flags the new area shall have
  *
  * @return
  * - 0 on success
  * - -EINVAL (-22) or -ENOMEM (-12) on failure
  */
-int vma_add(struct task* task, size_t start, size_t end, uint32_t type);
+int vma_add(size_t start, size_t end, uint32_t flags);

+/** @brief Search for a free memory area
+ *
+ * @param size Size of the requested VMA in bytes
+ * @param flags Type flags the new area shall have
+ * @return
+ * - 0 on failure
+ * - the start address of a free area otherwise
+ */
+size_t vma_alloc(size_t size, uint32_t flags);

+/** @brief Free an allocated memory area
+ *
+ * @param start Start address of the area to be freed
+ * @param end End address of the area to be freed
+ * @return
+ * - 0 on success
+ * - -EINVAL (-22) on failure
+ */
+int vma_free(size_t start, size_t end);

-/** @brief Dump information about this task's VMAs into the terminal.
- *
- * This will print out start, end and flags for each VMA in the task's list
- *
- * @param task The task's task_t structure
- * @return 0 on success
- */
-int vma_dump(struct task* task);

+/** @brief Free all virtual memory areas
+ *
+ * @return
+ * - 0 on success
+ */
+int drop_vma_list();

+/** @brief Copy the VMA list of the current task to task
+ *
+ * @param task The task where the list should be copied to
+ * @return
+ * - 0 on success
+ */
+int copy_vma_list(struct task* task);

+/** @brief Dump information about this task's VMAs into the terminal. */
+void vma_dump();

 #ifdef __cplusplus
 }
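A sketch (not part of the header) showing how the new kernel-space interface is intended to be used; VMA_HEAP bundles the read/write/cacheable bits defined above:

	size_t addr = vma_alloc(4*PAGE_SIZE, VMA_HEAP); /* reserve address space */
	if (addr)
		vma_free(addr, addr + 4*PAGE_SIZE);     /* and give it back */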
@@ -63,7 +63,7 @@ extern const void bss_end;
 int lowlevel_init(void)
 {
 	// initialize the .bss section
-	memset((void*)&bss_start, 0x00, ((size_t) &bss_end - (size_t) &bss_start));
+	memset(&bss_start, 0x00, (char*) &bss_end - (char*) &bss_start);

 	koutput_init();
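The new memset() call relies on the usual linker-symbol idiom: bss_start and bss_end carry no storage of their own, only their addresses matter. A small illustration (an assumption, mirroring the kernel's declarations):

	extern const void bss_start; /* placed by the linker script */
	extern const void bss_end;

	/* the section size is the difference of the two addresses;
	 * casting to char* makes the subtraction byte-granular */
	size_t bss_size = (char*) &bss_end - (char*) &bss_start;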
@@ -29,6 +29,7 @@
 #include <metalsvm/fs.h>
 #include <asm/irq.h>
 #include <asm/irqflags.h>
 #include <asm/page.h>
 #include <asm/kb.h>
 #ifdef CONFIG_ROCKCREEK
 #include <asm/icc.h>

@@ -73,6 +74,7 @@ int main(void)
 	kprintf("This is MetalSVM %s Build %u, %u\n",
 		METALSVM_VERSION, &__BUILD_DATE, &__BUILD_TIME);
 	popbg();
 	system_init();
 	irq_init();
 	timer_init();

@@ -85,7 +87,7 @@ int main(void)
 	icc_init();
 	svm_init();
 #endif
 	initrd_init();

 	irq_enable();

@@ -101,7 +103,7 @@ int main(void)
 	disable_timer_irq();
 #endif

-	sleep(5);
+	sleep(2);
 	create_kernel_task(&id, initd, NULL, NORMAL_PRIO);
 	kprintf("Create initd with id %u\n", id);
 	reschedule();
@@ -105,11 +105,11 @@ static int sys_open(const char* name, int flags, int mode)
 	/* file doesn't exist! */
 	if (check < 0) {
 		/* tidy up the file descriptor */
-		kfree(curr_task->fildes_table[fd], sizeof(fildes_t));
+		kfree(curr_task->fildes_table[fd]);
 		curr_task->fildes_table[fd] = NULL;
 		return check;
 	}

 	return fd;
 }

@@ -196,7 +196,7 @@ static int sys_socket(int domain, int type, int protocol)
 	/* file doesn't exist! */
 	if (curr_task->fildes_table[fd]->node == NULL) {
 		/* tidy up the file descriptor */
-		kfree(curr_task->fildes_table[fd], sizeof(fildes_t));
+		kfree(curr_task->fildes_table[fd]);
 		curr_task->fildes_table[fd] = NULL;
 		return -ENOENT;
 	}

@@ -240,7 +240,7 @@ static int sys_accept(int s, struct sockaddr* addr, socklen_t* addrlen)
 	/* file doesn't exist! */
 	if (curr_task->fildes_table[fd]->node == NULL) {
 		/* tidy up the file descriptor */
-		kfree(curr_task->fildes_table[fd], sizeof(fildes_t));
+		kfree(curr_task->fildes_table[fd]);
 		curr_task->fildes_table[fd] = NULL;
 		return -ENOENT;
 	}

@@ -273,7 +273,7 @@ static int sys_close(int fd)
 		/* close command failed -> return check = errno */
 		if (BUILTIN_EXPECT(check < 0, 0))
 			return check;
-		kfree(curr_task->fildes_table[fd], sizeof(fildes_t));
+		kfree(curr_task->fildes_table[fd]);
 		curr_task->fildes_table[fd] = NULL;
 	} else {
 		curr_task->fildes_table[fd]->count--;

@@ -356,7 +356,7 @@ static int sys_dup(int fd)
 	 * free the memory which was allocated in get_fildes(),
 	 * because we will link it to another existing memory space
 	 */
-	kfree(curr_task->fildes_table[new_fd], sizeof(fildes_t));
+	kfree(curr_task->fildes_table[new_fd]);

 	/* and link it to another existing memory space */
 	curr_task->fildes_table[new_fd] = curr_task->fildes_table[fd];

@@ -403,6 +403,7 @@ static int sys_sbrk(int incr)

 	spinlock_lock(&task->vma_lock);

+	// search the VMA containing the heap
 	tmp = task->vma_list;
 	while(tmp && !((task->end_heap >= tmp->start) && (task->end_heap <= tmp->end)))
 		tmp = tmp->next;

@@ -411,11 +412,16 @@ static int sys_sbrk(int incr)
 	task->end_heap += incr;
 	if (task->end_heap < task->start_heap)
 		task->end_heap = task->start_heap;

+	// resize the virtual memory area
+	if (tmp && (tmp->end <= task->end_heap))
+		tmp->end = task->end_heap;
+
+	// the allocation and mapping of new pages for the heap
+	// is caught by the page fault handler

 	//kprintf("sys_sbrk: tid=%d, start_heap=%8x, end_heap=%8x, incr=%4x\n", task->id, task->start_heap, task->end_heap, incr);

 	spinlock_unlock(&task->vma_lock);

 	return ret;
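To make the new sys_sbrk() behavior concrete, a worked illustration with made-up addresses (not from the patch):

	/* the old break is returned, the heap VMA is merely stretched */
	ret = task->end_heap;     /* e.g. 0x40001000 */
	task->end_heap += 0x2000; /* new break: 0x40003000 */
	/* no page is mapped here; the first access to the new range
	 * traps into the page fault handler, which maps it on demand */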
kernel/tasks.c (148 lines changed)

@@ -78,6 +78,7 @@ DEFINE_PER_CORE(task_t*, current_task, task_table+0);
 extern const void boot_stack;

 /** @brief Helper function for the assembly code to determine the current task
  *
  * @return Pointer to the task_t structure of the current task
  */
 task_t* get_current_task(void) {

@@ -96,6 +97,32 @@ uint32_t get_highest_priority(void)
 	return msb(runqueues[CORE_ID].prio_bitmap);
 }

+/** @brief Create a new stack for a new task
+ *
+ * @return start address of the new stack
+ */
+static void* create_stack(void)
+{
+	return palloc(KERNEL_STACK_SIZE, MAP_KERNEL_SPACE);
+}
+
+/** @brief Delete the stack of a finished task
+ *
+ * @param task Pointer to the task
+ * @return
+ * - 0 on success
+ * - -EINVAL on failure
+ */
+static int destroy_stack(task_t* task)
+{
+	if (BUILTIN_EXPECT(!task || !task->stack, 0))
+		return -EINVAL;
+
+	pfree(task->stack, KERNEL_STACK_SIZE);
+
+	return 0;
+}
+
 int multitasking_init(void) {
 	if (BUILTIN_EXPECT(task_table[0].status != TASK_IDLE, 0)) {
 		kputs("Task 0 is not an idle task\n");

@@ -104,7 +131,7 @@ int multitasking_init(void) {
 	mailbox_wait_msg_init(&task_table[0].inbox);
 	memset(task_table[0].outbox, 0x00, sizeof(mailbox_wait_msg_t*)*MAX_TASKS);
-	task_table[0].pgd = get_boot_pgd();
+	task_table[0].page_map = get_boot_page_map();
 	task_table[0].flags = TASK_DEFAULT_FLAGS;
 	task_table[0].prio = IDLE_PRIO;
 	task_table[0].stack = (void*) &boot_stack;

@@ -128,7 +155,7 @@ size_t get_idle_task(uint32_t id)
 	atomic_int32_set(&task_table[id].user_usage, 0);
 	mailbox_wait_msg_init(&task_table[id].inbox);
 	memset(task_table[id].outbox, 0x00, sizeof(mailbox_wait_msg_t*)*MAX_TASKS);
-	task_table[id].pgd = get_boot_pgd();
+	task_table[id].page_map = get_boot_page_map();
 	current_task[id].var = task_table+id;
 	runqueues[id].idle = task_table+id;

@@ -193,10 +220,8 @@ static void wakeup_blocked_tasks(int result)
 	spinlock_irqsave_unlock(&table_lock);
 }

-/** @brief A procedure to be called by
- * procedures which are called by exiting tasks. */
+/** @brief A procedure to be called by procedures which are called by exiting tasks. */
 static void NORETURN do_exit(int arg) {
-	vma_t* tmp;
 	task_t* curr_task = per_core(current_task);
 	uint32_t flags, core_id, fd, status;

@@ -204,17 +229,17 @@ static void NORETURN do_exit(int arg) {
 	for (fd = 0; fd < NR_OPEN; fd++) {
 		if(curr_task->fildes_table[fd] != NULL) {
-			/*
-			 * delete a descriptor from the per-process object
-			 * reference table. If this is not the last reference to the underlying
-			 * object, the object will be ignored.
-			 */
+			/*
+			 * Delete a descriptor from the per-process object
+			 * reference table. If this is not the last reference to the underlying
+			 * object, the object will be ignored.
+			 */
 			if (curr_task->fildes_table[fd]->count == 1) {
-				/* try to close the file */
+				// try to close the file
 				status = close_fs(curr_task->fildes_table[fd]);
-				/* close command failed -> return check = errno */
+				// close command failed -> return check = errno
 				if (BUILTIN_EXPECT(status < 0, 0))
 					kprintf("Task %u was not able to close file descriptor %i. close_fs returned %d", curr_task->id, fd, -status);
-				kfree(curr_task->fildes_table[fd], sizeof(fildes_t));
+				kfree(curr_task->fildes_table[fd]);
 				curr_task->fildes_table[fd] = NULL;
 			} else {
 				curr_task->fildes_table[fd]->count--;

@@ -223,31 +248,20 @@ static void NORETURN do_exit(int arg) {
 		}
 	}
 	// finally, the table has to be cleared
-	kfree(curr_task->fildes_table, sizeof(filp_t)*NR_OPEN);
+	kfree(curr_task->fildes_table);
 	}

 	kprintf("Terminate task: %u, return value %d\n", curr_task->id, arg);

 	wakeup_blocked_tasks(arg);

-	//vma_dump(curr_task);
-	spinlock_lock(&curr_task->vma_lock);
-
-	// remove memory regions
-	while((tmp = curr_task->vma_list) != NULL) {
-		kfree((void*) tmp->start, tmp->end - tmp->start + 1);
-		curr_task->vma_list = tmp->next;
-		kfree((void*) tmp, sizeof(vma_t));
-	}
-
-	spinlock_unlock(&curr_task->vma_lock);
-
-	drop_pgd(); // delete page directory and its page tables
+	drop_vma_list(); // kfree the virtual memory areas and the vma_list
+	drop_page_map(); // delete the page directory and its page tables

 #if 0
 	if (atomic_int32_read(&curr_task->user_usage))
 		kprintf("Memory leak! Task %d did not release %d pages\n",
-		        curr_task->id, atomic_int32_read(&curr_task->user_usage));
+			curr_task->id, atomic_int32_read(&curr_task->user_usage));
 #endif
 	curr_task->status = TASK_FINISHED;

@@ -262,9 +276,7 @@ static void NORETURN do_exit(int arg) {
 	reschedule();

 	kprintf("Kernel panic: scheduler on core %d found no valid task\n", CORE_ID);
-	while(1) {
-		HALT;
-	}
+	while(1) HALT;
 }

 /** @brief A procedure to be called by kernel tasks */

@@ -327,10 +339,10 @@ static int create_task(tid_t* id, entry_point_t ep, void* arg, uint8_t prio, uint32_t core_id)
 	if (task_table[i].status == TASK_INVALID) {
 		atomic_int32_set(&task_table[i].user_usage, 0);

-		ret = create_pgd(task_table+i, 0);
+		ret = create_page_map(task_table+i, 0);
 		if (ret < 0) {
 			ret = -ENOMEM;
-			goto create_task_out;
+			goto out;
 		}

 		task_table[i].id = i;

@@ -376,7 +388,7 @@ static int create_task(tid_t* id, entry_point_t ep, void* arg, uint8_t prio, uint32_t core_id)
 	}
 	}

-create_task_out:
+out:
 	spinlock_irqsave_unlock(&table_lock);

 	return ret;

@@ -387,11 +399,7 @@ int sys_fork(void)
 	int ret = -ENOMEM;
 	unsigned int i, core_id, fd_i;
 	task_t* parent_task = per_core(current_task);
-	vma_t** child;
-	vma_t* parent;
-	vma_t* tmp;

 	spinlock_lock(&parent_task->vma_lock);
 	spinlock_irqsave_lock(&table_lock);

 	core_id = CORE_ID;

@@ -400,46 +408,29 @@ int sys_fork(void)
 	if (task_table[i].status == TASK_INVALID) {
 		atomic_int32_set(&task_table[i].user_usage, 0);

-		ret = create_pgd(task_table+i, 1);
+		ret = create_page_map(task_table+i, 1);
 		if (ret < 0) {
 			ret = -ENOMEM;
-			goto create_task_out;
+			goto out;
 		}

+		ret = copy_vma_list(child_task);
+		if (BUILTIN_EXPECT(!ret, 0)) {
+			ret = -ENOMEM;
+			goto out;
+		}
+
 		task_table[i].id = i;
 		task_table[i].last_stack_pointer = NULL;
 		task_table[i].stack = create_stack();

-		spinlock_init(&task_table[i].vma_lock);
-
-		// copy the VMA list
-		child = &task_table[i].vma_list;
-		parent = parent_task->vma_list;
-		tmp = NULL;
-
-		while(parent) {
-			*child = (vma_t*) kmalloc(sizeof(vma_t));
-			if (BUILTIN_EXPECT(!child, 0))
-				break;
-
-			(*child)->start = parent->start;
-			(*child)->end = parent->end;
-			(*child)->type = parent->type;
-			(*child)->prev = tmp;
-			(*child)->next = NULL;
-
-			parent = parent->next;
-			tmp = *child;
-			child = &((*child)->next);
-		}
-
-		/* init fildes_table */
+		// init the fildes_table
 		task_table[i].fildes_table = kmalloc(sizeof(filp_t)*NR_OPEN);
 		memcpy(task_table[i].fildes_table, parent_task->fildes_table, sizeof(filp_t)*NR_OPEN);
-		for (fd_i = 0; fd_i < NR_OPEN; fd_i++)
+		for (fd_i = 0; fd_i < NR_OPEN; fd_i++) {
 			if ((task_table[i].fildes_table[fd_i]) != NULL)
 				task_table[i].fildes_table[fd_i]->count++;
+		}

 		mailbox_wait_msg_init(&task_table[i].inbox);
 		memset(task_table[i].outbox, 0x00, sizeof(mailbox_wait_msg_t*)*MAX_TASKS);

@@ -487,9 +478,8 @@ int sys_fork(void)
 	}
 	}

-create_task_out:
+out:
 	spinlock_irqsave_unlock(&table_lock);
 	spinlock_unlock(&parent_task->vma_lock);

 	return ret;
 }

@@ -515,7 +505,7 @@ static int kernel_entry(void* args)

 	ret = kernel_args->func(kernel_args->args);

-	kfree(kernel_args, sizeof(kernel_args_t));
+	kfree(kernel_args);

 	return ret;
 }

@@ -679,7 +669,7 @@ static int load_task(load_args_t* largs)
 	flags |= VMA_WRITE;
 	if (prog_header.flags & PF_X)
 		flags |= VMA_EXECUTE;
-	vma_add(curr_task, prog_header.virt_addr, prog_header.virt_addr+npages*PAGE_SIZE-1, flags);
+	vma_add(prog_header.virt_addr, prog_header.virt_addr+npages*PAGE_SIZE-1, flags);

 	if (!(prog_header.flags & PF_W))
 		change_page_permissions(prog_header.virt_addr, prog_header.virt_addr+npages*PAGE_SIZE-1, flags);

@@ -708,7 +698,7 @@ static int load_task(load_args_t* largs)
 	flags |= VMA_WRITE;
 	if (prog_header.flags & PF_X)
 		flags |= VMA_EXECUTE;
-	vma_add(curr_task, stack, stack+npages*PAGE_SIZE-1, flags);
+	vma_add(stack, stack+npages*PAGE_SIZE-1, flags);
 	break;
 	}
 }

@@ -774,7 +764,7 @@ static int load_task(load_args_t* largs)
 	offset -= sizeof(int);
 	*((int*) (stack+offset)) = largs->argc;

-	kfree(largs, sizeof(load_args_t));
+	kfree(largs);

 	// clear the fpu state
 	curr_task->flags &= ~(TASK_FPU_USED|TASK_FPU_INIT);

@@ -806,7 +796,7 @@ static int user_entry(void* arg)

 	ret = load_task((load_args_t*) arg);

-	kfree(arg, sizeof(load_args_t));
+	kfree(arg);

 	return ret;
 }

@@ -871,13 +861,11 @@ int create_user_task_on_core(tid_t* id, const char* fname, char** argv, uint32_t core_id)
 int sys_execve(const char* fname, char** argv, char** env)
 {
 	vfs_node_t* node;
-	vma_t* tmp;
 	size_t i, buffer_size = 0;
 	load_args_t* load_args = NULL;
 	char *dest, *src;
 	int ret, argc = 0;
 	int envc = 0;
 	task_t* curr_task = per_core(current_task);

 	node = findnode_fs((char*) fname);
 	if (!node || !(node->type == FS_FILE))

@@ -920,16 +908,8 @@ int sys_execve(const char* fname, char** argv, char** env)
 		while ((*dest++ = *src++) != 0);
 	}

-	spinlock_lock(&curr_task->vma_lock);
-
-	// remove the old program
-	while((tmp = curr_task->vma_list) != NULL) {
-		kfree((void*) tmp->start, tmp->end - tmp->start + 1);
-		curr_task->vma_list = tmp->next;
-		kfree((void*) tmp, sizeof(vma_t));
-	}
-
-	spinlock_unlock(&curr_task->vma_lock);
+	drop_vma_list();

 	/*
 	 * we use a trap gate to enter the kernel

@@ -940,7 +920,7 @@ int sys_execve(const char* fname, char** argv, char** env)

 	ret = load_task(load_args);

-	kfree(load_args, sizeof(load_args_t));
+	kfree(load_args);

 	return ret;
 }
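The two helpers moved into tasks.c pair up with the new page allocator. A usage sketch (illustration only):

	task->stack = create_stack();  /* palloc(KERNEL_STACK_SIZE, MAP_KERNEL_SPACE) */
	if (BUILTIN_EXPECT(!task->stack, 0))
		return -ENOMEM;
	/* ... task runs ... */
	destroy_stack(task);           /* pfree(task->stack, KERNEL_STACK_SIZE) */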
@@ -34,13 +34,7 @@
 #define VGA_EARLY_PRINT 1
 #define UART_EARLY_PRINT 2

-#ifdef CONFIG_VGA
-static uint32_t early_print = VGA_EARLY_PRINT;
-#elif defined(CONFIG_UART)
-static uint32_t early_print = UART_EARLY_PRINT;
-#else
 static uint32_t early_print = NO_EARLY_PRINT;
-#endif
 static spinlock_irqsave_t olock = SPINLOCK_IRQSAVE_INIT;
 static atomic_int32_t kmsg_counter = ATOMIC_INIT(0);
 static unsigned char kmessages[KMSG_SIZE] __attribute__ ((section(".kmsg"))) = {[0 ... KMSG_SIZE-1] = 0x00};

@@ -136,7 +130,7 @@ int kmsg_init(vfs_node_t * node, const char *name)
 	}
 	} while (blist);

-	kfree(new_node, sizeof(vfs_node_t));
+	kfree(new_node);

 	return -ENOMEM;
 }

@@ -145,6 +139,10 @@ int koutput_init(void)
 {
 #ifdef CONFIG_VGA
 	vga_init();
+	early_print |= VGA_EARLY_PRINT;
 #endif
+#ifdef CONFIG_UART
+	early_print |= UART_EARLY_PRINT;
+#endif

 	return 0;

@@ -161,11 +159,11 @@ int kputchar(int c)
 	kmessages[pos % KMSG_SIZE] = (unsigned char) c;

 #ifdef CONFIG_VGA
-	if (early_print == VGA_EARLY_PRINT)
+	if (early_print & VGA_EARLY_PRINT)
 		vga_putchar(c);
 #endif
 #ifdef CONFIG_UART
-	if (early_print == UART_EARLY_PRINT)
+	if (early_print & UART_EARLY_PRINT)
 		uart_putchar(c);
 #endif

@@ -186,11 +184,11 @@ int kputs(const char *str)
 	pos = atomic_int32_inc(&kmsg_counter);
 	kmessages[pos % KMSG_SIZE] = str[i];
 #ifdef CONFIG_VGA
-	if (early_print == VGA_EARLY_PRINT)
+	if (early_print & VGA_EARLY_PRINT)
 		vga_putchar(str[i]);
 #endif
 #ifdef CONFIG_UART
-	if (early_print == UART_EARLY_PRINT)
+	if (early_print & UART_EARLY_PRINT)
 		uart_putchar(str[i]);
 #endif
 	}
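The switch from == to & works because early_print is now a bitmask, so VGA and UART output can be active at the same time. A minimal illustration (not part of the patch):

	early_print |= VGA_EARLY_PRINT;   /* 0x1 */
	early_print |= UART_EARLY_PRINT;  /* 0x2, early_print is now 0x3 */

	if (early_print & VGA_EARLY_PRINT)  /* true */
		vga_putchar(c);
	if (early_print & UART_EARLY_PRINT) /* also true */
		uart_putchar(c);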
@@ -1,4 +1,4 @@
-C_source := memory.c vma.c
+C_source := memory.c vma.c malloc.c
 MODULE := mm

 include $(TOPDIR)/Makefile.inc
mm/malloc.c (new file, 203 lines)

@@ -0,0 +1,203 @@
/*
 * Copyright 2010 Steffen Vogel, Chair for Operating Systems,
 *                RWTH Aachen University
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * This file is part of MetalSVM.
 */

#include <metalsvm/malloc.h>
#include <metalsvm/spinlock.h>
#include <metalsvm/stdio.h>
#include <metalsvm/mmu.h>

/// A linked list for each binary size exponent
static buddy_t* buddy_lists[BUDDY_LISTS] = { NULL };
/// Lock for the buddy lists
static spinlock_t buddy_lock = SPINLOCK_INIT;

/** @brief Check if larger free buddies are available */
static inline int buddy_large_avail(uint8_t exp)
{
	while (exp<BUDDY_MAX && !buddy_lists[exp-BUDDY_MIN]) exp++;
	return exp != BUDDY_MAX;
}

/** @brief Calculate the required buddy size */
static inline int buddy_exp(size_t sz)
{
	int exp;
	for (exp=0; (1<<exp)<sz; exp++);

	if (exp > BUDDY_MAX)
		return 0;
	else if (exp < BUDDY_MIN)
		return BUDDY_MIN;
	else
		return exp;
}

/** @brief Get a free buddy by potentially splitting a larger one */
static buddy_t* buddy_get(int exp)
{
	spinlock_lock(&buddy_lock);
	buddy_t** list = &buddy_lists[exp-BUDDY_MIN];
	buddy_t* buddy = *list;
	buddy_t* split;

	if (buddy)
		// there is already a free buddy =>
		// we remove it from the list
		*list = buddy->next;
	else if (exp >= BUDDY_ALLOC && !buddy_large_avail(exp))
		// there's no free buddy larger than exp =>
		// we can allocate new memory
		buddy = (buddy_t*) palloc(1<<exp, MAP_KERNEL_SPACE);
	else {
		// we recursively request a larger buddy...
		buddy = buddy_get(exp+1);
		if (BUILTIN_EXPECT(!buddy, 0))
			goto out;

		// ... and split it, by putting the second half back to the list
		split = (buddy_t*) ((size_t) buddy + (1<<exp));
		split->next = *list;
		*list = split;
	}

out:
	spinlock_unlock(&buddy_lock);

	return buddy;
}

/** @brief Put a buddy back on its free list
 *
 * TODO: merge adjacent buddies (memory compaction)
 */
static void buddy_put(buddy_t* buddy)
{
	spinlock_lock(&buddy_lock);
	buddy_t** list = &buddy_lists[buddy->prefix.exponent-BUDDY_MIN];
	buddy->next = *list;
	*list = buddy;
	spinlock_unlock(&buddy_lock);
}

void buddy_dump()
{
	size_t free = 0;
	int i;
	for (i=0; i<BUDDY_LISTS; i++) {
		buddy_t* buddy;
		int exp = i+BUDDY_MIN;

		if (buddy_lists[i])
			kprintf("buddy_list[%u] (exp=%u, size=%lu bytes):\n", i, exp, 1<<exp);

		for (buddy=buddy_lists[i]; buddy; buddy=buddy->next) {
			kprintf("  %p -> %p \n", buddy, buddy->next);
			free += 1<<exp;
		}
	}
	kprintf("free buddies: %lu bytes\n", free);
}

void* palloc(size_t sz, uint32_t flags)
{
	size_t phyaddr, viraddr;
	uint32_t npages = PAGE_ALIGN(sz) >> PAGE_SHIFT;

	kprintf("palloc(%lu) (%lu pages)\n", sz, npages); // TODO: remove

	// get free virtual address space
	viraddr = vma_alloc(npages*PAGE_SIZE, VMA_HEAP);
	if (BUILTIN_EXPECT(!viraddr, 0))
		return NULL;

	// get contiguous physical pages
	phyaddr = get_pages(npages);
	if (BUILTIN_EXPECT(!phyaddr, 0)) {
		vma_free(viraddr, viraddr+npages*PAGE_SIZE);
		return NULL;
	}

	// map the physical pages into the VMA
	viraddr = map_region(viraddr, phyaddr, npages, flags);
	if (BUILTIN_EXPECT(!viraddr, 0)) {
		vma_free(viraddr, viraddr+npages*PAGE_SIZE);
		put_pages(phyaddr, npages);
		return NULL;
	}

	return (void*) viraddr;
}

void pfree(void* addr, size_t sz)
{
	if (BUILTIN_EXPECT(!addr || !sz, 0))
		return;

	size_t i;
	size_t phyaddr;
	size_t viraddr = (size_t) addr & PAGE_MASK;
	uint32_t npages = PAGE_ALIGN(sz) >> PAGE_SHIFT;

	// the memory is probably not contiguously mapped!
	for (i=0; i<npages; i++) {
		phyaddr = virt_to_phys(viraddr+i*PAGE_SIZE);
		put_page(phyaddr);
	}

	unmap_region(viraddr, npages);
	vma_free(viraddr, viraddr+npages*PAGE_SIZE);
}

void* kmalloc(size_t sz)
{
	if (BUILTIN_EXPECT(!sz, 0))
		return NULL;

	// add space for the prefix
	sz += sizeof(buddy_t);

	int exp = buddy_exp(sz);
	if (BUILTIN_EXPECT(!exp, 0))
		return NULL;

	buddy_t* buddy = buddy_get(exp);
	if (BUILTIN_EXPECT(!buddy, 0))
		return NULL;

	// setup the buddy prefix
	buddy->prefix.magic = BUDDY_MAGIC;
	buddy->prefix.exponent = exp;

	// pointer arithmetic: we hide the prefix
	return buddy+1;
}

void kfree(void *addr)
{
	if (BUILTIN_EXPECT(!addr, 0))
		return;

	buddy_t* buddy = (buddy_t*) addr - 1; // get the prefix

	// check the magic
	if (BUILTIN_EXPECT(buddy->prefix.magic != BUDDY_MAGIC, 0))
		return;

	buddy_put(buddy);
}
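A worked example of the size-class arithmetic above (illustration only; the 8-byte prefix size is an assumption):

	/* kmalloc(100): add the prefix, then round up to a power of two */
	size_t sz = 100 + sizeof(buddy_t); /* 108 with an 8-byte prefix */
	int exp;
	for (exp = 0; (1 << exp) < sz; exp++)
		;
	/* exp == 7, i.e. a 128-byte buddy; kfree() later reads the
	 * exponent back from the prefix, so no size argument is needed */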
mm/memory.c (411 lines changed)

@@ -37,17 +37,15 @@
 #endif

 /*
- * 0 => free
- * 1 => occupied
- *
- * Set whole address space as occupied
+ * Set whole address space as occupied:
+ * 0 => free, 1 => occupied
  */
-static uint8_t bitmap[BITMAP_SIZE]; // = {[0 ... BITMAP_SIZE-1] = 0xFF};
-static spinlock_t bitmap_lock = SPINLOCK_INIT;
-static size_t alloc_start;
-atomic_int32_t total_pages = ATOMIC_INIT(0);
-atomic_int32_t total_allocated_pages = ATOMIC_INIT(0);
-atomic_int32_t total_available_pages = ATOMIC_INIT(0);
+static uint8_t bitmap[BITMAP_SIZE] = {[0 ... BITMAP_SIZE-1] = 0xFF};
+static spinlock_t bitmap_lock = SPINLOCK_INIT;
+
+atomic_int32_t total_pages = ATOMIC_INIT(0);
+atomic_int32_t total_allocated_pages = ATOMIC_INIT(0);
+atomic_int32_t total_available_pages = ATOMIC_INIT(0);

 /*
  * Note that linker symbols are not variables, they have no memory allocated for

@@ -74,8 +72,8 @@ inline static void page_set_mark(size_t i)
 	size_t index = i >> 3;
 	size_t mod = i & 0x7;

-	//if (page_marked(i))
-	//	kprintf("page %u is already marked\n", i);
+	if (page_marked(i))
+		kprintf("page_set_mark(%u): already marked\n", i);

 	bitmap[index] = bitmap[index] | (1 << mod);
 }

@@ -86,56 +84,155 @@ inline static void page_clear_mark(size_t i)
 	size_t mod = i % 8;

 	if (page_unmarked(i))
-		kprintf("page %u is already unmarked\n", i);
+		kprintf("page_clear_mark(%u): already unmarked\n", i);

 	bitmap[index] = bitmap[index] & ~(1 << mod);
 }

+size_t get_pages(uint32_t npages)
+{
+	// skip the first page
+	static size_t start = 1;
+
+	uint32_t i, j, l;
+	uint32_t k = 0;
+	size_t ret = 0;
+
+	if (BUILTIN_EXPECT(!npages, 0))
+		return ret;
+
+	if (BUILTIN_EXPECT(npages > atomic_int32_read(&total_available_pages), 0))
+		return ret;
+
+	spinlock_lock(&bitmap_lock);
+	i = start;
+next_try:
+	while((k < BITMAP_SIZE) && page_marked(i)) {
+		k++;
+		i = (i+1) & (BITMAP_SIZE-1);
+	}
+
+	if (k >= BITMAP_SIZE)
+		goto oom;
+
+	for(j=1; (j<npages) && (i+j < BITMAP_SIZE) && (k < BITMAP_SIZE); j++, k++) {
+		if (page_marked(i+j)) {
+			i = (i+j) & (BITMAP_SIZE-1);
+			goto next_try;
+		}
+	}
+
+	if (i+j >= BITMAP_SIZE) {
+		i = 1;
+		goto next_try;
+	}
+
+	if (k >= BITMAP_SIZE)
+		goto oom;
+
+	ret = i*PAGE_SIZE;
+	kprintf("get_pages: ret 0x%x, i = %d, j = %d, npages = %d\n", ret, i, j, npages); // TODO: remove
+	for(l=i; l<i+j; l++)
+		page_set_mark(l);
+
+	start = i+j;
+	spinlock_unlock(&bitmap_lock);
+
+	atomic_int32_add(&total_allocated_pages, npages);
+	atomic_int32_sub(&total_available_pages, npages);
+
+	return ret;
+
+oom:
+	spinlock_unlock(&bitmap_lock);
+
+	return ret;
+}
+
+int put_pages(size_t phyaddr, size_t npages)
+{
+	if (BUILTIN_EXPECT(!phyaddr || !npages, 0))
+		return -EINVAL;
+
+	uint32_t index;
+	uint32_t base = phyaddr >> PAGE_SHIFT;
+
+	spinlock_lock(&bitmap_lock);
+	for (index=0; index<npages; index++)
+		page_clear_mark(base+index);
+	spinlock_unlock(&bitmap_lock);
+
+	atomic_int32_sub(&total_allocated_pages, npages);
+	atomic_int32_add(&total_available_pages, npages);
+
+	return 0;
+}

 int mmu_init(void)
 {
-	size_t kernel_size;
 	unsigned int i;
 	size_t addr;
 	int ret = 0;

-	// at first, set the default value of the bitmap
-	memset(bitmap, 0xFF, sizeof(uint8_t)*BITMAP_SIZE);

 #ifdef CONFIG_MULTIBOOT
-	if (mb_info && (mb_info->flags & MULTIBOOT_INFO_MEM_MAP)) {
-		size_t end_addr;
-		multiboot_memory_map_t* mmap = (multiboot_memory_map_t*) ((size_t) mb_info->mmap_addr);
-		multiboot_memory_map_t* mmap_end = (void*) ((size_t) mb_info->mmap_addr + mb_info->mmap_length);
-
-		while (mmap < mmap_end) {
-			if (mmap->type == MULTIBOOT_MEMORY_AVAILABLE) {
-				/* set the available memory as "unused" */
-				addr = mmap->addr;
-				end_addr = addr + mmap->len;
-
-				while (addr < end_addr) {
-					page_clear_mark(addr >> PAGE_SHIFT);
-					addr += PAGE_SIZE;
-					atomic_int32_inc(&total_pages);
-					atomic_int32_inc(&total_available_pages);
-				}
-			}
-			mmap++;
-		}
-	} else {
-		kputs("Unable to initialize the memory management subsystem\n");
-		while(1) {
-			HALT;
-		}
-	}
+	if (mb_info) {
+		if (mb_info->flags & MULTIBOOT_INFO_MEM_MAP) {
+			multiboot_memory_map_t* mmap = (multiboot_memory_map_t*) ((size_t) mb_info->mmap_addr);
+			multiboot_memory_map_t* mmap_end = (void*) ((size_t) mb_info->mmap_addr + mb_info->mmap_length);
+
+			// mark the available memory as free
+			while (mmap < mmap_end) {
+				if (mmap->type == MULTIBOOT_MEMORY_AVAILABLE) {
+					for (addr=mmap->addr; addr < mmap->addr + mmap->len; addr += PAGE_SIZE) {
+						page_clear_mark(addr >> PAGE_SHIFT);
+						atomic_int32_inc(&total_pages);
+						atomic_int32_inc(&total_available_pages);
+					}
+				}
+				mmap++;
+			}
+		}
+		else if (mb_info->flags & MULTIBOOT_INFO_MEM) {
+			size_t page;
+			size_t pages_lower = mb_info->mem_lower >> 2;
+			size_t pages_upper = mb_info->mem_upper >> 2;
+
+			for (page=0; page<pages_lower; page++)
+				page_clear_mark(page);
+
+			for (page=0x100000; page<pages_upper+0x100000; page++)
+				page_clear_mark(page);
+
+			atomic_int32_add(&total_pages, pages_lower + pages_upper);
+			atomic_int32_add(&total_available_pages, pages_lower + pages_upper);
+		}
+		else {
+			kputs("Unable to initialize the memory management subsystem\n");
+			while (1) HALT;
+		}
+
+		// mark mb_info as used
+		page_set_mark((size_t) mb_info >> PAGE_SHIFT);
+		atomic_int32_inc(&total_allocated_pages);
+		atomic_int32_dec(&total_available_pages);
+
+		// mark the modules list as used
+		if (mb_info->flags & MULTIBOOT_INFO_MODS) {
+			for(addr=mb_info->mods_addr; addr<mb_info->mods_addr+mb_info->mods_count*sizeof(multiboot_module_t); addr+=PAGE_SIZE) {
+				page_set_mark(addr >> PAGE_SHIFT);
+				atomic_int32_inc(&total_allocated_pages);
+				atomic_int32_dec(&total_available_pages);
+			}
+		}
+	}
 #elif defined(CONFIG_ROCKCREEK)
-	/* of course, the first slots belong to the private memory */
+	// of course, the first slots belong to the private memory
 	for(addr=0x00; addr<1*0x1000000; addr+=PAGE_SIZE) {
 		page_clear_mark(addr >> PAGE_SHIFT);
 		if (addr > addr + PAGE_SIZE)
 			break;
 		atomic_int32_inc(&total_pages);
 		atomic_int32_inc(&total_available_pages);
 	}

 	// Note: The last slot belongs always to the private memory.

@@ -147,71 +244,78 @@ int mmu_init(void)
 		atomic_int32_inc(&total_available_pages);
 	}

-	/*
-	 * Mark the bootinfo as used.
-	 */
+	// mark the bootinfo as used
 	page_set_mark((size_t)bootinfo >> PAGE_SHIFT);
 	atomic_int32_inc(&total_allocated_pages);
 	atomic_int32_dec(&total_available_pages);

 #else
 #error Currently, MetalSVM supports only the Multiboot specification or the RockCreek processor!
 #endif

-	kernel_size = (size_t) &kernel_end - (size_t) &kernel_start;
-	if (kernel_size & (PAGE_SIZE-1))
-		kernel_size += PAGE_SIZE - (kernel_size & (PAGE_SIZE-1));
-	atomic_int32_add(&total_allocated_pages, kernel_size >> PAGE_SHIFT);
-	atomic_int32_sub(&total_available_pages, kernel_size >> PAGE_SHIFT);
-
-	/* set kernel space as used */
-	for(i=(size_t) &kernel_start >> PAGE_SHIFT; i < (size_t) &kernel_end >> PAGE_SHIFT; i++)
-		page_set_mark(i);
-	if ((size_t) &kernel_end & (PAGE_SIZE-1))
-		page_set_mark(i);
-
-	alloc_start = (size_t) &kernel_end >> PAGE_SHIFT;
-	if ((size_t) &kernel_end & (PAGE_SIZE-1))
-		alloc_start++;
+	// mark the kernel as used
+	for(addr=(size_t) &kernel_start; addr<(size_t) &kernel_end; addr+=PAGE_SIZE) {
+		page_set_mark(addr >> PAGE_SHIFT);
+		atomic_int32_inc(&total_allocated_pages);
+		atomic_int32_dec(&total_available_pages);
+	}

 #if MAX_CORES > 1
 	// reserve a physical page for the SMP boot code
 	page_set_mark(SMP_SETUP_ADDR >> PAGE_SHIFT);
-	atomic_int32_add(&total_allocated_pages, 1);
-	atomic_int32_sub(&total_available_pages, 1);
+	atomic_int32_inc(&total_allocated_pages);
+	atomic_int32_dec(&total_available_pages);
 #endif

+	// enable paging and map SMP, VGA, Multiboot modules etc.
+	ret = paging_init();
+	if (ret) {
+		kprintf("Failed to initialize paging: %d\n", ret);
+		return ret;
+	}
+
+	// add the kernel to the VMA list
+	vma_add((size_t) &kernel_start & PAGE_MASK,
+		PAGE_ALIGN((size_t) &kernel_end),
+		VMA_READ|VMA_WRITE|VMA_EXECUTE|VMA_CACHEABLE);
+
+	// add the LAPIC to the VMA list
+	vma_add((size_t) &kernel_start - PAGE_SIZE,
+		(size_t) &kernel_start,
+		VMA_READ|VMA_WRITE);
+
+#if MAX_CORES > 1
+	// reserve a page for the SMP boot code
+	vma_add(SMP_SETUP_ADDR & PAGE_MASK,
+		PAGE_ALIGN(SMP_SETUP_ADDR + PAGE_SIZE),
+		VMA_READ|VMA_WRITE|VMA_EXECUTE|VMA_CACHEABLE);
+#endif

 #ifdef CONFIG_MULTIBOOT
-	/*
-	 * Modules like the init ramdisk are already loaded.
-	 * Therefore, we set these pages as used.
-	 */
-	if (mb_info && (mb_info->flags & MULTIBOOT_INFO_MODS)) {
-		multiboot_module_t* mmodule = (multiboot_module_t*) ((size_t) mb_info->mods_addr);
-
-		/*
-		 * Mark the mb_info as used.
-		 */
-		page_set_mark((size_t)mb_info >> PAGE_SHIFT);
-		atomic_int32_inc(&total_allocated_pages);
-		atomic_int32_dec(&total_available_pages);
-
-		for(addr = mb_info->mods_addr; addr < mb_info->mods_addr + mb_info->mods_count * sizeof(multiboot_module_t); addr += PAGE_SIZE) {
-			page_set_mark(addr >> PAGE_SHIFT);
-			atomic_int32_inc(&total_allocated_pages);
-			atomic_int32_dec(&total_available_pages);
-		}
-
-		for(i=0; i<mb_info->mods_count; i++, mmodule++) {
-			for(addr=mmodule->mod_start; addr<mmodule->mod_end; addr+=PAGE_SIZE) {
-				page_set_mark(addr >> PAGE_SHIFT);
-				atomic_int32_inc(&total_allocated_pages);
-				atomic_int32_dec(&total_available_pages);
-			}
-		}
-	}
+	// modules like the init ramdisk are already loaded; add them to the VMA list
+	if (mb_info) {
+		vma_add((size_t) mb_info & PAGE_MASK,
+			PAGE_ALIGN((size_t) mb_info + sizeof(multiboot_info_t)),
+			VMA_READ|VMA_CACHEABLE);
+
+		if (mb_info->flags & MULTIBOOT_INFO_MODS) {
+			multiboot_module_t* mmodule = (multiboot_module_t*) ((size_t) mb_info->mods_addr);
+
+			vma_add((size_t) mb_info->mods_addr & PAGE_MASK,
+				PAGE_ALIGN((size_t) mb_info->mods_addr + mb_info->mods_count*sizeof(multiboot_module_t)),
+				VMA_READ|VMA_CACHEABLE);
+
+			for(i=0; i<mb_info->mods_count; i++) {
+				vma_add(PAGE_ALIGN(mmodule[i].mod_start),
+					PAGE_ALIGN(mmodule[i].mod_end),
+					VMA_READ|VMA_WRITE|VMA_CACHEABLE);
+
+				for(addr=mmodule[i].mod_start; addr<mmodule[i].mod_end; addr+=PAGE_SIZE) {
+					page_set_mark(addr >> PAGE_SHIFT);
+					atomic_int32_inc(&total_allocated_pages);
+					atomic_int32_dec(&total_available_pages);
+				}
+			}
+		}
+	}

@@ -239,8 +343,8 @@ int mmu_init(void)
 	 * The init ramdisk is already loaded.
 	 * Therefore, we set these pages as used.
 	 */
-	for(addr=bootinfo->addr; addr < bootinfo->addr+bootinfo->size; addr+=PAGE_SIZE) {
-		// This area is already mapped, so we need to virt_to_phys() these addresses.
+	// this area is already mapped, so we need to virt_to_phys() these addresses
+	for(addr=bootinfo->addr; addr<bootinfo->addr+bootinfo->size; addr+=PAGE_SIZE) {
 		page_set_mark(virt_to_phys(addr) >> PAGE_SHIFT);
 		atomic_int32_inc(&total_allocated_pages);
 		atomic_int32_dec(&total_available_pages);

@@ -250,148 +354,3 @@ int mmu_init(void)
 	return ret;
 }

-/*
- * Use a first fit algorithm to find a suitable physical memory region
- */
-size_t get_pages(uint32_t npages)
-{
-	uint32_t i, j, l;
-	uint32_t k = 0;
-	size_t ret = 0;
-
-	if (BUILTIN_EXPECT(!npages, 0))
-		return ret;
-
-	if (BUILTIN_EXPECT(npages > atomic_int32_read(&total_available_pages), 0))
-		return ret;
-
-	spinlock_lock(&bitmap_lock);
-	i = alloc_start;
-next_try:
-	while((k < BITMAP_SIZE) && page_marked(i)) {
-		k++;
-		i = (i+1) & (BITMAP_SIZE-1);
-	}
-
-	if (k >= BITMAP_SIZE)
-		goto oom;
-
-	for(j=1; (j<npages) && (i+j < BITMAP_SIZE) && (k < BITMAP_SIZE); j++, k++) {
-		if (page_marked(i+j)) {
-			i = (i+j) & (BITMAP_SIZE-1);
-			goto next_try;
-		}
-	}
-
-	if (i+j >= BITMAP_SIZE) {
-		i = 0;
-		goto next_try;
-	}
-
-	if (k >= BITMAP_SIZE)
-		goto oom;
-
-	ret = i*PAGE_SIZE;
-	//kprintf("alloc: ret 0x%x, i = %d, j = %d, npages = %d\n", ret, i, j, npages);
-	for(l=i; l<i+j; l++)
-		page_set_mark(l);
-
-	alloc_start = i+j;
-	spinlock_unlock(&bitmap_lock);
-
-	atomic_int32_add(&total_allocated_pages, npages);
-	atomic_int32_sub(&total_available_pages, npages);
-
-	return ret;
-
-oom:
-	spinlock_unlock(&bitmap_lock);
-
-	return ret;
-}
-
-int put_page(size_t phyaddr)
-{
-	uint32_t index = phyaddr >> PAGE_SHIFT;
-
-	if (BUILTIN_EXPECT(!phyaddr, 0))
-		return -EINVAL;
-
-	spinlock_lock(&bitmap_lock);
-	page_clear_mark(index);
-	spinlock_unlock(&bitmap_lock);
-
-	atomic_int32_sub(&total_allocated_pages, 1);
-	atomic_int32_add(&total_available_pages, 1);
-
-	return 0;
-}
-
-void* mem_allocation(size_t sz, uint32_t flags)
-{
-	size_t phyaddr, viraddr;
-	uint32_t npages = sz >> PAGE_SHIFT;
-
-	if (sz & (PAGE_SIZE-1))
-		npages++;
-
-	phyaddr = get_pages(npages);
-	if (BUILTIN_EXPECT(!phyaddr, 0))
-		return 0;
-
-	viraddr = map_region(0, phyaddr, npages, flags);
-
-	return (void*) viraddr;
-}
-
-void* kmalloc(size_t sz)
-{
-	return mem_allocation(sz, MAP_KERNEL_SPACE);
-}
-
-void kfree(void* addr, size_t sz)
-{
-	uint32_t index, npages, i;
-	size_t phyaddr;
-
-	if (BUILTIN_EXPECT(!addr && !sz, 0))
-		return;
-
-	npages = sz >> PAGE_SHIFT;
-	if (sz & (PAGE_SIZE-1))
-		npages++;
-
-	spinlock_lock(&bitmap_lock);
-	for(i=0; i<npages; i++) {
-		unmap_region((size_t) addr+i*PAGE_SIZE, 1);
-
-		phyaddr = virt_to_phys((size_t) addr+i*PAGE_SIZE);
-		if (!phyaddr)
-			continue;
-
-		index = phyaddr >> PAGE_SHIFT;
-		page_clear_mark(index);
-	}
-	spinlock_unlock(&bitmap_lock);
-
-	vm_free((size_t) addr, npages);
-
-	atomic_int32_sub(&total_allocated_pages, npages);
-	atomic_int32_add(&total_available_pages, npages);
-}
-
-void* create_stack(void)
-{
-	return kmalloc(KERNEL_STACK_SIZE);
-}
-
-int destroy_stack(task_t* task)
-{
-	if (BUILTIN_EXPECT(!task || !task->stack, 0))
-		return -EINVAL;
-
-	kfree(task->stack, KERNEL_STACK_SIZE);
-
-	return 0;
-}
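How a page number maps into the allocation bitmap, as a worked example (illustration only):

	size_t page = 42;
	size_t index = page >> 3;     /* byte 5 of bitmap[] */
	size_t mod = page & 0x7;      /* bit 2 within that byte */
	bitmap[index] |= (1 << mod);  /* page_set_mark(): mark as occupied */
	bitmap[index] &= ~(1 << mod); /* page_clear_mark(): mark as free */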
mm/vma.c (353 lines changed)

@@ -1,5 +1,5 @@
 /*
- * Copyright 2011 Stefan Lankes, Chair for Operating Systems,
+ * Copyright 2011 Steffen Vogel, Chair for Operating Systems,
  *                RWTH Aachen University
  *
  * Licensed under the Apache License, Version 2.0 (the "License");

@@ -17,85 +17,322 @@
  * This file is part of MetalSVM.
  */

 #include <metalsvm/stdlib.h>
 #include <metalsvm/stdio.h>
 #include <metalsvm/tasks_types.h>
 #include <metalsvm/spinlock.h>
 #include <metalsvm/vma.h>
+#include <metalsvm/errno.h>

-/*
- * add a new virtual memory region to the list of VMAs
- */
-int vma_add(task_t* task, size_t start, size_t end, uint32_t type)
-{
-	vma_t* new_vma;
-
-	if (BUILTIN_EXPECT(!task || start > end, 0))
-		return -EINVAL;
-
-	new_vma = kmalloc(sizeof(new_vma));
-	if (!new_vma)
-		return -ENOMEM;
-
-	new_vma->start = start;
-	new_vma->end = end;
-	new_vma->type = type;
-
-	if (!(task->vma_list)) {
-		new_vma->next = new_vma->prev = NULL;
-		task->vma_list = new_vma;
-	} else {
-		vma_t* tmp = task->vma_list;
-
-		while (tmp->next && tmp->start < start)
-			tmp = tmp->next;
-
-		new_vma->next = tmp->next;
-		new_vma->prev = tmp;
-		tmp->next = new_vma;
-	}
-
-	return 0;
-}
-
-int vma_dump(task_t* task)
-{
-	vma_t* tmp;
-
-	if (BUILTIN_EXPECT(!task, 0))
-		return -EINVAL;
-
-	spinlock_lock(&task->vma_lock);
-
-	tmp = task->vma_list;
-	while (tmp) {
-		kprintf("%8x - %8x: ", tmp->start, tmp->end);
-
-		if (tmp->type & VMA_READ)
-			kputs("r");
-		else
-			kputs("-");
-
-		if (tmp->type & VMA_WRITE)
-			kputs("w");
-		else
-			kputs("-");
-
-		if (tmp->type & VMA_EXECUTE)
-			kputs("x");
-		else
-			kputs("-");
-		kputs("\n");
-
-		tmp = tmp->next;
-	}
-
-	spinlock_unlock(&task->vma_lock);
-
-	return 0;
-}
+/*
+ * Kernel space VMA list and lock
+ *
+ * For bootstrapping we initialize the VMA list with one empty VMA
+ * (start == end) and expand this VMA by calls to vma_alloc()
+ */
+static vma_t vma_boot = { VMA_KERN_MAX, VMA_KERN_MAX, VMA_HEAP };
+static vma_t* vma_list = &vma_boot;
+static spinlock_t vma_lock = SPINLOCK_INIT;
+
+size_t vma_alloc(size_t size, uint32_t flags)
+{
+	task_t* task = per_core(current_task);
+	spinlock_t* lock;
+	vma_t** list;
+	size_t ret = 0;
+
+	kprintf("vma_alloc(0x%lx, 0x%x)\n", size, flags);
+
+	size_t base, limit; // boundaries for the search
+	size_t start, end;
+
+	if (BUILTIN_EXPECT(!size, 0))
+		return 0;
+
+	if (flags & VMA_USER) {
+		base = VMA_KERN_MAX;
+		limit = VMA_USER_MAX;
+		list = &task->vma_list;
+		lock = &task->vma_lock;
+	}
+	else {
+		base = 0;
+		limit = VMA_KERN_MAX;
+		list = &vma_list;
+		lock = &vma_lock;
+	}
+
+	spinlock_lock(lock);
+
+	// "last fit" search for a free memory area
+	vma_t* pred = *list; // vma before the current gap
+	vma_t* succ = NULL;  // vma after the current gap
+	do {
+		start = (pred) ? pred->end : base;
+		end = (succ) ? succ->start : limit;
+
+		if (end > start && end - start > size)
+			break; // we found a gap
+
+		succ = pred;
+		pred = (pred) ? pred->prev : NULL;
+	} while (pred || succ);
+
+	if (BUILTIN_EXPECT(end > limit || end < start || end - start < size, 0)) {
+		spinlock_unlock(lock);
+		return 0;
+	}
+
+	// resize an existing vma
+	if (succ && succ->flags == flags) {
+		succ->start -= size;
+		ret = succ->start;
+	}
+	// insert a new vma
+	else {
+		vma_t* new = kmalloc(sizeof(vma_t));
+		if (BUILTIN_EXPECT(!new, 0))
+			return 0;
+
+		new->start = end-size;
+		new->end = end;
+		new->flags = flags;
+		new->next = succ;
+		new->prev = pred;
+
+		if (pred)
+			pred->next = new;
+		if (succ)
+			succ->prev = new;
+		else
+			*list = new;
+
+		ret = new->start;
+	}
+
+	spinlock_unlock(lock);
+	return ret;
+}
+
+int vma_free(size_t start, size_t end)
+{
+	task_t* task = per_core(current_task);
+	spinlock_t* lock;
+	vma_t* vma;
+	vma_t** list;
+
+	if (BUILTIN_EXPECT(start >= end, 0))
+		return -EINVAL;
+
+	if (end <= VMA_KERN_MAX) {
+		lock = &vma_lock;
+		list = &vma_list;
+	}
+	else if (start >= VMA_KERN_MAX) {
+		lock = &task->vma_lock;
+		list = &task->vma_list;
+	}
+	else
+		return -EINVAL;
+
+	if (BUILTIN_EXPECT(!*list, 0))
+		return -EINVAL;
+
+	spinlock_lock(lock);
+
+	// search the vma
+	vma = *list;
+	while (vma) {
+		if (start >= vma->start && end <= vma->end) break;
+		vma = vma->prev;
+	}
+
+	if (BUILTIN_EXPECT(!vma, 0)) {
+		spinlock_unlock(lock);
+		return -EINVAL;
+	}
+
+	// free or resize the vma
+	if (start == vma->start && end == vma->end) {
+		if (vma == *list)
+			*list = vma->next; // update the list head
+		if (vma->prev)
+			vma->prev->next = vma->next;
+		if (vma->next)
+			vma->next->prev = vma->prev;
+		kfree(vma);
+	}
+	else if (start == vma->start)
+		vma->start = end;
+	else if (end == vma->end)
+		vma->end = start;
+	else {
+		vma_t* new = kmalloc(sizeof(vma_t));
+		if (BUILTIN_EXPECT(!new, 0)) {
+			spinlock_unlock(lock);
+			return -ENOMEM;
+		}
+
+		// split the vma: copy the old end before shrinking it
+		new->end = vma->end;
+		vma->end = start;
+		new->start = end;
+
+		new->next = vma->next;
+		new->prev = vma;
+		vma->next = new;
+	}
+
+	spinlock_unlock(lock);
+
+	return 0;
+}
+
+int vma_add(size_t start, size_t end, uint32_t flags)
+{
+	task_t* task = per_core(current_task);
+	spinlock_t* lock;
+	vma_t** list;
+
+	kprintf("vma_add(0x%lx, 0x%lx, 0x%x)\n", start, end, flags);
+
+	if (BUILTIN_EXPECT(start >= end, 0))
+		return -EINVAL;
+
+	if (flags & VMA_USER) {
+		list = &task->vma_list;
+		lock = &task->vma_lock;
+
+		// check if the address is in userspace
+		if (BUILTIN_EXPECT(start < VMA_KERN_MAX, 0))
+			return -EINVAL;
+	}
+	else {
+		list = &vma_list;
+		lock = &vma_lock;
+
+		// check if the address is in kernelspace
+		if (BUILTIN_EXPECT(end > VMA_KERN_MAX, 0))
+			return -EINVAL;
+	}
+
+	spinlock_lock(lock);
+
+	// search the gap
+	vma_t* pred = *list;
+	vma_t* succ = NULL;
+	while (pred) {
+		if ((!pred || pred->end <= start) &&
+		    (!succ || succ->start >= end))
+			break;
+
+		succ = pred;
+		pred = pred->prev;
+	}
+
+	// resize an existing vma
+	if (pred && pred->end == start && pred->flags == flags)
+		pred->end = end;
+	else if (succ && succ->start == end && succ->flags == flags)
+		succ->start = start;
+	// insert a new vma
+	else {
+		vma_t* new = kmalloc(sizeof(vma_t));
+		if (BUILTIN_EXPECT(!new, 0))
+			return 0;
+
+		new->start = start;
+		new->end = end;
+		new->flags = flags;
+		new->next = succ;
+		new->prev = pred;
+
+		if (pred)
+			pred->next = new;
+		if (succ)
+			succ->prev = new;
+		else
+			*list = new;
+	}
+
+	spinlock_unlock(lock);
+
+	return 0;
+}
+
+int copy_vma_list(task_t* task)
+{
+	task_t* parent_task = per_core(current_task);
+
+	spinlock_init(&task->vma_lock);
+	spinlock_lock(&parent_task->vma_lock);
+	spinlock_lock(&task->vma_lock);
+
+	int ret = 0;
+	vma_t* last = NULL;
+	vma_t* parent = parent_task->vma_list;
+
+	while (parent) {
+		vma_t *new = kmalloc(sizeof(vma_t));
+		if (BUILTIN_EXPECT(!new, 0)) {
+			ret = -ENOMEM;
+			goto out;
+		}
+
+		new->start = parent->start;
+		new->end = parent->end;
+		new->flags = parent->flags;
+		new->prev = last;
+		new->next = NULL;
+
+		if (last)
+			last->next = new;
+		else
+			task->vma_list = new;
+
+		last = new;
+		parent = parent->next;
+	}
+
+out:
+	spinlock_unlock(&task->vma_lock);
+	spinlock_unlock(&parent_task->vma_lock);
+	return ret;
+}
+
+int drop_vma_list()
+{
+	task_t* task = per_core(current_task);
+
+	spinlock_lock(&task->vma_lock);
+
+	while(task->vma_list)
+		pfree((void*) task->vma_list->start, task->vma_list->end - task->vma_list->start);
+
+	spinlock_unlock(&task->vma_lock);
+
+	return 0;
+}
+
+void vma_dump()
+{
+	void print_vma(vma_t *vma) {
+		while (vma) {
+			kprintf("0x%lx - 0x%lx: size=%x, flags=%c%c%c\n", vma->start, vma->end, vma->end - vma->start,
+				(vma->flags & VMA_READ) ? 'r' : '-',
+				(vma->flags & VMA_WRITE) ? 'w' : '-',
+				(vma->flags & VMA_EXECUTE) ? 'x' : '-');
+			vma = vma->prev;
+		}
+	}
+
+	task_t* task = per_core(current_task);
+
+	kputs("Kernelspace VMAs:\n");
+	spinlock_lock(&vma_lock);
+	print_vma(vma_list);
+	spinlock_unlock(&vma_lock);
+
+	kputs("Userspace VMAs:\n");
+	spinlock_lock(&task->vma_lock);
+	print_vma(task->vma_list);
+	spinlock_unlock(&task->vma_lock);
+}
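The "last fit" search walks the address-ordered list downwards from its head, so new areas are carved out of the upper end of the first sufficiently large gap. A worked example with made-up addresses: if the kernel list holds the area [0x8000,0x9000) below the bootstrap VMA and VMA_KERN_MAX is 0x10000, then

	size_t addr = vma_alloc(0x2000, VMA_HEAP);
	/* the gap [0x9000,0x10000) is found first and shrunk from the top:
	 * addr == 0xE000, i.e. the new area is [0xE000,0x10000) */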
@@ -68,7 +68,7 @@ int main(int argc, char** argv)
 		exit(1);
 	}
 	testdirent = readdir(testdir);
-	printf("1. Dirent: %s", testdirent->d_name);
+	printf("1. Dirent: %s\n", testdirent->d_name);
 	closedir(testdir);

 	return errno;
@@ -20,41 +20,64 @@
 #include <stdlib.h>
 #include <stdio.h>
 #include <string.h>
 #include <unistd.h>
 #include <fcntl.h>
 #include <errno.h>
 #include <dirent.h>

 int print_usage() {
-	printf("usage: [size mb/kb/b]");
+	printf("usage: size mb/kb/b [chunks]\n");
 	exit(0);
 }

 int main(int argc, char** argv)
 {
-	int m = 0;
-	uint32_t size = 0;
-	if(argc <= 2)
-		print_usage();
-	if(argc == 3) {
-		if(!strcmp(argv[2], "mb"))
-			m = 1024*1024;
-		else if(!strcmp(argv[2], "kb"))
-			m = 1024;
-		else if(!strcmp(argv[2], "b"))
-			m = 0;
-		else
-			print_usage();
-	}
-	if(argc > 3)
-		print_usage();
-
-	size = atoi(argv[1]);
-	if(size <= 0)
-		print_usage();
-
-	size *= m;
-	uint8_t* test = malloc(size);
-	printf("malloc(%d) - START: %p END: %p \n", size, test, test + size);
+	int multp = 0;
+	int size = 0;
+	int chunks = 1;
+
+	void **test;
+
+	if (argc <= 2 || argc > 4)
+		print_usage();
+
+	size = atoi(argv[1]);
+	if (size <= 0)
+		print_usage();
+
+	if (!strcasecmp(argv[2], "mb"))
+		multp = 1024*1024;
+	else if (!strcasecmp(argv[2], "kb"))
+		multp = 1024;
+	else if (!strcasecmp(argv[2], "b"))
+		multp = 1;
+	else
+		print_usage();
+	size *= multp;
+
+	if (argc == 4)
+		chunks = atoi(argv[3]);
+
+	test = malloc(chunks * sizeof(void *));
+	if (!test)
+		printf("malloc(%d) - FAILED!\n", chunks * sizeof(void *));
+
+	// allocate...
+	int i;
+	for (i = 0; i < chunks; i++) {
+		test[i] = malloc(size);
+		if (test[i])
+			printf("malloc(%d)\tCHUNK: %d START: %p END: %p\n", size, i, test[i], test[i] + size);
+		else {
+			printf("malloc(%d)\tFAILED! Aborting the allocation, freeing memory instead\n", size);
+			break;
+		}
+	}
+
+	// ...and release it again
+	for (i = 0; i < chunks; i++) {
+		if (test[i]) {
+			free(test[i]);
+			printf("free(%p)\tCHUNK: %d\n", test[i], i);
+		}
+	}
+
+	free(test);
+	return 0;
 }
script.gdb (11 lines changed)

@@ -1,7 +1,14 @@
 # Constant part of the script
 set disassembly-flavor intel
 symbol-file metalsvm.sym
 target remote localhost:1234

-# Configure breakpoints and everything as you wish here.
-break main
+# Debugging 32bit code
+#set architecture i386
+#break stublet
+#continue
+
+# Debugging 64bit code
+#set architecture i386:x86-64
+#break main
 continue