Merge branch 'vogel' into x64_paging
This commit is contained in:
commit
edf178f39a
27 changed files with 1079 additions and 635 deletions
|
@ -128,17 +128,20 @@ qemu: newlib tools $(NAME).elf
|
||||||
$(QEMU) -monitor stdio -serial tcp::12346,server,nowait -smp $(SMP) -net nic,model=rtl8139 -net user,hostfwd=tcp::12345-:4711 -kernel metalsvm.elf -initrd tools/initrd.img
|
$(QEMU) -monitor stdio -serial tcp::12346,server,nowait -smp $(SMP) -net nic,model=rtl8139 -net user,hostfwd=tcp::12345-:4711 -kernel metalsvm.elf -initrd tools/initrd.img
|
||||||
|
|
||||||
qemudbg: newlib tools $(NAME).elf
|
qemudbg: newlib tools $(NAME).elf
|
||||||
$(QEMU) -s -S -monitor stdio -serial tcp::12346,server -smp $(SMP) -net nic,model=rtl8139 -net user,hostfwd=tcp::12345-:4711 -kernel metalsvm.elf -initrd tools/initrd.img
|
$(QEMU) -s -S -nographic -monitor stdio -serial tcp::12346,server -smp $(SMP) -net nic,model=rtl8139 -net user,hostfwd=tcp::12345-:4711 -kernel metalsvm.elf -initrd tools/initrd.img
|
||||||
|
|
||||||
gdb: $(NAME).elf
|
gdb: $(NAME).elf
|
||||||
$(GDB) -q -x script.gdb
|
$(GDB) -q -x script.gdb
|
||||||
|
|
||||||
debug: newlib tools $(NAME).elf
|
debug: newlib tools $(NAME).elf
|
||||||
|
killall $(QEMU) || true
|
||||||
|
killall $(GDB) || true
|
||||||
|
sleep 1
|
||||||
gnome-terminal --working-directory=$(TOPDIR) \
|
gnome-terminal --working-directory=$(TOPDIR) \
|
||||||
--tab --title=Debug --command="bash -c 'sleep 1 && telnet localhost 12346'" \
|
|
||||||
--tab --title=Shell --command="bash -c 'sleep 1 && telnet localhost 12345'" \
|
--tab --title=Shell --command="bash -c 'sleep 1 && telnet localhost 12345'" \
|
||||||
--tab --title=QEmu --command="make qemudbg" \
|
--tab --title=QEmu --command="make qemudbg" \
|
||||||
--tab --title=GDB --command="make gdb"
|
--tab --title=GDB --command="make gdb" \
|
||||||
|
--tab --title=Debug --command="bash -c 'sleep 1 && telnet localhost 12346'"
|
||||||
|
|
||||||
clean:
|
clean:
|
||||||
$Q$(RM) $(NAME).elf $(NAME).sym *~
|
$Q$(RM) $(NAME).elf $(NAME).sym *~
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
C_source := tests.c echo.c netio.c jacobi.c laplace.c gfx_client.c gfx_generic.c paging.c
|
C_source := tests.c echo.c netio.c jacobi.c laplace.c gfx_client.c gfx_generic.c memory.c
|
||||||
MODULE := apps
|
MODULE := apps
|
||||||
|
|
||||||
include $(TOPDIR)/Makefile.inc
|
include $(TOPDIR)/Makefile.inc
|
||||||
|
|
|
@ -23,11 +23,14 @@
|
||||||
#include <metalsvm/mmu.h>
|
#include <metalsvm/mmu.h>
|
||||||
#include <metalsvm/time.h>
|
#include <metalsvm/time.h>
|
||||||
#include <metalsvm/tasks.h>
|
#include <metalsvm/tasks.h>
|
||||||
|
#include <metalsvm/vma.h>
|
||||||
|
#include <metalsvm/malloc.h>
|
||||||
|
|
||||||
#include <asm/page.h>
|
#include <asm/page.h>
|
||||||
#include <asm/processor.h>
|
#include <asm/processor.h>
|
||||||
|
|
||||||
#define PAGE_COUNT 10
|
#define PAGE_COUNT 10
|
||||||
|
#define SIZE (PAGE_COUNT*PAGE_SIZE)
|
||||||
#define VIRT_FROM_ADDR 0x100000000000
|
#define VIRT_FROM_ADDR 0x100000000000
|
||||||
#define VIRT_TO_ADDR 0x200000000000
|
#define VIRT_TO_ADDR 0x200000000000
|
||||||
|
|
||||||
|
@ -51,6 +54,16 @@ static void test(size_t expr, char *fmt, ...)
|
||||||
abort();
|
abort();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/** @brief Linear feedback shift register PRNG */
|
||||||
|
static uint16_t rand()
|
||||||
|
{
|
||||||
|
static uint16_t lfsr = 0xACE1u;
|
||||||
|
static uint16_t bit;
|
||||||
|
|
||||||
|
bit = ((lfsr >> 0) ^ (lfsr >> 2) ^ (lfsr >> 3) ^ (lfsr >> 5) ) & 1;
|
||||||
|
return lfsr = (lfsr >> 1) | (bit << 15);
|
||||||
|
}
|
||||||
|
|
||||||
/** @brief BSD sum algorithm ('sum' Unix command) and used by QEmu */
|
/** @brief BSD sum algorithm ('sum' Unix command) and used by QEmu */
|
||||||
uint16_t checksum(size_t start, size_t end) {
|
uint16_t checksum(size_t start, size_t end) {
|
||||||
size_t addr;
|
size_t addr;
|
||||||
|
@ -168,12 +181,102 @@ static void paging(void)
|
||||||
//sleep(3);
|
//sleep(3);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/** @brief Test of the VMA allocator */
|
||||||
|
static void vma(void)
|
||||||
|
{
|
||||||
|
int ret;
|
||||||
|
|
||||||
|
// vma_alloc
|
||||||
|
size_t a1 = vma_alloc(SIZE, VMA_HEAP);
|
||||||
|
test(a1, "vma_alloc(0x%x, 0x%x) = 0x%lx", SIZE, VMA_HEAP, a1);
|
||||||
|
vma_dump();
|
||||||
|
|
||||||
|
size_t a2 = vma_alloc(SIZE, VMA_HEAP|VMA_USER);
|
||||||
|
test(a2 != 0, "vma_alloc(0x%x, 0x%x) = 0x%lx", SIZE, VMA_HEAP|VMA_USER, a2);
|
||||||
|
vma_dump();
|
||||||
|
|
||||||
|
// vma_add
|
||||||
|
ret = vma_add(VIRT_FROM_ADDR, VIRT_FROM_ADDR+SIZE, VMA_HEAP|VMA_USER);
|
||||||
|
test(ret >= 0, "vma_add(0x%lx, 0x%lx, 0x%x) = %u", VIRT_FROM_ADDR, VIRT_FROM_ADDR+SIZE, VMA_HEAP|VMA_USER, ret);
|
||||||
|
vma_dump();
|
||||||
|
|
||||||
|
ret = vma_add(VIRT_FROM_ADDR+SIZE, VIRT_FROM_ADDR+2*SIZE, VMA_HEAP|VMA_USER);
|
||||||
|
test(ret >= 0, "vma_add(0x%lx, 0x%lx, 0x%x) = %u", VIRT_FROM_ADDR+SIZE, VIRT_FROM_ADDR+2*SIZE, VMA_HEAP|VMA_USER, ret);
|
||||||
|
vma_dump();
|
||||||
|
|
||||||
|
ret = vma_add(VIRT_FROM_ADDR-SIZE, VIRT_FROM_ADDR, VMA_HEAP|VMA_USER);
|
||||||
|
test(ret >= 0, "vma_add(0x%lx, 0x%lx, 0x%x) = %u", VIRT_FROM_ADDR-SIZE, VIRT_FROM_ADDR, VMA_HEAP|VMA_USER, ret);
|
||||||
|
vma_dump();
|
||||||
|
|
||||||
|
// vma_free
|
||||||
|
ret = vma_free(VIRT_FROM_ADDR-SIZE, VIRT_FROM_ADDR);
|
||||||
|
test(ret >= 0, "vma_free(0x%lx, 0x%lx) = %u", VIRT_FROM_ADDR-SIZE, VIRT_FROM_ADDR, ret);
|
||||||
|
vma_dump();
|
||||||
|
|
||||||
|
ret = vma_free(VIRT_FROM_ADDR+SIZE, VIRT_FROM_ADDR+2*SIZE);
|
||||||
|
test(ret >= 0, "vma_free(0x%lx, 0x%lx) = %u", VIRT_FROM_ADDR+SIZE, VIRT_FROM_ADDR+2*SIZE, ret);
|
||||||
|
vma_dump();
|
||||||
|
|
||||||
|
ret = vma_free(VIRT_FROM_ADDR, VIRT_FROM_ADDR+SIZE);
|
||||||
|
test(ret >= 0, "vma_free(0x%lx, 0x%lx) = %u", VIRT_FROM_ADDR, VIRT_FROM_ADDR+SIZE, ret);
|
||||||
|
vma_dump();
|
||||||
|
}
|
||||||
|
|
||||||
|
/** @brief Test of the kernel malloc allocator */
|
||||||
|
static void malloc(void)
|
||||||
|
{
|
||||||
|
int i;
|
||||||
|
int* p[20];
|
||||||
|
int* a;
|
||||||
|
|
||||||
|
// kmalloc() test
|
||||||
|
buddy_dump();
|
||||||
|
a = kmalloc(SIZE);
|
||||||
|
test(a != NULL, "kmalloc(%lu) = %p", SIZE, a);
|
||||||
|
buddy_dump();
|
||||||
|
|
||||||
|
// simple write/read test
|
||||||
|
for (i=0; i<SIZE/sizeof(int); i++)
|
||||||
|
a[i] = i;
|
||||||
|
|
||||||
|
for (i=0; i<SIZE/sizeof(int); i++) {
|
||||||
|
if (a[i] != i)
|
||||||
|
test(0, "data mismatch: *(%p) != %lu", &a[i], i);
|
||||||
|
}
|
||||||
|
test(1, "data is equal");
|
||||||
|
|
||||||
|
// kfree() test
|
||||||
|
kfree(a);
|
||||||
|
test(1, "kfree(%p)", a);
|
||||||
|
buddy_dump();
|
||||||
|
|
||||||
|
// some random malloc/free patterns to stress the buddy system
|
||||||
|
for (i=0; i<20; i++) {
|
||||||
|
uint16_t sz = rand();
|
||||||
|
p[i] = kmalloc(sz);
|
||||||
|
test(p[i] != NULL, "kmalloc(%u) = %p", sz, p[i]);
|
||||||
|
}
|
||||||
|
buddy_dump();
|
||||||
|
|
||||||
|
for (i=0; i<20; i++) {
|
||||||
|
kfree(p[i]);
|
||||||
|
test(1, "kfree(%p)", p[i]);
|
||||||
|
}
|
||||||
|
buddy_dump();
|
||||||
|
}
|
||||||
|
|
||||||
/** @brief This is a simple procedure to test memory management subsystem */
|
/** @brief This is a simple procedure to test memory management subsystem */
|
||||||
int memory(void* arg)
|
int memory(void* arg)
|
||||||
{
|
{
|
||||||
kprintf("======== PAGING: test started...\n");
|
kprintf("======== PAGING: test started...\n");
|
||||||
paging();
|
paging();
|
||||||
|
|
||||||
|
kprintf("======== VMA: test started...\n");
|
||||||
|
vma();
|
||||||
|
|
||||||
|
kprintf("======== MALLOC: test started...\n");
|
||||||
|
malloc();
|
||||||
|
|
||||||
kprintf("======== All tests finished successfull...\n");
|
kprintf("======== All tests finished successfull...\n");
|
||||||
|
|
||||||
return 0;
|
return 0;
|
|
@ -35,9 +35,11 @@
|
||||||
|
|
||||||
#ifdef CONFIG_MULTIBOOT
|
#ifdef CONFIG_MULTIBOOT
|
||||||
|
|
||||||
/* are there modules to do something with? */
|
/// Does the bootloader provide mem_* fields?
|
||||||
|
#define MULTIBOOT_INFO_MEM 0x00000001
|
||||||
|
/// Does the bootloader provide a list of modules?
|
||||||
#define MULTIBOOT_INFO_MODS 0x00000008
|
#define MULTIBOOT_INFO_MODS 0x00000008
|
||||||
/* is there a full memory map? */
|
/// Does the bootloader provide a full memory map?
|
||||||
#define MULTIBOOT_INFO_MEM_MAP 0x00000040
|
#define MULTIBOOT_INFO_MEM_MAP 0x00000040
|
||||||
|
|
||||||
typedef uint16_t multiboot_uint16_t;
|
typedef uint16_t multiboot_uint16_t;
|
||||||
|
@ -114,7 +116,6 @@ struct multiboot_info
|
||||||
multiboot_uint16_t vbe_interface_off;
|
multiboot_uint16_t vbe_interface_off;
|
||||||
multiboot_uint16_t vbe_interface_len;
|
multiboot_uint16_t vbe_interface_len;
|
||||||
};
|
};
|
||||||
|
|
||||||
typedef struct multiboot_info multiboot_info_t;
|
typedef struct multiboot_info multiboot_info_t;
|
||||||
|
|
||||||
struct multiboot_mmap_entry
|
struct multiboot_mmap_entry
|
||||||
|
|
|
@ -273,7 +273,7 @@ int ipi_tlb_flush(void);
|
||||||
/** @brief Flush a specific page entry in TLB
|
/** @brief Flush a specific page entry in TLB
|
||||||
* @param addr The (virtual) address of the page to flush
|
* @param addr The (virtual) address of the page to flush
|
||||||
*/
|
*/
|
||||||
static inline void tlb_flush_one_page(uint32_t addr)
|
static inline void tlb_flush_one_page(size_t addr)
|
||||||
{
|
{
|
||||||
asm volatile("invlpg (%0)" : : "r"(addr) : "memory");
|
asm volatile("invlpg (%0)" : : "r"(addr) : "memory");
|
||||||
#if MAX_CORES > 1
|
#if MAX_CORES > 1
|
||||||
|
@ -293,7 +293,7 @@ static inline void tlb_flush_one_page(uint32_t addr)
|
||||||
*/
|
*/
|
||||||
static inline void tlb_flush(void)
|
static inline void tlb_flush(void)
|
||||||
{
|
{
|
||||||
uint32_t val = read_cr3();
|
size_t val = read_cr3();
|
||||||
|
|
||||||
if (val)
|
if (val)
|
||||||
write_cr3(val);
|
write_cr3(val);
|
||||||
|
|
|
@ -37,7 +37,7 @@ void kb_init(size_t size, tid_t tid) {
|
||||||
}
|
}
|
||||||
|
|
||||||
void kb_finish(void) {
|
void kb_finish(void) {
|
||||||
kfree(kb_buffer.buffer, (kb_buffer.maxsize * sizeof(char)));
|
kfree(kb_buffer.buffer);
|
||||||
kb_buffer.buffer = NULL;
|
kb_buffer.buffer = NULL;
|
||||||
kb_buffer.size = 0;
|
kb_buffer.size = 0;
|
||||||
kb_buffer.maxsize = 0;
|
kb_buffer.maxsize = 0;
|
||||||
|
|
|
@ -150,6 +150,7 @@ int create_page_map(task_t* task, int copy)
|
||||||
}
|
}
|
||||||
memset(pgt, 0x00, sizeof(page_map_t));
|
memset(pgt, 0x00, sizeof(page_map_t));
|
||||||
|
|
||||||
|
// copy kernel tables
|
||||||
spinlock_lock(&kslock);
|
spinlock_lock(&kslock);
|
||||||
|
|
||||||
for(i=0; i<MAP_ENTRIES; i++) {
|
for(i=0; i<MAP_ENTRIES; i++) {
|
||||||
|
@ -611,7 +612,7 @@ int print_paging_tree(size_t viraddr)
|
||||||
} else
|
} else
|
||||||
kputs("invalid page directory\n");
|
kputs("invalid page directory\n");
|
||||||
|
|
||||||
/* convert physical address to virtual */
|
// convert physical address to virtual
|
||||||
if (paging_enabled && pgt)
|
if (paging_enabled && pgt)
|
||||||
pgt = (page_map_t*) (KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE);
|
pgt = (page_map_t*) (KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE);
|
||||||
|
|
||||||
|
@ -688,11 +689,11 @@ int arch_paging_init(void)
|
||||||
page_map_t* pgt;
|
page_map_t* pgt;
|
||||||
size_t viraddr;
|
size_t viraddr;
|
||||||
|
|
||||||
// uninstall default handler and install our own
|
// replace default pagefault handler
|
||||||
irq_uninstall_handler(14);
|
irq_uninstall_handler(14);
|
||||||
irq_install_handler(14, pagefault_handler);
|
irq_install_handler(14, pagefault_handler);
|
||||||
|
|
||||||
// Create a page table to reference to the other page tables
|
// create a page table to reference to the other page tables
|
||||||
pgt = &pgt_container;
|
pgt = &pgt_container;
|
||||||
|
|
||||||
// map this table at the end of the kernel space
|
// map this table at the end of the kernel space
|
||||||
|
@ -714,8 +715,8 @@ int arch_paging_init(void)
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Set the page table and page directory entries for the kernel. We map the kernel's physical address
|
* Set the page table and page directory entries for the kernel.
|
||||||
* to the same virtual address.
|
* We map the kernel's physical address to the same virtual address.
|
||||||
*/
|
*/
|
||||||
npages = ((size_t) &kernel_end - (size_t) &kernel_start) >> PAGE_SHIFT;
|
npages = ((size_t) &kernel_end - (size_t) &kernel_start) >> PAGE_SHIFT;
|
||||||
if ((size_t)&kernel_end & (PAGE_SIZE-1))
|
if ((size_t)&kernel_end & (PAGE_SIZE-1))
|
||||||
|
@ -723,7 +724,7 @@ int arch_paging_init(void)
|
||||||
map_region((size_t)&kernel_start, (size_t)&kernel_start, npages, MAP_KERNEL_SPACE);
|
map_region((size_t)&kernel_start, (size_t)&kernel_start, npages, MAP_KERNEL_SPACE);
|
||||||
|
|
||||||
#if MAX_CORES > 1
|
#if MAX_CORES > 1
|
||||||
// Reserve page for smp boot code
|
// reserve page for smp boot code
|
||||||
if (!map_region(SMP_SETUP_ADDR, SMP_SETUP_ADDR, 1, MAP_KERNEL_SPACE|MAP_NO_CACHE)) {
|
if (!map_region(SMP_SETUP_ADDR, SMP_SETUP_ADDR, 1, MAP_KERNEL_SPACE|MAP_NO_CACHE)) {
|
||||||
kputs("could not reserve page for smp boot code\n");
|
kputs("could not reserve page for smp boot code\n");
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
|
@ -741,9 +742,7 @@ int arch_paging_init(void)
|
||||||
map_region((size_t) mb_info & PAGE_MASK, (size_t) mb_info & PAGE_MASK, 1, MAP_KERNEL_SPACE);
|
map_region((size_t) mb_info & PAGE_MASK, (size_t) mb_info & PAGE_MASK, 1, MAP_KERNEL_SPACE);
|
||||||
|
|
||||||
#if 0
|
#if 0
|
||||||
/*
|
// map reserved memory regions into the kernel space
|
||||||
* Map reserved memory regions into the kernel space
|
|
||||||
*/
|
|
||||||
if (mb_info && (mb_info->flags & MULTIBOOT_INFO_MEM_MAP)) {
|
if (mb_info && (mb_info->flags & MULTIBOOT_INFO_MEM_MAP)) {
|
||||||
multiboot_memory_map_t* mmap = (multiboot_memory_map_t*) mb_info->mmap_addr;
|
multiboot_memory_map_t* mmap = (multiboot_memory_map_t*) mb_info->mmap_addr;
|
||||||
multiboot_memory_map_t* mmap_end = (void*) ((size_t) mb_info->mmap_addr + mb_info->mmap_length);
|
multiboot_memory_map_t* mmap_end = (void*) ((size_t) mb_info->mmap_addr + mb_info->mmap_length);
|
||||||
|
@ -818,10 +817,7 @@ int arch_paging_init(void)
|
||||||
bootinfo->addr = viraddr;
|
bootinfo->addr = viraddr;
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
/*
|
// we turned on paging => now, we are able to register our task
|
||||||
* we turned on paging
|
|
||||||
* => now, we are able to register our task
|
|
||||||
*/
|
|
||||||
register_task();
|
register_task();
|
||||||
|
|
||||||
// APIC registers into the kernel address space
|
// APIC registers into the kernel address space
|
||||||
|
|
|
@ -407,91 +407,6 @@ int change_page_permissions(size_t start, size_t end, uint32_t flags)
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
|
||||||
* Use the first fit algorithm to find a valid address range
|
|
||||||
*
|
|
||||||
* TODO: O(n) => bad performance, we need a better approach
|
|
||||||
*/
|
|
||||||
size_t vm_alloc(uint32_t npages, uint32_t flags)
|
|
||||||
{
|
|
||||||
task_t* task = per_core(current_task);
|
|
||||||
size_t viraddr, i, j, ret = 0;
|
|
||||||
size_t start, end;
|
|
||||||
page_map_t* pdpt, * pgd, * pgt;
|
|
||||||
uint16_t index_pml4, index_pdpt;
|
|
||||||
uint16_t index_pgd, index_pgt;
|
|
||||||
|
|
||||||
if (BUILTIN_EXPECT(!task || !task->page_map || !paging_enabled, 0))
|
|
||||||
return 0;
|
|
||||||
|
|
||||||
if (flags & MAP_KERNEL_SPACE) {
|
|
||||||
start = (((size_t) &kernel_end) + 10*PAGE_SIZE) & PAGE_MASK;
|
|
||||||
end = (KERNEL_SPACE - PAGE_SIZE) & PAGE_MASK;
|
|
||||||
} else {
|
|
||||||
start = KERNEL_SPACE & PAGE_MASK;
|
|
||||||
end = PAGE_MASK;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (BUILTIN_EXPECT(!npages, 0))
|
|
||||||
return 0;
|
|
||||||
|
|
||||||
if (flags & MAP_KERNEL_SPACE)
|
|
||||||
spinlock_lock(&kslock);
|
|
||||||
else
|
|
||||||
spinlock_irqsave_lock(&task->page_lock);
|
|
||||||
|
|
||||||
viraddr = i = start;
|
|
||||||
j = 0;
|
|
||||||
do {
|
|
||||||
index_pml4 = (viraddr >> 39) & 0x1FF;
|
|
||||||
index_pdpt = (viraddr >> 30) & 0x1FF;
|
|
||||||
index_pgd = (viraddr >> 21) & 0x1FF;
|
|
||||||
index_pgt = (viraddr >> 12) & 0x1FF;
|
|
||||||
|
|
||||||
// Currently, we allocate pages only in kernel space.
|
|
||||||
// => physical address of the page table is identical of the virtual address
|
|
||||||
pdpt = (page_map_t*) (task->page_map->entries[index_pml4] & PAGE_MASK);
|
|
||||||
if (!pdpt) {
|
|
||||||
i += (size_t)PAGE_MAP_ENTRIES*PAGE_MAP_ENTRIES*PAGE_MAP_ENTRIES*PAGE_SIZE;
|
|
||||||
j += PAGE_MAP_ENTRIES*PAGE_MAP_ENTRIES*PAGE_MAP_ENTRIES;
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
pgd = (page_map_t*) (pdpt->entries[index_pdpt] & PAGE_MASK);
|
|
||||||
if (!pgd) {
|
|
||||||
i += PAGE_MAP_ENTRIES*PAGE_MAP_ENTRIES*PAGE_SIZE;
|
|
||||||
j += PAGE_MAP_ENTRIES*PAGE_MAP_ENTRIES;
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
pgt = (page_map_t*) (pgd->entries[index_pgd] & PAGE_MASK);
|
|
||||||
if (!pgt) {
|
|
||||||
i += PAGE_MAP_ENTRIES*PAGE_SIZE;
|
|
||||||
j += PAGE_MAP_ENTRIES;
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!(pgt->entries[index_pgt])) {
|
|
||||||
i += PAGE_SIZE;
|
|
||||||
j++;
|
|
||||||
} else {
|
|
||||||
// restart search
|
|
||||||
j = 0;
|
|
||||||
viraddr = i + PAGE_SIZE;
|
|
||||||
i = i + PAGE_SIZE;
|
|
||||||
}
|
|
||||||
} while((j < npages) && (i<=end));
|
|
||||||
|
|
||||||
if ((j >= npages) && (viraddr < end))
|
|
||||||
ret = viraddr;
|
|
||||||
|
|
||||||
if (flags & MAP_KERNEL_SPACE)
|
|
||||||
spinlock_unlock(&kslock);
|
|
||||||
else
|
|
||||||
spinlock_irqsave_unlock(&task->page_lock);
|
|
||||||
|
|
||||||
return ret;
|
|
||||||
}
|
|
||||||
|
|
||||||
int unmap_region(size_t viraddr, uint32_t npages)
|
int unmap_region(size_t viraddr, uint32_t npages)
|
||||||
{
|
{
|
||||||
|
@ -517,7 +432,7 @@ int unmap_region(size_t viraddr, uint32_t npages)
|
||||||
index_pgd = (viraddr >> 21) & 0x1FF;
|
index_pgd = (viraddr >> 21) & 0x1FF;
|
||||||
index_pgt = (viraddr >> 12) & 0x1FF;
|
index_pgt = (viraddr >> 12) & 0x1FF;
|
||||||
|
|
||||||
// Currently, we allocate pages only in kernel space.
|
// currently, we allocate pages only in kernel space.
|
||||||
// => physical address of the page table is identical of the virtual address
|
// => physical address of the page table is identical of the virtual address
|
||||||
pdpt = (page_map_t*) (task->page_map->entries[index_pml4] & PAGE_MASK);
|
pdpt = (page_map_t*) (task->page_map->entries[index_pml4] & PAGE_MASK);
|
||||||
if (!pdpt) {
|
if (!pdpt) {
|
||||||
|
@ -560,70 +475,6 @@ int unmap_region(size_t viraddr, uint32_t npages)
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
int vm_free(size_t viraddr, uint32_t npages)
|
|
||||||
{
|
|
||||||
task_t* task = per_core(current_task);
|
|
||||||
page_map_t* pdpt, * pgd, * pgt;
|
|
||||||
size_t i;
|
|
||||||
uint16_t index_pml4, index_pdpt;
|
|
||||||
uint16_t index_pgd, index_pgt;
|
|
||||||
|
|
||||||
if (BUILTIN_EXPECT(!task || !task->page_map || !paging_enabled, 0))
|
|
||||||
return -EINVAL;
|
|
||||||
|
|
||||||
if (viraddr <= KERNEL_SPACE)
|
|
||||||
spinlock_lock(&kslock);
|
|
||||||
else
|
|
||||||
spinlock_irqsave_lock(&task->page_lock);
|
|
||||||
|
|
||||||
i = 0;
|
|
||||||
while(i<npages)
|
|
||||||
{
|
|
||||||
index_pml4 = (viraddr >> 39) & 0x1FF;
|
|
||||||
index_pdpt = (viraddr >> 30) & 0x1FF;
|
|
||||||
index_pgd = (viraddr >> 21) & 0x1FF;
|
|
||||||
index_pgt = (viraddr >> 12) & 0x1FF;
|
|
||||||
|
|
||||||
// Currently, we allocate pages only in kernel space.
|
|
||||||
// => physical address of the page table is identical of the virtual address
|
|
||||||
pdpt = (page_map_t*) (task->page_map->entries[index_pml4] & PAGE_MASK);
|
|
||||||
if (!pdpt) {
|
|
||||||
viraddr += (size_t) PAGE_MAP_ENTRIES*PAGE_MAP_ENTRIES*PAGE_MAP_ENTRIES*PAGE_SIZE;
|
|
||||||
i += PAGE_MAP_ENTRIES*PAGE_MAP_ENTRIES*PAGE_MAP_ENTRIES;
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
pgd = (page_map_t*) (pdpt->entries[index_pdpt] & PAGE_MASK);
|
|
||||||
if (!pgd) {
|
|
||||||
viraddr += PAGE_MAP_ENTRIES*PAGE_MAP_ENTRIES*PAGE_SIZE;
|
|
||||||
i += PAGE_MAP_ENTRIES*PAGE_MAP_ENTRIES;
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
pgt = (page_map_t*) (pgd->entries[index_pgd] & PAGE_MASK);
|
|
||||||
if (!pgt) {
|
|
||||||
viraddr += PAGE_MAP_ENTRIES*PAGE_SIZE;
|
|
||||||
i += PAGE_MAP_ENTRIES;
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (pgt->entries[index_pgt])
|
|
||||||
pgt->entries[index_pgt] = 0;
|
|
||||||
|
|
||||||
viraddr +=PAGE_SIZE;
|
|
||||||
i++;
|
|
||||||
|
|
||||||
tlb_flush_one_page(viraddr);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (viraddr <= KERNEL_SPACE)
|
|
||||||
spinlock_unlock(&kslock);
|
|
||||||
else
|
|
||||||
spinlock_irqsave_unlock(&task->page_lock);
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
static void pagefault_handler(struct state *s)
|
static void pagefault_handler(struct state *s)
|
||||||
{
|
{
|
||||||
task_t* task = per_core(current_task);
|
task_t* task = per_core(current_task);
|
||||||
|
@ -646,8 +497,10 @@ static void pagefault_handler(struct state *s)
|
||||||
kprintf("Could not map 0x%x at 0x%x\n", phyaddr, viraddr);
|
kprintf("Could not map 0x%x at 0x%x\n", phyaddr, viraddr);
|
||||||
put_page(phyaddr);
|
put_page(phyaddr);
|
||||||
}
|
}
|
||||||
// handle missing paging structures for userspace
|
/*
|
||||||
// all kernel space paging structures have been initialized in entry64.asm
|
* handle missing paging structures for userspace
|
||||||
|
* all kernel space paging structures have been initialized in entry64.asm
|
||||||
|
*/
|
||||||
else if (viraddr >= PAGE_PGT) {
|
else if (viraddr >= PAGE_PGT) {
|
||||||
kprintf("map_region: missing paging structure at: 0x%lx (%s)\n", viraddr, map_to_lvlname(viraddr));
|
kprintf("map_region: missing paging structure at: 0x%lx (%s)\n", viraddr, map_to_lvlname(viraddr));
|
||||||
|
|
||||||
|
@ -685,7 +538,7 @@ int arch_paging_init(void)
|
||||||
{
|
{
|
||||||
uint32_t i, npages;
|
uint32_t i, npages;
|
||||||
|
|
||||||
// uninstall default handler and install our own
|
// replace default pagefault handler
|
||||||
irq_uninstall_handler(14);
|
irq_uninstall_handler(14);
|
||||||
irq_install_handler(14, pagefault_handler);
|
irq_install_handler(14, pagefault_handler);
|
||||||
|
|
||||||
|
@ -695,7 +548,7 @@ int arch_paging_init(void)
|
||||||
*/
|
*/
|
||||||
|
|
||||||
#if MAX_CORES > 1
|
#if MAX_CORES > 1
|
||||||
// Reserve page for smp boot code
|
// reserve page for smp boot code
|
||||||
if (!map_region(SMP_SETUP_ADDR, SMP_SETUP_ADDR, 1, MAP_KERNEL_SPACE|MAP_NO_CACHE)) {
|
if (!map_region(SMP_SETUP_ADDR, SMP_SETUP_ADDR, 1, MAP_KERNEL_SPACE|MAP_NO_CACHE)) {
|
||||||
kputs("could not reserve page for smp boot code\n");
|
kputs("could not reserve page for smp boot code\n");
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
|
@ -704,9 +557,7 @@ int arch_paging_init(void)
|
||||||
|
|
||||||
#ifdef CONFIG_MULTIBOOT
|
#ifdef CONFIG_MULTIBOOT
|
||||||
#if 0
|
#if 0
|
||||||
/*
|
// map reserved memory regions into the kernel space
|
||||||
* Map reserved memory regions into the kernel space
|
|
||||||
*/
|
|
||||||
if (mb_info && (mb_info->flags & MULTIBOOT_INFO_MEM_MAP)) {
|
if (mb_info && (mb_info->flags & MULTIBOOT_INFO_MEM_MAP)) {
|
||||||
multiboot_memory_map_t* mmap = (multiboot_memory_map_t*) mb_info->mmap_addr;
|
multiboot_memory_map_t* mmap = (multiboot_memory_map_t*) mb_info->mmap_addr;
|
||||||
multiboot_memory_map_t* mmap_end = (void*) ((size_t) mb_info->mmap_addr + mb_info->mmap_length);
|
multiboot_memory_map_t* mmap_end = (void*) ((size_t) mb_info->mmap_addr + mb_info->mmap_length);
|
||||||
|
@ -742,10 +593,7 @@ int arch_paging_init(void)
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
/*
|
// we turned on paging => now, we are able to register our task
|
||||||
* we turned on paging
|
|
||||||
* => now, we are able to register our task
|
|
||||||
*/
|
|
||||||
register_task();
|
register_task();
|
||||||
|
|
||||||
// APIC registers into the kernel address space
|
// APIC registers into the kernel address space
|
||||||
|
|
|
@ -70,7 +70,7 @@ static ssize_t socket_write(fildes_t* file, uint8_t* buffer, size_t size)
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
memcpy(tmp, buffer, size);
|
memcpy(tmp, buffer, size);
|
||||||
ret = lwip_write(file->offset, tmp, size);
|
ret = lwip_write(file->offset, tmp, size);
|
||||||
kfree(tmp, size);
|
kfree(tmp);
|
||||||
#endif
|
#endif
|
||||||
if (ret < 0)
|
if (ret < 0)
|
||||||
ret = -errno;
|
ret = -errno;
|
||||||
|
@ -147,7 +147,7 @@ int socket_init(vfs_node_t* node, const char* name)
|
||||||
|
|
||||||
} while(blist);
|
} while(blist);
|
||||||
|
|
||||||
kfree(new_node, sizeof(vfs_node_t));
|
kfree(new_node);
|
||||||
|
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
}
|
}
|
||||||
|
|
|
@ -153,7 +153,7 @@ int null_init(vfs_node_t* node, const char* name)
|
||||||
|
|
||||||
} while(blist);
|
} while(blist);
|
||||||
|
|
||||||
kfree(new_node, sizeof(vfs_node_t));
|
kfree(new_node);
|
||||||
|
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
}
|
}
|
||||||
|
@ -212,7 +212,7 @@ int stdin_init(vfs_node_t* node, const char* name)
|
||||||
|
|
||||||
} while(blist);
|
} while(blist);
|
||||||
|
|
||||||
kfree(new_node, sizeof(vfs_node_t));
|
kfree(new_node);
|
||||||
|
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
}
|
}
|
||||||
|
@ -271,7 +271,7 @@ int stdout_init(vfs_node_t* node, const char* name)
|
||||||
|
|
||||||
} while(blist);
|
} while(blist);
|
||||||
|
|
||||||
kfree(new_node, sizeof(vfs_node_t));
|
kfree(new_node);
|
||||||
|
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
}
|
}
|
||||||
|
@ -330,7 +330,7 @@ int stderr_init(vfs_node_t* node, const char* name)
|
||||||
|
|
||||||
} while(blist);
|
} while(blist);
|
||||||
|
|
||||||
kfree(new_node, sizeof(vfs_node_t));
|
kfree(new_node);
|
||||||
|
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
}
|
}
|
||||||
|
|
21
fs/initrd.c
21
fs/initrd.c
|
@ -210,7 +210,7 @@ static int initrd_open(fildes_t* file, const char* name)
|
||||||
if (file->node->type == FS_FILE) {
|
if (file->node->type == FS_FILE) {
|
||||||
if ((file->flags & O_CREAT) && (file->flags & O_EXCL))
|
if ((file->flags & O_CREAT) && (file->flags & O_EXCL))
|
||||||
return -EEXIST;
|
return -EEXIST;
|
||||||
|
|
||||||
/* in the case of O_TRUNC kfree all the nodes */
|
/* in the case of O_TRUNC kfree all the nodes */
|
||||||
if (file->flags & O_TRUNC) {
|
if (file->flags & O_TRUNC) {
|
||||||
uint32_t i;
|
uint32_t i;
|
||||||
|
@ -221,8 +221,7 @@ static int initrd_open(fildes_t* file, const char* name)
|
||||||
/* the first blist pointer have do remain valid. */
|
/* the first blist pointer have do remain valid. */
|
||||||
for(i=0; i<MAX_DATABLOCKS && !data; i++) {
|
for(i=0; i<MAX_DATABLOCKS && !data; i++) {
|
||||||
if (blist->data[i]) {
|
if (blist->data[i]) {
|
||||||
kfree(blist->data[i],
|
kfree(blist->data[i]);
|
||||||
sizeof(data_block_t));
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if (blist->next) {
|
if (blist->next) {
|
||||||
|
@ -234,12 +233,12 @@ static int initrd_open(fildes_t* file, const char* name)
|
||||||
do {
|
do {
|
||||||
for(i=0; i<MAX_DATABLOCKS && !data; i++) {
|
for(i=0; i<MAX_DATABLOCKS && !data; i++) {
|
||||||
if (blist->data[i]) {
|
if (blist->data[i]) {
|
||||||
kfree(blist->data[i], sizeof(data_block_t));
|
kfree(blist->data[i]);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
lastblist = blist;
|
lastblist = blist;
|
||||||
blist = blist->next;
|
blist = blist->next;
|
||||||
kfree(lastblist, sizeof(block_list_t));
|
kfree(lastblist);
|
||||||
} while(blist);
|
} while(blist);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -253,7 +252,7 @@ static int initrd_open(fildes_t* file, const char* name)
|
||||||
/* opendir was called: */
|
/* opendir was called: */
|
||||||
if (name[0] == '\0')
|
if (name[0] == '\0')
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
/* open file was called: */
|
/* open file was called: */
|
||||||
if (!(file->flags & O_CREAT))
|
if (!(file->flags & O_CREAT))
|
||||||
return -ENOENT;
|
return -ENOENT;
|
||||||
|
@ -264,11 +263,11 @@ static int initrd_open(fildes_t* file, const char* name)
|
||||||
vfs_node_t* new_node = kmalloc(sizeof(vfs_node_t));
|
vfs_node_t* new_node = kmalloc(sizeof(vfs_node_t));
|
||||||
if (BUILTIN_EXPECT(!new_node, 0))
|
if (BUILTIN_EXPECT(!new_node, 0))
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
|
||||||
blist = &file->node->block_list;
|
blist = &file->node->block_list;
|
||||||
dir_block_t* dir_block;
|
dir_block_t* dir_block;
|
||||||
dirent_t* dirent;
|
dirent_t* dirent;
|
||||||
|
|
||||||
memset(new_node, 0x00, sizeof(vfs_node_t));
|
memset(new_node, 0x00, sizeof(vfs_node_t));
|
||||||
new_node->type = FS_FILE;
|
new_node->type = FS_FILE;
|
||||||
new_node->read = &initrd_read;
|
new_node->read = &initrd_read;
|
||||||
|
@ -286,7 +285,7 @@ static int initrd_open(fildes_t* file, const char* name)
|
||||||
if (!dirent->vfs_node) {
|
if (!dirent->vfs_node) {
|
||||||
dirent->vfs_node = new_node;
|
dirent->vfs_node = new_node;
|
||||||
strncpy(dirent->name, (char*) name, MAX_FNAME);
|
strncpy(dirent->name, (char*) name, MAX_FNAME);
|
||||||
goto exit_create_file; // there might be a better Solution ***************
|
goto exit_create_file; // TODO: there might be a better Solution
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -425,9 +424,9 @@ static vfs_node_t* initrd_mkdir(vfs_node_t* node, const char* name)
|
||||||
blist = blist->next;
|
blist = blist->next;
|
||||||
} while(blist);
|
} while(blist);
|
||||||
|
|
||||||
kfree(dir_block, sizeof(dir_block_t));
|
kfree(dir_block);
|
||||||
out:
|
out:
|
||||||
kfree(new_node, sizeof(vfs_node_t));
|
kfree(new_node);
|
||||||
|
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
|
72
include/metalsvm/malloc.h
Normal file
72
include/metalsvm/malloc.h
Normal file
|
@ -0,0 +1,72 @@
|
||||||
|
/*
|
||||||
|
* Copyright 2010 Steffen Vogel, Chair for Operating Systems,
|
||||||
|
* RWTH Aachen University
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*
|
||||||
|
* This file is part of MetalSVM.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#ifndef __MALLOC_H__
|
||||||
|
#define __MALLOC_H__
|
||||||
|
|
||||||
|
#include <metalsvm/stddef.h>
|
||||||
|
|
||||||
|
#ifdef __cplusplus
|
||||||
|
extern "C" {
|
||||||
|
#endif
|
||||||
|
|
||||||
|
/// Binary exponent of maximal size for kmalloc()
|
||||||
|
#define BUDDY_MAX 32 // 4 GB
|
||||||
|
/// Binary exponent of minimal buddy size
|
||||||
|
#define BUDDY_MIN 4 // 16 Byte >= sizeof(buddy_prefix_t)
|
||||||
|
/// Binary exponent of the size which we allocate at least in one call to buddy_fill();
|
||||||
|
#define BUDDY_ALLOC 17 // 128 KByte >= PAGE_SHIFT, TODO: add Huge Page support?
|
||||||
|
|
||||||
|
#define BUDDY_LISTS (BUDDY_MAX-BUDDY_MIN+1)
|
||||||
|
#define BUDDY_MAGIC 0xBABE
|
||||||
|
|
||||||
|
union buddy;
|
||||||
|
|
||||||
|
/** @brief Buddy
|
||||||
|
*
|
||||||
|
* Every free memory block is stored in a linked list according to its size.
|
||||||
|
* We can use this free memory to store store this buddy_t union which represents
|
||||||
|
* this block (the buddy_t union is alligned to the front).
|
||||||
|
* Therefore the address of the buddy_t union is equal with the address
|
||||||
|
* of the underlying free memory block.
|
||||||
|
*
|
||||||
|
* Every allocated memory block is prefixed with its binary size exponent and
|
||||||
|
* a known magic number. This prefix is hidden by the user because its located
|
||||||
|
* before the actual memory address returned by kmalloc()
|
||||||
|
*/
|
||||||
|
typedef union buddy {
|
||||||
|
/// Pointer to the next buddy in the linked list.
|
||||||
|
union buddy* next;
|
||||||
|
struct {
|
||||||
|
/// The binary exponent of the block size
|
||||||
|
uint8_t exponent;
|
||||||
|
/// Must be equal to BUDDY_MAGIC for a valid memory block
|
||||||
|
uint16_t magic;
|
||||||
|
} prefix;
|
||||||
|
} buddy_t;
|
||||||
|
|
||||||
|
/** @brief Dump free buddies */
|
||||||
|
void buddy_dump(void);
|
||||||
|
|
||||||
|
#ifdef __cplusplus
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#endif
|
||||||
|
|
|
@ -49,33 +49,39 @@ extern atomic_int32_t total_available_pages;
|
||||||
*/
|
*/
|
||||||
int mmu_init(void);
|
int mmu_init(void);
|
||||||
|
|
||||||
/** @brief get continuous pages
|
/** @brief Get continuous pages
|
||||||
*
|
*
|
||||||
* This function finds a continuous page region (first fit algorithm)
|
* Use first fit algorithm to find a suitable, continous physical memory region
|
||||||
*
|
|
||||||
* @param no_pages Desired number of pages
|
|
||||||
*
|
*
|
||||||
|
* @param npages Desired number of pages
|
||||||
* @return
|
* @return
|
||||||
* - physical address on success
|
* - physical address on success
|
||||||
* - 0 on failure
|
* - 0 on failure
|
||||||
*/
|
*/
|
||||||
size_t get_pages(uint32_t no_pages);
|
size_t get_pages(uint32_t npages);
|
||||||
|
|
||||||
/** @brief get a single page
|
/** @brief Get a single page
|
||||||
*
|
*
|
||||||
* Convenience function: uses get_pages(1);
|
* Convenience function: uses get_pages(1);
|
||||||
*/
|
*/
|
||||||
static inline size_t get_page(void) { return get_pages(1); }
|
static inline size_t get_page(void) { return get_pages(1); }
|
||||||
|
|
||||||
/** @brief Put back a page after use
|
/** @brief Put back a sequence of continous pages
|
||||||
*
|
*
|
||||||
* @param phyaddr Physical address to put back
|
* @param phyaddr Physical address of the first page
|
||||||
|
* @param npages Number of pages
|
||||||
*
|
*
|
||||||
* @return
|
* @return
|
||||||
* - 0 on success
|
* - 0 on success
|
||||||
* - -EINVAL (-22) on failure
|
* - -EINVAL (-22) on failure
|
||||||
*/
|
*/
|
||||||
int put_page(size_t phyaddr);
|
int put_pages(size_t phyaddr, size_t npages);
|
||||||
|
|
||||||
|
/** @brief Put a single page
|
||||||
|
*
|
||||||
|
* Convenience function: uses put_pages(1);
|
||||||
|
*/
|
||||||
|
static inline int put_page(size_t phyaddr) { return put_pages(phyaddr, 1); }
|
||||||
|
|
||||||
#ifdef __cplusplus
|
#ifdef __cplusplus
|
||||||
}
|
}
|
||||||
|
|
|
@ -29,10 +29,7 @@
|
||||||
#include <metalsvm/stddef.h>
|
#include <metalsvm/stddef.h>
|
||||||
#include <asm/page.h>
|
#include <asm/page.h>
|
||||||
|
|
||||||
/**
|
/** @brief Sets up the environment, page directories etc and enables paging. */
|
||||||
* Sets up the environment, page directories etc and
|
|
||||||
* enables paging.
|
|
||||||
*/
|
|
||||||
static inline int paging_init(void) { return arch_paging_init(); }
|
static inline int paging_init(void) { return arch_paging_init(); }
|
||||||
|
|
||||||
#endif
|
#endif
|
||||||
|
|
|
@ -28,7 +28,7 @@
|
||||||
extern "C" {
|
extern "C" {
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
#define NULL ((void*) 0)
|
#define NULL ((void*) 0)
|
||||||
|
|
||||||
typedef unsigned int tid_t;
|
typedef unsigned int tid_t;
|
||||||
|
|
||||||
|
@ -62,10 +62,10 @@ typedef unsigned int tid_t;
|
||||||
irq_nested_enable(flags);\
|
irq_nested_enable(flags);\
|
||||||
return ret; \
|
return ret; \
|
||||||
}
|
}
|
||||||
#define CORE_ID smp_id()
|
#define CORE_ID smp_id()
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
/* needed to find the task, which is currently running on this core */
|
// needed to find the task, which is currently running on this core
|
||||||
struct task;
|
struct task;
|
||||||
DECLARE_PER_CORE(struct task*, current_task);
|
DECLARE_PER_CORE(struct task*, current_task);
|
||||||
|
|
||||||
|
|
|
@ -29,9 +29,7 @@
|
||||||
#ifndef __STDLIB_H__
|
#ifndef __STDLIB_H__
|
||||||
#define __STDLIB_H__
|
#define __STDLIB_H__
|
||||||
|
|
||||||
#include <metalsvm/config.h>
|
#include <metalsvm/stddef.h>
|
||||||
#include <metalsvm/tasks_types.h>
|
|
||||||
#include <asm/stddef.h>
|
|
||||||
|
|
||||||
#ifdef __cplusplus
|
#ifdef __cplusplus
|
||||||
extern "C" {
|
extern "C" {
|
||||||
|
@ -55,46 +53,42 @@ extern "C" {
|
||||||
|
|
||||||
void NORETURN abort(void);
|
void NORETURN abort(void);
|
||||||
|
|
||||||
/** @brief Kernel's memory allocator function.
|
/** @brief General page allocator function
|
||||||
*
|
*
|
||||||
* This will just call mem_allocation with
|
* This function allocates and maps whole pages.
|
||||||
* the flags MAP_KERNEL_SPACE and MAP_HEAP.
|
* To avoid fragmentation you should use kmalloc() and kfree()!
|
||||||
*
|
|
||||||
* @return Pointer to the new memory range
|
|
||||||
*/
|
|
||||||
void* kmalloc(size_t);
|
|
||||||
|
|
||||||
/** @brief Kernel's more general memory allocator function.
|
|
||||||
*
|
|
||||||
* This function lets you choose flags for the newly allocated memory.
|
|
||||||
*
|
*
|
||||||
* @param sz Desired size of the new memory
|
* @param sz Desired size of the new memory
|
||||||
* @param flags Flags to specify
|
* @param flags Flags to for map_region(), vma_add()
|
||||||
*
|
*
|
||||||
* @return Pointer to the new memory range
|
* @return Pointer to the new memory range
|
||||||
*/
|
*/
|
||||||
void* mem_allocation(size_t sz, uint32_t flags);
|
void* palloc(size_t sz, uint32_t flags);
|
||||||
|
|
||||||
/** @brief Free memory
|
/** @brief Free general kernel memory
|
||||||
*
|
*
|
||||||
* The kernel malloc doesn't track how
|
* The pmalloc() doesn't track how much memory was allocated for which pointer,
|
||||||
* much memory was allocated for which pointer,
|
|
||||||
* so you have to specify how much memory shall be freed.
|
* so you have to specify how much memory shall be freed.
|
||||||
*/
|
|
||||||
void kfree(void*, size_t);
|
|
||||||
|
|
||||||
/** @brief Create a new stack for a new task
|
|
||||||
*
|
*
|
||||||
* @return start address of the new stack
|
* @param sz The size which should freed
|
||||||
*/
|
*/
|
||||||
void* create_stack(void);
|
void pfree(void* addr, size_t sz);
|
||||||
|
|
||||||
/** @brief Delete stack of a finished task
|
/** @brief The memory allocator function
|
||||||
*
|
*
|
||||||
* @param addr Pointer to the stack
|
* This allocator uses a buddy system to manage free memory.
|
||||||
* @return 0 on success
|
*
|
||||||
|
* @return Pointer to the new memory range
|
||||||
*/
|
*/
|
||||||
int destroy_stack(task_t* addr);
|
void* kmalloc(size_t sz);
|
||||||
|
|
||||||
|
/** @brief The memory free function
|
||||||
|
*
|
||||||
|
* Releases memory allocated by malloc()
|
||||||
|
*
|
||||||
|
* @param addr The address to the memory block allocated by malloc()
|
||||||
|
*/
|
||||||
|
void kfree(void* addr);
|
||||||
|
|
||||||
/** @brief String to long
|
/** @brief String to long
|
||||||
*
|
*
|
||||||
|
@ -113,7 +107,7 @@ unsigned long strtoul(const char* nptr, char** endptr, int base);
|
||||||
*/
|
*/
|
||||||
static inline int atoi(const char *str)
|
static inline int atoi(const char *str)
|
||||||
{
|
{
|
||||||
return (int)strtol(str, (char **)NULL, 10);
|
return (int)strtol(str, (char **) NULL, 10);
|
||||||
}
|
}
|
||||||
|
|
||||||
#ifdef __cplusplus
|
#ifdef __cplusplus
|
||||||
|
|
|
@ -147,9 +147,7 @@ tid_t wait(int32_t* result);
|
||||||
*/
|
*/
|
||||||
void update_load(void);
|
void update_load(void);
|
||||||
|
|
||||||
/** @brief Print the current cpu load
|
/** @brief Print the current cpu load */
|
||||||
*
|
|
||||||
*/
|
|
||||||
void dump_load(void);
|
void dump_load(void);
|
||||||
|
|
||||||
#if MAX_CORES > 1
|
#if MAX_CORES > 1
|
||||||
|
@ -201,9 +199,7 @@ int block_current_task(void);
|
||||||
*/
|
*/
|
||||||
int set_timer(uint64_t deadline);
|
int set_timer(uint64_t deadline);
|
||||||
|
|
||||||
/** @brief check is a timer is expired
|
/** @brief check is a timer is expired */
|
||||||
*
|
|
||||||
*/
|
|
||||||
void check_timers(void);
|
void check_timers(void);
|
||||||
|
|
||||||
/** @brief Abort current task */
|
/** @brief Abort current task */
|
||||||
|
|
|
@ -27,56 +27,102 @@
|
||||||
#define __VMA_H__
|
#define __VMA_H__
|
||||||
|
|
||||||
#include <metalsvm/stddef.h>
|
#include <metalsvm/stddef.h>
|
||||||
|
#include <asm/page.h>
|
||||||
|
|
||||||
#ifdef __cplusplus
|
#ifdef __cplusplus
|
||||||
extern "C" {
|
extern "C" {
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
/// Read access to this VMA is allowed
|
||||||
#define VMA_READ (1 << 0)
|
#define VMA_READ (1 << 0)
|
||||||
|
/// Write access to this VMA is allowed
|
||||||
#define VMA_WRITE (1 << 1)
|
#define VMA_WRITE (1 << 1)
|
||||||
|
/// Instructions fetches in this VMA are allowed
|
||||||
#define VMA_EXECUTE (1 << 2)
|
#define VMA_EXECUTE (1 << 2)
|
||||||
|
/// This VMA is cacheable
|
||||||
#define VMA_CACHEABLE (1 << 3)
|
#define VMA_CACHEABLE (1 << 3)
|
||||||
#define VMA_NOACCESS (1 << 4)
|
/// This VMA is not accessable
|
||||||
|
#define VMA_NO_ACCESS (1 << 4)
|
||||||
|
/// This VMA should be part of the userspace
|
||||||
|
#define VMA_USER (1 << 5)
|
||||||
|
/// A collection of flags used for the kernel heap (kmalloc)
|
||||||
|
#define VMA_HEAP (VMA_READ|VMA_WRITE|VMA_CACHEABLE)
|
||||||
|
|
||||||
|
// boundaries for VAS allocation
|
||||||
|
extern const void kernel_end;
|
||||||
|
//#define VMA_KERN_MIN (((size_t) &kernel_end + PAGE_SIZE) & PAGE_MASK)
|
||||||
|
#define VMA_KERN_MAX KERNEL_SPACE
|
||||||
|
#define VMA_USER_MAX (1UL << 47) // TODO
|
||||||
|
|
||||||
struct vma;
|
struct vma;
|
||||||
|
|
||||||
/** @brief VMA structure definition */
|
/** @brief VMA structure definition
|
||||||
|
*
|
||||||
|
* Each item in this linked list marks a used part of the virtual address space.
|
||||||
|
* Its used by vm_alloc() to find holes between them.
|
||||||
|
*/
|
||||||
typedef struct vma {
|
typedef struct vma {
|
||||||
/// Start address of the memory area
|
/// Start address of the memory area
|
||||||
size_t start;
|
size_t start;
|
||||||
/// End address of the memory area
|
/// End address of the memory area
|
||||||
size_t end;
|
size_t end;
|
||||||
/// Type flags field
|
/// Type flags field
|
||||||
uint32_t type;
|
uint32_t flags;
|
||||||
/// Pointer of next VMA element in the list
|
/// Pointer of next VMA element in the list
|
||||||
struct vma* next;
|
struct vma* next;
|
||||||
/// Pointer to previous VMA element in the list
|
/// Pointer to previous VMA element in the list
|
||||||
struct vma* prev;
|
struct vma* prev;
|
||||||
} vma_t;
|
} vma_t;
|
||||||
|
|
||||||
/** @brief Add a new virtual memory region to the list of VMAs
|
/** @brief Add a new virtual memory area to the list of VMAs
|
||||||
*
|
*
|
||||||
* @param task Pointer to the task_t structure of the task
|
* @param start Start address of the new area
|
||||||
* @param start Start address of the new region
|
* @param end End address of the new area
|
||||||
* @param end End address of the new region
|
* @param flags Type flags the new area shall have
|
||||||
* @param type Type flags the new region shall have
|
|
||||||
*
|
*
|
||||||
* @return
|
* @return
|
||||||
* - 0 on success
|
* - 0 on success
|
||||||
* - -EINVAL (-22) or -EINVAL (-12) on failure
|
* - -EINVAL (-22) or -EINVAL (-12) on failure
|
||||||
*/
|
*/
|
||||||
int vma_add(struct task* task, size_t start, size_t end, uint32_t type);
|
int vma_add(size_t start, size_t end, uint32_t flags);
|
||||||
|
|
||||||
/** @brief Dump information about this task's VMAs into the terminal.
|
/** @brief Search for a free memory area
|
||||||
*
|
*
|
||||||
* This will print out Start, end and flags for each VMA in the task's list
|
* @param size Size of requestes VMA in bytes
|
||||||
|
* @param flags
|
||||||
|
* @return Type flags the new area shall have
|
||||||
|
* - 0 on failure
|
||||||
|
* - the start address of a free area
|
||||||
|
*/
|
||||||
|
size_t vma_alloc(size_t size, uint32_t flags);
|
||||||
|
|
||||||
|
/** @brief Free an allocated memory area
|
||||||
*
|
*
|
||||||
* @param task The task's task_t structure
|
* @param start Start address of the area to be freed
|
||||||
|
* @param end End address of the to be freed
|
||||||
* @return
|
* @return
|
||||||
* - 0 on success
|
* - 0 on success
|
||||||
* - -EINVAL (-22) on failure
|
* - -EINVAL (-22) on failure
|
||||||
*/
|
*/
|
||||||
int vma_dump(struct task* task);
|
int vma_free(size_t start, size_t end);
|
||||||
|
|
||||||
|
/** @brief Free all virtual memory areas
|
||||||
|
*
|
||||||
|
* @return
|
||||||
|
* - 0 on success
|
||||||
|
*/
|
||||||
|
int drop_vma_list();
|
||||||
|
|
||||||
|
/** @brief Copy the VMA list of the current task to task
|
||||||
|
*
|
||||||
|
* @param task The task where the list should be copied to
|
||||||
|
* @return
|
||||||
|
* - 0 on success
|
||||||
|
*/
|
||||||
|
int copy_vma_list(struct task* task);
|
||||||
|
|
||||||
|
/** @brief Dump information about this task's VMAs into the terminal. */
|
||||||
|
void vma_dump();
|
||||||
|
|
||||||
#ifdef __cplusplus
|
#ifdef __cplusplus
|
||||||
}
|
}
|
||||||
|
|
|
@ -63,7 +63,7 @@ extern const void bss_end;
|
||||||
int lowlevel_init(void)
|
int lowlevel_init(void)
|
||||||
{
|
{
|
||||||
// initialize .bss section
|
// initialize .bss section
|
||||||
memset((void*)&bss_start, 0x00, ((size_t) &bss_end - (size_t) &bss_start));
|
memset((char*) &bss_start, 0x00, (char*) &bss_end - (char*) &bss_start);
|
||||||
|
|
||||||
koutput_init();
|
koutput_init();
|
||||||
|
|
||||||
|
|
|
@ -74,6 +74,7 @@ int main(void)
|
||||||
kprintf("This is MetalSVM %s Build %u, %u\n",
|
kprintf("This is MetalSVM %s Build %u, %u\n",
|
||||||
METALSVM_VERSION, &__BUILD_DATE, &__BUILD_TIME);
|
METALSVM_VERSION, &__BUILD_DATE, &__BUILD_TIME);
|
||||||
popbg();
|
popbg();
|
||||||
|
|
||||||
system_init();
|
system_init();
|
||||||
irq_init();
|
irq_init();
|
||||||
timer_init();
|
timer_init();
|
||||||
|
@ -86,7 +87,7 @@ int main(void)
|
||||||
icc_init();
|
icc_init();
|
||||||
svm_init();
|
svm_init();
|
||||||
#endif
|
#endif
|
||||||
initrd_init();
|
initrd_init();
|
||||||
|
|
||||||
irq_enable();
|
irq_enable();
|
||||||
|
|
||||||
|
@ -102,7 +103,7 @@ int main(void)
|
||||||
disable_timer_irq();
|
disable_timer_irq();
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
sleep(5);
|
sleep(2);
|
||||||
create_kernel_task(&id, initd, NULL, NORMAL_PRIO);
|
create_kernel_task(&id, initd, NULL, NORMAL_PRIO);
|
||||||
kprintf("Create initd with id %u\n", id);
|
kprintf("Create initd with id %u\n", id);
|
||||||
reschedule();
|
reschedule();
|
||||||
|
|
|
@ -105,11 +105,11 @@ static int sys_open(const char* name, int flags, int mode)
|
||||||
/* file doesn't exist! */
|
/* file doesn't exist! */
|
||||||
if (check < 0) {
|
if (check < 0) {
|
||||||
/* tidy up the fildescriptor */
|
/* tidy up the fildescriptor */
|
||||||
kfree(curr_task->fildes_table[fd], sizeof(fildes_t));
|
kfree(curr_task->fildes_table[fd]);
|
||||||
curr_task->fildes_table[fd] = NULL;
|
curr_task->fildes_table[fd] = NULL;
|
||||||
return check;
|
return check;
|
||||||
}
|
}
|
||||||
|
|
||||||
return fd;
|
return fd;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -196,7 +196,7 @@ static int sys_socket(int domain, int type, int protocol)
|
||||||
/* file doesn't exist! */
|
/* file doesn't exist! */
|
||||||
if (curr_task->fildes_table[fd]->node == NULL) {
|
if (curr_task->fildes_table[fd]->node == NULL) {
|
||||||
/* tidy up the fildescriptor */
|
/* tidy up the fildescriptor */
|
||||||
kfree(curr_task->fildes_table[fd], sizeof(fildes_t));
|
kfree(curr_task->fildes_table[fd]);
|
||||||
curr_task->fildes_table[fd] = NULL;
|
curr_task->fildes_table[fd] = NULL;
|
||||||
return -ENOENT;
|
return -ENOENT;
|
||||||
}
|
}
|
||||||
|
@ -240,7 +240,7 @@ static int sys_accept(int s, struct sockaddr* addr, socklen_t* addrlen)
|
||||||
/* file doesn't exist! */
|
/* file doesn't exist! */
|
||||||
if (curr_task->fildes_table[fd]->node == NULL) {
|
if (curr_task->fildes_table[fd]->node == NULL) {
|
||||||
/* tidy up the fildescriptor */
|
/* tidy up the fildescriptor */
|
||||||
kfree(curr_task->fildes_table[fd], sizeof(fildes_t));
|
kfree(curr_task->fildes_table[fd]);
|
||||||
curr_task->fildes_table[fd] = NULL;
|
curr_task->fildes_table[fd] = NULL;
|
||||||
return -ENOENT;
|
return -ENOENT;
|
||||||
}
|
}
|
||||||
|
@ -273,7 +273,7 @@ static int sys_close(int fd)
|
||||||
/* close command failed -> return check = errno */
|
/* close command failed -> return check = errno */
|
||||||
if (BUILTIN_EXPECT(check < 0, 0))
|
if (BUILTIN_EXPECT(check < 0, 0))
|
||||||
return check;
|
return check;
|
||||||
kfree(curr_task->fildes_table[fd], sizeof(fildes_t));
|
kfree(curr_task->fildes_table[fd]);
|
||||||
curr_task->fildes_table[fd] = NULL;
|
curr_task->fildes_table[fd] = NULL;
|
||||||
} else {
|
} else {
|
||||||
curr_task->fildes_table[fd]->count--;
|
curr_task->fildes_table[fd]->count--;
|
||||||
|
@ -356,7 +356,7 @@ static int sys_dup(int fd)
|
||||||
* free the memory which was allocated in get_fildes()
|
* free the memory which was allocated in get_fildes()
|
||||||
* cause will link it to another existing memory space
|
* cause will link it to another existing memory space
|
||||||
*/
|
*/
|
||||||
kfree(curr_task->fildes_table[new_fd], sizeof(fildes_t));
|
kfree(curr_task->fildes_table[new_fd]);
|
||||||
|
|
||||||
/* and link it to another existing memory space */
|
/* and link it to another existing memory space */
|
||||||
curr_task->fildes_table[new_fd] = curr_task->fildes_table[fd];
|
curr_task->fildes_table[new_fd] = curr_task->fildes_table[fd];
|
||||||
|
|
138
kernel/tasks.c
138
kernel/tasks.c
|
@ -78,6 +78,7 @@ DEFINE_PER_CORE(task_t*, current_task, task_table+0);
|
||||||
extern const void boot_stack;
|
extern const void boot_stack;
|
||||||
|
|
||||||
/** @brief helper function for the assembly code to determine the current task
|
/** @brief helper function for the assembly code to determine the current task
|
||||||
|
*
|
||||||
* @return Pointer to the task_t structure of current task
|
* @return Pointer to the task_t structure of current task
|
||||||
*/
|
*/
|
||||||
task_t* get_current_task(void) {
|
task_t* get_current_task(void) {
|
||||||
|
@ -96,6 +97,32 @@ uint32_t get_highest_priority(void)
|
||||||
return msb(runqueues[CORE_ID].prio_bitmap);
|
return msb(runqueues[CORE_ID].prio_bitmap);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/** @brief Create a new stack for a new task
|
||||||
|
*
|
||||||
|
* @return start address of the new stack
|
||||||
|
*/
|
||||||
|
static void* create_stack(void)
|
||||||
|
{
|
||||||
|
return palloc(KERNEL_STACK_SIZE, MAP_KERNEL_SPACE);
|
||||||
|
}
|
||||||
|
|
||||||
|
/** @brief Delete stack of a finished task
|
||||||
|
*
|
||||||
|
* @param addr Pointer to the stack
|
||||||
|
* @return
|
||||||
|
* - 0 on success
|
||||||
|
* - -EINVAL on failure
|
||||||
|
*/
|
||||||
|
static int destroy_stack(task_t* task)
|
||||||
|
{
|
||||||
|
if (BUILTIN_EXPECT(!task || !task->stack, 0))
|
||||||
|
return -EINVAL;
|
||||||
|
|
||||||
|
pfree(task->stack, KERNEL_STACK_SIZE);
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
int multitasking_init(void) {
|
int multitasking_init(void) {
|
||||||
if (BUILTIN_EXPECT(task_table[0].status != TASK_IDLE, 0)) {
|
if (BUILTIN_EXPECT(task_table[0].status != TASK_IDLE, 0)) {
|
||||||
kputs("Task 0 is not an idle task\n");
|
kputs("Task 0 is not an idle task\n");
|
||||||
|
@ -193,10 +220,8 @@ static void wakeup_blocked_tasks(int result)
|
||||||
spinlock_irqsave_unlock(&table_lock);
|
spinlock_irqsave_unlock(&table_lock);
|
||||||
}
|
}
|
||||||
|
|
||||||
/** @brief A procedure to be called by
|
/** @brief A procedure to be called by procedures which are called by exiting tasks. */
|
||||||
* procedures which are called by exiting tasks. */
|
|
||||||
static void NORETURN do_exit(int arg) {
|
static void NORETURN do_exit(int arg) {
|
||||||
vma_t* tmp;
|
|
||||||
task_t* curr_task = per_core(current_task);
|
task_t* curr_task = per_core(current_task);
|
||||||
uint32_t flags, core_id, fd, status;
|
uint32_t flags, core_id, fd, status;
|
||||||
|
|
||||||
|
@ -204,17 +229,17 @@ static void NORETURN do_exit(int arg) {
|
||||||
for (fd = 0; fd < NR_OPEN; fd++) {
|
for (fd = 0; fd < NR_OPEN; fd++) {
|
||||||
if(curr_task->fildes_table[fd] != NULL) {
|
if(curr_task->fildes_table[fd] != NULL) {
|
||||||
/*
|
/*
|
||||||
* delete a descriptor from the per-process object
|
* Delete a descriptor from the per-process object
|
||||||
* reference table. If this is not the last reference to the underlying
|
* reference table. If this is not the last reference to the underlying
|
||||||
* object, the object will be ignored.
|
* object, the object will be ignored.
|
||||||
*/
|
*/
|
||||||
if (curr_task->fildes_table[fd]->count == 1) {
|
if (curr_task->fildes_table[fd]->count == 1) {
|
||||||
/* try to close the file */
|
// try to close the file
|
||||||
status = close_fs(curr_task->fildes_table[fd]);
|
status = close_fs(curr_task->fildes_table[fd]);
|
||||||
/* close command failed -> return check = errno */
|
// close command failed -> return check = errno
|
||||||
if (BUILTIN_EXPECT(status < 0, 0))
|
if (BUILTIN_EXPECT(status < 0, 0))
|
||||||
kprintf("Task %u was not able to close file descriptor %i. close_fs returned %d", curr_task->id, fd, -status);
|
kprintf("Task %u was not able to close file descriptor %i. close_fs returned %d", curr_task->id, fd, -status);
|
||||||
kfree(curr_task->fildes_table[fd], sizeof(fildes_t));
|
kfree(curr_task->fildes_table[fd]);
|
||||||
curr_task->fildes_table[fd] = NULL;
|
curr_task->fildes_table[fd] = NULL;
|
||||||
} else {
|
} else {
|
||||||
curr_task->fildes_table[fd]->count--;
|
curr_task->fildes_table[fd]->count--;
|
||||||
|
@ -223,31 +248,20 @@ static void NORETURN do_exit(int arg) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
//finally the table has to be cleared.
|
//finally the table has to be cleared.
|
||||||
kfree(curr_task->fildes_table, sizeof(filp_t)*NR_OPEN);
|
kfree(curr_task->fildes_table);
|
||||||
}
|
}
|
||||||
|
|
||||||
kprintf("Terminate task: %u, return value %d\n", curr_task->id, arg);
|
kprintf("Terminate task: %u, return value %d\n", curr_task->id, arg);
|
||||||
|
|
||||||
wakeup_blocked_tasks(arg);
|
wakeup_blocked_tasks(arg);
|
||||||
|
|
||||||
//vma_dump(curr_task);
|
drop_vma_list(); // kfree virtual memory areas and the vma_list
|
||||||
spinlock_lock(&curr_task->vma_lock);
|
|
||||||
|
|
||||||
// remove memory regions
|
|
||||||
while((tmp = curr_task->vma_list) != NULL) {
|
|
||||||
kfree((void*) tmp->start, tmp->end - tmp->start + 1);
|
|
||||||
curr_task->vma_list = tmp->next;
|
|
||||||
kfree((void*) tmp, sizeof(vma_t));
|
|
||||||
}
|
|
||||||
|
|
||||||
spinlock_unlock(&curr_task->vma_lock);
|
|
||||||
|
|
||||||
drop_page_map(); // delete page directory and its page tables
|
drop_page_map(); // delete page directory and its page tables
|
||||||
|
|
||||||
#if 0
|
#if 0
|
||||||
if (atomic_int32_read(&curr_task->user_usage))
|
if (atomic_int32_read(&curr_task->user_usage))
|
||||||
kprintf("Memory leak! Task %d did not release %d pages\n",
|
kprintf("Memory leak! Task %d did not release %d pages\n",
|
||||||
curr_task->id, atomic_int32_read(&curr_task->user_usage));
|
curr_task->id, atomic_int32_read(&curr_task->user_usage));
|
||||||
#endif
|
#endif
|
||||||
curr_task->status = TASK_FINISHED;
|
curr_task->status = TASK_FINISHED;
|
||||||
|
|
||||||
|
@ -262,9 +276,7 @@ static void NORETURN do_exit(int arg) {
|
||||||
reschedule();
|
reschedule();
|
||||||
|
|
||||||
kprintf("Kernel panic: scheduler on core %d found no valid task\n", CORE_ID);
|
kprintf("Kernel panic: scheduler on core %d found no valid task\n", CORE_ID);
|
||||||
while(1) {
|
while(1) HALT;
|
||||||
HALT;
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/** @brief A procedure to be called by kernel tasks */
|
/** @brief A procedure to be called by kernel tasks */
|
||||||
|
@ -330,7 +342,7 @@ static int create_task(tid_t* id, entry_point_t ep, void* arg, uint8_t prio, uin
|
||||||
ret = create_page_map(task_table+i, 0);
|
ret = create_page_map(task_table+i, 0);
|
||||||
if (ret < 0) {
|
if (ret < 0) {
|
||||||
ret = -ENOMEM;
|
ret = -ENOMEM;
|
||||||
goto create_task_out;
|
goto out;
|
||||||
}
|
}
|
||||||
|
|
||||||
task_table[i].id = i;
|
task_table[i].id = i;
|
||||||
|
@ -376,7 +388,7 @@ static int create_task(tid_t* id, entry_point_t ep, void* arg, uint8_t prio, uin
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
create_task_out:
|
out:
|
||||||
spinlock_irqsave_unlock(&table_lock);
|
spinlock_irqsave_unlock(&table_lock);
|
||||||
|
|
||||||
return ret;
|
return ret;
|
||||||
|
@ -387,11 +399,7 @@ int sys_fork(void)
|
||||||
int ret = -ENOMEM;
|
int ret = -ENOMEM;
|
||||||
unsigned int i, core_id, fd_i;
|
unsigned int i, core_id, fd_i;
|
||||||
task_t* parent_task = per_core(current_task);
|
task_t* parent_task = per_core(current_task);
|
||||||
vma_t** child;
|
|
||||||
vma_t* parent;
|
|
||||||
vma_t* tmp;
|
|
||||||
|
|
||||||
spinlock_lock(&parent_task->vma_lock);
|
|
||||||
spinlock_irqsave_lock(&table_lock);
|
spinlock_irqsave_lock(&table_lock);
|
||||||
|
|
||||||
core_id = CORE_ID;
|
core_id = CORE_ID;
|
||||||
|
@ -403,43 +411,26 @@ int sys_fork(void)
|
||||||
ret = create_page_map(task_table+i, 1);
|
ret = create_page_map(task_table+i, 1);
|
||||||
if (ret < 0) {
|
if (ret < 0) {
|
||||||
ret = -ENOMEM;
|
ret = -ENOMEM;
|
||||||
goto create_task_out;
|
goto out;
|
||||||
|
}
|
||||||
|
|
||||||
|
ret = copy_vma_list(child_task);
|
||||||
|
if (BUILTIN_EXPECT(!ret, 0)) {
|
||||||
|
ret = -ENOMEM;
|
||||||
|
goto out;
|
||||||
}
|
}
|
||||||
|
|
||||||
task_table[i].id = i;
|
task_table[i].id = i;
|
||||||
task_table[i].last_stack_pointer = NULL;
|
task_table[i].last_stack_pointer = NULL;
|
||||||
task_table[i].stack = create_stack();
|
task_table[i].stack = create_stack();
|
||||||
|
|
||||||
spinlock_init(&task_table[i].vma_lock);
|
// init fildes_table
|
||||||
|
|
||||||
// copy VMA list
|
|
||||||
child = &task_table[i].vma_list;
|
|
||||||
parent = parent_task->vma_list;
|
|
||||||
tmp = NULL;
|
|
||||||
|
|
||||||
while(parent) {
|
|
||||||
*child = (vma_t*) kmalloc(sizeof(vma_t));
|
|
||||||
if (BUILTIN_EXPECT(!child, 0))
|
|
||||||
break;
|
|
||||||
|
|
||||||
(*child)->start = parent->start;
|
|
||||||
(*child)->end = parent->end;
|
|
||||||
(*child)->type = parent->type;
|
|
||||||
(*child)->prev = tmp;
|
|
||||||
(*child)->next = NULL;
|
|
||||||
|
|
||||||
parent = parent->next;
|
|
||||||
tmp = *child;
|
|
||||||
child = &((*child)->next);
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
/* init fildes_table */
|
|
||||||
task_table[i].fildes_table = kmalloc(sizeof(filp_t)*NR_OPEN);
|
task_table[i].fildes_table = kmalloc(sizeof(filp_t)*NR_OPEN);
|
||||||
memcpy(task_table[i].fildes_table, parent_task->fildes_table, sizeof(filp_t)*NR_OPEN);
|
memcpy(task_table[i].fildes_table, parent_task->fildes_table, sizeof(filp_t)*NR_OPEN);
|
||||||
for (fd_i = 0; fd_i < NR_OPEN; fd_i++)
|
for (fd_i = 0; fd_i < NR_OPEN; fd_i++) {
|
||||||
if ((task_table[i].fildes_table[fd_i]) != NULL)
|
if ((task_table[i].fildes_table[fd_i]) != NULL)
|
||||||
task_table[i].fildes_table[fd_i]->count++;
|
task_table[i].fildes_table[fd_i]->count++;
|
||||||
|
}
|
||||||
|
|
||||||
mailbox_wait_msg_init(&task_table[i].inbox);
|
mailbox_wait_msg_init(&task_table[i].inbox);
|
||||||
memset(task_table[i].outbox, 0x00, sizeof(mailbox_wait_msg_t*)*MAX_TASKS);
|
memset(task_table[i].outbox, 0x00, sizeof(mailbox_wait_msg_t*)*MAX_TASKS);
|
||||||
|
@ -487,9 +478,8 @@ int sys_fork(void)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
create_task_out:
|
out:
|
||||||
spinlock_irqsave_unlock(&table_lock);
|
spinlock_irqsave_unlock(&table_lock);
|
||||||
spinlock_unlock(&parent_task->vma_lock);
|
|
||||||
|
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
@ -515,7 +505,7 @@ static int kernel_entry(void* args)
|
||||||
|
|
||||||
ret = kernel_args->func(kernel_args->args);
|
ret = kernel_args->func(kernel_args->args);
|
||||||
|
|
||||||
kfree(kernel_args, sizeof(kernel_args_t));
|
kfree(kernel_args);
|
||||||
|
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
@ -679,7 +669,7 @@ static int load_task(load_args_t* largs)
|
||||||
flags |= VMA_WRITE;
|
flags |= VMA_WRITE;
|
||||||
if (prog_header.flags & PF_X)
|
if (prog_header.flags & PF_X)
|
||||||
flags |= VMA_EXECUTE;
|
flags |= VMA_EXECUTE;
|
||||||
vma_add(curr_task, prog_header.virt_addr, prog_header.virt_addr+npages*PAGE_SIZE-1, flags);
|
vma_add(prog_header.virt_addr, prog_header.virt_addr+npages*PAGE_SIZE-1, flags);
|
||||||
|
|
||||||
if (!(prog_header.flags & PF_W))
|
if (!(prog_header.flags & PF_W))
|
||||||
change_page_permissions(prog_header.virt_addr, prog_header.virt_addr+npages*PAGE_SIZE-1, flags);
|
change_page_permissions(prog_header.virt_addr, prog_header.virt_addr+npages*PAGE_SIZE-1, flags);
|
||||||
|
@ -708,7 +698,7 @@ static int load_task(load_args_t* largs)
|
||||||
flags |= VMA_WRITE;
|
flags |= VMA_WRITE;
|
||||||
if (prog_header.flags & PF_X)
|
if (prog_header.flags & PF_X)
|
||||||
flags |= VMA_EXECUTE;
|
flags |= VMA_EXECUTE;
|
||||||
vma_add(curr_task, stack, stack+npages*PAGE_SIZE-1, flags);
|
vma_add(stack, stack+npages*PAGE_SIZE-1, flags);
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -774,7 +764,7 @@ static int load_task(load_args_t* largs)
|
||||||
offset -= sizeof(int);
|
offset -= sizeof(int);
|
||||||
*((int*) (stack+offset)) = largs->argc;
|
*((int*) (stack+offset)) = largs->argc;
|
||||||
|
|
||||||
kfree(largs, sizeof(load_args_t));
|
kfree(largs);
|
||||||
|
|
||||||
// clear fpu state
|
// clear fpu state
|
||||||
curr_task->flags &= ~(TASK_FPU_USED|TASK_FPU_INIT);
|
curr_task->flags &= ~(TASK_FPU_USED|TASK_FPU_INIT);
|
||||||
|
@ -806,7 +796,7 @@ static int user_entry(void* arg)
|
||||||
|
|
||||||
ret = load_task((load_args_t*) arg);
|
ret = load_task((load_args_t*) arg);
|
||||||
|
|
||||||
kfree(arg, sizeof(load_args_t));
|
kfree(arg);
|
||||||
|
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
@ -871,13 +861,11 @@ int create_user_task_on_core(tid_t* id, const char* fname, char** argv, uint32_t
|
||||||
int sys_execve(const char* fname, char** argv, char** env)
|
int sys_execve(const char* fname, char** argv, char** env)
|
||||||
{
|
{
|
||||||
vfs_node_t* node;
|
vfs_node_t* node;
|
||||||
vma_t* tmp;
|
|
||||||
size_t i, buffer_size = 0;
|
size_t i, buffer_size = 0;
|
||||||
load_args_t* load_args = NULL;
|
load_args_t* load_args = NULL;
|
||||||
char *dest, *src;
|
char *dest, *src;
|
||||||
int ret, argc = 0;
|
int ret, argc = 0;
|
||||||
int envc = 0;
|
int envc = 0;
|
||||||
task_t* curr_task = per_core(current_task);
|
|
||||||
|
|
||||||
node = findnode_fs((char*) fname);
|
node = findnode_fs((char*) fname);
|
||||||
if (!node || !(node->type == FS_FILE))
|
if (!node || !(node->type == FS_FILE))
|
||||||
|
@ -920,16 +908,8 @@ int sys_execve(const char* fname, char** argv, char** env)
|
||||||
while ((*dest++ = *src++) != 0);
|
while ((*dest++ = *src++) != 0);
|
||||||
}
|
}
|
||||||
|
|
||||||
spinlock_lock(&curr_task->vma_lock);
|
|
||||||
|
|
||||||
// remove old program
|
// remove old program
|
||||||
while((tmp = curr_task->vma_list) != NULL) {
|
drop_vma_list();
|
||||||
kfree((void*) tmp->start, tmp->end - tmp->start + 1);
|
|
||||||
curr_task->vma_list = tmp->next;
|
|
||||||
kfree((void*) tmp, sizeof(vma_t));
|
|
||||||
}
|
|
||||||
|
|
||||||
spinlock_unlock(&curr_task->vma_lock);
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* we use a trap gate to enter the kernel
|
* we use a trap gate to enter the kernel
|
||||||
|
@ -940,7 +920,7 @@ int sys_execve(const char* fname, char** argv, char** env)
|
||||||
|
|
||||||
ret = load_task(load_args);
|
ret = load_task(load_args);
|
||||||
|
|
||||||
kfree(load_args, sizeof(load_args_t));
|
kfree(load_args);
|
||||||
|
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
|
@ -130,7 +130,7 @@ int kmsg_init(vfs_node_t * node, const char *name)
|
||||||
}
|
}
|
||||||
} while (blist);
|
} while (blist);
|
||||||
|
|
||||||
kfree(new_node, sizeof(vfs_node_t));
|
kfree(new_node);
|
||||||
|
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
C_source := memory.c vma.c
|
C_source := memory.c vma.c malloc.c
|
||||||
MODULE := mm
|
MODULE := mm
|
||||||
|
|
||||||
include $(TOPDIR)/Makefile.inc
|
include $(TOPDIR)/Makefile.inc
|
||||||
|
|
203
mm/malloc.c
Normal file
203
mm/malloc.c
Normal file
|
@ -0,0 +1,203 @@
|
||||||
|
/*
|
||||||
|
* Copyright 2010 Steffen Vogel, Chair for Operating Systems,
|
||||||
|
* RWTH Aachen University
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*
|
||||||
|
* This file is part of MetalSVM.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#include <metalsvm/malloc.h>
|
||||||
|
#include <metalsvm/spinlock.h>
|
||||||
|
#include <metalsvm/stdio.h>
|
||||||
|
#include <metalsvm/mmu.h>
|
||||||
|
|
||||||
|
/// A linked list for each binary size exponent
|
||||||
|
static buddy_t* buddy_lists[BUDDY_LISTS] = { NULL };
|
||||||
|
/// Lock for the buddy lists
|
||||||
|
static spinlock_t buddy_lock = SPINLOCK_INIT;
|
||||||
|
|
||||||
|
/** @brief Check if larger free buddies are available */
|
||||||
|
static inline int buddy_large_avail(uint8_t exp)
|
||||||
|
{
|
||||||
|
while (exp<BUDDY_MAX && !buddy_lists[exp-BUDDY_MIN]) exp++;
|
||||||
|
return exp != BUDDY_MAX;
|
||||||
|
}
|
||||||
|
|
||||||
|
/** @brief Calculate the required buddy size */
|
||||||
|
static inline int buddy_exp(size_t sz)
|
||||||
|
{
|
||||||
|
int exp;
|
||||||
|
for (exp=0; (1<<exp)<sz; exp++);
|
||||||
|
|
||||||
|
if (exp > BUDDY_MAX)
|
||||||
|
return 0;
|
||||||
|
else if (exp < BUDDY_MIN)
|
||||||
|
return BUDDY_MIN;
|
||||||
|
else
|
||||||
|
return exp;
|
||||||
|
}
|
||||||
|
|
||||||
|
/** @brief Get a free buddy by potentially splitting a larger one */
|
||||||
|
static buddy_t* buddy_get(int exp)
|
||||||
|
{
|
||||||
|
spinlock_lock(&buddy_lock);
|
||||||
|
buddy_t** list = &buddy_lists[exp-BUDDY_MIN];
|
||||||
|
buddy_t* buddy = *list;
|
||||||
|
buddy_t* split;
|
||||||
|
|
||||||
|
if (buddy)
|
||||||
|
// there is already a free buddy =>
|
||||||
|
// we remove it from the list
|
||||||
|
*list = buddy->next;
|
||||||
|
else if (exp >= BUDDY_ALLOC && !buddy_large_avail(exp))
|
||||||
|
// theres no free buddy larger than exp =>
|
||||||
|
// we can allocate new memory
|
||||||
|
buddy = (buddy_t*) palloc(1<<exp, MAP_KERNEL_SPACE);
|
||||||
|
else {
|
||||||
|
// we recursivly request a larger buddy...
|
||||||
|
buddy = buddy_get(exp+1);
|
||||||
|
if (BUILTIN_EXPECT(!buddy, 0))
|
||||||
|
goto out;
|
||||||
|
|
||||||
|
// ... and split it, by putting the second half back to the list
|
||||||
|
split = (buddy_t*) ((size_t) buddy + (1<<exp));
|
||||||
|
split->next = *list;
|
||||||
|
*list = split;
|
||||||
|
}
|
||||||
|
|
||||||
|
out:
|
||||||
|
spinlock_unlock(&buddy_lock);
|
||||||
|
|
||||||
|
return buddy;
|
||||||
|
}
|
||||||
|
|
||||||
|
/** @brief Put a buddy back to its free list
|
||||||
|
*
|
||||||
|
* TODO: merge adjacent buddies (memory compaction)
|
||||||
|
*/
|
||||||
|
static void buddy_put(buddy_t* buddy)
|
||||||
|
{
|
||||||
|
spinlock_lock(&buddy_lock);
|
||||||
|
buddy_t** list = &buddy_lists[buddy->prefix.exponent-BUDDY_MIN];
|
||||||
|
buddy->next = *list;
|
||||||
|
*list = buddy;
|
||||||
|
spinlock_unlock(&buddy_lock);
|
||||||
|
}
|
||||||
|
|
||||||
|
void buddy_dump()
|
||||||
|
{
|
||||||
|
size_t free = 0;
|
||||||
|
int i;
|
||||||
|
for (i=0; i<BUDDY_LISTS; i++) {
|
||||||
|
buddy_t* buddy;
|
||||||
|
int exp = i+BUDDY_MIN;
|
||||||
|
|
||||||
|
if (buddy_lists[i])
|
||||||
|
kprintf("buddy_list[%u] (exp=%u, size=%lu bytes):\n", i, exp, 1<<exp);
|
||||||
|
|
||||||
|
for (buddy=buddy_lists[i]; buddy; buddy=buddy->next) {
|
||||||
|
kprintf(" %p -> %p \n", buddy, buddy->next);
|
||||||
|
free += 1<<exp;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
kprintf("free buddies: %lu bytes\n", free);
|
||||||
|
}
|
||||||
|
|
||||||
|
void* palloc(size_t sz, uint32_t flags)
|
||||||
|
{
|
||||||
|
size_t phyaddr, viraddr;
|
||||||
|
uint32_t npages = PAGE_ALIGN(sz) >> PAGE_SHIFT;
|
||||||
|
|
||||||
|
kprintf("palloc(%lu) (%lu pages)\n", sz, npages); // TODO: remove
|
||||||
|
|
||||||
|
// get free virtual address space
|
||||||
|
viraddr = vma_alloc(npages*PAGE_SIZE, VMA_HEAP);
|
||||||
|
if (BUILTIN_EXPECT(!viraddr, 0))
|
||||||
|
return NULL;
|
||||||
|
|
||||||
|
// get continous physical pages
|
||||||
|
phyaddr = get_pages(npages);
|
||||||
|
if (BUILTIN_EXPECT(!phyaddr, 0)) {
|
||||||
|
vma_free(viraddr, viraddr+npages*PAGE_SIZE);
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
|
||||||
|
// map physical pages to VMA
|
||||||
|
viraddr = map_region(viraddr, phyaddr, npages, flags);
|
||||||
|
if (BUILTIN_EXPECT(!viraddr, 0)) {
|
||||||
|
vma_free(viraddr, viraddr+npages*PAGE_SIZE);
|
||||||
|
put_pages(phyaddr, npages);
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
|
||||||
|
return (void*) viraddr;
|
||||||
|
}
|
||||||
|
|
||||||
|
void pfree(void* addr, size_t sz)
|
||||||
|
{
|
||||||
|
if (BUILTIN_EXPECT(!addr || !sz, 0))
|
||||||
|
return;
|
||||||
|
|
||||||
|
size_t i;
|
||||||
|
size_t phyaddr;
|
||||||
|
size_t viraddr = (size_t) addr & PAGE_MASK;
|
||||||
|
uint32_t npages = PAGE_ALIGN(sz) >> PAGE_SHIFT;
|
||||||
|
|
||||||
|
// memory is propably not continously mapped!
|
||||||
|
for (i=0; i<npages; i++) {
|
||||||
|
phyaddr = virt_to_phys(viraddr+i*PAGE_SHIFT);
|
||||||
|
put_page(phyaddr);
|
||||||
|
}
|
||||||
|
|
||||||
|
unmap_region(viraddr, npages);
|
||||||
|
vma_free(viraddr, viraddr+npages*PAGE_SIZE);
|
||||||
|
}
|
||||||
|
|
||||||
|
void* kmalloc(size_t sz)
|
||||||
|
{
|
||||||
|
if (BUILTIN_EXPECT(!sz, 0))
|
||||||
|
return NULL;
|
||||||
|
|
||||||
|
// add space for the prefix
|
||||||
|
sz += sizeof(buddy_t);
|
||||||
|
|
||||||
|
int exp = buddy_exp(sz);
|
||||||
|
if (BUILTIN_EXPECT(!exp, 0))
|
||||||
|
return NULL;
|
||||||
|
|
||||||
|
buddy_t* buddy = buddy_get(exp);
|
||||||
|
if (BUILTIN_EXPECT(!buddy, 0))
|
||||||
|
return NULL;
|
||||||
|
|
||||||
|
// setup buddy prefix
|
||||||
|
buddy->prefix.magic = BUDDY_MAGIC;
|
||||||
|
buddy->prefix.exponent = exp;
|
||||||
|
|
||||||
|
// pointer arithmetic: we hide the prefix
|
||||||
|
return buddy+1;
|
||||||
|
}
|
||||||
|
|
||||||
|
void kfree(void *addr)
|
||||||
|
{
|
||||||
|
if (BUILTIN_EXPECT(!addr, 0))
|
||||||
|
return;
|
||||||
|
|
||||||
|
buddy_t* buddy = (buddy_t*) addr - 1; // get prefix
|
||||||
|
|
||||||
|
// check magic
|
||||||
|
if (BUILTIN_EXPECT(buddy->prefix.magic != BUDDY_MAGIC, 0))
|
||||||
|
return;
|
||||||
|
|
||||||
|
buddy_put(buddy);
|
||||||
|
}
|
402
mm/memory.c
402
mm/memory.c
|
@ -37,17 +37,15 @@
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* 0 => free
|
* Set whole address space as occupied:
|
||||||
* 1 => occupied
|
* 0 => free, 1 => occupied
|
||||||
*
|
|
||||||
* Set whole address space as occupied
|
|
||||||
*/
|
*/
|
||||||
static uint8_t bitmap[BITMAP_SIZE]; // = {[0 ... BITMAP_SIZE-1] = 0xFF};
|
static uint8_t bitmap[BITMAP_SIZE] = {[0 ... BITMAP_SIZE-1] = 0xFF};
|
||||||
static spinlock_t bitmap_lock = SPINLOCK_INIT;
|
static spinlock_t bitmap_lock = SPINLOCK_INIT;
|
||||||
static size_t alloc_start;
|
|
||||||
atomic_int32_t total_pages = ATOMIC_INIT(0);
|
atomic_int32_t total_pages = ATOMIC_INIT(0);
|
||||||
atomic_int32_t total_allocated_pages = ATOMIC_INIT(0);
|
atomic_int32_t total_allocated_pages = ATOMIC_INIT(0);
|
||||||
atomic_int32_t total_available_pages = ATOMIC_INIT(0);
|
atomic_int32_t total_available_pages = ATOMIC_INIT(0);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Note that linker symbols are not variables, they have no memory allocated for
|
* Note that linker symbols are not variables, they have no memory allocated for
|
||||||
|
@ -74,8 +72,8 @@ inline static void page_set_mark(size_t i)
|
||||||
size_t index = i >> 3;
|
size_t index = i >> 3;
|
||||||
size_t mod = i & 0x7;
|
size_t mod = i & 0x7;
|
||||||
|
|
||||||
//if (page_marked(i))
|
if (page_marked(i))
|
||||||
// kprintf("page %u is already marked\n", i);
|
kprintf("page_set_mark(%u): already marked\n", i);
|
||||||
|
|
||||||
bitmap[index] = bitmap[index] | (1 << mod);
|
bitmap[index] = bitmap[index] | (1 << mod);
|
||||||
}
|
}
|
||||||
|
@ -86,46 +84,145 @@ inline static void page_clear_mark(size_t i)
|
||||||
size_t mod = i % 8;
|
size_t mod = i % 8;
|
||||||
|
|
||||||
if (page_unmarked(i))
|
if (page_unmarked(i))
|
||||||
kprintf("page %u is already unmarked\n", i);
|
kprintf("page_clear_mark(%u): already unmarked\n", i);
|
||||||
|
|
||||||
bitmap[index] = bitmap[index] & ~(1 << mod);
|
bitmap[index] = bitmap[index] & ~(1 << mod);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
size_t get_pages(uint32_t npages)
|
||||||
|
{
|
||||||
|
// skip first page
|
||||||
|
static size_t start = 1;
|
||||||
|
|
||||||
|
uint32_t i, j, l;
|
||||||
|
uint32_t k = 0;
|
||||||
|
size_t ret = 0;
|
||||||
|
|
||||||
|
if (BUILTIN_EXPECT(!npages, 0))
|
||||||
|
return ret;
|
||||||
|
|
||||||
|
if (BUILTIN_EXPECT(npages > atomic_int32_read(&total_available_pages), 0))
|
||||||
|
return ret;
|
||||||
|
|
||||||
|
spinlock_lock(&bitmap_lock);
|
||||||
|
i = start;
|
||||||
|
next_try:
|
||||||
|
while((k < BITMAP_SIZE) && page_marked(i)) {
|
||||||
|
k++;
|
||||||
|
i = (i+1) & (BITMAP_SIZE-1);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (k >= BITMAP_SIZE)
|
||||||
|
goto oom;
|
||||||
|
|
||||||
|
for(j=1; (j<npages) && (i+j < BITMAP_SIZE) && (k < BITMAP_SIZE); j++, k++) {
|
||||||
|
if (page_marked(i+j)) {
|
||||||
|
i = (i+j) & (BITMAP_SIZE-1);
|
||||||
|
goto next_try;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (i+j >= BITMAP_SIZE) {
|
||||||
|
i = 1;
|
||||||
|
goto next_try;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (k >= BITMAP_SIZE)
|
||||||
|
goto oom;
|
||||||
|
|
||||||
|
ret = i*PAGE_SIZE;
|
||||||
|
kprintf("get_pages: ret 0x%x, i = %d, j = %d, npages = %d\n", ret, i, j, npages); // TODO: remove
|
||||||
|
for(l=i; l<i+j; l++)
|
||||||
|
page_set_mark(l);
|
||||||
|
|
||||||
|
start = i+j;
|
||||||
|
spinlock_unlock(&bitmap_lock);
|
||||||
|
|
||||||
|
atomic_int32_add(&total_allocated_pages, npages);
|
||||||
|
atomic_int32_sub(&total_available_pages, npages);
|
||||||
|
|
||||||
|
return ret;
|
||||||
|
|
||||||
|
oom:
|
||||||
|
spinlock_unlock(&bitmap_lock);
|
||||||
|
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
|
||||||
|
int put_pages(size_t phyaddr, size_t npages)
|
||||||
|
{
|
||||||
|
if (BUILTIN_EXPECT(!phyaddr || !npages, 0))
|
||||||
|
return -EINVAL;
|
||||||
|
|
||||||
|
uint32_t index;
|
||||||
|
uint32_t base = phyaddr >> PAGE_SHIFT;
|
||||||
|
|
||||||
|
spinlock_lock(&bitmap_lock);
|
||||||
|
for (index=0; index<npages; index++)
|
||||||
|
page_clear_mark(base+index);
|
||||||
|
spinlock_unlock(&bitmap_lock);
|
||||||
|
|
||||||
|
atomic_int32_sub(&total_allocated_pages, npages);
|
||||||
|
atomic_int32_add(&total_available_pages, npages);
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
int mmu_init(void)
|
int mmu_init(void)
|
||||||
{
|
{
|
||||||
size_t kernel_size;
|
|
||||||
unsigned int i;
|
unsigned int i;
|
||||||
size_t addr;
|
size_t addr;
|
||||||
int ret = 0;
|
int ret = 0;
|
||||||
|
|
||||||
// at first, set default value of the bitmap
|
|
||||||
memset(bitmap, 0xFF, sizeof(uint8_t)*BITMAP_SIZE);
|
|
||||||
|
|
||||||
#ifdef CONFIG_MULTIBOOT
|
#ifdef CONFIG_MULTIBOOT
|
||||||
if (mb_info && (mb_info->flags & MULTIBOOT_INFO_MEM_MAP)) {
|
if (mb_info) {
|
||||||
size_t end_addr;
|
if (mb_info->flags & MULTIBOOT_INFO_MEM_MAP) {
|
||||||
multiboot_memory_map_t* mmap = (multiboot_memory_map_t*) ((size_t) mb_info->mmap_addr);
|
multiboot_memory_map_t* mmap = (multiboot_memory_map_t*) ((size_t) mb_info->mmap_addr);
|
||||||
multiboot_memory_map_t* mmap_end = (void*) ((size_t) mb_info->mmap_addr + mb_info->mmap_length);
|
multiboot_memory_map_t* mmap_end = (void*) ((size_t) mb_info->mmap_addr + mb_info->mmap_length);
|
||||||
|
|
||||||
while (mmap < mmap_end) {
|
// mark available memory as free
|
||||||
if (mmap->type == MULTIBOOT_MEMORY_AVAILABLE) {
|
while (mmap < mmap_end) {
|
||||||
// set the available memory as "unused"
|
if (mmap->type == MULTIBOOT_MEMORY_AVAILABLE) {
|
||||||
addr = mmap->addr;
|
for (addr=mmap->addr; addr < mmap->addr + mmap->len; addr += PAGE_SIZE) {
|
||||||
end_addr = addr + mmap->len;
|
page_clear_mark(addr >> PAGE_SHIFT);
|
||||||
|
atomic_int32_inc(&total_pages);
|
||||||
while (addr < end_addr) {
|
atomic_int32_inc(&total_available_pages);
|
||||||
page_clear_mark(addr >> PAGE_SHIFT);
|
}
|
||||||
addr += PAGE_SIZE;
|
|
||||||
atomic_int32_inc(&total_pages);
|
|
||||||
atomic_int32_inc(&total_available_pages);
|
|
||||||
}
|
}
|
||||||
|
mmap++;
|
||||||
}
|
}
|
||||||
mmap++;
|
|
||||||
}
|
}
|
||||||
} else {
|
else if (mb_info->flags & MULTIBOOT_INFO_MEM) {
|
||||||
kputs("Unable to initialize the memory management subsystem\n");
|
size_t page;
|
||||||
while(1) {
|
size_t pages_lower = mb_info->mem_lower >> 2;
|
||||||
HALT;
|
size_t pages_upper = mb_info->mem_upper >> 2;
|
||||||
|
|
||||||
|
for (page=0; page<pages_lower; page++)
|
||||||
|
page_clear_mark(page);
|
||||||
|
|
||||||
|
for (page=0x100000; page<pages_upper+0x100000; page++)
|
||||||
|
page_clear_mark(page);
|
||||||
|
|
||||||
|
atomic_int32_add(&total_pages, pages_lower + pages_upper);
|
||||||
|
atomic_int32_add(&total_available_pages, pages_lower + pages_upper);
|
||||||
|
}
|
||||||
|
else {
|
||||||
|
kputs("Unable to initialize the memory management subsystem\n");
|
||||||
|
while (1) HALT;
|
||||||
|
}
|
||||||
|
|
||||||
|
// mark mb_info as used
|
||||||
|
page_set_mark((size_t) mb_info >> PAGE_SHIFT);
|
||||||
|
atomic_int32_inc(&total_allocated_pages);
|
||||||
|
atomic_int32_dec(&total_available_pages);
|
||||||
|
|
||||||
|
// mark modules list as used
|
||||||
|
if (mb_info->flags & MULTIBOOT_INFO_MODS) {
|
||||||
|
for(addr=mb_info->mods_addr; addr<mb_info->mods_addr+mb_info->mods_count*sizeof(multiboot_module_t); addr+=PAGE_SIZE) {
|
||||||
|
page_set_mark(addr >> PAGE_SHIFT);
|
||||||
|
atomic_int32_inc(&total_allocated_pages);
|
||||||
|
atomic_int32_dec(&total_available_pages);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
#elif defined(CONFIG_ROCKCREEK)
|
#elif defined(CONFIG_ROCKCREEK)
|
||||||
|
@ -135,7 +232,7 @@ int mmu_init(void)
|
||||||
if (addr > addr + PAGE_SIZE)
|
if (addr > addr + PAGE_SIZE)
|
||||||
break;
|
break;
|
||||||
atomic_int32_inc(&total_pages);
|
atomic_int32_inc(&total_pages);
|
||||||
atomic_int32_inc(&total_available_pages);
|
atomic_int32_inc(&total_available_pages);
|
||||||
}
|
}
|
||||||
|
|
||||||
// Note: The last slot belongs always to the private memory.
|
// Note: The last slot belongs always to the private memory.
|
||||||
|
@ -151,63 +248,74 @@ int mmu_init(void)
|
||||||
page_set_mark((size_t)bootinfo >> PAGE_SHIFT);
|
page_set_mark((size_t)bootinfo >> PAGE_SHIFT);
|
||||||
atomic_int32_inc(&total_allocated_pages);
|
atomic_int32_inc(&total_allocated_pages);
|
||||||
atomic_int32_dec(&total_available_pages);
|
atomic_int32_dec(&total_available_pages);
|
||||||
|
|
||||||
#else
|
#else
|
||||||
#error Currently, MetalSVM supports only the Multiboot specification or the RockCreek processor!
|
#error Currently, MetalSVM supports only the Multiboot specification or the RockCreek processor!
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
kernel_size = (size_t) &kernel_end - (size_t) &kernel_start;
|
// mark kernel as used
|
||||||
if (kernel_size & (PAGE_SIZE-1))
|
for(addr=(size_t) &kernel_start; addr<(size_t) &kernel_end; addr+=PAGE_SIZE) {
|
||||||
kernel_size += PAGE_SIZE - (kernel_size & (PAGE_SIZE-1));
|
page_set_mark(addr >> PAGE_SHIFT);
|
||||||
atomic_int32_add(&total_allocated_pages, kernel_size >> PAGE_SHIFT);
|
atomic_int32_inc(&total_allocated_pages);
|
||||||
atomic_int32_sub(&total_available_pages, kernel_size >> PAGE_SHIFT);
|
atomic_int32_dec(&total_available_pages);
|
||||||
|
}
|
||||||
// set kernel space as used
|
|
||||||
for(i=(size_t) &kernel_start >> PAGE_SHIFT; i < (size_t) &kernel_end >> PAGE_SHIFT; i++)
|
|
||||||
page_set_mark(i);
|
|
||||||
if ((size_t) &kernel_end & (PAGE_SIZE-1))
|
|
||||||
page_set_mark(i);
|
|
||||||
|
|
||||||
alloc_start = (size_t) &kernel_end >> PAGE_SHIFT;
|
|
||||||
if ((size_t) &kernel_end & (PAGE_SIZE-1))
|
|
||||||
alloc_start++;
|
|
||||||
|
|
||||||
#if MAX_CORES > 1
|
#if MAX_CORES > 1
|
||||||
// reserve physical page for SMP boot code
|
|
||||||
page_set_mark(SMP_SETUP_ADDR >> PAGE_SHIFT);
|
page_set_mark(SMP_SETUP_ADDR >> PAGE_SHIFT);
|
||||||
atomic_int32_add(&total_allocated_pages, 1);
|
atomic_int32_inc(&total_allocated_pages);
|
||||||
atomic_int32_sub(&total_available_pages, 1);
|
atomic_int32_dec(&total_available_pages);
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
// enable paging and map SMP, VGA, Multiboot modules etc.
|
||||||
ret = paging_init();
|
ret = paging_init();
|
||||||
if (ret) {
|
if (ret) {
|
||||||
kprintf("Failed to initialize paging: %d\n", ret);
|
kprintf("Failed to initialize paging: %d\n", ret);
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// add kernel to VMA list
|
||||||
|
vma_add((size_t) &kernel_start & PAGE_MASK,
|
||||||
|
PAGE_ALIGN((size_t) &kernel_end),
|
||||||
|
VMA_READ|VMA_WRITE|VMA_EXECUTE|VMA_CACHEABLE);
|
||||||
|
|
||||||
|
// add LAPIC tp VMA list
|
||||||
|
vma_add((size_t) &kernel_start - PAGE_SIZE,
|
||||||
|
(size_t) &kernel_start,
|
||||||
|
VMA_READ|VMA_WRITE);
|
||||||
|
|
||||||
|
#if MAX_CORES > 1
|
||||||
|
// reserve page for SMP boot code
|
||||||
|
vma_add(SMP_SETUP_ADDR & PAGE_MASK,
|
||||||
|
PAGE_ALIGN(SMP_SETUP_ADDR + PAGE_SIZE),
|
||||||
|
VMA_READ|VMA_WRITE|VMA_EXECUTE|VMA_CACHEABLE);
|
||||||
|
#endif
|
||||||
|
|
||||||
#ifdef CONFIG_MULTIBOOT
|
#ifdef CONFIG_MULTIBOOT
|
||||||
/*
|
/*
|
||||||
* Modules like the init ram disk are already loaded.
|
* Modules like the init ram disk are already loaded.
|
||||||
* Therefore, we set these pages as used.
|
* Therefore, we set these pages as used.
|
||||||
*/
|
*/
|
||||||
if (mb_info && (mb_info->flags & MULTIBOOT_INFO_MODS)) {
|
if (mb_info) {
|
||||||
multiboot_module_t* mmodule = (multiboot_module_t*) ((size_t) mb_info->mods_addr);
|
vma_add((size_t) mb_info & PAGE_MASK,
|
||||||
|
PAGE_ALIGN((size_t) mb_info + sizeof(multiboot_info_t)),
|
||||||
|
VMA_READ|VMA_CACHEABLE);
|
||||||
|
|
||||||
// mark the mb_info as used.
|
if (mb_info->flags & MULTIBOOT_INFO_MODS) {
|
||||||
page_set_mark((size_t)mb_info >> PAGE_SHIFT);
|
multiboot_module_t* mmodule = (multiboot_module_t*) ((size_t) mb_info->mods_addr);
|
||||||
atomic_int32_inc(&total_allocated_pages);
|
|
||||||
atomic_int32_dec(&total_available_pages);
|
|
||||||
|
|
||||||
for(addr = mb_info->mods_addr; addr < mb_info->mods_addr + mb_info->mods_count * sizeof(multiboot_module_t); addr += PAGE_SIZE) {
|
vma_add((size_t) mb_info->mods_addr & PAGE_MASK,
|
||||||
page_set_mark(addr >> PAGE_SHIFT);
|
PAGE_ALIGN((size_t) mb_info->mods_addr + mb_info->mods_count*sizeof(multiboot_module_t)),
|
||||||
atomic_int32_inc(&total_allocated_pages);
|
VMA_READ|VMA_CACHEABLE);
|
||||||
atomic_int32_dec(&total_available_pages);
|
|
||||||
}
|
|
||||||
|
|
||||||
for(i=0; i<mb_info->mods_count; i++, mmodule++) {
|
for(i=0; i<mb_info->mods_count; i++) {
|
||||||
for(addr=mmodule->mod_start; addr<mmodule->mod_end; addr+=PAGE_SIZE) {
|
vma_add(PAGE_ALIGN(mmodule[i].mod_start),
|
||||||
page_set_mark(addr >> PAGE_SHIFT);
|
PAGE_ALIGN(mmodule[i].mod_end),
|
||||||
atomic_int32_inc(&total_allocated_pages);
|
VMA_READ|VMA_WRITE|VMA_CACHEABLE);
|
||||||
atomic_int32_dec(&total_available_pages);
|
|
||||||
|
for(addr=mmodule[i].mod_start; addr<mmodule[i].mod_end; addr+=PAGE_SIZE) {
|
||||||
|
page_set_mark(addr >> PAGE_SHIFT);
|
||||||
|
atomic_int32_inc(&total_allocated_pages);
|
||||||
|
atomic_int32_dec(&total_available_pages);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -235,8 +343,8 @@ int mmu_init(void)
|
||||||
* The init ram disk are already loaded.
|
* The init ram disk are already loaded.
|
||||||
* Therefore, we set these pages as used.
|
* Therefore, we set these pages as used.
|
||||||
*/
|
*/
|
||||||
for(addr=bootinfo->addr; addr < bootinfo->addr+bootinfo->size; addr+=PAGE_SIZE) {
|
for(addr=bootinfo->addr; addr<bootinfo->addr+bootinfo->size; addr+=PAGE_SIZE) {
|
||||||
// This area is already mapped, so we need to virt_to_phys() these addresses.
|
// this area is already mapped, so we need to virt_to_phys() these addresses.
|
||||||
page_set_mark(virt_to_phys(addr) >> PAGE_SHIFT);
|
page_set_mark(virt_to_phys(addr) >> PAGE_SHIFT);
|
||||||
atomic_int32_inc(&total_allocated_pages);
|
atomic_int32_inc(&total_allocated_pages);
|
||||||
atomic_int32_dec(&total_available_pages);
|
atomic_int32_dec(&total_available_pages);
|
||||||
|
@ -246,147 +354,3 @@ int mmu_init(void)
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
|
||||||
* Use first fit algorithm to find a suitable physical memory region
|
|
||||||
*/
|
|
||||||
size_t get_pages(uint32_t npages)
|
|
||||||
{
|
|
||||||
uint32_t i, j, l;
|
|
||||||
uint32_t k = 0;
|
|
||||||
size_t ret = 0;
|
|
||||||
|
|
||||||
if (BUILTIN_EXPECT(!npages, 0))
|
|
||||||
return ret;
|
|
||||||
|
|
||||||
if (BUILTIN_EXPECT(npages > atomic_int32_read(&total_available_pages), 0))
|
|
||||||
return ret;
|
|
||||||
|
|
||||||
spinlock_lock(&bitmap_lock);
|
|
||||||
i = alloc_start;
|
|
||||||
next_try:
|
|
||||||
while((k < BITMAP_SIZE) && page_marked(i)) {
|
|
||||||
k++;
|
|
||||||
i = (i+1) & (BITMAP_SIZE-1);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (k >= BITMAP_SIZE)
|
|
||||||
goto oom;
|
|
||||||
|
|
||||||
for(j=1; (j<npages) && (i+j < BITMAP_SIZE) && (k < BITMAP_SIZE); j++, k++) {
|
|
||||||
if (page_marked(i+j)) {
|
|
||||||
i = (i+j) & (BITMAP_SIZE-1);
|
|
||||||
goto next_try;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if (i+j >= BITMAP_SIZE) {
|
|
||||||
i = 0;
|
|
||||||
goto next_try;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (k >= BITMAP_SIZE)
|
|
||||||
goto oom;
|
|
||||||
|
|
||||||
ret = i*PAGE_SIZE;
|
|
||||||
//kprintf("alloc: ret 0x%x, i = %d, j = %d, npages = %d\n", ret, i, j, npages);
|
|
||||||
for(l=i; l<i+j; l++)
|
|
||||||
page_set_mark(l);
|
|
||||||
|
|
||||||
alloc_start = i+j;
|
|
||||||
spinlock_unlock(&bitmap_lock);
|
|
||||||
|
|
||||||
atomic_int32_add(&total_allocated_pages, npages);
|
|
||||||
atomic_int32_sub(&total_available_pages, npages);
|
|
||||||
|
|
||||||
return ret;
|
|
||||||
|
|
||||||
oom:
|
|
||||||
spinlock_unlock(&bitmap_lock);
|
|
||||||
|
|
||||||
return ret;
|
|
||||||
}
|
|
||||||
|
|
||||||
int put_page(size_t phyaddr)
|
|
||||||
{
|
|
||||||
uint32_t index = phyaddr >> PAGE_SHIFT;
|
|
||||||
|
|
||||||
if (BUILTIN_EXPECT(!phyaddr, 0))
|
|
||||||
return -EINVAL;
|
|
||||||
|
|
||||||
spinlock_lock(&bitmap_lock);
|
|
||||||
page_clear_mark(index);
|
|
||||||
spinlock_unlock(&bitmap_lock);
|
|
||||||
|
|
||||||
atomic_int32_sub(&total_allocated_pages, 1);
|
|
||||||
atomic_int32_add(&total_available_pages, 1);
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
void* mem_allocation(size_t sz, uint32_t flags)
|
|
||||||
{
|
|
||||||
size_t phyaddr, viraddr;
|
|
||||||
uint32_t npages = sz >> PAGE_SHIFT;
|
|
||||||
|
|
||||||
if (sz & (PAGE_SIZE-1))
|
|
||||||
npages++;
|
|
||||||
|
|
||||||
phyaddr = get_pages(npages);
|
|
||||||
if (BUILTIN_EXPECT(!phyaddr, 0))
|
|
||||||
return 0;
|
|
||||||
|
|
||||||
viraddr = map_region(0, phyaddr, npages, flags);
|
|
||||||
|
|
||||||
return (void*) viraddr;
|
|
||||||
}
|
|
||||||
|
|
||||||
void* kmalloc(size_t sz)
|
|
||||||
{
|
|
||||||
return mem_allocation(sz, MAP_KERNEL_SPACE);
|
|
||||||
}
|
|
||||||
|
|
||||||
void kfree(void* addr, size_t sz)
|
|
||||||
{
|
|
||||||
uint32_t index, npages, i;
|
|
||||||
size_t phyaddr;
|
|
||||||
|
|
||||||
if (BUILTIN_EXPECT(!addr && !sz, 0))
|
|
||||||
return;
|
|
||||||
|
|
||||||
npages = sz >> PAGE_SHIFT;
|
|
||||||
if (sz & (PAGE_SIZE-1))
|
|
||||||
npages++;
|
|
||||||
|
|
||||||
spinlock_lock(&bitmap_lock);
|
|
||||||
for(i=0; i<npages; i++) {
|
|
||||||
unmap_region((size_t) addr+i*PAGE_SIZE, 1);
|
|
||||||
|
|
||||||
phyaddr = virt_to_phys((size_t) addr+i*PAGE_SIZE);
|
|
||||||
if (!phyaddr)
|
|
||||||
continue;
|
|
||||||
|
|
||||||
index = phyaddr >> PAGE_SHIFT;
|
|
||||||
page_clear_mark(index);
|
|
||||||
}
|
|
||||||
spinlock_unlock(&bitmap_lock);
|
|
||||||
|
|
||||||
vm_free((size_t) addr, npages);
|
|
||||||
|
|
||||||
atomic_int32_sub(&total_allocated_pages, npages);
|
|
||||||
atomic_int32_add(&total_available_pages, npages);
|
|
||||||
}
|
|
||||||
|
|
||||||
void* create_stack(void)
|
|
||||||
{
|
|
||||||
return kmalloc(KERNEL_STACK_SIZE);
|
|
||||||
}
|
|
||||||
|
|
||||||
int destroy_stack(task_t* task)
|
|
||||||
{
|
|
||||||
if (BUILTIN_EXPECT(!task || !task->stack, 0))
|
|
||||||
return -EINVAL;
|
|
||||||
|
|
||||||
kfree(task->stack, KERNEL_STACK_SIZE);
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
355
mm/vma.c
355
mm/vma.c
|
@ -1,5 +1,5 @@
|
||||||
/*
|
/*
|
||||||
* Copyright 2011 Stefan Lankes, Chair for Operating Systems,
|
* Copyright 2011 Steffen Vogel, Chair for Operating Systems,
|
||||||
* RWTH Aachen University
|
* RWTH Aachen University
|
||||||
*
|
*
|
||||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
@ -17,87 +17,322 @@
|
||||||
* This file is part of MetalSVM.
|
* This file is part of MetalSVM.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
|
#include <metalsvm/vma.h>
|
||||||
#include <metalsvm/stdlib.h>
|
#include <metalsvm/stdlib.h>
|
||||||
#include <metalsvm/stdio.h>
|
#include <metalsvm/stdio.h>
|
||||||
#include <metalsvm/tasks_types.h>
|
#include <metalsvm/tasks_types.h>
|
||||||
#include <metalsvm/spinlock.h>
|
#include <metalsvm/spinlock.h>
|
||||||
#include <metalsvm/vma.h>
|
|
||||||
#include <metalsvm/errno.h>
|
#include <metalsvm/errno.h>
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* add a new virtual memory region to the list of VMAs
|
* Kernel space VMA list and lock
|
||||||
|
*
|
||||||
|
* For bootstrapping we initialize the VMA list with one empty VMA
|
||||||
|
* (start == end) and expand this VMA by calls to vma_alloc()
|
||||||
*/
|
*/
|
||||||
int vma_add(task_t* task, size_t start, size_t end, uint32_t type)
|
static vma_t vma_boot = { VMA_KERN_MAX, VMA_KERN_MAX, VMA_HEAP };
|
||||||
|
static vma_t* vma_list = &vma_boot;
|
||||||
|
static spinlock_t vma_lock = SPINLOCK_INIT;
|
||||||
|
|
||||||
|
size_t vma_alloc(size_t size, uint32_t flags)
|
||||||
{
|
{
|
||||||
vma_t* new_vma;
|
task_t* task = per_core(current_task);
|
||||||
|
spinlock_t* lock;
|
||||||
if (BUILTIN_EXPECT(!task || start > end, 0))
|
vma_t** list;
|
||||||
|
size_t ret = 0;
|
||||||
|
|
||||||
|
kprintf("vma_alloc(0x%lx, 0x%x)\n", size, flags);
|
||||||
|
|
||||||
|
size_t base, limit; // boundaries for search
|
||||||
|
size_t start, end;
|
||||||
|
|
||||||
|
if (BUILTIN_EXPECT(!size, 0))
|
||||||
|
return 0;
|
||||||
|
|
||||||
|
if (flags & VMA_USER) {
|
||||||
|
base = VMA_KERN_MAX;
|
||||||
|
limit = VMA_USER_MAX;
|
||||||
|
list = &task->vma_list;
|
||||||
|
lock = &task->vma_lock;
|
||||||
|
}
|
||||||
|
else {
|
||||||
|
base = 0;
|
||||||
|
limit = VMA_KERN_MAX;
|
||||||
|
list = &vma_list;
|
||||||
|
lock = &vma_lock;
|
||||||
|
}
|
||||||
|
|
||||||
|
spinlock_lock(lock);
|
||||||
|
|
||||||
|
// "last" fit search for free memory area
|
||||||
|
vma_t* pred = *list; // vma before current gap
|
||||||
|
vma_t* succ = NULL; // vma after current gap
|
||||||
|
do {
|
||||||
|
start = (pred) ? pred->end : base;
|
||||||
|
end = (succ) ? succ->start : limit;
|
||||||
|
|
||||||
|
if (end > start && end - start > size)
|
||||||
|
break; // we found a gap
|
||||||
|
|
||||||
|
succ = pred;
|
||||||
|
pred = (pred) ? pred->prev : NULL;
|
||||||
|
} while (pred || succ);
|
||||||
|
|
||||||
|
if (BUILTIN_EXPECT(end > limit || end < start || end - start < size, 0)) {
|
||||||
|
spinlock_unlock(lock);
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
// resize existing vma
|
||||||
|
if (succ && succ->flags == flags) {
|
||||||
|
succ->start -= size;
|
||||||
|
ret = succ->start;
|
||||||
|
}
|
||||||
|
// insert new vma
|
||||||
|
else {
|
||||||
|
vma_t* new = kmalloc(sizeof(vma_t));
|
||||||
|
if (BUILTIN_EXPECT(!new, 0))
|
||||||
|
return 0;
|
||||||
|
|
||||||
|
new->start = end-size;
|
||||||
|
new->end = end;
|
||||||
|
new->flags = flags;
|
||||||
|
new->next = succ;
|
||||||
|
new->prev = pred;
|
||||||
|
|
||||||
|
if (pred)
|
||||||
|
pred->next = new;
|
||||||
|
if (succ)
|
||||||
|
succ->prev = new;
|
||||||
|
else
|
||||||
|
*list = new;
|
||||||
|
|
||||||
|
ret = new->start;
|
||||||
|
}
|
||||||
|
|
||||||
|
spinlock_unlock(lock);
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Release the range [start, end) from the matching VMA list.
 *
 * The kernel list (vma_list/vma_lock) is used when the range lies entirely
 * below VMA_KERN_MAX, the current task's list when it lies entirely above;
 * ranges straddling the boundary are rejected.
 *
 * @return 0 on success, -EINVAL on bad range or no covering vma,
 *         -ENOMEM if splitting a vma fails.
 */
int vma_free(size_t start, size_t end)
{
	task_t* task = per_core(current_task);
	spinlock_t* lock;
	vma_t* vma;
	vma_t** list;

	if (BUILTIN_EXPECT(start >= end, 0))
		return -EINVAL;

	// select kernel- or userspace list by address range
	if (end <= VMA_KERN_MAX) {
		lock = &vma_lock;
		list = &vma_list;
	}
	else if (start >= VMA_KERN_MAX) {
		lock = &task->vma_lock;
		list = &task->vma_list;
	}
	else
		return -EINVAL;

	if (BUILTIN_EXPECT(!*list, 0))
		return -EINVAL;

	spinlock_lock(lock);

	// search the vma covering [start, end); the list is walked via prev
	vma = *list;
	while (vma) {
		if (start >= vma->start && end <= vma->end) break;
		vma = vma->prev;
	}

	if (BUILTIN_EXPECT(!vma, 0)) {
		spinlock_unlock(lock);
		return -EINVAL;
	}

	if (start == vma->start && end == vma->end) {
		// exact match => unlink and free the whole vma
		if (vma == *list)
			*list = vma->next; // update list head
		if (vma->prev)
			vma->prev->next = vma->next;
		if (vma->next)
			vma->next->prev = vma->prev;
		kfree(vma);
	}
	else if (start == vma->start)
		vma->start = end;	// shrink from below
	else if (end == vma->end)
		vma->end = start;	// shrink from above
	else {
		// hole in the middle => split into [vma->start,start) and [end,old_end)
		vma_t* new = kmalloc(sizeof(vma_t));
		if (BUILTIN_EXPECT(!new, 0)) {
			spinlock_unlock(lock);
			return -ENOMEM;
		}

		// FIX: capture the old upper bound BEFORE shrinking vma;
		// the previous code shrank vma->end first and then copied the
		// already-clobbered value, producing an empty/inverted vma.
		new->start = end;
		new->end = vma->end;
		new->flags = vma->flags;	// FIX: was left uninitialized (kmalloc does not zero)
		vma->end = start;

		new->next = vma->next;
		new->prev = vma;
		if (vma->next)
			vma->next->prev = new;	// FIX: keep the backward link consistent
		vma->next = new;
	}

	spinlock_unlock(lock);

	return 0;
}
|
|
||||||
|
/** Register the range [start, end) with flags in the appropriate VMA list.
 *
 * VMA_USER selects the current task's list (range must lie above
 * VMA_KERN_MAX), otherwise the kernel list is used (range must lie below).
 * Adjacent vmas with identical flags are extended instead of allocating
 * a new node.
 *
 * @return 0 on success, -EINVAL on bad range/space mismatch,
 *         -ENOMEM if allocation fails.
 */
int vma_add(size_t start, size_t end, uint32_t flags)
{
	task_t* task = per_core(current_task);
	spinlock_t* lock;
	vma_t** list;

	kprintf("vma_add(0x%lx, 0x%lx, 0x%x)\n", start, end, flags);

	if (BUILTIN_EXPECT(start >= end, 0))
		return -EINVAL;

	if (flags & VMA_USER) {
		list = &task->vma_list;
		lock = &task->vma_lock;

		// check if address is in userspace
		if (BUILTIN_EXPECT(start < VMA_KERN_MAX, 0))
			return -EINVAL;
	}
	else {
		list = &vma_list;
		lock = &vma_lock;

		// check if address is in kernelspace
		if (BUILTIN_EXPECT(end > VMA_KERN_MAX, 0))
			return -EINVAL;
	}

	spinlock_lock(lock);

	// search the gap between pred and succ that contains [start, end);
	// the list is walked toward lower addresses via prev
	vma_t* pred = *list;
	vma_t* succ = NULL;
	while (pred) {
		if ((!pred || pred->end <= start) &&
		    (!succ || succ->start >= end))
			break;

		succ = pred;
		pred = pred->prev;
	}

	if (pred && pred->end == start && pred->flags == flags)
		pred->end = end;	// extend lower neighbour
	else if (succ && succ->start == end && succ->flags == flags)
		succ->start = start;	// extend upper neighbour
	else {
		// insert new vma
		vma_t* new = kmalloc(sizeof(vma_t));
		if (BUILTIN_EXPECT(!new, 0)) {
			// FIX: previous code returned 0 (success!) here while still
			// holding the spinlock, leaking the lock and hiding the failure.
			spinlock_unlock(lock);
			return -ENOMEM;
		}

		new->start = start;
		new->end = end;
		new->flags = flags;
		new->next = succ;
		new->prev = pred;

		if (pred)
			pred->next = new;
		if (succ)
			succ->prev = new;
		else
			*list = new;	// new highest vma becomes the list head
	}

	spinlock_unlock(lock);

	return 0;
}
|
|
||||||
|
/** Duplicate the current task's VMA list into a child task.
 *
 * Both lists are held locked during the copy; the child's lock is
 * (re)initialized first.
 *
 * @param task  child task receiving the copy
 * @return 0 on success, -ENOMEM if any node allocation fails
 *         (a partial copy remains linked in the child).
 */
int copy_vma_list(task_t* task)
{
	task_t* parent_task = per_core(current_task);

	spinlock_init(&task->vma_lock);
	spinlock_lock(&parent_task->vma_lock);
	spinlock_lock(&task->vma_lock);

	int ret = 0;
	vma_t* last = NULL;
	// NOTE(review): the copy walks via next while lookups elsewhere walk
	// via prev from the list head — confirm both directions stay consistent.
	vma_t* parent = parent_task->vma_list;

	while (parent) {
		vma_t *new = kmalloc(sizeof(vma_t));
		if (BUILTIN_EXPECT(!new, 0)) {
			ret = -ENOMEM;
			goto out;
		}

		new->start = parent->start;
		new->end = parent->end;
		new->flags = parent->flags;
		new->prev = last;
		// FIX: kmalloc does not zero memory; without this the tail node's
		// next pointer was dangling heap garbage.
		new->next = NULL;

		if (last)
			last->next = new;
		else
			task->vma_list = new;

		last = new;
		parent = parent->next;
	}

out:
	spinlock_unlock(&task->vma_lock);
	spinlock_unlock(&parent_task->vma_lock);
	return ret;
}
|
|
||||||
|
int drop_vma_list()
|
||||||
|
{
|
||||||
|
task_t* task = per_core(current_task);
|
||||||
|
|
||||||
spinlock_lock(&task->vma_lock);
|
spinlock_lock(&task->vma_lock);
|
||||||
|
|
||||||
new_vma->start = start;
|
while(task->vma_list)
|
||||||
new_vma->end = end;
|
pfree((void*) task->vma_list->start, task->vma_list->end - task->vma_list->start);
|
||||||
new_vma->type = type;
|
|
||||||
|
|
||||||
if (!(task->vma_list)) {
|
|
||||||
new_vma->next = new_vma->prev = NULL;
|
|
||||||
task->vma_list = new_vma;
|
|
||||||
} else {
|
|
||||||
vma_t* tmp = task->vma_list;
|
|
||||||
|
|
||||||
while (tmp->next && tmp->start < start)
|
|
||||||
tmp = tmp->next;
|
|
||||||
|
|
||||||
new_vma->next = tmp->next;
|
|
||||||
new_vma->prev = tmp;
|
|
||||||
tmp->next = new_vma;
|
|
||||||
}
|
|
||||||
|
|
||||||
spinlock_unlock(&task->vma_lock);
|
spinlock_unlock(&task->vma_lock);
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
int vma_dump(task_t* task)
|
void vma_dump()
|
||||||
{
|
{
|
||||||
vma_t* tmp;
|
void print_vma(vma_t *vma) {
|
||||||
|
while (vma) {
|
||||||
if (BUILTIN_EXPECT(!task, 0))
|
kprintf("0x%lx - 0x%lx: size=%x, flags=%c%c%c\n", vma->start, vma->end, vma->end - vma->start,
|
||||||
return -EINVAL;
|
(vma->flags & VMA_READ) ? 'r' : '-',
|
||||||
|
(vma->flags & VMA_WRITE) ? 'w' : '-',
|
||||||
spinlock_lock(&task->vma_lock);
|
(vma->flags & VMA_EXECUTE) ? 'x' : '-');
|
||||||
|
vma = vma->prev;
|
||||||
int cnt = 0;
|
}
|
||||||
tmp = task->vma_list;
|
|
||||||
while (tmp) {
|
|
||||||
kprintf("#%d\t%8x - %8x: size=%6x, flags=", cnt, tmp->start, tmp->end, tmp->end - tmp->start);
|
|
||||||
|
|
||||||
if (tmp->type & VMA_READ)
|
|
||||||
kputs("r");
|
|
||||||
else
|
|
||||||
kputs("-");
|
|
||||||
|
|
||||||
if (tmp->type & VMA_WRITE)
|
|
||||||
kputs("w");
|
|
||||||
else
|
|
||||||
kputs("-");
|
|
||||||
|
|
||||||
if (tmp->type & VMA_EXECUTE)
|
|
||||||
kputs("x");
|
|
||||||
else
|
|
||||||
kputs("-");
|
|
||||||
kputs("\n");
|
|
||||||
|
|
||||||
tmp = tmp->next;
|
|
||||||
cnt++;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
spinlock_unlock(&task->vma_lock);
|
task_t* task = per_core(current_task);
|
||||||
|
|
||||||
return 0;
|
kputs("Kernelspace VMAs:\n");
|
||||||
|
spinlock_lock(&vma_lock);
|
||||||
|
print_vma(vma_list);
|
||||||
|
spinlock_unlock(&vma_lock);
|
||||||
|
|
||||||
|
kputs("Userspace VMAs:\n");
|
||||||
|
spinlock_lock(&task->vma_lock);
|
||||||
|
print_vma(task->vma_list);
|
||||||
|
spinlock_unlock(&task->vma_lock);
|
||||||
}
|
}
|
||||||
|
|
Loading…
Add table
Reference in a new issue