diff --git a/Makefile.example b/Makefile.example index 028e6be..ba9d5c8 100644 --- a/Makefile.example +++ b/Makefile.example @@ -1,6 +1,7 @@ TOPDIR = $(shell pwd) ARCH = x86 NAME = eduos +KERNDIRS = libkern kernel mm arch/$(ARCH)/kernel arch/$(ARCH)/mm KERNDIRS = libkern kernel mm arch/$(ARCH)/kernel SUBDIRS = $(KERNDIRS) @@ -20,10 +21,19 @@ OBJCOPY_FOR_TARGET = $(CROSSCOMPREFIX)objcopy RANLIB_FOR_TARGET = $(CROSSCOMPREFIX)ranlib STRIP_FOR_TARGET = $(CROSSCOMPREFIX)strip READELF_FOR_TARGET = $(CROSSCOMPREFIX)readelf + NASM = nasm +GDB = gdb +QEMU = qemu-system-i386 QEMU = qemu-system-i386 NASMFLAGS = -felf32 -g -i$(TOPDIR)/include/eduos/ +GDBFLAGS = -x debug.gdb +QEMUFLAGS = -smp 2 -monitor stdio \ + -net nic,model=rtl8139 \ + -net user,hostfwd=tcp::12345-:7 \ + -serial tcp::12346,server,nowait + INCLUDE = -I$(TOPDIR)/include -I$(TOPDIR)/arch/$(ARCH)/include # Compiler options for final code CFLAGS = -g -m32 -march=i586 -Wall -O2 -fno-builtin -fstrength-reduce -fomit-frame-pointer -finline-functions -nostdinc $(INCLUDE) -fno-stack-protector @@ -63,14 +73,15 @@ veryclean: clean @echo Very cleaned qemu: $(NAME).elf - $(QEMU) -monitor stdio -smp 2 -net nic,model=rtl8139 -net user,hostfwd=tcp::12345-:7 -net dump -kernel $(NAME).elf + $(QEMU) $(QEMUFLAGS) -kernel $(NAME).elf -qemu-dbg: $(NAME).elf - $(QEMU) -monitor stdio -s -S -smp 2 -net nic,model=rtl8139 -net user,hostfwd=tcp::12345-:7 -net dump -kernel $(NAME).elf +debug: $(NAME).elf + $(TERM) -e $(GDB) $(GDBFLAGS) & + $(QEMU) $(QEMUFLAGS) -s -S -kernel $(NAME).elf doc: - @doxygen @echo Create documentation... + @doxygen %.o : %.c @echo [CC] $@ @@ -92,5 +103,5 @@ include/eduos/config.inc: include/eduos/config.h $Q$(CC_FOR_TARGET) $(CFLAGS) -c -o $@ $< .PHONY: default all clean emu gdb newlib tools - + include $(addsuffix /Makefile,$(SUBDIRS)) diff --git a/arch/x86/include/asm/multiboot.h b/arch/x86/include/asm/multiboot.h index 4756db5..e5d2c19 100644 --- a/arch/x86/include/asm/multiboot.h +++ b/arch/x86/include/asm/multiboot.h @@ -41,6 +41,13 @@ #include +/// Does the bootloader provide mem_* fields? +#define MULTIBOOT_INFO_MEM (1 << 0) +/// Does the bootloader provide a list of modules? +#define MULTIBOOT_INFO_MODS (1 << 3) +/// Does the bootloader provide a full memory map? +#define MULTIBOOT_INFO_MEM_MAP (1 << 6) + typedef uint16_t multiboot_uint16_t; typedef uint32_t multiboot_uint32_t; typedef uint64_t multiboot_uint64_t; @@ -143,6 +150,8 @@ struct multiboot_mod_list }; typedef struct multiboot_mod_list multiboot_module_t; -extern multiboot_info_t* mb_info; +/// Pointer to multiboot structure +/// This pointer is declared at set by entry.asm +extern multiboot_info_t* mb_info; #endif diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h new file mode 100644 index 0000000..089c732 --- /dev/null +++ b/arch/x86/include/asm/page.h @@ -0,0 +1,141 @@ +/* + * Copyright (c) 2010, Stefan Lankes, RWTH Aachen University + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * * Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * @author Steffen Vogel
+ * @file arch/x86/include/asm/page.h
+ * @brief Paging related functions
+ *
+ * This file contains several functions to manage the page tables.
+ */
+
+#ifndef __PAGE_H__
+#define __PAGE_H__
+
+/// Page offset bits
+#define PAGE_BITS 12
+/// The size of a single page in bytes
+#define PAGE_SIZE ( 1L << PAGE_BITS)
+/// Mask the page address without page map flags
+#define PAGE_MASK (-1L << PAGE_BITS)
+
+/// Total operand width in bits
+#define BITS 32
+/// Linear/virtual address width
+#define VIRT_BITS BITS
+/// Physical address width (we don't support PAE)
+#define PHYS_BITS BITS
+/// Page map bits
+#define PAGE_MAP_BITS 10
+/// Number of page map indirections
+#define PAGE_LEVELS 2
+
+/// Make address canonical
+#define CANONICAL(addr) (addr) // only for 32 bit paging
+
+/// The number of entries in a page map table
+#define PAGE_MAP_ENTRIES (1L << PAGE_MAP_BITS)
+
+/// Align to next page
+#define PAGE_FLOOR(addr) (((addr) + PAGE_SIZE - 1) & PAGE_MASK)
+/// Align to page
+#define PAGE_CEIL(addr) ( (addr) & PAGE_MASK)
+
+/// Page is present
+#define PG_PRESENT (1 << 0)
+/// Page is read- and writable
+#define PG_RW (1 << 1)
+/// Page is addressable from userspace
+#define PG_USER (1 << 2)
+/// Page write through is activated
+#define PG_PWT (1 << 3)
+/// Page cache is disabled
+#define PG_PCD (1 << 4)
+/// Page was recently accessed (set by CPU)
+#define PG_ACCESSED (1 << 5)
+/// Page is dirty due to recent write-access (set by CPU)
+#define PG_DIRTY (1 << 6)
+/// Huge page: 4MB (or 2MB, 1GB)
+#define PG_PSE (1 << 7)
+/// Page attribute table
+#define PG_PAT PG_PSE
+/// Global TLB entry (Pentium Pro and later)
+#define PG_GLOBAL (1 << 8)
+/// This page or table is used during the boot process
+#define PG_BOOT (1 << 9)
+
+/// This page is reserved for copying
+#define PAGE_TMP (PAGE_FLOOR((size_t) &kernel_start) - PAGE_SIZE)
+
+/** @brief Convert a virtual address to a physical one
+ *
+ * A non-mapped virtual address causes a page fault!
+ *
+ * @param addr Virtual address to convert
+ * @return physical address
+ */
+size_t page_virt_to_phys(size_t addr);
+
+/** @brief Initialize paging subsystem
+ *
+ * This function uses the existing bootstrap page tables (boot_{pgd, pgt})
+ * to map required regions (video memory, kernel, etc.).
+ * Before calling page_init(), the bootstrap tables contain a simple identity
+ * paging, which is then replaced by more specific mappings.
+ */
+int page_init();
+
+/** @brief Map a contiguous region of pages
+ *
+ * @param viraddr Virtual start address of the region
+ * @param phyaddr Physical start address of the region
+ * @param npages Number of pages to map
+ * @param bits Page map flags (PG_*)
+ * @return 0 on success
+ */
+int page_map(size_t viraddr, size_t phyaddr, size_t npages, size_t bits);
+
+/** @brief Unmap a contiguous region of pages
+ *
+ * @param viraddr Virtual start address of the region
+ * @param npages Number of pages to unmap
+ * @return 0 on success
+ */
+int page_unmap(size_t viraddr, size_t npages);
+
+/** @brief Copy a whole page map tree
+ *
+ * @param dest Physical address of new page map
+ * @retval 0 Success. Everything went fine.
+ * @retval <0 Error. Something went wrong.
+ */
+int page_map_copy(size_t dest);
+
+/** @brief Free a whole page map tree */
+int page_map_drop();
+
+#endif
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 76e5c78..6b9db1d 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -57,6 +57,38 @@ inline static uint64_t rdtsc(void)
 	return x;
 }
+/** @brief Read cr3 register
+ * @return cr3's value
+ */
+static inline size_t read_cr3(void) {
+	size_t val;
+	asm volatile("mov %%cr3, %0" : "=r"(val));
+	return val;
+}
+
+/** @brief Read cr2 register
+ * @return cr2's value
+ */
+static inline size_t read_cr2(void) {
+	size_t val;
+	asm volatile("mov %%cr2, %0" : "=r"(val));
+	return val;
+}
+
+/** @brief Write a value into cr2 register
+ * @param val The value you want to write into cr2
+ */
+static inline void write_cr2(size_t val) {
+	asm volatile("mov %0, %%cr2" : : "r"(val));
+}
+
+/** @brief Write a value into cr3 register
+ * @param val The value you want to write into cr3
+ */
+static inline void write_cr3(size_t val) {
+	asm volatile("mov %0, %%cr3" : : "r"(val));
+}
+
 /** @brief Flush cache
  *
  * The wbinvd asm instruction which stands for "Write back and invalidate"
@@ -66,6 +98,26 @@ inline static void flush_cache(void)
 {
 	asm volatile ("wbinvd" : : : "memory");
 }
+/** @brief Flush Translation Lookaside Buffer
+ *
+ * Just reads cr3 and writes the same value back into it.
+ */
+static inline void flush_tlb(void)
+{
+	size_t val = read_cr3();
+
+	if (val)
+		write_cr3(val);
+}
+
+/** @brief Flush a specific page entry in TLB
+ * @param addr The (virtual) address of the page to flush
+ */
+static inline void tlb_flush_one_page(size_t addr)
+{
+	asm volatile("invlpg (%0)" : : "r"(addr) : "memory");
+}
+
 /** @brief Invalidate cache
  *
  * The invd asm instruction which invalidates cache without writing back
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index b0c0676..2f4b45c 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -1,4 +1,4 @@
-C_source := tasks.c multiboot.c vga.c gdt.c irq.c idt.c isrs.c timer.c processor.c
+C_source := tasks.c vga.c gdt.c irq.c idt.c isrs.c timer.c processor.c
 ASM_source := entry.asm string32.asm
 MODULE := arch_x86_kernel
diff --git a/arch/x86/kernel/entry.asm b/arch/x86/kernel/entry.asm
index 19df613..3241974 100644
--- a/arch/x86/kernel/entry.asm
+++ b/arch/x86/kernel/entry.asm
@@ -1,4 +1,4 @@
-;
+
 ; Copyright (c) 2010, Stefan Lankes, RWTH Aachen University
 ; All rights reserved.
 ;
@@ -32,19 +32,20 @@
 %include "config.inc"
 [BITS 32]
+
 ; We use a special name to map this section at the begin of our kernel
-; => Multiboot needs its magic number at the begin of the kernel
+; => Multiboot expects its magic number at the beginning of the kernel.
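For readers of the new page.h and processor.h interfaces above, here is a minimal usage sketch (illustrative only, not part of the patch): it identity-maps one uncached page of VGA text memory with page_map(), writes a character, and unmaps it again. The helper name map_vga_example and the raw constant 0xB8000 are assumptions made for the example; eduOS itself performs the equivalent mapping in page_init() using VIDEO_MEM_ADDR, and the TLB maintenance (invlpg via tlb_flush_one_page()) happens inside page_map() whenever a present entry is replaced.

#include <stddef.h>      /* size_t */
#include <asm/page.h>    /* page_map(), page_unmap(), PG_* flags (added by this patch) */

static void map_vga_example(void)
{
	size_t vga = 0xB8000;                    /* VGA text buffer (identity address) */

	/* map a single page: present, writable, caching disabled */
	if (page_map(vga, vga, 1, PG_PRESENT | PG_RW | PG_PCD))
		return;                          /* -ENOMEM: no frame left for a table */

	volatile unsigned short* screen = (volatile unsigned short*) vga;
	screen[0] = 0x0700 | 'A';                /* grey 'A' in the top-left corner */

	page_unmap(vga, 1);                      /* drop the mapping again */
}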
 SECTION .mboot
 global start
 start:
 	jmp stublet
-; This part MUST be 4byte aligned, so we solve that issue using 'ALIGN 4'
+; This part MUST be 4-byte aligned, so we solve that issue using 'ALIGN 4'.
 ALIGN 4
 mboot:
 	; Multiboot macros to make a few lines more readable later
-	MULTIBOOT_PAGE_ALIGN equ 1<<0
-	MULTIBOOT_MEMORY_INFO equ 1<<1
+	MULTIBOOT_PAGE_ALIGN equ (1 << 0)
+	MULTIBOOT_MEMORY_INFO equ (1 << 1)
 	MULTIBOOT_HEADER_MAGIC equ 0x1BADB002
 	MULTIBOOT_HEADER_FLAGS equ MULTIBOOT_PAGE_ALIGN | MULTIBOOT_MEMORY_INFO
 	MULTIBOOT_CHECKSUM equ -(MULTIBOOT_HEADER_MAGIC + MULTIBOOT_HEADER_FLAGS)
@@ -57,37 +58,43 @@ mboot:
 SECTION .text
 ALIGN 4
 stublet:
-; initialize stack pointer
+; Initialize stack pointer
 	mov esp, boot_stack
 	add esp, KERNEL_STACK_SIZE-16
-; initialize cpu features
+; Initialize CPU features
 	call cpu_init
-; interpret multiboot information
-	extern multiboot_init
-	push ebx
-	call multiboot_init
-	add esp, 4
+; Interpret multiboot information
+	mov DWORD [mb_info], ebx

-; jump to the boot processors's C code
+; Jump to the boot processor's C code
 	extern main
 	call main
 	jmp $

+; This will set up the x86 control registers:
+; caching and the floating point unit are enabled,
+; bootstrap page tables are loaded, and page size
+; extensions (huge pages) are enabled.
 global cpu_init
 cpu_init:
-	mov eax, cr0
-; enable caching, disable paging and fpu emulation
-	and eax, 0x1ffffffb
-; ...and turn on FPU exceptions
-	or eax, 0x22
-	mov cr0, eax
-; clears the current pgd entry
-	xor eax, eax
+; Set CR3
+	mov eax, boot_pgd
 	mov cr3, eax
-
-; at this stage, we disable the SSE support
-	mov eax, cr4
-	and eax, 0xfffbf9ff
+
+; Set CR4
+	mov eax, cr4
+	and eax, ~(1 << 9)	; disable SSE
+	or eax, (1 << 4)	; enable PSE
 	mov cr4, eax
+
+; Set CR0
+	mov eax, cr0
+	and eax, ~(1 << 2)	; disable FPU emulation
+	and eax, ~(1 << 30)	; enable caching
+	or eax, (1 << 31)	; enable paging
+	or eax, (1 << 5)	; enable FPU exceptions
+	mov cr0, eax
+
 	ret

; This will set up our new segment registers. We need to do
@@ -108,12 +115,12 @@ gdt_flush:
 flush2:
 	ret
-; The first 32 interrupt service routines (isr) entries correspond to exceptions.
-; Some exceptions will push an error code onto the stack which is specific to
+; The first 32 interrupt service routines (ISR) entries correspond to exceptions.
+; Some exceptions will push an error code onto the stack which is specific to
 ; the exception caused. To decrease the complexity, we handle this by pushing a
-; dummy error code of 0 onto the stack for any ISR that doesn't push an error
-; code already.
-;
+; dummy error code of 0 onto the stack for any ISR that doesn't push an error
+; code already.
+;
 ; ISRs are registered as "Interrupt Gate".
 ; Therefore, the interrupt flag (IF) is already cleared.
@@ -136,8 +143,8 @@ flush2:
 	jmp common_stub
 %endmacro
-; create isr entries, where the number after the
-; pseudo error code represents following interrupts
+; Create ISR entries, where the number after the
+; pseudo error code represents the following interrupts:
 ; 0: Divide By Zero Exception
 ; 1: Debug Exception
 ; 2: Non Maskable Interrupt Exception
@@ -189,18 +196,16 @@ isrstub_pseudo_error 9
 	jmp common_stub
 %endmacro
-; create entries for the interrupts 0 to 23
+; Create entries for the interrupts 0 to 23
 %assign i 0
 %rep 24
 	irqstub i
 %assign i i+1
 %endrep
-extern syscall_handler
+; Used to realize system calls.
+; On entering the handler, the interrupt flag is not cleared.
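As a reading aid for the cpu_init routine above, the raw bit masks correspond to the following architectural control-register bits. The symbolic names below follow the Intel manuals and are illustrative only; entry.asm itself keeps using the plain shifts.

/* Illustrative constants, not part of the patch */
#define CR0_EM     (1 << 2)   /* cleared: do not emulate the FPU            */
#define CR0_NE     (1 << 5)   /* set: native FPU error reporting            */
#define CR0_CD     (1 << 30)  /* cleared: enable caching                    */
#define CR0_PG     (1 << 31)  /* set: enable paging (CR3 = boot_pgd)        */
#define CR4_PSE    (1 << 4)   /* set: allow 4 MiB pages (PG_PSE)            */
#define CR4_OSFXSR (1 << 9)   /* cleared: SSE support stays disabled        */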
 global isrsyscall
-
-; used to realize system calls
-; by entering the handler, the interrupt flag is not cleared
 isrsyscall:
 	cli
 	push es
 	push ds
 	push ebp
 	push edi
 	push esi
 	push edx
 	push ecx
 	push ebx
 	push eax
-	; set kernel data segmenets
+; Set kernel data segments
 	mov ax, 0x10
 	mov ds, ax
 	mov es, ax
 	mov eax, [esp]
 	sti
+
+	extern syscall_handler
 	call syscall_handler
 	cli
@@ -241,12 +247,12 @@ extern irq_handler
 extern get_current_stack
 extern finish_task_switch
+; Create a pseudo interrupt on top of the stack.
+; Afterwards, we switch to the task with iret.
+; We already are in kernel space => no pushing of SS required.
 global switch_context
 ALIGN 4
 switch_context:
-	; create on the stack a pseudo interrupt
-	; afterwards, we switch to the task with iret
-	; we already in kernel space => no pushing of SS required
 	mov eax, [esp+4]	; on the stack is already the address to store the old esp
 	pushf			; push controll register
 	push DWORD 0x8		; CS
 	push DWORD rollback	; EIP
@@ -263,8 +269,6 @@ ALIGN 4
 rollback:
 	ret
-extern set_kernel_stack
-
 ALIGN 4
 common_stub:
 	pusha
@@ -274,8 +278,10 @@ common_stub:
 	mov es, ax
 	mov ds, ax
-	; use the same handler for interrupts and exceptions
+; Use the same handler for interrupts and exceptions
 	push esp
+
+	extern set_kernel_stack
 	call irq_handler
 	add esp, 4
@@ -287,15 +293,15 @@ common_switch:
 	call get_current_stack	; get new esp
 	xchg eax, esp
-	; set task switched flag
+; Set task switched flag
 	mov eax, cr0
 	or eax, 8
 	mov cr0, eax
-	; set esp0 in the task state segment
+; Set esp0 in the task state segment
 	call set_kernel_stack
-	; call cleanup code
+; Call cleanup code
 	call finish_task_switch
 no_context_switch:
@@ -305,9 +311,34 @@ no_context_switch:
 	add esp, 8
 	iret
-global boot_stack
-ALIGN 4096
-boot_stack:
-TIMES (KERNEL_STACK_SIZE) DB 0xcd
+SECTION .data
+global mb_info
+ALIGN 4
+mb_info:
+	DD 0
+
+ALIGN 4096
+global boot_stack
+boot_stack:
+	TIMES (KERNEL_STACK_SIZE) DB 0xcd
+
+; Bootstrap page tables are used during the initialization.
+; These tables do a simple identity paging and will
+; be replaced in page_init() by more fine-granular mappings.
+ALIGN 4096
+global boot_map
+boot_map:
+boot_pgd:
+	DD boot_pgt + 0x103	; PG_GLOBAL | PG_RW | PG_PRESENT
+	times 1022 DD 0		; PAGE_MAP_ENTRIES - 2
+	DD boot_pgd + 0x103	; PG_GLOBAL | PG_RW | PG_PRESENT (self-reference)
+boot_pgt:
+	%assign i 0
+	%rep 1024		; PAGE_MAP_ENTRIES
+	DD i | 0x203		; PG_BOOT | PG_RW | PG_PRESENT
+	%assign i i + 4096	; PAGE_SIZE
+	%endrep
+
+; add some hints to the ELF file
 SECTION .note.GNU-stack noalloc noexec nowrite progbits
diff --git a/arch/x86/kernel/gdt.c b/arch/x86/kernel/gdt.c
index 43190af..7bf336f 100644
--- a/arch/x86/kernel/gdt.c
+++ b/arch/x86/kernel/gdt.c
@@ -34,6 +34,7 @@
 #include
 #include
 #include
+#include
 gdt_ptr_t gp;
 static tss_t task_state_segment __attribute__ ((aligned (PAGE_SIZE)));
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
new file mode 100644
index 0000000..2f9b183
--- /dev/null
+++ b/arch/x86/mm/Makefile
@@ -0,0 +1,4 @@
+C_source := page.c
+MODULE := arch_x86_mm
+
+include $(TOPDIR)/Makefile.inc
diff --git a/arch/x86/mm/page.c b/arch/x86/mm/page.c
new file mode 100644
index 0000000..a0e6d4b
--- /dev/null
+++ b/arch/x86/mm/page.c
@@ -0,0 +1,208 @@
+/*
+ * Copyright (c) 2010, Stefan Lankes, RWTH Aachen University
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+/**
+ * This is a 32/64 bit portable paging implementation for the x86 architecture
+ * using self-referencing page tables.
+ * See http://www.noteblok.net/2014/06/14/bachelor/ for a detailed description.
+ */
+
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+
+/* Note that linker symbols are not variables, they have no memory
+ * allocated for maintaining a value, rather their address is their value. */
+extern const void kernel_start;
+extern const void kernel_end;
+
+/** Lock for kernel space page tables */
+static spinlock_t kslock = SPINLOCK_INIT;
+
+/** This PGD table is initialized in entry.asm */
+extern size_t boot_map[PAGE_MAP_ENTRIES];
+
+/** A self-reference enables direct access to all page tables */
+static size_t* self[PAGE_LEVELS] = {
+	(size_t *) 0xFFC00000,
+	(size_t *) 0xFFFFF000
+};
+
+/* Addresses of child/parent tables */
+#define CHILD(map, lvl, vpn)	&map[lvl-1][vpn << PAGE_MAP_BITS]
+#define PARENT(map, lvl, vpn)	&map[lvl+1][vpn >> PAGE_MAP_BITS]
+
+size_t page_virt_to_phys(size_t addr)
+{
+	size_t vpn   = addr >> PAGE_BITS;	// virtual page number
+	size_t entry = self[0][vpn];		// page table entry
+	size_t off   = addr & ~PAGE_MASK;	// offset within page
+	size_t phy   = entry & PAGE_MASK;	// physical page address
+
+	return phy | off;
+}
+
+int page_map(size_t viraddr, size_t phyaddr, size_t npages, size_t bits)
+{
+	int lvl;
+	long vpn = viraddr >> PAGE_BITS;
+	long first[PAGE_LEVELS], last[PAGE_LEVELS];
+
+	/* Calculate index boundaries for page map traversal */
+	for (lvl=0; lvl<PAGE_LEVELS; lvl++) {
+		first[lvl] = vpn >> (lvl * PAGE_MAP_BITS);
+		last[lvl] = (vpn+npages-1) >> (lvl * PAGE_MAP_BITS);
+	}
+
+	spinlock_lock(&kslock);
+
+	/* Start iterating through the entries
+	 * beginning at the root table (PGD or PML4) */
+	for (lvl=PAGE_LEVELS-1; lvl>=0; lvl--) {
+		for (vpn=first[lvl]; vpn<=last[lvl]; vpn++) {
+			if (lvl) { /* PML4, PDPT, PGD */
+				if (!(self[lvl][vpn] & PG_PRESENT)) {
+					/* There's no table available which covers the region.
+					 * Therefore we need to create a new empty table.
*/ + size_t phyaddr = get_pages(1); + if (BUILTIN_EXPECT(!phyaddr, 0)) { + spinlock_unlock(&kslock); + return -ENOMEM; + } + + /* Reference the new table within its parent */ + self[lvl][vpn] = phyaddr | bits; + + /* Fill new table with zeros */ + memset(CHILD(self, lvl, vpn), 0, PAGE_SIZE); + } + } + else { /* PGT */ + if (self[lvl][vpn] & PG_PRESENT) + /* There's already a page mapped at this address. + * We have to flush a single TLB entry. */ + tlb_flush_one_page(vpn << PAGE_BITS); + + self[lvl][vpn] = phyaddr | bits; + phyaddr += PAGE_SIZE; + } + } + } + + spinlock_unlock(&kslock); + + return 0; +} + +/** Tables are freed by page_map_drop() */ +int page_unmap(size_t viraddr, size_t npages) +{ + long vpn, start = viraddr >> PAGE_BITS; + long end = start + npages; + + spinlock_lock(&kslock); + + /* Start iterating through the entries. + * Only the PGT entries are removed. Tables remain allocated. */ + for (vpn=start; vpnint_no, s->cs, s->eip, current_task->id, viraddr, s->error, + (s->error & 0x4) ? "user" : "supervisor", + (s->error & 0x10) ? "instruction" : "data", + (s->error & 0x2) ? "write" : ((s->error & 0x10) ? "fetch" : "read"), + (s->error & 0x1) ? "protection" : "not present", + (s->error & 0x8) ? "reserved bit" : "\b"); + + while(1) HALT; +} + +int page_init() +{ + size_t addr, npages; + int i; + + /* Replace default pagefault handler */ + irq_uninstall_handler(14); + irq_install_handler(14, page_fault_handler); + + /* Map kernel */ + addr = (size_t) &kernel_start; + npages = PAGE_FLOOR((size_t) &kernel_end - (size_t) &kernel_start) >> PAGE_BITS; + page_map(addr, addr, npages, PG_PRESENT | PG_RW | PG_GLOBAL); + +#ifdef CONFIG_VGA + /* Map video memory */ + page_map(VIDEO_MEM_ADDR, VIDEO_MEM_ADDR, 1, PG_PRESENT | PG_RW | PG_PCD); +#endif + + /* Map multiboot information and modules */ + if (mb_info) { + addr = (size_t) mb_info & PAGE_MASK; + npages = PAGE_FLOOR(sizeof(*mb_info)) >> PAGE_BITS; + page_map(addr, addr, npages, PG_PRESENT | PG_GLOBAL); + + if (mb_info->flags & MULTIBOOT_INFO_MODS) { + addr = mb_info->mods_addr; + npages = PAGE_FLOOR(mb_info->mods_count*sizeof(multiboot_module_t)) >> PAGE_BITS; + page_map(addr, addr, npages, PG_PRESENT | PG_GLOBAL); + + multiboot_module_t* mmodule = (multiboot_module_t*) ((size_t) mb_info->mods_addr); + for(i=0; imods_count; i++) { + addr = mmodule[i].mod_start; + npages = PAGE_FLOOR(mmodule[i].mod_end - mmodule[i].mod_start) >> PAGE_BITS; + page_map(addr, addr, npages, PG_PRESENT | PG_USER | PG_GLOBAL); + } + } + } + + /* Unmap bootstrap identity paging (see entry.asm, PG_BOOT) */ + for (i=0; i -#include - -/* - * eduOS is able to use Multiboot (http://www.gnu.org/software/grub/manual/multiboot/), - * which specifies an interface between a boot loader and a operating system +/** + * @author Steffen Vogel + * @file include/memory.h + * @brief Memory related functions + * + * This file contains platform independent memory functions */ -multiboot_info_t* mb_info __attribute__ ((section (".data"))) = NULL; +#ifndef __MEMORY_H__ +#define __MEMORY_H__ -void multiboot_init(void* mb) -{ - mb_info = (multiboot_info_t*) mb; -} +/** @brief Initialize the memory subsystem */ +int memory_init(); + +/** @brief Request physical page frames */ +size_t get_pages(size_t npages); + +/** @brief release physical page frames */ +int put_pages(size_t phyaddr, size_t npages); + +#endif diff --git a/include/eduos/stddef.h b/include/eduos/stddef.h index ca3fa70..e488668 100644 --- a/include/eduos/stddef.h +++ b/include/eduos/stddef.h @@ -46,10 +46,6 @@ 
extern "C" { /// represents a task identifier typedef unsigned int tid_t; -#define PAGE_SIZE (1 << PAGE_SHIFT) -#define PAGE_MASK ~(PAGE_SIZE - 1) -#define PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) & PAGE_MASK) - struct task; /// pointer to the current (running) task extern struct task* current_task; diff --git a/kernel/main.c b/kernel/main.c index 34b8642..88798aa 100644 --- a/kernel/main.c +++ b/kernel/main.c @@ -33,10 +33,14 @@ #include #include #include +#include + #include #include +#include +#include -/* +/* * Note that linker symbols are not variables, they have no memory allocated for * maintaining a value, rather their address is their value. */ @@ -47,6 +51,11 @@ extern const void bss_end; extern char __BUILD_DATE; extern char __BUILD_TIME; +/* Page frame counters */ +extern atomic_int32_t total_pages; +extern atomic_int32_t total_allocated_pages; +extern atomic_int32_t total_available_pages; + static void userfoo(void* arg) { char str[256]; @@ -96,6 +105,7 @@ static int eduos_init(void) timer_init(); koutput_init(); multitasking_init(); + memory_init(); return 0; } @@ -104,16 +114,18 @@ int main(void) { tid_t id1; tid_t id2; + eduos_init(); - - kprintf("This is eduOS %s Build %u, %u\n", EDUOS_VERSION, &__BUILD_DATE, &__BUILD_TIME); - kprintf("Kernel starts at %p and ends at %p\n", &kernel_start, &kernel_end); - irq_enable(); system_calibration(); + kprintf("This is eduOS %s Build %u, %u\n", EDUOS_VERSION, &__BUILD_DATE, &__BUILD_TIME); + kprintf("Kernel starts at %p and ends at %p\n", &kernel_start, &kernel_end); kprintf("Processor frequency: %u MHz\n", get_cpu_frequency()); - + kprintf("Total memory: %lu KiB\n", atomic_int32_read(&total_pages) * PAGE_SIZE / 1024); + kprintf("Total memory available: %lu KiB\n", atomic_int32_read(&total_available_pages) * PAGE_SIZE / 1024); + + create_kernel_task(&id1, foo, "foo1", NORMAL_PRIO); create_kernel_task(&id2, wrapper, "userfoo", NORMAL_PRIO); diff --git a/mm/memory.c b/mm/memory.c index 9b32356..61cb925 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -27,8 +27,29 @@ #include #include +#include +#include +#include + +#include +#include +#include + +/* + * Note that linker symbols are not variables, they have no memory allocated for + * maintaining a value, rather their address is their value. 
+ */ +extern const void kernel_start; +extern const void kernel_end; static char stack[MAX_TASKS-1][KERNEL_STACK_SIZE]; +static char bitmap[BITMAP_SIZE]; + +static spinlock_t bitmap_lock = SPINLOCK_INIT; + +atomic_int32_t total_pages = ATOMIC_INIT(0); +atomic_int32_t total_allocated_pages = ATOMIC_INIT(0); +atomic_int32_t total_available_pages = ATOMIC_INIT(0); void* create_stack(tid_t id) { @@ -41,3 +62,180 @@ void* create_stack(tid_t id) return (void*) stack[id-1]; } + +inline static int page_marked(size_t i) +{ + size_t index = i >> 3; + size_t mod = i & 0x7; + + return (bitmap[index] & (1 << mod)); +} + +inline static void page_set_mark(size_t i) +{ + size_t index = i >> 3; + size_t mod = i & 0x7; + + bitmap[index] = bitmap[index] | (1 << mod); +} + +inline static void page_clear_mark(size_t i) +{ + size_t index = i / 8; + size_t mod = i % 8; + + bitmap[index] = bitmap[index] & ~(1 << mod); +} + +size_t get_pages(size_t npages) +{ + size_t cnt, off; + + if (BUILTIN_EXPECT(!npages, 0)) + return 0; + if (BUILTIN_EXPECT(npages > atomic_int32_read(&total_available_pages), 0)) + return 0; + + spinlock_lock(&bitmap_lock); + + off = 1; + while (off <= BITMAP_SIZE*8 - npages) { + for (cnt=0; cnt> PAGE_BITS; + + if (BUILTIN_EXPECT(!phyaddr, 0)) + return -EINVAL; + if (BUILTIN_EXPECT(!npages, 0)) + return -EINVAL; + + spinlock_lock(&bitmap_lock); + + for (i=0; iflags & MULTIBOOT_INFO_MEM_MAP) { + multiboot_memory_map_t* mmap = (multiboot_memory_map_t*) ((size_t) mb_info->mmap_addr); + multiboot_memory_map_t* mmap_end = (void*) ((size_t) mb_info->mmap_addr + mb_info->mmap_length); + + // mark available memory as free + while (mmap < mmap_end) { + if (mmap->type == MULTIBOOT_MEMORY_AVAILABLE) { + for (addr=mmap->addr; addr < mmap->addr + mmap->len; addr += PAGE_SIZE) { + page_clear_mark(addr >> PAGE_BITS); + atomic_int32_inc(&total_pages); + atomic_int32_inc(&total_available_pages); + } + } + mmap++; + } + } + else if (mb_info->flags & MULTIBOOT_INFO_MEM) { + size_t page; + size_t pages_lower = mb_info->mem_lower >> 2; /* KiB to page number */ + size_t pages_upper = mb_info->mem_upper >> 2; + + for (page=0; page> PAGE_BITS); + atomic_int32_inc(&total_allocated_pages); + atomic_int32_dec(&total_available_pages); + + + if (mb_info->flags & MULTIBOOT_INFO_MODS) { + // mark modules list as used + for(addr=mb_info->mods_addr; addrmods_addr+mb_info->mods_count*sizeof(multiboot_module_t); addr+=PAGE_SIZE) { + page_set_mark(addr >> PAGE_BITS); + atomic_int32_inc(&total_allocated_pages); + atomic_int32_dec(&total_available_pages); + } + + // mark modules as used + multiboot_module_t* mmodule = (multiboot_module_t*) ((size_t) mb_info->mods_addr); + for(i=0; imods_count; i++) { + for(addr=mmodule[i].mod_start; addr> PAGE_BITS); + atomic_int32_inc(&total_allocated_pages); + atomic_int32_dec(&total_available_pages); + } + } + } + } + + // mark kernel as used + for(addr=(size_t) &kernel_start; addr<(size_t) &kernel_end; addr+=PAGE_SIZE) { + page_set_mark(addr >> PAGE_BITS); + atomic_int32_inc(&total_allocated_pages); + atomic_int32_dec(&total_available_pages); + } + + // enable paging and map SMP, VGA, Multiboot modules etc. + ret = page_init(); + if (BUILTIN_EXPECT(ret, 0)) { + kputs("Failed to initialize paging!\n"); + return ret; + } + + return ret; +}
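To summarize the allocator introduced in mm/memory.c above: each physical page frame corresponds to one bit in bitmap[], with frame i stored in byte i >> 3 at bit i & 0x7. A short worked example follows (illustrative only, not part of the patch; the address 0x00103000 and the helper name are arbitrary):

#include <stddef.h>

#define PAGE_BITS 12	/* from asm/page.h */

static void bitmap_index_example(void)
{
	size_t phyaddr = 0x00103000;		/* arbitrary physical address  */
	size_t frame = phyaddr >> PAGE_BITS;	/* page frame number: 0x103    */
	size_t byte  = frame >> 3;		/* index into bitmap[]: 32     */
	size_t bit   = frame & 0x7;		/* bit inside that byte: 3     */

	/* page_set_mark(frame) therefore performs bitmap[32] |= (1 << 3);
	 * get_pages() and put_pages() just search and toggle such bits while
	 * keeping the total_*_pages counters consistent. */
	(void) byte;
	(void) bit;
}

A plain bitmap keeps the allocator's footprint fixed and the code easy to follow, which fits a teaching kernel; page_init() then maps whatever get_pages() hands out, so frame allocation and page-table maintenance stay cleanly separated.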