/*
 * Copyright 2010 Stefan Lankes, Chair for Operating Systems,
 *                               RWTH Aachen University
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * This file is part of MetalSVM.
 */

/* NOTE: the original include targets were lost in transcription; the headers
 * below are inferred from the identifiers used in this file. */
#include <metalsvm/stddef.h>
#include <metalsvm/stdio.h>
#include <metalsvm/stdlib.h>
#include <metalsvm/mmu.h>
#include <metalsvm/vma.h>
#include <metalsvm/string.h>
#include <metalsvm/page.h>
#include <metalsvm/spinlock.h>
#include <metalsvm/processor.h>
#include <metalsvm/tasks.h>
#include <metalsvm/errno.h>
#include <asm/irq.h>
#include <asm/multiboot.h>
#include <asm/apic.h>
#ifdef CONFIG_ROCKCREEK
#include <asm/RCCE.h>
#include <asm/RCCE_lib.h>
#include <asm/icc.h>
#include <asm/svm.h>
#endif

/*
 * Virtual Memory Layout of the standard configuration
 * (1 GB kernel space)
 *
 * 0x00000000 - 0x000FFFFF:	reserved for IO devices (1MB)
 * 0x00100000 - 0x0DEADFFF:	Kernel (size depends on the configuration) (221MB)
 * 0x0DEAE000 - 0x3FFFEFFF:	Kernel heap (801MB)
 * 0x3FFFF000 - 0x3FFFFFFF:	Page tables are mapped in this region (4KB)
 *				(the first 256 entries belong to kernel space)
 */

/*
 * Note that linker symbols are not variables; they have no memory allocated
 * to maintain a value. Rather, their address is their value.
 */
extern const void kernel_start;
extern const void kernel_end;

// boot task's page directory, its page tables and the kernel-space lock
static page_map_t boot_pgd = {{[0 ... MAP_ENTRIES-1] = 0}};
static page_map_t boot_pgt[KERNEL_SPACE/(MAP_ENTRIES*PAGE_SIZE)];
static page_map_t pgt_container = {{[0 ... MAP_ENTRIES-1] = 0}};
static spinlock_t kslock = SPINLOCK_INIT;
static int paging_enabled = 0;

page_map_t* get_boot_page_map(void)
{
	return &boot_pgd;
}
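
/*
 * How the "page table container" works (see the layout comment above):
 * the last PGD slot of the kernel space points to a page table whose entries
 * map the page tables themselves. The page table covering directory slot
 * index1 is therefore always visible at the virtual address
 * KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE, an expression that recurs
 * throughout this file. The sketch below is illustrative only and not part of
 * the original sources; the helper name PGT_VIRT is hypothetical.
 */
#if 0
#define PGT_VIRT(idx) \
	((page_map_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + (idx)*PAGE_SIZE) & PAGE_MASK))

/* example: the page table that covers viraddr */
// page_map_t* pgt = PGT_VIRT(viraddr >> 22);
#endif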
/*
 * TODO: We create a full copy of the current task. Copy-on-access would be the
 *       better solution.
 *
 * No PGD locking is needed because only create_page_map() uses this function,
 * and it already holds the PGD lock.
 */
inline static size_t copy_page_table(task_t* task, uint32_t pgd_index, page_map_t* pgt, int* counter)
{
	uint32_t i;
	page_map_t* new_pgt;
	size_t phyaddr;

	if (BUILTIN_EXPECT(!pgt, 0))
		return 0;

	new_pgt = kmalloc(sizeof(page_map_t));
	if (!new_pgt)
		return 0;
	memset(new_pgt, 0x00, sizeof(page_map_t));
	if (counter)
		(*counter)++;

	for(i=0; i<1024; i++) {
		if (pgt->entries[i] & PAGE_MASK) {
			if (!(pgt->entries[i] & PG_USER)) {
				// kernel page => copy only the page entry
				new_pgt->entries[i] = pgt->entries[i];
				continue;
			}

			phyaddr = get_page();
			if (!phyaddr)
				continue;
			if (counter)
				(*counter)++;

			copy_page_physical((void*) phyaddr, (void*) (pgt->entries[i] & PAGE_MASK));

			new_pgt->entries[i] = phyaddr | (pgt->entries[i] & 0xFFF);

			atomic_int32_inc(&task->user_usage);
		}
	}

	phyaddr = virt_to_phys((size_t) new_pgt);

	return phyaddr;
}

int create_page_map(task_t* task, int copy)
{
	page_map_t* pgd;
	page_map_t* pgt;
	page_map_t* pgt_container;
	uint32_t i;
	uint32_t index1, index2;
	size_t viraddr, phyaddr;
	int counter = 0;
	task_t* curr_task = per_core(current_task);

	if (BUILTIN_EXPECT(!paging_enabled, 0))
		return -EINVAL;

	// we already know the virtual address of the "page table container"
	// (see file header)
	pgt_container = (page_map_t*) ((KERNEL_SPACE - PAGE_SIZE) & PAGE_MASK);

	// create a new page directory for the new task
	pgd = kmalloc(sizeof(page_map_t));
	if (!pgd)
		return -ENOMEM;
	memset(pgd, 0x00, sizeof(page_map_t));

	// create a new "page table container" for the new task
	pgt = kmalloc(sizeof(page_map_t));
	if (!pgt) {
		kfree(pgd, sizeof(page_map_t));
		return -ENOMEM;
	}
	memset(pgt, 0x00, sizeof(page_map_t));

	// copy kernel tables
	spinlock_lock(&kslock);

	for(i=0; i<1024; i++) {
		pgd->entries[i] = boot_pgd.entries[i];
		// only kernel entries will be copied
		if (pgd->entries[i] && !(pgd->entries[i] & PG_USER))
			pgt->entries[i] = pgt_container->entries[i];
	}

	spinlock_unlock(&kslock);

	// map the page table container at the end of the kernel space
	viraddr = (KERNEL_SPACE - PAGE_SIZE) & PAGE_MASK;
	index1 = viraddr >> 22;
	index2 = (viraddr >> 12) & 0x3FF;

	// now, we create a self reference
	pgd->entries[index1] = ((size_t) virt_to_phys((size_t) pgt) & PAGE_MASK)|KERN_TABLE;
	pgt->entries[index2] = ((size_t) virt_to_phys((size_t) pgt) & PAGE_MASK)|KERN_PAGE;

	task->page_map = pgd;

	if (copy) {
		spinlock_irqsave_lock(&curr_task->page_lock);

		for (i=KERNEL_SPACE/(1024*PAGE_SIZE); i<1024; i++) {
			if (!(curr_task->page_map->entries[i]))
				continue;
			if (!(curr_task->page_map->entries[i] & PG_USER))
				continue;

			phyaddr = copy_page_table(task, i, (page_map_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + i*PAGE_SIZE) & PAGE_MASK), &counter);
			if (phyaddr) {
				pgd->entries[i] = (phyaddr & PAGE_MASK) | (curr_task->page_map->entries[i] & 0xFFF);
				pgt->entries[i] = (phyaddr & PAGE_MASK) | KERN_PAGE;
			}
		}

		spinlock_irqsave_unlock(&curr_task->page_lock);
	}

	return counter;
}

int drop_page_map(void)
{
	page_map_t* pgd = per_core(current_task)->page_map;
	size_t phy_pgd = virt_to_phys((size_t) pgd);
	task_t* task = per_core(current_task);
	uint32_t i;

	if (BUILTIN_EXPECT(pgd == &boot_pgd, 0))
		return -EINVAL;

	spinlock_irqsave_lock(&task->page_lock);

	// free all user page tables
	for(i=0; i<1024; i++) {
		if (pgd->entries[i] & PG_USER) {
			put_page(pgd->entries[i] & PAGE_MASK);
			pgd->entries[i] = 0;
		}
	}

	// free the page directory
	put_page(phy_pgd);
	task->page_map = NULL;

	spinlock_irqsave_unlock(&task->page_lock);

	return 0;
}
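
/*
 * virt_to_phys() below performs a software page walk: a 32-bit virtual address
 * splits into a 10-bit page directory index, a 10-bit page table index and a
 * 12-bit page offset. For example (value chosen for illustration only):
 *
 *   viraddr = 0x0DEAE123
 *   index1  = viraddr >> 22           = 0x037
 *   index2  = (viraddr >> 12) & 0x3FF = 0x2AE
 *   offset  = viraddr & 0xFFF         = 0x123
 *
 * The page frame comes from pgt->entries[index2] & PAGE_MASK; adding the
 * offset back yields the physical address.
 */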
size_t virt_to_phys(size_t viraddr)
{
	task_t* task = per_core(current_task);
	uint32_t index1, index2;
	page_map_t* pgt;
	size_t ret = 0;

	if (!paging_enabled)
		return viraddr;

	if (BUILTIN_EXPECT(!task || !task->page_map, 0))
		return 0;

	spinlock_irqsave_lock(&task->page_lock);

	index1 = viraddr >> 22;
	index2 = (viraddr >> 12) & 0x3FF;

	if (!(task->page_map->entries[index1] & PAGE_MASK))
		goto out;

	pgt = (page_map_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & PAGE_MASK);
	if (!pgt || !(pgt->entries[index2]))
		goto out;

	ret = pgt->entries[index2] & PAGE_MASK;	// determine page frame
	ret = ret | (viraddr & 0xFFF);		// add page offset
out:
	//kprintf("vir %p to phy %p\n", viraddr, ret);
	spinlock_irqsave_unlock(&task->page_lock);

	return ret;
}

size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flags)
{
	task_t* task = per_core(current_task);
	page_map_t* pgt;
	size_t index, i;
	size_t ret;

	if (BUILTIN_EXPECT(!task || !task->page_map, 0))
		return 0;

	if (BUILTIN_EXPECT(!paging_enabled && (viraddr != phyaddr), 0))
		return 0;

	if (flags & MAP_KERNEL_SPACE)
		spinlock_lock(&kslock);
	else
		spinlock_irqsave_lock(&task->page_lock);

	if (!viraddr) {
		viraddr = vm_alloc(npages, flags);
		if (BUILTIN_EXPECT(!viraddr, 0)) {
			kputs("map_address: found no valid virtual address\n");
			ret = 0;
			goto out;
		}
	}

	ret = viraddr;
	//kprintf("map %d pages from %p to %p\n", npages, phyaddr, ret);

	for(i=0; i<npages; i++, viraddr+=PAGE_SIZE, phyaddr+=PAGE_SIZE) {
		index = viraddr >> 22;

		if (!(task->page_map->entries[index])) {
			page_map_t* pgt_container;

			pgt = (page_map_t*) get_page();
			if (BUILTIN_EXPECT(!pgt, 0)) {
				kputs("map_address: out of memory\n");
				ret = 0;
				goto out;
			}

			// set the new page table into the directory
			if (flags & MAP_USER_SPACE)
				task->page_map->entries[index] = (uint32_t)pgt|USER_TABLE;
			else
				task->page_map->entries[index] = (uint32_t)pgt|KERN_TABLE;

			// if paging is already enabled, we need to use the virtual address
			if (paging_enabled)
				// we already know the virtual address of the "page table container"
				// (see file header)
				pgt_container = (page_map_t*) ((KERNEL_SPACE - PAGE_SIZE) & PAGE_MASK);
			else
				pgt_container = (page_map_t*) (task->page_map->entries[(KERNEL_SPACE - PAGE_SIZE) >> 22] & PAGE_MASK);

			if (BUILTIN_EXPECT(!pgt_container, 0)) {
				kputs("map_address: internal error\n");
				ret = 0;
				goto out;
			}

			// map the new table into the address space of the kernel space
			pgt_container->entries[index] = ((size_t) pgt)|KERN_PAGE;

			// clear the page table
			if (paging_enabled)
				memset((void*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index*PAGE_SIZE) & PAGE_MASK), 0x00, PAGE_SIZE);
			else
				memset(pgt, 0x00, PAGE_SIZE);
		} else
			pgt = (page_map_t*) (task->page_map->entries[index] & PAGE_MASK);

		// convert the physical address to a virtual one
		if (paging_enabled)
			pgt = (page_map_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index*PAGE_SIZE) & PAGE_MASK);

		index = (viraddr >> 12) & 0x3FF;
		if (pgt->entries[index] && !(flags & MAP_REMAP)) {
			kprintf("0x%x is already mapped\n", viraddr);
			ret = 0;
			goto out;
		}

		if (flags & MAP_USER_SPACE)
			pgt->entries[index] = USER_PAGE|(phyaddr & PAGE_MASK);
		else
			pgt->entries[index] = KERN_PAGE|(phyaddr & PAGE_MASK);

		if (flags & MAP_NO_CACHE)
			pgt->entries[index] |= PG_PCD;
#ifdef CONFIG_ROCKCREEK
		if (flags & MAP_MPE)
			pgt->entries[index] |= PG_MPE;
#endif
		if (flags & MAP_SVM_STRONG)
#ifndef SVM_WB
			pgt->entries[index] |= PG_SVM_STRONG|PG_PWT;
#else
			pgt->entries[index] |= PG_SVM;
#endif
		if (flags & MAP_SVM_LAZYRELEASE)
			pgt->entries[index] |= PG_SVM_LAZYRELEASE|PG_PWT;
		if (flags & MAP_SVM_INIT)
			pgt->entries[index] |= PG_SVM_INIT;

		if (flags & MAP_NO_ACCESS)
			pgt->entries[index] &= ~PG_PRESENT;

		if (flags & MAP_WT)
			pgt->entries[index] |= PG_PWT;

		if (flags & MAP_USER_SPACE)
			atomic_int32_inc(&task->user_usage);

		tlb_flush_one_page(viraddr);
	}

out:
	if (flags & MAP_KERNEL_SPACE)
		spinlock_unlock(&kslock);
	else
		spinlock_irqsave_unlock(&task->page_lock);

	return ret;
}
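
/*
 * Typical uses of map_region(), shown only as an illustrative sketch and not
 * part of the original sources (the addresses are hypothetical): pass
 * viraddr = 0 to let vm_alloc() pick a free virtual range, or pass an explicit
 * address for an identity mapping as arch_paging_init() does below.
 */
#if 0
static void map_region_examples(void)
{
	// map one uncached device page at a kernel address chosen by vm_alloc()
	size_t mmio = map_region(0, 0xFEE00000, 1, MAP_KERNEL_SPACE|MAP_NO_CACHE);

	// identity mapping: virtual == physical, e.g. for code that also runs with paging disabled
	map_region(0x00008000, 0x00008000, 1, MAP_KERNEL_SPACE);

	(void) mmio;
}
#endif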
int change_page_permissions(size_t start, size_t end, uint32_t flags)
{
	uint32_t index1, index2, newflags;
	size_t viraddr = start & 0xFFFFF000;
	size_t phyaddr;
	page_map_t* pgt;
	page_map_t* pgd;
	task_t* task = per_core(current_task);

	if (BUILTIN_EXPECT(!paging_enabled, 0))
		return -EINVAL;

	pgd = per_core(current_task)->page_map;
	if (BUILTIN_EXPECT(!pgd, 0))
		return -EINVAL;

	spinlock_irqsave_lock(&task->page_lock);

	while (viraddr < end)
	{
		index1 = viraddr >> 22;
		index2 = (viraddr >> 12) & 0x3FF;

		while ((viraddr < end) && (index2 < 1024)) {
			pgt = (page_map_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & PAGE_MASK);
			if (pgt && pgt->entries[index2]) {
				phyaddr = pgt->entries[index2] & PAGE_MASK;
				newflags = pgt->entries[index2] & 0xFFF;	// get old flags

				if (!(newflags & PG_SVM_INIT)) {
					if ((newflags & PG_SVM_STRONG) && !(newflags & PG_PRESENT) && (flags & (VMA_READ|VMA_WRITE) && !(flags & VMA_NOACCESS)))
						newflags |= PG_PRESENT;
					else if ((newflags & PG_SVM_STRONG) && (newflags & PG_PRESENT) && (flags & VMA_NOACCESS))
						newflags &= ~PG_PRESENT;
				}

				// update flags
				if (!(flags & VMA_WRITE)) {
					newflags &= ~PG_RW;
#ifdef CONFIG_ROCKCREEK
					if (newflags & (PG_SVM_STRONG|PG_SVM_LAZYRELEASE))
						newflags &= ~PG_MPE;
#endif
				} else {
					newflags |= PG_RW;
#ifdef CONFIG_ROCKCREEK
					if (newflags & (PG_SVM_STRONG|PG_SVM_LAZYRELEASE))
						newflags |= PG_MPE;
#endif
				}

				pgt->entries[index2] = (newflags & 0xFFF) | (phyaddr & PAGE_MASK);

				tlb_flush_one_page(viraddr);
			}

			index2++;
			viraddr += PAGE_SIZE;
		}
	}

	spinlock_irqsave_unlock(&task->page_lock);

	return 0;
}

/*
 * Use the first-fit algorithm to find a valid address range
 *
 * TODO: O(n) => bad performance, we need a better approach
 */
size_t vm_alloc(uint32_t npages, uint32_t flags)
{
	task_t* task = per_core(current_task);
	uint32_t index1, index2, j;
	size_t viraddr, i, ret = 0;
	size_t start, end;
	page_map_t* pgt;

	if (BUILTIN_EXPECT(!task || !task->page_map || !paging_enabled, 0))
		return 0;

	if (flags & MAP_KERNEL_SPACE) {
		start = (((size_t) &kernel_end) + PAGE_SIZE) & PAGE_MASK;
		end = (KERNEL_SPACE - 2*PAGE_SIZE) & PAGE_MASK;	// we need 1 page for our PGTs
	} else {
		start = KERNEL_SPACE & PAGE_MASK;
		end = PAGE_MASK;
	}

	if (BUILTIN_EXPECT(!npages, 0))
		return 0;

	if (flags & MAP_KERNEL_SPACE)
		spinlock_lock(&kslock);
	else
		spinlock_irqsave_lock(&task->page_lock);

	viraddr = i = start;
	j = 0;
	do {
		index1 = i >> 22;
		index2 = (i >> 12) & 0x3FF;

		pgt = (page_map_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & PAGE_MASK);
		if (!pgt || !(pgt->entries[index2])) {
			i += PAGE_SIZE;
			j++;
		} else {
			// restart the search
			j = 0;
			viraddr = i + PAGE_SIZE;
			i = i + PAGE_SIZE;
		}
	} while((j < npages) && (i <= end));

	if ((j >= npages) && (viraddr < end))
		ret = viraddr;

	if (flags & MAP_KERNEL_SPACE)
		spinlock_unlock(&kslock);
	else
		spinlock_irqsave_unlock(&task->page_lock);

	return ret;
}

int unmap_region(size_t viraddr, uint32_t npages)
{
	task_t* task = per_core(current_task);
	uint32_t i;
	uint32_t index1, index2;
	page_map_t* pgt;

	if (BUILTIN_EXPECT(!task || !task->page_map || !paging_enabled, 0))
		return -EINVAL;

	if (viraddr <= KERNEL_SPACE)
		spinlock_lock(&kslock);
	else
		spinlock_irqsave_lock(&task->page_lock);

	for(i=0; i<npages; i++, viraddr+=PAGE_SIZE)
	{
		index1 = viraddr >> 22;
		index2 = (viraddr >> 12) & 0x3FF;

		pgt = (page_map_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & PAGE_MASK);
		if (!pgt)
			continue;
		pgt->entries[index2] &= ~PG_PRESENT;

		if (viraddr > KERNEL_SPACE)
			atomic_int32_dec(&task->user_usage);

		tlb_flush_one_page(viraddr);
	}

	if (viraddr <= KERNEL_SPACE)
		spinlock_unlock(&kslock);
	else
		spinlock_irqsave_unlock(&task->page_lock);

	return 0;
}
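
/*
 * Note the difference between unmap_region() above and vm_free() below:
 * unmap_region() only clears PG_PRESENT, so the entry keeps its frame bits and
 * the virtual range stays reserved for the task, while vm_free() zeroes the
 * whole entry and thereby returns the range to the first-fit search used by
 * vm_alloc().
 */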
int vm_free(size_t viraddr, uint32_t npages)
{
	task_t* task = per_core(current_task);
	uint32_t i;
	uint32_t index1, index2;
	page_map_t* pgt;

	if (BUILTIN_EXPECT(!task || !task->page_map || !paging_enabled, 0))
		return -EINVAL;

	if (viraddr <= KERNEL_SPACE)
		spinlock_lock(&kslock);
	else
		spinlock_irqsave_lock(&task->page_lock);

	for(i=0; i<npages; i++, viraddr+=PAGE_SIZE)
	{
		index1 = viraddr >> 22;
		index2 = (viraddr >> 12) & 0x3FF;

		pgt = (page_map_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & PAGE_MASK);
		if (!pgt)
			continue;
		pgt->entries[index2] = 0;

		tlb_flush_one_page(viraddr);
	}

	if (viraddr <= KERNEL_SPACE)
		spinlock_unlock(&kslock);
	else
		spinlock_irqsave_unlock(&task->page_lock);

	return 0;
}

int print_paging_tree(size_t viraddr)
{
	task_t* task = per_core(current_task);
	uint32_t index1, index2;
	page_map_t* pgd = NULL;
	page_map_t* pgt = NULL;

	if (BUILTIN_EXPECT(!viraddr, 0))
		return -EINVAL;

	index1 = viraddr >> 22;
	index2 = (viraddr >> 12) & 0x3FF;

	spinlock_irqsave_lock(&task->page_lock);

	kprintf("Paging dump of address 0x%x\n", viraddr);
	pgd = task->page_map;
	kprintf("\tPage directory entry %u: ", index1);
	if (pgd) {
		kprintf("0x%x\n", pgd->entries[index1]);
		pgt = (page_map_t*) (pgd->entries[index1] & PAGE_MASK);
	} else
		kputs("invalid page directory\n");

	// convert the physical address to a virtual one
	if (paging_enabled && pgt)
		pgt = (page_map_t*) (KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE);

	kprintf("\tPage table entry %u: ", index2);
	if (pgt)
		kprintf("0x%x\n", pgt->entries[index2]);
	else
		kputs("invalid page table\n");

	spinlock_irqsave_unlock(&task->page_lock);

	return 0;
}
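
/*
 * Page fault handler: faults inside the current task's heap are resolved by
 * demand paging (allocate a frame, map it with MAP_USER_SPACE and zero it).
 * On the SCC (CONFIG_ROCKCREEK), faults on PG_SVM_INIT or PG_SVM_STRONG pages
 * are forwarded to the SVM subsystem. Everything else falls through to the
 * default handler, which dumps the register state and aborts.
 */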
static void pagefault_handler(struct state *s)
{
	task_t* task = per_core(current_task);
	size_t viraddr = read_cr2();
	size_t phyaddr;
#ifdef CONFIG_ROCKCREEK
	uint32_t index1, index2;
	page_map_t* pgd = task->page_map;
	page_map_t* pgt = NULL;
#endif

	if ((viraddr >= task->start_heap) && (viraddr <= task->end_heap) && (viraddr > KERNEL_SPACE)) {
		viraddr = viraddr & PAGE_MASK;

		phyaddr = get_page();
		if (BUILTIN_EXPECT(!phyaddr, 0))
			goto default_handler;

		if (map_region(viraddr, phyaddr, 1, MAP_USER_SPACE) == viraddr) {
			memset((void*) viraddr, 0x00, PAGE_SIZE);
			return;
		}

		kprintf("Could not map 0x%x at 0x%x\n", phyaddr, viraddr);
		put_page(phyaddr);
	}

#ifdef CONFIG_ROCKCREEK
	// does our SVM system need to handle this page fault?
	index1 = viraddr >> 22;
	index2 = (viraddr >> 12) & 0x3FF;
	if (!pgd || !(pgd->entries[index1] & PAGE_MASK))
		goto default_handler;
	pgt = (page_map_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & PAGE_MASK);
	if (!pgt || !(pgt->entries[index2]))
		goto default_handler;
	if (pgt->entries[index2] & PG_SVM_INIT) {
		if (BUILTIN_EXPECT(!svm_alloc_page(viraddr, pgt), 1))
			return;
		else
			goto default_handler;
	}
	if (pgt->entries[index2] & PG_SVM_STRONG)
		if (BUILTIN_EXPECT(!svm_access_request(viraddr), 1))
			return;
#endif

default_handler:
	kprintf("PAGE FAULT: Task %u got page fault at %p (irq %d, cs:eip 0x%x:0x%x)\n", task->id, viraddr, s->int_no, s->cs, s->eip);
	kprintf("Register state: eax = 0x%x, ebx = 0x%x, ecx = 0x%x, edx = 0x%x, edi = 0x%x, esi = 0x%x, ebp = 0x%x, esp = 0x%x, ds = 0x%x, es = 0x%x\n",
		s->eax, s->ebx, s->ecx, s->edx, s->edi, s->esi, s->ebp, s->esp, s->ds, s->es);

	irq_enable();
	abort();
}

int arch_paging_init(void)
{
	uint32_t i, npages, index1, index2;
	page_map_t* pgt;
	size_t viraddr;

	// replace the default pagefault handler
	irq_uninstall_handler(14);
	irq_install_handler(14, pagefault_handler);

	// create a page table to reference the other page tables
	pgt = &pgt_container;

	// map this table at the end of the kernel space
	viraddr = KERNEL_SPACE - PAGE_SIZE;
	index1 = viraddr >> 22;
	index2 = (viraddr >> 12) & 0x3FF;

	// now, we create a self reference
	per_core(current_task)->page_map->entries[index1] = (((size_t) pgt) & PAGE_MASK)|KERN_TABLE;
	pgt->entries[index2] = ((size_t) pgt & PAGE_MASK)|KERN_PAGE;

	// create the other PGTs for the kernel space
	// (the loop body was reconstructed; it fills the directory from boot_pgt)
	for(i=0; i<KERNEL_SPACE/(1024*PAGE_SIZE)-1; i++) {
		size_t phyaddr = (size_t) (boot_pgt+i);

		memset((void*) phyaddr, 0x00, sizeof(page_map_t));
		per_core(current_task)->page_map->entries[i] = (phyaddr & PAGE_MASK)|KERN_TABLE;
		pgt->entries[i] = (phyaddr & PAGE_MASK)|KERN_PAGE;
	}

	/*
	 * Set the page table and page directory entries for the kernel.
	 * We map the kernel's physical address to the same virtual address.
	 */
	npages = ((size_t) &kernel_end - (size_t) &kernel_start) >> PAGE_SHIFT;
	if ((size_t)&kernel_end & (PAGE_SIZE-1))
		npages++;
	map_region((size_t)&kernel_start, (size_t)&kernel_start, npages, MAP_KERNEL_SPACE);

#if MAX_CORES > 1
	// reserve a page for the SMP boot code
	if (!map_region(SMP_SETUP_ADDR, SMP_SETUP_ADDR, 1, MAP_KERNEL_SPACE|MAP_NO_CACHE)) {
		kputs("could not reserve page for smp boot code\n");
		return -ENOMEM;
	}
#endif

#ifdef CONFIG_VGA
	// map the video memory into the kernel space
	map_region(VIDEO_MEM_ADDR, VIDEO_MEM_ADDR, 1, MAP_KERNEL_SPACE|MAP_NO_CACHE);
#endif

#ifdef CONFIG_MULTIBOOT
	// map mb_info into the kernel space
	if (mb_info)
		map_region((size_t) mb_info & PAGE_MASK, (size_t) mb_info & PAGE_MASK, 1, MAP_KERNEL_SPACE);

#if 0
	// map reserved memory regions into the kernel space
	if (mb_info && (mb_info->flags & MULTIBOOT_INFO_MEM_MAP)) {
		multiboot_memory_map_t* mmap = (multiboot_memory_map_t*) mb_info->mmap_addr;
		multiboot_memory_map_t* mmap_end = (void*) ((size_t) mb_info->mmap_addr + mb_info->mmap_length);

		while (mmap < mmap_end) {
			if (mmap->type != MULTIBOOT_MEMORY_AVAILABLE) {
				npages = mmap->len / PAGE_SIZE;
				if ((mmap->addr+mmap->len) % PAGE_SIZE)
					npages++;
				map_region(mmap->addr, mmap->addr, npages, MAP_KERNEL_SPACE|MAP_NO_CACHE);
			}
			mmap++;
		}
	}
#endif

	/*
	 * Modules like the init ramdisk are already loaded.
	 * Therefore, we map these modules into the kernel space.
	 */
	if (mb_info && (mb_info->flags & MULTIBOOT_INFO_MODS)) {
		multiboot_module_t* mmodule = (multiboot_module_t*) ((size_t) mb_info->mods_addr);

		npages = mb_info->mods_count * sizeof(multiboot_module_t) >> PAGE_SHIFT;
		if (mb_info->mods_count * sizeof(multiboot_module_t) & (PAGE_SIZE-1))
			npages++;
		map_region((size_t) mb_info->mods_addr, (size_t) mb_info->mods_addr, npages, MAP_KERNEL_SPACE);

		for(i=0; i<mb_info->mods_count; i++, mmodule++) {
			// map the physical address to the same virtual address
			npages = (mmodule->mod_end - mmodule->mod_start) >> PAGE_SHIFT;
			if (mmodule->mod_end & (PAGE_SIZE-1))
				npages++;
			kprintf("Map module %s at 0x%x (%u pages)\n", (char*) mmodule->cmdline, mmodule->mod_start, npages);
			map_region((size_t) mmodule->mod_start, (size_t) mmodule->mod_start, npages, MAP_KERNEL_SPACE);
		}
	}
#endif

#ifdef CONFIG_ROCKCREEK
	// map the SCC's bootinfo
	viraddr = map_region(SCC_BOOTINFO, SCC_BOOTINFO, 1, MAP_KERNEL_SPACE);
	kprintf("Map SCC's bootinfo at 0x%x\n", viraddr);

	// map the SCC's configuration registers
	viraddr = map_region(CRB_X0_Y0, CRB_X0_Y0, (CRB_OWN-CRB_X0_Y0+16*1024*1024) >> PAGE_SHIFT, MAP_KERNEL_SPACE|MAP_NO_CACHE);
	kprintf("Map configuration registers at 0x%x\n", viraddr);

	// map the SCC's message passing buffers
	viraddr = map_region(MPB_X0_Y0, MPB_X0_Y0, (MPB_OWN-MPB_X0_Y0+16*1024*1024) >> PAGE_SHIFT, MAP_KERNEL_SPACE|MAP_MPE);
	kprintf("Map message passing buffers at 0x%x\n", viraddr);

	// map the FPGA registers
	viraddr = map_region(FPGA_BASE, FPGA_BASE, 0x10000 >> PAGE_SHIFT, MAP_KERNEL_SPACE|MAP_NO_CACHE);
	kprintf("Map FPGA registers at 0x%x\n", viraddr);
#endif
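
	/*
	 * All kernel mappings are now prepared in boot_pgd, so paging can be
	 * switched on: CR3 receives the address of the page directory (its
	 * physical and virtual addresses are identical here because the kernel
	 * is identity mapped), and setting bit 31 (PG) in CR0 activates paging.
	 */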
	// enable paging
	write_cr3((uint32_t) &boot_pgd);
	i = read_cr0();
	i = i | (1 << 31);
	write_cr0(i);
	paging_enabled = 1;

#ifdef CONFIG_ROCKCREEK
	// map the initial ramdisk
	npages = bootinfo->size >> PAGE_SHIFT;
	if (bootinfo->size & (PAGE_SIZE-1))
		npages++;
	viraddr = map_region(0, bootinfo->addr, npages, MAP_KERNEL_SPACE);
	kprintf("Map initrd from 0x%x to 0x%x (size %u bytes)\n", bootinfo->addr, viraddr, bootinfo->size);
	bootinfo->addr = viraddr;
#endif

	// we turned on paging => now, we are able to register our task
	register_task();

	// map the APIC registers into the kernel address space
	map_apic();

	return 0;
}