- huge commit:
  - enable paging support
  - redesign of the APIC code

  TODO:
  - Currently, we are not able to start user-level applications.
  - The RTL8139 driver no longer works; perhaps there is a bug in the output function.
  - The APIC code doesn't work on all systems, so it is currently disabled.

  git-svn-id: http://svn.lfbs.rwth-aachen.de/svn/scc/trunk/MetalSVM@326 315a16e6-25f9-4109-90ae-ca3045a26c18
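For orientation, the core of the new paging support (see arch_paging_init in arch/x86/mm/page.c below) is loading the boot page directory into CR3 and setting the PG bit in CR0. A minimal sketch of that sequence, assuming an identity-mapped kernel; the helper name enable_paging is an illustration, not part of this commit:

    /* Sketch only: mirrors the enable-paging steps used in arch_paging_init().
     * pgd_phys is the physical address of the boot page directory. */
    static inline void enable_paging(uint32_t pgd_phys)
    {
            uint32_t cr0;

            asm volatile("mov %0, %%cr3" : : "r"(pgd_phys)); /* install page directory */
            asm volatile("mov %%cr0, %0" : "=r"(cr0));
            cr0 |= (1 << 31);                                 /* set CR0.PG to enable paging */
            asm volatile("mov %0, %%cr0" : : "r"(cr0));
    }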
This commit is contained in:
parent
d4ac31f5bf
commit
45219bce2e
36 changed files with 1063 additions and 200 deletions
|
@ -1,4 +1,4 @@
|
|||
SUBDIRS = kernel #lib
|
||||
SUBDIRS = kernel mm #lib
|
||||
|
||||
default:
|
||||
for i in $(SUBDIRS); do $(MAKE) -C $$i default; done
|
||||
|
|
|
@ -26,6 +26,49 @@
|
|||
extern "C" {
|
||||
#endif
|
||||
|
||||
#define MP_FLT_SIGNATURE 0x5f504d5f
|
||||
|
||||
#define APIC_ID 0x0020 // Local APIC ID Register
|
||||
#define APIC_VERSION 0x0030 // Local APIC Version Register
|
||||
#define APIC_TPR 0x0080 // Task Priority Register
|
||||
#define APIC_EOI 0x00B0 // EOI Register
|
||||
#define APIC_SVR 0x00F0 // Spurious Interrupt Vector Register
|
||||
#define APIC_ICR1 0x0300 // Interrupt Command Register [0-31]
|
||||
#define APIC_ICR2 0x0310 // Interrupt Command Register [32-63]
|
||||
#define APIC_LVT_T 0x0320 // LVT Timer Register
|
||||
#define APIC_LVT_PMC 0x0340 // LVT Performance Monitoring Counters Register
|
||||
#define APIC_LINT0 0x0350 // LVT LINT0 Register
|
||||
#define APIC_LINT1 0x0360 // LVT LINT1 Register
|
||||
#define APIC_LVT_ER 0x0370 // LVT Error Register
|
||||
#define APIC_ICR 0x0380 // Initial Count Register
|
||||
#define APIC_CCR 0x0390 // Current Count Register
|
||||
#define APIC_DCR 0x03E0 // Divide Configuration Register
|
||||
|
||||
#define IOAPIC_REG_ID 0x0000 // Register index: ID
|
||||
#define IOAPIC_REG_VER 0x0001 // Register index: version
|
||||
#define IOAPIC_REG_TABLE 0x0010 // Redirection table base
|
||||
|
||||
#define APIC_DEST_SELF 0x40000
|
||||
#define APIC_DEST_ALLINC 0x80000
|
||||
#define APIC_DEST_ALLBUT 0xC0000
|
||||
#define APIC_ICR_RR_MASK 0x30000
|
||||
#define APIC_ICR_RR_INVALID 0x00000
|
||||
#define APIC_ICR_RR_INPROG 0x10000
|
||||
#define APIC_ICR_RR_VALID 0x20000
|
||||
#define APIC_INT_LEVELTRIG 0x08000
|
||||
#define APIC_INT_ASSERT 0x04000
|
||||
#define APIC_ICR_BUSY 0x01000
|
||||
#define APIC_DEST_LOGICAL 0x00800
|
||||
#define APIC_DM_FIXED 0x00000
|
||||
#define APIC_DM_LOWEST 0x00100
|
||||
#define APIC_DM_SMI 0x00200
|
||||
#define APIC_DM_REMRD 0x00300
|
||||
#define APIC_DM_NMI 0x00400
|
||||
#define APIC_DM_INIT 0x00500
|
||||
#define APIC_DM_STARTUP 0x00600
|
||||
#define APIC_DM_EXTINT 0x00700
|
||||
#define APIC_VECTOR_MASK 0x000FF
|
||||
|
||||
/* MP Floating Pointer Structure */
|
||||
typedef struct {
|
||||
uint32_t signature;
|
||||
|
|
|
@ -61,8 +61,6 @@ typedef struct {
|
|||
#error Too many GDT entries!
|
||||
#endif
|
||||
|
||||
//extern gdt_entry_t gdt[GDT_ENTRIES];
|
||||
|
||||
void gdt_install(void);
|
||||
|
||||
#ifdef __cplusplus
|
||||
|
|
108
arch/x86/include/asm/page.h
Normal file
|
@ -0,0 +1,108 @@
|
|||
/*
|
||||
* Copyright 2010 Stefan Lankes, Chair for Operating Systems,
|
||||
* RWTH Aachen University
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
* This file is part of MetalSVM.
|
||||
*/
|
||||
|
||||
/*
|
||||
* Defines the interface for and structures relating to paging.
|
||||
*/
|
||||
|
||||
#ifndef __ARCH_PAGE_H__
|
||||
#define __ARCH_PAGE_H__
|
||||
|
||||
#include <metalsvm/stddef.h>
|
||||
#include <metalsvm/tasks_types.h>
|
||||
#include <metalsvm/stdlib.h>
|
||||
|
||||
#define _PAGE_BIT_PRESENT 0 /* is present */
|
||||
#define _PAGE_BIT_RW 1 /* writeable */
|
||||
#define _PAGE_BIT_USER 2 /* userspace addressable */
|
||||
#define _PAGE_BIT_PWT 3 /* page write through */
|
||||
#define _PAGE_BIT_PCD 4 /* page cache disabled */
|
||||
#define _PAGE_BIT_ACCESSED 5 /* was accessed (raised by CPU) */
|
||||
#define _PAGE_BIT_DIRTY 6 /* was written to (raised by CPU) */
|
||||
#define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
|
||||
#define _PAGE_BIT_PAT 7 /* on 4KB pages */
|
||||
#define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
|
||||
#define _PAGE_BIT_RESERVED 9 /* mark a virtual address range as reserved */
|
||||
#define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
|
||||
|
||||
#define PG_PRESENT (1 << _PAGE_BIT_PRESENT)
|
||||
#define PG_RW (1 << _PAGE_BIT_RW)
|
||||
#define PG_USER (1 << _PAGE_BIT_USER)
|
||||
#define PG_PWT (1 << _PAGE_BIT_PWT)
|
||||
#define PG_PCD (1 << _PAGE_BIT_PCD)
|
||||
#define PG_ACCESSED (1 << _PAGE_BIT_ACCESSED)
|
||||
#define PG_DIRTY (1 << _PAGE_BIT_DIRTY)
|
||||
#define PG_PSE (1 << _PAGE_BIT_PSE)
|
||||
#define PG_GLOBAL (1 << _PAGE_BIT_GLOBAL)
|
||||
#define PG_RESERVED (1 << _PAGE_BIT_RESERVED)
|
||||
#define PG_PAT (1 << _PAGE_BIT_PAT)
|
||||
#define PG_PAT_LARGE (1 << _PAGE_BIT_PAT_LARGE)
|
||||
|
||||
#define KERN_TABLE (PG_PRESENT|PG_RW|PG_ACCESSED|PG_DIRTY)
|
||||
#define USER_TABLE (PG_PRESENT|PG_RW|PG_ACCESSED|PG_DIRTY|PG_USER)
|
||||
#define KERN_PAGE (PG_PRESENT|PG_RW|PG_GLOBAL)
|
||||
#define USER_PAGE (PG_PRESENT|PG_RW|PG_USER)
|
||||
|
||||
typedef struct page_table
|
||||
{
|
||||
uint32_t entries[1024];
|
||||
} page_table_t __attribute__ ((aligned (4096)));
|
||||
|
||||
typedef struct page_dir
|
||||
{
|
||||
uint32_t entries[1024];
|
||||
} page_dir_t __attribute__ ((aligned (4096)));
|
||||
|
||||
/*
|
||||
* ATTENTION: Before you use one of these functions, you have to guarantee that the task has
|
||||
* exclusive access to the PGD directory => use the PGD lock
|
||||
*/
|
||||
|
||||
/*
|
||||
* Converts a virtual address to a physical address
|
||||
*/
|
||||
size_t virt_to_phys(task_t*, size_t);
|
||||
|
||||
/*
|
||||
* Allocates a range of npages in the virtual address space
|
||||
*/
|
||||
size_t vm_alloc(task_t* task, uint32_t npages, uint32_t flags);
|
||||
|
||||
/*
|
||||
* Frees a range in the virtual address space
|
||||
*/
|
||||
int vm_free(task_t* task, size_t addr, uint32_t npages);
|
||||
|
||||
/*
|
||||
* Maps a physical memory region at a specific virtual address.
|
||||
* If the virtual address is zero, this function allocates a valid virtual address on demand.
|
||||
*/
|
||||
size_t map_region(task_t* task, size_t viraddr, size_t phyaddr, uint32_t pages, uint32_t type);
|
||||
|
||||
/*
|
||||
* Sets up the environment and enables paging.
|
||||
*/
|
||||
int arch_paging_init(void);
|
||||
|
||||
/*
|
||||
* Sets up a kernel task with a valid entry in the kernel's page directory
|
||||
*/
|
||||
int get_kernel_pgd(task_t* task);
|
||||
|
||||
#endif
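A hypothetical usage sketch of the interface declared above; the helper name, the device address, and the error handling are illustrations, not part of this commit. It mirrors how apic.c below maps the local APIC page (MAP_KERNEL_SPACE and MAP_NO_CACHE come from metalsvm/stdlib.h, and the task pointer would typically be per_core(current_task)):

    #include <metalsvm/page.h>
    #include <metalsvm/stdlib.h>
    #include <metalsvm/tasks_types.h>

    /* Identity-map one uncached kernel page for a device MMIO window and
     * verify the translation with virt_to_phys(). Sketch only. */
    static int map_mmio_page(task_t* task, size_t mmio_phys)
    {
            size_t viraddr = map_region(task, mmio_phys, mmio_phys, 1,
                                        MAP_KERNEL_SPACE|MAP_NO_CACHE);
            if (!viraddr)
                    return -1;

            return (virt_to_phys(task, viraddr) == mmio_phys) ? 0 : -1;
    }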
|
|
@ -86,6 +86,39 @@ inline static uint64_t rdmsr(uint32_t msr) {
|
|||
return ((uint64_t)high << 32) | low;
|
||||
}
|
||||
|
||||
static inline uint32_t read_cr0(void) {
|
||||
uint32_t val;
|
||||
asm volatile("mov %%cr0, %0" : "=r"(val));
|
||||
return val;
|
||||
}
|
||||
|
||||
static inline void write_cr0(uint32_t val) {
|
||||
asm volatile("mov %0, %%cr0" : : "r"(val));
|
||||
}
|
||||
|
||||
static inline uint32_t read_cr3(void) {
|
||||
uint32_t val;
|
||||
asm volatile("mov %%cr3, %0" : "=r"(val));
|
||||
return val;
|
||||
}
|
||||
|
||||
static inline void write_cr3(uint32_t val) {
|
||||
asm volatile("mov %0, %%cr3" : : "r"(val));
|
||||
}
|
||||
|
||||
static inline void tlb_flush_one_page(uint32_t addr)
|
||||
{
|
||||
asm volatile("invlpg (%0)" : : "r"(addr) : "memory");
|
||||
}
|
||||
|
||||
static inline void tlb_flush(void)
|
||||
{
|
||||
uint32_t val = read_cr3();
|
||||
|
||||
if (val)
|
||||
write_cr3(val);
|
||||
}
|
||||
|
||||
/*
|
||||
* invalidate (not flush!) lines in L1 that map to MPB lines
|
||||
*/
|
||||
|
@ -106,15 +139,20 @@ inline static int system_init(void)
|
|||
scc_init();
|
||||
#endif
|
||||
gdt_install();
|
||||
apic_init();
|
||||
//apic_init();
|
||||
#ifdef CONFIG_PCI
|
||||
pci_init();
|
||||
#endif
|
||||
return 0;
|
||||
}
|
||||
|
||||
uint32_t detect_cpu_frequency(void);
|
||||
uint32_t get_cpu_frequency(void);
|
||||
void udelay(uint32_t usecs);
|
||||
|
||||
inline static int system_calibration(void)
|
||||
{
|
||||
detect_cpu_frequency();
|
||||
apic_calibration();
|
||||
|
||||
return 0;
|
||||
|
|
|
@ -58,6 +58,9 @@ struct state {
|
|||
/*unsigned int eip, cs, eflags, useresp, ss;*/ /* pushed by the processor automatically */
|
||||
};
|
||||
|
||||
uint32_t apic_cpu_id(void);
|
||||
#define LOGICAL_CPUID apic_cpu_id()
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -49,8 +49,6 @@ typedef struct {
|
|||
uint16_t trace, bitmap;
|
||||
} __attribute__ ((packed)) tss_t;
|
||||
|
||||
//extern tss_t task_state_segments[MAX_TASKS];
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
C_source = scc.c gdt.c kb.c timer.c irq.c isrs.c idt.c vga.c multiboot.c apic.c pci.c
|
||||
C_source = scc.c gdt.c kb.c timer.c irq.c isrs.c idt.c vga.c multiboot.c apic.c pci.c processor.c
|
||||
ASM_source = entry.asm string.asm
|
||||
|
||||
OBJS += $(patsubst %.c, %.o, $(filter %.c, $(C_source)))
|
||||
|
|
|
@ -23,30 +23,20 @@
|
|||
#include <metalsvm/errno.h>
|
||||
#include <metalsvm/processor.h>
|
||||
#include <metalsvm/time.h>
|
||||
#include <metalsvm/init.h>
|
||||
#include <metalsvm/page.h>
|
||||
#include <metalsvm/spinlock.h>
|
||||
#include <asm/irq.h>
|
||||
#include <asm/irqflags.h>
|
||||
#include <asm/apic.h>
|
||||
#include <asm/multiboot.h>
|
||||
|
||||
#define MP_FLT_SIGNATURE 0x5f504d5f
|
||||
|
||||
#define APIC_ID 0x0020 // Local APIC ID Register
|
||||
#define APIC_VERSION 0x0030 // Local APIC Version Register
|
||||
#define APIC_TPR 0x0080 // Task Priority Register
|
||||
#define APIC_EOI 0x00B0 // EOI Register
|
||||
#define APIC_SVR 0x00F0 // Spurious Interrupt Vector Register
|
||||
#define APIC_LVT_T 0x0320 // LVT Timer Register
|
||||
#define APIC_LVT_PMC 0x0340 // LVT Performance Monitoring Counters Register
|
||||
#define APIC_LINT0 0x0350 // LVT LINT0 Register
|
||||
#define APIC_LINT1 0x0360 // LVT LINT1 Register
|
||||
#define APIC_LVT_ER 0x0370 // LVT Error Register
|
||||
#define APIC_ICR 0x0380 // Initial Count Register
|
||||
#define APIC_CCR 0x0390 // Current Count Register
|
||||
#define APIC_DCR 0x03E0 // Divide Configuration Register
|
||||
|
||||
#define IOAPIC_REG_ID 0x0000 // Register index: ID
|
||||
#define IOAPIC_REG_VER 0x0001 // Register index: version
|
||||
#define IOAPIC_REG_TABLE 0x0010 // Redirection table base
|
||||
/* disable optimization for the following functions */
|
||||
//static int apic_send_ipi(uint32_t id, uint32_t mode, uint32_t vector) __attribute__((optimize(0)));
|
||||
static int wakeup_all_aps(uint32_t start_eip) __attribute__((optimize(0)));
|
||||
int apic_calibration(void) __attribute__((optimize(0)));
|
||||
//int ioapic_intoff(uint8_t irq, uint8_t apicid) __attribute__((optimize(0)));
|
||||
//int ioapic_inton(uint8_t irq, uint8_t apicid) __attribute__((optimize(0)));
|
||||
|
||||
// IO APIC MMIO structure: write reg, then read or write data.
|
||||
typedef struct {
|
||||
|
@ -63,11 +53,16 @@ static uint32_t lapic = 0;
|
|||
static volatile ioapic_t* ioapic = NULL;
|
||||
static uint32_t ncores = 1;
|
||||
static uint8_t irq_redirect[16] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0xA, 0xB, 0xC, 0xD, 0xE, 0xF};
|
||||
#if MAX_CORES > 1
|
||||
static uint8_t boot_code[] = {0xE9, 0x1F, 0x00, 0x90, 0x17, 0x00, 0x0A, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x9A, 0xCF, 0x00, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x92, 0xCF, 0x00, 0x0F, 0x01, 0x16, 0x04, 0x00, 0x0F, 0x20, 0xC0, 0x0C, 0x01, 0x0F, 0x22, 0xC0, 0x66, 0xEA, 0x37, 0x00, 0x01, 0x00, 0x08, 0x00, 0x31, 0xC0, 0x66, 0xB8, 0x10, 0x00, 0x8E, 0xD8, 0x8E, 0xC0, 0x8E, 0xE0, 0x8E, 0xE8, 0x8E, 0xD0, 0xB8, 0x02, 0xD0, 0xBA, 0x02, 0x31, 0xDB, 0xEA, 0x00, 0x10, 0x10, 0x00, 0x08, 0x00};
|
||||
#endif
|
||||
static uint8_t initialized = 0;
|
||||
static atomic_int32_t cpu_online = ATOMIC_INIT(1);
|
||||
spinlock_t bootlock = SPINLOCK_INIT;
|
||||
|
||||
static inline uint32_t lapic_read(uint32_t addr)
|
||||
{
|
||||
return *((uint32_t*) (lapic+addr));
|
||||
return *((volatile uint32_t*) (lapic+addr));
|
||||
}
|
||||
|
||||
static inline void lapic_write(uint32_t addr, uint32_t value)
|
||||
|
@ -77,7 +72,7 @@ static inline void lapic_write(uint32_t addr, uint32_t value)
|
|||
* before we write a value to this register
|
||||
*/
|
||||
asm volatile ("movl (%%eax), %%edx; movl %%ebx, (%%eax)" :: "a"(addr+lapic), "b"(value) : "%edx");
|
||||
//*((uint32_t*) (lapic+addr)) = value;
|
||||
//*((volatile uint32_t*) (lapic+addr)) = value;
|
||||
}
|
||||
|
||||
static inline uint32_t ioapic_read(uint32_t reg)
|
||||
|
@ -93,9 +88,98 @@ static inline void ioapic_write(uint32_t reg, uint32_t value)
|
|||
ioapic->data = value;
|
||||
}
|
||||
|
||||
/*
|
||||
* Send an 'End of Interrupt' command to the APIC
|
||||
*/
|
||||
void apic_eoi(void)
|
||||
{
|
||||
if (BUILTIN_EXPECT(lapic, 1))
|
||||
lapic_write(APIC_EOI, 0);
|
||||
}
|
||||
|
||||
uint32_t apic_cpu_id(void)
|
||||
{
|
||||
return ((lapic_read(APIC_ID)) >> 24);
|
||||
if (lapic && initialized)
|
||||
return ((lapic_read(APIC_ID)) >> 24);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int has_apic(void)
|
||||
{
|
||||
return (lapic != 0);
|
||||
}
|
||||
|
||||
int apic_is_enabled(void)
|
||||
{
|
||||
return (lapic && initialized);
|
||||
}
|
||||
|
||||
#if MAX_CORES > 1
|
||||
#if 0
|
||||
static int apic_send_ipi(uint32_t id, uint32_t mode, uint32_t vector)
|
||||
{
|
||||
uint32_t i = 0;
|
||||
|
||||
if(lapic_read(APIC_ICR1) & APIC_ICR_BUSY) {
|
||||
kprintf("ERROR: previous send not complete");
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
/* set destination and data */
|
||||
lapic_write(APIC_ICR2, (id << 24));
|
||||
lapic_write(APIC_ICR1, APIC_INT_ASSERT|mode|vector);
|
||||
|
||||
while((lapic_read(APIC_ICR1) & APIC_ICR_BUSY) && (i < 1000))
|
||||
i++; // wait for it to finish, give up eventualy tho
|
||||
|
||||
return ((lapic_read(APIC_ICR1) & APIC_ICR_BUSY) ? -EIO : 0); // did it fail (still delivering) or succeed ?
|
||||
}
|
||||
#endif
|
||||
|
||||
static int wakeup_all_aps(uint32_t start_eip)
|
||||
{
|
||||
uint32_t i;
|
||||
|
||||
kputs("Wakeup all application processors via IPI\n");
|
||||
|
||||
if(lapic_read(APIC_ICR1) & APIC_ICR_BUSY) {
|
||||
kprintf("ERROR: previous send not complete");
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
vga_puts("Send IPI\n");
|
||||
// send out INIT to all aps
|
||||
lapic_write(APIC_ICR1, APIC_INT_ASSERT|APIC_DEST_ALLBUT|APIC_DM_INIT);
|
||||
udelay(10000);
|
||||
// send out the startup
|
||||
lapic_write(APIC_ICR1, APIC_INT_ASSERT|APIC_DEST_ALLBUT|APIC_DM_STARTUP|(start_eip >> 12));
|
||||
udelay(200);
|
||||
// do it again
|
||||
lapic_write(APIC_ICR1, APIC_INT_ASSERT|APIC_DEST_ALLBUT|APIC_DM_STARTUP|(start_eip >> 12));
|
||||
udelay(200);
|
||||
vga_puts("IPI done...\n");
|
||||
|
||||
i = 0;
|
||||
while((lapic_read(APIC_ICR1) & APIC_ICR_BUSY) && (i < 1000))
|
||||
i++; // wait for it to finish, give up eventually though
|
||||
|
||||
return ((lapic_read(APIC_ICR1) & APIC_ICR_BUSY) ? -EIO : 0); // did it fail (still delivering) or succeed ?
|
||||
}
|
||||
#endif
|
||||
|
||||
int smp_main(void)
|
||||
{
|
||||
#if MAX_CORES > 1
|
||||
vga_puts("JJAJAJAJAJAJA\n");
|
||||
lowlevel_init();
|
||||
atomic_int32_inc(&cpu_online);
|
||||
kputs("JAJAJAJ\n");
|
||||
while(1)
|
||||
|
||||
;
|
||||
#endif
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
#ifndef CONFIG_MULTIBOOT
|
||||
|
@ -113,14 +197,37 @@ static unsigned int* search_apic(unsigned int base, unsigned int limit) {
|
|||
}
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Send an 'End of Interrupt' command to the APIC
|
||||
*/
|
||||
void apic_eoi(void)
|
||||
#if MAX_CORES > 1
|
||||
int smp_init(void)
|
||||
{
|
||||
if (BUILTIN_EXPECT(lapic, 1))
|
||||
lapic_write(APIC_EOI, 0);
|
||||
uint32_t i;
|
||||
int err;
|
||||
|
||||
if (ncores <= 1)
|
||||
return -EINVAL;
|
||||
|
||||
/*
|
||||
* dirty hack: Copy 16bit startup code (see tools/smp_setup.asm)
|
||||
* to a 16bit address. Wake up the other cores via IPI. They start
|
||||
* at this address in real mode, switch to protected mode and finally
|
||||
* they jump to 0x101000 (see smp_start in entry.asm)
|
||||
*/
|
||||
memcpy((void*)0x10000, boot_code, sizeof(boot_code));
|
||||
|
||||
kprintf("size of the boot_code %d\n", sizeof(boot_code));
|
||||
err = wakeup_all_aps(0x10000);
|
||||
if (err)
|
||||
kprintf("Unable to wakeup application processors: %d\n", err);
|
||||
|
||||
i = 0;
|
||||
while((ncores != atomic_int32_read(&cpu_online)) && (i < 1000))
|
||||
i++;
|
||||
|
||||
kprintf("%d cores online\n", atomic_int32_read(&cpu_online));
|
||||
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
/*
|
||||
* detects the timer frequency of the APIC and restart
|
||||
|
@ -137,6 +244,10 @@ int apic_calibration(void)
|
|||
if (!has_apic())
|
||||
return -ENXIO;
|
||||
|
||||
map_region(per_core(current_task), lapic, lapic, 1, MAP_KERNEL_SPACE|MAP_NO_CACHE);
|
||||
if (ioapic)
|
||||
map_region(per_core(current_task), (size_t)ioapic, (size_t)ioapic, 1, MAP_KERNEL_SPACE|MAP_NO_CACHE);
|
||||
|
||||
old = get_clock_tick();
|
||||
|
||||
/* wait for the next time slice */
|
||||
|
@ -179,7 +290,7 @@ int apic_calibration(void)
|
|||
/* wait 3 time slices to determine an ICR */
|
||||
start = rdtsc();
|
||||
do {
|
||||
flush_pipeline();
|
||||
//flush_pipeline();
|
||||
end = rdtsc();
|
||||
ticks = end > start ? end - start : start - end;
|
||||
} while(ticks < 3*scc_info.tile_frequency*1000000 / TIMER_FREQ);
|
||||
|
@ -194,6 +305,10 @@ int apic_calibration(void)
|
|||
kprintf("APIC calibration detects an ICR of 0x%x\n", diff / 3);
|
||||
|
||||
irq_disable();
|
||||
#if MAX_CORES > 1
|
||||
smp_init();
|
||||
#endif
|
||||
|
||||
if (ioapic) {
|
||||
// now, we don't longer need the IOAPIC timer and turn it off
|
||||
ioapic_intoff(0, apic_processors[boot_processor]->id);
|
||||
|
@ -341,7 +456,12 @@ check_lapic:
|
|||
kprintf("Maximum LVT Entry: 0x%x\n", (i >> 16) & 0xFF);
|
||||
kprintf("APIC Version: 0x%x\n", i & 0xFF);
|
||||
|
||||
return 0;
|
||||
if (!((i & 0xFF) >> 4)) {
|
||||
kprintf("Currently, MetalSVM didn't supports extern APICs!\n");
|
||||
goto out;
|
||||
}
|
||||
|
||||
//return 0;
|
||||
|
||||
out:
|
||||
apic_mp = NULL;
|
||||
|
@ -374,7 +494,7 @@ int apic_init(void)
|
|||
lapic_write(APIC_LVT_ER, 0x7E); // connect error to idt entry 126
|
||||
lapic_write(APIC_SVR, 0x17F); // enable the apic and connect to the idt entry 127
|
||||
|
||||
if (ioapic) {
|
||||
if (0) { //ioapic) {
|
||||
// enable timer interrupt
|
||||
ioapic_inton(0, apic_processors[boot_processor]->id);
|
||||
// now lets turn everything else off
|
||||
|
@ -385,16 +505,6 @@ int apic_init(void)
|
|||
return 0;
|
||||
}
|
||||
|
||||
int has_apic(void)
|
||||
{
|
||||
return (lapic != 0);
|
||||
}
|
||||
|
||||
int apic_is_enabled(void)
|
||||
{
|
||||
return ((lapic != 0) && initialized);
|
||||
}
|
||||
|
||||
int ioapic_inton(uint8_t irq, uint8_t apicid)
|
||||
{
|
||||
ioapic_route_t route;
|
||||
|
@ -409,7 +519,11 @@ int ioapic_inton(uint8_t irq, uint8_t apicid)
|
|||
off = irq_redirect[irq]*2;
|
||||
else
|
||||
off = irq*2;
|
||||
|
||||
#if 0
|
||||
route.lower.whole = ioapic_read(IOAPIC_REG_TABLE+1+off);
|
||||
route.dest.upper = ioapic_read(IOAPIC_REG_TABLE+off);
|
||||
route.lower.bitfield.mask = 0; // turn it on (stop masking)
|
||||
#else
|
||||
route.lower.bitfield.dest_mode = 0;
|
||||
route.lower.bitfield.mask = 0;
|
||||
route.dest.physical.physical_dest = apicid; // send to the boot processor
|
||||
|
@ -418,6 +532,7 @@ int ioapic_inton(uint8_t irq, uint8_t apicid)
|
|||
route.lower.bitfield.trigger = 0;
|
||||
route.lower.bitfield.vector = 0x20+irq;
|
||||
route.lower.bitfield.mask = 0; // turn it on (stop masking)
|
||||
#endif
|
||||
|
||||
ioapic_write(IOAPIC_REG_TABLE+off, route.lower.whole);
|
||||
ioapic_write(IOAPIC_REG_TABLE+1+off, route.dest.upper);
|
||||
|
@ -443,6 +558,11 @@ int ioapic_intoff(uint8_t irq, uint8_t apicid)
|
|||
else
|
||||
off = irq*2;
|
||||
|
||||
#if 0
|
||||
route.lower.whole = ioapic_read(IOAPIC_REG_TABLE+1+off);
|
||||
route.dest.upper = ioapic_read(IOAPIC_REG_TABLE+off);
|
||||
route.lower.bitfield.mask = 1; // turn it off (start masking)
|
||||
#else
|
||||
route.lower.bitfield.dest_mode = 0;
|
||||
route.lower.bitfield.mask = 0;
|
||||
route.dest.physical.physical_dest = apicid;
|
||||
|
@ -451,6 +571,7 @@ int ioapic_intoff(uint8_t irq, uint8_t apicid)
|
|||
route.lower.bitfield.trigger = 0;
|
||||
route.lower.bitfield.vector = 0x20+irq;
|
||||
route.lower.bitfield.mask = 1; // turn it off (start masking)
|
||||
#endif
|
||||
|
||||
ioapic_write(IOAPIC_REG_TABLE+off, route.lower.whole);
|
||||
ioapic_write(IOAPIC_REG_TABLE+1+off, route.dest.upper);
|
||||
|
|
|
@ -57,18 +57,25 @@ mboot:
|
|||
SECTION .text
|
||||
ALIGN 4
|
||||
stublet:
|
||||
; Initialize the stack pointer.
|
||||
mov esp, _sys_stack
|
||||
|
||||
; initialize stack pointer.
|
||||
mov esp, _sys_stack-4
|
||||
; enable cache and turn on FPU exceptions
|
||||
mov eax, cr0
|
||||
; enable cache
|
||||
and eax, 0x9fffffff
|
||||
; ...and turn on FPU exceptions
|
||||
or eax, 0x20
|
||||
mov cr0, eax
|
||||
; clears the current pgd entry
|
||||
xor eax, eax
|
||||
mov cr3, eax
|
||||
; interpret multiboot information
|
||||
extern multiboot_init
|
||||
push ebx
|
||||
call multiboot_init
|
||||
add esp, 4
|
||||
|
||||
; This is an endless loop. Make a note of this: Later on, we
|
||||
; will insert an 'extern _main', followed by 'call _main', right
|
||||
; before the 'jmp $'.
|
||||
; jump to the boot processor's C code
|
||||
extern main
|
||||
call main
|
||||
jmp $
|
||||
|
@ -142,7 +149,7 @@ isr0:
|
|||
; cli
|
||||
push byte 0
|
||||
push byte 0
|
||||
jmp isr_common_stub
|
||||
jmp irq_common_stub
|
||||
|
||||
; 1: Debug Exception
|
||||
isr1:
|
||||
|
@ -151,7 +158,7 @@ isr1:
|
|||
; cli
|
||||
push byte 0
|
||||
push byte 1
|
||||
jmp isr_common_stub
|
||||
jmp irq_common_stub
|
||||
|
||||
; 2: Non Maskable Interrupt Exception
|
||||
isr2:
|
||||
|
@ -160,7 +167,7 @@ isr2:
|
|||
; cli
|
||||
push byte 0
|
||||
push byte 2
|
||||
jmp isr_common_stub
|
||||
jmp irq_common_stub
|
||||
|
||||
; 3: Int 3 Exception
|
||||
isr3:
|
||||
|
@ -169,7 +176,7 @@ isr3:
|
|||
; cli
|
||||
push byte 0
|
||||
push byte 3
|
||||
jmp isr_common_stub
|
||||
jmp irq_common_stub
|
||||
|
||||
; 4: INTO Exception
|
||||
isr4:
|
||||
|
@ -178,7 +185,7 @@ isr4:
|
|||
; cli
|
||||
push byte 0
|
||||
push byte 4
|
||||
jmp isr_common_stub
|
||||
jmp irq_common_stub
|
||||
|
||||
; 5: Out of Bounds Exception
|
||||
isr5:
|
||||
|
@ -187,7 +194,7 @@ isr5:
|
|||
; cli
|
||||
push byte 0
|
||||
push byte 5
|
||||
jmp isr_common_stub
|
||||
jmp irq_common_stub
|
||||
|
||||
; 6: Invalid Opcode Exception
|
||||
isr6:
|
||||
|
@ -196,7 +203,7 @@ isr6:
|
|||
; cli
|
||||
push byte 0
|
||||
push byte 6
|
||||
jmp isr_common_stub
|
||||
jmp irq_common_stub
|
||||
|
||||
; 7: Coprocessor Not Available Exception
|
||||
isr7:
|
||||
|
@ -205,7 +212,7 @@ isr7:
|
|||
; cli
|
||||
push byte 0
|
||||
push byte 7
|
||||
jmp isr_common_stub
|
||||
jmp irq_common_stub
|
||||
|
||||
; 8: Double Fault Exception (With Error Code!)
|
||||
isr8:
|
||||
|
@ -213,7 +220,7 @@ isr8:
|
|||
; Therefore, the interrupt flag (IF) is already cleared.
|
||||
; cli
|
||||
push byte 8
|
||||
jmp isr_common_stub
|
||||
jmp irq_common_stub
|
||||
|
||||
; 9: Coprocessor Segment Overrun Exception
|
||||
isr9:
|
||||
|
@ -222,7 +229,7 @@ isr9:
|
|||
; cli
|
||||
push byte 0
|
||||
push byte 9
|
||||
jmp isr_common_stub
|
||||
jmp irq_common_stub
|
||||
|
||||
; 10: Bad TSS Exception (With Error Code!)
|
||||
isr10:
|
||||
|
@ -230,7 +237,7 @@ isr10:
|
|||
; Therefore, the interrupt flag (IF) is already cleared.
|
||||
; cli
|
||||
push byte 10
|
||||
jmp isr_common_stub
|
||||
jmp irq_common_stub
|
||||
|
||||
; 11: Segment Not Present Exception (With Error Code!)
|
||||
isr11:
|
||||
|
@ -238,7 +245,7 @@ isr11:
|
|||
; Therefore, the interrupt flag (IF) is already cleared.
|
||||
; cli
|
||||
push byte 11
|
||||
jmp isr_common_stub
|
||||
jmp irq_common_stub
|
||||
|
||||
; 12: Stack Fault Exception (With Error Code!)
|
||||
isr12:
|
||||
|
@ -246,7 +253,7 @@ isr12:
|
|||
; Therefore, the interrupt flag (IF) is already cleared.
|
||||
; cli
|
||||
push byte 12
|
||||
jmp isr_common_stub
|
||||
jmp irq_common_stub
|
||||
|
||||
; 13: General Protection Fault Exception (With Error Code!)
|
||||
isr13:
|
||||
|
@ -254,7 +261,7 @@ isr13:
|
|||
; Therefore, the interrupt flag (IF) is already cleared.
|
||||
; cli
|
||||
push byte 13
|
||||
jmp isr_common_stub
|
||||
jmp irq_common_stub
|
||||
|
||||
; 14: Page Fault Exception (With Error Code!)
|
||||
isr14:
|
||||
|
@ -262,7 +269,7 @@ isr14:
|
|||
; Therefore, the interrupt flag (IF) is already cleared.
|
||||
; cli
|
||||
push byte 14
|
||||
jmp isr_common_stub
|
||||
jmp irq_common_stub
|
||||
|
||||
; 15: Reserved Exception
|
||||
isr15:
|
||||
|
@ -271,7 +278,7 @@ isr15:
|
|||
; cli
|
||||
push byte 0
|
||||
push byte 15
|
||||
jmp isr_common_stub
|
||||
jmp irq_common_stub
|
||||
|
||||
; 16: Floating Point Exception
|
||||
isr16:
|
||||
|
@ -280,7 +287,7 @@ isr16:
|
|||
; cli
|
||||
push byte 0
|
||||
push byte 16
|
||||
jmp isr_common_stub
|
||||
jmp irq_common_stub
|
||||
|
||||
; 17: Alignment Check Exception
|
||||
isr17:
|
||||
|
@ -289,7 +296,7 @@ isr17:
|
|||
; cli
|
||||
push byte 0
|
||||
push byte 17
|
||||
jmp isr_common_stub
|
||||
jmp irq_common_stub
|
||||
|
||||
; 18: Machine Check Exception
|
||||
isr18:
|
||||
|
@ -298,7 +305,7 @@ isr18:
|
|||
; cli
|
||||
push byte 0
|
||||
push byte 18
|
||||
jmp isr_common_stub
|
||||
jmp irq_common_stub
|
||||
|
||||
; 19: Reserved
|
||||
isr19:
|
||||
|
@ -307,7 +314,7 @@ isr19:
|
|||
; cli
|
||||
push byte 0
|
||||
push byte 19
|
||||
jmp isr_common_stub
|
||||
jmp irq_common_stub
|
||||
|
||||
; 20: Reserved
|
||||
isr20:
|
||||
|
@ -316,7 +323,7 @@ isr20:
|
|||
; cli
|
||||
push byte 0
|
||||
push byte 20
|
||||
jmp isr_common_stub
|
||||
jmp irq_common_stub
|
||||
|
||||
; 21: Reserved
|
||||
isr21:
|
||||
|
@ -325,7 +332,7 @@ isr21:
|
|||
; cli
|
||||
push byte 0
|
||||
push byte 21
|
||||
jmp isr_common_stub
|
||||
jmp irq_common_stub
|
||||
|
||||
; 22: Reserved
|
||||
isr22:
|
||||
|
@ -334,7 +341,7 @@ isr22:
|
|||
; cli
|
||||
push byte 0
|
||||
push byte 22
|
||||
jmp isr_common_stub
|
||||
jmp irq_common_stub
|
||||
|
||||
; 23: Reserved
|
||||
isr23:
|
||||
|
@ -343,7 +350,7 @@ isr23:
|
|||
; cli
|
||||
push byte 0
|
||||
push byte 23
|
||||
jmp isr_common_stub
|
||||
jmp irq_common_stub
|
||||
|
||||
; 24: Reserved
|
||||
isr24:
|
||||
|
@ -352,7 +359,7 @@ isr24:
|
|||
; cli
|
||||
push byte 0
|
||||
push byte 24
|
||||
jmp isr_common_stub
|
||||
jmp irq_common_stub
|
||||
|
||||
; 25: Reserved
|
||||
isr25:
|
||||
|
@ -361,7 +368,7 @@ isr25:
|
|||
; cli
|
||||
push byte 0
|
||||
push byte 25
|
||||
jmp isr_common_stub
|
||||
jmp irq_common_stub
|
||||
|
||||
; 26: Reserved
|
||||
isr26:
|
||||
|
@ -370,7 +377,7 @@ isr26:
|
|||
; cli
|
||||
push byte 0
|
||||
push byte 26
|
||||
jmp isr_common_stub
|
||||
jmp irq_common_stub
|
||||
|
||||
; 27: Reserved
|
||||
isr27:
|
||||
|
@ -379,7 +386,7 @@ isr27:
|
|||
; cli
|
||||
push byte 0
|
||||
push byte 27
|
||||
jmp isr_common_stub
|
||||
jmp irq_common_stub
|
||||
|
||||
; 28: Reserved
|
||||
isr28:
|
||||
|
@ -388,7 +395,7 @@ isr28:
|
|||
; cli
|
||||
push byte 0
|
||||
push byte 28
|
||||
jmp isr_common_stub
|
||||
jmp irq_common_stub
|
||||
|
||||
; 29: Reserved
|
||||
isr29:
|
||||
|
@ -397,7 +404,7 @@ isr29:
|
|||
; cli
|
||||
push byte 0
|
||||
push byte 29
|
||||
jmp isr_common_stub
|
||||
jmp irq_common_stub
|
||||
|
||||
; 30: Reserved
|
||||
isr30:
|
||||
|
@ -406,7 +413,7 @@ isr30:
|
|||
; cli
|
||||
push byte 0
|
||||
push byte 30
|
||||
jmp isr_common_stub
|
||||
jmp irq_common_stub
|
||||
|
||||
; 31: Reserved
|
||||
isr31:
|
||||
|
@ -415,7 +422,7 @@ isr31:
|
|||
; cli
|
||||
push byte 0
|
||||
push byte 31
|
||||
jmp isr_common_stub
|
||||
jmp irq_common_stub
|
||||
|
||||
extern syscall_handler
|
||||
|
||||
|
@ -439,24 +446,6 @@ isrsyscall:
|
|||
pop ebp
|
||||
iret
|
||||
|
||||
; We call a C function in here. We need to let the assembler know
|
||||
; that 'fault_handler' exists in another file
|
||||
extern fault_handler
|
||||
|
||||
; This is our common ISR stub. It saves the processor state, sets
|
||||
; up for kernel mode segments, calls the C-level fault handler,
|
||||
; and finally restores the stack frame.
|
||||
isr_common_stub:
|
||||
pusha
|
||||
|
||||
push esp
|
||||
call fault_handler
|
||||
add esp, 4
|
||||
|
||||
popa
|
||||
add esp, 8
|
||||
iret
|
||||
|
||||
global irq0
|
||||
global irq1
|
||||
global irq2
|
||||
|
|
|
@ -20,13 +20,15 @@
|
|||
#include <metalsvm/string.h>
|
||||
#include <metalsvm/stdlib.h>
|
||||
#include <metalsvm/tasks.h>
|
||||
#include <metalsvm/errno.h>
|
||||
#include <asm/gdt.h>
|
||||
#include <asm/tss.h>
|
||||
|
||||
gdt_ptr_t gp;
|
||||
static tss_t task_state_segments[MAX_TASKS];
|
||||
static gdt_entry_t gdt[GDT_ENTRIES] = {[0 ... GDT_ENTRIES-1] = {0, 0, 0, 0, 0, 0}};
|
||||
static unsigned char kstacks[MAX_TASKS][KERNEL_STACK_SIZE];
|
||||
gdt_ptr_t gp;
|
||||
static tss_t task_state_segments[MAX_TASKS] __attribute__ ((aligned (4096)));
|
||||
// currently, our kernel has full access to the ioports
|
||||
static gdt_entry_t gdt[GDT_ENTRIES] = {[0 ... GDT_ENTRIES-1] = {0, 0, 0, 0, 0, 0}};
|
||||
static unsigned char kstacks[MAX_TASKS][KERNEL_STACK_SIZE];
|
||||
|
||||
/*
|
||||
* This is in start.asm. We use this to properly reload
|
||||
|
@ -35,9 +37,18 @@ static unsigned char kstacks[MAX_TASKS][KERNEL_STACK_SIZE];
|
|||
extern void gdt_flush(void);
|
||||
|
||||
int register_task(task_t* task) {
|
||||
uint16_t sel = (task->id+5) << 3;
|
||||
uint16_t sel;
|
||||
uint32_t id = task->id;
|
||||
|
||||
asm volatile ("mov %0, %%ax; ltr %%ax" : : "ir"(sel));
|
||||
if (BUILTIN_EXPECT(!task, 0))
|
||||
return -EINVAL;
|
||||
|
||||
sel = (task->id+5) << 3;
|
||||
asm volatile ("mov %0, %%ax; ltr %%ax" : : "ir"(sel) : "%eax");
|
||||
|
||||
// initialize the static elements of a TSS
|
||||
task_state_segments[id].cr3 = (uint32_t) (task->pgd);
|
||||
task_state_segments[id].ss0 = 0x10;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -70,11 +81,13 @@ int create_default_frame(task_t* task, entry_point_t ep, void* arg, int user)
|
|||
task_state_segments[id].gs = ds;
|
||||
task_state_segments[id].es = ds;
|
||||
task_state_segments[id].eflags = 0x1202;
|
||||
task_state_segments[id].cr3 = (uint32_t) (task->pgd);
|
||||
task_state_segments[id].eip = (uint32_t) ep;
|
||||
if (user)
|
||||
if (user) {
|
||||
task_state_segments[id].esp = (uint32_t) task->ustack + task->stack_size - sizeof(size_t);
|
||||
else
|
||||
} else {
|
||||
task_state_segments[id].esp = (uint32_t) kstacks[id] + KERNEL_STACK_SIZE - sizeof(size_t);
|
||||
}
|
||||
|
||||
/* build default stack frame */
|
||||
*((size_t*)task_state_segments[id].esp) = 0xDEADBEAF; /* dead-end */
|
||||
|
@ -92,7 +105,7 @@ int create_default_frame(task_t* task, entry_point_t ep, void* arg, int user)
|
|||
if (user)
|
||||
task_state_segments[id].esp0 = (uint32_t) kstacks[id] + KERNEL_STACK_SIZE - sizeof(size_t);
|
||||
else
|
||||
task_state_segments[id].esp0 = task_state_segments[id].esp;
|
||||
task_state_segments[id].esp0 = 0; //task_state_segments[id].esp;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -177,7 +190,6 @@ void gdt_install(void)
|
|||
GDT_FLAG_32_BIT);
|
||||
}
|
||||
|
||||
|
||||
/* Flush out the old GDT and install the new changes! */
|
||||
gdt_flush();
|
||||
}
|
||||
|
|
|
@ -226,7 +226,7 @@ void irq_handler(struct state *s)
|
|||
* Find out if we have a custom handler to run for this
|
||||
* IRQ and then finally, run it
|
||||
*/
|
||||
handler = irq_routines[s->int_no - 32];
|
||||
handler = irq_routines[s->int_no];
|
||||
if (handler)
|
||||
handler(s);
|
||||
|
||||
|
|
|
@ -22,6 +22,7 @@
|
|||
#include <metalsvm/tasks.h>
|
||||
#include <asm/irqflags.h>
|
||||
#include <asm/isrs.h>
|
||||
#include <asm/irq.h>
|
||||
#include <asm/apic.h>
|
||||
#include <asm/idt.h>
|
||||
|
||||
|
@ -63,6 +64,8 @@ extern void isr29(void);
|
|||
extern void isr30(void);
|
||||
extern void isr31(void);
|
||||
|
||||
static void fault_handler(struct state *s);
|
||||
|
||||
/*
|
||||
* This is a very repetitive function... it's not hard, it's
|
||||
* just annoying. As you can see, we set the first 32 entries
|
||||
|
@ -76,6 +79,8 @@ extern void isr31(void);
|
|||
*/
|
||||
void isrs_install(void)
|
||||
{
|
||||
int i;
|
||||
|
||||
idt_set_gate(0, (unsigned)isr0, KERNEL_CODE_SELECTOR,
|
||||
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
|
||||
idt_set_gate(1, (unsigned)isr1, KERNEL_CODE_SELECTOR,
|
||||
|
@ -140,6 +145,10 @@ void isrs_install(void)
|
|||
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
|
||||
idt_set_gate(31, (unsigned)isr31, KERNEL_CODE_SELECTOR,
|
||||
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
|
||||
|
||||
// install the default handler
|
||||
for(i=0; i<32; i++)
|
||||
irq_install_handler(i, fault_handler);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -166,7 +175,7 @@ static const char *exception_messages[] = {
|
|||
* serviced as a 'locking' mechanism to prevent an IRQ from
|
||||
* happening and messing up kernel data structures
|
||||
*/
|
||||
void fault_handler(struct state *s)
|
||||
static void fault_handler(struct state *s)
|
||||
{
|
||||
if (s->int_no < 32) {
|
||||
kputs(exception_messages[s->int_no]);
|
||||
|
|
|
@ -105,7 +105,7 @@ static void keyboard_handler(struct state *r)
|
|||
/* Installs the keyboard handler into IRQ1 */
|
||||
void keyboard_init(void)
|
||||
{
|
||||
irq_install_handler(1, keyboard_handler);
|
||||
irq_install_handler(1+32, keyboard_handler);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
|
|
@ -21,14 +21,22 @@
|
|||
#include <metalsvm/stdio.h>
|
||||
#include <metalsvm/time.h>
|
||||
#include <metalsvm/processor.h>
|
||||
//#include <asm/io.h>
|
||||
|
||||
static uint32_t cpu_freq = 0;
|
||||
|
||||
/* disable optimization for the following functions */
|
||||
uint32_t detect_cpu_frequency(void) __attribute__((optimize(0)));
|
||||
void udelay(uint32_t usecs) __attribute__((optimize(0)));
|
||||
|
||||
uint32_t detect_cpu_frequency(void)
|
||||
{
|
||||
uint64_t start, end;
|
||||
uint64_t start, end, diff;
|
||||
uint64_t ticks, old;
|
||||
|
||||
if (cpu_freq > 0)
|
||||
return cpu_freq;
|
||||
|
||||
old = get_clock_tick();
|
||||
|
||||
/* wait for the next time slice */
|
||||
|
@ -41,7 +49,8 @@ uint32_t detect_cpu_frequency(void)
|
|||
;
|
||||
end = rdtsc();
|
||||
|
||||
cpu_freq = (uint32_t) ((end - start) / (uint64_t) 1000000);
|
||||
diff = end > start ? end - start : start - end;
|
||||
cpu_freq = (uint32_t) (diff / (uint64_t) 1000000);
|
||||
|
||||
return cpu_freq;
|
||||
}
|
||||
|
@ -54,11 +63,14 @@ uint32_t get_cpu_frequency(void)
|
|||
return detect_cpu_frequency();
|
||||
}
|
||||
|
||||
void udelay(unsigned int usecs)
|
||||
void udelay(uint32_t usecs)
|
||||
{
|
||||
uint64_t start = rdtsc();
|
||||
uint64_t diff, end, start = rdtsc();
|
||||
uint64_t deadline = get_cpu_frequency() * usecs;
|
||||
|
||||
while(rdtsc() - start < deadline)
|
||||
;
|
||||
do {
|
||||
end = rdtsc();
|
||||
diff = end > start ? end - start : start - end;
|
||||
} while(diff < deadline);
|
||||
}
|
||||
|
|
@ -84,8 +84,8 @@ int timer_init(void)
|
|||
* Installs 'timer_handler' for the PIC and APIC timer,
|
||||
* only one of the handlers will be used later.
|
||||
*/
|
||||
irq_install_handler(0, timer_handler);
|
||||
irq_install_handler(123-32, timer_handler);
|
||||
irq_install_handler(0+32, timer_handler);
|
||||
irq_install_handler(123, timer_handler);
|
||||
|
||||
/*
|
||||
* The Rock Creek processor doesn't possess a traditional PIC.
|
||||
|
|
|
@ -222,7 +222,7 @@ int vga_puts(const char *text)
|
|||
/* Sets our text-mode VGA pointer, then clears the screen for us */
|
||||
void vga_init(void)
|
||||
{
|
||||
textmemptr = (unsigned short *)0xB8000;
|
||||
textmemptr = (unsigned short *)VIDEO_MEM_ADDR;
|
||||
vga_clear();
|
||||
}
|
||||
|
||||
|
|
20
arch/x86/mm/Makefile
Normal file
|
@ -0,0 +1,20 @@
|
|||
C_source = page.c
|
||||
|
||||
OBJS += $(patsubst %.c, %.o, $(filter %.c, $(C_source)))
|
||||
|
||||
# other implicit rules
|
||||
%.o : %.c
|
||||
$(CC) -c -D__KERNEL__ $(CFLAGS) -o $@ $<
|
||||
|
||||
default: $(OBJS)
|
||||
|
||||
all: $(OBJS)
|
||||
|
||||
clean:
|
||||
$(RM) *.o *~ $(NAME)
|
||||
|
||||
depend:
|
||||
$(CC) -MM $(CFLAGS) $(C_source) > Makefile.dep
|
||||
|
||||
-include Makefile.dep
|
||||
# DO NOT DELETE
|
394
arch/x86/mm/page.c
Normal file
|
@ -0,0 +1,394 @@
|
|||
/*
|
||||
* Copyright 2010 Stefan Lankes, Chair for Operating Systems,
|
||||
* RWTH Aachen University
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
* This file is part of MetalSVM.
|
||||
*/
|
||||
|
||||
#include <metalsvm/stddef.h>
|
||||
#include <metalsvm/stdio.h>
|
||||
#include <metalsvm/stdlib.h>
|
||||
#include <metalsvm/mmu.h>
|
||||
#include <metalsvm/string.h>
|
||||
#include <metalsvm/page.h>
|
||||
#include <metalsvm/spinlock.h>
|
||||
#include <metalsvm/processor.h>
|
||||
#include <metalsvm/tasks.h>
|
||||
#include <metalsvm/errno.h>
|
||||
#include <asm/irq.h>
|
||||
#include <asm/multiboot.h>
|
||||
|
||||
/*
|
||||
* Virtual Memory Layout of the standard configuration
|
||||
* (1 GB kernel space)
|
||||
*
|
||||
* 0x00000000 - 0x000FFFFF: reserved for IO devices
|
||||
* 0x00100000 - 0x0DEADFFF: Kernel (size depends on the configuration)
|
||||
* 0xDEAE0000 - 0x3FFFEFFF: Kernel heap
|
||||
* 0x3FFFF000 - 0x3FFFFFFF: Page tables are mapped in this region
|
||||
* (The first 256 entries belong to kernel space)
|
||||
*/
|
||||
|
||||
/*
|
||||
* Note that linker symbols are not variables, they have no memory allocated for
|
||||
* maintaining a value, rather their address is their value.
|
||||
*/
|
||||
extern const void kernel_start;
|
||||
extern const void kernel_end;
|
||||
|
||||
// boot task's page directory and page directory lock
|
||||
static page_dir_t boot_pgd = {{[0 ... 1023] = 0}};
|
||||
static spinlock_t boot_pgd_lock = SPINLOCK_INIT;
|
||||
static int paging_enabled = 0;
|
||||
|
||||
int get_kernel_pgd(task_t* task)
|
||||
{
|
||||
if (BUILTIN_EXPECT(!task, 0))
|
||||
return -EINVAL;
|
||||
|
||||
task->pgd = &boot_pgd;
|
||||
task->pgd_lock = &boot_pgd_lock;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
size_t virt_to_phys(task_t* task, size_t viraddr)
|
||||
{
|
||||
uint32_t index1, index2;
|
||||
page_table_t* pgt;
|
||||
size_t ret = 0;
|
||||
|
||||
if (BUILTIN_EXPECT(!task || !task->pgd || !paging_enabled, 0))
|
||||
return 0;
|
||||
|
||||
index1 = viraddr >> 22;
|
||||
index2 = (viraddr >> 12) & 0x3FF;
|
||||
|
||||
if (!(task->pgd->entries[index1] & 0xFFFFF000))
|
||||
goto out;
|
||||
|
||||
pgt = (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*4) & 0xFFFFF000);
|
||||
if (!pgt || !(pgt->entries[index2]))
|
||||
goto out;
|
||||
|
||||
ret = pgt->entries[index2] & 0xFFFFF000; // determine page frame
|
||||
ret = ret | (viraddr & 0xFFF); // add page offset
|
||||
out:
|
||||
//kprintf("vir %p to phy %p\n", viraddr, ret);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
size_t map_region(task_t* task, size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flags)
|
||||
{
|
||||
page_table_t* pgt;
|
||||
size_t index, i;
|
||||
size_t ret;
|
||||
|
||||
if (BUILTIN_EXPECT(!task || !task->pgd || !phyaddr, 0))
|
||||
return 0;
|
||||
|
||||
if (BUILTIN_EXPECT(!paging_enabled && (viraddr != phyaddr), 0))
|
||||
return 0;
|
||||
|
||||
if (!(flags & MAP_KERNEL_SPACE))
|
||||
return 0;
|
||||
|
||||
if (!viraddr) {
|
||||
viraddr = vm_alloc(task, npages, flags);
|
||||
if (BUILTIN_EXPECT(!viraddr, 0)) {
|
||||
kputs("map_adress: found no valid virtual address\n");
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
ret = viraddr;
|
||||
//kprintf("map %d pages from %p to %p\n", npages, phyaddr, ret);
|
||||
for(i=0; i<npages; i++, viraddr+=PAGE_SIZE, phyaddr+=PAGE_SIZE) {
|
||||
index = viraddr >> 22;
|
||||
|
||||
if (!(task->pgd->entries[index])) {
|
||||
page_table_t* pgt_container;
|
||||
|
||||
pgt = (page_table_t*) get_pages(1);
|
||||
if (BUILTIN_EXPECT(!pgt, 0)) {
|
||||
spinlock_unlock(task->pgd_lock);
|
||||
kputs("map_address: out of memory\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
// set the new page table into the directory
|
||||
task->pgd->entries[index] = (uint32_t)pgt|KERN_TABLE;
|
||||
|
||||
// if paging is already enabled, we need to use the virtual address
|
||||
if (paging_enabled)
|
||||
// we already know the virtual address of the "page table container"
|
||||
// (see file header)
|
||||
pgt_container = (page_table_t*) ((KERNEL_SPACE - PAGE_SIZE) & 0xFFFFF000);
|
||||
else
|
||||
pgt_container = (page_table_t*) (task->pgd->entries[(KERNEL_SPACE - PAGE_SIZE) >> 22] & 0xFFFFF000);
|
||||
|
||||
if (BUILTIN_EXPECT(!pgt_container, 0)) {
|
||||
spinlock_unlock(task->pgd_lock);
|
||||
kputs("map_address: internal error\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
// map the new table into the address space of the kernel space
|
||||
pgt_container->entries[index] = ((size_t) pgt)|KERN_PAGE;
|
||||
|
||||
// clear the page table
|
||||
if (paging_enabled)
|
||||
memset((void*) (KERNEL_SPACE - 1024*PAGE_SIZE + index*4), 0, PAGE_SIZE);
|
||||
else
|
||||
memset(pgt, 0, PAGE_SIZE);
|
||||
} else pgt = (page_table_t*) (task->pgd->entries[index] & 0xFFFFF000);
|
||||
|
||||
/* convert physical address to virtual */
|
||||
if (paging_enabled)
|
||||
pgt = (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index*4) & 0xFFFFF000);
|
||||
|
||||
index = (viraddr >> 12) & 0x3FF;
|
||||
if (BUILTIN_EXPECT(pgt->entries[index], 0)) {
|
||||
spinlock_unlock(task->pgd_lock);
|
||||
kprintf("0x%x is already maped\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
pgt->entries[index] = KERN_PAGE|(phyaddr & 0xFFFFF000);
|
||||
if (flags & MAP_NO_CACHE)
|
||||
pgt->entries[index] |= PG_PCD;
|
||||
|
||||
tlb_flush_one_page(viraddr);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Use the first fit algorithm to find a valid address range
|
||||
*
|
||||
* TODO: O(n) => bad performance, we need a better approach
|
||||
*/
|
||||
size_t vm_alloc(task_t* task, uint32_t npages, uint32_t flags)
|
||||
{
|
||||
uint32_t index1, index2, j;
|
||||
size_t viraddr, i;
|
||||
size_t start, end;
|
||||
page_table_t* pgt;
|
||||
|
||||
if (BUILTIN_EXPECT(!task || !task->pgd || !paging_enabled, 0))
|
||||
return 0;
|
||||
|
||||
if (flags & MAP_KERNEL_SPACE) {
|
||||
start = (((size_t) &kernel_end) + PAGE_SIZE) & 0xFFFFF000;
|
||||
end = (KERNEL_SPACE - 2*PAGE_SIZE) & 0xFFFFF000; // we need 1 PAGE for our PGTs
|
||||
} else {
|
||||
start = KERNEL_SPACE & 0xFFFFF000;
|
||||
end = 0xFFFFF000;
|
||||
}
|
||||
|
||||
if (BUILTIN_EXPECT(!npages, 0))
|
||||
return 0;
|
||||
|
||||
viraddr = i = start;
|
||||
j = 0;
|
||||
do {
|
||||
index1 = i >> 22;
|
||||
index2 = (i >> 12) & 0x3FF;
|
||||
|
||||
pgt = (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*4) & 0xFFFFF000);
|
||||
if (!pgt || !(pgt->entries[index2])) {
|
||||
i+=PAGE_SIZE;
|
||||
j++;
|
||||
} else {
|
||||
// restart search
|
||||
j = 0;
|
||||
viraddr = i + PAGE_SIZE;
|
||||
i = i + PAGE_SIZE;
|
||||
}
|
||||
} while((j < npages) && (i<=end));
|
||||
|
||||
if ((j >= npages) && (viraddr < end))
|
||||
return viraddr;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int vm_free(task_t* task, size_t viraddr, uint32_t npages)
|
||||
{
|
||||
uint32_t i;
|
||||
uint32_t index1, index2;
|
||||
page_table_t* pgt;
|
||||
|
||||
if (BUILTIN_EXPECT(!task || !task->pgd || !paging_enabled, 0))
|
||||
return -EINVAL;
|
||||
|
||||
for(i=0; i<npages; i++, viraddr+=PAGE_SIZE)
|
||||
{
|
||||
index1 = viraddr >> 22;
|
||||
index2 = (viraddr >> 12) & 0x3FF;
|
||||
|
||||
pgt = (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*4) & 0xFFFFF000);
|
||||
if (!pgt)
|
||||
continue;
|
||||
pgt->entries[index2] = 0;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int print_paging_tree(size_t viraddr)
|
||||
{
|
||||
uint32_t index1, index2;
|
||||
page_dir_t* pgd = NULL;
|
||||
page_table_t* pgt = NULL;
|
||||
|
||||
if (BUILTIN_EXPECT(!viraddr, 0))
|
||||
return -EINVAL;
|
||||
|
||||
index1 = viraddr >> 22;
|
||||
index2 = (viraddr >> 12) & 0x3FF;
|
||||
|
||||
kprintf("Paging dump of address 0x%x\n", viraddr);
|
||||
pgd = per_core(current_task)->pgd;
|
||||
kprintf("\tPage directory entry %u: ", index1);
|
||||
if (pgd) {
|
||||
kprintf("0x%0x\n", pgd->entries[index1]);
|
||||
pgt = (page_table_t*) (pgd->entries[index1] & 0xFFFFF000);
|
||||
} else
|
||||
kputs("invalid page directory\n");
|
||||
|
||||
/* convert physical address to virtual */
|
||||
if (paging_enabled && pgt)
|
||||
pgt = (page_table_t*) (KERNEL_SPACE - 1024*PAGE_SIZE + index1*4);
|
||||
|
||||
kprintf("\tPage table entry %u: ", index2);
|
||||
if (pgt)
|
||||
kprintf("0x%x\n", pgt->entries[index2]);
|
||||
else
|
||||
kputs("invalid page table\n");
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void pagefault_handler(struct state *s)
|
||||
{
|
||||
kprintf("PAGE FAULT: Task %u got page fault at irq %u\n", per_core(current_task)->id, s->int_no);
|
||||
kprintf("Register state: eax = 0x%x, ebx = 0x%x, ecx = 0x%x, edx = 0x%x, edi = 0x%x, esi = 0x%x, ebp = 0x%x, esp = 0x%x\n",
|
||||
s->eax, s->ebx, s->ecx, s->edx, s->edi, s->esi, s->ebp, s->esp);
|
||||
|
||||
abort();
|
||||
}
|
||||
|
||||
int arch_paging_init(void)
|
||||
{
|
||||
uint32_t i, npages, index1, index2;
|
||||
page_table_t* pgt;
|
||||
size_t viraddr;
|
||||
|
||||
// uninstall default handler and install our own
|
||||
irq_uninstall_handler(14);
|
||||
irq_install_handler(14, pagefault_handler);
|
||||
|
||||
// Create a page table to reference to the other page tables
|
||||
pgt = (page_table_t*) get_pages(1);
|
||||
if (!pgt) {
|
||||
kputs("arch_paging_init: Not enough memory!\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
memset(pgt, 0, PAGE_SIZE);
|
||||
|
||||
// map this table at the end of the kernel space
|
||||
viraddr = KERNEL_SPACE - PAGE_SIZE;
|
||||
index1 = viraddr >> 22;
|
||||
index2 = (viraddr >> 12) & 0x3FF;
|
||||
|
||||
// now, we create a self reference
|
||||
per_core(current_task)->pgd->entries[index1] = (((size_t) pgt) & 0xFFFFF000)|USER_TABLE;
|
||||
pgt->entries[index2] = ((size_t) pgt & 0xFFFFF000)|KERN_PAGE;
|
||||
|
||||
/*
|
||||
* Set the page table and page directory entries for the kernel. We map the kernel's physical address
|
||||
* to the same virtual address.
|
||||
*/
|
||||
npages = ((size_t) &kernel_end - (size_t) &kernel_start) / PAGE_SIZE;
|
||||
if ((size_t)&kernel_end % PAGE_SIZE)
|
||||
npages++;
|
||||
map_region(per_core(current_task), (size_t)&kernel_start, (size_t)&kernel_start, npages, MAP_KERNEL_SPACE);
|
||||
|
||||
#ifdef CONFIG_VGA
|
||||
// map the video memory into the kernel space
|
||||
map_region(per_core(current_task), VIDEO_MEM_ADDR, VIDEO_MEM_ADDR, 1, MAP_KERNEL_SPACE|MAP_NO_CACHE);
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_MULTIBOOT
|
||||
/*
|
||||
* of course, mb_info has to be mapped into the kernel space
|
||||
*/
|
||||
if (mb_info)
|
||||
map_region(per_core(current_task), (size_t) mb_info, (size_t) mb_info, 1, MAP_KERNEL_SPACE);
|
||||
|
||||
/*
|
||||
* Map reserved memory regions into the kernel space
|
||||
*/
|
||||
if (mb_info && (mb_info->flags & (1 << 6))) {
|
||||
multiboot_memory_map_t* mmap = (multiboot_memory_map_t*) mb_info->mmap_addr;
|
||||
multiboot_memory_map_t* mmap_end = (void*) ((size_t) mb_info->mmap_addr + mb_info->mmap_length);
|
||||
|
||||
while (mmap < mmap_end) {
|
||||
if (mmap->type != MULTIBOOT_MEMORY_AVAILABLE) {
|
||||
npages = mmap->len / PAGE_SIZE;
|
||||
if ((mmap->addr+mmap->len) % PAGE_SIZE)
|
||||
npages++;
|
||||
map_region(per_core(current_task), mmap->addr, mmap->addr, npages, MAP_KERNEL_SPACE|MAP_NO_CACHE);
|
||||
}
|
||||
mmap++;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Modules like the init ram disk are already loaded.
|
||||
* Therefore, we map these modules into the kernel space.
|
||||
*/
|
||||
if (mb_info && (mb_info->flags & (1 << 3))) {
|
||||
multiboot_module_t* mmodule = (multiboot_module_t*) mb_info->mods_addr;
|
||||
|
||||
for(i=0; i<mb_info->mods_count; i++, mmodule++) {
|
||||
// map physical address to the same virtual address
|
||||
npages = (mmodule->mod_end - mmodule->mod_start) / PAGE_SIZE;
|
||||
if (mmodule->mod_end % PAGE_SIZE)
|
||||
npages++;
|
||||
map_region(per_core(current_task), (size_t) (mmodule->mod_start), (size_t) (mmodule->mod_start), npages, MAP_KERNEL_SPACE);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
/* enable paging */
|
||||
write_cr3((uint32_t) &boot_pgd);
|
||||
i = read_cr0();
|
||||
i = i | (1 << 31);
|
||||
write_cr0(i);
|
||||
paging_enabled = 1;
|
||||
|
||||
/*
|
||||
* we turned on paging
|
||||
* => now, we are able to register our task for Task State Switching
|
||||
*/
|
||||
register_task(per_core(current_task));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
@ -24,6 +24,7 @@
|
|||
#include <metalsvm/string.h>
|
||||
#include <metalsvm/processor.h>
|
||||
#include <metalsvm/mailbox.h>
|
||||
#include <metalsvm/page.h>
|
||||
#include <asm/io.h>
|
||||
#include <asm/irq.h>
|
||||
#if defined(CONFIG_LWIP) && defined(CONFIG_PCI)
|
||||
|
@ -314,15 +315,39 @@ err_t rtl8139if_init(struct netif* netif)
|
|||
uint16_t tmp16, speed;
|
||||
uint8_t tmp8;
|
||||
static uint8_t num = 0;
|
||||
task_t* task = per_core(current_task);
|
||||
|
||||
LWIP_ASSERT("netif != NULL", (netif != NULL));
|
||||
|
||||
rtl8139if = kmalloc(sizeof(rtl1839if_t)); //mem_malloc(sizeof(rtl1839if_t));
|
||||
if (rtl8139if == NULL) {
|
||||
rtl8139if = kmalloc(sizeof(rtl1839if_t));
|
||||
if (!rtl8139if) {
|
||||
LWIP_DEBUGF(NETIF_DEBUG, ("rtl8139if_init: out of memory\n"));
|
||||
return ERR_MEM;
|
||||
}
|
||||
memset(rtl8139if, 0, sizeof(rtl1839if_t));
|
||||
|
||||
/* allocate the receive buffer */
|
||||
rtl8139if->rx_buffer = mem_allocation(8192+16, MAP_KERNEL_SPACE|MAP_HEAP|MAP_NO_CACHE);
|
||||
if (!(rtl8139if->rx_buffer)) {
|
||||
LWIP_DEBUGF(NETIF_DEBUG, ("rtl8139if_init: out of memory\n"));
|
||||
kfree(rtl8139if, sizeof(rtl1839if_t));
|
||||
return ERR_MEM;
|
||||
}
|
||||
memset(rtl8139if->rx_buffer, 0, 8192+16);
|
||||
|
||||
/* allocate the send buffers */
|
||||
rtl8139if->tx_buffer[0] = mem_allocation(4*4096, MAP_KERNEL_SPACE|MAP_HEAP|MAP_NO_CACHE);
|
||||
if (!(rtl8139if->tx_buffer[0])) {
|
||||
LWIP_DEBUGF(NETIF_DEBUG, ("rtl8139if_init: out of memory\n"));
|
||||
kfree(rtl8139if->rx_buffer, 8192+16);
|
||||
kfree(rtl8139if, sizeof(rtl1839if_t));
|
||||
return ERR_MEM;
|
||||
}
|
||||
memset(rtl8139if->tx_buffer[0], 0, 4*4096);
|
||||
rtl8139if->tx_buffer[1] = rtl8139if->tx_buffer[0] + 4096;
|
||||
rtl8139if->tx_buffer[2] = rtl8139if->tx_buffer[1] + 4096;
|
||||
rtl8139if->tx_buffer[3] = rtl8139if->tx_buffer[2] + 4096;
|
||||
|
||||
mailbox_ptr_init(&rtl8139if->mbox);
|
||||
netif->state = rtl8139if;
|
||||
mynetif = netif;
|
||||
|
@ -409,13 +434,13 @@ err_t rtl8139if_init(struct netif* netif)
|
|||
outportl(rtl8139if->iobase + TCR, TCR_IFG|TCR_MXDMA0|TCR_MXDMA1|TCR_MXDMA2);
|
||||
|
||||
// register the receive buffer
|
||||
outportl(rtl8139if->iobase + RBSTART, (uint32_t) rtl8139if->rx_buffer);
|
||||
outportl(rtl8139if->iobase + RBSTART, virt_to_phys(task, (size_t) rtl8139if->rx_buffer));
|
||||
|
||||
// set each of the transmitter start address descriptors
|
||||
outportl(rtl8139if->iobase + TSAD0, (uint32_t) rtl8139if->tx_buffer[0]);
|
||||
outportl(rtl8139if->iobase + TSAD1, (uint32_t) rtl8139if->tx_buffer[1]);
|
||||
outportl(rtl8139if->iobase + TSAD2, (uint32_t) rtl8139if->tx_buffer[2]);
|
||||
outportl(rtl8139if->iobase + TSAD3, (uint32_t) rtl8139if->tx_buffer[3]);
|
||||
outportl(rtl8139if->iobase + TSAD0, virt_to_phys(task, (size_t) rtl8139if->tx_buffer[0]));
|
||||
outportl(rtl8139if->iobase + TSAD1, virt_to_phys(task, (size_t) rtl8139if->tx_buffer[1]));
|
||||
outportl(rtl8139if->iobase + TSAD2, virt_to_phys(task, (size_t) rtl8139if->tx_buffer[2]));
|
||||
outportl(rtl8139if->iobase + TSAD3, virt_to_phys(task, (size_t) rtl8139if->tx_buffer[3]));
|
||||
|
||||
/*
|
||||
* To set the RTL8139 to accept only the Transmit OK (TOK) and Receive OK (ROK)
|
||||
|
|
|
@ -216,10 +216,10 @@ typedef struct {
|
|||
* Helper struct to hold private data used to operate your ethernet interface.
|
||||
*/
|
||||
typedef struct rtl1839if {
|
||||
struct eth_addr *ethaddr;
|
||||
struct eth_addr *ethaddr;
|
||||
/* Add whatever per-interface state that is needed here. */
|
||||
uint8_t rx_buffer[8192+16] __attribute__((aligned (8)));
|
||||
uint8_t tx_buffer[4][4096] __attribute__((aligned (8)));
|
||||
uint8_t* tx_buffer[4];
|
||||
uint8_t* rx_buffer;
|
||||
uint32_t iobase;
|
||||
uint32_t irq;
|
||||
uint32_t tx_queue;
|
||||
|
|
|
@ -26,10 +26,10 @@ extern "C" {
|
|||
|
||||
#define METALSVM_VERSION "0.1"
|
||||
#define MAX_TASKS 16
|
||||
#define MAX_CORES 8
|
||||
#define MAX_CORES 4
|
||||
#define MAX_FNAME 128
|
||||
#define DEFAULT_STACK_SIZE (32*1024)
|
||||
#define KERNEL_STACK_SIZE 8192
|
||||
#define DEFAULT_STACK_SIZE (32*1024)
|
||||
#define KERNEL_STACK_SIZE 8192
|
||||
#define KMSG_SIZE (128*1024)
|
||||
#define PAGE_SIZE 4096
|
||||
#define CACHE_LINE 64
|
||||
|
@ -37,6 +37,8 @@ extern "C" {
|
|||
#define TIMER_FREQ 100 /* in HZ */
|
||||
#define CLOCK_TICK_RATE 1193182 /* 8254 chip's internal oscillator frequency */
|
||||
#define INT_SYSCALL 0x80
|
||||
#define KERNEL_SPACE (1*1024*1024*1024)
|
||||
#define VIDEO_MEM_ADDR 0xB8000 // the video memory address
|
||||
|
||||
#define BYTE_ORDER LITTLE_ENDIAN
|
||||
|
||||
|
|
|
@ -33,6 +33,7 @@ extern atomic_int32_t total_allocated_pages;
|
|||
extern atomic_int32_t total_available_pages;
|
||||
|
||||
int mmu_init(void);
|
||||
size_t get_pages(uint32_t no_pages);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
|
|
|
@ -28,10 +28,6 @@
|
|||
extern "C" {
|
||||
#endif
|
||||
|
||||
uint32_t detect_cpu_frequency(void);
|
||||
uint32_t get_cpu_frequency(void);
|
||||
void udelay(unsigned int usecs);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -20,8 +20,8 @@
|
|||
#ifndef __SPINLOCK_H__
|
||||
#define __SPINLOCK_H__
|
||||
|
||||
#include <metalsvm/config.h>
|
||||
#include <metalsvm/tasks.h>
|
||||
#include <metalsvm/stddef.h>
|
||||
#include <metalsvm/tasks_types.h>
|
||||
#include <asm/irqflags.h>
|
||||
#include <asm/atomic.h>
|
||||
|
||||
|
@ -29,7 +29,7 @@
|
|||
extern "C" {
|
||||
#endif
|
||||
|
||||
typedef struct {
|
||||
typedef struct spinlock {
|
||||
atomic_int32_t queue, dequeue;
|
||||
tid_t owner;
|
||||
} spinlock_t;
|
||||
|
|
|
@@ -29,12 +29,14 @@ extern "C" {

#define NULL ((void*) 0)

typedef unsigned int tid_t;

#if MAX_CORES == 1
#define per_core(name) name
#define DECLARE_PER_CORE(type, name) extern type name;
#define DEFINE_PER_CORE(type, name, def_value) type name = def_value;
#else
#define per_core(name) name[0].var
#define per_core(name) name[LOGICAL_CPUID].var
#define DECLARE_PER_CORE(type, name) \
typedef struct { type var __attribute__ ((aligned (CACHE_LINE))); } aligned_##name;\
extern aligned_##name name[MAX_CORES];

@@ -42,6 +44,10 @@ extern "C" {
aligned_##name name[MAX_CORES] = {[0 ... MAX_CORES-1] = {def_value}};
#endif

/* needed to find the task, which is currently running on this core */
struct task;
DECLARE_PER_CORE(struct task*, current_task);

#ifdef __cplusplus
}
#endif
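The multi-core branch of these macros wraps each variable in a cache-line-aligned struct and indexes it with LOGICAL_CPUID, so every core works on its own copy without false sharing. The following stand-alone sketch shows roughly what the DECLARE_PER_CORE/DEFINE_PER_CORE pair expands to for an int counter with MAX_CORES > 1; LOGICAL_CPUID is stubbed to 0 here because the real value comes from the APIC code:

#include <stdio.h>

#define CACHE_LINE    64
#define MAX_CORES     2
#define LOGICAL_CPUID 0   /* stub; the kernel derives this from the local APIC */

/* hand-expanded DECLARE_PER_CORE(int, counter) + DEFINE_PER_CORE(int, counter, 0) */
typedef struct { int var __attribute__ ((aligned (CACHE_LINE))); } aligned_counter;
aligned_counter counter[MAX_CORES] = {[0 ... MAX_CORES-1] = {0}};

#define per_core(name) name[LOGICAL_CPUID].var

int main(void)
{
	per_core(counter)++;                                /* touches only this core's copy */
	printf("counter on this core: %d\n", per_core(counter));
	printf("element size: %zu bytes\n", sizeof(aligned_counter));  /* one cache line */
	return 0;
}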
@@ -28,8 +28,17 @@
extern "C" {
#endif

#define MAP_KERNEL_SPACE (1 << 0)
#define MAP_USER_SPACE (1 << 1)
#define MAP_PAGE_TABLE (1 << 2)
#define MAP_NO_CACHE (1 << 3)
#define MAP_STACK (1 << 4)
#define MAP_HEAP (1 << 5)
#define MAP_CODE (1 << 6)

void NORETURN abort(void);
void* kmalloc(size_t);
void* mem_allocation(size_t sz, uint32_t flags);
void kfree(void*, size_t);
void* create_stack(task_t* task, size_t sz);
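Taken together, these declarations describe the new allocation path: a caller asks mem_allocation() for a byte count plus a combination of MAP_* flags, receives a virtual address that is already mapped, and later returns it with kfree(), which still needs the size because the allocator keeps no per-allocation header. A hedged usage sketch built only from the declarations above; the buffer size, the flag combination and the -ENOMEM return value are illustrative choices, not taken from this commit:

/* Sketch: allocate two uncached, kernel-mapped pages and release them again. */
static int dma_buffer_demo(void)
{
	size_t sz = 2*PAGE_SIZE;
	void* buf = mem_allocation(sz, MAP_KERNEL_SPACE|MAP_NO_CACHE);

	if (BUILTIN_EXPECT(!buf, 0))
		return -ENOMEM;

	/* ... hand the buffer to a device or use it as shared memory ... */

	kfree(buf, sz);   /* the size must match the original request */
	return 0;
}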
@@ -30,7 +30,7 @@ extern "C" {
#endif

/* tasks, which are currently running */
DECLARE_PER_CORE(task_t*, current_task);
//DECLARE_PER_CORE(task_t*, current_task);

int multitasking_init(void);
@@ -20,7 +20,6 @@
#ifndef __TASKS_TYPES_H__
#define __TASKS_TYPES_H__

#include <metalsvm/config.h>
#include <metalsvm/stddef.h>
#include <asm/atomic.h>

@@ -36,17 +35,19 @@ extern "C" {
#define TASK_IDLE 5

typedef int (STDCALL *entry_point_t)(void*);
typedef unsigned int tid_t;
struct mailbox_int32;
struct page_dir;
struct spinlock;

typedef struct {
typedef struct task {
tid_t id; /* task id = position in the task table */
uint32_t status;
unsigned char* ustack; /* stack of a user level task */
size_t stack_size; /* only user level tasks
* are able to specify its stack size
*/
* are able to specify its stack size */
atomic_int32_t mem_usage; /* in number of pages */
struct spinlock* pgd_lock; /* avoids concurrent access to the page directory */
struct page_dir* pgd; /* pointer to the page directory */
struct mailbox_int32* mbox[MAX_TASKS];
} __attribute__((packed)) task_t;
@@ -1,4 +1,4 @@
C_source = main.c tasks.c processor.c syscall.c tests.c echo.c ping.c net.c
C_source = main.c tasks.c syscall.c tests.c echo.c ping.c init.c

OBJS += $(patsubst %.c, %.o, $(filter %.c, $(C_source)))
@@ -24,6 +24,7 @@
#include <metalsvm/time.h>
#include <metalsvm/tasks.h>
#include <metalsvm/errno.h>
#include <metalsvm/init.h>
#ifdef CONFIG_LWIP
#include <lwip/init.h>
#include <lwip/sys.h>

@@ -41,6 +42,11 @@ void ping_init(void);

static volatile int done = 0;

int lowlevel_init(void)
{
return 0;
}

#if defined(CONFIG_LWIP) && defined(CONFIG_PCI)
static tid_t netid;
@@ -62,11 +68,6 @@ int STDCALL network_task(void* arg)
IP4_ADDR(&ipaddr, 0,0,0,0);
IP4_ADDR(&netmask, 0,0,0,0);

/* Clear the IP address, Subnet Mask, and Gateway */
ipaddr.addr = 0;
netmask.addr = 0;
gw.addr = 0;

/* Bring up the network interface */
if (!netif_add(&netif, &ipaddr, &netmask, &gw, NULL, rtl8139if_init, ethernet_input)) {
kputs("Unable to add network interface\n");
@@ -26,12 +26,12 @@
#include <metalsvm/processor.h>
#include <metalsvm/fs.h>
#include <metalsvm/errno.h>
#include <metalsvm/init.h>
#include <asm/irq.h>
#include <asm/irqflags.h>
#include <asm/kb.h>

extern int test_init(void);
extern int network_init(void);

/*
* Note that linker symbols are not variables, they have no memory allocated for
@@ -87,8 +87,8 @@ int main(void)
#ifdef CONFIG_KEYBOARD
keyboard_init();
#endif
mmu_init();
multitasking_init();
mmu_init();
koutput_init();
initrd_init();
@@ -97,7 +97,7 @@ int main(void)
kprintf("Kernel starts at %p and ends at %p\n", &kernel_start, &kernel_end);

system_calibration();
network_init();
//network_init();

kprintf("Processor frequency: %u MHz\n", get_cpu_frequency());
kprintf("Total memory: %u MBytes\n", atomic_int32_read(&total_pages)/((1024*1024)/PAGE_SIZE));
@@ -106,7 +106,7 @@ int main(void)

sleep(5);
list_root();
//test_init();
test_init();

per_core(current_task)->status = TASK_IDLE;
reschedule();
@@ -22,6 +22,7 @@
#include <metalsvm/string.h>
#include <metalsvm/errno.h>
#include <metalsvm/mmu.h>
#include <metalsvm/page.h>
#include <metalsvm/tasks.h>
#include <metalsvm/processor.h>
#include <metalsvm/spinlock.h>
@@ -29,7 +30,7 @@
#include <metalsvm/syscall.h>

DEFINE_PER_CORE(task_t*, current_task, NULL);
static task_t task_table[MAX_TASKS] = {[0 ... MAX_TASKS-1] = {0, TASK_INVALID, NULL, 0, ATOMIC_INIT(0)}};
static task_t task_table[MAX_TASKS] = {[0 ... MAX_TASKS-1] = {0, TASK_INVALID, NULL, 0, ATOMIC_INIT(0), NULL, NULL}};
static spinlock_t table_lock = SPINLOCK_INIT;

/*
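The task_table initializer above uses GCC's range-designator extension, [first ... last] = value, to give every slot the same initial contents; the two trailing NULLs added in this commit cover the new pgd_lock and pgd pointers of task_t. A tiny stand-alone illustration of the syntax (the struct and array here are made up for the example):

#include <stdio.h>

struct demo { int id; int status; };

/* every element from index 0 to 3 gets the same initializer */
static struct demo table[4] = {[0 ... 3] = {0, -1}};

int main(void)
{
	int i;

	for (i = 0; i < 4; i++)
		printf("table[%d] = {%d, %d}\n", i, table[i].id, table[i].status);
	return 0;
}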
@@ -49,7 +50,7 @@ int multitasking_init(void) {
task_table[i].status = TASK_RUNNING;
memset(task_table[i].mbox, 0x00, sizeof(mailbox_int32_t*)*MAX_TASKS);
per_core(current_task) = task_table+i;
register_task(per_core(current_task));
get_kernel_pgd(per_core(current_task));
return 0;
}
}
@@ -143,9 +144,12 @@ static int create_task(tid_t* id, entry_point_t ep, void* arg, size_t stack_size
if (!task_table[i].ustack)
break;
task_table[i].stack_size = stack_size;
task_table[i].pgd = NULL;
task_table[i].pgd_lock = NULL;
} else {
task_table[i].ustack = NULL;
task_table[i].stack_size = 0;
get_kernel_pgd(task_table+i);
}

atomic_int32_set(&task_table[i].mem_usage, 0);
@@ -278,9 +282,9 @@ void scheduler(void)
if (per_core(current_task)->status == TASK_FINISHED)
per_core(current_task)->status = TASK_INVALID;

for(i=1; i<MAX_TASKS; i++) {
new_id = (per_core(current_task)->id + i) % MAX_TASKS;

for(i=1, new_id=(per_core(current_task)->id + 1) % MAX_TASKS;
i<MAX_TASKS; i++, new_id=(new_id+1) % MAX_TASKS)
{
if (task_table[new_id].status == TASK_READY) {
if (per_core(current_task)->status == TASK_RUNNING)
per_core(current_task)->status = TASK_READY;
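The rewritten loop keeps the round-robin behaviour but computes new_id incrementally instead of re-deriving it from i on every iteration; it still starts at the slot after the current task and wraps modulo MAX_TASKS. A stand-alone sketch of just that selection logic, with a simplified task table (the status values and the pick_next() helper are illustrative, not kernel code):

#include <stdio.h>

#define MAX_TASKS    16
#define TASK_READY   2   /* illustrative value; only the comparison matters */

static int status[MAX_TASKS];

/* Return the id of the next READY task after 'current', or -1 if none exists.
 * Mirrors the incremental new_id computation used in scheduler(). */
static int pick_next(int current)
{
	int i, new_id;

	for (i = 1, new_id = (current + 1) % MAX_TASKS;
	     i < MAX_TASKS; i++, new_id = (new_id + 1) % MAX_TASKS) {
		if (status[new_id] == TASK_READY)
			return new_id;
	}

	return -1;
}

int main(void)
{
	status[3] = TASK_READY;
	status[7] = TASK_READY;

	printf("after 0 -> %d\n", pick_next(0));   /* 3 */
	printf("after 3 -> %d\n", pick_next(3));   /* 7 */
	printf("after 7 -> %d\n", pick_next(7));   /* 3 (wraps around) */
	return 0;
}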
@@ -124,10 +124,10 @@ int test_init(void)
mailbox_int32_init(&mbox);

create_kernel_task(&id1, foo, "Hello from foo1\n");
create_kernel_task(&id2, join_test, NULL);
//create_kernel_task(&id2, join_test, NULL);
create_kernel_task(&id3, producer, NULL);
create_kernel_task(&id4, consumer, NULL);
create_user_task(&id5, userfoo, "Hello from user process foo\n", 0);
//create_user_task(&id5, userfoo, "Hello from user process foo\n", 0);

return 0;
}
2
link.ld
@@ -5,11 +5,11 @@ phys = 0x00100000;

SECTIONS
{
kernel_start = phys;
.mboot phys : AT(ADDR(.mboot)) {
*(.mboot)
}
.text ALIGN(4096) : AT(ADDR(.text)) {
kernel_start = .;
*(.text)
*(.rodata)
}
129
mm/memory.c
@@ -19,11 +19,13 @@

#include <metalsvm/stdio.h>
#include <metalsvm/string.h>
#include <metalsvm/stdlib.h>
#include <metalsvm/mmu.h>
#include <metalsvm/spinlock.h>
#include <metalsvm/time.h>
#include <metalsvm/elf.h>
#include <metalsvm/processor.h>
#include <metalsvm/page.h>
#ifdef CONFIG_MULTIBOOT
#include <asm/multiboot.h>
#endif
@@ -43,7 +45,12 @@ static size_t alloc_start;
atomic_int32_t total_pages = ATOMIC_INIT(0);
atomic_int32_t total_allocated_pages = ATOMIC_INIT(0);
atomic_int32_t total_available_pages = ATOMIC_INIT(0);
atomic_int32_t total_kernel_pages = ATOMIC_INIT(0);

/*
* Note that linker symbols are not variables, they have no memory allocated for
* maintaining a value, rather their address is their value.
*/
extern const void kernel_start;
extern const void kernel_end;
@@ -161,26 +168,25 @@ int mmu_init(void)
if ((size_t) &kernel_end % PAGE_SIZE)
page_set_mark(i);

alloc_start = (unsigned int) (&kernel_end) / PAGE_SIZE;
alloc_start += 128*1024;
alloc_start = (size_t) &kernel_end / PAGE_SIZE;
if ((size_t) &kernel_end % PAGE_SIZE)
alloc_start++;

return 0;
return paging_init();
}

static void* task_malloc(task_t* task, size_t sz)
/*
* Use first fit algorithm to find a suitable physical memory region
*/
static size_t task_get_pages(task_t* task, uint32_t npages)
{
unsigned int i, j, l;
unsigned int k = 0;
unsigned char* ret = NULL;
unsigned int npages;
uint32_t i, j, l;
uint32_t k = 0;
size_t ret = 0;

if (BUILTIN_EXPECT(!sz, 0))
if (BUILTIN_EXPECT(!npages, 0))
return ret;

npages = sz / PAGE_SIZE;
if (sz % PAGE_SIZE)
npages++;

spinlock_lock(&bitmap_lock);
i = alloc_start;
next_try:
@@ -207,58 +213,125 @@ next_try:
if (k >= BITMAP_SIZE)
goto oom;

ret = (unsigned char*) (i*PAGE_SIZE);
//kprintf("alloc: ret %p, i = %d, j = %d, npages = %d\n", ret, i, j, npages);
ret = i*PAGE_SIZE;
//kprintf("alloc: ret 0x%x, i = %d, j = %d, npages = %d\n", ret, i, j, npages);
for(l=i; l<i+j; l++)
page_set_mark(l);

alloc_start = i+j;
spinlock_unlock(&bitmap_lock);

atomic_int32_add(&total_allocated_pages, npages);
atomic_int32_sub(&total_available_pages, npages);
if (task)
if (task && task->ustack)
atomic_int32_add(&(task->mem_usage), npages);
else
atomic_int32_add(&total_kernel_pages, npages);

return ret;

oom:
spinlock_unlock(&bitmap_lock);

return ret;
}

size_t get_pages(uint32_t npages)
{
return task_get_pages(per_core(current_task), npages);
}

void* mem_allocation(size_t sz, uint32_t flags)
{
size_t phyaddr, viraddr;
uint32_t npages = sz / PAGE_SIZE;
task_t* task = per_core(current_task);

if (sz % PAGE_SIZE)
npages++;

phyaddr = task_get_pages(task, npages);
if (BUILTIN_EXPECT(!phyaddr, 0))
return 0;

spinlock_lock(task->pgd_lock);
viraddr = map_region(task, 0, phyaddr, npages, flags);
spinlock_unlock(task->pgd_lock);

return (void*) viraddr;
}

void* kmalloc(size_t sz)
{
return task_malloc(per_core(current_task), sz);
uint32_t flags;
task_t* task = per_core(current_task);

if (task->ustack)
flags = MAP_USER_SPACE|MAP_HEAP;
else
flags = MAP_KERNEL_SPACE|MAP_HEAP;

return mem_allocation(sz, flags);
}
void* create_stack(task_t* task, size_t sz)
{
unsigned char* addr = (unsigned char*) task_malloc(task, sz);
size_t viraddr;
uint32_t npages = sz / PAGE_SIZE;

memset(addr, 0xCD, sz);
if (sz % PAGE_SIZE)
npages++;

return addr;
spinlock_lock(task->pgd_lock);

size_t addr = (size_t) task_get_pages(task, npages);
if (BUILTIN_EXPECT(!addr, 0))
return 0;

spinlock_lock(task->pgd_lock);
/*
* We only need to map user-level stacks; kernel stacks are already
* initialized as a static array.
*/
viraddr = map_region(task, 0, addr, npages, MAP_USER_SPACE|MAP_STACK);
spinlock_unlock(task->pgd_lock);

memset((unsigned char*)viraddr, 0xCD, sz);

return (void*) viraddr;
}

static void task_free(task_t* task, void* addr, size_t sz)
{
unsigned int index, npages, i;
uint32_t index, npages, i;
size_t phyaddr;

if (BUILTIN_EXPECT(!addr && !sz, 0))
return;

index = (size_t) addr / PAGE_SIZE;
npages = sz / PAGE_SIZE;
if (sz % PAGE_SIZE)
npages++;

spinlock_lock(&bitmap_lock);

for(i=index; i<index+npages; i++)
page_unmarked(i);
spinlock_lock(task->pgd_lock);
vm_free(task, (size_t) addr, npages);
spinlock_unlock(task->pgd_lock);

spinlock_lock(&bitmap_lock);
for(i=0; i<npages; i++) {
phyaddr = virt_to_phys(task, (size_t) addr+i*PAGE_SIZE);
index = phyaddr / PAGE_SIZE;
page_unmarked(index);
}
spinlock_unlock(&bitmap_lock);

atomic_int32_sub(&total_allocated_pages, npages);
atomic_int32_add(&total_available_pages, npages);
if (task)
if (task && task->ustack)
atomic_int32_sub(&(task->mem_usage), npages);

spinlock_unlock(&bitmap_lock);
else
atomic_int32_sub(&total_kernel_pages, npages);
}

void kfree(void* addr, size_t sz)
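task_get_pages() above implements a first-fit search over the physical page bitmap: starting at alloc_start it looks for npages consecutive unmarked pages, marks them, and returns the physical address of the first one. Because the function is split across two hunks here, a compact stand-alone sketch of the same idea follows, using a plain byte-per-page bitmap (the real kernel packs the bitmap into bits, takes bitmap_lock around the scan, and updates the usage counters as shown in the diff):

#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define PAGE_SIZE   4096
#define TOTAL_PAGES 1024          /* illustrative pool size */

static uint8_t page_used[TOTAL_PAGES];   /* 0 = free, 1 = allocated */
static size_t  alloc_start = 0;

/* First-fit: find npages consecutive free pages, mark them and return the
 * physical address of the first page, or 0 on failure (page 0 is reserved). */
static size_t get_pages_sketch(uint32_t npages)
{
	size_t i, j;

	if (!npages)
		return 0;

	for (i = alloc_start; i + npages <= TOTAL_PAGES; i++) {
		for (j = 0; j < npages && !page_used[i + j]; j++)
			;
		if (j == npages) {
			memset(&page_used[i], 1, npages);
			alloc_start = i + npages;
			return i * PAGE_SIZE;
		}
		i += j;   /* skip past the used page that broke the run */
	}

	return 0;
}

int main(void)
{
	page_used[0] = 1;                                        /* pretend page 0 is taken */
	printf("first block:  0x%zx\n", get_pages_sketch(4));    /* 0x1000 */
	printf("second block: 0x%zx\n", get_pages_sketch(2));    /* 0x5000 */
	return 0;
}

The matching release path in the diff (task_free, called from kfree) works the other way around: it unmaps the virtual range with vm_free(), translates each page back to its frame with virt_to_phys(), and clears the corresponding bitmap marks under bitmap_lock.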