- complete restart of the project

- support of TSS-based task switching
- add a mailbox template
- support of user-level tasks
- support of system calls (see the API sketch below)
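For orientation, a minimal sketch of the task API introduced by this commit (signatures taken from include/metalsvm/tasks.h and include/metalsvm/syscall.h; the function names hello, user_hello and demo are placeholders, not part of the commit):

#include <metalsvm/stdio.h>
#include <metalsvm/tasks.h>
#include <metalsvm/syscall.h>

static int STDCALL hello(void* arg)
{
        kputs((char*) arg);
        return 0;                   /* delivered to join_task() via the per-task mailbox */
}

static int STDCALL user_hello(void* arg)
{
        SYSCALL1(__NR_exit, 0);     /* user-level tasks leave through the exit system call */
        return 0;
}

static void demo(void)
{
        tid_t id;
        int result;

        create_kernel_task(&id, hello, "Hello from a kernel task\n");
        join_task(id, &result);                       /* blocks until hello() has returned */
        create_user_task(&id, user_hello, NULL, 0);   /* 0 selects the default stack size */
}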


git-svn-id: http://svn.lfbs.rwth-aachen.de/svn/scc/trunk/MetalSVM@47 315a16e6-25f9-4109-90ae-ca3045a26c18
This commit is contained in:
stefan 2010-08-09 11:47:51 +00:00
parent 9160888a8d
commit 71188c92f9
35 changed files with 1061 additions and 517 deletions

View file

@ -1,8 +1,7 @@
export TOPDIR = /home/stefan/SCC/MetalSVM
export ARCH = x86
NAME = metalsvm.bin
export LIBNAME = $(TOPDIR)/libmetalsvm.a
export LIBLWIP = $(TOPDIR)/liblwip.a
OBJS = $(shell find . -name *.o)
export MAKE = make
export ASM = nasm
@ -16,19 +15,23 @@ export RM = rm -rf
LD = ld
LDFLAGS = -T link.ld
SUBDIRS = libkern kernel mm arch lwip
SUBDIRS = libkern kernel mm arch #lwip drivers
default:
$(MAKE) all
for i in $(SUBDIRS); do $(MAKE) -C $$i default; done
$(MAKE) link
all:
for i in $(SUBDIRS); do $(MAKE) -C $$i all; done
$(LD) $(LDFLAGS) -o $(NAME) $(TOPDIR)/arch/$(ARCH)/kernel/entry.o $(LIBNAME) $(LIBLWIP)
$(MAKE) link
link:
$(LD) $(LDFLAGS) -o $(NAME) $(OBJS)
clean:
$(RM) $(NAME) $(LIBNAME) $(LIBLWIP) *~
for i in $(SUBDIRS); do $(MAKE) -C $$i clean; done
$(RM) $(NAME) *~
for i in $(SUBDIRS); do $(MAKE) -k -C $$i clean; done
depend:
for i in $(SUBDIRS); do $(MAKE) -C $$i depend; done
for i in $(SUBDIRS); do $(MAKE) -k -C $$i depend; done

View file

@ -1,7 +1,7 @@
SUBDIRS = $(ARCH)
default:
$(MAKE) all
for i in $(SUBDIRS); do $(MAKE) -C $$i default; done
all:
for i in $(SUBDIRS); do $(MAKE) -C $$i all; done

View file

@ -1,7 +1,7 @@
SUBDIRS = kernel #lib
default:
$(MAKE) all
for i in $(SUBDIRS); do $(MAKE) -C $$i default; done
all:
for i in $(SUBDIRS); do $(MAKE) -C $$i all; done

View file

@ -33,30 +33,31 @@ typedef uint32_t atomic_uint32_t;
* protocol is automatically implemented for the duration of the exchange
* operation, regardless of the presence or absence of the LOCK prefix.
*/
inline static atomic_uint32_t atomic_uint32_test_and_set(atomic_uint32_t* d) {
atomic_uint32_t ret = 1;
inline static atomic_uint32_t atomic_uint32_test_and_set(atomic_uint32_t* d, uint32_t ret) {
asm volatile ("xchgl %0, %1" : "=r"(ret) : "m"(*d), "0"(ret) : "memory");
return ret;
}
inline static void atomic_uint32_inc(atomic_uint32_t* d) {
asm volatile ("lock addl 1, %0" : "+m"(*d));
inline static atomic_uint32_t atomic_uint32_inc(atomic_uint32_t* d) {
asm volatile ("lock incl %0" : "+m"(*d));
return *d;
}
inline static void atomic_uint32_dec(atomic_uint32_t* d) {
asm volatile ("lock subl 1, %0" : "+m"(*d));
inline static atomic_uint32_t atomic_uint32_dec(atomic_uint32_t* d) {
asm volatile ("lock decl %0" : "+m"(*d));
return *d;
}
inline static void atomic_uint32_add(atomic_uint32_t *d, int32_t i)
inline static atomic_uint32_t atomic_uint32_add(atomic_uint32_t *d, int32_t i)
{
asm volatile("lock addl %1, %0" : "+m" (*d) : "ir" (i));
return *d;
}
inline static void atomic_uint32_sub(atomic_uint32_t *d, int32_t i)
inline static atomic_uint32_t atomic_uint32_sub(atomic_uint32_t *d, int32_t i)
{
asm volatile("lock subl %1, %0" : "+m" (*d) : "ir" (i));
return *d;
}
inline static atomic_uint32_t atomic_uint32_read(atomic_uint32_t *d)
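For illustration: the new second parameter of atomic_uint32_test_and_set() is the value to be exchanged in, and the return value is the previous contents. A minimal acquire loop, modelled on the spinlock_lock() change further down in this commit (the helper name lock_sketch is ours, and it assumes the spinlock_t layout from include/metalsvm/spinlock.h):

static inline void lock_sketch(spinlock_t* s)
{
        /* xchg returns the old value: 0 means the lock was free and is now ours */
        while (atomic_uint32_test_and_set(&s->lock, 1))
                reschedule();
}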

View file

@ -22,6 +22,15 @@
#include <metalsvm/stddef.h>
#define IDT_FLAG_PRESENT 0x80
#define IDT_FLAG_RING0 0x00
#define IDT_FLAG_RING3 0x60
#define IDT_FLAG_16BIT 0x00
#define IDT_FLAG_32BIT 0x08
#define IDT_FLAG_INTTRAP 0x06
#define IDT_FLAG_TRAPGATE 0x07
#define IDT_FLAG_TASKGATE 0x05
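For example, the ring-3 callable trap gate installed for the system-call vector in idt.c combines these bits as follows (the variable name is illustrative; the value is simply the OR of the constants above):

/* 0x80 | 0x60 | 0x08 | 0x07 == 0xEF */
unsigned char syscall_gate_flags =
        IDT_FLAG_PRESENT | IDT_FLAG_RING3 | IDT_FLAG_32BIT | IDT_FLAG_TRAPGATE;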
#ifdef __cplusplus
extern "C" {
#endif

View file

@ -26,7 +26,7 @@
extern "C" {
#endif
typedef void (*irq_handler_t)(struct regs *);
typedef void (*irq_handler_t)(struct state *);
/* This installs a custom IRQ handler for the given IRQ */
void irq_install_handler(unsigned int, irq_handler_t);
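A handler written against the new signature receives the trap frame as struct state. A sketch (my_irq_handler and my_driver_init are placeholders; it assumes asm/irq.h and the header defining struct state are included):

static void my_irq_handler(struct state* s)
{
        /* s->int_no etc. are filled in by irq_common_stub in entry.asm */
}

static void my_driver_init(void)
{
        irq_install_handler(1, my_irq_handler);   /* e.g. IRQ 1, the keyboard */
}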

View file

@ -27,11 +27,15 @@ extern "C" {
typedef unsigned int size_t;
/* This defines what the stack looks like after an ISR was running */
struct regs {
unsigned int gs, fs, es, ds; /* pushed the segs last */
unsigned int edi, esi, ebp, esp, ebx, edx, ecx, eax; /* pushed by 'pusha' */
unsigned int int_no, err_code; /* our 'push byte #' and ecodes do this */
unsigned int eip, cs, eflags, useresp, ss; /* pushed by the processor automatically */
struct state {
/*
* We switched from software- to hardware-based multitasking.
* Therefore, we no longer save the registers by hand.
*/
/*unsigned int gs, fs, es, ds; */ /* pushed the segs last */
unsigned int edi, esi, ebp, esp, ebx, edx, ecx, eax; /* pushed by 'pusha' */
unsigned int int_no, err_code; /* our 'push byte #' and ecodes do this */
/*unsigned int eip, cs, eflags, useresp, ss;*/ /* pushed by the processor automatically */
};
#ifdef __cplusplus

View file

@ -0,0 +1,39 @@
/*
* Copyright 2010 Stefan Lankes, Chair for Operating Systems,
* RWTH Aachen University
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* This file is part of MetalSVM.
*/
#ifndef __ARCH_SYSCALL_H__
#define __ARCH_SYSCALL_H__
#include <metalsvm/stddef.h>
#ifdef __cplusplus
extern "C" {
#endif
#define _STR(token) #token
#define _SYSCALLSTR(x) "int $" _STR(x) " "
#define SYSCALL0(NR) asm volatile (_SYSCALLSTR(INT_SYSCALL) : : "a"(NR))
#define SYSCALL1(NR, ARG1) asm volatile (_SYSCALLSTR(INT_SYSCALL) : : "a" (NR), "b" (ARG1))
#ifdef __cplusplus
}
#endif
#endif
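In use, the macros load the system-call number into EAX and the first argument into EBX, then trigger int $INT_SYSCALL (INT_SYSCALL is defined elsewhere in the tree). A usage sketch with the __NR_* numbers from include/metalsvm/syscall.h (user_sketch is an illustrative name):

static void user_sketch(void)
{
        SYSCALL1(__NR_write, "Hello from user space\n");  /* eax = __NR_write, ebx = string pointer */
        SYSCALL1(__NR_exit, 0);                           /* the kernel terminates the calling task */
}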

View file

@ -24,53 +24,8 @@
#include <metalsvm/stddef.h>
#include <metalsvm/tasks_types.h>
void NORETURN leave_task(void);
inline static int create_default_frame(task_t* task, entry_point_t ep, void* arg)
{
unsigned int* top = (unsigned int*) task->top;
unsigned int* esp;
unsigned int* ebp;
*top = 0xDEADBEAF; /* dead-end */
ebp = top;
top--;
*top = (unsigned int) arg; /* argument of the function ep */
top--;
*top = (unsigned int) leave_task; /* exit function */
top--;
*top = (unsigned int) ep; /* entry point */
top--;
*top = 0x202; /* EFLAGS */
esp = top;
top--;
*top = 0x00; /* EAX */
top--;
*top = 0x00; /* ECX */
top--;
*top = 0x00; /* EDX */
top--;
*top = 0x00; /* EBX */
top--;
*top = (unsigned int) esp; /* ESP */
top--;
*top = (unsigned int) ebp; /* EBP */
top --;
*top = 0x00; /* ESI */
top --;
*top = 0x00; /* EDI */
top --;
*top = 0x10; /* DS */
top--;
*top = 0x10; /* ES */
top--;
*top = 0x10; /* FS */
top--;
*top = 0x10; /* GS */
task->top = (unsigned char*) top;
return 0;
}
int create_default_frame(task_t* task, entry_point_t ep, void* arg, int ring);
int register_task(task_t* task);
void reschedule(void);
#endif

View file

@ -0,0 +1,58 @@
/*
* Copyright 2010 Stefan Lankes, Chair for Operating Systems,
* RWTH Aachen University
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* This file is part of MetalSVM.
*/
#ifndef __ARCH_TSS_H__
#define __ARCH_TSS_H__
#include <stddef.h>
#ifdef __cplusplus
extern "C" {
#endif
typedef struct {
uint16_t backlink, __blh;
uint32_t esp0;
uint16_t ss0, __ss0h;
uint32_t esp1;
uint16_t ss1, __ss1h;
uint32_t esp2;
uint16_t ss2, __ss2h;
uint32_t cr3;
uint32_t eip;
uint32_t eflags;
uint32_t eax, ecx, edx, ebx;
uint32_t esp, ebp, esi, edi;
uint16_t es, __esh;
uint16_t cs, __csh;
uint16_t ss, __ssh;
uint16_t ds, __dsh;
uint16_t fs, __fsh;
uint16_t gs, __gsh;
uint16_t ldt, __ldth;
uint16_t trace, bitmap;
} __attribute__ ((packed)) tss_t;
/*extern tss_t task_state_segments[MAX_TASKS];*/
#ifdef __cplusplus
}
#endif
#endif

View file

@ -1,5 +1,5 @@
C_source = gdt.c idt.c isrs.c irq.c vga.c kb.c timer.c multiboot.c pci.c
ASM_source = schedule.asm
C_source = syscall.c gdt.c kb.c timer.c irq.c isrs.c idt.c vga.c multiboot.c pci.c
ASM_source = entry.asm
OBJS += $(patsubst %.c, %.o, $(filter %.c, $(C_source)))
OBJS += $(patsubst %.asm, %.o, $(filter %.asm, $(ASM_source)))
@ -11,15 +11,10 @@ OBJS += $(patsubst %.asm, %.o, $(filter %.asm, $(ASM_source)))
%.o : %.asm
$(ASM) $(ASMFLAGS) -o $@ $<
default:
$(MAKE) all
all: $(OBJS) entry.o
$(MAKE) $(LIBNAME)
$(LIBNAME): $(OBJS) entry.o
$(AR) $(ARFLAGS) $(LIBNAME) $(OBJS)
default: $(OBJS)
all: $(OBJS)
clean:
$(RM) *~ *.o $(NAME)

View file

@ -129,225 +129,319 @@ global isr28
global isr29
global isr30
global isr31
global isrsyscall
; 0: Divide By Zero Exception
isr0:
cli
; isr0 - isr31 are registered as "Interrupt Gate"
; Therefore, the interrupt flag (IF) is already cleared.
; cli
push byte 0
push byte 0
jmp isr_common_stub
; 1: Debug Exception
isr1:
cli
; isr0 - isr31 are registered as "Interrupt Gate"
; Therefore, the interrupt flag (IF) is already cleared.
; cli
push byte 0
push byte 1
jmp isr_common_stub
; 2: Non Maskable Interrupt Exception
isr2:
cli
; isr0 - isr31 are registered as "Interrupt Gate"
; Therefore, the interrupt flag (IF) is already cleared.
; cli
push byte 0
push byte 2
jmp isr_common_stub
; 3: Int 3 Exception
isr3:
cli
; isr0 - isr31 are registered as "Interrupt Gate"
; Therefore, the interrupt flag (IF) is already cleared.
; cli
push byte 0
push byte 3
jmp isr_common_stub
; 4: INTO Exception
isr4:
cli
; isr0 - isr31 are registered as "Interrupt Gate"
; Therefore, the interrupt flag (IF) is already cleared.
; cli
push byte 0
push byte 4
jmp isr_common_stub
; 5: Out of Bounds Exception
isr5:
cli
; isr0 - isr31 are registered as "Interrupt Gate"
; Therefore, the interrupt flag (IF) is already cleared.
; cli
push byte 0
push byte 5
jmp isr_common_stub
; 6: Invalid Opcode Exception
isr6:
cli
; isr0 - isr31 are registered as "Interrupt Gate"
; Therefore, the interrupt flag (IF) is already cleared.
; cli
push byte 0
push byte 6
jmp isr_common_stub
; 7: Coprocessor Not Available Exception
isr7:
cli
; isr0 - isr31 are registered as "Interrupt Gate"
; Therefore, the interrupt flag (IF) is already cleared.
; cli
push byte 0
push byte 7
jmp isr_common_stub
; 8: Double Fault Exception (With Error Code!)
isr8:
cli
; isr0 - isr31 are registered as "Interrupt Gate"
; Therefore, the interrupt flag (IF) is already cleared.
; cli
push byte 8
jmp isr_common_stub
; 9: Coprocessor Segment Overrun Exception
isr9:
cli
; isr0 - isr31 are registered as "Interrupt Gate"
; Therefore, the interrupt flag (IF) is already cleared.
; cli
push byte 0
push byte 9
jmp isr_common_stub
; 10: Bad TSS Exception (With Error Code!)
isr10:
cli
; isr0 - isr31 are registered as "Interrupt Gate"
; Therefore, the interrupt flag (IF) is already cleared.
; cli
push byte 10
jmp isr_common_stub
; 11: Segment Not Present Exception (With Error Code!)
isr11:
cli
; isr0 - isr31 are registered as "Interrupt Gate"
; Therefore, the interrupt flag (IF) is already cleared.
; cli
push byte 11
jmp isr_common_stub
; 12: Stack Fault Exception (With Error Code!)
isr12:
cli
; isr0 - isr31 are registered as "Interrupt Gate"
; Therefore, the interrupt flag (IF) is already cleared.
; cli
push byte 12
jmp isr_common_stub
; 13: General Protection Fault Exception (With Error Code!)
isr13:
cli
; isr0 - isr31 are registered as "Interrupt Gate"
; Therefore, the interrupt flag (IF) is already cleared.
; cli
push byte 13
jmp isr_common_stub
; 14: Page Fault Exception (With Error Code!)
isr14:
cli
; isr0 - isr31 are registered as "Interrupt Gate"
; Therefore, the interrupt flag (IF) is already cleared.
; cli
push byte 14
jmp isr_common_stub
; 15: Reserved Exception
isr15:
cli
; isr0 - isr31 are registered as "Interrupt Gate"
; Therefore, the interrupt flag (IF) is already cleared.
; cli
push byte 0
push byte 15
jmp isr_common_stub
; 16: Floating Point Exception
isr16:
cli
; isr0 - isr31 are registered as "Interrupt Gate"
; Therefore, the interrupt flag (IF) is already cleared.
; cli
push byte 0
push byte 16
jmp isr_common_stub
; 17: Alignment Check Exception
isr17:
cli
; isr0 - isr31 are registered as "Interrupt Gate"
; Therefore, the interrupt flag (IF) is already cleared.
; cli
push byte 0
push byte 17
jmp isr_common_stub
; 18: Machine Check Exception
isr18:
cli
; isr0 - isr31 are registered as "Interrupt Gate"
; Therefore, the interrupt flag (IF) is already cleared.
; cli
push byte 0
push byte 18
jmp isr_common_stub
; 19: Reserved
isr19:
cli
; isr0 - isr31 are registered as "Interrupt Gate"
; Therefore, the interrupt flag (IF) is already cleared.
; cli
push byte 0
push byte 19
jmp isr_common_stub
; 20: Reserved
isr20:
cli
; isr0 - isr31 are registered as "Interrupt Gate"
; Therefore, the interrupt flag (IF) is already cleared.
; cli
push byte 0
push byte 20
jmp isr_common_stub
; 21: Reserved
isr21:
cli
; isr0 - isr31 are registered as "Interrupt Gate"
; Therefore, the interrupt flag (IF) is already cleared.
; cli
push byte 0
push byte 21
jmp isr_common_stub
; 22: Reserved
isr22:
cli
; isr0 - isr31 are registered as "Interrupt Gate"
; Therefore, the interrupt flag (IF) is already cleared.
; cli
push byte 0
push byte 22
jmp isr_common_stub
; 23: Reserved
isr23:
cli
; isr0 - isr31 are registered as "Interrupt Gate"
; Therefore, the interrupt flag (IF) is already cleared.
; cli
push byte 0
push byte 23
jmp isr_common_stub
; 24: Reserved
isr24:
cli
; isr0 - isr31 are registered as "Interrupt Gate"
; Therefore, the interrupt flag (IF) is already cleared.
; cli
push byte 0
push byte 24
jmp isr_common_stub
; 25: Reserved
isr25:
cli
; isr0 - isr31 are registered as "Interrupt Gate"
; Therefore, the interrupt flag (IF) is already cleared.
; cli
push byte 0
push byte 25
jmp isr_common_stub
; 26: Reserved
isr26:
cli
; isr0 - isr31 are registered as "Interrupt Gate"
; Therefore, the interrupt flag (IF) is already cleared.
; cli
push byte 0
push byte 26
jmp isr_common_stub
; 27: Reserved
isr27:
cli
; isr0 - isr31 are registered as "Interrupt Gate"
; Therefore, the interrupt flag (IF) is already cleared.
; cli
push byte 0
push byte 27
jmp isr_common_stub
; 28: Reserved
isr28:
cli
; isr0 - isr31 are registered as "Interrupt Gate"
; Therefore, the interrupt flag (IF) is already cleared.
; cli
push byte 0
push byte 28
jmp isr_common_stub
; 29: Reserved
isr29:
cli
; isr0 - isr31 are registered as "Interrupt Gate"
; Therefore, the interrupt flag (IF) is already cleared.
; cli
push byte 0
push byte 29
jmp isr_common_stub
; 30: Reserved
isr30:
cli
; isr0 - isr31 are registered as "Interrupt Gate"
; Therefore, the interrupt flag (IF) is already cleared.
; cli
push byte 0
push byte 30
jmp isr_common_stub
; 31: Reserved
isr31:
cli
; isr0 - isr31 are registered as "Interrupt Gate"
; Therefore, the interrupt flag (IF) is already cleared.
; cli
push byte 0
push byte 31
jmp isr_common_stub
extern syscall_handler
; used to implement system calls
isrsyscall:
pusha
; We switched from software- to hardware-based multitasking.
; Therefore, we no longer save the registers by hand.
; push ds
; push es
; push fs
; push gs
; mov ax, 0x10
; mov ds, ax
; mov es, ax
; mov fs, ax
; mov gs, ax
push esp
call syscall_handler
add esp, 4
; pop gs
; pop fs
; pop es
; pop ds
popa
iret
; We call a C function in here. We need to let the assembler know
; that 'fault_handler' exists in another file
extern fault_handler
@ -357,24 +451,28 @@ extern fault_handler
; and finally restores the stack frame.
isr_common_stub:
pusha
push ds
push es
push fs
push gs
mov ax, 0x10
mov ds, ax
mov es, ax
mov fs, ax
mov gs, ax
mov eax, esp
push eax
mov eax, fault_handler
call eax
pop eax
pop gs
pop fs
pop es
pop ds
; We switched from software- to hardware-based multitasking.
; Therefore, we no longer save the registers by hand.
pusha
; push ds
; push es
; push fs
; push gs
; mov ax, 0x10
; mov ds, ax
; mov es, ax
; mov fs, ax
; mov gs, ax
push esp
call fault_handler
add esp, 4
; pop gs
; pop fs
; pop es
; pop ds
popa
add esp, 8
iret
@ -396,143 +494,243 @@ global irq13
global irq14
global irq15
extern irq_handler
extern current_task
extern scheduler
global reschedule
reschedule:
cli
pusha
push DWORD [current_task]
call scheduler
pop ebx
mov eax, DWORD [current_task]
cmp eax, ebx
je no_task_switch1
mov eax, [eax]
add ax, WORD 5
mov bx, WORD 8
mul bx
mov [hack1+5], ax
hack1:
jmp 0x00 : 0xDEADBEAF
no_task_switch1:
popa
sti
ret
; 32: IRQ0
irq0:
cli
; irq0 - irq15 are registered as "Interrupt Gate"
; Therefore, the interrupt flag (IF) is already cleared.
; cli
push byte 0
push byte 32
jmp irq_common_stub
pusha
; We switched from software- to hardware-based multitasking.
; Therefore, we no longer save the registers by hand.
; push ds
; push es
; push fs
; push gs
; mov ax, 0x10
; mov ds, ax
; mov es, ax
; mov fs, ax
; mov gs, ax
push esp
call irq_handler
add esp, 4
mov eax, DWORD [current_task]
push eax
call scheduler
pop ebx
mov eax, DWORD [current_task]
cmp eax, ebx
je no_task_switch2
mov eax, [eax]
add ax, WORD 5
mov bx, WORD 8
mul bx
mov [hack2+5], ax
hack2:
jmp 0x00 : 0xDEADBEAF
no_task_switch2:
; pop gs
; pop fs
; pop es
; pop ds
popa
add esp, 8
iret
; 33: IRQ1
irq1:
cli
; irq0 - irq15 are registered as "Interrupt Gate"
; Therefore, the interrupt flag (IF) is already cleared.
; cli
push byte 0
push byte 33
jmp irq_common_stub
; 34: IRQ2
irq2:
cli
; irq0 - irq15 are registered as "Interrupt Gate"
; Therefore, the interrupt flag (IF) is already cleared.
; cli
push byte 0
push byte 34
jmp irq_common_stub
; 35: IRQ3
irq3:
cli
; irq0 - irq15 are registered as "Interrupt Gate"
; Therefore, the interrupt flag (IF) is already cleared.
; cli
push byte 0
push byte 35
jmp irq_common_stub
; 36: IRQ4
irq4:
cli
; irq0 - irq15 are registered as "Interrupt Gate"
; Therefore, the interrupt flag (IF) is already cleared.
; cli
push byte 0
push byte 36
jmp irq_common_stub
; 37: IRQ5
irq5:
cli
; irq0 - irq15 are registered as "Interrupt Gate"
; Therefore, the interrupt flag (IF) is already cleared.
; cli
push byte 0
push byte 37
jmp irq_common_stub
; 38: IRQ6
irq6:
cli
; irq0 - irq15 are registered as "Interrupt Gate"
; Therefore, the interrupt flag (IF) is already cleared.
; cli
push byte 0
push byte 38
jmp irq_common_stub
; 39: IRQ7
irq7:
cli
; irq0 - irq15 are registered as "Interrupt Gate"
; Therefore, the interrupt flag (IF) is already cleared.
; cli
push byte 0
push byte 39
jmp irq_common_stub
; 40: IRQ8
irq8:
cli
; irq0 - irq15 are registered as "Interrupt Gate"
; Therefore, the interrupt flag (IF) is already cleared.
; cli
push byte 0
push byte 40
jmp irq_common_stub
; 41: IRQ9
irq9:
cli
; irq0 - irq15 are registered as "Interrupt Gate"
; Therefore, the interrupt flag (IF) is already cleared.
; cli
push byte 0
push byte 41
jmp irq_common_stub
; 42: IRQ10
irq10:
cli
; irq0 - irq15 are registered as "Interrupt Gate"
; Therefore, the interrupt flag (IF) is already cleared.
; cli
push byte 0
push byte 42
jmp irq_common_stub
; 43: IRQ11
irq11:
cli
; irq0 - irq15 are registered as "Interrupt Gate"
; Therefore, the interrupt flag (IF) is already cleared.
; cli
push byte 0
push byte 43
jmp irq_common_stub
; 44: IRQ12
irq12:
cli
; irq0 - irq15 are registered as "Interrupt Gate"
; Therefore, the interrupt flag (IF) is already cleared.
; cli
push byte 0
push byte 44
jmp irq_common_stub
; 45: IRQ13
irq13:
cli
; irq0 - irq15 are registered as "Interrupt Gate"
; Therefore, the interrupt flag (IF) is already cleared.
; cli
push byte 0
push byte 45
jmp irq_common_stub
; 46: IRQ14
irq14:
cli
; irq0 - irq15 are registered as "Interrupt Gate"
; Therefore, the interrupt flag (IF) is already cleared.
; cli
push byte 0
push byte 46
jmp irq_common_stub
; 47: IRQ15
irq15:
cli
; irq0 - irq15 are registered as "Interrupt Gate"
; Therefore, the interrupt flag (IF) is already cleared.
; cli
push byte 0
push byte 47
jmp irq_common_stub
extern irq_handler
irq_common_stub:
pusha
push ds
push es
push fs
push gs
; We switched from software- to hardware-based multitasking.
; Therefore, we no longer save the registers by hand.
; push ds
; push es
; push fs
; push gs
mov ax, 0x10
mov ds, ax
mov es, ax
mov fs, ax
mov gs, ax
mov eax, esp
; mov ax, 0x10
; mov ds, ax
; mov es, ax
; mov fs, ax
; mov gs, ax
push eax
mov eax, irq_handler
call eax
pop eax
push esp
call irq_handler
add esp, 4
pop gs
pop fs
pop es
pop ds
; pop gs
; pop fs
; pop es
; pop ds
popa
add esp, 8
iret

View file

@ -18,26 +18,35 @@
*/
#include <metalsvm/string.h>
#include <metalsvm/stdlib.h>
#include <metalsvm/tasks.h>
#include <asm/tss.h>
/* Defines a GDT entry */
struct gdt_entry {
typedef struct {
unsigned short limit_low;
unsigned short base_low;
unsigned char base_middle;
unsigned char access;
unsigned char granularity;
unsigned char base_high;
} __attribute__ ((packed));
} __attribute__ ((packed)) gdt_entry_t;
struct gdt_ptr {
typedef struct {
unsigned short limit;
unsigned int base;
} __attribute__ ((packed));
} __attribute__ ((packed)) gdt_ptr_t;
#define GDT_ENTRIES 5
struct gdt_entry gdt[GDT_ENTRIES];
struct gdt_ptr gp;
#define GDT_ENTRIES (5+MAX_TASKS)
#if GDT_ENTRIES > 8192
#error Too many GDT entries!
#endif
gdt_ptr_t gp;
static tss_t task_state_segments[MAX_TASKS];
static gdt_entry_t gdt[GDT_ENTRIES];
static unsigned char kstacks[MAX_TASKS][KERNEL_STACK_SIZE];
/*
* This is in start.asm. We use this to properly reload
@ -45,6 +54,70 @@ struct gdt_ptr gp;
*/
extern void gdt_flush(void);
int register_task(task_t* task) {
uint32_t id = task->id;
uint16_t sel = (id+5)*8;
asm volatile ("mov %0, %%ax; ltr %%ax" : : "ir"(sel));
return 0;
}
int create_default_frame(task_t* task, entry_point_t ep, void* arg, int user)
{
uint16_t cs = user ? 0x1B : 0x08;
uint16_t ds = user ? 0x23 : 0x10;
uint32_t id;
if (BUILTIN_EXPECT(!task, 0))
return -1;
if (BUILTIN_EXPECT(user && !task->ustack, 0))
return -1;
if (BUILTIN_EXPECT(user && !task->stack_size, 0))
return -1;
id = task->id;
/* reset buffers */
memset(task_state_segments+id, 0x00, sizeof(tss_t));
memset(kstacks[id], 0xCD, KERNEL_STACK_SIZE);
if (user)
memset(task->ustack, 0XCD, task->stack_size);
/* set default values of all registers */
task_state_segments[id].cs = cs;
task_state_segments[id].ss = ds;
task_state_segments[id].ds = ds;
task_state_segments[id].fs = ds;
task_state_segments[id].gs = ds;
task_state_segments[id].es = ds;
task_state_segments[id].eflags = 0x1202;
task_state_segments[id].eip = (uint32_t) ep;
if (user)
task_state_segments[id].esp = (uint32_t) task->ustack + task->stack_size - sizeof(size_t);
else
task_state_segments[id].esp = (uint32_t) kstacks[id] + KERNEL_STACK_SIZE - sizeof(size_t);
/* build default stack frame */
*((size_t*)task_state_segments[id].esp) = 0xDEADBEAF; /* dead-end */
task_state_segments[id].ebp = task_state_segments[id].esp;
task_state_segments[id].esp -= sizeof(size_t);
*((size_t*)task_state_segments[id].esp) = (size_t) arg;
task_state_segments[id].esp -= sizeof(size_t);
if (user)
*((size_t*)task_state_segments[id].esp) = (size_t) leave_user_task;
else
*((size_t*)task_state_segments[id].esp) = (size_t) leave_kernel_task;
/* setup for the kernel stack frame */
task_state_segments[id].ss0 = 0x10;
if (user)
task_state_segments[id].esp0 = (uint32_t) kstacks[id] + KERNEL_STACK_SIZE - sizeof(size_t);
else
task_state_segments[id].esp0 = task_state_segments[id].esp;
return 0;
}
/* Setup a descriptor in the Global Descriptor Table */
static void gdt_set_gate(int num, unsigned long base, unsigned long limit,
unsigned char access, unsigned char gran)
@ -73,8 +146,13 @@ static void gdt_set_gate(int num, unsigned long base, unsigned long limit,
*/
void gdt_install(void)
{
unsigned int i;
memset(task_state_segments, 0x00, MAX_TASKS*sizeof(tss_t));
memset(gdt, 0x00, GDT_ENTRIES*sizeof(gdt_entry_t));
/* Setup the GDT pointer and limit */
gp.limit = (sizeof(struct gdt_entry) * GDT_ENTRIES) - 1;
gp.limit = (sizeof(gdt_entry_t) * GDT_ENTRIES) - 1;
gp.base = (unsigned int) &gdt;
/* Our NULL descriptor */
@ -104,6 +182,13 @@ void gdt_install(void)
*/
gdt_set_gate(4, 0, 0xFFFFFFFF, 0xF2, 0xCF);
/*
* Create TSS for each task (we use these segments for task switching)
*/
//gdt_set_gate(5, (unsigned long) task_state_segments, sizeof(tss_t)-1, 0x8B, 0x4F);
for(i=0; i<MAX_TASKS; i++)
gdt_set_gate(5+i, (unsigned long) (task_state_segments+i), sizeof(tss_t)-1, 0xE9, 0x4F);
/* Flush out the old GDT and install the new changes! */
gdt_flush();
}
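The selector arithmetic used by register_task() above, and by the far-jump patching in entry.asm, follows directly from this layout: the TSS of task i sits in GDT slot 5+i and every descriptor is 8 bytes wide. As a sketch (tss_selector is an illustrative helper, not part of the commit; it assumes the fixed-width types from metalsvm/stddef.h):

static inline uint16_t tss_selector(uint32_t id)
{
        /* entries 0-4 are the flat code/data segments, entry 5+i holds the TSS of task i */
        return (uint16_t) ((id + 5) * 8);    /* task 0 -> 0x28, task 1 -> 0x30, ... */
}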

View file

@ -21,18 +21,18 @@
#include <asm/idt.h>
/* Defines an IDT entry */
struct idt_entry {
typedef struct {
unsigned short base_lo;
unsigned short sel;
unsigned char always0;
unsigned char flags;
unsigned short base_hi;
} __attribute__ ((packed));
} __attribute__ ((packed)) idt_entry_t;
struct idt_ptr {
typedef struct {
unsigned short limit;
unsigned int base;
} __attribute__ ((packed));
} __attribute__ ((packed)) idt_ptr_t;
/*
* Declare an IDT of 256 entries. Although we will only use the
@ -42,8 +42,8 @@ struct idt_ptr {
* for which the 'presence' bit is cleared (0) will generate an
* "Unhandled Interrupt" exception
*/
struct idt_entry idt[256];
struct idt_ptr idtp;
static idt_entry_t idt[256];
idt_ptr_t idtp;
/* This exists in 'start.asm', and is used to load our IDT */
extern void idt_load(void);
@ -67,18 +67,21 @@ void idt_set_gate(unsigned char num, unsigned long base, unsigned short sel,
idt[num].flags = flags;
}
extern void isrsyscall(void);
/* Installs the IDT */
void idt_install(void)
{
/* Sets the special IDT pointer up, just like in 'gdt.c' */
idtp.limit = (sizeof(struct idt_entry) * 256) - 1;
idtp.limit = (sizeof(idt_entry_t) * 256) - 1;
idtp.base = (unsigned int)&idt;
/* Clear out the entire IDT, initializing it to zeros */
memset(&idt, 0, sizeof(struct idt_entry) * 256);
memset(&idt, 0, sizeof(idt_entry_t) * 256);
/* Add any new ISRs to the IDT here using idt_set_gate */
idt_set_gate(INT_SYSCALL, (unsigned int)isrsyscall, 0x08,
IDT_FLAG_PRESENT|IDT_FLAG_RING3|IDT_FLAG_32BIT|IDT_FLAG_TRAPGATE);
/* Points the processor's internal register to the new IDT */
idt_load();

View file

@ -18,6 +18,7 @@
*/
#include <metalsvm/string.h>
#include <metalsvm/tasks.h>
#include <asm/irq.h>
#include <asm/idt.h>
#include <asm/isrs.h>
@ -26,22 +27,22 @@
* These are own ISRs that point to our special IRQ handler
* instead of the regular 'fault_handler' function
*/
extern void irq0();
extern void irq1();
extern void irq2();
extern void irq3();
extern void irq4();
extern void irq5();
extern void irq6();
extern void irq7();
extern void irq8();
extern void irq9();
extern void irq10();
extern void irq11();
extern void irq12();
extern void irq13();
extern void irq14();
extern void irq15();
extern void irq0(void);
extern void irq1(void);
extern void irq2(void);
extern void irq3(void);
extern void irq4(void);
extern void irq5(void);
extern void irq6(void);
extern void irq7(void);
extern void irq8(void);
extern void irq9(void);
extern void irq10(void);
extern void irq11(void);
extern void irq12(void);
extern void irq13(void);
extern void irq14(void);
extern void irq15(void);
/*
* This array is actually an array of function pointers. We use
@ -130,18 +131,19 @@ void irq_init(void)
* an EOI command to the first controller. If you don't send
* an EOI, you won't raise any more IRQs
*/
void irq_handler(struct regs *r)
void irq_handler(struct state *s)
{
/* This is a blank function pointer */
void (*handler) (struct regs * r);
void (*handler) (struct state * s);
/*
* Find out if we have a custom handler to run for this
* IRQ, and then finally, run it
*/
handler = irq_routines[r->int_no - 32];
handler = irq_routines[s->int_no - 32];
if (handler) {
handler(r);
handler(s);
}
/*
@ -149,7 +151,7 @@ void irq_handler(struct regs *r)
* (meaning IRQ8 - 15), then we need to send an EOI to
* the slave controller
*/
if (r->int_no >= 40) {
if (s->int_no >= 40) {
outportb(0xA0, 0x20);
}

View file

@ -19,6 +19,8 @@
#include <metalsvm/stdio.h>
#include <metalsvm/string.h>
#include <metalsvm/tasks.h>
#include <asm/irqflags.h>
#include <asm/isrs.h>
#include <asm/idt.h>
@ -73,38 +75,70 @@ extern void isr31(void);
*/
void isrs_install(void)
{
idt_set_gate(0, (unsigned)isr0, 0x08, 0x8E);
idt_set_gate(1, (unsigned)isr1, 0x08, 0x8E);
idt_set_gate(2, (unsigned)isr2, 0x08, 0x8E);
idt_set_gate(3, (unsigned)isr3, 0x08, 0x8E);
idt_set_gate(4, (unsigned)isr4, 0x08, 0x8E);
idt_set_gate(5, (unsigned)isr5, 0x08, 0x8E);
idt_set_gate(6, (unsigned)isr6, 0x08, 0x8E);
idt_set_gate(7, (unsigned)isr7, 0x08, 0x8E);
idt_set_gate(8, (unsigned)isr8, 0x08, 0x8E);
idt_set_gate(9, (unsigned)isr9, 0x08, 0x8E);
idt_set_gate(10, (unsigned)isr10, 0x08, 0x8E);
idt_set_gate(11, (unsigned)isr11, 0x08, 0x8E);
idt_set_gate(12, (unsigned)isr12, 0x08, 0x8E);
idt_set_gate(13, (unsigned)isr13, 0x08, 0x8E);
idt_set_gate(14, (unsigned)isr14, 0x08, 0x8E);
idt_set_gate(15, (unsigned)isr15, 0x08, 0x8E);
idt_set_gate(16, (unsigned)isr16, 0x08, 0x8E);
idt_set_gate(17, (unsigned)isr17, 0x08, 0x8E);
idt_set_gate(18, (unsigned)isr18, 0x08, 0x8E);
idt_set_gate(19, (unsigned)isr19, 0x08, 0x8E);
idt_set_gate(20, (unsigned)isr20, 0x08, 0x8E);
idt_set_gate(21, (unsigned)isr21, 0x08, 0x8E);
idt_set_gate(22, (unsigned)isr22, 0x08, 0x8E);
idt_set_gate(23, (unsigned)isr23, 0x08, 0x8E);
idt_set_gate(24, (unsigned)isr24, 0x08, 0x8E);
idt_set_gate(25, (unsigned)isr25, 0x08, 0x8E);
idt_set_gate(26, (unsigned)isr26, 0x08, 0x8E);
idt_set_gate(27, (unsigned)isr27, 0x08, 0x8E);
idt_set_gate(28, (unsigned)isr28, 0x08, 0x8E);
idt_set_gate(29, (unsigned)isr29, 0x08, 0x8E);
idt_set_gate(30, (unsigned)isr30, 0x08, 0x8E);
idt_set_gate(31, (unsigned)isr31, 0x08, 0x8E);
idt_set_gate(0, (unsigned)isr0, 0x08,
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
idt_set_gate(1, (unsigned)isr1, 0x08,
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
idt_set_gate(2, (unsigned)isr2, 0x08,
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
idt_set_gate(3, (unsigned)isr3, 0x08,
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
idt_set_gate(4, (unsigned)isr4, 0x08,
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
idt_set_gate(5, (unsigned)isr5, 0x08,
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
idt_set_gate(6, (unsigned)isr6, 0x08,
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
idt_set_gate(7, (unsigned)isr7, 0x08,
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
idt_set_gate(8, (unsigned)isr8, 0x08,
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
idt_set_gate(9, (unsigned)isr9, 0x08,
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
idt_set_gate(10, (unsigned)isr10, 0x08,
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
idt_set_gate(11, (unsigned)isr11, 0x08,
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
idt_set_gate(12, (unsigned)isr12, 0x08,
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
idt_set_gate(13, (unsigned)isr13, 0x08,
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
idt_set_gate(14, (unsigned)isr14, 0x08,
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
idt_set_gate(15, (unsigned)isr15, 0x08,
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
idt_set_gate(16, (unsigned)isr16, 0x08,
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
idt_set_gate(17, (unsigned)isr17, 0x08,
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
idt_set_gate(18, (unsigned)isr18, 0x08,
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
idt_set_gate(19, (unsigned)isr19, 0x08,
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
idt_set_gate(20, (unsigned)isr20, 0x08,
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
idt_set_gate(21, (unsigned)isr21, 0x08,
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
idt_set_gate(22, (unsigned)isr22, 0x08,
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
idt_set_gate(23, (unsigned)isr23, 0x08,
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
idt_set_gate(24, (unsigned)isr24, 0x08,
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
idt_set_gate(25, (unsigned)isr25, 0x08,
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
idt_set_gate(26, (unsigned)isr26, 0x08,
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
idt_set_gate(27, (unsigned)isr27, 0x08,
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
idt_set_gate(28, (unsigned)isr28, 0x08,
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
idt_set_gate(29, (unsigned)isr29, 0x08,
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
idt_set_gate(30, (unsigned)isr30, 0x08,
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
idt_set_gate(31, (unsigned)isr31, 0x08,
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
}
/*
@ -126,16 +160,21 @@ static const char *exception_messages[] = {
/*
* All of our Exception handling Interrupt Service Routines will
* point to this function. This will tell us what exception has
* happened! Right now, we simply halt the system by hitting an
* endless loop. All ISRs disable interrupts while they are being
* happened! Right now, we simply abort the current task.
* All ISRs disable interrupts while they are being
* serviced as a 'locking' mechanism to prevent an IRQ from
* happening and messing up kernel data structures
*/
void fault_handler(struct regs *r)
void fault_handler(struct state *s)
{
if (r->int_no < 32) {
kputs(exception_messages[r->int_no]);
kputs(" Exception. System Halted!\n");
for (;;) ;
if (s->int_no < 32) {
kputs(exception_messages[s->int_no]);
kputs(" Exception.\n");
/* Now, we signal the PIC that we have handled the interrupt */
outportb(0x20, 0x20);
irq_enable();
abort();
}
}

View file

@ -64,7 +64,7 @@ static const unsigned char kbdus[128] = { 0, 27, '1', '2', '3', '4', '5', '6', '
};
/* Handles the keyboard interrupt */
static void keyboard_handler(struct regs *r)
static void keyboard_handler(struct state *r)
{
unsigned char scancode;

View file

@ -28,8 +28,7 @@ extern current_task
; unsigned char* top;
; unsigned int ip;
; tid_t id;
; unsigned char status;
; unsigned char idle;
; ...
; } task_t;
; After an interrupt, the original return address has to be pushed on the stack
@ -46,6 +45,7 @@ schedule_entry:
push es
push fs
push gs
mov ax, 0x10
mov ds, ax
mov es, ax
@ -67,6 +67,45 @@ schedule_entry:
; Scheduler, which switches to the new task
global schedule
schedule:
ret
cli
; return address is already on the stack (via call schedule)
pop eax
pushfd
push 0x08
push eax
push byte 0
push byte 32
pusha
push ds
push es
push fs
push gs
mov ax, 0x10
mov ds, ax
mov es, ax
mov fs, ax
mov gs, ax
mov eax, DWORD [current_task]
mov [eax], esp
;call get_new_task
mov DWORD [current_task], eax
mov esp, [eax]
pop gs
pop fs
pop es
pop ds
popa
add esp, 8
xor eax, eax ; return value is 0
iret
cli
pushfd
pusha
@ -74,6 +113,7 @@ schedule:
push es
push fs
push gs
mov ax, 0x10
mov ds, ax
mov es, ax
@ -88,7 +128,7 @@ L1:
mov eax, DWORD [current_task]
mov [eax], esp
call get_new_task
;call get_new_task
; set current task and restore esp
;mov DWORD [current_id], eax

arch/x86/kernel/syscall.c (new file, 40 lines)
View file

@ -0,0 +1,40 @@
/*
* Copyright 2010 Stefan Lankes, Chair for Operating Systems,
* RWTH Aachen University
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* This file is part of MetalSVM.
*/
#include <metalsvm/stdio.h>
#include <metalsvm/syscall.h>
#include <metalsvm/tasks.h>
void syscall_handler(struct state* s)
{
uint32_t id = current_task->id;
switch(s->eax)
{
case __NR_exit:
sys_exit(s->ebx);
break;
case __NR_write:
kputs((char*) s->ebx);
break;
default:
kputs("invalid system call\n");
break;
};
}

View file

@ -33,17 +33,13 @@
static volatile uint64_t timer_ticks = 0;
static unsigned int curr_timer_freq = 18;
void schedule_entry(void);
/*
* Handles the timer. In this case, it's very simple: We
* increment the 'timer_ticks' variable every time the
* timer fires.
*/
static void timer_handler(struct regs *r)
static void timer_handler(struct state *s)
{
/*unsigned int tmp;*/
/* Increment our 'tick counter' */
timer_ticks++;
@ -51,14 +47,8 @@ static void timer_handler(struct regs *r)
* Every TIMER_FREQ clocks (approximately 1 second), we will
* display a message on the screen
*/
/*tmp = (unsigned int) (timer_ticks & 0xFFFFFFFF);
if (tmp % TIMER_FREQ == 0) {
if (timer_ticks % TIMER_FREQ == 0) {
vga_puts("One second has passed\n");
}*/
if (current_task && !(current_task->ip)) {
current_task->ip = r->eip;
r->eip = (unsigned int) schedule_entry;
}
}
@ -71,7 +61,7 @@ void timer_wait(unsigned int ticks)
uint64_t eticks = timer_ticks + ticks;
while (timer_ticks < eticks)
schedule();
reschedule();
}
/*

View file

@ -28,73 +28,91 @@
extern "C" {
#endif
typedef struct {
void* buffer[MAILBOX_SIZE];
int wpos, rpos;
sem_t mails;
sem_t boxes;
} mailbox_t;
#define MAILBOX(name, type) \
typedef struct mailbox_##name { \
type buffer[MAILBOX_SIZE]; \
int wpos, rpos; \
sem_t mails; \
sem_t boxes; \
spinlock_t rlock, wlock; \
} mailbox_##name##_t; \
\
inline static int mailbox_##name##_init(mailbox_##name##_t* m) { \
if (BUILTIN_EXPECT(!m, 0)) \
return -1; \
\
memset(m->buffer, 0x00, sizeof(type)*MAILBOX_SIZE); \
m->wpos = m->rpos = 0; \
sem_init(&m->mails, 0); \
sem_init(&m->boxes, MAILBOX_SIZE); \
spinlock_init(&m->rlock); \
spinlock_init(&m->wlock); \
\
return 0; \
}\
\
inline static int mailbox_##name##_destroy(mailbox_##name##_t* m) { \
if (BUILTIN_EXPECT(!m, 0)) \
return -1; \
\
sem_destroy(&m->mails); \
sem_destroy(&m->boxes); \
spinlock_destroy(&m->rlock); \
spinlock_destroy(&m->wlock); \
\
return 0; \
} \
\
inline static int mailbox_##name##_post(mailbox_##name##_t* m, type mail) { \
if (BUILTIN_EXPECT(!m, 0)) \
return -1; \
\
sem_wait(&m->boxes); \
spinlock_lock(&m->wlock); \
m->buffer[m->wpos] = mail; \
m->wpos = (m->wpos+1) % MAILBOX_SIZE; \
spinlock_unlock(&m->wlock); \
sem_post(&m->mails); \
\
return 0; \
} \
\
inline static int mailbox_##name##_fetch(mailbox_##name##_t* m, type* mail) { \
if (BUILTIN_EXPECT(!m || !mail, 0)) \
return -1; \
\
sem_wait(&m->mails); \
spinlock_lock(&m->rlock); \
*mail = m->buffer[m->rpos]; \
m->rpos = (m->rpos+1) % MAILBOX_SIZE; \
spinlock_unlock(&m->rlock); \
sem_post(&m->boxes); \
\
return 0; \
} \
\
inline static int mailbox_##name##_tryfetch(mailbox_##name##_t* m, type* mail) { \
if (BUILTIN_EXPECT(!m || !mail, 0)) \
return -1; \
\
if (sem_trywait(&m->mails) != 0) \
return -1; \
spinlock_lock(&m->rlock); \
*mail = m->buffer[m->rpos]; \
m->rpos = (m->rpos+1) % MAILBOX_SIZE; \
spinlock_unlock(&m->rlock); \
sem_post(&m->boxes); \
\
return 0; \
}\
inline static int mailbox_init(mailbox_t* m) {
if (BUILTIN_EXPECT(!m, 0))
return -1;
memset(m->buffer, 0x00, sizeof(void*)*MAILBOX_SIZE);
m->wpos = m->rpos = 0;
sem_init(&m->mails, 0);
sem_init(&m->boxes, MAILBOX_SIZE);
return 0;
}
inline static int mailbox_destroy(mailbox_t* m) {
if (BUILTIN_EXPECT(!m, 0))
return -1;
sem_destroy(&m->mails);
sem_destroy(&m->boxes);
return 0;
}
inline static int mailbox_post(mailbox_t* m, void* mail) {
if (BUILTIN_EXPECT(!m, 0))
return -1;
sem_wait(&m->boxes);
m->buffer[m->wpos] = mail;
m->wpos = (m->wpos+1) % MAILBOX_SIZE;
sem_post(&m->mails);
return 0;
}
inline static int mailbox_fetch(mailbox_t* m, void** mail) {
if (BUILTIN_EXPECT(!m || !mail, 0))
return -1;
sem_wait(&m->mails);
*mail = m->buffer[m->rpos];
m->buffer[m->rpos] = NULL;
m->rpos = (m->rpos+1) % MAILBOX_SIZE;
sem_post(&m->boxes);
return 0;
}
inline static int mailbox_tryfetch(mailbox_t* m, void** mail) {
if (BUILTIN_EXPECT(!m || !mail, 0))
return -1;
if (sem_trywait(&m->mails) != 0)
return -1;
*mail = m->buffer[m->rpos];
m->buffer[m->rpos] = NULL;
m->rpos = (m->rpos+1) % MAILBOX_SIZE;
sem_post(&m->boxes);
return 0;
}
MAILBOX(int32, int32_t)
MAILBOX(int16, int16_t)
MAILBOX(int8, int8_t)
MAILBOX(uint32, uint32_t)
MAILBOX(uint16, uint16_t)
MAILBOX(uint8, uint8_t)
MAILBOX(ptr, void*)
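A usage sketch of the generated helpers (mailbox_sketch is an illustrative name; the mailbox_int32_* functions are the ones used by kernel/main.c in this commit):

static mailbox_int32_t mbox;

static void mailbox_sketch(void)
{
        int32_t mail;

        mailbox_int32_init(&mbox);
        mailbox_int32_post(&mbox, 42);        /* blocks while all MAILBOX_SIZE boxes are full */
        mailbox_int32_fetch(&mbox, &mail);    /* blocks until a mail has been posted; here: 42 */
        mailbox_int32_destroy(&mbox);
}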
#ifdef __cplusplus
}

View file

@ -32,10 +32,6 @@ extern atomic_size_t total_memory;
extern atomic_size_t total_allocated_memory;
extern atomic_size_t total_available_memory;
typedef struct {
atomic_size_t usage;
} mm_t;
int mmu_init(void);
#ifdef __cplusplus

View file

@ -22,7 +22,7 @@
#include <metalsvm/string.h>
#include <metalsvm/tasks.h>
#include <metalsvm/spinlocks.h>
#include <metalsvm/spinlock.h>
#ifdef __cplusplus
extern "C" {
@ -73,7 +73,7 @@ next_try:
s->pos = (s->pos + 1) % MAX_TASKS;
current_task->status = TASK_BLOCKED;
spinlock_unlock(&s->lock);
schedule();
reschedule();
goto next_try;
}

View file

@ -17,8 +17,8 @@
* This file is part of MetalSVM.
*/
#ifndef __SPINLOCKS_H__
#define __SPINLOCKS_H__
#ifndef __SPINLOCK_H__
#define __SPINLOCK_H__
#include <metalsvm/config.h>
#include <metalsvm/tasks.h>
@ -57,19 +57,11 @@ inline static int spinlock_lock(spinlock_t* s) {
if (BUILTIN_EXPECT(!s, 0))
return -1;
while (atomic_uint32_test_and_set(&(s->lock)))
schedule();
while(atomic_uint32_test_and_set(&s->lock, 1))
reschedule();
return 0;
}
inline static int spinlock_trylock(spinlock_t* s) {
if (BUILTIN_EXPECT(!s, 0))
return -1;
return !atomic_uint32_test_and_set(&(s->lock));
}
inline static int spinlock_unlock(spinlock_t* s) {
if (BUILTIN_EXPECT(!s, 0))
return -1;

View file

@ -0,0 +1,37 @@
/*
* Copyright 2010 Stefan Lankes, Chair for Operating Systems,
* RWTH Aachen University
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* This file is part of MetalSVM.
*/
#ifndef __SYSCALL_H__
#define __SYSCALL_H__
#include <metalsvm/config.h>
#include <asm/syscall.h>
#ifdef __cplusplus
extern "C" {
#endif
#define __NR_exit 0
#define __NR_write 1
#ifdef __cplusplus
}
#endif
#endif

View file

@ -29,15 +29,41 @@
extern "C" {
#endif
/* task, which is currently running */
extern task_t* current_task;
int multitasking_init(void);
int create_kernel_task(tid_t*, entry_point_t, void*, size_t);
int join_kernel_task(tid_t, void**);
void schedule(void);
task_t* get_new_task(void);
/* create a kernel task. */
int create_kernel_task(tid_t*, entry_point_t, void*);
/* create a user-level task; if sz is zero, the task is created with the default stack size */
int create_user_task(tid_t* id, entry_point_t ep, void* arg, size_t sz);
/* the current task blocks until the task with the given id has finished */
int join_task(tid_t id, int* result);
/* the timer interrupt uses this function for task switching */
void scheduler(void);
/*
* a blocked task will be woken up
* and its status changed to TASK_READY
*/
int wakeup_task(tid_t);
/* abort the current task */
void NORETURN abort(void);
/* this function is called when a kernel-level task exits */
void NORETURN leave_kernel_task(void);
/* this function is called when a user-level task exits */
void NORETURN leave_user_task(void);
/* system call to terminate a user-level task */
void NORETURN sys_exit(int);
#ifdef __cplusplus
}
#endif

View file

@ -22,7 +22,7 @@
#include <metalsvm/config.h>
#include <metalsvm/stddef.h>
#include <metalsvm/mmu.h>
#include <asm/atomic.h>
#ifdef __cplusplus
extern "C" {
@ -35,20 +35,20 @@ extern "C" {
#define TASK_FINISHED 4
#define TASK_IDLE 5
typedef void* (STDCALL *entry_point_t)(void*);
typedef int (STDCALL *entry_point_t)(void*);
typedef unsigned int tid_t;
struct mailbox_int32;
typedef struct {
unsigned char* top;
unsigned int ip;
tid_t id;
mm_t mm;
unsigned char* stack;
size_t stack_size;
unsigned char blocked_tasks[MAX_TASKS];
void* return_value;
unsigned char status;
} task_t;
tid_t id; /* task id = position in the task table */
unsigned char* ustack; /* stack of a user-level task */
size_t stack_size; /* only user-level tasks
* are able to specify their stack size
*/
atomic_size_t mem_usage;
struct mailbox_int32* mbox[MAX_TASKS];
uint32_t status;
} __attribute__((packed)) task_t;
#ifdef __cplusplus
}

View file

@ -1,20 +1,16 @@
C_source = main.c tasks.c processor.c
OBJS += $(patsubst %.c, %.o, $(filter %.c, $(C_source)))
ALLOBJS += $(patsubst %.c, %.o, $(filter %.c, $(C_source)))
# other implicit rules
%.o : %.c
$(CC) -c $(CFLAGS) -o $@ $<
default:
$(MAKE) all
all:
$(MAKE) $(LIBNAME)
default: $(OBJS)
all: $(OBJS)
$(LIBNAME): $(OBJS)
$(AR) $(ARFLAGS) $(LIBNAME) $(OBJS)
clean:
$(RM) *.o *~ $(NAME)

View file

@ -25,6 +25,7 @@
#include <metalsvm/processor.h>
#include <metalsvm/semaphore.h>
#include <metalsvm/mailbox.h>
#include <metalsvm/syscall.h>
#include <asm/irq.h>
#include <asm/kb.h>
#ifdef CONFIG_PCI
@ -42,16 +43,15 @@
#endif
static sem_t consuming, producing;
static mailbox_t mbox;
static mailbox_int32_t mbox;
static int val = 0;
extern const void kernel_start;
extern const void kernel_end;
void* STDCALL consumer(void* arg)
int STDCALL consumer(void* arg)
{
int i;
int* m = NULL;
int i, m = 0;
for(i=0; i<5; i++) {
sem_wait(&consuming);
@ -61,20 +61,17 @@ void* STDCALL consumer(void* arg)
}
for(i=0; i<5; i++) {
mailbox_fetch(&mbox, (void**) &m);
kprintf("Got mail %d\n", *m);
mailbox_int32_fetch(&mbox, &m);
kprintf("Got mail %d\n", m);
}
return NULL;
return 0;
}
void* STDCALL producer(void* arg)
int STDCALL producer(void* arg)
{
int i;
int mail[5] = {1, 2, 3, 4, 5};
tid_t id;
create_kernel_task(&id, consumer, NULL, 0);
for(i=0; i<5; i++) {
sem_wait(&producing);
@ -85,45 +82,62 @@ void* STDCALL producer(void* arg)
for(i=0; i<5; i++) {
//kprintf("Send mail %d\n", mail[i]);
mailbox_post(&mbox, mail+i);
mailbox_int32_post(&mbox, mail[i]);
}
join_kernel_task(id, NULL);
return NULL;
return 0;
}
void* STDCALL foo(void* arg)
int STDCALL foo(void* arg)
{
int i;
if (!arg)
return NULL;
return 0;
for(i=0; i<5; i++) {
kputs((char*) arg);
sleep(1);
}
return (void*) 42;
return 42;
}
void* STDCALL join_test(void* arg)
int STDCALL join_test(void* arg)
{
int ret;
tid_t id;
void* result = NULL;
int ret, result = -1234;
ret = create_kernel_task(&id, foo, "Hello from foo2\n", 0);
ret = create_kernel_task(&id, foo, "Hello from foo2\n");
kprintf("Wait for task %u: ret = %d\n", id, ret);
ret = join_kernel_task(id, &result);
kprintf("Task %u finished: ret = %d, result = %u\n", id, ret, (int)result);
ret = join_task(id, &result);
kprintf("Task %u finished: ret = %d, result = %d\n", id, ret, result);
return NULL;
return 0;
}
int STDCALL userfoo(void* arg)
{
int i;
if (!arg)
return 0;
for (i = 0; i < 5; i++) {
SYSCALL1(__NR_write, arg);
}
// demo of a general protection fault
//kprintf("test user\n");
/* task exit */
SYSCALL1(__NR_exit, 0);
return 0;
}
#ifdef CONFIG_LWIP
void* STDCALL lwip_task(void* arg)
int STDCALL lwip_task(void* arg)
{
struct netif netif;
struct ip_addr ipaddr;
@ -171,7 +185,7 @@ void* STDCALL lwip_task(void* arg)
int main(void)
{
tid_t id1, id2, id3;
tid_t id1, id2, id3, id4, id5;
#ifdef CONFIG_LWIP
tid_t lwip_id;
#endif
@ -204,19 +218,21 @@ int main(void)
sem_init(&producing, 1);
sem_init(&consuming, 0);
mailbox_init(&mbox);
mailbox_int32_init(&mbox);
sleep(5);
#ifdef CONFIG_LWIP
//create_kernel_task(&lwip_id, lwip_task, NULL, 0);
//create_kernel_task(&lwip_id, lwip_task, NULL);
#endif
create_kernel_task(&id1, foo, "Hello from foo1\n", 8192);
create_kernel_task(&id2, join_test, NULL, 0);
create_kernel_task(&id3, producer, NULL, 0);
create_kernel_task(&id1, foo, "Hello from foo1\n");
create_kernel_task(&id2, join_test, NULL);
create_kernel_task(&id3, producer, NULL);
create_kernel_task(&id4, consumer, NULL);
create_user_task(&id5, userfoo, "Hello from user process foo\n", 0);
current_task->status = TASK_IDLE;
schedule();
reschedule();
while(1) {
NOP8;

View file

@ -23,24 +23,25 @@
#include <metalsvm/mmu.h>
#include <metalsvm/tasks.h>
#include <metalsvm/processor.h>
#include <metalsvm/spinlocks.h>
#include <metalsvm/spinlock.h>
#include <metalsvm/mailbox.h>
#include <metalsvm/syscall.h>
task_t* current_task = NULL;
static task_t task_table[MAX_TASKS];
static spinlock_t table_lock = SPINLOCK_INIT;
int multitasking_init(void) {
memset(task_table, 0, sizeof(task_t)*MAX_TASKS);
task_table[0].id = 0;
task_table[0].mm.usage = 0;
task_table[0].stack = NULL;
task_table[0].stack_size = 8192;
memset(task_table, 0x00, sizeof(task_t)*MAX_TASKS);
task_table[0].status = TASK_RUNNING;
current_task = task_table;
current_task = task_table+0;
register_task(current_task);
return 0;
}
static void wakeup_blocked_tasks(void* result)
static void wakeup_blocked_tasks(int result)
{
unsigned int i;
@ -48,49 +49,60 @@ static void wakeup_blocked_tasks(void* result)
/* wake up blocked tasks */
for(i=0; i<MAX_TASKS; i++) {
if (current_task->blocked_tasks[i] && (task_table[i].status == TASK_BLOCKED)) {
task_table[i].return_value = result;
task_table[i].status = TASK_READY;
if (current_task->mbox[i]) {
mailbox_int32_post(current_task->mbox[i], result);
current_task->mbox[i] = NULL;
}
current_task->blocked_tasks[i] = 0;
}
spinlock_unlock_irqsave(&table_lock);
}
void NORETURN leave_task(void) {
void* result = NULL;
static void NORETURN do_exit(int arg) {
kprintf("Terminate task: %u, return value %d\n", current_task->id, arg);
get_return_value(result);
kprintf("Terminate task: %u, return value = %p\n", current_task->id, result);
wakeup_blocked_tasks(result);
wakeup_blocked_tasks(arg);
if (current_task->ustack)
kfree(current_task->ustack, current_task->stack_size);
if (current_task->mem_usage)
kprintf("Memory leak! Task %d did not release %d bytes\n", current_task->id, current_task->mem_usage);
current_task->status = TASK_FINISHED;
schedule();
reschedule();
kputs("Kernel panic: scheduler found no valid task\n");
while(1) {
NOP8;
}
}
void NORETURN leave_kernel_task(void) {
int result = 0;
get_return_value(result);
do_exit(result);
}
void NORETURN leave_user_task(void) {
int result = 0;
get_return_value(result);
SYSCALL1(__NR_exit, result);
kprintf("Kernel panic! Task %d comes back from syscall \"exit\"\n", current_task->id);
while(1) ;
}
void NORETURN sys_exit(int arg)
{
do_exit(arg);
}
void NORETURN abort(void) {
void* result = (void*) -1;
kprintf("Abort task: %u\n", current_task->id);
wakeup_blocked_tasks(result);
current_task->status = TASK_FINISHED;
schedule();
kputs("Kernel panic: scheduler found no valid task\n");
while(1) {
NOP8;
}
do_exit(-1);
}
int create_kernel_task(tid_t* id, entry_point_t ep, void* arg, size_t stack_size)
static int create_task(tid_t* id, entry_point_t ep, void* arg, size_t stack_size, int user)
{
int ret = -1;
unsigned int i;
@ -98,34 +110,32 @@ int create_kernel_task(tid_t* id, entry_point_t ep, void* arg, size_t stack_size
if (BUILTIN_EXPECT(!ep, 0))
return -1;
if (!stack_size)
if (user && !stack_size)
stack_size = DEFAULT_STACK_SIZE;
spinlock_lock_irqsave(&table_lock);
for(i=0; i<MAX_TASKS; i++) {
if (task_table[i].status == TASK_INVALID) {
if (task_table[i].stack)
kfree(task_table[i].stack, task_table[i].stack_size);
if (task_table[i].mm.usage) {
kprintf("Task %d has a memory leax (%d byte)\n", task_table[i].id, task_table[i].mm.usage);
task_table[i].mm.usage = 0;
}
task_table[i].stack = create_stack(task_table+i, stack_size);
if (!task_table[i].stack)
break;
task_table[i].stack_size = stack_size;
task_table[i].top = task_table[i].stack + stack_size - sizeof(size_t);
task_table[i].ip = 0;
task_table[i].mem_usage = 0;
task_table[i].id = i;
memset(task_table[i].blocked_tasks, 0x00, sizeof(unsigned char)*MAX_TASKS);
task_table[i].return_value = NULL;
if (user) {
task_table[i].ustack = create_stack(task_table+i, stack_size);
if (!task_table[i].ustack)
break;
task_table[i].stack_size = stack_size;
} else {
task_table[i].ustack = NULL;
task_table[i].stack_size = 0;
}
memset(task_table[i].mbox, 0x00, sizeof(mailbox_int32_t*)*MAX_TASKS);
task_table[i].status = TASK_READY;
if (id)
*id = i;
ret = create_default_frame(task_table+i, ep, arg);
ret = create_default_frame(task_table+i, ep, arg, user);
break;
}
}
@ -135,8 +145,23 @@ int create_kernel_task(tid_t* id, entry_point_t ep, void* arg, size_t stack_size
return ret;
}
int join_kernel_task(tid_t id, void** result)
int create_kernel_task(tid_t* id, entry_point_t ep, void* arg)
{
return create_task(id, ep, arg, 0, 0);
}
int create_user_task(tid_t* id, entry_point_t ep, void* arg, size_t stack_size)
{
return create_task(id, ep, arg, stack_size, 1);
}
int join_task(tid_t id, int* result)
{
int32_t tmp;
mailbox_int32_t mbox;
mailbox_int32_init(&mbox);
spinlock_lock_irqsave(&table_lock);
/*
@ -162,46 +187,34 @@ int join_kernel_task(tid_t id, void** result)
if (BUILTIN_EXPECT(task_table[id].status == TASK_FINISHED, 0))
goto join_out;
task_table[id].blocked_tasks[current_task->id] = 1;
current_task->status = TASK_BLOCKED;
task_table[id].mbox[current_task->id] = &mbox;
spinlock_unlock_irqsave(&table_lock);
schedule();
mailbox_int32_fetch(&mbox, &tmp);
if (result) {
*result = current_task->return_value;
current_task->return_value = NULL;
}
if (result)
*result = tmp;
mailbox_int32_destroy(&mbox);
return 0;
join_out:
spinlock_unlock_irqsave(&table_lock);
mailbox_int32_destroy(&mbox);
return -1;
}
int wakeup_task(tid_t id)
{
int ret = -1;
spinlock_lock_irqsave(&table_lock);
if (task_table[id].status != TASK_BLOCKED) {
kprintf("Task %u is already unblocked\n", id);
goto wakeup_out;
}
task_table[id].status = TASK_READY;
ret = 0;
wakeup_out:
spinlock_unlock_irqsave(&table_lock);
return ret;
return 0;
}
task_t* get_new_task(void)
void scheduler(void)
{
task_t* ret;
unsigned int i, new_id;
spinlock_lock(&table_lock);
@ -218,23 +231,20 @@ task_t* get_new_task(void)
current_task->status = TASK_READY;
task_table[new_id].status = TASK_RUNNING;
ret = task_table+new_id;
current_task = task_table+new_id;
goto get_task_out;
}
}
if (current_task->status == TASK_RUNNING) {
ret = current_task;
if (current_task->status == TASK_RUNNING)
goto get_task_out;
}
/*
* we switch to the idle task (id=0), if the current task terminates
* and no other is ready
*/
ret = task_table+0;
current_task = task_table+0;
get_task_out:
spinlock_unlock(&table_lock);
return ret;
}

View file

@ -6,15 +6,10 @@ OBJS += $(patsubst %.c, %.o, $(filter %.c, $(C_source)))
%.o : %.c
$(CC) -c $(CFLAGS) -o $@ $<
default:
$(MAKE) all
all:
$(MAKE) $(LIBNAME)
$(LIBNAME): $(OBJS)
$(AR) $(ARFLAGS) $(LIBNAME) $(OBJS)
default: $(OBJS)
all: $(OBJS)
clean:
$(RM) *.o *~ $(NAME)

View file

@ -20,14 +20,13 @@
#include <metalsvm/stdio.h>
#include <metalsvm/string.h>
#include <metalsvm/stdarg.h>
#include <metalsvm/spinlocks.h>
#include <asm/atomic.h>
#ifdef CONFIG_VGA
#include <asm/vga.h>
#endif
static unsigned int kmsg_counter = 0;
static atomic_uint32_t kmsg_counter = 0;
static unsigned char kmessages[KMSG_SIZE];
static spinlock_t kio_lock = SPINLOCK_INIT;
int koutput_init(void)
{
@ -41,29 +40,32 @@ int koutput_init(void)
int kputchar(int c)
{
int pos;
int ret = 1;
spinlock_lock(&kio_lock);
kmessages[kmsg_counter++ % KMSG_SIZE] = c;
pos = atomic_uint32_inc(&kmsg_counter);
kmessages[pos % KMSG_SIZE] = c;
#ifdef CONFIG_VGA
ret = vga_putchar(c);
#endif
spinlock_unlock(&kio_lock);
return ret;
}
int kputs(const char *str)
{
int pos;
int i;
spinlock_lock(&kio_lock);
for(i=0; str[i] != '\0'; i++)
kmessages[kmsg_counter++ % KMSG_SIZE] = str[i];
for(i=0; str[i] != '\0'; i++) {
pos = atomic_uint32_inc(&kmsg_counter);
kmessages[pos % KMSG_SIZE] = str[i];
}
#ifdef CONFIG_VGA
i = vga_puts(str);
#endif
spinlock_unlock(&kio_lock);
return i;
}

View file

@ -13,7 +13,7 @@
#define SYS_SEM_NULL NULL
typedef sem_t* sys_sem_t;
typedef mailbox_t* sys_mbox_t;
typedef mailbox_ptr_t* sys_mbox_t;
typedef tid_t* sys_thread_t;
#endif /* __ARCH_SYS_ARCH_H__ */

View file

@ -6,15 +6,10 @@ OBJS += $(patsubst %.c, %.o, $(filter %.c, $(C_source)))
%.o : %.c
$(CC) -c $(CFLAGS) -o $@ $<
default:
$(MAKE) all
all:
$(MAKE) $(LIBNAME)
default: $(OBJS)
all: $(OBJS)
$(LIBNAME): $(OBJS)
$(AR) $(ARFLAGS) $(LIBNAME) $(OBJS)
clean:
$(RM) *.o *~ $(NAME)

View file

@ -20,7 +20,7 @@
#include <metalsvm/stdio.h>
#include <metalsvm/string.h>
#include <metalsvm/mmu.h>
#include <metalsvm/spinlocks.h>
#include <metalsvm/spinlock.h>
#include <metalsvm/time.h>
#ifdef CONFIG_MULTIBOOT
#include <asm/multiboot.h>
@ -174,7 +174,7 @@ next_try:
atomic_size_add(&total_allocated_memory, npages*PAGE_SIZE);
atomic_size_sub(&total_available_memory, npages*PAGE_SIZE);
if (task)
atomic_size_add(&(task->mm.usage), npages*PAGE_SIZE);
atomic_size_add(&(task->mem_usage), npages*PAGE_SIZE);
oom:
spinlock_unlock(&bitmap_lock);
@ -214,7 +214,7 @@ static void task_free(task_t* task, void* addr, size_t sz)
atomic_size_sub(&total_allocated_memory, npages*PAGE_SIZE);
atomic_size_add(&total_available_memory, npages*PAGE_SIZE);
if (task)
atomic_size_sub(&(task->mm.usage), npages*PAGE_SIZE);
atomic_size_sub(&(task->mem_usage), npages*PAGE_SIZE);
spinlock_unlock(&bitmap_lock);
}