merge && test for master
This commit is contained in:
parent
2535fbd83d
commit
4d5150558a
210 changed files with 2340 additions and 1120 deletions
|
@ -195,6 +195,7 @@ int has_apic(void);
|
|||
int apic_is_enabled(void);
|
||||
int ioapic_inton(uint8_t irq, uint8_t apicid);
|
||||
int ioapic_intoff(uint8_t irq, uint8_t apicid);
|
||||
int map_apic(void);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
|
|
|
@ -88,7 +88,7 @@ inline static int32_t atomic_int32_test_and_set(atomic_int32_t* d, int32_t ret)
|
|||
inline static int32_t atomic_int32_add(atomic_int32_t *d, int32_t i)
|
||||
{
|
||||
int32_t res = i;
|
||||
asm volatile(LOCK "xaddl %0, %1" : "=r"(i) : "m"(d->counter), "0"(i));
|
||||
asm volatile(LOCK "xaddl %0, %1" : "=r"(i) : "m"(d->counter), "0"(i) : "memory");
|
||||
return res+i;
|
||||
}
|
||||
|
||||
|
@ -152,7 +152,7 @@ inline static int32_t atomic_int32_read(atomic_int32_t *d) {
|
|||
* @param v The value to set
|
||||
*/
|
||||
inline static void atomic_int32_set(atomic_int32_t *d, int32_t v) {
|
||||
d->counter = v;
|
||||
atomic_int32_test_and_set(d, v);
|
||||
}
|
||||
|
||||
#ifdef __cplusplus
|
||||
|
|
|
@ -30,12 +30,29 @@
|
|||
#ifndef IRCCE_H
|
||||
#define IRCCE_H
|
||||
|
||||
#include <asm/RCCE.h>
|
||||
#include "RCCE.h"
|
||||
|
||||
#define iRCCE_SUCCESS RCCE_SUCCESS
|
||||
#define iRCCE_PENDING -1
|
||||
#define iRCCE_RESERVED -2
|
||||
#define iRCCE_NOT_ENQUEUED -3
|
||||
#define iRCCE_PENDING -1
|
||||
#define iRCCE_RESERVED -2
|
||||
#define iRCCE_NOT_ENQUEUED -3
|
||||
|
||||
#define iRCCE_ANY_SOURCE -1
|
||||
|
||||
#define iRCCE_PRIOS 5
|
||||
#define iRCCE_MAILBOX_EMPTY -2
|
||||
#define iRCCE_LAST_MAILS_NOT_RECV -3
|
||||
#define iRCCE_MAILBOX_ALL -4
|
||||
#define iRCCE_MAILBOX_OPEN 0
|
||||
#define iRCCE_MAILBOX_CLOSED 1
|
||||
|
||||
// iRCCE-mailbox-system tags
|
||||
#define iRCCE_LAST_MAIL -1
|
||||
#define iRCCE_ANYLENGTH -2
|
||||
#define iRCCE_ANYLENGTH_PIGGYBACK -3
|
||||
|
||||
|
||||
typedef volatile char iRCCE_SHORT_FLAG;
|
||||
|
||||
typedef struct _iRCCE_SEND_REQUEST {
|
||||
char *privbuf; // source buffer in local private memory (send buffer)
|
||||
|
@ -94,15 +111,33 @@ typedef struct _iRCCE_WAIT_LIST {
|
|||
} iRCCE_WAIT_LIST;
|
||||
|
||||
|
||||
#define iRCCE_MAIL_HEADER_PAYLOAD 13
|
||||
typedef struct _iRCCE_MAIL_HEADER {
|
||||
int source; // UE that will send the header
|
||||
size_t size; // size of the message which will be send/received
|
||||
int tag; // tag indicating which kind of message we have
|
||||
struct _iRCCE_MAIL_HEADER* next;// pointer for queue - could be replaced by list-object
|
||||
char prio; // priority of the mail
|
||||
iRCCE_SHORT_FLAG sent; // flag indicating that header is new
|
||||
iRCCE_SHORT_FLAG closed; // flag indication that mailbox is closed
|
||||
char payload[iRCCE_MAIL_HEADER_PAYLOAD]; // payload for small messages
|
||||
} iRCCE_MAIL_HEADER;
|
||||
|
||||
typedef struct _iRCCE_MAIL_TRASH_BIN {
|
||||
iRCCE_MAIL_HEADER* first;
|
||||
iRCCE_MAIL_HEADER* last;
|
||||
} iRCCE_MAIL_TRASH_BIN;
|
||||
|
||||
///////////////////////////////////////////////////////////////
|
||||
//
|
||||
// THE iRCCE API:
|
||||
//
|
||||
// Initialize function:
|
||||
// Initialize/Finalize functions:
|
||||
int iRCCE_init(void);
|
||||
int iRCCE_finalize(void);
|
||||
//
|
||||
// Non-blocking send/recv functions:
|
||||
int iRCCE_isend(char *, size_t, int, iRCCE_SEND_REQUEST *);
|
||||
int iRCCE_isend(char *, ssize_t, int, iRCCE_SEND_REQUEST *);
|
||||
int iRCCE_isend_test(iRCCE_SEND_REQUEST *, int *);
|
||||
int iRCCE_isend_wait(iRCCE_SEND_REQUEST *);
|
||||
int iRCCE_isend_push(void);
|
||||
|
@ -133,6 +168,18 @@ int iRCCE_wait_any(iRCCE_WAIT_LIST*, iRCCE_SEND_REQUEST **, iRCCE_RECV_REQUEST
|
|||
int iRCCE_isend_cancel(iRCCE_SEND_REQUEST *, int *);
|
||||
int iRCCE_irecv_cancel(iRCCE_RECV_REQUEST *, int *);
|
||||
//
|
||||
// Blocking send/recv functions for mailbox system
|
||||
int iRCCE_mail_send(size_t, int, char, char*, int);
|
||||
int iRCCE_mail_recv(iRCCE_MAIL_HEADER**);
|
||||
//
|
||||
// functions to empty mailbox-queue and to check for last mails:
|
||||
int iRCCE_mail_release(iRCCE_MAIL_HEADER**);
|
||||
int iRCCE_last_mail_recv(void);
|
||||
int iRCCE_mailbox_wait(void);
|
||||
int iRCCE_mailbox_flush(void);
|
||||
int iRCCE_mailbox_close(int);
|
||||
void iRCCE_mailbox_print_header(iRCCE_MAIL_HEADER*);
|
||||
//
|
||||
///////////////////////////////////////////////////////////////
|
||||
//
|
||||
// Just for for convenience:
|
||||
|
|
|
@ -29,6 +29,23 @@
|
|||
|
||||
extern iRCCE_SEND_REQUEST* iRCCE_isend_queue;
|
||||
extern iRCCE_RECV_REQUEST* iRCCE_irecv_queue[RCCE_MAXNP];
|
||||
extern iRCCE_RECV_REQUEST* iRCCE_irecv_any_source_queue;
|
||||
|
||||
// pointer to MPB-mailbox-space
|
||||
extern volatile iRCCE_MAIL_HEADER* iRCCE_mailbox_send[RCCE_MAXNP];
|
||||
extern volatile iRCCE_MAIL_HEADER* iRCCE_mailbox_recv[RCCE_MAXNP];
|
||||
|
||||
// queue for received headers
|
||||
extern iRCCE_MAIL_HEADER* iRCCE_mailbox_recv_queue[iRCCE_PRIOS];
|
||||
|
||||
// flags for last mail
|
||||
extern iRCCE_SHORT_FLAG iRCCE_last_mail[RCCE_MAXNP];
|
||||
|
||||
// field to store open/closed status of mailboxes
|
||||
extern iRCCE_SHORT_FLAG iRCCE_mailbox_status[RCCE_MAXNP];
|
||||
|
||||
// garbage collection for mailbox
|
||||
extern iRCCE_MAIL_TRASH_BIN iRCCE_mail_garbage;
|
||||
#ifdef _OPENMP
|
||||
#pragma omp threadprivate (iRCCE_isend_queue, iRCCE_irecv_queue)
|
||||
#endif
|
||||
|
|
|
@ -39,21 +39,17 @@ typedef struct {
|
|||
|
||||
extern bootinfo_t* bootinfo;
|
||||
|
||||
typedef struct {
|
||||
uint8_t type;
|
||||
uint8_t tag;
|
||||
uint32_t length;
|
||||
} icc_header_t;
|
||||
|
||||
#define ICC_TYPE_IP (1 << 0)
|
||||
#define ICC_TYPE_SVM (1 << 1)
|
||||
#define ICC_TYPE_PINGREQUEST (1 << 2)
|
||||
#define ICC_TYPE_PINGRESPONSE (1 << 3)
|
||||
#define ICC_TAG_IP 0
|
||||
#define ICC_TAG_SVM 1
|
||||
#define ICC_TAG_PINGREQUEST 2
|
||||
#define ICC_TAG_PINGRESPONSE 3
|
||||
|
||||
int icc_init(void);
|
||||
int icc_ping(int ue);
|
||||
void icc_check(void);
|
||||
int icc_halt(void);
|
||||
int icc_send_irq(int ue);
|
||||
void icc_mail_check(void);
|
||||
int icc_mail_ping(void);
|
||||
|
||||
#endif
|
||||
|
||||
#ifdef __cplusplus
|
||||
|
|
|
@ -82,7 +82,6 @@ typedef struct {
|
|||
unsigned short base_hi;
|
||||
} __attribute__ ((packed)) idt_entry_t;
|
||||
|
||||
|
||||
/** @brief Defines the idt pointer structure.
|
||||
*
|
||||
* This structure keeps information about
|
||||
|
@ -95,8 +94,6 @@ typedef struct {
|
|||
unsigned int base;
|
||||
} __attribute__ ((packed)) idt_ptr_t;
|
||||
|
||||
|
||||
|
||||
/** @brief Installs IDT
|
||||
*
|
||||
* The installation involves the following steps:
|
||||
|
@ -113,7 +110,7 @@ void idt_install(void);
|
|||
* @param sel Segment the IDT will use
|
||||
* @param flags Flags this entry will have
|
||||
*/
|
||||
void idt_set_gate(unsigned char num, unsigned long base, unsigned short sel,
|
||||
void idt_set_gate(unsigned char num, size_t base, unsigned short sel,
|
||||
unsigned char flags);
|
||||
|
||||
/** @brief Configures and returns a IDT entry with chosen attributes
|
||||
|
@ -123,7 +120,7 @@ void idt_set_gate(unsigned char num, unsigned long base, unsigned short sel,
|
|||
*
|
||||
* @return a preconfigured idt descriptor
|
||||
*/
|
||||
idt_entry_t configure_idt_entry(unsigned long base, unsigned short sel,
|
||||
idt_entry_t configure_idt_entry(size_t base, unsigned short sel,
|
||||
unsigned char flags);
|
||||
|
||||
#ifdef __cplusplus
|
||||
|
|
|
@ -32,6 +32,13 @@
|
|||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifndef CMOS_PORT_ADDRESS
|
||||
#define CMOS_PORT_ADDRESS 0x70
|
||||
#endif
|
||||
#ifndef CMOS_PORT_DATA
|
||||
#define CMOS_PORT_DATA 0x71
|
||||
#endif
|
||||
|
||||
/** @brief Read a byte from an IO port
|
||||
*
|
||||
* @param _port The port you want to read from
|
||||
|
@ -93,6 +100,33 @@ inline static void outportl(unsigned short _port, unsigned int _data)
|
|||
asm volatile("outl %1, %0"::"dN"(_port), "a"(_data));
|
||||
}
|
||||
|
||||
inline static void uart_putchar(unsigned char _data)
|
||||
{
|
||||
outportb(0x2F8, _data);
|
||||
}
|
||||
|
||||
/**
|
||||
* read a byte from CMOS
|
||||
* @param offset CMOS offset
|
||||
* @return value you want to read
|
||||
*/
|
||||
inline static uint8_t cmos_read(uint8_t offset)
|
||||
{
|
||||
outportb(CMOS_PORT_ADDRESS, offset);
|
||||
return inportb(CMOS_PORT_DATA);
|
||||
}
|
||||
|
||||
/**
|
||||
* write a byte in CMOS
|
||||
* @param offset CMOS offset
|
||||
* @param val the value you want wto write
|
||||
*/
|
||||
inline static void cmos_write(uint8_t offset, uint8_t val)
|
||||
{
|
||||
outportb(CMOS_PORT_ADDRESS, offset);
|
||||
outportb(CMOS_PORT_DATA, val);
|
||||
}
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -227,12 +227,23 @@ static inline void write_cr4(uint32_t val) {
|
|||
asm volatile("mov %0, %%cr4" : : "r"(val));
|
||||
}
|
||||
|
||||
int ipi_tlb_flush(void);
|
||||
|
||||
/** @brief Flush a specific page entry in TLB
|
||||
* @param addr The (virtual) address of the page to flush
|
||||
*/
|
||||
static inline void tlb_flush_one_page(uint32_t addr)
|
||||
{
|
||||
asm volatile("invlpg (%0)" : : "r"(addr) : "memory");
|
||||
#if MAX_CORES > 1
|
||||
/*
|
||||
* Currently, we didn't support user-level threads.
|
||||
* => User-level applications run only on one
|
||||
* and we didn't flush the TLB of the other cores
|
||||
*/
|
||||
if (addr <= KERNEL_SPACE)
|
||||
ipi_tlb_flush();
|
||||
#endif
|
||||
}
|
||||
|
||||
/** @brief Invalidate the whole TLB
|
||||
|
@ -245,6 +256,9 @@ static inline void tlb_flush(void)
|
|||
|
||||
if (val)
|
||||
write_cr3(val);
|
||||
#if MAX_CORES > 1
|
||||
ipi_tlb_flush();
|
||||
#endif
|
||||
}
|
||||
|
||||
/** @brief Read EFLAGS
|
||||
|
|
|
@ -70,16 +70,6 @@ typedef unsigned int wint_t;
|
|||
* All the interrupt handler routines use this type for their only parameter.
|
||||
*/
|
||||
struct state {
|
||||
/*
|
||||
* We switched from software- to hardwaree-based multitasking
|
||||
* Therefore, we do not longer save the registers by hand.
|
||||
*/
|
||||
/*unsigned int gs, fs, es, ds; */ /* pushed the segs last */
|
||||
/*
|
||||
* Commented this out to write it out in a longer form for HTML-documentation
|
||||
*/
|
||||
//unsigned int edi, esi, ebp, esp, ebx, edx, ecx, eax; /* pushed by 'pusha' */
|
||||
//unsigned int int_no, err_code; /* our 'push byte #' and ecodes do this */
|
||||
/// EDI register
|
||||
unsigned int edi;
|
||||
/// ESI register
|
||||
|
@ -99,18 +89,19 @@ struct state {
|
|||
|
||||
/// Interrupt number
|
||||
unsigned int int_no;
|
||||
/// Error code
|
||||
unsigned int err_code; /* our 'push byte #' and ecodes do this */
|
||||
/*unsigned int eip, cs, eflags, useresp, ss;*/ /* pushed by the processor automatically */
|
||||
|
||||
// pushed by the processor automatically
|
||||
unsigned int error;
|
||||
unsigned int eip;
|
||||
unsigned int cs;
|
||||
unsigned int eflags;
|
||||
unsigned int useresp;
|
||||
unsigned int ss;
|
||||
};
|
||||
|
||||
/** @brief Read out APIC CPU ID
|
||||
* @return The APIC CPU ID
|
||||
*/
|
||||
uint32_t apic_cpu_id(void);
|
||||
|
||||
/// Convenience-define constant
|
||||
#define LOGICAL_CPUID apic_cpu_id()
|
||||
#define smp_id apic_cpu_id
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
|
|
|
@ -45,6 +45,11 @@ extern "C" {
|
|||
*/
|
||||
int arch_fork(task_t* task);
|
||||
|
||||
/** @brieff Switch to new task
|
||||
* @param id Task Id
|
||||
*/
|
||||
void switch_task(uint32_t id);
|
||||
|
||||
/** @brief Setup a default frame for a new task
|
||||
*
|
||||
* @param task Pointer to the task structure
|
||||
|
@ -54,7 +59,7 @@ int arch_fork(task_t* task);
|
|||
* - 0 on success
|
||||
* - -EINVAL (-22) on failure
|
||||
*/
|
||||
int create_default_frame(task_t* task, entry_point_t ep, void* arg);
|
||||
int create_default_frame(task_t* task, internal_entry_point_t ep, void* arg);
|
||||
|
||||
/** @brief Register a task's TSS at GDT
|
||||
*
|
||||
|
@ -65,12 +70,6 @@ int create_default_frame(task_t* task, entry_point_t ep, void* arg);
|
|||
*/
|
||||
int register_task(task_t* task);
|
||||
|
||||
/** @brief Call to rescheduling
|
||||
*
|
||||
* This is a purely assembled procedure for rescheduling
|
||||
*/
|
||||
void reschedule(void);
|
||||
|
||||
/** @brief Jump back to user code
|
||||
*
|
||||
* This function runs the user code after stopping it just as if
|
||||
|
@ -87,6 +86,12 @@ static inline int jump_to_user_code(uint32_t ep, uint32_t stack)
|
|||
return 0;
|
||||
}
|
||||
|
||||
/** @brief determines the stack of a specific task
|
||||
*
|
||||
* @return start address of a specific task
|
||||
*/
|
||||
size_t get_stack(uint32_t id);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -28,7 +28,9 @@
|
|||
#include <metalsvm/page.h>
|
||||
#include <metalsvm/spinlock.h>
|
||||
#include <metalsvm/mmu.h>
|
||||
#include <metalsvm/tasks.h>
|
||||
#include <asm/irq.h>
|
||||
#include <asm/idt.h>
|
||||
#include <asm/irqflags.h>
|
||||
#include <asm/apic.h>
|
||||
#include <asm/multiboot.h>
|
||||
|
@ -36,12 +38,9 @@
|
|||
#include <asm/RCCE_lib.h>
|
||||
#endif
|
||||
|
||||
/* disable optimization for the following functions */
|
||||
//static int apic_send_ipi(uint32_t id, uint32_t mode, uint32_t vector) __attribute__((optimize(0)));
|
||||
static int wakeup_all_aps(uint32_t start_eip) __attribute__((optimize(0)));
|
||||
//int apic_calibration(void) __attribute__((optimize(0)));
|
||||
//int ioapic_intoff(uint8_t irq, uint8_t apicid) __attribute__((optimize(0)));
|
||||
//int ioapic_inton(uint8_t irq, uint8_t apicid) __attribute__((optimize(0)));
|
||||
#if defined(CONFIG_ROCKCREEK) && (MAX_CORES > 1)
|
||||
#error RockCreek is not a SMP system
|
||||
#endif
|
||||
|
||||
// IO APIC MMIO structure: write reg, then read or write data.
|
||||
typedef struct {
|
||||
|
@ -60,12 +59,15 @@ static uint32_t icr = 0;
|
|||
static uint32_t ncores = 1;
|
||||
static uint8_t irq_redirect[16] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0xA, 0xB, 0xC, 0xD, 0xE, 0xF};
|
||||
#if MAX_CORES > 1
|
||||
static uint8_t boot_code[] = {0xE9, 0x1E, 0x00, 0x17, 0x00, 0x09, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x9A, 0xCF, 0x00, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x92, 0xCF, 0x00, 0x0F, 0x01, 0x16, 0x03, 0x00, 0x0F, 0x20, 0xC0, 0x0C, 0x01, 0x0F, 0x22, 0xC0, 0x66, 0xEA, 0x36, 0x00, 0x01, 0x00, 0x08, 0x00, 0xFA, 0x31, 0xC0, 0x66, 0xB8, 0x10, 0x00, 0x8E, 0xD8, 0x8E, 0xC0, 0x8E, 0xE0, 0x8E, 0xE8, 0x8E, 0xD0, 0x0F, 0x20, 0xC0, 0x25, 0xFF, 0xFF, 0xFF, 0x9F, 0x0D, 0x20, 0x00, 0x00, 0x00, 0x0F, 0x22, 0xC0, 0x31, 0xC0, 0x0F, 0x22, 0xD8, 0xBC, 0xEF, 0xBE, 0xAD, 0xDE, 0x31, 0xC0, 0x31, 0xDB, 0xEA, 0xDE, 0xC0, 0xAD, 0xDE, 0x08, 0x00};
|
||||
static uint8_t boot_code[] = { 0xFA, 0x0F, 0x01, 0x16, 0x3B, 0x70, 0x0F, 0x20, 0xC0, 0x0C, 0x01, 0x0F, 0x22, 0xC0, 0x66, 0xEA, 0x16, 0x70, 0x00, 0x00, 0x08, 0x00, 0x31, 0xC0, 0x66, 0xB8, 0x10, 0x00, 0x8E, 0xD8, 0x8E, 0xC0, 0x8E, 0xE0, 0x8E, 0xE8, 0x8E, 0xD0, 0xBC, 0xEF, 0xBE, 0xAD, 0xDE, 0x68, 0xAD, 0xDE, 0xAD, 0xDE, 0x6A, 0x00, 0xEA, 0xDE, 0xC0, 0xAD, 0xDE, 0x08, 0x00, 0xEB, 0xFE, 0x17, 0x00, 0x41, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x9A, 0xCF, 0x00, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x92, 0xCF, 0x00};
|
||||
static atomic_int32_t cpu_online = ATOMIC_INIT(1);
|
||||
#endif
|
||||
static uint8_t initialized = 0;
|
||||
static atomic_int32_t cpu_online = ATOMIC_INIT(1);
|
||||
spinlock_t bootlock = SPINLOCK_INIT;
|
||||
|
||||
// forward declaration
|
||||
static int lapic_reset(void);
|
||||
|
||||
static inline uint32_t lapic_read(uint32_t addr)
|
||||
{
|
||||
return *((volatile uint32_t*) (lapic+addr));
|
||||
|
@ -110,6 +112,12 @@ uint32_t apic_cpu_id(void)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static inline void apic_set_cpu_id(uint32_t id)
|
||||
{
|
||||
if (lapic && initialized)
|
||||
lapic_write(APIC_ID, id << 24);
|
||||
}
|
||||
|
||||
static inline uint32_t apic_version(void)
|
||||
{
|
||||
if (lapic)
|
||||
|
@ -137,6 +145,53 @@ int apic_is_enabled(void)
|
|||
}
|
||||
|
||||
#if MAX_CORES > 1
|
||||
static inline void set_ipi_dest(uint32_t cpu_id) {
|
||||
uint32_t tmp;
|
||||
tmp = lapic_read(APIC_ICR2);
|
||||
tmp &= 0x00FFFFFF;
|
||||
tmp |= (cpu_id << 24);
|
||||
lapic_write(APIC_ICR2, tmp);
|
||||
}
|
||||
|
||||
int ipi_tlb_flush(void)
|
||||
{
|
||||
uint32_t flags;
|
||||
uint32_t i, j;
|
||||
|
||||
if (atomic_int32_read(&cpu_online) == 1)
|
||||
return 0;
|
||||
|
||||
if (lapic_read(APIC_ICR1) & APIC_ICR_BUSY) {
|
||||
kputs("ERROR: previous send not complete");
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
flags = irq_nested_disable();
|
||||
if (atomic_int32_read(&cpu_online) == ncores) {
|
||||
lapic_write(APIC_ICR1, APIC_INT_ASSERT|APIC_DEST_ALLBUT|APIC_DM_FIXED|124);
|
||||
|
||||
j = 0;
|
||||
while((lapic_read(APIC_ICR1) & APIC_ICR_BUSY) && (j < 1000))
|
||||
j++; // wait for it to finish, give up eventualy tho
|
||||
} else {
|
||||
for(i=0; i<atomic_int32_read(&cpu_online); i++)
|
||||
{
|
||||
if (i == smp_id())
|
||||
continue;
|
||||
|
||||
set_ipi_dest(i);
|
||||
lapic_write(APIC_ICR1, APIC_INT_ASSERT|APIC_DM_FIXED|124);
|
||||
|
||||
j = 0;
|
||||
while((lapic_read(APIC_ICR1) & APIC_ICR_BUSY) && (j < 1000))
|
||||
j++; // wait for it to finish, give up eventualy tho
|
||||
}
|
||||
}
|
||||
irq_nested_enable(flags);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
#if 0
|
||||
static int apic_send_ipi(uint32_t id, uint32_t mode, uint32_t vector)
|
||||
{
|
||||
|
@ -158,28 +213,49 @@ static int apic_send_ipi(uint32_t id, uint32_t mode, uint32_t vector)
|
|||
}
|
||||
#endif
|
||||
|
||||
static int wakeup_all_aps(uint32_t start_eip)
|
||||
/*
|
||||
* use the universal startup algorithm of Intel's MultiProcessor Specification
|
||||
*/
|
||||
static int wakeup_ap(uint32_t start_eip, uint32_t id)
|
||||
{
|
||||
static char* reset_vector = 0;
|
||||
uint32_t i;
|
||||
|
||||
kputs("Wakeup all application processors via IPI\n");
|
||||
kprintf("Wakeup application processor %d via IPI\n", id);
|
||||
|
||||
// set shutdown code to 0x0A
|
||||
cmos_write(0x0F, 0x0A);
|
||||
|
||||
if(lapic_read(APIC_ICR1) & APIC_ICR_BUSY) {
|
||||
kprintf("ERROR: previous send not complete");
|
||||
if (!reset_vector) {
|
||||
reset_vector = (char*) map_region(0x00, 0x00, 1, MAP_KERNEL_SPACE|MAP_NO_CACHE);
|
||||
reset_vector += 0x467; // add base address of the reset vector
|
||||
kprintf("Map reset vector to %p\n", reset_vector);
|
||||
}
|
||||
*((volatile unsigned short *) (reset_vector+2)) = start_eip >> 4;
|
||||
*((volatile unsigned short *) reset_vector) = 0x00;
|
||||
|
||||
if (lapic_read(APIC_ICR1) & APIC_ICR_BUSY) {
|
||||
kputs("ERROR: previous send not complete");
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
vga_puts("Send IPI\n");
|
||||
// send out INIT to all aps
|
||||
lapic_write(APIC_ICR1, APIC_INT_ASSERT|APIC_DEST_ALLBUT|APIC_DM_INIT);
|
||||
//kputs("Send IPI\n");
|
||||
// send out INIT to AP
|
||||
set_ipi_dest(id);
|
||||
lapic_write(APIC_ICR1, APIC_INT_LEVELTRIG|APIC_INT_ASSERT|APIC_DM_INIT);
|
||||
udelay(200);
|
||||
// reset INIT
|
||||
lapic_write(APIC_ICR1, APIC_INT_LEVELTRIG|APIC_DM_INIT);
|
||||
udelay(10000);
|
||||
// send out the startup
|
||||
lapic_write(APIC_ICR1, APIC_INT_ASSERT|APIC_DEST_ALLBUT|APIC_DM_STARTUP|(start_eip >> 12));
|
||||
set_ipi_dest(id);
|
||||
lapic_write(APIC_ICR1, APIC_DM_STARTUP|(start_eip >> 12));
|
||||
udelay(200);
|
||||
// do it again
|
||||
lapic_write(APIC_ICR1, APIC_INT_ASSERT|APIC_DEST_ALLBUT|APIC_DM_STARTUP|(start_eip >> 12));
|
||||
set_ipi_dest(id);
|
||||
lapic_write(APIC_ICR1, APIC_DM_STARTUP|(start_eip >> 12));
|
||||
udelay(200);
|
||||
vga_puts("IPI done...\n");
|
||||
//kputs("IPI done...\n");
|
||||
|
||||
i = 0;
|
||||
while((lapic_read(APIC_ICR1) & APIC_ICR_BUSY) && (i < 1000))
|
||||
|
@ -187,20 +263,70 @@ static int wakeup_all_aps(uint32_t start_eip)
|
|||
|
||||
return ((lapic_read(APIC_ICR1) & APIC_ICR_BUSY) ? -EIO : 0); // did it fail (still delivering) or succeed ?
|
||||
}
|
||||
#endif
|
||||
|
||||
#if MAX_CORES > 1
|
||||
static void smp_main(void)
|
||||
/*
|
||||
* This is defined in entry.asm. We use this to properly reload
|
||||
* the new segment registers
|
||||
*/
|
||||
extern void gdt_flush(void);
|
||||
|
||||
/*
|
||||
* This is defined in entry.asm and initialized the processors.
|
||||
*/
|
||||
extern void cpu_init(void);
|
||||
|
||||
/*
|
||||
* platform independent entry point of the application processors
|
||||
*/
|
||||
extern int smp_main(void);
|
||||
|
||||
void smp_start(uint32_t id)
|
||||
{
|
||||
vga_puts("JJAJAJAJAJAJA\n");
|
||||
lowlevel_init();
|
||||
uint32_t i;
|
||||
|
||||
atomic_int32_inc(&cpu_online);
|
||||
kputs("JAJAJAJ\n");
|
||||
|
||||
// reset APIC and set id
|
||||
lapic_reset();
|
||||
apic_set_cpu_id(id);
|
||||
|
||||
kprintf("Application processor %d is entering its idle task\n", apic_cpu_id());
|
||||
|
||||
// initialize default cpu features
|
||||
cpu_init();
|
||||
|
||||
// use the same gdt like the boot processors
|
||||
gdt_flush();
|
||||
|
||||
// install IDT
|
||||
idt_install();
|
||||
|
||||
// enable additional cpu features
|
||||
cpu_detection();
|
||||
|
||||
/* enable paging */
|
||||
write_cr3((uint32_t)get_boot_pgd());
|
||||
i = read_cr0();
|
||||
i = i | (1 << 31);
|
||||
write_cr0(i);
|
||||
|
||||
// reset APIC and set id
|
||||
lapic_reset(); // sets also the timer interrupt
|
||||
apic_set_cpu_id(id);
|
||||
|
||||
/*
|
||||
* we turned on paging
|
||||
* => now, we are able to register our task for Task State Switching
|
||||
*/
|
||||
register_task(per_core(current_task));
|
||||
|
||||
smp_main();
|
||||
|
||||
// idle loop
|
||||
while(1) ;
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef CONFIG_MULTIBOOT
|
||||
static unsigned int* search_apic(unsigned int base, unsigned int limit) {
|
||||
uint32_t* ptr;
|
||||
|
||||
|
@ -213,42 +339,63 @@ static unsigned int* search_apic(unsigned int base, unsigned int limit) {
|
|||
|
||||
return NULL;
|
||||
}
|
||||
#endif
|
||||
|
||||
#if MAX_CORES > 1
|
||||
int smp_init(void)
|
||||
{
|
||||
uint32_t i;
|
||||
size_t bootaddr;
|
||||
uint32_t i, j;
|
||||
char* bootaddr;
|
||||
int err;
|
||||
|
||||
if (ncores <= 1)
|
||||
return -EINVAL;
|
||||
|
||||
/*
|
||||
* dirty hack: Copy 16bit startup code (see tools/smp_setup.asm)
|
||||
* to a 16bit address. Wakeup the other cores via IPI. They start
|
||||
* at this address in real mode, switch to protected and finally
|
||||
* they jump to smp_main.
|
||||
*/
|
||||
bootaddr = 0x10000;
|
||||
map_region(bootaddr, get_pages(1), 1, MAP_KERNEL_SPACE);
|
||||
for(i=0; i<sizeof(boot_code); i+=sizeof(size_t))
|
||||
for(i=1; (i<ncores) && (i<MAX_CORES); i++)
|
||||
{
|
||||
// replace 0xDEADC0DE with the address of the smp entry code
|
||||
if (*((size_t*) (bootaddr+i)) == 0xDEADC0DE)
|
||||
*((size_t*) (bootaddr+i)) = (size_t) smp_main;
|
||||
/*
|
||||
* dirty hack: Copy 16bit startup code (see tools/smp_setup.asm)
|
||||
* to a 16bit address. Wakeup the other cores via IPI. They start
|
||||
* at this address in real mode, switch to protected and finally
|
||||
* they jump to smp_main.
|
||||
*
|
||||
* The page at SMP_SETUP_ADDR is already reserved for this hack!
|
||||
*/
|
||||
bootaddr = (char*) SMP_SETUP_ADDR;
|
||||
memcpy(bootaddr, boot_code, sizeof(boot_code));
|
||||
for(j=0; j<sizeof(boot_code)-sizeof(uint32_t); j++)
|
||||
{
|
||||
// replace 0xDEADC0DE with the address of the smp entry code
|
||||
if (*((uint32_t*) (bootaddr+j)) == 0xDEADC0DE) {
|
||||
*((uint32_t*) (bootaddr+j)) = (size_t) smp_start;
|
||||
kprintf("Set entry point of the application processors at 0x%x\n", (size_t) smp_start);
|
||||
}
|
||||
|
||||
// replace APIC ID 0xDEADDEAD
|
||||
if (*((uint32_t*) (bootaddr+j)) == 0xDEADDEAD)
|
||||
*((uint32_t*) (bootaddr+j)) = i;
|
||||
|
||||
// replace 0xDEADBEEF with the addres of the stack
|
||||
if (*((uint32_t*) (bootaddr+j)) == 0xDEADBEEF) {
|
||||
size_t esp = get_idle_task(i);
|
||||
*((uint32_t*) (bootaddr+j)) = (uint32_t) esp;
|
||||
if ((int) esp < 0)
|
||||
kprintf("Invalid stack value\n");
|
||||
kprintf("Set stack of the application processors to 0x%x\n", esp);
|
||||
}
|
||||
}
|
||||
|
||||
//kprintf("size of the boot_code %d\n", sizeof(boot_code));
|
||||
err = wakeup_ap((uint32_t)bootaddr, i);
|
||||
if (err)
|
||||
kprintf("Unable to wakeup application processor %d: %d\n", i, err);
|
||||
|
||||
j = 0;
|
||||
while((ncores != atomic_int32_read(&cpu_online)) && (j < 100)) {
|
||||
udelay(1000);
|
||||
j++;
|
||||
}
|
||||
}
|
||||
|
||||
kprintf("size of the boot_code %d\n", sizeof(boot_code));
|
||||
err = wakeup_all_aps(bootaddr);
|
||||
if (err)
|
||||
kprintf("Unable to wakeup application processors: %d\n", err);
|
||||
|
||||
i = 0;
|
||||
while((ncores != atomic_int32_read(&cpu_online)) && (i < 1000))
|
||||
i++;
|
||||
|
||||
kprintf("%d cores online\n", atomic_int32_read(&cpu_online));
|
||||
|
||||
return 0;
|
||||
|
@ -283,21 +430,9 @@ static int lapic_reset(void)
|
|||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* detects the timer frequency of the APIC and restart
|
||||
* the APIC timer with the correct period
|
||||
*/
|
||||
int apic_calibration(void)
|
||||
int map_apic(void)
|
||||
{
|
||||
uint8_t i;
|
||||
uint32_t flags;
|
||||
#ifndef CONFIG_ROCKCREEK
|
||||
uint64_t ticks, old;
|
||||
uint32_t diff;
|
||||
#else
|
||||
uint64_t start, end, ticks;
|
||||
uint32_t diff;
|
||||
#endif
|
||||
uint32_t i;
|
||||
|
||||
if (!has_apic())
|
||||
return -ENXIO;
|
||||
|
@ -315,11 +450,33 @@ int apic_calibration(void)
|
|||
|
||||
// map all processor entries
|
||||
for(i=0; i<MAX_CORES; i++) {
|
||||
if (apic_processors[i] && (old != (((size_t)apic_processors[i]) & 0xFFFFF000)))
|
||||
if (apic_processors[i] && (old != (((size_t)apic_processors[i]) & 0xFFFFF000)))
|
||||
old = map_region(((size_t) apic_processors[i]) & 0xFFFFF000, ((size_t) apic_processors[i]) & 0xFFFFF000, 1, MAP_KERNEL_SPACE|MAP_NO_CACHE);
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* detects the timer frequency of the APIC and restart
|
||||
* the APIC timer with the correct period
|
||||
*/
|
||||
int apic_calibration(void)
|
||||
{
|
||||
uint32_t i;
|
||||
uint32_t flags;
|
||||
#ifndef CONFIG_ROCKCREEK
|
||||
uint64_t ticks, old;
|
||||
uint32_t diff;
|
||||
#else
|
||||
uint64_t start, end, ticks;
|
||||
uint32_t diff;
|
||||
#endif
|
||||
|
||||
if (!has_apic())
|
||||
return -ENXIO;
|
||||
|
||||
#ifndef CONFIG_ROCKCREEK
|
||||
old = get_clock_tick();
|
||||
|
||||
|
@ -379,9 +536,6 @@ int apic_calibration(void)
|
|||
kprintf("APIC calibration determines an ICR of 0x%x\n", icr);
|
||||
|
||||
flags = irq_nested_disable();
|
||||
#if MAX_CORES > 1
|
||||
//smp_init();
|
||||
#endif
|
||||
|
||||
if (ioapic) {
|
||||
// now, we don't longer need the IOAPIC timer and turn it off
|
||||
|
@ -391,6 +545,9 @@ int apic_calibration(void)
|
|||
ioapic_inton(i, apic_processors[boot_processor]->id);
|
||||
}
|
||||
initialized = 1;
|
||||
#if MAX_CORES > 1
|
||||
smp_init();
|
||||
#endif
|
||||
irq_nested_enable(flags);
|
||||
|
||||
return 0;
|
||||
|
@ -402,9 +559,16 @@ static int apic_probe(void)
|
|||
uint32_t i, count;
|
||||
int isa_bus = -1;
|
||||
|
||||
apic_mp = (apic_mp_t*) search_apic(0xF0000, 0x100000);
|
||||
if (apic_mp)
|
||||
goto found_mp;
|
||||
apic_mp = (apic_mp_t*) search_apic(0x9F000, 0xA0000);
|
||||
if (apic_mp)
|
||||
goto found_mp;
|
||||
|
||||
// searching MP signature in the reserved memory areas
|
||||
#ifdef CONFIG_MULTIBOOT
|
||||
if (mb_info && (mb_info->flags & (1 << 6))) {
|
||||
if (mb_info && (mb_info->flags & MULTIBOOT_INFO_MEM_MAP)) {
|
||||
multiboot_memory_map_t* mmap = (multiboot_memory_map_t*) mb_info->mmap_addr;
|
||||
multiboot_memory_map_t* mmap_end = (void*) ((size_t) mb_info->mmap_addr + mb_info->mmap_length);
|
||||
|
||||
|
@ -412,7 +576,7 @@ static int apic_probe(void)
|
|||
if (mmap->type == MULTIBOOT_MEMORY_RESERVED) {
|
||||
addr = mmap->addr;
|
||||
|
||||
for(i=0; i<mmap->len; i++, addr++) {
|
||||
for(i=0; i<mmap->len-sizeof(uint32_t); i++, addr++) {
|
||||
if (*((uint32_t*) addr) == MP_FLT_SIGNATURE) {
|
||||
apic_mp = (apic_mp_t*) addr;
|
||||
if (!(apic_mp->version > 4) && apic_mp->features[0])
|
||||
|
@ -424,13 +588,8 @@ static int apic_probe(void)
|
|||
mmap++;
|
||||
}
|
||||
}
|
||||
found_mp:
|
||||
#else
|
||||
apic_mp = (apic_mp_t*) search_apic(0xF0000, 0x100000);
|
||||
if (!apic_mp)
|
||||
apic_mp = (apic_mp_t*) search_apic(0x9F000, 0xA0000);
|
||||
#endif
|
||||
|
||||
found_mp:
|
||||
if (!apic_mp)
|
||||
goto no_mp;
|
||||
|
||||
|
@ -555,6 +714,16 @@ no_mp:
|
|||
goto check_lapic;
|
||||
}
|
||||
|
||||
#if MAX_CORES > 1
|
||||
static void apic_tlb_handler(struct state *s)
|
||||
{
|
||||
uint32_t val = read_cr3();
|
||||
|
||||
if (val)
|
||||
write_cr3(val);
|
||||
}
|
||||
#endif
|
||||
|
||||
static void apic_err_handler(struct state *s)
|
||||
{
|
||||
kprintf("Got APIC error 0x%x\n", lapic_read(APIC_ESR));
|
||||
|
@ -563,27 +732,33 @@ static void apic_err_handler(struct state *s)
|
|||
int apic_init(void)
|
||||
{
|
||||
int ret;
|
||||
uint8_t i;
|
||||
|
||||
ret = apic_probe();
|
||||
if (!ret)
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
// set APIC error handler
|
||||
irq_install_handler(126, apic_err_handler);
|
||||
#if MAX_CORES > 1
|
||||
irq_install_handler(124, apic_tlb_handler);
|
||||
#endif
|
||||
|
||||
#if 0
|
||||
// initialize local apic
|
||||
ret = lapic_reset();
|
||||
if (!ret)
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (ioapic) {
|
||||
uint32_t i;
|
||||
|
||||
// enable timer interrupt
|
||||
ioapic_inton(0, apic_processors[boot_processor]->id);
|
||||
// now lets turn everything else off
|
||||
for(i=1; i<24; i++)
|
||||
ioapic_intoff(i, apic_processors[boot_processor]->id);
|
||||
}
|
||||
#endif
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -64,6 +64,21 @@ ALIGN 4
|
|||
stublet:
|
||||
; initialize stack pointer.
|
||||
mov esp, default_stack_pointer
|
||||
; initialize cpu features
|
||||
call cpu_init
|
||||
; interpret multiboot information
|
||||
extern multiboot_init
|
||||
push ebx
|
||||
call multiboot_init
|
||||
add esp, 4
|
||||
|
||||
; jump to the boot processors's C code
|
||||
extern main
|
||||
call main
|
||||
jmp $
|
||||
|
||||
global cpu_init
|
||||
cpu_init:
|
||||
mov eax, cr0
|
||||
; enable caching, disable paging and fpu emulation
|
||||
and eax, 0x1ffffffb
|
||||
|
@ -77,16 +92,7 @@ stublet:
|
|||
mov eax, cr4
|
||||
and eax, 0xfffbf9ff
|
||||
mov cr4, eax
|
||||
; interpret multiboot information
|
||||
extern multiboot_init
|
||||
push ebx
|
||||
call multiboot_init
|
||||
add esp, 4
|
||||
|
||||
; jump to the boot processors's C code
|
||||
extern main
|
||||
call main
|
||||
jmp $
|
||||
ret
|
||||
|
||||
; This will set up our new segment registers. We need to do
|
||||
; something special in order to set CS. We do what is called a
|
||||
|
@ -154,72 +160,72 @@ isr0:
|
|||
; isr0 - isr31 are registered as "Interrupt Gate"
|
||||
; Therefore, the interrupt flag (IF) is already cleared.
|
||||
; cli
|
||||
push byte 0 ; pseudo error code
|
||||
push byte 0
|
||||
push byte 0
|
||||
jmp irq_common_stub
|
||||
jmp common_stub
|
||||
|
||||
; 1: Debug Exception
|
||||
isr1:
|
||||
; isr0 - isr31 are registered as "Interrupt Gate"
|
||||
; Therefore, the interrupt flag (IF) is already cleared.
|
||||
; cli
|
||||
push byte 0
|
||||
push byte 0 ; pseudo error code
|
||||
push byte 1
|
||||
jmp irq_common_stub
|
||||
jmp common_stub
|
||||
|
||||
; 2: Non Maskable Interrupt Exception
|
||||
isr2:
|
||||
; isr0 - isr31 are registered as "Interrupt Gate"
|
||||
; Therefore, the interrupt flag (IF) is already cleared.
|
||||
; cli
|
||||
push byte 0
|
||||
push byte 0 ; pseudo error code
|
||||
push byte 2
|
||||
jmp irq_common_stub
|
||||
jmp common_stub
|
||||
|
||||
; 3: Int 3 Exception
|
||||
isr3:
|
||||
; isr0 - isr31 are registered as "Interrupt Gate"
|
||||
; Therefore, the interrupt flag (IF) is already cleared.
|
||||
; cli
|
||||
push byte 0
|
||||
push byte 0 ; pseudo error code
|
||||
push byte 3
|
||||
jmp irq_common_stub
|
||||
jmp common_stub
|
||||
|
||||
; 4: INTO Exception
|
||||
isr4:
|
||||
; isr0 - isr31 are registered as "Interrupt Gate"
|
||||
; Therefore, the interrupt flag (IF) is already cleared.
|
||||
; cli
|
||||
push byte 0
|
||||
push byte 0 ; pseudo error code
|
||||
push byte 4
|
||||
jmp irq_common_stub
|
||||
jmp common_stub
|
||||
|
||||
; 5: Out of Bounds Exception
|
||||
isr5:
|
||||
; isr0 - isr31 are registered as "Interrupt Gate"
|
||||
; Therefore, the interrupt flag (IF) is already cleared.
|
||||
; cli
|
||||
push byte 0
|
||||
push byte 0 ; pseudo error code
|
||||
push byte 5
|
||||
jmp irq_common_stub
|
||||
jmp common_stub
|
||||
|
||||
; 6: Invalid Opcode Exception
|
||||
isr6:
|
||||
; isr0 - isr31 are registered as "Interrupt Gate"
|
||||
; Therefore, the interrupt flag (IF) is already cleared.
|
||||
; cli
|
||||
push byte 0
|
||||
push byte 0 ; pseudo error code
|
||||
push byte 6
|
||||
jmp irq_common_stub
|
||||
jmp common_stub
|
||||
|
||||
; 7: Coprocessor Not Available Exception
|
||||
isr7:
|
||||
; isr0 - isr31 are registered as "Interrupt Gate"
|
||||
; Therefore, the interrupt flag (IF) is already cleared.
|
||||
; cli
|
||||
push byte 0
|
||||
push byte 0 ; pseudo error code
|
||||
push byte 7
|
||||
jmp irq_common_stub
|
||||
jmp common_stub
|
||||
|
||||
; 8: Double Fault Exception (With Error Code!)
|
||||
isr8:
|
||||
|
@ -227,16 +233,16 @@ isr8:
|
|||
; Therefore, the interrupt flag (IF) is already cleared.
|
||||
; cli
|
||||
push byte 8
|
||||
jmp irq_common_stub
|
||||
jmp common_stub
|
||||
|
||||
; 9: Coprocessor Segment Overrun Exception
|
||||
isr9:
|
||||
; isr0 - isr31 are registered as "Interrupt Gate"
|
||||
; Therefore, the interrupt flag (IF) is already cleared.
|
||||
; cli
|
||||
push byte 0
|
||||
push byte 0 ; pseudo error code
|
||||
push byte 9
|
||||
jmp irq_common_stub
|
||||
jmp common_stub
|
||||
|
||||
; 10: Bad TSS Exception (With Error Code!)
|
||||
isr10:
|
||||
|
@ -244,7 +250,7 @@ isr10:
|
|||
; Therefore, the interrupt flag (IF) is already cleared.
|
||||
; cli
|
||||
push byte 10
|
||||
jmp irq_common_stub
|
||||
jmp common_stub
|
||||
|
||||
; 11: Segment Not Present Exception (With Error Code!)
|
||||
isr11:
|
||||
|
@ -252,7 +258,7 @@ isr11:
|
|||
; Therefore, the interrupt flag (IF) is already cleared.
|
||||
; cli
|
||||
push byte 11
|
||||
jmp irq_common_stub
|
||||
jmp common_stub
|
||||
|
||||
; 12: Stack Fault Exception (With Error Code!)
|
||||
isr12:
|
||||
|
@ -260,7 +266,7 @@ isr12:
|
|||
; Therefore, the interrupt flag (IF) is already cleared.
|
||||
; cli
|
||||
push byte 12
|
||||
jmp irq_common_stub
|
||||
jmp common_stub
|
||||
|
||||
; 13: General Protection Fault Exception (With Error Code!)
|
||||
isr13:
|
||||
|
@ -268,7 +274,7 @@ isr13:
|
|||
; Therefore, the interrupt flag (IF) is already cleared.
|
||||
; cli
|
||||
push byte 13
|
||||
jmp irq_common_stub
|
||||
jmp common_stub
|
||||
|
||||
; 14: Page Fault Exception (With Error Code!)
|
||||
isr14:
|
||||
|
@ -276,169 +282,168 @@ isr14:
|
|||
; Therefore, the interrupt flag (IF) is already cleared.
|
||||
; cli
|
||||
push byte 14
|
||||
jmp irq_common_stub
|
||||
jmp common_stub
|
||||
|
||||
; 15: Reserved Exception
|
||||
isr15:
|
||||
; isr0 - isr31 are registered as "Interrupt Gate"
|
||||
; Therefore, the interrupt flag (IF) is already cleared.
|
||||
; cli
|
||||
push byte 0
|
||||
push byte 0 ; pseudo error code
|
||||
push byte 15
|
||||
jmp irq_common_stub
|
||||
jmp common_stub
|
||||
|
||||
; 16: Floating Point Exception
|
||||
isr16:
|
||||
; isr0 - isr31 are registered as "Interrupt Gate"
|
||||
; Therefore, the interrupt flag (IF) is already cleared.
|
||||
; cli
|
||||
push byte 0
|
||||
push byte 0 ; pseudo error code
|
||||
push byte 16
|
||||
jmp irq_common_stub
|
||||
jmp common_stub
|
||||
|
||||
; 17: Alignment Check Exception
|
||||
isr17:
|
||||
; isr0 - isr31 are registered as "Interrupt Gate"
|
||||
; Therefore, the interrupt flag (IF) is already cleared.
|
||||
; cli
|
||||
push byte 0
|
||||
push byte 17
|
||||
jmp irq_common_stub
|
||||
jmp common_stub
|
||||
|
||||
; 18: Machine Check Exception
|
||||
isr18:
|
||||
; isr0 - isr31 are registered as "Interrupt Gate"
|
||||
; Therefore, the interrupt flag (IF) is already cleared.
|
||||
; cli
|
||||
push byte 0
|
||||
push byte 0 ; pseudo error code
|
||||
push byte 18
|
||||
jmp irq_common_stub
|
||||
jmp common_stub
|
||||
|
||||
; 19: Reserved
|
||||
isr19:
|
||||
; isr0 - isr31 are registered as "Interrupt Gate"
|
||||
; Therefore, the interrupt flag (IF) is already cleared.
|
||||
; cli
|
||||
push byte 0
|
||||
push byte 0 ; pseudo error code
|
||||
push byte 19
|
||||
jmp irq_common_stub
|
||||
jmp common_stub
|
||||
|
||||
; 20: Reserved
|
||||
isr20:
|
||||
; isr0 - isr31 are registered as "Interrupt Gate"
|
||||
; Therefore, the interrupt flag (IF) is already cleared.
|
||||
; cli
|
||||
push byte 0
|
||||
push byte 0 ; pseudo error code
|
||||
push byte 20
|
||||
jmp irq_common_stub
|
||||
jmp common_stub
|
||||
|
||||
; 21: Reserved
|
||||
isr21:
|
||||
; isr0 - isr31 are registered as "Interrupt Gate"
|
||||
; Therefore, the interrupt flag (IF) is already cleared.
|
||||
; cli
|
||||
push byte 0
|
||||
push byte 0 ; pseudo error code
|
||||
push byte 21
|
||||
jmp irq_common_stub
|
||||
jmp common_stub
|
||||
|
||||
; 22: Reserved
|
||||
isr22:
|
||||
; isr0 - isr31 are registered as "Interrupt Gate"
|
||||
; Therefore, the interrupt flag (IF) is already cleared.
|
||||
; cli
|
||||
push byte 0
|
||||
push byte 0 ; pseudo error code
|
||||
push byte 22
|
||||
jmp irq_common_stub
|
||||
jmp common_stub
|
||||
|
||||
; 23: Reserved
|
||||
isr23:
|
||||
; isr0 - isr31 are registered as "Interrupt Gate"
|
||||
; Therefore, the interrupt flag (IF) is already cleared.
|
||||
; cli
|
||||
push byte 0
|
||||
push byte 0 ; pseudo error code
|
||||
push byte 23
|
||||
jmp irq_common_stub
|
||||
jmp common_stub
|
||||
|
||||
; 24: Reserved
|
||||
isr24:
|
||||
; isr0 - isr31 are registered as "Interrupt Gate"
|
||||
; Therefore, the interrupt flag (IF) is already cleared.
|
||||
; cli
|
||||
push byte 0
|
||||
push byte 0 ; pseudo error code
|
||||
push byte 24
|
||||
jmp irq_common_stub
|
||||
jmp common_stub
|
||||
|
||||
; 25: Reserved
|
||||
isr25:
|
||||
; isr0 - isr31 are registered as "Interrupt Gate"
|
||||
; Therefore, the interrupt flag (IF) is already cleared.
|
||||
; cli
|
||||
push byte 0
|
||||
push byte 0 ; pseudo error code
|
||||
push byte 25
|
||||
jmp irq_common_stub
|
||||
jmp common_stub
|
||||
|
||||
; 26: Reserved
|
||||
isr26:
|
||||
; isr0 - isr31 are registered as "Interrupt Gate"
|
||||
; Therefore, the interrupt flag (IF) is already cleared.
|
||||
; cli
|
||||
push byte 0
|
||||
push byte 0 ; pseudo error code
|
||||
push byte 26
|
||||
jmp irq_common_stub
|
||||
jmp common_stub
|
||||
|
||||
; 27: Reserved
|
||||
isr27:
|
||||
; isr0 - isr31 are registered as "Interrupt Gate"
|
||||
; Therefore, the interrupt flag (IF) is already cleared.
|
||||
; cli
|
||||
push byte 0
|
||||
push byte 0 ; pseudo error code
|
||||
push byte 27
|
||||
jmp irq_common_stub
|
||||
jmp common_stub
|
||||
|
||||
; 28: Reserved
|
||||
isr28:
|
||||
; isr0 - isr31 are registered as "Interrupt Gate"
|
||||
; Therefore, the interrupt flag (IF) is already cleared.
|
||||
; cli
|
||||
push byte 0
|
||||
push byte 0 ; pseudo error code
|
||||
push byte 28
|
||||
jmp irq_common_stub
|
||||
jmp common_stub
|
||||
|
||||
; 29: Reserved
|
||||
isr29:
|
||||
; isr0 - isr31 are registered as "Interrupt Gate"
|
||||
; Therefore, the interrupt flag (IF) is already cleared.
|
||||
; cli
|
||||
push byte 0
|
||||
push byte 0 ; pseudo error code
|
||||
push byte 29
|
||||
jmp irq_common_stub
|
||||
jmp common_stub
|
||||
|
||||
; 30: Reserved
|
||||
isr30:
|
||||
; isr0 - isr31 are registered as "Interrupt Gate"
|
||||
; Therefore, the interrupt flag (IF) is already cleared.
|
||||
; cli
|
||||
push byte 0
|
||||
push byte 0 ; pseudo error code
|
||||
push byte 30
|
||||
jmp irq_common_stub
|
||||
jmp common_stub
|
||||
|
||||
; 31: Reserved
|
||||
isr31:
|
||||
; isr0 - isr31 are registered as "Interrupt Gate"
|
||||
; Therefore, the interrupt flag (IF) is already cleared.
|
||||
; cli
|
||||
push byte 0
|
||||
push byte 0 ; pseudo error code
|
||||
push byte 31
|
||||
jmp irq_common_stub
|
||||
jmp common_stub
|
||||
|
||||
extern syscall_handler
|
||||
|
||||
; used to realize system calls
|
||||
isrsyscall:
|
||||
push ds
|
||||
push fs
|
||||
push gs
|
||||
push es
|
||||
;push ds
|
||||
;push fs
|
||||
;push gs
|
||||
;push es
|
||||
push ebp
|
||||
push edi
|
||||
push esi
|
||||
|
@ -455,10 +460,10 @@ isrsyscall:
|
|||
pop esi
|
||||
pop edi
|
||||
pop ebp
|
||||
pop es
|
||||
pop gs
|
||||
pop fs
|
||||
pop ds
|
||||
;pop es
|
||||
;pop gs
|
||||
;pop fs
|
||||
;pop ds
|
||||
iret
|
||||
|
||||
global irq0
|
||||
|
@ -491,324 +496,279 @@ global apic_lint1
|
|||
global apic_error
|
||||
global apic_svr
|
||||
|
||||
extern irq_handler
|
||||
extern get_current_task
|
||||
extern scheduler
|
||||
|
||||
global reschedule
|
||||
reschedule:
|
||||
cli
|
||||
; eax could change across a function call
|
||||
; => so we don't have to save the original eax value
|
||||
push ebx
|
||||
|
||||
call get_current_task
|
||||
push eax
|
||||
call scheduler
|
||||
call get_current_task
|
||||
pop ebx
|
||||
cmp eax, ebx
|
||||
je no_task_switch1
|
||||
|
||||
mov eax, [eax]
|
||||
add ax, WORD 5
|
||||
mov bx, WORD 8
|
||||
mul bx
|
||||
mov [hack1+5], ax
|
||||
hack1:
|
||||
jmp 0x00 : 0xDEADBEAF
|
||||
|
||||
no_task_switch1:
|
||||
pop ebx
|
||||
sti
|
||||
ret
|
||||
global switch_task
|
||||
switch_task:
|
||||
mov eax, [esp+4]
|
||||
add ax, WORD 5
|
||||
mov bx, WORD 8
|
||||
mul bx
|
||||
mov [hack+5], ax
|
||||
hack:
|
||||
jmp 0x00 : 0xDEADBEAF
|
||||
ret
|
||||
|
||||
; 32: IRQ0
|
||||
irq0:
|
||||
; irq0 - irq15 are registered as "Interrupt Gate"
|
||||
; Therefore, the interrupt flag (IF) is already cleared.
|
||||
; cli
|
||||
push byte 0
|
||||
push byte 0 ; pseudo error code
|
||||
push byte 32
|
||||
|
||||
Lirq0:
|
||||
pusha
|
||||
push esp
|
||||
call irq_handler
|
||||
add esp, 4
|
||||
|
||||
call get_current_task
|
||||
push eax
|
||||
call scheduler
|
||||
call get_current_task
|
||||
pop ebx
|
||||
cmp eax, ebx
|
||||
je no_task_switch2
|
||||
|
||||
mov eax, [eax]
|
||||
add ax, WORD 5
|
||||
mov bx, WORD 8
|
||||
mul bx
|
||||
mov [hack2+5], ax
|
||||
hack2:
|
||||
jmp 0x00 : 0xDEADBEAF
|
||||
|
||||
no_task_switch2:
|
||||
popa
|
||||
add esp, 8
|
||||
iret
|
||||
jmp common_stub
|
||||
|
||||
; 33: IRQ1
|
||||
irq1:
|
||||
; irq0 - irq15 are registered as "Interrupt Gate"
|
||||
; Therefore, the interrupt flag (IF) is already cleared.
|
||||
; cli
|
||||
push byte 0
|
||||
push byte 0 ; pseudo error code
|
||||
push byte 33
|
||||
jmp irq_common_stub
|
||||
jmp common_stub
|
||||
|
||||
; 34: IRQ2
|
||||
irq2:
|
||||
; irq0 - irq15 are registered as "Interrupt Gate"
|
||||
; Therefore, the interrupt flag (IF) is already cleared.
|
||||
; cli
|
||||
push byte 0
|
||||
push byte 0 ; pseudo error code
|
||||
push byte 34
|
||||
jmp irq_common_stub
|
||||
jmp common_stub
|
||||
|
||||
; 35: IRQ3
|
||||
irq3:
|
||||
; irq0 - irq15 are registered as "Interrupt Gate"
|
||||
; Therefore, the interrupt flag (IF) is already cleared.
|
||||
; cli
|
||||
push byte 0
|
||||
push byte 0 ; pseudo error code
|
||||
push byte 35
|
||||
jmp irq_common_stub
|
||||
jmp common_stub
|
||||
|
||||
; 36: IRQ4
|
||||
irq4:
|
||||
; irq0 - irq15 are registered as "Interrupt Gate"
|
||||
; Therefore, the interrupt flag (IF) is already cleared.
|
||||
; cli
|
||||
push byte 0
|
||||
push byte 0 ; pseudo error code
|
||||
push byte 36
|
||||
jmp irq_common_stub
|
||||
jmp common_stub
|
||||
|
||||
; 37: IRQ5
|
||||
irq5:
|
||||
; irq0 - irq15 are registered as "Interrupt Gate"
|
||||
; Therefore, the interrupt flag (IF) is already cleared.
|
||||
; cli
|
||||
push byte 0
|
||||
push byte 0 ; pseudo error code
|
||||
push byte 37
|
||||
jmp irq_common_stub
|
||||
jmp common_stub
|
||||
|
||||
; 38: IRQ6
|
||||
irq6:
|
||||
; irq0 - irq15 are registered as "Interrupt Gate"
|
||||
; Therefore, the interrupt flag (IF) is already cleared.
|
||||
; cli
|
||||
push byte 0
|
||||
push byte 0 ; pseudo error code
|
||||
push byte 38
|
||||
jmp irq_common_stub
|
||||
jmp common_stub
|
||||
|
||||
; 39: IRQ7
|
||||
irq7:
|
||||
; irq0 - irq15 are registered as "Interrupt Gate"
|
||||
; Therefore, the interrupt flag (IF) is already cleared.
|
||||
; cli
|
||||
push byte 0
|
||||
push byte 0 ; pseudo error code
|
||||
push byte 39
|
||||
jmp irq_common_stub
|
||||
jmp common_stub
|
||||
|
||||
; 40: IRQ8
|
||||
irq8:
|
||||
; irq0 - irq15 are registered as "Interrupt Gate"
|
||||
; Therefore, the interrupt flag (IF) is already cleared.
|
||||
; cli
|
||||
push byte 0
|
||||
push byte 0 ; pseudo error code
|
||||
push byte 40
|
||||
jmp irq_common_stub
|
||||
jmp common_stub
|
||||
|
||||
; 41: IRQ9
|
||||
irq9:
|
||||
; irq0 - irq15 are registered as "Interrupt Gate"
|
||||
; Therefore, the interrupt flag (IF) is already cleared.
|
||||
; cli
|
||||
push byte 0
|
||||
push byte 0 ; pseudo error code
|
||||
push byte 41
|
||||
jmp irq_common_stub
|
||||
jmp common_stub
|
||||
|
||||
; 42: IRQ10
|
||||
irq10:
|
||||
; irq0 - irq15 are registered as "Interrupt Gate"
|
||||
; Therefore, the interrupt flag (IF) is already cleared.
|
||||
; cli
|
||||
push byte 0
|
||||
push byte 0 ; pseudo error code
|
||||
push byte 42
|
||||
jmp irq_common_stub
|
||||
jmp common_stub
|
||||
|
||||
; 43: IRQ11
|
||||
irq11:
|
||||
; irq0 - irq15 are registered as "Interrupt Gate"
|
||||
; Therefore, the interrupt flag (IF) is already cleared.
|
||||
; cli
|
||||
push byte 0
|
||||
push byte 0 ; pseudo error code
|
||||
push byte 43
|
||||
jmp irq_common_stub
|
||||
jmp common_stub
|
||||
|
||||
; 44: IRQ12
|
||||
irq12:
|
||||
; irq0 - irq15 are registered as "Interrupt Gate"
|
||||
; Therefore, the interrupt flag (IF) is already cleared.
|
||||
; cli
|
||||
push byte 0
|
||||
push byte 0 ; pseudo error code
|
||||
push byte 44
|
||||
jmp irq_common_stub
|
||||
jmp common_stub
|
||||
|
||||
; 45: IRQ13
|
||||
irq13:
|
||||
; irq0 - irq15 are registered as "Interrupt Gate"
|
||||
; Therefore, the interrupt flag (IF) is already cleared.
|
||||
; cli
|
||||
push byte 0
|
||||
push byte 0 ; pseudo error code
|
||||
push byte 45
|
||||
jmp irq_common_stub
|
||||
jmp common_stub
|
||||
|
||||
; 46: IRQ14
|
||||
irq14:
|
||||
; irq0 - irq15 are registered as "Interrupt Gate"
|
||||
; Therefore, the interrupt flag (IF) is already cleared.
|
||||
; cli
|
||||
push byte 0
|
||||
push byte 0 ; pseudo error code
|
||||
push byte 46
|
||||
jmp irq_common_stub
|
||||
jmp common_stub
|
||||
|
||||
; 47: IRQ15
|
||||
irq15:
|
||||
; irq0 - irq15 are registered as "Interrupt Gate"
|
||||
; Therefore, the interrupt flag (IF) is already cleared.
|
||||
; cli
|
||||
push byte 0
|
||||
push byte 0 ; pseudo error code
|
||||
push byte 47
|
||||
jmp irq_common_stub
|
||||
jmp common_stub
|
||||
|
||||
; 48: IRQ16
|
||||
irq16:
|
||||
; irq16 - irq23 are registered as "Interrupt Gate"
|
||||
; Therefore, the interrupt flag (IF) is already cleared.
|
||||
; cli
|
||||
push byte 0
|
||||
push byte 0 ; pseudo error code
|
||||
push byte 48
|
||||
jmp irq_common_stub
|
||||
jmp common_stub
|
||||
|
||||
; 49: IRQ17
|
||||
irq17:
|
||||
; irq16- irq23 are registered as "Interrupt Gate"
|
||||
; Therefore, the interrupt flag (IF) is already cleared.
|
||||
; cli
|
||||
push byte 0
|
||||
push byte 0 ; pseudo error code
|
||||
push byte 49
|
||||
jmp irq_common_stub
|
||||
jmp common_stub
|
||||
|
||||
; 50: IRQ18
|
||||
irq18:
|
||||
; irq16 - irq23 are registered as "Interrupt Gate"
|
||||
; Therefore, the interrupt flag (IF) is already cleared.
|
||||
; cli
|
||||
push byte 0
|
||||
push byte 0 ; pseudo error code
|
||||
push byte 50
|
||||
jmp irq_common_stub
|
||||
jmp common_stub
|
||||
|
||||
; 51: IRQ19
|
||||
irq19:
|
||||
; irq16 - irq23 are registered as "Interrupt Gate"
|
||||
; Therefore, the interrupt flag (IF) is already cleared.
|
||||
; cli
|
||||
push byte 0
|
||||
push byte 0 ; pseudo error code
|
||||
push byte 51
|
||||
jmp irq_common_stub
|
||||
jmp common_stub
|
||||
|
||||
; 52: IRQ20
|
||||
irq20:
|
||||
; irq16- irq23 are registered as "Interrupt Gate"
|
||||
; Therefore, the interrupt flag (IF) is already cleared.
|
||||
; cli
|
||||
push byte 0
|
||||
push byte 0 ; pseudo error code
|
||||
push byte 52
|
||||
jmp irq_common_stub
|
||||
jmp common_stub
|
||||
|
||||
; 53: IRQ21
|
||||
irq21:
|
||||
; irq16 - irq23 are registered as "Interrupt Gate"
|
||||
; Therefore, the interrupt flag (IF) is already cleared.
|
||||
; cli
|
||||
push byte 0
|
||||
push byte 0 ; error code
|
||||
push byte 53
|
||||
jmp irq_common_stub
|
||||
jmp common_stub
|
||||
|
||||
; 54: IRQ22
|
||||
irq22:
|
||||
; irq16- irq23 are registered as "Interrupt Gate"
|
||||
; Therefore, the interrupt flag (IF) is already cleared.
|
||||
; cli
|
||||
push byte 0
|
||||
push byte 0 ; pseudo error code
|
||||
push byte 54
|
||||
jmp irq_common_stub
|
||||
jmp common_stub
|
||||
|
||||
; 55: IRQ23
|
||||
irq23:
|
||||
; irq16 - irq23 are registered as "Interrupt Gate"
|
||||
; Therefore, the interrupt flag (IF) is already cleared.
|
||||
; cli
|
||||
push byte 0
|
||||
push byte 0 ; pseudo error code
|
||||
push byte 55
|
||||
jmp irq_common_stub
|
||||
jmp common_stub
|
||||
|
||||
apic_timer:
|
||||
; apic timer is registered as "Interrupt Gate"
|
||||
; Therefore, the interrupt flag (IF) is already cleared.
|
||||
; cli
|
||||
push byte 0
|
||||
push byte 0 ; pseudo error code
|
||||
push byte 123
|
||||
; we reuse code of the "traditional" timer interrupt (PIC)
|
||||
jmp Lirq0
|
||||
jmp common_stub
|
||||
|
||||
apic_lint0:
|
||||
; lint0 is registered as "Interrupt Gate"
|
||||
; Therefore, the interrupt flag (IF) is already cleared.
|
||||
; cli
|
||||
push byte 0
|
||||
push byte 0 ; pseudo error code
|
||||
push byte 124
|
||||
jmp irq_common_stub
|
||||
jmp common_stub
|
||||
|
||||
apic_lint1:
|
||||
; lint1 is registered as "Interrupt Gate"
|
||||
; Therefore, the interrupt flag (IF) is already cleared.
|
||||
; cli
|
||||
push byte 0
|
||||
push byte 0 ; pseudo error code
|
||||
push byte 125
|
||||
jmp irq_common_stub
|
||||
jmp common_stub
|
||||
|
||||
apic_error:
|
||||
; LVT error interrupt is registered as "Interrupt Gate"
|
||||
; Therefore, the interrupt flag (IF) is already cleared.
|
||||
; cli
|
||||
push byte 0
|
||||
push byte 0 ; pseudo error code
|
||||
push byte 126
|
||||
jmp irq_common_stub
|
||||
jmp common_stub
|
||||
|
||||
apic_svr:
|
||||
; SVR is registered as "Interrupt Gate"
|
||||
; Therefore, the interrupt flag (IF) is already cleared.
|
||||
; cli
|
||||
push byte 0
|
||||
push byte 0 ; pseudo error code
|
||||
push byte 127
|
||||
jmp irq_common_stub
|
||||
jmp common_stub
|
||||
|
||||
irq_common_stub:
|
||||
extern irq_handler
|
||||
|
||||
common_stub:
|
||||
pusha
|
||||
|
||||
; use the same handler for interrupts and exceptions
|
||||
push esp
|
||||
call irq_handler
|
||||
add esp, 4
|
||||
|
|
|
@ -34,11 +34,24 @@ static gdt_entry_t gdt[GDT_ENTRIES] = {[0 ... GDT_ENTRIES-1] = {0, 0, 0, 0, 0,
|
|||
unsigned char* default_stack_pointer __attribute__ ((section (".data"))) = kstacks[0] + KERNEL_STACK_SIZE - sizeof(size_t);
|
||||
|
||||
/*
|
||||
* This is in start.asm. We use this to properly reload
|
||||
* This is defined in entry.asm. We use this to properly reload
|
||||
* the new segment registers
|
||||
*/
|
||||
extern void gdt_flush(void);
|
||||
|
||||
/*
|
||||
* This is defined in entry.asm. We use this for a
|
||||
* hardware-based task switch.
|
||||
*/
|
||||
extern void tss_switch(uint32_t id);
|
||||
|
||||
size_t get_stack(uint32_t id)
|
||||
{
|
||||
if (BUILTIN_EXPECT(id >= MAX_TASKS, 0))
|
||||
return -EINVAL;
|
||||
return (size_t) kstacks[id] + KERNEL_STACK_SIZE - sizeof(size_t);
|
||||
}
|
||||
|
||||
int register_task(task_t* task) {
|
||||
uint16_t sel;
|
||||
uint32_t id = task->id;
|
||||
|
@ -112,7 +125,7 @@ int arch_fork(task_t* task)
|
|||
return 0;
|
||||
}
|
||||
|
||||
int create_default_frame(task_t* task, entry_point_t ep, void* arg)
|
||||
int create_default_frame(task_t* task, internal_entry_point_t ep, void* arg)
|
||||
{
|
||||
uint16_t cs = 0x08;
|
||||
uint16_t ds = 0x10;
|
||||
|
|
|
@ -41,17 +41,8 @@
|
|||
*/
|
||||
static idt_entry_t idt[256] = {[0 ... 255] = {0, 0, 0, 0, 0}};
|
||||
static idt_ptr_t idtp;
|
||||
/*
|
||||
* Use this function to set an entry in the IDT. Alot simpler
|
||||
* than twiddling with the GDT ;)
|
||||
*/
|
||||
void idt_set_gate(unsigned char num, unsigned long base, unsigned short sel,
|
||||
unsigned char flags)
|
||||
{
|
||||
idt[num] = configure_idt_entry(base, sel, flags);
|
||||
}
|
||||
|
||||
idt_entry_t configure_idt_entry(unsigned long base, unsigned short sel,
|
||||
idt_entry_t configure_idt_entry(size_t base, unsigned short sel,
|
||||
unsigned char flags)
|
||||
{
|
||||
idt_entry_t desc;
|
||||
|
@ -69,18 +60,34 @@ idt_entry_t configure_idt_entry(unsigned long base, unsigned short sel,
|
|||
return desc;
|
||||
}
|
||||
|
||||
/*
|
||||
* Use this function to set an entry in the IDT. Alot simpler
|
||||
* than twiddling with the GDT ;)
|
||||
*/
|
||||
void idt_set_gate(unsigned char num, size_t base, unsigned short sel,
|
||||
unsigned char flags)
|
||||
{
|
||||
idt[num] = configure_idt_entry(base, sel, flags);
|
||||
}
|
||||
|
||||
extern void isrsyscall(void);
|
||||
|
||||
/* Installs the IDT */
|
||||
void idt_install(void)
|
||||
{
|
||||
/* Sets the special IDT pointer up, just like in 'gdt.c' */
|
||||
idtp.limit = (sizeof(idt_entry_t) * 256) - 1;
|
||||
idtp.base = (unsigned int)&idt;
|
||||
static int initialized = 0;
|
||||
|
||||
/* Add any new ISRs to the IDT here using idt_set_gate */
|
||||
idt_set_gate(INT_SYSCALL, (unsigned int)isrsyscall, KERNEL_CODE_SELECTOR,
|
||||
IDT_FLAG_PRESENT|IDT_FLAG_RING3|IDT_FLAG_32BIT|IDT_FLAG_TRAPGATE);
|
||||
if (!initialized) {
|
||||
initialized = 1;
|
||||
|
||||
/* Sets the special IDT pointer up, just like in 'gdt.c' */
|
||||
idtp.limit = (sizeof(idt_entry_t) * 256) - 1;
|
||||
idtp.base = (unsigned int)&idt;
|
||||
|
||||
/* Add any new ISRs to the IDT here using idt_set_gate */
|
||||
idt_set_gate(INT_SYSCALL, (size_t)isrsyscall, KERNEL_CODE_SELECTOR,
|
||||
IDT_FLAG_PRESENT|IDT_FLAG_RING3|IDT_FLAG_32BIT|IDT_FLAG_TRAPGATE);
|
||||
}
|
||||
|
||||
/* Points the processor's internal register to the new IDT */
|
||||
asm volatile("lidt %0" : : "m" (idtp));
|
||||
|
|
|
@ -138,66 +138,66 @@ static int irq_install(void)
|
|||
{
|
||||
irq_remap();
|
||||
|
||||
idt_set_gate(32, (unsigned)irq0, KERNEL_CODE_SELECTOR,
|
||||
idt_set_gate(32, (size_t)irq0, KERNEL_CODE_SELECTOR,
|
||||
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
|
||||
idt_set_gate(33, (unsigned)irq1, KERNEL_CODE_SELECTOR,
|
||||
idt_set_gate(33, (size_t)irq1, KERNEL_CODE_SELECTOR,
|
||||
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
|
||||
idt_set_gate(34, (unsigned)irq2, KERNEL_CODE_SELECTOR,
|
||||
idt_set_gate(34, (size_t)irq2, KERNEL_CODE_SELECTOR,
|
||||
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
|
||||
idt_set_gate(35, (unsigned)irq3, KERNEL_CODE_SELECTOR,
|
||||
idt_set_gate(35, (size_t)irq3, KERNEL_CODE_SELECTOR,
|
||||
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
|
||||
idt_set_gate(36, (unsigned)irq4, KERNEL_CODE_SELECTOR,
|
||||
idt_set_gate(36, (size_t)irq4, KERNEL_CODE_SELECTOR,
|
||||
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
|
||||
idt_set_gate(37, (unsigned)irq5, KERNEL_CODE_SELECTOR,
|
||||
idt_set_gate(37, (size_t)irq5, KERNEL_CODE_SELECTOR,
|
||||
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
|
||||
idt_set_gate(38, (unsigned)irq6, KERNEL_CODE_SELECTOR,
|
||||
idt_set_gate(38, (size_t)irq6, KERNEL_CODE_SELECTOR,
|
||||
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
|
||||
idt_set_gate(39, (unsigned)irq7, KERNEL_CODE_SELECTOR,
|
||||
idt_set_gate(39, (size_t)irq7, KERNEL_CODE_SELECTOR,
|
||||
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
|
||||
idt_set_gate(40, (unsigned)irq8, KERNEL_CODE_SELECTOR,
|
||||
idt_set_gate(40, (size_t)irq8, KERNEL_CODE_SELECTOR,
|
||||
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
|
||||
idt_set_gate(41, (unsigned)irq9, KERNEL_CODE_SELECTOR,
|
||||
idt_set_gate(41, (size_t)irq9, KERNEL_CODE_SELECTOR,
|
||||
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
|
||||
idt_set_gate(42, (unsigned)irq10, KERNEL_CODE_SELECTOR,
|
||||
idt_set_gate(42, (size_t)irq10, KERNEL_CODE_SELECTOR,
|
||||
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
|
||||
idt_set_gate(43, (unsigned)irq11, KERNEL_CODE_SELECTOR,
|
||||
idt_set_gate(43, (size_t)irq11, KERNEL_CODE_SELECTOR,
|
||||
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
|
||||
idt_set_gate(44, (unsigned)irq12, KERNEL_CODE_SELECTOR,
|
||||
idt_set_gate(44, (size_t)irq12, KERNEL_CODE_SELECTOR,
|
||||
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
|
||||
idt_set_gate(45, (unsigned)irq13, KERNEL_CODE_SELECTOR,
|
||||
idt_set_gate(45, (size_t)irq13, KERNEL_CODE_SELECTOR,
|
||||
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
|
||||
idt_set_gate(46, (unsigned)irq14, KERNEL_CODE_SELECTOR,
|
||||
idt_set_gate(46, (size_t)irq14, KERNEL_CODE_SELECTOR,
|
||||
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
|
||||
idt_set_gate(47, (unsigned)irq15, KERNEL_CODE_SELECTOR,
|
||||
idt_set_gate(47, (size_t)irq15, KERNEL_CODE_SELECTOR,
|
||||
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
|
||||
|
||||
if (has_apic()) {
|
||||
idt_set_gate(48, (unsigned)irq16, KERNEL_CODE_SELECTOR,
|
||||
idt_set_gate(48, (size_t)irq16, KERNEL_CODE_SELECTOR,
|
||||
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
|
||||
idt_set_gate(49, (unsigned)irq17, KERNEL_CODE_SELECTOR,
|
||||
idt_set_gate(49, (size_t)irq17, KERNEL_CODE_SELECTOR,
|
||||
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
|
||||
idt_set_gate(50, (unsigned)irq18, KERNEL_CODE_SELECTOR,
|
||||
idt_set_gate(50, (size_t)irq18, KERNEL_CODE_SELECTOR,
|
||||
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
|
||||
idt_set_gate(51, (unsigned)irq19, KERNEL_CODE_SELECTOR,
|
||||
idt_set_gate(51, (size_t)irq19, KERNEL_CODE_SELECTOR,
|
||||
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
|
||||
idt_set_gate(52, (unsigned)irq20, KERNEL_CODE_SELECTOR,
|
||||
idt_set_gate(52, (size_t)irq20, KERNEL_CODE_SELECTOR,
|
||||
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
|
||||
idt_set_gate(53, (unsigned)irq21, KERNEL_CODE_SELECTOR,
|
||||
idt_set_gate(53, (size_t)irq21, KERNEL_CODE_SELECTOR,
|
||||
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
|
||||
idt_set_gate(54, (unsigned)irq22, KERNEL_CODE_SELECTOR,
|
||||
idt_set_gate(54, (size_t)irq22, KERNEL_CODE_SELECTOR,
|
||||
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
|
||||
idt_set_gate(55, (unsigned)irq23, KERNEL_CODE_SELECTOR,
|
||||
idt_set_gate(55, (size_t)irq23, KERNEL_CODE_SELECTOR,
|
||||
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
|
||||
|
||||
idt_set_gate(123, (unsigned)apic_timer, KERNEL_CODE_SELECTOR,
|
||||
idt_set_gate(123, (size_t)apic_timer, KERNEL_CODE_SELECTOR,
|
||||
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
|
||||
idt_set_gate(124, (unsigned)apic_lint0, KERNEL_CODE_SELECTOR,
|
||||
idt_set_gate(124, (size_t)apic_lint0, KERNEL_CODE_SELECTOR,
|
||||
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
|
||||
idt_set_gate(125, (unsigned)apic_lint1, KERNEL_CODE_SELECTOR,
|
||||
idt_set_gate(125, (size_t)apic_lint1, KERNEL_CODE_SELECTOR,
|
||||
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
|
||||
idt_set_gate(126, (unsigned)apic_error, KERNEL_CODE_SELECTOR,
|
||||
idt_set_gate(126, (size_t)apic_error, KERNEL_CODE_SELECTOR,
|
||||
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
|
||||
idt_set_gate(127, (unsigned)apic_svr, KERNEL_CODE_SELECTOR,
|
||||
idt_set_gate(127, (size_t)apic_svr, KERNEL_CODE_SELECTOR,
|
||||
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
|
||||
}
|
||||
|
||||
|
@ -253,9 +253,10 @@ void irq_handler(struct state *s)
|
|||
*/
|
||||
if (apic_is_enabled() || s->int_no >= 123) {
|
||||
apic_eoi();
|
||||
return;
|
||||
goto leave_handler;
|
||||
}
|
||||
|
||||
#ifndef CONFIG_ROCKCREEK
|
||||
/*
|
||||
* If the IDT entry that was invoked was greater-than-or-equal to 40
|
||||
* and lower than 48 (meaning IRQ8 - 15), then we need to
|
||||
|
@ -269,4 +270,12 @@ void irq_handler(struct state *s)
|
|||
* interrupt controller of the PIC, too
|
||||
*/
|
||||
outportb(0x20, 0x20);
|
||||
#else
|
||||
kprintf("Upps, RockCreek uses IRQs below 123!\n");
|
||||
#endif
|
||||
|
||||
leave_handler:
|
||||
// timer interrupt?
|
||||
if ((s->int_no == 32) || (s->int_no == 123))
|
||||
scheduler(); // switch to a new task
|
||||
}
|
||||
|
|
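/*
 * Driver-side sketch (assumption, not in this diff): after irq_remap(), IRQ n
 * arrives on vector 32+n and is dispatched by irq_handler() above to a handler
 * registered via irq_install_handler(), as timer.c does later in this commit.
 * The keyboard IRQ number and the inportb() helper are illustrative only.
 */
static void kbd_handler_sketch(struct state *s)
{
	uint8_t scancode = inportb(0x60);           /* assumed port-input counterpart to outportb() */
	kprintf("keyboard scancode 0x%x\n", scancode);
}

static void kbd_init_sketch(void)
{
	irq_install_handler(32 + 1, kbd_handler_sketch);   /* IRQ1 -> vector 33 */
}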
|
@ -91,69 +91,69 @@ void isrs_install(void)
|
|||
{
|
||||
int i;
|
||||
|
||||
idt_set_gate(0, (unsigned)isr0, KERNEL_CODE_SELECTOR,
|
||||
idt_set_gate(0, (size_t)isr0, KERNEL_CODE_SELECTOR,
|
||||
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
|
||||
idt_set_gate(1, (unsigned)isr1, KERNEL_CODE_SELECTOR,
|
||||
idt_set_gate(1, (size_t)isr1, KERNEL_CODE_SELECTOR,
|
||||
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
|
||||
idt_set_gate(2, (unsigned)isr2, KERNEL_CODE_SELECTOR,
|
||||
idt_set_gate(2, (size_t)isr2, KERNEL_CODE_SELECTOR,
|
||||
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
|
||||
idt_set_gate(3, (unsigned)isr3, KERNEL_CODE_SELECTOR,
|
||||
idt_set_gate(3, (size_t)isr3, KERNEL_CODE_SELECTOR,
|
||||
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
|
||||
idt_set_gate(4, (unsigned)isr4, KERNEL_CODE_SELECTOR,
|
||||
idt_set_gate(4, (size_t)isr4, KERNEL_CODE_SELECTOR,
|
||||
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
|
||||
idt_set_gate(5, (unsigned)isr5, KERNEL_CODE_SELECTOR,
|
||||
idt_set_gate(5, (size_t)isr5, KERNEL_CODE_SELECTOR,
|
||||
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
|
||||
idt_set_gate(6, (unsigned)isr6, KERNEL_CODE_SELECTOR,
|
||||
idt_set_gate(6, (size_t)isr6, KERNEL_CODE_SELECTOR,
|
||||
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
|
||||
idt_set_gate(7, (unsigned)isr7, KERNEL_CODE_SELECTOR,
|
||||
idt_set_gate(7, (size_t)isr7, KERNEL_CODE_SELECTOR,
|
||||
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
|
||||
idt_set_gate(8, (unsigned)isr8, KERNEL_CODE_SELECTOR,
|
||||
idt_set_gate(8, (size_t)isr8, KERNEL_CODE_SELECTOR,
|
||||
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
|
||||
idt_set_gate(9, (unsigned)isr9, KERNEL_CODE_SELECTOR,
|
||||
idt_set_gate(9, (size_t)isr9, KERNEL_CODE_SELECTOR,
|
||||
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
|
||||
idt_set_gate(10, (unsigned)isr10, KERNEL_CODE_SELECTOR,
|
||||
idt_set_gate(10, (size_t)isr10, KERNEL_CODE_SELECTOR,
|
||||
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
|
||||
idt_set_gate(11, (unsigned)isr11, KERNEL_CODE_SELECTOR,
|
||||
idt_set_gate(11, (size_t)isr11, KERNEL_CODE_SELECTOR,
|
||||
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
|
||||
idt_set_gate(12, (unsigned)isr12, KERNEL_CODE_SELECTOR,
|
||||
idt_set_gate(12, (size_t)isr12, KERNEL_CODE_SELECTOR,
|
||||
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
|
||||
idt_set_gate(13, (unsigned)isr13, KERNEL_CODE_SELECTOR,
|
||||
idt_set_gate(13, (size_t)isr13, KERNEL_CODE_SELECTOR,
|
||||
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
|
||||
idt_set_gate(14, (unsigned)isr14, KERNEL_CODE_SELECTOR,
|
||||
idt_set_gate(14, (size_t)isr14, KERNEL_CODE_SELECTOR,
|
||||
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
|
||||
idt_set_gate(15, (unsigned)isr15, KERNEL_CODE_SELECTOR,
|
||||
idt_set_gate(15, (size_t)isr15, KERNEL_CODE_SELECTOR,
|
||||
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
|
||||
idt_set_gate(16, (unsigned)isr16, KERNEL_CODE_SELECTOR,
|
||||
idt_set_gate(16, (size_t)isr16, KERNEL_CODE_SELECTOR,
|
||||
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
|
||||
idt_set_gate(17, (unsigned)isr17, KERNEL_CODE_SELECTOR,
|
||||
idt_set_gate(17, (size_t)isr17, KERNEL_CODE_SELECTOR,
|
||||
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
|
||||
idt_set_gate(18, (unsigned)isr18, KERNEL_CODE_SELECTOR,
|
||||
idt_set_gate(18, (size_t)isr18, KERNEL_CODE_SELECTOR,
|
||||
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
|
||||
idt_set_gate(19, (unsigned)isr19, KERNEL_CODE_SELECTOR,
|
||||
idt_set_gate(19, (size_t)isr19, KERNEL_CODE_SELECTOR,
|
||||
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
|
||||
idt_set_gate(20, (unsigned)isr20, KERNEL_CODE_SELECTOR,
|
||||
idt_set_gate(20, (size_t)isr20, KERNEL_CODE_SELECTOR,
|
||||
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
|
||||
idt_set_gate(21, (unsigned)isr21, KERNEL_CODE_SELECTOR,
|
||||
idt_set_gate(21, (size_t)isr21, KERNEL_CODE_SELECTOR,
|
||||
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
|
||||
idt_set_gate(22, (unsigned)isr22, KERNEL_CODE_SELECTOR,
|
||||
idt_set_gate(22, (size_t)isr22, KERNEL_CODE_SELECTOR,
|
||||
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
|
||||
idt_set_gate(23, (unsigned)isr23, KERNEL_CODE_SELECTOR,
|
||||
idt_set_gate(23, (size_t)isr23, KERNEL_CODE_SELECTOR,
|
||||
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
|
||||
idt_set_gate(24, (unsigned)isr24, KERNEL_CODE_SELECTOR,
|
||||
idt_set_gate(24, (size_t)isr24, KERNEL_CODE_SELECTOR,
|
||||
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
|
||||
idt_set_gate(25, (unsigned)isr25, KERNEL_CODE_SELECTOR,
|
||||
idt_set_gate(25, (size_t)isr25, KERNEL_CODE_SELECTOR,
|
||||
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
|
||||
idt_set_gate(26, (unsigned)isr26, KERNEL_CODE_SELECTOR,
|
||||
idt_set_gate(26, (size_t)isr26, KERNEL_CODE_SELECTOR,
|
||||
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
|
||||
idt_set_gate(27, (unsigned)isr27, KERNEL_CODE_SELECTOR,
|
||||
idt_set_gate(27, (size_t)isr27, KERNEL_CODE_SELECTOR,
|
||||
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
|
||||
idt_set_gate(28, (unsigned)isr28, KERNEL_CODE_SELECTOR,
|
||||
idt_set_gate(28, (size_t)isr28, KERNEL_CODE_SELECTOR,
|
||||
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
|
||||
idt_set_gate(29, (unsigned)isr29, KERNEL_CODE_SELECTOR,
|
||||
idt_set_gate(29, (size_t)isr29, KERNEL_CODE_SELECTOR,
|
||||
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
|
||||
idt_set_gate(30, (unsigned)isr30, KERNEL_CODE_SELECTOR,
|
||||
idt_set_gate(30, (size_t)isr30, KERNEL_CODE_SELECTOR,
|
||||
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
|
||||
idt_set_gate(31, (unsigned)isr31, KERNEL_CODE_SELECTOR,
|
||||
idt_set_gate(31, (size_t)isr31, KERNEL_CODE_SELECTOR,
|
||||
IDT_FLAG_PRESENT|IDT_FLAG_RING0|IDT_FLAG_32BIT|IDT_FLAG_INTTRAP);
|
||||
|
||||
// install the default handler
|
||||
|
@ -172,7 +172,7 @@ static void fpu_init(union fpu_state* fpu)
|
|||
|
||||
memset(fx, 0, sizeof(union fpu_state));
|
||||
fx->cwd = 0x37f;
|
||||
if (has_xmm)
|
||||
if (has_xmm())
|
||||
fx->mxcsr = 0x1f80;
|
||||
} else {
|
||||
i387_fsave_t *fp = &fpu->fsave;
|
||||
|
@ -229,12 +229,13 @@ static void fault_handler(struct state *s)
|
|||
{
|
||||
if (s->int_no < 32) {
|
||||
kputs(exception_messages[s->int_no]);
|
||||
kprintf(" Exception. (%d)\n", s->int_no);
|
||||
kprintf(" Exception (%d) at 0x%x:0x%x on core %u, error code 0x%x, eflags 0x%x\n",
|
||||
s->int_no, s->cs, s->eip, CORE_ID, s->error, s->eflags);
|
||||
|
||||
/* Now, we signal that we have handled the interrupt */
|
||||
if (apic_is_enabled())
|
||||
apic_eoi();
|
||||
else
|
||||
else
|
||||
outportb(0x20, 0x20);
|
||||
|
||||
irq_enable();
|
||||
|
|
|
@ -21,6 +21,7 @@
|
|||
#include <metalsvm/stdio.h>
|
||||
#include <metalsvm/time.h>
|
||||
#include <metalsvm/processor.h>
|
||||
#include <metalsvm/tasks.h>
|
||||
#ifdef CONFIG_ROCKCREEK
|
||||
#include <asm/RCCE_lib.h>
|
||||
#endif
|
||||
|
@ -104,6 +105,8 @@ void udelay(uint32_t usecs)
|
|||
mb();
|
||||
end = rdtsc();
|
||||
diff = end > start ? end - start : start - end;
|
||||
if ((diff < deadline) && (deadline - diff > 50000))
|
||||
check_workqueues();
|
||||
} while(diff < deadline);
|
||||
}
|
||||
|
||||
|
|
|
@ -33,7 +33,7 @@
|
|||
* This will keep track of how many ticks the system
|
||||
* has been running for
|
||||
*/
|
||||
static volatile uint64_t timer_ticks = 0;
|
||||
static volatile uint64_t timer_ticks __attribute__ ((aligned (CACHE_LINE))) = 0;
|
||||
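/*
 * Illustration (assumes TIMER_FREQ is the tick rate programmed further down in
 * this file): seconds of uptime can be derived directly from the
 * cache-line-aligned counter above.
 */
static inline uint64_t uptime_sec_sketch(void)
{
	return timer_ticks / TIMER_FREQ;    /* whole seconds since boot */
}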
|
||||
uint64_t get_clock_tick(void)
|
||||
{
|
||||
|
@ -61,12 +61,17 @@ int sys_times(struct tms* buffer, clock_t* clock)
|
|||
static void timer_handler(struct state *s)
|
||||
{
|
||||
/* Increment our 'tick counter' */
|
||||
#if MAX_CORES > 1
|
||||
if (smp_id() == 0)
|
||||
timer_ticks++;
|
||||
#else
|
||||
timer_ticks++;
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Every TIMER_FREQ clocks (approximately 1 second), we will
|
||||
* Every TIMER_FREQ clocks (approximately 1 second), we will
|
||||
* display a message on the screen
|
||||
*/
|
||||
*/
|
||||
/*if (timer_ticks % TIMER_FREQ == 0) {
|
||||
vga_puts("One second has passed\n");
|
||||
}*/
|
||||
|
@ -105,7 +110,7 @@ int timer_init(void)
|
|||
* Installs 'timer_handler' for the PIC and APIC timer,
|
||||
* only one handler will be later used.
|
||||
*/
|
||||
irq_install_handler(0+32, timer_handler);
|
||||
irq_install_handler(32, timer_handler);
|
||||
irq_install_handler(123, timer_handler);
|
||||
|
||||
/*
|
||||
|
|
|
@ -30,6 +30,7 @@
|
|||
#include <metalsvm/errno.h>
|
||||
#include <asm/irq.h>
|
||||
#include <asm/multiboot.h>
|
||||
#include <asm/apic.h>
|
||||
#ifdef CONFIG_ROCKCREEK
|
||||
#include <asm/RCCE_lib.h>
|
||||
#include <asm/SCC_API.h>
|
||||
|
@ -264,7 +265,7 @@ size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flag
|
|||
size_t index, i;
|
||||
size_t ret;
|
||||
|
||||
if (BUILTIN_EXPECT(!task || !task->pgd || !phyaddr, 0))
|
||||
if (BUILTIN_EXPECT(!task || !task->pgd, 0))
|
||||
return 0;
|
||||
|
||||
if (BUILTIN_EXPECT(!paging_enabled && (viraddr != phyaddr), 0))
|
||||
|
@ -667,6 +668,14 @@ int arch_paging_init(void)
|
|||
npages++;
|
||||
map_region((size_t)&kernel_start, (size_t)&kernel_start, npages, MAP_KERNEL_SPACE);
|
||||
|
||||
#if MAX_CORES > 1
|
||||
// Reserve page for smp boot code
|
||||
if (!map_region(SMP_SETUP_ADDR, SMP_SETUP_ADDR, 1, MAP_KERNEL_SPACE|MAP_NO_CACHE)) {
|
||||
kputs("could not reserve page for smp boot code\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
#endif
|
||||
|
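/*
 * The calls above and below follow one pattern: pages that must not be cached
 * (SMP boot trampoline, video memory) are identity-mapped via map_region()
 * with MAP_NO_CACHE. A hypothetical extra mapping would look the same; the
 * address below is made up for illustration only.
 */
#define EXAMPLE_MMIO_PAGE 0xFEC00000   /* assumed device register page */

static int map_example_device_sketch(void)
{
	if (!map_region(EXAMPLE_MMIO_PAGE, EXAMPLE_MMIO_PAGE, 1,
	                MAP_KERNEL_SPACE|MAP_NO_CACHE))
		return -ENOMEM;
	return 0;
}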
||||
#ifdef CONFIG_VGA
|
||||
// map the video memory into the kernel space
|
||||
map_region(VIDEO_MEM_ADDR, VIDEO_MEM_ADDR, 1, MAP_KERNEL_SPACE|MAP_NO_CACHE);
|
||||
|
@ -752,6 +761,9 @@ int arch_paging_init(void)
|
|||
*/
|
||||
register_task(per_core(current_task));
|
||||
|
||||
// APIC registers into the kernel address space
|
||||
map_apic();
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
C_source := icc.c SCC_API.c iRCCE_admin.c iRCCE_send.c iRCCE_isend.c iRCCE_irecv.c iRCCE_recv.c iRCCE_get.c iRCCE_put.c iRCCE_synch.c RCCE_malloc.c RCCE_shmalloc.c RCCE_debug.c RCCE_qsort.c RCCE_DCMflush.c RCCE_send.c RCCE_recv.c RCCE_flags.c RCCE_comm.c RCCE_put.c RCCE_get.c RCCE_synch.c RCCE_bcast.c RCCE_admin.c # RCCE_power_management.c
|
||||
C_source := icc.c SCC_API.c iRCCE_admin.c iRCCE_send.c iRCCE_isend.c iRCCE_irecv.c iRCCE_recv.c iRCCE_get.c iRCCE_put.c iRCCE_synch.c iRCCE_mailbox.c RCCE_malloc.c RCCE_shmalloc.c RCCE_debug.c RCCE_qsort.c RCCE_DCMflush.c RCCE_send.c RCCE_recv.c RCCE_flags.c RCCE_comm.c RCCE_put.c RCCE_get.c RCCE_synch.c RCCE_bcast.c RCCE_admin.c # RCCE_power_management.c
|
||||
ASM_source :=
|
||||
MODULE := arch_x86_scc
|
||||
|
||||
|
|
|
@ -47,6 +47,27 @@ iRCCE_SEND_REQUEST* iRCCE_isend_queue;
|
|||
// recv request queue
|
||||
iRCCE_RECV_REQUEST* iRCCE_irecv_queue[RCCE_MAXNP];
|
||||
|
||||
// recv request queue for those with source = iRCCE_ANY_SOURCE:
|
||||
iRCCE_RECV_REQUEST* iRCCE_irecv_any_source_queue;
|
||||
|
||||
// mailbox in MPB
|
||||
volatile iRCCE_MAIL_HEADER* iRCCE_mailbox_recv[RCCE_MAXNP]; // store addresses for receiving headers
|
||||
volatile iRCCE_MAIL_HEADER* iRCCE_mailbox_send[RCCE_MAXNP]; // store addresses for sending headeres
|
||||
|
||||
// mailbox recv queue
|
||||
iRCCE_MAIL_HEADER* iRCCE_mailbox_recv_queue[iRCCE_PRIOS];
|
||||
|
||||
// mail garbage queue
|
||||
iRCCE_MAIL_TRASH_BIN iRCCE_mail_garbage;
|
||||
|
||||
// flag indicating if last header was received
|
||||
iRCCE_SHORT_FLAG iRCCE_last_mail[RCCE_MAXNP];
|
||||
|
||||
// field to store open/closed status of mailboxes
|
||||
iRCCE_SHORT_FLAG iRCCE_mailbox_status[RCCE_MAXNP];
|
||||
|
||||
|
||||
|
||||
//--------------------------------------------------------------------------------------
|
||||
// FUNCTION: iRCCE_init
|
||||
//--------------------------------------------------------------------------------------
|
||||
|
@ -56,12 +77,55 @@ int iRCCE_init(void) {
|
|||
int i;
|
||||
|
||||
for(i=0; i<RCCE_MAXNP; i++) {
|
||||
iRCCE_irecv_queue[i] = NULL;
|
||||
iRCCE_irecv_queue[i] = NULL;
|
||||
iRCCE_mailbox_recv[i] = NULL;
|
||||
iRCCE_mailbox_send[i] = NULL;
|
||||
iRCCE_last_mail[i] = 0;
|
||||
iRCCE_mailbox_status[i] = iRCCE_MAILBOX_OPEN;
|
||||
}
|
||||
|
||||
iRCCE_isend_queue = NULL;
|
||||
iRCCE_irecv_any_source_queue = NULL;
|
||||
|
||||
// init trash bin for mailbox
|
||||
iRCCE_mail_garbage.first = NULL;
|
||||
iRCCE_mail_garbage.last = NULL;
|
||||
|
||||
// init mail-priority lists
|
||||
for( i=0; i<iRCCE_PRIOS; ++i ) {
|
||||
iRCCE_mailbox_recv_queue[i] = NULL;
|
||||
}
|
||||
// allocate space in MPB for mailbox and set senders mailbox-pointer
|
||||
for( i=0; i<RCCE_NP; i++ ) {
|
||||
iRCCE_mailbox_recv[i] = (iRCCE_MAIL_HEADER*)RCCE_malloc(RCCE_LINE_SIZE);
|
||||
|
||||
}
|
||||
|
||||
|
||||
for( i=0; i<RCCE_NP; i++ ) {
|
||||
iRCCE_mailbox_send[i] = (iRCCE_MAIL_HEADER*)(RCCE_comm_buffer[i] + ((RCCE_buff_ptr - RCCE_comm_buffer[RCCE_IAM]) - (RCCE_NP-RCCE_IAM)*RCCE_LINE_SIZE ));
|
||||
}
|
||||
|
||||
return (iRCCE_SUCCESS);
|
||||
}
|
||||
|
||||
//--------------------------------------------------------------------------------------
|
||||
// FUNCTION: iRCCE_finalize
|
||||
//--------------------------------------------------------------------------------------
|
||||
// finalize the library
|
||||
//--------------------------------------------------------------------------------------
|
||||
int iRCCE_finalize(void) {
|
||||
// empty iRCCE_mail_garbage
|
||||
iRCCE_MAIL_HEADER* run;
|
||||
iRCCE_MAIL_HEADER* erase_header;
|
||||
|
||||
for( run = iRCCE_mail_garbage.first; run != NULL; ) {
|
||||
erase_header = run;
|
||||
run = run->next;
|
||||
kfree( erase_header, sizeof(iRCCE_MAIL_HEADER) );
|
||||
}
|
||||
|
||||
iRCCE_mail_garbage.first = iRCCE_mail_garbage.last = NULL;
|
||||
return iRCCE_SUCCESS;
|
||||
}
|
||||
#endif
|
||||
|
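/*
 * Lifecycle sketch (not part of this commit): the two admin functions above
 * are meant to bracket all other iRCCE traffic.
 */
static void ircce_lifecycle_sketch(void)
{
	iRCCE_init();        /* queues, MPB mailboxes and the garbage bin are set up */
	/* ... isend/irecv and mailbox traffic ... */
	iRCCE_finalize();    /* drains iRCCE_mail_garbage and frees the headers */
}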
|
|
@ -50,7 +50,9 @@ static int iRCCE_push_recv_request(iRCCE_RECV_REQUEST *request) {
|
|||
char padline[RCCE_LINE_SIZE]; // copy buffer, used if message not multiple of line size
|
||||
int test; // flag for calling iRCCE_test_flag()
|
||||
|
||||
if(request->finished) return(iRCCE_SUCCESS);
|
||||
if(request->finished) {
|
||||
return(iRCCE_SUCCESS);
|
||||
}
|
||||
|
||||
if(request->label == 1) goto label1;
|
||||
if(request->label == 2) goto label2;
|
||||
|
@ -167,6 +169,25 @@ static void iRCCE_init_recv_request(
|
|||
return;
|
||||
}
|
||||
|
||||
int iRCCE_irecv_search_source() {
|
||||
int i, j;
|
||||
int res =iRCCE_ANY_SOURCE;
|
||||
|
||||
for( i=0; i<RCCE_NP*3; ++i ){
|
||||
j =i%RCCE_NP;
|
||||
if ( j == RCCE_IAM )
|
||||
continue;
|
||||
|
||||
// only take source if recv-queue is empty
|
||||
if( (!iRCCE_irecv_queue[j]) && (RCCE_probe(RCCE_sent_flag[j])) ) {
|
||||
res = j;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
return res;
|
||||
}
|
||||
|
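/*
 * Usage sketch for the any-source receive path added in this commit (buffer
 * size and polling loop are illustrative): post a request with
 * iRCCE_ANY_SOURCE and poll it; the source is bound lazily by
 * iRCCE_irecv_search_source() as soon as some sender raises its sent-flag.
 */
static void any_source_recv_sketch(void)
{
	char buf[1024];
	iRCCE_RECV_REQUEST req;
	int done = 0;

	if (iRCCE_irecv(buf, sizeof(buf), iRCCE_ANY_SOURCE, &req) != iRCCE_SUCCESS) {
		while (!done)
			iRCCE_irecv_test(&req, &done);   /* also advances queued requests */
	}
}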
||||
|
||||
//--------------------------------------------------------------------------------------
|
||||
// FUNCTION: iRCCE_irecv
|
||||
|
@ -176,7 +197,48 @@ static void iRCCE_init_recv_request(
|
|||
static iRCCE_RECV_REQUEST blocking_irecv_request;
|
||||
int iRCCE_irecv(char *privbuf, size_t size, int source, iRCCE_RECV_REQUEST *request) {
|
||||
|
||||
if(request == NULL) request = &blocking_irecv_request;
|
||||
if(request == NULL){
|
||||
request = &blocking_irecv_request;
|
||||
|
||||
// find source (blocking)
|
||||
if( source == iRCCE_ANY_SOURCE ){
|
||||
int i, res;
|
||||
for( i=0;;i=(++i)%RCCE_NP ){
|
||||
if ( (!iRCCE_irecv_queue[i]) && (i != RCCE_IAM) && (res = RCCE_probe(RCCE_sent_flag[i])) ){
|
||||
source = i;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
if( source == iRCCE_ANY_SOURCE ) {
|
||||
source = iRCCE_irecv_search_source(); // first try to find a source
|
||||
|
||||
if( source == iRCCE_ANY_SOURCE ){ // queue request if no source available
|
||||
|
||||
iRCCE_init_recv_request(privbuf, RCCE_buff_ptr, RCCE_chunk,
|
||||
&RCCE_ready_flag[RCCE_IAM], NULL,
|
||||
size, iRCCE_ANY_SOURCE, request);
|
||||
|
||||
// put anysource-request in irecv_any_source_queue
|
||||
if( iRCCE_irecv_any_source_queue == NULL ){
|
||||
iRCCE_irecv_any_source_queue = request;
|
||||
}
|
||||
else {
|
||||
if( iRCCE_irecv_any_source_queue->next == NULL ) {
|
||||
iRCCE_irecv_any_source_queue->next = request;
|
||||
}
|
||||
else {
|
||||
iRCCE_RECV_REQUEST* run = iRCCE_irecv_any_source_queue;
|
||||
while( run->next != NULL ) run = run->next;
|
||||
run->next = request;
|
||||
}
|
||||
}
|
||||
return iRCCE_RESERVED;
|
||||
}
|
||||
}
|
||||
|
||||
if (source<0 || source >= RCCE_NP)
|
||||
return(RCCE_error_return(RCCE_debug_comm,RCCE_ERROR_ID));
|
||||
|
@ -211,10 +273,10 @@ int iRCCE_irecv(char *privbuf, size_t size, int source, iRCCE_RECV_REQUEST *requ
|
|||
run->next = request;
|
||||
}
|
||||
|
||||
if(request == &blocking_irecv_request) {
|
||||
iRCCE_irecv_wait(request);
|
||||
return(iRCCE_SUCCESS);
|
||||
}
|
||||
if(request == &blocking_irecv_request) {
|
||||
iRCCE_irecv_wait(request);
|
||||
return(iRCCE_SUCCESS);
|
||||
}
|
||||
|
||||
return(iRCCE_RESERVED);
|
||||
}
|
||||
|
@ -243,29 +305,98 @@ int iRCCE_irecv_test(iRCCE_RECV_REQUEST *request, int *test) {
|
|||
}
|
||||
}
|
||||
|
||||
source = request->source;
|
||||
// does request still have no source?
|
||||
if( request->source == iRCCE_ANY_SOURCE ) {
|
||||
request->source = iRCCE_irecv_search_source();
|
||||
|
||||
if(request->finished) {
|
||||
if (test) (*test) = 1;
|
||||
return(iRCCE_SUCCESS);
|
||||
if( request->source == iRCCE_ANY_SOURCE ) {
|
||||
if (test) (*test) = 0;
|
||||
return iRCCE_RESERVED;
|
||||
}
|
||||
else { // take request out of wait_any_source-list
|
||||
|
||||
// find request in queue
|
||||
if( request == iRCCE_irecv_any_source_queue ) {
|
||||
iRCCE_irecv_any_source_queue = iRCCE_irecv_any_source_queue->next;
|
||||
}
|
||||
else {
|
||||
iRCCE_RECV_REQUEST* run = iRCCE_irecv_any_source_queue;
|
||||
while( run->next != request ) run = run->next;
|
||||
run->next = request->next;
|
||||
}
|
||||
|
||||
request->next = NULL;
|
||||
request->sent = &RCCE_sent_flag[request->source]; // set senders flag
|
||||
source = request->source;
|
||||
|
||||
// queue request in iRCCE_irecv_queue
|
||||
if(iRCCE_irecv_queue[source] == NULL) {
|
||||
|
||||
if(iRCCE_push_recv_request(request) == iRCCE_SUCCESS) {
|
||||
if (test) (*test) = 1;
|
||||
return(iRCCE_SUCCESS);
|
||||
}
|
||||
else {
|
||||
iRCCE_irecv_queue[source] = request;
|
||||
|
||||
if(request == &blocking_irecv_request) {
|
||||
iRCCE_irecv_wait(request);
|
||||
if (test) (*test) = 1;
|
||||
return(iRCCE_SUCCESS);
|
||||
}
|
||||
if (test) (*test) = 0;
|
||||
return(iRCCE_PENDING);
|
||||
}
|
||||
}
|
||||
else {
|
||||
if(iRCCE_irecv_queue[source]->next == NULL) {
|
||||
iRCCE_irecv_queue[source]->next = request;
|
||||
}
|
||||
else {
|
||||
iRCCE_RECV_REQUEST *run = iRCCE_irecv_queue[source];
|
||||
while(run->next != NULL) run = run->next;
|
||||
run->next = request;
|
||||
}
|
||||
|
||||
if(request == &blocking_irecv_request) {
|
||||
iRCCE_irecv_wait(request);
|
||||
if (test) (*test) = 1;
|
||||
return(iRCCE_SUCCESS);
|
||||
}
|
||||
|
||||
if (test) (*test) = 1;
|
||||
return(iRCCE_RESERVED);
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
}
|
||||
else {
|
||||
|
||||
source = request->source;
|
||||
|
||||
if(request->finished) {
|
||||
if (test) (*test) = 1;
|
||||
return(iRCCE_SUCCESS);
|
||||
}
|
||||
|
||||
if(iRCCE_irecv_queue[source] != request) {
|
||||
if (test) (*test) = 0;
|
||||
return(iRCCE_RESERVED);
|
||||
}
|
||||
|
||||
iRCCE_push_recv_request(request);
|
||||
|
||||
if(request->finished) {
|
||||
iRCCE_irecv_queue[source] = request->next;
|
||||
|
||||
if (test) (*test) = 1;
|
||||
return(iRCCE_SUCCESS);
|
||||
}
|
||||
|
||||
if(iRCCE_irecv_queue[source] != request) {
|
||||
if (test) (*test) = 0;
|
||||
return(iRCCE_RESERVED);
|
||||
return(iRCCE_PENDING);
|
||||
}
|
||||
|
||||
iRCCE_push_recv_request(request);
|
||||
|
||||
if(request->finished) {
|
||||
iRCCE_irecv_queue[source] = request->next;
|
||||
|
||||
if (test) (*test) = 1;
|
||||
return(iRCCE_SUCCESS);
|
||||
}
|
||||
|
||||
if (test) (*test) = 0;
|
||||
return(iRCCE_PENDING);
|
||||
}
|
||||
|
||||
|
||||
|
@ -297,6 +428,40 @@ static int iRCCE_irecv_push_source(int source) {
|
|||
}
|
||||
|
||||
int iRCCE_irecv_push(void) {
|
||||
iRCCE_RECV_REQUEST* help_request;
|
||||
|
||||
// first check sourceless requests
|
||||
if( iRCCE_irecv_any_source_queue != NULL) {
|
||||
while( iRCCE_irecv_any_source_queue != NULL ) {
|
||||
iRCCE_irecv_any_source_queue->source = iRCCE_irecv_search_source();
|
||||
|
||||
if( iRCCE_irecv_any_source_queue->source == iRCCE_ANY_SOURCE ) {
|
||||
|
||||
break;
|
||||
}
|
||||
// source found for first request in iRCCE_irecv_any_source_queue
|
||||
else {
|
||||
// set senders flag
|
||||
iRCCE_irecv_any_source_queue->sent = &RCCE_sent_flag[iRCCE_irecv_any_source_queue->source];
|
||||
|
||||
// take request out of irecv_any_source_queue
|
||||
help_request = iRCCE_irecv_any_source_queue;
|
||||
iRCCE_irecv_any_source_queue = iRCCE_irecv_any_source_queue->next;
|
||||
help_request->next = NULL;
|
||||
|
||||
// put request into irecv_queue
|
||||
if(iRCCE_irecv_queue[help_request->source] == NULL) {
|
||||
iRCCE_irecv_queue[help_request->source] = help_request;
|
||||
}
|
||||
else {
|
||||
iRCCE_RECV_REQUEST *run = iRCCE_irecv_queue[help_request->source];
|
||||
while(run->next != NULL) run = run->next;
|
||||
run->next = help_request;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
int i, j;
|
||||
int retval = iRCCE_SUCCESS;
|
||||
|
@ -310,7 +475,7 @@ int iRCCE_irecv_push(void) {
|
|||
}
|
||||
}
|
||||
|
||||
return retval;
|
||||
return (iRCCE_irecv_any_source_queue == NULL)? retval : iRCCE_RESERVED;
|
||||
}
|
||||
|
||||
//--------------------------------------------------------------------------------------
|
||||
|
@ -352,6 +517,24 @@ int iRCCE_irecv_cancel(iRCCE_RECV_REQUEST *request, int *test) {
|
|||
return iRCCE_NOT_ENQUEUED;
|
||||
}
|
||||
|
||||
|
||||
// does request have any source specified?
|
||||
if( request->source == iRCCE_ANY_SOURCE ) {
|
||||
for( run = iRCCE_irecv_any_source_queue; run->next != NULL; run = run->next ) {
|
||||
if( run->next == request ) {
|
||||
run->next = run->next->next;
|
||||
|
||||
if (test) (*test) = 1;
|
||||
return iRCCE_SUCCESS;
|
||||
}
|
||||
}
|
||||
|
||||
if (test) (*test) = 0;
|
||||
return iRCCE_NOT_ENQUEUED;
|
||||
}
|
||||
|
||||
|
||||
|
||||
source = request->source;
|
||||
|
||||
if(iRCCE_irecv_queue[source] == NULL) {
|
||||
|
|
|
@ -1,12 +1,12 @@
|
|||
//***************************************************************************************
|
||||
//******************************************************************************
|
||||
// Non-blocking send routines.
|
||||
//***************************************************************************************
|
||||
//******************************************************************************
|
||||
//
|
||||
// Author: Rob F. Van der Wijngaart
|
||||
// Intel Corporation
|
||||
// Date: 08/30/2010
|
||||
//
|
||||
//***************************************************************************************
|
||||
//******************************************************************************
|
||||
//
|
||||
// Copyright 2010 Intel Corporation
|
||||
//
|
||||
|
@ -34,6 +34,8 @@
|
|||
// [2010-12-09] added cancel functions for non-blocking send/recv requests
|
||||
// by Carsten Clauss
|
||||
//
|
||||
// [2011-06-29] added the support of using IPIs
|
||||
// by Simon Pickartz, Stefan Lankes
|
||||
|
||||
#include <metalsvm/stdlib.h>
|
||||
#include <metalsvm/string.h>
|
||||
|
@ -41,6 +43,8 @@
|
|||
#ifdef CONFIG_ROCKCREEK
|
||||
|
||||
#include <asm/iRCCE_lib.h>
|
||||
#include <asm/SCC_API.h>
|
||||
#include <asm/icc.h>
|
||||
|
||||
static int iRCCE_push_send_request(iRCCE_SEND_REQUEST *request) {
|
||||
|
||||
|
@ -158,56 +162,121 @@ static void iRCCE_init_send_request(
|
|||
}
|
||||
|
||||
//--------------------------------------------------------------------------------------
|
||||
// FUNCTION: iRCCE_isend
|
||||
// FUNCTION: iRCCE_isend_general
|
||||
//--------------------------------------------------------------------------------------
|
||||
// non-blocking send function; returns a handle of type iRCCE_SEND_REQUEST
|
||||
//--------------------------------------------------------------------------------------
|
||||
static iRCCE_SEND_REQUEST blocking_isend_request;
|
||||
int iRCCE_isend(char *privbuf, size_t size, int dest, iRCCE_SEND_REQUEST *request) {
|
||||
static int iRCCE_isend_general(char *privbuf, size_t size, int dest, iRCCE_SEND_REQUEST *request) {
|
||||
|
||||
if(request == NULL) request = &blocking_isend_request;
|
||||
|
||||
if (dest<0 || dest >= RCCE_NP)
|
||||
return(RCCE_error_return(RCCE_debug_comm,RCCE_ERROR_ID));
|
||||
else {
|
||||
iRCCE_init_send_request(privbuf, RCCE_buff_ptr, RCCE_chunk,
|
||||
&RCCE_ready_flag[dest], &RCCE_sent_flag[RCCE_IAM],
|
||||
size, dest, request);
|
||||
|
||||
if(iRCCE_isend_queue == NULL) {
|
||||
iRCCE_init_send_request(privbuf, RCCE_buff_ptr, RCCE_chunk,
|
||||
&RCCE_ready_flag[dest], &RCCE_sent_flag[RCCE_IAM],
|
||||
size, dest, request);
|
||||
|
||||
if(iRCCE_push_send_request(request) == iRCCE_SUCCESS) {
|
||||
return(iRCCE_SUCCESS);
|
||||
}
|
||||
else {
|
||||
iRCCE_isend_queue = request;
|
||||
if(iRCCE_isend_queue == NULL) {
|
||||
|
||||
if(request == &blocking_isend_request) {
|
||||
iRCCE_isend_wait(request);
|
||||
return(iRCCE_SUCCESS);
|
||||
}
|
||||
|
||||
return(iRCCE_PENDING);
|
||||
}
|
||||
if(iRCCE_push_send_request(request) == iRCCE_SUCCESS) {
|
||||
return(iRCCE_SUCCESS);
|
||||
}
|
||||
else {
|
||||
if(iRCCE_isend_queue->next == NULL) {
|
||||
iRCCE_isend_queue->next = request;
|
||||
}
|
||||
else {
|
||||
iRCCE_SEND_REQUEST *run = iRCCE_isend_queue;
|
||||
while(run->next != NULL) run = run->next;
|
||||
run->next = request;
|
||||
}
|
||||
iRCCE_isend_queue = request;
|
||||
|
||||
if(request == &blocking_isend_request) {
|
||||
iRCCE_isend_wait(request);
|
||||
return(iRCCE_SUCCESS);
|
||||
}
|
||||
|
||||
return(iRCCE_RESERVED);
|
||||
return(iRCCE_PENDING);
|
||||
}
|
||||
}
|
||||
else {
|
||||
if(iRCCE_isend_queue->next == NULL) {
|
||||
iRCCE_isend_queue->next = request;
|
||||
}
|
||||
else {
|
||||
iRCCE_SEND_REQUEST *run = iRCCE_isend_queue;
|
||||
while(run->next != NULL) run = run->next;
|
||||
run->next = request;
|
||||
}
|
||||
|
||||
if(request == &blocking_isend_request) {
|
||||
iRCCE_isend_wait(request);
|
||||
return(iRCCE_SUCCESS);
|
||||
}
|
||||
|
||||
return(iRCCE_RESERVED);
|
||||
}
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
// FUNCTION: iRCCE_isend
|
||||
//------------------------------------------------------------------------------
|
||||
// wrapper function to differentiate between anylength and normal call
|
||||
//------------------------------------------------------------------------------
|
||||
static iRCCE_SEND_REQUEST blocking_isend_request;
|
||||
int iRCCE_isend(
|
||||
char *privbuf,
|
||||
ssize_t size,
|
||||
int dest,
|
||||
iRCCE_SEND_REQUEST *request
|
||||
) {
|
||||
if(request == NULL) request = &blocking_isend_request;
|
||||
|
||||
if (dest<0 || dest >= RCCE_NP) {
|
||||
return(RCCE_error_return(RCCE_debug_comm,RCCE_ERROR_ID));
|
||||
}
|
||||
else {
|
||||
// anylength call
|
||||
if( size < 0 ){
|
||||
// convert size to positive range
|
||||
int send_size = -size;
|
||||
|
||||
// use header payload
|
||||
if( send_size <= iRCCE_MAIL_HEADER_PAYLOAD ) {
|
||||
iRCCE_init_send_request(privbuf, RCCE_buff_ptr,
|
||||
RCCE_chunk, &RCCE_ready_flag[dest],
|
||||
&RCCE_sent_flag[RCCE_IAM],
|
||||
send_size, dest, request);
|
||||
request->finished = 1;
|
||||
|
||||
iRCCE_mail_send( send_size,
|
||||
iRCCE_ANYLENGTH_PIGGYBACK,
|
||||
0, privbuf, dest );
|
||||
NOP8;
|
||||
NOP8;
|
||||
icc_send_irq( dest );
|
||||
return iRCCE_SUCCESS;
|
||||
}
|
||||
// we need an extra isend-call
|
||||
else {
|
||||
iRCCE_mail_send( send_size, iRCCE_ANYLENGTH,
|
||||
0, NULL, dest );
|
||||
NOP8;
|
||||
NOP8;
|
||||
icc_send_irq( dest );
|
||||
return iRCCE_isend_general( privbuf, send_size,
|
||||
dest, request );
|
||||
}
|
||||
}
|
||||
// normal call
|
||||
else if( size > 0 ) {
|
||||
return iRCCE_isend_general( privbuf, size,
|
||||
dest, request );
|
||||
}
|
||||
// do nothing
|
||||
else {
|
||||
iRCCE_init_send_request(privbuf, RCCE_buff_ptr,
|
||||
RCCE_chunk, &RCCE_ready_flag[dest],
|
||||
&RCCE_sent_flag[RCCE_IAM],
|
||||
size, dest, request);
|
||||
request->finished = 1;
|
||||
return(iRCCE_SUCCESS);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
|
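/*
 * Caller-side sketch for the wrapper above: a negative size selects the new
 * "anylength" path, where the real length is announced via a mail header (and
 * piggybacked in its payload for very small messages). Destination and message
 * are illustrative.
 */
static void anylength_send_sketch(char *msg, size_t len, int dest)
{
	/* announce and send len bytes through the mailbox protocol */
	iRCCE_isend(msg, -((ssize_t)len), dest, NULL);   /* NULL request => blocking behaviour */
}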
||||
//--------------------------------------------------------------------------------------
|
||||
|
|
482
arch/x86/scc/iRCCE_mailbox.c
Normal file
|
@ -0,0 +1,482 @@
|
|||
/*
|
||||
* Copyright 2011 Simon Pickartz, Chair for Operating Systems,
|
||||
* RWTH Aachen University
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
* This file is part of MetalSVM.
|
||||
*/
|
||||
|
||||
/*
|
||||
* Mailbox system
|
||||
*
|
||||
* [2011-05-08] implemented mailbox send/recv routines
|
||||
* by Simon Pickartz, Chair for Operating Systems,
|
||||
* RWTH Aachen University
|
||||
*/
|
||||
|
||||
#include <metalsvm/stdlib.h>
|
||||
#include <metalsvm/string.h>
|
||||
|
||||
#ifdef CONFIG_ROCKCREEK
|
||||
#include <asm/iRCCE_lib.h>
|
||||
|
||||
/**
|
||||
*
|
||||
* @file contains implementation of the mailbox system
|
||||
* @author Simon Pickartz
|
||||
*
|
||||
*
|
||||
*/
|
||||
|
||||
|
||||
// forward declaration
|
||||
static int iRCCE_mailbox_close_one(int rank, int check);
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
// FUNCTION: iRCCE_mailbox_print_header
|
||||
//------------------------------------------------------------------------------
|
||||
// routine for printing given header (debugging purpose)
|
||||
//------------------------------------------------------------------------------
|
||||
/**
|
||||
* @brief routine for printing a given header
|
||||
* @param header is a pointer to a given iRCCE_MAIL_HEADER structure
|
||||
*/
|
||||
|
||||
void iRCCE_mailbox_print_header(iRCCE_MAIL_HEADER* header) {
|
||||
|
||||
kprintf( "\n"
|
||||
"-------------------------\n"
|
||||
"| RCK%d\n"
|
||||
"-------------------------\n"
|
||||
"| Sender\t: %d\t\n"
|
||||
"| Size\t\t: %d\t\n"
|
||||
"| Tag\t\t: %d\t\n"
|
||||
"| Prio\t\t: %d\t\n"
|
||||
"| Payload\t: %s\n"
|
||||
"-------------------------\n\n",
|
||||
RCCE_IAM, header->source,
|
||||
header->size, header->tag,
|
||||
header->prio, header->payload);
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
// FUNCTION: iRCCE_mail_fetch
|
||||
//------------------------------------------------------------------------------
|
||||
/**
|
||||
* @brief routine to check for new mail in a given mailbox
|
||||
* @param rank is the ID of the rank whose mailbox is to be emptied
|
||||
*
|
||||
* The function checks if the mailbox has new mail for a given rank. In case of
|
||||
* new mail it needs memory for the received header. Either there is memory
|
||||
* in the internal garbage collection or it has to be allocated. The next step is
|
||||
* to check whether a last-mail was received or a normal one. A last-mail is
|
||||
* indicated by the iRCCE_LAST_MAIL tag. A last-mail entails the appropriate
|
||||
* flag in the iRCCE_last_mail array to be set. Otherwise the header has to be
|
||||
* enqueued in the mailbox_recv_queue accordingly to the priority field.
|
||||
*/
|
||||
//------------------------------------------------------------------------------
|
||||
static int iRCCE_mail_fetch(
|
||||
int rank // rank from whom to empty mailbox
|
||||
) {
|
||||
|
||||
iRCCE_MAIL_HEADER* header;
|
||||
|
||||
// check for memory in garbage collection or allocate new
|
||||
if( iRCCE_mail_garbage.first ) {
|
||||
header = iRCCE_mail_garbage.first;
|
||||
iRCCE_mail_garbage.first =
|
||||
iRCCE_mail_garbage.first->next;
|
||||
|
||||
header->next = NULL;
|
||||
if( iRCCE_mail_garbage.first == NULL ) {
|
||||
iRCCE_mail_garbage.last = NULL;
|
||||
}
|
||||
}
|
||||
else {
|
||||
header = (iRCCE_MAIL_HEADER*)kmalloc(sizeof(iRCCE_MAIL_HEADER));
|
||||
}
|
||||
|
||||
|
||||
// copy header to allocated memory
|
||||
RC_cache_invalidate();
|
||||
iRCCE_memcpy_get( (void*)header, (void*)iRCCE_mailbox_recv[rank],
|
||||
RCCE_LINE_SIZE );
|
||||
|
||||
// check if received a last-mail
|
||||
if( header->tag == iRCCE_LAST_MAIL ) {
|
||||
iRCCE_last_mail[rank] = 1;
|
||||
iRCCE_mailbox_close_one( rank, 0 ); // we can close respective mailbox
|
||||
iRCCE_mail_release( &header );
|
||||
}
|
||||
else {
|
||||
// check mail priority
|
||||
int prio = header->prio;
|
||||
|
||||
// enqueue accordingly
|
||||
if( iRCCE_mailbox_recv_queue[prio] == NULL ) {
|
||||
iRCCE_mailbox_recv_queue[prio] = header;
|
||||
}
|
||||
else {
|
||||
iRCCE_MAIL_HEADER* run = iRCCE_mailbox_recv_queue[prio];
|
||||
while( run->next != NULL ) run = run->next;
|
||||
run->next = header;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
return iRCCE_SUCCESS;
|
||||
}
|
||||
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
// FUNCTION: iRCCE_mailbox_check
|
||||
//------------------------------------------------------------------------------
|
||||
/**
|
||||
* @brief routine to check for new mail in mailboxes
|
||||
*
|
||||
* This function has to be called from time to time. It empties all mailboxes of
|
||||
* the participating cores if the corresponding sent-flag is set and the mailbox
|
||||
* is not closed. After calling iRCCE_mail_fetch the sent-flag has to be reset.
|
||||
* Here we have to use a little trick because we can only write to the MPB in
|
||||
* cacheline granularity. We set the appropriate flag to zero and afterwards
|
||||
* touch the MPB on another cacheline. That causes the write combine buffer to
|
||||
* write out the data.
|
||||
*/
|
||||
//------------------------------------------------------------------------------
|
||||
static int iRCCE_mailbox_check() {
|
||||
int i;
|
||||
|
||||
for( i=0; i<RCCE_NP; ++i ) {
|
||||
if( i == RCCE_IAM ) continue;
|
||||
// only check open mailboxes
|
||||
if( iRCCE_mailbox_status[i] == iRCCE_MAILBOX_OPEN ) {
|
||||
|
||||
RC_cache_invalidate();
|
||||
if( iRCCE_mailbox_recv[i]->sent ) {
|
||||
iRCCE_mail_fetch(i);
|
||||
|
||||
// reset senders flag
|
||||
RC_cache_invalidate();
|
||||
iRCCE_mailbox_recv[i]->sent = RCCE_FLAG_UNSET;
|
||||
*(int *)RCCE_fool_write_combine_buffer = 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return iRCCE_SUCCESS;
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
// FUNCTION: iRCCE_mail_recv
|
||||
//------------------------------------------------------------------------------
|
||||
/**
|
||||
* @brief routine for fetching received headers out of iRCCE_mailbox_recv_queue
|
||||
* @param header is the address of a pointer to an iRCCE_MAIL_HEADER structure
|
||||
* @return iRCCE_SUCCESS if there was new mail; iRCCE_MAILBOX_EMPTY else
|
||||
* @todo implement fairer dequeue mechanism
|
||||
*
|
||||
* The function checks if the receive queue with highest priority (priority 0)
|
||||
* contains any mail headers. In this case we pop the first element of that list
|
||||
* in a FIFO manner. Otherwise iRCCE_mailbox_check() has to be called. Afterwards
|
||||
* the first element of a non-empty receive queue with highest priority is
|
||||
* returned.
|
||||
*/
|
||||
//------------------------------------------------------------------------------
|
||||
int iRCCE_mail_recv(
|
||||
iRCCE_MAIL_HEADER** header // pointer to incoming header
|
||||
) { // (memory allocated by iRCCE)
|
||||
|
||||
int i;
|
||||
|
||||
// if there is no mail, check for incoming
|
||||
if ( !iRCCE_mailbox_recv_queue[0] ) {
|
||||
iRCCE_mailbox_check();
|
||||
}
|
||||
|
||||
// check priority queues
|
||||
for( i=0; i<iRCCE_PRIOS; ++i ) {
|
||||
if ( iRCCE_mailbox_recv_queue[i] ) {
|
||||
iRCCE_MAIL_HEADER* help_header =
|
||||
iRCCE_mailbox_recv_queue[i];
|
||||
iRCCE_mailbox_recv_queue[i] =
|
||||
iRCCE_mailbox_recv_queue[i]->next;
|
||||
help_header->next = NULL;
|
||||
|
||||
*header = help_header;
|
||||
return iRCCE_SUCCESS;
|
||||
}
|
||||
}
|
||||
|
||||
// no mail queued
|
||||
*header = NULL;
|
||||
return iRCCE_MAILBOX_EMPTY;
|
||||
|
||||
}
|
||||
|
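/*
 * Receive-side sketch: drain the priority queues, act on each header and hand
 * the storage back via iRCCE_mail_release() so it lands in the garbage bin.
 * icc_mail_check() in icc.c follows the same pattern later in this commit.
 */
static void mail_drain_sketch(void)
{
	iRCCE_MAIL_HEADER* header = NULL;

	while (iRCCE_mail_recv(&header) == iRCCE_SUCCESS) {
		iRCCE_mailbox_print_header(header);   /* debug helper defined above */
		iRCCE_mail_release(&header);          /* header is NULL afterwards */
	}
}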
||||
//------------------------------------------------------------------------------
|
||||
// FUNCTION: iRCCE_mail_release
|
||||
//------------------------------------------------------------------------------
|
||||
/**
|
||||
* @brief routine to store released header by user in garbage collection
|
||||
* @param header is the address of a pointer to an iRCCE_MAIL_HEADER structure
|
||||
* @return iRCCE_SUCCESS in any case
|
||||
*
|
||||
* This function enqueues a pointer to memory for an iRCCE_MAIL_HEADER structure
|
||||
* that is not used by the user program anymore. 'header' points to NULL by
|
||||
* return of the function.
|
||||
*/
|
||||
//------------------------------------------------------------------------------
|
||||
int iRCCE_mail_release(
|
||||
iRCCE_MAIL_HEADER** header
|
||||
) {
|
||||
// put header in garbage collection
|
||||
if( (iRCCE_mail_garbage.first == NULL)
|
||||
&& (iRCCE_mail_garbage.last == NULL ) ) {
|
||||
|
||||
iRCCE_mail_garbage.first = *header;
|
||||
iRCCE_mail_garbage.last = *header;
|
||||
}
|
||||
else {
|
||||
iRCCE_mail_garbage.last->next = *header;
|
||||
iRCCE_mail_garbage.last = *header;
|
||||
}
|
||||
iRCCE_mail_garbage.last->next = NULL;
|
||||
|
||||
// reset header
|
||||
*header = NULL;
|
||||
return iRCCE_SUCCESS;
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
// FUNCTION: iRCCE_mail_send
|
||||
//------------------------------------------------------------------------------
|
||||
/**
|
||||
* @brief routine to send a mail to a given destination (blocking)
|
||||
* @param size is the size of the following message. This message may be
|
||||
* contained in the payload or sent by an isend()-call
|
||||
* @param tag is an integer value to distinguish between different mail types
|
||||
* @param prio indicates the priority of the mail. 0 is the highest priority
|
||||
* whereas 4 is the lowest.
|
||||
* @param payload is a pointer to byte array with a size of
|
||||
* iRCCE_MAIL_HEADER_PAYLOAD. If NULL is passed nothing is done, otherwise array
|
||||
* pointed to by payload is copied into the header.
|
||||
* @param dest indicates the destination of the mail in terms of the ID of
|
||||
* one of the participating ranks
|
||||
* @return iRCCE_SUCCESS if send was successful. If target mailbox is closed
|
||||
* iRCCE_MAILBOX_CLOSED is returned.
|
||||
*
|
||||
* First it has to be checked if the target mailbox still contains an unread mail.
|
||||
* If this is the case there is time to empty the own mailboxes. It blocks until
|
||||
* the receiver has emptied its mailbox. The next step is to acquire the lock
|
||||
* for the target mailbox to be sure that the mailbox is not closed by the
|
||||
* receiver while the mail is delivered. After locking the mailbox an
|
||||
* iRCCE_MAIL_HEADER is generated according to the parameters (but with a
|
||||
* sent-flag set to zero) and is copied into the target mailbox. After all data
|
||||
* has been written, the appropriate sent-flag has to be set with the same trick
|
||||
* already used in iRCCE_mailbox_check(). Now the lock can be released.
|
||||
*/
|
||||
//------------------------------------------------------------------------------
|
||||
int iRCCE_mail_send(
|
||||
size_t size, // size of following message expected to be send/received
|
||||
int tag, // tag to indicate message type
|
||||
char prio, // mail priority
|
||||
char* payload, // pointer to buffer for header payload
|
||||
int dest // UE that will receive the header
|
||||
) {
|
||||
|
||||
// if dest mailbox is full, check for incoming mail
|
||||
RC_cache_invalidate();
|
||||
while( iRCCE_mailbox_send[dest]->sent ) {
|
||||
iRCCE_mailbox_check();
|
||||
RC_cache_invalidate();
|
||||
}
|
||||
|
||||
// check if mailbox is closed
|
||||
RCCE_acquire_lock( dest );
|
||||
RC_cache_invalidate();
|
||||
if( iRCCE_mailbox_send[dest]->closed ) {
|
||||
RCCE_release_lock( dest );
|
||||
return iRCCE_MAILBOX_CLOSED;
|
||||
}
|
||||
|
||||
// prepare header
|
||||
iRCCE_MAIL_HEADER header = { RCCE_IAM, size, tag, NULL, prio,
|
||||
RCCE_FLAG_UNSET, RCCE_FLAG_UNSET,
|
||||
{[0 ... iRCCE_MAIL_HEADER_PAYLOAD-1] = 0} };
|
||||
|
||||
// payload within the header?
|
||||
if( payload ) {
|
||||
memcpy( header.payload, payload, iRCCE_MAIL_HEADER_PAYLOAD );
|
||||
}
|
||||
|
||||
// do the actual copy to MPB
|
||||
RC_cache_invalidate();
|
||||
iRCCE_memcpy_put( (void*)iRCCE_mailbox_send[dest],
|
||||
(void*)&header, RCCE_LINE_SIZE );
|
||||
|
||||
// set senders flag
|
||||
RC_cache_invalidate();
|
||||
iRCCE_mailbox_send[dest]->sent = RCCE_FLAG_SET;
|
||||
*(int *)RCCE_fool_write_combine_buffer = 1;
|
||||
RC_cache_invalidate();
|
||||
|
||||
RCCE_release_lock( dest );
|
||||
|
||||
return iRCCE_SUCCESS;
|
||||
}
|
||||
|
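/*
 * Send-side sketch mirroring the pattern used by icc.c later in this commit:
 * a small message travels entirely inside the header payload, then an IPI
 * wakes the receiver. The tag value is illustrative only.
 */
static void mail_hello_sketch(int dest)
{
	char payload[iRCCE_MAIL_HEADER_PAYLOAD] = "hello";

	iRCCE_mail_send(sizeof("hello"), /* tag */ 42, /* prio */ 0, payload, dest);
	NOP8;
	icc_send_irq(dest);              /* interrupt the destination core */
}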
||||
//------------------------------------------------------------------------------
|
||||
// FUNCTION: iRCCE_last_mail_recv
|
||||
//------------------------------------------------------------------------------
|
||||
/**
|
||||
* @brief check if all final headers are received from all UEs
|
||||
* @return iRCCE_SUCCESS if all last-mails arrived, iRCCE_LAST_MAILS_NOT_RECV
|
||||
* otherwise
|
||||
*
|
||||
* This function is used to determine if all last-mails arrived at the calling
|
||||
* UE. Therefore it checks whether all flags in the iRCCE_last_mail array are set.
|
||||
*/
|
||||
//------------------------------------------------------------------------------
|
||||
int iRCCE_last_mail_recv(void) {
|
||||
int i;
|
||||
int res = iRCCE_SUCCESS;
|
||||
|
||||
for( i=0; i<RCCE_NP; ++i ) {
|
||||
if( iRCCE_last_mail[i] == 0 ) {
|
||||
res = iRCCE_LAST_MAILS_NOT_RECV;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
return res;
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
// FUNCTION: iRCCE_mailbox_wait
|
||||
//------------------------------------------------------------------------------
|
||||
/**
|
||||
* @brief wait for all last-mails to be received
|
||||
* @return iRCCE_SUCCESS
|
||||
*
|
||||
* This function blocks in a loop, continuously calling iRCCE_last_mail_recv()
|
||||
* until its return value is iRCCE_SUCCESS, which implies that all last-mails
|
||||
* of the participating UEs arrived at the calling UE.
|
||||
* This function is used to shut down the mailbox environment.
|
||||
*/
|
||||
//------------------------------------------------------------------------------
|
||||
int iRCCE_mailbox_wait(void) {
|
||||
while( iRCCE_last_mail_recv() == iRCCE_LAST_MAILS_NOT_RECV ) {
|
||||
iRCCE_mailbox_check();
|
||||
}
|
||||
|
||||
return iRCCE_SUCCESS;
|
||||
}
|
||||
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
// FUNCTION: iRCCE_mailbox_flush
|
||||
//------------------------------------------------------------------------------
|
||||
/**
|
||||
* @brief dequeue all iRCCE_mailbox_recv_queue elements and free memory
|
||||
* @return iRCCE_SUCCESS
|
||||
*
|
||||
* This function empties all iRCCE_mailbox_recv_queues with no regard to their
|
||||
* content. This function is used to shut down the mailbox environment.
|
||||
*/
|
||||
//------------------------------------------------------------------------------
|
||||
int iRCCE_mailbox_flush(void) {
|
||||
int i;
|
||||
|
||||
for( i=0; i<iRCCE_PRIOS; ++i ) {
|
||||
iRCCE_MAIL_HEADER* erase_header = iRCCE_mailbox_recv_queue[i];
|
||||
|
||||
while( erase_header != NULL ) {
|
||||
iRCCE_mailbox_recv_queue[i] = iRCCE_mailbox_recv_queue[i]->next;
|
||||
kfree( erase_header, sizeof(iRCCE_MAIL_HEADER) );
|
||||
erase_header = iRCCE_mailbox_recv_queue[i];
|
||||
}
|
||||
}
|
||||
return iRCCE_SUCCESS;
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
// FUNCTION: iRCCE_mailbox_close_one
|
||||
//------------------------------------------------------------------------------
|
||||
/**
|
||||
* @brief routine to close one mailbox
|
||||
* @return iRCCE_SUCCESS
|
||||
* @param rank is the ID of the rank whose mailbox is to be closed
|
||||
* @param check is a flag indicating whether the mailbox has to be emptied before
|
||||
* closing or not. This is required for a close-call as a result of a received
|
||||
* last-mail.
|
||||
*
|
||||
* This function closes a mailbox of the given rank. If the check flag is set
|
||||
* an iRCCE_mail_fetch()-call is performed. The close procedure has to be locked
|
||||
* to be sure that no UE sends any mail while closing the mailbox.
|
||||
*/
|
||||
//------------------------------------------------------------------------------
|
||||
static int iRCCE_mailbox_close_one(int rank, int check) {
|
||||
RCCE_acquire_lock( RCCE_IAM );
|
||||
|
||||
// check if it contains new mail
|
||||
RC_cache_invalidate();
|
||||
if( check && iRCCE_mailbox_recv[rank]->sent ) {
|
||||
iRCCE_mail_fetch(rank);
|
||||
|
||||
}
|
||||
|
||||
// close mailbox
|
||||
iRCCE_MAIL_HEADER help_header = { 0, 0, 0, NULL, 0, RCCE_FLAG_UNSET,
|
||||
RCCE_FLAG_SET, {[0 ... iRCCE_MAIL_HEADER_PAYLOAD-1] = 0} };
|
||||
|
||||
RC_cache_invalidate();
|
||||
iRCCE_memcpy_put( (void*)iRCCE_mailbox_recv[rank],
|
||||
&help_header, RCCE_LINE_SIZE );
|
||||
|
||||
RCCE_release_lock( RCCE_IAM );
|
||||
|
||||
iRCCE_mailbox_status[rank] = iRCCE_MAILBOX_CLOSED;
|
||||
|
||||
return iRCCE_SUCCESS;
|
||||
}
|
||||
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
// FUNCTION: iRCCE_mailbox_close()
|
||||
//------------------------------------------------------------------------------
|
||||
/**
|
||||
* @brief routine to close one or all mailboxes
|
||||
* @param rank is the ID of the UE whose mailbox is to be closed; if iRCCE_MAILBOX_ALL
|
||||
* is passed all mailboxes are closed by the calling UE
|
||||
* @return iRCCE_SUCCESS
|
||||
*
|
||||
* This function closes one or all mailboxes of the calling UE. This is done by
|
||||
* calling iRCCE_mailbox_close_one for one or all mailboxes.
|
||||
*/
|
||||
//------------------------------------------------------------------------------
|
||||
int iRCCE_mailbox_close(int rank) {
|
||||
if( rank == iRCCE_MAILBOX_ALL ) {
|
||||
int i;
|
||||
for( i=0; i<RCCE_NP; ++i ) {
|
||||
iRCCE_mailbox_close_one( i, 1 );
|
||||
}
|
||||
}
|
||||
else {
|
||||
iRCCE_mailbox_close_one( rank, 1 );
|
||||
}
|
||||
|
||||
return iRCCE_SUCCESS;
|
||||
}
|
||||
|
||||
#endif
|
|
@ -177,6 +177,18 @@ int iRCCE_recv(char *privbuf, size_t size, int source) {
|
|||
iRCCE_isend_push();
|
||||
}
|
||||
|
||||
|
||||
// determine source of request if given source = iRCCE_ANY_SOURCE
|
||||
if ( source==iRCCE_ANY_SOURCE ){
|
||||
int i, res;
|
||||
for( i=0;;i=(++i)%RCCE_NP ){
|
||||
if ( (i != RCCE_IAM) && (res = RCCE_probe(RCCE_sent_flag[i])) ){
|
||||
source = i;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (source<0 || source >= RCCE_NP)
|
||||
return(RCCE_error_return(RCCE_debug_comm,RCCE_ERROR_ID));
|
||||
else {
|
||||
|
|
|
@ -82,7 +82,9 @@ static void intr_handler(struct state *s)
|
|||
// reset appropriate bit in the core configuration register
|
||||
int tmp, z;
|
||||
|
||||
#ifdef CONFIG_LWIP
|
||||
mmnif_irqhandler();
|
||||
#endif
|
||||
|
||||
z = Z_PID(RC_COREID[my_ue]);
|
||||
tmp=ReadConfigReg(CRB_OWN + (z==0 ? GLCFG0 : GLCFG1));
|
||||
|
@ -172,7 +174,7 @@ int icc_init(void)
|
|||
return 0;
|
||||
}
|
||||
|
||||
inline int icc_send_irq(int ue)
|
||||
int icc_send_irq(int ue)
|
||||
{
|
||||
int tmp, x, y, z, addr;
|
||||
|
||||
|
@ -195,123 +197,103 @@ inline int icc_send_irq(int ue)
|
|||
int icc_halt(void)
|
||||
{
|
||||
uint32_t flags;
|
||||
#if 0
|
||||
uint32_t do_send = 1;
|
||||
|
||||
do {
|
||||
// iRCCE is not thread save => disable interrupts
|
||||
flags = irq_nested_disable();
|
||||
|
||||
if (do_send)
|
||||
if (do_send) {
|
||||
do_send = (iRCCE_isend_push() == iRCCE_PENDING);
|
||||
icc_check();
|
||||
iRCCE_irecv_push();
|
||||
}
|
||||
icc_mail_check();
|
||||
|
||||
irq_nested_enable(flags);
|
||||
} while(do_send);
|
||||
|
||||
HALT;
|
||||
NOP1;
|
||||
} while(do_send);
|
||||
#else
|
||||
// iRCCE is not thread save => disable interrupts
|
||||
flags = irq_nested_disable();
|
||||
icc_mail_check();
|
||||
irq_nested_enable(flags);
|
||||
NOP1;
|
||||
#endif
|
||||
//HALT;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static volatile uint64_t ping_start = 0;
|
||||
static icc_header_t ping_request = {ICC_TYPE_PINGREQUEST, 0, 0};
|
||||
static icc_header_t ping_response = {ICC_TYPE_PINGRESPONSE, 0, 0};
|
||||
|
||||
int icc_ping(int ue)
|
||||
{
|
||||
int icc_mail_ping( void )
|
||||
{
|
||||
uint32_t flags;
|
||||
int remote_rank = (my_ue+1)%2;
|
||||
uint8_t payload[iRCCE_MAIL_HEADER_PAYLOAD];
|
||||
uint64_t* timer = (uint64_t*) payload;
|
||||
|
||||
if (BUILTIN_EXPECT(ue == my_ue, 0))
|
||||
return -EINVAL;
|
||||
if (BUILTIN_EXPECT((ue < 0) || (ue >= num_ues), 0))
|
||||
return -EINVAL;
|
||||
|
||||
while(ping_start) {
|
||||
NOP8;
|
||||
}
|
||||
|
||||
ping_start = rdtsc();
|
||||
|
||||
// iRCCE is not thread save => disable interrupts
|
||||
if (my_ue)
|
||||
return -1;
|
||||
|
||||
kprintf( "Hello from mail_ping ... \n" );
|
||||
|
||||
// disable interrupts
|
||||
flags = irq_nested_disable();
|
||||
|
||||
iRCCE_isend((char*) &ping_request, sizeof(icc_header_t), ue, NULL);
|
||||
// start timer
|
||||
*timer = rdtsc();
|
||||
|
||||
/* send ping request */
|
||||
iRCCE_mail_send(sizeof(uint64_t), ICC_TAG_PINGREQUEST, 0, payload, remote_rank);
|
||||
|
||||
// wait some time
|
||||
NOP8;
|
||||
icc_send_irq(remote_rank);
|
||||
|
||||
// wake up receiver
|
||||
icc_send_irq(ue);
|
||||
/* check for incoming messages */
|
||||
icc_mail_check();
|
||||
|
||||
// enable interrupts
|
||||
irq_nested_enable(flags);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void interpret_header(icc_header_t* header, int recv_ue)
|
||||
void icc_mail_check(void)
|
||||
{
|
||||
//kprintf("Got ICC message %d from %d\n", header->type, recv_ue);
|
||||
iRCCE_MAIL_HEADER* header = NULL;
|
||||
int res;
|
||||
uint64_t timer;
|
||||
//char* recv_buffer;
|
||||
|
||||
switch(header->type)
|
||||
{
|
||||
case ICC_TYPE_PINGREQUEST: {
|
||||
|
||||
iRCCE_isend((char*) &ping_response, sizeof(icc_header_t), recv_ue, NULL);
|
||||
|
||||
// wait some time
|
||||
// empty mailbox and interpret headers
|
||||
while( (res = iRCCE_mail_recv( &header )) == iRCCE_SUCCESS ) {
|
||||
switch(header->tag)
|
||||
{
|
||||
case ICC_TAG_PINGREQUEST:
|
||||
iRCCE_mail_send( header->size, ICC_TAG_PINGRESPONSE, 0, header->payload, header->source );
|
||||
NOP8;
|
||||
|
||||
// wake up remote core
|
||||
icc_send_irq(recv_ue);
|
||||
icc_send_irq( header->source );
|
||||
break;
|
||||
case ICC_TAG_PINGRESPONSE:
|
||||
timer = rdtsc() - *((uint64_t*) header->payload);
|
||||
kprintf( "Response received in %d ticks!\n", timer );
|
||||
break;
|
||||
default:
|
||||
kprintf("Invalid mail: tag = %d\n", header->tag);
|
||||
break;
|
||||
}
|
||||
/*else if( header->tag == iRCCE_ANYLENGTH ) {
|
||||
recv_buffer = (char*)kmalloc( header->size );
|
||||
iRCCE_irecv( recv_buffer, header->size, header->source, NULL );
|
||||
iRCCE_mail_send( 0, 2, 0, NULL, header->source );
|
||||
}
|
||||
break;
|
||||
case ICC_TYPE_PINGRESPONSE:
|
||||
kprintf("Receive ping response. Ticks: %d\n", rdtsc()-ping_start);
|
||||
ping_start = 0;
|
||||
break;
|
||||
default:
|
||||
kprintf("Receive unknown ICC message (%d)\n", header->type);
|
||||
else if( header->tag == iRCCE_ANYLENGTH_PIGGYBACK ) {
|
||||
iRCCE_mail_send( 0, 2, 0, NULL, header->source );
|
||||
}*/
|
||||
|
||||
iRCCE_mail_release( &header );
|
||||
}
|
||||
}
|
||||
|
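For orientation, here is a minimal sketch of the send side of the mailbox path used above: stamp a timestamp into the small payload area, post it with iRCCE_mail_send() and wake the peer with icc_send_irq(); the receiver then answers from within icc_mail_check(). The argument order of iRCCE_mail_send() (size, tag, priority, payload, destination) is inferred from the calls in this hunk and should be read as an assumption, and send_timestamp() itself is purely illustrative.

// Sketch: push an 8-byte timestamp to a peer core (assumed API, see above).
static int send_timestamp(int dest_rank)
{
	uint8_t payload[iRCCE_MAIL_HEADER_PAYLOAD];
	uint32_t flags;

	*((uint64_t*) payload) = rdtsc();	// start value, echoed back by the peer

	flags = irq_nested_disable();		// iRCCE is not thread safe
	iRCCE_mail_send(sizeof(uint64_t), ICC_TAG_PINGREQUEST, 0, payload, dest_rank);
	icc_send_irq(dest_rank);		// raise the remote interrupt
	irq_nested_enable(flags);

	return 0;
}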
||||
/*
|
||||
* By entering this function, interrupts are already disabled
|
||||
* => No race by using the static variables
|
||||
*/
|
||||
void icc_check(void)
|
||||
{
|
||||
static icc_header_t header[MAX_SCC_CORES];
|
||||
static iRCCE_RECV_REQUEST request[MAX_SCC_CORES];
|
||||
static int8_t first_call = 1;
|
||||
int i, ret;
|
||||
|
||||
if (first_call) {
|
||||
first_call = 0;
|
||||
|
||||
for(i=0; i<num_ues; i++) {
|
||||
if (i == my_ue)
|
||||
continue;
|
||||
|
||||
iRCCE_irecv((char*) (header+i), sizeof(icc_header_t), i, request+i);
|
||||
}
|
||||
}
|
||||
|
||||
// pushes the progress of non-blocking communication requests
|
||||
iRCCE_irecv_push();
|
||||
|
||||
for(i=0; i<num_ues; i++) {
|
||||
if (i == my_ue)
|
||||
continue;
|
||||
|
||||
ret = iRCCE_irecv_test(request+i, NULL);
|
||||
if (ret == iRCCE_SUCCESS) {
|
||||
interpret_header(header+i, i);
|
||||
iRCCE_irecv((char*) (header+i), sizeof(icc_header_t), i, request+i);
|
||||
}
|
||||
}
|
||||
|
||||
// pushes the progress of non-blocking communication requests
|
||||
iRCCE_isend_push();
|
||||
}
|
||||
|
||||
#endif
|
||||
|
|
|
@ -258,6 +258,34 @@ again:
|
|||
return ERR_OK;
|
||||
}
|
||||
|
||||
static void rckemacif_input(struct netif* netif, struct pbuf* p)
|
||||
{
|
||||
struct eth_hdr *ethhdr;
|
||||
|
||||
/* points to packet payload, which starts with an Ethernet header */
|
||||
ethhdr = p->payload;
|
||||
|
||||
switch (htons(ethhdr->type)) {
|
||||
/* IP or ARP packet? */
|
||||
case ETHTYPE_ARP:
|
||||
case ETHTYPE_IP:
|
||||
#if PPPOE_SUPPORT
|
||||
/* PPPoE packet? */
|
||||
case ETHTYPE_PPPOEDISC:
|
||||
case ETHTYPE_PPPOE:
|
||||
#endif /* PPPOE_SUPPORT */
|
||||
/* full packet send to tcpip_thread to process */
|
||||
if (mynetif->input(p, mynetif) != ERR_OK) {
|
||||
LWIP_DEBUGF(NETIF_DEBUG, ("rtl8139if_input: IP input error\n"));
|
||||
pbuf_free(p);
|
||||
}
|
||||
break;
|
||||
default:
|
||||
pbuf_free(p);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
static void rckemacif_rx_handler(struct netif* netif, unsigned int write_offset)
|
||||
{
|
||||
rckemacif_t* rckemacif = netif->state;
|
||||
|
@ -372,7 +400,7 @@ out:
|
|||
#if ETH_PAD_SIZE
|
||||
pbuf_header(p, ETH_PAD_SIZE); /* reclaim the padding word */
|
||||
#endif
|
||||
mailbox_ptr_post(&rckemacif->mbox, (void*)p);
|
||||
rckemacif_input(netif, p);
|
||||
LINK_STATS_INC(link.recv);
|
||||
} else {
|
||||
LWIP_DEBUGF(NETIF_DEBUG, ("rckemacif_rx_inthandler: not enough memory!\n"));
|
||||
|
@ -422,48 +450,6 @@ nexttry:
|
|||
*((volatile unsigned*) (FPGA_BASE + IRQ_RESET + rckemacif->core * 2 * 4)) = (1 << rckemacif->num_emac);
|
||||
}
|
||||
|
||||
err_t rckemacif_wait(struct netif* netif, uint32_t poll)
|
||||
{
|
||||
rckemacif_t* rckemacif = netif->state;
|
||||
struct eth_hdr *ethhdr;
|
||||
struct pbuf *p = NULL;
|
||||
err_t err = ERR_OK;
|
||||
|
||||
if (poll) {
|
||||
if (mailbox_ptr_tryfetch(&(rckemacif->mbox), (void**) &p))
|
||||
return err;
|
||||
} else {
|
||||
mailbox_ptr_fetch(&(rckemacif->mbox), (void**) &p);
|
||||
}
|
||||
|
||||
/* points to packet payload, which starts with an Ethernet header */
|
||||
ethhdr = p->payload;
|
||||
|
||||
//LWIP_DEBUGF(NETIF_DEBUG, ("Got packet of type 0x%x!\n", htons(ethhdr->type)));
|
||||
|
||||
switch (htons(ethhdr->type)) {
|
||||
/* IP or ARP packet? */
|
||||
case ETHTYPE_ARP:
|
||||
case ETHTYPE_IP:
|
||||
#if PPPOE_SUPPORT
|
||||
/* PPPoE packet? */
|
||||
case ETHTYPE_PPPOEDISC:
|
||||
case ETHTYPE_PPPOE:
|
||||
#endif /* PPPOE_SUPPORT */
|
||||
/* full packet send to tcpip_thread to process */
|
||||
if ((err = mynetif->input(p, mynetif)) != ERR_OK) {
|
||||
LWIP_DEBUGF(NETIF_DEBUG, ("rckemacif_poll: IP input error\n"));
|
||||
pbuf_free(p);
|
||||
}
|
||||
break;
|
||||
default:
|
||||
pbuf_free(p);
|
||||
break;
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
err_t rckemacif_init(struct netif* netif)
|
||||
{
|
||||
rckemacif_t* rckemacif;
|
||||
|
@ -516,8 +502,6 @@ err_t rckemacif_init(struct netif* netif)
|
|||
memset(rckemacif->tx_buffer, 0x00, 0x20);
|
||||
memset(rckemacif->tx_buffer + 0x20, 0xDA, BUFFER_SIZE - 0x20);
|
||||
rckemacif->tx_buffer_max = CLINE_PACKETS(BUFFER_SIZE) - 1;
|
||||
|
||||
mailbox_ptr_init(&rckemacif->mbox);
|
||||
netif->state = rckemacif;
|
||||
|
||||
/* Depending on core location read own private data
|
||||
|
|
|
@ -21,7 +21,6 @@
|
|||
#define __HAVE_RCKEMAC_H__
|
||||
|
||||
#include <metalsvm/stddef.h>
|
||||
#include <metalsvm/mailbox.h>
|
||||
|
||||
#if defined(CONFIG_LWIP) && defined(CONFIG_ROCKCREEK)
|
||||
|
||||
|
@ -40,17 +39,8 @@ typedef struct rckemacif {
|
|||
void* irq_address;
|
||||
uint32_t core;
|
||||
uint32_t num_emac;
|
||||
mailbox_ptr_t mbox;
|
||||
} rckemacif_t;
|
||||
|
||||
/*
|
||||
* Wait for incoming messages.
|
||||
*
|
||||
* poll = 0 : wait blocks until a message is received
|
||||
* poll != 0: non-blocking wait
|
||||
*/
|
||||
err_t rckemacif_wait(struct netif* netif, uint32_t poll);
|
||||
|
||||
/*
|
||||
* Initialize the eMAC network driver
|
||||
*/
|
||||
|
|
|
@ -110,8 +110,7 @@ static err_t rtl8139if_output(struct netif* netif, struct pbuf* p)
|
|||
return ERR_OK;
|
||||
}
|
||||
|
||||
#if 0
|
||||
void rtl8139if_input(struct netif* netif, struct pbuf* p)
|
||||
static void rtl8139if_input(struct netif* netif, struct pbuf* p)
|
||||
{
|
||||
struct eth_hdr *ethhdr;
|
||||
|
||||
|
@ -138,7 +137,6 @@ void rtl8139if_input(struct netif* netif, struct pbuf* p)
|
|||
break;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
static void rtl_rx_inthandler(struct netif* netif)
|
||||
{
|
||||
|
@ -175,8 +173,7 @@ static void rtl_rx_inthandler(struct netif* netif)
|
|||
#if ETH_PAD_SIZE
|
||||
pbuf_header(p, ETH_PAD_SIZE); /* reclaim the padding word */
|
||||
#endif
|
||||
mailbox_ptr_post(&rtl8139if->mbox, (void*)p);
|
||||
//rtl8139if_input(netif, p);
|
||||
rtl8139if_input(netif, p);
|
||||
LINK_STATS_INC(link.recv);
|
||||
} else {
|
||||
LWIP_DEBUGF(NETIF_DEBUG, ("rtl8139if_rx_inthandler: not enough memory!\n"));
|
||||
|
@ -237,7 +234,7 @@ static void rtl8139if_handler(struct state* s)
|
|||
uint16_t isr_contents;
|
||||
|
||||
while (1) {
|
||||
isr_contents = inportw(rtl8139if->iobase + ISR);
|
||||
isr_contents = inportw(rtl8139if->iobase + ISR);
|
||||
if (isr_contents == 0)
|
||||
break;
|
||||
|
||||
|
@ -268,46 +265,6 @@ static void rtl8139if_handler(struct state* s)
|
|||
}
|
||||
}
|
||||
|
||||
err_t rtl8139if_wait(struct netif* netif, uint32_t poll)
|
||||
{
|
||||
rtl1839if_t* rtl8139if = netif->state;
|
||||
struct eth_hdr *ethhdr;
|
||||
struct pbuf *p = NULL;
|
||||
err_t err = ERR_OK;
|
||||
|
||||
if (poll) {
|
||||
if (mailbox_ptr_tryfetch(&(rtl8139if->mbox), (void**) &p))
|
||||
return err;
|
||||
} else {
|
||||
mailbox_ptr_fetch(&(rtl8139if->mbox), (void**) &p);
|
||||
}
|
||||
|
||||
/* points to packet payload, which starts with an Ethernet header */
|
||||
ethhdr = p->payload;
|
||||
|
||||
switch (htons(ethhdr->type)) {
|
||||
/* IP or ARP packet? */
|
||||
case ETHTYPE_ARP:
|
||||
case ETHTYPE_IP:
|
||||
#if PPPOE_SUPPORT
|
||||
/* PPPoE packet? */
|
||||
case ETHTYPE_PPPOEDISC:
|
||||
case ETHTYPE_PPPOE:
|
||||
#endif /* PPPOE_SUPPORT */
|
||||
/* full packet send to tcpip_thread to process */
|
||||
if ((err = mynetif->input(p, mynetif)) != ERR_OK) {
|
||||
LWIP_DEBUGF(NETIF_DEBUG, ("rtl8139if_poll: IP input error\n"));
|
||||
pbuf_free(p);
|
||||
}
|
||||
break;
|
||||
default:
|
||||
pbuf_free(p);
|
||||
break;
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
err_t rtl8139if_init(struct netif* netif)
|
||||
{
|
||||
rtl1839if_t* rtl8139if;
|
||||
|
@ -348,7 +305,6 @@ err_t rtl8139if_init(struct netif* netif)
|
|||
rtl8139if->tx_buffer[2] = rtl8139if->tx_buffer[1] + 4096;
|
||||
rtl8139if->tx_buffer[3] = rtl8139if->tx_buffer[2] + 4096;
|
||||
|
||||
mailbox_ptr_init(&rtl8139if->mbox);
|
||||
netif->state = rtl8139if;
|
||||
mynetif = netif;
|
||||
|
||||
|
@ -398,7 +354,7 @@ err_t rtl8139if_init(struct netif* netif)
|
|||
}
|
||||
|
||||
if (!tmp16) {
|
||||
// it seems not to work
|
||||
// it seems not to work
|
||||
kprintf("RTL8139 reset failed\n");
|
||||
return ERR_ARG;
|
||||
}
|
||||
|
@ -416,7 +372,7 @@ err_t rtl8139if_init(struct netif* netif)
|
|||
outportb(rtl8139if->iobase + CONFIG1,
|
||||
(inportb(rtl8139if->iobase + CONFIG1) & ~(CONFIG1_DVRLOAD | CONFIG1_LWACT)) | CONFIG1_DVRLOAD);
|
||||
|
||||
// unlock config register
|
||||
// unlock config register
|
||||
outportb(rtl8139if->iobase + CR9346, 0);
|
||||
|
||||
/*
|
||||
|
|
|
@ -23,7 +23,8 @@
|
|||
#define __HAVE_RTL8139_H__
|
||||
|
||||
#include <metalsvm/stddef.h>
|
||||
#include <metalsvm/mailbox.h>
|
||||
#include <metalsvm/spinlock.h>
|
||||
|
||||
#if defined(CONFIG_LWIP) && defined(CONFIG_PCI)
|
||||
|
||||
// the registers are at the following places
|
||||
|
@ -226,17 +227,8 @@ typedef struct rtl1839if {
|
|||
uint32_t tx_complete;
|
||||
uint16_t rx_pos;
|
||||
uint8_t tx_inuse[4];
|
||||
mailbox_ptr_t mbox;
|
||||
} rtl1839if_t;
|
||||
|
||||
/*
|
||||
* Wait for incoming messages.
|
||||
*
|
||||
* poll = 0 : wait blocks until a message is received
|
||||
* poll != 0: non-blocking wait
|
||||
*/
|
||||
err_t rtl8139if_wait(struct netif* netif, uint32_t poll);
|
||||
|
||||
/*
|
||||
* Initialize the network driver for the RealTek RTL8139 family
|
||||
*/
|
||||
|
|
|
@ -1,3 +1,22 @@
|
|||
/*
|
||||
* Copyright 2011 Carl-Benedikt Krueger, Chair for Operating Systems,
|
||||
* RWTH Aachen University
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
* This file is part of MetalSVM.
|
||||
*/
|
||||
|
||||
#include "util.h"
|
||||
|
||||
|
||||
|
@ -10,7 +29,8 @@ __inline int isprint(char e)
|
|||
// hex_dump displays network packets in a readable way
|
||||
void hex_dump(unsigned n, const unsigned char* buf)
|
||||
{
|
||||
int on_this_line = 0;
|
||||
int on_this_line = 0;
|
||||
|
||||
while (n-- > 0)
|
||||
{
|
||||
kprintf("%02X ", *buf++);
|
||||
|
|
|
@ -1,9 +1,26 @@
|
|||
/*
|
||||
* Copyright 2011 Carl-Benedikt Krueger, Chair for Operating Systems,
|
||||
* RWTH Aachen University
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
* This file is part of MetalSVM.
|
||||
*/
|
||||
|
||||
#ifndef __UTIL__
|
||||
#define __UTIL__
|
||||
|
||||
|
||||
// hex_dump displays network packets in a readable way
|
||||
void hex_dump(unsigned n, const unsigned char* buf);
|
||||
|
||||
|
||||
#endif
|
||||
#endif
|
||||
|
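As a small usage note on the helper declared above: hex_dump() takes the byte count first and the buffer second, which makes it handy for dumping a received lwIP pbuf before it is passed up the stack. The wrapper below is purely illustrative; it relies only on the prototype above and on lwIP's public struct pbuf fields.

// Sketch: dump a received packet for debugging (illustrative helper).
#include <lwip/pbuf.h>
#include "util.h"

static void debug_dump_packet(struct pbuf* p)
{
	if (p)
		hex_dump(p->len, (const unsigned char*) p->payload);
}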
|
|
@ -26,7 +26,7 @@ extern "C" {
|
|||
|
||||
#define METALSVM_VERSION "0.1"
|
||||
#define MAX_TASKS 16
|
||||
#define MAX_CORES 4
|
||||
#define MAX_CORES 1
|
||||
#define MAX_FNAME 128
|
||||
#define DEFAULT_STACK_SIZE (32*1024)
|
||||
#define KERNEL_STACK_SIZE 8192
|
||||
|
@ -40,6 +40,7 @@ extern "C" {
|
|||
#define INT_SYSCALL 0x80
|
||||
#define KERNEL_SPACE (1*1024*1024*1024)
|
||||
#define VIDEO_MEM_ADDR 0xB8000 // the video memory address
|
||||
#define SMP_SETUP_ADDR 0x07000
|
||||
|
||||
#define BYTE_ORDER LITTLE_ENDIAN
|
||||
|
||||
|
@ -52,6 +53,7 @@ extern "C" {
|
|||
#define CONFIG_PCI
|
||||
#define CONFIG_LWIP
|
||||
#define CONFIG_VGA
|
||||
//#define CONFIG_UART
|
||||
#define CONFIG_KEYBOARD
|
||||
#define CONFIG_MULTIBOOT
|
||||
//#define CONFIG_ROCKCREEK
|
||||
|
@ -63,11 +65,6 @@ extern "C" {
|
|||
#define SHMADD
|
||||
#define SHMDBG
|
||||
//#define SHMADD_CACHEABLE
|
||||
/* default values for 16 GB system */
|
||||
#define PRIVATE_MEM1_START 0x00000000
|
||||
#define PRIVATE_MEM1_END 0x13FFFFFF
|
||||
#define PRIVATE_MEM2_START 0xFF000000
|
||||
#define PRIVATE_MEM2_END 0xFFFFFFFF
|
||||
#define SCC_BOOTINFO 0x80000
|
||||
|
||||
#define BUILTIN_EXPECT(exp, b) __builtin_expect((exp), (b))
|
||||
|
|
|
@ -81,11 +81,13 @@ inline static int spinlock_destroy(spinlock_t* s) {
|
|||
*/
|
||||
inline static int spinlock_lock(spinlock_t* s) {
|
||||
int32_t ticket;
|
||||
task_t* curr_task;
|
||||
|
||||
if (BUILTIN_EXPECT(!s, 0))
|
||||
return -EINVAL;
|
||||
|
||||
if (s->owner == per_core(current_task)->id) {
|
||||
curr_task = per_core(current_task);
|
||||
if (s->owner == curr_task->id) {
|
||||
s->counter++;
|
||||
return 0;
|
||||
}
|
||||
|
@ -94,7 +96,7 @@ inline static int spinlock_lock(spinlock_t* s) {
|
|||
while(atomic_int32_read(&s->dequeue) != ticket) {
|
||||
NOP1;
|
||||
}
|
||||
s->owner = per_core(current_task)->id;
|
||||
s->owner = curr_task->id;
|
||||
s->counter = 1;
|
||||
|
||||
return 0;
|
||||
|
@ -181,7 +183,7 @@ inline static int spinlock_irqsave_lock(spinlock_irqsave_t* s) {
|
|||
s->coreid = CORE_ID;
|
||||
s->flags = flags;
|
||||
s->counter = 1;
|
||||
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -22,6 +22,7 @@
|
|||
|
||||
#include <metalsvm/config.h>
|
||||
#include <asm/stddef.h>
|
||||
#include <asm/irqflags.h>
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
|
@ -35,15 +36,33 @@ typedef unsigned int tid_t;
|
|||
#define per_core(name) name
|
||||
#define DECLARE_PER_CORE(type, name) extern type name;
|
||||
#define DEFINE_PER_CORE(type, name, def_value) type name = def_value;
|
||||
#define DEFINE_PER_CORE_STATIC(type, name, def_value) static type name = def_value;
|
||||
#define CORE_ID 0
|
||||
#else
|
||||
#define per_core(name) name[LOGICAL_CPUID].var
|
||||
#define per_core(name) (*__get_percore_##name())
|
||||
#define DECLARE_PER_CORE(type, name) \
|
||||
typedef struct { type var __attribute__ ((aligned (CACHE_LINE))); } aligned_##name;\
|
||||
extern aligned_##name name[MAX_CORES];
|
||||
extern aligned_##name name[MAX_CORES];\
|
||||
inline static type* __get_percore_##name(void) {\
|
||||
type* ret; \
|
||||
uint32_t flags = irq_nested_disable(); \
|
||||
ret = &(name[smp_id()].var); \
|
||||
irq_nested_enable(flags);\
|
||||
return ret; \
|
||||
}
|
||||
#define DEFINE_PER_CORE(type, name, def_value) \
|
||||
aligned_##name name[MAX_CORES] = {[0 ... MAX_CORES-1] = {def_value}};
|
||||
#define CORE_ID LOGICAL_CPUID
|
||||
#define DEFINE_PER_CORE_STATIC(type, name, def_value) \
|
||||
typedef struct { type var __attribute__ ((aligned (CACHE_LINE))); } aligned_##name;\
|
||||
static aligned_##name name[MAX_CORES] = {[0 ... MAX_CORES-1] = {def_value}}; \
|
||||
inline static type* __get_percore_##name(void) {\
|
||||
type* ret; \
|
||||
uint32_t flags = irq_nested_disable(); \
|
||||
ret = &(name[smp_id()].var); \
|
||||
irq_nested_enable(flags);\
|
||||
return ret; \
|
||||
}
|
||||
#define CORE_ID smp_id()
|
||||
#endif
|
||||
|
||||
/* needed to find the task, which is currently running on this core */
|
||||
|
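Since the SMP branch of per_core() now resolves through a generated accessor, a short usage sketch may help; the variable name my_counter is hypothetical and only the macros shown above are assumed.

// Sketch: declaring, defining and using a per-core variable.
DECLARE_PER_CORE(uint32_t, my_counter);		// normally in a header
DEFINE_PER_CORE(uint32_t, my_counter, 0);	// in exactly one C file

// per_core(my_counter) expands either to the plain variable (MAX_CORES == 1)
// or to *__get_percore_my_counter(), which indexes the cache-line-aligned
// array with smp_id() inside a short irq_nested_disable()/enable() window.
static void bump_counter(void)
{
	per_core(my_counter)++;
}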
|
|
@ -51,18 +51,6 @@ extern "C" {
|
|||
#define __NR_execve 14
|
||||
#define __NR_times 15
|
||||
|
||||
/* networking
|
||||
*/
|
||||
|
||||
#define __NR_socket 16
|
||||
#define __NR_bind 17
|
||||
#define __NR_listen 18
|
||||
#define __NR_accept 19
|
||||
#define __NR_connect 20
|
||||
#define __NR_send 21
|
||||
#define __NR_recv 22
|
||||
#define __NR_closesocket 23
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -126,6 +126,16 @@ void NORETURN sys_exit(int);
|
|||
* */
|
||||
int sys_fork(void);
|
||||
|
||||
/** @brief Reserve an idle task for an additional core
|
||||
*
|
||||
* @param id core number
|
||||
*
|
||||
* @return
|
||||
* - address of the stack (< KERNEL_SPACE)
|
||||
* - -EINVAL (-22) on failure
|
||||
*/
|
||||
size_t get_idle_task(uint32_t id);
|
||||
|
||||
/** @brief System call to execute a program
|
||||
*
|
||||
* @param fname Filename of the executable
|
||||
|
@ -138,15 +148,19 @@ int sys_fork(void);
|
|||
*/
|
||||
int sys_execve(const char* fname, char** argv, char** env);
|
||||
|
||||
/** @brief Request rescheduling
|
||||
*
|
||||
* This procedure triggers the scheduler to pick another runnable task
|
||||
*/
|
||||
void reschedule(void);
|
||||
|
||||
static inline void check_workqueues(void)
|
||||
{
|
||||
uint32_t flags = irq_nested_disable();
|
||||
|
||||
#ifdef CONFIG_ROCKCREEK
|
||||
icc_check();
|
||||
#endif
|
||||
|
||||
uint32_t flags = irq_nested_disable();
|
||||
icc_mail_check();
|
||||
irq_nested_enable(flags);
|
||||
#endif
|
||||
}
|
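Because old and new lines are interleaved above, the final form of check_workqueues() is easy to misread; a likely reconstruction, to be taken as a sketch rather than the authoritative result of this hunk, is:

// Probable post-change shape: the IRQ guard and the mailbox poll live
// entirely inside the CONFIG_ROCKCREEK block, and icc_check() is gone.
static inline void check_workqueues(void)
{
#ifdef CONFIG_ROCKCREEK
	uint32_t flags = irq_nested_disable();
	icc_mail_check();
	irq_nested_enable(flags);
#endif
}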
||||
|
||||
#ifdef __cplusplus
|
||||
|
|
|
@ -41,17 +41,18 @@ extern "C" {
|
|||
#endif
|
||||
|
||||
#define TASK_INVALID 0
|
||||
#define TASK_READY 1
|
||||
#define TASK_READY 1
|
||||
#define TASK_RUNNING 2
|
||||
#define TASK_BLOCKED 3
|
||||
#define TASK_FINISHED 4
|
||||
#define TASK_IDLE 5
|
||||
#define TASK_IDLE 5
|
||||
|
||||
#define TASK_DEFAULT_FLAGS 0
|
||||
#define TASK_FPU_INIT (1 << 0)
|
||||
#define TASK_FPU_USED (1 << 1)
|
||||
#define TASK_DEFAULT_FLAGS 0
|
||||
#define TASK_FPU_INIT (1 << 0)
|
||||
#define TASK_FPU_USED (1 << 1)
|
||||
|
||||
typedef int (STDCALL *entry_point_t)(void*);
|
||||
typedef int (*entry_point_t)(void*);
|
||||
typedef int (STDCALL *internal_entry_point_t)(void*);
|
||||
struct page_dir;
|
||||
|
||||
/* @brief The task_t structure */
|
||||
|
@ -59,9 +60,9 @@ typedef struct task {
|
|||
/// Task id = position in the task table
|
||||
tid_t id;
|
||||
/// Task status (INVALID, READY, RUNNING, ...)
|
||||
uint32_t status;
|
||||
uint32_t status;
|
||||
/// Usage in number of pages
|
||||
atomic_int32_t user_usage;
|
||||
atomic_int32_t user_usage;
|
||||
/// Avoids concurrent access to the page directory
|
||||
spinlock_t pgd_lock;
|
||||
/// pointer to the page directory
|
||||
|
|
117
kernel/init.c
|
@ -31,6 +31,7 @@
|
|||
#include <lwip/stats.h>
|
||||
#include <lwip/udp.h>
|
||||
#include <lwip/tcp.h>
|
||||
#include <lwip/tcpip.h>
|
||||
#include <lwip/dhcp.h>
|
||||
#include <lwip/netif.h>
|
||||
#include <lwip/timers.h>
|
||||
|
@ -38,6 +39,7 @@
|
|||
#endif
|
||||
#include <net/rtl8139.h>
|
||||
#include <net/rckemac.h>
|
||||
#include <net/mmnif.h>
|
||||
#ifdef CONFIG_ROCKCREEK
|
||||
#include <asm/RCCE.h>
|
||||
#include <asm/RCCE_lib.h>
|
||||
|
@ -46,8 +48,6 @@
|
|||
void echo_init(void);
|
||||
void ping_init(void);
|
||||
|
||||
static volatile int done = 0;
|
||||
|
||||
/*
|
||||
* Note that linker symbols are not variables, they have no memory allocated for
|
||||
* maintaining a value, rather their address is their value.
|
||||
|
@ -68,17 +68,42 @@ int lowlevel_init(void)
|
|||
}
|
||||
|
||||
#if defined(CONFIG_LWIP) && (defined(CONFIG_PCI) || defined(CONFIG_ROCKCREEK))
|
||||
static tid_t netid;
|
||||
static struct netif* default_netif = NULL;
|
||||
static volatile uint32_t lwip_initialized = 0;
|
||||
|
||||
int STDCALL network_task(void* arg)
|
||||
static void tcp_init_ok(void* e)
|
||||
{
|
||||
struct netif netif;
|
||||
kputs("TCP/IP init COMPLETE!!\n");
|
||||
lwip_initialized = 1;
|
||||
}
|
||||
#endif
|
||||
|
||||
int network_init(void)
|
||||
{
|
||||
#if defined(CONFIG_LWIP) && (defined(CONFIG_PCI) || defined(CONFIG_ROCKCREEK))
|
||||
struct ip_addr ipaddr;
|
||||
struct ip_addr netmask;
|
||||
struct ip_addr gw;
|
||||
|
||||
kputs("Network task is started\n");
|
||||
kputs("Initialize network...\n");
|
||||
|
||||
// Initialize lwIP modules
|
||||
tcpip_init(tcp_init_ok, NULL);
|
||||
|
||||
while(!lwip_initialized) {
|
||||
reschedule();
|
||||
}
|
||||
|
||||
// Set up the lwIP network interface
|
||||
// Allocate and configure netif
|
||||
default_netif = (struct netif *) mem_malloc(sizeof(struct netif));
|
||||
if(default_netif == NULL)
|
||||
{
|
||||
kprintf("ERROR: Out of memory for default netif\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
memset(default_netif, 0x00, sizeof(struct netif));
|
||||
#ifdef CONFIG_ROCKCREEK
|
||||
/* Set network address variables */
|
||||
IP4_ADDR(&gw, 192,168,4,254);
|
||||
|
@ -86,7 +111,7 @@ int STDCALL network_task(void* arg)
|
|||
IP4_ADDR(&netmask, 255,255,255,0);
|
||||
|
||||
/* Bring up the network interface */
|
||||
if (!netif_add(&netif, &ipaddr, &netmask, &gw, NULL, rckemacif_init, ethernet_input)) {
|
||||
if (!netif_add(default_netif, &ipaddr, &netmask, &gw, NULL, rckemacif_init, tcpip_input)) {
|
||||
#else
|
||||
/* Clear network address because we use DHCP to get an ip address */
|
||||
IP4_ADDR(&gw, 0,0,0,0);
|
||||
|
@ -94,75 +119,61 @@ int STDCALL network_task(void* arg)
|
|||
IP4_ADDR(&netmask, 0,0,0,0);
|
||||
|
||||
/* Bring up the network interface */
|
||||
if (!netif_add(&netif, &ipaddr, &netmask, &gw, NULL, rtl8139if_init, ethernet_input)) {
|
||||
if (!netif_add(default_netif, &ipaddr, &netmask, &gw, NULL, rtl8139if_init, tcpip_input)) {
|
||||
#endif
|
||||
kputs("Unable to add network interface\n");
|
||||
return -ENXIO;
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
netif_set_default(&netif);
|
||||
netif_set_up(&netif);
|
||||
netif_set_default(default_netif);
|
||||
netif_set_up(default_netif);
|
||||
|
||||
/* test if interface is really up */
|
||||
if (!netif_is_up(default_netif)) {
|
||||
kputs("network interface is not up\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
#ifndef CONFIG_ROCKCREEK
|
||||
kprintf("Starting DHCPCD...\n");
|
||||
dhcp_start(&netif);
|
||||
dhcp_start(default_netif);
|
||||
|
||||
int mscnt = 0;
|
||||
/* wait for ip address */
|
||||
while(!netif.ip_addr.addr) {
|
||||
rtl8139if_wait(&netif, 1);
|
||||
udelay(500000);
|
||||
while(!default_netif->ip_addr.addr) {
|
||||
sys_msleep(DHCP_FINE_TIMER_MSECS);
|
||||
dhcp_fine_tmr();
|
||||
mscnt += DHCP_FINE_TIMER_MSECS;
|
||||
if (mscnt >= DHCP_COARSE_TIMER_SECS*1000) {
|
||||
dhcp_coarse_tmr();
|
||||
mscnt = 0;
|
||||
}
|
||||
}
|
||||
#else
|
||||
//mmnif_open();
|
||||
#endif
|
||||
|
||||
// start echo and ping server
|
||||
echo_init();
|
||||
//ping_init();
|
||||
|
||||
while(!done) {
|
||||
#ifdef CONFIG_PCI
|
||||
rtl8139if_wait(&netif, 0);
|
||||
#elif defined(CONFIG_ROCKCREEK)
|
||||
rckemacif_wait(&netif, 0);
|
||||
#endif
|
||||
}
|
||||
|
||||
#ifndef CONFIG_ROCKCREEK
|
||||
dhcp_release(&netif);
|
||||
dhcp_stop(&netif);
|
||||
ping_init();
|
||||
#endif
|
||||
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
int network_shutdown(void)
|
||||
{
|
||||
done = 1;
|
||||
#if defined(CONFIG_LWIP) && defined(CONFIG_ROCKCREEK)
|
||||
//mmnif_close();
|
||||
#elif defined(CONFIG_LWIP) && defined(CONFIG_PCI)
|
||||
dhcp_release(default_netif);
|
||||
dhcp_stop(default_netif);
|
||||
#endif
|
||||
|
||||
mem_free(default_netif);
|
||||
default_netif = NULL;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void tcp_init_ok(void* e)
|
||||
{
|
||||
kprintf("TCP/IP init COMPLETE!!!!!!");
|
||||
}
|
||||
|
||||
int network_init(void)
|
||||
{
|
||||
tcpip_init(tcp_init_ok,NULL);
|
||||
kprintf("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA");
|
||||
mmnif_open();
|
||||
kprintf("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA");
|
||||
// return 0;
|
||||
|
||||
#if defined(CONFIG_LWIP)
|
||||
// Initialize lwIP modules
|
||||
// lwip_init();
|
||||
#endif
|
||||
|
||||
#if defined(CONFIG_LWIP) && (defined(CONFIG_PCI) || defined(CONFIG_ROCKCREEK))
|
||||
return create_kernel_task(&netid, network_task, NULL);
|
||||
#else
|
||||
return 0;
|
||||
#endif
|
||||
}
|
||||
|
|
|
@ -78,6 +78,20 @@ static void list_root(void) {
|
|||
list_fs(fs_root, 1);
|
||||
}
|
||||
|
||||
#if MAX_CORES > 1
|
||||
// idle loop of the application processors
|
||||
int smp_main(void)
|
||||
{
|
||||
irq_enable();
|
||||
|
||||
while(1) {
|
||||
HALT;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
int main(void)
|
||||
{
|
||||
lowlevel_init();
|
||||
|
@ -86,7 +100,6 @@ int main(void)
|
|||
kprintf("This is MetalSVM %s Build %u, %u\n",
|
||||
METALSVM_VERSION, &__BUILD_DATE, &__BUILD_TIME);
|
||||
popbg();
|
||||
|
||||
system_init();
|
||||
irq_init();
|
||||
timer_init();
|
||||
|
@ -103,7 +116,6 @@ int main(void)
|
|||
irq_enable();
|
||||
|
||||
kprintf("Kernel starts at %p and ends at %p\n", &kernel_start, &kernel_end);
|
||||
|
||||
system_calibration();
|
||||
network_init();
|
||||
|
||||
|
@ -112,10 +124,9 @@ int main(void)
|
|||
kprintf("Current allocated memory: %u KBytes\n", atomic_int32_read(&total_allocated_pages)*(PAGE_SIZE/1024));
|
||||
kprintf("Current available memory: %u MBytes\n", atomic_int32_read(&total_available_pages)/((1024*1024)/PAGE_SIZE));
|
||||
|
||||
// sleep(5);
|
||||
// list_root();
|
||||
sleep(5);
|
||||
list_root();
|
||||
test_init();
|
||||
|
||||
per_core(current_task)->status = TASK_IDLE;
|
||||
reschedule();
|
||||
|
||||
|
|
|
@ -43,7 +43,7 @@
|
|||
#include <metalsvm/processor.h>
|
||||
#include <lwip/opt.h>
|
||||
|
||||
#if LWIP_RAW /* don't build if not configured for use in lwipopts.h */
|
||||
#if LWIP_RAW || LWIP_SOCKET /* don't build if not configured for use in lwipopts.h */
|
||||
|
||||
#include <lwip/mem.h>
|
||||
#include <lwip/raw.h>
|
||||
|
@ -52,6 +52,7 @@
|
|||
#include <lwip/sys.h>
|
||||
#include <lwip/timers.h>
|
||||
#include <lwip/inet_chksum.h>
|
||||
#include <lwip/ip.h>
|
||||
|
||||
#if LWIP_SOCKET
|
||||
#define PING_USE_SOCKETS 1
|
||||
|
@ -198,23 +199,24 @@ ping_recv(int s)
|
|||
static void
|
||||
ping_thread(void *arg)
|
||||
{
|
||||
int s;
|
||||
int s, i;
|
||||
int timeout = PING_RCV_TIMEO;
|
||||
ip_addr_t ping_target;
|
||||
|
||||
LWIP_UNUSED_ARG(arg);
|
||||
|
||||
if ((s = lwip_socket(AF_INET, SOCK_RAW, IP_PROTO_ICMP)) < 0) {
|
||||
LWIP_DEBUGF( PING_DEBUG, ("ping: invalid socket\n"));
|
||||
return;
|
||||
}
|
||||
|
||||
lwip_setsockopt(s, SOL_SOCKET, SO_RCVTIMEO, &timeout, sizeof(timeout));
|
||||
|
||||
while (1) {
|
||||
for(i=0; i<5; i++) {
|
||||
ping_target = PING_TARGET;
|
||||
|
||||
if (ping_send(s, &ping_target) == ERR_OK) {
|
||||
LWIP_DEBUGF( PING_DEBUG, ("ping: send "));
|
||||
LWIP_DEBUGF( PING_DEBUG, ("ping: send on core %d to " , CORE_ID));
|
||||
ip_addr_debug_print(PING_DEBUG, &ping_target);
|
||||
LWIP_DEBUGF( PING_DEBUG, ("\n"));
|
||||
|
||||
|
@ -227,6 +229,8 @@ ping_thread(void *arg)
|
|||
}
|
||||
sys_msleep(PING_DELAY);
|
||||
}
|
||||
|
||||
lwip_close(s);
|
||||
}
|
||||
|
||||
#else /* PING_USE_SOCKETS */
|
||||
|
|
|
@ -25,8 +25,6 @@
|
|||
#include <metalsvm/spinlock.h>
|
||||
#include <metalsvm/time.h>
|
||||
|
||||
#include <lwip/sockets.h>
|
||||
|
||||
static int sys_write(int fildes, const char *buf, size_t len)
|
||||
{
|
||||
int i;
|
||||
|
@ -129,61 +127,6 @@ int syscall_handler(uint32_t sys_nr, ...)
|
|||
ret = sys_times(buffer, clock);
|
||||
break;
|
||||
}
|
||||
case __NR_socket: {
|
||||
int domain= va_arg(vl,int);
|
||||
int type = va_arg(vl,int);
|
||||
int protocol = va_arg(vl,int);
|
||||
ret = socket(domain,type,protocol);
|
||||
break;
|
||||
}
|
||||
case __NR_bind: {
|
||||
int s = va_arg(vl,int);
|
||||
const struct sockaddr *name = va_arg(vl,struct sockaddr *);
|
||||
socklen_t namelen = va_arg(vl,socklen_t);
|
||||
ret = bind(s,name,namelen);
|
||||
break;
|
||||
}
|
||||
case __NR_listen: {
|
||||
int s = va_arg(vl,int);
|
||||
int backlog = va_arg(vl,int);
|
||||
ret = listen(s,backlog);
|
||||
break;
|
||||
}
|
||||
case __NR_accept: {
|
||||
int s = va_arg(vl,int);
|
||||
struct sockaddr *addr = va_arg(vl,struct sockaddr *);
|
||||
socklen_t *addrlen = va_arg(vl,socklen_t*);
|
||||
ret = accept(s,addr,addrlen);
|
||||
break;
|
||||
}
|
||||
case __NR_connect: {
|
||||
int s = va_arg(vl,int);
|
||||
const struct sockaddr *name =va_arg(vl, struct sockaddr *);
|
||||
socklen_t namelen = va_arg(vl,socklen_t);
|
||||
ret = connect(s,name,namelen);
|
||||
break;
|
||||
}
|
||||
case __NR_send: {
|
||||
int s = va_arg(vl,int);
|
||||
const void *data = va_arg(vl,void*);
|
||||
size_t size = va_arg(vl,size_t);
|
||||
int flags = va_arg(vl,int);
|
||||
ret = send(s,data,size,flags);
|
||||
break;
|
||||
}
|
||||
case __NR_recv: {
|
||||
int s = va_arg(vl,int);
|
||||
const void *data = va_arg(vl,void*);
|
||||
size_t size = va_arg(vl,size_t);
|
||||
int flags = va_arg(vl,int);
|
||||
ret = recv(s,data,size,flags);
|
||||
break;
|
||||
}
|
||||
case __NR_closesocket: {
|
||||
int s = va_arg(vl,int);
|
||||
ret = closesocket(s);
|
||||
break;
|
||||
}
|
||||
default:
|
||||
kputs("invalid system call\n");
|
||||
ret = -ENOSYS;
|
||||
|
|
238
kernel/tasks.c
|
@ -42,16 +42,20 @@
|
|||
#include <asm/apic.h>
|
||||
#include <asm/elf.h>
|
||||
|
||||
DEFINE_PER_CORE(task_t*, current_task, NULL);
|
||||
|
||||
/** @brief Array of task structures
|
||||
*
|
||||
* A task's id will be its position in this array.
|
||||
*/
|
||||
static task_t task_table[MAX_TASKS] = {[0 ... MAX_TASKS-1] = {0, TASK_INVALID, ATOMIC_INIT(0), \
|
||||
SPINLOCK_INIT, NULL, SPINLOCK_INIT, NULL, 0, 0, 0, 0}};
|
||||
static task_t task_table[MAX_TASKS] = { \
|
||||
[0] = {0, TASK_RUNNING, ATOMIC_INIT(0), SPINLOCK_INIT, NULL, SPINLOCK_INIT, NULL, 0, 0, 0, 0}, \
|
||||
[1 ... MAX_TASKS-1] = {0, TASK_INVALID, ATOMIC_INIT(0), SPINLOCK_INIT, NULL, SPINLOCK_INIT, NULL, 0, 0, 0, 0}};
|
||||
static spinlock_irqsave_t table_lock = SPINLOCK_IRQSAVE_INIT;
|
||||
|
||||
DEFINE_PER_CORE(task_t*, current_task, task_table+0);
|
||||
#if MAX_CORES > 1
|
||||
DEFINE_PER_CORE_STATIC(task_t*, old_task, NULL);
|
||||
#endif
|
||||
|
||||
/** @brief helper function for the assembly code to determine the current task
|
||||
* @return Pointer to the task_t structure of current task
|
||||
*/
|
||||
|
@ -60,14 +64,10 @@ task_t* get_current_task(void) {
|
|||
}
|
||||
|
||||
int multitasking_init(void) {
|
||||
if (task_table[0].status == TASK_INVALID) {
|
||||
task_table[0].id = 0;
|
||||
task_table[0].status = TASK_RUNNING;
|
||||
atomic_int32_set(&task_table[0].user_usage, 0);
|
||||
if (BUILTIN_EXPECT(task_table[0].status == TASK_RUNNING, 1)) {
|
||||
mailbox_wait_msg_init(&task_table[0].inbox);
|
||||
memset(task_table[0].outbox, 0x00, sizeof(mailbox_wait_msg_t*)*MAX_TASKS);
|
||||
per_core(current_task) = task_table+0;
|
||||
per_core(current_task)->pgd = get_boot_pgd();
|
||||
task_table[0].pgd = get_boot_pgd();
|
||||
task_table[0].flags = TASK_DEFAULT_FLAGS;
|
||||
return 0;
|
||||
}
|
||||
|
@ -75,22 +75,44 @@ int multitasking_init(void) {
|
|||
return -ENOMEM;
|
||||
}
|
||||
|
||||
size_t get_idle_task(uint32_t id)
|
||||
{
|
||||
#if MAX_CORES > 1
|
||||
if (BUILTIN_EXPECT((id >= MAX_TASKS) || (task_table[id].status != TASK_INVALID), 0))
|
||||
return -EINVAL;
|
||||
|
||||
task_table[id].id = id;
|
||||
task_table[id].status = TASK_IDLE;
|
||||
atomic_int32_set(&task_table[id].user_usage, 0);
|
||||
mailbox_wait_msg_init(&task_table[id].inbox);
|
||||
memset(task_table[id].outbox, 0x00, sizeof(mailbox_wait_msg_t*)*MAX_TASKS);
|
||||
task_table[id].pgd = get_boot_pgd();
|
||||
task_table[id].flags = TASK_DEFAULT_FLAGS;
|
||||
current_task[id].var = task_table+id;
|
||||
|
||||
return get_stack(id);
|
||||
#else
|
||||
return -EINVAL;
|
||||
#endif
|
||||
}
|
||||
|
||||
/** @brief Wakeup tasks which are waiting for a message from the current one
|
||||
*
|
||||
* @param result Current task's resulting return value
|
||||
*/
|
||||
static void wakeup_blocked_tasks(int result)
|
||||
{
|
||||
wait_msg_t tmp = { per_core(current_task)->id, result };
|
||||
task_t* curr_task = per_core(current_task);
|
||||
wait_msg_t tmp = { curr_task->id, result };
|
||||
unsigned int i;
|
||||
|
||||
spinlock_irqsave_lock(&table_lock);
|
||||
|
||||
/* wake up blocked tasks */
|
||||
for(i=0; i<MAX_TASKS; i++) {
|
||||
if (per_core(current_task)->outbox[i]) {
|
||||
mailbox_wait_msg_post(per_core(current_task)->outbox[i], tmp);
|
||||
per_core(current_task)->outbox[i] = NULL;
|
||||
if (curr_task->outbox[i]) {
|
||||
mailbox_wait_msg_post(curr_task->outbox[i], tmp);
|
||||
curr_task->outbox[i] = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -101,32 +123,33 @@ static void wakeup_blocked_tasks(int result)
|
|||
* procedures which are called by exiting tasks. */
|
||||
static void NORETURN do_exit(int arg) {
|
||||
vma_t* tmp;
|
||||
task_t* curr_task = per_core(current_task);
|
||||
|
||||
kprintf("Terminate task: %u, return value %d\n", per_core(current_task)->id, arg);
|
||||
kprintf("Terminate task: %u, return value %d\n", curr_task->id, arg);
|
||||
|
||||
wakeup_blocked_tasks(arg);
|
||||
|
||||
//vma_dump(per_core(current_task));
|
||||
spinlock_lock(&(per_core(current_task)->vma_lock));
|
||||
//vma_dump(curr_task);
|
||||
spinlock_lock(&curr_task->vma_lock);
|
||||
|
||||
// remove memory regions
|
||||
while((tmp = per_core(current_task)->vma_list) != NULL) {
|
||||
while((tmp = curr_task->vma_list) != NULL) {
|
||||
kfree((void*) tmp->start, tmp->end - tmp->start + 1);
|
||||
per_core(current_task)->vma_list = tmp->next;
|
||||
curr_task->vma_list = tmp->next;
|
||||
kfree((void*) tmp, sizeof(vma_t));
|
||||
}
|
||||
|
||||
spinlock_unlock(&(per_core(current_task)->vma_lock));
|
||||
spinlock_unlock(&curr_task->vma_lock);
|
||||
|
||||
drop_pgd(); // delete page directory and its page tables
|
||||
|
||||
if (atomic_int32_read(&per_core(current_task)->user_usage))
|
||||
if (atomic_int32_read(&curr_task->user_usage))
|
||||
kprintf("Memory leak! Task %d did not release %d pages\n",
|
||||
per_core(current_task)->id, atomic_int32_read(&per_core(current_task)->user_usage));
|
||||
per_core(current_task)->status = TASK_FINISHED;
|
||||
curr_task->id, atomic_int32_read(&curr_task->user_usage));
|
||||
curr_task->status = TASK_FINISHED;
|
||||
reschedule();
|
||||
|
||||
kputs("Kernel panic: scheduler found no valid task\n");
|
||||
kprintf("Kernel panic: scheduler on core %d found no valid task\n", CORE_ID);
|
||||
while(1) {
|
||||
HALT;
|
||||
}
|
||||
|
@ -150,6 +173,17 @@ void NORETURN abort(void) {
|
|||
do_exit(-1);
|
||||
}
|
||||
|
||||
/*
|
||||
* @brief: if the task gets its first time slice,
|
||||
* the table_lock is still held and has to be released.
|
||||
*/
|
||||
inline static void start_first_time_slice(void)
|
||||
{
|
||||
#if MAX_CORES > 1
|
||||
spinlock_irqsave_unlock(&table_lock);
|
||||
#endif
|
||||
}
|
||||
|
||||
/** @brief Create a task with a specific entry point
|
||||
*
|
||||
* @param id Pointer to a tid_t struct where the id shall be set
|
||||
|
@ -159,8 +193,9 @@ void NORETURN abort(void) {
|
|||
* - 0 on success
|
||||
* - -ENOMEM (-12) or -EINVAL (-22) on failure
|
||||
*/
|
||||
static int create_task(tid_t* id, entry_point_t ep, void* arg)
|
||||
static int create_task(tid_t* id, internal_entry_point_t ep, void* arg)
|
||||
{
|
||||
task_t* curr_task;
|
||||
int ret = -ENOMEM;
|
||||
unsigned int i;
|
||||
|
||||
|
@ -169,6 +204,8 @@ static int create_task(tid_t* id, entry_point_t ep, void* arg)
|
|||
|
||||
spinlock_irqsave_lock(&table_lock);
|
||||
|
||||
curr_task = per_core(current_task);
|
||||
|
||||
for(i=0; i<MAX_TASKS; i++) {
|
||||
if (task_table[i].status == TASK_INVALID) {
|
||||
atomic_int32_set(&task_table[i].user_usage, 0);
|
||||
|
@ -184,7 +221,7 @@ static int create_task(tid_t* id, entry_point_t ep, void* arg)
|
|||
task_table[i].vma_list = NULL;
|
||||
mailbox_wait_msg_init(&task_table[i].inbox);
|
||||
memset(task_table[i].outbox, 0x00, sizeof(mailbox_wait_msg_t*)*MAX_TASKS);
|
||||
task_table[i].outbox[per_core(current_task)->id] = &per_core(current_task)->inbox;
|
||||
task_table[i].outbox[curr_task->id] = &curr_task->inbox;
|
||||
|
||||
if (id)
|
||||
*id = i;
|
||||
|
@ -206,7 +243,6 @@ create_task_out:
|
|||
return ret;
|
||||
}
|
||||
|
||||
|
||||
int sys_fork(void)
|
||||
{
|
||||
int ret = -ENOMEM;
|
||||
|
@ -216,7 +252,7 @@ int sys_fork(void)
|
|||
vma_t* parent;
|
||||
vma_t* tmp;
|
||||
|
||||
spinlock_lock(&per_core(current_task)->vma_lock);
|
||||
spinlock_lock(&parent_task->vma_lock);
|
||||
spinlock_irqsave_lock(&table_lock);
|
||||
|
||||
for(i=0; i<MAX_TASKS; i++) {
|
||||
|
@ -234,7 +270,7 @@ int sys_fork(void)
|
|||
|
||||
// copy VMA list
|
||||
child = &task_table[i].vma_list;
|
||||
parent = per_core(current_task)->vma_list;
|
||||
parent = parent_task->vma_list;
|
||||
tmp = NULL;
|
||||
|
||||
while(parent) {
|
||||
|
@ -255,9 +291,9 @@ int sys_fork(void)
|
|||
|
||||
mailbox_wait_msg_init(&task_table[i].inbox);
|
||||
memset(task_table[i].outbox, 0x00, sizeof(mailbox_wait_msg_t*)*MAX_TASKS);
|
||||
task_table[i].outbox[per_core(current_task)->id] = &per_core(current_task)->inbox;
|
||||
task_table[i].flags = per_core(current_task)->flags;
|
||||
memcpy(&(task_table[i].fpu), &(per_core(current_task)->fpu), sizeof(union fpu_state));
|
||||
task_table[i].outbox[parent_task->id] = &parent_task->inbox;
|
||||
task_table[i].flags = parent_task->flags;
|
||||
memcpy(&(task_table[i].fpu), &(parent_task->fpu), sizeof(union fpu_state));
|
||||
task_table[i].start_tick = get_clock_tick();
|
||||
task_table[i].start_heap = 0;
|
||||
task_table[i].end_heap = 0;
|
||||
|
@ -269,6 +305,7 @@ int sys_fork(void)
|
|||
// Leave the function without releasing the locks
|
||||
// because the locks are already released
|
||||
// by the parent task!
|
||||
start_first_time_slice();
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -282,20 +319,57 @@ int sys_fork(void)
|
|||
|
||||
create_task_out:
|
||||
spinlock_irqsave_unlock(&table_lock);
|
||||
spinlock_unlock(&per_core(current_task)->vma_lock);
|
||||
spinlock_unlock(&parent_task->vma_lock);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int create_kernel_task(tid_t* id, entry_point_t ep, void* arg)
|
||||
/** @brief Structure which keeps all
|
||||
* relevant data for a new kernel task to start */
|
||||
typedef struct {
|
||||
/// entry point of the kernel task
|
||||
entry_point_t func;
|
||||
/// arguments
|
||||
void* args;
|
||||
} kernel_args_t;
|
||||
|
||||
/** @brief This call is used to adapt create_task calls
|
||||
* which want to have a start function and argument list */
|
||||
static int STDCALL kernel_entry(void* args)
|
||||
{
|
||||
return create_task(id, ep, arg);
|
||||
int ret;
|
||||
kernel_args_t* kernel_args = (kernel_args_t*) args;
|
||||
|
||||
start_first_time_slice();
|
||||
|
||||
if (BUILTIN_EXPECT(!kernel_args, 0))
|
||||
return -EINVAL;
|
||||
|
||||
ret = kernel_args->func(kernel_args->args);
|
||||
|
||||
kfree(kernel_args, sizeof(kernel_args_t));
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int create_kernel_task(tid_t* id, entry_point_t ep, void* args)
|
||||
{
|
||||
kernel_args_t* kernel_args;
|
||||
|
||||
kernel_args = kmalloc(sizeof(kernel_args_t));
|
||||
if (BUILTIN_EXPECT(!kernel_args, 0))
|
||||
return -ENOMEM;
|
||||
|
||||
kernel_args->func = ep;
|
||||
kernel_args->args = args;
|
||||
|
||||
return create_task(id, kernel_entry, kernel_args);
|
||||
}
|
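A usage note on the refactored entry path above: create_kernel_task() now heap-allocates a kernel_args_t wrapper, and kernel_entry() releases the table_lock for the task's first time slice and frees the wrapper once the entry function returns, so a caller only supplies the function and its argument (as the test code later in this commit does). The worker below is purely illustrative.

// Sketch: spawning a kernel task through the wrapped entry point.
static int worker(void* arg)
{
	kprintf("worker says: %s\n", (char*) arg);
	return 0;
}

static void spawn_worker(void)
{
	tid_t id;

	if (!create_kernel_task(&id, worker, "hello"))
		kprintf("spawned kernel task %u\n", id);
}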
||||
|
||||
#define MAX_ARGS (PAGE_SIZE - 2*sizeof(int) - sizeof(vfs_node_t*))
|
||||
|
||||
/** @brief Structure which keeps all
|
||||
* relevant data for a new task to start */
|
||||
* relevant data for a new user task to start */
|
||||
typedef struct {
|
||||
/// Points to the node with the executable in the file system
|
||||
vfs_node_t* node;
|
||||
|
@ -320,6 +394,7 @@ static int load_task(load_args_t* largs)
|
|||
elf_program_header_t prog_header;
|
||||
//elf_section_header_t sec_header;
|
||||
vfs_node_t* node;
|
||||
task_t* curr_task = per_core(current_task);
|
||||
|
||||
if (!largs)
|
||||
return -EINVAL;
|
||||
|
@ -378,8 +453,8 @@ static int load_task(load_args_t* largs)
|
|||
memset((void*) prog_header.virt_addr, 0, npages*PAGE_SIZE);
|
||||
|
||||
// set starting point of the heap
|
||||
if (per_core(current_task)->start_heap < prog_header.virt_addr+prog_header.mem_size)
|
||||
per_core(current_task)->start_heap = per_core(current_task)->end_heap = prog_header.virt_addr+prog_header.mem_size;
|
||||
if (curr_task->start_heap < prog_header.virt_addr+prog_header.mem_size)
|
||||
curr_task->start_heap = curr_task->end_heap = prog_header.virt_addr+prog_header.mem_size;
|
||||
|
||||
// load program
|
||||
read_fs(node, (uint8_t*)prog_header.virt_addr, prog_header.file_size, prog_header.offset);
|
||||
|
@ -391,7 +466,7 @@ static int load_task(load_args_t* largs)
|
|||
flags |= VMA_WRITE;
|
||||
if (prog_header.flags & PF_X)
|
||||
flags |= VMA_EXECUTE;
|
||||
vma_add(per_core(current_task), prog_header.virt_addr, prog_header.virt_addr+npages*PAGE_SIZE-1, flags);
|
||||
vma_add(curr_task, prog_header.virt_addr, prog_header.virt_addr+npages*PAGE_SIZE-1, flags);
|
||||
|
||||
if (!(prog_header.flags & PF_W))
|
||||
change_page_permissions(prog_header.virt_addr, prog_header.virt_addr+npages*PAGE_SIZE-1, flags);
|
||||
|
@ -420,7 +495,7 @@ static int load_task(load_args_t* largs)
|
|||
flags |= VMA_WRITE;
|
||||
if (prog_header.flags & PF_X)
|
||||
flags |= VMA_EXECUTE;
|
||||
vma_add(per_core(current_task), stack, stack+npages*PAGE_SIZE-1, flags);
|
||||
vma_add(curr_task, stack, stack+npages*PAGE_SIZE-1, flags);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
@ -488,7 +563,7 @@ static int load_task(load_args_t* largs)
|
|||
kfree(largs, sizeof(load_args_t));
|
||||
|
||||
// clear fpu state
|
||||
per_core(current_task)->flags &= ~(TASK_FPU_USED|TASK_FPU_INIT);
|
||||
curr_task->flags &= ~(TASK_FPU_USED|TASK_FPU_INIT);
|
||||
|
||||
jump_to_user_code(header.entry, stack+offset);
|
||||
|
||||
|
@ -504,7 +579,18 @@ invalid:
|
|||
* which want to have a start function and argument list */
|
||||
static int STDCALL user_entry(void* arg)
|
||||
{
|
||||
return load_task((load_args_t*) arg);
|
||||
int ret;
|
||||
|
||||
start_first_time_slice();
|
||||
|
||||
if (BUILTIN_EXPECT(!arg, 0))
|
||||
return -EINVAL;
|
||||
|
||||
ret = load_task((load_args_t*) arg);
|
||||
|
||||
kfree(arg, sizeof(load_args_t));
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/** @brief Convenience variant of create_user_task. Just call it with an executable name
|
||||
|
@ -566,6 +652,7 @@ int sys_execve(const char* fname, char** argv, char** env)
|
|||
char *dest, *src;
|
||||
int ret, argc = 0;
|
||||
int envc = 0;
|
||||
task_t* curr_task = per_core(current_task);
|
||||
|
||||
node = findnode_fs((char*) fname);
|
||||
if (!node || !(node->type == FS_FILE))
|
||||
|
@ -608,16 +695,16 @@ int sys_execve(const char* fname, char** argv, char** env)
|
|||
while ((*dest++ = *src++) != 0);
|
||||
}
|
||||
|
||||
spinlock_lock(&(per_core(current_task)->vma_lock));
|
||||
spinlock_lock(&curr_task->vma_lock);
|
||||
|
||||
// remove old program
|
||||
while((tmp = per_core(current_task)->vma_list) != NULL) {
|
||||
while((tmp = curr_task->vma_list) != NULL) {
|
||||
kfree((void*) tmp->start, tmp->end - tmp->start + 1);
|
||||
per_core(current_task)->vma_list = tmp->next;
|
||||
curr_task->vma_list = tmp->next;
|
||||
kfree((void*) tmp, sizeof(vma_t));
|
||||
}
|
||||
|
||||
spinlock_unlock(&(per_core(current_task)->vma_lock));
|
||||
spinlock_unlock(&curr_task->vma_lock);
|
||||
|
||||
/*
|
||||
* we use a trap gate to enter the kernel
|
||||
|
@ -637,16 +724,17 @@ int sys_execve(const char* fname, char** argv, char** env)
|
|||
* return value. */
|
||||
tid_t wait(int32_t* result)
|
||||
{
|
||||
task_t* curr_task = per_core(current_task);
|
||||
wait_msg_t tmp = { -1, -1};
|
||||
|
||||
/*
|
||||
* idle tasks are not allowed to wait for another task
|
||||
* they should always run...
|
||||
*/
|
||||
if (BUILTIN_EXPECT(per_core(current_task)->status == TASK_IDLE, 0))
|
||||
if (BUILTIN_EXPECT(curr_task->status == TASK_IDLE, 0))
|
||||
return -EINVAL;
|
||||
|
||||
mailbox_wait_msg_fetch(&per_core(current_task)->inbox, &tmp);
|
||||
mailbox_wait_msg_fetch(&curr_task->inbox, &tmp);
|
||||
|
||||
if (result)
|
||||
*result = tmp.result;
|
||||
|
@ -691,14 +779,14 @@ int block_task(tid_t id)
|
|||
|
||||
spinlock_irqsave_lock(&table_lock);
|
||||
|
||||
if ((task_table[id].status == TASK_RUNNING) || (task_table[id].status == TASK_READY)) {
|
||||
if ((task_table[id].status == TASK_RUNNING) || (task_table[id].status == TASK_READY)) {
|
||||
task_table[id].status = TASK_BLOCKED;
|
||||
ret = 0;
|
||||
} else kprintf("Unable to block task %d!\n", id);
|
||||
|
||||
spinlock_irqsave_unlock(&table_lock);
|
||||
spinlock_irqsave_unlock(&table_lock);
|
||||
|
||||
return ret;
|
||||
return ret;
|
||||
}
|
||||
|
||||
/** @brief _The_ scheduler procedure
|
||||
|
@ -707,6 +795,8 @@ int block_task(tid_t id)
|
|||
*/
|
||||
void scheduler(void)
|
||||
{
|
||||
task_t* orig_task;
|
||||
task_t* curr_task;
|
||||
unsigned int i;
|
||||
unsigned int new_id;
|
||||
|
||||
|
@ -714,47 +804,55 @@ void scheduler(void)
|
|||
spinlock_irqsave_lock(&table_lock);
|
||||
#endif
|
||||
|
||||
/* signalize that this task could be reused */
|
||||
if (per_core(current_task)->status == TASK_FINISHED)
|
||||
per_core(current_task)->status = TASK_INVALID;
|
||||
orig_task = curr_task = per_core(current_task);
|
||||
|
||||
/* signalizes that this task could be reused */
|
||||
if (curr_task->status == TASK_FINISHED)
|
||||
curr_task->status = TASK_INVALID;
|
||||
|
||||
/* if the task is using the FPU, we need to save the FPU context */
|
||||
if (per_core(current_task)->flags & TASK_FPU_USED) {
|
||||
save_fpu_state(&(per_core(current_task)->fpu));
|
||||
per_core(current_task)->flags &= ~TASK_FPU_USED;
|
||||
if (curr_task->flags & TASK_FPU_USED) {
|
||||
save_fpu_state(&(curr_task->fpu));
|
||||
curr_task->flags &= ~TASK_FPU_USED;
|
||||
}
|
||||
|
||||
for(i=1, new_id=(per_core(current_task)->id + 1) % MAX_TASKS;
|
||||
for(i=1, new_id=(curr_task->id + 1) % MAX_TASKS;
|
||||
i<MAX_TASKS; i++, new_id=(new_id+1) % MAX_TASKS)
|
||||
{
|
||||
if (task_table[new_id].status == TASK_READY) {
|
||||
if (per_core(current_task)->status == TASK_RUNNING)
|
||||
per_core(current_task)->status = TASK_READY;
|
||||
if (curr_task->status == TASK_RUNNING)
|
||||
curr_task->status = TASK_READY;
|
||||
task_table[new_id].status = TASK_RUNNING;
|
||||
per_core(current_task) = task_table+new_id;
|
||||
curr_task = per_core(current_task) = task_table+new_id;
|
||||
|
||||
goto get_task_out;
|
||||
}
|
||||
}
|
||||
|
||||
if ((per_core(current_task)->status == TASK_RUNNING) || (per_core(current_task)->status == TASK_IDLE))
|
||||
if ((curr_task->status == TASK_RUNNING) || (curr_task->status == TASK_IDLE))
|
||||
goto get_task_out;
|
||||
|
||||
/*
|
||||
* we switch to the idle task, if the current task terminates
|
||||
* and no other is ready
|
||||
*/
|
||||
for(i=0; i<MAX_TASKS; i++) {
|
||||
if (task_table[i].status == TASK_IDLE) {
|
||||
per_core(current_task) = task_table+i;
|
||||
goto get_task_out;
|
||||
}
|
||||
}
|
||||
new_id = CORE_ID;
|
||||
curr_task = per_core(current_task) = task_table+CORE_ID;
|
||||
|
||||
get_task_out:
|
||||
//kprintf("schedule %d on core %d\n", per_core(current_task)->id, smp_id());
|
||||
|
||||
if (curr_task != orig_task)
|
||||
switch_task(new_id);
|
||||
|
||||
#if MAX_CORES > 1
|
||||
spinlock_irqsave_unlock(&table_lock);
|
||||
#else
|
||||
return;
|
||||
#endif
|
||||
}
|
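A short worked example of the selection loop above, assuming MAX_TASKS is 16 as configured earlier in this commit: if the current task has id 3, new_id probes 4, 5, ..., 15, 0, 1, 2 and the first TASK_READY slot wins, with the current task dropping back to TASK_READY; if nothing is ready and the current task can neither keep running nor idle, the core falls back to task_table[CORE_ID], i.e. its own idle task, and switch_task(new_id) is invoked only when the chosen task differs from the one that entered the scheduler.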
||||
|
||||
void reschedule(void)
|
||||
{
|
||||
uint32_t flags = irq_nested_disable();
|
||||
scheduler();
|
||||
irq_nested_enable(flags);
|
||||
}
|
||||
|
|
164
kernel/tests.c
|
@ -26,7 +26,6 @@
|
|||
#include <metalsvm/syscall.h>
|
||||
#ifdef CONFIG_ROCKCREEK
|
||||
#include <asm/icc.h>
|
||||
#include <asm/RCCE_lib.h>
|
||||
#include <asm/RCCE.h>
|
||||
#include <asm/RCCE_lib.h>
|
||||
#include <asm/iRCCE.h>
|
||||
|
@ -34,18 +33,19 @@
|
|||
|
||||
#include <asm/SCC_API.h>
|
||||
#include <lwip/sockets.h>
|
||||
#endif
|
||||
|
||||
#include "client.h"
|
||||
#include "server.h"
|
||||
|
||||
#include "shell.h"
|
||||
|
||||
#endif
|
||||
|
||||
static sem_t consuming, producing;
|
||||
static mailbox_int32_t mbox;
|
||||
static int val = 0;
|
||||
|
||||
static int STDCALL consumer(void* arg)
|
||||
static int consumer(void* arg)
|
||||
{
|
||||
int i, m = 0;
|
||||
|
||||
|
@ -64,7 +64,7 @@ static int STDCALL consumer(void* arg)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int STDCALL producer(void* arg)
|
||||
static int producer(void* arg)
|
||||
{
|
||||
int i;
|
||||
int mail[5] = {1, 2, 3, 4, 5};
|
||||
|
@ -84,7 +84,7 @@ static int STDCALL producer(void* arg)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int STDCALL foo(void* arg)
|
||||
static int foo(void* arg)
|
||||
{
|
||||
int i;
|
||||
|
||||
|
@ -92,7 +92,7 @@ static int STDCALL foo(void* arg)
|
|||
return 0;
|
||||
|
||||
for(i=0; i<5; i++) {
|
||||
kputs((char*) arg);
|
||||
kprintf("Message from core %d: %s\n", smp_id(), (char*) arg);
|
||||
sleep(1);
|
||||
}
|
||||
|
||||
|
@ -100,25 +100,25 @@ static int STDCALL foo(void* arg)
|
|||
}
|
||||
|
||||
#ifdef CONFIG_ROCKCREEK
|
||||
static int STDCALL ping(void* arg)
|
||||
{
|
||||
int mail_ping(void* arg) {
|
||||
int i;
|
||||
|
||||
for(i=0; i<20; i++) {
|
||||
icc_ping(1);
|
||||
HALT;
|
||||
if (BUILTIN_EXPECT(icc_mail_ping(), 0))
|
||||
return -1;
|
||||
udelay(500000);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
static int STDCALL join_test(void* arg)
|
||||
static int join_test(void* arg)
|
||||
{
|
||||
tid_t id, ret;
|
||||
int result = -1234;
|
||||
|
||||
create_kernel_task(&id, foo, "Hello from foo2\n");
|
||||
create_kernel_task(&id, foo, "Hello from foo2");
|
||||
|
||||
kprintf("Wait for child %u\n", id);
|
||||
do {
|
||||
|
@ -130,26 +130,7 @@ static int STDCALL join_test(void* arg)
|
|||
return 0;
|
||||
}
|
||||
|
||||
void ping_send_now();
|
||||
|
||||
__inline int get_core_no(void)
|
||||
{
|
||||
unsigned int tmp;
|
||||
unsigned int pid;
|
||||
unsigned int x,y,z;
|
||||
/* Determine the local IP address from the core number in the
|
||||
* tile ID register
|
||||
*/
|
||||
tmp = ReadConfigReg(0xF8000000 + 0x100);
|
||||
x = (tmp>>3) & 0x0f; /* bits 06:03 */
|
||||
y = (tmp>>7) & 0x0f; /* bits 10:07 */
|
||||
z = (tmp ) & 0x07; /* bits 02:00 */
|
||||
pid = 12*y + 2*x + z;
|
||||
/* Add 1 to the processor ID to avoid *.*.*.0 IP addresses */
|
||||
return pid;
|
||||
}
|
||||
|
||||
|
||||
#if defined(CONFIG_LWIP) && defined(CONFIG_ROCKCREEK)
|
||||
static int srv_cnt = 0;
|
||||
void srv_on_read(ServerEventArgs* e)
|
||||
{
|
||||
|
@ -218,7 +199,7 @@ void* server_task(void* e)
|
|||
|
||||
/* First call to socket() function */
|
||||
sockfd = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
|
||||
if (sockfd < 0)
|
||||
if (sockfd < 0)
|
||||
{
|
||||
SHELLDEBUGPRINTF("ERROR opening socket");
|
||||
return;
|
||||
|
@ -229,7 +210,7 @@ void* server_task(void* e)
|
|||
serv_addr.sin_family = AF_INET;
|
||||
serv_addr.sin_addr.s_addr = INADDR_ANY;
|
||||
serv_addr.sin_port = htons(portno);
|
||||
|
||||
|
||||
SHELLDEBUGPRINTF("binding");
|
||||
/* Now bind the host address using bind() call.*/
|
||||
if (bind(sockfd, (struct sockaddr *) &serv_addr,
|
||||
|
@ -248,9 +229,9 @@ void* server_task(void* e)
|
|||
|
||||
/* Accept actual connection from the client */
|
||||
SHELLDEBUGPRINTF("accepting");
|
||||
newsockfd = accept(sockfd, (struct sockaddr *)&cli_addr,
|
||||
newsockfd = accept(sockfd, (struct sockaddr *)&cli_addr,
|
||||
&clilen);
|
||||
if (newsockfd < 0)
|
||||
if (newsockfd < 0)
|
||||
{
|
||||
kprintf("ERROR on accept");
|
||||
return;
|
||||
|
@ -297,7 +278,7 @@ void* server_task(void* e)
|
|||
SHELLDEBUGPRINTF("Send 1024*256 Bytes in : %d clock ticks",tmp2-tmp1);
|
||||
|
||||
#endif
|
||||
return 0;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int cli_cnt = 0;
|
||||
|
@ -350,44 +331,44 @@ void* client_task(void* e)
|
|||
|
||||
#if 0
|
||||
char dir[2048];
|
||||
int sd;
|
||||
struct sockaddr_in sin;
|
||||
struct sockaddr_in pin;
|
||||
struct hostent *hp;
|
||||
int sd;
|
||||
struct sockaddr_in sin;
|
||||
struct sockaddr_in pin;
|
||||
struct hostent *hp;
|
||||
int n;
|
||||
|
||||
int on = 1;
|
||||
|
||||
sleep(1);
|
||||
sleep(1);
|
||||
|
||||
/* fill in the socket structure with host information */
|
||||
memset(&pin, 0, sizeof(pin));
|
||||
pin.sin_family = AF_INET;
|
||||
/* fill in the socket structure with host information */
|
||||
memset(&pin, 0, sizeof(pin));
|
||||
pin.sin_family = AF_INET;
|
||||
pin.sin_addr.s_addr = inet_addr("192.168.0.1");
|
||||
pin.sin_port = htons(5001);
|
||||
pin.sin_port = htons(5001);
|
||||
|
||||
/* grab an Internet domain socket */
|
||||
if ((sd = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP)) == -1) {
|
||||
kprintf("socketfail");
|
||||
return;
|
||||
}
|
||||
/* grab an Internet domain socket */
|
||||
if ((sd = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP)) == -1) {
|
||||
kprintf("socketfail");
|
||||
return;
|
||||
}
|
||||
|
||||
// setsockopt( sd, IPPROTO_TCP, TCP_NODELAY, (char *) &on, sizeof( on));
|
||||
|
||||
kprintf("connecting with socket nr : %d",sd);
|
||||
/* connect to PORT on HOST */
|
||||
kprintf("connecting with socket nr : %d",sd);
|
||||
/* connect to PORT on HOST */
|
||||
|
||||
if (connect(sd,(struct sockaddr *) &pin, sizeof(pin)) == -1) {
|
||||
kprintf("connectfail");
|
||||
return;
|
||||
}
|
||||
kprintf("sending");
|
||||
/* send a message to the server PORT on machine HOST */
|
||||
if (send(sd, "HELLO THERE", strlen("HELLO THERE"), 0) == -1) {
|
||||
kprintf("sendfail");
|
||||
return;
|
||||
}
|
||||
kprintf("recieving");
|
||||
if (connect(sd,(struct sockaddr *) &pin, sizeof(pin)) == -1) {
|
||||
kprintf("connectfail");
|
||||
return;
|
||||
}
|
||||
kprintf("sending");
|
||||
/* send a message to the server PORT on machine HOST */
|
||||
if (send(sd, "HELLO THERE", strlen("HELLO THERE"), 0) == -1) {
|
||||
kprintf("sendfail");
|
||||
return;
|
||||
}
|
||||
kprintf("recieving");
|
||||
/* wait for a message to come back from the server */
|
||||
if (recv(sd, dir, 256, 0) == -1) {
|
||||
kprintf("recvfail");
|
||||
|
@ -408,57 +389,38 @@ void* client_task(void* e)

#endif
	return NULL;
}

#endif

int test_init(void)
{
	int i = 0;
	kprintf("start testing");
//	char* argv[] = {"/bin/tests", NULL};

//	sem_init(&producing, 1);
//	sem_init(&consuming, 0);
//	mailbox_int32_init(&mbox);

#if defined(CONFIG_LWIP) && defined(CONFIG_ROCKCREEK)

	shell_init(get_core_no());
	shell_init(RCCE_ue());

	sleep(10);
	SHELLDEBUGPRINTF("hello World! I AM CORE NO. %d =) \n",get_core_no());
	sleep(10);
	SHELLDEBUGPRINTF("hello World! I AM CORE NO. %d =) \n",get_core_no());

//	if (get_core_no())
//	{
//		sleep(5);
//		shelldebugprint("slept 5 seconds\n");
//		sleep(5);
//		shelldebugprint("slept another 5 seconds\n");
//		shelldebugprint("This is so wonderful!\nEverything is so well formatted.\n");
	if (!RCCE_ue())
		create_kernel_task(NULL,server_task,NULL);
	else
		create_kernel_task(NULL,client_task,NULL);
#endif

//	for (i = 0; i < 10; i++)
//	{
//		SHELLDEBUGPRINTF("for-loop no: %d\n",i);
//	}
//	}

	if (!get_core_no())
		create_kernel_task(NULL,server_task,NULL);
	else
		create_kernel_task(NULL,client_task,NULL);

#if 0

	char* argv[] = {"/bin/tests", NULL};

	sem_init(&producing, 1);
	sem_init(&consuming, 0);
	mailbox_int32_init(&mbox);

	create_kernel_task(NULL, foo, "Hello from foo1\n");
	//create_kernel_task(NULL, join_test, NULL);
//	create_kernel_task(NULL, foo, "Hello from foo1");
//	create_kernel_task(NULL, join_test, NULL);
	//create_kernel_task(NULL, producer, NULL);
	//create_kernel_task(NULL, consumer, NULL);
	//create_kernel_task(NULL, ping, NULL);
	//create_kernel_task(NULL, mail_ping, NULL);
	//create_user_task(NULL, "/bin/hello", argv);
	create_user_task(NULL, "/bin/tests", argv);
//	create_user_task(NULL, "/bin/tests", argv);
	//create_user_task(NULL, "/bin/jacobi", argv);
	//create_user_task(NULL, "/bin/jacobi", argv);

#endif
	return 0;
}
@ -36,7 +36,7 @@

/*
 * eduOS's printf implementation is based on an implementation which was
 * MetalSVM's printf implementation is based on an implementation which was
 * published at http://www.pagetable.com/?p=298.
 * The authors built a full-featured standalone version of printf(). The
 * base code has been taken from FreeBSD (sys/kern/subr_prf.c) and is
@ -24,15 +24,19 @@
#include <metalsvm/semaphore.h>
#include <asm/atomic.h>
#include <asm/processor.h>
#include <asm/io.h>
#ifdef CONFIG_VGA
#include <asm/vga.h>
#endif

#define NO_EARLY_PRINT 0
#define VGA_EARLY_PRINT 1
#define UART_EARLY_PRINT 2

#ifdef CONFIG_VGA
static uint32_t early_print = VGA_EARLY_PRINT;
#elif defined(CONFIG_UART)
static uint32_t early_print = UART_EARLY_PRINT;
#else
static uint32_t early_print = NO_EARLY_PRINT;
#endif

@ -56,8 +60,12 @@ int kputchar(int c)
	kmessages[pos % KMSG_SIZE] = (unsigned char) c;
#ifdef CONFIG_VGA
	if (early_print == VGA_EARLY_PRINT)
	vga_putchar(c);
		vga_putchar(c);
#endif
#ifdef CONFIG_UART
	if (early_print == UART_EARLY_PRINT)
		uart_putchar(c);
#endif

	return 1;
}

@ -73,6 +81,10 @@ int kputs(const char *str)
#ifdef CONFIG_VGA
		if (early_print == VGA_EARLY_PRINT)
			vga_putchar(str[i]);
#endif
#ifdef CONFIG_UART
		if (early_print == UART_EARLY_PRINT)
			uart_putchar(str[i]);
#endif
	}
@ -74,13 +74,16 @@ sys_msleep(u32_t ms)
	}
}

/* sys_thread_new(): Spawns a new thread with given attributes as supportet
/* sys_thread_new(): Spawns a new thread with given attributes as supported
 * Note: In MetalSVM this is realized as kernel tasks
 */
sys_thread_t sys_thread_new(const char *name, lwip_thread_fn thread, void *arg, int stacksize, int prio)
{
	tid_t tmp;

	kprintf("Create LWIP task %s\n", name);
	create_kernel_task(&tmp,thread,arg);

	return tmp;
}

@ -139,8 +142,8 @@ u32_t sys_arch_sem_wait(sys_sem_t *sem, u32_t timeout)
	while (timeout)
	{
		err = sem_trywait(&sem->sem);
		if (err == 0)
			return err;
		if (err != -1)
			return err;
		udelay(1000);
		timeout--;
	}

@ -232,7 +235,7 @@ void sys_mbox_post(sys_mbox_t* mbox,void* msg)
 */
void sys_mutex_lock(sys_mutex_t* mutex)
{
	sem_wait(mutex);
	sem_wait(mutex);
}

/* sys_mutex_unlock(): unlock the given mutex

@ -252,4 +255,21 @@ err_t sys_mutex_new(sys_mutex_t * mutex)
	return 0;
}

#if SYS_LIGHTWEIGHT_PROT
#if MAX_CORES > 1
static spinlock_irqsave_t lwprot_lock = SPINLOCK_IRQSAVE_INIT;

sys_prot_t sys_arch_protect(void)
{
	spinlock_irqsave_lock(&lwprot_lock);
	return 0;
}

void sys_arch_unprotect(sys_prot_t pval)
{
	spinlock_irqsave_unlock(&lwprot_lock);
}
#endif
#endif

#endif /* !NO_SYS */
@ -4,6 +4,7 @@
#include <asm/tasks.h>
#include <metalsvm/mailbox.h>
#include <metalsvm/errno.h>
#include <asm/irqflags.h>

#define EWOULDBLOCK EAGAIN /* Operation would block */

@ -22,4 +23,24 @@ typedef struct

typedef tid_t* sys_thread_t;

#if SYS_LIGHTWEIGHT_PROT
#if MAX_CORES > 1
typedef uint32_t sys_prot_t;
sys_prot_t sys_arch_protect(void);
void sys_arch_unprotect(sys_prot_t pval);
#else
typedef uint32_t sys_prot_t;

static inline sys_prot_t sys_arch_protect(void)
{
	return irq_nested_disable();
}

static inline void sys_arch_unprotect(sys_prot_t pval)
{
	irq_nested_enable(pval);
}
#endif
#endif

#endif /* __ARCH_SYS_ARCH_H__ */
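A brief aside, not part of the diff: with SYS_LIGHTWEIGHT_PROT enabled, lwIP enters short critical sections through its SYS_ARCH_PROTECT macros, which resolve to the sys_arch_protect()/sys_arch_unprotect() hooks declared above — irq_nested_disable()/irq_nested_enable() on a single core, the lwprot_lock spinlock when MAX_CORES > 1. The sketch below shows the expected call pattern; the function and the shared list are hypothetical, while the macros are lwIP's standard wrappers from lwip/sys.h.

#include "lwip/sys.h"    /* SYS_ARCH_DECL_PROTECT, SYS_ARCH_PROTECT, SYS_ARCH_UNPROTECT */
#include "lwip/pbuf.h"   /* struct pbuf, used here only as example shared state */

/* Sketch: push a pbuf onto a shared list under the lightweight protection. */
static void example_push(struct pbuf **head, struct pbuf *p)
{
	SYS_ARCH_DECL_PROTECT(lev);   /* declares a local sys_prot_t */

	SYS_ARCH_PROTECT(lev);        /* sys_arch_protect(): IRQs off or spinlock */
	p->next = *head;              /* update the shared state atomically */
	*head = p;
	SYS_ARCH_UNPROTECT(lev);      /* sys_arch_unprotect(): restore previous state */
}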
@ -3,12 +3,25 @@
#ifndef __LWIPOPTS_H__
#define __LWIPOPTS_H__

/**
 * SYS_LIGHTWEIGHT_PROT==1: if you want inter-task protection for certain
 * critical regions during buffer allocation, deallocation and memory
 * allocation and deallocation.
 */
#define SYS_LIGHTWEIGHT_PROT 1

/**
 * NO_SYS==1: Provides VERY minimal functionality. Otherwise,
 * use lwIP facilities.
 */
#define NO_SYS 0

/**
 * LWIP_RAW==1: Enable application layer to hook into the IP layer itself.
 * LWIP_RAW==0: speeds up input processing
 */
#define LWIP_RAW 1

/**
 * LWIP_SOCKET==1: Enable Socket API (require to use sockets.c)
 */

@ -49,9 +62,7 @@
/**
 * TCP_SND_BUF: TCP sender buffer space (bytes).
 */
#define TCP_SND_BUF 1512
#define TCP_SND_QUEUELEN 4
#define TCP_SND_BUF 2048

/**
 * LWIP_BROADCAST_PING==1: respond to broadcast pings (default is unicast only)

@ -86,8 +97,15 @@
 */
#define LWIP_CHECKSUM_ON_COPY 1

/**
 * IP_FORWARD==1: Enables the ability to forward IP packets across network
 * interfaces. If you are going to run lwIP on a device with only one network
 * interface, define this to 0.
 */
#define IP_FORWARD 1

/* DEBUG options */
#define LWIP_DEBUG 0
#define LWIP_DEBUG 1
#define DHCP_DEBUG LWIP_DBG_OFF
#define ETHARP_DEBUG LWIP_DBG_OFF
#define TCPIP_DEBUG LWIP_DBG_OFF

@ -96,15 +114,8 @@
#define MEM_DEBUG LWIP_DBG_OFF
#define IP_DEBUG LWIP_DBG_OFF
#define INET_DEBUG LWIP_DBG_OFF
#define NETIF_DEBUG LWIP_DBG_OFF
#define NETIF_DEBUG LWIP_DBG_ON
#define TIMERS_DEBUG LWIP_DBG_OFF

#define IP_FORWARD 1

#if 0
#define LWIP_TCPIP_CORE_LOCKING_INPUT 1
#define LWIP_TCPIP_CORE_LOCKING 1
#endif

#define SOCKETS_DEBUG LWIP_DBG_OFF

#endif
75  mm/memory.c
@ -30,6 +30,9 @@
#include <asm/multiboot.h>
#endif
#ifdef CONFIG_ROCKCREEK
#include <asm/RCCE.h>
#include <asm/RCCE_lib.h>
#include <asm/SCC_API.h>
#include <asm/icc.h>
#endif

@ -53,23 +56,23 @@ atomic_int32_t total_available_pages = ATOMIC_INIT(0);
extern const void kernel_start;
extern const void kernel_end;

inline static int page_marked(unsigned int i)
inline static int page_marked(size_t i)
{
	unsigned int index = i >> 3;
	unsigned int mod = i & 0x7;
	size_t index = i >> 3;
	size_t mod = i & 0x7;

	return (bitmap[index] & (1 << mod));
}

inline static int page_unmarked(unsigned int i)
inline static int page_unmarked(size_t i)
{
	return !page_marked(i);
}

inline static void page_set_mark(unsigned int i)
inline static void page_set_mark(size_t i)
{
	unsigned int index = i >> 3;
	unsigned int mod = i & 0x7;
	size_t index = i >> 3;
	size_t mod = i & 0x7;

	//if (page_marked(i))
	//	kprintf("page %u is already marked\n", i);

@ -77,10 +80,10 @@ inline static void page_set_mark(unsigned int i)
	bitmap[index] = bitmap[index] | (1 << mod);
}

inline static void page_clear_mark(unsigned int i)
inline static void page_clear_mark(size_t i)
{
	unsigned int index = i / 8;
	unsigned int mod = i % 8;
	size_t index = i / 8;
	size_t mod = i % 8;

	if (page_unmarked(i))
		kprintf("page %u is already unmarked\n", i);
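For clarity, not part of the diff: the bitmap keeps one bit per physical page frame, so page index i lands in byte i >> 3 (i / 8) and bit i & 0x7 (i % 8); the change above only widens the index type from unsigned int to size_t. A minimal sketch of the same mapping, assuming the usual x86 PAGE_SHIFT of 12 (4 KiB pages):

#include <stddef.h>
#include <stdint.h>

#define EX_PAGE_SHIFT 12   /* assumption: 4 KiB pages */

/* Sketch: locate the bitmap byte and bit for a physical address,
 * mirroring page_marked()/page_set_mark() above. */
static inline void ex_bitmap_pos(size_t phys_addr, size_t *byte, uint8_t *bit)
{
	size_t page = phys_addr >> EX_PAGE_SHIFT;  /* page frame number */

	*byte = page >> 3;   /* index into bitmap[] */
	*bit  = page & 0x7;  /* bit within that byte */
}
/* Example: phys_addr 0x00203000 -> page 0x203 -> bitmap[0x40], bit 3. */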
@ -93,6 +96,7 @@ int mmu_init(void)
	size_t kernel_size;
	unsigned int i;
	size_t addr;
	int ret;

	// at first, set default value of the bitmap
	memset(bitmap, 0xFF, sizeof(uint8_t)*BITMAP_SIZE);

@ -148,15 +152,8 @@ int mmu_init(void)
		}
	}
#elif defined(CONFIG_ROCKCREEK)
	for(addr=PRIVATE_MEM1_START; addr<PRIVATE_MEM1_END; addr+=PAGE_SIZE) {
		page_clear_mark(addr >> PAGE_SHIFT);
		if (addr > addr + PAGE_SIZE)
			break;
		atomic_int32_inc(&total_pages);
		atomic_int32_inc(&total_available_pages);
	}

	for(addr=PRIVATE_MEM2_START; addr<PRIVATE_MEM2_END; addr+=PAGE_SIZE) {
	/* of course, the first twenty slots belong to the private memory */
	for(addr=0x00; addr<20*0x1000000; addr+=PAGE_SIZE) {
		page_clear_mark(addr >> PAGE_SHIFT);
		if (addr > addr + PAGE_SIZE)
			break;

@ -164,6 +161,15 @@ int mmu_init(void)
		atomic_int32_inc(&total_available_pages);
	}

	// Note: The last slot always belongs to the private memory.
	for(addr=0xFF000000; addr<0xFFFFFFFF; addr+=PAGE_SIZE) {
		page_clear_mark(addr >> PAGE_SHIFT);
		if (addr > addr + PAGE_SIZE)
			break;
		atomic_int32_inc(&total_pages);
		atomic_int32_inc(&total_available_pages);
	}

	/*
	 * Mark the bootinfo as used.
	 */

@ -200,7 +206,36 @@ int mmu_init(void)
	if ((size_t) &kernel_end & (PAGE_SIZE-1))
		alloc_start++;

	return paging_init();
#if MAX_CORES > 1
	// reserve physical page for SMP boot code
	page_set_mark(SMP_SETUP_ADDR >> PAGE_SHIFT);
	atomic_int32_add(&total_allocated_pages, 1);
	atomic_int32_sub(&total_available_pages, 1);
#endif
	ret = paging_init();

#ifdef CONFIG_ROCKCREEK
	/*
	 * Now, we are able to read the FPGA registers and to
	 * determine the number of slots for private memory.
	 */
	uint32_t slots = *((volatile uint32_t*) (FPGA_BASE + 0x8244));
	if (slots == 0)
		slots = 21;

	kprintf("MetalSVM uses %d slots for private memory\n", slots);

	// define the residual private slots as free
	for(addr=20*0x1000000; addr<(slots-1)*0x1000000; addr+=PAGE_SIZE) {
		page_clear_mark(addr >> PAGE_SHIFT);
		if (addr > addr + PAGE_SIZE)
			break;
		atomic_int32_inc(&total_pages);
		atomic_int32_inc(&total_available_pages);
	}
#endif

	return ret;
}

/*
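A short aside, not part of the commit: on the SCC each private-memory slot spans 16 MiB (0x1000000 bytes), the FPGA register at FPGA_BASE + 0x8244 reports the total slot count (with 21 as fallback), the first twenty slots plus the last slot at 0xFF000000 are always treated as private, and the loop above releases the pages of the remaining slots 20 .. slots-2. The helper below only restates that arithmetic; PAGE_SIZE = 4096 is an assumption.

#include <stddef.h>

#define EX_SLOT_SIZE 0x1000000UL   /* 16 MiB per SCC private-memory slot */
#define EX_PAGE_SIZE 4096UL        /* assumption: 4 KiB pages */

/* Sketch: extra pages freed by the loop above for a given slot count
 * (slots 20 .. slots-2, i.e. zero when the fallback of 21 slots is used). */
static size_t ex_residual_pages(unsigned int slots)
{
	if (slots < 21)
		return 0;

	return ((size_t)(slots - 21) * EX_SLOT_SIZE) / EX_PAGE_SIZE;
}
/* Example: slots == 24 frees 3 slots * 4096 = 12288 additional pages. */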
78  newlib/net/syscall.h  Normal file
@ -0,0 +1,78 @@
/*
 * Copyright 2010 Stefan Lankes, Chair for Operating Systems,
 * RWTH Aachen University
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Standard x86 syscalls for user programs running under MetalSVM
 */

#ifndef __SYSCALL_H__
#define __SYSCALL_H__

#ifdef __cplusplus
extern "C" {
#endif

#define __NR_exit 0
#define __NR_write 1
#define __NR_open 2
#define __NR_close 3
#define __NR_read 4
#define __NR_lseek 6
#define __NR_unlink 7
#define __NR_getpid 8
#define __NR_kill 9
#define __NR_fstat 10
#define __NR_sbrk 11
#define __NR_fork 12
#define __NR_wait 13
#define __NR_execve 14
#define __NR_times 15

#define _STR(token) #token
#define _SYSCALLSTR(x) "int $" _STR(x) " "
#define INT_SYSCALL 0x80

inline static long
syscall(int nr, unsigned long arg0, unsigned long arg1, unsigned long arg2,
	unsigned long arg3, unsigned long arg4)
{
	long res;

	asm volatile (_SYSCALLSTR(INT_SYSCALL)
		: "=a" (res)
		: "0" (nr), "b" (arg0), "c" (arg1), "d" (arg2), "S" (arg3), "D" (arg4)
		: "memory", "cc");

	return res;
}

#define SYSCALL0(NR) \
	syscall(NR, 0, 0, 0, 0, 0)
#define SYSCALL1(NR, ARG1) \
	syscall(NR, (unsigned long)ARG1, 0, 0, 0, 0)
#define SYSCALL2(NR, ARG1, ARG2) \
	syscall(NR, (unsigned long)ARG1, (unsigned long)ARG2, 0, 0, 0)
#define SYSCALL3(NR, ARG1, ARG2, ARG3) \
	syscall(NR, (unsigned long)ARG1, (unsigned long)ARG2, (unsigned long)ARG3, 0, 0)
#define SYSCALL4(NR, ARG1, ARG2, ARG3, ARG4) \
	syscall(NR, (unsigned long)ARG1, (unsigned long)ARG2, (unsigned long)ARG3, (unsigned long) ARG4, 0)
#define SYSCALL5(NR, ARG1, ARG2, ARG3, ARG4, ARG5) \
	syscall(NR, (unsigned long)ARG1, (unsigned long)ARG2, (unsigned long)ARG3, (unsigned long) ARG4, (unsigned long) ARG5)

#ifdef __cplusplus
}
#endif

#endif
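A hedged usage sketch, not part of the new header: a user-space stub is expected to funnel through the SYSCALL wrapper that matches its argument count, passing the corresponding __NR_ number. The write() stub below is illustrative only; the real newlib/libgloss stubs may additionally translate negative return values into errno.

#include <stddef.h>
#include <sys/types.h>
#include "syscall.h"   /* the header introduced above */

/* Sketch: a minimal write(2) stub built on SYSCALL3 and __NR_write. */
ssize_t write(int fd, const void *buf, size_t len)
{
	long ret = SYSCALL3(__NR_write, fd, buf, len);

	/* A real stub would map negative results onto errno; omitted here. */
	return (ssize_t) ret;
}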
0  newlib/src/compile  Normal file → Executable file
0  newlib/src/config.guess (vendored)  Normal file → Executable file
0  newlib/src/config.rpath  Normal file → Executable file
0  newlib/src/config.status  Normal file → Executable file
0  newlib/src/config.sub (vendored)  Normal file → Executable file
0  newlib/src/config/acinclude.m4  Normal file → Executable file
0  newlib/src/configure (vendored)  Normal file → Executable file
0  newlib/src/depcomp  Normal file → Executable file
0  newlib/src/etc/config.status  Normal file → Executable file
0  newlib/src/etc/configure (vendored)  Normal file → Executable file
0  newlib/src/install-sh  Normal file → Executable file
0  newlib/src/libgloss/bfin/configure (vendored)  Normal file → Executable file
0  newlib/src/libgloss/configure (vendored)  Normal file → Executable file
0  newlib/src/libgloss/cris/configure (vendored)  Normal file → Executable file
0  newlib/src/libgloss/crx/configure (vendored)  Normal file → Executable file
0  newlib/src/libgloss/d30v/configure (vendored)  Normal file → Executable file
0  newlib/src/libgloss/doc/configure (vendored)  Normal file → Executable file
0  newlib/src/libgloss/fr30/configure (vendored)  Normal file → Executable file
0  newlib/src/libgloss/frv/configure (vendored)  Normal file → Executable file
0  newlib/src/libgloss/hp74x/configure (vendored)  Normal file → Executable file
0  newlib/src/libgloss/i386/configure (vendored)  Normal file → Executable file
0  newlib/src/libgloss/i960/configure (vendored)  Normal file → Executable file
0  newlib/src/libgloss/iq2000/configure (vendored)  Normal file → Executable file
0  newlib/src/libgloss/libnosys/configure (vendored)  Normal file → Executable file
0  newlib/src/libgloss/lm32/configure (vendored)  Normal file → Executable file
0  newlib/src/libgloss/m32c/configure (vendored)  Normal file → Executable file
0  newlib/src/libgloss/m32r/configure (vendored)  Normal file → Executable file
0  newlib/src/libgloss/m68hc11/configure (vendored)  Normal file → Executable file
0  newlib/src/libgloss/m68k/configure (vendored)  Normal file → Executable file
0  newlib/src/libgloss/mcore/configure (vendored)  Normal file → Executable file
0  newlib/src/libgloss/mep/configure (vendored)  Normal file → Executable file
@ -63,8 +63,7 @@ CRT0 = crt0.o
METALSVM_BSP = libgloss.a
METALSVM_OBJS = chown.o errno.o fork.o gettod.o kill.o open.o sbrk.o times.o write.o \
	close.o execve.o fstat.o init.o link.o read.o stat.o unlink.o \
	environ.o _exit.o getpid.o isatty.o lseek.o readlink.o symlink.o wait.o \
	socket.o bind.o listen.o accept.o connect.o send.o recv.o closesocket.o
	environ.o _exit.o getpid.o isatty.o lseek.o readlink.o symlink.o wait.o


#### Host specific Makefile fragment comes in here.
0  newlib/src/libgloss/metalsvm/configure (vendored)  Normal file → Executable file
@ -40,18 +40,6 @@ extern "C" {
#define __NR_execve 14
#define __NR_times 15

/* networking
 */

#define __NR_socket 16
#define __NR_bind 17
#define __NR_listen 18
#define __NR_accept 19
#define __NR_connect 20
#define __NR_send 21
#define __NR_recv 22
#define __NR_closesocket 23

#define _STR(token) #token
#define _SYSCALLSTR(x) "int $" _STR(x) " "
#define INT_SYSCALL 0x80
0  newlib/src/libgloss/mips/configure (vendored)  Normal file → Executable file
0  newlib/src/libgloss/mn10200/configure (vendored)  Normal file → Executable file
0  newlib/src/libgloss/mn10300/configure (vendored)  Normal file → Executable file
0  newlib/src/libgloss/mt/configure (vendored)  Normal file → Executable file
0  newlib/src/libgloss/pa/configure (vendored)  Normal file → Executable file
0  newlib/src/libgloss/rs6000/configure (vendored)  Normal file → Executable file
0  newlib/src/libgloss/sparc/configure (vendored)  Normal file → Executable file
Some files were not shown because too many files have changed in this diff.