Merge branch 'master' into demo

This commit is contained in:
Stefan Lankes 2011-08-28 23:59:12 -07:00
commit 38d26aecae
11 changed files with 137 additions and 123 deletions

View file

@@ -63,7 +63,7 @@ SECTION .text
ALIGN 4
stublet:
; initialize stack pointer.
mov esp, default_stack_pointer
mov esp, [default_stack_pointer]
; initialize cpu features
call cpu_init
; interpret multiboot information

View file

@@ -28,10 +28,10 @@
gdt_ptr_t gp;
static tss_t task_state_segments[MAX_TASKS] __attribute__ ((aligned (PAGE_SIZE)));
static unsigned char kstacks[MAX_TASKS][KERNEL_STACK_SIZE] __attribute__ ((aligned (PAGE_SIZE), section (".data")));
static unsigned char kstacks[MAX_TASKS][KERNEL_STACK_SIZE] __attribute__ ((aligned (PAGE_SIZE))) = {[0 ... MAX_TASKS-1][0 ... KERNEL_STACK_SIZE-1] = 0xCD};
uint32_t default_stack_pointer = (uint32_t) kstacks[0] + KERNEL_STACK_SIZE - sizeof(size_t);
// currently, our kernel has full access to the ioports
static gdt_entry_t gdt[GDT_ENTRIES] = {[0 ... GDT_ENTRIES-1] = {0, 0, 0, 0, 0, 0}};
unsigned char* default_stack_pointer __attribute__ ((section (".data"))) = kstacks[0] + KERNEL_STACK_SIZE - sizeof(size_t);
/*
* This is defined in entry.asm. We use this to properly reload

View file

@@ -280,13 +280,10 @@ void irq_handler(struct state *s)
#endif
leave_handler:
#if 1
// add fast lane for the driver task
if (s->int_no >= 32)
scheduler();
#else
// timer interrupt?
if ((s->int_no == 32) || (s->int_no == 123))
scheduler(); // switch to a new task
#endif
// exists a new (driver) task with a higher priority?
else if ((s->int_no >= 32) && (get_highest_priority(CORE_ID) > per_core(current_task)->prio))
scheduler();
}

View file

@@ -170,13 +170,13 @@ static void fpu_init(union fpu_state* fpu)
if (has_fxsr()) {
i387_fxsave_t* fx = &fpu->fxsave;
memset(fx, 0, sizeof(union fpu_state));
memset(fx, 0x00, sizeof(union fpu_state));
fx->cwd = 0x37f;
if (has_xmm())
fx->mxcsr = 0x1f80;
} else {
i387_fsave_t *fp = &fpu->fsave;
memset(fp, 0, sizeof(union fpu_state));
memset(fp, 0x00, sizeof(union fpu_state));
fp->cwd = 0xffff037fu;
fp->swd = 0xffff0000u;
fp->twd = 0xffffffffu;

View file

@@ -84,7 +84,7 @@ inline static size_t copy_page_table(task_t* task, uint32_t pgd_index, page_tabl
new_pgt = kmalloc(sizeof(page_table_t));
if (!new_pgt)
return 0;
memset(new_pgt, 0, sizeof(page_table_t));
memset(new_pgt, 0x00, sizeof(page_table_t));
if (counter)
(*counter)++;
@@ -137,7 +137,7 @@ int create_pgd(task_t* task, int copy)
pgd = kmalloc(sizeof(page_dir_t));
if (!pgd)
return -ENOMEM;
memset(pgd, 0, sizeof(page_dir_t));
memset(pgd, 0x00, sizeof(page_dir_t));
// create a new "page table container" for the new task
pgt = kmalloc(sizeof(page_table_t));
@@ -145,7 +145,7 @@ int create_pgd(task_t* task, int copy)
kfree(pgd, sizeof(page_dir_t));
return -ENOMEM;
}
memset(pgt, 0, sizeof(page_table_t));
memset(pgt, 0x00, sizeof(page_table_t));
spinlock_lock(&kslock);
@@ -328,9 +328,9 @@ size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flag
// clear the page table
if (paging_enabled)
memset((void*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index*PAGE_SIZE) & 0xFFFFF000), 0, PAGE_SIZE);
memset((void*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index*PAGE_SIZE) & 0xFFFFF000), 0x00, PAGE_SIZE);
else
memset(pgt, 0, PAGE_SIZE);
memset(pgt, 0x00, PAGE_SIZE);
} else pgt = (page_table_t*) (task->pgd->entries[index] & 0xFFFFF000);
/* convert physical address to virtual */

View file

@@ -90,7 +90,7 @@ static void intr_handler(struct state *s)
int tmp, z;
#ifdef CONFIG_LWIP
mmnif_irqhandler();
//mmnif_irqhandler();
#endif
z = Z_PID(RC_COREID[my_ue]);

View file

@@ -290,131 +290,135 @@ static void rckemacif_input(struct netif* netif, struct pbuf* p)
static void rckemacif_rx_handler(struct netif* netif, unsigned int write_offset)
{
rckemacif_t* rckemacif = netif->state;
unsigned short read_offset;
unsigned short read_offset = rckemacif->rx_read_offset;
unsigned int counter;
volatile void *addr;
uint16_t i, length;
volatile void *addr = NULL;
uint16_t i, length = 0;
uint32_t packets = 0;
struct pbuf *p;
struct pbuf* q;
if (write_offset > rckemacif->rx_buffer_max) {
LWIP_DEBUGF(NETIF_DEBUG, ("Warning, write offset > buffer max!! (%d > %d)\n", write_offset, rckemacif->rx_buffer_max));
read_offset = 1;
write_emac(rckemacif->num_emac, EMAC_RX_CONTROL + EMAC_RX_BUFFER_READ_OFFSET, rckemacif->core, read_offset);
rckemacif->rx_read_offset = read_offset;
return;
goto rxDone;
}
while(1) {
if ((write_offset != 0) && (rckemacif->rx_read_offset != write_offset)) {
read_offset = rckemacif->rx_read_offset;
read_offset++;
if (read_offset < 1 || read_offset > rckemacif->rx_buffer_max) {
read_offset = 1;
}
addr = rckemacif->rx_buffer + read_offset * 32;
again:
read_offset++;
if (read_offset < 1 || read_offset > rckemacif->rx_buffer_max) {
read_offset = 1;
}
addr = rckemacif->rx_buffer + read_offset * 32;
length = U16(addr);
length = U16(addr);
// Check for over/underflow
if ((length < 20) || (length > 1536)) {
LWIP_DEBUGF(NETIF_DEBUG, ("rckemacif_rx_handler(): illegal packet length %d => drop\n", length));
LWIP_DEBUGF(NETIF_DEBUG, ("start read at %d; write_offset at %d; addr: %p, packet len: %d\n", read_offset, write_offset, addr, length));
// Check for over/underflow
if ((length < 20) || (length > 1536)) {
LWIP_DEBUGF(NETIF_DEBUG, ("rckemacif_rx_handler(): illegal packet length %d => drop\n", length));
LWIP_DEBUGF(NETIF_DEBUG, ("start read at %d; write_offset at %d; addr: %p, packet len: %d\n", read_offset, write_offset, addr, length));
read_offset = write_offset;
read_offset = write_offset;
#if 1
kprintf("Buffer:\n");
for (i = 0; i < 32; i++) {
kprintf("%2.2x ", ((char*)addr)[i] & 0xFF);
}
kprintf("\n");
kprintf("Buffer:\n");
for (i = 0; i < 32; i++) {
kprintf("%2.2x ", ((char*)addr)[i] & 0xFF);
}
kprintf("\n");
kprintf("Buffer0:\n");
for (i = 0; i < 32; i++) {
kprintf("%2.2x ", ((char*)rckemacif->rx_buffer)[i] & 0xFF);
}
kprintf("\n");
kprintf("Buffer0:\n");
for (i = 0; i < 32; i++) {
kprintf("%2.2x ", ((char*)rckemacif->rx_buffer)[i] & 0xFF);
}
kprintf("\n");
#endif
LINK_STATS_INC(link.memerr);
LINK_STATS_INC(link.drop);
LINK_STATS_INC(link.memerr);
LINK_STATS_INC(link.drop);
goto rxDone;
goto rxDone;
}
#if ETH_PAD_SIZE
length += ETH_PAD_SIZE; /* allow room for Ethernet padding */
#endif
//LWIP_DEBUGF(NETIF_DEBUG, ("length %u, read_offset %u, write_offset %u\n", length, read_offset, write_offset));
p = pbuf_alloc(PBUF_RAW, length, PBUF_POOL);
if (p) {
#if ETH_PAD_SIZE
pbuf_header(p, -ETH_PAD_SIZE); /* drop the padding word */
#endif
if (read_offset < write_offset) {
for (q=p, i/*counter=0*/; q!=NULL; q=q->next) {
memcpy((uint8_t*) q->payload, (uint8_t*)addr + 2, q->len);
//for(i=0; i<q->len; i++, counter++) {
// ((uint8_t*) q->payload)[i] = ((uint8_t*)addr)[2 + counter];
//}
}
#if ETH_PAD_SIZE
length += ETH_PAD_SIZE; /* allow room for Ethernet padding */
#endif
//LWIP_DEBUGF(NETIF_DEBUG, ("length %u, read_offset %u, write_offset %u\n", length, read_offset, write_offset));
read_offset += CLINE_PACKETS(p->len + 2) - 1;
} else {
int rest;
int bytesLeft = length;
int bytesToCopy = length;
p = pbuf_alloc(PBUF_RAW, length, PBUF_POOL);
if (p) {
#if ETH_PAD_SIZE
pbuf_header(p, -ETH_PAD_SIZE); /* drop the padding word */
#endif
if (read_offset < write_offset) {
for (q=p, counter=0; q!=NULL; q=q->next) {
for(i=0; i<q->len; i++, counter++) {
((uint8_t*) q->payload)[i] = ((uint8_t*)addr)[2 + counter];
}
}
/* rest to the end of buffer - 2 bytes length information */
rest = (rckemacif->rx_buffer_max - read_offset + 1) * 32 - 2;
if (length > rest)
bytesToCopy = rest;
LWIP_DEBUGF(NETIF_DEBUG, ("bytes to copy: %d, bytesLeft: %d\n", bytesToCopy, bytesLeft));
read_offset += CLINE_PACKETS(p->len + 2) - 1;
} else {
int rest;
int bytesLeft = length;
int bytesToCopy = length;
/* rest to the end of buffer - 2 bytes length information */
rest = (rckemacif->rx_buffer_max - read_offset + 1) * 32 - 2;
if (length > rest)
bytesToCopy = rest;
LWIP_DEBUGF(NETIF_DEBUG, ("bytes to copy: %d, bytesLeft: %d\n", bytesToCopy, bytesLeft));
for (q=p, counter=0; q!=NULL; q=q->next) {
for(i=0; i<q->len; i++, counter++) {
if (counter < bytesToCopy)
((uint8_t*) q->payload)[i] = ((uint8_t*)addr)[2 + counter];
else
goto out;
}
}
for (q=p, counter=0; q!=NULL; q=q->next) {
for(i=0; i<q->len; i++, counter++) {
if (counter < bytesToCopy)
((uint8_t*) q->payload)[i] = ((uint8_t*)addr)[2 + counter];
else
goto out;
}
}
out:
bytesLeft -= bytesToCopy;
bytesLeft -= bytesToCopy;
if (bytesLeft != 0) {
addr = rckemacif->rx_buffer + 0x20;
LWIP_DEBUGF(NETIF_DEBUG, ("copying from %p, left: %d (%x)\n", addr, bytesLeft, ((uint8_t*)addr)[0]));
for(counter=0; (i<q->len) && (counter < bytesLeft); i++, counter++)
((uint8_t*) q->payload)[i] = ((uint8_t*)addr)[counter];
for(q=q->next; (q!=NULL) && (counter < bytesLeft); q=q->next) {
for(i=0; (i<q->len) && (counter < bytesLeft); i++, counter++) {
((uint8_t*) q->payload)[i] = ((uint8_t*)addr)[counter];
}
}
read_offset = CLINE_PACKETS(bytesLeft);
} else {
read_offset += CLINE_PACKETS(p->len + 2) - 1;
if (bytesLeft != 0) {
addr = rckemacif->rx_buffer + 0x20;
LWIP_DEBUGF(NETIF_DEBUG, ("copying from %p, left: %d (%x)\n", addr, bytesLeft, ((uint8_t*)addr)[0]));
for(counter=0; (i<q->len) && (counter < bytesLeft); i++, counter++)
((uint8_t*) q->payload)[i] = ((uint8_t*)addr)[counter];
for(q=q->next; (q!=NULL) && (counter < bytesLeft); q=q->next) {
for(i=0; (i<q->len) && (counter < bytesLeft); i++, counter++) {
((uint8_t*) q->payload)[i] = ((uint8_t*)addr)[counter];
}
}
}
read_offset = CLINE_PACKETS(bytesLeft);
} else {
read_offset += CLINE_PACKETS(p->len + 2) - 1;
}
}
#if ETH_PAD_SIZE
pbuf_header(p, ETH_PAD_SIZE); /* reclaim the padding word */
pbuf_header(p, ETH_PAD_SIZE); /* reclaim the padding word */
#endif
rckemacif_input(netif, p);
LINK_STATS_INC(link.recv);
} else {
LWIP_DEBUGF(NETIF_DEBUG, ("rckemacif_rx_inthandler: not enough memory!\n"));
LINK_STATS_INC(link.memerr);
LINK_STATS_INC(link.drop);
}
rckemacif_input(netif, p);
LINK_STATS_INC(link.recv);
} else {
LWIP_DEBUGF(NETIF_DEBUG, ("rckemacif_rx_inthandler: not enough memory!\n"));
LINK_STATS_INC(link.memerr);
LINK_STATS_INC(link.drop);
}
packets++;
rxDone:
/* set new read pointer */
//LWIP_DEBUGF(NETIF_DEBUG, ("Update rx read offset: %d\n", read_offset));
write_emac(rckemacif->num_emac, EMAC_RX_CONTROL + EMAC_RX_BUFFER_READ_OFFSET, rckemacif->core, read_offset);
rckemacif->rx_read_offset = read_offset;
} else break;
/* set new read pointer */
//LWIP_DEBUGF(NETIF_DEBUG, ("Update rx read offset: %d\n", read_offset));
write_emac(rckemacif->num_emac, EMAC_RX_CONTROL + EMAC_RX_BUFFER_READ_OFFSET, rckemacif->core, read_offset);
rckemacif->rx_read_offset = read_offset;
if (read_offset != write_offset) {
if (packets < 5 /*max_num*/)
goto again;
}
}

View file

@@ -80,7 +80,7 @@ inline static int spinlock_destroy(spinlock_t* s) {
* - -EINVAL (-22) on failure
*/
inline static int spinlock_lock(spinlock_t* s) {
int32_t ticket;
//int32_t ticket;
task_t* curr_task;
if (BUILTIN_EXPECT(!s, 0))
@@ -172,7 +172,7 @@ inline static int spinlock_irqsave_destroy(spinlock_irqsave_t* s) {
*/
inline static int spinlock_irqsave_lock(spinlock_irqsave_t* s) {
uint32_t flags;
int32_t ticket;
//int32_t ticket;
if (BUILTIN_EXPECT(!s, 0))
return -EINVAL;

View file

@@ -176,6 +176,13 @@ size_t get_idle_task(uint32_t id);
*/
int sys_execve(const char* fname, char** argv, char** env);
/** @brief determines the highest priority of all ready tasks on core core_id
*
* @param core_id core id
* @return highest priority
*/
uint32_t get_highest_priority(uint32_t core_id);
/** @brief Call to rescheduling
*
* This is a purely assembled procedure for rescheduling

View file

@@ -93,7 +93,6 @@ int network_init(void)
// Initialize lwIP modules
tcpip_init(tcp_init_ok, NULL);
lwip_initialized = 0;
while(!lwip_initialized) {
reschedule();
}
@@ -154,7 +153,7 @@ int network_init(void)
}
}
#else
mmnif_open();
//mmnif_open();
#endif
// start echo and ping server
@@ -170,7 +169,7 @@ int network_shutdown(void)
{
#ifdef CONFIG_LWIP
#ifdef CONFIG_ROCKCREEK
mmnif_close();
//mmnif_close();
#elif defined(CONFIG_PCI)
dhcp_release(default_netif);
dhcp_stop(default_netif);

View file

@@ -68,6 +68,10 @@ task_t* get_current_task(void) {
return per_core(current_task);
}
uint32_t get_highest_priority(uint32_t core_id) {
return last_set(runqueues[core_id].prio_bitmap);
}
int multitasking_init(void) {
if (BUILTIN_EXPECT(task_table[0].status != TASK_IDLE, 0)) {
kputs("Task 0 is not an idle task\n");
@@ -543,7 +547,7 @@ static int load_task(load_args_t* largs)
kprintf("Could not map 0x%x at 0x%x\n", addr, prog_header.virt_addr);
// clear pages
memset((void*) prog_header.virt_addr, 0, npages*PAGE_SIZE);
memset((void*) prog_header.virt_addr, 0x00, npages*PAGE_SIZE);
// set starting point of the heap
if (curr_task->start_heap < prog_header.virt_addr+prog_header.mem_size)
@@ -578,7 +582,7 @@ static int load_task(load_args_t* largs)
kprintf("Could not map stack at 0x%x\n", stack);
return -ENOMEM;
}
memset((void*) stack, 0, npages*PAGE_SIZE);
memset((void*) stack, 0x00, npages*PAGE_SIZE);
// create vma regions for the user-level stack
flags = VMA_CACHEABLE;
@@ -1013,8 +1017,9 @@ int set_timer(uint64_t deadline)
if (runqueues[core_id].timers.last)
runqueues[core_id].timers.last->next = curr_task;
runqueues[core_id].timers.last = curr_task;
if (!runqueues[core_id].timers.first)
runqueues[core_id].timers.first = curr_task;
// obsolete lines...
//if (!runqueues[core_id].timers.first)
// runqueues[core_id].timers.first = curr_task;
} else {
curr_task->prev = tmp->prev;
curr_task->next = tmp;
@@ -1194,7 +1199,9 @@ void scheduler(void)
// remove timer from queue
runqueues[core_id].timers.first = runqueues[core_id].timers.first->next;
if (!runqueues[core_id].timers.first)
if (runqueues[core_id].timers.first)
runqueues[core_id].timers.first->prev = NULL;
else
runqueues[core_id].timers.last = NULL;
task->flags &= ~TASK_TIMER;