Add a prototype of a tickless kernel.

The tick counter is now updated only when the kernel is entered. With this feature enabled, MetalSVM supports only lwIP's raw interface, because there is no guarantee that the tcpip thread will get computation time. Consequently, there is no NETCONN or BSD socket support and no load-balancing support.
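Because the NETCONN and socket layers are unavailable in the tickless configuration, an application has to use lwIP's raw/callback API, and the main loop has to drive lwIP's protocol timers itself (in NO_SYS mode there is no tcpip thread to do it). The following is only a minimal sketch of such a loop: the function name net_main_loop is hypothetical, sys_check_timeouts() assumes an lwIP release that provides it (older releases require calling tcp_tmr(), etharp_tmr(), etc. manually), and HALT and echo_init() are the symbols touched or added by this commit.

#include "lwip/init.h"           /* lwip_init() */
#include "lwip/timers.h"         /* sys_check_timeouts(); named lwip/timeouts.h in newer lwIP releases */
#include <metalsvm/processor.h>  /* HALT macro (assumed location, see the processor.h hunk below) */

void echo_init(void);            /* raw-API echo server added by this commit (apps/echo.c) */

/* hypothetical NO_SYS main loop */
static void net_main_loop(void)
{
    lwip_init();                 /* bring up lwIP without the tcpip thread */
    echo_init();                 /* register the raw-API callbacks on port 7 */

    while (1) {
        /* received frames are pushed from the IRQ handler (e.g. rtl8139if_poll),
         * so only the ARP/TCP/DHCP timers have to be triggered here */
        sys_check_timeouts();
        HALT;                    /* idle; under CONFIG_TICKLESS this expands to a NOP burst */
    }
}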
parent d9f311877c
commit 24914fcb4f

22 changed files with 531 additions and 23 deletions
295  apps/echo.c

@@ -101,6 +101,299 @@ echo_init(void)
}
/*-----------------------------------------------------------------------------------*/

#else

#include <lwip/debug.h>
#include <lwip/stats.h>
#include <lwip/tcp.h>

/*
 * TCP echo server example using raw API.
 *
 * Echoes all bytes sent by a connecting client,
 * and passively closes when the client is done.
 *
 */


static struct tcp_pcb *echo_pcb;

enum echo_states
{
  ES_NONE = 0,
  ES_ACCEPTED,
  ES_RECEIVED,
  ES_CLOSING
};

struct echo_state
{
  u8_t state;
  u8_t retries;
  struct tcp_pcb *pcb;
  /* pbuf (chain) to recycle */
  struct pbuf *p;
};

static err_t echo_accept(void *arg, struct tcp_pcb *newpcb, err_t err);
static err_t echo_recv(void *arg, struct tcp_pcb *tpcb, struct pbuf *p, err_t err);
static void echo_error(void *arg, err_t err);
static err_t echo_poll(void *arg, struct tcp_pcb *tpcb);
static err_t echo_sent(void *arg, struct tcp_pcb *tpcb, u16_t len);
static void echo_send(struct tcp_pcb *tpcb, struct echo_state *es);
static void echo_close(struct tcp_pcb *tpcb, struct echo_state *es);

void
echo_init(void)
{
  echo_pcb = tcp_new();
  if (echo_pcb != NULL)
  {
    err_t err;

    err = tcp_bind(echo_pcb, IP_ADDR_ANY, 7);
    if (err == ERR_OK)
    {
      echo_pcb = tcp_listen(echo_pcb);
      tcp_accept(echo_pcb, echo_accept);
    } else {
      /* abort? output diagnostic? */
    }
  } else {
    /* abort? output diagnostic? */
  }
}

static err_t
echo_accept(void *arg, struct tcp_pcb *newpcb, err_t err)
{
  err_t ret_err;
  struct echo_state *es;

  LWIP_UNUSED_ARG(arg);
  LWIP_UNUSED_ARG(err);

  /* commonly observed practice to call tcp_setprio(), why? */
  tcp_setprio(newpcb, TCP_PRIO_MIN);

  es = (struct echo_state *)mem_malloc(sizeof(struct echo_state));
  if (es != NULL)
  {
    es->state = ES_ACCEPTED;
    es->pcb = newpcb;
    es->retries = 0;
    es->p = NULL;
    /* pass newly allocated es to our callbacks */
    tcp_arg(newpcb, es);
    tcp_recv(newpcb, echo_recv);
    tcp_err(newpcb, echo_error);
    tcp_poll(newpcb, echo_poll, 0);
    ret_err = ERR_OK;
  } else {
    ret_err = ERR_MEM;
  }
  return ret_err;
}

static err_t
echo_recv(void *arg, struct tcp_pcb *tpcb, struct pbuf *p, err_t err)
{
  struct echo_state *es;
  err_t ret_err;

  LWIP_ASSERT("arg != NULL", arg != NULL);
  es = (struct echo_state *)arg;
  if (p == NULL)
  {
    /* remote host closed connection */
    es->state = ES_CLOSING;
    if (es->p == NULL)
    {
      /* we're done sending, close it */
      echo_close(tpcb, es);
    } else {
      /* we're not done yet */
      tcp_sent(tpcb, echo_sent);
      echo_send(tpcb, es);
    }
    ret_err = ERR_OK;
  } else if (err != ERR_OK) {
    /* cleanup, for unknown reason */
    if (p != NULL)
    {
      es->p = NULL;
      pbuf_free(p);
    }
    ret_err = err;
  } else if (es->state == ES_ACCEPTED) {
    /* first data chunk in p->payload */
    es->state = ES_RECEIVED;
    /* store reference to incoming pbuf (chain) */
    es->p = p;
    /* install send completion notifier */
    tcp_sent(tpcb, echo_sent);
    echo_send(tpcb, es);
    ret_err = ERR_OK;
  } else if (es->state == ES_RECEIVED) {
    /* read some more data */
    if (es->p == NULL)
    {
      es->p = p;
      tcp_sent(tpcb, echo_sent);
      echo_send(tpcb, es);
    } else {
      struct pbuf *ptr;

      /* chain pbufs to the end of what we recv'ed previously */
      ptr = es->p;
      pbuf_chain(ptr, p);
    }
    ret_err = ERR_OK;
  } else if (es->state == ES_CLOSING) {
    /* odd case, remote side closing twice, trash data */
    tcp_recved(tpcb, p->tot_len);
    es->p = NULL;
    pbuf_free(p);
    ret_err = ERR_OK;
  } else {
    /* unknown es->state, trash data */
    tcp_recved(tpcb, p->tot_len);
    es->p = NULL;
    pbuf_free(p);
    ret_err = ERR_OK;
  }
  return ret_err;
}

static void
echo_error(void *arg, err_t err)
{
  struct echo_state *es;

  LWIP_UNUSED_ARG(err);

  es = (struct echo_state *)arg;
  if (es != NULL)
  {
    mem_free(es);
  }
}

static err_t
echo_poll(void *arg, struct tcp_pcb *tpcb)
{
  err_t ret_err;
  struct echo_state *es;

  es = (struct echo_state *)arg;
  if (es != NULL)
  {
    if (es->p != NULL)
    {
      /* there is a remaining pbuf (chain) */
      tcp_sent(tpcb, echo_sent);
      echo_send(tpcb, es);
    } else {
      /* no remaining pbuf (chain) */
      if (es->state == ES_CLOSING)
      {
        echo_close(tpcb, es);
      }
    }
    ret_err = ERR_OK;
  } else {
    /* nothing to be done */
    tcp_abort(tpcb);
    ret_err = ERR_ABRT;
  }
  return ret_err;
}

static err_t
echo_sent(void *arg, struct tcp_pcb *tpcb, u16_t len)
{
  struct echo_state *es;

  LWIP_UNUSED_ARG(len);

  es = (struct echo_state *)arg;
  es->retries = 0;

  if (es->p != NULL)
  {
    /* still got pbufs to send */
    tcp_sent(tpcb, echo_sent);
    echo_send(tpcb, es);
  } else {
    /* no more pbufs to send */
    if (es->state == ES_CLOSING)
    {
      echo_close(tpcb, es);
    }
  }
  return ERR_OK;
}

static void
echo_send(struct tcp_pcb *tpcb, struct echo_state *es)
{
  struct pbuf *ptr;
  err_t wr_err = ERR_OK;

  while ((wr_err == ERR_OK) &&
         (es->p != NULL) &&
         (es->p->len <= tcp_sndbuf(tpcb)))
  {
    ptr = es->p;

    /* enqueue data for transmission */
    wr_err = tcp_write(tpcb, ptr->payload, ptr->len, 1);
    if (wr_err == ERR_OK)
    {
      u16_t plen;
      u8_t freed;

      plen = ptr->len;
      /* continue with next pbuf in chain (if any) */
      es->p = ptr->next;
      if (es->p != NULL)
      {
        /* new reference! */
        pbuf_ref(es->p);
      }
      /* chop first pbuf from chain */
      do
      {
        /* try hard to free pbuf */
        freed = pbuf_free(ptr);
      } while (freed == 0);
      /* we can read more data now */
      tcp_recved(tpcb, plen);
    } else if (wr_err == ERR_MEM) {
      /* we are low on memory, try later / harder, defer to poll */
      es->p = ptr;
    } else {
      /* other problem ?? */
    }
  }
}

static void
echo_close(struct tcp_pcb *tpcb, struct echo_state *es)
{
  tcp_arg(tpcb, NULL);
  tcp_sent(tpcb, NULL);
  tcp_recv(tpcb, NULL);
  tcp_err(tpcb, NULL);
  tcp_poll(tpcb, NULL, 0);

  if (es != NULL)
  {
    mem_free(es);
  }
  tcp_close(tpcb);
}

#endif /* LWIP_NETCONN */

#endif
#endif /* CONFIG_LWIP */
@@ -19,7 +19,7 @@

#include <metalsvm/stdlib.h>

#ifdef CONFIG_LWIP
#if defined(CONFIG_LWIP) && LWIP_SOCKET

#include "gfx_client.h"
#include <lwip/sockets.h>

@@ -20,7 +20,7 @@
#include "gfx_client.h"
#include "gfx_generic.h"

#ifdef CONFIG_LWIP
#if defined(CONFIG_LWIP) && LWIP_SOCKET

int GFX_update()
{

@@ -46,7 +46,7 @@

/* See http://www.nwlab.net/art/netio/netio.html to get the netio tool */

#ifdef CONFIG_LWIP
#if defined(CONFIG_LWIP) && LWIP_SOCKET
#ifdef CONFIG_ROCKCREEK
#if USE_SOCKET_BYPASSING // for socket bypassing
#include <lwip/opt.h>
@@ -193,6 +193,8 @@ uint32_t apic_cpu_id(void);
int apic_calibration(void);
int has_apic(void);
int apic_is_enabled(void);
int apic_enable_timer(void);
int apic_disable_timer(void);
int ioapic_inton(uint8_t irq, uint8_t apicid);
int ioapic_intoff(uint8_t irq, uint8_t apicid);
int map_apic(void);
@@ -65,6 +65,18 @@ int irq_uninstall_handler(unsigned int irq);
 */
int irq_init(void);

/** @brief Disable the timer interrupt
 *
 * @return 0 on success
 */
int disable_timer_irq(void);

/** @brief Enable the timer interrupt
 *
 * @return 0 on success
 */
int enable_timer_irq(void);

#ifdef __cplusplus
}
#endif
@@ -321,7 +321,11 @@ uint32_t read_eip(void);
#define NOP4 asm volatile ("nop;nop;nop;nop")
/// Do nothing for 8 instructions
#define NOP8 asm volatile ("nop;nop;nop;nop;nop;nop;nop;nop")
#define HALT asm volatile ("hlt");
#ifndef CONFIG_TICKLESS
#define HALT asm volatile ("hlt")
#else
#define HALT asm volatile ("nop;nop;nop;nop;nop;nop;nop;nop")
#endif

/** @brief Init several subsystems
 *
@@ -38,6 +38,9 @@
#include <asm/RCCE_lib.h>
#endif

void start_tickless(void);
void end_tickless(void);

#if defined(CONFIG_ROCKCREEK) && (MAX_CORES > 1)
#error RockCreek is not a SMP system
#endif
@@ -154,6 +157,31 @@ int apic_is_enabled(void)
    return (lapic && initialized);
}

int apic_disable_timer(void)
{
    if (BUILTIN_EXPECT(!apic_is_enabled(), 0))
        return -EINVAL;

    lapic_write(APIC_LVT_T, 0x10000);   // disable timer interrupt
    start_tickless();

    return 0;
}

int apic_enable_timer(void)
{
    if (BUILTIN_EXPECT(apic_is_enabled() && icr, 1)) {
        lapic_write(APIC_DCR, 0xB);       // set it to 1 clock increments
        lapic_write(APIC_LVT_T, 0x2007B); // connects the timer to 123 and enables it
        lapic_write(APIC_ICR, icr);
        end_tickless();

        return 0;
    }

    return -EINVAL;
}

#if MAX_CORES > 1
static inline void set_ipi_dest(uint32_t cpu_id) {
    uint32_t tmp;
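In the local APIC's LVT timer register, bit 16 is the interrupt mask bit and bit 17 selects periodic mode: writing 0x10000 therefore masks the timer interrupt, while 0x2007B re-arms it in periodic mode with interrupt vector 0x7B (decimal 123).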
@@ -129,6 +129,22 @@ static int irq_remap(void)
    return 0;
}

int disable_timer_irq(void)
{
    if (BUILTIN_EXPECT(apic_is_enabled(), 1))
        return apic_disable_timer();

    return -EINVAL;
}

int enable_timer_irq(void)
{
    if (BUILTIN_EXPECT(apic_is_enabled(), 1))
        return apic_enable_timer();

    return -EINVAL;
}

/** @brief Remap IRQs and install ISRs in IDT
 *
 * We first remap the interrupt controllers, and then we install
@@ -39,12 +39,47 @@ static volatile uint64_t timer_ticks = 0;
#if MAX_CORES > 1
extern atomic_int32_t cpu_online;
#endif
static int8_t use_tickless = 0;
static uint64_t last_rdtsc = 0;

uint64_t get_clock_tick(void)
{
    return timer_ticks;
}

void start_tickless(void)
{
    use_tickless = 1;
    last_rdtsc = rdtsc();
    mb();
}

void end_tickless(void)
{
    use_tickless = 0;
    last_rdtsc = 0;
}

void check_ticks(void)
{
    if (!use_tickless)
        return;

#if MAX_CORES > 1
    if (smp_id() == 0)
#endif
    {
        uint64_t curr_rdtsc = rdtsc();

        mb();
        if (curr_rdtsc - last_rdtsc > 1000000ULL*(uint64_t)get_cpu_frequency() / (uint64_t)TIMER_FREQ) {
            timer_ticks++;
            last_rdtsc = curr_rdtsc;
            mb();
        }
    }
}

int sys_times(struct tms* buffer, clock_t* clock)
{
    if (BUILTIN_EXPECT(!buffer, 0))
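check_ticks() emulates the periodic tick from the TSC: assuming get_cpu_frequency() reports the core clock in MHz (so 1000000ULL * get_cpu_frequency() is the frequency in Hz), the threshold is the number of TSC cycles per timer period. With a hypothetical 1000 MHz clock and TIMER_FREQ = 100 Hz, that is 1000 * 1000000 / 100 = 10,000,000 cycles, i.e. one tick is accounted roughly every 10 ms of elapsed TSC time. Note that at most one tick is accounted per call, however long the gap since the last kernel entry.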
@@ -85,12 +120,14 @@ static void timer_handler(struct state *s)
        // dump_load();
    }

#ifndef CONFIG_TICKLESS
    update_load();

#if MAX_CORES > 1
    if (atomic_int32_read(&cpu_online) > 1)
        load_balancing();
#endif
#endif
}

int timer_wait(unsigned int ticks)
@@ -28,7 +28,7 @@

#define AF_MMNIF_NET 0x42

#define MMNIF_AUTOACTIVATE_FAST_SOCKETS 1
#define MMNIF_AUTOACTIVATE_FAST_SOCKETS LWIP_SOCKET

#if MMNIF_AUTOACTIVATE_FAST_SOCKETS
@@ -256,7 +256,7 @@ static void rtl_tx_inthandler(struct netif* netif)
    }
}

/* this function is called in the context of the tcpip thread */
/* this function is called in the context of the tcpip thread or the irq handler (by using NO_SYS) */
static void rtl8139if_poll(void* ctx)
{
    rtl_rx_inthandler(mynetif);

@@ -276,11 +276,15 @@ static void rtl8139if_handler(struct state* s)
            break;

        if ((isr_contents & ISR_ROK) && !rtl8139if->polling) {
#if NO_SYS
            rtl8139if_poll(NULL);
#else
            if (tcpip_callback_with_block(rtl8139if_poll, NULL, 0) == ERR_OK) {
                rtl8139if->polling = 1;
            } else {
                LWIP_DEBUGF(NETIF_DEBUG, ("rtl8139if_handler: unable to send a poll request to the tcpip thread\n"));
            }
#endif
        }

        if (isr_contents & ISR_TOK)
@@ -56,6 +56,7 @@ extern "C" {
#define CONFIG_KEYBOARD
#define CONFIG_MULTIBOOT
//#define CONFIG_ROCKCREEK
//#define CONFIG_TICKLESS

#ifdef CONFIG_ROCKCREEK
#ifndef __SIZEOF_POINTER__
@@ -86,6 +86,7 @@ int create_user_task(tid_t* id, const char* fame, char** argv);
 */
tid_t wait(int32_t* result);

#ifndef CONFIG_TICKLESS
/** @brief Update the load of the current core
 *
 * This function is called from the timer interrupt

@@ -106,6 +107,7 @@ void dump_load(void);
 */
void load_balancing(void);
#endif
#endif

/** @brief Task switcher
 *

@@ -146,6 +148,11 @@ int block_current_task(void);
 */
int set_timer(uint64_t deadline);

/** @brief Check if a timer is expired
 *
 */
void check_timers(void);

/** @brief Abort current task */
void NORETURN abort(void);

@@ -205,12 +212,19 @@ uint32_t get_highest_priority(void);
 */
void reschedule(void);

/** @brief Check if the tick counter has to be updated
 */
void check_ticks(void);

static inline void check_workqueues_in_irqhandler(int irq)
{
#ifdef CONFIG_ROCKCREEK
    if (irq != 124)
        icc_mail_check();
#endif
    check_ticks();
    check_timers();

    if (irq < 0)
        check_scheduling();
}
@@ -124,12 +124,14 @@ typedef struct {
    task_t* old_task;
    /// total number of tasks in the queue
    uint32_t nr_tasks;
#ifndef CONFIG_TICKLESS
    // current load = average number of tasks in the queue (1-minute average)
    uint32_t load[3];
    // help counter to determine the cpu load
    int32_t load_counter;
    // help counter to avoid "over balancing"
    int32_t balance_counter;
#endif
    /// indicates the used priority queues
    uint32_t prio_bitmap;
    /// a queue for each priority
@@ -87,7 +87,9 @@ static int init_netifs(void)
    struct ip_addr ipaddr;
    struct ip_addr netmask;
    struct ip_addr gw;
#if !NO_SYS
    err_t err;
#endif

    kputs("Initialize NICs...\n");

@@ -102,6 +104,12 @@ static int init_netifs(void)
    IP4_ADDR(&netmask, 255,255,255,0);

    /* Bring up the network interface */
#if NO_SYS
    netif_add(&default_netif, &ipaddr, &netmask, &gw, NULL, rckemacif_init, ethernet_input);

    netif_set_default(&default_netif);
    netif_set_up(&default_netif);
#else
    if ((err = netifapi_netif_add(&default_netif, &ipaddr, &netmask, &gw, NULL, rckemacif_init, tcpip_input)) != ERR_OK) {
        kprintf("Unable to add the network interface: err = %d\n", err);
        return -ENODEV;

@@ -109,6 +117,7 @@ static int init_netifs(void)

    netifapi_netif_set_default(&default_netif);
    netifapi_netif_set_up(&default_netif);
#endif

    /* Bring up the intra network interface */
    struct ip_addr intra_ipaddr;

@@ -127,6 +136,12 @@ static int init_netifs(void)
     * - mmnif_init : the initialization which has to be done in order to use our interface
     * - ethernet_input : tells lwIP that it should get ethernet input (inclusive ARP)
     */
#if NO_SYS
    netif_add(&mmnif_netif, &intra_ipaddr, &intra_netmask, &intra_gw, NULL, mmnif_init, ethernet_input);

    /* tell lwip all initialization is done and we want to set it up */
    netif_set_up(&mmnif_netif);
#else
    if ((err = netifapi_netif_add(&mmnif_netif, &intra_ipaddr, &intra_netmask, &intra_gw, NULL, mmnif_init, tcpip_input)) != ERR_OK)
    {
        kprintf("Unable to add the intra network interface: err = %d\n", err);

@@ -135,6 +150,7 @@ static int init_netifs(void)

    /* tell lwip all initialization is done and we want to set it up */
    netifapi_netif_set_up(&mmnif_netif);
#endif
#else
    /* Clear network address because we use DHCP to get an ip address */
    IP4_ADDR(&gw, 0,0,0,0);
@@ -142,15 +158,25 @@ static int init_netifs(void)
    IP4_ADDR(&netmask, 0,0,0,0);

    /* Bring up the network interface */
#if NO_SYS
    uint32_t flags = irq_nested_disable();
    netif_add(&default_netif, &ipaddr, &netmask, &gw, NULL, rtl8139if_init, ethernet_input);
    netif_set_default(&default_netif);
#else
    if ((err = netifapi_netif_add(&default_netif, &ipaddr, &netmask, &gw, NULL, rtl8139if_init, tcpip_input)) != ERR_OK) {
        kprintf("Unable to add the network interface: err = %d\n", err);
        return -ENODEV;
    }

    netifapi_netif_set_default(&default_netif);
#endif

    kprintf("Starting DHCPCD...\n");
#if NO_SYS
    dhcp_start(&default_netif);
    irq_nested_enable(flags);
#else
    netifapi_dhcp_start(&default_netif);
#endif

    int mscnt = 0;
    /* wait for ip address */

@@ -170,6 +196,7 @@ static int init_netifs(void)
#endif

#ifdef CONFIG_LWIP
#if !NO_SYS
static void tcpip_init_done(void* arg)
{
    sys_sem_t* sem = (sys_sem_t*)arg;

@@ -179,6 +206,7 @@ static void tcpip_init_done(void* arg)
    sys_sem_signal(sem);
}
#endif
#endif

int network_shutdown(void)
{

@@ -186,7 +214,11 @@ int network_shutdown(void)
    mmnif_shutdown();
    netifapi_netif_set_down(&default_netif);
#elif defined(CONFIG_LWIP) && defined(CONFIG_PCI)
#if NO_SYS
    dhcp_stop(&default_netif);
#else
    netifapi_dhcp_stop(&default_netif);
#endif
#endif

    return 0;
@@ -237,17 +269,24 @@ static void list_root(void) {
int initd(void* arg)
{
#ifdef CONFIG_LWIP
#if !NO_SYS
    sys_sem_t sem;
#endif
    tid_t id;
    char* argv[] = {"/bin/rlogind ", NULL};

    // Initialize lwIP modules
#if NO_SYS
    lwip_init();
#else
    if(sys_sem_new(&sem, 0) != ERR_OK)
        LWIP_ASSERT("Failed to create semaphore", 0);

    tcpip_init(tcpip_init_done, &sem);
    sys_sem_wait(&sem);
    kprintf("TCP/IP initialized.\n");
    sys_sem_free(&sem);
#endif

#if defined(CONFIG_LWIP) && (defined(CONFIG_PCI) || defined(CONFIG_ROCKCREEK))
    init_netifs();

@@ -261,8 +300,10 @@ int initd(void* arg)

    // start echo, netio and rlogind
    echo_init();
#if !NO_SYS
    create_user_task(&id, "/bin/rlogind", argv);
    kprintf("Create rlogind with id %u\n", id);
#endif
    //netio_init();
#endif
@@ -50,7 +50,12 @@ int smp_main(void)
{
    irq_enable();

#ifdef CONFIG_TICKLESS
    disable_timer_irq();
#endif

    while(1) {
        check_workqueues();
        HALT;
    }

@@ -92,6 +97,10 @@ int main(void)
    kprintf("Current allocated memory: %u KBytes\n", atomic_int32_read(&total_allocated_pages)*(PAGE_SIZE/1024));
    kprintf("Current available memory: %u MBytes\n", atomic_int32_read(&total_available_pages)/((1024*1024)/PAGE_SIZE));

#ifdef CONFIG_TICKLESS
    disable_timer_irq();
#endif

    sleep(5);
    create_kernel_task(&id, initd, NULL, NORMAL_PRIO);
    kprintf("Create initd with id %u\n", id);
@@ -50,6 +50,7 @@ static task_t task_table[MAX_TASKS] = { \
    [0] = {0, TASK_IDLE, NULL, 0, 0, 0, NULL, NULL, 0, ATOMIC_INIT(0), SPINLOCK_INIT, NULL, SPINLOCK_INIT, NULL, NULL, 0, 0, 0, 0}, \
    [1 ... MAX_TASKS-1] = {0, TASK_INVALID, NULL, 0, 0, 0, NULL, NULL, 0, ATOMIC_INIT(0), SPINLOCK_INIT, NULL, SPINLOCK_INIT, NULL, NULL, 0, 0, 0, 0}};
static spinlock_irqsave_t table_lock = SPINLOCK_IRQSAVE_INIT;
#ifndef CONFIG_TICKLESS
#if MAX_CORES > 1
static runqueue_t runqueues[MAX_CORES] = { \
    [0] = {task_table+0, NULL, 0, {[0 ... 2] = 0}, TIMER_FREQ/5, TIMER_FREQ/2, 0, {[0 ... MAX_PRIO-1] = {NULL, NULL}}, {NULL, NULL}, SPINLOCK_IRQSAVE_INIT}, \

@@ -58,6 +59,16 @@ static runqueue_t runqueues[MAX_CORES] = { \
static runqueue_t runqueues[1] = { \
    [0] = {task_table+0, NULL, 0, {[0 ... 2] = 0}, TIMER_FREQ/5, TIMER_FREQ/2, 0, {[0 ... MAX_PRIO-1] = {NULL, NULL}}, {NULL, NULL}, SPINLOCK_IRQSAVE_INIT}};
#endif
#else
#if MAX_CORES > 1
static runqueue_t runqueues[MAX_CORES] = { \
    [0] = {task_table+0, NULL, 0, 0, {[0 ... MAX_PRIO-1] = {NULL, NULL}}, {NULL, NULL}, SPINLOCK_IRQSAVE_INIT}, \
    [1 ... MAX_CORES-1] = {NULL, NULL, 0, 0, {[0 ... MAX_PRIO-1] = {NULL, NULL}}, {NULL, NULL}, SPINLOCK_IRQSAVE_INIT}};
#else
static runqueue_t runqueues[1] = { \
    [0] = {task_table+0, NULL, 0, 0, {[0 ... MAX_PRIO-1] = {NULL, NULL}}, {NULL, NULL}, SPINLOCK_IRQSAVE_INIT}};
#endif
#endif

DEFINE_PER_CORE(task_t*, current_task, task_table+0);
@@ -1083,6 +1094,8 @@ int set_timer(uint64_t deadline)
    return ret;
}

#ifndef CONFIG_TICKLESS

/* determining the load as fix-point */
#define FSHIFT 11 /* nr of bits of precision */
#define FIXED_1 (1<<FSHIFT) /* 1.0 as fixed-point */

@@ -1261,21 +1274,14 @@ void load_balancing(void)
}
#endif

size_t** scheduler(void)
#endif // CONFIG_TICKLESS

void check_timers(void)
{
    task_t* orig_task;
    task_t* curr_task;
    uint32_t core_id = CORE_ID;
    uint32_t prio;
    uint64_t current_tick;

    orig_task = curr_task = per_core(current_task);
    curr_task->last_core = core_id;

    /* signalizes that this task could be reused */
    if (curr_task->status == TASK_FINISHED)
        curr_task->status = TASK_INVALID;

    spinlock_irqsave_lock(&runqueues[core_id].lock);

    // check timers

@@ -1314,8 +1320,28 @@ size_t** scheduler(void)
        }
    }

    spinlock_irqsave_unlock(&runqueues[core_id].lock);
}

size_t** scheduler(void)
{
    task_t* orig_task;
    task_t* curr_task;
    uint32_t core_id = CORE_ID;
    uint32_t prio;

    orig_task = curr_task = per_core(current_task);
    curr_task->last_core = core_id;

    /* signalizes that this task could be reused */
    if (curr_task->status == TASK_FINISHED)
        curr_task->status = TASK_INVALID;

    spinlock_irqsave_lock(&runqueues[core_id].lock);

    runqueues[core_id].old_task = NULL; // reset old task
    prio = msb(runqueues[core_id].prio_bitmap); // determines highest priority
#ifndef CONFIG_TICKLESS
#if MAX_CORES > 1
    if (prio >= sizeof(size_t)*8) {
        // push load balancing

@@ -1323,6 +1349,7 @@ size_t** scheduler(void)
        load_balancing();
        prio = msb(runqueues[core_id].prio_bitmap); // retry...
    }
#endif
#endif

    if (prio >= sizeof(size_t)*8) {
@@ -32,7 +32,7 @@
#define FALSE 0
#endif

#if SYS_LIGHTWEIGHT_PROT
#if SYS_LIGHTWEIGHT_PROT && !NO_SYS
#if MAX_CORES > 1
static spinlock_irqsave_t lwprot_lock;
#endif
@@ -90,4 +90,18 @@ typedef size_t mem_ptr_t;
#define LWIP_PLATFORM_ASSERT(x) do {kprintf("Assertion \"%s\" failed at line %d in %s\n", \
                                    x, __LINE__, __FILE__); abort();} while(0)

#if NO_SYS
typedef uint32_t sys_prot_t;

static inline sys_prot_t sys_arch_protect(void)
{
    return irq_nested_disable();
}

static inline void sys_arch_unprotect(sys_prot_t pval)
{
    irq_nested_enable(pval);
}
#endif

#endif /* __ARCH_CC_H__ */
@@ -24,7 +24,7 @@ typedef struct
typedef tid_t sys_thread_t;

#if SYS_LIGHTWEIGHT_PROT
#if MAX_CORES > 1
#if (MAX_CORES > 1) && !defined(CONFIG_TICKLESS)
typedef uint32_t sys_prot_t;
sys_prot_t sys_arch_protect(void);
void sys_arch_unprotect(sys_prot_t pval);
@@ -16,7 +16,11 @@
 * NO_SYS==1: Provides VERY minimal functionality. Otherwise,
 * use lwIP facilities.
 */
#ifdef CONFIG_TICKLESS
#define NO_SYS 1
#else
#define NO_SYS 0
#endif

/**
 * LWIP_RAW==1: Enable application layer to hook into the IP layer itself.

@@ -27,17 +31,17 @@
/**
 * LWIP_SOCKET==1: Enable Socket API (require to use sockets.c)
 */
#define LWIP_SOCKET 1
#define LWIP_SOCKET !NO_SYS

/**
 * LWIP_NETCONN==1: Enable Netconn API (require to use api_lib.c)
 */
#define LWIP_NETCONN 1
#define LWIP_NETCONN !NO_SYS

/**
 * LWIP_NETIF_API==1: Support netif api (in netifapi.c)
 */
#define LWIP_NETIF_API 1
#define LWIP_NETIF_API !NO_SYS

/**
 * LWIP_DHCP==1: Enable DHCP module.