extending the functionality and increasing the compatibility of the Socket API

- fixing some minor bugs (races)
- improving readability
Stefan Lankes 2011-10-19 04:48:16 -07:00
parent 62f2038a6e
commit 5f84a93c01
2 changed files with 158 additions and 173 deletions
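
For context on the compatibility changes below: the fast-socket wrappers now follow BSD semantics, e.g. mmnif_send() returns the number of bytes written (or -1 on error) instead of an lwIP err_t, and mmnif_accept() takes the listening port from the sockaddr supplied by the caller. A minimal caller sketch (illustrative only, not part of this commit; the helper name is made up and 's' is assumed to be a fast socket obtained via mmnif_socket()):

#include <net/mmnif.h>   /* mmnif_socket(), mmnif_send(), ... */

/* hypothetical caller: after this commit, mmnif_send() reports a byte count,
 * matching BSD send(), rather than an lwIP err_t */
static int send_greeting(int s)
{
	char msg[] = "hello";
	int sent = mmnif_send(s, msg, sizeof(msg), 0);

	return (sent == (int) sizeof(msg)) ? 0 : -1;
}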


@ -18,29 +18,34 @@
*
* mmnif.c --- memory mapped interface
*
* Virutal IP Interface for the concept processor SCC
* Virtual IP Interface for the concept processor SCC
*
*/
/*
* 15th October 2011:
* - Redesign of the interrupt handling (by Stefan Lankes)
* - Add iRCCE support (by Stefan Lankes)
* - Extending the BSD socket interface
*/
#include <metalsvm/stddef.h>
#if defined(CONFIG_LWIP) && defined(CONFIG_ROCKCREEK)
#include "mmnif.h" /* definitions */
#include <lwip/netif.h> /* lwip netif */
#include <lwip/stats.h> /* interface stats */
#include <netif/etharp.h> /* ethernet arp packets */
#include <lwip/ip.h> /* struct iphdr */
#include <lwip/tcpip.h> /* tcpip_input() */
#include <lwip/sockets.h>
//#include <asm/page.h>
#include <lwip/ip_addr.h>
#include <metalsvm/mailbox.h> /* mailbox_ptr_t */
#include <metalsvm/semaphore.h>
#include <metalsvm/spinlock.h>
#include <metalsvm/page.h>
#include <metalsvm/time.h>
#include <asm/irq.h>
#include <asm/RCCE.h>
@ -51,7 +56,7 @@
#include <asm/SCC_API.h>
#include <asm/scc_memcpy.h>
#include <metalsvm/time.h>
#include <net/mmnif.h>
#define TRUE 1
#define FALSE 0
@ -80,7 +85,7 @@
#define MMNIF_AUTO_SOCKET_TIMEOUT 500
#define MMNIF_FAST_SOCKET_BLOCK 1
#define MMNIF_FAST_SOCKET_BLOCK 0
#ifdef DEBUG_MMNIF
#include "util.h" /* hex dump */
@ -120,11 +125,6 @@
static int npseudosocket = MMNIF_PSEUDO_SOCKET_START;
static spinlock_t pseudolock;
/* IP address of the local core and the router core to get packets forwarded
*/
static unsigned int own_ip_address = 0xC0A80000; /* 192.168.0.0 */
static unsigned int router_ip_address = 0xC0A80001; /* 192.168.0.1 */
/* "message passing buffer" specific constants:
* - start address
* - size
@ -152,7 +152,6 @@ typedef struct acceptor {
uint8_t stat;
uint8_t src_ip;
uint16_t port;
spinlock_t alock;
int nsock;
int rsock;
} acceptor_t;
@ -222,7 +221,6 @@ typedef struct mm_rx_buffer {
*/
uint16_t head;
uint16_t tail;
spinlock_t rlock;
/* descriptor queue
* desc_table : descriptor table
@ -261,10 +259,6 @@ typedef struct mmnif {
mm_rx_buffer_t *rx_buff;
uint8_t *rx_heap;
/* lock to protect members
*/
spinlock_t lock;
/* semaphore to regulate polling vs. interrupts
*/
sem_t com_poll;
@ -343,10 +337,10 @@ inline static int mmnif_trigger_irq(dest_ip)
addr = CRB_ADDR(x, y) + (z == 0 ? GLCFG0 : GLCFG1);
// send interrupt to ue
do {
NOP1;
//do {
// NOP1;
tmp = ReadConfigReg(addr);
} while (tmp & 1);
//} while (tmp & 1);
tmp |= 1;
SetConfigReg(addr, tmp);
@ -437,35 +431,28 @@ void mmnif_print_driver_status(void)
static uint8_t mmnif_get_destination(struct netif *netif, struct pbuf *p)
{
struct ip_hdr *iphdr;
ip_addr_p_t ip;
uint8_t core;
uint8_t *ip4addr;
uint8_t addr[4];
uint32_t netmask = 0xFFFFFF00;
/* grab the destination ip address out of the ip header
* for internal routing the last octet is interpreted as core ID.
*/
iphdr = (struct ip_hdr *)(p->payload);
ip4addr = (uint8_t*) &iphdr->dest.addr;
/* revert the address to host format */
addr[3] = ip4addr[0];
addr[2] = ip4addr[1];
addr[1] = ip4addr[2];
addr[0] = ip4addr[3];
ip = iphdr->dest;
/* check if the ip address is in the Local Area Network of the 48 cores */
/* if the destination is not in the same network, the router core is addressed
* Note: the router core is core 1
*/
if (!((netmask & *(uint32_t *) addr) == (netmask & own_ip_address)))
if (ip_addr_netcmp(&ip, &netif->ip_addr, &netif->netmask))
return 1;
core = addr[0];
core = ip4_addr4(&ip);
/* check if the address is legitimate, else return the router core again */
if ((core) < 1 || (core > MMNIF_CORES))
core = 1;
kprintf("core %d\n", (int) core);
return core;
}
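
A worked example of the routing above may help (illustrative sketch only, not part of the diff; it restates the comments using lwIP's ip4_addr4()):

/* Example: a packet for 192.168.0.7 stays inside the /24 of the 48 cores,
 * so the last octet (7) selects core 7 as the destination.
 * Destinations outside the /24, or core IDs outside 1..MMNIF_CORES,
 * fall back to the router core (core 1). */
uint8_t core = ip4_addr4(&iphdr->dest);   /* last octet = core ID */
if (core < 1 || core > MMNIF_CORES)
	core = 1;                         /* router core */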
@ -477,7 +464,7 @@ static uint8_t mmnif_get_destination(struct netif *netif, struct pbuf *p)
static uint32_t mmnif_rxbuff_alloc(uint8_t dest, uint16_t len)
{
uint32_t ret = 0;
mm_rx_buffer_t *rb = (mm_rx_buffer_t *) ((char *)header_start_address + (dest - 1) * header_size);
volatile mm_rx_buffer_t *rb = (mm_rx_buffer_t *) ((char *)header_start_address + (dest - 1) * header_size);
#if MMNIF_USE_MPB
char* memblock = (char*)heap_start_address + (dest-1)/2*16*1024*1024 + (dest-1)%2 * 0x2000;
@ -492,7 +479,7 @@ static uint32_t mmnif_rxbuff_alloc(uint8_t dest, uint16_t len)
// if ((rb->head - rb->tail < len)&&(rb->tail != rb->head))
// return NULL;
RCCE_acquire_lock(dest-1);
RCCE_acquire_lock(RC_COREID[dest-1]);
if (rb->dcount)
{
if (rb->tail > rb->head)
@ -542,9 +529,8 @@ static uint32_t mmnif_rxbuff_alloc(uint8_t dest, uint16_t len)
}
}
}
RCCE_release_lock(RC_COREID[dest-1]);
out:
RCCE_release_lock(dest-1);
return ret;
}
@ -554,7 +540,7 @@ out:
*/
static int mmnif_commit_packet(uint8_t dest, uint32_t addr)
{
mm_rx_buffer_t *rb = (mm_rx_buffer_t *) ((char *)header_start_address + (dest - 1) * header_size);
volatile mm_rx_buffer_t *rb = (mm_rx_buffer_t *) ((char *)header_start_address + (dest - 1) * header_size);
uint32_t i;
for (i = 0; i < MMNIF_MAX_DESCRIPTORS; i++)
@ -564,6 +550,7 @@ static int mmnif_commit_packet(uint8_t dest, uint32_t addr)
{
rb->desc_table[i].stat = MMNIF_STATUS_RDY;
rb->desc_table[i].fast_sock = -1;
return 0;
}
}
@ -577,7 +564,7 @@ static int mmnif_commit_packet(uint8_t dest, uint32_t addr)
*/
static int mmnif_commit_packet_bypass(uint8_t dest, uint32_t addr, int dest_socket)
{
mm_rx_buffer_t* rb = (mm_rx_buffer_t *) ((char *)header_start_address + (dest - 1) * header_size);
volatile mm_rx_buffer_t* rb = (mm_rx_buffer_t *) ((char *)header_start_address + (dest - 1) * header_size);
uint32_t i;
for (i = 0; i < MMNIF_MAX_DESCRIPTORS; i++)
@ -600,11 +587,11 @@ static int mmnif_commit_packet_bypass(uint8_t dest, uint32_t addr, int dest_sock
static void mmnif_rxbuff_free(void)
{
mmnif_t *mmnif = mmnif_dev->state;
mm_rx_buffer_t *b = mmnif->rx_buff;
volatile mm_rx_buffer_t *b = mmnif->rx_buff;
uint32_t i, j;
uint32_t rpos;
RCCE_acquire_lock(RCCE_IAM);
RCCE_acquire_lock(RC_MY_COREID);
rpos = b->dread;
for (i = 0, j = rpos; i < MMNIF_MAX_DESCRIPTORS; i++)
@ -631,7 +618,7 @@ static void mmnif_rxbuff_free(void)
break;
}
RCCE_release_lock(RCCE_IAM);
RCCE_release_lock(RC_MY_COREID);
}
/*
@ -703,6 +690,7 @@ realloc:
LINK_STATS_INC(link.xmit);
mmnif->stats.tx++;
mmnif->stats.tx_bytes += p->tot_len;
mmnif_trigger_irq(dest_ip);
return ERR_OK;
@ -756,6 +744,7 @@ static int mmnif_hashadd(int sock, int rsock, uint8_t dest_ip)
p->socket = sock;
p->remote_socket = rsock;
p->dest_ip = dest_ip;
return 0;
}
}
@ -783,6 +772,7 @@ static int mmnif_hashdelete(int sock)
p->socket = -1;
p->remote_socket = 0;
p->dest_ip = 0;
return 0;
}
}
@ -801,19 +791,9 @@ static err_t mmnif_tx_bypass(struct netif * netif, void *pbuff, uint16_t size, i
//uint32_t exp_delay = 2;
//mm_rx_buffer_t *rb = (mm_rx_buffer_t *) ((char *)header_start_address + (dest->dest_ip - 1) * header_size);
/* Perform several sanity checks on the packet and the buffers:
* - is the output packet too big?
*/
// if (size > MMNIF_TX_BUFFERLEN)
// {
// DEBUGPRINTF("mmnif_tx(): packet is longer than %d bytes\n",MMNIF_TX_BUFFERLEN);
// goto drop_packet;
// }
/* allocate memory for the packet in the remote buffer */
realloc:
write_address = mmnif_rxbuff_alloc(dest->dest_ip, CLINE_ALIGN(size));
write_address = mmnif_rxbuff_alloc(dest->dest_ip, size);
if (!write_address)
{
@ -821,14 +801,7 @@ realloc:
// udelay(exp_delay);
// exp_delay << 1;
// reschedule();
NOP8;
NOP8;
NOP8;
NOP8;
NOP8;
NOP8;
NOP8;
NOP8;
NOP8;NOP8;NOP8;NOP8;NOP8;NOP8;NOP8;NOP8;
goto realloc;
}
@ -854,7 +827,7 @@ realloc:
if (mmnif_commit_packet_bypass(dest->dest_ip, write_address, dest->remote_socket))
{
DEBUGPRINTF("mmnif_tx(): packet somehow lost during commit\n");
DEBUGPRINTF("mmnif_tx_bypass(): packet somehow lost during commit\n");
}
#ifdef DEBUG_MMNIF_PACKET
// DEBUGPRINTF("\n SEND %p with length: %d\n",(char*)mpb_start_address + (dest_ip -1)*mpb_size + pos * 1792,p->tot_len +2);
@ -865,7 +838,9 @@ realloc:
LINK_STATS_INC(link.xmit);
mmnif->stats.tx++;
mmnif->stats.tx_bytes += size;
mmnif_trigger_irq(dest->dest_ip);
return ERR_OK;
drop_packet:
@ -882,27 +857,30 @@ drop_packet:
int mmnif_send(int s, void *data, size_t size, int flags)
{
bypass_rxdesc_t *p = mmnif_hashlookup(s);
uint32_t i, j, k, ret;
uint32_t i, j, k;
int total_size = 0;
if (p != 0)
{
if (size < ((MMNIF_RX_BUFFERLEN / 2) - 1))
return mmnif_tx_bypass(mmnif_dev, data, size, s);
else
{
if (size < ((MMNIF_RX_BUFFERLEN / 2) - 1)) {
if (mmnif_tx_bypass(mmnif_dev, data, size, s) == ERR_OK)
return size;
else
return -1;
} else {
j = size / (((MMNIF_RX_BUFFERLEN / 2) - 1));
k = size - (j * (((MMNIF_RX_BUFFERLEN / 2) - 1)));
for (i = 0; i < j; i++)
{
ret = mmnif_tx_bypass(mmnif_dev, data + i * ((MMNIF_RX_BUFFERLEN / 2) - 1), ((MMNIF_RX_BUFFERLEN / 2) - 1), s);
if (ret < 0)
return ret;
if (mmnif_tx_bypass(mmnif_dev, data + i * ((MMNIF_RX_BUFFERLEN / 2) - 1), ((MMNIF_RX_BUFFERLEN / 2) - 1), s) != ERR_OK)
return total_size;
total_size += (MMNIF_RX_BUFFERLEN / 2) - 1;
}
ret = mmnif_tx_bypass(mmnif_dev, data + (j - 1) * ((MMNIF_RX_BUFFERLEN / 2) - 1), k, s);
return ret;
if (mmnif_tx_bypass(mmnif_dev, data + (j - 1) * ((MMNIF_RX_BUFFERLEN / 2) - 1), k, s) != ERR_OK)
return total_size;
return total_size + k;
}
}
@ -914,8 +892,7 @@ int mmnif_send(int s, void *data, size_t size, int flags)
* because we have no link layer and everything is reliable, we don't need
* to add anything; we just pass the packet to our tx function
*/
static err_t
mmnif_link_layer(struct netif *netif, struct pbuf *q, ip_addr_t * ipaddr)
static err_t mmnif_link_layer(struct netif *netif, struct pbuf *q, ip_addr_t * ipaddr)
{
return netif->linkoutput(netif, q);
}
@ -934,7 +911,6 @@ err_t mmnif_init(struct netif *netif)
DEBUGPRINTF("mmnif init attempt\n");
mmnif_dev = netif;
own_ip_address += RCCE_ue() + 1;
/* Alloc and clear memory for the device struct
*/
@ -959,7 +935,7 @@ err_t mmnif_init(struct netif *netif)
// map physical address in the virtual address space
header_start_address = (void*) map_region(0, (size_t) header_start_address, (MMNIF_CORES * header_size) >> PAGE_SHIFT, MAP_KERNEL_SPACE | MAP_WT | MAP_NO_CACHE);
DEBUGPRINTF("map_region : %p\n", header_start_address);
mmnif->rx_buff = (mm_rx_buffer_t *) (header_start_address + (header_size) * (own_ip_address - router_ip_address));
mmnif->rx_buff = (mm_rx_buffer_t *) (header_start_address + header_size * RCCE_IAM);
/* Alloc and clear shared memory for rx_buff
*/
@ -980,25 +956,25 @@ err_t mmnif_init(struct netif *netif)
// align size to the granularity of a page size
heap_size = (heap_size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
heap_start_address = (void*) RCCE_shmalloc(heap_size * MMNIF_CORES);
DEBUGPRINTF("RCCE_shmalloc : %p (size %u)\n", header_start_address, MMNIF_CORES * header_size);
DEBUGPRINTF("RCCE_shmalloc : %p (size %u)\n", heap_start_address, MMNIF_CORES * header_size);
// map physical address in the virtual address space
#if USE_CACHE
uint32_t n = (uint32_t) heap_start_address;
heap_start_address = map_region(0, heap_start_address, (MMNIF_CORES * heap_size) >> PAGE_SHIFT, MAP_KERNEL_SPACE | MAP_NO_CACHE | MAP_MPE | MAP_WT);
map_region(heap_start_address + (heap_size) * (own_ip_address - router_ip_address), n + (heap_size) * (own_ip_address - router_ip_address), header_size >> PAGE_SHIFT, MAP_KERNEL_SPACE | MAP_MPE | MAP_WT | MAP_REMAP);
map_region(heap_start_address + (heap_size) * RCCE_IAM, n + (heap_size) * RCCE_IAM, header_size >> PAGE_SHIFT, MAP_KERNEL_SPACE | MAP_MPE | MAP_WT | MAP_REMAP);
#else
heap_start_address = (void*) map_region(0, (size_t) heap_start_address, (MMNIF_CORES * heap_size) >> PAGE_SHIFT, MAP_KERNEL_SPACE | MAP_NO_CACHE | MAP_MPE | MAP_WT);
#endif // USE_CACHE
#endif // MMNIF_USE_MPB
DEBUGPRINTF("map_region : %p\n", header_start_address);
DEBUGPRINTF("map_region : %p\n", heap_start_address);
#if MMNIF_USE_MPB
mmnif->rx_heap = heap_start_address;
heap_start_address = heap_start_address - (RC_MY_COREID/2 * 16*1024*1024 ) - (RC_MY_COREID%2 * 0x2000);
DEBUGPRINTF("heap_start_address : %p\n", heap_start_address);
#else
mmnif->rx_heap = heap_start_address + (heap_size) * (own_ip_address - router_ip_address);
mmnif->rx_heap = heap_start_address + heap_size * RCCE_IAM;
#endif
if (!(heap_start_address))
@ -1007,13 +983,9 @@ err_t mmnif_init(struct netif *netif)
return ERR_MEM;
}
#if !MMNIF_USE_MPB
memset(mmnif->rx_buff, 0x00, header_size);
memset(mmnif->rx_heap, 0x00, heap_size);
*((int *)RCCE_fool_write_combine_buffer) = 1;
#else
#if MMNIF_USE_MPB
asm volatile (".byte 0x0f; .byte 0x0a;\n");
#endif
for(i=0; i<header_size; i++)
((uint8_t*)mmnif->rx_buff)[i] = 0x00;
@ -1021,6 +993,7 @@ err_t mmnif_init(struct netif *netif)
((uint8_t*)mmnif->rx_heap)[i] = 0x00;
*((int *)RCCE_fool_write_combine_buffer) = 1;
#if MMNIF_USE_MPB
asm volatile (".byte 0x0f; .byte 0x0a;\n");
#endif
@ -1030,9 +1003,7 @@ err_t mmnif_init(struct netif *netif)
/* init the lock's for the hdr
*/
spinlock_init(&mmnif->rx_buff->rlock);
spinlock_init(&pseudolock);
spinlock_init(&mmnif->lock);
/* init the sems for communication art
*/
@ -1056,9 +1027,6 @@ err_t mmnif_init(struct netif *netif)
mmnif->rx_buff->acceptors[i].rsock = -1;
mmnif->rx_buff->acceptors[i].src_ip = 0;
mmnif->rx_buff->acceptors[i].port = 0;
spinlock_init(&mmnif->rx_buff->acceptors[i].alock);
spinlock_lock(&mmnif->rx_buff->acceptors[i].alock);
spinlock_unlock(&mmnif->rx_buff->acceptors[i].alock);
}
/* pass the device state to lwip */
@ -1100,7 +1068,7 @@ err_t mmnif_init(struct netif *netif)
static void mmnif_rx(struct netif *netif)
{
mmnif_t *mmnif = netif->state;
mm_rx_buffer_t *b = mmnif->rx_buff;
volatile mm_rx_buffer_t *b = mmnif->rx_buff;
uint16_t length = 0;
struct pbuf *p;
struct pbuf *q;
@ -1112,13 +1080,11 @@ static void mmnif_rx(struct netif *netif)
anotherpacket:
rdesc = 0xFF;
spinlock_lock(&b->rlock);
/* check if this call to mmnif_rx makes any sense
*/
if (b->desc_table[b->dread].stat == MMNIF_STATUS_FREE)
{
spinlock_unlock(&b->rlock);
goto out;
}
@ -1142,6 +1108,7 @@ anotherpacket:
DEBUGPRINTF("mmnif_rx(): no fast socket associated with %d", b->desc_table[rdesc].fast_sock);
mmnif->rx_buff->desc_table[rdesc].stat = MMNIF_STATUS_PROC;
mmnif_rxbuff_free();
goto out;
} else {
b->desc_table[rdesc].stat = MMNIF_STATUS_INPROC;
#if MMNIF_FAST_SOCKET_BLOCK
@ -1149,19 +1116,16 @@ anotherpacket:
#else
atomic_int32_inc(&bp->cnt);
#endif
goto anotherpacket;
}
spinlock_unlock(&b->rlock);
goto out;
}
}
if (b->desc_table[(j + i) % MMNIF_MAX_DESCRIPTORS].stat == MMNIF_STATUS_FREE)
{
spinlock_unlock(&b->rlock);
goto out;
}
}
spinlock_unlock(&b->rlock);
/* if no packet is finished, we encountered a random error
*/
@ -1265,19 +1229,15 @@ out:
static int mmnif_rx_bypass(struct netif *netif, int s, void *data, uint32_t len)
{
mmnif_t *mmnif = netif->state;
mm_rx_buffer_t *b = mmnif->rx_buff;
uint16_t length;
char *packet;
volatile mm_rx_buffer_t *b = mmnif->rx_buff;
uint16_t length = 0;
char *packet = NULL;
uint32_t i, j;
uint8_t rdesc = 0xFF;
// spinlock_lock(&b->rlock);
/* check if this call to mmnif_rx makes any sense
*/
if (b->desc_table[b->dread].stat == MMNIF_STATUS_FREE)
{
// spinlock_unlock(&b->rlock);
if (b->desc_table[b->dread].stat == MMNIF_STATUS_FREE) {
return -1;
}
@ -1296,14 +1256,10 @@ static int mmnif_rx_bypass(struct netif *netif, int s, void *data, uint32_t len)
}
}
// spinlock_unlock(&b->rlock);
/* if no packet is finished, we encountered a random error
*/
if (rdesc == 0xFF)
{
return -1;
}
/* If length is zero return silently
*/
@ -1320,13 +1276,28 @@ static int mmnif_rx_bypass(struct netif *netif, int s, void *data, uint32_t len)
#ifdef DEBUG_MMNIF_PACKET
DEBUGPRINTF("\n RECIEVED - %p with legth: %d\n", packet, length);
hex_dump(length, packet);
#endif
if (len >= length)
memcpy(data, (void*) mmnif->rx_buff->desc_table[rdesc].addr, mmnif->rx_buff->desc_table[rdesc].len);
else
if (BUILTIN_EXPECT(len < length, 0))
goto drop_packet;
#if USE_CACHE || MMNIF_USE_MPB
asm volatile (".byte 0x0f; .byte 0x0a;\n");
#endif
#if !USE_CACHE && !MMNIF_USE_MPB
memcpy_from_nc(data, (void*) mmnif->rx_buff->desc_table[rdesc].addr, mmnif->rx_buff->desc_table[rdesc].len);
#elif MMNIF_USE_MPB
memcpy_get(data, (void*) mmnif->rx_buff->desc_table[rdesc].addr, mmnif->rx_buff->desc_table[rdesc].len);
#else
memcpy(data, (void*) mmnif->rx_buff->desc_table[rdesc].addr, mmnif->rx_buff->desc_table[rdesc].len);
#endif
*((int *)RCCE_fool_write_combine_buffer) = 1;
#if MMNIF_USE_MPB
asm volatile (".byte 0x0f; .byte 0x0a;\n");
#endif
/* indicate that the copy process is done and the packet can be freed
* note that we did not lock here because we are the only one editing this value
*/
@ -1345,10 +1316,6 @@ static int mmnif_rx_bypass(struct netif *netif, int s, void *data, uint32_t len)
return length;
drop_packet:
//spinlock_lock(&mmnif->rx_buff->rlock);
/*error handling */
//spinlock_unlock(&mmnif->rx_buff->rlock);
LINK_STATS_INC(link.drop);
mmnif->stats.rx_err++;
@ -1375,7 +1342,6 @@ int mmnif_recv(int s, void *data, uint32_t len, int flags)
//reschedule();
NOP8;
}
#endif
ret = mmnif_rx_bypass(mmnif_dev, s, data, len);
@ -1407,27 +1373,30 @@ int mmnif_socket(int domain, int type, int protocol)
*/
int mmnif_accept(int s, struct sockaddr *addr, socklen_t * addrlen)
{
struct sockaddr_in *bp = (struct sockaddr_in*)addr;
uint16_t port = bp->sin_port;
mm_rx_buffer_t *b = ((mmnif_t *) mmnif_dev->state)->rx_buff;
int i;
struct sockaddr_in *client = (struct sockaddr_in*)addr;
volatile mm_rx_buffer_t *b = ((mmnif_t *) mmnif_dev->state)->rx_buff;
bypass_rxdesc_t *p;
int tmp1 = get_clock_tick();
int tmp2 = 0;
int i, tmp2 = 0;
uint16_t port;
// TODO: Bug, not compatible with BSD sockets
port = client->sin_port;
if ((unsigned int)s >= MMNIF_PSEUDO_SOCKET_START)
{
for (i = 0; i < MMNIF_MAX_ACCEPTORS; i++)
{
if (b->acceptors[(i + port) % MMNIF_MAX_ACCEPTORS].stat == MMNIF_ACC_STAT_CLOSED)
{
spinlock_lock(&b->acceptors[(i + port) % MMNIF_MAX_ACCEPTORS].alock);
RCCE_acquire_lock(RC_MY_COREID);
b->acceptors[(i + port) % MMNIF_MAX_ACCEPTORS].port = port;
b->acceptors[(i + port) % MMNIF_MAX_ACCEPTORS].stat = MMNIF_ACC_STAT_ACCEPTING;
spinlock_lock(&pseudolock);
mmnif_hashadd(npseudosocket, -1, 0);
b->acceptors[(i + port) % MMNIF_MAX_ACCEPTORS].nsock = npseudosocket++;
spinlock_unlock(&pseudolock);
spinlock_unlock(&b->acceptors[(i + port) % MMNIF_MAX_ACCEPTORS].alock);
RCCE_release_lock(RC_MY_COREID);
while (b->acceptors[(i + port) % MMNIF_MAX_ACCEPTORS].stat != MMNIF_ACC_STAT_ACCEPT_ME)
NOP8;
@ -1435,48 +1404,46 @@ int mmnif_accept(int s, struct sockaddr *addr, socklen_t * addrlen)
p = mmnif_hashlookup(b->acceptors[(i + port) % MMNIF_MAX_ACCEPTORS].nsock);
p->dest_ip = b->acceptors[(i + port) % MMNIF_MAX_ACCEPTORS].src_ip;
p->remote_socket = b->acceptors[(i + port) % MMNIF_MAX_ACCEPTORS].rsock;
spinlock_lock(&b->acceptors[(i + port) % MMNIF_MAX_ACCEPTORS].alock);
RCCE_acquire_lock(RC_MY_COREID);
b->acceptors[(i + port) % MMNIF_MAX_ACCEPTORS].stat = MMNIF_ACC_STAT_ACCEPTED;
i = b->acceptors[(i + port) % MMNIF_MAX_ACCEPTORS].nsock;
spinlock_unlock(&b->acceptors[(i + port) % MMNIF_MAX_ACCEPTORS].alock);
RCCE_release_lock(RC_MY_COREID);
return i;
}
}
return -1;
}
else
{
return -1;
} else {
for (i = 0; i < MMNIF_MAX_ACCEPTORS; i++)
{
if (b->acceptors[(i + port) % MMNIF_MAX_ACCEPTORS].stat == MMNIF_ACC_STAT_CLOSED)
{
spinlock_lock(&b->acceptors[(i + port) % MMNIF_MAX_ACCEPTORS].alock);
RCCE_acquire_lock(RC_MY_COREID);
b->acceptors[(i + port) % MMNIF_MAX_ACCEPTORS].port = port;
b->acceptors[(i + port) % MMNIF_MAX_ACCEPTORS].stat = MMNIF_ACC_STAT_ACCEPTING;
spinlock_lock(&pseudolock);
mmnif_hashadd(npseudosocket, -1, 0);
b->acceptors[(i + port) % MMNIF_MAX_ACCEPTORS].nsock = npseudosocket++;
spinlock_unlock(&pseudolock);
spinlock_unlock(&b->acceptors[(i + port) % MMNIF_MAX_ACCEPTORS].alock);
RCCE_release_lock(RC_MY_COREID);
while (b->acceptors[(i + port) % MMNIF_MAX_ACCEPTORS].stat != MMNIF_ACC_STAT_ACCEPT_ME)
{
tmp2 = get_clock_tick();
if (tmp2 - tmp1 > MMNIF_AUTO_SOCKET_TIMEOUT)
{
spinlock_lock(&b->acceptors[(i + port) % MMNIF_MAX_ACCEPTORS].alock);
RCCE_acquire_lock(RC_MY_COREID);
if (b->acceptors[(i + port) % MMNIF_MAX_ACCEPTORS].stat == MMNIF_ACC_STAT_ACCEPT_ME)
{
spinlock_unlock(&b->acceptors[(i + port) % MMNIF_MAX_ACCEPTORS].alock);
RCCE_release_lock(RC_MY_COREID);
break;
}
DEBUGPRINTF("mmnif_accept(): Timout occoured, switching to normal accept()");
mmnif_hashdelete(b->acceptors[(i + port) % MMNIF_MAX_ACCEPTORS].nsock);
b->acceptors[(i + port) % MMNIF_MAX_ACCEPTORS].stat = MMNIF_ACC_STAT_CLOSED;
spinlock_unlock(&b->acceptors[(i + port) % MMNIF_MAX_ACCEPTORS].alock);
RCCE_release_lock(RC_MY_COREID);
goto normalaccept;
}
NOP8;
@ -1485,14 +1452,15 @@ int mmnif_accept(int s, struct sockaddr *addr, socklen_t * addrlen)
p = mmnif_hashlookup(b->acceptors[(i + port) % MMNIF_MAX_ACCEPTORS].nsock);
p->dest_ip = b->acceptors[(i + port) % MMNIF_MAX_ACCEPTORS].src_ip;
p->remote_socket = b->acceptors[(i + port) % MMNIF_MAX_ACCEPTORS].rsock;
spinlock_lock(&b->acceptors[(i + port) % MMNIF_MAX_ACCEPTORS].alock);
RCCE_acquire_lock(RC_MY_COREID);
b->acceptors[(i + port) % MMNIF_MAX_ACCEPTORS].stat = MMNIF_ACC_STAT_ACCEPTED;
i = b->acceptors[(i + port) % MMNIF_MAX_ACCEPTORS].nsock;
spinlock_unlock(&b->acceptors[(i + port) % MMNIF_MAX_ACCEPTORS].alock);
RCCE_release_lock(RC_MY_COREID);
return i;
}
}
return -1;
}
@ -1507,48 +1475,29 @@ int mmnif_connect(int s, const struct sockaddr *name, socklen_t namelen)
{
struct sockaddr_in *p = (struct sockaddr_in*) name;
uint16_t port = p->sin_port;
mm_rx_buffer_t *b;
volatile mm_rx_buffer_t *b;
int i;
//int tmp1 = get_clock_tick();
//int tmp2 = 0;
uint8_t core;
uint8_t *ip4addr;
uint8_t addr[4];
//uint32_t netmask = 0xFFFFFF00;
/* grab the destination ip address out of the ip header
* for internal routing the last octet is interpreted as core ID.
*/
ip4addr = (uint8_t*) &p->sin_addr.s_addr;
/* revert the address to host format */
addr[3] = ip4addr[0];
addr[2] = ip4addr[1];
addr[1] = ip4addr[2];
addr[0] = ip4addr[3];
/* check if the ip address is in the Local Area Network of the 48 cores */
// if (!((netmask & *(uint32_t*)addr) == (netmask & own_ip_address) ))
// return -1;
core = addr[0];
core = ip4_addr4(&p->sin_addr.s_addr);
if ((core) < 1 || (core > MMNIF_CORES))
return lwip_connect(s, name, namelen);
b = (mm_rx_buffer_t *) ((char *)header_start_address +
(core - 1) * header_size);
b = (volatile mm_rx_buffer_t *) ((char *)header_start_address + (core - 1) * header_size);
for (i = 0; i < MMNIF_MAX_ACCEPTORS; i++)
{
if (b->acceptors[(i + port) % MMNIF_MAX_ACCEPTORS].stat == MMNIF_ACC_STAT_ACCEPTING
&& b->acceptors[(i + port) % MMNIF_MAX_ACCEPTORS].port == port)
{
spinlock_lock(&b->acceptors[(i + port) % MMNIF_MAX_ACCEPTORS].alock);
RCCE_acquire_lock(RC_COREID[core-1]);
b->acceptors[(i + port) % MMNIF_MAX_ACCEPTORS].stat = MMNIF_ACC_STAT_ACCEPT_ME;
b->acceptors[(i + port) % MMNIF_MAX_ACCEPTORS].rsock = s;
b->acceptors[(i + port) % MMNIF_MAX_ACCEPTORS].src_ip = own_ip_address & 0xFF;
mmnif_hashadd(s,
b->acceptors[(i + port) % MMNIF_MAX_ACCEPTORS].nsock, core);
spinlock_unlock(&b->acceptors[(i + port) % MMNIF_MAX_ACCEPTORS].alock);
b->acceptors[(i + port) % MMNIF_MAX_ACCEPTORS].src_ip = ip4_addr4(&mmnif_dev->ip_addr);
mmnif_hashadd(s, b->acceptors[(i + port) % MMNIF_MAX_ACCEPTORS].nsock, core);
RCCE_release_lock(RC_COREID[core-1]);
while (b->acceptors[(i + port) % MMNIF_MAX_ACCEPTORS].stat != MMNIF_ACC_STAT_ACCEPTED)
{
@ -1563,9 +1512,11 @@ int mmnif_connect(int s, const struct sockaddr *name, socklen_t namelen)
// }
NOP8;
}
spinlock_lock(&b->acceptors[(i + port) % MMNIF_MAX_ACCEPTORS].alock);
RCCE_acquire_lock(RC_COREID[core-1]);
b->acceptors[(i + port) % MMNIF_MAX_ACCEPTORS].stat = MMNIF_ACC_STAT_CLOSED;
spinlock_unlock(&b->acceptors[(i + port) % MMNIF_MAX_ACCEPTORS].alock);
RCCE_release_lock(RC_COREID[core-1]);
return 0;
}
}
@ -1589,6 +1540,13 @@ int mmnif_bind(int s, const struct sockaddr *name, socklen_t namelen)
return 0;
}
int mmnif_setsockopt(int s, int level, int optname, const void *optval, socklen_t optlen)
{
if ((unsigned int)s < MMNIF_PSEUDO_SOCKET_START)
return lwip_setsockopt(s, level, optname, optval, optlen);
return 0;
}
/* mmnif_closesocket(): replacement if lwip_close for
* fast_sockets
*/


@ -24,26 +24,53 @@
#ifdef CONFIG_LWIP
#include <lwip/err.h>
#include <lwip/netif.h> /* lwip netif */
#include <lwip/sockets.h>
#define AF_MMNIF_NET 0x1337
#define AF_MMNIF_NET 0x42
#define MMNIF_AUTOACTIVATE_FAST_SOCKETS 0
#define MMNIF_AUTOACTIVATE_FAST_SOCKETS 1
#if MMNIF_AUTOACTIVATE_FAST_SOCKETS
//#ifndef socklen_t
//# define socklen_t u32_t
//#endif
int mmnif_socket(int domain, int type, int protocol);
int mmnif_send(int s, void *data, size_t size, int flags);
int mmnif_recv(int s, void *data, uint32_t len, int flags);
int mmnif_accept(int s, struct sockaddr *addr, socklen_t * addrlen);
int mmnif_connect(int s, const struct sockaddr *name, socklen_t namelen);
int mmnif_listen(int s, int backlog);
int mmnif_bind(int s, const struct sockaddr *name, socklen_t namelen);
int mmnif_closesocket(int s);
int mmnif_getsockopt (int s, int level, int optname, void *optval, socklen_t *optlen);
int mmnif_setsockopt (int s, int level, int optname, const void *optval, socklen_t optlen);
#undef accept
#define accept(a,b,c) mmnif_accept(a,b,c)
#undef closesocket
#define closesocket(s) mmnif_closesocket(s)
#undef connect
#define connect(a,b,c) mmnif_connect(a,b,c)
#undef recv
#define recv(a,b,c,d) mmnif_recv(a,b,c,d)
#undef send
#define send(a,b,c,d) mmnif_send(a,b,c,d)
#undef socket
#define socket(a,b,c) mmnif_socket(a,b,c)
#undef bind
#define bind(a,b,c) mmnif_bind(a,b,c)
#undef listen
#define listen(a,b) mmnif_listen(a,b)
#undef setsockopt
#define setsockopt(a,b,c,d,e) mmnif_setsockopt(a,b,c,d,e)
#undef select
#endif
err_t mmnif_init(struct netif*);
err_t mmnif_shutdown(void);
int mmnif_worker(void *e);
void mmnif_print_driver_status();
void mmnif_print_driver_status(void);
#endif
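
To illustrate the header changes: with MMNIF_AUTOACTIVATE_FAST_SOCKETS set to 1, code written against the BSD socket API is redirected to the mmnif fast sockets through the macros above. A minimal client sketch (illustrative only; the port, the 192.168.0.2 peer, and the assumption that AF_MMNIF_NET selects a fast socket in mmnif_socket() are placeholders, not taken from this commit):

#include <net/mmnif.h>   /* pulls in lwip/sockets.h and the macro overrides above */

/* hypothetical client: socket(), connect(), send() and closesocket() expand
 * to mmnif_socket(), mmnif_connect(), mmnif_send() and mmnif_closesocket() */
static int fast_client(void)
{
	struct sockaddr_in server = {
		.sin_family = AF_INET,
		.sin_port   = htons(5000),                   /* placeholder port */
	};
	char msg[] = "ping";
	int s = socket(AF_MMNIF_NET, SOCK_STREAM, 0);    /* mmnif_socket() */

	if (s < 0)
		return -1;

	server.sin_addr.s_addr = inet_addr("192.168.0.2");   /* core 2 in the addressing scheme */

	if (connect(s, (struct sockaddr*) &server, sizeof(server)) < 0) {   /* mmnif_connect() */
		closesocket(s);                          /* mmnif_closesocket() */
		return -1;
	}

	send(s, msg, sizeof(msg), 0);                    /* mmnif_send() */
	closesocket(s);
	return 0;
}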