/* metalsvm/drivers/net/mmnif.c */
/*
* Copyright 2011 Carl-Benedikt Krueger, Chair for Operating Systems,
* RWTH Aachen University
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* This file is part of MetalSVM.
*
* mmnif.c --- memmory mapped interface
*
* Virtual IP Interface for the concept processor SCC
*
*/
/*
* 15th October 2011:
* - Redesign of the interrupt handling (by Stefan Lankes)
* - Add iRCCE support (by Stefan Lankes)
* - Extending the BSD socket interface
*/
#include <metalsvm/stddef.h>
#if defined(CONFIG_LWIP) && defined(CONFIG_ROCKCREEK)
#include <lwip/netif.h> /* lwip netif */
#include <lwip/stats.h> /* inteface stats */
#include <netif/etharp.h> /* ethernet arp packets */
#include <lwip/ip.h> /* struct iphdr */
#include <lwip/tcpip.h> /* tcpip_input() */
#include <lwip/sockets.h>
#include <lwip/ip_addr.h>
#include <metalsvm/mailbox.h> /* mailbox_ptr_t */
#include <metalsvm/semaphore.h>
#include <metalsvm/spinlock.h>
#include <metalsvm/page.h>
#include <metalsvm/time.h>
#include <asm/irq.h>
#include <asm/RCCE.h>
#include <asm/RCCE_lib.h>
#include <asm/SCC_API.h>
#include <asm/scc_memcpy.h>
#include <asm/svm.h>
#include <net/mmnif.h>
#define TRUE 1
#define FALSE 0
#define DEBUGPRINTF(x,...) LWIP_DEBUGF(NETIF_DEBUG, (x, ##__VA_ARGS__))
#define DEBUG_MMNIF
//#define DEBUG_MMNIF_PACKET
// set to 1 if you want to enable the L1 cache for the receive buffers
#define USE_CACHE 1
// set to 1 if you want to use the message passing buffer
#define MMNIF_USE_MPB 0
#if USE_CACHE && MMNIF_USE_MPB
#error MPB version uses implicitly the cache
#endif
/* Cache line wrappers */
#define CLINE_SHIFT 5
#define CLINE_SIZE (1UL << CLINE_SHIFT)
#define CLINE_MASK (~(CLINE_SIZE - 1))
#define CLINE_ALIGN(_x) (((_x) + CLINE_SIZE - 1) & CLINE_MASK)
//#define CLINE_ALIGN(_x) (_x)
#define MMNIF_AUTO_SOCKET_TIMEOUT 500
#ifdef DEBUG_MMNIF
#include <net/util.h> /* hex dump */
#endif
/* define constants
* regarding the driver & its configuration
*/
#if MMNIF_USE_MPB
#define MMNIF_RX_BUFFERLEN (7*1024)
#else
#define MMNIF_RX_BUFFERLEN (8*1024)
#endif
#define MMNIF_MAX_DESCRIPTORS 64
#define MMNIF_CORES 48
#define MMNIF_STATUS_FREE 0x00
#define MMNIF_STATUS_PENDING 0x01
#define MMNIF_STATUS_RDY 0x02
#define MMNIF_STATUS_INPROC 0x03
#define MMNIF_STATUS_INPROC_BYPASS 0x04
#define MMNIF_STATUS_PROC 0x05
#define MMNIF_MAX_ACCEPTORS 0x20
#define MMNIF_ACC_STAT_CLOSED 0x00
#define MMNIF_ACC_STAT_ACCEPTING 0x01
#define MMNIF_ACC_STAT_ACCEPT_ME 0x02
#define MMNIF_ACC_STAT_ACCEPTED 0x03
#define MMNIF_HASHTABLE_SIZE 0x20
#define MMNIF_PSEUDO_SOCKET_START 0x31337
#if LWIP_SOCKET
static int npseudosocket = MMNIF_PSEUDO_SOCKET_START;
#endif
static spinlock_t pseudolock;
/* "message passing buffer" specific constants:
* - start address
* - size
*/
static char* header_start_address = NULL;
static unsigned int header_size = 0;
static char* heap_start_address = NULL;
static unsigned int heap_size = 0;
/*
* the memory mapped network device
*/
static struct netif* mmnif_dev = NULL;
/* accept struct
*/
typedef struct acceptor {
/* Shared-memory acceptor slot used for the fast socket accept/connect
 * handshake between cores.
 * stat : status of the acceptor (one of MMNIF_ACC_STAT_*)
 * src_ip : core id the connect request came from
 * port : port on which the acceptor is listening
 * nsock : next pseudo socket which is used in the further connection
 * rsock : remote socket which has to be associated with nsock
 */
uint8_t stat;
uint8_t src_ip;
uint16_t port;
int nsock;
int rsock;
} acceptor_t;
/* bypass descriptor struct
*/
typedef struct bypass_rxdesc {
/* Per-socket receive descriptor for the TCP/IP-bypass fast path.
 * socket : local socket number, the hash key (-1 marks a free slot)
 * remote_socket : socket number on the remote end
 * sem : posted by mmnif_rx() when a bypass packet arrives,
 * waking the reader
 * dest_ip : associated destination ip/core (1-based core id)
 */
int socket;
int remote_socket;
sem_t sem;
uint8_t dest_ip;
} bypass_rxdesc_t;
/*
*/
static bypass_rxdesc_t mmnif_hashtable[MMNIF_HASHTABLE_SIZE];
typedef struct mmnif_device_stats {
/* Device statistics, counted per packet:
 * rx_err : receive errors
 * rx : successfully received packets
 * rx_bytes : received bytes
 * tx_err : transmit errors (dropped packets)
 * tx : successfully transmitted packets
 * tx_bytes : transmitted bytes
 */
unsigned int rx_err;
unsigned int rx;
unsigned int rx_bytes;
unsigned int tx_err;
unsigned int tx;
unsigned int tx_bytes;
/* Heuristics:
 * bdg_overflow : how many times a budget overflow occurred
 * pll_empty : how many times the polling thread polled without
 * receiving a new message
 */
unsigned int bdg_overflow;
unsigned int pll_empty;
} mmnif_device_stats_t;
/* receive descror structure */
typedef struct rx_desc {
/* Receive descriptor, one per in-flight packet in the shared ring.
 * stat : status of the descriptor (one of MMNIF_STATUS_*)
 * len : length of the packet in bytes
 * addr : memory address of the packet payload in the rx heap
 * fast_sock : -1 (stored as 0xFFFFFFFF) if no socket is associated,
 * else the socket number of the fast (bypass) socket
 */
uint8_t stat;
uint16_t len;
uint32_t addr;
uint32_t fast_sock;
} rx_desc_t;
/* receive ring buffer structure */
typedef struct mm_rx_buffer {
/* Header of the per-core shared receive ring; lives in shared memory
 * and is written by remote senders under the owner's RCCE lock.
 *
 * Memory "pseudo-ring/heap": packets are never wrapped, each packet
 * always occupies one single contiguous chunk of memory.
 * head : head of the allocated memory region (free space starts here)
 * tail : tail of the allocated memory region (next alloc starts here)
 */
uint16_t head;
uint16_t tail;
/* Descriptor queue:
 * desc_table : descriptor table
 * dcount : number of free descriptors in the queue
 * dread : next descriptor to read
 * dwrite : next descriptor to write
 */
rx_desc_t desc_table[MMNIF_MAX_DESCRIPTORS];
uint8_t dcount;
uint8_t dread;
uint8_t dwrite;
/* Acceptors: shared-memory "hashtable" used to realize the fast
 * socket accept/connect handshake.
 */
acceptor_t acceptors[MMNIF_MAX_ACCEPTORS];
} mm_rx_buffer_t;
typedef struct mmnif {
struct mmnif_device_stats stats;
/* Interface constants:
* - ehternet address
* - local ip address
*/
struct eth_addr *ethaddr;
uint32_t ipaddr;
2011-10-03 23:59:52 -07:00
// checks the TCPIP thread already the rx buffers?
2011-10-20 03:45:51 -07:00
volatile uint8_t check_in_progress;
2011-10-03 23:59:52 -07:00
/* memory interaction variables:
* - pointer to recive buffer
*/
2011-10-20 03:45:51 -07:00
volatile mm_rx_buffer_t *rx_buff;
uint8_t *rx_heap;
/* semaphore to regulate polling vs. interrupts
*/
sem_t com_poll;
} mmnif_t;
// forward declaration
static void mmnif_irqhandler(struct state* s);
inline static void* memcpy_from_nc(void *dest, const void *src, size_t count)
{
/* Copy 'count' bytes from a non-cacheable source region to 'dest'.
 * Returns dest, like memcpy().
 *
 * The unrolled loop moves 32 bytes (one SCC cache line) per iteration,
 * via four 8-byte load/store pairs. Before a line is written, the
 * destination line is touched with a read ("movl (%%edi), %%edx") —
 * presumably to pre-allocate the cache line and avoid a partial-line
 * write penalty (NOTE(review): confirm against scc_memcpy.h).
 * The tail "rep movsb" copies the remaining count % 32 bytes
 * (count & 31, computed from %%ebx). */
int32_t h, i, j, k, l, m;
asm volatile ("cld;\n\t"
"1: cmpl $0, %%eax ; je 3f\n\t"
"movl (%%edi), %%edx\n\t"
"cmpl $1, %%eax ; je 2f\n\t"
"movl 32(%%edi), %%edx\n\t"
"2: movl 0(%%esi), %%ecx\n\t"
"movl 4(%%esi), %%edx\n\t"
"movl %%ecx, 0(%%edi)\n\t"
"movl %%edx, 4(%%edi)\n\t"
"movl 8(%%esi), %%ecx\n\t"
"movl 12(%%esi), %%edx\n\t"
"movl %%ecx, 8(%%edi)\n\t"
"movl %%edx, 12(%%edi)\n\t"
"movl 16(%%esi), %%ecx\n\t"
"movl 20(%%esi), %%edx\n\t"
"movl %%ecx, 16(%%edi)\n\t"
"movl %%edx, 20(%%edi)\n\t"
"movl 24(%%esi), %%ecx\n\t"
"movl 28(%%esi), %%edx\n\t"
"movl %%ecx, 24(%%edi)\n\t"
"movl %%edx, 28(%%edi)\n\t"
"addl $32, %%esi\n\t"
"addl $32, %%edi\n\t"
"dec %%eax ; jmp 1b\n\t"
"3: movl %%ebx, %%ecx\n\t"
"movl (%%edi), %%edx\n\t"
"andl $31, %%ecx\n\t"
"rep ; movsb\n\t" : "=&a"(h), "=&D"(i), "=&S"(j), "=&b"(k), "=&c"(l), "=&d"(m)
: "0"(count / 32), "1"(dest), "2"(src), "3"(count) : "memory","cc");
return dest;
}
inline static void* memcpy_to_nc(void* dest, const void *src, size_t count)
{
/* Copy 'count' bytes to a non-cacheable destination region.
 * Returns dest, like memcpy().
 * Bulk copy as 4-byte words ("rep movsl", count/4 iterations),
 * then the remaining count % 4 bytes with "rep movsb". */
int32_t i, j, k;
asm volatile (
"cld; rep movsl\n\t"
"movl %4, %%ecx\n\t"
"andl $3, %%ecx\n\t"
"rep movsb\n\t"
: "=&c"(i), "=&D"(j), "=&S"(k)
: "0"(count/4), "g"(count), "1"(dest), "2"(src) : "memory","cc");
return dest;
}
/*
* memory maped interface helper functions
*/
/* trigger an interrupt on the remote processor
* so he knows there is a packet to read
*/
inline static int mmnif_trigger_irq(int dest_ip)
{
int tmp, x, y, z, addr;
int ue = dest_ip - 1;
z = Z_PID(RC_COREID[ue]);
x = X_PID(RC_COREID[ue]);
y = Y_PID(RC_COREID[ue]);
addr = CRB_ADDR(x, y) + (z == 0 ? GLCFG0 : GLCFG1);
2011-10-20 08:46:27 -07:00
// send interrupt to ue
do {
NOP8;
tmp = ReadConfigReg(addr);
} while (tmp & 1);
tmp |= 1;
SetConfigReg(addr, tmp);
return 0;
}
/* mmnif_print_stats(): Print the devices stats of the
* current device
*/
static void mmnif_print_stats(void)
{
mmnif_t *mmnif;
2011-10-03 23:59:52 -07:00
if (!mmnif_dev)
{
2011-10-04 11:59:00 -07:00
DEBUGPRINTF("mmnif_print_stats(): the device is not initialized yet.\n");
return;
}
2011-10-03 23:59:52 -07:00
mmnif = (mmnif_t *) mmnif_dev->state;
DEBUGPRINTF("/dev/mmnif - stats:\n");
DEBUGPRINTF("Received: %d packets successfull\n", mmnif->stats.rx);
DEBUGPRINTF("Received: %d bytes\n", mmnif->stats.rx_bytes);
DEBUGPRINTF("Received: %d packets containuing errors\n", mmnif->stats.rx_err);
DEBUGPRINTF("Transmitted: %d packests successfull\n", mmnif->stats.tx);
DEBUGPRINTF("Transmitted: %d bytes\n", mmnif->stats.tx_bytes);
DEBUGPRINTF("Transmitted: %d packests were dropped due to errors\n", mmnif->stats.tx_err);
}
/* mmnif_print_driver_status
*
*/
void mmnif_print_driver_status(void)
{
mmnif_t *mmnif;
int i;
2011-10-03 23:59:52 -07:00
if (!mmnif_dev)
{
DEBUGPRINTF("mmnif_print_driver_status(): the device is not initialized yet.\n");
return;
}
2011-10-03 23:59:52 -07:00
mmnif = (mmnif_t *) mmnif_dev->state;
DEBUGPRINTF("/dev/mmnif driver status: \n\n");
DEBUGPRINTF("rx_buf: 0xp\n", mmnif->rx_buff);
DEBUGPRINTF("free descriptors : %d\n\n", mmnif->rx_buff->dcount);
DEBUGPRINTF("descriptor table: (only print descriptors in use)\n");
DEBUGPRINTF("status\taddr\tsize\n");
2011-10-03 23:59:52 -07:00
for (i = 0; i < MMNIF_MAX_DESCRIPTORS; i++)
{
if (mmnif->rx_buff->desc_table[i].stat != 0)
2011-10-03 03:52:14 -07:00
DEBUGPRINTF("0x%.2X\t%p\t%X\t\n",
mmnif->rx_buff->desc_table[i].stat,
mmnif->rx_buff->desc_table[i].addr,
mmnif->rx_buff->desc_table[i].len);
}
2011-10-03 23:59:52 -07:00
2011-10-03 03:52:14 -07:00
DEBUGPRINTF("ring heap start addr: %p\n", mmnif->rx_buff + sizeof(mm_rx_buffer_t));
DEBUGPRINTF("head: 0x%X\ttail: 0x%X\n", mmnif->rx_buff->head, mmnif->rx_buff->tail);
mmnif_print_stats();
}
/*
* memory maped interface main functions
*/
/* mmnif_get_destination(): low level transmid helper function
* this function deals with some HW details, it checks to wich core this packet
* should be routed and returns the destination
*/
/* mmnif_get_destination(): low level transmit helper.
 * Extracts the destination out of the packet's IP header; for internal
 * routing the last octet of the destination address is interpreted as
 * the core id. Assumes p->payload starts with the IP header (this
 * interface has no link-layer header).
 */
static uint8_t mmnif_get_destination(struct netif *netif, struct pbuf *p)
{
	struct ip_hdr *iphdr;
	ip_addr_p_t ip;

	iphdr = (struct ip_hdr *)(p->payload);
	ip = iphdr->dest;

	return ip4_addr4(&ip);
}
/* mmnif_rxbuff_alloc():
* this function allocates a continues chunk of memory
* right inside of the buffer which is used for communication
* with the remote end
*/
static uint32_t mmnif_rxbuff_alloc(uint8_t dest, uint16_t len)
{
/* Allocate a contiguous chunk of 'len' bytes inside the RECEIVER's
 * shared rx heap (core 'dest', 1-based) and reserve a descriptor for it
 * in state MMNIF_STATUS_PENDING. Returns the start address of the chunk
 * (as seen through the shared mapping), or 0 if no descriptor or no
 * sufficiently large contiguous region is available. Packets are never
 * wrapped around the end of the buffer — each one is a single chunk. */
uint32_t ret = 0;
volatile mm_rx_buffer_t *rb = (mm_rx_buffer_t *) ((char *)header_start_address + (dest - 1) * header_size);
#if MMNIF_USE_MPB
char* memblock = (char*)heap_start_address + (dest-1)/2*16*1024*1024 + (dest-1)%2 * 0x2000;
#else
char *memblock = (char *)heap_start_address + (dest - 1) * heap_size;
#endif
/* the remote core's lock serializes writers from different cores */
RCCE_acquire_lock(RC_COREID[dest-1]);
if (rb->dcount)
{
if (rb->tail > rb->head)
{
/* used region does not wrap: try the gap after tail first,
 * then the gap at the very beginning of the buffer */
if (MMNIF_RX_BUFFERLEN - rb->tail > len)
{
rb->desc_table[rb->dwrite].stat = MMNIF_STATUS_PENDING;
ret = (uint32_t) (memblock + rb->tail);
rb->desc_table[rb->dwrite].addr = ret;
rb->desc_table[rb->dwrite].len = len;
rb->dcount--;
rb->dwrite = (rb->dwrite + 1) % MMNIF_MAX_DESCRIPTORS;
rb->tail = (rb->tail + len);
} else if (rb->head > len) {
rb->desc_table[rb->dwrite].stat = MMNIF_STATUS_PENDING;
ret = (uint32_t) memblock;
rb->desc_table[rb->dwrite].addr = ret;
rb->desc_table[rb->dwrite].len = len;
rb->dcount--;
rb->dwrite = (rb->dwrite + 1) % MMNIF_MAX_DESCRIPTORS;
rb->tail = len;
}
} else {
/* used region wraps (or buffer empty): allocate in the gap
 * between tail and head */
if (rb->head - rb->tail > len)
{
rb->desc_table[rb->dwrite].stat = MMNIF_STATUS_PENDING;
ret = (uint32_t) (memblock + rb->tail);
rb->desc_table[rb->dwrite].addr = ret;
rb->desc_table[rb->dwrite].len = len;
rb->dcount--;
rb->dwrite = (rb->dwrite + 1) % MMNIF_MAX_DESCRIPTORS;
rb->tail = (rb->tail + len);
} else if (rb->tail == rb->head) {
/* buffer empty: if the packet does not fit behind tail,
 * restart the ring at offset 0 */
if (MMNIF_RX_BUFFERLEN - rb->tail < len)
{
rb->tail = 0;
if (rb->dread == rb->dwrite)
rb->head = 0;
}
rb->desc_table[rb->dwrite].stat = MMNIF_STATUS_PENDING;
ret = (uint32_t) (memblock + rb->tail);
rb->desc_table[rb->dwrite].addr = ret;
rb->desc_table[rb->dwrite].len = len;
rb->dcount--;
rb->dwrite = (rb->dwrite + 1) % MMNIF_MAX_DESCRIPTORS;
rb->tail = (rb->tail + len);
}
}
}
RCCE_release_lock(RC_COREID[dest-1]);
return ret;
}
/* mmnif_commit_packet: this function set the state of the (in advance)
* allocated packet to RDY so the recieve queue knows that it can be
* processed further
*/
/* mmnif_commit_packet(): flip the state of a previously allocated
 * packet from PENDING to RDY so the receive queue knows that it can be
 * processed further. Returns 0 on success, -1 if no matching pending
 * descriptor was found.
 */
static int mmnif_commit_packet(uint8_t dest, uint32_t addr)
{
	volatile mm_rx_buffer_t *rb = (mm_rx_buffer_t *) ((char *)header_start_address + (dest - 1) * header_size);
	uint32_t slot;

	for (slot = 0; slot < MMNIF_MAX_DESCRIPTORS; slot++)
	{
		volatile rx_desc_t *desc = &rb->desc_table[slot];

		if ((desc->addr != addr) || (desc->stat != MMNIF_STATUS_PENDING))
			continue;

		desc->stat = MMNIF_STATUS_RDY;
		desc->fast_sock = -1;	/* no bypass socket associated */
		return 0;
	}

	return -1;
}
#if LWIP_SOCKET
/* mmnif_commit_packet: this function set the state of the (in advance)
* allocated packet to RDY so the recieve queue knows that it can be
* processed further
*/
/* mmnif_commit_packet_bypass(): like mmnif_commit_packet(), but tags
 * the descriptor with the destination fast socket so the receiver can
 * route it past the TCP/IP stack. Returns 0 on success, -1 if no
 * matching pending descriptor was found.
 */
static int mmnif_commit_packet_bypass(uint8_t dest, uint32_t addr, int dest_socket)
{
	volatile mm_rx_buffer_t* rb = (mm_rx_buffer_t *) ((char *)header_start_address + (dest - 1) * header_size);
	uint32_t slot;

	for (slot = 0; slot < MMNIF_MAX_DESCRIPTORS; slot++)
	{
		volatile rx_desc_t *desc = &rb->desc_table[slot];

		if ((desc->addr != addr) || (desc->stat != MMNIF_STATUS_PENDING))
			continue;

		desc->stat = MMNIF_STATUS_RDY;
		desc->fast_sock = dest_socket;
		return 0;
	}

	return -1;
}
#endif
/* mmnif_rxbuff_free() : the opposite to mmnif_rxbuff_alloc() a from the receiver
* already processed chunk of memory is freed so that it can be allocated again
*/
/* mmnif_rxbuff_free(): the opposite of mmnif_rxbuff_alloc() — release
 * every descriptor (starting at the read position) that the receiver
 * has fully processed (MMNIF_STATUS_PROC), advancing head/dread and
 * returning the heap space for new allocations.
 */
static void mmnif_rxbuff_free(void)
{
	mmnif_t *mmnif = mmnif_dev->state;
	volatile mm_rx_buffer_t *b = mmnif->rx_buff;
	uint32_t i, j;
	uint32_t rpos;

	RCCE_acquire_lock(RC_MY_COREID);
	rpos = b->dread;
	for (i = 0; i < MMNIF_MAX_DESCRIPTORS; i++)
	{
		/* scan strictly sequentially from the read position; the
		 * original stepping "j = (j + i) % ..." skipped descriptors
		 * (rpos, rpos+1, rpos+3, ...) */
		j = (rpos + i) % MMNIF_MAX_DESCRIPTORS;
		if (b->desc_table[j].stat == MMNIF_STATUS_PROC)
		{
			b->dcount++;
			b->dread = (b->dread + 1) % MMNIF_MAX_DESCRIPTORS;
			b->desc_table[j].stat = MMNIF_STATUS_FREE;
			if (b->tail > b->head)
			{
				b->head += b->desc_table[j].len;
			} else {
				/* if the next in-use descriptor lies BEFORE this one in
				 * the heap, the ring wrapped here: head restarts at 0 */
				if ((b->desc_table[(j + 1) % MMNIF_MAX_DESCRIPTORS].stat != MMNIF_STATUS_FREE)
				    && (b->desc_table[j].addr > b->desc_table[(j + 1) % MMNIF_MAX_DESCRIPTORS].addr))
				{
					b->head = 0;
				} else {
					b->head += b->desc_table[j].len;
				}
			}
		} else
			break;	/* descriptors are freed strictly in order */
	}
	RCCE_release_lock(RC_MY_COREID);
}
/*
 * Transmit a packet (called by lwip)
 */
static err_t mmnif_tx(struct netif *netif, struct pbuf *p)
{
mmnif_t *mmnif = netif->state;
2011-10-04 08:36:46 -07:00
size_t write_address;
uint32_t i;
struct pbuf *q; /* interator */
uint32_t dest_ip = mmnif_get_destination(netif, p);
2011-10-04 11:59:00 -07:00
/* check for over/underflow */
if (BUILTIN_EXPECT((p->tot_len < 20 /* IP header size */) || (p->tot_len > 1536), 0)) {
DEBUGPRINTF("mmnif_tx: illegal packet length %d => drop\n", p->tot_len);
goto drop_packet;
2011-10-04 11:59:00 -07:00
}
2011-10-20 04:51:34 -07:00
/* check destination ip */
if (BUILTIN_EXPECT((dest_ip < 1) || (dest_ip > MMNIF_CORES), 0)) {
DEBUGPRINTF("mmnif_tx: invalid destination IP %d => drop\n", dest_ip);
goto drop_packet;
}
/* allocate memory for the packet in the remote buffer */
realloc:
2011-10-04 11:59:00 -07:00
write_address = mmnif_rxbuff_alloc(dest_ip, p->tot_len);
if (!write_address)
{
2011-10-04 11:59:00 -07:00
//DEBUGPRINTF("mmnif_tx(): concurrency");
NOP8;NOP8;NOP8;NOP8;NOP8;NOP8;NOP8;NOP8;
goto realloc;
}
#if MMNIF_USE_MPB
asm volatile (".byte 0x0f; .byte 0x0a;\n");
#endif
2011-10-04 08:36:46 -07:00
for (q = p, i = 0; q != 0; q = q->next)
{
#if !MMNIF_USE_MBP
memcpy_to_nc((char*) write_address + i, q->payload, q->len);
#else
memcpy_put((char*) write_address + i, q->payload, q->len);
#endif
2011-10-04 08:36:46 -07:00
i += q->len;
}
*((int *)RCCE_fool_write_combine_buffer) = 1;
#if MMNIF_USE_MPB
asm volatile (".byte 0x0f; .byte 0x0a;\n");
#endif
if (mmnif_commit_packet(dest_ip, write_address))
{
DEBUGPRINTF("mmnif_tx(): packet somehow lost during commit\n");
}
2011-10-03 23:59:52 -07:00
#ifdef DEBUG_MMNIF_PACKET
2011-10-03 03:52:14 -07:00
// DEBUGPRINTF("\n SEND %p with length: %d\n",(char*)heap_start_address + (dest_ip -1)*mpb_size + pos * 1792,p->tot_len +2);
// hex_dump(p->tot_len, p->payload);
#endif
/* just gather some stats */
LINK_STATS_INC(link.xmit);
mmnif->stats.tx++;
mmnif->stats.tx_bytes += p->tot_len;
2011-10-20 08:46:27 -07:00
mmnif_trigger_irq(dest_ip);
return ERR_OK;
drop_packet:
/* drop packet for one or another reason
*/
DEBUGPRINTF("mmnif_tx(): packet dropped");
LINK_STATS_INC(link.drop);
mmnif->stats.tx_err++;
return ERR_IF;
}
/* mmnif_hashlookup(): looks up a bypass descriptor by
* the associated socket
*/
/* mmnif_hashlookup(): look up the bypass descriptor associated with
 * socket 's' by linear probing, starting at the socket's home slot.
 * Returns the descriptor, or 0 (NULL) if the socket is not registered.
 */
static bypass_rxdesc_t *mmnif_hashlookup(int s)
{
	int i;
	bypass_rxdesc_t *p;

	for (i = 0, p = &mmnif_hashtable[s % MMNIF_HASHTABLE_SIZE]; i < MMNIF_HASHTABLE_SIZE; i++)
	{
		if (p->socket == s)
			return p;
		p = &mmnif_hashtable[(s + i + 1) % MMNIF_HASHTABLE_SIZE];
	}

	return 0;
}
#if LWIP_SOCKET
/* mmnif_hashadd(): adds a entry to the hashtable
* by the socket
*/
/* mmnif_hashadd(): register a (socket, remote socket, destination core)
 * triple in the bypass hashtable via linear probing.
 * Returns 0 on success, -1 if the socket is already registered or the
 * table is full.
 */
static int mmnif_hashadd(int sock, int rsock, uint8_t dest_ip)
{
	bypass_rxdesc_t *p;
	int i;

	/* refuse duplicate entries */
	p = mmnif_hashlookup(sock);
	if (p != 0)
		return -1;

	for (i = 0; i < MMNIF_HASHTABLE_SIZE; i++)
	{
		p = &mmnif_hashtable[(sock + i) % MMNIF_HASHTABLE_SIZE];
		if (p->socket == -1)	/* -1 marks a free slot */
		{
			p->socket = sock;
			p->remote_socket = rsock;
			p->dest_ip = dest_ip;
			return 0;
		}
	}

	return -1;
}
/* mmnif_hashdelete(): deletes an entry from the
* hashtable
*/
/* mmnif_hashdelete(): remove the entry for 'sock' from the bypass
 * hashtable. Returns 0 on success, -1 if the socket is not registered.
 * Fix: the original tested "if (p != 0) return -1;" — i.e. it bailed
 * out exactly when the entry existed, so deletion never took place.
 */
static int mmnif_hashdelete(int sock)
{
	bypass_rxdesc_t *p;
	int i;

	p = mmnif_hashlookup(sock);
	if (p == 0)
		return -1;	/* nothing to delete */

	for (i = 0; i < MMNIF_HASHTABLE_SIZE; i++)
	{
		p = &mmnif_hashtable[(sock + i) % MMNIF_HASHTABLE_SIZE];
		if (p->socket == sock)
		{
			p->socket = -1;
			p->remote_socket = 0;
			p->dest_ip = 0;
			return 0;
		}
	}

	return -1;
}
/*
 * Transmit a packet (with insane speed)
 */
/* mmnif_tx_bypass(): transmit a raw buffer straight into the remote
 * ring, bypassing the TCP/IP stack. 's' must be a registered fast
 * socket. Returns ERR_OK on success, ERR_IF if no bypass descriptor is
 * associated with 's'.
 */
static err_t mmnif_tx_bypass(struct netif * netif, void *pbuff, uint16_t size, int s)
{
	mmnif_t *mmnif = netif->state;
	uint32_t write_address;
	bypass_rxdesc_t *dest = mmnif_hashlookup(s);

	/* defensive fix: the original dereferenced 'dest' without checking
	 * whether the lookup succeeded */
	if (BUILTIN_EXPECT(!dest, 0))
		return ERR_IF;

	/* allocate memory for the packet in the remote buffer; spin until
	 * the receiver has freed enough space */
realloc:
	write_address = mmnif_rxbuff_alloc(dest->dest_ip, size);
	if (!write_address)
	{
		NOP8;NOP8;NOP8;NOP8;NOP8;NOP8;NOP8;NOP8;
		goto realloc;
	}
	/* write buffer to buffer & increment the queued packet count.
	 * This can be safely done without locking because the chunk is
	 * reserved for us: it has the status "pending" */
#if MMNIF_USE_MPB
	asm volatile (".byte 0x0f; .byte 0x0a;\n");
#endif
#if !MMNIF_USE_MPB
	memcpy_to_nc((void*) write_address, pbuff, size);
#else
	memcpy_put((void*) write_address, pbuff, size);
#endif
	*((int *)RCCE_fool_write_combine_buffer) = 1;
#if MMNIF_USE_MPB
	asm volatile (".byte 0x0f; .byte 0x0a;\n");
#endif
	if (mmnif_commit_packet_bypass(dest->dest_ip, write_address, dest->remote_socket))
	{
		DEBUGPRINTF("mmnif_tx_bypass(): packet somehow lost during commit\n");
	}
	/* just gather some stats */
	LINK_STATS_INC(link.xmit);
	mmnif->stats.tx++;
	mmnif->stats.tx_bytes += size;

	mmnif_trigger_irq(dest->dest_ip);
	return ERR_OK;
}
/* mmnif_send(): is going to be used as replacement of
* lwip_send with fast_sockets
*/
/* mmnif_send(): drop-in replacement for lwip_send() for fast sockets.
 * If 's' is a registered bypass socket, the data is sent through the
 * shared-memory fast path, split into chunks that fit half of the ring
 * buffer; otherwise it falls through to lwip_send().
 * Returns the number of bytes handed to the fast path (or lwip_send's
 * return value).
 */
int mmnif_send(int s, void *data, size_t size, int flags)
{
	bypass_rxdesc_t *p = mmnif_hashlookup(s);
	const size_t chunk = (MMNIF_RX_BUFFERLEN / 2) - CLINE_SIZE;
	uint32_t i, nchunks;
	size_t rem;
	int total_size = 0;

	if (p != 0)
	{
		if (size < chunk) {
			if (mmnif_tx_bypass(mmnif_dev, data, size, s) == ERR_OK)
				return size;
		} else {
			nchunks = size / chunk;
			rem = size - nchunks * chunk;
			for (i = 0; i < nchunks; i++)
			{
				if (mmnif_tx_bypass(mmnif_dev, (char*) data + i * chunk, chunk, s) != ERR_OK)
					return total_size;
				total_size += chunk;
			}
			/* fix: the remainder starts after ALL full chunks (offset
			 * nchunks * chunk); the original used (j - 1) and therefore
			 * resent part of the last chunk while dropping the tail
			 * bytes (it also did arithmetic on a void pointer) */
			if (rem != 0 && mmnif_tx_bypass(mmnif_dev, (char*) data + nchunks * chunk, rem, s) == ERR_OK)
				total_size += rem;
		}
		return total_size;
	}

	return lwip_send(s, data, size, flags);
}
#endif
/* mmnif_link_layer(): wrapper function called by ip_output()
* adding all needed headers for the link layer
* because we have no link layer and everything is reliable we don't need
* to add anything so we just pass it to our tx function
*/
/* mmnif_link_layer(): wrapper called by ip_output().
 * This interface has no link layer and transport is reliable, so no
 * headers need to be added — the packet is handed straight to the
 * device's linkoutput (tx) function. 'ipaddr' is intentionally unused.
 */
static err_t mmnif_link_layer(struct netif *netif, struct pbuf *q, ip_addr_t * ipaddr)
{
	(void) ipaddr;

	return netif->linkoutput(netif, q);
}
/*
* Init the device (called from lwip)
* It's invoked in netif_add
*/
err_t mmnif_init(struct netif *netif)
{
mmnif_t *mmnif;
uint32_t i, flags;
int num = 0;
2011-10-03 23:59:52 -07:00
//int tmp1, tmp2, n;
DEBUGPRINTF("mmnif init attempt\n");
mmnif_dev = netif;
/* Alloc and clear memory for the device struct
*/
mmnif = kmalloc(sizeof(mmnif_t));
if (!mmnif)
{
DEBUGPRINTF("mmnif init():out of memory\n");
return ERR_MEM;
}
2011-10-03 23:59:52 -07:00
memset(mmnif, 0x00, sizeof(mmnif_t));
/* Alloc and clear shared memory for rx_buff
*/
header_size = sizeof(mm_rx_buffer_t);
2011-10-03 03:52:14 -07:00
DEBUGPRINTF("mmnif_init() : size of mm_rx_buffer_t : %d\n", sizeof(mm_rx_buffer_t));
// align mpb size to the granularity of a page size
header_size = (header_size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
#if 1
if (!RCCE_IAM)
header_start_address = (char*) shmalloc((MMNIF_CORES * header_size) >> PAGE_SHIFT);
flags = irq_nested_disable();
RCCE_bcast((char*) &header_start_address, sizeof(header_start_address), 0, RCCE_COMM_WORLD);
irq_nested_enable(flags);
DEBUGPRINTF("shmalloc : %p (size %u)\n", header_start_address, MMNIF_CORES * header_size);
#else
header_start_address = (char*) RCCE_shmalloc(header_size * MMNIF_CORES);
DEBUGPRINTF("RCCE_shmalloc : %p (size %u)\n", header_start_address, MMNIF_CORES * header_size);
#endif
// map physical address in the virtual address space
header_start_address = (char*) map_region(0, (size_t) header_start_address, (MMNIF_CORES * header_size) >> PAGE_SHIFT, MAP_KERNEL_SPACE | MAP_WT | MAP_NO_CACHE);
2011-10-03 03:52:14 -07:00
DEBUGPRINTF("map_region : %p\n", header_start_address);
mmnif->rx_buff = (mm_rx_buffer_t *) (header_start_address + header_size * RCCE_IAM);
/* Alloc and clear shared memory for rx_buff
*/
heap_size = MMNIF_RX_BUFFERLEN;
#if MMNIF_USE_MPB
heap_start_address = RCCE_malloc(RCCE_LINE_SIZE);
2011-10-03 03:52:14 -07:00
DEBUGPRINTF("MessagePassingBuffer allocated @ : %p\n", heap_start_address);
for (i = heap_size / RCCE_LINE_SIZE - 1; i > 0; i--)
{
2011-10-03 03:52:14 -07:00
if (!RCCE_malloc(RCCE_LINE_SIZE))
{
DEBUGPRINTF("mmnif init(): allocating shared memory failed\n");
return ERR_MEM;
}
}
#else
2011-10-12 02:09:57 -07:00
// align size to the granularity of a page size
heap_size = (heap_size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
#if 1
if (!RCCE_IAM)
heap_start_address = (char*) shmalloc((heap_size * MMNIF_CORES) >> PAGE_SHIFT);
flags = irq_nested_disable();
RCCE_bcast((char*) &heap_start_address, sizeof(heap_start_address), 0, RCCE_COMM_WORLD);
irq_nested_enable(flags);
DEBUGPRINTF("shmalloc : %p (size %u)\n", heap_start_address, MMNIF_CORES * header_size);
#else
heap_start_address = (char*) RCCE_shmalloc(heap_size * MMNIF_CORES);
DEBUGPRINTF("RCCE_shmalloc : %p (size %u)\n", heap_start_address, MMNIF_CORES * header_size);
#endif
// map physical address in the virtual address space
#if USE_CACHE
size_t n = (size_t) heap_start_address;
heap_start_address = (char*) map_region(0, (size_t) heap_start_address, (MMNIF_CORES * heap_size) >> PAGE_SHIFT, MAP_KERNEL_SPACE | MAP_NO_CACHE | MAP_MPE | MAP_WT);
map_region((size_t) (heap_start_address + (heap_size) * RCCE_IAM), n + (heap_size) * RCCE_IAM, header_size >> PAGE_SHIFT, MAP_KERNEL_SPACE | MAP_MPE | MAP_WT | MAP_REMAP);
#else
heap_start_address = (char*) map_region(0, (size_t) heap_start_address, (MMNIF_CORES * heap_size) >> PAGE_SHIFT, MAP_KERNEL_SPACE | MAP_NO_CACHE | MAP_MPE | MAP_WT);
#endif // USE_CACHE
#endif // MMNIF_USE_MPB
DEBUGPRINTF("map_region : %p\n", heap_start_address);
#if MMNIF_USE_MPB
mmnif->rx_heap = heap_start_address;
heap_start_address = heap_start_address - (RC_MY_COREID/2 * 16*1024*1024 ) - (RC_MY_COREID%2 * 0x2000);
2011-10-03 03:52:14 -07:00
DEBUGPRINTF("heap_start_address : %p\n", heap_start_address);
#else
mmnif->rx_heap = heap_start_address + heap_size * RCCE_IAM;
#endif
if (!(heap_start_address))
{
DEBUGPRINTF("mmnif init(): allocating shared memory failed\n");
return ERR_MEM;
}
#if MMNIF_USE_MPB
2011-10-03 03:52:14 -07:00
asm volatile (".byte 0x0f; .byte 0x0a;\n");
#endif
2011-10-03 03:52:14 -07:00
for(i=0; i<header_size; i++)
((uint8_t*)mmnif->rx_buff)[i] = 0x00;
for(i=0; i<heap_size; i++)
((uint8_t*)mmnif->rx_heap)[i] = 0x00;
*((int *)RCCE_fool_write_combine_buffer) = 1;
#if MMNIF_USE_MPB
asm volatile (".byte 0x0f; .byte 0x0a;\n");
2011-10-03 23:59:52 -07:00
#endif
/* set initial values
*/
mmnif->rx_buff->dcount = MMNIF_MAX_DESCRIPTORS;
/* init the lock's for the hdr
*/
spinlock_init(&pseudolock);
/* init the sems for communication art
*/
sem_init(&mmnif->com_poll, 0);
for (i=0; i<MMNIF_HASHTABLE_SIZE; i++)
{
mmnif_hashtable[i].socket = -1;
mmnif_hashtable[i].remote_socket = -1;
mmnif_hashtable[i].dest_ip = 0;
2011-10-20 03:45:51 -07:00
//mmnif_hashtable[i].counter = 0;
sem_init(&mmnif_hashtable[i].sem, 0);
}
for (i=0; i<MMNIF_MAX_ACCEPTORS; i++)
{
mmnif->rx_buff->acceptors[i].stat = MMNIF_ACC_STAT_CLOSED;
mmnif->rx_buff->acceptors[i].nsock = -1;
mmnif->rx_buff->acceptors[i].rsock = -1;
mmnif->rx_buff->acceptors[i].src_ip = 0;
mmnif->rx_buff->acceptors[i].port = 0;
}
/* pass the device state to lwip */
netif->state = mmnif;
mmnif_dev = netif;
/* administrative details */
netif->name[0] = 'm';
netif->name[1] = 'm';
netif->num = num;
num++;
/* downward functions */
netif->output = mmnif_link_layer;
/* there is no special link layer just the ip layer */
netif->linkoutput = mmnif_tx;
/* maximum transfer unit */
2011-10-04 11:59:00 -07:00
netif->mtu = 1500;
/* broadcast capability, keep all default flags */
netif->flags |= NETIF_FLAG_BROADCAST;
/* hardware address length */
netif->hwaddr_len = 0;
// set interrupt handler (LINT1)
irq_install_handler(125, mmnif_irqhandler);
DEBUGPRINTF("mmnif init complete\n");
return ERR_OK;
}
/*
* Receive a packet : recieve, pack it up and pass over to higher levels
*/
static void mmnif_rx(struct netif *netif)
{
mmnif_t *mmnif = netif->state;
volatile mm_rx_buffer_t *b = mmnif->rx_buff;
uint16_t length = 0;
2011-10-03 23:59:52 -07:00
struct pbuf *p;
struct pbuf *q;
char *packet = NULL;
2011-10-20 03:45:51 -07:00
uint32_t i, j, flags;
2011-10-03 23:59:52 -07:00
uint8_t rdesc;
err_t err = ERR_OK;
bypass_rxdesc_t *bp;
anotherpacket:
2011-10-20 03:45:51 -07:00
flags = irq_nested_disable();
2011-10-03 23:59:52 -07:00
rdesc = 0xFF;
/* check if this call to mmnif_rx makes any sense
*/
if (b->desc_table[b->dread].stat == MMNIF_STATUS_FREE)
{
2011-10-03 23:59:52 -07:00
goto out;
}
/* search the packet whose transmission is finished
*/
for (i = 0, j = b->dread; i < MMNIF_MAX_DESCRIPTORS; i++)
{
2011-10-03 23:59:52 -07:00
if (b->desc_table[(j + i) % MMNIF_MAX_DESCRIPTORS].stat == MMNIF_STATUS_RDY)
{
rdesc = (j + i) % MMNIF_MAX_DESCRIPTORS;
2011-10-03 23:59:52 -07:00
if (b->desc_table[(j + i) % MMNIF_MAX_DESCRIPTORS].fast_sock == -1)
{
b->desc_table[rdesc].stat = MMNIF_STATUS_INPROC;
packet = (char *)b->desc_table[rdesc].addr;
length = b->desc_table[rdesc].len;
break;
2011-10-03 03:52:14 -07:00
} else {
2011-10-03 23:59:52 -07:00
bp = mmnif_hashlookup(b->desc_table[rdesc].fast_sock);
if (!bp)
{
2011-10-03 23:59:52 -07:00
DEBUGPRINTF("mmnif_rx(): no fast socket associated with %d", b->desc_table[rdesc].fast_sock);
mmnif->rx_buff->desc_table[rdesc].stat = MMNIF_STATUS_PROC;
mmnif_rxbuff_free();
goto out;
2011-10-03 03:52:14 -07:00
} else {
b->desc_table[rdesc].stat = MMNIF_STATUS_INPROC;
sem_post(&bp->sem);
2011-10-20 03:45:51 -07:00
irq_nested_enable(flags);
return;
}
}
}
2011-10-03 23:59:52 -07:00
if (b->desc_table[(j + i) % MMNIF_MAX_DESCRIPTORS].stat == MMNIF_STATUS_FREE)
{
2011-10-03 23:59:52 -07:00
goto out;
}
}
/* if there is no packet finished we encountered a random error
*/
if (rdesc == 0xFF)
2011-10-03 23:59:52 -07:00
goto out;
/* If length is zero return silently
*/
2011-10-04 11:59:00 -07:00
if (BUILTIN_EXPECT(length == 0, 0))
{
DEBUGPRINTF("mmnif_rx(): empty packet error\n");
2011-10-03 23:59:52 -07:00
goto out;
}
2011-10-03 23:59:52 -07:00
2011-10-20 03:45:51 -07:00
irq_nested_enable(flags);
2011-10-04 11:59:00 -07:00
/* check for over/underflow */
if (BUILTIN_EXPECT((length < 20 /* IP header size */) || (length > 1536), 0))
{
DEBUGPRINTF("mmnif_rx(): illegal packet length %d => drop the packet\n", length);
goto drop_packet;
}
/* From now on there is a real packet and it
* has to be worked on
*/
#ifdef DEBUG_MMNIF_PACKET
2011-10-03 03:52:14 -07:00
DEBUGPRINTF("\n RECIEVED - %p with legth: %d\n", packet, length);
hex_dump(length, packet);
#endif
/* Build the pbuf for the packet so the lwip
* and other higher layer can handle it
*/
p = pbuf_alloc(PBUF_RAW, length, PBUF_POOL);
if (BUILTIN_EXPECT(!p, 0))
{
DEBUGPRINTF("mmnif_rx(): low on mem - packet dropped\n");
goto drop_packet;
}
2011-10-03 23:59:52 -07:00
2011-10-04 11:59:00 -07:00
#if USE_CACHE || MMNIF_USE_MPB
asm volatile (".byte 0x0f; .byte 0x0a;\n");
2011-10-03 03:52:14 -07:00
#endif
/* copy packet to pbuf structure going through linked list */
for (q = p, i = 0; q != NULL; q = q->next)
{
#if !USE_CACHE && !MMNIF_USE_MBP
2011-10-17 04:57:17 -07:00
memcpy_from_nc((uint8_t *) q->payload, packet + i, q->len);
#elif MMNIF_USE_MPB
2011-10-17 04:57:17 -07:00
memcpy_get((uint8_t *) q->payload, packet + i, q->len);
2011-10-03 03:52:14 -07:00
#else
2011-10-17 04:57:17 -07:00
memcpy((uint8_t *) q->payload, packet + i, q->len);
2011-10-03 03:52:14 -07:00
#endif
i += q->len;
}
*((int *)RCCE_fool_write_combine_buffer) = 1;
#if MMNIF_USE_MPB
asm volatile (".byte 0x0f; .byte 0x0a;\n");
#endif
/* indicate that the copy process is done and the packet can be freed
* note that we did not lock here because we are the only one editing this value
*/
mmnif->rx_buff->desc_table[rdesc].stat = MMNIF_STATUS_PROC;
/* everything is copied to a new buffer so it's save to release
* the old one for new incoming packets
*/
mmnif_rxbuff_free();
2011-10-03 23:59:52 -07:00
/*
* This function is called in the context of the tcpip thread.
* Therefore, we are able to call directly the input functions.
*/
if ((err = mmnif_dev->input(p, mmnif_dev)) != ERR_OK)
{
DEBUGPRINTF("mmnif_rx: IP input error\n");
pbuf_free(p);
}
/* gather some stats and leave the rx handler */
LINK_STATS_INC(link.xmit);
mmnif->stats.rx++;
mmnif->stats.rx_bytes += p->tot_len;
goto anotherpacket;
drop_packet:
2011-10-04 11:59:00 -07:00
/* TODO: error handling */
LINK_STATS_INC(link.drop);
mmnif->stats.rx_err++;
2011-10-20 03:45:51 -07:00
mmnif->check_in_progress = 0;
return;
2011-10-03 23:59:52 -07:00
out:
mmnif->check_in_progress = 0;
2011-10-20 03:45:51 -07:00
irq_nested_enable(flags);
return;
}
#if LWIP_SOCKET
/* mmnif_rx_bypass(): recieve packets
* with insane speed ;)
*/
static int mmnif_rx_bypass(struct netif *netif, int s, void *data, uint32_t len)
{
mmnif_t *mmnif = netif->state;
volatile mm_rx_buffer_t *b = mmnif->rx_buff;
uint16_t length = 0;
char *packet = NULL;
uint32_t i, j;
uint8_t rdesc = 0xFF;
/* check if this call to mmnif_rx makes any sense
*/
if (b->desc_table[b->dread].stat == MMNIF_STATUS_FREE) {
return -1;
}
/* search the packet whose transmission is finished
*/
for (i = 0, j = b->dread; i < MMNIF_MAX_DESCRIPTORS; i++)
{
2011-10-03 23:59:52 -07:00
if (b->desc_table[(j + i) % MMNIF_MAX_DESCRIPTORS].stat == MMNIF_STATUS_INPROC
&& b->desc_table[(j + i) % MMNIF_MAX_DESCRIPTORS].fast_sock != -1)
{
rdesc = (j + i) % MMNIF_MAX_DESCRIPTORS;
packet = (char *)b->desc_table[rdesc].addr;
length = b->desc_table[rdesc].len;
b->desc_table[rdesc].stat = MMNIF_STATUS_INPROC_BYPASS;
break;
}
}
/* if there is no packet finished we encountered a random error
*/
if (rdesc == 0xFF)
return -1;
/* If length is zero return silently
*/
if (length == 0)
{
DEBUGPRINTF("mmnif_rx(): empty packet error\n");
return -1;
}
/* From now on there is a real packet and it
* has to be worked on
*/
#ifdef DEBUG_MMNIF_PACKET
2011-10-03 03:52:14 -07:00
DEBUGPRINTF("\n RECIEVED - %p with legth: %d\n", packet, length);
hex_dump(length, packet);
#endif
if (BUILTIN_EXPECT(len < length, 0))
goto drop_packet;
#if USE_CACHE || MMNIF_USE_MPB
asm volatile (".byte 0x0f; .byte 0x0a;\n");
#endif
#if !USE_CACHE && !MMNIF_USE_MBP
memcpy_from_nc(data, (void*) mmnif->rx_buff->desc_table[rdesc].addr, mmnif->rx_buff->desc_table[rdesc].len);
#elif MMNIF_USE_MPB
memcpy_get(data, (void*) mmnif->rx_buff->desc_table[rdesc].addr, mmnif->rx_buff->desc_table[rdesc].len);
#else
memcpy(data, (void*) mmnif->rx_buff->desc_table[rdesc].addr, mmnif->rx_buff->desc_table[rdesc].len);
#endif
*((int *)RCCE_fool_write_combine_buffer) = 1;
#if MMNIF_USE_MPB
asm volatile (".byte 0x0f; .byte 0x0a;\n");
#endif
/* indicate that the copy process is done and the packet can be freed
* note that we did not lock here because we are the only one editing this value
*/
b->desc_table[rdesc].stat = MMNIF_STATUS_PROC;
/* everything is copied to a new buffer so it's save to release
* the old one for new incoming packets
*/
mmnif_rxbuff_free();
/* gather some stats and leave the rx handler */
LINK_STATS_INC(link.xmit);
mmnif->stats.rx++;
mmnif->stats.rx_bytes += length;
return length;
drop_packet:
LINK_STATS_INC(link.drop);
mmnif->stats.rx_err++;
return -1;
}
/* mmnif_recv(): replacement of lwip_recv
 * for fast sockets
 *
 * Falls back to lwip_recv() for sockets without a bypass descriptor.
 * Otherwise waits until a packet for a fast socket is available and
 * hands the copy work to mmnif_rx_bypass().
 */
int mmnif_recv(int s, void *data, uint32_t len, int flags)
{
	mmnif_t* mmnif = (mmnif_t *) mmnif_dev->state;
	bypass_rxdesc_t *p = mmnif_hashlookup(s);

	/* no bypass descriptor registered => this is a normal lwIP socket */
	if (p == 0)
		return lwip_recv(s, data, len, flags);

	/* a packet is already pending (semaphore was posted) => fetch it */
	if (sem_trywait(&p->sem) == 0)
		return mmnif_rx_bypass(mmnif_dev, s, data, len);

	/* nothing pending yet: check the receive ring ourselves, with
	 * interrupts off so the irq handler cannot race us */
	uint32_t state = irq_nested_disable();
	if (mmnif->check_in_progress) {
		uint32_t i,j;
		volatile mm_rx_buffer_t *b = mmnif->rx_buff;
		bypass_rxdesc_t *bp;
		uint8_t rdesc;

		/* search the packet whose transmission is finished
		 */
		for (i = 0, j = b->dread; i < MMNIF_MAX_DESCRIPTORS; i++)
		{
			if (b->desc_table[(j + i) % MMNIF_MAX_DESCRIPTORS].stat == MMNIF_STATUS_RDY)
			{
				rdesc = (j + i) % MMNIF_MAX_DESCRIPTORS;
				if (b->desc_table[(j + i) % MMNIF_MAX_DESCRIPTORS].fast_sock != -1) {
					bp = mmnif_hashlookup(b->desc_table[rdesc].fast_sock);
					if (bp) {
						/* claim the descriptor, re-enable irqs and copy it out.
						 * NOTE(review): the descriptor's fast_sock is not compared
						 * against 's' here — presumably any fast-socket packet may
						 * satisfy this receive; verify against the tx side. */
						b->desc_table[rdesc].stat = MMNIF_STATUS_INPROC;
						irq_nested_enable(state);
						return mmnif_rx_bypass(mmnif_dev, s, data, len);
					}
				}
			}
		}

		/* nothing usable found: allow the irq handler to schedule a
		 * new poll of the ring */
		mmnif->check_in_progress = 0;
	}
	irq_nested_enable(state);

	/* block until the irq/rx path posts a packet for this socket */
	sem_wait(&p->sem, 0);
	return mmnif_rx_bypass(mmnif_dev, s, data, len);
}
/* mmnif_socket(): replacement of lwip_socket for
 * fast sockets
 *
 * For the AF_MMNIF_NET domain a new pseudo-socket id is handed out;
 * every other domain is served by the regular lwIP stack.
 */
int mmnif_socket(int domain, int type, int protocol)
{
	if (domain != AF_MMNIF_NET)
		return lwip_socket(domain, type, protocol);

	/* allocate the next pseudo-socket id under the lock */
	spinlock_lock(&pseudolock);
	int sock = npseudosocket++;
	spinlock_unlock(&pseudolock);

	return sock;
}
/* mmnif_accept(): replacement of lwip_accept for
 * fast sockets
 *
 * Publishes a free acceptor slot (hashed by port) in the local receive
 * buffer and spin-waits until a remote core marks it ACCEPT_ME.  For
 * pseudo sockets (s >= MMNIF_PSEUDO_SOCKET_START) the wait is unbounded;
 * for real lwIP sockets it is bounded by MMNIF_AUTO_SOCKET_TIMEOUT and
 * falls back to lwip_accept() on timeout.
 *
 * @return the new pseudo-socket id, -1 if no free acceptor slot exists,
 *         or the result of lwip_accept() after a timeout fallback
 */
int mmnif_accept(int s, struct sockaddr *addr, socklen_t * addrlen)
{
	struct sockaddr_in *client = (struct sockaddr_in*)addr;
	volatile mm_rx_buffer_t *b = ((mmnif_t *) mmnif_dev->state)->rx_buff;
	bypass_rxdesc_t *p;
	int tmp1 = get_clock_tick();	/* start tick for the timeout below */
	int i, tmp2 = 0;
	uint16_t port;

	// TODO: Bug, not compatible with BSD sockets
	port = client->sin_port;

	if ((unsigned int)s >= MMNIF_PSEUDO_SOCKET_START)
	{
		/* pseudo socket: wait forever for a remote connect */
		for (i = 0; i < MMNIF_MAX_ACCEPTORS; i++)
		{
			if (b->acceptors[(i + port) % MMNIF_MAX_ACCEPTORS].stat == MMNIF_ACC_STAT_CLOSED)
			{
				/* publish the acceptor slot under the inter-core lock */
				RCCE_acquire_lock(RC_MY_COREID);
				b->acceptors[(i + port) % MMNIF_MAX_ACCEPTORS].port = port;
				b->acceptors[(i + port) % MMNIF_MAX_ACCEPTORS].stat = MMNIF_ACC_STAT_ACCEPTING;
				spinlock_lock(&pseudolock);
				mmnif_hashadd(npseudosocket, -1, 0);
				b->acceptors[(i + port) % MMNIF_MAX_ACCEPTORS].nsock = npseudosocket++;
				spinlock_unlock(&pseudolock);
				RCCE_release_lock(RC_MY_COREID);

				/* spin until a remote core connects to this slot */
				while (b->acceptors[(i + port) % MMNIF_MAX_ACCEPTORS].stat != MMNIF_ACC_STAT_ACCEPT_ME)
					NOP8;

				/* take over the peer information left by the connector */
				p = mmnif_hashlookup(b->acceptors[(i + port) % MMNIF_MAX_ACCEPTORS].nsock);
				p->dest_ip = b->acceptors[(i + port) % MMNIF_MAX_ACCEPTORS].src_ip;
				p->remote_socket = b->acceptors[(i + port) % MMNIF_MAX_ACCEPTORS].rsock;
				RCCE_acquire_lock(RC_MY_COREID);
				b->acceptors[(i + port) % MMNIF_MAX_ACCEPTORS].stat = MMNIF_ACC_STAT_ACCEPTED;
				i = b->acceptors[(i + port) % MMNIF_MAX_ACCEPTORS].nsock;
				RCCE_release_lock(RC_MY_COREID);

				return i;
			}
		}
		return -1;
	} else {
		/* real lwIP socket: same handshake, but with a timeout that
		 * falls back to the normal accept() path */
		for (i = 0; i < MMNIF_MAX_ACCEPTORS; i++)
		{
			if (b->acceptors[(i + port) % MMNIF_MAX_ACCEPTORS].stat == MMNIF_ACC_STAT_CLOSED)
			{
				RCCE_acquire_lock(RC_MY_COREID);
				b->acceptors[(i + port) % MMNIF_MAX_ACCEPTORS].port = port;
				b->acceptors[(i + port) % MMNIF_MAX_ACCEPTORS].stat = MMNIF_ACC_STAT_ACCEPTING;
				spinlock_lock(&pseudolock);
				mmnif_hashadd(npseudosocket, -1, 0);
				b->acceptors[(i + port) % MMNIF_MAX_ACCEPTORS].nsock = npseudosocket++;
				spinlock_unlock(&pseudolock);
				RCCE_release_lock(RC_MY_COREID);

				while (b->acceptors[(i + port) % MMNIF_MAX_ACCEPTORS].stat != MMNIF_ACC_STAT_ACCEPT_ME)
				{
					tmp2 = get_clock_tick();
					if (tmp2 - tmp1 > MMNIF_AUTO_SOCKET_TIMEOUT)
					{
						/* re-check under the lock: the remote side may have
						 * connected just as the timeout fired */
						RCCE_acquire_lock(RC_MY_COREID);
						if (b->acceptors[(i + port) % MMNIF_MAX_ACCEPTORS].stat == MMNIF_ACC_STAT_ACCEPT_ME)
						{
							/* BUGFIX: the original called RCCE_acquire_lock()
							 * here a second time on the already-held lock,
							 * so this early exit never released it */
							RCCE_release_lock(RC_MY_COREID);
							break;
						}
						DEBUGPRINTF("mmnif_accept(): Timout occoured, switching to normal accept()");
						mmnif_hashdelete(b->acceptors[(i + port) % MMNIF_MAX_ACCEPTORS].nsock);
						b->acceptors[(i + port) % MMNIF_MAX_ACCEPTORS].stat = MMNIF_ACC_STAT_CLOSED;
						RCCE_release_lock(RC_MY_COREID);
						goto normalaccept;
					}
					NOP8;
				}

				p = mmnif_hashlookup(b->acceptors[(i + port) % MMNIF_MAX_ACCEPTORS].nsock);
				p->dest_ip = b->acceptors[(i + port) % MMNIF_MAX_ACCEPTORS].src_ip;
				p->remote_socket = b->acceptors[(i + port) % MMNIF_MAX_ACCEPTORS].rsock;
				RCCE_acquire_lock(RC_MY_COREID);
				b->acceptors[(i + port) % MMNIF_MAX_ACCEPTORS].stat = MMNIF_ACC_STAT_ACCEPTED;
				i = b->acceptors[(i + port) % MMNIF_MAX_ACCEPTORS].nsock;
				RCCE_release_lock(RC_MY_COREID);

				return i;
			}
		}
		return -1;
	}

normalaccept:
	return lwip_accept(s, addr, addrlen);
}
/* mmnif_connect(): replacement of lwip_connect for
 * fast sockets
 *
 * If the destination's last IP octet maps to an SCC core, look for a
 * matching acceptor slot in that core's receive buffer and perform the
 * fast-socket handshake; otherwise fall back to lwip_connect().
 */
int mmnif_connect(int s, const struct sockaddr *name, socklen_t namelen)
{
	struct sockaddr_in *p = (struct sockaddr_in*) name;
	uint16_t port = p->sin_port;
	volatile mm_rx_buffer_t *b;
	int i;
	//int tmp1 = get_clock_tick();
	//int tmp2 = 0;
	uint8_t core;

	/* last octet of the destination address selects the target core */
	core = ip4_addr4(&p->sin_addr.s_addr);
	if ((core) < 1 || (core > MMNIF_CORES))
		return lwip_connect(s, name, namelen);

	/* locate the target core's receive buffer in shared memory */
	b = (volatile mm_rx_buffer_t *) ((char *)header_start_address + (core - 1) * header_size);

	for (i = 0; i < MMNIF_MAX_ACCEPTORS; i++)
	{
		/* find an acceptor slot on the remote core waiting on this port */
		if (b->acceptors[(i + port) % MMNIF_MAX_ACCEPTORS].stat == MMNIF_ACC_STAT_ACCEPTING
		    && b->acceptors[(i + port) % MMNIF_MAX_ACCEPTORS].port == port)
		{
			/* announce ourselves under the remote core's lock */
			RCCE_acquire_lock(RC_COREID[core-1]);
			b->acceptors[(i + port) % MMNIF_MAX_ACCEPTORS].stat = MMNIF_ACC_STAT_ACCEPT_ME;
			b->acceptors[(i + port) % MMNIF_MAX_ACCEPTORS].rsock = s;
			b->acceptors[(i + port) % MMNIF_MAX_ACCEPTORS].src_ip = ip4_addr4(&mmnif_dev->ip_addr);
			mmnif_hashadd(s, b->acceptors[(i + port) % MMNIF_MAX_ACCEPTORS].nsock, core);
			RCCE_release_lock(RC_COREID[core-1]);

			/* spin until the acceptor acknowledges; the timeout code
			 * below was disabled intentionally (left for reference) */
			while (b->acceptors[(i + port) % MMNIF_MAX_ACCEPTORS].stat != MMNIF_ACC_STAT_ACCEPTED)
			{
				// tmp2 = get_clock_tick();
				// if (tmp2 - tmp1 > MMNIF_AUTO_SOCKET_TIMEOUT)
				// {
				//#ifdef DEBUG_MMNIF
				// DEBUGPRINTF("mmnif_connect(): Timout occoured, switching to normal connect()");
				//#endif
				// mmnif_hashdelete(s);
				// goto normalsend;
				// }
				NOP8;
			}

			/* handshake done: return the slot to the CLOSED pool */
			RCCE_acquire_lock(RC_COREID[core-1]);
			b->acceptors[(i + port) % MMNIF_MAX_ACCEPTORS].stat = MMNIF_ACC_STAT_CLOSED;
			RCCE_release_lock(RC_COREID[core-1]);

			return 0;
		}
	}

	DEBUGPRINTF("mmnif_connect(): no acceptor found");
	return -1;
}
/* mmnif_listen(): replacement of lwip_listen for fast sockets.
 * Pseudo sockets carry no lwIP listen state, so only real lwIP
 * sockets are forwarded to the stack.
 */
int mmnif_listen(int s, int backlog)
{
	if ((unsigned int)s >= MMNIF_PSEUDO_SOCKET_START)
		return 0;
	return lwip_listen(s, backlog);
}
/* mmnif_bind(): replacement of lwip_bind for fast sockets.
 * Binding is a no-op for pseudo sockets; real lwIP sockets are
 * forwarded to the stack.
 */
int mmnif_bind(int s, const struct sockaddr *name, socklen_t namelen)
{
	if ((unsigned int)s >= MMNIF_PSEUDO_SOCKET_START)
		return 0;
	return lwip_bind(s, name, namelen);
}
/* mmnif_setsockopt(): replacement of lwip_setsockopt for fast sockets.
 * Pseudo sockets silently accept any option; real lwIP sockets are
 * forwarded to the stack.
 */
int mmnif_setsockopt(int s, int level, int optname, const void *optval, socklen_t optlen)
{
	if ((unsigned int)s >= MMNIF_PSEUDO_SOCKET_START)
		return 0;
	return lwip_setsockopt(s, level, optname, optval, optlen);
}
/* mmnif_closesocket(): replacement of lwip_close for
 * fast sockets
 *
 * Removes the bypass mapping of the socket; for real lwIP sockets the
 * underlying socket is closed as well.
 */
int mmnif_closesocket(int s)
{
	/* unknown sockets cannot be closed */
	if (mmnif_hashlookup(s) == 0)
		return -1;

	/* drop the bypass descriptor in any case */
	mmnif_hashdelete(s);

	/* pseudo sockets have no lwIP state behind them */
	if ((unsigned int)s >= MMNIF_PSEUDO_SOCKET_START)
		return 0;

	return lwip_close(s);
}
#endif
/* mmnif_irqhandler():
 * handles the incomint interrupts
 *
 * Schedules packet reception (directly with NO_SYS, otherwise via a
 * callback into the tcpip thread) and acknowledges the interrupt in
 * the core's GLCFG configuration register.
 */
static void mmnif_irqhandler(struct state* s)
{
	static int z = -1;	/* cached Z position of this core; -1 = not yet determined */
	int tmp;
#if !NO_SYS
	mmnif_t *mmnif;
#endif

	if (z < 0) // Find out who I am...
		z = Z_PID(RC_MY_COREID);

	/* return if mmnif_dev is not yet initialized */
	if (!mmnif_dev)
	{
		DEBUGPRINTF("mmnif_irqhandler(): the driver is not initialized yet\n");
		return;
	}

#if NO_SYS
	/* no OS threads: process the receive ring right here */
	mmnif_rx((void*) mmnif_dev);
#else
	/* defer the receive work to the tcpip thread; check_in_progress
	 * ensures only one poll request is outstanding at a time */
	mmnif = (mmnif_t *) mmnif_dev->state;
	if (!mmnif->check_in_progress) {
		if (tcpip_callback_with_block((tcpip_callback_fn) mmnif_rx, (void*) mmnif_dev, 0) == ERR_OK) {
			mmnif->check_in_progress = 1;
		} else {
			DEBUGPRINTF("rckemacif_handler: unable to send a poll request to the tcpip thread\n");
		}
	}
#endif

	/* clear bit 0 of our core's GLCFG register (GLCFG0 for Z position 0,
	 * GLCFG1 otherwise) — presumably this deasserts the interrupt line;
	 * confirm with the SCC configuration-register documentation */
	tmp = ReadConfigReg(CRB_OWN + (z==0 ? GLCFG0 : GLCFG1));
	tmp &= ~1;
	SetConfigReg(CRB_OWN + (z==0 ? GLCFG0 : GLCFG1), tmp);
}
/*
* close the interface should be called by kernel to close this interface and release resources
* Note: it's temporarly empty. Support will be added.
*/
err_t mmnif_shutdown(void)
{
err_t err;
if (!mmnif_dev) {
DEBUGPRINTF("mmnif_shutdown(): you closed the device before it was properly initialized -.-* \n");
return ERR_MEM;
}
#if NO_SYS
netif_set_down(mmnif_dev);
err = ERR_OK;
#else
err = netifapi_netif_set_down(mmnif_dev);
#endif
//RCCE_shfree(mpb_start_address);
2011-10-03 23:59:52 -07:00
mmnif_dev = NULL;
return err;
}
#endif