By default, the kernel enables the L1 cache for RX buffers

This commit is contained in:
Stefan Lankes 2012-09-12 00:41:15 -07:00
parent 859c11e399
commit 0c61f5dee1
1 changed file with 21 additions and 19 deletions

View File

@ -50,8 +50,6 @@
#include <asm/irq.h>
#include <asm/RCCE.h>
#include <asm/RCCE_lib.h>
#include <asm/iRCCE.h>
#include <asm/iRCCE_lib.h>
#include <asm/SCC_API.h>
#include <asm/scc_memcpy.h>
@ -68,7 +66,7 @@
//#define DEBUG_MMNIF_PACKET
// set to 1 if you want to enable the L1 cache for the receive buffers
#define USE_CACHE 0
#define USE_CACHE 1
// set to 1 if you want to use the message passing buffer
#define MMNIF_USE_MPB 0
@ -865,7 +863,7 @@ static err_t mmnif_link_layer(struct netif *netif, struct pbuf *q, ip_addr_t * i
err_t mmnif_init(struct netif *netif)
{
mmnif_t *mmnif;
uint32_t i;
uint32_t i, flags;
int num = 0;
//int tmp1, tmp2, n;
@ -885,23 +883,25 @@ err_t mmnif_init(struct netif *netif)
/* Alloc and clear shared memory for rx_buff
*/
header_size = (sizeof(mm_rx_buffer_t));
header_size = sizeof(mm_rx_buffer_t);
DEBUGPRINTF("mmnif_init() : size of mm_rx_buffer_t : %d\n", sizeof(mm_rx_buffer_t));
// align mpb size to the granularity of a page size
header_size = (header_size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
#if 1
if (RCCE_IAM == 0)
header_start_address = (void*) shmalloc((MMNIF_CORES * header_size) >> PAGE_SHIFT);
if (!RCCE_IAM)
header_start_address = (char*) shmalloc((MMNIF_CORES * header_size) >> PAGE_SHIFT);
flags = irq_nested_disable();
RCCE_bcast((char*) &header_start_address, sizeof(header_start_address), 0, RCCE_COMM_WORLD);
irq_nested_enable(flags);
DEBUGPRINTF("shmalloc : %p (size %u)\n", header_start_address, MMNIF_CORES * header_size);
#else
header_start_address = (void*) RCCE_shmalloc(header_size * MMNIF_CORES);
header_start_address = (char*) RCCE_shmalloc(header_size * MMNIF_CORES);
DEBUGPRINTF("RCCE_shmalloc : %p (size %u)\n", header_start_address, MMNIF_CORES * header_size);
#endif
// map physical address in the virtual address space
header_start_address = (void*) map_region(0, (size_t) header_start_address, (MMNIF_CORES * header_size) >> PAGE_SHIFT, MAP_KERNEL_SPACE | MAP_WT | MAP_NO_CACHE);
header_start_address = (char*) map_region(0, (size_t) header_start_address, (MMNIF_CORES * header_size) >> PAGE_SHIFT, MAP_KERNEL_SPACE | MAP_WT | MAP_NO_CACHE);
DEBUGPRINTF("map_region : %p\n", header_start_address);
mmnif->rx_buff = (mm_rx_buffer_t *) (header_start_address + header_size * RCCE_IAM);
@ -924,24 +924,26 @@ err_t mmnif_init(struct netif *netif)
// align size to the granularity of a page size
heap_size = (heap_size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
#if 1
if (RCCE_IAM == 0)
heap_start_address = (void*) shmalloc((heap_size * MMNIF_CORES) >> PAGE_SHIFT);
if (!RCCE_IAM)
heap_start_address = (char*) shmalloc((heap_size * MMNIF_CORES) >> PAGE_SHIFT);
flags = irq_nested_disable();
RCCE_bcast((char*) &heap_start_address, sizeof(heap_start_address), 0, RCCE_COMM_WORLD);
irq_nested_enable(flags);
DEBUGPRINTF("shmalloc : %p (size %u)\n", heap_start_address, MMNIF_CORES * header_size);
#else
heap_start_address = (void*) RCCE_shmalloc(heap_size * MMNIF_CORES);
heap_start_address = (char*) RCCE_shmalloc(heap_size * MMNIF_CORES);
DEBUGPRINTF("RCCE_shmalloc : %p (size %u)\n", heap_start_address, MMNIF_CORES * header_size);
#endif
// map physical address in the virtual address space
#if USE_CACHE
uint32_t n = (uint32_t) heap_start_address;
heap_start_address = map_region(0, heap_start_address, (MMNIF_CORES * heap_size) >> PAGE_SHIFT, MAP_KERNEL_SPACE | MAP_NO_CACHE | MAP_MPE | MAP_WT);
map_region(heap_start_address + (heap_size) * RCCE_IAM, n + (heap_size) * RCCE_IAM, header_size >> PAGE_SHIFT, MAP_KERNEL_SPACE | MAP_MPE | MAP_WT | MAP_REMAP);
size_t n = (size_t) heap_start_address;
heap_start_address = (char*) map_region(0, (size_t) heap_start_address, (MMNIF_CORES * heap_size) >> PAGE_SHIFT, MAP_KERNEL_SPACE | MAP_NO_CACHE | MAP_MPE | MAP_WT);
map_region((size_t) (heap_start_address + (heap_size) * RCCE_IAM), n + (heap_size) * RCCE_IAM, header_size >> PAGE_SHIFT, MAP_KERNEL_SPACE | MAP_MPE | MAP_WT | MAP_REMAP);
#else
heap_start_address = (void*) map_region(0, (size_t) heap_start_address, (MMNIF_CORES * heap_size) >> PAGE_SHIFT, MAP_KERNEL_SPACE | MAP_NO_CACHE | MAP_MPE | MAP_WT);
#endif // USE_CACHE
#endif // MMNIF_USE_MPB
heap_start_address = (char*) map_region(0, (size_t) heap_start_address, (MMNIF_CORES * heap_size) >> PAGE_SHIFT, MAP_KERNEL_SPACE | MAP_NO_CACHE | MAP_MPE | MAP_WT);
#endif // USE_CACHE
#endif // MMNIF_USE_MPB
DEBUGPRINTF("map_region : %p\n", heap_start_address);
#if MMNIF_USE_MPB
@ -1589,7 +1591,7 @@ static void mmnif_irqhandler(struct state* s)
#else
mmnif = (mmnif_t *) mmnif_dev->state;
if (!mmnif->check_in_progress) {
if (tcpip_callback_with_block(mmnif_rx, (void*) mmnif_dev, 0) == ERR_OK) {
if (tcpip_callback_with_block((tcpip_callback_fn) mmnif_rx, (void*) mmnif_dev, 0) == ERR_OK) {
mmnif->check_in_progress = 1;
} else {
DEBUGPRINTF("rckemacif_handler: unable to send a poll request to the tcpip thread\n");