first steps to support affinity at first touch

Stefan Lankes 2011-11-16 03:12:09 -08:00
parent d9e0d932f0
commit cf76781baa
9 changed files with 177 additions and 57 deletions

View file

@@ -42,7 +42,7 @@ extern bootinfo_t* bootinfo;
enum icc_mail_requests {
PING_REQ=1,
PING_RESP,
SVM_REQUEST,
SVM_REQ,
SVM_RESP,
NOISE,
};

View file

@@ -43,6 +43,7 @@
#define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
#define _PAGE_BIT_SVM_STRONG 9 /* mark a virtual address range as used by the SVM system */
#define _PAGE_BIT_SVM_LAZYRELEASE 10 /* mark a virtual address range as used by the SVM system */
#define _PAGE_BIT_SVM_INIT 11 /* mark if the MPB proxy is used */
/// Page is present
#define PG_PRESENT (1 << _PAGE_BIT_PRESENT)
@@ -71,6 +72,8 @@
#define PG_SVM_STRONG (1 << _PAGE_BIT_SVM_STRONG)
/// This virtual address range is used by the SVM system
#define PG_SVM_LAZYRELEASE (1 << _PAGE_BIT_SVM_LAZYRELEASE)
/// Currently, no page frame is behind this page (only the MPB proxy)
#define PG_SVM_INIT (1 << _PAGE_BIT_SVM_INIT)
/// This is a whole set of flags (PRESENT,RW,ACCESSED,DIRTY) for kernelspace tables
#define KERN_TABLE (PG_PRESENT|PG_RW|PG_ACCESSED|PG_DIRTY)
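
PG_SVM_INIT marks a page whose frame has not been allocated yet: until the first touch, the entry points at the MPB proxy instead of a real frame. A minimal sketch of how the fault path tests and rewrites such an entry, assuming 32-bit PTEs and the bit layout above (helper names hypothetical):

#include <stdint.h>

#define _PAGE_BIT_PRESENT  0
#define _PAGE_BIT_SVM_INIT 11
#define PG_PRESENT  (1 << _PAGE_BIT_PRESENT)
#define PG_SVM_INIT (1 << _PAGE_BIT_SVM_INIT)

/* Is this entry still backed only by the MPB proxy? */
static inline int pte_is_svm_init(uint32_t pte)
{
	return (pte & PG_SVM_INIT) != 0;
}

/* First touch: drop the proxy and install the real frame. */
static inline uint32_t pte_install_frame(uint32_t pte, uint32_t frame)
{
	pte &= 0xFFF;          /* keep only the flag bits */
	pte &= ~PG_SVM_INIT;   /* the proxy is gone       */
	return pte | frame | PG_PRESENT;
}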

View file

@@ -21,6 +21,7 @@
#define __ARCH_SVM_H__
#include <metalsvm/stddef.h>
#include <asm/page.h>
#ifdef CONFIG_ROCKCREEK
#include <asm/RCCE_lib.h>
#endif
@@ -71,6 +72,8 @@ int svm_barrier(uint32_t flags);
*/
int svm_access_request(size_t addr);
int svm_alloc_page(size_t addr, page_table_t* pgt);
/** @brief emit a page to core ue
*
* @return

View file

@@ -364,6 +364,9 @@ size_t map_region(size_t viraddr, size_t phyaddr, uint32_t npages, uint32_t flag
if (flags & MAP_SVM_LAZYRELEASE)
pgt->entries[index] |= PG_SVM_LAZYRELEASE|PG_PWT;
if (flags & MAP_SVM_INIT)
pgt->entries[index] |= PG_SVM_INIT;
if (flags & MAP_NO_ACCESS)
pgt->entries[index] &= ~PG_PRESENT;
@@ -653,8 +656,13 @@ static void pagefault_handler(struct state *s)
pgt = (page_table_t*) ((KERNEL_SPACE - 1024*PAGE_SIZE + index1*PAGE_SIZE) & 0xFFFFF000);
if (!pgt || !(pgt->entries[index2]))
goto default_handler;
if (pgt->entries[index2] & PG_SVM_INIT)
if (BUILTIN_EXPECT(!svm_alloc_page(viraddr, pgt), 1))
return;
else
goto default_handler;
if (pgt->entries[index2] & PG_SVM_STRONG)
if (!svm_access_request(viraddr))
if (BUILTIN_EXPECT(!svm_access_request(viraddr), 1))
return;
#endif
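
Condensed, the new SVM branch of the fault handler reads like this (a sketch of the control flow above, not a drop-in replacement):

/* page fault at viraddr; pgt and index2 already resolved */
if (pgt->entries[index2] & PG_SVM_INIT) {
	/* first touch: allocate a nearby frame and map it */
	if (!svm_alloc_page(viraddr, pgt))
		return;              /* fault resolved    */
	goto default_handler;    /* allocation failed */
}
if (pgt->entries[index2] & PG_SVM_STRONG) {
	/* frame exists but is owned by another core: request it */
	if (!svm_access_request(viraddr))
		return;
}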

View file

@@ -26,6 +26,7 @@
#include <metalsvm/errno.h>
#include <asm/irqflags.h>
#include <asm/processor.h>
#include <asm/page.h>
#ifdef CONFIG_ROCKCREEK
#include <asm/RCCE.h>
#include <asm/RCCE_lib.h>
@@ -39,6 +40,9 @@
#define AIREG1 0
#define AIREG2 (AIREG1 + 1)
#define LOCK_ID 0
#define ABS(a) (((a) < 0) ? -(a) : (a))
t_vcharp RC_SHM_BUFFER_START();
@@ -89,7 +93,7 @@ static volatile uint8_t* page_owner = NULL;
// helper array to convert a physical to a virtual address
static size_t phys2virt[SHARED_PAGES] = {[0 ... SHARED_PAGES-1] = 0};
static size_t shmbegin = 0;
static const size_t shmbegin = SHM_ADDR;
static uint32_t emit[RCCE_MAXNP] = {[0 ... RCCE_MAXNP-1] = 0};
static uint32_t request[RCCE_MAXNP] = {[0 ... RCCE_MAXNP-1] = 0};
static uint32_t forward[RCCE_MAXNP] = {[0 ... RCCE_MAXNP-1] = 0};
@@ -101,60 +105,136 @@ static uint64_t min_wait = (uint64_t) -1;
int svm_init(void)
{
size_t phyaddr;
uint32_t flags;
// iRCCE is not thread safe => disable interrupts
flags = irq_nested_disable();
shmbegin = (size_t)RC_SHM_BUFFER_START();
phyaddr = (size_t) RCCE_shmalloc(OWNER_SIZE);
irq_nested_enable(flags);
if (BUILTIN_EXPECT(!phyaddr, 0))
return -ENOMEM;
if (BUILTIN_EXPECT(phyaddr & 0xFFF, 0)) {
kprintf("RCCE_shmalloc returns not a page aligned physiacl address: 0x%x\n", phyaddr);
return -ENOMEM;
}
uint32_t i, flags;
kprintf("Shared memory starts at the physical address 0x%x\n", shmbegin);
page_owner = (uint8_t*) map_region(0, phyaddr, OWNER_SIZE >> PAGE_SHIFT, MAP_KERNEL_SPACE|MAP_NO_CACHE);
if (BUILTIN_EXPECT(!page_owner, 0)) {
flags = irq_nested_disable();
RCCE_shfree((t_vcharp) phyaddr);
irq_nested_enable(flags);
return -ENOMEM;
}
// by default, no owner is specified
if (!RCCE_IAM)
page_owner = (uint8_t*) map_region(0, shmbegin, OWNER_SIZE >> PAGE_SHIFT, MAP_KERNEL_SPACE|MAP_NO_CACHE);
if (BUILTIN_EXPECT(!page_owner, 0))
return -ENOMEM;
if (!RCCE_IAM) {
memset((void*)page_owner, 0xFF, OWNER_SIZE);
// owner vector is owned by core 0
for(i=0; i<(OWNER_SIZE >> PAGE_SHIFT); i++)
page_owner[i] = 0;
}
// initialize svm barrier
incregs = (volatile atomic_increg_t*) map_region(0, 0xF900E000, 2, MAP_KERNEL_SPACE|MAP_NO_CACHE);
if (BUILTIN_EXPECT(!incregs, 0)) {
flags = irq_nested_disable();
RCCE_shfree((t_vcharp) phyaddr);
irq_nested_enable(flags);
if (BUILTIN_EXPECT(!incregs, 0))
return -ENOMEM;
}
kprintf("Map atomic counters at 0x%x\n", incregs);
if (!RCCE_IAM) {
incregs[AIREG1].initializer = 0;
incregs[AIREG2].initializer = 0;
}
// iRCCE is not thread safe => disable interrupts
flags = irq_nested_disable();
RCCE_flag_alloc(&release);
irq_nested_enable(flags);
/* INIT: yafbarrier */
incregs[AIREG1].initializer = 0;
incregs[AIREG2].initializer = 0;
RCCE_barrier(&RCCE_COMM_WORLD);
return 0;
}
static size_t get_shpage(void)
{
int x = X_PID(RC_MY_COREID);
int y = Y_PID(RC_MY_COREID);
size_t i, j, start = SHM_X0_Y0;
int diff, min = x + y;
diff = ABS(5 - x) + ABS(0 - y);
if (diff < min) {
min = diff;
start = SHM_X5_Y0;
}
diff = ABS(0 - x) + ABS(2 - y);
if (diff < min) {
min = diff;
start = SHM_X0_Y2;
}
diff = ABS(5 - x) + ABS(2 - y);
if (diff < min) {
min = diff;
start = SHM_X5_Y2;
}
for(i=0; i < SHARED_PAGES; i++) {
j = (((start - shmbegin) >> PAGE_SHIFT) + i) % SHARED_PAGES;
if (page_owner[j] >= RCCE_MAXNP) {
page_owner[j] = RCCE_IAM;
RCCE_release_lock(RC_COREID[LOCK_ID]);
return shmbegin + (j << PAGE_SHIFT);
}
}
return 0;
}
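get_shpage() realizes the affinity policy: the SCC arranges its tiles in a 6x4 mesh with the four memory controllers attached at tiles (0,0), (5,0), (0,2) and (5,2), and the code picks the shared-memory region whose controller has the smallest Manhattan distance to the requesting core, then scans the owner table from there. A loop-based equivalent of the unrolled comparisons above, with hypothetical names for the controller table:

#include <stddef.h>
#include <stdlib.h>

/* hypothetical table of the four controller regions used above */
typedef struct { int x, y; size_t base; } shm_region_t;

static size_t nearest_region(int x, int y, const shm_region_t* r, int n)
{
	int i, dist, min = 1 << 30;
	size_t base = r[0].base;

	for (i = 0; i < n; i++) {
		/* Manhattan distance from the core's tile to the controller */
		dist = abs(r[i].x - x) + abs(r[i].y - y);
		if (dist < min) {
			min = dist;
			base = r[i].base;
		}
	}
	return base;
}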
/*
* This function is called by the pagefault handler
* => the interrupt flag is already cleared
*/
int svm_alloc_page(size_t addr, page_table_t* pgt)
{
uint32_t index2 = (addr >> 12) & 0x3FF;
size_t phyaddr;
t_vcharp mpb = (t_vcharp) ((size_t)(virt_to_phys(addr) >> PAGE_SHIFT) | (size_t) RCCE_comm_buffer[RCCE_IAM]);
uint16_t offset = 0xFFFF;
RCCE_acquire_lock(RC_COREID[LOCK_ID]);
RCCE_get((t_vcharp) &offset, mpb, sizeof(uint16_t), RCCE_IAM);
if (!offset) {
int i;
phyaddr = get_shpage();
offset = (uint16_t) ((phyaddr - shmbegin) >> PAGE_SHIFT);
for(i=0; i<RCCE_NP; i++)
RCCE_put(mpb, (t_vcharp) &offset, sizeof(uint16_t), i);
RCCE_release_lock(RC_COREID[LOCK_ID]);
pgt->entries[index2] &= 0xFFF;
pgt->entries[index2] &= ~PG_SVM_INIT;
pgt->entries[index2] |= phyaddr|PG_PRESENT;
phys2virt[(phyaddr - shmbegin) >> PAGE_SHIFT] = addr;
tlb_flush_one_page(addr);
kprintf("map new page frame 0x%x at 0x%x, flags0x%x, offset 0x%x, mpb 0x%x\n", phyaddr, addr, pgt->entries[index2] & 0xFFF, (int) offset, mpb);
return 0;
} else {
RCCE_release_lock(RC_COREID[LOCK_ID]);
phyaddr = shmbegin + ((size_t)offset << PAGE_SHIFT);
pgt->entries[index2] &= 0xFFF;
pgt->entries[index2] &= ~PG_SVM_INIT;
if (pgt->entries[index2] & PG_SVM_LAZYRELEASE)
pgt->entries[index2] |= phyaddr|PG_PRESENT;
else
pgt->entries[index2] |= phyaddr;
phys2virt[(phyaddr - shmbegin) >> PAGE_SHIFT] = addr;
tlb_flush_one_page(addr);
kprintf("map existing page frame 0x%x at 0x%x, offset 0x%x, mpb 0x%x\n", phyaddr, addr, offset, mpb);
if (pgt->entries[index2] & PG_SVM_LAZYRELEASE)
return 0;
kprintf("send request to %d, 0x%x\n", (int) page_owner[(phyaddr - shmbegin) >> PAGE_SHIFT], (phyaddr - shmbegin) >> PAGE_SHIFT);
return svm_access_request(addr);
}
}
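The protocol behind svm_alloc_page(), as I read it: every SVM page owns a 16-bit slot in the MPB, and while PG_SVM_INIT is set the PTE's frame bits address that slot instead of a frame. A slot value of 0 means "no frame yet", so the first core to fault allocates a nearby frame via get_shpage(), publishes the frame's page offset (relative to shmbegin) to every core's MPB copy, and later cores simply adopt it. The offset arithmetic in sketch form (helper names hypothetical):

#include <stdint.h>
#include <stddef.h>

#define PAGE_SHIFT 12

/* slot value -> physical address of the shared frame */
static inline size_t slot_to_phys(size_t shmbegin, uint16_t offset)
{
	return shmbegin + ((size_t)offset << PAGE_SHIFT);
}

/* physical frame -> slot value; 0 stays reserved for "unallocated",
 * since the first pages hold the owner vector and are never handed
 * out by get_shpage() */
static inline uint16_t phys_to_slot(size_t shmbegin, size_t phyaddr)
{
	return (uint16_t)((phyaddr - shmbegin) >> PAGE_SHIFT);
}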
/*
* This function is called by the pagefault handler
* => the interrupt flag is already cleared
@@ -168,12 +248,6 @@ int svm_access_request(size_t addr)
uint8_t payload[iRCCE_MAIL_HEADER_PAYLOAD];
int ret;
if (!phyaddr) {
kputs("Need new shared page\n");
while(1);
}
if (phyaddr < shmbegin)
return -EINVAL;
if (phyaddr >= shmbegin + RCCE_SHM_SIZE_MAX)
@@ -188,7 +262,7 @@ int svm_access_request(size_t addr)
((size_t*) payload)[1] = phyaddr;
/* send ping request */
iRCCE_mail_send(2*sizeof(size_t), SVM_REQUEST, 0, (char*) payload, remote_rank);
iRCCE_mail_send(2*sizeof(size_t), SVM_REQ, 0, (char*) payload, remote_rank);
icc_send_gic_irq(remote_rank);
request[remote_rank]++;
@@ -209,15 +283,16 @@ int svm_access_request(size_t addr)
return ret;
}
static atomic_int32_t size_counter = ATOMIC_INIT(0);
//static atomic_int32_t size_counter = ATOMIC_INIT(0);
void* svm_malloc(size_t size, uint32_t consistency)
{
size_t viraddr;
//size_t phyaddr, viraddr, i;
//uint32_t flags;
size_t viraddr, phyaddr, i, j;
t_vcharp mpb_addr;
uint32_t flags;
task_t* task = per_core(current_task);
uint32_t map_flags = MAP_KERNEL_SPACE;
uint32_t map_flags = MAP_KERNEL_SPACE|MAP_SVM_INIT;
uint8_t buffer[RCCE_LINE_SIZE]= {[0 ... RCCE_LINE_SIZE-1] = 0};
if( !(consistency & SVM_L2) )
map_flags |= MAP_MPE;
@@ -311,10 +386,36 @@ void* svm_malloc(size_t size, uint32_t consistency)
map_flags |= MAP_NO_ACCESS;
viraddr = map_region(0, 0, size >> PAGE_SHIFT, map_flags);
kprintf("svmmalloc: viraddr 0x%x, size 0x%x\n", viraddr, size);
for(i=0, j=0, mpb_addr=0; i<size; i+=PAGE_SIZE, j++) {
if (j % (RCCE_LINE_SIZE/sizeof(uint16_t)) == 0) {
flags = irq_nested_disable();
mpb_addr = RCCE_malloc(RCCE_LINE_SIZE);
if (BUILTIN_EXPECT(!mpb_addr, 0)) {
irq_nested_enable(flags);
kputs("RCCE_malloc failed\n");
goto out;
}
//kprintf("mpb_addr 0x%x\n", mpb_addr);
RCCE_put(mpb_addr, buffer, RCCE_LINE_SIZE, RCCE_IAM);
irq_nested_enable(flags);
}
phyaddr = (size_t)mpb_addr + (j % (RCCE_LINE_SIZE/sizeof(uint16_t))) * sizeof(uint16_t);
phyaddr <<= PAGE_SHIFT;
//kprintf("phyaddr 0x%x\n", phyaddr);
map_region(viraddr+i, phyaddr, 1, map_flags|MAP_REMAP);
}
return (void*) viraddr;
out:
// TODO: error handling
return NULL;
}
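With MAP_SVM_INIT set, svm_malloc() no longer grabs physical frames up front; it reserves virtual pages and wires each PTE to its zero-initialized MPB slot. A hypothetical call site (SVM_STRONG standing in for whatever consistency flag svm.h provides):

/* frames are assigned on first touch, near the touching core */
int* shared = (int*) svm_malloc(16 * PAGE_SIZE, SVM_STRONG);

shared[0] = 42;  /* first write faults -> svm_alloc_page() maps a frame */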
void svm_free(void* addr, size_t size)
@@ -376,7 +477,7 @@ int svm_emit_page(size_t phyaddr, int ue)
((size_t*) payload)[1] = phyaddr;
/* send ping request */
iRCCE_mail_send(2*sizeof(size_t), SVM_REQUEST, 0, (char*)payload, remote_rank);
iRCCE_mail_send(2*sizeof(size_t), SVM_REQ, 0, (char*)payload, remote_rank);
/* send interrupt */
icc_send_gic_irq(remote_rank);
@@ -542,7 +643,7 @@ int svm_barrier(uint32_t flags)
svm_invalidate();
}
#if 0
#if 1
// Lubachevsky barrier with flags
index = !index;
if (incregs[AIREG1].counter > (comm->size - 2)) {
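
The hunk above switches svm_barrier() from the plain RCCE barrier to the Lubachevsky-style barrier built on the SCC's atomic increment registers (AIREG1/AIREG2). The general pattern is a sense-reversing counter barrier; a generic sketch with C11 atomics, not the register-based code itself:

#include <stdatomic.h>

typedef struct {
	atomic_int count;   /* arrivals in the current round */
	atomic_int sense;   /* global round flag             */
	int size;           /* number of participants        */
} sr_barrier_t;

static void sr_barrier_wait(sr_barrier_t* b, int* local_sense)
{
	*local_sense = !*local_sense;   /* flip the private sense */
	if (atomic_fetch_add(&b->count, 1) == b->size - 1) {
		atomic_store(&b->count, 0);             /* last arrival resets */
		atomic_store(&b->sense, *local_sense);  /* and releases all    */
	} else {
		while (atomic_load(&b->sense) != *local_sense)
			;   /* spin until the round flips */
	}
}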

View file

@@ -1,4 +1,4 @@
C_source := icc.c SCC_API.c iRCCE_admin.c iRCCE_send.c iRCCE_isend.c iRCCE_irecv.c iRCCE_recv.c iRCCE_get.c iRCCE_put.c iRCCE_synch.c iRCCE_mailbox.c RCCE_malloc.c RCCE_shmalloc.c RCCE_debug.c RCCE_qsort.c RCCE_DCMflush.c RCCE_send.c RCCE_recv.c RCCE_flags.c RCCE_comm.c RCCE_put.c RCCE_get.c RCCE_synch.c RCCE_bcast.c RCCE_admin.c # RCCE_power_management.c
C_source := icc.c SCC_API.c iRCCE_admin.c iRCCE_send.c iRCCE_isend.c iRCCE_irecv.c iRCCE_recv.c iRCCE_get.c iRCCE_put.c iRCCE_synch.c iRCCE_mailbox.c RCCE_malloc.c RCCE_debug.c RCCE_qsort.c RCCE_send.c RCCE_recv.c RCCE_flags.c RCCE_comm.c RCCE_put.c RCCE_get.c RCCE_synch.c RCCE_bcast.c RCCE_admin.c # RCCE_shmalloc.c RCCE_DCMflush.c RCCE_power_management.c
ASM_source :=
MODULE := arch_x86_scc

View file

@@ -370,6 +370,9 @@ int RCCE_init(
// initialize RCCE_malloc
RCCE_malloc_init(RCCE_comm_buffer[RCCE_IAM],RCCE_BUFF_SIZE);
// MetalSVM has its own system to manage the shared regions
#if 0
#ifdef SHMADD
RCCE_shmalloc_init(RC_SHM_BUFFER_START()+RCCE_SHM_BUFFER_offset, RCCE_SHM_SIZE_MAX);
@@ -379,6 +382,7 @@
#endif
#else
RCCE_shmalloc_init(RC_SHM_BUFFER_START(), RCCE_SHM_SIZE_MAX);
#endif
#endif
// initialize the (global) flag bookkeeping data structure

View file

@@ -97,7 +97,7 @@ static inline void icc_mail_check_tag(iRCCE_MAIL_HEADER* mail) {
case PING_REQ:
iRCCE_mail_send(0, PING_RESP, 0, NULL, mail->source);
break;
case SVM_REQUEST:
case SVM_REQ:
svm_emit_page(((size_t*) mail->payload)[1], ((size_t*) mail->payload)[0]);
break;
case SVM_RESP:
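Both sides of an SVM_REQ mail rely on the payload layout used above: word [0] carries the requesting core, word [1] the physical address. Restated as a matched pair (function names hypothetical; assumes the surrounding iRCCE/icc headers):

/* sender (cf. svm_access_request / svm_emit_page) */
static void send_svm_req(size_t my_ue, size_t phyaddr, int remote_rank)
{
	uint8_t payload[iRCCE_MAIL_HEADER_PAYLOAD];

	((size_t*) payload)[0] = my_ue;    /* who is asking        */
	((size_t*) payload)[1] = phyaddr;  /* which physical frame */
	iRCCE_mail_send(2*sizeof(size_t), SVM_REQ, 0, (char*) payload, remote_rank);
	icc_send_gic_irq(remote_rank);     /* wake the remote core */
}

/* receiver decodes in the same order (cf. icc_mail_check_tag) */
static void handle_svm_req(iRCCE_MAIL_HEADER* mail)
{
	svm_emit_page(((size_t*) mail->payload)[1],        /* frame   */
	              (int) ((size_t*) mail->payload)[0]); /* to core */
}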

View file

@@ -49,8 +49,9 @@ extern "C" {
#endif
#define MAP_SVM_STRONG (1 << 9)
#define MAP_SVM_LAZYRELEASE (1 << 10)
#define MAP_NO_ACCESS (1 << 11)
#define MAP_REMAP (1 << 12)
#define MAP_SVM_INIT (1 << 11)
#define MAP_NO_ACCESS (1 << 12)
#define MAP_REMAP (1 << 13)
void NORETURN abort(void);