From 9d422ed700c302b9923b4b8b10ccbfc93d153c9e Mon Sep 17 00:00:00 2001 From: Stefan Lankes Date: Sun, 15 Jul 2012 05:57:39 -0700 Subject: [PATCH] define the function shmalloc to allocate a shared page caution, the function returns the physical address --- arch/x86/include/asm/svm.h | 11 +++++++++-- arch/x86/mm/svm.c | 34 +++++++++++++++++++++++++--------- 2 files changed, 34 insertions(+), 11 deletions(-) diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h index eb06b9d7..f2e3060d 100644 --- a/arch/x86/include/asm/svm.h +++ b/arch/x86/include/asm/svm.h @@ -55,7 +55,7 @@ int svm_init(void); * * @return Pointer to the new memory range */ -void* svm_malloc(size_t sizei, uint32_t flags); +void* svm_malloc(size_t size, uint32_t flags); /** @brief Frees memory, which is managed by the SVM subsystem * @@ -72,6 +72,13 @@ int svm_barrier(uint32_t flags); */ int svm_access_request(size_t addr); +/** @brief Allocate n shared pages + * + * @param n number of requested pages + * @return physical address of the shared pages + */ +size_t shmalloc(uint32_t n); + int svm_alloc_page(size_t addr, page_table_t* pgt); /** @brief emit page to core ue @@ -93,7 +100,7 @@ static inline void svm_invalidate(void) void svm_invalidate(void); #endif -/* *brief flushs the cache for all SVM regions +/* @brief flushs the cache for all SVM regions */ #ifndef SVM_WB static inline void svm_flush(size_t unused) diff --git a/arch/x86/mm/svm.c b/arch/x86/mm/svm.c index 57dc1d49..68b62fbb 100644 --- a/arch/x86/mm/svm.c +++ b/arch/x86/mm/svm.c @@ -152,11 +152,11 @@ int svm_init(void) return 0; } -static size_t get_shpage(void) +static size_t get_shpages(uint32_t n) { int x = X_PID(RC_MY_COREID); int y = Y_PID(RC_MY_COREID); - size_t i, j, start = SHM_X0_Y0; + size_t i, j = 0, k = 0, start = SHM_X0_Y0; int diff, min = x + y; diff = ABS(5 - x) + ABS(0 - y); @@ -177,16 +177,32 @@ static size_t get_shpage(void) start = SHM_X5_Y2; } - for(i=0; i < SHARED_PAGES; i++) { + for(i=0; (i < 
SHARED_PAGES) && (k < n); i++) { + k = 0; j = (((start - shmbegin) >> PAGE_SHIFT) + i) % SHARED_PAGES; - if (page_owner[j] >= RCCE_MAXNP) { - page_owner[j] = RCCE_IAM; - RCCE_release_lock(RC_COREID[LOCK_ID]); - return shmbegin + (j << PAGE_SHIFT); + + while((k<n) && (page_owner[j+k] >= RCCE_MAXNP)) { + k++; i++; } } - return 0; + if (BUILTIN_EXPECT(i >= SHARED_PAGES, 0)) + return 0; + + memset((void*) (page_owner+j), RCCE_IAM, sizeof(uint8_t)*n); + + return shmbegin + (j << PAGE_SHIFT); +} + +size_t shmalloc(uint32_t n) +{ + size_t ret; + + RCCE_acquire_lock(RC_COREID[LOCK_ID]); + ret = get_shpages(n); + RCCE_release_lock(RC_COREID[LOCK_ID]); + + return ret; } /* @@ -212,7 +228,7 @@ int svm_alloc_page(size_t addr, page_table_t* pgt) if (!offset) { int i; - phyaddr = get_shpage(); + phyaddr = get_shpages(1); offset = (uint16_t) ((phyaddr - shmbegin) >> PAGE_SHIFT); for(i=0; i