metalsvm/arch/x86/mm/svm.c
2011-08-23 14:03:34 -07:00

293 lines
7.6 KiB
C

/*
* Copyright 2011 Stefan Lankes, Chair for Operating Systems,
* RWTH Aachen University
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* This file is part of MetalSVM.
*/
#include <metalsvm/stddef.h>
#include <metalsvm/stdio.h>
#include <metalsvm/stdlib.h>
#include <metalsvm/mmu.h>
#include <metalsvm/page.h>
#include <metalsvm/errno.h>
#include <asm/irqflags.h>
#include <asm/processor.h>
#ifdef CONFIG_ROCKCREEK
#include <asm/RCCE.h>
#include <asm/RCCE_lib.h>
#include <asm/iRCCE.h>
#include <asm/SCC_API.h>
#include <asm/icc.h>
#include <asm/svm.h>
// number of pages in the shared memory window
#define SHARED_PAGES (RCCE_SHM_SIZE_MAX >> PAGE_SHIFT)
// size of the page_owner table, rounded up to a whole number of pages
#define OWNER_SIZE ((SHARED_PAGES * sizeof(uint8_t) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

t_vcharp RC_SHM_BUFFER_START();

/*
 * This array describes the owner of a specific page.
 * Only the owner of a page is able to change the possession.
 * => No lock is needed.
 */
static volatile uint8_t* page_owner = NULL;

// helper array to convert a physical to a virtual address
static size_t phys2virt[SHARED_PAGES] = {[0 ... SHARED_PAGES-1] = 0};
// physical start address of the shared memory window (set in svm_init)
static size_t shmbegin = 0;
// rank of this core within the SCC (set in svm_init)
static int my_ue = 0;
// per-core statistics, indexed by rank: pages emitted to, requested
// from, and forwarded to the respective core (see svm_statistics)
static uint32_t emit[RCCE_MAXNP] = {[0 ... RCCE_MAXNP-1] = 0};
static uint32_t request[RCCE_MAXNP] = {[0 ... RCCE_MAXNP-1] = 0};
static uint32_t forward[RCCE_MAXNP] = {[0 ... RCCE_MAXNP-1] = 0};
/*
 * Initialize the SVM subsystem: allocate the shared page_owner table,
 * map it uncached into kernel space and synchronize all cores.
 *
 * Returns 0 on success, -ENOMEM if the shared allocation or the
 * mapping fails.
 */
int svm_init(void)
{
	size_t phyaddr;
	uint32_t flags;

	// iRCCE is not thread safe => disable interrupts
	flags = irq_nested_disable();
	my_ue = RCCE_ue();
	shmbegin = (size_t)RC_SHM_BUFFER_START();
	phyaddr = (size_t) RCCE_shmalloc(OWNER_SIZE);
	irq_nested_enable(flags);

	if (BUILTIN_EXPECT(!phyaddr, 0))
		return -ENOMEM;
	if (BUILTIN_EXPECT(phyaddr & 0xFFF, 0)) {
		kprintf("RCCE_shmalloc returns not a page aligned physical address: 0x%x\n", phyaddr);
		// fix: release the shared block on this error path (was leaked)
		flags = irq_nested_disable();
		RCCE_shfree((t_vcharp) phyaddr);
		irq_nested_enable(flags);
		return -ENOMEM;
	}

	kprintf("Shared memory starts at the physical address 0x%x\n", shmbegin);

	// the owner table is mapped uncached so all cores observe updates
	page_owner = (uint8_t*) map_region(0, phyaddr, OWNER_SIZE >> PAGE_SHIFT, MAP_KERNEL_SPACE|MAP_NO_CACHE);
	if (BUILTIN_EXPECT(!page_owner, 0)) {
		flags = irq_nested_disable();
		RCCE_shfree((t_vcharp) phyaddr);
		irq_nested_enable(flags);
		return -ENOMEM;
	}

	// per default, core 0 is the owner of all shared pages
	if (!my_ue)
		memset((void*)page_owner, 0x00, OWNER_SIZE);

	// iRCCE is not thread safe => disable interrupts
	flags = irq_nested_disable();
	RCCE_barrier(&RCCE_COMM_WORLD);
	irq_nested_enable(flags);

	return 0;
}
/*
 * This function is called by the pagefault handler
 * => the interrupt flag is already cleared
 *
 * Requests ownership of the shared page backing the faulting virtual
 * address from the core recorded in page_owner[] and spins until the
 * transfer is acknowledged.
 *
 * Returns 0 if this core already owns the page, -EINVAL if the address
 * is outside the shared memory window, otherwise the result of
 * re-enabling read/write access to the page.
 */
int svm_access_request(size_t addr)
{
	size_t phyaddr = virt_to_phys(addr);
	uint32_t pageid;
	int remote_rank;
	uint8_t payload[iRCCE_MAIL_HEADER_PAYLOAD];

	// the address must fall into [shmbegin, shmbegin + RCCE_SHM_SIZE_MAX)
	if (phyaddr < shmbegin)
		return -EINVAL;
	if (phyaddr >= shmbegin + RCCE_SHM_SIZE_MAX)
		return -EINVAL;

	pageid = (phyaddr-shmbegin) >> PAGE_SHIFT;
	//svm_flush();
	// nothing to do if we already own the page
	if (page_owner[pageid] == my_ue)
		return 0;

	// NOTE(review): the locally visible owner may be stale; in that case
	// the remote core forwards the request (see svm_emit_page)
	remote_rank = page_owner[pageid];
	// payload layout: [0] = requesting core, [1] = physical page address
	((size_t*) payload)[0] = my_ue;
	((size_t*) payload)[1] = phyaddr;

	//kprintf("send access request to %d of 0x%x\n", remote_rank, phyaddr);
	/* send ping request */
	iRCCE_mail_send(2*sizeof(size_t), ICC_TAG_SVMREQUEST, 0, payload, remote_rank);
	request[remote_rank]++;
	NOP8;
	icc_send_irq(remote_rank);

	/* check for incoming messages */
	icc_mail_check();

	// busy-wait until the ownership transfer becomes visible;
	// page_owner is volatile, so the remote update is re-read each pass
	while (page_owner[pageid] != my_ue) {
		NOP4;
	}

	return change_page_permissions(addr, addr+PAGE_SIZE, VMA_READ|VMA_WRITE|VMA_CACHEABLE);
}
/*
 * Allocate a shared memory region of at least 'size' bytes with the
 * requested consistency model (SVM_STRONG or SVM_LAZYRELEASE) and map
 * it into kernel space.
 *
 * Returns the virtual address of the region or NULL on failure
 * (unknown consistency flag, out of shared memory, or mapping error).
 */
void* svmmalloc(size_t size, uint32_t consistency)
{
	size_t phyaddr, viraddr, i;
	uint32_t flags;
	uint32_t map_flags = MAP_KERNEL_SPACE|MAP_MPE;

	if (consistency & SVM_STRONG)
		map_flags |= MAP_SVM_STRONG;
	else if (consistency & SVM_LAZYRELEASE)
		map_flags |= MAP_SVM_LAZYRELEASE;
	else
		return NULL;

	// currently, we allocate memory in page size granulation
	size = (size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);

	// iRCCE is not thread safe => disable interrupts
	flags = irq_nested_disable();
	phyaddr = (size_t) RCCE_shmalloc(size);
	// with strong consistency, only core 0 starts with access rights
	if (RCCE_ue() && (consistency & SVM_STRONG))
		map_flags |= MAP_NO_ACCESS;
	irq_nested_enable(flags);

	if (BUILTIN_EXPECT(!phyaddr, 0))
		return NULL;
	if (BUILTIN_EXPECT(phyaddr & 0xFFF, 0)) {
		kprintf("RCCE_shmalloc returns not a page aligned physical address: 0x%x\n", phyaddr);
		// fix: release the shared block on this error path (was leaked)
		flags = irq_nested_disable();
		RCCE_shfree((t_vcharp) phyaddr);
		irq_nested_enable(flags);
		return NULL;
	}

	viraddr = map_region(0, phyaddr, size >> PAGE_SHIFT, map_flags);
	if (BUILTIN_EXPECT(!viraddr, 0)) {
		// fix: undo the shared allocation if the mapping fails
		// (previously phys2virt was filled with invalid addresses)
		flags = irq_nested_disable();
		RCCE_shfree((t_vcharp) phyaddr);
		irq_nested_enable(flags);
		return NULL;
	}

	// remember the phys->virt relation for svm_emit_page
	for(i=0; i<size; i+=PAGE_SIZE)
		phys2virt[(phyaddr + i - shmbegin) >> PAGE_SHIFT] = viraddr + i;

	kprintf("svmmalloc: phyaddr 0x%x, viraddr 0x%x, size 0x%x\n", phyaddr, viraddr, size);

	return (void*) viraddr;
}
/*
 * Release a shared memory region previously obtained via svmmalloc().
 * 'addr' and 'size' must match the original allocation.
 */
void svmfree(void* addr, size_t size)
{
	size_t base_phys, offset;
	uint32_t irq_state;

	if (BUILTIN_EXPECT(!addr || !size, 0))
		return;

	// resolve the physical base before the mapping disappears
	base_phys = virt_to_phys((size_t) addr);

	// currently, we allocate memory in page size granulation
	size = (size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);

	kprintf("svmfree: phyaddr 0x%x, viraddr 0x%x, size 0x%x\n", base_phys, addr, size);

	// drop the mapping and forget the phys->virt relation
	unmap_region((size_t) addr, size >> PAGE_SHIFT);
	for (offset = 0; offset < size; offset += PAGE_SIZE)
		phys2virt[(base_phys + offset - shmbegin) >> PAGE_SHIFT] = 0;

	// iRCCE is not thread safe => disable interrupts
	irq_state = irq_nested_disable();
	RCCE_shfree((t_vcharp) base_phys);
	irq_nested_enable(irq_state);
}
/*
 * This function is called by icc_mail_check.
 * => Interrupt flag is already cleared.
 *
 * Hands the page at physical address 'phyaddr' over to core 'ue'. If
 * this core is not (or no longer) the owner, the request is forwarded
 * to the owner recorded in page_owner[].
 *
 * Returns 0 on success, -EINVAL if the address is outside the shared
 * memory window.
 */
int svm_emit_page(size_t phyaddr, int ue)
{
	uint32_t pageid;

	//kprintf("Try to emit page 0x%x to %d\n", phyaddr, ue);
	if (phyaddr < shmbegin)
		return -EINVAL;
	if (phyaddr >= shmbegin + RCCE_SHM_SIZE_MAX)
		return -EINVAL;

	pageid = (phyaddr-shmbegin) >> PAGE_SHIFT;
	if (page_owner[pageid] != my_ue) {
		// Core is not owner => forward request to new owner
		int remote_rank;
		uint8_t payload[iRCCE_MAIL_HEADER_PAYLOAD];

		kprintf("Ups, core %d is not owner of page 0x%x\n", my_ue, phyaddr);

		remote_rank = page_owner[pageid];
		// payload layout: [0] = original requester, [1] = physical page address
		((size_t*) payload)[0] = ue;
		((size_t*) payload)[1] = phyaddr;

		/* send ping request */
		iRCCE_mail_send(2*sizeof(size_t), ICC_TAG_SVMREQUEST, 0, payload, remote_rank);
		NOP8;
		icc_send_irq(remote_rank);
		forward[remote_rank]++;
	} else {
		size_t viraddr;

		// write back dirty data before ownership changes hands
		svm_flush();
		page_owner[pageid] = ue;
		emit[ue]++;

		// revoke local access so further use faults into svm_access_request
		viraddr = phys2virt[(phyaddr - shmbegin) >> PAGE_SHIFT];
		change_page_permissions(viraddr, viraddr+PAGE_SIZE, VMA_NOACCESS|VMA_READ|VMA_CACHEABLE);
	}

	return 0;
}
#ifdef SVM_WB
/*
 * Write back dirty cache contents so that other cores observe a
 * consistent view of shared memory before ownership changes.
 */
void svm_flush(void)
{
	// need to write to another line to make sure the write combine buffer gets flushed
	*(int *)RCCE_fool_write_combine_buffer = 1;
	flush_cache();

#if 0
	// try to flush L2 cache
	// fix: declarations moved into the disabled region; previously
	// z/tmp were always declared and triggered unused-variable warnings
	int z = Z_PID(RC_COREID[my_ue]);
	int tmp = ReadConfigReg(CRB_OWN + (z==0 ? GLCFG0 : GLCFG1));

	tmp &= ~(1 << GLCFG_XFLSHNN_BIT);
	SetConfigReg(CRB_OWN + (z==0 ? GLCFG0 : GLCFG1), tmp);
	while(!(ReadConfigReg(CRB_OWN + (z==0 ? GLCFG0 : GLCFG1)) & (1 << GLCFG_XFLSHNN_BIT))) {
		NOP8;
	}
#endif
}
#endif
/*
 * Print the per-core SVM counters (pages emitted, requested and
 * forwarded) to the kernel log. Always returns 0.
 */
int svm_statistics(void)
{
	uint32_t ue;

	kprintf("emit\t:");
	for (ue = 0; ue < RCCE_MAXNP; ue++)
		kprintf("\t%u", emit[ue]);

	kprintf("\nrequest\t:");
	for (ue = 0; ue < RCCE_MAXNP; ue++)
		kprintf("\t%u", request[ue]);

	kprintf("\nforward\t:");
	for (ue = 0; ue < RCCE_MAXNP; ue++)
		kprintf("\t%u", forward[ue]);

	kputs("\n");

	return 0;
}
#endif