- redesign of the atomic operations: xadd-based atomic_int32_t replaces atomic_uint32_t

- the spinlock is now implemented as a ticket lock instead of a test-and-set loop

git-svn-id: http://svn.lfbs.rwth-aachen.de/svn/scc/trunk/MetalSVM@48 315a16e6-25f9-4109-90ae-ca3045a26c18
stefan 2010-08-09 13:22:03 +00:00
parent 71188c92f9
commit 9754bffcc0
8 changed files with 61 additions and 70 deletions
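
For readers unfamiliar with the pattern: a ticket lock serves waiters in FIFO order, like a bakery queue. Each acquirer atomically draws the next ticket number and spins until the "now serving" counter reaches it; unlock advances that counter. A minimal user-space sketch of the idea in C11 atomics (an illustration only, not the kernel code below, which builds the same scheme from hand-written xchg/xadd primitives):

    #include <stdatomic.h>

    typedef struct {
        atomic_int queue;    /* next ticket to hand out */
        atomic_int dequeue;  /* ticket currently being served */
    } ticket_lock_t;         /* initialize with {0, 1}, mirroring SPINLOCK_INIT */

    static void ticket_lock(ticket_lock_t *l) {
        /* fetch_add returns the OLD value, so +1 matches the kernel's
         * atomic_int32_inc, which returns the NEW value */
        int ticket = atomic_fetch_add(&l->queue, 1) + 1;
        while (atomic_load(&l->dequeue) != ticket)
            ;  /* spin; the kernel yields via reschedule() instead */
    }

    static void ticket_unlock(ticket_lock_t *l) {
        atomic_fetch_add(&l->dequeue, 1);  /* serve the next ticket */
    }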


@@ -26,57 +26,53 @@
 extern "C" {
 #endif

-typedef uint32_t atomic_uint32_t;
+#if MAX_CORES > 1
+#define LOCK "lock ; "
+#else
+#define LOCK ""
+#endif
+
+#define ATOMIC_INIT(i) { (i) }
+
+typedef struct { volatile int32_t counter; } atomic_int32_t;

 /*
  * Intel manuals: If a memory operand is referenced, the processor's locking
  * protocol is automatically implemented for the duration of the exchange
  * operation, regardless of the presence or absence of the LOCK prefix.
  */
-inline static atomic_uint32_t atomic_uint32_test_and_set(atomic_uint32_t* d, uint32_t ret) {
-    asm volatile ("xchgl %0, %1" : "=r"(ret) : "m"(*d), "0"(ret) : "memory");
+inline static int32_t atomic_int32_test_and_set(atomic_int32_t* d, int32_t ret) {
+    asm volatile ("xchgl %0, %1" : "=r"(ret) : "m"(d->counter), "0"(ret) : "memory");
     return ret;
 }

-inline static atomic_uint32_t atomic_uint32_inc(atomic_uint32_t* d) {
-    asm volatile ("lock incl %0" : "+m"(*d));
-    return *d;
-}
-
-inline static atomic_uint32_t atomic_uint32_dec(atomic_uint32_t* d) {
-    asm volatile ("lock decl %0" : "+m"(*d));
-    return *d;
-}
-
-inline static atomic_uint32_t atomic_uint32_add(atomic_uint32_t *d, int32_t i)
+inline static int32_t atomic_int32_add(atomic_int32_t *d, int32_t i)
 {
-    asm volatile("lock addl %1, %0" : "+m" (*d) : "ir" (i));
-    return *d;
+    int32_t res = i;
+    asm volatile(LOCK "xaddl %0, %1" : "=r"(i) : "m"(d->counter), "0"(i));
+    return res+i;
 }

-inline static atomic_uint32_t atomic_uint32_sub(atomic_uint32_t *d, int32_t i)
+inline static int32_t atomic_int32_sub(atomic_int32_t *d, int32_t i)
 {
-    asm volatile("lock subl %1, %0" : "+m" (*d) : "ir" (i));
-    return *d;
+    return atomic_int32_add(d, -i);
 }

-inline static atomic_uint32_t atomic_uint32_read(atomic_uint32_t *d)
-{
-    return *d;
+inline static int32_t atomic_int32_inc(atomic_int32_t* d) {
+    return atomic_int32_add(d, 1);
 }

-inline static void atomic_uint32_set(atomic_uint32_t *d, uint32_t v)
-{
-    *d = v;
+inline static int32_t atomic_int32_dec(atomic_int32_t* d) {
+    return atomic_int32_add(d, -1);
 }

-#define atomic_size_t atomic_uint32_t
-#define atomic_size_read atomic_uint32_read
-#define atomic_size_set atomic_uint32_set
-#define atomic_size_inc atomic_uint32_inc
-#define atomic_size_dec atomic_uint32_dec
-#define atomic_size_add atomic_uint32_add
-#define atomic_size_sub atomic_uint32_sub
+inline static int32_t atomic_int32_read(atomic_int32_t *d) {
+    return d->counter;
+}
+
+inline static void atomic_int32_set(atomic_int32_t *d, int32_t v) {
+    d->counter = v;
+}

 #ifdef __cplusplus
 }
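
Note the calling convention of the new primitives: since xaddl leaves the operand's old value behind, atomic_int32_add returns the counter's new value (old + i), so atomic_int32_inc and atomic_int32_dec behave like pre-increment and pre-decrement. A short hypothetical usage sketch (not part of the commit) built only on the functions above:

    atomic_int32_t refs = ATOMIC_INIT(0);

    void example(void) {
        int32_t a = atomic_int32_inc(&refs);    /* refs: 0 -> 1, returns 1 */
        int32_t b = atomic_int32_add(&refs, 5); /* refs: 1 -> 6, returns 6 */
        int32_t c = atomic_int32_dec(&refs);    /* refs: 6 -> 5, returns 5 */
        /* here a == 1, b == 6, c == 5, atomic_int32_read(&refs) == 5 */
    }

This return-the-new-value behavior is exactly what the ticket lock below relies on when it draws a ticket with atomic_int32_inc(&s->queue).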


@@ -28,9 +28,9 @@
 extern "C" {
 #endif

-extern atomic_size_t total_memory;
-extern atomic_size_t total_allocated_memory;
-extern atomic_size_t total_available_memory;
+extern atomic_int32_t total_pages;
+extern atomic_int32_t total_allocated_pages;
+extern atomic_int32_t total_available_pages;

 int mmu_init(void);


@@ -30,26 +30,22 @@ extern "C" {
 #endif

 typedef struct {
-    atomic_uint32_t lock;
+    atomic_int32_t queue, dequeue;
 } spinlock_t;

-#define SPINLOCK_INIT { 0 }
+#define SPINLOCK_INIT { ATOMIC_INIT(0), ATOMIC_INIT(1) }

 inline static int spinlock_init(spinlock_t* s) {
     if (BUILTIN_EXPECT(!s, 0))
         return -1;

-    s->lock = 0;
+    atomic_int32_set(&s->queue, 0);
+    atomic_int32_set(&s->dequeue, 1);

     return 0;
 }

 inline static int spinlock_destroy(spinlock_t* s) {
     if (BUILTIN_EXPECT(!s, 0))
         return -1;

-    s->lock = 0;
-
     return 0;
 }

@@ -57,7 +53,8 @@ inline static int spinlock_lock(spinlock_t* s) {
     if (BUILTIN_EXPECT(!s, 0))
         return -1;

-    while(atomic_uint32_test_and_set(&s->lock, 1))
+    int32_t ticket = atomic_int32_inc(&s->queue);
+    while(atomic_int32_read(&s->dequeue) != ticket)
         reschedule();

     return 0;
 }

@@ -65,10 +62,8 @@ inline static int spinlock_lock(spinlock_t* s) {
 inline static int spinlock_unlock(spinlock_t* s) {
     if (BUILTIN_EXPECT(!s, 0))
         return -1;

-    if (BUILTIN_EXPECT(!(s->lock), 0))
-        return -1;
-
-    s->lock = 0;
+    atomic_int32_inc(&s->dequeue);

     return 0;
 }

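To see the fairness gain, trace three tasks A, B and C calling spinlock_lock() on a fresh lock (queue = 0, dequeue = 1) in that order: they draw tickets 1, 2 and 3; A enters immediately because dequeue == 1, and each spinlock_unlock() advances dequeue by one, admitting B and then C in arrival order. The old test-and-set loop gave no such guarantee; any spinning task could win. One trade-off: the new unlock path no longer detects a spurious unlock, so an unmatched spinlock_unlock() would admit a waiter early. A hypothetical call site using only the API above:

    static spinlock_t table_lock = SPINLOCK_INIT;

    void update_table(void) {
        spinlock_lock(&table_lock);   /* draw a ticket, wait to be served */
        /* critical section: at most one task executes this at a time */
        spinlock_unlock(&table_lock); /* hand the lock to the next ticket */
    }
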

@@ -45,7 +45,7 @@ typedef struct {
     size_t stack_size;       /* only user level tasks
                               * are able to specify their stack size
                               */
-    atomic_size_t mem_usage;
+    atomic_int32_t mem_usage;    /* in number of pages */
     struct mailbox_int32* mbox[MAX_TASKS];
     uint32_t status;
 } __attribute__((packed)) task_t;


@@ -207,9 +207,9 @@ int main(void)
     detect_cpu_frequency();
     kprintf("Processor frequency: %d MHz\n", get_cpu_frequency()/1000000);

-    kprintf("Total memory: %u MBytes\n", atomic_size_read(&total_memory)/(1024*1024));
-    kprintf("Current allocated memory: %u KBytes\n", atomic_size_read(&total_allocated_memory)/1024);
-    kprintf("Current available memory: %u MBytes\n", atomic_size_read(&total_available_memory)/(1024*1024));
+    kprintf("Total memory: %u MBytes\n", atomic_int32_read(&total_pages)/((1024*1024)/PAGE_SIZE));
+    kprintf("Current allocated memory: %u KBytes\n", atomic_int32_read(&total_allocated_pages)*(PAGE_SIZE/1024));
+    kprintf("Current available memory: %u MBytes\n", atomic_int32_read(&total_available_pages)/((1024*1024)/PAGE_SIZE));

 #ifdef CONFIG_PCI
     print_pci_adapters();
 #endif

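The unit conversions check out provided PAGE_SIZE divides 1 MiB: assuming the usual x86 page size of 4096 bytes, (1024*1024)/PAGE_SIZE is 256 pages per MByte and PAGE_SIZE/1024 is 4 KBytes per page, so e.g. 32768 total pages print as 32768/256 = 128 MBytes and 300 allocated pages as 300*4 = 1200 KBytes.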

@@ -64,8 +64,8 @@ static void NORETURN do_exit(int arg) {
     wakeup_blocked_tasks(arg);
     if (current_task->ustack)
         kfree(current_task->ustack, current_task->stack_size);
-    if (current_task->mem_usage)
-        kprintf("Memory leak! Task %d did not release %d bytes\n", current_task->id, current_task->mem_usage);
+    if (atomic_int32_read(&current_task->mem_usage))
+        kprintf("Memory leak! Task %d did not release %d pages\n", current_task->id, atomic_int32_read(&current_task->mem_usage));

     current_task->status = TASK_FINISHED;
     reschedule();
@@ -117,7 +117,7 @@ static int create_task(tid_t* id, entry_point_t ep, void* arg, size_t stack_size
     for(i=0; i<MAX_TASKS; i++) {
         if (task_table[i].status == TASK_INVALID) {
-            task_table[i].mem_usage = 0;
+            atomic_int32_set(&task_table[i].mem_usage, 0);
             task_table[i].id = i;

             if (user) {
                 task_table[i].ustack = create_stack(task_table+i, stack_size);


@@ -25,7 +25,7 @@
 #include <asm/vga.h>
 #endif

-static atomic_uint32_t kmsg_counter = 0;
+static atomic_int32_t kmsg_counter = ATOMIC_INIT(0);
 static unsigned char kmessages[KMSG_SIZE];

 int koutput_init(void)

@@ -43,7 +43,7 @@ int kputchar(int c)
     int pos;
     int ret = 1;

-    pos = atomic_uint32_inc(&kmsg_counter);
+    pos = atomic_int32_inc(&kmsg_counter);
     kmessages[pos % KMSG_SIZE] = c;

 #ifdef CONFIG_VGA

@@ -59,7 +59,7 @@ int kputs(const char *str)
     int i;

     for(i=0; str[i] != '\0'; i++) {
-        pos = atomic_uint32_inc(&kmsg_counter);
+        pos = atomic_int32_inc(&kmsg_counter);
         kmessages[pos % KMSG_SIZE] = str[i];
     }

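The atomic counter is what keeps kputchar() and kputs() usable from several cores at once: each writer reserves a unique ring index before touching kmessages, so concurrent calls never clobber the same slot, at the cost that characters from interleaved calls may appear mixed and that old entries are silently overwritten once the counter wraps past KMSG_SIZE.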

@@ -33,9 +33,9 @@
 static uint8_t bitmap[BITMAP_SIZE];
 static spinlock_t bitmap_lock = SPINLOCK_INIT;
 static size_t alloc_start;
-atomic_size_t total_memory = 0;
-atomic_size_t total_allocated_memory = 0;
-atomic_size_t total_available_memory = 0;
+atomic_int32_t total_pages = ATOMIC_INIT(0);
+atomic_int32_t total_allocated_pages = ATOMIC_INIT(0);
+atomic_int32_t total_available_pages = ATOMIC_INIT(0);

 extern const void kernel_start;
 extern const void kernel_end;

@@ -97,8 +97,8 @@ int mmu_init(void)
             while (addr < end_addr) {
                 page_clear_mark(addr / PAGE_SIZE);
                 addr += PAGE_SIZE;
-                atomic_size_add(&total_memory, PAGE_SIZE);
-                atomic_size_add(&total_available_memory, PAGE_SIZE);
+                atomic_int32_inc(&total_pages);
+                atomic_int32_inc(&total_available_pages);
             }
         }
         mmap++;

@@ -113,8 +113,8 @@ int mmu_init(void)
     /* kernel is aligned to page size */
     kernel_size = (size_t) &kernel_end - (size_t) &kernel_start;
-    atomic_size_set(&total_allocated_memory, kernel_size);
-    atomic_size_sub(&total_available_memory, kernel_size);
+    atomic_int32_set(&total_allocated_pages, kernel_size/PAGE_SIZE);
+    atomic_int32_sub(&total_available_pages, kernel_size/PAGE_SIZE);

     /* set kernel space as used */
     for(i=(size_t) &kernel_start / PAGE_SIZE; i < (size_t) &kernel_end / PAGE_SIZE; i++)
@@ -171,10 +171,10 @@ next_try:
         page_set_mark(l);

     alloc_start = i+j;
-    atomic_size_add(&total_allocated_memory, npages*PAGE_SIZE);
-    atomic_size_sub(&total_available_memory, npages*PAGE_SIZE);
+    atomic_int32_add(&total_allocated_pages, npages);
+    atomic_int32_sub(&total_available_pages, npages);
     if (task)
-        atomic_size_add(&(task->mem_usage), npages*PAGE_SIZE);
+        atomic_int32_add(&(task->mem_usage), npages);

 oom:
     spinlock_unlock(&bitmap_lock);
@@ -211,10 +211,10 @@ static void task_free(task_t* task, void* addr, size_t sz)
     for(i=index; i<index+npages; i++)
         page_unmarked(i);

-    atomic_size_sub(&total_allocated_memory, npages*PAGE_SIZE);
-    atomic_size_add(&total_available_memory, npages*PAGE_SIZE);
+    atomic_int32_sub(&total_allocated_pages, npages);
+    atomic_int32_add(&total_available_pages, npages);
     if (task)
-        atomic_size_sub(&(task->mem_usage), npages*PAGE_SIZE);
+        atomic_int32_sub(&(task->mem_usage), npages);

     spinlock_unlock(&bitmap_lock);
 }
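
With every counter now denominated in pages, the accounting invariant is easy to state: once mmu_init() establishes it, total_allocated_pages + total_available_pages == total_pages should survive every allocate/free pair. A hypothetical debug helper (not part of the commit) to assert this with the existing API:

    /* Sanity check: the page counters must stay balanced, assuming the
     * allocate and free paths both account in whole pages. */
    static void check_page_accounting(void) {
        int32_t total = atomic_int32_read(&total_pages);
        int32_t alloc = atomic_int32_read(&total_allocated_pages);
        int32_t avail = atomic_int32_read(&total_available_pages);

        if (alloc + avail != total)
            kprintf("page accounting broken: %d + %d != %d\n", alloc, avail, total);
    }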