diff --git a/kernel/tasks.c b/kernel/tasks.c
index 6800e662..00e23098 100644
--- a/kernel/tasks.c
+++ b/kernel/tasks.c
@@ -789,6 +789,15 @@ int block_task(tid_t id)
 	return ret;
 }
 
+/*
+ * This struct pads the id out to a full cache line so that
+ * updates to it cannot cause false sharing with neighboring data.
+ */
+typedef struct {
+	uint32_t	id __attribute__ ((aligned (CACHE_LINE)));
+	uint8_t		gap[CACHE_LINE-sizeof(uint32_t)];
+} last_id_t;
+
 /** @brief _The_ scheduler procedure
  *
  * Manages scheduling - right now this is just a round robin scheduler.
@@ -797,8 +806,9 @@ void scheduler(void)
 {
 	task_t* orig_task;
 	task_t* curr_task;
-	unsigned int i;
-	unsigned int new_id;
+	uint32_t i;
+	uint32_t new_id;
+	static last_id_t last_id = { 0 };
 
 #if MAX_CORES > 1
 	spinlock_irqsave_lock(&table_lock);
@@ -816,7 +826,7 @@ void scheduler(void)
 		curr_task->flags &= ~TASK_FPU_USED;
 	}
 
-	for(i=1, new_id=(curr_task->id + 1) % MAX_TASKS; 
+	for(i=1, new_id=(last_id.id + 1) % MAX_TASKS;
 		i<MAX_TASKS; i++, new_id=(new_id+1) % MAX_TASKS) 
 	{
 		if (task_table[new_id].status == TASK_READY) {
@@ -824,6 +834,7 @@ void scheduler(void)
 				curr_task->status = TASK_READY;
 			task_table[new_id].status = TASK_RUNNING;
 			curr_task = per_core(current_task) = task_table+new_id;
+			last_id.id = new_id; /* resume the round robin here next time */
 
 			goto get_task_out;
 		}
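
The padding idiom introduced above is worth spelling out. Below is a minimal,
self-contained sketch of the same technique, assuming a 64-byte cache line
(the kernel takes the real value from its CACHE_LINE constant; the value and
the main() harness here are illustrative only):

#include <stdint.h>
#include <stdio.h>

#define CACHE_LINE 64	/* assumption: 64-byte lines, typical on x86 */

typedef struct {
	/* the aligned attribute on the member raises the whole
	 * struct's alignment to CACHE_LINE */
	uint32_t	id __attribute__ ((aligned (CACHE_LINE)));
	/* explicit padding so sizeof(last_id_t) == CACHE_LINE */
	uint8_t		gap[CACHE_LINE - sizeof(uint32_t)];
} last_id_t;

int main(void)
{
	static last_id_t last_id = { 0 };

	/* the struct starts on a line boundary and fills the whole
	 * line, so no other variable can share a cache line with it */
	printf("sizeof  = %zu\n", sizeof(last_id_t));		/* 64 */
	printf("alignof = %zu\n", __alignof__(last_id_t));	/* 64 */
	printf("offset  = %lu\n",
	       (unsigned long)((uintptr_t)&last_id % CACHE_LINE)); /* 0 */
	return 0;
}

Because last_id occupies exactly one line, a core that writes it never
invalidates neighboring data cached on other cores (false sharing). Note also
that the search loop now rotates from the shared last_id.id rather than
curr_task->id, so the round robin advances globally across scheduler() calls
instead of restarting relative to whichever task happens to be running.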