mirror of https://github.com/hermitcore/libhermit.git

revise GDT initialization, load segments after GDT update

Stefan Lankes 2017-08-22 22:12:51 +02:00
parent 3500995762
commit d06c116211

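The ordering issue behind this change: lgdt only swaps the table the CPU consults for future segment loads, while the descriptors already cached for CS, DS, ES and SS still come from the old GDT until those registers are reloaded. The revised gdt_flush below therefore reloads them immediately after lgdt. A minimal sketch of that pattern, reusing the same x86-crate calls and the GDT_KERNEL_CODE/GDT_KERNEL_DATA indices introduced by this commit (the free-standing function name reload_kernel_segments is illustrative, not part of the commit):

    use x86::dtables::{self, DescriptorTablePointer};
    use x86::segmentation::{self, SegmentSelector};

    const GDT_KERNEL_CODE: usize = 1; // GDT index of the kernel code descriptor
    const GDT_KERNEL_DATA: usize = 2; // GDT index of the kernel data descriptor

    // Sketch: install a new GDT, then immediately reload the segment registers
    // so the CPU stops using descriptors cached from the previous table.
    unsafe fn reload_kernel_segments(gdtr: &DescriptorTablePointer) {
        dtables::lgdt(gdtr);
        segmentation::load_cs(SegmentSelector::new(GDT_KERNEL_CODE as u16));
        segmentation::load_ds(SegmentSelector::new(GDT_KERNEL_DATA as u16));
        segmentation::load_es(SegmentSelector::new(GDT_KERNEL_DATA as u16));
        segmentation::load_ss(SegmentSelector::new(GDT_KERNEL_DATA as u16));
    }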

@@ -1,4 +1,4 @@
-// Copyright (c) 2017 Stefan Lankes, RWTH Aachen University
+// Copyright (c) 2017 Stefan Lankes, RWTH Aachen University
 //
 // MIT License
 //
@@ -25,9 +25,15 @@
 #![allow(private_no_mangle_fns)]
 use consts::*;
-use x86::dtables::*;
+use spin;
+use x86::dtables::{self, DescriptorTablePointer};
+use x86::segmentation::{self, SegmentSelector};
 use core::mem::size_of;
+const GDT_NULL: usize = 0;
+const GDT_KERNEL_CODE: usize = 1;
+const GDT_KERNEL_DATA: usize = 2;
 // This segment is a data segment
 const GDT_FLAG_DATASEG: u8 = 0x02;
 /// This segment is a code segment
@@ -60,7 +66,7 @@ const GDT_FLAG_32_BIT: u8 = 0x40;
 const GDT_FLAG_64_BIT: u8 = 0x20;
 // a TSS descriptor is twice larger than a code/data descriptor
-const GDT_ENTRIES: usize = (6+MAX_CORES*2);
+const GDT_ENTRIES: usize = (7+MAX_CORES*2);
 const MAX_IST: usize = 3;
 // thread_local on a static mut, signals that the value of this static may
@@ -69,6 +75,7 @@ static mut GDT: [GdtEntry; GDT_ENTRIES] = [GdtEntry::new(0, 0, 0, 0); GDT_ENTRIE
 static mut GDTR: DescriptorTablePointer = DescriptorTablePointer { limit: 0, base: 0 };
 static mut TSS_BUFFER: TssBuffer = TssBuffer::new();
 static STACK_TABLE: [[IrqStack; MAX_IST]; MAX_CORES] = [[IrqStack::new(); MAX_IST]; MAX_CORES];
+static GDT_INIT: spin::Once<()> = spin::Once::new();
 extern "C" {
     static boot_stack: [u8; MAX_CORES*KERNEL_STACK_SIZE];
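
The new GDT_INIT static above guards the table-building half of gdt_install: spin::Once runs its closure exactly once no matter how many cores (or how many times) call gdt_install, while the gdt_flush call at the end of the function still executes on every invocation so each CPU loads the shared table. A stand-alone sketch of that split, using hypothetical names (TABLES_READY, cpu_init) that are not part of the commit:

    use spin::Once;

    static TABLES_READY: Once<()> = Once::new();

    fn cpu_init() {
        // Runs exactly once; callers on other cores that race here block
        // until the first initialization has completed.
        TABLES_READY.call_once(|| {
            // ... fill the shared GDT entries, TSS pointers, etc. ...
        });

        // Per-CPU work still happens on every call, e.g. loading the shared
        // table into this core's GDTR and reloading its segment registers.
    }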
@@ -142,7 +149,7 @@ struct TssBuffer {
 }
 impl TssBuffer {
-    pub const fn new() -> TssBuffer {
+    const fn new() -> TssBuffer {
         TssBuffer {
             tss: [TaskStateSegment::new(); MAX_CORES],
         }
@@ -180,82 +187,84 @@ impl IrqStack {
 #[no_mangle]
 pub unsafe fn gdt_install()
 {
-    let mut num: usize = 0;
+    GDT_INIT.call_once(|| {
+        let mut num: usize = 0;
-    GDTR.limit = (size_of::<GdtEntry>() * GDT.len() - 1) as u16;
-    GDTR.base = GDT.as_ptr() as u64;
+        GDTR.limit = (size_of::<GdtEntry>() * GDT.len() - 1) as u16;
+        GDTR.base = GDT.as_ptr() as u64;
-    /* Our NULL descriptor */
-    GDT[num] = GdtEntry::new(0, 0, 0, 0);
-    num += 1;
+        /* Our NULL descriptor */
+        GDT[num] = GdtEntry::new(0, 0, 0, 0);
+        num += 1;
-    /*
-     * The second entry is our Code Segment. The base address
-     * is 0, the limit is 4 GByte, it uses 4KByte granularity,
-     * and is a Code Segment descriptor.
-     */
-    GDT[num] = GdtEntry::new(0, 0,
-        GDT_FLAG_RING0 | GDT_FLAG_SEGMENT | GDT_FLAG_CODESEG | GDT_FLAG_PRESENT, GDT_FLAG_64_BIT);
-    num += 1;
+        /*
+         * The second entry is our Code Segment. The base address
+         * is 0, the limit is 4 GByte, it uses 4KByte granularity,
+         * and is a Code Segment descriptor.
+         */
+        GDT[num] = GdtEntry::new(0, 0,
+            GDT_FLAG_RING0 | GDT_FLAG_SEGMENT | GDT_FLAG_CODESEG | GDT_FLAG_PRESENT, GDT_FLAG_64_BIT);
+        num += 1;
-    /*
-     * The third entry is our Data Segment. It's EXACTLY the
-     * same as our code segment, but the descriptor type in
-     * this entry's access byte says it's a Data Segment
-     */
-    GDT[num] = GdtEntry::new(0, 0,
-        GDT_FLAG_RING0 | GDT_FLAG_SEGMENT | GDT_FLAG_DATASEG | GDT_FLAG_PRESENT, 0);
-    num += 1;
+        /*
+         * The third entry is our Data Segment. It's EXACTLY the
+         * same as our code segment, but the descriptor type in
+         * this entry's access byte says it's a Data Segment
+         */
+        GDT[num] = GdtEntry::new(0, 0,
+            GDT_FLAG_RING0 | GDT_FLAG_SEGMENT | GDT_FLAG_DATASEG | GDT_FLAG_PRESENT, GDT_FLAG_64_BIT);
+        num += 1;
-    /*
-     * Create code segment for 32bit user-space applications (ring 3)
-     */
-    GDT[num] = GdtEntry::new(0, 0xFFFFFFFF,
-        GDT_FLAG_RING3 | GDT_FLAG_SEGMENT | GDT_FLAG_CODESEG | GDT_FLAG_PRESENT,
-        GDT_FLAG_32_BIT | GDT_FLAG_4K_GRAN);
-    num += 1;
+        /*
+         * Create code segment for 32bit user-space applications (ring 3)
+         */
+        GDT[num] = GdtEntry::new(0, 0xFFFFFFFF,
+            GDT_FLAG_RING3 | GDT_FLAG_SEGMENT | GDT_FLAG_CODESEG | GDT_FLAG_PRESENT,
+            GDT_FLAG_32_BIT | GDT_FLAG_4K_GRAN);
+        num += 1;
-    /*
-     * Create data segment for 32bit user-space applications (ring 3)
-     */
-    GDT[num] = GdtEntry::new(0, 0xFFFFFFFF,
-        GDT_FLAG_RING3 | GDT_FLAG_SEGMENT | GDT_FLAG_DATASEG | GDT_FLAG_PRESENT,
-        GDT_FLAG_32_BIT | GDT_FLAG_4K_GRAN);
-    num += 1;
+        /*
+         * Create data segment for 32bit user-space applications (ring 3)
+         */
+        GDT[num] = GdtEntry::new(0, 0xFFFFFFFF,
+            GDT_FLAG_RING3 | GDT_FLAG_SEGMENT | GDT_FLAG_DATASEG | GDT_FLAG_PRESENT,
+            GDT_FLAG_32_BIT | GDT_FLAG_4K_GRAN);
+        num += 1;
-    /*
-     * Create code segment for 64bit user-space applications (ring 3)
-     */
-    GDT[num] = GdtEntry::new(0, 0,
-        GDT_FLAG_RING3 | GDT_FLAG_SEGMENT | GDT_FLAG_CODESEG | GDT_FLAG_PRESENT,
-        GDT_FLAG_64_BIT);
-    num += 1;
+        /*
+         * Create code segment for 64bit user-space applications (ring 3)
+         */
+        GDT[num] = GdtEntry::new(0, 0,
+            GDT_FLAG_RING3 | GDT_FLAG_SEGMENT | GDT_FLAG_CODESEG | GDT_FLAG_PRESENT,
+            GDT_FLAG_64_BIT);
+        num += 1;
-    /*
-     * Create data segment for 64bit user-space applications (ring 3)
-     */
-    GDT[num] = GdtEntry::new(0, 0,
-        GDT_FLAG_RING3 | GDT_FLAG_SEGMENT | GDT_FLAG_DATASEG | GDT_FLAG_PRESENT, 0);
-    num += 1;
+        /*
+         * Create data segment for 64bit user-space applications (ring 3)
+         */
+        GDT[num] = GdtEntry::new(0, 0,
+            GDT_FLAG_RING3 | GDT_FLAG_SEGMENT | GDT_FLAG_DATASEG | GDT_FLAG_PRESENT, GDT_FLAG_64_BIT);
+        num += 1;
-    /*
-     * Create TSS for each core (we use these segments for task switching)
-     */
-    for i in 0..MAX_CORES {
-        TSS_BUFFER.tss[i].rsp[0] = (&(boot_stack[0]) as *const _) as u64;
-        TSS_BUFFER.tss[i].rsp[0] += ((i+1) * KERNEL_STACK_SIZE - 0x10) as u64;
-        TSS_BUFFER.tss[i].ist[0] = 0; // ist will created per task
-        TSS_BUFFER.tss[i].ist[1] = (&(STACK_TABLE[i][2 /*IST number */ - 2]) as *const _) as u64;
-        TSS_BUFFER.tss[i].ist[1] += (KERNEL_STACK_SIZE - 0x10) as u64;
-        TSS_BUFFER.tss[i].ist[2] = (&(STACK_TABLE[i][3 /*IST number */ - 2]) as *const _) as u64;
-        TSS_BUFFER.tss[i].ist[2] += (KERNEL_STACK_SIZE - 0x10) as u64;
-        TSS_BUFFER.tss[i].ist[3] = (&(STACK_TABLE[i][4 /*IST number */ - 2]) as *const _) as u64;
-        TSS_BUFFER.tss[i].ist[3] += (KERNEL_STACK_SIZE - 0x10) as u64;
+        /*
+         * Create TSS for each core (we use these segments for task switching)
+         */
+        for i in 0..MAX_CORES {
+            TSS_BUFFER.tss[i].rsp[0] = (&(boot_stack[0]) as *const _) as u64;
+            TSS_BUFFER.tss[i].rsp[0] += ((i+1) * KERNEL_STACK_SIZE - 0x10) as u64;
+            TSS_BUFFER.tss[i].ist[0] = 0; // ist will created per task
+            TSS_BUFFER.tss[i].ist[1] = (&(STACK_TABLE[i][2 /*IST number */ - 2]) as *const _) as u64;
+            TSS_BUFFER.tss[i].ist[1] += (KERNEL_STACK_SIZE - 0x10) as u64;
+            TSS_BUFFER.tss[i].ist[2] = (&(STACK_TABLE[i][3 /*IST number */ - 2]) as *const _) as u64;
+            TSS_BUFFER.tss[i].ist[2] += (KERNEL_STACK_SIZE - 0x10) as u64;
+            TSS_BUFFER.tss[i].ist[3] = (&(STACK_TABLE[i][4 /*IST number */ - 2]) as *const _) as u64;
+            TSS_BUFFER.tss[i].ist[3] += (KERNEL_STACK_SIZE - 0x10) as u64;
-        let tss_ptr = &(TSS_BUFFER.tss[i]) as *const TaskStateSegment;
-        GDT[num+i*2] = GdtEntry::new(tss_ptr as u32, size_of::<TaskStateSegment>() as u32,
-            GDT_FLAG_PRESENT | GDT_FLAG_TSS | GDT_FLAG_RING0, 0);
-    }
+            let tss_ptr = &(TSS_BUFFER.tss[i]) as *const TaskStateSegment;
+            GDT[num+i*2] = GdtEntry::new(tss_ptr as u32, size_of::<TaskStateSegment>() as u32,
+                GDT_FLAG_PRESENT | GDT_FLAG_TSS | GDT_FLAG_RING0, 0);
+        }
+    });
     gdt_flush();
 }
@@ -270,5 +279,13 @@ pub unsafe fn set_tss(rsp: u64, ist: u64)
 #[no_mangle]
 pub unsafe fn gdt_flush()
 {
-    lgdt(&GDTR);
+    dtables::lgdt(&GDTR);
+    // Reload the segment descriptors
+    segmentation::load_cs(SegmentSelector::new(GDT_KERNEL_CODE as u16));
+    segmentation::load_ds(SegmentSelector::new(GDT_KERNEL_DATA as u16));
+    segmentation::load_es(SegmentSelector::new(GDT_KERNEL_DATA as u16));
+    segmentation::load_ss(SegmentSelector::new(GDT_KERNEL_DATA as u16));
+    //segmentation::load_fs(SegmentSelector::new(GDT_KERNEL_DATA as u16));
+    //segmentation::load_gs(SegmentSelector::new(GDT_KERNEL_DATA as u16));
 }
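
gdt_install fills seven fixed descriptors (count the num += 1 steps in the hunk above: null, two kernel segments, four user segments), so the bump of GDT_ENTRIES from 6+MAX_CORES*2 to 7+MAX_CORES*2 makes room for them; the old value left the table one GdtEntry short, since each core's TSS descriptor occupies two GdtEntry slots after the fixed part. A sketch of the resulting layout; the helper tss_gdt_index is illustrative only, and the selector values follow the standard x86 rule selector = index * 8 for ring-0 GDT selectors:

    // GDT layout produced by gdt_install after this commit:
    //   index 0 (GDT_NULL):        null descriptor
    //   index 1 (GDT_KERNEL_CODE): 64-bit kernel code   -> selector 0x08
    //   index 2 (GDT_KERNEL_DATA): kernel data          -> selector 0x10
    //   index 3: 32-bit user code, ring 3
    //   index 4: 32-bit user data, ring 3
    //   index 5: 64-bit user code, ring 3
    //   index 6: 64-bit user data, ring 3
    //   index 7+: two GdtEntry slots per core for that core's TSS descriptor

    // Hypothetical helper mirroring the GDT[num + i*2] indexing in gdt_install,
    // where num has reached 7 once the fixed descriptors are installed.
    const fn tss_gdt_index(core_id: usize) -> usize {
        7 + core_id * 2
    }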