
add debug messages, unmap pages after the failed search for the MP tables

Stefan Lankes 2016-11-05 23:02:24 +01:00
parent d69895a766
commit 0f0df0e66e
2 changed files with 19 additions and 8 deletions
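For context before the hunks: the second half of the commit message concerns search_mptable(), where a page is temporarily identity-mapped so it can be scanned for the MP floating pointer structure; that mapping should be released again if nothing is found. A minimal sketch of the intended flow, using only the page_map()/page_unmap() calls and constants that appear in this diff (scan_mp_page() is a hypothetical helper, and the actual cleanup code sits outside the hunks shown below):

/* Sketch only: probe one physical page for the MP floating pointer structure
 * and drop the temporary mapping again on failure. */
static apic_mp_t* scan_mp_page(size_t ptr, size_t flags)
{
	size_t vptr, i;
	apic_mp_t* tmp;

	/* identity-map the page so it can be read; page_map() returns 0 on success */
	if (page_map(ptr & PAGE_MASK, ptr & PAGE_MASK, 1, flags)) {
		kprintf("Failed to map 0x%zx, which is required to search for the MP tables\n", ptr);
		return NULL;
	}
	vptr = ptr & PAGE_MASK;

	/* scan in 4-byte steps; vptr stays at the page base, as in the new loop */
	for (i = 0; i < PAGE_SIZE; i += 4) {
		tmp = (apic_mp_t*) (vptr + i);
		if (tmp->signature == MP_FLT_SIGNATURE)
			return tmp;	/* caller keeps the mapping and registers the VMA */
	}

	/* nothing found, so the temporary mapping is no longer needed */
	page_unmap(vptr, 1);
	return NULL;
}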


@@ -327,7 +327,7 @@ int apic_timer_is_running(void)
 int apic_timer_deadline(uint32_t ticks)
 {
 	if (BUILTIN_EXPECT(apic_is_enabled() && icr, 1)) {
-		//kprintf("timer oneshot %ld\n", t);
+		//kprintf("timer oneshot %ld at core %d\n", ticks, CORE_ID);
 		lapic_timer_oneshot();
 		lapic_timer_set_counter(ticks * icr);
@@ -343,6 +343,7 @@ int apic_disable_timer(void)
 	if (BUILTIN_EXPECT(!apic_is_enabled(), 0))
 		return -EINVAL;
+	//kprintf("Disable local APIC timer at core %d\n", CORE_ID);
 	lapic_timer_disable();
 	return 0;
@@ -351,6 +352,7 @@ int apic_disable_timer(void)
 int apic_enable_timer(void)
 {
 	if (BUILTIN_EXPECT(apic_is_enabled() && icr, 1)) {
+		//kprintf("Enable local APIC timer at core %d\n", CORE_ID);
 		lapic_timer_periodic();
 		lapic_timer_set_counter(icr);
@@ -378,13 +380,15 @@ static apic_mp_t* search_mptable(size_t base, size_t limit) {
 			vptr = 0;
 		}
-		if (BUILTIN_EXPECT(!page_map(ptr & PAGE_MASK, ptr & PAGE_MASK, 1, flags), 1))
+		if (BUILTIN_EXPECT(!page_map(ptr & PAGE_MASK, ptr & PAGE_MASK, 1, flags), 1)) {
 			vptr = ptr & PAGE_MASK;
-		else
+		} else {
+			kprintf("Failed to map 0x%zx, which is required to search for the MP tables\n", ptr);
 			return NULL;
+		}
-		for(i=0; (vptr) && (i<PAGE_SIZE); i+=4, vptr+=4) {
-			tmp = (apic_mp_t*) vptr;
+		for(i=0; (vptr) && (i<PAGE_SIZE); i+=4) {
+			tmp = (apic_mp_t*) (vptr+i);
 			if (tmp->signature == MP_FLT_SIGNATURE) {
 				if (!((tmp->version > 4) || (tmp->features[0]))) {
 					vma_add(ptr & PAGE_MASK, (ptr & PAGE_MASK) + PAGE_SIZE, VMA_READ|VMA_WRITE);
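The loop rewrite in this hunk is what makes that cleanup possible: previously vptr was advanced together with i, so after an unsuccessful scan it no longer pointed at the page that had been mapped. Indexing with vptr+i instead keeps vptr at the page-aligned base address, which can later be handed back along the lines of the hypothetical fragment below ('found' is only an illustrative flag, not part of the commit).

	/* hypothetical cleanup after the scan loop; relies on vptr still
	 * holding the page base mapped above */
	if (!found)
		page_unmap(vptr, 1);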
@@ -983,7 +987,7 @@ int ipi_tlb_flush(void)
 static void apic_tlb_handler(struct state *s)
 {
-	//kputs("Receive IPI to flush the TLB\n");
+	//kprintf("Receive IPI at core %d to flush the TLB\n", CORE_ID);
 	write_cr3(read_cr3());
 }
 #endif


@@ -122,6 +122,8 @@ int __page_map(size_t viraddr, size_t phyaddr, size_t npages, size_t bits, uint8
 	long first[PAGE_LEVELS], last[PAGE_LEVELS];
 	int8_t send_ipi = 0;
+	//kprintf("Map %d pages at 0x%zx\n", npages, viraddr);
 	/* Calculate index boundaries for page map traversal */
 	for (lvl=0; lvl<PAGE_LEVELS; lvl++) {
 		first[lvl] = (vpn ) >> (lvl * PAGE_MAP_BITS);
@@ -157,8 +159,10 @@ int __page_map(size_t viraddr, size_t phyaddr, size_t npages, size_t bits, uint8
 				int8_t flush = 0;
 				/* do we have to flush the TLB? */
-				if (self[lvl][vpn] & PG_PRESENT)
+				if (self[lvl][vpn] & PG_PRESENT) {
+					//kprintf("Remap address 0x%zx at core %d\n", viraddr, CORE_ID);
 					send_ipi = flush = 1;
+				}
 				self[lvl][vpn] = phyaddr | bits | PG_PRESENT | PG_ACCESSED;
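The remap case here is also what the TLB-related debug messages are about: when __page_map() overwrites an entry that is already PG_PRESENT, other cores may still cache the old translation, so send_ipi is raised and a flush has to be broadcast once the new entries are written. A rough sketch of the sender side, assuming the ipi_tlb_flush() routine named in the earlier hunk header is that broadcast entry point and that the check happens once after the mapping loop:

	/* sketch: after the page tables have been rewritten, ask the other
	 * cores to drop their stale translations */
	if (send_ipi)
		ipi_tlb_flush();

On the receiving cores, apic_tlb_handler() shown above simply reloads CR3 via write_cr3(read_cr3()), which invalidates all non-global TLB entries on that core.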
@@ -168,6 +172,7 @@ int __page_map(size_t viraddr, size_t phyaddr, size_t npages, size_t bits, uint8
 					tlb_flush_one_page(vpn << PAGE_BITS, 0);
 				phyaddr += PAGE_SIZE;
+				//viraddr += PAGE_SIZE;
 			}
 		}
 	}
@@ -187,6 +192,8 @@ int page_unmap(size_t viraddr, size_t npages)
 	if (BUILTIN_EXPECT(!npages, 0))
 		return 0;
+	//kprintf("Unmap %d pages at 0x%zx\n", npages, viraddr);
 	spinlock_irqsave_lock(&page_lock);
 	/* Start iterating through the entries.
@@ -258,7 +265,7 @@ void page_fault_handler(struct state *s)
 		flags = PG_USER|PG_RW;
 		if (has_nx()) // set no execution flag to protect the heap
 			flags |= PG_XD;
-		ret = page_map(viraddr, phyaddr, 1, flags);
+		ret = __page_map(viraddr, phyaddr, 1, flags, 0);
 		if (BUILTIN_EXPECT(ret, 0)) {
 			kprintf("map_region: could not map %#lx to %#lx, task = %u\n", phyaddr, viraddr, task->id);