/tmp/sos-code-article3/hwcore/paging.c (1970-01-01 01:00:00.000000000 +0100
) |
|
../sos-code-article4/hwcore/paging.c (2004-08-27 12:14:18.000000000 +0200
) |
|
|
|
| /* Copyright (C) 2004 David Decotigny |
| |
| This program is free software; you can redistribute it and/or |
| modify it under the terms of the GNU General Public License |
| as published by the Free Software Foundation; either version 2 |
| of the License, or (at your option) any later version. |
| |
| This program is distributed in the hope that it will be useful, |
| but WITHOUT ANY WARRANTY; without even the implied warranty of |
| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| GNU General Public License for more details. |
| |
| You should have received a copy of the GNU General Public License |
| along with this program; if not, write to the Free Software |
| Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, |
| USA. |
| */ |
| #include <sos/physmem.h> |
| #include <sos/klibc.h> |
| #include <sos/assert.h> |
| |
| #include "paging.h" |
| |
/** The structure of a page directory entry. See Intel vol 3 section
   3.6.4. Must match the hardware layout exactly (32 bits, packed) */
struct x86_pde
{
  sos_ui32_t present        :1; /* 1=PT mapped */
  sos_ui32_t write          :1; /* 0=read-only, 1=read/write */
  sos_ui32_t user           :1; /* 0=supervisor, 1=user */
  sos_ui32_t write_through  :1; /* 0=write-back, 1=write-through */
  sos_ui32_t cache_disabled :1; /* 1=cache disabled */
  sos_ui32_t accessed       :1; /* 1=read/write access since last clear */
  sos_ui32_t zero           :1; /* Intel reserved */
  sos_ui32_t page_size      :1; /* 0=4kB, 1=4MB or 2MB (depending on PAE) */
  sos_ui32_t global_page    :1; /* Ignored (Intel reserved) */
  sos_ui32_t custom         :3; /* Do what you want with them */
  sos_ui32_t pt_paddr       :20; /* Top 20 bits of the (4kB-aligned)
                                    physical address of the page table */
} __attribute__ ((packed));
| |
| |
/** The structure of a page table entry. See Intel vol 3 section
   3.6.4. Must match the hardware layout exactly (32 bits, packed) */
struct x86_pte
{
  sos_ui32_t present        :1; /* 1=PT mapped */
  sos_ui32_t write          :1; /* 0=read-only, 1=read/write */
  sos_ui32_t user           :1; /* 0=supervisor, 1=user */
  sos_ui32_t write_through  :1; /* 0=write-back, 1=write-through */
  sos_ui32_t cache_disabled :1; /* 1=cache disabled */
  sos_ui32_t accessed       :1; /* 1=read/write access since last clear */
  sos_ui32_t dirty          :1; /* 1=write access since last clear */
  sos_ui32_t zero           :1; /* Intel reserved */
  sos_ui32_t global_page    :1; /* 1=No TLB invalidation upon cr3 switch
                                   (when PG set in cr4) */
  sos_ui32_t custom         :3; /* Do what you want with them */
  sos_ui32_t paddr          :20; /* Top 20 bits of the (4kB-aligned)
                                    physical address of the mapped page */
} __attribute__ ((packed));
| |
| |
/** Structure of the x86 CR3 register: the Page Directory Base
    Register. See Intel x86 doc Vol 3 section 2.5. Loaded verbatim
    into CR3 by sos_paging_setup() */
struct x86_pdbr
{
  sos_ui32_t zero1          :3; /* Intel reserved */
  sos_ui32_t write_through  :1; /* 0=write-back, 1=write-through */
  sos_ui32_t cache_disabled :1; /* 1=cache disabled */
  sos_ui32_t zero2          :7; /* Intel reserved */
  sos_ui32_t pd_paddr       :20; /* Top 20 bits of the (4kB-aligned)
                                    physical address of the PD */
} __attribute__ ((packed));
| |
| |
/**
 * Helper macro to control the MMU: invalidate the TLB entry for the
 * page located at the given virtual address. See Intel x86 vol 3
 * section 3.7.
 *
 * The "m" constraint hands the memory operand itself to invlpg: the
 * dereference only names the address, it is never executed. The
 * do/while(0) wrapper makes the macro statement-safe.
 */
#define invlpg(vaddr) \
  do { \
       __asm__ __volatile__("invlpg %0"::"m"(*((unsigned *)(vaddr)))); \
  } while(0)
| |
| |
/**
 * Helper macro to control the MMU: invalidate the whole TLB. See
 * Intel x86 vol 3 section 3.7.
 *
 * Reloading cr3 with its own value flushes the TLB (global pages
 * excepted, when PGE is enabled). The "memory" clobber keeps the
 * compiler from reordering memory accesses around the flush.
 */
#define flush_tlb() \
  do { \
        unsigned long tmpreg; \
        asm volatile("movl %%cr3,%0\n\tmovl %0,%%cr3" :"=r" \
                     (tmpreg) : :"memory"); \
  } while (0)
| |
| |
/**
 * Helper macro to compute the index in the PD for the given virtual
 * address (bits 31..22: which 4MB region vaddr belongs to)
 */
#define virt_to_pd_index(vaddr) \
  (((unsigned)(vaddr)) >> 22)
| |
| |
/**
 * Helper macro to compute the index in the PT for the given virtual
 * address (bits 21..12: which 4kB page inside the 4MB region)
 */
#define virt_to_pt_index(vaddr) \
  ( (((unsigned)(vaddr)) >> 12) & 0x3ff )
| |
| |
/**
 * Helper macro to compute the offset in the page for the given virtual
 * address.
 *
 * NOTE(review): assumes SOS_PAGE_MASK selects the low offset bits,
 * ie SOS_PAGE_MASK == SOS_PAGE_SIZE - 1 — confirm in physmem.h
 */
#define virt_to_page_offset(vaddr) \
  (((unsigned)(vaddr)) & SOS_PAGE_MASK)
| |
| |
| /** |
| * Helper function to map a page in the pd.\ Suppose that the RAM |
| * is identity mapped to resolve PT actual (CPU) address from the PD |
| * entry |
| */ |
| static sos_ret_t paging_setup_map_helper(struct x86_pde * pd, |
| sos_paddr_t ppage, |
| sos_vaddr_t vaddr) |
| { |
| /* Get the page directory entry and table entry index for this |
| address */ |
| unsigned index_in_pd = virt_to_pd_index(vaddr); |
| unsigned index_in_pt = virt_to_pt_index(vaddr); |
| |
| /* Make sure the page table was mapped */ |
| struct x86_pte * pt; |
| if (pd[index_in_pd].present) |
| { |
| pt = (struct x86_pte*) (pd[index_in_pd].pt_paddr << 12); |
| |
| /* If we allocate a new entry in the PT, increase its reference |
| count. This test will always be TRUE here, since the setup |
| routine scans the kernel pages in a strictly increasing |
| order: at each step, the map will result in the allocation of |
| a new PT entry. For the sake of clarity, we keep the test |
| here. */ |
| if (! pt[index_in_pt].present) |
| sos_physmem_ref_physpage_at((sos_paddr_t)pt); |
| |
| /* The previous test should always be TRUE */ |
| else |
| SOS_ASSERT_FATAL(FALSE); /* indicate a fatal error */ |
| } |
| else |
| { |
| /* No : allocate a new one */ |
| pt = (struct x86_pte*) sos_physmem_ref_physpage_new(FALSE); |
| if (! pt) |
| return -SOS_ENOMEM; |
| |
| memset((void*)pt, 0x0, SOS_PAGE_SIZE); |
| |
| pd[index_in_pd].present = TRUE; |
| pd[index_in_pd].write = 1; /* It would be too complicated to |
| determine whether it |
| corresponds to a real R/W area |
| of the kernel code/data or |
| read-only */ |
| pd[index_in_pd].pt_paddr = ((sos_paddr_t)pt) >> 12; |
| } |
| |
| |
| /* Map the page in the page table */ |
| pt[index_in_pt].present = 1; |
| pt[index_in_pt].write = 1; /* It would be too complicated to |
| determine whether it corresponds to |
| a real R/W area of the kernel |
| code/data or R/O only */ |
| pt[index_in_pt].user = 0; |
| pt[index_in_pt].paddr = ppage >> 12; |
| |
| return SOS_OK; |
| } |
| |
| |
| sos_ret_t sos_paging_setup(sos_paddr_t identity_mapping_base, |
| sos_paddr_t identity_mapping_top) |
| { |
| /* The PDBR we will setup below */ |
| struct x86_pdbr cr3; |
| |
| /* Get the PD for the kernel */ |
| struct x86_pde * pd |
| = (struct x86_pde*) sos_physmem_ref_physpage_new(FALSE); |
| |
| /* The iterator for scanning the kernel area */ |
| sos_paddr_t paddr; |
| |
| /* Reset the PD. For the moment, there is still an IM for the whole |
| RAM, so that the paddr are also vaddr */ |
| memset((void*)pd, |
| 0x0, |
| SOS_PAGE_SIZE); |
| |
| /* Identity-map the identity_mapping_* area */ |
| for (paddr = identity_mapping_base ; |
| paddr < identity_mapping_top ; |
| paddr += SOS_PAGE_SIZE) |
| { |
| if (paging_setup_map_helper(pd, paddr, paddr)) |
| return -SOS_ENOMEM; |
| } |
| |
| /* Identity-map the PC-specific BIOS/Video area */ |
| for (paddr = BIOS_N_VIDEO_START ; |
| paddr < BIOS_N_VIDEO_END ; |
| paddr += SOS_PAGE_SIZE) |
| { |
| if (paging_setup_map_helper(pd, paddr, paddr)) |
| return -SOS_ENOMEM; |
| } |
| |
| /* Ok, kernel is now identity mapped in the PD. We still have to set |
| up the mirroring */ |
| pd[virt_to_pd_index(SOS_PAGING_MIRROR_VADDR)].present = TRUE; |
| pd[virt_to_pd_index(SOS_PAGING_MIRROR_VADDR)].write = 1; |
| pd[virt_to_pd_index(SOS_PAGING_MIRROR_VADDR)].user = 0; |
| pd[virt_to_pd_index(SOS_PAGING_MIRROR_VADDR)].pt_paddr |
| = ((sos_paddr_t)pd)>>12; |
| |
| /* We now just have to configure the MMU to use our PD. See Intel |
| x86 doc vol 3, section 3.6.3 */ |
| memset(& cr3, 0x0, sizeof(struct x86_pdbr)); /* Reset the PDBR */ |
| cr3.pd_paddr = ((sos_paddr_t)pd) >> 12; |
| |
| /* Actual loading of the PDBR in the MMU: setup cr3 + bits 31[Paging |
| Enabled] and 16[Write Protect] of cr0, see Intel x86 doc vol 3, |
| sections 2.5, 3.6.1 and 4.11.3 + note table 4-2 */ |
| asm volatile ("movl %0,%%cr3\n\t" |
| "movl %%cr0,%%eax\n\t" |
| "orl $0x80010000, %%eax\n\t" /* bit 31 | bit 16 */ |
| "movl %%eax,%%cr0\n\t" |
| "jmp 1f\n\t" |
| "1:\n\t" |
| "movl $2f, %%eax\n\t" |
| "jmp *%%eax\n\t" |
| "2:\n\t" ::"r"(cr3):"memory","eax"); |
| |
| /* |
| * Here, the only memory available is: |
| * - The BIOS+video area |
| * - the identity_mapping_base .. identity_mapping_top area |
| * - the PD mirroring area (4M) |
| * All accesses to other virtual addresses will generate a #PF |
| */ |
| |
| return SOS_OK; |
| } |
| |
| |
/**
 * Map ppage_paddr at vpage_vaddr in the CURRENT address space.
 * Assumes the mirroring is in place: the PD maps itself at
 * SOS_PAGING_MIRROR_VADDR, making the PD and every PT reachable as
 * virtual addresses inside the mirror region.
 *
 * @param ppage_paddr  Physical page (page-aligned), already referenced
 *                     by the caller (see paging.h)
 * @param vpage_vaddr  Virtual page address (page-aligned)
 * @param is_user_page TRUE => accessible from user mode
 * @param flags        Mask of SOS_VM_MAP_* bits
 * @return SOS_OK, -SOS_EINVAL (mirror area), or -SOS_ENOMEM
 */
sos_ret_t sos_paging_map(sos_paddr_t ppage_paddr,
			 sos_vaddr_t vpage_vaddr,
			 sos_bool_t is_user_page,
			 int flags)
{
  /* Get the page directory entry and table entry index for this
     address */
  unsigned index_in_pd = virt_to_pd_index(vpage_vaddr);
  unsigned index_in_pt = virt_to_pt_index(vpage_vaddr);

  /* Get the PD of the current context: inside the mirror, the PD
     appears as the page whose PT index equals the mirror's own PD
     index (it is its own PT) */
  struct x86_pde *pd = (struct x86_pde*)
    (SOS_PAGING_MIRROR_VADDR
     + SOS_PAGE_SIZE*virt_to_pd_index(SOS_PAGING_MIRROR_VADDR));

  /* Address of the PT in the mirroring: the i-th page of the mirror
     region is the PT covering the i-th 4MB of virtual space */
  struct x86_pte * pt = (struct x86_pte*) (SOS_PAGING_MIRROR_VADDR
					   + SOS_PAGE_SIZE*index_in_pd);

  /* The mapping of anywhere in the PD mirroring is FORBIDDEN ;) */
  if ((vpage_vaddr >= SOS_PAGING_MIRROR_VADDR)
      && (vpage_vaddr < SOS_PAGING_MIRROR_VADDR + SOS_PAGING_MIRROR_SIZE))
    return -SOS_EINVAL;

  /* Map a page for the PT if necessary */
  if (! pd[index_in_pd].present)
    {
      /* No : allocate a new one. The allocation may block unless the
	 caller passed SOS_VM_MAP_ATOMIC */
      sos_paddr_t pt_ppage
	= sos_physmem_ref_physpage_new(! (flags & SOS_VM_MAP_ATOMIC));
      if (! pt_ppage)
	{
	  return -SOS_ENOMEM;
	}

      pd[index_in_pd].present = TRUE;
      pd[index_in_pd].write = 1; /* Ignored in supervisor mode, see
				    Intel vol 3 section 4.12 */
      /* NOTE(review): |= (instead of =) relies on the PDE having been
	 zeroed at setup/unmap time — the surrounding fields use plain
	 assignment */
      pd[index_in_pd].user |= (is_user_page)?1:0;
      pd[index_in_pd].pt_paddr = ((sos_paddr_t)pt_ppage) >> 12;
      
      /*
       * The PT is now mapped in the PD mirroring
       */

      /* Invalidate TLB for the page we just added: the mirror vaddr
	 of this PT may still have a stale (non-present) translation
	 cached */
      invlpg(pt);

      /* Reset this new PT. Must come AFTER the invlpg above, since
	 the memset goes through the freshly-mapped mirror address */
      memset((void*)pt, 0x0, SOS_PAGE_SIZE);
    }

  /* If we allocate a new entry in the PT, increase its reference
     count: the PT's refcount tracks its number of present entries,
     so the PT page can be reclaimed when the last one goes away */
  else if (! pt[index_in_pt].present)
    sos_physmem_ref_physpage_at(pd[index_in_pd].pt_paddr << 12);

  /* Otherwise, that means that a physical page is implicitely
     unmapped: drop the reference the old mapping held on it */
  else
    sos_physmem_unref_physpage(pt[index_in_pt].paddr << 12);

  /* Map the page in the page table */
  pt[index_in_pt].present = TRUE;
  pt[index_in_pt].write = (flags & SOS_VM_MAP_PROT_WRITE)?1:0;
  pt[index_in_pt].user  = (is_user_page)?1:0;
  pt[index_in_pt].paddr = ppage_paddr >> 12;
  /* The mapping itself holds a reference on the mapped page */
  sos_physmem_ref_physpage_at(ppage_paddr);

  /*
   * The page is now mapped in the current address space
   */

  /* Invalidate TLB for the page we just added */
  invlpg(vpage_vaddr);

  return SOS_OK;
}
| |
| |
/**
 * Undo the mapping at vpage_vaddr in the CURRENT address space and
 * drop the reference on the underlying physical page. When the last
 * entry of the covering PT is removed, the PT itself is reclaimed
 * and its PDE released. Assumes the mirroring is in place.
 *
 * @param vpage_vaddr Virtual page address (page-aligned)
 * @return SOS_OK, or -SOS_EINVAL (nothing mapped / mirror area)
 */
sos_ret_t sos_paging_unmap(sos_vaddr_t vpage_vaddr)
{
  sos_ret_t pt_unref_retval;

  /* Get the page directory entry and table entry index for this
     address */
  unsigned index_in_pd = virt_to_pd_index(vpage_vaddr);
  unsigned index_in_pt = virt_to_pt_index(vpage_vaddr);

  /* Get the PD of the current context (through the mirroring) */
  struct x86_pde *pd = (struct x86_pde*)
    (SOS_PAGING_MIRROR_VADDR
     + SOS_PAGE_SIZE*virt_to_pd_index(SOS_PAGING_MIRROR_VADDR));

  /* Address of the PT in the mirroring */
  struct x86_pte * pt = (struct x86_pte*) (SOS_PAGING_MIRROR_VADDR
					   + SOS_PAGE_SIZE*index_in_pd);

  /* No page mapped at this address ? (the PDE must be checked first:
     when it is absent, the PT mirror address is itself unmapped) */
  if (! pd[index_in_pd].present)
    return -SOS_EINVAL;
  if (! pt[index_in_pt].present)
    return -SOS_EINVAL;

  /* The unmapping of anywhere in the PD mirroring is FORBIDDEN ;) */
  if ((vpage_vaddr >= SOS_PAGING_MIRROR_VADDR)
      && (vpage_vaddr < SOS_PAGING_MIRROR_VADDR + SOS_PAGING_MIRROR_SIZE))
    return -SOS_EINVAL;

  /* Reclaim the physical page: drop the reference the mapping held
     (must be done while the PTE still records its paddr) */
  sos_physmem_unref_physpage(pt[index_in_pt].paddr << 12);

  /* Unmap the page in the page table */
  memset(pt + index_in_pt, 0x0, sizeof(struct x86_pte));

  /* Invalidate TLB for the page we just unmapped */
  invlpg(vpage_vaddr);

  /* Reclaim this entry in the PT, which may free the PT (the PT's
     refcount tracks its number of present entries) */
  pt_unref_retval = sos_physmem_unref_physpage(pd[index_in_pd].pt_paddr << 12);
  SOS_ASSERT_FATAL(pt_unref_retval >= 0);
  if (pt_unref_retval > 0)
    /* If the PT is now completely unused... */
    {
      /* Release the PDE */
      memset(pd + index_in_pd, 0x0, sizeof(struct x86_pde));

      /* Update the TLB: the PT's mirror address is no longer mapped */
      invlpg(pt);
    }

  return SOS_OK;
}
| |
| |
| int sos_paging_get_prot(sos_vaddr_t vaddr) |
| { |
| int retval; |
| |
| /* Get the page directory entry and table entry index for this |
| address */ |
| unsigned index_in_pd = virt_to_pd_index(vaddr); |
| unsigned index_in_pt = virt_to_pt_index(vaddr); |
| |
| /* Get the PD of the current context */ |
| struct x86_pde *pd = (struct x86_pde*) |
| (SOS_PAGING_MIRROR_VADDR |
| + SOS_PAGE_SIZE*virt_to_pd_index(SOS_PAGING_MIRROR_VADDR)); |
| |
| /* Address of the PT in the mirroring */ |
| struct x86_pte * pt = (struct x86_pte*) (SOS_PAGING_MIRROR_VADDR |
| + SOS_PAGE_SIZE*index_in_pd); |
| |
| /* No page mapped at this address ? */ |
| if (! pd[index_in_pd].present) |
| return SOS_VM_MAP_PROT_NONE; |
| if (! pt[index_in_pt].present) |
| return SOS_VM_MAP_PROT_NONE; |
| |
| /* Default access right of an available page is "read" on x86 */ |
| retval = SOS_VM_MAP_PROT_READ; |
| if (pd[index_in_pd].write && pt[index_in_pt].write) |
| retval |= SOS_VM_MAP_PROT_WRITE; |
| |
| return retval; |
| } |
| |
| |
| sos_paddr_t sos_paging_get_paddr(sos_vaddr_t vaddr) |
| { |
| /* Get the page directory entry and table entry index for this |
| address */ |
| unsigned index_in_pd = virt_to_pd_index(vaddr); |
| unsigned index_in_pt = virt_to_pt_index(vaddr); |
| unsigned offset_in_page = virt_to_page_offset(vaddr); |
| |
| /* Get the PD of the current context */ |
| struct x86_pde *pd = (struct x86_pde*) |
| (SOS_PAGING_MIRROR_VADDR |
| + SOS_PAGE_SIZE*virt_to_pd_index(SOS_PAGING_MIRROR_VADDR)); |
| |
| /* Address of the PT in the mirroring */ |
| struct x86_pte * pt = (struct x86_pte*) (SOS_PAGING_MIRROR_VADDR |
| + SOS_PAGE_SIZE*index_in_pd); |
| |
| /* No page mapped at this address ? */ |
| if (! pd[index_in_pd].present) |
| return (sos_paddr_t)NULL; |
| if (! pt[index_in_pt].present) |
| return (sos_paddr_t)NULL; |
| |
| return (pt[index_in_pt].paddr << 12) + offset_in_page; |
| } |
| |
| |
/tmp/sos-code-article3/hwcore/paging.h (1970-01-01 01:00:00.000000000 +0100
) |
|
../sos-code-article4/hwcore/paging.h (2004-08-27 12:14:18.000000000 +0200
) |
|
|
|
| /* Copyright (C) 2004 David Decotigny |
| |
| This program is free software; you can redistribute it and/or |
| modify it under the terms of the GNU General Public License |
| as published by the Free Software Foundation; either version 2 |
| of the License, or (at your option) any later version. |
| |
| This program is distributed in the hope that it will be useful, |
| but WITHOUT ANY WARRANTY; without even the implied warranty of |
| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| GNU General Public License for more details. |
| |
| You should have received a copy of the GNU General Public License |
| along with this program; if not, write to the Free Software |
| Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, |
| USA. |
| */ |
| #ifndef _SOS_PAGING_H_ |
| #define _SOS_PAGING_H_ |
| |
| /** |
| * @file paging.h |
| * |
| * MMU management routines (arch-dependent). Setup the MMU without |
| * identity-mapping physical<->virtual addresses over the whole |
| * physical address space: a single, restricted and known, area is |
| * identity-mapped, the remaining kernel/user space is not. To access |
| * and manage the MMU translation tables (PD/PT on x86), we rely on a |
| * particular configuration, called "mirroring", where the top-level |
| * translation table (PD on x86) maps itself at a known and fixed (virtual) |
| * address. The only assumption for this to be possible is that the |
| * structure of the translation table entries are compatible at the |
 | * different levels of vaddr->paddr translation process (PDE and PTE |
| * on x86 are Ok). Credits go to Christophe Avoinne for that. |
| */ |
| |
| #include <sos/types.h> |
| #include <sos/errno.h> |
| |
/**
 * sos_paging_map flags
 */
/** Usual virtual memory access rights */
#define SOS_VM_MAP_PROT_NONE  0
#define SOS_VM_MAP_PROT_READ  (1<<0)
#define SOS_VM_MAP_PROT_WRITE (1<<1)
/* EXEC not supported */
/** Mapping a page may involve a physical page allocation (for a new
    PT), hence may potentially block. Set this flag to forbid any
    blocking allocation during the mapping */
#define SOS_VM_MAP_ATOMIC     (1<<31)

/** Virtual address where the mirroring takes place */
#define SOS_PAGING_MIRROR_VADDR 0x3fc00000 /* 1GB - 4MB */
/** Length of the space reserved for the mirroring in the kernel
    virtual space */
#define SOS_PAGING_MIRROR_SIZE (1 << 22) /* 1 PD = 1024 Page Tables = 4MB */
| |
| /** |
| * Setup initial page directory structure where the kernel is |
| * identically-mapped, and the mirroring. This routine also |
| * identity-maps the BIOS and video areas, to allow some debugging |
| * text to be printed to the console. Finally, this routine installs |
| * the whole configuration into the MMU. |
| */ |
| sos_ret_t sos_paging_setup(sos_paddr_t identity_mapping_base, |
| sos_paddr_t identity_mapping_top); |
| |
| /** |
| * Map the given physical page at the given virtual address in the |
| * current address space. |
| * |
| * @note *IMPORTANT*: The physical page ppage_paddr *MUST* have been |
| * referenced by the caller through either a call to |
| * sos_physmem_ref_physpage_new() or sos_physmem_ref_physpage_at(). It |
| * would work if this were untrue, but this would be INCORRECT (it is |
| * expected that one is owning the page before mapping it, or |
| * otherwise the page could have been stolen by an interrupt or |
| * another thread). |
| * |
| * @param ppage_paddr The address of a physical page (page-aligned) |
| * @param vpage_vaddr The address of the virtual page (page-aligned) |
| * @param is_user_page TRUE when the page is available from user space |
| * @param flags A mask made of SOS_VM_* bits |
| * |
| * @note Unless the SOS_VM_MAP_ATOMIC bit is set in the flags, the |
| * function may potentially block, because a physical page may be |
| * allocated for a new PT. |
| */ |
| sos_ret_t sos_paging_map(sos_paddr_t ppage_paddr, |
| sos_vaddr_t vpage_vaddr, |
| sos_bool_t is_user_page, |
| int flags); |
| |
| /** |
| * Undo the mapping from vaddr to the underlying physical page (if any) |
| * @param vpage_vaddr The address of the virtual page (page-aligned) |
| */ |
| sos_ret_t sos_paging_unmap(sos_vaddr_t vpage_vaddr); |
| |
| /** |
| * Return the page protection flags (SOS_VM_MAP_PROT_*) associated |
| * with the address, or SOS_VM_MAP_PROT_NONE when page is not mapped |
| */ |
| int sos_paging_get_prot(sos_vaddr_t vaddr); |
| |
| /** |
| * Return the physical address of the given virtual address. Since page |
| * at physical addr 0 is not mapped, the NULL result means "page not |
| * mapped". |
| */ |
| sos_paddr_t sos_paging_get_paddr(sos_vaddr_t vaddr); |
| |
/**
 * Tell whether the address is physically mapped.
 *
 * Relies on sos_paging_get_paddr() returning NULL for unmapped
 * addresses; correct only because physical page 0 is never mapped.
 */
#define sos_paging_check_present(vaddr) \
  (sos_paging_get_paddr(vaddr) != NULL)
| |
| |
| #endif /* _SOS_PAGING_H_ */ |
| |
/tmp/sos-code-article3/sos/main.c (2004-08-03 10:03:27.000000000 +0200
) |
|
../sos-code-article4/sos/main.c (2004-08-27 12:14:19.000000000 +0200
) |
|
|
|
#include <hwcore/i8254.h> | #include <hwcore/i8254.h> |
#include <sos/list.h> | #include <sos/list.h> |
#include <sos/physmem.h> | #include <sos/physmem.h> |
| #include <hwcore/paging.h> |
| #include <sos/list.h> |
#include <sos/klibc.h> | #include <sos/klibc.h> |
#include <sos/assert.h> | #include <sos/assert.h> |
#include <drivers/x86_videomem.h> | #include <drivers/x86_videomem.h> |
|
|
| |
} | } |
| |
#define MY_PPAGE_NUM_INT 511 | /* Page fault exception handler */ |
struct my_ppage | static void pgflt_ex(int exid) |
sos_ui32_t before[MY_PPAGE_NUM_INT]; | sos_bochs_printf("Got page fault\n"); |
struct my_ppage *prev, *next; | sos_x86_videomem_printf(10, 30, |
sos_ui32_t after[MY_PPAGE_NUM_INT]; | SOS_X86_VIDEO_FG_LTRED | SOS_X86_VIDEO_BG_BLUE, |
}; /* sizeof() Must be <= 4kB */ | "Got EXPECTED (?) Page fault ! But where ???"); |
| for (;;) ; |
| } |
static void test_physmem() | static void test_paging(sos_vaddr_t sos_kernel_core_top_vaddr) |
/* We place the pages we did allocate here */ | /* The (linear) address of the page holding the code we are |
struct my_ppage *ppage_list, *my_ppage; | currently executing */ |
sos_count_t num_alloc_ppages = 0, num_free_ppages = 0; | sos_vaddr_t vpage_code = SOS_PAGE_ALIGN_INF(test_paging); |
| |
ppage_list = NULL; | /* The new physical page that will hold the code */ |
while ((my_ppage = (struct my_ppage*)sos_physmem_ref_physpage_new(FALSE)) | sos_paddr_t ppage_new; |
!= NULL) | |
{ | /* Where this page will be mapped temporarily in order to copy the |
int i; | code into it: right after the kernel code/data */ |
num_alloc_ppages++; | sos_vaddr_t vpage_tmp = sos_kernel_core_top_vaddr; |
| |
/* Print the allocation status */ | |
sos_x86_videomem_printf(2, 0, | |
SOS_X86_VIDEO_FG_YELLOW | |
| SOS_X86_VIDEO_BG_BLUE, | |
"Could allocate %d pages ", | |
num_alloc_ppages); | |
| |
/* We fill this page with its address */ | |
for (i = 0 ; i < MY_PPAGE_NUM_INT ; i++) | |
my_ppage->before[i] = my_ppage->after[i] = (sos_ui32_t)my_ppage; | |
/* We add this page at the tail of our list of ppages */ | unsigned i; |
list_add_tail(ppage_list, my_ppage); | |
} | |
/* Now we release these pages in FIFO order */ | /* Bind the page fault exception to one of our routines */ |
while ((my_ppage = list_pop_head(ppage_list)) != NULL) | sos_exception_set_routine(SOS_EXCEPT_PAGE_FAULT, |
| pgflt_ex); |
| |
| /* |
| * Test 1: move the page where we execute the code elsewhere in |
| * physical memory |
| */ |
| sos_x86_videomem_printf(4, 0, |
| SOS_X86_VIDEO_FG_LTGREEN | SOS_X86_VIDEO_BG_BLUE, |
| "Moving current code elsewhere in physical memory:"); |
| |
| |
| /* Allocate a new physical page */ |
| ppage_new = sos_physmem_ref_physpage_new(FALSE); |
| if (! ppage_new) |
/* We make sure this page was not overwritten by any unexpected | /* STOP ! No memory left */ |
value */ | sos_x86_videomem_putstring(20, 0, |
int i; | SOS_X86_VIDEO_FG_LTRED |
for (i = 0 ; i < MY_PPAGE_NUM_INT ; i++) | | SOS_X86_VIDEO_BG_BLUE, |
{ | "test_paging : Cannot allocate page"); |
/* We don't get what we expect ! */ | return; |
if ((my_ppage->before[i] != (sos_ui32_t)my_ppage) | |
|| (my_ppage->after[i] != (sos_ui32_t)my_ppage)) | |
{ | |
/* STOP ! */ | |
sos_x86_videomem_putstring(20, 0, | |
SOS_X86_VIDEO_FG_LTRED | |
| SOS_X86_VIDEO_BG_BLUE, | |
"Page overwritten"); | |
return; | |
} | |
} | |
| |
/* Release the descriptor */ | |
if (sos_physmem_unref_physpage((sos_paddr_t)my_ppage) < 0) | |
{ | |
/* STOP ! */ | |
sos_x86_videomem_putstring(20, 0, | |
SOS_X86_VIDEO_FG_LTRED | |
| SOS_X86_VIDEO_BG_BLUE, | |
"Cannot release page"); | |
return; | |
} | |
| |
/* Print the deallocation status */ | |
num_free_ppages ++; | |
sos_x86_videomem_printf(2, 0, | |
SOS_X86_VIDEO_FG_YELLOW | |
| SOS_X86_VIDEO_BG_BLUE, | |
"Could free %d pages ", | |
num_free_ppages); | |
| |
/* Print the overall stats */ | sos_x86_videomem_printf(5, 0, |
sos_x86_videomem_printf(2, 0, | SOS_X86_VIDEO_FG_YELLOW | SOS_X86_VIDEO_BG_BLUE, |
SOS_X86_VIDEO_FG_LTGREEN | "Hello from the address 0x%x in physical memory", |
| SOS_X86_VIDEO_BG_BLUE, | sos_paging_get_paddr(vpage_code)); |
"Could allocate %d bytes, could free %d bytes ", | |
num_alloc_ppages << SOS_PAGE_SHIFT, | sos_x86_videomem_printf(6, 0, |
num_free_ppages << SOS_PAGE_SHIFT); | SOS_X86_VIDEO_FG_YELLOW | SOS_X86_VIDEO_BG_BLUE, |
| "Transfer vpage 0x%x: ppage 0x%x -> 0x%x (tmp vpage 0x%x)", |
| vpage_code, |
| sos_paging_get_paddr(vpage_code), |
| ppage_new, |
| (unsigned)vpage_tmp); |
| |
| /* Map the page somewhere (right after the kernel mapping) in order |
| to copy the code we are currently executing */ |
| sos_paging_map(ppage_new, vpage_tmp, |
| FALSE, |
| SOS_VM_MAP_ATOMIC |
| | SOS_VM_MAP_PROT_READ |
| | SOS_VM_MAP_PROT_WRITE); |
| |
| /* Ok, the new page is referenced by the mapping, we can release our |
| reference to it */ |
| sos_physmem_unref_physpage(ppage_new); |
| |
| /* Copy the contents of the current page of code to this new page |
| mapping */ |
| memcpy((void*)vpage_tmp, |
| (void*)vpage_code, |
| SOS_PAGE_SIZE); |
| |
| /* Transfer the mapping of the current page of code to this new page */ |
| sos_paging_map(ppage_new, vpage_code, |
| FALSE, |
| SOS_VM_MAP_ATOMIC |
| | SOS_VM_MAP_PROT_READ |
| | SOS_VM_MAP_PROT_WRITE); |
| |
 | /* Ok, here we are: we have changed the physical page that holds the |
| code we are executing ;). However, this new page is mapped at 2 |
| virtual addresses: |
| - vpage_tmp |
| - vpage_code |
| We can safely unmap it from sos_kernel_core_top_vaddr, while |
| still keeping the vpage_code mapping */ |
| sos_paging_unmap(vpage_tmp); |
| |
| sos_x86_videomem_printf(7, 0, |
| SOS_X86_VIDEO_FG_YELLOW | SOS_X86_VIDEO_BG_BLUE, |
| "Hello from the address 0x%x in physical memory", |
| sos_paging_get_paddr(vpage_code)); |
| |
| sos_x86_videomem_printf(9, 0, |
| SOS_X86_VIDEO_FG_LTGREEN | SOS_X86_VIDEO_BG_BLUE, |
| "Provoking a page fault:"); |
| |
| /* |
| * Test 2: make sure the #PF handler works |
| */ |
| |
| /* Scan part of the kernel up to a page fault. This page fault |
| should occur on the first page unmapped after the kernel area, |
| which is exactly the page we temporarily mapped/unmapped |
| (vpage_tmp) above to move the kernel code we are executing */ |
| for (i = vpage_code ; /* none */ ; i += SOS_PAGE_SIZE) |
| { |
| unsigned *pint = (unsigned *)SOS_PAGE_ALIGN_INF(i); |
| sos_bochs_printf("Test vaddr 0x%x : val=", (unsigned)pint); |
| sos_x86_videomem_printf(10, 0, |
| SOS_X86_VIDEO_FG_YELLOW | SOS_X86_VIDEO_BG_BLUE, |
| "Test vaddr 0x%x : val= ", |
| (unsigned)pint); |
| sos_bochs_printf("0x%x\n", *pint); |
| sos_x86_videomem_printf(10, 30, |
| SOS_X86_VIDEO_FG_YELLOW | SOS_X86_VIDEO_BG_BLUE, |
| "0x%x ", *pint); |
| } |
SOS_ASSERT_FATAL(num_alloc_ppages == num_free_ppages); | /* BAD ! Did not get the page fault... */ |
| sos_x86_videomem_printf(20, 0, |
| SOS_X86_VIDEO_FG_LTRED | SOS_X86_VIDEO_BG_BLUE, |
| "We should have had a #PF at vaddr 0x%x !", |
| vpage_tmp); |
| |
| |
void sos_main(unsigned long magic, unsigned long addr) | void sos_main(unsigned long magic, unsigned long addr) |
{ | { |
|
|
sos_physmem_setup((mbi->mem_upper<<10) + (1<<20), | sos_physmem_setup((mbi->mem_upper<<10) + (1<<20), |
& sos_kernel_core_base_paddr, | & sos_kernel_core_base_paddr, |
& sos_kernel_core_top_paddr); | & sos_kernel_core_top_paddr); |
test_physmem(); | |
| /* |
| * Switch to paged-memory mode |
| */ |
| |
 | /* Disabling interrupts would seem more correct, but it's not really |
| necessary at this stage */ |
| if (sos_paging_setup(sos_kernel_core_base_paddr, |
| sos_kernel_core_top_paddr)) |
| sos_bochs_printf("Could not setup paged memory mode\n"); |
| sos_x86_videomem_printf(2, 0, |
| SOS_X86_VIDEO_FG_YELLOW | SOS_X86_VIDEO_BG_BLUE, |
| "Paged-memory mode is activated"); |
| |
| test_paging(sos_kernel_core_top_paddr); |
 /* An operating system never ends */ | /* An operating system never ends */ |
for (;;) | for (;;) |
| |