/* Copyright (C) 2004  David Decotigny

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License
   as published by the Free Software Foundation; either version 2
   of the License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
   USA.
*/
#include <sos/physmem.h>
#include <sos/klibc.h>
#include <sos/assert.h>

#include "paging.h"


/** The structure of a page directory entry. See Intel vol 3 section
    3.6.4 */
struct x86_pde
{
  sos_ui32_t present        :1; /* 1=PT mapped */
  sos_ui32_t write          :1; /* 0=read-only, 1=read/write */
  sos_ui32_t user           :1; /* 0=supervisor, 1=user */
  sos_ui32_t write_through  :1; /* 0=write-back, 1=write-through */
  sos_ui32_t cache_disabled :1; /* 1=cache disabled */
  sos_ui32_t accessed       :1; /* 1=read/write access since last clear */
  sos_ui32_t zero           :1; /* Intel reserved */
  sos_ui32_t page_size      :1; /* 0=4kB, 1=4MB or 2MB (depending on PAE) */
  sos_ui32_t global_page    :1; /* Ignored (Intel reserved) */
  sos_ui32_t custom         :3; /* Do what you want with them */
  sos_ui32_t pt_paddr       :20;
} __attribute__ ((packed));


/** The structure of a page table entry. See Intel vol 3 section
    3.6.4 */
struct x86_pte
{
  sos_ui32_t present        :1; /* 1=page mapped */
  sos_ui32_t write          :1; /* 0=read-only, 1=read/write */
  sos_ui32_t user           :1; /* 0=supervisor, 1=user */
  sos_ui32_t write_through  :1; /* 0=write-back, 1=write-through */
  sos_ui32_t cache_disabled :1; /* 1=cache disabled */
  sos_ui32_t accessed       :1; /* 1=read/write access since last clear */
  sos_ui32_t dirty          :1; /* 1=write access since last clear */
  sos_ui32_t zero           :1; /* Intel reserved */
  sos_ui32_t global_page    :1; /* 1=No TLB invalidation upon cr3 switch
                                   (when PGE set in cr4) */
  sos_ui32_t custom         :3; /* Do what you want with them */
  sos_ui32_t paddr          :20;
} __attribute__ ((packed));


/** Structure of the x86 CR3 register: the Page Directory Base
    Register. See Intel x86 doc Vol 3 section 2.5 */
struct x86_pdbr
{
  sos_ui32_t zero1          :3; /* Intel reserved */
  sos_ui32_t write_through  :1; /* 0=write-back, 1=write-through */
  sos_ui32_t cache_disabled :1; /* 1=cache disabled */
  sos_ui32_t zero2          :7; /* Intel reserved */
  sos_ui32_t pd_paddr       :20;
} __attribute__ ((packed));
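

/* (Illustrative addition, not part of the original SOS source.)
   struct x86_pde and struct x86_pte must each be exactly 32 bits wide:
   a 4kB page directory or page table holds 1024 such 4-byte entries,
   and each PDE covers 4MB of virtual space (1024 * 4MB = 4GB).
   struct x86_pdbr simply mirrors the 32-bit cr3 register.  A
   hypothetical sanity check, left disabled here, could read: */
#if 0
static void paging_check_entry_sizes(void)
{
  SOS_ASSERT_FATAL(sizeof(struct x86_pde)  == 4);
  SOS_ASSERT_FATAL(sizeof(struct x86_pte)  == 4);
  SOS_ASSERT_FATAL(sizeof(struct x86_pdbr) == 4);
}
#endif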


/**
 * Helper macro to control the MMU: invalidate the TLB entry for the
 * page located at the given virtual address. See Intel x86 vol 3
 * section 3.7.
 */
#define invlpg(vaddr) \
  do { \
       __asm__ __volatile__("invlpg %0"::"m"(*((unsigned *)(vaddr)))); \
  } while(0)


/**
 * Helper macro to control the MMU: invalidate the whole TLB. See
 * Intel x86 vol 3 section 3.7.
 */
#define flush_tlb() \
  do { \
        unsigned long tmpreg; \
        asm volatile("movl %%cr3,%0\n\tmovl %0,%%cr3" :"=r" \
                     (tmpreg) : :"memory"); \
  } while (0)
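
/* (Illustrative note, not part of the original SOS source.)  invlpg()
   drops a single TLB entry and is the cheap option when one mapping
   changes, as in sos_paging_map()/sos_paging_unmap() below.
   flush_tlb() reloads cr3, which discards every non-global TLB entry:
   the heavy hammer for wholesale changes to the address space. */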


/**
 * Helper macro to compute the index in the PD for the given virtual
 * address
 */
#define virt_to_pd_index(vaddr) \
  (((unsigned)(vaddr)) >> 22)


/**
 * Helper macro to compute the index in the PT for the given virtual
 * address
 */
#define virt_to_pt_index(vaddr) \
  ( (((unsigned)(vaddr)) >> 12) & 0x3ff )


/**
 * Helper macro to compute the offset in the page for the given virtual
 * address
 */
#define virt_to_page_offset(vaddr) \
  (((unsigned)(vaddr)) & SOS_PAGE_MASK)
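
/* (Illustrative example, not part of the original SOS source.)  For a
   hypothetical virtual address such as 0x00803204, and assuming
   SOS_PAGE_MASK covers the low 12 bits (as its use in
   sos_paging_get_paddr() below suggests), these macros decompose it as:
     virt_to_pd_index(0x00803204)    == 0x00803204 >> 22           == 2
     virt_to_pt_index(0x00803204)    == (0x00803204 >> 12) & 0x3ff == 3
     virt_to_page_offset(0x00803204) == 0x00803204 & 0xfff         == 0x204
   i.e. PD entry #2, PT entry #3 within that page table, byte 0x204
   within the 4kB page. */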


/**
 * Helper function to map a page in the PD. Assumes that the RAM is
 * identity mapped, so that the actual (CPU) address of a PT can be
 * resolved directly from the PD entry
 */
static sos_ret_t paging_setup_map_helper(struct x86_pde * pd,
                                         sos_paddr_t ppage,
                                         sos_vaddr_t vaddr)
{
  /* Get the page directory entry and table entry index for this
     address */
  unsigned index_in_pd = virt_to_pd_index(vaddr);
  unsigned index_in_pt = virt_to_pt_index(vaddr);

  /* Make sure the page table was mapped */
  struct x86_pte * pt;
  if (pd[index_in_pd].present)
    {
      pt = (struct x86_pte*) (pd[index_in_pd].pt_paddr << 12);

      /* If we allocate a new entry in the PT, increase its reference
         count. This test will always be TRUE here, since the setup
         routine scans the kernel pages in a strictly increasing
         order: at each step, the map will result in the allocation of
         a new PT entry. For the sake of clarity, we keep the test
         here. */
      if (! pt[index_in_pt].present)
        sos_physmem_ref_physpage_at((sos_paddr_t)pt);

      /* The previous test should always be TRUE */
      else
        SOS_ASSERT_FATAL(FALSE); /* indicate a fatal error */
    }
  else
    {
      /* No: allocate a new one */
      pt = (struct x86_pte*) sos_physmem_ref_physpage_new(FALSE);
      if (! pt)
        return -SOS_ENOMEM;

      memset((void*)pt, 0x0, SOS_PAGE_SIZE);

      pd[index_in_pd].present  = TRUE;
      pd[index_in_pd].write    = 1; /* It would be too complicated to
                                       determine whether this area of
                                       kernel code/data is really R/W
                                       or read-only */
      pd[index_in_pd].pt_paddr = ((sos_paddr_t)pt) >> 12;
    }


  /* Map the page in the page table */
  pt[index_in_pt].present = 1;
  pt[index_in_pt].write   = 1;  /* It would be too complicated to
                                   determine whether this area of
                                   kernel code/data is really R/W or
                                   read-only */
  pt[index_in_pt].user    = 0;
  pt[index_in_pt].paddr   = ppage >> 12;

  return SOS_OK;
}


sos_ret_t sos_paging_subsystem_setup(sos_paddr_t identity_mapping_base,
                                     sos_paddr_t identity_mapping_top)
{
  /* The PDBR we will setup below */
  struct x86_pdbr cr3;

  /* Get the PD for the kernel */
  struct x86_pde * pd
    = (struct x86_pde*) sos_physmem_ref_physpage_new(FALSE);

  /* The iterator for scanning the kernel area */
  sos_paddr_t paddr;

  /* Make sure we actually got a page for the PD */
  if (! pd)
    return -SOS_ENOMEM;

  /* Reset the PD. For the moment, the whole RAM is still identity
     mapped, so physical addresses are also valid virtual addresses */
  memset((void*)pd,
         0x0,
         SOS_PAGE_SIZE);

  /* Identity-map the identity_mapping_* area */
  for (paddr = identity_mapping_base ;
       paddr < identity_mapping_top ;
       paddr += SOS_PAGE_SIZE)
    {
      if (paging_setup_map_helper(pd, paddr, paddr))
        return -SOS_ENOMEM;
    }

  /* Identity-map the PC-specific BIOS/Video area */
  for (paddr = BIOS_N_VIDEO_START ;
       paddr < BIOS_N_VIDEO_END ;
       paddr += SOS_PAGE_SIZE)
    {
      if (paging_setup_map_helper(pd, paddr, paddr))
        return -SOS_ENOMEM;
    }

  /* Ok, the kernel is now identity mapped in the PD. We still have to
     set up the mirroring */
  pd[virt_to_pd_index(SOS_PAGING_MIRROR_VADDR)].present = TRUE;
  pd[virt_to_pd_index(SOS_PAGING_MIRROR_VADDR)].write = 1;
  pd[virt_to_pd_index(SOS_PAGING_MIRROR_VADDR)].user  = 0;
  pd[virt_to_pd_index(SOS_PAGING_MIRROR_VADDR)].pt_paddr
    = ((sos_paddr_t)pd)>>12;
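
  /* (Illustrative note, not part of the original SOS source.)  This
     last PDE points back to the PD itself, as if the PD were an
     ordinary page table. Once paging is enabled, the consequence is:
       - the page table for PD entry i (if present) becomes visible at
         virtual address SOS_PAGING_MIRROR_VADDR + i*SOS_PAGE_SIZE;
       - in particular, the PD itself appears at
         SOS_PAGING_MIRROR_VADDR
          + SOS_PAGE_SIZE*virt_to_pd_index(SOS_PAGING_MIRROR_VADDR).
     This is exactly how sos_paging_map()/sos_paging_unmap() below
     locate the PD and PTs of the current address space. */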

  /* We now just have to configure the MMU to use our PD. See Intel
     x86 doc vol 3, section 3.6.3 */
  memset(& cr3, 0x0, sizeof(struct x86_pdbr)); /* Reset the PDBR */
  cr3.pd_paddr = ((sos_paddr_t)pd) >> 12;

  /* Actual loading of the PDBR in the MMU: set up cr3, then set bits
     31 [Paging Enabled] and 16 [Write Protect] of cr0; see Intel x86
     doc vol 3, sections 2.5, 3.6.1 and 4.11.3 + note table 4-2. The
     jumps that follow refresh the instruction stream right after
     paging has been enabled */
  asm volatile ("movl %0,%%cr3\n\t"
                "movl %%cr0,%%eax\n\t"
                "orl $0x80010000, %%eax\n\t" /* bit 31 | bit 16 */
                "movl %%eax,%%cr0\n\t"
                "jmp 1f\n\t"
                "1:\n\t"
                "movl $2f, %%eax\n\t"
                "jmp *%%eax\n\t"
                "2:\n\t" ::"r"(cr3):"memory","eax");

  /*
   * Here, the only memory available is:
   * - The BIOS+video area
   * - the identity_mapping_base .. identity_mapping_top area
   * - the PD mirroring area (4M)
   * All accesses to other virtual addresses will generate a #PF
   */

  return SOS_OK;
}


/* Assumes that the current address space is configured with the
 * mirroring enabled, so that the PD and the PTs can be accessed
 * through it. */
sos_ret_t sos_paging_map(sos_paddr_t ppage_paddr,
                         sos_vaddr_t vpage_vaddr,
                         sos_bool_t is_user_page,
                         sos_ui32_t flags)
{
  /* Get the page directory entry and table entry index for this
     address */
  unsigned index_in_pd = virt_to_pd_index(vpage_vaddr);
  unsigned index_in_pt = virt_to_pt_index(vpage_vaddr);

  /* Get the PD of the current context */
  struct x86_pde *pd = (struct x86_pde*)
    (SOS_PAGING_MIRROR_VADDR
     + SOS_PAGE_SIZE*virt_to_pd_index(SOS_PAGING_MIRROR_VADDR));

  /* Address of the PT in the mirroring */
  struct x86_pte * pt = (struct x86_pte*) (SOS_PAGING_MIRROR_VADDR
                                           + SOS_PAGE_SIZE*index_in_pd);

  /* The mapping of anywhere in the PD mirroring is FORBIDDEN ;) */
  if ((vpage_vaddr >= SOS_PAGING_MIRROR_VADDR)
      && (vpage_vaddr < SOS_PAGING_MIRROR_VADDR + SOS_PAGING_MIRROR_SIZE))
    return -SOS_EINVAL;

  /* Map a page for the PT if necessary */
  if (! pd[index_in_pd].present)
    {

      /* No: allocate a new one */
      sos_paddr_t pt_ppage
        = sos_physmem_ref_physpage_new(! (flags & SOS_VM_MAP_ATOMIC));
      if (! pt_ppage)
        {
          return -SOS_ENOMEM;
        }

      pd[index_in_pd].present  = TRUE;
      pd[index_in_pd].write    = 1; /* Allow writes at PD level: the
                                       effective protection is decided
                                       by the PT entries, see Intel
                                       vol 3 section 4.12 */
      pd[index_in_pd].user     = (is_user_page)?1:0;
      pd[index_in_pd].pt_paddr = ((sos_paddr_t)pt_ppage) >> 12;

      /*
       * The PT is now mapped in the PD mirroring
       */

      /* Invalidate TLB for the page we just added */
      invlpg(pt);

      /* Reset this new PT */
      memset((void*)pt, 0x0, SOS_PAGE_SIZE);
    }

  /* If we allocate a new entry in the PT, increase its reference
     count. */
  else if (! pt[index_in_pt].present)
    sos_physmem_ref_physpage_at(pd[index_in_pd].pt_paddr << 12);

  /* Otherwise, a physical page is implicitly unmapped */
  else
    sos_physmem_unref_physpage(pt[index_in_pt].paddr << 12);

  /* Map the page in the page table */
  pt[index_in_pt].present = TRUE;
  pt[index_in_pt].write   = (flags & SOS_VM_MAP_PROT_WRITE)?1:0;
  pt[index_in_pt].user    = (is_user_page)?1:0;
  pt[index_in_pt].paddr   = ppage_paddr >> 12;
  sos_physmem_ref_physpage_at(ppage_paddr);

  /*
   * The page is now mapped in the current address space
   */

  /* Invalidate TLB for the page we just added */
  invlpg(vpage_vaddr);

  return SOS_OK;
}
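

/* (Illustrative usage sketch, not part of the original SOS source.)
 * Given a physical page `ppage' obtained from the physical memory
 * allocator, a hypothetical caller could make it visible at the
 * page-aligned virtual address `vaddr', readable and writable, in
 * supervisor space:
 *
 *   sos_ret_t ret = sos_paging_map(ppage, vaddr,
 *                                  FALSE,
 *                                  SOS_VM_MAP_PROT_READ
 *                                  | SOS_VM_MAP_PROT_WRITE);
 *
 * sos_paging_map() takes its own reference on ppage (see the call to
 * sos_physmem_ref_physpage_at() above), so the caller may drop the
 * reference it got from the allocator once the mapping succeeded.
 */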


sos_ret_t sos_paging_unmap(sos_vaddr_t vpage_vaddr)
{
  sos_ret_t pt_unref_retval;

  /* Get the page directory entry and table entry index for this
     address */
  unsigned index_in_pd = virt_to_pd_index(vpage_vaddr);
  unsigned index_in_pt = virt_to_pt_index(vpage_vaddr);

  /* Get the PD of the current context */
  struct x86_pde *pd = (struct x86_pde*)
    (SOS_PAGING_MIRROR_VADDR
     + SOS_PAGE_SIZE*virt_to_pd_index(SOS_PAGING_MIRROR_VADDR));

  /* Address of the PT in the mirroring */
  struct x86_pte * pt = (struct x86_pte*) (SOS_PAGING_MIRROR_VADDR
                                           + SOS_PAGE_SIZE*index_in_pd);

  /* No page mapped at this address? */
  if (! pd[index_in_pd].present)
    return -SOS_EINVAL;
  if (! pt[index_in_pt].present)
    return -SOS_EINVAL;

  /* The unmapping of anywhere in the PD mirroring is FORBIDDEN ;) */
  if ((vpage_vaddr >= SOS_PAGING_MIRROR_VADDR)
      && (vpage_vaddr < SOS_PAGING_MIRROR_VADDR + SOS_PAGING_MIRROR_SIZE))
    return -SOS_EINVAL;

  /* Reclaim the physical page */
  sos_physmem_unref_physpage(pt[index_in_pt].paddr << 12);

  /* Unmap the page in the page table */
  memset(pt + index_in_pt, 0x0, sizeof(struct x86_pte));

  /* Invalidate TLB for the page we just unmapped */
  invlpg(vpage_vaddr);

  /* Reclaim this entry in the PT, which may free the PT */
  pt_unref_retval = sos_physmem_unref_physpage(pd[index_in_pd].pt_paddr << 12);
  SOS_ASSERT_FATAL(pt_unref_retval >= 0);
  if (pt_unref_retval == TRUE)
    /* If the PT is now completely unused... */
    {
      union { struct x86_pde pde; sos_ui32_t ui32; } u;

      /*
       * Reset the PDE
       */

      /* Mark the PDE as unavailable */
      u.ui32 = 0;

      /* Update the PD */
      pd[index_in_pd] = u.pde;

      /* Update the TLB */
      invlpg(pt);
    }

  return SOS_OK;
}


int sos_paging_get_prot(sos_vaddr_t vaddr)
{
  int retval;

  /* Get the page directory entry and table entry index for this
     address */
  unsigned index_in_pd = virt_to_pd_index(vaddr);
  unsigned index_in_pt = virt_to_pt_index(vaddr);

  /* Get the PD of the current context */
  struct x86_pde *pd = (struct x86_pde*)
    (SOS_PAGING_MIRROR_VADDR
     + SOS_PAGE_SIZE*virt_to_pd_index(SOS_PAGING_MIRROR_VADDR));

  /* Address of the PT in the mirroring */
  struct x86_pte * pt = (struct x86_pte*) (SOS_PAGING_MIRROR_VADDR
                                           + SOS_PAGE_SIZE*index_in_pd);

  /* No page mapped at this address? */
  if (! pd[index_in_pd].present)
    return SOS_VM_MAP_PROT_NONE;
  if (! pt[index_in_pt].present)
    return SOS_VM_MAP_PROT_NONE;

  /* The default access right of a mapped page is "read" on x86 */
  retval = SOS_VM_MAP_PROT_READ;
  if (pd[index_in_pd].write && pt[index_in_pt].write)
    retval |= SOS_VM_MAP_PROT_WRITE;

  return retval;
}


sos_paddr_t sos_paging_get_paddr(sos_vaddr_t vaddr)
{
  /* Get the page directory entry and table entry index for this
     address */
  unsigned index_in_pd = virt_to_pd_index(vaddr);
  unsigned index_in_pt = virt_to_pt_index(vaddr);
  unsigned offset_in_page = virt_to_page_offset(vaddr);

  /* Get the PD of the current context */
  struct x86_pde *pd = (struct x86_pde*)
    (SOS_PAGING_MIRROR_VADDR
     + SOS_PAGE_SIZE*virt_to_pd_index(SOS_PAGING_MIRROR_VADDR));

  /* Address of the PT in the mirroring */
  struct x86_pte * pt = (struct x86_pte*) (SOS_PAGING_MIRROR_VADDR
                                           + SOS_PAGE_SIZE*index_in_pd);

  /* No page mapped at this address? */
  if (! pd[index_in_pd].present)
    return (sos_paddr_t)NULL;
  if (! pt[index_in_pt].present)
    return (sos_paddr_t)NULL;

  return (pt[index_in_pt].paddr << 12) + offset_in_page;
}
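
/* (Illustrative example, not part of the original SOS source.)  If the
   PTE reached for some virtual address holds paddr == 0x00123 and the
   offset within the page is 0x456, sos_paging_get_paddr() returns
   (0x00123 << 12) + 0x456 == 0x00123456. */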
