SimpleOS


/* Copyright (C) 2004  David Decotigny

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License
   as published by the Free Software Foundation; either version 2
   of the License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
   USA. 
*/
#include <sos/physmem.h>
#include <sos/klibc.h>
#include <sos/assert.h>

#include "paging.h"

/** The structure of a page directory entry. See Intel vol 3 section
    3.6.4 */
struct x86_pde
{
  sos_ui32_t present        :1; /* 1=PT mapped */
  sos_ui32_t write          :1; /* 0=read-only, 1=read/write */
  sos_ui32_t user           :1; /* 0=supervisor, 1=user */
  sos_ui32_t write_through  :1; /* 0=write-back, 1=write-through */
  sos_ui32_t cache_disabled :1; /* 1=cache disabled */
  sos_ui32_t accessed       :1; /* 1=read/write access since last clear */
  sos_ui32_t zero           :1; /* Intel reserved */
  sos_ui32_t page_size      :1; /* 0=4kB, 1=4MB or 2MB (depending on PAE) */
  sos_ui32_t global_page    :1; /* Ignored (Intel reserved) */
  sos_ui32_t custom         :3; /* Do what you want with them */
  sos_ui32_t pt_paddr       :20;
} __attribute__ ((packed));


/** The structure of a page table entry. See Intel vol 3 section
    3.6.4 */
struct x86_pte
{
  sos_ui32_t present        :1; /* 1=page mapped */
  sos_ui32_t write          :1; /* 0=read-only, 1=read/write */
  sos_ui32_t user           :1; /* 0=supervisor, 1=user */
  sos_ui32_t write_through  :1; /* 0=write-back, 1=write-through */
  sos_ui32_t cache_disabled :1; /* 1=cache disabled */
  sos_ui32_t accessed       :1; /* 1=read/write access since last clear */
  sos_ui32_t dirty          :1; /* 1=write access since last clear */
  sos_ui32_t zero           :1; /* Intel reserved */
  sos_ui32_t global_page    :1; /* 1=No TLB invalidation upon cr3 switch
                                   (when PGE set in cr4) */
  sos_ui32_t custom         :3; /* Do what you want with them */
  sos_ui32_t paddr          :20;
} __attribute__ ((packed));


/** Structure of the x86 CR3 register: the Page Directory Base
    Register. See Intel x86 doc Vol 3 section 2.5 */
struct x86_pdbr
{
  sos_ui32_t zero1          :3; /* Intel reserved */
  sos_ui32_t write_through  :1; /* 0=write-back, 1=write-through */
  sos_ui32_t cache_disabled :1; /* 1=cache disabled */
  sos_ui32_t zero2          :7; /* Intel reserved */
  sos_ui32_t pd_paddr       :20;
} __attribute__ ((packed));


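/*
 * Sanity check (illustrative sketch, not part of the original SOS tree):
 * each of the three bitfield structures above must collapse to exactly one
 * 32-bit word, otherwise it would not line up with the hardware PDE/PTE/CR3
 * format. The hypothetical negative-array-size typedefs below fail to
 * compile if a size is wrong, and cost nothing at run time.
 */
typedef char x86_pde_is_32bit [(sizeof(struct x86_pde)  == 4)? 1 : -1];
typedef char x86_pte_is_32bit [(sizeof(struct x86_pte)  == 4)? 1 : -1];
typedef char x86_pdbr_is_32bit[(sizeof(struct x86_pdbr) == 4)? 1 : -1];
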
/**
 * Helper macro to control the MMU: invalidate the TLB entry for the
 * page located at the given virtual address. See Intel x86 vol 3
 * section 3.7.
 */
#define invlpg(vaddr) \
  do { \
       __asm__ __volatile__("invlpg %0"::"m"(*((unsigned *)(vaddr)))); \
  } while(0)


/**
 * Helper macro to control the MMU: invalidate the whole TLB. See
 * Intel x86 vol 3 section 3.7.
 */
#define flush_tlb() \
  do { \
        unsigned long tmpreg; \
        asm volatile("movl %%cr3,%0\n\tmovl %0,%%cr3" :"=r" \
                     (tmpreg) : :"memory"); \
  } while (0)


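/*
 * Typical usage pattern (sketch only, mirroring what sos_paging_map() and
 * sos_paging_unmap() do further below): after writing a page table entry,
 * the TLB entry for the corresponding virtual page must be invalidated,
 * otherwise the CPU may keep using the stale translation:
 *
 *   pt[index_in_pt].present = TRUE;
 *   pt[index_in_pt].paddr   = ppage_paddr >> 12;
 *   invlpg(vpage_vaddr);             / * drop the stale TLB entry, if any * /
 *
 * flush_tlb() is the heavyweight variant: reloading cr3 invalidates every
 * non-global TLB entry at once.
 */
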
/**
 * Helper macro to compute the index in the PD for the given virtual
 * address
 */
#define virt_to_pd_index(vaddr) \
  (((unsigned)(vaddr)) >> 22)


/**
 * Helper macro to compute the index in the PT for the given virtual
 * address
 */
#define virt_to_pt_index(vaddr) \
  ( (((unsigned)(vaddr)) >> 12) & 0x3ff )


/**
 * Helper macro to compute the offset in the page for the given virtual
 * address
 */
#define virt_to_page_offset(vaddr) \
  (((unsigned)(vaddr)) & SOS_PAGE_MASK)


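/*
 * Worked example (illustrative only, assuming 4kB pages and
 * SOS_PAGE_MASK == 0xfff): a 32-bit virtual address splits into a 10-bit
 * PD index, a 10-bit PT index and a 12-bit page offset. For instance,
 * with vaddr == 0x00803123:
 *
 *   virt_to_pd_index(0x00803123)    == 0x00803123 >> 22           == 2
 *   virt_to_pt_index(0x00803123)    == (0x00803123 >> 12) & 0x3ff == 3
 *   virt_to_page_offset(0x00803123) == 0x00803123 & 0xfff         == 0x123
 *
 * i.e. the address lives in the PT referenced by PD entry #2, at PT entry
 * #3, byte 0x123 inside that page.
 */
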
/**
 * Helper function to map a page in the PD. Assumes that the RAM is
 * identity-mapped, so that the actual (CPU) address of a PT can be
 * resolved directly from the PD entry.
 */
static sos_ret_t paging_setup_map_helper(struct x86_pde * pd,
                                         sos_paddr_t ppage,
                                         sos_vaddr_t vaddr)
{
  /* Get the page directory entry and table entry index for this
     address */
  unsigned index_in_pd = virt_to_pd_index(vaddr);
  unsigned index_in_pt = virt_to_pt_index(vaddr);

  /* Make sure the page table was mapped */
  struct x86_pte * pt;
  if (pd[index_in_pd].present)
    {
      pt = (struct x86_pte*) (pd[index_in_pd].pt_paddr << 12);

      /* If we allocate a new entry in the PT, increase its reference
         count. This test will always be TRUE here, since the setup
         routine scans the kernel pages in a strictly increasing
         order: at each step, the map will result in the allocation of
         a new PT entry. For the sake of clarity, we keep the test
         here. */
      if (! pt[index_in_pt].present)
        sos_physmem_ref_physpage_at((sos_paddr_t)pt);

      /* The previous test should always be TRUE */
      else
        SOS_ASSERT_FATAL(FALSE); /* indicate a fatal error */
    }
  else
    {
      /* No: allocate a new one */
      pt = (struct x86_pte*) sos_physmem_ref_physpage_new(FALSE);
      if (! pt)
        return -SOS_ENOMEM;

      memset((void*)pt, 0x0, SOS_PAGE_SIZE);

      pd[index_in_pd].present  = TRUE;
      pd[index_in_pd].write    = 1; /* It would be too complicated to
                                       determine whether the kernel
                                       code/data area it covers is
                                       really read/write or read-only */
      pd[index_in_pd].pt_paddr = ((sos_paddr_t)pt) >> 12;
    }


  /* Map the page in the page table */
  pt[index_in_pt].present = 1;
  pt[index_in_pt].write   = 1;  /* It would be too complicated to
                                   determine whether the kernel
                                   code/data area it covers is really
                                   read/write or read-only */
  pt[index_in_pt].user    = 0;
  pt[index_in_pt].paddr   = ppage >> 12;

  return SOS_OK;
}


sos_ret_t sos_paging_setup(sos_paddr_t identity_mapping_base,
                           sos_paddr_t identity_mapping_top)
{
  /* The PDBR we will setup below */
  struct x86_pdbr cr3;

  /* Get the PD for the kernel */
  struct x86_pde * pd
    = (struct x86_pde*) sos_physmem_ref_physpage_new(FALSE);

  /* The iterator for scanning the kernel area */
  sos_paddr_t paddr;

  /* Make sure the PD allocation succeeded */
  if (! pd)
    return -SOS_ENOMEM;

  /* Reset the PD. For the moment, the whole RAM is still
     identity-mapped, so the paddrs are also valid vaddrs */
  memset((void*)pd,
         0x0,
         SOS_PAGE_SIZE);

  /* Identity-map the identity_mapping_* area */
  for (paddr = identity_mapping_base ;
       paddr < identity_mapping_top ;
       paddr += SOS_PAGE_SIZE)
    {
      if (paging_setup_map_helper(pd, paddr, paddr))
        return -SOS_ENOMEM;
    }

  /* Identity-map the PC-specific BIOS/Video area */
  for (paddr = BIOS_N_VIDEO_START ;
       paddr < BIOS_N_VIDEO_END ;
       paddr += SOS_PAGE_SIZE)
    {
      if (paging_setup_map_helper(pd, paddr, paddr))
        return -SOS_ENOMEM;
    }

  /* OK, the kernel is now identity-mapped in the PD. We still have to
     set up the mirroring */
  pd[virt_to_pd_index(SOS_PAGING_MIRROR_VADDR)].present = TRUE;
  pd[virt_to_pd_index(SOS_PAGING_MIRROR_VADDR)].write = 1;
  pd[virt_to_pd_index(SOS_PAGING_MIRROR_VADDR)].user  = 0;
  pd[virt_to_pd_index(SOS_PAGING_MIRROR_VADDR)].pt_paddr 
    = ((sos_paddr_t)pd)>>12;

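  /*
   * Sketch of what the mirroring trick above achieves (descriptive note,
   * grounded in the code below): the PD is installed as its own page table
   * for the 4MB region starting at SOS_PAGING_MIRROR_VADDR. Once paging is
   * enabled, each PDE then also acts as a PTE inside that region, so:
   *
   *   - the PT covering a virtual address V becomes accessible at
   *       SOS_PAGING_MIRROR_VADDR + SOS_PAGE_SIZE * virt_to_pd_index(V)
   *   - the PD itself becomes accessible at
   *       SOS_PAGING_MIRROR_VADDR
   *         + SOS_PAGE_SIZE * virt_to_pd_index(SOS_PAGING_MIRROR_VADDR)
   *
   * These are exactly the formulas used by sos_paging_map() and
   * sos_paging_unmap() below to reach the PD/PTs once the identity mapping
   * of physical memory is gone.
   */
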
  /* We now just have to configure the MMU to use our PD. See Intel
     x86 doc vol 3, section 3.6.3 */
  memset(& cr3, 0x0, sizeof(struct x86_pdbr)); /* Reset the PDBR */
  cr3.pd_paddr = ((sos_paddr_t)pd) >> 12;

  /* Actual loading of the PDBR in the MMU: setup cr3 + bits 31 [Paging
     Enabled] and 16 [Write Protect] of cr0, see Intel x86 doc vol 3,
     sections 2.5, 3.6.1 and 4.11.3 + note table 4-2 */
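  /*
   * Note on the inline assembly below (descriptive addition): once the PG
   * bit of cr0 is set, instructions already sitting in the CPU prefetch
   * queue were fetched with paging disabled. The near "jmp 1f" followed by
   * the indirect "jmp *%eax" forces the prefetch queue to be flushed and
   * EIP to be reloaded, so that execution resumes with paging fully in
   * effect. This is safe because the kernel is identity-mapped above: the
   * jump targets have the same virtual and physical addresses.
   */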
  asm volatile ("movl %0,%%cr3\n\t"
                "movl %%cr0,%%eax\n\t"
                "orl $0x80010000, %%eax\n\t" /* bit 31 | bit 16 */
                "movl %%eax,%%cr0\n\t"
                "jmp 1f\n\t"
                "1:\n\t"
                "movl $2f, %%eax\n\t"
                "jmp *%%eax\n\t"
                "2:\n\t" ::"r"(cr3):"memory","eax");

  /*
   * Here, the only memory available is:
   * - The BIOS+video area
   * - the identity_mapping_base .. identity_mapping_top area
   * - the PD mirroring area (4M)
   * All accesses to other virtual addresses will generate a #PF
   */

  return SOS_OK;
}



/* Assumes that the current address space is configured with the
 * mirroring enabled, in order to access the PD and PTs. */
sos_ret_t sos_paging_map(sos_paddr_t ppage_paddr,
                         sos_vaddr_t vpage_vaddr,
                         sos_bool_t is_user_page,
                         int flags)
{
  /* Get the page directory entry and table entry index for this
     address */
  unsigned index_in_pd = virt_to_pd_index(vpage_vaddr);
  unsigned index_in_pt = virt_to_pt_index(vpage_vaddr);

  /* Get the PD of the current context */
  struct x86_pde *pd = (struct x86_pde*)
    (SOS_PAGING_MIRROR_VADDR
     + SOS_PAGE_SIZE*virt_to_pd_index(SOS_PAGING_MIRROR_VADDR));

  /* Address of the PT in the mirroring */
  struct x86_pte * pt = (struct x86_pte*) (SOS_PAGING_MIRROR_VADDR
                                           + SOS_PAGE_SIZE*index_in_pd);

  /* Mapping anything inside the PD mirroring area is FORBIDDEN ;) */
  if ((vpage_vaddr >= SOS_PAGING_MIRROR_VADDR)
      && (vpage_vaddr < SOS_PAGING_MIRROR_VADDR + SOS_PAGING_MIRROR_SIZE))
    return -SOS_EINVAL;

  /* Map a page for the PT if necessary */
  if (! pd[index_in_pd].present)
    {
      /* No: allocate a new one */
      sos_paddr_t pt_ppage
        = sos_physmem_ref_physpage_new(! (flags & SOS_VM_MAP_ATOMIC));
      if (! pt_ppage)
        {
          return -SOS_ENOMEM;
        }

      pd[index_in_pd].present  = TRUE;
      pd[index_in_pd].write    = 1; /* Ignored in supervisor mode, see
                                       Intel vol 3 section 4.12 */
      pd[index_in_pd].user     |= (is_user_page)?1:0;
      pd[index_in_pd].pt_paddr = ((sos_paddr_t)pt_ppage) >> 12;

      /*
       * The PT is now mapped in the PD mirroring
       */

      /* Invalidate TLB for the page we just added */
      invlpg(pt);

      /* Reset this new PT */
      memset((void*)pt, 0x0, SOS_PAGE_SIZE);
    }

  /* If we allocate a new entry in the PT, increase its reference
     count. */
  else if (! pt[index_in_pt].present)
    sos_physmem_ref_physpage_at(pd[index_in_pd].pt_paddr << 12);

  /* Otherwise, a physical page gets implicitly unmapped: release our
     reference on it */
  else
    sos_physmem_unref_physpage(pt[index_in_pt].paddr << 12);

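  /*
   * Invariant maintained by the three cases above (descriptive note): the
   * reference count of a PT's physical page always equals the number of
   * present entries in that PT. The initial reference taken by
   * sos_physmem_ref_physpage_new() accounts for the first PTE installed
   * just below; every further PTE adds one reference, and remapping an
   * already-present PTE leaves the count unchanged. sos_paging_unmap()
   * relies on this to free the PT once its last entry disappears.
   */
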
  /* Map the page in the page table */
  pt[index_in_pt].present = TRUE;
  pt[index_in_pt].write   = (flags & SOS_VM_MAP_PROT_WRITE)?1:0;
  pt[index_in_pt].user    = (is_user_page)?1:0;
  pt[index_in_pt].paddr   = ppage_paddr >> 12;
  sos_physmem_ref_physpage_at(ppage_paddr);

  /*
   * The page is now mapped in the current address space
   */

  /* Invalidate TLB for the page we just added */
  invlpg(vpage_vaddr);

  return SOS_OK;
}


sos_ret_t sos_paging_unmap(sos_vaddr_t vpage_vaddr)
{
  sos_ret_t pt_unref_retval;

  /* Get the page directory entry and table entry index for this
     address */
  unsigned index_in_pd = virt_to_pd_index(vpage_vaddr);
  unsigned index_in_pt = virt_to_pt_index(vpage_vaddr);

  /* Get the PD of the current context */
  struct x86_pde *pd = (struct x86_pde*)
    (SOS_PAGING_MIRROR_VADDR
     + SOS_PAGE_SIZE*virt_to_pd_index(SOS_PAGING_MIRROR_VADDR));

  /* Address of the PT in the mirroring */
  struct x86_pte * pt = (struct x86_pte*) (SOS_PAGING_MIRROR_VADDR
                                           + SOS_PAGE_SIZE*index_in_pd);

  /* No page mapped at this address? */
  if (! pd[index_in_pd].present)
    return -SOS_EINVAL;
  if (! pt[index_in_pt].present)
    return -SOS_EINVAL;

  /* Unmapping anything inside the PD mirroring area is FORBIDDEN ;) */
  if ((vpage_vaddr >= SOS_PAGING_MIRROR_VADDR)
      && (vpage_vaddr < SOS_PAGING_MIRROR_VADDR + SOS_PAGING_MIRROR_SIZE))
    return -SOS_EINVAL;

  /* Reclaim the physical page */
  sos_physmem_unref_physpage(pt[index_in_pt].paddr << 12);

  /* Unmap the page in the page table */
  memset(pt + index_in_pt, 0x0, sizeof(struct x86_pte));

  /* Invalidate TLB for the page we just unmapped */
  invlpg(vpage_vaddr);

  /* Reclaim this entry in the PT, which may free the PT */
  pt_unref_retval = sos_physmem_unref_physpage(pd[index_in_pd].pt_paddr << 12);
  SOS_ASSERT_FATAL(pt_unref_retval >= 0);
  if (pt_unref_retval > 0)
    /* If the PT is now completely unused... */
    {
      /* Release the PDE */
      memset(pd + index_in_pd, 0x0, sizeof(struct x86_pde));

      /* Update the TLB */
      invlpg(pt);
    }

  return SOS_OK;
}



int sos_paging_get_prot(sos_vaddr_t vaddr)
{
  int retval;

  /* Get the page directory entry and table entry index for this
     address */
  unsigned index_in_pd = virt_to_pd_index(vaddr);
  unsigned index_in_pt = virt_to_pt_index(vaddr);

  /* Get the PD of the current context */
  struct x86_pde *pd = (struct x86_pde*)
    (SOS_PAGING_MIRROR_VADDR
     + SOS_PAGE_SIZE*virt_to_pd_index(SOS_PAGING_MIRROR_VADDR));

  /* Address of the PT in the mirroring */
  struct x86_pte * pt = (struct x86_pte*) (SOS_PAGING_MIRROR_VADDR
                                           + SOS_PAGE_SIZE*index_in_pd);

  /* No page mapped at this address? */
  if (! pd[index_in_pd].present)
    return SOS_VM_MAP_PROT_NONE;
  if (! pt[index_in_pt].present)
    return SOS_VM_MAP_PROT_NONE;

  /* Default access right of a mapped page is "read" on x86 */
  retval = SOS_VM_MAP_PROT_READ;
  if (pd[index_in_pd].write && pt[index_in_pt].write)
    retval |= SOS_VM_MAP_PROT_WRITE;

  return retval;
}


sos_paddr_t sos_paging_get_paddr(sos_vaddr_t vaddr)
{
  /* Get the page directory entry and table entry index for this
     address */
  unsigned index_in_pd = virt_to_pd_index(vaddr);
  unsigned index_in_pt = virt_to_pt_index(vaddr);
  unsigned offset_in_page = virt_to_page_offset(vaddr);

  /* Get the PD of the current context */
  struct x86_pde *pd = (struct x86_pde*)
    (SOS_PAGING_MIRROR_VADDR
     + SOS_PAGE_SIZE*virt_to_pd_index(SOS_PAGING_MIRROR_VADDR));

  /* Address of the PT in the mirroring */
  struct x86_pte * pt = (struct x86_pte*) (SOS_PAGING_MIRROR_VADDR
                                           + SOS_PAGE_SIZE*index_in_pd);

  /* No page mapped at this address? */
  if (! pd[index_in_pd].present)
    return (sos_paddr_t)NULL;
  if (! pt[index_in_pt].present)
    return (sos_paddr_t)NULL;

  return (pt[index_in_pt].paddr << 12) + offset_in_page;
}

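/*
 * Usage sketch for the two lookup functions above (illustrative only; the
 * sample variable is hypothetical). A caller wanting to know whether a
 * virtual address is writable, and where it lives in physical memory,
 * could do:
 *
 *   sos_vaddr_t vaddr = some_kernel_vaddr;
 *   if (sos_paging_get_prot(vaddr) & SOS_VM_MAP_PROT_WRITE)
 *     {
 *       sos_paddr_t paddr = sos_paging_get_paddr(vaddr);
 *       / * paddr == 0 (NULL) means the address is not mapped at all;
 *           otherwise it already includes the offset inside the page. * /
 *     }
 */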
