SimpleOS

LXR

Navigation



Site hébergé par : enix

The LXR Cross Referencer for SOS

[ source navigation ]
[ diff markup ]
[ identifier search ]
[ general search ]
 
 
Article: [ 1 ] [ 2 ] [ 3 ] [ 4 ] [ 5 ] [ 6 ] [ 6.5 ] [ 7 ] [ 7.5 ] [ 8 ] [ 9 ] [ 9.5 ]

Diff markup

Differences between /hwcore/paging.c (Article 4) and /hwcore/paging.c (Article 6.5)


001 /* Copyright (C) 2004  David Decotigny            001 /* Copyright (C) 2004  David Decotigny
002                                                   002 
003    This program is free software; you can redi    003    This program is free software; you can redistribute it and/or
004    modify it under the terms of the GNU Genera    004    modify it under the terms of the GNU General Public License
005    as published by the Free Software Foundatio    005    as published by the Free Software Foundation; either version 2
006    of the License, or (at your option) any lat    006    of the License, or (at your option) any later version.
007                                                   007    
008    This program is distributed in the hope tha    008    This program is distributed in the hope that it will be useful,
009    but WITHOUT ANY WARRANTY; without even the     009    but WITHOUT ANY WARRANTY; without even the implied warranty of
010    MERCHANTABILITY or FITNESS FOR A PARTICULAR    010    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
011    GNU General Public License for more details    011    GNU General Public License for more details.
012                                                   012    
013    You should have received a copy of the GNU     013    You should have received a copy of the GNU General Public License
014    along with this program; if not, write to t    014    along with this program; if not, write to the Free Software
015    Foundation, Inc., 59 Temple Place - Suite 3    015    Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
016    USA.                                           016    USA. 
017 */                                                017 */
018 #include <sos/physmem.h>                          018 #include <sos/physmem.h>
019 #include <sos/klibc.h>                            019 #include <sos/klibc.h>
020 #include <sos/assert.h>                           020 #include <sos/assert.h>
021                                                   021 
022 #include "paging.h"                               022 #include "paging.h"
023                                                   023 
                                                   >> 024 
024 /** The structure of a page directory entry. S    025 /** The structure of a page directory entry. See Intel vol 3 section
025     3.6.4 */                                      026     3.6.4 */
026 struct x86_pde                                    027 struct x86_pde
027 {                                                 028 {
028   sos_ui32_t present        :1; /* 1=PT mapped    029   sos_ui32_t present        :1; /* 1=PT mapped */
029   sos_ui32_t write          :1; /* 0=read-only    030   sos_ui32_t write          :1; /* 0=read-only, 1=read/write */
030   sos_ui32_t user           :1; /* 0=superviso    031   sos_ui32_t user           :1; /* 0=supervisor, 1=user */
031   sos_ui32_t write_through  :1; /* 0=write-bac    032   sos_ui32_t write_through  :1; /* 0=write-back, 1=write-through */
032   sos_ui32_t cache_disabled :1; /* 1=cache dis    033   sos_ui32_t cache_disabled :1; /* 1=cache disabled */
033   sos_ui32_t accessed       :1; /* 1=read/writ    034   sos_ui32_t accessed       :1; /* 1=read/write access since last clear */
034   sos_ui32_t zero           :1; /* Intel reser    035   sos_ui32_t zero           :1; /* Intel reserved */
035   sos_ui32_t page_size      :1; /* 0=4kB, 1=4M    036   sos_ui32_t page_size      :1; /* 0=4kB, 1=4MB or 2MB (depending on PAE) */
036   sos_ui32_t global_page    :1; /* Ignored (In    037   sos_ui32_t global_page    :1; /* Ignored (Intel reserved) */
037   sos_ui32_t custom         :3; /* Do what you    038   sos_ui32_t custom         :3; /* Do what you want with them */
038   sos_ui32_t pt_paddr       :20;                  039   sos_ui32_t pt_paddr       :20;
039 } __attribute__ ((packed));                       040 } __attribute__ ((packed));
040                                                   041 
041                                                   042 
042 /** The structure of a page table entry. See I    043 /** The structure of a page table entry. See Intel vol 3 section
043     3.6.4 */                                      044     3.6.4 */
044 struct x86_pte                                    045 struct x86_pte
045 {                                                 046 {
046   sos_ui32_t present        :1; /* 1=PT mapped    047   sos_ui32_t present        :1; /* 1=PT mapped */
047   sos_ui32_t write          :1; /* 0=read-only    048   sos_ui32_t write          :1; /* 0=read-only, 1=read/write */
048   sos_ui32_t user           :1; /* 0=superviso    049   sos_ui32_t user           :1; /* 0=supervisor, 1=user */
049   sos_ui32_t write_through  :1; /* 0=write-bac    050   sos_ui32_t write_through  :1; /* 0=write-back, 1=write-through */
050   sos_ui32_t cache_disabled :1; /* 1=cache dis    051   sos_ui32_t cache_disabled :1; /* 1=cache disabled */
051   sos_ui32_t accessed       :1; /* 1=read/writ    052   sos_ui32_t accessed       :1; /* 1=read/write access since last clear */
052   sos_ui32_t dirty          :1; /* 1=write acc    053   sos_ui32_t dirty          :1; /* 1=write access since last clear */
053   sos_ui32_t zero           :1; /* Intel reser    054   sos_ui32_t zero           :1; /* Intel reserved */
054   sos_ui32_t global_page    :1; /* 1=No TLB in    055   sos_ui32_t global_page    :1; /* 1=No TLB invalidation upon cr3 switch
055                                    (when PG se    056                                    (when PG set in cr4) */
056   sos_ui32_t custom         :3; /* Do what you    057   sos_ui32_t custom         :3; /* Do what you want with them */
057   sos_ui32_t paddr          :20;                  058   sos_ui32_t paddr          :20;
058 } __attribute__ ((packed));                       059 } __attribute__ ((packed));
059                                                   060 
060                                                   061 
061 /** Structure of the x86 CR3 register: the Pag    062 /** Structure of the x86 CR3 register: the Page Directory Base
062     Register. See Intel x86 doc Vol 3 section     063     Register. See Intel x86 doc Vol 3 section 2.5 */
063 struct x86_pdbr                                   064 struct x86_pdbr
064 {                                                 065 {
065   sos_ui32_t zero1          :3; /* Intel reser    066   sos_ui32_t zero1          :3; /* Intel reserved */
066   sos_ui32_t write_through  :1; /* 0=write-bac    067   sos_ui32_t write_through  :1; /* 0=write-back, 1=write-through */
067   sos_ui32_t cache_disabled :1; /* 1=cache dis    068   sos_ui32_t cache_disabled :1; /* 1=cache disabled */
068   sos_ui32_t zero2          :7; /* Intel reser    069   sos_ui32_t zero2          :7; /* Intel reserved */
069   sos_ui32_t pd_paddr       :20;                  070   sos_ui32_t pd_paddr       :20;
070 } __attribute__ ((packed));                       071 } __attribute__ ((packed));
071                                                   072 
072                                                   073 
073 /**                                               074 /**
074  * Helper macro to control the MMU: invalidate    075  * Helper macro to control the MMU: invalidate the TLB entry for the
075  * page located at the given virtual address.     076  * page located at the given virtual address. See Intel x86 vol 3
076  * section 3.7.                                   077  * section 3.7.
077  */                                               078  */
078 #define invlpg(vaddr) \                           079 #define invlpg(vaddr) \
079   do { \                                          080   do { \
080        __asm__ __volatile__("invlpg %0"::"m"(*    081        __asm__ __volatile__("invlpg %0"::"m"(*((unsigned *)(vaddr)))); \
081   } while(0)                                      082   } while(0)
082                                                   083 
083                                                   084 
084 /**                                               085 /**
085  * Helper macro to control the MMU: invalidate    086  * Helper macro to control the MMU: invalidate the whole TLB. See
086  * Intel x86 vol 3 section 3.7.                   087  * Intel x86 vol 3 section 3.7.
087  */                                               088  */
088 #define flush_tlb() \                             089 #define flush_tlb() \
089   do { \                                          090   do { \
090         unsigned long tmpreg; \                   091         unsigned long tmpreg; \
091         asm volatile("movl %%cr3,%0\n\tmovl %0    092         asm volatile("movl %%cr3,%0\n\tmovl %0,%%cr3" :"=r" \
092                      (tmpreg) : :"memory"); \     093                      (tmpreg) : :"memory"); \
093   } while (0)                                     094   } while (0)
094                                                   095 
095                                                   096 
096 /**                                               097 /**
097  * Helper macro to compute the index in the PD    098  * Helper macro to compute the index in the PD for the given virtual
098  * address                                        099  * address
099  */                                               100  */
100 #define virt_to_pd_index(vaddr) \                 101 #define virt_to_pd_index(vaddr) \
101   (((unsigned)(vaddr)) >> 22)                     102   (((unsigned)(vaddr)) >> 22)
102                                                   103 
103                                                   104 
104 /**                                               105 /**
105  * Helper macro to compute the index in the PT    106  * Helper macro to compute the index in the PT for the given virtual
106  * address                                        107  * address
107  */                                               108  */
108 #define virt_to_pt_index(vaddr) \                 109 #define virt_to_pt_index(vaddr) \
109   ( (((unsigned)(vaddr)) >> 12) & 0x3ff )         110   ( (((unsigned)(vaddr)) >> 12) & 0x3ff )
110                                                   111 
111                                                   112 
112 /**                                               113 /**
113  * Helper macro to compute the offset in the p    114  * Helper macro to compute the offset in the page for the given virtual
114  * address                                        115  * address
115  */                                               116  */
116 #define virt_to_page_offset(vaddr) \              117 #define virt_to_page_offset(vaddr) \
117   (((unsigned)(vaddr)) & SOS_PAGE_MASK)           118   (((unsigned)(vaddr)) & SOS_PAGE_MASK)
118                                                   119 
119                                                   120 
120 /**                                               121 /**
121  * Helper function to map a page in the pd.\ S    122  * Helper function to map a page in the pd.\ Suppose that the RAM
122  * is identity mapped to resolve PT actual (CP    123  * is identity mapped to resolve PT actual (CPU) address from the PD
123  * entry                                          124  * entry
124  */                                               125  */
125 static sos_ret_t paging_setup_map_helper(struc    126 static sos_ret_t paging_setup_map_helper(struct x86_pde * pd,
126                                          sos_p    127                                          sos_paddr_t ppage,
127                                          sos_v    128                                          sos_vaddr_t vaddr)
128 {                                                 129 {
129   /* Get the page directory entry and table en    130   /* Get the page directory entry and table entry index for this
130      address */                                   131      address */
131   unsigned index_in_pd = virt_to_pd_index(vadd    132   unsigned index_in_pd = virt_to_pd_index(vaddr);
132   unsigned index_in_pt = virt_to_pt_index(vadd    133   unsigned index_in_pt = virt_to_pt_index(vaddr);
133                                                   134 
134   /* Make sure the page table was mapped */       135   /* Make sure the page table was mapped */
135   struct x86_pte * pt;                            136   struct x86_pte * pt;
136   if (pd[index_in_pd].present)                    137   if (pd[index_in_pd].present)
137     {                                             138     {
138       pt = (struct x86_pte*) (pd[index_in_pd].    139       pt = (struct x86_pte*) (pd[index_in_pd].pt_paddr << 12);
139                                                   140 
140       /* If we allocate a new entry in the PT,    141       /* If we allocate a new entry in the PT, increase its reference
141          count. This test will always be TRUE     142          count. This test will always be TRUE here, since the setup
142          routine scans the kernel pages in a s    143          routine scans the kernel pages in a strictly increasing
143          order: at each step, the map will res    144          order: at each step, the map will result in the allocation of
144          a new PT entry. For the sake of clari    145          a new PT entry. For the sake of clarity, we keep the test
145          here. */                                 146          here. */
146       if (! pt[index_in_pt].present)              147       if (! pt[index_in_pt].present)
147         sos_physmem_ref_physpage_at((sos_paddr    148         sos_physmem_ref_physpage_at((sos_paddr_t)pt);
148                                                   149 
149       /* The previous test should always be TR    150       /* The previous test should always be TRUE */
150       else                                        151       else
151         SOS_ASSERT_FATAL(FALSE); /* indicate a    152         SOS_ASSERT_FATAL(FALSE); /* indicate a fatal error */
152     }                                             153     }
153   else                                            154   else
154     {                                             155     {
155       /* No : allocate a new one */               156       /* No : allocate a new one */
156       pt = (struct x86_pte*) sos_physmem_ref_p    157       pt = (struct x86_pte*) sos_physmem_ref_physpage_new(FALSE);
157       if (! pt)                                   158       if (! pt)
158         return -SOS_ENOMEM;                       159         return -SOS_ENOMEM;
159                                                   160       
160       memset((void*)pt, 0x0, SOS_PAGE_SIZE);      161       memset((void*)pt, 0x0, SOS_PAGE_SIZE);
161                                                   162 
162       pd[index_in_pd].present  = TRUE;            163       pd[index_in_pd].present  = TRUE;
163       pd[index_in_pd].write    = 1; /* It woul    164       pd[index_in_pd].write    = 1; /* It would be too complicated to
164                                        determi    165                                        determine whether it
165                                        corresp    166                                        corresponds to a real R/W area
166                                        of the     167                                        of the kernel code/data or
167                                        read-on    168                                        read-only */
168       pd[index_in_pd].pt_paddr = ((sos_paddr_t    169       pd[index_in_pd].pt_paddr = ((sos_paddr_t)pt) >> 12;
169     }                                             170     }
170                                                   171 
171                                                   172   
172   /* Map the page in the page table */            173   /* Map the page in the page table */
173   pt[index_in_pt].present = 1;                    174   pt[index_in_pt].present = 1;
174   pt[index_in_pt].write   = 1;  /* It would be    175   pt[index_in_pt].write   = 1;  /* It would be too complicated to
175                                    determine w    176                                    determine whether it corresponds to
176                                    a real R/W     177                                    a real R/W area of the kernel
177                                    code/data o    178                                    code/data or R/O only */
178   pt[index_in_pt].user    = 0;                    179   pt[index_in_pt].user    = 0;
179   pt[index_in_pt].paddr   = ppage >> 12;          180   pt[index_in_pt].paddr   = ppage >> 12;
180                                                   181 
181   return SOS_OK;                                  182   return SOS_OK;
182 }                                                 183 }
183                                                   184 
184                                                   185 
185 sos_ret_t sos_paging_setup(sos_paddr_t identit !! 186 sos_ret_t sos_paging_subsystem_setup(sos_paddr_t identity_mapping_base,
186                            sos_paddr_t identit !! 187                                      sos_paddr_t identity_mapping_top)
187 {                                                 188 {
188   /* The PDBR we will setup below */              189   /* The PDBR we will setup below */
189   struct x86_pdbr cr3;                            190   struct x86_pdbr cr3;  
190                                                   191 
191   /* Get the PD for the kernel */                 192   /* Get the PD for the kernel */
192   struct x86_pde * pd                             193   struct x86_pde * pd
193     = (struct x86_pde*) sos_physmem_ref_physpa    194     = (struct x86_pde*) sos_physmem_ref_physpage_new(FALSE);
194                                                   195 
195   /* The iterator for scanning the kernel area    196   /* The iterator for scanning the kernel area */
196   sos_paddr_t paddr;                              197   sos_paddr_t paddr;
197                                                   198 
198   /* Reset the PD. For the moment, there is st    199   /* Reset the PD. For the moment, there is still an IM for the whole
199      RAM, so that the paddr are also vaddr */     200      RAM, so that the paddr are also vaddr */
200   memset((void*)pd,                               201   memset((void*)pd,
201          0x0,                                     202          0x0,
202          SOS_PAGE_SIZE);                          203          SOS_PAGE_SIZE);
203                                                   204 
204   /* Identity-map the identity_mapping_* area     205   /* Identity-map the identity_mapping_* area */
205   for (paddr = identity_mapping_base ;            206   for (paddr = identity_mapping_base ;
206        paddr < identity_mapping_top ;             207        paddr < identity_mapping_top ;
207        paddr += SOS_PAGE_SIZE)                    208        paddr += SOS_PAGE_SIZE)
208     {                                             209     {
209       if (paging_setup_map_helper(pd, paddr, p    210       if (paging_setup_map_helper(pd, paddr, paddr))
210         return -SOS_ENOMEM;                       211         return -SOS_ENOMEM;
211     }                                             212     }
212                                                   213 
213   /* Identity-map the PC-specific BIOS/Video a    214   /* Identity-map the PC-specific BIOS/Video area */
214   for (paddr = BIOS_N_VIDEO_START ;               215   for (paddr = BIOS_N_VIDEO_START ;
215        paddr < BIOS_N_VIDEO_END ;                 216        paddr < BIOS_N_VIDEO_END ;
216        paddr += SOS_PAGE_SIZE)                    217        paddr += SOS_PAGE_SIZE)
217     {                                             218     {
218       if (paging_setup_map_helper(pd, paddr, p    219       if (paging_setup_map_helper(pd, paddr, paddr))
219         return -SOS_ENOMEM;                       220         return -SOS_ENOMEM;
220     }                                             221     }
221                                                   222 
222   /* Ok, kernel is now identity mapped in the     223   /* Ok, kernel is now identity mapped in the PD. We still have to set
223      up the mirroring */                          224      up the mirroring */
224   pd[virt_to_pd_index(SOS_PAGING_MIRROR_VADDR)    225   pd[virt_to_pd_index(SOS_PAGING_MIRROR_VADDR)].present = TRUE;
225   pd[virt_to_pd_index(SOS_PAGING_MIRROR_VADDR)    226   pd[virt_to_pd_index(SOS_PAGING_MIRROR_VADDR)].write = 1;
226   pd[virt_to_pd_index(SOS_PAGING_MIRROR_VADDR)    227   pd[virt_to_pd_index(SOS_PAGING_MIRROR_VADDR)].user  = 0;
227   pd[virt_to_pd_index(SOS_PAGING_MIRROR_VADDR)    228   pd[virt_to_pd_index(SOS_PAGING_MIRROR_VADDR)].pt_paddr 
228     = ((sos_paddr_t)pd)>>12;                      229     = ((sos_paddr_t)pd)>>12;
229                                                   230 
230   /* We now just have to configure the MMU to     231   /* We now just have to configure the MMU to use our PD. See Intel
231      x86 doc vol 3, section 3.6.3 */              232      x86 doc vol 3, section 3.6.3 */
232   memset(& cr3, 0x0, sizeof(struct x86_pdbr));    233   memset(& cr3, 0x0, sizeof(struct x86_pdbr)); /* Reset the PDBR */
233   cr3.pd_paddr = ((sos_paddr_t)pd) >> 12;         234   cr3.pd_paddr = ((sos_paddr_t)pd) >> 12;
234                                                   235 
235  /* Actual loading of the PDBR in the MMU: set    236  /* Actual loading of the PDBR in the MMU: setup cr3 + bits 31[Paging
236     Enabled] and 16[Write Protect] of cr0, see    237     Enabled] and 16[Write Protect] of cr0, see Intel x86 doc vol 3,
237     sections 2.5, 3.6.1 and 4.11.3 + note tabl    238     sections 2.5, 3.6.1 and 4.11.3 + note table 4-2 */
238   asm volatile ("movl %0,%%cr3\n\t"               239   asm volatile ("movl %0,%%cr3\n\t"
239                 "movl %%cr0,%%eax\n\t"            240                 "movl %%cr0,%%eax\n\t"
240                 "orl $0x80010000, %%eax\n\t" /    241                 "orl $0x80010000, %%eax\n\t" /* bit 31 | bit 16 */
241                 "movl %%eax,%%cr0\n\t"            242                 "movl %%eax,%%cr0\n\t"
242                 "jmp 1f\n\t"                      243                 "jmp 1f\n\t"
243                 "1:\n\t"                          244                 "1:\n\t"
244                 "movl $2f, %%eax\n\t"             245                 "movl $2f, %%eax\n\t"
245                 "jmp *%%eax\n\t"                  246                 "jmp *%%eax\n\t"
246                 "2:\n\t" ::"r"(cr3):"memory","    247                 "2:\n\t" ::"r"(cr3):"memory","eax");
247                                                   248 
248   /*                                              249   /*
249    * Here, the only memory available is:          250    * Here, the only memory available is:
250    * - The BIOS+video area                        251    * - The BIOS+video area
251    * - the identity_mapping_base .. identity_m    252    * - the identity_mapping_base .. identity_mapping_top area
252    * - the PD mirroring area (4M)                 253    * - the PD mirroring area (4M)
253    * All accesses to other virtual addresses w    254    * All accesses to other virtual addresses will generate a #PF
254    */                                             255    */
255                                                   256 
256   return SOS_OK;                                  257   return SOS_OK;
257 }                                                 258 }
258                                                   259 
259                                                   260 
260 /* Suppose that the current address is configu    261 /* Suppose that the current address is configured with the mirroring
261  * enabled to access the PD and PT. */            262  * enabled to access the PD and PT. */
262 sos_ret_t sos_paging_map(sos_paddr_t ppage_pad    263 sos_ret_t sos_paging_map(sos_paddr_t ppage_paddr,
263                          sos_vaddr_t vpage_vad    264                          sos_vaddr_t vpage_vaddr,
264                          sos_bool_t is_user_pa    265                          sos_bool_t is_user_page,
265                          int flags)            !! 266                          sos_ui32_t flags)
266 {                                                 267 {
267   /* Get the page directory entry and table en    268   /* Get the page directory entry and table entry index for this
268      address */                                   269      address */
269   unsigned index_in_pd = virt_to_pd_index(vpag    270   unsigned index_in_pd = virt_to_pd_index(vpage_vaddr);
270   unsigned index_in_pt = virt_to_pt_index(vpag    271   unsigned index_in_pt = virt_to_pt_index(vpage_vaddr);
271                                                   272   
272   /* Get the PD of the current context */         273   /* Get the PD of the current context */
273   struct x86_pde *pd = (struct x86_pde*)          274   struct x86_pde *pd = (struct x86_pde*)
274     (SOS_PAGING_MIRROR_VADDR                      275     (SOS_PAGING_MIRROR_VADDR
275      + SOS_PAGE_SIZE*virt_to_pd_index(SOS_PAGI    276      + SOS_PAGE_SIZE*virt_to_pd_index(SOS_PAGING_MIRROR_VADDR));
276                                                   277 
277   /* Address of the PT in the mirroring */        278   /* Address of the PT in the mirroring */
278   struct x86_pte * pt = (struct x86_pte*) (SOS    279   struct x86_pte * pt = (struct x86_pte*) (SOS_PAGING_MIRROR_VADDR
279                                            + S    280                                            + SOS_PAGE_SIZE*index_in_pd);
280                                                   281 
281   /* The mapping of anywhere in the PD mirrori    282   /* The mapping of anywhere in the PD mirroring is FORBIDDEN ;) */
282   if ((vpage_vaddr >= SOS_PAGING_MIRROR_VADDR)    283   if ((vpage_vaddr >= SOS_PAGING_MIRROR_VADDR)
283       && (vpage_vaddr < SOS_PAGING_MIRROR_VADD    284       && (vpage_vaddr < SOS_PAGING_MIRROR_VADDR + SOS_PAGING_MIRROR_SIZE))
284     return -SOS_EINVAL;                           285     return -SOS_EINVAL;
285                                                   286 
286   /* Map a page for the PT if necessary */        287   /* Map a page for the PT if necessary */
287   if (! pd[index_in_pd].present)                  288   if (! pd[index_in_pd].present)
288     {                                             289     {
                                                   >> 290       
289       /* No : allocate a new one */               291       /* No : allocate a new one */
290       sos_paddr_t pt_ppage                        292       sos_paddr_t pt_ppage
291         = sos_physmem_ref_physpage_new(! (flag    293         = sos_physmem_ref_physpage_new(! (flags & SOS_VM_MAP_ATOMIC));
292       if (! pt_ppage)                             294       if (! pt_ppage)
293         {                                         295         {
294           return -SOS_ENOMEM;                     296           return -SOS_ENOMEM;
295         }                                         297         }
296                                                   298 
297       pd[index_in_pd].present  = TRUE;            299       pd[index_in_pd].present  = TRUE;
298       pd[index_in_pd].write    = 1; /* Ignored    300       pd[index_in_pd].write    = 1; /* Ignored in supervisor mode, see
299                                        Intel v    301                                        Intel vol 3 section 4.12 */
300       pd[index_in_pd].user     |= (is_user_pag !! 302       pd[index_in_pd].user     = (is_user_page)?1:0;
301       pd[index_in_pd].pt_paddr = ((sos_paddr_t    303       pd[index_in_pd].pt_paddr = ((sos_paddr_t)pt_ppage) >> 12;
302                                                   304       
303       /*                                          305       /*
304        * The PT is now mapped in the PD mirror    306        * The PT is now mapped in the PD mirroring
305        */                                         307        */
306                                                   308 
307       /* Invalidate TLB for the page we just a    309       /* Invalidate TLB for the page we just added */
308       invlpg(pt);                                 310       invlpg(pt);
309                                                   311      
310       /* Reset this new PT */                     312       /* Reset this new PT */
311       memset((void*)pt, 0x0, SOS_PAGE_SIZE);      313       memset((void*)pt, 0x0, SOS_PAGE_SIZE);
312     }                                             314     }
313                                                   315 
314   /* If we allocate a new entry in the PT, inc    316   /* If we allocate a new entry in the PT, increase its reference
315      count. */                                    317      count. */
316   else if (! pt[index_in_pt].present)             318   else if (! pt[index_in_pt].present)
317     sos_physmem_ref_physpage_at(pd[index_in_pd    319     sos_physmem_ref_physpage_at(pd[index_in_pd].pt_paddr << 12);
318                                                   320   
319   /* Otherwise, that means that a physical pag    321   /* Otherwise, that means that a physical page is implicitely
320      unmapped */                                  322      unmapped */
321   else                                            323   else
322     sos_physmem_unref_physpage(pt[index_in_pt]    324     sos_physmem_unref_physpage(pt[index_in_pt].paddr << 12);
323                                                   325 
324   /* Map the page in the page table */            326   /* Map the page in the page table */
325   pt[index_in_pt].present = TRUE;                 327   pt[index_in_pt].present = TRUE;
326   pt[index_in_pt].write   = (flags & SOS_VM_MA    328   pt[index_in_pt].write   = (flags & SOS_VM_MAP_PROT_WRITE)?1:0;
327   pt[index_in_pt].user    = (is_user_page)?1:0    329   pt[index_in_pt].user    = (is_user_page)?1:0;
328   pt[index_in_pt].paddr   = ppage_paddr >> 12;    330   pt[index_in_pt].paddr   = ppage_paddr >> 12;
329   sos_physmem_ref_physpage_at(ppage_paddr);       331   sos_physmem_ref_physpage_at(ppage_paddr);
330                                                   332 
331   /*                                              333   /*
332    * The page is now mapped in the current add    334    * The page is now mapped in the current address space
333    */                                             335    */
334                                                   336   
335   /* Invalidate TLB for the page we just added    337   /* Invalidate TLB for the page we just added */
336   invlpg(vpage_vaddr);                            338   invlpg(vpage_vaddr);
337                                                   339 
338   return SOS_OK;                                  340   return SOS_OK;
339 }                                                 341 }
340                                                   342 
341                                                   343 
342 sos_ret_t sos_paging_unmap(sos_vaddr_t vpage_v    344 sos_ret_t sos_paging_unmap(sos_vaddr_t vpage_vaddr)
343 {                                                 345 {
344   sos_ret_t pt_unref_retval;                      346   sos_ret_t pt_unref_retval;
345                                                   347 
346   /* Get the page directory entry and table en    348   /* Get the page directory entry and table entry index for this
347      address */                                   349      address */
348   unsigned index_in_pd = virt_to_pd_index(vpag    350   unsigned index_in_pd = virt_to_pd_index(vpage_vaddr);
349   unsigned index_in_pt = virt_to_pt_index(vpag    351   unsigned index_in_pt = virt_to_pt_index(vpage_vaddr);
350                                                   352   
351   /* Get the PD of the current context */         353   /* Get the PD of the current context */
352   struct x86_pde *pd = (struct x86_pde*)          354   struct x86_pde *pd = (struct x86_pde*)
353     (SOS_PAGING_MIRROR_VADDR                      355     (SOS_PAGING_MIRROR_VADDR
354      + SOS_PAGE_SIZE*virt_to_pd_index(SOS_PAGI    356      + SOS_PAGE_SIZE*virt_to_pd_index(SOS_PAGING_MIRROR_VADDR));
355                                                   357 
356   /* Address of the PT in the mirroring */        358   /* Address of the PT in the mirroring */
357   struct x86_pte * pt = (struct x86_pte*) (SOS    359   struct x86_pte * pt = (struct x86_pte*) (SOS_PAGING_MIRROR_VADDR
358                                            + S    360                                            + SOS_PAGE_SIZE*index_in_pd);
359                                                   361 
360   /* No page mapped at this address ? */          362   /* No page mapped at this address ? */
361   if (! pd[index_in_pd].present)                  363   if (! pd[index_in_pd].present)
362     return -SOS_EINVAL;                           364     return -SOS_EINVAL;
363   if (! pt[index_in_pt].present)                  365   if (! pt[index_in_pt].present)
364     return -SOS_EINVAL;                           366     return -SOS_EINVAL;
365                                                   367 
366   /* The unmapping of anywhere in the PD mirro    368   /* The unmapping of anywhere in the PD mirroring is FORBIDDEN ;) */
367   if ((vpage_vaddr >= SOS_PAGING_MIRROR_VADDR)    369   if ((vpage_vaddr >= SOS_PAGING_MIRROR_VADDR)
368       && (vpage_vaddr < SOS_PAGING_MIRROR_VADD    370       && (vpage_vaddr < SOS_PAGING_MIRROR_VADDR + SOS_PAGING_MIRROR_SIZE))
369     return -SOS_EINVAL;                           371     return -SOS_EINVAL;
370                                                   372 
371   /* Reclaim the physical page */                 373   /* Reclaim the physical page */
372   sos_physmem_unref_physpage(pt[index_in_pt].p    374   sos_physmem_unref_physpage(pt[index_in_pt].paddr << 12);
373                                                   375 
374   /* Unmap the page in the page table */          376   /* Unmap the page in the page table */
375   memset(pt + index_in_pt, 0x0, sizeof(struct     377   memset(pt + index_in_pt, 0x0, sizeof(struct x86_pte));
376                                                   378 
377   /* Invalidate TLB for the page we just unmap    379   /* Invalidate TLB for the page we just unmapped */
378   invlpg(vpage_vaddr);                            380   invlpg(vpage_vaddr);
379                                                   381 
380   /* Reclaim this entry in the PT, which may f    382   /* Reclaim this entry in the PT, which may free the PT */
381   pt_unref_retval = sos_physmem_unref_physpage    383   pt_unref_retval = sos_physmem_unref_physpage(pd[index_in_pd].pt_paddr << 12);
382   SOS_ASSERT_FATAL(pt_unref_retval >= 0);         384   SOS_ASSERT_FATAL(pt_unref_retval >= 0);
383   if (pt_unref_retval > 0)                     !! 385   if (pt_unref_retval == TRUE)
384     /* If the PT is now completely unused... *    386     /* If the PT is now completely unused... */
385     {                                             387     {
386       /* Release the PDE */                    !! 388       union { struct x86_pde pde; sos_ui32_t ui32; } u;
387       memset(pd + index_in_pd, 0x0, sizeof(str !! 389 
                                                   >> 390       /*
                                                   >> 391        * Reset the PDE
                                                   >> 392        */
                                                   >> 393 
                                                   >> 394       /* Mark the PDE as unavailable */
                                                   >> 395       u.ui32 = 0;
                                                   >> 396 
                                                   >> 397       /* Update the PD */
                                                   >> 398       pd[index_in_pd] = u.pde;
388                                                   399       
389       /* Update the TLB */                        400       /* Update the TLB */
390       invlpg(pt);                                 401       invlpg(pt);
391     }                                             402     }
392                                                   403 
393   return SOS_OK;                                  404   return SOS_OK;  
394 }                                                 405 }
395                                                   406 
396                                                   407 
397 int sos_paging_get_prot(sos_vaddr_t vaddr)        408 int sos_paging_get_prot(sos_vaddr_t vaddr)
398 {                                                 409 {
399   int retval;                                     410   int retval;
400                                                   411 
401   /* Get the page directory entry and table en    412   /* Get the page directory entry and table entry index for this
402      address */                                   413      address */
403   unsigned index_in_pd = virt_to_pd_index(vadd    414   unsigned index_in_pd = virt_to_pd_index(vaddr);
404   unsigned index_in_pt = virt_to_pt_index(vadd    415   unsigned index_in_pt = virt_to_pt_index(vaddr);
405                                                   416   
406   /* Get the PD of the current context */         417   /* Get the PD of the current context */
407   struct x86_pde *pd = (struct x86_pde*)          418   struct x86_pde *pd = (struct x86_pde*)
408     (SOS_PAGING_MIRROR_VADDR                      419     (SOS_PAGING_MIRROR_VADDR
409      + SOS_PAGE_SIZE*virt_to_pd_index(SOS_PAGI    420      + SOS_PAGE_SIZE*virt_to_pd_index(SOS_PAGING_MIRROR_VADDR));
410                                                   421 
411   /* Address of the PT in the mirroring */        422   /* Address of the PT in the mirroring */
412   struct x86_pte * pt = (struct x86_pte*) (SOS    423   struct x86_pte * pt = (struct x86_pte*) (SOS_PAGING_MIRROR_VADDR
413                                            + S    424                                            + SOS_PAGE_SIZE*index_in_pd);
414                                                   425 
415   /* No page mapped at this address ? */          426   /* No page mapped at this address ? */
416   if (! pd[index_in_pd].present)                  427   if (! pd[index_in_pd].present)
417     return SOS_VM_MAP_PROT_NONE;                  428     return SOS_VM_MAP_PROT_NONE;
418   if (! pt[index_in_pt].present)                  429   if (! pt[index_in_pt].present)
419     return SOS_VM_MAP_PROT_NONE;                  430     return SOS_VM_MAP_PROT_NONE;
420                                                   431   
421   /* Default access right of an available page    432   /* Default access right of an available page is "read" on x86 */
422   retval = SOS_VM_MAP_PROT_READ;                  433   retval = SOS_VM_MAP_PROT_READ;
423   if (pd[index_in_pd].write && pt[index_in_pt]    434   if (pd[index_in_pd].write && pt[index_in_pt].write)
424     retval |= SOS_VM_MAP_PROT_WRITE;              435     retval |= SOS_VM_MAP_PROT_WRITE;
425                                                   436 
426   return retval;                                  437   return retval;
427 }                                                 438 }
428                                                   439 
429                                                   440 
430 sos_paddr_t sos_paging_get_paddr(sos_vaddr_t v    441 sos_paddr_t sos_paging_get_paddr(sos_vaddr_t vaddr)
431 {                                                 442 {
432   /* Get the page directory entry and table en    443   /* Get the page directory entry and table entry index for this
433      address */                                   444      address */
434   unsigned index_in_pd = virt_to_pd_index(vadd    445   unsigned index_in_pd = virt_to_pd_index(vaddr);
435   unsigned index_in_pt = virt_to_pt_index(vadd    446   unsigned index_in_pt = virt_to_pt_index(vaddr);
436   unsigned offset_in_page = virt_to_page_offse    447   unsigned offset_in_page = virt_to_page_offset(vaddr);
437                                                   448   
438   /* Get the PD of the current context */         449   /* Get the PD of the current context */
439   struct x86_pde *pd = (struct x86_pde*)          450   struct x86_pde *pd = (struct x86_pde*)
440     (SOS_PAGING_MIRROR_VADDR                      451     (SOS_PAGING_MIRROR_VADDR
441      + SOS_PAGE_SIZE*virt_to_pd_index(SOS_PAGI    452      + SOS_PAGE_SIZE*virt_to_pd_index(SOS_PAGING_MIRROR_VADDR));
442                                                   453 
443   /* Address of the PT in the mirroring */        454   /* Address of the PT in the mirroring */
444   struct x86_pte * pt = (struct x86_pte*) (SOS    455   struct x86_pte * pt = (struct x86_pte*) (SOS_PAGING_MIRROR_VADDR
445                                            + S    456                                            + SOS_PAGE_SIZE*index_in_pd);
446                                                   457 
447   /* No page mapped at this address ? */          458   /* No page mapped at this address ? */
448   if (! pd[index_in_pd].present)                  459   if (! pd[index_in_pd].present)
449     return (sos_paddr_t)NULL;                     460     return (sos_paddr_t)NULL;
450   if (! pt[index_in_pt].present)                  461   if (! pt[index_in_pt].present)
451     return (sos_paddr_t)NULL;                     462     return (sos_paddr_t)NULL;
452                                                   463 
453   return (pt[index_in_pt].paddr << 12) + offse    464   return (pt[index_in_pt].paddr << 12) + offset_in_page;
454 }                                                 465 }
455                                                << 
                                                      

source navigation ] diff markup ] identifier search ] general search ]