SimpleOS

LXR

Navigation



Site hébergé par : enix

The LXR Cross Referencer for SOS

[ source navigation ]
[ diff markup ]
[ identifier search ]
[ general search ]
 
 
Article: [ 1 ] [ 2 ] [ 3 ] [ 4 ] [ 5 ] [ 6 ] [ 6.5 ] [ 7 ] [ 7.5 ] [ 8 ] [ 9 ] [ 9.5 ]

Diff markup

Differences between /hwcore/paging.c (Article 6) and /hwcore/paging.c (Article 5)


001 /* Copyright (C) 2004  David Decotigny            001 /* Copyright (C) 2004  David Decotigny
002                                                   002 
003    This program is free software; you can redi    003    This program is free software; you can redistribute it and/or
004    modify it under the terms of the GNU Genera    004    modify it under the terms of the GNU General Public License
005    as published by the Free Software Foundatio    005    as published by the Free Software Foundation; either version 2
006    of the License, or (at your option) any lat    006    of the License, or (at your option) any later version.
007                                                   007    
008    This program is distributed in the hope tha    008    This program is distributed in the hope that it will be useful,
009    but WITHOUT ANY WARRANTY; without even the     009    but WITHOUT ANY WARRANTY; without even the implied warranty of
010    MERCHANTABILITY or FITNESS FOR A PARTICULAR    010    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
011    GNU General Public License for more details    011    GNU General Public License for more details.
012                                                   012    
013    You should have received a copy of the GNU     013    You should have received a copy of the GNU General Public License
014    along with this program; if not, write to t    014    along with this program; if not, write to the Free Software
015    Foundation, Inc., 59 Temple Place - Suite 3    015    Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
016    USA.                                           016    USA. 
017 */                                                017 */
018 #include <sos/physmem.h>                          018 #include <sos/physmem.h>
019 #include <sos/klibc.h>                            019 #include <sos/klibc.h>
020 #include <sos/assert.h>                           020 #include <sos/assert.h>
021                                                   021 
022 #include "paging.h"                               022 #include "paging.h"
023                                                   023 
024 /** The structure of a page directory entry. S    024 /** The structure of a page directory entry. See Intel vol 3 section
025     3.6.4 */                                      025     3.6.4 */
026 struct x86_pde                                    026 struct x86_pde
027 {                                                 027 {
028   sos_ui32_t present        :1; /* 1=PT mapped    028   sos_ui32_t present        :1; /* 1=PT mapped */
029   sos_ui32_t write          :1; /* 0=read-only    029   sos_ui32_t write          :1; /* 0=read-only, 1=read/write */
030   sos_ui32_t user           :1; /* 0=superviso    030   sos_ui32_t user           :1; /* 0=supervisor, 1=user */
031   sos_ui32_t write_through  :1; /* 0=write-bac    031   sos_ui32_t write_through  :1; /* 0=write-back, 1=write-through */
032   sos_ui32_t cache_disabled :1; /* 1=cache dis    032   sos_ui32_t cache_disabled :1; /* 1=cache disabled */
033   sos_ui32_t accessed       :1; /* 1=read/writ    033   sos_ui32_t accessed       :1; /* 1=read/write access since last clear */
034   sos_ui32_t zero           :1; /* Intel reser    034   sos_ui32_t zero           :1; /* Intel reserved */
035   sos_ui32_t page_size      :1; /* 0=4kB, 1=4M    035   sos_ui32_t page_size      :1; /* 0=4kB, 1=4MB or 2MB (depending on PAE) */
036   sos_ui32_t global_page    :1; /* Ignored (In    036   sos_ui32_t global_page    :1; /* Ignored (Intel reserved) */
037   sos_ui32_t custom         :3; /* Do what you    037   sos_ui32_t custom         :3; /* Do what you want with them */
038   sos_ui32_t pt_paddr       :20;                  038   sos_ui32_t pt_paddr       :20;
039 } __attribute__ ((packed));                       039 } __attribute__ ((packed));
040                                                   040 
041                                                   041 
042 /** The structure of a page table entry. See I    042 /** The structure of a page table entry. See Intel vol 3 section
043     3.6.4 */                                      043     3.6.4 */
044 struct x86_pte                                    044 struct x86_pte
045 {                                                 045 {
046   sos_ui32_t present        :1; /* 1=PT mapped    046   sos_ui32_t present        :1; /* 1=PT mapped */
047   sos_ui32_t write          :1; /* 0=read-only    047   sos_ui32_t write          :1; /* 0=read-only, 1=read/write */
048   sos_ui32_t user           :1; /* 0=superviso    048   sos_ui32_t user           :1; /* 0=supervisor, 1=user */
049   sos_ui32_t write_through  :1; /* 0=write-bac    049   sos_ui32_t write_through  :1; /* 0=write-back, 1=write-through */
050   sos_ui32_t cache_disabled :1; /* 1=cache dis    050   sos_ui32_t cache_disabled :1; /* 1=cache disabled */
051   sos_ui32_t accessed       :1; /* 1=read/writ    051   sos_ui32_t accessed       :1; /* 1=read/write access since last clear */
052   sos_ui32_t dirty          :1; /* 1=write acc    052   sos_ui32_t dirty          :1; /* 1=write access since last clear */
053   sos_ui32_t zero           :1; /* Intel reser    053   sos_ui32_t zero           :1; /* Intel reserved */
054   sos_ui32_t global_page    :1; /* 1=No TLB in    054   sos_ui32_t global_page    :1; /* 1=No TLB invalidation upon cr3 switch
055                                    (when PG se    055                                    (when PG set in cr4) */
056   sos_ui32_t custom         :3; /* Do what you    056   sos_ui32_t custom         :3; /* Do what you want with them */
057   sos_ui32_t paddr          :20;                  057   sos_ui32_t paddr          :20;
058 } __attribute__ ((packed));                       058 } __attribute__ ((packed));
059                                                   059 
060                                                   060 
061 /** Structure of the x86 CR3 register: the Pag    061 /** Structure of the x86 CR3 register: the Page Directory Base
062     Register. See Intel x86 doc Vol 3 section     062     Register. See Intel x86 doc Vol 3 section 2.5 */
063 struct x86_pdbr                                   063 struct x86_pdbr
064 {                                                 064 {
065   sos_ui32_t zero1          :3; /* Intel reser    065   sos_ui32_t zero1          :3; /* Intel reserved */
066   sos_ui32_t write_through  :1; /* 0=write-bac    066   sos_ui32_t write_through  :1; /* 0=write-back, 1=write-through */
067   sos_ui32_t cache_disabled :1; /* 1=cache dis    067   sos_ui32_t cache_disabled :1; /* 1=cache disabled */
068   sos_ui32_t zero2          :7; /* Intel reser    068   sos_ui32_t zero2          :7; /* Intel reserved */
069   sos_ui32_t pd_paddr       :20;                  069   sos_ui32_t pd_paddr       :20;
070 } __attribute__ ((packed));                       070 } __attribute__ ((packed));
071                                                   071 
072                                                   072 
073 /**                                               073 /**
074  * Helper macro to control the MMU: invalidate    074  * Helper macro to control the MMU: invalidate the TLB entry for the
075  * page located at the given virtual address.     075  * page located at the given virtual address. See Intel x86 vol 3
076  * section 3.7.                                   076  * section 3.7.
077  */                                               077  */
078 #define invlpg(vaddr) \                           078 #define invlpg(vaddr) \
079   do { \                                          079   do { \
080        __asm__ __volatile__("invlpg %0"::"m"(*    080        __asm__ __volatile__("invlpg %0"::"m"(*((unsigned *)(vaddr)))); \
081   } while(0)                                      081   } while(0)
082                                                   082 
083                                                   083 
084 /**                                               084 /**
085  * Helper macro to control the MMU: invalidate    085  * Helper macro to control the MMU: invalidate the whole TLB. See
086  * Intel x86 vol 3 section 3.7.                   086  * Intel x86 vol 3 section 3.7.
087  */                                               087  */
088 #define flush_tlb() \                             088 #define flush_tlb() \
089   do { \                                          089   do { \
090         unsigned long tmpreg; \                   090         unsigned long tmpreg; \
091         asm volatile("movl %%cr3,%0\n\tmovl %0    091         asm volatile("movl %%cr3,%0\n\tmovl %0,%%cr3" :"=r" \
092                      (tmpreg) : :"memory"); \     092                      (tmpreg) : :"memory"); \
093   } while (0)                                     093   } while (0)
094                                                   094 
095                                                   095 
096 /**                                               096 /**
097  * Helper macro to compute the index in the PD    097  * Helper macro to compute the index in the PD for the given virtual
098  * address                                        098  * address
099  */                                               099  */
100 #define virt_to_pd_index(vaddr) \                 100 #define virt_to_pd_index(vaddr) \
101   (((unsigned)(vaddr)) >> 22)                     101   (((unsigned)(vaddr)) >> 22)
102                                                   102 
103                                                   103 
104 /**                                               104 /**
105  * Helper macro to compute the index in the PT    105  * Helper macro to compute the index in the PT for the given virtual
106  * address                                        106  * address
107  */                                               107  */
108 #define virt_to_pt_index(vaddr) \                 108 #define virt_to_pt_index(vaddr) \
109   ( (((unsigned)(vaddr)) >> 12) & 0x3ff )         109   ( (((unsigned)(vaddr)) >> 12) & 0x3ff )
110                                                   110 
111                                                   111 
112 /**                                               112 /**
113  * Helper macro to compute the offset in the p    113  * Helper macro to compute the offset in the page for the given virtual
114  * address                                        114  * address
115  */                                               115  */
116 #define virt_to_page_offset(vaddr) \              116 #define virt_to_page_offset(vaddr) \
117   (((unsigned)(vaddr)) & SOS_PAGE_MASK)           117   (((unsigned)(vaddr)) & SOS_PAGE_MASK)
118                                                   118 
119                                                   119 
120 /**                                               120 /**
121  * Helper function to map a page in the pd.\ S    121  * Helper function to map a page in the pd.\ Suppose that the RAM
122  * is identity mapped to resolve PT actual (CP    122  * is identity mapped to resolve PT actual (CPU) address from the PD
123  * entry                                          123  * entry
124  */                                               124  */
125 static sos_ret_t paging_setup_map_helper(struc    125 static sos_ret_t paging_setup_map_helper(struct x86_pde * pd,
126                                          sos_p    126                                          sos_paddr_t ppage,
127                                          sos_v    127                                          sos_vaddr_t vaddr)
128 {                                                 128 {
129   /* Get the page directory entry and table en    129   /* Get the page directory entry and table entry index for this
130      address */                                   130      address */
131   unsigned index_in_pd = virt_to_pd_index(vadd    131   unsigned index_in_pd = virt_to_pd_index(vaddr);
132   unsigned index_in_pt = virt_to_pt_index(vadd    132   unsigned index_in_pt = virt_to_pt_index(vaddr);
133                                                   133 
134   /* Make sure the page table was mapped */       134   /* Make sure the page table was mapped */
135   struct x86_pte * pt;                            135   struct x86_pte * pt;
136   if (pd[index_in_pd].present)                    136   if (pd[index_in_pd].present)
137     {                                             137     {
138       pt = (struct x86_pte*) (pd[index_in_pd].    138       pt = (struct x86_pte*) (pd[index_in_pd].pt_paddr << 12);
139                                                   139 
140       /* If we allocate a new entry in the PT,    140       /* If we allocate a new entry in the PT, increase its reference
141          count. This test will always be TRUE     141          count. This test will always be TRUE here, since the setup
142          routine scans the kernel pages in a s    142          routine scans the kernel pages in a strictly increasing
143          order: at each step, the map will res    143          order: at each step, the map will result in the allocation of
144          a new PT entry. For the sake of clari    144          a new PT entry. For the sake of clarity, we keep the test
145          here. */                                 145          here. */
146       if (! pt[index_in_pt].present)              146       if (! pt[index_in_pt].present)
147         sos_physmem_ref_physpage_at((sos_paddr    147         sos_physmem_ref_physpage_at((sos_paddr_t)pt);
148                                                   148 
149       /* The previous test should always be TR    149       /* The previous test should always be TRUE */
150       else                                        150       else
151         SOS_ASSERT_FATAL(FALSE); /* indicate a    151         SOS_ASSERT_FATAL(FALSE); /* indicate a fatal error */
152     }                                             152     }
153   else                                            153   else
154     {                                             154     {
155       /* No : allocate a new one */               155       /* No : allocate a new one */
156       pt = (struct x86_pte*) sos_physmem_ref_p    156       pt = (struct x86_pte*) sos_physmem_ref_physpage_new(FALSE);
157       if (! pt)                                   157       if (! pt)
158         return -SOS_ENOMEM;                       158         return -SOS_ENOMEM;
159                                                   159       
160       memset((void*)pt, 0x0, SOS_PAGE_SIZE);      160       memset((void*)pt, 0x0, SOS_PAGE_SIZE);
161                                                   161 
162       pd[index_in_pd].present  = TRUE;            162       pd[index_in_pd].present  = TRUE;
163       pd[index_in_pd].write    = 1; /* It woul    163       pd[index_in_pd].write    = 1; /* It would be too complicated to
164                                        determi    164                                        determine whether it
165                                        corresp    165                                        corresponds to a real R/W area
166                                        of the     166                                        of the kernel code/data or
167                                        read-on    167                                        read-only */
168       pd[index_in_pd].pt_paddr = ((sos_paddr_t    168       pd[index_in_pd].pt_paddr = ((sos_paddr_t)pt) >> 12;
169     }                                             169     }
170                                                   170 
171                                                   171   
172   /* Map the page in the page table */            172   /* Map the page in the page table */
173   pt[index_in_pt].present = 1;                    173   pt[index_in_pt].present = 1;
174   pt[index_in_pt].write   = 1;  /* It would be    174   pt[index_in_pt].write   = 1;  /* It would be too complicated to
175                                    determine w    175                                    determine whether it corresponds to
176                                    a real R/W     176                                    a real R/W area of the kernel
177                                    code/data o    177                                    code/data or R/O only */
178   pt[index_in_pt].user    = 0;                    178   pt[index_in_pt].user    = 0;
179   pt[index_in_pt].paddr   = ppage >> 12;          179   pt[index_in_pt].paddr   = ppage >> 12;
180                                                   180 
181   return SOS_OK;                                  181   return SOS_OK;
182 }                                                 182 }
183                                                   183 
184                                                   184 
185 sos_ret_t sos_paging_subsystem_setup(sos_paddr !! 185 sos_ret_t sos_paging_setup(sos_paddr_t identity_mapping_base,
186                                      sos_paddr !! 186                            sos_paddr_t identity_mapping_top)
187 {                                                 187 {
188   /* The PDBR we will setup below */              188   /* The PDBR we will setup below */
189   struct x86_pdbr cr3;                            189   struct x86_pdbr cr3;  
190                                                   190 
191   /* Get the PD for the kernel */                 191   /* Get the PD for the kernel */
192   struct x86_pde * pd                             192   struct x86_pde * pd
193     = (struct x86_pde*) sos_physmem_ref_physpa    193     = (struct x86_pde*) sos_physmem_ref_physpage_new(FALSE);
194                                                   194 
195   /* The iterator for scanning the kernel area    195   /* The iterator for scanning the kernel area */
196   sos_paddr_t paddr;                              196   sos_paddr_t paddr;
197                                                   197 
198   /* Reset the PD. For the moment, there is st    198   /* Reset the PD. For the moment, there is still an IM for the whole
199      RAM, so that the paddr are also vaddr */     199      RAM, so that the paddr are also vaddr */
200   memset((void*)pd,                               200   memset((void*)pd,
201          0x0,                                     201          0x0,
202          SOS_PAGE_SIZE);                          202          SOS_PAGE_SIZE);
203                                                   203 
204   /* Identity-map the identity_mapping_* area     204   /* Identity-map the identity_mapping_* area */
205   for (paddr = identity_mapping_base ;            205   for (paddr = identity_mapping_base ;
206        paddr < identity_mapping_top ;             206        paddr < identity_mapping_top ;
207        paddr += SOS_PAGE_SIZE)                    207        paddr += SOS_PAGE_SIZE)
208     {                                             208     {
209       if (paging_setup_map_helper(pd, paddr, p    209       if (paging_setup_map_helper(pd, paddr, paddr))
210         return -SOS_ENOMEM;                       210         return -SOS_ENOMEM;
211     }                                             211     }
212                                                   212 
213   /* Identity-map the PC-specific BIOS/Video a    213   /* Identity-map the PC-specific BIOS/Video area */
214   for (paddr = BIOS_N_VIDEO_START ;               214   for (paddr = BIOS_N_VIDEO_START ;
215        paddr < BIOS_N_VIDEO_END ;                 215        paddr < BIOS_N_VIDEO_END ;
216        paddr += SOS_PAGE_SIZE)                    216        paddr += SOS_PAGE_SIZE)
217     {                                             217     {
218       if (paging_setup_map_helper(pd, paddr, p    218       if (paging_setup_map_helper(pd, paddr, paddr))
219         return -SOS_ENOMEM;                       219         return -SOS_ENOMEM;
220     }                                             220     }
221                                                   221 
222   /* Ok, kernel is now identity mapped in the     222   /* Ok, kernel is now identity mapped in the PD. We still have to set
223      up the mirroring */                          223      up the mirroring */
224   pd[virt_to_pd_index(SOS_PAGING_MIRROR_VADDR)    224   pd[virt_to_pd_index(SOS_PAGING_MIRROR_VADDR)].present = TRUE;
225   pd[virt_to_pd_index(SOS_PAGING_MIRROR_VADDR)    225   pd[virt_to_pd_index(SOS_PAGING_MIRROR_VADDR)].write = 1;
226   pd[virt_to_pd_index(SOS_PAGING_MIRROR_VADDR)    226   pd[virt_to_pd_index(SOS_PAGING_MIRROR_VADDR)].user  = 0;
227   pd[virt_to_pd_index(SOS_PAGING_MIRROR_VADDR)    227   pd[virt_to_pd_index(SOS_PAGING_MIRROR_VADDR)].pt_paddr 
228     = ((sos_paddr_t)pd)>>12;                      228     = ((sos_paddr_t)pd)>>12;
229                                                   229 
230   /* We now just have to configure the MMU to     230   /* We now just have to configure the MMU to use our PD. See Intel
231      x86 doc vol 3, section 3.6.3 */              231      x86 doc vol 3, section 3.6.3 */
232   memset(& cr3, 0x0, sizeof(struct x86_pdbr));    232   memset(& cr3, 0x0, sizeof(struct x86_pdbr)); /* Reset the PDBR */
233   cr3.pd_paddr = ((sos_paddr_t)pd) >> 12;         233   cr3.pd_paddr = ((sos_paddr_t)pd) >> 12;
234                                                   234 
235  /* Actual loading of the PDBR in the MMU: set    235  /* Actual loading of the PDBR in the MMU: setup cr3 + bits 31[Paging
236     Enabled] and 16[Write Protect] of cr0, see    236     Enabled] and 16[Write Protect] of cr0, see Intel x86 doc vol 3,
237     sections 2.5, 3.6.1 and 4.11.3 + note tabl    237     sections 2.5, 3.6.1 and 4.11.3 + note table 4-2 */
238   asm volatile ("movl %0,%%cr3\n\t"               238   asm volatile ("movl %0,%%cr3\n\t"
239                 "movl %%cr0,%%eax\n\t"            239                 "movl %%cr0,%%eax\n\t"
240                 "orl $0x80010000, %%eax\n\t" /    240                 "orl $0x80010000, %%eax\n\t" /* bit 31 | bit 16 */
241                 "movl %%eax,%%cr0\n\t"            241                 "movl %%eax,%%cr0\n\t"
242                 "jmp 1f\n\t"                      242                 "jmp 1f\n\t"
243                 "1:\n\t"                          243                 "1:\n\t"
244                 "movl $2f, %%eax\n\t"             244                 "movl $2f, %%eax\n\t"
245                 "jmp *%%eax\n\t"                  245                 "jmp *%%eax\n\t"
246                 "2:\n\t" ::"r"(cr3):"memory","    246                 "2:\n\t" ::"r"(cr3):"memory","eax");
247                                                   247 
248   /*                                              248   /*
249    * Here, the only memory available is:          249    * Here, the only memory available is:
250    * - The BIOS+video area                        250    * - The BIOS+video area
251    * - the identity_mapping_base .. identity_m    251    * - the identity_mapping_base .. identity_mapping_top area
252    * - the PD mirroring area (4M)                 252    * - the PD mirroring area (4M)
253    * All accesses to other virtual addresses w    253    * All accesses to other virtual addresses will generate a #PF
254    */                                             254    */
255                                                   255 
256   return SOS_OK;                                  256   return SOS_OK;
257 }                                                 257 }
258                                                   258 
259                                                   259 
260 /* Suppose that the current address is configu    260 /* Suppose that the current address is configured with the mirroring
261  * enabled to access the PD and PT. */            261  * enabled to access the PD and PT. */
262 sos_ret_t sos_paging_map(sos_paddr_t ppage_pad    262 sos_ret_t sos_paging_map(sos_paddr_t ppage_paddr,
263                          sos_vaddr_t vpage_vad    263                          sos_vaddr_t vpage_vaddr,
264                          sos_bool_t is_user_pa    264                          sos_bool_t is_user_page,
265                          sos_ui32_t flags)     !! 265                          int flags)
266 {                                                 266 {
267   /* Get the page directory entry and table en    267   /* Get the page directory entry and table entry index for this
268      address */                                   268      address */
269   unsigned index_in_pd = virt_to_pd_index(vpag    269   unsigned index_in_pd = virt_to_pd_index(vpage_vaddr);
270   unsigned index_in_pt = virt_to_pt_index(vpag    270   unsigned index_in_pt = virt_to_pt_index(vpage_vaddr);
271                                                   271   
272   /* Get the PD of the current context */         272   /* Get the PD of the current context */
273   struct x86_pde *pd = (struct x86_pde*)          273   struct x86_pde *pd = (struct x86_pde*)
274     (SOS_PAGING_MIRROR_VADDR                      274     (SOS_PAGING_MIRROR_VADDR
275      + SOS_PAGE_SIZE*virt_to_pd_index(SOS_PAGI    275      + SOS_PAGE_SIZE*virt_to_pd_index(SOS_PAGING_MIRROR_VADDR));
276                                                   276 
277   /* Address of the PT in the mirroring */        277   /* Address of the PT in the mirroring */
278   struct x86_pte * pt = (struct x86_pte*) (SOS    278   struct x86_pte * pt = (struct x86_pte*) (SOS_PAGING_MIRROR_VADDR
279                                            + S    279                                            + SOS_PAGE_SIZE*index_in_pd);
280                                                   280 
281   /* The mapping of anywhere in the PD mirrori    281   /* The mapping of anywhere in the PD mirroring is FORBIDDEN ;) */
282   if ((vpage_vaddr >= SOS_PAGING_MIRROR_VADDR)    282   if ((vpage_vaddr >= SOS_PAGING_MIRROR_VADDR)
283       && (vpage_vaddr < SOS_PAGING_MIRROR_VADD    283       && (vpage_vaddr < SOS_PAGING_MIRROR_VADDR + SOS_PAGING_MIRROR_SIZE))
284     return -SOS_EINVAL;                           284     return -SOS_EINVAL;
285                                                   285 
286   /* Map a page for the PT if necessary */        286   /* Map a page for the PT if necessary */
287   if (! pd[index_in_pd].present)                  287   if (! pd[index_in_pd].present)
288     {                                             288     {
289       /* No : allocate a new one */               289       /* No : allocate a new one */
290       sos_paddr_t pt_ppage                        290       sos_paddr_t pt_ppage
291         = sos_physmem_ref_physpage_new(! (flag    291         = sos_physmem_ref_physpage_new(! (flags & SOS_VM_MAP_ATOMIC));
292       if (! pt_ppage)                             292       if (! pt_ppage)
293         {                                         293         {
294           return -SOS_ENOMEM;                     294           return -SOS_ENOMEM;
295         }                                         295         }
296                                                   296 
297       pd[index_in_pd].present  = TRUE;            297       pd[index_in_pd].present  = TRUE;
298       pd[index_in_pd].write    = 1; /* Ignored    298       pd[index_in_pd].write    = 1; /* Ignored in supervisor mode, see
299                                        Intel v    299                                        Intel vol 3 section 4.12 */
300       pd[index_in_pd].user     |= (is_user_pag    300       pd[index_in_pd].user     |= (is_user_page)?1:0;
301       pd[index_in_pd].pt_paddr = ((sos_paddr_t    301       pd[index_in_pd].pt_paddr = ((sos_paddr_t)pt_ppage) >> 12;
302                                                   302       
303       /*                                          303       /*
304        * The PT is now mapped in the PD mirror    304        * The PT is now mapped in the PD mirroring
305        */                                         305        */
306                                                   306 
307       /* Invalidate TLB for the page we just a    307       /* Invalidate TLB for the page we just added */
308       invlpg(pt);                                 308       invlpg(pt);
309                                                   309      
310       /* Reset this new PT */                     310       /* Reset this new PT */
311       memset((void*)pt, 0x0, SOS_PAGE_SIZE);      311       memset((void*)pt, 0x0, SOS_PAGE_SIZE);
312     }                                             312     }
313                                                   313 
314   /* If we allocate a new entry in the PT, inc    314   /* If we allocate a new entry in the PT, increase its reference
315      count. */                                    315      count. */
316   else if (! pt[index_in_pt].present)             316   else if (! pt[index_in_pt].present)
317     sos_physmem_ref_physpage_at(pd[index_in_pd    317     sos_physmem_ref_physpage_at(pd[index_in_pd].pt_paddr << 12);
318                                                   318   
319   /* Otherwise, that means that a physical pag    319   /* Otherwise, that means that a physical page is implicitely
320      unmapped */                                  320      unmapped */
321   else                                            321   else
322     sos_physmem_unref_physpage(pt[index_in_pt]    322     sos_physmem_unref_physpage(pt[index_in_pt].paddr << 12);
323                                                   323 
324   /* Map the page in the page table */            324   /* Map the page in the page table */
325   pt[index_in_pt].present = TRUE;                 325   pt[index_in_pt].present = TRUE;
326   pt[index_in_pt].write   = (flags & SOS_VM_MA    326   pt[index_in_pt].write   = (flags & SOS_VM_MAP_PROT_WRITE)?1:0;
327   pt[index_in_pt].user    = (is_user_page)?1:0    327   pt[index_in_pt].user    = (is_user_page)?1:0;
328   pt[index_in_pt].paddr   = ppage_paddr >> 12;    328   pt[index_in_pt].paddr   = ppage_paddr >> 12;
329   sos_physmem_ref_physpage_at(ppage_paddr);       329   sos_physmem_ref_physpage_at(ppage_paddr);
330                                                   330 
331   /*                                              331   /*
332    * The page is now mapped in the current add    332    * The page is now mapped in the current address space
333    */                                             333    */
334                                                   334   
335   /* Invalidate TLB for the page we just added    335   /* Invalidate TLB for the page we just added */
336   invlpg(vpage_vaddr);                            336   invlpg(vpage_vaddr);
337                                                   337 
338   return SOS_OK;                                  338   return SOS_OK;
339 }                                                 339 }
340                                                   340 
341                                                   341 
/**
 * Remove the virtual->physical mapping for the page containing
 * vpage_vaddr in the CURRENT address space, releasing the physical
 * page's reference and, when the page table becomes unused, the page
 * table itself.
 *
 * @param vpage_vaddr Page-aligned virtual address of the page to unmap.
 * @return SOS_OK on success; -SOS_EINVAL if no page is mapped there or
 *         if vpage_vaddr lies inside the PD mirroring window.
 */
sos_ret_t sos_paging_unmap(sos_vaddr_t vpage_vaddr)
{
  sos_ret_t pt_unref_retval;

  /* Get the page directory entry and table entry index for this
     address */
  unsigned index_in_pd = virt_to_pd_index(vpage_vaddr);
  unsigned index_in_pt = virt_to_pt_index(vpage_vaddr);

  /* Get the PD of the current context: the PD maps itself through the
     mirroring window, so it is reachable at this fixed virtual address */
  struct x86_pde *pd = (struct x86_pde*)
    (SOS_PAGING_MIRROR_VADDR
     + SOS_PAGE_SIZE*virt_to_pd_index(SOS_PAGING_MIRROR_VADDR));

  /* Address of the PT in the mirroring: the index_in_pd'th page of the
     mirror window aliases the index_in_pd'th page table */
  struct x86_pte * pt = (struct x86_pte*) (SOS_PAGING_MIRROR_VADDR
                                           + SOS_PAGE_SIZE*index_in_pd);

  /* No page mapped at this address ? Check the PDE first: reading
     pt[] is only safe once the PDE is known to be present */
  if (! pd[index_in_pd].present)
    return -SOS_EINVAL;
  if (! pt[index_in_pt].present)
    return -SOS_EINVAL;

  /* The unmapping of anywhere in the PD mirroring is FORBIDDEN ;)
     (it would corrupt the very structure used to reach the PD/PTs) */
  if ((vpage_vaddr >= SOS_PAGING_MIRROR_VADDR)
      && (vpage_vaddr < SOS_PAGING_MIRROR_VADDR + SOS_PAGING_MIRROR_SIZE))
    return -SOS_EINVAL;

  /* Reclaim the physical page (drop the reference taken at map time) */
  sos_physmem_unref_physpage(pt[index_in_pt].paddr << 12);

  /* Unmap the page in the page table: zeroing the PTE clears its
     "present" bit along with every other field */
  memset(pt + index_in_pt, 0x0, sizeof(struct x86_pte));

  /* Invalidate TLB for the page we just unmapped — must happen after
     the PTE update so no stale translation survives */
  invlpg(vpage_vaddr);

  /* Reclaim this entry in the PT, which may free the PT.
     NOTE(review): a strictly positive return value is taken below to
     mean the PT page's refcount dropped to zero (PT freed) — confirm
     against sos_physmem_unref_physpage's contract */
  pt_unref_retval = sos_physmem_unref_physpage(pd[index_in_pd].pt_paddr << 12);
  SOS_ASSERT_FATAL(pt_unref_retval >= 0);
  if (pt_unref_retval > 0)
    /* If the PT is now completely unused... */
    {
      /* Release the PDE so the freed PT is no longer referenced */
      memset(pd + index_in_pd, 0x0, sizeof(struct x86_pde));
      
      /* Update the TLB for the mirror alias of the (now freed) PT */
      invlpg(pt);
    }

  return SOS_OK;  
}
395                                                   395 
396                                                   396 
397 int sos_paging_get_prot(sos_vaddr_t vaddr)        397 int sos_paging_get_prot(sos_vaddr_t vaddr)
398 {                                                 398 {
399   int retval;                                     399   int retval;
400                                                   400 
401   /* Get the page directory entry and table en    401   /* Get the page directory entry and table entry index for this
402      address */                                   402      address */
403   unsigned index_in_pd = virt_to_pd_index(vadd    403   unsigned index_in_pd = virt_to_pd_index(vaddr);
404   unsigned index_in_pt = virt_to_pt_index(vadd    404   unsigned index_in_pt = virt_to_pt_index(vaddr);
405                                                   405   
406   /* Get the PD of the current context */         406   /* Get the PD of the current context */
407   struct x86_pde *pd = (struct x86_pde*)          407   struct x86_pde *pd = (struct x86_pde*)
408     (SOS_PAGING_MIRROR_VADDR                      408     (SOS_PAGING_MIRROR_VADDR
409      + SOS_PAGE_SIZE*virt_to_pd_index(SOS_PAGI    409      + SOS_PAGE_SIZE*virt_to_pd_index(SOS_PAGING_MIRROR_VADDR));
410                                                   410 
411   /* Address of the PT in the mirroring */        411   /* Address of the PT in the mirroring */
412   struct x86_pte * pt = (struct x86_pte*) (SOS    412   struct x86_pte * pt = (struct x86_pte*) (SOS_PAGING_MIRROR_VADDR
413                                            + S    413                                            + SOS_PAGE_SIZE*index_in_pd);
414                                                   414 
415   /* No page mapped at this address ? */          415   /* No page mapped at this address ? */
416   if (! pd[index_in_pd].present)                  416   if (! pd[index_in_pd].present)
417     return SOS_VM_MAP_PROT_NONE;                  417     return SOS_VM_MAP_PROT_NONE;
418   if (! pt[index_in_pt].present)                  418   if (! pt[index_in_pt].present)
419     return SOS_VM_MAP_PROT_NONE;                  419     return SOS_VM_MAP_PROT_NONE;
420                                                   420   
421   /* Default access right of an available page    421   /* Default access right of an available page is "read" on x86 */
422   retval = SOS_VM_MAP_PROT_READ;                  422   retval = SOS_VM_MAP_PROT_READ;
423   if (pd[index_in_pd].write && pt[index_in_pt]    423   if (pd[index_in_pd].write && pt[index_in_pt].write)
424     retval |= SOS_VM_MAP_PROT_WRITE;              424     retval |= SOS_VM_MAP_PROT_WRITE;
425                                                   425 
426   return retval;                                  426   return retval;
427 }                                                 427 }
428                                                   428 
429                                                   429 
430 sos_paddr_t sos_paging_get_paddr(sos_vaddr_t v    430 sos_paddr_t sos_paging_get_paddr(sos_vaddr_t vaddr)
431 {                                                 431 {
432   /* Get the page directory entry and table en    432   /* Get the page directory entry and table entry index for this
433      address */                                   433      address */
434   unsigned index_in_pd = virt_to_pd_index(vadd    434   unsigned index_in_pd = virt_to_pd_index(vaddr);
435   unsigned index_in_pt = virt_to_pt_index(vadd    435   unsigned index_in_pt = virt_to_pt_index(vaddr);
436   unsigned offset_in_page = virt_to_page_offse    436   unsigned offset_in_page = virt_to_page_offset(vaddr);
437                                                   437   
438   /* Get the PD of the current context */         438   /* Get the PD of the current context */
439   struct x86_pde *pd = (struct x86_pde*)          439   struct x86_pde *pd = (struct x86_pde*)
440     (SOS_PAGING_MIRROR_VADDR                      440     (SOS_PAGING_MIRROR_VADDR
441      + SOS_PAGE_SIZE*virt_to_pd_index(SOS_PAGI    441      + SOS_PAGE_SIZE*virt_to_pd_index(SOS_PAGING_MIRROR_VADDR));
442                                                   442 
443   /* Address of the PT in the mirroring */        443   /* Address of the PT in the mirroring */
444   struct x86_pte * pt = (struct x86_pte*) (SOS    444   struct x86_pte * pt = (struct x86_pte*) (SOS_PAGING_MIRROR_VADDR
445                                            + S    445                                            + SOS_PAGE_SIZE*index_in_pd);
446                                                   446 
447   /* No page mapped at this address ? */          447   /* No page mapped at this address ? */
448   if (! pd[index_in_pd].present)                  448   if (! pd[index_in_pd].present)
449     return (sos_paddr_t)NULL;                     449     return (sos_paddr_t)NULL;
450   if (! pt[index_in_pt].present)                  450   if (! pt[index_in_pt].present)
451     return (sos_paddr_t)NULL;                     451     return (sos_paddr_t)NULL;
452                                                   452 
453   return (pt[index_in_pt].paddr << 12) + offse    453   return (pt[index_in_pt].paddr << 12) + offset_in_page;
454 }                                                 454 }
455                                                   455 
                                                      

source navigation ] diff markup ] identifier search ] general search ]