SimpleOS

The LXR Cross Referencer for SOS

Diff markup

Differences between /hwcore/paging.c (Article 7, left column) and /hwcore/paging.c (Article 5, right column)


001 /* Copyright (C) 2004  David Decotigny            001 /* Copyright (C) 2004  David Decotigny
002                                                   002 
003    This program is free software; you can redi    003    This program is free software; you can redistribute it and/or
004    modify it under the terms of the GNU Genera    004    modify it under the terms of the GNU General Public License
005    as published by the Free Software Foundatio    005    as published by the Free Software Foundation; either version 2
006    of the License, or (at your option) any lat    006    of the License, or (at your option) any later version.
007                                                   007    
008    This program is distributed in the hope tha    008    This program is distributed in the hope that it will be useful,
009    but WITHOUT ANY WARRANTY; without even the     009    but WITHOUT ANY WARRANTY; without even the implied warranty of
010    MERCHANTABILITY or FITNESS FOR A PARTICULAR    010    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
011    GNU General Public License for more details    011    GNU General Public License for more details.
012                                                   012    
013    You should have received a copy of the GNU     013    You should have received a copy of the GNU General Public License
014    along with this program; if not, write to t    014    along with this program; if not, write to the Free Software
015    Foundation, Inc., 59 Temple Place - Suite 3    015    Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
016    USA.                                           016    USA. 
017 */                                                017 */
018 #include <sos/physmem.h>                          018 #include <sos/physmem.h>
019 #include <sos/klibc.h>                            019 #include <sos/klibc.h>
020 #include <sos/assert.h>                           020 #include <sos/assert.h>
021                                                   021 
022 #include "mm_context.h"                        << 
023                                                << 
024 #include "paging.h"                               022 #include "paging.h"
025                                                   023 
026                                                << 
027 /*                                             << 
028  * Important NOTICE concerning the use of the reference & occupation << 
029  * counters of the physical pages by the "paging" subsystem: << 
030  *   - All the kernel PT are SHARED. This means that as soon as a << 
031  *     kernel PT belongs to one mm_context, it belongs to ALL the << 
032  *     mm_contexts. We don't update the real reference count of the PT << 
033  *     in this respect, because it would require to update the << 
034  *     reference counts of ALL the kernel PTs as soon as a new << 
035  *     mm_context is created, or as soon as a mm_context is << 
036  *     suppressed. This way, the reference count is constant << 
037  *     independently of the actual number of PDs mapping them << 
038  *   - We do NOT maintain the occupation count of the PDs, as it would add << 
039  *     some little overhead that is useless    << 
040  *   - We do maintain the occupation count of the PTs: it is the << 
041  *     number of PTE allocated in the PT       << 
042  */                                            << 
043                                                << 
044                                                << 
045 /** The structure of a page directory entry. S    024 /** The structure of a page directory entry. See Intel vol 3 section
046     3.6.4 */                                      025     3.6.4 */
047 struct x86_pde                                    026 struct x86_pde
048 {                                                 027 {
049   sos_ui32_t present        :1; /* 1=PT mapped    028   sos_ui32_t present        :1; /* 1=PT mapped */
050   sos_ui32_t write          :1; /* 0=read-only    029   sos_ui32_t write          :1; /* 0=read-only, 1=read/write */
051   sos_ui32_t user           :1; /* 0=superviso    030   sos_ui32_t user           :1; /* 0=supervisor, 1=user */
052   sos_ui32_t write_through  :1; /* 0=write-bac    031   sos_ui32_t write_through  :1; /* 0=write-back, 1=write-through */
053   sos_ui32_t cache_disabled :1; /* 1=cache dis    032   sos_ui32_t cache_disabled :1; /* 1=cache disabled */
054   sos_ui32_t accessed       :1; /* 1=read/writ    033   sos_ui32_t accessed       :1; /* 1=read/write access since last clear */
055   sos_ui32_t zero           :1; /* Intel reser    034   sos_ui32_t zero           :1; /* Intel reserved */
056   sos_ui32_t page_size      :1; /* 0=4kB, 1=4M    035   sos_ui32_t page_size      :1; /* 0=4kB, 1=4MB or 2MB (depending on PAE) */
057   sos_ui32_t global_page    :1; /* Ignored (In    036   sos_ui32_t global_page    :1; /* Ignored (Intel reserved) */
058   sos_ui32_t custom         :3; /* Do what you    037   sos_ui32_t custom         :3; /* Do what you want with them */
059   sos_ui32_t pt_paddr       :20;                  038   sos_ui32_t pt_paddr       :20;
060 } __attribute__ ((packed));                       039 } __attribute__ ((packed));
061                                                   040 
062                                                   041 
063 /** Intermediate type to speed up PDE copy */  << 
064 typedef union {                                << 
065   struct x86_pde pde;                          << 
066   sos_ui32_t     ui32;                         << 
067 } x86_pde_val_t;                               << 
068                                                << 
069                                                << 
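Note: the x86_pde_val_t union added in Article 7 lets the kernel read or write a whole PDE as a single 32-bit value, which the PD-synchronization and copy routines further down rely on. A standalone sketch of how the two views alias each other (it re-declares the structure locally with plain unsigned int in place of sos_ui32_t, purely for a standalone build):

    #include <stdio.h>

    /* Same layout as struct x86_pde above, with sos_ui32_t
       replaced by unsigned int for a standalone build */
    struct pde
    {
      unsigned int present        :1;
      unsigned int write          :1;
      unsigned int user           :1;
      unsigned int write_through  :1;
      unsigned int cache_disabled :1;
      unsigned int accessed       :1;
      unsigned int zero           :1;
      unsigned int page_size      :1;
      unsigned int global_page    :1;
      unsigned int custom         :3;
      unsigned int pt_paddr       :20;
    } __attribute__ ((packed));

    typedef union { struct pde pde; unsigned int ui32; } pde_val_t;

    int main(void)
    {
      pde_val_t u;

      u.ui32 = 0;               /* one store resets every field */
      u.pde.present  = 1;
      u.pde.write    = 1;
      u.pde.pt_paddr = 0x00123; /* PT at physical 0x00123000 */

      /* With GCC on little-endian x86 this prints 0x00123003:
         bit 0 (present) | bit 1 (write) | 0x123 << 12 */
      printf("raw PDE = 0x%08x\n", u.ui32);
      return 0;
    }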
070 /** The structure of a page table entry. See I    042 /** The structure of a page table entry. See Intel vol 3 section
071     3.6.4 */                                      043     3.6.4 */
072 struct x86_pte                                    044 struct x86_pte
073 {                                                 045 {
074   sos_ui32_t present        :1; /* 1=PT mapped    046   sos_ui32_t present        :1; /* 1=PT mapped */
075   sos_ui32_t write          :1; /* 0=read-only    047   sos_ui32_t write          :1; /* 0=read-only, 1=read/write */
076   sos_ui32_t user           :1; /* 0=superviso    048   sos_ui32_t user           :1; /* 0=supervisor, 1=user */
077   sos_ui32_t write_through  :1; /* 0=write-bac    049   sos_ui32_t write_through  :1; /* 0=write-back, 1=write-through */
078   sos_ui32_t cache_disabled :1; /* 1=cache dis    050   sos_ui32_t cache_disabled :1; /* 1=cache disabled */
079   sos_ui32_t accessed       :1; /* 1=read/writ    051   sos_ui32_t accessed       :1; /* 1=read/write access since last clear */
080   sos_ui32_t dirty          :1; /* 1=write acc    052   sos_ui32_t dirty          :1; /* 1=write access since last clear */
081   sos_ui32_t zero           :1; /* Intel reser    053   sos_ui32_t zero           :1; /* Intel reserved */
082   sos_ui32_t global_page    :1; /* 1=No TLB in    054   sos_ui32_t global_page    :1; /* 1=No TLB invalidation upon cr3 switch
083                                    (when PG se    055                                    (when PG set in cr4) */
084   sos_ui32_t custom         :3; /* Do what you    056   sos_ui32_t custom         :3; /* Do what you want with them */
085   sos_ui32_t paddr          :20;                  057   sos_ui32_t paddr          :20;
086 } __attribute__ ((packed));                       058 } __attribute__ ((packed));
087                                                   059 
088                                                   060 
089 /** Intermediate type to speed up PTE copy */  << 
090 typedef union {                                << 
091   struct x86_pte pte;                          << 
092   sos_ui32_t     ui32;                         << 
093 } x86_pte_val_t;                               << 
094                                                << 
095                                                << 
096 /** Structure of the x86 CR3 register: the Pag    061 /** Structure of the x86 CR3 register: the Page Directory Base
097     Register. See Intel x86 doc Vol 3 section     062     Register. See Intel x86 doc Vol 3 section 2.5 */
098 struct x86_pdbr                                   063 struct x86_pdbr
099 {                                                 064 {
100   sos_ui32_t zero1          :3; /* Intel reser    065   sos_ui32_t zero1          :3; /* Intel reserved */
101   sos_ui32_t write_through  :1; /* 0=write-bac    066   sos_ui32_t write_through  :1; /* 0=write-back, 1=write-through */
102   sos_ui32_t cache_disabled :1; /* 1=cache dis    067   sos_ui32_t cache_disabled :1; /* 1=cache disabled */
103   sos_ui32_t zero2          :7; /* Intel reser    068   sos_ui32_t zero2          :7; /* Intel reserved */
104   sos_ui32_t pd_paddr       :20;                  069   sos_ui32_t pd_paddr       :20;
105 } __attribute__ ((packed));                       070 } __attribute__ ((packed));
106                                                   071 
107                                                   072 
108 /**                                               073 /**
109  * Helper macro to control the MMU: invalidate    074  * Helper macro to control the MMU: invalidate the TLB entry for the
110  * page located at the given virtual address.     075  * page located at the given virtual address. See Intel x86 vol 3
111  * section 3.7.                                   076  * section 3.7.
112  */                                               077  */
113 #define invlpg(vaddr) \                           078 #define invlpg(vaddr) \
114   do { \                                          079   do { \
115        __asm__ __volatile__("invlpg %0"::"m"(*    080        __asm__ __volatile__("invlpg %0"::"m"(*((unsigned *)(vaddr)))); \
116   } while(0)                                      081   } while(0)
117                                                   082 
118                                                   083 
119 /**                                               084 /**
120  * Helper macro to control the MMU: invalidate    085  * Helper macro to control the MMU: invalidate the whole TLB. See
121  * Intel x86 vol 3 section 3.7.                   086  * Intel x86 vol 3 section 3.7.
122  */                                               087  */
123 #define flush_tlb() \                             088 #define flush_tlb() \
124   do { \                                          089   do { \
125         unsigned long tmpreg; \                   090         unsigned long tmpreg; \
126         asm volatile("movl %%cr3,%0\n\tmovl %0    091         asm volatile("movl %%cr3,%0\n\tmovl %0,%%cr3" :"=r" \
127                      (tmpreg) : :"memory"); \     092                      (tmpreg) : :"memory"); \
128   } while (0)                                     093   } while (0)
129                                                   094 
130                                                   095 
131 /**                                               096 /**
132  * Helper macro to compute the index in the PD    097  * Helper macro to compute the index in the PD for the given virtual
133  * address                                        098  * address
134  */                                               099  */
135 #define virt_to_pd_index(vaddr) \                 100 #define virt_to_pd_index(vaddr) \
136   (((unsigned)(vaddr)) >> 22)                     101   (((unsigned)(vaddr)) >> 22)
137                                                   102 
138                                                   103 
139 /**                                               104 /**
140  * Helper macro to compute the index in the PT    105  * Helper macro to compute the index in the PT for the given virtual
141  * address                                        106  * address
142  */                                               107  */
143 #define virt_to_pt_index(vaddr) \                 108 #define virt_to_pt_index(vaddr) \
144   ( (((unsigned)(vaddr)) >> 12) & 0x3ff )         109   ( (((unsigned)(vaddr)) >> 12) & 0x3ff )
145                                                   110 
146                                                   111 
147 /**                                               112 /**
148  * Helper macro to compute the offset in the p    113  * Helper macro to compute the offset in the page for the given virtual
149  * address                                        114  * address
150  */                                               115  */
151 #define virt_to_page_offset(vaddr) \              116 #define virt_to_page_offset(vaddr) \
152   (((unsigned)(vaddr)) & SOS_PAGE_MASK)           117   (((unsigned)(vaddr)) & SOS_PAGE_MASK)
153                                                   118 
154                                                   119 
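Note: a quick worked example of the three macros above (a standalone sketch; it assumes SOS_PAGE_MASK selects the low 12 offset bits, i.e. 0xfff): the 32-bit virtual address 0x40123abc splits into PD index 0x100, PT index 0x123 and page offset 0xabc.

    #include <stdio.h>

    /* Local copies of the macros above; SOS_PAGE_MASK is assumed
       to be 0xfff (low 12 bits = offset within a 4kB page) */
    #define virt_to_pd_index(vaddr)    (((unsigned)(vaddr)) >> 22)
    #define virt_to_pt_index(vaddr)    ((((unsigned)(vaddr)) >> 12) & 0x3ff)
    #define virt_to_page_offset(vaddr) (((unsigned)(vaddr)) & 0xfff)

    int main(void)
    {
      unsigned vaddr = 0x40123abc;

      /* Prints: pd=0x100 pt=0x123 offset=0xabc */
      printf("pd=0x%x pt=0x%x offset=0x%x\n",
             virt_to_pd_index(vaddr),
             virt_to_pt_index(vaddr),
             virt_to_page_offset(vaddr));
      return 0;
    }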
155 /**                                               120 /**
156  * Helper function to map a page in the pd.\ S    121  * Helper function to map a page in the pd.\ Suppose that the RAM
157  * is identity mapped to resolve PT actual (CP    122  * is identity mapped to resolve PT actual (CPU) address from the PD
158  * entry                                          123  * entry
159  */                                               124  */
160 static sos_ret_t paging_setup_map_helper(struc    125 static sos_ret_t paging_setup_map_helper(struct x86_pde * pd,
161                                          sos_p    126                                          sos_paddr_t ppage,
162                                          sos_v    127                                          sos_vaddr_t vaddr)
163 {                                                 128 {
164   /* Get the page directory entry and table en    129   /* Get the page directory entry and table entry index for this
165      address */                                   130      address */
166   unsigned index_in_pd = virt_to_pd_index(vadd    131   unsigned index_in_pd = virt_to_pd_index(vaddr);
167   unsigned index_in_pt = virt_to_pt_index(vadd    132   unsigned index_in_pt = virt_to_pt_index(vaddr);
168                                                   133 
169   /* Make sure the page table was mapped */       134   /* Make sure the page table was mapped */
170   struct x86_pte * pt;                            135   struct x86_pte * pt;
171   if (pd[index_in_pd].present)                    136   if (pd[index_in_pd].present)
172     {                                             137     {
173       pt = (struct x86_pte*) (pd[index_in_pd].    138       pt = (struct x86_pte*) (pd[index_in_pd].pt_paddr << 12);
174                                                   139 
175       /* This test will always be TRUE here, since the setup routine !! 140       /* If we allocate a new entry in the PT, increase its reference
176          scans the kernel pages in a strictly increasing order: at !! 141          count. This test will always be TRUE here, since the setup
177          each step, the map will result in the allocation of a new PT !! 142          routine scans the kernel pages in a strictly increasing
178          entry. For the sake of clarity, we keep the test here. */ !! 143          order: at each step, the map will result in the allocation of
179       if (pt[index_in_pt].present)             !! 144          a new PT entry. For the sake of clarity, we keep the test
                                                   >> 145          here. */
                                                   >> 146       if (! pt[index_in_pt].present)
                                                   >> 147         sos_physmem_ref_physpage_at((sos_paddr_t)pt);
                                                   >> 148 
                                                   >> 149       /* The previous test should always be TRUE */
                                                   >> 150       else
180         SOS_ASSERT_FATAL(FALSE); /* indicate a    151         SOS_ASSERT_FATAL(FALSE); /* indicate a fatal error */
181     }                                             152     }
182   else                                            153   else
183     {                                             154     {
184       /* No : allocate a new one */               155       /* No : allocate a new one */
185       pt = (struct x86_pte*) sos_physmem_ref_p    156       pt = (struct x86_pte*) sos_physmem_ref_physpage_new(FALSE);
186       if (! pt)                                   157       if (! pt)
187         return -SOS_ENOMEM;                       158         return -SOS_ENOMEM;
188                                                   159       
189       memset((void*)pt, 0x0, SOS_PAGE_SIZE);      160       memset((void*)pt, 0x0, SOS_PAGE_SIZE);
190                                                   161 
191       pd[index_in_pd].present  = TRUE;            162       pd[index_in_pd].present  = TRUE;
192       pd[index_in_pd].write    = 1; /* It woul    163       pd[index_in_pd].write    = 1; /* It would be too complicated to
193                                        determi    164                                        determine whether it
194                                        corresp    165                                        corresponds to a real R/W area
195                                        of the     166                                        of the kernel code/data or
196                                        read-on    167                                        read-only */
197       pd[index_in_pd].pt_paddr = ((sos_paddr_t    168       pd[index_in_pd].pt_paddr = ((sos_paddr_t)pt) >> 12;
198     }                                             169     }
199                                                   170 
200                                                   171   
201   /* Map the page in the page table */            172   /* Map the page in the page table */
202   pt[index_in_pt].present = 1;                    173   pt[index_in_pt].present = 1;
203   pt[index_in_pt].write   = 1;  /* It would be    174   pt[index_in_pt].write   = 1;  /* It would be too complicated to
204                                    determine w    175                                    determine whether it corresponds to
205                                    a real R/W     176                                    a real R/W area of the kernel
206                                    code/data o    177                                    code/data or R/O only */
207   pt[index_in_pt].user    = 0;                    178   pt[index_in_pt].user    = 0;
208   pt[index_in_pt].paddr   = ppage >> 12;          179   pt[index_in_pt].paddr   = ppage >> 12;
209                                                   180 
210   /* Increase the PT's occupation count because we allocated a new entry << 
211      inside it */                              << 
212   sos_physmem_inc_physpage_occupation((sos_paddr_t)pt); << 
213                                                << 
214   return SOS_OK;                                  181   return SOS_OK;
215 }                                                 182 }
216                                                   183 
217                                                   184 
218 sos_ret_t sos_paging_subsystem_setup(sos_paddr_t identity_mapping_base, !! 185 sos_ret_t sos_paging_setup(sos_paddr_t identity_mapping_base,
219                                      sos_paddr_t identity_mapping_top) !! 186                            sos_paddr_t identity_mapping_top)
220 {                                                 187 {
221   /* The PDBR we will setup below */              188   /* The PDBR we will setup below */
222   struct x86_pdbr cr3;                            189   struct x86_pdbr cr3;  
223                                                   190 
224   /* Get the PD for the kernel */                 191   /* Get the PD for the kernel */
225   struct x86_pde * pd                             192   struct x86_pde * pd
226     = (struct x86_pde*) sos_physmem_ref_physpa    193     = (struct x86_pde*) sos_physmem_ref_physpage_new(FALSE);
227                                                   194 
228   /* The iterator for scanning the kernel area    195   /* The iterator for scanning the kernel area */
229   sos_paddr_t paddr;                              196   sos_paddr_t paddr;
230                                                   197 
231   /* Reset the PD. For the moment, there is st    198   /* Reset the PD. For the moment, there is still an IM for the whole
232      RAM, so that the paddr are also vaddr */     199      RAM, so that the paddr are also vaddr */
233   memset((void*)pd,                               200   memset((void*)pd,
234          0x0,                                     201          0x0,
235          SOS_PAGE_SIZE);                          202          SOS_PAGE_SIZE);
236                                                   203 
237   /* Identity-map the identity_mapping_* area     204   /* Identity-map the identity_mapping_* area */
238   for (paddr = identity_mapping_base ;            205   for (paddr = identity_mapping_base ;
239        paddr < identity_mapping_top ;             206        paddr < identity_mapping_top ;
240        paddr += SOS_PAGE_SIZE)                    207        paddr += SOS_PAGE_SIZE)
241     {                                             208     {
242       if (paging_setup_map_helper(pd, paddr, p    209       if (paging_setup_map_helper(pd, paddr, paddr))
243         return -SOS_ENOMEM;                       210         return -SOS_ENOMEM;
244     }                                             211     }
245                                                   212 
246   /* Identity-map the PC-specific BIOS/Video a    213   /* Identity-map the PC-specific BIOS/Video area */
247   for (paddr = BIOS_N_VIDEO_START ;               214   for (paddr = BIOS_N_VIDEO_START ;
248        paddr < BIOS_N_VIDEO_END ;                 215        paddr < BIOS_N_VIDEO_END ;
249        paddr += SOS_PAGE_SIZE)                    216        paddr += SOS_PAGE_SIZE)
250     {                                             217     {
251       if (paging_setup_map_helper(pd, paddr, p    218       if (paging_setup_map_helper(pd, paddr, paddr))
252         return -SOS_ENOMEM;                       219         return -SOS_ENOMEM;
253     }                                             220     }
254                                                   221 
255   /* Ok, kernel is now identity mapped in the     222   /* Ok, kernel is now identity mapped in the PD. We still have to set
256      up the mirroring */                          223      up the mirroring */
257   pd[virt_to_pd_index(SOS_PAGING_MIRROR_VADDR)    224   pd[virt_to_pd_index(SOS_PAGING_MIRROR_VADDR)].present = TRUE;
258   pd[virt_to_pd_index(SOS_PAGING_MIRROR_VADDR)    225   pd[virt_to_pd_index(SOS_PAGING_MIRROR_VADDR)].write = 1;
259   pd[virt_to_pd_index(SOS_PAGING_MIRROR_VADDR)    226   pd[virt_to_pd_index(SOS_PAGING_MIRROR_VADDR)].user  = 0;
260   pd[virt_to_pd_index(SOS_PAGING_MIRROR_VADDR)    227   pd[virt_to_pd_index(SOS_PAGING_MIRROR_VADDR)].pt_paddr 
261     = ((sos_paddr_t)pd)>>12;                      228     = ((sos_paddr_t)pd)>>12;
262                                                   229 
263   /* We now just have to configure the MMU to     230   /* We now just have to configure the MMU to use our PD. See Intel
264      x86 doc vol 3, section 3.6.3 */              231      x86 doc vol 3, section 3.6.3 */
265   memset(& cr3, 0x0, sizeof(struct x86_pdbr));    232   memset(& cr3, 0x0, sizeof(struct x86_pdbr)); /* Reset the PDBR */
266   cr3.pd_paddr = ((sos_paddr_t)pd) >> 12;         233   cr3.pd_paddr = ((sos_paddr_t)pd) >> 12;
267                                                   234 
268  /* Actual loading of the PDBR in the MMU: set    235  /* Actual loading of the PDBR in the MMU: setup cr3 + bits 31[Paging
269     Enabled] and 16[Write Protect] of cr0, see    236     Enabled] and 16[Write Protect] of cr0, see Intel x86 doc vol 3,
270     sections 2.5, 3.6.1 and 4.11.3 + note tabl    237     sections 2.5, 3.6.1 and 4.11.3 + note table 4-2 */
271   asm volatile ("movl %0,%%cr3\n\t"               238   asm volatile ("movl %0,%%cr3\n\t"
272                 "movl %%cr0,%%eax\n\t"            239                 "movl %%cr0,%%eax\n\t"
273                 "orl $0x80010000, %%eax\n\t" /    240                 "orl $0x80010000, %%eax\n\t" /* bit 31 | bit 16 */
274                 "movl %%eax,%%cr0\n\t"            241                 "movl %%eax,%%cr0\n\t"
275                 "jmp 1f\n\t"                      242                 "jmp 1f\n\t"
276                 "1:\n\t"                          243                 "1:\n\t"
277                 "movl $2f, %%eax\n\t"             244                 "movl $2f, %%eax\n\t"
278                 "jmp *%%eax\n\t"                  245                 "jmp *%%eax\n\t"
279                 "2:\n\t" ::"r"(cr3):"memory","    246                 "2:\n\t" ::"r"(cr3):"memory","eax");
280                                                   247 
281   /*                                              248   /*
282    * Here, the only memory available is:          249    * Here, the only memory available is:
283    * - The BIOS+video area                        250    * - The BIOS+video area
284    * - the identity_mapping_base .. identity_m    251    * - the identity_mapping_base .. identity_mapping_top area
285    * - the PD mirroring area (4M)                 252    * - the PD mirroring area (4M)
286    * All accesses to other virtual addresses w    253    * All accesses to other virtual addresses will generate a #PF
287    */                                             254    */
288                                                   255 
289   return SOS_OK;                                  256   return SOS_OK;
290 }                                                 257 }
291                                                   258 
292                                                   259 
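Note: the "mirroring" installed just above is the trick the rest of this file depends on: the PD entry covering SOS_PAGING_MIRROR_VADDR points back at the PD itself, so the MMU interprets the PD as a PT for that 4MB window, and PT number i becomes readable at SOS_PAGING_MIRROR_VADDR + i*4kB. A standalone sketch of the arithmetic (SOS_PAGING_MIRROR_VADDR = 0x3fc00000, the last 4MB below 1GB, is an assumed value for concreteness):

    #include <stdio.h>

    #define MIRROR_VADDR 0x3fc00000u /* assumed SOS_PAGING_MIRROR_VADDR */
    #define PAGE_SIZE    4096u

    int main(void)
    {
      unsigned vaddr = 0x40123abc;  /* some address we want the PT of */

      /* Virtual address of the PT covering vaddr, as computed
         by sos_paging_map() below */
      unsigned pt_vaddr = MIRROR_VADDR + PAGE_SIZE * (vaddr >> 22);

      /* Virtual address of the PD itself: the mirror window,
         mirrored once more */
      unsigned pd_vaddr = MIRROR_VADDR + PAGE_SIZE * (MIRROR_VADDR >> 22);

      /* Prints: pt at 0x3fd00000, pd at 0x3fcff000 */
      printf("pt at 0x%08x, pd at 0x%08x\n", pt_vaddr, pd_vaddr);
      return 0;
    }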
293 /* Suppose that the current address is configu    260 /* Suppose that the current address is configured with the mirroring
294  * enabled to access the PD and PT. */            261  * enabled to access the PD and PT. */
295 sos_ret_t sos_paging_map(sos_paddr_t ppage_pad    262 sos_ret_t sos_paging_map(sos_paddr_t ppage_paddr,
296                          sos_vaddr_t vpage_vad    263                          sos_vaddr_t vpage_vaddr,
297                          sos_bool_t is_user_pa    264                          sos_bool_t is_user_page,
298                          sos_ui32_t flags)     !! 265                          int flags)
299 {                                                 266 {
300   /* Get the page directory entry and table en    267   /* Get the page directory entry and table entry index for this
301      address */                                   268      address */
302   unsigned index_in_pd = virt_to_pd_index(vpag    269   unsigned index_in_pd = virt_to_pd_index(vpage_vaddr);
303   unsigned index_in_pt = virt_to_pt_index(vpag    270   unsigned index_in_pt = virt_to_pt_index(vpage_vaddr);
304                                                   271   
305   /* Get the PD of the current context */         272   /* Get the PD of the current context */
306   struct x86_pde *pd = (struct x86_pde*)          273   struct x86_pde *pd = (struct x86_pde*)
307     (SOS_PAGING_MIRROR_VADDR                      274     (SOS_PAGING_MIRROR_VADDR
308      + SOS_PAGE_SIZE*virt_to_pd_index(SOS_PAGI    275      + SOS_PAGE_SIZE*virt_to_pd_index(SOS_PAGING_MIRROR_VADDR));
309                                                   276 
310   /* Address of the PT in the mirroring */        277   /* Address of the PT in the mirroring */
311   struct x86_pte * pt = (struct x86_pte*) (SOS    278   struct x86_pte * pt = (struct x86_pte*) (SOS_PAGING_MIRROR_VADDR
312                                            + S    279                                            + SOS_PAGE_SIZE*index_in_pd);
313                                                   280 
314   SOS_ASSERT_FATAL(SOS_IS_PAGE_ALIGNED(ppage_paddr)); << 
315   SOS_ASSERT_FATAL(SOS_IS_PAGE_ALIGNED(vpage_vaddr)); << 
316                                                << 
317   /* EXEC permission ignored on x86 */         << 
318   flags &= ~SOS_VM_MAP_PROT_EXEC;              << 
319                                                << 
320   /* The mapping of anywhere in the PD mirrori    281   /* The mapping of anywhere in the PD mirroring is FORBIDDEN ;) */
321   if ((vpage_vaddr >= SOS_PAGING_MIRROR_VADDR)    282   if ((vpage_vaddr >= SOS_PAGING_MIRROR_VADDR)
322       && (vpage_vaddr < SOS_PAGING_MIRROR_VADD    283       && (vpage_vaddr < SOS_PAGING_MIRROR_VADDR + SOS_PAGING_MIRROR_SIZE))
323     return -SOS_EINVAL;                           284     return -SOS_EINVAL;
324                                                   285 
325   /* Map a page for the PT if necessary */        286   /* Map a page for the PT if necessary */
326   if (! pd[index_in_pd].present)                  287   if (! pd[index_in_pd].present)
327     {                                             288     {
328       x86_pde_val_t u;                         << 
329                                                << 
330       /* No : allocate a new one */               289       /* No : allocate a new one */
331       sos_paddr_t pt_ppage                        290       sos_paddr_t pt_ppage
332         = sos_physmem_ref_physpage_new(! (flag    291         = sos_physmem_ref_physpage_new(! (flags & SOS_VM_MAP_ATOMIC));
333       if (! pt_ppage)                             292       if (! pt_ppage)
334         {                                         293         {
335           return -SOS_ENOMEM;                     294           return -SOS_ENOMEM;
336         }                                         295         }
337                                                   296 
338       /* Prepare the value of the PDE */       !! 297       pd[index_in_pd].present  = TRUE;
339       u.pde = (struct x86_pde){                !! 298       pd[index_in_pd].write    = 1; /* Ignored in supervisor mode, see
340         .present  = TRUE,                      !! 299                                        Intel vol 3 section 4.12 */
341         .write    = 1,                         !! 300       pd[index_in_pd].user     |= (is_user_page)?1:0;
342         .pt_paddr = ((sos_paddr_t)pt_ppage) >> 12, !! 301       pd[index_in_pd].pt_paddr = ((sos_paddr_t)pt_ppage) >> 12;
343       };                                       << 
344                                                << 
345       /* Is it a PDE concerning the kernel space */ << 
346       if (vpage_vaddr < SOS_PAGING_MIRROR_VADDR) << 
347         {                                      << 
348           /* Yes: So we need to update the PDE of ALL the mm_contexts << 
349              in the system */                  << 
350                                                << 
351           /* First of all: this is a kernel PT */ << 
352           u.pde.user = 0;                      << 
353                                                << 
354           /* Now synchronize all the PD */     << 
355           SOS_ASSERT_FATAL(SOS_OK ==           << 
356                            sos_mm_context_synch_kernel_PDE(index_in_pd, u.ui32)); << 
357                                                << 
358         }                                      << 
359       else /* We should have written "else if (vpage_vaddr >= << 
360               SOS_PAGING_BASE_USER_ADDRESS)" but it is not needed << 
361               because the beginning of the function already << 
362               rejects mapping requests inside the mirroring */ << 
363         {                                      << 
364           /* No: The request concerns the user space. So only the << 
365              current MMU context is concerned */ << 
366                                                << 
367           /* First of all: this is a user PT */ << 
368           u.pde.user = 1;                      << 
369                                                << 
370           /* Now update the current PD */      << 
371           pd[index_in_pd] = u.pde;             << 
372         }                                      << 
373                                                   302       
374       /*                                          303       /*
375        * The PT is now mapped in the PD mirror    304        * The PT is now mapped in the PD mirroring
376        */                                         305        */
377                                                   306 
378       /* Invalidate TLB for the page we just a    307       /* Invalidate TLB for the page we just added */
379       invlpg(pt);                                 308       invlpg(pt);
380                                                   309      
381       /* Reset this new PT */                     310       /* Reset this new PT */
382       memset((void*)pt, 0x0, SOS_PAGE_SIZE);      311       memset((void*)pt, 0x0, SOS_PAGE_SIZE);
383     }                                             312     }
384                                                   313 
385   /* If we allocate a new entry in the PT, increase its occupation !! 314   /* If we allocate a new entry in the PT, increase its reference
386      count. */                                    315      count. */
387   if (! pt[index_in_pt].present)               !! 316   else if (! pt[index_in_pt].present)
388     sos_physmem_inc_physpage_occupation(pd[index_in_pd].pt_paddr << 12); !! 317     sos_physmem_ref_physpage_at(pd[index_in_pd].pt_paddr << 12);
389                                                   318   
390   /* Otherwise, that means that a physical pag    319   /* Otherwise, that means that a physical page is implicitely
391      unmapped */                                  320      unmapped */
392   else                                            321   else
393     sos_physmem_unref_physpage(pt[index_in_pt]    322     sos_physmem_unref_physpage(pt[index_in_pt].paddr << 12);
394                                                   323 
395   /* Map the page in the page table */            324   /* Map the page in the page table */
396   pt[index_in_pt].present = TRUE;                 325   pt[index_in_pt].present = TRUE;
397   pt[index_in_pt].write   = (flags & SOS_VM_MA    326   pt[index_in_pt].write   = (flags & SOS_VM_MAP_PROT_WRITE)?1:0;
398   pt[index_in_pt].user    = (is_user_page)?1:0    327   pt[index_in_pt].user    = (is_user_page)?1:0;
399   pt[index_in_pt].paddr   = ppage_paddr >> 12;    328   pt[index_in_pt].paddr   = ppage_paddr >> 12;
400   sos_physmem_ref_physpage_at(ppage_paddr);       329   sos_physmem_ref_physpage_at(ppage_paddr);
401                                                   330 
402                                                << 
403   /*                                              331   /*
404    * The page is now mapped in the current add    332    * The page is now mapped in the current address space
405    */                                             333    */
406                                                   334   
407   /* Invalidate TLB for the page we just added    335   /* Invalidate TLB for the page we just added */
408   invlpg(vpage_vaddr);                            336   invlpg(vpage_vaddr);
409                                                   337 
410   return SOS_OK;                                  338   return SOS_OK;
411 }                                                 339 }
412                                                   340 
413                                                   341 
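Note: as a usage sketch of sos_paging_map() (a hypothetical kernel-side helper written for illustration, not standalone; it assumes the SOS headers included at the top of this file): map one freshly allocated physical page read/write at a chosen kernel virtual address, then tear it down again.

    /* Hypothetical helper for illustration: map one new physical
       page at vaddr, use it, then unmap it again */
    static sos_ret_t demo_map_one_page(sos_vaddr_t vaddr)
    {
      /* Get a fresh physical page (refcount = 1), allowed to block */
      sos_paddr_t ppage = sos_physmem_ref_physpage_new(TRUE);
      if (! ppage)
        return -SOS_ENOMEM;

      /* Map it: sos_paging_map() takes its own reference on the page */
      if (sos_paging_map(ppage, vaddr, FALSE /* kernel page */,
                         SOS_VM_MAP_PROT_READ | SOS_VM_MAP_PROT_WRITE))
        {
          sos_physmem_unref_physpage(ppage);
          return -SOS_ENOMEM;
        }

      /* Our own reference is no longer needed: the mapping holds one */
      sos_physmem_unref_physpage(ppage);

      memset((void*)vaddr, 0x0, SOS_PAGE_SIZE); /* the page is usable */

      /* sos_paging_unmap() drops the mapping's reference,
         which frees the page */
      return sos_paging_unmap(vaddr);
    }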
414 sos_ret_t sos_paging_unmap(sos_vaddr_t vpage_v    342 sos_ret_t sos_paging_unmap(sos_vaddr_t vpage_vaddr)
415 {                                                 343 {
416   sos_ret_t pt_dec_occupation_retval;          !! 344   sos_ret_t pt_unref_retval;
417                                                   345 
418   /* Get the page directory entry and table en    346   /* Get the page directory entry and table entry index for this
419      address */                                   347      address */
420   unsigned index_in_pd = virt_to_pd_index(vpag    348   unsigned index_in_pd = virt_to_pd_index(vpage_vaddr);
421   unsigned index_in_pt = virt_to_pt_index(vpag    349   unsigned index_in_pt = virt_to_pt_index(vpage_vaddr);
422                                                   350   
423   /* Get the PD of the current context */         351   /* Get the PD of the current context */
424   struct x86_pde *pd = (struct x86_pde*)          352   struct x86_pde *pd = (struct x86_pde*)
425     (SOS_PAGING_MIRROR_VADDR                      353     (SOS_PAGING_MIRROR_VADDR
426      + SOS_PAGE_SIZE*virt_to_pd_index(SOS_PAGI    354      + SOS_PAGE_SIZE*virt_to_pd_index(SOS_PAGING_MIRROR_VADDR));
427                                                   355 
428   /* Address of the PT in the mirroring */        356   /* Address of the PT in the mirroring */
429   struct x86_pte * pt = (struct x86_pte*) (SOS    357   struct x86_pte * pt = (struct x86_pte*) (SOS_PAGING_MIRROR_VADDR
430                                            + S    358                                            + SOS_PAGE_SIZE*index_in_pd);
431                                                   359 
432   SOS_ASSERT_FATAL(SOS_IS_PAGE_ALIGNED(vpage_vaddr)); << 
433                                                << 
434   /* No page mapped at this address ? */          360   /* No page mapped at this address ? */
435   if (! pd[index_in_pd].present)                  361   if (! pd[index_in_pd].present)
436     return -SOS_EINVAL;                           362     return -SOS_EINVAL;
437   if (! pt[index_in_pt].present)                  363   if (! pt[index_in_pt].present)
438     return -SOS_EINVAL;                           364     return -SOS_EINVAL;
439                                                   365 
440   /* The unmapping of anywhere in the PD mirro    366   /* The unmapping of anywhere in the PD mirroring is FORBIDDEN ;) */
441   if ((vpage_vaddr >= SOS_PAGING_MIRROR_VADDR)    367   if ((vpage_vaddr >= SOS_PAGING_MIRROR_VADDR)
442       && (vpage_vaddr < SOS_PAGING_MIRROR_VADD    368       && (vpage_vaddr < SOS_PAGING_MIRROR_VADDR + SOS_PAGING_MIRROR_SIZE))
443     return -SOS_EINVAL;                           369     return -SOS_EINVAL;
444                                                   370 
445   /* Reclaim the physical page */                 371   /* Reclaim the physical page */
446   sos_physmem_unref_physpage(pt[index_in_pt].p    372   sos_physmem_unref_physpage(pt[index_in_pt].paddr << 12);
447                                                   373 
448   /* Unmap the page in the page table */          374   /* Unmap the page in the page table */
449   memset(pt + index_in_pt, 0x0, sizeof(struct     375   memset(pt + index_in_pt, 0x0, sizeof(struct x86_pte));
450                                                   376 
451   /* Invalidate TLB for the page we just unmap    377   /* Invalidate TLB for the page we just unmapped */
452   invlpg(vpage_vaddr);                            378   invlpg(vpage_vaddr);
453                                                   379 
454   /* Reclaim this entry in the PT, which may f    380   /* Reclaim this entry in the PT, which may free the PT */
455   pt_dec_occupation_retval                     !! 381   pt_unref_retval = sos_physmem_unref_physpage(pd[index_in_pd].pt_paddr << 12);
456     = sos_physmem_dec_physpage_occupation(pd[index_in_pd].pt_paddr << 12); !! 382   SOS_ASSERT_FATAL(pt_unref_retval >= 0);
457   SOS_ASSERT_FATAL(pt_dec_occupation_retval >= 0); !! 383   if (pt_unref_retval > 0)
458   if (pt_dec_occupation_retval > 0)            << 
459     /* If the PT is now completely unused... *    384     /* If the PT is now completely unused... */
460     {                                             385     {
461       x86_pde_val_t u;                         !! 386       /* Release the PDE */
462                                                !! 387       memset(pd + index_in_pd, 0x0, sizeof(struct x86_pde));
463                                                << 
464       /*                                       << 
465        * The PT is not referenced by this PD anymore << 
466        */                                      << 
467       sos_physmem_unref_physpage(pd[index_in_pd].pt_paddr << 12); << 
468                                                << 
469                                                << 
470       /*                                       << 
471        * Reset the PDE                         << 
472        */                                      << 
473                                                << 
474       /* Mark the PDE as unavailable */        << 
475       u.ui32 = 0;                              << 
476                                                << 
477       /* Is it a PDE concerning the kernel space */ << 
478       if (vpage_vaddr < SOS_PAGING_MIRROR_VADDR) << 
479         {                                      << 
480           /* Now synchronize all the PD */     << 
481           SOS_ASSERT_FATAL(SOS_OK ==           << 
482                            sos_mm_context_synch_kernel_PDE(index_in_pd, u.ui32)); << 
483                                                << 
484         }                                      << 
485       else /* We should have written "else if (vpage_vaddr >= << 
486               SOS_PAGING_BASE_USER_ADDRESS)" but it is not needed << 
487               because the beginning of the function already << 
488               rejects mapping requests inside the mirroring */ << 
489         {                                      << 
490           /* No: The request concerns the user space. So only the << 
491              current MMU context is concerned */ << 
492           pd[index_in_pd] = u.pde;             << 
493         }                                      << 
494                                                   388       
495       /* Update the TLB */                        389       /* Update the TLB */
496       invlpg(pt);                                 390       invlpg(pt);
497     }                                             391     }
498                                                   392 
499   return SOS_OK;                                  393   return SOS_OK;  
500 }                                                 394 }
501                                                   395 
502                                                   396 
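Note: Articles 5 and 7 differ in how they decide when a PT can be freed. Article 5 (right) reuses the page reference count, while Article 7 (left) keeps a separate per-PT occupation count, the number of allocated PTEs, through sos_physmem_inc/dec_physpage_occupation(). A toy standalone model of that bookkeeping (the return convention, where the decrement reports when the count reaches zero, is an assumption inferred from how pt_dec_occupation_retval is tested above):

    #include <stdio.h>

    /* Toy model: one occupation counter per PT */
    static int occupation[1024];

    static void inc_occupation(int pt) { occupation[pt]++; }

    /* Returns > 0 when the PT just became empty, mirroring how
       sos_paging_unmap() tests pt_dec_occupation_retval > 0 */
    static int dec_occupation(int pt)
    {
      if (occupation[pt] <= 0)
        return -1;                  /* underflow: caller asserts on < 0 */
      occupation[pt]--;
      return (occupation[pt] == 0); /* 1 = PT unused, may be freed */
    }

    int main(void)
    {
      inc_occupation(5);            /* map two pages through PT 5 */
      inc_occupation(5);
      printf("%d\n", dec_occupation(5)); /* 0: PT still in use */
      printf("%d\n", dec_occupation(5)); /* 1: PT empty, free it */
      return 0;
    }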
503 sos_ret_t sos_paging_unmap_interval(sos_vaddr_t vaddr, !! 397 int sos_paging_get_prot(sos_vaddr_t vaddr)
504                                     sos_size_t size) << 
505 {                                                 398 {
506   sos_ret_t retval = 0;                        !! 399   int retval;
507                                                << 
508   if (! SOS_IS_PAGE_ALIGNED(vaddr))            << 
509     return -SOS_EINVAL;                        << 
510   if (! SOS_IS_PAGE_ALIGNED(size))             << 
511     return -SOS_EINVAL;                        << 
512                                                << 
513   for ( ;                                      << 
514         size >= SOS_PAGE_SIZE ;                << 
515         vaddr += SOS_PAGE_SIZE, size -= SOS_PAGE_SIZE) << 
516     if (SOS_OK == sos_paging_unmap(vaddr))     << 
517       retval += SOS_PAGE_SIZE;                 << 
518                                                << 
519   return retval;                               << 
520 }                                              << 
521                                                << 
522                                                << 
523 sos_ui32_t sos_paging_get_prot(sos_vaddr_t vaddr) << 
524 {                                              << 
525   sos_ui32_t retval;                           << 
526                                                   400 
527   /* Get the page directory entry and table en    401   /* Get the page directory entry and table entry index for this
528      address */                                   402      address */
529   unsigned index_in_pd = virt_to_pd_index(vadd    403   unsigned index_in_pd = virt_to_pd_index(vaddr);
530   unsigned index_in_pt = virt_to_pt_index(vadd    404   unsigned index_in_pt = virt_to_pt_index(vaddr);
531                                                   405   
532   /* Get the PD of the current context */         406   /* Get the PD of the current context */
533   struct x86_pde *pd = (struct x86_pde*)          407   struct x86_pde *pd = (struct x86_pde*)
534     (SOS_PAGING_MIRROR_VADDR                      408     (SOS_PAGING_MIRROR_VADDR
535      + SOS_PAGE_SIZE*virt_to_pd_index(SOS_PAGI    409      + SOS_PAGE_SIZE*virt_to_pd_index(SOS_PAGING_MIRROR_VADDR));
536                                                   410 
537   /* Address of the PT in the mirroring */        411   /* Address of the PT in the mirroring */
538   struct x86_pte * pt = (struct x86_pte*) (SOS    412   struct x86_pte * pt = (struct x86_pte*) (SOS_PAGING_MIRROR_VADDR
539                                            + S    413                                            + SOS_PAGE_SIZE*index_in_pd);
540                                                   414 
541   /* No page mapped at this address ? */          415   /* No page mapped at this address ? */
542   if (! pd[index_in_pd].present)                  416   if (! pd[index_in_pd].present)
543     return SOS_VM_MAP_PROT_NONE;                  417     return SOS_VM_MAP_PROT_NONE;
544   if (! pt[index_in_pt].present)                  418   if (! pt[index_in_pt].present)
545     return SOS_VM_MAP_PROT_NONE;                  419     return SOS_VM_MAP_PROT_NONE;
546                                                   420   
547   /* Default access right of an available page    421   /* Default access right of an available page is "read" on x86 */
548   retval = SOS_VM_MAP_PROT_READ;                  422   retval = SOS_VM_MAP_PROT_READ;
549   if (pd[index_in_pd].write && pt[index_in_pt]    423   if (pd[index_in_pd].write && pt[index_in_pt].write)
550     retval |= SOS_VM_MAP_PROT_WRITE;              424     retval |= SOS_VM_MAP_PROT_WRITE;
551                                                   425 
552   return retval;                                  426   return retval;
553 }                                                 427 }
554                                                   428 
555                                                   429 
556 sos_ret_t sos_paging_set_prot(sos_vaddr_t vaddr, << 
557                               sos_ui32_t  new_prot) << 
558 {                                              << 
559   /* Get the page directory entry and table entry index for this << 
560      address */                                << 
561   unsigned index_in_pd = virt_to_pd_index(vaddr); << 
562   unsigned index_in_pt = virt_to_pt_index(vaddr); << 
563                                                << 
564   /* Get the PD of the current context */      << 
565   struct x86_pde *pd = (struct x86_pde*)       << 
566     (SOS_PAGING_MIRROR_VADDR                   << 
567      + SOS_PAGE_SIZE*virt_to_pd_index(SOS_PAGING_MIRROR_VADDR)); << 
568                                                << 
569   /* Address of the PT in the mirroring */     << 
570   struct x86_pte * pt = (struct x86_pte*) (SOS_PAGING_MIRROR_VADDR << 
571                                            + SOS_PAGE_SIZE*index_in_pd); << 
572                                                << 
573   /* EXEC permission ignored on x86 */         << 
574   new_prot &= ~SOS_VM_MAP_PROT_EXEC;           << 
575                                                << 
576   /* Check flags */                            << 
577   if (new_prot & ~(SOS_VM_MAP_PROT_READ | SOS_VM_MAP_PROT_WRITE)) << 
578     return -SOS_EINVAL;                        << 
579   if (! (new_prot & SOS_VM_MAP_PROT_READ))     << 
580     /* x86 READ flag always set by default */  << 
581     return -SOS_ENOSUP;                        << 
582                                                << 
583   /* No page mapped at this address ? */       << 
584   if (! pd[index_in_pd].present)               << 
585     return -SOS_EINVAL;                        << 
586   if (! pt[index_in_pt].present)               << 
587     return -SOS_EINVAL;                        << 
588                                                << 
589   /* Update access rights */                   << 
590   pt[index_in_pt].write = ((new_prot & SOS_VM_MAP_PROT_WRITE)?1:0); << 
591   invlpg(vaddr);                               << 
592                                                << 
593   return SOS_OK;                               << 
594 }                                              << 
595                                                << 
596                                                << 
597 sos_ret_t sos_paging_set_prot_of_interval(sos_vaddr_t vaddr, << 
598                                           sos_size_t  size, << 
599                                           sos_ui32_t  new_prot) << 
600 {                                              << 
601   if (! SOS_IS_PAGE_ALIGNED(vaddr))            << 
602     return -SOS_EINVAL;                        << 
603   if (! SOS_IS_PAGE_ALIGNED(size))             << 
604     return -SOS_EINVAL;                        << 
605                                                << 
606   for ( ; size >= SOS_PAGE_SIZE ; vaddr += SOS_PAGE_SIZE, size -= SOS_PAGE_SIZE) << 
607     sos_paging_set_prot(vaddr, new_prot);      << 
608                                                << 
609   return SOS_OK;                               << 
610 }                                              << 
611                                                << 
612                                                << 
613 sos_paddr_t sos_paging_get_paddr(sos_vaddr_t v    430 sos_paddr_t sos_paging_get_paddr(sos_vaddr_t vaddr)
614 {                                                 431 {
615   /* Get the page directory entry and table en    432   /* Get the page directory entry and table entry index for this
616      address */                                   433      address */
617   unsigned index_in_pd = virt_to_pd_index(vadd    434   unsigned index_in_pd = virt_to_pd_index(vaddr);
618   unsigned index_in_pt = virt_to_pt_index(vadd    435   unsigned index_in_pt = virt_to_pt_index(vaddr);
619   unsigned offset_in_page = virt_to_page_offse    436   unsigned offset_in_page = virt_to_page_offset(vaddr);
620                                                   437   
621   /* Get the PD of the current context */         438   /* Get the PD of the current context */
622   struct x86_pde *pd = (struct x86_pde*)          439   struct x86_pde *pd = (struct x86_pde*)
623     (SOS_PAGING_MIRROR_VADDR                      440     (SOS_PAGING_MIRROR_VADDR
624      + SOS_PAGE_SIZE*virt_to_pd_index(SOS_PAGI    441      + SOS_PAGE_SIZE*virt_to_pd_index(SOS_PAGING_MIRROR_VADDR));
625                                                   442 
626   /* Address of the PT in the mirroring */        443   /* Address of the PT in the mirroring */
627   struct x86_pte * pt = (struct x86_pte*) (SOS    444   struct x86_pte * pt = (struct x86_pte*) (SOS_PAGING_MIRROR_VADDR
628                                            + S    445                                            + SOS_PAGE_SIZE*index_in_pd);
629                                                   446 
630   /* No page mapped at this address ? */          447   /* No page mapped at this address ? */
631   if (! pd[index_in_pd].present)                  448   if (! pd[index_in_pd].present)
632     return (sos_paddr_t)NULL;                     449     return (sos_paddr_t)NULL;
633   if (! pt[index_in_pt].present)                  450   if (! pt[index_in_pt].present)
634     return (sos_paddr_t)NULL;                     451     return (sos_paddr_t)NULL;
635                                                   452 
636   return (pt[index_in_pt].paddr << 12) + offse    453   return (pt[index_in_pt].paddr << 12) + offset_in_page;
637 }                                              << 
638                                                << 
639                                                << 
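Note: sos_paging_get_paddr() recombines what the macros near the top of the file split apart: the translated address is the PTE's 20-bit frame number shifted left 12 bits, plus the untouched page offset. A standalone worked example (the PTE value is made up for illustration):

    #include <stdio.h>

    int main(void)
    {
      unsigned vaddr     = 0x40123abc; /* virtual address to translate */
      unsigned pte_paddr = 0x00765;    /* made-up 20-bit frame number */

      unsigned offset = vaddr & 0xfff;            /* virt_to_page_offset */
      unsigned paddr  = (pte_paddr << 12) + offset;

      /* Prints: paddr = 0x00765abc */
      printf("paddr = 0x%08x\n", paddr);
      return 0;
    }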
640 /* ********************************************************** << 
641  * Functions restricted to mm_context module   << 
642  */                                            << 
643                                                << 
644                                                << 
645 sos_paddr_t sos_paging_get_current_PD_paddr()  << 
646 {                                              << 
647   struct x86_pdbr pdbr;                        << 
648   asm volatile("movl %%cr3, %0\n": "=r"(pdbr)); << 
649   return (pdbr.pd_paddr << 12);                << 
650 }                                              << 
651                                                << 
652                                                << 
653 sos_ret_t sos_paging_set_current_PD_paddr(sos_paddr_t paddr_PD) << 
654 {                                              << 
655   struct x86_pdbr pdbr;                        << 
656                                                << 
657   SOS_ASSERT_FATAL(paddr_PD != 0);             << 
658   SOS_ASSERT_FATAL(SOS_IS_PAGE_ALIGNED(paddr_PD)); << 
659                                                << 
660   /* Setup the value of the PDBR */            << 
661   memset(& pdbr, 0x0, sizeof(struct x86_pdbr)); << 
662   pdbr.pd_paddr = (paddr_PD >> 12);            << 
663                                                << 
664   /* Configure the MMU according to the PDBR */ << 
665   asm volatile ("movl %0,%%cr3\n" ::"r"(pdbr)); << 
666                                                << 
667   return SOS_OK;                               << 
668 }                                              << 
669                                                << 
670                                                << 
671 sos_ret_t sos_paging_dispose(sos_vaddr_t vaddr_PD) << 
672 {                                              << 
673   x86_pde_val_t *pd = (x86_pde_val_t*) vaddr_PD; << 
674   x86_pte_val_t *pt;                           << 
675   int           index_in_pd;                   << 
676                                                << 
677   /* Allocate 1 page in kernel space to map the PTs in order to << 
678      unreference the physical pages they reference */ << 
679   pt = (x86_pte_val_t *)sos_kmem_vmm_alloc(1, 0); << 
680   if (! pt)                                    << 
681     return -SOS_ENOMEM;                        << 
682                                                << 
683   /* (Nothing to do in kernel space) */        << 
684                                                << 
685   /* Reset all the PTs in user space */        << 
686   for (index_in_pd = (SOS_PAGING_BASE_USER_ADDRESS >> 22) ; << 
687        index_in_pd < 1024 ; /* 1 PDE = 1 PT    << 
688                                = 1024 Pages    << 
689                                = 4MB */        << 
690        index_in_pd ++)                         << 
691     {                                          << 
692       sos_paddr_t paddr_pt = (pd[index_in_pd].pde.pt_paddr << 12); << 
693       int index_in_pt;                         << 
694                                                << 
695       /* Nothing to do if there is no PT */    << 
696       if (! pd[index_in_pd].pde.present)       << 
697         {                                      << 
698           pd[index_in_pd].ui32 = 0;            << 
699           continue;                            << 
700         }                                      << 
701                                                << 
702       /* Map this PT inside kernel */          << 
703       SOS_ASSERT_FATAL(SOS_OK                  << 
704                        == sos_paging_map(paddr_pt, << 
705                                          (sos_vaddr_t)pt, FALSE, << 
706                                          SOS_VM_MAP_PROT_READ << 
707                                          | SOS_VM_MAP_PROT_WRITE)); << 
708                                                << 
709       /* Reset all the mappings in this PT */  << 
710       for (index_in_pt = 0 ; index_in_pt < 1024 ; index_in_pt ++) << 
711         {                                      << 
712           /* Ignore unmapped PTE */            << 
713           if (! pt[index_in_pt].pte.present)   << 
714             {                                  << 
715               pt[index_in_pt].ui32 = 0;        << 
716               continue;                        << 
717             }                                  << 
718                                                << 
719           /* Unreference the associated page */ << 
720           sos_physmem_unref_physpage(pt[index_in_pt].pte.paddr << 12); << 
721                                                << 
722           /* Decrease occupation count of the PT */ << 
723           sos_physmem_dec_physpage_occupation(paddr_pt); << 
724                                                << 
725           /* Reset PTE */                      << 
726           pt[index_in_pt].ui32 = 0;            << 
727         }                                      << 
728                                                << 
729       /* Unmap PT */                           << 
730       SOS_ASSERT_FATAL(SOS_OK == sos_paging_unmap((sos_vaddr_t)pt)); << 
731                                                << 
732       /* Reset PDE */                          << 
733       pd[index_in_pd].ui32 = 0;                << 
734                                                << 
735       /* Unreference PT */                     << 
736       sos_physmem_unref_physpage(paddr_pt);    << 
737     }                                          << 
738                                                << 
739   /* Unallocate kernel space used for the temporary PT mapping */ << 
740   SOS_ASSERT_FATAL(SOS_OK == sos_kmem_vmm_free((sos_vaddr_t)pt)); << 
741                                                << 
742   return SOS_OK;                               << 
743 }                                              << 
744                                                << 
745                                                << 
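Note: sos_paging_dispose() above walks only the user part of the PD, starting at index SOS_PAGING_BASE_USER_ADDRESS >> 22 and stopping at 1023, since each PDE covers 4MB. Assuming user space starts at 1GB (0x40000000, consistent with the kernel/mirror layout in this file), that is indices 256 through 1023, i.e. 3GB of user space. A standalone check of that arithmetic:

    #include <stdio.h>

    #define BASE_USER_ADDRESS 0x40000000u /* assumed: user space at 1GB */

    int main(void)
    {
      unsigned first = BASE_USER_ADDRESS >> 22;  /* first user PDE */
      unsigned span  = (1024 - first) * 4u;      /* 4MB per PDE */

      /* Prints: user PDEs 256..1023 cover 3072 MB */
      printf("user PDEs %u..1023 cover %u MB\n", first, span);
      return 0;
    }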
746 sos_ret_t sos_paging_copy_kernel_space(sos_vaddr_t dest_vaddr_PD, << 
747                                        sos_vaddr_t src_vaddr_PD) << 
748 {                                              << 
749   x86_pde_val_t *src_pd       = (x86_pde_val_t*) src_vaddr_PD; << 
750   x86_pde_val_t *dest_pd      = (x86_pde_val_t*) dest_vaddr_PD; << 
751   sos_paddr_t   dest_paddr_PD = sos_paging_get_paddr(dest_vaddr_PD); << 
752   x86_pde_val_t mirror_pde;                    << 
753   int           index_in_pd;                   << 
754                                                << 
755   /* Fill destination PD with zeros */         << 
756   memset((void*)dest_vaddr_PD, 0x0, SOS_PAGE_SIZE); << 
757                                                << 
758   /* Synchronize it with the master Kernel MMU context. Stop << 
759      before the mirroring ! */                 << 
760   for (index_in_pd = 0 ;                       << 
761        index_in_pd < (SOS_PAGING_MIRROR_VADDR << 
762                       >> 22) ;                << 
763                                                << 
764        index_in_pd ++)                         << 
765     {                                          << 
766       /* Copy the master's configuration */    << 
767       dest_pd[index_in_pd].ui32 = src_pd[index_in_pd].ui32; << 
768                                                << 
769       /* We DON'T mark the underlying PT and pages as referenced << 
770          because all the PD are equivalent in the kernel space: as << 
771          soon as a page is mapped in the kernel space, it is mapped in all the << 
772          address spaces, and as soon as it is unmapped in one address << 
773          space, it is unmapped in all the other ones. In all the << 
774          address spaces, the reference counters count the mappings << 
775          and not something else: using the reference counters here << 
776          won't be of any use and would consume time */ << 
777     }                                          << 
778                                                << 
779   /* Setup the mirroring for the new address space */ << 
780   mirror_pde.ui32 = 0;                         << 
781   mirror_pde.pde.present  = TRUE;              << 
782   mirror_pde.pde.write    = 1;                 << 
783   mirror_pde.pde.user     = 0; /* This is a KERNEL PDE */ << 
784   mirror_pde.pde.pt_paddr = (dest_paddr_PD >> 12); << 
785   dest_pd[SOS_PAGING_MIRROR_VADDR >> 22].ui32 = mirror_pde.ui32; << 
786                                                << 
787   return SOS_OK;                               << 
788 }                                                 454 }
789                                                   455 
                                                      
