Diff markup: paging.c (the new version is shown; code added since the previous version is flagged "New in this version" in the comments)

#include <sos/physmem.h>
#include <sos/klibc.h>
#include <sos/assert.h>

#include "mm_context.h"
#include "kmem_vmm.h"   /* for sos_kmem_vmm_alloc()/free(); include path assumed */

#include "paging.h"

/* The BIOS and video memory area, identity-mapped below (assumed
   values: the legacy PC VGA/BIOS hole) */
#define BIOS_N_VIDEO_START 0xa0000
#define BIOS_N_VIDEO_END   0x100000

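/*
 * An x86 page directory entry (PDE). The bitfield layout follows the
 * IA-32 specification; pt_paddr holds the physical page frame number
 * of the page table (its physical address shifted right by 12).
 */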
struct x86_pde
{
  sos_ui32_t present        :1;
  sos_ui32_t write          :1;
  sos_ui32_t user           :1;
  sos_ui32_t write_through  :1;
  sos_ui32_t cache_disabled :1;
  sos_ui32_t accessed       :1;
  sos_ui32_t zero           :1;
  sos_ui32_t page_size      :1;
  sos_ui32_t global_page    :1;
  sos_ui32_t custom         :3;
  sos_ui32_t pt_paddr       :20;
} __attribute__ ((packed));

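/*
 * A PDE seen either as a structured bitfield or as a raw 32-bit
 * value, so that a whole entry can be read or written in one access.
 */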
typedef union {
  struct x86_pde pde;
  sos_ui32_t ui32;
} x86_pde_val_t;

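/*
 * An x86 page table entry (PTE). Compared to a PDE, it carries a
 * "dirty" bit, set by the MMU on each write access to the page.
 */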
struct x86_pte
{
  sos_ui32_t present        :1;
  sos_ui32_t write          :1;
  sos_ui32_t user           :1;
  sos_ui32_t write_through  :1;
  sos_ui32_t cache_disabled :1;
  sos_ui32_t accessed       :1;
  sos_ui32_t dirty          :1;
  sos_ui32_t zero           :1;
  sos_ui32_t global_page    :1;
  sos_ui32_t custom         :3;
  sos_ui32_t paddr          :20;
} __attribute__ ((packed));

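/*
 * A PTE seen either as a structured bitfield or as a raw 32-bit
 * value.
 */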
typedef union {
  struct x86_pte pte;
  sos_ui32_t ui32;
} x86_pte_val_t;

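/*
 * The page directory base register (PDBR), ie the format of the CR3
 * control register: the physical page frame number of the current
 * page directory plus its caching policy.
 */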
struct x86_pdbr
{
  sos_ui32_t zero1          :3;
  sos_ui32_t write_through  :1;
  sos_ui32_t cache_disabled :1;
  sos_ui32_t zero2          :7;
  sos_ui32_t pd_paddr       :20;
} __attribute__ ((packed));

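/*
 * Invalidate the TLB entry for the page containing the given virtual
 * address (486+ instruction).
 */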
#define invlpg(vaddr) \
  do { \
    __asm__ __volatile__("invlpg %0"::"m"(*((unsigned *)(vaddr)))); \
  } while(0)

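/*
 * Flush all non-global TLB entries by reloading CR3 with its current
 * value.
 */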
#define flush_tlb() \
  do { \
    unsigned long tmpreg; \
    asm volatile("movl %%cr3,%0\n\tmovl %0,%%cr3" \
                 :"=r"(tmpreg) : :"memory"); \
  } while (0)

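/* Index of the PDE covering the given virtual address (top 10 bits) */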
#define virt_to_pd_index(vaddr) \
  (((unsigned)(vaddr)) >> 22)

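/* Index of the PTE inside its page table (next 10 bits) */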
#define virt_to_pt_index(vaddr) \
  ( (((unsigned)(vaddr)) >> 12) & 0x3ff )

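/* Offset of the address inside its page (low 12 bits) */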
#define virt_to_page_offset(vaddr) \
  (((unsigned)(vaddr)) & SOS_PAGE_MASK)

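/*
 * Map the given physical page at the given virtual address in the
 * page directory being built. Used only during setup, while paging
 * is still disabled, so physical addresses can be dereferenced
 * directly.
 */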
static sos_ret_t paging_setup_map_helper(struct x86_pde * pd,
                                         sos_paddr_t ppage,
                                         sos_vaddr_t vaddr)
{
  /* Get the page directory/table entries for this virtual address */
  unsigned index_in_pd = virt_to_pd_index(vaddr);
  unsigned index_in_pt = virt_to_pt_index(vaddr);

  /* Get the PT of the page, allocating one if none is present yet */
  struct x86_pte * pt;
  if (pd[index_in_pd].present)
    {
      /* Paging is not enabled yet: the PT physical address is usable
         as-is */
      pt = (struct x86_pte*) (pd[index_in_pd].pt_paddr << 12);

      /* This helper must not be used to remap an already-mapped page */
      if (pt[index_in_pt].present)
        SOS_ASSERT_FATAL(FALSE);
    }
  else
    {
      /* No PT for this PDE yet: allocate a new one */
      pt = (struct x86_pte*) sos_physmem_ref_physpage_new(FALSE);
      if (! pt)
        return -SOS_ENOMEM;

      memset((void*)pt, 0x0, SOS_PAGE_SIZE);

      pd[index_in_pd].present  = TRUE;
      pd[index_in_pd].write    = 1;
      /* Store the page frame number of the new PT */
      pd[index_in_pd].pt_paddr = ((sos_paddr_t)pt) >> 12;
    }

  /* Map the page as a supervisor, writable page */
  pt[index_in_pt].present = 1;
  pt[index_in_pt].write   = 1;
  pt[index_in_pt].user    = 0;
  pt[index_in_pt].paddr   = ppage >> 12;

  /* One more page is mapped through this PT */
  sos_physmem_inc_physpage_occupation((sos_paddr_t)pt);

  return SOS_OK;
}

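/*
 * Build the initial kernel page directory: identity-map the given
 * physical range plus the BIOS/video area, install the mirror PDE,
 * then enable paging.
 */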
sos_ret_t sos_paging_subsystem_setup(sos_paddr_t identity_mapping_base,
                                     sos_paddr_t identity_mapping_top)
{
  /* The PDBR we will load into the CR3 register */
  struct x86_pdbr cr3;

  /* Allocate a physical page for the kernel page directory */
  struct x86_pde * pd
    = (struct x86_pde*) sos_physmem_ref_physpage_new(FALSE);
  sos_paddr_t paddr;

  if (! pd)
    return -SOS_ENOMEM;

  /* All the PDEs are initially marked "not present" */
  memset((void*)pd, 0x0, SOS_PAGE_SIZE);

  /* Identity-map the given physical address range */
  for (paddr = identity_mapping_base ;
       paddr < identity_mapping_top ;
       paddr += SOS_PAGE_SIZE)
    {
      if (paging_setup_map_helper(pd, paddr, paddr))
        return -SOS_ENOMEM;
    }

  /* Identity-map the BIOS/video area */
  for (paddr = BIOS_N_VIDEO_START ;
       paddr < BIOS_N_VIDEO_END ;
       paddr += SOS_PAGE_SIZE)
    {
      if (paging_setup_map_helper(pd, paddr, paddr))
        return -SOS_ENOMEM;
    }

  /* Install the "mirror" PDE: the PD serves as its own page table,
     so that the PD and every PT become reachable through virtual
     addresses inside the 4MB region at SOS_PAGING_MIRROR_VADDR */
  pd[virt_to_pd_index(SOS_PAGING_MIRROR_VADDR)].present = TRUE;
  pd[virt_to_pd_index(SOS_PAGING_MIRROR_VADDR)].write   = 1;
  pd[virt_to_pd_index(SOS_PAGING_MIRROR_VADDR)].user    = 0;
  pd[virt_to_pd_index(SOS_PAGING_MIRROR_VADDR)].pt_paddr
    = ((sos_paddr_t)pd) >> 12;

  /* Build the PDBR for this PD */
  memset(& cr3, 0x0, sizeof(struct x86_pdbr));
  cr3.pd_paddr = ((sos_paddr_t)pd) >> 12;

  /* Load CR3 and enable paging: set CR0.PG (bit 31) and CR0.WP
     (bit 16), then flush the prefetch queue with a couple of jumps */
  asm volatile ("movl %0,%%cr3\n\t"
                "movl %%cr0,%%eax\n\t"
                "orl $0x80010000, %%eax\n\t"
                "movl %%eax,%%cr0\n\t"
                "jmp 1f\n\t"
                "1:\n\t"
                "movl $2f, %%eax\n\t"
                "jmp *%%eax\n\t"
                "2:\n\t" ::"r"(cr3):"memory","eax");

  return SOS_OK;
}

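/*
 * Map one physical page at one virtual page of the current address
 * space, allocating the intermediate page table if needed.
 */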
sos_ret_t sos_paging_map(sos_paddr_t ppage_paddr,
                         sos_vaddr_t vpage_vaddr,
                         sos_bool_t is_user_page,
                         sos_ui32_t flags)
{
  /* Get the page directory/table entries for this virtual address */
  unsigned index_in_pd = virt_to_pd_index(vpage_vaddr);
  unsigned index_in_pt = virt_to_pt_index(vpage_vaddr);

  /* The current PD, as seen through the mirror region */
  struct x86_pde *pd = (struct x86_pde*)
    (SOS_PAGING_MIRROR_VADDR
     + SOS_PAGE_SIZE*virt_to_pd_index(SOS_PAGING_MIRROR_VADDR));

  /* The PT of the page, as seen through the mirror region */
  struct x86_pte * pt = (struct x86_pte*) (SOS_PAGING_MIRROR_VADDR
                                           + SOS_PAGE_SIZE*index_in_pd);

  SOS_ASSERT_FATAL(SOS_IS_PAGE_ALIGNED(ppage_paddr));
  SOS_ASSERT_FATAL(SOS_IS_PAGE_ALIGNED(vpage_vaddr));

  /* EXEC permission is not supported by x86 32-bit paging */
  flags &= ~SOS_VM_MAP_PROT_EXEC;

  /* The mirror region must not be remapped by hand */
  if ((vpage_vaddr >= SOS_PAGING_MIRROR_VADDR)
      && (vpage_vaddr < SOS_PAGING_MIRROR_VADDR + SOS_PAGING_MIRROR_SIZE))
    return -SOS_EINVAL;

  /* Allocate a new PT if there is none for this PDE yet */
  if (! pd[index_in_pd].present)
    {
      x86_pde_val_t u;

      sos_paddr_t pt_ppage
        = sos_physmem_ref_physpage_new(! (flags & SOS_VM_MAP_ATOMIC));
      if (! pt_ppage)
        {
          return -SOS_ENOMEM;
        }

      /* Build the new PDE as a single 32-bit value so that it can be
         installed atomically */
      u.pde = (struct x86_pde){
        .present  = TRUE,
        .write    = 1,
        .pt_paddr = ((sos_paddr_t)pt_ppage) >> 12
      };

      if (vpage_vaddr < SOS_PAGING_MIRROR_VADDR)
        {
          /* Kernel-space PDE: propagate it to the PD of every
             address space in the system */
          u.pde.user = 0;
          SOS_ASSERT_FATAL(SOS_OK ==
                           sos_mm_context_synch_kernel_PDE(index_in_pd,
                                                           u.ui32));
        }
      else
        {
          /* User-space PDE: only the current PD is concerned */
          u.pde.user = 1;
          pd[index_in_pd] = u.pde;
        }

      /* The mirror region now exposes the new PT at address pt:
         invalidate any stale TLB entry and clear the PT */
      invlpg(pt);
      memset((void*)pt, 0x0, SOS_PAGE_SIZE);
    }

  /* A new page is mapped through this PT... */
  if (! pt[index_in_pt].present)
    sos_physmem_inc_physpage_occupation(pd[index_in_pd].pt_paddr << 12);
  /* ...or an existing mapping is replaced: release the old page */
  else
    sos_physmem_unref_physpage(pt[index_in_pt].paddr << 12);

  pt[index_in_pt].present = TRUE;
  pt[index_in_pt].write   = (flags & SOS_VM_MAP_PROT_WRITE)?1:0;
  pt[index_in_pt].user    = (is_user_page)?1:0;
  pt[index_in_pt].paddr   = ppage_paddr >> 12;
  sos_physmem_ref_physpage_at(ppage_paddr);

  /* The mapping changed: invalidate the TLB entry for the page */
  invlpg(vpage_vaddr);

  return SOS_OK;
}

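/*
 * Undo a mapping established by sos_paging_map(), releasing the page
 * table once its last mapping is gone.
 */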
sos_ret_t sos_paging_unmap(sos_vaddr_t vpage_vaddr)
{
  sos_ret_t pt_dec_occupation_retval;

  /* Get the page directory/table entries for this virtual address */
  unsigned index_in_pd = virt_to_pd_index(vpage_vaddr);
  unsigned index_in_pt = virt_to_pt_index(vpage_vaddr);

  /* The current PD and the PT of the page, through the mirror */
  struct x86_pde *pd = (struct x86_pde*)
    (SOS_PAGING_MIRROR_VADDR
     + SOS_PAGE_SIZE*virt_to_pd_index(SOS_PAGING_MIRROR_VADDR));
  struct x86_pte * pt = (struct x86_pte*) (SOS_PAGING_MIRROR_VADDR
                                           + SOS_PAGE_SIZE*index_in_pd);

  SOS_ASSERT_FATAL(SOS_IS_PAGE_ALIGNED(vpage_vaddr));

  /* The page must actually be mapped */
  if (! pd[index_in_pd].present)
    return -SOS_EINVAL;
  if (! pt[index_in_pt].present)
    return -SOS_EINVAL;

  /* The mirror region must not be unmapped by hand */
  if ((vpage_vaddr >= SOS_PAGING_MIRROR_VADDR)
      && (vpage_vaddr < SOS_PAGING_MIRROR_VADDR + SOS_PAGING_MIRROR_SIZE))
    return -SOS_EINVAL;

  /* Release the mapped page and reset its PTE */
  sos_physmem_unref_physpage(pt[index_in_pt].paddr << 12);
  memset(pt + index_in_pt, 0x0, sizeof(struct x86_pte));
  invlpg(vpage_vaddr);

  /* One page less is mapped through this PT; a positive return value
     from the physmem module means the PT no longer maps anything and
     can itself be released */
  pt_dec_occupation_retval
    = sos_physmem_dec_physpage_occupation(pd[index_in_pd].pt_paddr << 12);
  SOS_ASSERT_FATAL(pt_dec_occupation_retval >= 0);
  if (pt_dec_occupation_retval > 0)
    {
      x86_pde_val_t u;

      sos_physmem_unref_physpage(pd[index_in_pd].pt_paddr << 12);

      /* Reset the PDE, again as an atomic 32-bit write */
      u.ui32 = 0;

      if (vpage_vaddr < SOS_PAGING_MIRROR_VADDR)
        {
          /* Kernel-space PDE: propagate the removal to every PD */
          SOS_ASSERT_FATAL(SOS_OK ==
                           sos_mm_context_synch_kernel_PDE(index_in_pd,
                                                           u.ui32));
        }
      else
        {
          /* User-space PDE: only the current PD is concerned */
          pd[index_in_pd] = u.pde;
        }

      /* The PT is no longer reachable through the mirror */
      invlpg(pt);
    }

  return SOS_OK;
}

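/*
 * Unmap a whole page-aligned interval; the return value is the
 * number of bytes actually unmapped.
 */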
sos_ret_t sos_paging_unmap_interval(sos_vaddr_t vaddr,
                                    sos_size_t size)
{
  sos_ret_t retval = 0;

  if (! SOS_IS_PAGE_ALIGNED(vaddr))
    return -SOS_EINVAL;
  if (! SOS_IS_PAGE_ALIGNED(size))
    return -SOS_EINVAL;

  /* Count only the pages that were actually unmapped */
  for ( ;
        size >= SOS_PAGE_SIZE ;
        vaddr += SOS_PAGE_SIZE, size -= SOS_PAGE_SIZE)
    if (SOS_OK == sos_paging_unmap(vaddr))
      retval += SOS_PAGE_SIZE;

  return retval;
}

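/*
 * Return the SOS_VM_MAP_PROT_* access rights of the given virtual
 * address in the current address space.
 */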
sos_ui32_t sos_paging_get_prot(sos_vaddr_t vaddr)
{
  sos_ui32_t retval;

  /* Get the page directory/table entries for this virtual address */
  unsigned index_in_pd = virt_to_pd_index(vaddr);
  unsigned index_in_pt = virt_to_pt_index(vaddr);

  /* The current PD and the PT of the page, through the mirror */
  struct x86_pde *pd = (struct x86_pde*)
    (SOS_PAGING_MIRROR_VADDR
     + SOS_PAGE_SIZE*virt_to_pd_index(SOS_PAGING_MIRROR_VADDR));
  struct x86_pte * pt = (struct x86_pte*) (SOS_PAGING_MIRROR_VADDR
                                           + SOS_PAGE_SIZE*index_in_pd);

  /* An unmapped page has no access rights */
  if (! pd[index_in_pd].present)
    return SOS_VM_MAP_PROT_NONE;
  if (! pt[index_in_pt].present)
    return SOS_VM_MAP_PROT_NONE;

  /* A mapped page is always readable; it is writable only when both
     its PDE and its PTE allow writing */
  retval = SOS_VM_MAP_PROT_READ;
  if (pd[index_in_pd].write && pt[index_in_pt].write)
    retval |= SOS_VM_MAP_PROT_WRITE;

  return retval;
}

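/*
 * Change the access rights of an already-mapped virtual page.
 */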
sos_ret_t sos_paging_set_prot(sos_vaddr_t vaddr,
                              sos_ui32_t new_prot)
{
  /* Get the page directory/table entries for this virtual address */
  unsigned index_in_pd = virt_to_pd_index(vaddr);
  unsigned index_in_pt = virt_to_pt_index(vaddr);

  /* The current PD and the PT of the page, through the mirror */
  struct x86_pde *pd = (struct x86_pde*)
    (SOS_PAGING_MIRROR_VADDR
     + SOS_PAGE_SIZE*virt_to_pd_index(SOS_PAGING_MIRROR_VADDR));
  struct x86_pte * pt = (struct x86_pte*) (SOS_PAGING_MIRROR_VADDR
                                           + SOS_PAGE_SIZE*index_in_pd);

  /* EXEC permission is not supported by x86 32-bit paging */
  new_prot &= ~SOS_VM_MAP_PROT_EXEC;

  if (new_prot & ~(SOS_VM_MAP_PROT_READ | SOS_VM_MAP_PROT_WRITE))
    return -SOS_EINVAL;
  /* Write-only mappings cannot be expressed on x86 */
  if (! (new_prot & SOS_VM_MAP_PROT_READ))
    return -SOS_ENOSUP;

  /* The page must actually be mapped */
  if (! pd[index_in_pd].present)
    return -SOS_EINVAL;
  if (! pt[index_in_pt].present)
    return -SOS_EINVAL;

  pt[index_in_pt].write = ((new_prot & SOS_VM_MAP_PROT_WRITE) != 0);
  invlpg(vaddr);

  return SOS_OK;
}

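/*
 * Apply sos_paging_set_prot() to a whole page-aligned interval.
 */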
sos_ret_t sos_paging_set_prot_of_interval(sos_vaddr_t vaddr,
                                          sos_size_t size,
                                          sos_ui32_t new_prot)
{
  if (! SOS_IS_PAGE_ALIGNED(vaddr))
    return -SOS_EINVAL;
  if (! SOS_IS_PAGE_ALIGNED(size))
    return -SOS_EINVAL;

  for ( ; size >= SOS_PAGE_SIZE ; vaddr += SOS_PAGE_SIZE, size -= SOS_PAGE_SIZE)
    sos_paging_set_prot(vaddr, new_prot);

  return SOS_OK;
}

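/*
 * New in this version: report whether the MMU has marked the page
 * containing vaddr as written to (dirty).
 */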
sos_bool_t sos_paging_is_dirty(sos_vaddr_t vaddr)
{
  /* Get the page directory/table entries for this virtual address */
  unsigned index_in_pd = virt_to_pd_index(vaddr);
  unsigned index_in_pt = virt_to_pt_index(vaddr);

  /* The current PD and the PT of the page, through the mirror */
  struct x86_pde *pd = (struct x86_pde*)
    (SOS_PAGING_MIRROR_VADDR
     + SOS_PAGE_SIZE*virt_to_pd_index(SOS_PAGING_MIRROR_VADDR));
  struct x86_pte * pt = (struct x86_pte*) (SOS_PAGING_MIRROR_VADDR
                                           + SOS_PAGE_SIZE*index_in_pd);

  /* An unmapped page cannot be dirty */
  if (! pd[index_in_pd].present)
    return FALSE;
  if (! pt[index_in_pt].present)
    return FALSE;

  return (pt[index_in_pt].dirty != 0);
}

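/*
 * New in this version: force the dirty bit of the page containing
 * vaddr to the given value.
 */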
sos_ret_t sos_paging_set_dirty(sos_vaddr_t vaddr,
                               sos_bool_t is_dirty)
{
  /* Get the page directory/table entries for this virtual address */
  unsigned index_in_pd = virt_to_pd_index(vaddr);
  unsigned index_in_pt = virt_to_pt_index(vaddr);

  /* The current PD and the PT of the page, through the mirror */
  struct x86_pde *pd = (struct x86_pde*)
    (SOS_PAGING_MIRROR_VADDR
     + SOS_PAGE_SIZE*virt_to_pd_index(SOS_PAGING_MIRROR_VADDR));
  struct x86_pte * pt = (struct x86_pte*) (SOS_PAGING_MIRROR_VADDR
                                           + SOS_PAGE_SIZE*index_in_pd);

  /* The page must actually be mapped */
  if (! pd[index_in_pd].present)
    return -SOS_EFAULT;
  if (! pt[index_in_pt].present)
    return -SOS_EFAULT;

  pt[index_in_pt].dirty = is_dirty;
  return SOS_OK;
}

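/*
 * Translate a virtual address of the current address space into its
 * physical address, or NULL when it is not mapped.
 */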
sos_paddr_t sos_paging_get_paddr(sos_vaddr_t vaddr)
{
  /* Get the page directory/table entries for this virtual address */
  unsigned index_in_pd    = virt_to_pd_index(vaddr);
  unsigned index_in_pt    = virt_to_pt_index(vaddr);
  unsigned offset_in_page = virt_to_page_offset(vaddr);

  /* The current PD and the PT of the page, through the mirror */
  struct x86_pde *pd = (struct x86_pde*)
    (SOS_PAGING_MIRROR_VADDR
     + SOS_PAGE_SIZE*virt_to_pd_index(SOS_PAGING_MIRROR_VADDR));
  struct x86_pte * pt = (struct x86_pte*) (SOS_PAGING_MIRROR_VADDR
                                           + SOS_PAGE_SIZE*index_in_pd);

  /* An unmapped address has no physical translation */
  if (! pd[index_in_pd].present)
    return (sos_paddr_t)NULL;
  if (! pt[index_in_pt].present)
    return (sos_paddr_t)NULL;

  return (pt[index_in_pt].paddr << 12) + offset_in_page;
}

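/* Physical address of the page directory currently loaded in CR3 */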
sos_paddr_t sos_paging_get_current_PD_paddr(void)
{
  struct x86_pdbr pdbr;
  asm volatile("movl %%cr3, %0\n": "=r"(pdbr));
  return (pdbr.pd_paddr << 12);
}

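/*
 * Switch to another address space by loading its page directory into
 * CR3; this also flushes the non-global TLB entries.
 */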
sos_ret_t sos_paging_set_current_PD_paddr(sos_paddr_t paddr_PD)
{
  struct x86_pdbr pdbr;

  SOS_ASSERT_FATAL(paddr_PD != 0);
  SOS_ASSERT_FATAL(SOS_IS_PAGE_ALIGNED(paddr_PD));

  /* Build the PDBR and load it into CR3 */
  memset(& pdbr, 0x0, sizeof(struct x86_pdbr));
  pdbr.pd_paddr = (paddr_PD >> 12);

  asm volatile ("movl %0,%%cr3\n" ::"r"(pdbr));

  return SOS_OK;
}

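/*
 * Tear down the user-space mappings of an address space before its
 * page directory is destroyed, releasing every mapped page and every
 * user page table.
 */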
sos_ret_t sos_paging_dispose(sos_vaddr_t vaddr_PD)
{
  x86_pde_val_t *pd = (x86_pde_val_t*) vaddr_PD;
  x86_pte_val_t *pt;
  int index_in_pd;

  /* Allocate one page of kernel virtual addresses (no mapping yet):
     each user PT will be temporarily mapped there in turn */
  pt = (x86_pte_val_t *)sos_kmem_vmm_alloc(1, 0);
  if (! pt)
    return -SOS_ENOMEM;

  /* Scan the user-space PDEs only: kernel space is shared */
  for (index_in_pd = (SOS_PAGING_BASE_USER_ADDRESS >> 22) ;
       index_in_pd < 1024 ;
       index_in_pd ++)
    {
      sos_paddr_t paddr_pt = (pd[index_in_pd].pde.pt_paddr << 12);
      int index_in_pt;

      if (! pd[index_in_pd].pde.present)
        {
          pd[index_in_pd].ui32 = 0;
          continue;
        }

      /* Map the PT at the temporary kernel address */
      SOS_ASSERT_FATAL(SOS_OK
                       == sos_paging_map(paddr_pt,
                                         (sos_vaddr_t)pt, FALSE,
                                         SOS_VM_MAP_PROT_READ
                                         | SOS_VM_MAP_PROT_WRITE));

      for (index_in_pt = 0 ; index_in_pt < 1024 ; index_in_pt ++)
        {
          if (! pt[index_in_pt].pte.present)
            {
              pt[index_in_pt].ui32 = 0;
              continue;
            }

          /* Release the mapped page and update the PT occupation */
          sos_physmem_unref_physpage(pt[index_in_pt].pte.paddr << 12);
          sos_physmem_dec_physpage_occupation(paddr_pt);

          pt[index_in_pt].ui32 = 0;
        }

      SOS_ASSERT_FATAL(SOS_OK == sos_paging_unmap((sos_vaddr_t)pt));

      /* Reset the PDE and release the PT itself */
      pd[index_in_pd].ui32 = 0;
      sos_physmem_unref_physpage(paddr_pt);
    }

  SOS_ASSERT_FATAL(SOS_OK == sos_kmem_vmm_free((sos_vaddr_t)pt));

  return SOS_OK;
}

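/*
 * Initialize a new page directory: share the kernel-space PDEs of
 * the source address space and set up the mirror PDE of the new one.
 */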
sos_ret_t sos_paging_copy_kernel_space(sos_vaddr_t dest_vaddr_PD,
                                       sos_vaddr_t src_vaddr_PD)
{
  x86_pde_val_t *src_pd       = (x86_pde_val_t*) src_vaddr_PD;
  x86_pde_val_t *dest_pd      = (x86_pde_val_t*) dest_vaddr_PD;
  sos_paddr_t   dest_paddr_PD = sos_paging_get_paddr(dest_vaddr_PD);
  x86_pde_val_t mirror_pde;
  int index_in_pd;

  /* Start from an empty PD */
  memset((void*)dest_vaddr_PD, 0x0, SOS_PAGE_SIZE);

  /* Copy the kernel-space PDEs: both address spaces share the same
     kernel page tables */
  for (index_in_pd = 0 ;
       index_in_pd < (SOS_PAGING_MIRROR_VADDR >> 22) ;
       index_in_pd ++)
    {
      dest_pd[index_in_pd].ui32 = src_pd[index_in_pd].ui32;
    }

  /* Install the mirror PDE of the new address space: it must point
     to the new PD, not to the source one */
  mirror_pde.ui32         = 0;
  mirror_pde.pde.present  = TRUE;
  mirror_pde.pde.write    = 1;
  mirror_pde.pde.user     = 0;
  mirror_pde.pde.pt_paddr = (dest_paddr_PD >> 12);
  dest_pd[SOS_PAGING_MIRROR_VADDR >> 22].ui32 = mirror_pde.ui32;

  return SOS_OK;
}

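/*
 * New in this version: duplicate the user-space mappings of the
 * source address space into the destination one. The page tables are
 * copied, but the mapped pages themselves become shared by both
 * address spaces (the basis for copy-on-write).
 */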
sos_ret_t sos_paging_copy_user_space(sos_vaddr_t dest_vaddr_PD,
                                     sos_vaddr_t src_vaddr_PD)
{
  x86_pde_val_t *src_pd  = (x86_pde_val_t*) src_vaddr_PD;
  x86_pde_val_t *dest_pd = (x86_pde_val_t*) dest_vaddr_PD;
  x86_pte_val_t *tmp_src_pt, *tmp_dest_pt;
  int index_in_pd;

  /* Allocate two kernel virtual pages where the source and
     destination PTs will be temporarily mapped */
  tmp_src_pt = (x86_pte_val_t *)sos_kmem_vmm_alloc(1, 0);
  if (! tmp_src_pt)
    return -SOS_ENOMEM;

  tmp_dest_pt = (x86_pte_val_t *)sos_kmem_vmm_alloc(1, 0);
  if (! tmp_dest_pt)
    {
      /* Release the page that was successfully allocated */
      sos_kmem_vmm_free((sos_vaddr_t)tmp_src_pt);
      return -SOS_ENOMEM;
    }

  /* Scan the user-space PDEs of the source PD */
  for (index_in_pd = (SOS_PAGING_BASE_USER_ADDRESS >> 22) ;
       index_in_pd < 1024 ;
       index_in_pd ++)
    {
      sos_paddr_t paddr_dest_pt;
      int index_in_pt;

      /* Copy the PDE flags as-is... */
      dest_pd[index_in_pd].ui32 = src_pd[index_in_pd].ui32;

      if (! src_pd[index_in_pd].pde.present)
        continue;

      /* ...but allocate a fresh PT for the destination */
      paddr_dest_pt = sos_physmem_ref_physpage_new(TRUE);
      if (NULL == (void*)paddr_dest_pt)
        {
          sos_paging_dispose((sos_vaddr_t)dest_vaddr_PD);

          sos_kmem_vmm_free((sos_vaddr_t)tmp_src_pt);
          sos_kmem_vmm_free((sos_vaddr_t)tmp_dest_pt);
          return -SOS_ENOMEM;
        }

      /* Map both PTs at their temporary kernel addresses */
      SOS_ASSERT_FATAL(SOS_OK
                       == sos_paging_map(src_pd[index_in_pd].pde.pt_paddr << 12,
                                         (sos_vaddr_t)tmp_src_pt, FALSE,
                                         SOS_VM_MAP_PROT_READ));
      SOS_ASSERT_FATAL(SOS_OK
                       == sos_paging_map(paddr_dest_pt,
                                         (sos_vaddr_t)tmp_dest_pt, FALSE,
                                         SOS_VM_MAP_PROT_READ
                                         | SOS_VM_MAP_PROT_WRITE));

      /* Copy the PTEs: both address spaces now map the same
         physical pages */
      for (index_in_pt = 0 ; index_in_pt < 1024 ; index_in_pt ++)
        {
          tmp_dest_pt[index_in_pt].ui32 = tmp_src_pt[index_in_pt].ui32;

          if (! tmp_dest_pt[index_in_pt].pte.present)
            continue;

          /* The copy starts with a clean access history */
          tmp_dest_pt[index_in_pt].pte.accessed = 0;
          tmp_dest_pt[index_in_pt].pte.dirty    = 0;

          /* The shared page gains a reference; the new PT gains a
             mapping */
          sos_physmem_ref_physpage_at(tmp_src_pt[index_in_pt].pte.paddr << 12);
          sos_physmem_inc_physpage_occupation(paddr_dest_pt);
        }

      SOS_ASSERT_FATAL(SOS_OK == sos_paging_unmap((sos_vaddr_t)tmp_src_pt));
      SOS_ASSERT_FATAL(SOS_OK == sos_paging_unmap((sos_vaddr_t)tmp_dest_pt));

      /* Point the destination PDE at the new PT */
      dest_pd[index_in_pd].pde.pt_paddr = (paddr_dest_pt >> 12);
      dest_pd[index_in_pd].pde.accessed = 0;
    }

  SOS_ASSERT_FATAL(SOS_OK == sos_kmem_vmm_free((sos_vaddr_t)tmp_src_pt));
  SOS_ASSERT_FATAL(SOS_OK == sos_kmem_vmm_free((sos_vaddr_t)tmp_dest_pt));

  return SOS_OK;
}

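/*
 * New in this version: mark a user interval read-only so that the
 * first write to each shared page triggers a page fault, resolved by
 * sos_paging_try_resolve_COW() below.
 */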
sos_ret_t sos_paging_prepare_COW(sos_uaddr_t base_address,
                                 sos_size_t length)
{
  SOS_ASSERT_FATAL(SOS_IS_PAGE_ALIGNED(base_address));
  SOS_ASSERT_FATAL(SOS_IS_PAGE_ALIGNED(length));
  SOS_ASSERT_FATAL(SOS_PAGING_BASE_USER_ADDRESS <= base_address);

  /* Remove write access from every page of the interval */
  for ( ;
        length > 0 ;
        length -= SOS_PAGE_SIZE, base_address += SOS_PAGE_SIZE)
    {
      sos_paging_set_prot(base_address,
                          SOS_VM_MAP_PROT_READ);
    }

  return SOS_OK;
}

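/*
 * New in this version: presumably called from the page fault handler
 * on a write to a read-only user page. If the underlying physical
 * page is still shared, copy it to a private page; in both cases the
 * page ends up writable for the faulting address space.
 */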
sos_ret_t sos_paging_try_resolve_COW(sos_uaddr_t uaddr)
{
  sos_ret_t refcnt;

  /* Get the page directory/table entries for this user address */
  unsigned index_in_pd = virt_to_pd_index(uaddr);
  unsigned index_in_pt = virt_to_pt_index(uaddr);

  /* The current PD and the PT of the page, through the mirror */
  struct x86_pde *pd = (struct x86_pde*)
    (SOS_PAGING_MIRROR_VADDR
     + SOS_PAGE_SIZE*virt_to_pd_index(SOS_PAGING_MIRROR_VADDR));
  struct x86_pte * pt = (struct x86_pte*) (SOS_PAGING_MIRROR_VADDR
                                           + SOS_PAGE_SIZE*index_in_pd);

  /* The faulting address must be mapped */
  if (! pd[index_in_pd].present)
    return -SOS_EFAULT;
  if (! pt[index_in_pt].present)
    return -SOS_EFAULT;

  /* The PDE must allow writing, otherwise this is a plain fault */
  if (! pd[index_in_pd].write)
    return -SOS_EFAULT;

  /* A COW page is read-only at the PTE level */
  SOS_ASSERT_FATAL(! pt[index_in_pt].write);

  /* How many address spaces still share this page ? */
  refcnt = sos_physmem_get_physpage_refcount(pt[index_in_pt].paddr << 12);
  SOS_ASSERT_FATAL(refcnt > 0);

  if (refcnt == 1)
    {
      /* We are the last user: simply make the page writable again
         and invalidate the stale read-only TLB entry for it */
      pt[index_in_pt].write = 1;
      invlpg(SOS_PAGE_ALIGN_INF(uaddr));
    }
  else
    {
      /* The page is still shared: copy it to a fresh private page */
      sos_paddr_t new_ppage;
      sos_vaddr_t vpage_src, tmp_dest;

      /* Allocate a new physical page, mapped in kernel space */
      tmp_dest = sos_kmem_vmm_alloc(1, SOS_KMEM_VMM_MAP);
      if (! tmp_dest)
        return -SOS_ENOMEM;

      /* Copy the contents of the shared page into it */
      vpage_src = SOS_PAGE_ALIGN_INF(uaddr);
      memcpy((void*)tmp_dest, (void*)vpage_src, SOS_PAGE_SIZE);

      /* Remap the user page to the copy, writable this time; the
         remapping also unreferences the old shared page */
      new_ppage = sos_paging_get_paddr(tmp_dest);
      SOS_ASSERT_FATAL(new_ppage != (sos_paddr_t)NULL);
      if (SOS_OK != sos_paging_map(new_ppage, vpage_src,
                                   TRUE,
                                   SOS_VM_MAP_PROT_READ
                                   | SOS_VM_MAP_PROT_WRITE))
        {
          sos_kmem_vmm_free(tmp_dest);
          return -SOS_ENOMEM;
        }

      /* Drop the temporary kernel mapping of the copy */
      SOS_ASSERT_FATAL(SOS_OK == sos_kmem_vmm_free(tmp_dest));
    }

  return SOS_OK;
}