Diff markup
001 001
002 002
003 003
004 004
005 005
006 006
007 007
008 008
009 009
010 010
011 011
012 012
013 013
014 014
015 015
016 016
017 017
018 #include <sos/physmem.h> 018 #include <sos/physmem.h>
019 #include <sos/klibc.h> 019 #include <sos/klibc.h>
020 #include <sos/assert.h> 020 #include <sos/assert.h>
021 021
022 #include "mm_context.h" 022 #include "mm_context.h"
023 023
024 #include "paging.h" 024 #include "paging.h"
025 025
026 026
027 027
028 028
029 029
030 030
031 031
032 032
033 033
034 034
035 035
036 036
037 037
038 038
039 039
040 040
041 041
042 042
043 043
044 044
045 045
046 046
047 struct x86_pde 047 struct x86_pde
048 { 048 {
049 sos_ui32_t present :1; 049 sos_ui32_t present :1;
050 sos_ui32_t write :1; 050 sos_ui32_t write :1;
051 sos_ui32_t user :1; 051 sos_ui32_t user :1;
052 sos_ui32_t write_through :1; 052 sos_ui32_t write_through :1;
053 sos_ui32_t cache_disabled :1; 053 sos_ui32_t cache_disabled :1;
054 sos_ui32_t accessed :1; 054 sos_ui32_t accessed :1;
055 sos_ui32_t zero :1; 055 sos_ui32_t zero :1;
056 sos_ui32_t page_size :1; 056 sos_ui32_t page_size :1;
057 sos_ui32_t global_page :1; 057 sos_ui32_t global_page :1;
058 sos_ui32_t custom :3; 058 sos_ui32_t custom :3;
059 sos_ui32_t pt_paddr :20; 059 sos_ui32_t pt_paddr :20;
060 } __attribute__ ((packed)); 060 } __attribute__ ((packed));
061 061
062 062
063 063
064 typedef union { 064 typedef union {
065 struct x86_pde pde; 065 struct x86_pde pde;
066 sos_ui32_t ui32; 066 sos_ui32_t ui32;
067 } x86_pde_val_t; 067 } x86_pde_val_t;
068 068
069 069
070 070
071 071
072 struct x86_pte 072 struct x86_pte
073 { 073 {
074 sos_ui32_t present :1; 074 sos_ui32_t present :1;
075 sos_ui32_t write :1; 075 sos_ui32_t write :1;
076 sos_ui32_t user :1; 076 sos_ui32_t user :1;
077 sos_ui32_t write_through :1; 077 sos_ui32_t write_through :1;
078 sos_ui32_t cache_disabled :1; 078 sos_ui32_t cache_disabled :1;
079 sos_ui32_t accessed :1; 079 sos_ui32_t accessed :1;
080 sos_ui32_t dirty :1; 080 sos_ui32_t dirty :1;
081 sos_ui32_t zero :1; 081 sos_ui32_t zero :1;
082 sos_ui32_t global_page :1; 082 sos_ui32_t global_page :1;
083 083
084 sos_ui32_t custom :3; 084 sos_ui32_t custom :3;
085 sos_ui32_t paddr :20; 085 sos_ui32_t paddr :20;
086 } __attribute__ ((packed)); 086 } __attribute__ ((packed));
087 087
088 088
089 089
090 typedef union { 090 typedef union {
091 struct x86_pte pte; 091 struct x86_pte pte;
092 sos_ui32_t ui32; 092 sos_ui32_t ui32;
093 } x86_pte_val_t; 093 } x86_pte_val_t;
094 094
095 095
096 096
097 097
098 struct x86_pdbr 098 struct x86_pdbr
099 { 099 {
100 sos_ui32_t zero1 :3; 100 sos_ui32_t zero1 :3;
101 sos_ui32_t write_through :1; 101 sos_ui32_t write_through :1;
102 sos_ui32_t cache_disabled :1; 102 sos_ui32_t cache_disabled :1;
103 sos_ui32_t zero2 :7; 103 sos_ui32_t zero2 :7;
104 sos_ui32_t pd_paddr :20; 104 sos_ui32_t pd_paddr :20;
105 } __attribute__ ((packed)); 105 } __attribute__ ((packed));
106 106
107 107
108 108
109 109
110 110
111 111
112 112
/*
 * Invalidate the single TLB entry covering the page that contains the
 * LINEAR (virtual) address vaddr. INVLPG takes a memory operand; the
 * dereference is never executed, it only conveys the address.
 */
#define invlpg(vaddr) \
  do { \
    __asm__ __volatile__("invlpg %0"::"m"(*((unsigned *)(vaddr)))); \
  } while(0)
117 117
118 118
119 119
120 120
121 121
122 122
/*
 * Flush the whole (non-global) TLB by reloading CR3 with its own value.
 */
#define flush_tlb() \
  do { \
        unsigned long tmpreg; \
        asm volatile("movl %%cr3,%0\n\tmovl %0,%%cr3" :"=r" \
                     (tmpreg) : :"memory"); \
  } while (0)
129 129
130 130
131 131
132 132
133 133
134 134
/* Top 10 bits of a virtual address: index into the page directory. */
#define virt_to_pd_index(vaddr) \
  (((unsigned)(vaddr)) >> 22)
137 137
138 138
139 139
140 140
141 141
142 142
/* Middle 10 bits of a virtual address: index into the page table. */
#define virt_to_pt_index(vaddr) \
  ( (((unsigned)(vaddr)) >> 12) & 0x3ff )
145 145
146 146
147 147
148 148
149 149
150 150
/* Low 12 bits of a virtual address: byte offset inside the 4kB page. */
#define virt_to_page_offset(vaddr) \
  (((unsigned)(vaddr)) & SOS_PAGE_MASK)
153 153
154 154
155 155
156 156
157 157
158 158
159 159
160 static sos_ret_t paging_setup_map_helper(struc 160 static sos_ret_t paging_setup_map_helper(struct x86_pde * pd,
161 sos_p 161 sos_paddr_t ppage,
162 sos_v 162 sos_vaddr_t vaddr)
163 { 163 {
164 164
165 165
166 unsigned index_in_pd = virt_to_pd_index(vadd 166 unsigned index_in_pd = virt_to_pd_index(vaddr);
167 unsigned index_in_pt = virt_to_pt_index(vadd 167 unsigned index_in_pt = virt_to_pt_index(vaddr);
168 168
169 169
170 struct x86_pte * pt; 170 struct x86_pte * pt;
171 if (pd[index_in_pd].present) 171 if (pd[index_in_pd].present)
172 { 172 {
173 pt = (struct x86_pte*) (pd[index_in_pd]. 173 pt = (struct x86_pte*) (pd[index_in_pd].pt_paddr << 12);
174 174
175 175
176 176
177 177
178 178
179 if (pt[index_in_pt].present) 179 if (pt[index_in_pt].present)
180 SOS_ASSERT_FATAL(FALSE); 180 SOS_ASSERT_FATAL(FALSE);
181 } 181 }
182 else 182 else
183 { 183 {
184 184
185 pt = (struct x86_pte*) sos_physmem_ref_p 185 pt = (struct x86_pte*) sos_physmem_ref_physpage_new(FALSE);
186 if (! pt) 186 if (! pt)
187 return -SOS_ENOMEM; 187 return -SOS_ENOMEM;
188 188
189 memset((void*)pt, 0x0, SOS_PAGE_SIZE); 189 memset((void*)pt, 0x0, SOS_PAGE_SIZE);
190 190
191 pd[index_in_pd].present = TRUE; 191 pd[index_in_pd].present = TRUE;
192 pd[index_in_pd].write = 1; 192 pd[index_in_pd].write = 1;
193 193
194 194
195 195
196 196
197 pd[index_in_pd].pt_paddr = ((sos_paddr_t 197 pd[index_in_pd].pt_paddr = ((sos_paddr_t)pt) >> 12;
198 } 198 }
199 199
200 200
201 201
202 pt[index_in_pt].present = 1; 202 pt[index_in_pt].present = 1;
203 pt[index_in_pt].write = 1; 203 pt[index_in_pt].write = 1;
204 204
205 205
206 206
207 pt[index_in_pt].user = 0; 207 pt[index_in_pt].user = 0;
208 pt[index_in_pt].paddr = ppage >> 12; 208 pt[index_in_pt].paddr = ppage >> 12;
209 209
210 210
211 211
212 sos_physmem_inc_physpage_occupation((sos_pad 212 sos_physmem_inc_physpage_occupation((sos_paddr_t)pt);
213 213
214 return SOS_OK; 214 return SOS_OK;
215 } 215 }
216 216
217 217
218 sos_ret_t sos_paging_subsystem_setup(sos_paddr 218 sos_ret_t sos_paging_subsystem_setup(sos_paddr_t identity_mapping_base,
219 sos_paddr 219 sos_paddr_t identity_mapping_top)
220 { 220 {
221 221
222 struct x86_pdbr cr3; 222 struct x86_pdbr cr3;
223 223
224 224
225 struct x86_pde * pd 225 struct x86_pde * pd
226 = (struct x86_pde*) sos_physmem_ref_physpa 226 = (struct x86_pde*) sos_physmem_ref_physpage_new(FALSE);
227 227
228 228
229 sos_paddr_t paddr; 229 sos_paddr_t paddr;
230 230
231 231
232 232
233 memset((void*)pd, 233 memset((void*)pd,
234 0x0, 234 0x0,
235 SOS_PAGE_SIZE); 235 SOS_PAGE_SIZE);
236 236
237 237
238 for (paddr = identity_mapping_base ; 238 for (paddr = identity_mapping_base ;
239 paddr < identity_mapping_top ; 239 paddr < identity_mapping_top ;
240 paddr += SOS_PAGE_SIZE) 240 paddr += SOS_PAGE_SIZE)
241 { 241 {
242 if (paging_setup_map_helper(pd, paddr, p 242 if (paging_setup_map_helper(pd, paddr, paddr))
243 return -SOS_ENOMEM; 243 return -SOS_ENOMEM;
244 } 244 }
245 245
246 246
247 for (paddr = BIOS_N_VIDEO_START ; 247 for (paddr = BIOS_N_VIDEO_START ;
248 paddr < BIOS_N_VIDEO_END ; 248 paddr < BIOS_N_VIDEO_END ;
249 paddr += SOS_PAGE_SIZE) 249 paddr += SOS_PAGE_SIZE)
250 { 250 {
251 if (paging_setup_map_helper(pd, paddr, p 251 if (paging_setup_map_helper(pd, paddr, paddr))
252 return -SOS_ENOMEM; 252 return -SOS_ENOMEM;
253 } 253 }
254 254
255 255
256 256
257 pd[virt_to_pd_index(SOS_PAGING_MIRROR_VADDR) 257 pd[virt_to_pd_index(SOS_PAGING_MIRROR_VADDR)].present = TRUE;
258 pd[virt_to_pd_index(SOS_PAGING_MIRROR_VADDR) 258 pd[virt_to_pd_index(SOS_PAGING_MIRROR_VADDR)].write = 1;
259 pd[virt_to_pd_index(SOS_PAGING_MIRROR_VADDR) 259 pd[virt_to_pd_index(SOS_PAGING_MIRROR_VADDR)].user = 0;
260 pd[virt_to_pd_index(SOS_PAGING_MIRROR_VADDR) 260 pd[virt_to_pd_index(SOS_PAGING_MIRROR_VADDR)].pt_paddr
261 = ((sos_paddr_t)pd)>>12; 261 = ((sos_paddr_t)pd)>>12;
262 262
263 263
264 264
265 memset(& cr3, 0x0, sizeof(struct x86_pdbr)); 265 memset(& cr3, 0x0, sizeof(struct x86_pdbr));
266 cr3.pd_paddr = ((sos_paddr_t)pd) >> 12; 266 cr3.pd_paddr = ((sos_paddr_t)pd) >> 12;
267 267
268 268
269 269
270 270
271 asm volatile ("movl %0,%%cr3\n\t" 271 asm volatile ("movl %0,%%cr3\n\t"
272 "movl %%cr0,%%eax\n\t" 272 "movl %%cr0,%%eax\n\t"
273 "orl $0x80010000, %%eax\n\t" 273 "orl $0x80010000, %%eax\n\t"
274 "movl %%eax,%%cr0\n\t" 274 "movl %%eax,%%cr0\n\t"
275 "jmp 1f\n\t" 275 "jmp 1f\n\t"
276 "1:\n\t" 276 "1:\n\t"
277 "movl $2f, %%eax\n\t" 277 "movl $2f, %%eax\n\t"
278 "jmp *%%eax\n\t" 278 "jmp *%%eax\n\t"
279 "2:\n\t" ::"r"(cr3):"memory"," 279 "2:\n\t" ::"r"(cr3):"memory","eax");
280 280
281 281
282 282
283 283
284 284
285 285
286 286
287 287
288 288
289 return SOS_OK; 289 return SOS_OK;
290 } 290 }
291 291
292 292
293 293
294 294
295 sos_ret_t sos_paging_map(sos_paddr_t ppage_pad 295 sos_ret_t sos_paging_map(sos_paddr_t ppage_paddr,
296 sos_vaddr_t vpage_vad 296 sos_vaddr_t vpage_vaddr,
297 sos_bool_t is_user_pa 297 sos_bool_t is_user_page,
298 sos_ui32_t flags) 298 sos_ui32_t flags)
299 { 299 {
300 300
301 301
302 unsigned index_in_pd = virt_to_pd_index(vpag 302 unsigned index_in_pd = virt_to_pd_index(vpage_vaddr);
303 unsigned index_in_pt = virt_to_pt_index(vpag 303 unsigned index_in_pt = virt_to_pt_index(vpage_vaddr);
304 304
305 305
306 struct x86_pde *pd = (struct x86_pde*) 306 struct x86_pde *pd = (struct x86_pde*)
307 (SOS_PAGING_MIRROR_VADDR 307 (SOS_PAGING_MIRROR_VADDR
308 + SOS_PAGE_SIZE*virt_to_pd_index(SOS_PAGI 308 + SOS_PAGE_SIZE*virt_to_pd_index(SOS_PAGING_MIRROR_VADDR));
309 309
310 310
311 struct x86_pte * pt = (struct x86_pte*) (SOS 311 struct x86_pte * pt = (struct x86_pte*) (SOS_PAGING_MIRROR_VADDR
312 + S 312 + SOS_PAGE_SIZE*index_in_pd);
313 313
314 SOS_ASSERT_FATAL(SOS_IS_PAGE_ALIGNED(ppage_p 314 SOS_ASSERT_FATAL(SOS_IS_PAGE_ALIGNED(ppage_paddr));
315 SOS_ASSERT_FATAL(SOS_IS_PAGE_ALIGNED(vpage_v 315 SOS_ASSERT_FATAL(SOS_IS_PAGE_ALIGNED(vpage_vaddr));
316 316
317 317
318 flags &= ~SOS_VM_MAP_PROT_EXEC; 318 flags &= ~SOS_VM_MAP_PROT_EXEC;
319 319
320 320
321 if ((vpage_vaddr >= SOS_PAGING_MIRROR_VADDR) 321 if ((vpage_vaddr >= SOS_PAGING_MIRROR_VADDR)
322 && (vpage_vaddr < SOS_PAGING_MIRROR_VADD 322 && (vpage_vaddr < SOS_PAGING_MIRROR_VADDR + SOS_PAGING_MIRROR_SIZE))
323 return -SOS_EINVAL; 323 return -SOS_EINVAL;
324 324
325 325
326 if (! pd[index_in_pd].present) 326 if (! pd[index_in_pd].present)
327 { 327 {
328 x86_pde_val_t u; 328 x86_pde_val_t u;
329 329
330 330
331 sos_paddr_t pt_ppage 331 sos_paddr_t pt_ppage
332 = sos_physmem_ref_physpage_new(! (flag 332 = sos_physmem_ref_physpage_new(! (flags & SOS_VM_MAP_ATOMIC));
333 if (! pt_ppage) 333 if (! pt_ppage)
334 { 334 {
335 return -SOS_ENOMEM; 335 return -SOS_ENOMEM;
336 } 336 }
337 337
338 338
339 u.pde = (struct x86_pde){ 339 u.pde = (struct x86_pde){
340 .present = TRUE, 340 .present = TRUE,
341 .write = 1, 341 .write = 1,
342 .pt_paddr = ((sos_paddr_t)pt_ppage) >> 342 .pt_paddr = ((sos_paddr_t)pt_ppage) >> 12
343 }; 343 };
344 344
345 345
346 if (vpage_vaddr < SOS_PAGING_MIRROR_VADD 346 if (vpage_vaddr < SOS_PAGING_MIRROR_VADDR)
347 { 347 {
348 348
349 349
350 350
351 351
352 u.pde.user = 0; 352 u.pde.user = 0;
353 353
354 354
355 SOS_ASSERT_FATAL(SOS_OK == 355 SOS_ASSERT_FATAL(SOS_OK ==
356 sos_mm_context_sync 356 sos_mm_context_synch_kernel_PDE(index_in_pd,
357 357 u.ui32));
358 } 358 }
359 else 359 else
360 360
361 361
362 362
363 { 363 {
364 364
365 365
366 366
367 367
368 u.pde.user = 1; 368 u.pde.user = 1;
369 369
370 370
371 pd[index_in_pd] = u.pde; 371 pd[index_in_pd] = u.pde;
372 } 372 }
373 373
374 374
375 375
376 376
377 377
378 378
379 invlpg(pt); 379 invlpg(pt);
380 380
381 381
382 memset((void*)pt, 0x0, SOS_PAGE_SIZE); 382 memset((void*)pt, 0x0, SOS_PAGE_SIZE);
383 } 383 }
384 384
385 385
386 386
387 if (! pt[index_in_pt].present) 387 if (! pt[index_in_pt].present)
388 sos_physmem_inc_physpage_occupation(pd[ind 388 sos_physmem_inc_physpage_occupation(pd[index_in_pd].pt_paddr << 12);
389 389
390 390
391 391
392 else 392 else
393 sos_physmem_unref_physpage(pt[index_in_pt] 393 sos_physmem_unref_physpage(pt[index_in_pt].paddr << 12);
394 394
395 395
396 pt[index_in_pt].present = TRUE; 396 pt[index_in_pt].present = TRUE;
397 pt[index_in_pt].write = (flags & SOS_VM_MA 397 pt[index_in_pt].write = (flags & SOS_VM_MAP_PROT_WRITE)?1:0;
398 pt[index_in_pt].user = (is_user_page)?1:0 398 pt[index_in_pt].user = (is_user_page)?1:0;
399 pt[index_in_pt].paddr = ppage_paddr >> 12; 399 pt[index_in_pt].paddr = ppage_paddr >> 12;
400 sos_physmem_ref_physpage_at(ppage_paddr); 400 sos_physmem_ref_physpage_at(ppage_paddr);
401 401
402 402
403 403
404 404
405 405
406 406
407 407
408 invlpg(vpage_vaddr); 408 invlpg(vpage_vaddr);
409 409
410 return SOS_OK; 410 return SOS_OK;
411 } 411 }
412 412
413 413
414 sos_ret_t sos_paging_unmap(sos_vaddr_t vpage_v 414 sos_ret_t sos_paging_unmap(sos_vaddr_t vpage_vaddr)
415 { 415 {
416 sos_ret_t pt_dec_occupation_retval; 416 sos_ret_t pt_dec_occupation_retval;
417 417
418 418
419 419
420 unsigned index_in_pd = virt_to_pd_index(vpag 420 unsigned index_in_pd = virt_to_pd_index(vpage_vaddr);
421 unsigned index_in_pt = virt_to_pt_index(vpag 421 unsigned index_in_pt = virt_to_pt_index(vpage_vaddr);
422 422
423 423
424 struct x86_pde *pd = (struct x86_pde*) 424 struct x86_pde *pd = (struct x86_pde*)
425 (SOS_PAGING_MIRROR_VADDR 425 (SOS_PAGING_MIRROR_VADDR
426 + SOS_PAGE_SIZE*virt_to_pd_index(SOS_PAGI 426 + SOS_PAGE_SIZE*virt_to_pd_index(SOS_PAGING_MIRROR_VADDR));
427 427
428 428
429 struct x86_pte * pt = (struct x86_pte*) (SOS 429 struct x86_pte * pt = (struct x86_pte*) (SOS_PAGING_MIRROR_VADDR
430 + S 430 + SOS_PAGE_SIZE*index_in_pd);
431 431
432 SOS_ASSERT_FATAL(SOS_IS_PAGE_ALIGNED(vpage_v 432 SOS_ASSERT_FATAL(SOS_IS_PAGE_ALIGNED(vpage_vaddr));
433 433
434 434
435 if (! pd[index_in_pd].present) 435 if (! pd[index_in_pd].present)
436 return -SOS_EINVAL; 436 return -SOS_EINVAL;
437 if (! pt[index_in_pt].present) 437 if (! pt[index_in_pt].present)
438 return -SOS_EINVAL; 438 return -SOS_EINVAL;
439 439
440 440
441 if ((vpage_vaddr >= SOS_PAGING_MIRROR_VADDR) 441 if ((vpage_vaddr >= SOS_PAGING_MIRROR_VADDR)
442 && (vpage_vaddr < SOS_PAGING_MIRROR_VADD 442 && (vpage_vaddr < SOS_PAGING_MIRROR_VADDR + SOS_PAGING_MIRROR_SIZE))
443 return -SOS_EINVAL; 443 return -SOS_EINVAL;
444 444
445 445
446 sos_physmem_unref_physpage(pt[index_in_pt].p 446 sos_physmem_unref_physpage(pt[index_in_pt].paddr << 12);
447 447
448 448
449 memset(pt + index_in_pt, 0x0, sizeof(struct 449 memset(pt + index_in_pt, 0x0, sizeof(struct x86_pte));
450 450
451 451
452 invlpg(vpage_vaddr); 452 invlpg(vpage_vaddr);
453 453
454 454
455 pt_dec_occupation_retval 455 pt_dec_occupation_retval
456 = sos_physmem_dec_physpage_occupation(pd[i 456 = sos_physmem_dec_physpage_occupation(pd[index_in_pd].pt_paddr << 12);
457 SOS_ASSERT_FATAL(pt_dec_occupation_retval >= 457 SOS_ASSERT_FATAL(pt_dec_occupation_retval >= 0);
458 if (pt_dec_occupation_retval > 0) 458 if (pt_dec_occupation_retval > 0)
459 459
460 { 460 {
461 x86_pde_val_t u; 461 x86_pde_val_t u;
462 462
463 463
464 464
465 465
466 466
467 sos_physmem_unref_physpage(pd[index_in_p 467 sos_physmem_unref_physpage(pd[index_in_pd].pt_paddr << 12);
468 468
469 469
470 470
471 471
472 472
473 473
474 474
475 u.ui32 = 0; 475 u.ui32 = 0;
476 476
477 477
478 if (vpage_vaddr < SOS_PAGING_MIRROR_VADD 478 if (vpage_vaddr < SOS_PAGING_MIRROR_VADDR)
479 { 479 {
480 480
481 SOS_ASSERT_FATAL(SOS_OK == 481 SOS_ASSERT_FATAL(SOS_OK ==
482 sos_mm_context_sync 482 sos_mm_context_synch_kernel_PDE(index_in_pd,
483 483 u.ui32));
484 } 484 }
485 else 485 else
486 486
487 487
488 488
489 { 489 {
490 490
491 491
492 pd[index_in_pd] = u.pde; 492 pd[index_in_pd] = u.pde;
493 } 493 }
494 494
495 495
496 invlpg(pt); 496 invlpg(pt);
497 } 497 }
498 498
499 return SOS_OK; 499 return SOS_OK;
500 } 500 }
501 501
502 502
503 sos_ret_t sos_paging_unmap_interval(sos_vaddr_ 503 sos_ret_t sos_paging_unmap_interval(sos_vaddr_t vaddr,
504 sos_size_t 504 sos_size_t size)
505 { 505 {
506 sos_ret_t retval = 0; 506 sos_ret_t retval = 0;
507 507
508 if (! SOS_IS_PAGE_ALIGNED(vaddr)) 508 if (! SOS_IS_PAGE_ALIGNED(vaddr))
509 return -SOS_EINVAL; 509 return -SOS_EINVAL;
510 if (! SOS_IS_PAGE_ALIGNED(size)) 510 if (! SOS_IS_PAGE_ALIGNED(size))
511 return -SOS_EINVAL; 511 return -SOS_EINVAL;
512 512
513 for ( ; 513 for ( ;
514 size >= SOS_PAGE_SIZE ; 514 size >= SOS_PAGE_SIZE ;
515 vaddr += SOS_PAGE_SIZE, size -= SOS_PA 515 vaddr += SOS_PAGE_SIZE, size -= SOS_PAGE_SIZE)
516 if (SOS_OK == sos_paging_unmap(vaddr)) 516 if (SOS_OK == sos_paging_unmap(vaddr))
517 retval += SOS_PAGE_SIZE; 517 retval += SOS_PAGE_SIZE;
518 518
519 return retval; 519 return retval;
520 } 520 }
521 521
522 522
523 sos_ui32_t sos_paging_get_prot(sos_vaddr_t vad 523 sos_ui32_t sos_paging_get_prot(sos_vaddr_t vaddr)
524 { 524 {
525 sos_ui32_t retval; 525 sos_ui32_t retval;
526 526
527 527
528 528
529 unsigned index_in_pd = virt_to_pd_index(vadd 529 unsigned index_in_pd = virt_to_pd_index(vaddr);
530 unsigned index_in_pt = virt_to_pt_index(vadd 530 unsigned index_in_pt = virt_to_pt_index(vaddr);
531 531
532 532
533 struct x86_pde *pd = (struct x86_pde*) 533 struct x86_pde *pd = (struct x86_pde*)
534 (SOS_PAGING_MIRROR_VADDR 534 (SOS_PAGING_MIRROR_VADDR
535 + SOS_PAGE_SIZE*virt_to_pd_index(SOS_PAGI 535 + SOS_PAGE_SIZE*virt_to_pd_index(SOS_PAGING_MIRROR_VADDR));
536 536
537 537
538 struct x86_pte * pt = (struct x86_pte*) (SOS 538 struct x86_pte * pt = (struct x86_pte*) (SOS_PAGING_MIRROR_VADDR
539 + S 539 + SOS_PAGE_SIZE*index_in_pd);
540 540
541 541
542 if (! pd[index_in_pd].present) 542 if (! pd[index_in_pd].present)
543 return SOS_VM_MAP_PROT_NONE; 543 return SOS_VM_MAP_PROT_NONE;
544 if (! pt[index_in_pt].present) 544 if (! pt[index_in_pt].present)
545 return SOS_VM_MAP_PROT_NONE; 545 return SOS_VM_MAP_PROT_NONE;
546 546
547 547
548 retval = SOS_VM_MAP_PROT_READ; 548 retval = SOS_VM_MAP_PROT_READ;
549 if (pd[index_in_pd].write && pt[index_in_pt] 549 if (pd[index_in_pd].write && pt[index_in_pt].write)
550 retval |= SOS_VM_MAP_PROT_WRITE; 550 retval |= SOS_VM_MAP_PROT_WRITE;
551 551
552 return retval; 552 return retval;
553 } 553 }
554 554
555 555
556 sos_ret_t sos_paging_set_prot(sos_vaddr_t vadd 556 sos_ret_t sos_paging_set_prot(sos_vaddr_t vaddr,
557 sos_ui32_t new_ 557 sos_ui32_t new_prot)
558 { 558 {
559 559
560 560
561 unsigned index_in_pd = virt_to_pd_index(vadd 561 unsigned index_in_pd = virt_to_pd_index(vaddr);
562 unsigned index_in_pt = virt_to_pt_index(vadd 562 unsigned index_in_pt = virt_to_pt_index(vaddr);
563 563
564 564
565 struct x86_pde *pd = (struct x86_pde*) 565 struct x86_pde *pd = (struct x86_pde*)
566 (SOS_PAGING_MIRROR_VADDR 566 (SOS_PAGING_MIRROR_VADDR
567 + SOS_PAGE_SIZE*virt_to_pd_index(SOS_PAGI 567 + SOS_PAGE_SIZE*virt_to_pd_index(SOS_PAGING_MIRROR_VADDR));
568 568
569 569
570 struct x86_pte * pt = (struct x86_pte*) (SOS 570 struct x86_pte * pt = (struct x86_pte*) (SOS_PAGING_MIRROR_VADDR
571 + S 571 + SOS_PAGE_SIZE*index_in_pd);
572 572
573 573
574 new_prot &= ~SOS_VM_MAP_PROT_EXEC; 574 new_prot &= ~SOS_VM_MAP_PROT_EXEC;
575 575
576 576
577 if (new_prot & ~(SOS_VM_MAP_PROT_READ | SOS_ 577 if (new_prot & ~(SOS_VM_MAP_PROT_READ | SOS_VM_MAP_PROT_WRITE))
578 return -SOS_EINVAL; 578 return -SOS_EINVAL;
579 if (! (new_prot & SOS_VM_MAP_PROT_READ)) 579 if (! (new_prot & SOS_VM_MAP_PROT_READ))
580 580
581 return -SOS_ENOSUP; 581 return -SOS_ENOSUP;
582 582
583 583
584 if (! pd[index_in_pd].present) 584 if (! pd[index_in_pd].present)
585 return -SOS_EINVAL; 585 return -SOS_EINVAL;
586 if (! pt[index_in_pt].present) 586 if (! pt[index_in_pt].present)
587 return -SOS_EINVAL; 587 return -SOS_EINVAL;
588 588
589 589
590 pt[index_in_pt].write = ((new_prot & SOS_VM_ 590 pt[index_in_pt].write = ((new_prot & SOS_VM_MAP_PROT_WRITE) != 0);
591 invlpg(vaddr); 591 invlpg(vaddr);
592 592
593 return SOS_OK; 593 return SOS_OK;
594 } 594 }
595 595
596 596
597 sos_ret_t sos_paging_set_prot_of_interval(sos_ 597 sos_ret_t sos_paging_set_prot_of_interval(sos_vaddr_t vaddr,
598 sos_ 598 sos_size_t size,
599 sos_ 599 sos_ui32_t new_prot)
600 { 600 {
601 if (! SOS_IS_PAGE_ALIGNED(vaddr)) 601 if (! SOS_IS_PAGE_ALIGNED(vaddr))
602 return -SOS_EINVAL; 602 return -SOS_EINVAL;
603 if (! SOS_IS_PAGE_ALIGNED(size)) 603 if (! SOS_IS_PAGE_ALIGNED(size))
604 return -SOS_EINVAL; 604 return -SOS_EINVAL;
605 605
606 for ( ; size >= SOS_PAGE_SIZE ; vaddr += SOS 606 for ( ; size >= SOS_PAGE_SIZE ; vaddr += SOS_PAGE_SIZE, size -= SOS_PAGE_SIZE)
607 sos_paging_set_prot(vaddr, new_prot); 607 sos_paging_set_prot(vaddr, new_prot);
608 608
609 return SOS_OK; 609 return SOS_OK;
610 } 610 }
611 611
612 612
613 sos_paddr_t sos_paging_get_paddr(sos_vaddr_t v 613 sos_paddr_t sos_paging_get_paddr(sos_vaddr_t vaddr)
614 { 614 {
615 615
616 616
617 unsigned index_in_pd = virt_to_pd_index(vadd 617 unsigned index_in_pd = virt_to_pd_index(vaddr);
618 unsigned index_in_pt = virt_to_pt_index(vadd 618 unsigned index_in_pt = virt_to_pt_index(vaddr);
619 unsigned offset_in_page = virt_to_page_offse 619 unsigned offset_in_page = virt_to_page_offset(vaddr);
620 620
621 621
622 struct x86_pde *pd = (struct x86_pde*) 622 struct x86_pde *pd = (struct x86_pde*)
623 (SOS_PAGING_MIRROR_VADDR 623 (SOS_PAGING_MIRROR_VADDR
624 + SOS_PAGE_SIZE*virt_to_pd_index(SOS_PAGI 624 + SOS_PAGE_SIZE*virt_to_pd_index(SOS_PAGING_MIRROR_VADDR));
625 625
626 626
627 struct x86_pte * pt = (struct x86_pte*) (SOS 627 struct x86_pte * pt = (struct x86_pte*) (SOS_PAGING_MIRROR_VADDR
628 + S 628 + SOS_PAGE_SIZE*index_in_pd);
629 629
630 630
631 if (! pd[index_in_pd].present) 631 if (! pd[index_in_pd].present)
632 return (sos_paddr_t)NULL; 632 return (sos_paddr_t)NULL;
633 if (! pt[index_in_pt].present) 633 if (! pt[index_in_pt].present)
634 return (sos_paddr_t)NULL; 634 return (sos_paddr_t)NULL;
635 635
636 return (pt[index_in_pt].paddr << 12) + offse 636 return (pt[index_in_pt].paddr << 12) + offset_in_page;
637 } 637 }
638 638
639 639
640 640
641 641
642 642
643 643
644 644
645 sos_paddr_t sos_paging_get_current_PD_paddr() 645 sos_paddr_t sos_paging_get_current_PD_paddr()
646 { 646 {
647 struct x86_pdbr pdbr; 647 struct x86_pdbr pdbr;
648 asm volatile("movl %%cr3, %0\n": "=r"(pdbr)) 648 asm volatile("movl %%cr3, %0\n": "=r"(pdbr));
649 return (pdbr.pd_paddr << 12); 649 return (pdbr.pd_paddr << 12);
650 } 650 }
651 651
652 652
653 sos_ret_t sos_paging_set_current_PD_paddr(sos_ 653 sos_ret_t sos_paging_set_current_PD_paddr(sos_paddr_t paddr_PD)
654 { 654 {
655 struct x86_pdbr pdbr; 655 struct x86_pdbr pdbr;
656 656
657 SOS_ASSERT_FATAL(paddr_PD != 0); 657 SOS_ASSERT_FATAL(paddr_PD != 0);
658 SOS_ASSERT_FATAL(SOS_IS_PAGE_ALIGNED(paddr_P 658 SOS_ASSERT_FATAL(SOS_IS_PAGE_ALIGNED(paddr_PD));
659 659
660 660
661 memset(& pdbr, 0x0, sizeof(struct x86_pdbr)) 661 memset(& pdbr, 0x0, sizeof(struct x86_pdbr));
662 pdbr.pd_paddr = (paddr_PD >> 12); 662 pdbr.pd_paddr = (paddr_PD >> 12);
663 663
664 664
665 asm volatile ("movl %0,%%cr3\n" ::"r"(pdbr)) 665 asm volatile ("movl %0,%%cr3\n" ::"r"(pdbr));
666 666
667 return SOS_OK; 667 return SOS_OK;
668 } 668 }
669 669
670 670
671 sos_ret_t sos_paging_dispose(sos_vaddr_t vaddr 671 sos_ret_t sos_paging_dispose(sos_vaddr_t vaddr_PD)
672 { 672 {
673 x86_pde_val_t *pd = (x86_pde_val_t*) vaddr_P 673 x86_pde_val_t *pd = (x86_pde_val_t*) vaddr_PD;
674 x86_pte_val_t *pt; 674 x86_pte_val_t *pt;
675 int index_in_pd; 675 int index_in_pd;
676 676
677 677
678 678
679 pt = (x86_pte_val_t *)sos_kmem_vmm_alloc(1, 679 pt = (x86_pte_val_t *)sos_kmem_vmm_alloc(1, 0);
680 if (! pt) 680 if (! pt)
681 return -SOS_ENOMEM; 681 return -SOS_ENOMEM;
682 682
683 683
684 684
685 685
686 for (index_in_pd = (SOS_PAGING_BASE_USER_ADD 686 for (index_in_pd = (SOS_PAGING_BASE_USER_ADDRESS >> 22) ;
687 index_in_pd < 1024 ; 687 index_in_pd < 1024 ;
688 688
689 689
690 index_in_pd ++) 690 index_in_pd ++)
691 { 691 {
692 sos_paddr_t paddr_pt = (pd[index_in_pd]. 692 sos_paddr_t paddr_pt = (pd[index_in_pd].pde.pt_paddr << 12);
693 int index_in_pt; 693 int index_in_pt;
694 694
695 695
696 if (! pd[index_in_pd].pde.present) 696 if (! pd[index_in_pd].pde.present)
697 { 697 {
698 pd[index_in_pd].ui32 = 0; 698 pd[index_in_pd].ui32 = 0;
699 continue; 699 continue;
700 } 700 }
701 701
702 702
703 SOS_ASSERT_FATAL(SOS_OK 703 SOS_ASSERT_FATAL(SOS_OK
704 == sos_paging_map(paddr 704 == sos_paging_map(paddr_pt,
705 (sos_ 705 (sos_vaddr_t)pt, FALSE,
706 SOS_V 706 SOS_VM_MAP_PROT_READ
707 | SOS 707 | SOS_VM_MAP_PROT_WRITE));
708 708
709 709
710 for (index_in_pt = 0 ; index_in_pt < 102 710 for (index_in_pt = 0 ; index_in_pt < 1024 ; index_in_pt ++)
711 { 711 {
712 712
713 if (! pt[index_in_pt].pte.present) 713 if (! pt[index_in_pt].pte.present)
714 { 714 {
715 pt[index_in_pt].ui32 = 0; 715 pt[index_in_pt].ui32 = 0;
716 continue; 716 continue;
717 } 717 }
718 718
719 719
720 sos_physmem_unref_physpage(pt[index_ 720 sos_physmem_unref_physpage(pt[index_in_pt].pte.paddr << 12);
721 721
722 722
723 sos_physmem_dec_physpage_occupation( 723 sos_physmem_dec_physpage_occupation(paddr_pt);
724 724
725 725
726 pt[index_in_pt].ui32 = 0; 726 pt[index_in_pt].ui32 = 0;
727 } 727 }
728 728
729 729
730 SOS_ASSERT_FATAL(SOS_OK == sos_paging_un 730 SOS_ASSERT_FATAL(SOS_OK == sos_paging_unmap((sos_vaddr_t)pt));
731 731
732 732
733 pd[index_in_pd].ui32 = 0; 733 pd[index_in_pd].ui32 = 0;
734 734
735 735
736 sos_physmem_unref_physpage(paddr_pt); 736 sos_physmem_unref_physpage(paddr_pt);
737 } 737 }
738 738
739 739
740 SOS_ASSERT_FATAL(SOS_OK == sos_kmem_vmm_free 740 SOS_ASSERT_FATAL(SOS_OK == sos_kmem_vmm_free((sos_vaddr_t)pt));
741 741
742 return SOS_OK; 742 return SOS_OK;
743 } 743 }
744 744
745 745
746 sos_ret_t sos_paging_copy_kernel_space(sos_vad 746 sos_ret_t sos_paging_copy_kernel_space(sos_vaddr_t dest_vaddr_PD,
747 sos_vad 747 sos_vaddr_t src_vaddr_PD)
748 { 748 {
749 x86_pde_val_t *src_pd = (x86_pde_val_t 749 x86_pde_val_t *src_pd = (x86_pde_val_t*) src_vaddr_PD;
750 x86_pde_val_t *dest_pd = (x86_pde_val_t 750 x86_pde_val_t *dest_pd = (x86_pde_val_t*) dest_vaddr_PD;
751 sos_paddr_t dest_paddr_PD = sos_paging_get 751 sos_paddr_t dest_paddr_PD = sos_paging_get_paddr(dest_vaddr_PD);
752 x86_pde_val_t mirror_pde; 752 x86_pde_val_t mirror_pde;
753 int index_in_pd; 753 int index_in_pd;
754 754
755 755
756 memset((void*)dest_vaddr_PD, 0x0, SOS_PAGE_S 756 memset((void*)dest_vaddr_PD, 0x0, SOS_PAGE_SIZE);
757 757
758 758
759 759
760 for (index_in_pd = 0 ; 760 for (index_in_pd = 0 ;
761 index_in_pd < (SOS_PAGING_MIRROR_VADDR 761 index_in_pd < (SOS_PAGING_MIRROR_VADDR >> 22) ;
762 762
763 763
764 index_in_pd ++) 764 index_in_pd ++)
765 { 765 {
766 766
767 dest_pd[index_in_pd].ui32 = src_pd[index 767 dest_pd[index_in_pd].ui32 = src_pd[index_in_pd].ui32;
768 768
769 769
770 770
771 771
772 772
773 773
774 774
775 775
776 776
777 } 777 }
778 778
779 779
780 mirror_pde.ui32 = 0; 780 mirror_pde.ui32 = 0;
781 mirror_pde.pde.present = TRUE; 781 mirror_pde.pde.present = TRUE;
782 mirror_pde.pde.write = 1; 782 mirror_pde.pde.write = 1;
783 mirror_pde.pde.user = 0; 783 mirror_pde.pde.user = 0;
784 mirror_pde.pde.pt_paddr = (dest_paddr_PD >> 784 mirror_pde.pde.pt_paddr = (dest_paddr_PD >> 12);
785 dest_pd[SOS_PAGING_MIRROR_VADDR >> 22].ui32 785 dest_pd[SOS_PAGING_MIRROR_VADDR >> 22].ui32 = mirror_pde.ui32;
786 786
787 return SOS_OK; 787 return SOS_OK;
788 } 788 }
789 789
>> 790
>> 791 sos_ret_t sos_paging_copy_user_space(sos_vaddr_t dest_vaddr_PD,
>> 792 sos_vaddr_t src_vaddr_PD)
>> 793 {
>> 794 x86_pde_val_t *src_pd = (x86_pde_val_t*) src_vaddr_PD;
>> 795 x86_pde_val_t *dest_pd = (x86_pde_val_t*) dest_vaddr_PD;
>> 796 x86_pte_val_t *tmp_src_pt, *tmp_dest_pt;
>> 797 int index_in_pd;
>> 798
>> 799
>> 800
>> 801 tmp_src_pt = (x86_pte_val_t *)sos_kmem_vmm_alloc(1, 0);
>> 802 if (! tmp_src_pt)
>> 803 return -SOS_ENOMEM;
>> 804
>> 805 tmp_dest_pt = (x86_pte_val_t *)sos_kmem_vmm_alloc(1, 0);
>> 806 if (! tmp_dest_pt)
>> 807 {
>> 808 sos_kmem_vmm_free((sos_vaddr_t)tmp_dest_pt);
>> 809 return -SOS_ENOMEM;
>> 810 }
>> 811
>> 812
>> 813 for (index_in_pd = (SOS_PAGING_BASE_USER_ADDRESS >> 22) ;
>> 814 index_in_pd < 1024 ;
>> 815
>> 816
>> 817 index_in_pd ++)
>> 818 {
>> 819 sos_paddr_t paddr_dest_pt;
>> 820 int index_in_pt;
>> 821
>> 822
>> 823
>> 824
>> 825
>> 826
>> 827 dest_pd[index_in_pd].ui32 = src_pd[index_in_pd].ui32;
>> 828
>> 829
>> 830 if (! src_pd[index_in_pd].pde.present)
>> 831 continue;
>> 832
>> 833
>> 834 paddr_dest_pt = sos_physmem_ref_physpage_new(TRUE);
>> 835 if (NULL == (void*)paddr_dest_pt)
>> 836 {
>> 837 sos_paging_dispose((sos_vaddr_t)dest_vaddr_PD);
>> 838
>> 839
>> 840 sos_kmem_vmm_free((sos_vaddr_t)tmp_src_pt);
>> 841 sos_kmem_vmm_free((sos_vaddr_t)tmp_dest_pt);
>> 842 return -SOS_ENOMEM;
>> 843 }
>> 844
>> 845
>> 846 SOS_ASSERT_FATAL(SOS_OK
>> 847 == sos_paging_map(src_pd[index_in_pd].pde.pt_paddr << 12,
>> 848 (sos_vaddr_t)tmp_src_pt, FALSE,
>> 849 SOS_VM_MAP_PROT_READ));
>> 850 SOS_ASSERT_FATAL(SOS_OK
>> 851 == sos_paging_map(paddr_dest_pt,
>> 852 (sos_vaddr_t)tmp_dest_pt, FALSE,
>> 853 SOS_VM_MAP_PROT_READ
>> 854 | SOS_VM_MAP_PROT_WRITE));
>> 855
>> 856
>> 857
>> 858 for (index_in_pt = 0 ; index_in_pt < 1024 ; index_in_pt ++)
>> 859 {
>> 860
>> 861 tmp_dest_pt[index_in_pt].ui32 = tmp_src_pt[index_in_pt].ui32;
>> 862
>> 863
>> 864 if (! tmp_dest_pt[index_in_pt].pte.present)
>> 865 continue;
>> 866
>> 867
>> 868 tmp_dest_pt[index_in_pt].pte.accessed = 0;
>> 869 tmp_dest_pt[index_in_pt].pte.dirty = 0;
>> 870
>> 871
>> 872 sos_physmem_ref_physpage_at(tmp_src_pt[index_in_pt].pte.paddr << 12);
>> 873
>> 874
>> 875 sos_physmem_inc_physpage_occupation(paddr_dest_pt);
>> 876 }
>> 877
>> 878
>> 879 SOS_ASSERT_FATAL(SOS_OK == sos_paging_unmap((sos_vaddr_t)tmp_src_pt));
>> 880 SOS_ASSERT_FATAL(SOS_OK == sos_paging_unmap((sos_vaddr_t)tmp_dest_pt));
>> 881
>> 882
>> 883 dest_pd[index_in_pd].pde.pt_paddr = (paddr_dest_pt >> 12);
>> 884
>> 885
>> 886 dest_pd[index_in_pd].pde.accessed = 0;
>> 887 }
>> 888
>> 889
>> 890
>> 891 SOS_ASSERT_FATAL(SOS_OK == sos_kmem_vmm_free((sos_vaddr_t)tmp_src_pt));
>> 892 SOS_ASSERT_FATAL(SOS_OK == sos_kmem_vmm_free((sos_vaddr_t)tmp_dest_pt));
>> 893
>> 894 return SOS_OK;
>> 895 }
>> 896
>> 897
>> 898 sos_ret_t sos_paging_prepare_COW(sos_uaddr_t base_address,
>> 899 sos_size_t length)
>> 900 {
>> 901 SOS_ASSERT_FATAL(SOS_IS_PAGE_ALIGNED(base_address));
>> 902 SOS_ASSERT_FATAL(SOS_IS_PAGE_ALIGNED(length));
>> 903 SOS_ASSERT_FATAL(SOS_PAGING_BASE_USER_ADDRESS <= base_address);
>> 904
>> 905
>> 906
>> 907 for ( ;
>> 908 length > 0 ;
>> 909 length -= SOS_PAGE_SIZE, base_address += SOS_PAGE_SIZE)
>> 910 {
>> 911 sos_paging_set_prot(base_address,
>> 912 SOS_VM_MAP_PROT_READ);
>> 913 }
>> 914
>> 915 return SOS_OK;
>> 916 }
>> 917
>> 918
>> 919 sos_ret_t sos_paging_try_resolve_COW(sos_uaddr_t uaddr)
>> 920 {
>> 921 sos_ret_t refcnt;
>> 922
>> 923
>> 924
>> 925 unsigned index_in_pd = virt_to_pd_index(uaddr);
>> 926 unsigned index_in_pt = virt_to_pt_index(uaddr);
>> 927
>> 928
>> 929 struct x86_pde *pd = (struct x86_pde*)
>> 930 (SOS_PAGING_MIRROR_VADDR
>> 931 + SOS_PAGE_SIZE*virt_to_pd_index(SOS_PAGING_MIRROR_VADDR));
>> 932
>> 933
>> 934 struct x86_pte * pt = (struct x86_pte*) (SOS_PAGING_MIRROR_VADDR
>> 935 + SOS_PAGE_SIZE*index_in_pd);
>> 936
>> 937
>> 938 if (! pd[index_in_pd].present)
>> 939 return -SOS_EFAULT;
>> 940 if (! pt[index_in_pt].present)
>> 941 return -SOS_EFAULT;
>> 942
>> 943
>> 944 if (! pd[index_in_pd].write)
>> 945 return -SOS_EFAULT;
>> 946
>> 947
>> 948
>> 949 SOS_ASSERT_FATAL(! pt[index_in_pt].write);
>> 950
>> 951
>> 952
>> 953 refcnt = sos_physmem_get_physpage_refcount(pt[index_in_pt].paddr << 12);
>> 954 SOS_ASSERT_FATAL(refcnt > 0);
>> 955
>> 956 if (refcnt == 1)
>> 957 {
>> 958
>> 959
>> 960 pt[index_in_pt].write = 1;
>> 961 invlpg(pt[index_in_pt].paddr << 12);
>> 962 }
>> 963
>> 964
>> 965 else
>> 966 {
>> 967 sos_paddr_t new_ppage;
>> 968 sos_vaddr_t vpage_src, tmp_dest;
>> 969
>> 970
>> 971
>> 972
>> 973 tmp_dest = sos_kmem_vmm_alloc(1, SOS_KMEM_VMM_MAP);
>> 974 if (! tmp_dest)
>> 975 return -SOS_ENOMEM;
>> 976
>> 977
>> 978 vpage_src = SOS_PAGE_ALIGN_INF(uaddr);
>> 979 memcpy((void*)tmp_dest, (void*)vpage_src, SOS_PAGE_SIZE);
>> 980
>> 981
>> 982
>> 983
>> 984 new_ppage = sos_paging_get_paddr(tmp_dest);
>> 985 SOS_ASSERT_FATAL(new_ppage != (sos_vaddr_t)NULL);
>> 986 if (SOS_OK != sos_paging_map(new_ppage, vpage_src,
>> 987 TRUE,
>> 988 SOS_VM_MAP_PROT_READ
>> 989 | SOS_VM_MAP_PROT_WRITE))
>> 990 {
>> 991 sos_kmem_vmm_free(tmp_dest);
>> 992 return -SOS_ENOMEM;
>> 993 }
>> 994
>> 995
>> 996
>> 997 SOS_ASSERT_FATAL(SOS_OK == sos_kmem_vmm_free(tmp_dest));
>> 998 }
>> 999
>> 1000
>> 1001 return SOS_OK;
>> 1002 }