001
002
003
004
005
006
007
008
009
010
011
012
013
014
015
016
017
018 #include <sos/physmem.h>
019 #include <sos/klibc.h>
020 #include <sos/assert.h>
021
022 #include "paging.h"
023
024
025
026
/*
 * IA-32 Page Directory Entry (PDE) for 4kB pages: one 32-bit word
 * whose bit layout is fixed by the processor (Intel SDM vol. 3,
 * "Paging"). Low-order bits are flags, high 20 bits are the physical
 * page number of the page table.
 */
struct x86_pde
{
sos_ui32_t present :1;        /* 1 = the page table is present in RAM */
sos_ui32_t write :1;          /* 1 = writes allowed through this PDE */
sos_ui32_t user :1;           /* 1 = user-mode access allowed */
sos_ui32_t write_through :1;  /* PWT: write-through caching for the PT */
sos_ui32_t cache_disabled :1; /* PCD: disable caching of the PT */
sos_ui32_t accessed :1;       /* set by the CPU on access */
sos_ui32_t zero :1;           /* must be zero */
sos_ui32_t page_size :1;      /* 0 = 4kB pages (1 would mean 4MB page) */
sos_ui32_t global_page :1;    /* ignored in a PDE for 4kB pages */
sos_ui32_t custom :3;         /* available for OS use */
sos_ui32_t pt_paddr :20;      /* physical page number of the page table */
} __attribute__ ((packed));
041
042
043
044
/*
 * IA-32 Page Table Entry (PTE) for a 4kB page: flags in the low bits,
 * physical page number of the mapped page in the high 20 bits
 * (Intel SDM vol. 3, "Paging").
 */
struct x86_pte
{
sos_ui32_t present :1;        /* 1 = the page is mapped in RAM */
sos_ui32_t write :1;          /* 1 = writes allowed to this page */
sos_ui32_t user :1;           /* 1 = user-mode access allowed */
sos_ui32_t write_through :1;  /* PWT: write-through caching */
sos_ui32_t cache_disabled :1; /* PCD: disable caching */
sos_ui32_t accessed :1;       /* set by the CPU on access */
sos_ui32_t dirty :1;          /* set by the CPU on write */
sos_ui32_t zero :1;           /* must be zero here */
sos_ui32_t global_page :1;    /* 1 = not flushed from TLB on CR3 reload */

sos_ui32_t custom :3;         /* available for OS use */
sos_ui32_t paddr :20;         /* physical page number of the mapped page */
} __attribute__ ((packed));
060
061
062
063
/*
 * Layout of the CR3 register (Page Directory Base Register): cache
 * control flags plus the physical page number of the page directory.
 */
struct x86_pdbr
{
sos_ui32_t zero1 :3;          /* ignored by the CPU */
sos_ui32_t write_through :1;  /* PWT: write-through caching of the PD */
sos_ui32_t cache_disabled :1; /* PCD: disable caching of the PD */
sos_ui32_t zero2 :7;          /* ignored by the CPU */
sos_ui32_t pd_paddr :20;      /* physical page number of the PD */
} __attribute__ ((packed));
072
073
074
075
076
077
078
/*
 * Invalidate the TLB entry for the page containing virtual address
 * 'vaddr' (x86 "invlpg" instruction). Must be used after changing the
 * PTE of an address that may be cached in the TLB.
 */
#define invlpg(vaddr) \
do { \
__asm__ __volatile__("invlpg %0"::"m"(*((unsigned *)(vaddr)))); \
} while(0)
083
084
085
086
087
088
/*
 * Flush the whole TLB (all non-global entries) by reloading CR3 with
 * its current value.
 */
#define flush_tlb() \
do { \
unsigned long tmpreg; \
asm volatile("movl %%cr3,%0\n\tmovl %0,%%cr3" :"=r" \
(tmpreg) : :"memory"); \
} while (0)
095
096
097
098
099
100
/* Index (0..1023) of the PDE covering 'vaddr': bits 31..22 */
#define virt_to_pd_index(vaddr) \
(((unsigned)(vaddr)) >> 22)
103
104
105
106
107
108
/* Index (0..1023) of the PTE inside its page table: bits 21..12 */
#define virt_to_pt_index(vaddr) \
( (((unsigned)(vaddr)) >> 12) & 0x3ff )
111
112
113
114
115
116
/* Offset of 'vaddr' inside its page (SOS_PAGE_MASK presumably selects
   the low-order offset bits -- defined with SOS_PAGE_SIZE elsewhere) */
#define virt_to_page_offset(vaddr) \
(((unsigned)(vaddr)) & SOS_PAGE_MASK)
119
120
121
122
123
124
125
/*
 * Boot-time helper for sos_paging_subsystem_setup(): map physical page
 * 'ppage' at virtual address 'vaddr' in the page directory 'pd'.
 *
 * Paging is NOT yet enabled when this runs, so page tables are
 * addressed directly through their physical addresses.
 *
 * @return SOS_OK on success, -SOS_ENOMEM when a new page table cannot
 *         be allocated.
 */
static sos_ret_t paging_setup_map_helper(struct x86_pde * pd,
sos_paddr_t ppage,
sos_vaddr_t vaddr)
{
/* Indexes of the PDE and PTE concerned by vaddr */
unsigned index_in_pd = virt_to_pd_index(vaddr);
unsigned index_in_pt = virt_to_pt_index(vaddr);

struct x86_pte * pt;
if (pd[index_in_pd].present)
{
/* The page table already exists: its physical address (== virtual
   address, paging being off) is stored in the PDE */
pt = (struct x86_pte*) (pd[index_in_pd].pt_paddr << 12);

/* Account one more PTE in use by taking a reference on the PT's
   physical page (presumably one reference per PTE in use -- see
   sos_physmem) */
if (! pt[index_in_pt].present)
sos_physmem_ref_physpage_at((sos_paddr_t)pt);

/* Re-mapping an already-mapped page is forbidden during setup */
else
SOS_ASSERT_FATAL(FALSE);
}
else
{
/* No page table covers this 4MB region yet: allocate one */
pt = (struct x86_pte*) sos_physmem_ref_physpage_new(FALSE);
if (! pt)
return -SOS_ENOMEM;

/* Fresh PT: mark every PTE as non-present */
memset((void*)pt, 0x0, SOS_PAGE_SIZE);

pd[index_in_pd].present = TRUE;
pd[index_in_pd].write = 1;

/* Register the new PT's physical page number in the PDE */
pd[index_in_pd].pt_paddr = ((sos_paddr_t)pt) >> 12;
}

/* Fill in the PTE: supervisor-only, writable mapping of ppage */
pt[index_in_pt].present = 1;
pt[index_in_pt].write = 1;

pt[index_in_pt].user = 0;
pt[index_in_pt].paddr = ppage >> 12;

return SOS_OK;
}
184
185
186 sos_ret_t sos_paging_subsystem_setup(sos_paddr_t identity_mapping_base,
187 sos_paddr_t identity_mapping_top)
188 {
189
190 struct x86_pdbr cr3;
191
192
193 struct x86_pde * pd
194 = (struct x86_pde*) sos_physmem_ref_physpage_new(FALSE);
195
196
197 sos_paddr_t paddr;
198
199
200
201 memset((void*)pd,
202 0x0,
203 SOS_PAGE_SIZE);
204
205
206 for (paddr = identity_mapping_base ;
207 paddr < identity_mapping_top ;
208 paddr += SOS_PAGE_SIZE)
209 {
210 if (paging_setup_map_helper(pd, paddr, paddr))
211 return -SOS_ENOMEM;
212 }
213
214
215 for (paddr = BIOS_N_VIDEO_START ;
216 paddr < BIOS_N_VIDEO_END ;
217 paddr += SOS_PAGE_SIZE)
218 {
219 if (paging_setup_map_helper(pd, paddr, paddr))
220 return -SOS_ENOMEM;
221 }
222
223
224
225 pd[virt_to_pd_index(SOS_PAGING_MIRROR_VADDR)].present = TRUE;
226 pd[virt_to_pd_index(SOS_PAGING_MIRROR_VADDR)].write = 1;
227 pd[virt_to_pd_index(SOS_PAGING_MIRROR_VADDR)].user = 0;
228 pd[virt_to_pd_index(SOS_PAGING_MIRROR_VADDR)].pt_paddr
229 = ((sos_paddr_t)pd)>>12;
230
231
232
233 memset(& cr3, 0x0, sizeof(struct x86_pdbr));
234 cr3.pd_paddr = ((sos_paddr_t)pd) >> 12;
235
236
237
238
239 asm volatile ("movl %0,%%cr3\n\t"
240 "movl %%cr0,%%eax\n\t"
241 "orl $0x80010000, %%eax\n\t"
242 "movl %%eax,%%cr0\n\t"
243 "jmp 1f\n\t"
244 "1:\n\t"
245 "movl $2f, %%eax\n\t"
246 "jmp *%%eax\n\t"
247 "2:\n\t" ::"r"(cr3):"memory","eax");
248
249
250
251
252
253
254
255
256
257 return SOS_OK;
258 }
259
260
261
262
/*
 * Map physical page 'ppage_paddr' at virtual address 'vpage_vaddr' in
 * the CURRENT address space (paging is on: PD and PTs are reached
 * through the mirror window). If a mapping already exists at
 * vpage_vaddr, it is silently replaced.
 *
 * @param ppage_paddr  physical address of the page to map
 * @param vpage_vaddr  virtual address to map it at
 * @param is_user_page user-mode access allowed when TRUE
 * @param flags        SOS_VM_MAP_* flags (write protection, ATOMIC)
 * @return SOS_OK, -SOS_EINVAL for addresses inside the mirror region,
 *         -SOS_ENOMEM when a new page table cannot be allocated
 */
sos_ret_t sos_paging_map(sos_paddr_t ppage_paddr,
sos_vaddr_t vpage_vaddr,
sos_bool_t is_user_page,
sos_ui32_t flags)
{
/* Indexes of the PDE and PTE concerned by vpage_vaddr */
unsigned index_in_pd = virt_to_pd_index(vpage_vaddr);
unsigned index_in_pt = virt_to_pt_index(vpage_vaddr);

/* Virtual address of the PD through the mirror: the mirror PDE maps
   the PD as a page table, so the PD shows up at the mirror slot
   corresponding to the mirror region itself */
struct x86_pde *pd = (struct x86_pde*)
(SOS_PAGING_MIRROR_VADDR
+ SOS_PAGE_SIZE*virt_to_pd_index(SOS_PAGING_MIRROR_VADDR));

/* Virtual address, through the mirror, of the PT covering vpage_vaddr */
struct x86_pte * pt = (struct x86_pte*) (SOS_PAGING_MIRROR_VADDR
+ SOS_PAGE_SIZE*index_in_pd);

/* Remapping anything inside the mirror region is forbidden */
if ((vpage_vaddr >= SOS_PAGING_MIRROR_VADDR)
&& (vpage_vaddr < SOS_PAGING_MIRROR_VADDR + SOS_PAGING_MIRROR_SIZE))
return -SOS_EINVAL;

/* No PT covers this 4MB region yet: allocate one */
if (! pd[index_in_pd].present)
{
/* The allocation may block unless SOS_VM_MAP_ATOMIC is requested */
sos_paddr_t pt_ppage
= sos_physmem_ref_physpage_new(! (flags & SOS_VM_MAP_ATOMIC));
if (! pt_ppage)
{
return -SOS_ENOMEM;
}

pd[index_in_pd].present = TRUE;
pd[index_in_pd].write = 1;

/* PDE-level privilege follows the request; per-page protection is
   refined at PTE level below */
pd[index_in_pd].user = (is_user_page)?1:0;
pd[index_in_pd].pt_paddr = ((sos_paddr_t)pt_ppage) >> 12;

/* The PDE just changed, so the mirror view of the new PT may be
   stale in the TLB: invalidate it BEFORE touching the PT */
invlpg(pt);

/* Fresh PT: mark every PTE as non-present */
memset((void*)pt, 0x0, SOS_PAGE_SIZE);
}

/* PT exists and the PTE is free: account one more PTE in use by
   taking a reference on the PT's physical page */
else if (! pt[index_in_pt].present)
sos_physmem_ref_physpage_at(pd[index_in_pd].pt_paddr << 12);

/* PTE already in use: drop the reference on the previously mapped
   physical page (the PT refcount is unchanged, the number of PTEs
   in use stays the same) */
else
sos_physmem_unref_physpage(pt[index_in_pt].paddr << 12);

/* Install the new translation */
pt[index_in_pt].present = TRUE;
pt[index_in_pt].write = (flags & SOS_VM_MAP_PROT_WRITE)?1:0;
pt[index_in_pt].user = (is_user_page)?1:0;
pt[index_in_pt].paddr = ppage_paddr >> 12;
/* Take a reference on the physical page now mapped */
sos_physmem_ref_physpage_at(ppage_paddr);

/* A stale translation for vpage_vaddr may sit in the TLB:
   invalidate it */
invlpg(vpage_vaddr);

return SOS_OK;
}
342
343
/*
 * Remove the mapping of virtual page 'vpage_vaddr' from the current
 * address space: drop the reference on the mapped physical page,
 * clear the PTE, and release the page table itself when it no longer
 * holds any active PTE.
 *
 * @return SOS_OK on success, -SOS_EINVAL when the address is not
 *         mapped or lies inside the mirror region
 */
sos_ret_t sos_paging_unmap(sos_vaddr_t vpage_vaddr)
{
sos_ret_t pt_unref_retval;

/* Indexes of the PDE and PTE concerned by vpage_vaddr */
unsigned index_in_pd = virt_to_pd_index(vpage_vaddr);
unsigned index_in_pt = virt_to_pt_index(vpage_vaddr);

/* The PD, seen through the mirror window (see sos_paging_map) */
struct x86_pde *pd = (struct x86_pde*)
(SOS_PAGING_MIRROR_VADDR
+ SOS_PAGE_SIZE*virt_to_pd_index(SOS_PAGING_MIRROR_VADDR));

/* The PT covering vpage_vaddr, seen through the mirror window */
struct x86_pte * pt = (struct x86_pte*) (SOS_PAGING_MIRROR_VADDR
+ SOS_PAGE_SIZE*index_in_pd);

/* Nothing mapped at this address: nothing to undo */
if (! pd[index_in_pd].present)
return -SOS_EINVAL;
if (! pt[index_in_pt].present)
return -SOS_EINVAL;

/* The mirror region must never be unmapped */
if ((vpage_vaddr >= SOS_PAGING_MIRROR_VADDR)
&& (vpage_vaddr < SOS_PAGING_MIRROR_VADDR + SOS_PAGING_MIRROR_SIZE))
return -SOS_EINVAL;

/* Drop the reference taken on the physical page by sos_paging_map() */
sos_physmem_unref_physpage(pt[index_in_pt].paddr << 12);

/* Clear the whole PTE, present bit included */
memset(pt + index_in_pt, 0x0, sizeof(struct x86_pte));

/* Invalidate the stale TLB entry for the unmapped page */
invlpg(vpage_vaddr);

/* One less PTE in use in this PT: drop one reference on the PT's
   physical page. NOTE(review): the return value presumably is TRUE
   when this was the last reference and the PT page was actually
   freed -- confirm against sos_physmem_unref_physpage() */
pt_unref_retval = sos_physmem_unref_physpage(pd[index_in_pd].pt_paddr << 12);
SOS_ASSERT_FATAL(pt_unref_retval >= 0);
if (pt_unref_retval == TRUE)

{
/* The PT was released: clear the PDE. The union lets us write the
   whole 32-bit entry in one store instead of one store per
   bitfield */
union { struct x86_pde pde; sos_ui32_t ui32; } u;

u.ui32 = 0;

pd[index_in_pd] = u.pde;

/* The mirror view of the (now freed) PT is stale: invalidate it */
invlpg(pt);
}

return SOS_OK;
}
406
407
408 int sos_paging_get_prot(sos_vaddr_t vaddr)
409 {
410 int retval;
411
412
413
414 unsigned index_in_pd = virt_to_pd_index(vaddr);
415 unsigned index_in_pt = virt_to_pt_index(vaddr);
416
417
418 struct x86_pde *pd = (struct x86_pde*)
419 (SOS_PAGING_MIRROR_VADDR
420 + SOS_PAGE_SIZE*virt_to_pd_index(SOS_PAGING_MIRROR_VADDR));
421
422
423 struct x86_pte * pt = (struct x86_pte*) (SOS_PAGING_MIRROR_VADDR
424 + SOS_PAGE_SIZE*index_in_pd);
425
426
427 if (! pd[index_in_pd].present)
428 return SOS_VM_MAP_PROT_NONE;
429 if (! pt[index_in_pt].present)
430 return SOS_VM_MAP_PROT_NONE;
431
432
433 retval = SOS_VM_MAP_PROT_READ;
434 if (pd[index_in_pd].write && pt[index_in_pt].write)
435 retval |= SOS_VM_MAP_PROT_WRITE;
436
437 return retval;
438 }
439
440
441 sos_paddr_t sos_paging_get_paddr(sos_vaddr_t vaddr)
442 {
443
444
445 unsigned index_in_pd = virt_to_pd_index(vaddr);
446 unsigned index_in_pt = virt_to_pt_index(vaddr);
447 unsigned offset_in_page = virt_to_page_offset(vaddr);
448
449
450 struct x86_pde *pd = (struct x86_pde*)
451 (SOS_PAGING_MIRROR_VADDR
452 + SOS_PAGE_SIZE*virt_to_pd_index(SOS_PAGING_MIRROR_VADDR));
453
454
455 struct x86_pte * pt = (struct x86_pte*) (SOS_PAGING_MIRROR_VADDR
456 + SOS_PAGE_SIZE*index_in_pd);
457
458
459 if (! pd[index_in_pd].present)
460 return (sos_paddr_t)NULL;
461 if (! pt[index_in_pt].present)
462 return (sos_paddr_t)NULL;
463
464 return (pt[index_in_pt].paddr << 12) + offset_in_page;
465 }