001
002
003
004
005
006
007
008
009
010
011
012
013
014
015
016
017
018 #include <sos/physmem.h>
019 #include <sos/klibc.h>
020 #include <sos/assert.h>
021
022 #include "paging.h"
023
024
025
/*
 * x86 Page Directory Entry (32-bit paging, 4kB pages).
 * Layout is fixed by the CPU — see Intel SDM vol. 3, paging chapter.
 * Do not reorder or resize these bitfields.
 */
struct x86_pde
{
  sos_ui32_t present        :1;  /* 1 = a page table is mapped here */
  sos_ui32_t write          :1;  /* 1 = writes allowed through this entry */
  sos_ui32_t user           :1;  /* 1 = user-mode access allowed */
  sos_ui32_t write_through  :1;  /* cache write policy for the PT */
  sos_ui32_t cache_disabled :1;  /* 1 = PT accesses bypass the cache */
  sos_ui32_t accessed       :1;  /* set by the CPU on first access */
  sos_ui32_t zero           :1;  /* reserved, must be zero */
  sos_ui32_t page_size      :1;  /* 0 = 4kB pages (the only mode used here) */
  sos_ui32_t global_page    :1;  /* ignored at PDE level for 4kB pages */
  sos_ui32_t custom         :3;  /* bits free for OS use */
  sos_ui32_t pt_paddr       :20; /* physical frame number of the page table */
} __attribute__ ((packed));
040
041
042
043
/*
 * x86 Page Table Entry (32-bit paging, 4kB pages).
 * Layout is fixed by the CPU — see Intel SDM vol. 3, paging chapter.
 * Do not reorder or resize these bitfields.
 */
struct x86_pte
{
  sos_ui32_t present        :1;  /* 1 = a physical page is mapped here */
  sos_ui32_t write          :1;  /* 1 = page is writable */
  sos_ui32_t user           :1;  /* 1 = user-mode access allowed */
  sos_ui32_t write_through  :1;  /* cache write policy for the page */
  sos_ui32_t cache_disabled :1;  /* 1 = page accesses bypass the cache */
  sos_ui32_t accessed       :1;  /* set by the CPU on access */
  sos_ui32_t dirty          :1;  /* set by the CPU on write */
  sos_ui32_t zero           :1;  /* reserved, must be zero */
  sos_ui32_t global_page    :1;  /* 1 = not flushed on CR3 reload (needs CR4.PGE) */

  sos_ui32_t custom         :3;  /* bits free for OS use */
  sos_ui32_t paddr          :20; /* physical frame number of the mapped page */
} __attribute__ ((packed));
059
060
061
062
/*
 * Image of the CR3 register (Page Directory Base Register).
 * Layout is fixed by the CPU; loaded into CR3 to install a page directory.
 */
struct x86_pdbr
{
  sos_ui32_t zero1          :3;  /* reserved, must be zero */
  sos_ui32_t write_through  :1;  /* cache write policy for the PD */
  sos_ui32_t cache_disabled :1;  /* 1 = PD accesses bypass the cache */
  sos_ui32_t zero2          :7;  /* reserved, must be zero */
  sos_ui32_t pd_paddr       :20; /* physical frame number of the page directory */
} __attribute__ ((packed));
071
072
073
074
075
076
077
/*
 * Invalidate the TLB entry for the page containing virtual address
 * 'vaddr' (i486+ instruction). The "m" constraint hands the address
 * to gcc as a memory operand so the assembler computes it for us.
 */
#define invlpg(vaddr) \
  do { \
       __asm__ __volatile__("invlpg %0"::"m"(*((unsigned *)(vaddr)))); \
  } while(0)
082
083
084
085
086
087
/*
 * Flush the whole TLB by writing CR3 back to itself (classic x86
 * idiom). Note: entries marked "global" are NOT flushed by a CR3
 * reload when CR4.PGE is enabled.
 */
#define flush_tlb() \
  do { \
        unsigned long tmpreg; \
        asm volatile("movl %%cr3,%0\n\tmovl %0,%%cr3" :"=r" \
                     (tmpreg) : :"memory"); \
  } while (0)
094
095
096
097
098
099
/*
 * Index of the page-directory entry covering 'vaddr': the top 10
 * bits (31..22) of the 32-bit virtual address.
 */
#define virt_to_pd_index(vaddr) \
  ((((unsigned)(vaddr)) >> 22) & 0x3ffU)
102
103
104
105
106
107
/*
 * Index of the page-table entry covering 'vaddr': bits 21..12 of the
 * 32-bit virtual address (page number modulo the 1024 entries of a PT).
 */
#define virt_to_pt_index(vaddr) \
  ((((unsigned)(vaddr)) / 4096U) % 1024U)
110
111
112
113
114
115
/*
 * Byte offset of 'vaddr' inside its page. Assumes SOS_PAGE_MASK is
 * (SOS_PAGE_SIZE - 1), i.e. the low 12 bits for 4kB pages — defined
 * in a project header not visible here; TODO confirm.
 */
#define virt_to_page_offset(vaddr) \
  (((unsigned)(vaddr)) & SOS_PAGE_MASK)
118
119
120
121
122
123
124
/*
 * Map the physical page 'ppage' at virtual address 'vaddr' in the
 * page directory 'pd', allocating a page table on demand.
 *
 * Used only by sos_paging_setup(), i.e. BEFORE paging is enabled:
 * physical addresses are therefore dereferenced directly as pointers
 * (identity access), which would be invalid once the MMU is on.
 *
 * Returns SOS_OK, or -SOS_ENOMEM if a page table cannot be allocated.
 * Mapping an address twice is a fatal error (SOS_ASSERT_FATAL below).
 */
static sos_ret_t paging_setup_map_helper(struct x86_pde * pd,
                                         sos_paddr_t ppage,
                                         sos_vaddr_t vaddr)
{
  /* Locate the PDE and PTE slots for this virtual address */
  unsigned index_in_pd = virt_to_pd_index(vaddr);
  unsigned index_in_pt = virt_to_pt_index(vaddr);

  /* The page table covering vaddr, either existing or freshly allocated */
  struct x86_pte * pt;
  if (pd[index_in_pd].present)
    {
      /* PT already exists: recover its (physical) address from the PDE.
         Direct dereference is OK — paging is not enabled yet. */
      pt = (struct x86_pte*) (pd[index_in_pd].pt_paddr << 12);

      /* Take one reference on the PT page per PTE put into it; the
         PT can be reclaimed when this count drops back (see the
         matching unrefs in sos_paging_unmap). */
      if (! pt[index_in_pt].present)
        sos_physmem_ref_physpage_at((sos_paddr_t)pt);

      /* Double-mapping the same vaddr during setup is a kernel bug */
      else
        SOS_ASSERT_FATAL(FALSE);
    }
  else
    {
      /* No PT yet: allocate one (FALSE presumably means "cannot
         block" here — TODO confirm against sos_physmem API) */
      pt = (struct x86_pte*) sos_physmem_ref_physpage_new(FALSE);
      if (! pt)
        return -SOS_ENOMEM;

      /* Start with all PTEs non-present */
      memset((void*)pt, 0x0, SOS_PAGE_SIZE);

      pd[index_in_pd].present = TRUE;
      pd[index_in_pd].write   = 1;

      /* Kernel-only mapping (user bit left 0 by the memset of the PD
         done by the caller); store the PT frame number in the PDE */
      pd[index_in_pd].pt_paddr = ((sos_paddr_t)pt) >> 12;
    }

  /* Install the final mapping: supervisor-only, writable */
  pt[index_in_pt].present = 1;
  pt[index_in_pt].write   = 1;

  pt[index_in_pt].user  = 0;
  pt[index_in_pt].paddr = ppage >> 12;

  return SOS_OK;
}
183
184
185 sos_ret_t sos_paging_setup(sos_paddr_t identity_mapping_base,
186 sos_paddr_t identity_mapping_top)
187 {
188
189 struct x86_pdbr cr3;
190
191
192 struct x86_pde * pd
193 = (struct x86_pde*) sos_physmem_ref_physpage_new(FALSE);
194
195
196 sos_paddr_t paddr;
197
198
199
200 memset((void*)pd,
201 0x0,
202 SOS_PAGE_SIZE);
203
204
205 for (paddr = identity_mapping_base ;
206 paddr < identity_mapping_top ;
207 paddr += SOS_PAGE_SIZE)
208 {
209 if (paging_setup_map_helper(pd, paddr, paddr))
210 return -SOS_ENOMEM;
211 }
212
213
214 for (paddr = BIOS_N_VIDEO_START ;
215 paddr < BIOS_N_VIDEO_END ;
216 paddr += SOS_PAGE_SIZE)
217 {
218 if (paging_setup_map_helper(pd, paddr, paddr))
219 return -SOS_ENOMEM;
220 }
221
222
223
224 pd[virt_to_pd_index(SOS_PAGING_MIRROR_VADDR)].present = TRUE;
225 pd[virt_to_pd_index(SOS_PAGING_MIRROR_VADDR)].write = 1;
226 pd[virt_to_pd_index(SOS_PAGING_MIRROR_VADDR)].user = 0;
227 pd[virt_to_pd_index(SOS_PAGING_MIRROR_VADDR)].pt_paddr
228 = ((sos_paddr_t)pd)>>12;
229
230
231
232 memset(& cr3, 0x0, sizeof(struct x86_pdbr));
233 cr3.pd_paddr = ((sos_paddr_t)pd) >> 12;
234
235
236
237
238 asm volatile ("movl %0,%%cr3\n\t"
239 "movl %%cr0,%%eax\n\t"
240 "orl $0x80010000, %%eax\n\t"
241 "movl %%eax,%%cr0\n\t"
242 "jmp 1f\n\t"
243 "1:\n\t"
244 "movl $2f, %%eax\n\t"
245 "jmp *%%eax\n\t"
246 "2:\n\t" ::"r"(cr3):"memory","eax");
247
248
249
250
251
252
253
254
255
256 return SOS_OK;
257 }
258
259
260
261
/*
 * Map the physical page 'ppage_paddr' at virtual address
 * 'vpage_vaddr' in the CURRENT address space, allocating a page
 * table on demand. Runs with paging enabled: page tables are reached
 * through the mirror window set up in sos_paging_setup().
 *
 * Takes one reference on the mapped physical page, and one reference
 * on the PT page per live PTE (so the PT can be reclaimed at unmap).
 * Remapping an already-mapped vaddr releases the previous target page.
 *
 * Returns SOS_OK, -SOS_EINVAL if vaddr falls inside the mirror
 * window, or -SOS_ENOMEM if a PT cannot be allocated.
 */
sos_ret_t sos_paging_map(sos_paddr_t ppage_paddr,
                         sos_vaddr_t vpage_vaddr,
                         sos_bool_t is_user_page,
                         int flags)
{
  /* PDE/PTE slots for this virtual address */
  unsigned index_in_pd = virt_to_pd_index(vpage_vaddr);
  unsigned index_in_pt = virt_to_pt_index(vpage_vaddr);

  /* Virtual address of the PD itself: the mirror page whose index
     equals the mirror's own PD index maps the PD */
  struct x86_pde *pd = (struct x86_pde*)
    (SOS_PAGING_MIRROR_VADDR
     + SOS_PAGE_SIZE*virt_to_pd_index(SOS_PAGING_MIRROR_VADDR));

  /* Virtual address (inside the mirror) of the PT covering vpage_vaddr */
  struct x86_pte * pt = (struct x86_pte*) (SOS_PAGING_MIRROR_VADDR
                                           + SOS_PAGE_SIZE*index_in_pd);

  /* The mirror window itself must never be remapped */
  if ((vpage_vaddr >= SOS_PAGING_MIRROR_VADDR)
      && (vpage_vaddr < SOS_PAGING_MIRROR_VADDR + SOS_PAGING_MIRROR_SIZE))
    return -SOS_EINVAL;

  /* No PT yet for this 4MB region: allocate and install one */
  if (! pd[index_in_pd].present)
    {
      /* May block only when the caller did not ask for an atomic mapping */
      sos_paddr_t pt_ppage
        = sos_physmem_ref_physpage_new(! (flags & SOS_VM_MAP_ATOMIC));
      if (! pt_ppage)
        {
          return -SOS_ENOMEM;
        }

      pd[index_in_pd].present  = TRUE;
      pd[index_in_pd].write    = 1;

      /* |= : never downgrade a PDE that already allows user access */
      pd[index_in_pd].user    |= (is_user_page)?1:0;
      pd[index_in_pd].pt_paddr = ((sos_paddr_t)pt_ppage) >> 12;

      /* The mirror address 'pt' now maps the new PT — drop any stale
         TLB entry BEFORE touching it */
      invlpg(pt);

      /* Fresh PT: all entries non-present */
      memset((void*)pt, 0x0, SOS_PAGE_SIZE);
    }

  /* PT exists but this slot is free: account one more live PTE on
     the PT page (balanced by the unrefs in sos_paging_unmap) */
  else if (! pt[index_in_pt].present)
    sos_physmem_ref_physpage_at(pd[index_in_pd].pt_paddr << 12);

  /* Slot already used: this is a remap — release the page that was
     mapped here before */
  else
    sos_physmem_unref_physpage(pt[index_in_pt].paddr << 12);

  /* Install the new translation and reference its target page */
  pt[index_in_pt].present = TRUE;
  pt[index_in_pt].write   = (flags & SOS_VM_MAP_PROT_WRITE)?1:0;
  pt[index_in_pt].user    = (is_user_page)?1:0;
  pt[index_in_pt].paddr   = ppage_paddr >> 12;
  sos_physmem_ref_physpage_at(ppage_paddr);

  /* Drop the stale TLB entry for the remapped virtual page */
  invlpg(vpage_vaddr);

  return SOS_OK;
}
340
341
/*
 * Remove the mapping at virtual address 'vpage_vaddr' in the current
 * address space: release the target physical page, clear the PTE,
 * and reclaim the page table itself when its last PTE goes away.
 *
 * Returns SOS_OK, or -SOS_EINVAL when the address is not mapped or
 * falls inside the mirror window.
 */
sos_ret_t sos_paging_unmap(sos_vaddr_t vpage_vaddr)
{
  sos_ret_t pt_unref_retval;

  /* PDE/PTE slots for this virtual address */
  unsigned index_in_pd = virt_to_pd_index(vpage_vaddr);
  unsigned index_in_pt = virt_to_pt_index(vpage_vaddr);

  /* Virtual address of the PD, through the mirror (see sos_paging_map) */
  struct x86_pde *pd = (struct x86_pde*)
    (SOS_PAGING_MIRROR_VADDR
     + SOS_PAGE_SIZE*virt_to_pd_index(SOS_PAGING_MIRROR_VADDR));

  /* Virtual address (inside the mirror) of the PT covering vpage_vaddr */
  struct x86_pte * pt = (struct x86_pte*) (SOS_PAGING_MIRROR_VADDR
                                           + SOS_PAGE_SIZE*index_in_pd);

  /* Nothing mapped here — nothing to undo */
  if (! pd[index_in_pd].present)
    return -SOS_EINVAL;
  if (! pt[index_in_pt].present)
    return -SOS_EINVAL;

  /* Refuse to unmap the mirror window itself */
  if ((vpage_vaddr >= SOS_PAGING_MIRROR_VADDR)
      && (vpage_vaddr < SOS_PAGING_MIRROR_VADDR + SOS_PAGING_MIRROR_SIZE))
    return -SOS_EINVAL;

  /* Release the physical page this PTE pointed to */
  sos_physmem_unref_physpage(pt[index_in_pt].paddr << 12);

  /* Clear the whole PTE (present bit included) */
  memset(pt + index_in_pt, 0x0, sizeof(struct x86_pte));

  /* Drop the stale TLB entry for the unmapped page */
  invlpg(vpage_vaddr);

  /* Drop the per-PTE reference taken on the PT page at map time. A
     positive return presumably means the PT page just became free —
     TODO confirm sos_physmem_unref_physpage's return convention. */
  pt_unref_retval = sos_physmem_unref_physpage(pd[index_in_pd].pt_paddr << 12);
  SOS_ASSERT_FATAL(pt_unref_retval >= 0);
  if (pt_unref_retval > 0)
    /* That was the last PTE: retire the page table entirely */
    {
      /* Clear the PDE so the region is seen as unmapped */
      memset(pd + index_in_pd, 0x0, sizeof(struct x86_pde));

      /* The mirror page that exposed this PT is gone too */
      invlpg(pt);
    }

  return SOS_OK;
}
395
396
397 int sos_paging_get_prot(sos_vaddr_t vaddr)
398 {
399 int retval;
400
401
402
403 unsigned index_in_pd = virt_to_pd_index(vaddr);
404 unsigned index_in_pt = virt_to_pt_index(vaddr);
405
406
407 struct x86_pde *pd = (struct x86_pde*)
408 (SOS_PAGING_MIRROR_VADDR
409 + SOS_PAGE_SIZE*virt_to_pd_index(SOS_PAGING_MIRROR_VADDR));
410
411
412 struct x86_pte * pt = (struct x86_pte*) (SOS_PAGING_MIRROR_VADDR
413 + SOS_PAGE_SIZE*index_in_pd);
414
415
416 if (! pd[index_in_pd].present)
417 return SOS_VM_MAP_PROT_NONE;
418 if (! pt[index_in_pt].present)
419 return SOS_VM_MAP_PROT_NONE;
420
421
422 retval = SOS_VM_MAP_PROT_READ;
423 if (pd[index_in_pd].write && pt[index_in_pt].write)
424 retval |= SOS_VM_MAP_PROT_WRITE;
425
426 return retval;
427 }
428
429
430 sos_paddr_t sos_paging_get_paddr(sos_vaddr_t vaddr)
431 {
432
433
434 unsigned index_in_pd = virt_to_pd_index(vaddr);
435 unsigned index_in_pt = virt_to_pt_index(vaddr);
436 unsigned offset_in_page = virt_to_page_offset(vaddr);
437
438
439 struct x86_pde *pd = (struct x86_pde*)
440 (SOS_PAGING_MIRROR_VADDR
441 + SOS_PAGE_SIZE*virt_to_pd_index(SOS_PAGING_MIRROR_VADDR));
442
443
444 struct x86_pte * pt = (struct x86_pte*) (SOS_PAGING_MIRROR_VADDR
445 + SOS_PAGE_SIZE*index_in_pd);
446
447
448 if (! pd[index_in_pd].present)
449 return (sos_paddr_t)NULL;
450 if (! pt[index_in_pt].present)
451 return (sos_paddr_t)NULL;
452
453 return (pt[index_in_pt].paddr << 12) + offset_in_page;
454 }
455