001
002
003
004
005
006
007
008
009
010
011
012
013
014
015
016
017
018
019
020 #include <sos/list.h>
021 #include <sos/physmem.h>
022 #include <hwcore/paging.h>
023 #include <sos/assert.h>
024
025 #include "kmem_vmm.h"
026
027
/*
 * One contiguous range of kernel virtual pages. Ranges live on exactly
 * one of the two sorted lists below (free or used), linked through the
 * embedded prev/next pointers.
 */
struct sos_kmem_range
{
  sos_vaddr_t base_vaddr;   /* Page-aligned start of the range */
  sos_count_t nb_pages;     /* Length of the range, in pages */

  /* The slab owning this range, or NULL (only meaningful for used
     ranges; see sos_kmem_vmm_set_slab / sos_kmem_vmm_resolve_slab). */
  struct sos_kslab *slab;

  struct sos_kmem_range *prev, *next;  /* Links in the free/used list */
};
/* Exported so the slab cache code can size its objects without seeing
   the struct definition. */
const int sizeof_struct_sos_kmem_range = sizeof(struct sos_kmem_range);


/* Both lists are kept sorted by increasing base_vaddr
   (see insert_range / get_closest_preceding_kmem_range). */
static struct sos_kmem_range *kmem_free_range_list, *kmem_used_range_list;

/* Slab cache from which struct sos_kmem_range objects are allocated. */
static struct sos_kslab_cache *kmem_range_cache;
045
046
047
048
049
050 static struct sos_kmem_range *
051 get_closest_preceding_kmem_range(struct sos_kmem_range *the_list,
052 sos_vaddr_t vaddr)
053 {
054 int nb_elements;
055 struct sos_kmem_range *a_range, *ret_range;
056
057
058
059 ret_range = NULL;
060 list_foreach(the_list, a_range, nb_elements)
061 {
062 if (vaddr < a_range->base_vaddr)
063 return ret_range;
064 ret_range = a_range;
065 }
066
067
068 return ret_range;
069 }
070
071
072
073
074
075
076 static struct sos_kmem_range *find_suitable_free_range(sos_count_t nb_pages)
077 {
078 int nb_elements;
079 struct sos_kmem_range *r;
080
081 list_foreach(kmem_free_range_list, r, nb_elements)
082 {
083 if (r->nb_pages >= nb_pages)
084 return r;
085 }
086
087 return NULL;
088 }
089
090
091
092
093
094
095
096 static struct sos_kmem_range *insert_range(struct sos_kmem_range *the_list,
097 struct sos_kmem_range *a_range)
098 {
099 struct sos_kmem_range *prec_used;
100
101
102 prec_used = get_closest_preceding_kmem_range(the_list,
103 a_range->base_vaddr);
104
105 if (prec_used != NULL)
106 list_insert_after(the_list, prec_used, a_range);
107 else
108 list_add_head(the_list, a_range);
109
110 return the_list;
111 }
112
113
114
115
116
117
118 static struct sos_kmem_range *lookup_range(sos_vaddr_t vaddr)
119 {
120 struct sos_kmem_range *range;
121
122
123 sos_paddr_t ppage_paddr = SOS_PAGE_ALIGN_INF(sos_paging_get_paddr(vaddr));
124
125 if (ppage_paddr)
126 {
127 range = sos_physmem_get_kmem_range(ppage_paddr);
128
129
130
131 SOS_ASSERT_FATAL(range != NULL);
132 }
133
134
135
136 else
137 {
138 range = get_closest_preceding_kmem_range(kmem_used_range_list,
139 vaddr);
140
141 if (! range)
142 return NULL;
143
144
145 if ( (vaddr < range->base_vaddr)
146 || (vaddr >= (range->base_vaddr + range->nb_pages*SOS_PAGE_SIZE)) )
147 return NULL;
148 }
149
150 return range;
151 }
152
153
154
155
156
157
158
159 static struct sos_kmem_range *
160 create_range(sos_bool_t is_free,
161 sos_vaddr_t base_vaddr,
162 sos_vaddr_t top_vaddr,
163 struct sos_kslab *associated_slab)
164 {
165 struct sos_kmem_range *range;
166
167 SOS_ASSERT_FATAL(SOS_IS_PAGE_ALIGNED(base_vaddr));
168 SOS_ASSERT_FATAL(SOS_IS_PAGE_ALIGNED(top_vaddr));
169
170 if ((top_vaddr - base_vaddr) < SOS_PAGE_SIZE)
171 return NULL;
172
173 range = (struct sos_kmem_range*)sos_kmem_cache_alloc(kmem_range_cache,
174 SOS_KSLAB_ALLOC_ATOMIC);
175 SOS_ASSERT_FATAL(range != NULL);
176
177 range->base_vaddr = base_vaddr;
178 range->nb_pages = (top_vaddr - base_vaddr) / SOS_PAGE_SIZE;
179
180 if (is_free)
181 {
182 list_add_tail(kmem_free_range_list,
183 range);
184 }
185 else
186 {
187 sos_vaddr_t vaddr;
188 range->slab = associated_slab;
189 list_add_tail(kmem_used_range_list,
190 range);
191
192
193 for (vaddr = base_vaddr ;
194 vaddr < top_vaddr ;
195 vaddr += SOS_PAGE_SIZE)
196 {
197 sos_paddr_t ppage_paddr = sos_paging_get_paddr(vaddr);
198 SOS_ASSERT_FATAL((void*)ppage_paddr != NULL);
199 sos_physmem_set_kmem_range(ppage_paddr, range);
200 }
201 }
202
203 return range;
204 }
205
206
/*
 * Initialize the kernel virtual-memory range manager. The six
 * create_range() calls below must tile the kernel VMM area
 * [SOS_KMEM_VMM_BASE .. SOS_KMEM_VMM_TOP[ without gap or overlap:
 * several assertions check that consecutive spans abut exactly.
 * Interleaved with this, the slab-cache subsystem is bootstrapped
 * (prepare/commit) because ranges are allocated from a slab cache
 * while slabs themselves live inside ranges.
 */
sos_ret_t
sos_kmem_vmm_subsystem_setup(sos_vaddr_t kernel_core_base,
                             sos_vaddr_t kernel_core_top,
                             sos_vaddr_t bootstrap_stack_bottom_vaddr,
                             sos_vaddr_t bootstrap_stack_top_vaddr)
{
  struct sos_kslab *first_struct_slab_of_caches,
                   *first_struct_slab_of_ranges;
  sos_vaddr_t first_slab_of_caches_base,
              first_slab_of_caches_nb_pages,
              first_slab_of_ranges_base,
              first_slab_of_ranges_nb_pages;
  struct sos_kmem_range *first_range_of_caches,
                        *first_range_of_ranges;

  list_init(kmem_free_range_list);
  list_init(kmem_used_range_list);

  /* Bootstrap the slab subsystem far enough to hand us a cache for
     struct sos_kmem_range, plus the locations of its two initial
     slabs (which we must describe as used ranges below). */
  kmem_range_cache
    = sos_kmem_cache_subsystem_setup_prepare(kernel_core_base,
                                             kernel_core_top,
                                             sizeof(struct sos_kmem_range),
                                             & first_struct_slab_of_caches,
                                             & first_slab_of_caches_base,
                                             & first_slab_of_caches_nb_pages,
                                             & first_struct_slab_of_ranges,
                                             & first_slab_of_ranges_base,
                                             & first_slab_of_ranges_nb_pages);
  SOS_ASSERT_FATAL(kmem_range_cache != NULL);

  /* Free area below the BIOS/video region
     (BIOS_N_VIDEO_* presumably delimit an identity-mapped hole —
     defined elsewhere; confirm against the memory layout). */
  create_range(TRUE,
               SOS_KMEM_VMM_BASE,
               SOS_PAGE_ALIGN_INF(BIOS_N_VIDEO_START),
               NULL);

  /* The BIOS/video region itself: reserved (used, no slab). */
  create_range(FALSE,
               SOS_PAGE_ALIGN_INF(BIOS_N_VIDEO_START),
               SOS_PAGE_ALIGN_SUP(BIOS_N_VIDEO_END),
               NULL);

  /* Free area between the BIOS/video region and the kernel image. */
  create_range(TRUE,
               SOS_PAGE_ALIGN_SUP(BIOS_N_VIDEO_END),
               SOS_PAGE_ALIGN_INF(kernel_core_base),
               NULL);

  /* Kernel code/data up to the bootstrap stack: used. */
  create_range(FALSE,
               SOS_PAGE_ALIGN_INF(kernel_core_base),
               bootstrap_stack_bottom_vaddr,
               NULL);

  /* The bootstrap stack, kept as its own used range so it could be
     released independently later. */
  create_range(FALSE,
               bootstrap_stack_bottom_vaddr,
               bootstrap_stack_top_vaddr,
               NULL);

  /* Remainder of the kernel image above the bootstrap stack: used. */
  create_range(FALSE,
               bootstrap_stack_top_vaddr,
               SOS_PAGE_ALIGN_SUP(kernel_core_top),
               NULL);

  /* The first slab of the cache-of-caches must start exactly at the
     end of the kernel image (guaranteed by the prepare() call). */
  SOS_ASSERT_FATAL(SOS_PAGE_ALIGN_SUP(kernel_core_top)
                   == first_slab_of_caches_base);
  SOS_ASSERT_FATAL(first_struct_slab_of_caches != NULL);
  first_range_of_caches
    = create_range(FALSE,
                   first_slab_of_caches_base,
                   first_slab_of_caches_base
                   + first_slab_of_caches_nb_pages*SOS_PAGE_SIZE,
                   first_struct_slab_of_caches);

  /* Likewise, the first slab of the range cache abuts the previous
     slab exactly. */
  SOS_ASSERT_FATAL((first_slab_of_caches_base
                    + first_slab_of_caches_nb_pages*SOS_PAGE_SIZE)
                   == first_slab_of_ranges_base);
  SOS_ASSERT_FATAL(first_struct_slab_of_ranges != NULL);
  first_range_of_ranges
    = create_range(FALSE,
                   first_slab_of_ranges_base,
                   first_slab_of_ranges_base
                   + first_slab_of_ranges_nb_pages*SOS_PAGE_SIZE,
                   first_struct_slab_of_ranges);

  /* Everything above the bootstrap slabs, up to the top of the kernel
     VMM area, starts out free. */
  create_range(TRUE,
               first_slab_of_ranges_base
               + first_slab_of_ranges_nb_pages*SOS_PAGE_SIZE,
               SOS_KMEM_VMM_TOP,
               NULL);

  /* Tell the slab subsystem which ranges back its bootstrap slabs,
     completing its initialization. */
  sos_kmem_cache_subsystem_setup_commit(first_struct_slab_of_caches,
                                        first_range_of_caches,
                                        first_struct_slab_of_ranges,
                                        first_range_of_ranges);

  return SOS_OK;
}
318
319
320
321
322
323
324
/*
 * Allocate a range of nb_pages kernel virtual pages.
 *
 * flags may combine SOS_KMEM_VMM_ATOMIC (no blocking allocations) and
 * SOS_KMEM_VMM_MAP (back every page with a physical page immediately).
 * On success the new used range is returned and, when range_start is
 * non-NULL, *range_start receives its base address. Returns NULL on
 * failure (no fitting free range, descriptor allocation failure, or
 * physical allocation/mapping failure).
 */
struct sos_kmem_range *sos_kmem_vmm_new_range(sos_count_t nb_pages,
                                              sos_ui32_t flags,
                                              sos_vaddr_t * range_start)
{
  struct sos_kmem_range *free_range, *new_range;

  if (nb_pages <= 0)
    return NULL;

  /* First-fit lookup in the free list. */
  free_range = find_suitable_free_range(nb_pages);
  if (free_range == NULL)
    return NULL;

  /* Exact fit: move the whole free range to the used list as-is;
     no new descriptor is needed. */
  if(free_range->nb_pages == nb_pages)
    {
      list_delete(kmem_free_range_list, free_range);
      kmem_used_range_list = insert_range(kmem_used_range_list,
                                          free_range);

      new_range = free_range;
    }

  /* Larger free range: split it — allocate a fresh descriptor for the
     used front part and shrink the free range in place. */
  else
    {
      /* NOTE: this allocation may itself trigger range creation in the
         slab layer; order matters (descriptor first, then split). */
      new_range = (struct sos_kmem_range*)
        sos_kmem_cache_alloc(kmem_range_cache,
                             (flags & SOS_KMEM_VMM_ATOMIC)?
                             SOS_KSLAB_ALLOC_ATOMIC:0);
      if (! new_range)
        return NULL;

      /* Carve nb_pages off the front of the free range. */
      new_range->base_vaddr   = free_range->base_vaddr;
      new_range->nb_pages     = nb_pages;
      free_range->base_vaddr += nb_pages*SOS_PAGE_SIZE;
      free_range->nb_pages   -= nb_pages;

      /* free_range keeps its (sorted) position in the free list;
         only the new used part needs inserting. */
      kmem_used_range_list = insert_range(kmem_used_range_list,
                                          new_range);
    }

  /* A freshly allocated range belongs to no slab yet. */
  new_range->slab = NULL;

  /* Eager mapping requested: back each page with physical memory. */
  if (flags & SOS_KMEM_VMM_MAP)
    {
      int i;
      for (i = 0 ; i < nb_pages ; i ++)
        {
          /* Grab a new physical page (may block unless ATOMIC). */
          sos_paddr_t ppage_paddr
            = sos_physmem_ref_physpage_new(! (flags & SOS_KMEM_VMM_ATOMIC));

          if (ppage_paddr)
            {
              if (sos_paging_map(ppage_paddr,
                                 new_range->base_vaddr
                                   + i * SOS_PAGE_SIZE,
                                 FALSE /* not user-accessible */,
                                 ((flags & SOS_KMEM_VMM_ATOMIC)?
                                  SOS_VM_MAP_ATOMIC:0)
                                 | SOS_VM_MAP_PROT_READ
                                 | SOS_VM_MAP_PROT_WRITE))
                {
                  /* Mapping failed: release the page and record the
                     failure by clearing ppage_paddr. */
                  sos_physmem_unref_physpage(ppage_paddr);
                  ppage_paddr = (sos_paddr_t)NULL;
                }
              else
                {
                  /* Mapping succeeded: the page table now holds its own
                     reference, so drop ours. */
                  sos_physmem_unref_physpage(ppage_paddr);
                }
            }

          /* Allocation or mapping failed: roll back the whole range
             (del_range unmaps the pages mapped so far). */
          if (! ppage_paddr)
            {
              sos_kmem_vmm_del_range(new_range);
              return NULL;
            }

          /* Record the owning range in the page's descriptor so that
             lookup_range() can resolve this address later. */
          sos_physmem_set_kmem_range(ppage_paddr, new_range);
        }
    }

  if (range_start)
    *range_start = new_range->base_vaddr;

  return new_range;
}
429
430
/*
 * Release a used range: unmap its pages and return it to the free
 * list, merging it with adjacent free ranges when they abut.
 *
 * Merging frees struct sos_kmem_range descriptors; when releasing a
 * descriptor empties a whole slab of ranges, the slab cache hands back
 * the range holding that slab, which must itself be freed. Instead of
 * recursing, such ranges are queued on ranges_to_free and processed by
 * the surrounding do/while loop.
 *
 * The range must not belong to a slab (asserted). Returns SOS_OK.
 */
sos_ret_t sos_kmem_vmm_del_range(struct sos_kmem_range *range)
{
  int i;
  struct sos_kmem_range *ranges_to_free;
  list_init(ranges_to_free);

  SOS_ASSERT_FATAL(range != NULL);
  SOS_ASSERT_FATAL(range->slab == NULL);

  /* Detach from the used list first; the loop below only ever deals
     with free-list membership. */
  list_delete(kmem_used_range_list, range);

  do
    {
      /* Put the range back among the free ranges (sorted insert). */
      kmem_free_range_list = insert_range(kmem_free_range_list, range);

      /* Drop any physical pages still mapped inside the range. */
      for (i = 0 ; i < range->nb_pages ; i ++)
        {
          sos_paging_unmap(range->base_vaddr + i*SOS_PAGE_SIZE);
        }

      /* Merge with the preceding free range when the two are
         contiguous in virtual memory.
         NOTE(review): range->prev is assumed to be a FREE neighbor
         here — this relies on free/used list invariants maintained
         elsewhere; confirm the prev link semantics of the list
         macros. */
      if (range->prev->base_vaddr + range->prev->nb_pages*SOS_PAGE_SIZE
          == range->base_vaddr)
        {
          struct sos_kmem_range *empty_range_of_ranges = NULL;
          struct sos_kmem_range *prec_free = range->prev;

          /* Grow the predecessor and retire this descriptor. */
          prec_free->nb_pages += range->nb_pages;
          list_delete(kmem_free_range_list, range);

          /* Returning the descriptor may empty a slab of ranges; if
             so, the slab's own backing range comes back to us. */
          empty_range_of_ranges =
            sos_kmem_cache_release_struct_range(range);

          if (empty_range_of_ranges != NULL)
            {
              /* That backing range is currently "used": queue it so
                 the outer loop frees it too. */
              list_delete(kmem_used_range_list, empty_range_of_ranges);
              list_add_tail(ranges_to_free, empty_range_of_ranges);
            }

          /* Continue the merge logic on the merged (predecessor)
             range. */
          range = prec_free;
        }

      /* Symmetric merge with the following free range. */
      if (range->base_vaddr + range->nb_pages*SOS_PAGE_SIZE
          == range->next->base_vaddr)
        {
          struct sos_kmem_range *empty_range_of_ranges = NULL;
          struct sos_kmem_range *next_range = range->next;

          /* Absorb the successor and retire its descriptor. */
          range->nb_pages += next_range->nb_pages;
          list_delete(kmem_free_range_list, next_range);

          /* Same slab-emptying possibility as above. */
          empty_range_of_ranges =
            sos_kmem_cache_release_struct_range(next_range);

          if (empty_range_of_ranges != NULL)
            {
              list_delete(kmem_used_range_list, empty_range_of_ranges);
              list_add_tail(ranges_to_free, empty_range_of_ranges);
            }
        }

      /* Process any range queued by the merges above; stop when the
         queue is empty. */
      if (list_is_empty(ranges_to_free))
        range = NULL;
      else
        range = list_pop_head(ranges_to_free);

    }

  while (range != NULL);

  return SOS_OK;
}
549
550
551 sos_vaddr_t sos_kmem_vmm_alloc(sos_count_t nb_pages,
552 sos_ui32_t flags)
553 {
554 struct sos_kmem_range *range
555 = sos_kmem_vmm_new_range(nb_pages,
556 flags,
557 NULL);
558 if (! range)
559 return (sos_vaddr_t)NULL;
560
561 return range->base_vaddr;
562 }
563
564
565 sos_ret_t sos_kmem_vmm_free(sos_vaddr_t vaddr)
566 {
567 struct sos_kmem_range *range = lookup_range(vaddr);
568
569
570
571 if (!range || (range->base_vaddr != vaddr))
572 return -SOS_EINVAL;
573
574
575 if (range->slab != NULL)
576 return -SOS_EBUSY;
577
578 return sos_kmem_vmm_del_range(range);
579 }
580
581
582 sos_ret_t sos_kmem_vmm_set_slab(struct sos_kmem_range *range,
583 struct sos_kslab *slab)
584 {
585 if (! range)
586 return -SOS_EINVAL;
587
588 range->slab = slab;
589 return SOS_OK;
590 }
591
592 struct sos_kslab * sos_kmem_vmm_resolve_slab(sos_vaddr_t vaddr)
593 {
594 struct sos_kmem_range *range = lookup_range(vaddr);
595 if (! range)
596 return NULL;
597
598 return range->slab;
599 }
600
601
602 sos_bool_t sos_kmem_vmm_is_valid_vaddr(sos_vaddr_t vaddr)
603 {
604 struct sos_kmem_range *range = lookup_range(vaddr);
605 return (range != NULL);
606 }