001
002
003
004
005
006
007
008
009
010
011
012
013
014
015
016
017
018
019
020 #include <sos/list.h>
021 #include <sos/physmem.h>
022 #include <hwcore/paging.h>
023 #include <sos/assert.h>
024
025 #include "kmem_vmm.h"
026
027
/*
 * Descriptor of one contiguous region of kernel virtual memory: nb_pages
 * pages starting at base_vaddr. Each descriptor is linked into exactly one
 * of the two module-level lists (free or used).
 */
struct sos_kmem_range
{
  sos_vaddr_t base_vaddr;  /* First virtual address covered (page-aligned) */
  sos_count_t nb_pages;    /* Size of the region, in pages */

  /* The slab this (used) range backs, or NULL when not owned by a slab */
  struct sos_kslab *slab;

  /* Linkage inside the free-range or used-range list */
  struct sos_kmem_range *prev, *next;
};

/* Exported so the slab-cache code can size its objects without seeing the
   struct definition — presumably consumed by kmem_slab.c (TODO confirm). */
const int sizeof_struct_sos_kmem_range = sizeof(struct sos_kmem_range);
039
040
/* The ranges available for allocation and the ranges already handed out;
   both lists are kept sorted by increasing base_vaddr (see insert_range). */
static struct sos_kmem_range *kmem_free_range_list, *kmem_used_range_list;

/* Slab cache from which the struct sos_kmem_range descriptors themselves
   are allocated (bootstrapped in sos_kmem_vmm_subsystem_setup). */
static struct sos_kslab_cache *kmem_range_cache;
045
046
047
048
049
050 static struct sos_kmem_range *
051 get_closest_preceding_kmem_range(struct sos_kmem_range *the_list,
052 sos_vaddr_t vaddr)
053 {
054 int nb_elements;
055 struct sos_kmem_range *a_range, *ret_range;
056
057
058
059 ret_range = NULL;
060 list_foreach(the_list, a_range, nb_elements)
061 {
062 if (vaddr < a_range->base_vaddr)
063 return ret_range;
064 ret_range = a_range;
065 }
066
067
068 return ret_range;
069 }
070
071
072
073
074
075
076 static struct sos_kmem_range *find_suitable_free_range(sos_count_t nb_pages)
077 {
078 int nb_elements;
079 struct sos_kmem_range *r;
080
081 list_foreach(kmem_free_range_list, r, nb_elements)
082 {
083 if (r->nb_pages >= nb_pages)
084 return r;
085 }
086
087 return NULL;
088 }
089
090
091
092
093
094
095
096 static struct sos_kmem_range *insert_range(struct sos_kmem_range *the_list,
097 struct sos_kmem_range *a_range)
098 {
099 struct sos_kmem_range *prec_used;
100
101
102 prec_used = get_closest_preceding_kmem_range(the_list,
103 a_range->base_vaddr);
104
105 if (prec_used != NULL)
106 list_insert_after(the_list, prec_used, a_range);
107 else
108 list_add_head(the_list, a_range);
109
110 return the_list;
111 }
112
113
114
115
116
117
/*
 * Find the used kmem range covering virtual address vaddr, or NULL when
 * vaddr does not belong to any used range.
 */
static struct sos_kmem_range *lookup_range(sos_vaddr_t vaddr)
{
  struct sos_kmem_range *range;

  /* Fast path: when vaddr is actually mapped, the owning range was
     recorded in the physical page descriptor (see create_range() and
     sos_kmem_vmm_new_range(), which call sos_physmem_set_kmem_range). */
  sos_paddr_t ppage_paddr = SOS_PAGE_ALIGN_INF(sos_paging_get_paddr(vaddr));
  if (ppage_paddr)
    {
      range = sos_physmem_get_kmem_range(ppage_paddr);

      /* A mapped kernel page must always belong to a registered range. */
      SOS_ASSERT_FATAL(range != NULL);
    }

  /* Slow path: vaddr is not mapped (legal for a range allocated without
     SOS_KMEM_VMM_MAP), so search the sorted used-range list instead. */
  else
    {
      range = get_closest_preceding_kmem_range(kmem_used_range_list,
					       vaddr);

      if (! range)
	return NULL;

      /* The closest preceding range may still end below vaddr: reject
	 vaddr unless it falls inside [base, base + nb_pages*PAGE_SIZE[. */
      if ( (vaddr < range->base_vaddr)
	   || (vaddr >= (range->base_vaddr + range->nb_pages*SOS_PAGE_SIZE)) )
	return NULL;
    }

  return range;
}
151
152
153
154
155
156
157
158 static struct sos_kmem_range *
159 create_range(sos_bool_t is_free,
160 sos_vaddr_t base_vaddr,
161 sos_vaddr_t top_vaddr,
162 struct sos_kslab *associated_slab)
163 {
164 struct sos_kmem_range *range;
165
166 SOS_ASSERT_FATAL(SOS_IS_PAGE_ALIGNED(base_vaddr));
167 SOS_ASSERT_FATAL(SOS_IS_PAGE_ALIGNED(top_vaddr));
168
169 if ((top_vaddr - base_vaddr) < SOS_PAGE_SIZE)
170 return NULL;
171
172 range = (struct sos_kmem_range*)sos_kmem_cache_alloc(kmem_range_cache,
173 SOS_KSLAB_ALLOC_ATOMIC);
174 SOS_ASSERT_FATAL(range != NULL);
175
176 range->base_vaddr = base_vaddr;
177 range->nb_pages = (top_vaddr - base_vaddr) / SOS_PAGE_SIZE;
178
179 if (is_free)
180 {
181 list_add_tail(kmem_free_range_list,
182 range);
183 }
184 else
185 {
186 sos_vaddr_t vaddr;
187 range->slab = associated_slab;
188 list_add_tail(kmem_used_range_list,
189 range);
190
191
192 for (vaddr = base_vaddr ;
193 vaddr < top_vaddr ;
194 vaddr += SOS_PAGE_SIZE)
195 {
196 sos_paddr_t ppage_paddr = sos_paging_get_paddr(vaddr);
197 SOS_ASSERT_FATAL((void*)ppage_paddr != NULL);
198 sos_physmem_set_kmem_range(ppage_paddr, range);
199 }
200 }
201
202 return range;
203 }
204
205
/*
 * Initialize the kernel virtual-memory subsystem: bootstrap the slab
 * cache holding the range descriptors, then carve the whole kernel
 * address space [SOS_KMEM_VMM_BASE .. SOS_KMEM_VMM_TOP[ into an initial
 * set of used and free ranges, and finally commit the bootstrap slabs
 * to the cache subsystem.
 */
sos_ret_t
sos_kmem_vmm_subsystem_setup(sos_vaddr_t kernel_core_base,
			     sos_vaddr_t kernel_core_top,
			     sos_vaddr_t bootstrap_stack_bottom_vaddr,
			     sos_vaddr_t bootstrap_stack_top_vaddr)
{
  struct sos_kslab *first_struct_slab_of_caches,
    *first_struct_slab_of_ranges;
  sos_vaddr_t first_slab_of_caches_base,
    first_slab_of_caches_nb_pages,
    first_slab_of_ranges_base,
    first_slab_of_ranges_nb_pages;
  struct sos_kmem_range *first_range_of_caches,
    *first_range_of_ranges;

  list_init(kmem_free_range_list);
  list_init(kmem_used_range_list);

  /* Bootstrap the slab cache that will hold the range descriptors; it
     also returns the location of the two bootstrap slabs so they can be
     registered as used ranges further below. */
  kmem_range_cache
    = sos_kmem_cache_subsystem_setup_prepare(kernel_core_base,
					     kernel_core_top,
					     sizeof(struct sos_kmem_range),
					     & first_struct_slab_of_caches,
					     & first_slab_of_caches_base,
					     & first_slab_of_caches_nb_pages,
					     & first_struct_slab_of_ranges,
					     & first_slab_of_ranges_base,
					     & first_slab_of_ranges_nb_pages);
  SOS_ASSERT_FATAL(kmem_range_cache != NULL);

  /* Free area from the bottom of kernel space up to the BIOS/video zone. */
  create_range(TRUE,
	       SOS_KMEM_VMM_BASE,
	       SOS_PAGE_ALIGN_INF(BIOS_N_VIDEO_START),
	       NULL);

  /* The BIOS and video memory zone is reserved (used, no owning slab). */
  create_range(FALSE,
	       SOS_PAGE_ALIGN_INF(BIOS_N_VIDEO_START),
	       SOS_PAGE_ALIGN_SUP(BIOS_N_VIDEO_END),
	       NULL);

  /* Free area between the BIOS/video zone and the kernel image. */
  create_range(TRUE,
	       SOS_PAGE_ALIGN_SUP(BIOS_N_VIDEO_END),
	       SOS_PAGE_ALIGN_INF(kernel_core_base),
	       NULL);

  /* Kernel code/data below the bootstrap stack: used. */
  create_range(FALSE,
	       SOS_PAGE_ALIGN_INF(kernel_core_base),
	       bootstrap_stack_bottom_vaddr,
	       NULL);

  /* The bootstrap stack itself: used. Kept as a separate range,
     presumably so it can be released independently later — TODO confirm. */
  create_range(FALSE,
	       bootstrap_stack_bottom_vaddr,
	       bootstrap_stack_top_vaddr,
	       NULL);

  /* Kernel code/data above the bootstrap stack: used. */
  create_range(FALSE,
	       bootstrap_stack_top_vaddr,
	       SOS_PAGE_ALIGN_SUP(kernel_core_top),
	       NULL);

  /* The first slab of the cache-of-caches must sit exactly at the end of
     the kernel image (layout guaranteed by the setup_prepare call). */
  SOS_ASSERT_FATAL(SOS_PAGE_ALIGN_SUP(kernel_core_top)
		   == first_slab_of_caches_base);
  SOS_ASSERT_FATAL(first_struct_slab_of_caches != NULL);
  first_range_of_caches
    = create_range(FALSE,
		   first_slab_of_caches_base,
		   first_slab_of_caches_base
		   + first_slab_of_caches_nb_pages*SOS_PAGE_SIZE,
		   first_struct_slab_of_caches);

  /* The first slab of ranges must immediately follow the first slab of
     caches. */
  SOS_ASSERT_FATAL((first_slab_of_caches_base
		    + first_slab_of_caches_nb_pages*SOS_PAGE_SIZE)
		   == first_slab_of_ranges_base);
  SOS_ASSERT_FATAL(first_struct_slab_of_ranges != NULL);
  first_range_of_ranges
    = create_range(FALSE,
		   first_slab_of_ranges_base,
		   first_slab_of_ranges_base
		   + first_slab_of_ranges_nb_pages*SOS_PAGE_SIZE,
		   first_struct_slab_of_ranges);

  /* Everything above the bootstrap slabs up to the top of kernel space
     is free. */
  create_range(TRUE,
	       first_slab_of_ranges_base
	       + first_slab_of_ranges_nb_pages*SOS_PAGE_SIZE,
	       SOS_KMEM_VMM_TOP,
	       NULL);

  /* Tell the cache subsystem which ranges now back its bootstrap slabs,
     completing the mutual bootstrap of the two allocators. */
  sos_kmem_cache_subsystem_setup_commit(first_struct_slab_of_caches,
					first_range_of_caches,
					first_struct_slab_of_ranges,
					first_range_of_ranges);

  return SOS_OK;
}
317
318
319
320
321
322
323
/*
 * Allocate a virtual-memory range of nb_pages pages from the free list
 * (first-fit). On success, returns the range and stores its base address
 * in *range_start when range_start is non-NULL; returns NULL on failure.
 * When flags contains SOS_KMEM_VMM_MAP, physical pages are also allocated
 * and mapped over the whole range; SOS_KMEM_VMM_ATOMIC selects the
 * non-blocking variants of the underlying allocators.
 */
struct sos_kmem_range *sos_kmem_vmm_new_range(sos_count_t nb_pages,
					      sos_ui32_t flags,
					      sos_vaddr_t * range_start)
{
  struct sos_kmem_range *free_range, *new_range;

  if (nb_pages <= 0)
    return NULL;

  /* First-fit search in the sorted free list. */
  free_range = find_suitable_free_range(nb_pages);
  if (free_range == NULL)
    return NULL;

  /* Exact fit: move the whole free range over to the used list, reusing
     its descriptor. */
  if(free_range->nb_pages == nb_pages)
    {
      list_delete(kmem_free_range_list, free_range);
      kmem_used_range_list = insert_range(kmem_used_range_list,
					  free_range);

      new_range = free_range;
    }

  /* The free range is larger than requested: carve the first nb_pages
     pages out of it into a freshly-allocated descriptor and shrink the
     remaining free range in place (its list position stays valid since
     only its base grows). */
  else
    {
      new_range = (struct sos_kmem_range*)
	sos_kmem_cache_alloc(kmem_range_cache,
			     (flags & SOS_KMEM_VMM_ATOMIC)?
			     SOS_KSLAB_ALLOC_ATOMIC:0);
      if (! new_range)
	return NULL;

      new_range->base_vaddr = free_range->base_vaddr;
      new_range->nb_pages = nb_pages;
      free_range->base_vaddr += nb_pages*SOS_PAGE_SIZE;
      free_range->nb_pages -= nb_pages;

      kmem_used_range_list = insert_range(kmem_used_range_list,
					  new_range);
    }

  /* A fresh range is not owned by any slab yet. */
  new_range->slab = NULL;

  /* Optionally back the range with freshly-allocated, mapped physical
     pages. */
  if (flags & SOS_KMEM_VMM_MAP)
    {
      int i;
      for (i = 0 ; i < nb_pages ; i ++)
	{
	  /* Allocate a new physical page; the argument allows the call
	     to block only when ATOMIC is not requested. */
	  sos_paddr_t ppage_paddr
	    = sos_physmem_ref_physpage_new(! (flags & SOS_KMEM_VMM_ATOMIC));

	  if (ppage_paddr)
	    {
	      if (sos_paging_map(ppage_paddr,
				 new_range->base_vaddr
				 + i * SOS_PAGE_SIZE,
				 FALSE ,
				 ((flags & SOS_KMEM_VMM_ATOMIC)?
				  SOS_VM_MAP_ATOMIC:0)
				 | SOS_VM_MAP_PROT_READ
				 | SOS_VM_MAP_PROT_WRITE))
		{
		  /* Mapping failed: drop our reference and record the
		     failure by zeroing ppage_paddr. */
		  sos_physmem_unref_physpage(ppage_paddr);
		  ppage_paddr = (sos_paddr_t)NULL;
		}
	      else
		{
		  /* Mapping succeeded: release our own reference —
		     presumably the page table now holds its own
		     (TODO confirm against sos_paging_map's contract). */
		  sos_physmem_unref_physpage(ppage_paddr);
		}
	    }

	  /* Page allocation or mapping failed: roll back the whole
	     range (del_range unmaps the pages mapped so far). */
	  if (! ppage_paddr)
	    {
	      sos_kmem_vmm_del_range(new_range);
	      return NULL;
	    }

	  /* Tag the physical page with its owning range, enabling
	     lookup_range()'s fast path. */
	  sos_physmem_set_kmem_range(ppage_paddr, new_range);
	}
    }

  /* Hand the base address back to the caller when requested. */
  if (range_start)
    *range_start = new_range->base_vaddr;

  return new_range;
}
428
429
/*
 * Return the given used range (which must not belong to a slab) to the
 * free list: unmap every page it covers and merge it with virtually
 * contiguous free neighbours.
 *
 * Subtlety: merging releases a struct sos_kmem_range back to its slab
 * cache, which may empty a slab whose own underlying range must then be
 * freed in turn. Those ranges are queued on ranges_to_free and processed
 * by further iterations of the do/while loop, avoiding recursion.
 */
sos_ret_t sos_kmem_vmm_del_range(struct sos_kmem_range *range)
{
  int i;
  struct sos_kmem_range *ranges_to_free;
  list_init(ranges_to_free);

  SOS_ASSERT_FATAL(range != NULL);
  SOS_ASSERT_FATAL(range->slab == NULL);

  /* Detach the range from the used list before recycling it. */
  list_delete(kmem_used_range_list, range);

  do
    {
      /* Put the range back into the (sorted) free list. */
      kmem_free_range_list = insert_range(kmem_free_range_list, range);

      /* Unmap any page still mapped inside the range (no-op pages are
	 presumably tolerated by sos_paging_unmap — TODO confirm). */
      for (i = 0 ; i < range->nb_pages ; i ++)
	{
	  sos_paging_unmap(range->base_vaddr + i*SOS_PAGE_SIZE);
	}

      /* Merge with the preceding free range when the two are virtually
	 contiguous (the address test also rejects the wrap-around case
	 of a circular list). */
      if (range->prev->base_vaddr + range->prev->nb_pages*SOS_PAGE_SIZE
	  == range->base_vaddr)
	{
	  struct sos_kmem_range *empty_range_of_ranges = NULL;
	  struct sos_kmem_range *prec_free = range->prev;

	  /* Grow the predecessor and drop the now-redundant range. */
	  prec_free->nb_pages += range->nb_pages;
	  list_delete(kmem_free_range_list, range);

	  /* Release the descriptor; this may hand back the range of a
	     slab-of-ranges that just became completely empty. */
	  empty_range_of_ranges =
	    sos_kmem_cache_release_struct_range(range);

	  /* Queue that emptied slab's range so a later loop iteration
	     frees it too. */
	  if (empty_range_of_ranges != NULL)
	    {
	      list_delete(kmem_used_range_list, empty_range_of_ranges);
	      list_add_tail(ranges_to_free, empty_range_of_ranges);
	    }

	  /* Continue merging with the (grown) predecessor. */
	  range = prec_free;
	}

      /* Symmetric merge with the following free range. */
      if (range->base_vaddr + range->nb_pages*SOS_PAGE_SIZE
	  == range->next->base_vaddr)
	{
	  struct sos_kmem_range *empty_range_of_ranges = NULL;
	  struct sos_kmem_range *next_range = range->next;

	  /* Absorb the successor into the current range. */
	  range->nb_pages += next_range->nb_pages;
	  list_delete(kmem_free_range_list, next_range);

	  /* Same as above: releasing the descriptor may empty a slab
	     whose range must be deferred for freeing. */
	  empty_range_of_ranges =
	    sos_kmem_cache_release_struct_range(next_range);

	  if (empty_range_of_ranges != NULL)
	    {
	      list_delete(kmem_used_range_list, empty_range_of_ranges);
	      list_add_tail(ranges_to_free, empty_range_of_ranges);
	    }
	}

      /* Pick up the next deferred range, if any; NULL ends the loop. */
      if (list_is_empty(ranges_to_free))
	range = NULL;
      else
	range = list_pop_head(ranges_to_free);

    }

  while (range != NULL);

  return SOS_OK;
}
548
549
550 sos_vaddr_t sos_kmem_vmm_alloc(sos_count_t nb_pages,
551 sos_ui32_t flags)
552 {
553 struct sos_kmem_range *range
554 = sos_kmem_vmm_new_range(nb_pages,
555 flags,
556 NULL);
557 if (! range)
558 return (sos_vaddr_t)NULL;
559
560 return range->base_vaddr;
561 }
562
563
564 sos_ret_t sos_kmem_vmm_free(sos_vaddr_t vaddr)
565 {
566 struct sos_kmem_range *range = lookup_range(vaddr);
567
568
569
570 if (!range || (range->base_vaddr != vaddr))
571 return -SOS_EINVAL;
572
573
574 if (range->slab != NULL)
575 return -SOS_EBUSY;
576
577 return sos_kmem_vmm_del_range(range);
578 }
579
580
581 sos_ret_t sos_kmem_vmm_set_slab(struct sos_kmem_range *range,
582 struct sos_kslab *slab)
583 {
584 if (! range)
585 return -SOS_EINVAL;
586
587 range->slab = slab;
588 return SOS_OK;
589 }
590
591 struct sos_kslab * sos_kmem_vmm_resolve_slab(sos_vaddr_t vaddr)
592 {
593 struct sos_kmem_range *range = lookup_range(vaddr);
594 if (! range)
595 return NULL;
596
597 return range->slab;
598 }
599
600
601 sos_bool_t sos_kmem_vmm_is_valid_vaddr(sos_vaddr_t vaddr)
602 {
603 struct sos_kmem_range *range = lookup_range(vaddr);
604 return (range != NULL);
605 }