001
002
003
004
005
006
007
008
009
010
011
012
013
014
015
016
017
018
019 #include <sos/macros.h>
020 #include <sos/klibc.h>
021 #include <sos/list.h>
022 #include <sos/assert.h>
023 #include <hwcore/paging.h>
024 #include <sos/physmem.h>
025 #include <sos/kmem_vmm.h>
026
027 #include "kmem_slab.h"
028
029
030 #define NB_PAGES_IN_SLAB_OF_CACHES 1
031 #define NB_PAGES_IN_SLAB_OF_RANGES 1
032
033
/*
 * Descriptor of one slab cache: a pool of fixed-size kernel objects
 * carved out of "slabs" (groups of nb_pages_per_slab pages).
 */
struct sos_kslab_cache
{
  char *name;                      /* Debug identifier (not owned/copied) */

  /* Object sizes */
  sos_size_t original_obj_size;    /* Size requested at cache creation */
  sos_size_t alloc_obj_size;       /* Effective size: at least
                                      sizeof(struct sos_kslab_free_object),
                                      rounded up to sizeof(int) (see
                                      cache_initialize) */

  /* Per-slab geometry, computed once in cache_initialize() */
  sos_count_t nb_objects_per_slab;
  sos_count_t nb_pages_per_slab;
  sos_count_t min_free_objects;    /* Reserve kept so that recursive
                                      allocations (ranges/slab headers)
                                      never find the cache empty */

/* Internal flag: the struct sos_kslab header is embedded at the end of
   the slab's own pages instead of being allocated off-slab from
   cache_of_struct_kslab */
#define ON_SLAB (1<<31)
  sos_ui32_t flags;

  /* Total number of free objects across all slabs of this cache */
  sos_count_t nb_free_objects;

  /* Slabs that still have free objects are kept at the head */
  struct sos_kslab *slab_list;

  /* Linkage in the global kslab_cache_list */
  struct sos_kslab_cache *prev, *next;
};
061
062
063
/*
 * Descriptor of one slab: a contiguous run of pages holding
 * nb_objects_per_slab objects of the owning cache. The header itself
 * lives either at the end of the slab's pages (ON_SLAB caches) or in
 * cache_of_struct_kslab (off-slab).
 */
struct sos_kslab
{
  /* Number of objects currently free in this slab */
  sos_count_t nb_free;

  /* Head of this slab's free-object list */
  struct sos_kslab_free_object *free;

  /* Kernel VM range backing this slab's pages (NULL only transiently
     during bootstrap — see sos_kmem_cache_setup_commit) */
  struct sos_kmem_range *range;

  /* Virtual address of the first object in the slab */
  sos_vaddr_t first_object;

  /* Owning cache */
  struct sos_kslab_cache *cache;

  /* Linkage in the owning cache's slab_list */
  struct sos_kslab *prev, *next;
};
084
085
086
/* Free objects are linked through their own (unused) storage; every
   object is guaranteed to be at least this large by cache_initialize */
struct sos_kslab_free_object
{
  struct sos_kslab_free_object *prev, *next;
};
091
092
/* The cache that sos_kslab_cache descriptors themselves are allocated
   from (bootstrapped by create_cache_of_caches) */
static struct sos_kslab_cache *cache_of_struct_kslab_cache;

/* The cache used for off-slab struct sos_kslab headers */
static struct sos_kslab_cache *cache_of_struct_kslab;

/* Head of the global list of all slab caches */
static struct sos_kslab_cache *kslab_cache_list;
100
101
102 static sos_ret_t
103 cache_initialize(struct sos_kslab_cache *the_cache,
104 const char* name,
105 sos_size_t obj_size,
106 sos_count_t pages_per_slab,
107 sos_count_t min_free_objs,
108 sos_ui32_t cache_flags)
109 {
110 unsigned int space_left;
111 sos_size_t alloc_obj_size;
112
113 if (obj_size <= 0)
114 return -SOS_EINVAL;
115
116
117 alloc_obj_size = obj_size;
118
119
120
121 if (alloc_obj_size < sizeof(struct sos_kslab_free_object))
122 alloc_obj_size = sizeof(struct sos_kslab_free_object);
123
124
125 alloc_obj_size = SOS_ALIGN_SUP(alloc_obj_size, sizeof(int));
126
127
128
129 if (alloc_obj_size > pages_per_slab*SOS_PAGE_SIZE)
130 return -SOS_EINVAL;
131
132
133 if (pages_per_slab > MAX_PAGES_PER_SLAB)
134 return -SOS_ENOMEM;
135
136
137 memset(the_cache, 0x0, sizeof(struct sos_kslab_cache));
138 the_cache->name = (char*)name;
139 the_cache->flags = cache_flags;
140 the_cache->original_obj_size = obj_size;
141 the_cache->alloc_obj_size = alloc_obj_size;
142 the_cache->min_free_objects = min_free_objs;
143 the_cache->nb_pages_per_slab = pages_per_slab;
144
145
146
147 if(alloc_obj_size <= sizeof(struct sos_kslab))
148 the_cache->flags |= ON_SLAB;
149
150
151
152
153
154 space_left = the_cache->nb_pages_per_slab*SOS_PAGE_SIZE;
155 if(the_cache->flags & ON_SLAB)
156 space_left -= sizeof(struct sos_kslab);
157 the_cache->nb_objects_per_slab = space_left / alloc_obj_size;
158 space_left -= the_cache->nb_objects_per_slab*alloc_obj_size;
159
160
161
162 if (the_cache->nb_objects_per_slab < min_free_objs)
163 return -SOS_EINVAL;
164
165
166
167 if (space_left >= sizeof(struct sos_kslab))
168 the_cache->flags |= ON_SLAB;
169
170 return SOS_OK;
171 }
172
173
174
175 static sos_ret_t
176 cache_add_slab(struct sos_kslab_cache *kslab_cache,
177 sos_vaddr_t vaddr_slab,
178 struct sos_kslab *slab)
179 {
180 int i;
181
182
183 memset(slab, 0x0, sizeof(struct sos_kslab));
184 slab->cache = kslab_cache;
185
186
187 slab->first_object = vaddr_slab;
188
189
190 slab->nb_free = kslab_cache->nb_objects_per_slab;
191 kslab_cache->nb_free_objects += slab->nb_free;
192
193
194 for (i = 0 ; i < kslab_cache->nb_objects_per_slab ; i++)
195 {
196 sos_vaddr_t obj_vaddr;
197
198
199 obj_vaddr = slab->first_object + i*kslab_cache->alloc_obj_size;
200
201
202 list_add_tail(slab->free,
203 (struct sos_kslab_free_object *)obj_vaddr);
204 }
205
206
207
208 list_add_head(kslab_cache->slab_list, slab);
209
210 return SOS_OK;
211 }
212
213
214
/*
 * Add one new slab (nb_pages_per_slab pages) to @kslab_cache.
 *
 * Allocates a kernel VM range for the slab's pages, places the slab
 * header either inside those pages (ON_SLAB) or in the dedicated
 * off-slab header cache, then registers the slab with the cache and
 * the VMM (so free_object can resolve addresses back to slabs).
 * Returns SOS_OK or -SOS_ENOMEM; the range is rolled back on failure.
 */
static sos_ret_t
cache_grow(struct sos_kslab_cache *kslab_cache,
	   sos_ui32_t alloc_flags)
{
  sos_ui32_t range_alloc_flags;

  struct sos_kmem_range *new_range;
  sos_vaddr_t new_range_start;

  struct sos_kslab *new_slab;

  /* Translate the cache/allocation flags into kmem_vmm range flags */
  range_alloc_flags = 0;

  /* Atomic allocation: the range request must not block either */
  if (alloc_flags & SOS_KSLAB_ALLOC_ATOMIC)
    range_alloc_flags |= SOS_KMEM_VMM_ATOMIC;

  /* Caches that map or zero their objects need physical pages mapped
     immediately */
  if (kslab_cache->flags & (SOS_KSLAB_CREATE_MAP
			    | SOS_KSLAB_CREATE_ZERO))
    range_alloc_flags |= SOS_KMEM_VMM_MAP;

  /* Grab a fresh virtual region for the slab's pages */
  new_range = sos_kmem_vmm_new_range(kslab_cache->nb_pages_per_slab,
				     range_alloc_flags,
				     & new_range_start);
  if (! new_range)
    return -SOS_ENOMEM;

  /* Locate / allocate the struct sos_kslab header */
  if (kslab_cache->flags & ON_SLAB)
    {
      /* ON_SLAB: the header is embedded at the very end of the slab's
	 pages (cache_initialize guaranteed it fits) */
      sos_vaddr_t slab_vaddr
	= new_range_start + kslab_cache->nb_pages_per_slab*SOS_PAGE_SIZE
	  - sizeof(struct sos_kslab);
      new_slab = (struct sos_kslab*)slab_vaddr;
    }
  else
    {
      /* Off-slab: the header comes from the dedicated kslab cache */
      sos_vaddr_t slab_vaddr
	= sos_kmem_cache_alloc(cache_of_struct_kslab,
			       alloc_flags);
      if (! slab_vaddr)
	{
	  /* Roll back the range allocation */
	  sos_kmem_vmm_del_range(new_range);
	  return -SOS_ENOMEM;
	}
      new_slab = (struct sos_kslab*)slab_vaddr;
    }

  /* cache_add_slab always succeeds, so its retval is not checked */
  cache_add_slab(kslab_cache, new_range_start, new_slab);
  new_slab->range = new_range;

  /* Enable vaddr -> slab lookups on this range (used by free_object) */
  sos_kmem_vmm_set_slab(new_range, new_slab);

  return SOS_OK;
}
280
281
282
283
284
285
286
287
288
289
/*
 * Unlink a completely free @slab from its cache and release its
 * resources. When @must_del_range_now is FALSE the backing VM range is
 * left alive for the caller to dispose of (needed by
 * sos_kmem_cache_release_struct_range to avoid recursing into the
 * range cache). Returns SOS_OK or the range-deletion status.
 */
static sos_ret_t
cache_release_slab(struct sos_kslab *slab,
		   sos_bool_t must_del_range_now)
{
  struct sos_kslab_cache *kslab_cache = slab->cache;
  /* Save the range now: the slab header may be freed below */
  struct sos_kmem_range *range = slab->range;

  SOS_ASSERT_FATAL(kslab_cache != NULL);
  SOS_ASSERT_FATAL(range != NULL);
  /* Only fully-free slabs may be released */
  SOS_ASSERT_FATAL(slab->nb_free == slab->cache->nb_objects_per_slab);

  /* Detach the slab from the cache's bookkeeping */
  list_delete(kslab_cache->slab_list, slab);
  slab->cache->nb_free_objects -= slab->nb_free;

  /* Off-slab header: hand the struct sos_kslab back to its cache.
     (An on-slab header simply vanishes with the range.) */
  if (! (slab->cache->flags & ON_SLAB))
    sos_kmem_cache_free((sos_vaddr_t)slab);

  /* The range no longer backs any slab */
  sos_kmem_vmm_set_slab(range, NULL);

  /* Delete the range now, or defer to the caller */
  if (must_del_range_now)
    return sos_kmem_vmm_del_range(range);

  return SOS_OK;
}
319
320
321
322
323
324
325
326 static struct sos_kslab_cache *
327 create_cache_of_caches(sos_vaddr_t vaddr_first_slab_of_caches,
328 int nb_pages)
329 {
330
331
332 struct sos_kslab_cache fake_cache_of_caches;
333
334
335 struct sos_kslab_cache *real_cache_of_caches;
336
337
338 struct sos_kslab *slab_of_caches;
339
340
341 if (cache_initialize(& fake_cache_of_caches,
342 "Caches", sizeof(struct sos_kslab_cache),
343 nb_pages, 0, SOS_KSLAB_CREATE_MAP | ON_SLAB))
344
345 return NULL;
346
347 memset((void*)vaddr_first_slab_of_caches, 0x0, nb_pages*SOS_PAGE_SIZE);
348
349
350 slab_of_caches = (struct sos_kslab*)(vaddr_first_slab_of_caches
351 + nb_pages*SOS_PAGE_SIZE
352 - sizeof(struct sos_kslab));
353
354
355 cache_add_slab(& fake_cache_of_caches,
356 vaddr_first_slab_of_caches,
357 slab_of_caches);
358
359
360
361
362 real_cache_of_caches
363 = (struct sos_kslab_cache*) sos_kmem_cache_alloc(& fake_cache_of_caches,
364 0);
365
366 memcpy(real_cache_of_caches, & fake_cache_of_caches,
367 sizeof(struct sos_kslab_cache));
368
369 slab_of_caches->cache = real_cache_of_caches;
370
371
372 list_add_tail(kslab_cache_list, real_cache_of_caches);
373
374 return real_cache_of_caches;
375 }
376
377
378
379
380
381
382
383
384 static struct sos_kslab_cache *
385 create_cache_of_ranges(sos_vaddr_t vaddr_first_slab_of_ranges,
386 sos_size_t sizeof_struct_range,
387 int nb_pages)
388 {
389
390 struct sos_kslab_cache *cache_of_ranges;
391
392
393 struct sos_kslab *slab_of_ranges;
394
395 cache_of_ranges = (struct sos_kslab_cache*)
396 sos_kmem_cache_alloc(cache_of_struct_kslab_cache,
397 0);
398 if (! cache_of_ranges)
399 return NULL;
400
401
402
403 if (cache_initialize(cache_of_ranges,
404 "struct kmem_range",
405 sizeof_struct_range,
406 nb_pages, 2, SOS_KSLAB_CREATE_MAP | ON_SLAB))
407
408 return NULL;
409
410
411 list_add_tail(kslab_cache_list, cache_of_ranges);
412
413
414
415
416 memset((void*)vaddr_first_slab_of_ranges, 0x0, nb_pages*SOS_PAGE_SIZE);
417
418
419 slab_of_ranges = (struct sos_kslab*)(vaddr_first_slab_of_ranges
420 + nb_pages*SOS_PAGE_SIZE
421 - sizeof(struct sos_kslab));
422
423 cache_add_slab(cache_of_ranges,
424 vaddr_first_slab_of_ranges,
425 slab_of_ranges);
426
427 return cache_of_ranges;
428 }
429
430
/*
 * Bootstrap the slab subsystem before the kmem_vmm ranges exist.
 *
 * Manually maps two groups of pages just above the kernel
 * (@kernel_core_top): one for the first slab of the cache-of-caches,
 * one for the first slab of the cache-of-ranges. The out-parameters
 * report the base addresses, page counts and slab headers of both so
 * that kmem_vmm can later describe them with real ranges (fixed up by
 * sos_kmem_cache_setup_commit). Also creates the cache used for
 * off-slab slab headers. Returns the cache of ranges.
 * NOTE: @kernel_core_base is currently unused here.
 */
struct sos_kslab_cache *
sos_kmem_cache_setup_prepare(sos_vaddr_t kernel_core_base,
			     sos_vaddr_t kernel_core_top,
			     sos_size_t  sizeof_struct_range,

			     struct sos_kslab **first_struct_slab_of_caches,
			     sos_vaddr_t *first_slab_of_caches_base,
			     sos_count_t *first_slab_of_caches_nb_pages,
			     struct sos_kslab **first_struct_slab_of_ranges,
			     sos_vaddr_t *first_slab_of_ranges_base,
			     sos_count_t *first_slab_of_ranges_nb_pages)
{
  int i;
  sos_ret_t retval;
  sos_vaddr_t vaddr;

  /* The cache of ranges we are about to build */
  struct sos_kslab_cache *cache_of_ranges;

  /* Reset global state: nothing exists yet */
  kslab_cache_list = NULL;
  cache_of_struct_kslab = NULL;
  cache_of_struct_kslab_cache = NULL;

  /*
   * Step 1: hand-map the pages for the first slab of the
   * cache-of-caches, starting at the first page-aligned address after
   * the kernel image.
   */
  *first_slab_of_caches_base = SOS_PAGE_ALIGN_SUP(kernel_core_top);
  for (i = 0, vaddr = *first_slab_of_caches_base ;
       i < NB_PAGES_IN_SLAB_OF_CACHES ;
       i++, vaddr += SOS_PAGE_SIZE)
    {
      sos_paddr_t ppage_paddr;

      /* Grab a physical page (non-atomic is fine at boot) */
      ppage_paddr
	= sos_physmem_ref_physpage_new(FALSE);
      SOS_ASSERT_FATAL(ppage_paddr != (sos_paddr_t)NULL);

      retval = sos_paging_map(ppage_paddr, vaddr,
			      FALSE,
			      SOS_VM_MAP_ATOMIC
			      | SOS_VM_MAP_PROT_READ
			      | SOS_VM_MAP_PROT_WRITE);
      SOS_ASSERT_FATAL(retval == SOS_OK);

      /* Drop our explicit reference: the mapping holds its own */
      retval = sos_physmem_unref_physpage(ppage_paddr);
      SOS_ASSERT_FATAL(retval == SOS_OK);
    }

  /* Step 2: build the cache-of-caches on those pages */
  *first_slab_of_caches_nb_pages = NB_PAGES_IN_SLAB_OF_CACHES;
  cache_of_struct_kslab_cache
    = create_cache_of_caches(*first_slab_of_caches_base,
			     NB_PAGES_IN_SLAB_OF_CACHES);
  SOS_ASSERT_FATAL(cache_of_struct_kslab_cache != NULL);

  /* Report its slab header so the caller can patch in a range later */
  *first_struct_slab_of_caches
    = list_get_head(cache_of_struct_kslab_cache->slab_list);

  /*
   * Step 3: hand-map the pages for the first slab of the
   * cache-of-ranges, immediately after the previous group (vaddr was
   * left pointing there by the loop above).
   */
  *first_slab_of_ranges_base = vaddr;

  for (i = 0, vaddr = *first_slab_of_ranges_base ;
       i < NB_PAGES_IN_SLAB_OF_RANGES ;
       i++, vaddr += SOS_PAGE_SIZE)
    {
      sos_paddr_t ppage_paddr;

      ppage_paddr
	= sos_physmem_ref_physpage_new(FALSE);
      SOS_ASSERT_FATAL(ppage_paddr != (sos_paddr_t)NULL);

      retval = sos_paging_map(ppage_paddr, vaddr,
			      FALSE,
			      SOS_VM_MAP_ATOMIC
			      | SOS_VM_MAP_PROT_READ
			      | SOS_VM_MAP_PROT_WRITE);
      SOS_ASSERT_FATAL(retval == SOS_OK);

      retval = sos_physmem_unref_physpage(ppage_paddr);
      SOS_ASSERT_FATAL(retval == SOS_OK);
    }

  /* Step 4: build the cache-of-ranges on those pages */
  *first_slab_of_ranges_nb_pages = NB_PAGES_IN_SLAB_OF_RANGES;
  cache_of_ranges = create_cache_of_ranges(*first_slab_of_ranges_base,
					   sizeof_struct_range,
					   NB_PAGES_IN_SLAB_OF_RANGES);
  SOS_ASSERT_FATAL(cache_of_ranges != NULL);

  /* Report its slab header as well */
  *first_struct_slab_of_ranges
    = list_get_head(cache_of_ranges->slab_list);

  /* Step 5: create the cache for off-slab slab headers (now that
     normal cache creation works) */
  cache_of_struct_kslab
    = sos_kmem_cache_create("off-slab slab structures",
			    sizeof(struct sos_kslab),
			    1,
			    0,
			    SOS_KSLAB_CREATE_MAP);
  SOS_ASSERT_FATAL(cache_of_struct_kslab != NULL);

  return cache_of_ranges;
}
545
546
547 sos_ret_t
548 sos_kmem_cache_setup_commit(struct sos_kslab *first_struct_slab_of_caches,
549 struct sos_kmem_range *first_range_of_caches,
550 struct sos_kslab *first_struct_slab_of_ranges,
551 struct sos_kmem_range *first_range_of_ranges)
552 {
553 first_struct_slab_of_caches->range = first_range_of_caches;
554 first_struct_slab_of_ranges->range = first_range_of_ranges;
555 return SOS_OK;
556 }
557
558
559 struct sos_kslab_cache *
560 sos_kmem_cache_create(const char* name,
561 sos_size_t obj_size,
562 sos_count_t pages_per_slab,
563 sos_count_t min_free_objs,
564 sos_ui32_t cache_flags)
565 {
566 struct sos_kslab_cache *new_cache;
567
568
569 new_cache = (struct sos_kslab_cache*)
570 sos_kmem_cache_alloc(cache_of_struct_kslab_cache,
571 0);
572 if (! new_cache)
573 return NULL;
574
575 if (cache_initialize(new_cache, name, obj_size,
576 pages_per_slab, min_free_objs,
577 cache_flags))
578 {
579
580 sos_kmem_cache_free((sos_vaddr_t)new_cache);
581 return NULL;
582 }
583
584
585 list_add_tail(kslab_cache_list, new_cache);
586
587
588 if (min_free_objs)
589 {
590 if (cache_grow(new_cache, 0 ) != SOS_OK)
591 {
592 sos_kmem_cache_destroy(new_cache);
593 return NULL;
594 }
595 }
596
597 return new_cache;
598 }
599
600
601 sos_ret_t sos_kmem_cache_destroy(struct sos_kslab_cache *kslab_cache)
602 {
603 int nb_slabs;
604 struct sos_kslab *slab;
605
606 if (! kslab_cache)
607 return -SOS_EINVAL;
608
609
610
611 list_foreach(kslab_cache->slab_list, slab, nb_slabs)
612 {
613 if (slab->nb_free != kslab_cache->nb_objects_per_slab)
614 return -SOS_EBUSY;
615 }
616
617
618 while ((slab = list_get_head(kslab_cache->slab_list)) != NULL)
619 {
620 cache_release_slab(slab, TRUE);
621 }
622
623
624 return sos_kmem_cache_free((sos_vaddr_t)kslab_cache);
625 }
626
627
628 sos_vaddr_t sos_kmem_cache_alloc(struct sos_kslab_cache *kslab_cache,
629 sos_ui32_t alloc_flags)
630 {
631 sos_vaddr_t obj_vaddr;
632 struct sos_kslab * slab_head;
633 #define ALLOC_RET return
634
635
636
637
638 if ((! kslab_cache->slab_list)
639 || (! list_get_head(kslab_cache->slab_list)->free))
640 {
641 if (cache_grow(kslab_cache, alloc_flags) != SOS_OK)
642
643 ALLOC_RET( (sos_vaddr_t)NULL);
644 }
645
646
647
648
649 slab_head = list_get_head(kslab_cache->slab_list);
650 SOS_ASSERT_FATAL(slab_head != NULL);
651
652
653
654 obj_vaddr = (sos_vaddr_t)list_pop_head(slab_head->free);
655 slab_head->nb_free --;
656 kslab_cache->nb_free_objects --;
657
658
659 if (kslab_cache->flags & SOS_KSLAB_CREATE_ZERO)
660 memset((void*)obj_vaddr, 0x0, kslab_cache->alloc_obj_size);
661
662
663 if (slab_head->free == NULL)
664 {
665
666 struct sos_kslab *slab;
667 slab = list_pop_head(kslab_cache->slab_list);
668 list_add_tail(kslab_cache->slab_list, slab);
669 }
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685 if ((kslab_cache->min_free_objects > 0)
686 && (kslab_cache->nb_free_objects == (kslab_cache->min_free_objects - 1)))
687 {
688
689 if (cache_grow(kslab_cache, alloc_flags) != SOS_OK)
690 {
691
692
693 sos_kmem_cache_free(obj_vaddr);
694 ALLOC_RET( (sos_vaddr_t)NULL);
695 }
696 }
697
698 ALLOC_RET(obj_vaddr);
699 }
700
701
702
703
704
705
706
707
/*
 * Give the object at @vaddr back to its slab.
 *
 * Resolves the slab through the VMM, validates that @vaddr is exactly
 * an object start inside it, pushes the object on the slab's free list
 * and updates the counters. When the slab becomes entirely free AND
 * the cache would still keep min_free_objects without it, the slab is
 * reported through @empty_slab so the caller can release it (it is NOT
 * released here). Returns SOS_OK or -SOS_EINVAL on a bad address.
 */
inline static
sos_ret_t
free_object(sos_vaddr_t vaddr,
	    struct sos_kslab ** empty_slab)
{
  struct sos_kslab_cache *kslab_cache;

  /* Find the slab covering this virtual address */
  struct sos_kslab *slab = sos_kmem_vmm_resolve_slab(vaddr);

  /* By default: no fully-free slab to report */
  *empty_slab = NULL;

  /* Address not backed by any slab */
  if (! slab)
    return -SOS_EINVAL;

  SOS_ASSERT_FATAL(slab->cache);
  kslab_cache = slab->cache;

  /* Reject addresses that do not fall exactly on an object start... */
  if (( (vaddr - slab->first_object)
	% kslab_cache->alloc_obj_size) != 0)
    return -SOS_EINVAL;

  /* ...or that lie beyond the slab's last object */
  if (( (vaddr - slab->first_object)
	/ kslab_cache->alloc_obj_size) >= kslab_cache->nb_objects_per_slab)
    return -SOS_EINVAL;

  /* The slab was full: it regains a free object, so move it back to
     the head of the list where the allocator looks first */
  if (! slab->free)
    {
      list_delete(kslab_cache->slab_list, slab);
      list_add_head(kslab_cache->slab_list, slab);
    }

  /* Push the object on the free list and update the counters */
  list_add_head(slab->free, (struct sos_kslab_free_object*)vaddr);
  slab->nb_free++;
  kslab_cache->nb_free_objects++;
  SOS_ASSERT_FATAL(slab->nb_free <= slab->cache->nb_objects_per_slab);

  /* Report the slab as releasable when it is entirely free AND the
     remaining slabs still cover the cache's reserve */
  if ((slab->nb_free >= kslab_cache->nb_objects_per_slab)
      && (kslab_cache->nb_free_objects - slab->nb_free
	  >= kslab_cache->min_free_objects))
    {
      *empty_slab = slab;
    }

  return SOS_OK;
}
770
771
772 sos_ret_t sos_kmem_cache_free(sos_vaddr_t vaddr)
773 {
774 sos_ret_t retval;
775 struct sos_kslab *empty_slab;
776
777
778 retval = free_object(vaddr, & empty_slab);
779 if (retval != SOS_OK)
780 return retval;
781
782
783 if (empty_slab != NULL)
784 return cache_release_slab(empty_slab, TRUE);
785
786 return SOS_OK;
787 }
788
789
790 struct sos_kmem_range *
791 sos_kmem_cache_release_struct_range(struct sos_kmem_range *the_range)
792 {
793 sos_ret_t retval;
794 struct sos_kslab *empty_slab;
795
796
797 retval = free_object((sos_vaddr_t)the_range, & empty_slab);
798 if (retval != SOS_OK)
799 return NULL;
800
801
802 if (empty_slab != NULL)
803 {
804 struct sos_kmem_range *empty_range = empty_slab->range;
805 SOS_ASSERT_FATAL(cache_release_slab(empty_slab, FALSE) == SOS_OK);
806 SOS_ASSERT_FATAL(empty_range != NULL);
807 return empty_range;
808 }
809
810 return NULL;
811 }
812