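/*
 * kmem_slab.c: the SOS slab allocator for fixed-size kernel objects.
 *
 * Objects of a given size are carved out of "slabs" (small ranges of
 * contiguous kernel virtual pages); a "cache" gathers all the slabs
 * holding objects of one given size. The bottom of this file
 * bootstraps the allocator together with the kmem_vmm range
 * allocator.
 */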
#include <sos/macros.h>
#include <sos/klibc.h>
#include <sos/list.h>
#include <sos/assert.h>
#include <hwcore/paging.h>
#include <sos/physmem.h>
#include <sos/kmem_vmm.h>

#include "kmem_slab.h"

/* Size (in pages) of the initial slab of the cache of caches and of
   the cache of ranges (see the setup code at the bottom of this
   file) */
#define NB_PAGES_IN_SLAB_OF_CACHES 1
#define NB_PAGES_IN_SLAB_OF_RANGES 1

/* Maximum size of a slab, enforced by cache_initialize() below
   (assumed value: 32 pages, i.e. 128 kB with 4 kB pages) */
#define MAX_PAGES_PER_SLAB 32

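/*
 * A cache: the set of slabs holding objects of one given size, plus
 * the allocation parameters computed once by cache_initialize().
 */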
struct sos_kslab_cache
{
  char const* name;

  /* Object sizes: as requested at creation, and as actually allocated
     (aligned, and large enough to chain free objects; see
     cache_initialize below) */
  sos_size_t  original_obj_size;
  sos_size_t  alloc_obj_size;

  /* Geometry of the slabs, computed once at creation */
  sos_count_t nb_objects_per_slab;
  sos_count_t nb_pages_per_slab;
  sos_count_t min_free_objects;

/* Internal flag: the struct sos_kslab is stored inside the slab's
   pages, at their very end ("1U" keeps the shift unsigned) */
#define ON_SLAB (1U << 31)
  sos_ui32_t  flags;

  /* Total number of free objects over all the slabs of the cache */
  sos_count_t nb_free_objects;

  /* The slabs of the cache: slabs with free objects first */
  struct sos_kslab *slab_list;

  /* Chaining in the global list of caches */
  struct sos_kslab_cache *prev, *next;
};

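/*
 * A slab: nb_pages_per_slab contiguous virtual pages carved into
 * equal-size objects. Depending on the ON_SLAB flag of its cache,
 * this structure lives either at the very end of the slab's own pages
 * or in a dedicated cache (cache_of_struct_kslab below).
 */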
struct sos_kslab
{
  /* Number of free objects left in this slab */
  sos_count_t nb_free;

  /* The list of free objects of this slab */
  struct sos_kslab_free_object *free;

  /* The kernel virtual memory range backing this slab */
  struct sos_kmem_range *range;

  /* Virtual address of the first object of the slab */
  sos_vaddr_t first_object;

  /* The cache this slab belongs to */
  struct sos_kslab_cache *cache;

  /* Chaining in the cache's slab list */
  struct sos_kslab *prev, *next;
};

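/*
 * A free object is chained through the (unused) memory of the object
 * itself: this is why objects are at least
 * sizeof(struct sos_kslab_free_object) bytes large.
 */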
struct sos_kslab_free_object
{
  struct sos_kslab_free_object *prev, *next;
};

/* The cache of "struct sos_kslab_cache" (the cache of caches), used
   to allocate every other cache structure */
static struct sos_kslab_cache *cache_of_struct_kslab_cache;

/* The cache of the off-slab "struct sos_kslab" structures */
static struct sos_kslab_cache *cache_of_struct_kslab;

/* The global list of caches */
static struct sos_kslab_cache *kslab_cache_list;

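/*
 * Initialize a cache: compute the effective object size (room for the
 * free-object chaining plus int alignment), the number of objects per
 * slab, and decide whether the slab structure fits inside the slab
 * itself (ON_SLAB).
 */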
static sos_ret_t
cache_initialize(struct sos_kslab_cache *the_cache,
                 const char* name,
                 sos_size_t  obj_size,
                 sos_count_t pages_per_slab,
                 sos_count_t min_free_objs,
                 sos_ui32_t  cache_flags)
{
  unsigned int space_left;
  sos_size_t alloc_obj_size;

  if (obj_size <= 0)
    return -SOS_EINVAL;

  /* Default allocation size: the requested one */
  alloc_obj_size = obj_size;

  /* Make sure the objects are large enough to hold the free-object
     chaining structure */
  if (alloc_obj_size < sizeof(struct sos_kslab_free_object))
    alloc_obj_size = sizeof(struct sos_kslab_free_object);

  /* Align the objects on an int boundary */
  alloc_obj_size = SOS_ALIGN_SUP(alloc_obj_size, sizeof(int));

  /* Refuse caches whose objects would not even fit in one slab */
  if (alloc_obj_size > pages_per_slab*SOS_PAGE_SIZE)
    return -SOS_EINVAL;

  /* Refuse too large slabs */
  if (pages_per_slab > MAX_PAGES_PER_SLAB)
    return -SOS_ENOMEM;

  /* Fill in the cache structure */
  memset(the_cache, 0x0, sizeof(struct sos_kslab_cache));
  the_cache->name              = name;
  the_cache->flags             = cache_flags;
  the_cache->original_obj_size = obj_size;
  the_cache->alloc_obj_size    = alloc_obj_size;
  the_cache->min_free_objects  = min_free_objs;
  the_cache->nb_pages_per_slab = pages_per_slab;

  /* Small objects: store the slab structure at the end of the slab
     itself rather than allocating it off-slab */
  if (alloc_obj_size <= sizeof(struct sos_kslab))
    the_cache->flags |= ON_SLAB;

  /* Compute the number of objects per slab; when ON_SLAB, the slab
     structure steals room from the objects */
  space_left = the_cache->nb_pages_per_slab*SOS_PAGE_SIZE;
  if (the_cache->flags & ON_SLAB)
    space_left -= sizeof(struct sos_kslab);
  the_cache->nb_objects_per_slab = space_left / alloc_obj_size;
  space_left -= the_cache->nb_objects_per_slab*alloc_obj_size;

  /* The requested reserve of free objects must fit in a single slab */
  if (the_cache->nb_objects_per_slab < min_free_objs)
    return -SOS_EINVAL;

  /* If the leftover space is large enough, the slab structure can go
     ON_SLAB even when not requested */
  if (space_left >= sizeof(struct sos_kslab))
    the_cache->flags |= ON_SLAB;

  return SOS_OK;
}

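/*
 * Plug a fresh slab, backed by the pages at vaddr_slab, into the
 * given cache, and chain all its objects into the free list.
 */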
static sos_ret_t
cache_add_slab(struct sos_kslab_cache *kslab_cache,
               sos_vaddr_t vaddr_slab,
               struct sos_kslab *slab)
{
  unsigned int i;

  /* Initialize the slab structure */
  memset(slab, 0x0, sizeof(struct sos_kslab));
  slab->cache = kslab_cache;

  /* The objects start at the base address of the slab's pages */
  slab->first_object = vaddr_slab;

  /* Account for the new free objects */
  slab->nb_free = kslab_cache->nb_objects_per_slab;
  kslab_cache->nb_free_objects += slab->nb_free;

  /* Chain every object of the slab into the slab's free list */
  for (i = 0 ; i < kslab_cache->nb_objects_per_slab ; i++)
    {
      sos_vaddr_t obj_vaddr;

      /* Address of the i-th object in the slab */
      obj_vaddr = slab->first_object + i*kslab_cache->alloc_obj_size;

      /* Add it to the list of free objects */
      list_add_tail(slab->free,
                    (struct sos_kslab_free_object *)obj_vaddr);
    }

  /* A new slab is full of free objects: add it at the head of the
     cache's slab list */
  list_add_head(kslab_cache->slab_list, slab);

  return SOS_OK;
}

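/*
 * Grow a cache by one slab: allocate a fresh kernel virtual memory
 * range, place (or allocate) the slab structure, and chain the new
 * objects into the cache.
 */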
static sos_ret_t
cache_grow(struct sos_kslab_cache *kslab_cache,
           sos_ui32_t alloc_flags)
{
  sos_ui32_t range_alloc_flags;

  struct sos_kmem_range *new_range;
  sos_vaddr_t new_range_start;

  struct sos_kslab *new_slab;

  /* Translate the cache/allocation flags into kmem_vmm range
     allocation flags */
  range_alloc_flags = 0;

  /* An atomic allocation must not block */
  if (alloc_flags & SOS_KSLAB_ALLOC_ATOMIC)
    range_alloc_flags |= SOS_KMEM_VMM_ATOMIC;

  /* Caches with mapped or zeroed objects need the pages to be mapped
     right away */
  if (kslab_cache->flags & (SOS_KSLAB_CREATE_MAP
                           | SOS_KSLAB_CREATE_ZERO))
    range_alloc_flags |= SOS_KMEM_VMM_MAP;

  /* Allocate the virtual memory range for the new slab */
  new_range = sos_kmem_vmm_new_range(kslab_cache->nb_pages_per_slab,
                                     range_alloc_flags,
                                     & new_range_start);
  if (! new_range)
    return -SOS_ENOMEM;

  /* Find some room for the new slab structure */
  if (kslab_cache->flags & ON_SLAB)
    {
      /* ON_SLAB: the slab structure lives at the very end of the
         slab's pages */
      sos_vaddr_t slab_vaddr
        = new_range_start + kslab_cache->nb_pages_per_slab*SOS_PAGE_SIZE
          - sizeof(struct sos_kslab);
      new_slab = (struct sos_kslab*)slab_vaddr;
    }
  else
    {
      /* Off-slab: allocate the slab structure from the dedicated
         cache of struct sos_kslab */
      sos_vaddr_t slab_vaddr
        = sos_kmem_cache_alloc(cache_of_struct_kslab,
                               alloc_flags);
      if (! slab_vaddr)
        {
          sos_kmem_vmm_del_range(new_range);
          return -SOS_ENOMEM;
        }
      new_slab = (struct sos_kslab*)slab_vaddr;
    }

  cache_add_slab(kslab_cache, new_range_start, new_slab);
  new_slab->range = new_range;

  /* Remember that the range now backs a slab, so that addresses can
     be resolved back to their slab upon free */
  sos_kmem_vmm_set_slab(new_range, new_slab);

  return SOS_OK;
}

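/*
 * Release a completely free slab: unlink it from its cache, free its
 * off-slab structure if any, and delete the backing virtual memory
 * range. With must_del_range_now == FALSE the range is left alive and
 * the caller is expected to dispose of it itself (see
 * sos_kmem_cache_release_struct_range below).
 */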
static sos_ret_t
cache_release_slab(struct sos_kslab *slab,
                   sos_bool_t must_del_range_now)
{
  struct sos_kslab_cache *kslab_cache = slab->cache;
  struct sos_kmem_range *range = slab->range;

  SOS_ASSERT_FATAL(kslab_cache != NULL);
  SOS_ASSERT_FATAL(range != NULL);
  SOS_ASSERT_FATAL(slab->nb_free == slab->cache->nb_objects_per_slab);

  /* Unlink the slab from the cache and update the free-object count */
  list_delete(kslab_cache->slab_list, slab);
  slab->cache->nb_free_objects -= slab->nb_free;

  /* Free the slab structure if it was allocated off-slab */
  if (! (slab->cache->flags & ON_SLAB))
    sos_kmem_cache_free((sos_vaddr_t)slab);

  /* The range no longer backs any slab */
  sos_kmem_vmm_set_slab(range, NULL);

  /* Delete the underlying range now, unless the caller wants to do it
     itself */
  if (must_del_range_now)
    return sos_kmem_vmm_del_range(range);

  return SOS_OK;
}

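/*
 * Bootstrap helper: build the "cache of caches" inside the given,
 * already-mapped pages. A temporary cache structure on the stack is
 * used to allocate the definitive one from the very slab it manages.
 */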
static struct sos_kslab_cache *
create_cache_of_caches(sos_vaddr_t vaddr_first_slab_of_caches,
                       int nb_pages)
{
  /* Temporary cache structure, on the stack */
  struct sos_kslab_cache fake_cache_of_caches;

  /* The definitive cache structure, allocated from the slab below */
  struct sos_kslab_cache *real_cache_of_caches;

  /* The first slab of the cache of caches */
  struct sos_kslab       *slab_of_caches;

  /* Initialize the temporary cache: ON_SLAB is forced, so the slab
     structure goes at the end of the given pages */
  if (cache_initialize(& fake_cache_of_caches,
                       "Caches", sizeof(struct sos_kslab_cache),
                       nb_pages, 0, SOS_KSLAB_CREATE_MAP | ON_SLAB))
    /* Something wrong with the parameters */
    return NULL;

  memset((void*)vaddr_first_slab_of_caches, 0x0, nb_pages*SOS_PAGE_SIZE);

  /* The slab structure lives at the very end of the slab's pages */
  slab_of_caches = (struct sos_kslab*)(vaddr_first_slab_of_caches
                                       + nb_pages*SOS_PAGE_SIZE
                                       - sizeof(struct sos_kslab));

  /* Hand the slab over to the (temporary) cache */
  cache_add_slab(& fake_cache_of_caches,
                 vaddr_first_slab_of_caches,
                 slab_of_caches);

  /* Allocate the definitive cache structure from that slab, then copy
     the temporary structure into it */
  real_cache_of_caches
    = (struct sos_kslab_cache*) sos_kmem_cache_alloc(& fake_cache_of_caches,
                                                     0);

  memcpy(real_cache_of_caches, & fake_cache_of_caches,
         sizeof(struct sos_kslab_cache));

  /* Make the slab point back to the definitive cache */
  slab_of_caches->cache = real_cache_of_caches;

  /* Register the cache in the global cache list */
  list_add_tail(kslab_cache_list, real_cache_of_caches);

  return real_cache_of_caches;
}

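/*
 * Bootstrap helper: build the cache of "struct sos_kmem_range" inside
 * the given, already-mapped pages. The cache structure itself is
 * allocated from the cache of caches, which must exist by now.
 */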
static struct sos_kslab_cache *
create_cache_of_ranges(sos_vaddr_t vaddr_first_slab_of_ranges,
                       sos_size_t  sizeof_struct_range,
                       int nb_pages)
{
  /* The cache structure for the ranges */
  struct sos_kslab_cache *cache_of_ranges;

  /* The first slab of this cache */
  struct sos_kslab *slab_of_ranges;

  cache_of_ranges = (struct sos_kslab_cache*)
    sos_kmem_cache_alloc(cache_of_struct_kslab_cache,
                         0);
  if (! cache_of_ranges)
    return NULL;

  /* Initialize the cache: ON_SLAB is forced, and a reserve of 2 free
     range structures is always kept available */
  if (cache_initialize(cache_of_ranges,
                       "struct kmem_range",
                       sizeof_struct_range,
                       nb_pages, 2, SOS_KSLAB_CREATE_MAP | ON_SLAB))
    /* Something wrong with the parameters */
    return NULL;

  /* Register the cache in the global cache list */
  list_add_tail(kslab_cache_list, cache_of_ranges);

  /* Prepare the first slab inside the given pages */
  memset((void*)vaddr_first_slab_of_ranges, 0x0, nb_pages*SOS_PAGE_SIZE);

  /* The slab structure lives at the very end of the slab's pages */
  slab_of_ranges = (struct sos_kslab*)(vaddr_first_slab_of_ranges
                                       + nb_pages*SOS_PAGE_SIZE
                                       - sizeof(struct sos_kslab));

  cache_add_slab(cache_of_ranges,
                 vaddr_first_slab_of_ranges,
                 slab_of_ranges);

  return cache_of_ranges;
}

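/*
 * First half of the subsystem setup, called before the kmem_vmm range
 * allocator exists: map the first slabs by hand right above the
 * kernel code/data, then bootstrap the cache of caches and the cache
 * of ranges. Returns the cache of ranges, and reports through the
 * output parameters what was set up, so that kmem_vmm can later
 * describe these pre-allocated regions with real ranges.
 */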
struct sos_kslab_cache *
sos_kmem_cache_subsystem_setup_prepare(sos_vaddr_t kernel_core_base,
                                       sos_vaddr_t kernel_core_top,
                                       sos_size_t  sizeof_struct_range,
                                       /* Results: */
                                       struct sos_kslab **first_struct_slab_of_caches,
                                       sos_vaddr_t *first_slab_of_caches_base,
                                       sos_count_t *first_slab_of_caches_nb_pages,
                                       struct sos_kslab **first_struct_slab_of_ranges,
                                       sos_vaddr_t *first_slab_of_ranges_base,
                                       sos_count_t *first_slab_of_ranges_nb_pages)
{
  int i;
  sos_ret_t   retval;
  sos_vaddr_t vaddr;

  /* The cache of ranges we are about to build */
  struct sos_kslab_cache *cache_of_ranges;

  /* Reset the global state of the slab subsystem */
  kslab_cache_list = NULL;
  cache_of_struct_kslab = NULL;
  cache_of_struct_kslab_cache = NULL;

  /*
   * Map the pages of the first slab of the cache of caches by hand,
   * right above the kernel code/data.
   */
  *first_slab_of_caches_base = SOS_PAGE_ALIGN_SUP(kernel_core_top);
  for (i = 0, vaddr = *first_slab_of_caches_base ;
       i < NB_PAGES_IN_SLAB_OF_CACHES ;
       i++, vaddr += SOS_PAGE_SIZE)
    {
      sos_paddr_t ppage_paddr;

      ppage_paddr
        = sos_physmem_ref_physpage_new(FALSE);
      SOS_ASSERT_FATAL(ppage_paddr != (sos_paddr_t)NULL);

      retval = sos_paging_map(ppage_paddr, vaddr,
                              FALSE,
                              SOS_VM_MAP_ATOMIC
                              | SOS_VM_MAP_PROT_READ
                              | SOS_VM_MAP_PROT_WRITE);
      SOS_ASSERT_FATAL(retval == SOS_OK);

      /* Drop our own reference: the mapping keeps the page
         referenced, so it is not actually released here */
      retval = sos_physmem_unref_physpage(ppage_paddr);
      SOS_ASSERT_FATAL(retval == FALSE);
    }

  /* Create the cache of caches inside these pages */
  *first_slab_of_caches_nb_pages = NB_PAGES_IN_SLAB_OF_CACHES;
  cache_of_struct_kslab_cache
    = create_cache_of_caches(*first_slab_of_caches_base,
                             NB_PAGES_IN_SLAB_OF_CACHES);
  SOS_ASSERT_FATAL(cache_of_struct_kslab_cache != NULL);

  /* Retrieve the slab that covers these pages */
  *first_struct_slab_of_caches
    = list_get_head(cache_of_struct_kslab_cache->slab_list);

  /*
   * Map the pages of the first slab of the cache of ranges by hand,
   * right behind the slab of caches.
   */
  *first_slab_of_ranges_base = vaddr;
  for (i = 0, vaddr = *first_slab_of_ranges_base ;
       i < NB_PAGES_IN_SLAB_OF_RANGES ;
       i++, vaddr += SOS_PAGE_SIZE)
    {
      sos_paddr_t ppage_paddr;

      ppage_paddr
        = sos_physmem_ref_physpage_new(FALSE);
      SOS_ASSERT_FATAL(ppage_paddr != (sos_paddr_t)NULL);

      retval = sos_paging_map(ppage_paddr, vaddr,
                              FALSE,
                              SOS_VM_MAP_ATOMIC
                              | SOS_VM_MAP_PROT_READ
                              | SOS_VM_MAP_PROT_WRITE);
      SOS_ASSERT_FATAL(retval == SOS_OK);

      retval = sos_physmem_unref_physpage(ppage_paddr);
      SOS_ASSERT_FATAL(retval == FALSE);
    }

  /* Create the cache of ranges inside these pages */
  *first_slab_of_ranges_nb_pages = NB_PAGES_IN_SLAB_OF_RANGES;
  cache_of_ranges = create_cache_of_ranges(*first_slab_of_ranges_base,
                                           sizeof_struct_range,
                                           NB_PAGES_IN_SLAB_OF_RANGES);
  SOS_ASSERT_FATAL(cache_of_ranges != NULL);

  /* Retrieve the slab that covers these pages */
  *first_struct_slab_of_ranges
    = list_get_head(cache_of_ranges->slab_list);

  /* Create the cache of the off-slab slab structures; with
     min_free_objs == 0, no slab is allocated yet: its slabs will be
     allocated lazily through the normal cache_grow path */
  cache_of_struct_kslab
    = sos_kmem_cache_create("off-slab slab structures",
                            sizeof(struct sos_kslab),
                            1,
                            0,
                            SOS_KSLAB_CREATE_MAP);
  SOS_ASSERT_FATAL(cache_of_struct_kslab != NULL);

  return cache_of_ranges;
}

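/*
 * Second half of the subsystem setup, called once the kmem_vmm range
 * allocator is operational: plug the ranges describing the two
 * hand-mapped bootstrap slabs back into their slab structures.
 */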
sos_ret_t
sos_kmem_cache_subsystem_setup_commit(struct sos_kslab *first_struct_slab_of_caches,
                                      struct sos_kmem_range *first_range_of_caches,
                                      struct sos_kslab *first_struct_slab_of_ranges,
                                      struct sos_kmem_range *first_range_of_ranges)
{
  first_struct_slab_of_caches->range = first_range_of_caches;
  first_struct_slab_of_ranges->range = first_range_of_ranges;
  return SOS_OK;
}

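/*
 * Create a new cache. When min_free_objs > 0, a first slab is
 * allocated right away so that the reserve of free objects is
 * available immediately.
 */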
struct sos_kslab_cache *
sos_kmem_cache_create(const char* name,
                      sos_size_t  obj_size,
                      sos_count_t pages_per_slab,
                      sos_count_t min_free_objs,
                      sos_ui32_t  cache_flags)
{
  struct sos_kslab_cache *new_cache;

  SOS_ASSERT_FATAL(obj_size > 0);

  /* Allocate the new cache structure from the cache of caches */
  new_cache = (struct sos_kslab_cache*)
    sos_kmem_cache_alloc(cache_of_struct_kslab_cache,
                         0);
  if (! new_cache)
    return NULL;

  if (cache_initialize(new_cache, name, obj_size,
                       pages_per_slab, min_free_objs,
                       cache_flags))
    {
      /* Something was wrong with the parameters */
      sos_kmem_cache_free((sos_vaddr_t)new_cache);
      return NULL;
    }

  /* Register the cache in the global cache list */
  list_add_tail(kslab_cache_list, new_cache);

  /* Pre-allocate a first slab if a reserve of free objects was
     requested */
  if (min_free_objs)
    {
      if (cache_grow(new_cache, 0 /* not atomic */) != SOS_OK)
        {
          sos_kmem_cache_destroy(new_cache);
          return NULL; /* Not enough memory */
        }
    }

  return new_cache;
}
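
/*
 * A sketch of typical use of the API above (struct my_obj and the
 * cache parameters are illustrative only, not part of this file):
 *
 *   struct sos_kslab_cache *my_cache
 *     = sos_kmem_cache_create("my objects", sizeof(struct my_obj),
 *                             1,   // 1 page per slab
 *                             0,   // no reserve of free objects
 *                             SOS_KSLAB_CREATE_MAP);
 *   struct my_obj *obj
 *     = (struct my_obj*) sos_kmem_cache_alloc(my_cache, 0);
 *   ...
 *   sos_kmem_cache_free((sos_vaddr_t)obj);
 *   sos_kmem_cache_destroy(my_cache);
 */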
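
/*
 * Destroy a cache: refuse if any of its objects is still in use,
 * otherwise release every slab and the cache structure itself.
 */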
sos_ret_t sos_kmem_cache_destroy(struct sos_kslab_cache *kslab_cache)
{
  int nb_slabs;
  struct sos_kslab *slab;

  if (! kslab_cache)
    return -SOS_EINVAL;

  /* Refuse to destroy the cache as long as any of its objects is
     still allocated */
  list_foreach(kslab_cache->slab_list, slab, nb_slabs)
    {
      if (slab->nb_free != kslab_cache->nb_objects_per_slab)
        return -SOS_EBUSY;
    }

  /* Release all the (completely free) slabs */
  while ((slab = list_get_head(kslab_cache->slab_list)) != NULL)
    {
      cache_release_slab(slab, TRUE);
    }

  /* Unlink the cache from the global cache list before freeing it, so
     that the list does not keep a dangling pointer */
  list_delete(kslab_cache_list, kslab_cache);

  /* Release the cache structure itself */
  return sos_kmem_cache_free((sos_vaddr_t)kslab_cache);
}

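/*
 * Allocate one object from the given cache, growing the cache first
 * when no free object is available. The slab list is kept ordered
 * with the slabs holding free objects at its head; the cache is also
 * grown ahead of time when the number of free objects drops below
 * min_free_objects.
 */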
sos_vaddr_t sos_kmem_cache_alloc(struct sos_kslab_cache *kslab_cache,
                                 sos_ui32_t alloc_flags)
{
  sos_vaddr_t obj_vaddr;
  struct sos_kslab * slab_head;
#define ALLOC_RET return /* single exit point, kept as a macro hook */

  /* If the slab at the head of the slab list has no free object, then
     every slab is full: grow the cache */
  if ((! kslab_cache->slab_list)
      || (! list_get_head(kslab_cache->slab_list)->free))
    {
      if (cache_grow(kslab_cache, alloc_flags) != SOS_OK)
        /* Not enough memory, or blocking allocation not allowed */
        ALLOC_RET( (sos_vaddr_t)NULL);
    }

  /* Here the slab at the head of the list is guaranteed to hold at
     least one free object */
  slab_head = list_get_head(kslab_cache->slab_list);
  SOS_ASSERT_FATAL(slab_head != NULL);

  /* Pick the first free object of that slab */
  obj_vaddr = (sos_vaddr_t)list_pop_head(slab_head->free);
  slab_head->nb_free --;
  kslab_cache->nb_free_objects --;

  /* Zero the object if the cache was created with SOS_KSLAB_CREATE_ZERO */
  if (kslab_cache->flags & SOS_KSLAB_CREATE_ZERO)
    memset((void*)obj_vaddr, 0x0, kslab_cache->alloc_obj_size);

  /* If the slab just became full, move it to the tail of the slab
     list so that slabs with free objects stay at the head */
  if (slab_head->free == NULL)
    {
      struct sos_kslab *slab;
      slab = list_pop_head(kslab_cache->slab_list);
      list_add_tail(kslab_cache->slab_list, slab);
    }

  /*
   * Maintain the requested reserve of free objects: grow the cache as
   * soon as the number of free objects drops below
   * min_free_objects. This is what allows the cache of kmem_vmm range
   * structures (created with min_free_objs == 2) to always find a
   * free range structure while it is itself allocating the memory for
   * new slabs.
   */
  if ((kslab_cache->min_free_objects > 0)
      && (kslab_cache->nb_free_objects == (kslab_cache->min_free_objects - 1)))
    {
      /* Grow the cache to restore the reserve */
      if (cache_grow(kslab_cache, alloc_flags) != SOS_OK)
        {
          /* Could not restore the reserve: cancel the allocation */
          sos_kmem_cache_free(obj_vaddr);
          ALLOC_RET( (sos_vaddr_t)NULL);
        }
    }

  ALLOC_RET(obj_vaddr);
}

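/*
 * Helper for the two "free" functions below: return an object to its
 * slab, and report through empty_slab the slab to release, if any. A
 * slab is reported as releasable only when it is completely free AND
 * the cache keeps at least min_free_objects free objects without it.
 */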
inline static
sos_ret_t
free_object(sos_vaddr_t vaddr,
            struct sos_kslab ** empty_slab)
{
  struct sos_kslab_cache *kslab_cache;

  /* Resolve the address back to the slab covering it */
  struct sos_kslab *slab = sos_kmem_vmm_resolve_slab(vaddr);

  /* By default, no slab needs to be released */
  *empty_slab = NULL;

  /* The address is not covered by any slab: not one of our objects */
  if (! slab)
    return -SOS_EINVAL;

  SOS_ASSERT_FATAL(slab->cache);
  kslab_cache = slab->cache;

  /* Check that the address lies on an object boundary... */
  if (( (vaddr - slab->first_object)
        % kslab_cache->alloc_obj_size) != 0)
    return -SOS_EINVAL;

  /* ... and inside the objects of the slab */
  if (( (vaddr - slab->first_object)
        / kslab_cache->alloc_obj_size) >= kslab_cache->nb_objects_per_slab)
    return -SOS_EINVAL;

  /* The slab is about to gain a free object: move it back to the head
     of the slab list if it was full */
  if (! slab->free)
    {
      list_delete(kslab_cache->slab_list, slab);
      list_add_head(kslab_cache->slab_list, slab);
    }

  /* Chain the object back into the slab's free list */
  list_add_head(slab->free, (struct sos_kslab_free_object*)vaddr);
  slab->nb_free++;
  kslab_cache->nb_free_objects++;
  SOS_ASSERT_FATAL(slab->nb_free <= slab->cache->nb_objects_per_slab);

  /* The slab is completely free, and the other slabs retain at least
     min_free_objects free objects: report it as releasable */
  if ((slab->nb_free >= kslab_cache->nb_objects_per_slab)
      && (kslab_cache->nb_free_objects - slab->nb_free
          >= kslab_cache->min_free_objects))
    {
      *empty_slab = slab;
    }

  return SOS_OK;
}

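/*
 * Free an object previously returned by sos_kmem_cache_alloc. The
 * owning cache is found back from the address itself.
 */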
sos_ret_t sos_kmem_cache_free(sos_vaddr_t vaddr)
{
  sos_ret_t retval;
  struct sos_kslab *empty_slab;

  /* Return the object to its slab */
  retval = free_object(vaddr, & empty_slab);
  if (retval != SOS_OK)
    return retval;

  /* Release the slab (and its range) right away if it became useless */
  if (empty_slab != NULL)
    return cache_release_slab(empty_slab, TRUE);

  return SOS_OK;
}

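/*
 * Variant of sos_kmem_cache_free dedicated to the kmem_vmm range
 * allocator: free a struct sos_kmem_range, but when this empties a
 * slab, do NOT delete the underlying range here. Instead, the range
 * is handed back to the caller, which disposes of it itself; this
 * avoids a recursive call back into the range allocator.
 */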
struct sos_kmem_range *
sos_kmem_cache_release_struct_range(struct sos_kmem_range *the_range)
{
  sos_ret_t retval;
  struct sos_kslab *empty_slab;

  /* Return the range structure to its slab */
  retval = free_object((sos_vaddr_t)the_range, & empty_slab);
  if (retval != SOS_OK)
    return NULL;

  /* A slab became useless: release it, and hand its backing range
     back to the caller */
  if (empty_slab != NULL)
    {
      struct sos_kmem_range *empty_range = empty_slab->range;
      SOS_ASSERT_FATAL(cache_release_slab(empty_slab, FALSE) == SOS_OK);
      SOS_ASSERT_FATAL(empty_range != NULL);
      return empty_range;
    }

  return NULL;
}