/* Copyright (C) 2000 Thomas Petazzoni
   Copyright (C) 2004 David Decotigny

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License
   as published by the Free Software Foundation; either version 2
   of the License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
   USA.
*/
#include <sos/macros.h>
#include <sos/klibc.h>
#include <sos/list.h>
#include <sos/assert.h>
#include <hwcore/paging.h>
#include <sos/physmem.h>
#include <sos/kmem_vmm.h>

#include "kmem_slab.h"

/* Dimensioning constants */
#define NB_PAGES_IN_SLAB_OF_CACHES 1
#define NB_PAGES_IN_SLAB_OF_RANGES 1

/** The structure of a slab cache */
struct sos_kslab_cache
{
  char const* name;

  /* Non-mutable characteristics of this cache */
  sos_size_t  original_obj_size; /* requested object size */
  sos_size_t  alloc_obj_size;    /* actual object size, taking the
                                    alignment constraints into account */
  sos_count_t nb_objects_per_slab;
  sos_count_t nb_pages_per_slab;
  sos_count_t min_free_objects;

/* slab cache flags */
// #define SOS_KSLAB_CREATE_MAP  (1<<0) /* See kmem_slab.h */
// #define SOS_KSLAB_CREATE_ZERO (1<<1) /* " " " " " " " " */
#define ON_SLAB (1<<31) /* struct sos_kslab is included inside the slab */
  sos_ui32_t  flags;

  /* Supervision data (updated at run-time) */
  sos_count_t nb_free_objects;

  /* The lists of slabs owned by this cache */
  struct sos_kslab *slab_list; /* head = non-full, tail = full */

  /* The caches are linked together on the kslab_cache_list */
  struct sos_kslab_cache *prev, *next;
};


/** The structure of a slab */
struct sos_kslab
{
  /** Number of free objects on this slab */
  sos_count_t nb_free;

  /** The list of these free objects */
  struct sos_kslab_free_object *free;

  /** The address of the associated range structure */
  struct sos_kmem_range *range;

  /** Virtual address of the first object in this slab (start of the range) */
  sos_vaddr_t first_object;

  /** Slab cache owning this slab */
  struct sos_kslab_cache *cache;

  /** Links to the other slabs managed by the same cache */
  struct sos_kslab *prev, *next;
};


/** The structure of the free objects in the slab */
struct sos_kslab_free_object
{
  struct sos_kslab_free_object *prev, *next;
};

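/*
 * Illustrative layout of an ON_SLAB slab (a sketch, assuming a one-page
 * slab, SOS_PAGE_SIZE = 4 kB and a 32-bit build; the sizes are only
 * examples, not normative):
 *
 *   page start                                            page end
 *   +----------+----------+-- ... --+------------+------------------+
 *   | object 0 | object 1 |         | object N-1 | struct sos_kslab |
 *   +----------+----------+-- ... --+------------+------------------+
 *   ^ first_object                               ^ last sizeof(struct
 *                                                  sos_kslab) bytes
 *
 * Each free object stores a struct sos_kslab_free_object in its first
 * bytes, which is why cache_initialize() below never accepts objects
 * smaller than sizeof(struct sos_kslab_free_object).
 */
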
/** The cache of slab caches */
static struct sos_kslab_cache *cache_of_struct_kslab_cache;

/** The cache of slab structures for non-ON_SLAB caches */
static struct sos_kslab_cache *cache_of_struct_kslab;

/** The list of slab caches */
static struct sos_kslab_cache *kslab_cache_list;

/* Helper function to initialize a cache structure */
static sos_ret_t
cache_initialize(/*out*/struct sos_kslab_cache *the_cache,
                 const char* name,
                 sos_size_t  obj_size,
                 sos_count_t pages_per_slab,
                 sos_count_t min_free_objs,
                 sos_ui32_t  cache_flags)
{
  unsigned int space_left;
  sos_size_t alloc_obj_size;

  if (obj_size <= 0)
    return -SOS_EINVAL;

  /* Default allocation size is the requested one */
  alloc_obj_size = obj_size;

  /* Make sure the requested size is large enough to store a
     free_object structure */
  if (alloc_obj_size < sizeof(struct sos_kslab_free_object))
    alloc_obj_size = sizeof(struct sos_kslab_free_object);

  /* Align obj_size on 4 bytes */
  alloc_obj_size = SOS_ALIGN_SUP(alloc_obj_size, sizeof(int));

  /* Make sure the supplied number of pages per slab is consistent with
     the actual allocated object size */
  if (alloc_obj_size > pages_per_slab*SOS_PAGE_SIZE)
    return -SOS_EINVAL;

  /* Refuse too large slabs */
  if (pages_per_slab > MAX_PAGES_PER_SLAB)
    return -SOS_ENOMEM;

  /* Fill in the cache structure */
  memset(the_cache, 0x0, sizeof(struct sos_kslab_cache));
  the_cache->name              = name;
  the_cache->flags             = cache_flags;
  the_cache->original_obj_size = obj_size;
  the_cache->alloc_obj_size    = alloc_obj_size;
  the_cache->min_free_objects  = min_free_objs;
  the_cache->nb_pages_per_slab = pages_per_slab;

  /* Small-size objects => the slab structure is allocated directly
     inside the slab */
  if(alloc_obj_size <= sizeof(struct sos_kslab))
    the_cache->flags |= ON_SLAB;

  /*
   * Compute the space left once the maximum number of objects
   * have been allocated in the slab
   */
  space_left = the_cache->nb_pages_per_slab*SOS_PAGE_SIZE;
  if(the_cache->flags & ON_SLAB)
    space_left -= sizeof(struct sos_kslab);
  the_cache->nb_objects_per_slab = space_left / alloc_obj_size;
  space_left -= the_cache->nb_objects_per_slab*alloc_obj_size;

  /* Make sure a single slab is large enough to contain the minimum
     number of objects requested */
  if (the_cache->nb_objects_per_slab < min_free_objs)
    return -SOS_EINVAL;

  /* If the space left over after the objects is large enough for the
     slab structure, then make the slab structure ON_SLAB */
  if (space_left >= sizeof(struct sos_kslab))
    the_cache->flags |= ON_SLAB;

  return SOS_OK;
}
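
/*
 * Worked example for cache_initialize(), assuming SOS_PAGE_SIZE = 4096
 * and sizeof(struct sos_kslab) = 28 on a 32-bit build (both values are
 * assumptions made only for this illustration):
 *
 *   obj_size = 16, pages_per_slab = 1
 *     alloc_obj_size      = 16  (>= sizeof free_object, already 4-aligned)
 *     ON_SLAB gets set         (16 <= 28)
 *     space_left          = 4096 - 28 = 4068
 *     nb_objects_per_slab = 4068 / 16 = 254
 *     space_left          = 4068 - 254*16 = 4
 *     4 < 28, so the final ON_SLAB re-check changes nothing here.
 */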


/** Helper function to add a new slab for the given cache. */
static sos_ret_t
cache_add_slab(struct sos_kslab_cache *kslab_cache,
               sos_vaddr_t vaddr_slab,
               struct sos_kslab *slab)
{
  unsigned int i;

  /* Setup the slab structure */
  memset(slab, 0x0, sizeof(struct sos_kslab));
  slab->cache = kslab_cache;

  /* Establish the address of the first free object */
  slab->first_object = vaddr_slab;

  /* Account for this new slab in the cache */
  slab->nb_free = kslab_cache->nb_objects_per_slab;
  kslab_cache->nb_free_objects += slab->nb_free;

  /* Build the list of free objects */
  for (i = 0 ; i < kslab_cache->nb_objects_per_slab ; i++)
    {
      sos_vaddr_t obj_vaddr;

      /* Set object's address */
      obj_vaddr = slab->first_object + i*kslab_cache->alloc_obj_size;

      /* Add it to the list of free objects */
      list_add_tail(slab->free,
                    (struct sos_kslab_free_object *)obj_vaddr);
    }

  /* Add the slab to the cache's slab list: add it at the head of the
     list since this slab is not full */
  list_add_head(kslab_cache->slab_list, slab);

  return SOS_OK;
}


/** Helper function to allocate a new slab for the given kslab_cache */
static sos_ret_t
cache_grow(struct sos_kslab_cache *kslab_cache,
           sos_ui32_t alloc_flags)
{
  sos_ui32_t range_alloc_flags;

  struct sos_kmem_range *new_range;
  sos_vaddr_t new_range_start;

  struct sos_kslab *new_slab;

  /*
   * Setup the flags for the range allocation
   */
  range_alloc_flags = 0;

  /* Atomic ? */
  if (alloc_flags & SOS_KSLAB_ALLOC_ATOMIC)
    range_alloc_flags |= SOS_KMEM_VMM_ATOMIC;

  /* Need physical mapping NOW ? */
  if (kslab_cache->flags & (SOS_KSLAB_CREATE_MAP
                           | SOS_KSLAB_CREATE_ZERO))
    range_alloc_flags |= SOS_KMEM_VMM_MAP;

  /* Allocate the range */
  new_range = sos_kmem_vmm_new_range(kslab_cache->nb_pages_per_slab,
                                     range_alloc_flags,
                                     & new_range_start);
  if (! new_range)
    return -SOS_ENOMEM;

  /* Allocate the slab structure */
  if (kslab_cache->flags & ON_SLAB)
    {
      /* Slab structure is ON the slab: simply set its address to the
         end of the range */
      sos_vaddr_t slab_vaddr
        = new_range_start + kslab_cache->nb_pages_per_slab*SOS_PAGE_SIZE
          - sizeof(struct sos_kslab);
      new_slab = (struct sos_kslab*)slab_vaddr;
    }
  else
    {
      /* Slab structure is OFF the slab: allocate it from the cache of
         slab structures */
      sos_vaddr_t slab_vaddr
        = sos_kmem_cache_alloc(cache_of_struct_kslab,
                               alloc_flags);
      if (! slab_vaddr)
        {
          sos_kmem_vmm_del_range(new_range);
          return -SOS_ENOMEM;
        }
      new_slab = (struct sos_kslab*)slab_vaddr;
    }

  cache_add_slab(kslab_cache, new_range_start, new_slab);
  new_slab->range = new_range;

  /* Set the backlink from range to this slab */
  sos_kmem_vmm_set_slab(new_range, new_slab);

  return SOS_OK;
}
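
/*
 * Example of the ON_SLAB placement computed above, with an assumed
 * (purely illustrative) 1-page range starting at new_range_start =
 * 0x400000 and an assumed sizeof(struct sos_kslab) of 28 bytes:
 *
 *   slab_vaddr = 0x400000 + 1*4096 - 28 = 0x400fe4
 *
 * i.e. the struct sos_kslab sits in the last bytes of the slab's pages,
 * while the objects start at new_range_start (see cache_add_slab()).
 */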


/**
 * Helper function to release a slab
 *
 * The underlying range is deleted too, unless @param must_del_range_now
 * is not set. The latter happens only when the function gets called
 * from sos_kmem_cache_release_struct_range(), to avoid deep recursion.
 */
static sos_ret_t
cache_release_slab(struct sos_kslab *slab,
                   sos_bool_t must_del_range_now)
{
  struct sos_kslab_cache *kslab_cache = slab->cache;
  struct sos_kmem_range *range = slab->range;

  SOS_ASSERT_FATAL(kslab_cache != NULL);
  SOS_ASSERT_FATAL(range != NULL);
  SOS_ASSERT_FATAL(slab->nb_free == slab->cache->nb_objects_per_slab);

  /* First, remove the slab from the slabs' list of the cache */
  list_delete(kslab_cache->slab_list, slab);
  slab->cache->nb_free_objects -= slab->nb_free;

  /* Release the slab structure if it is OFF slab */
  if (! (slab->cache->flags & ON_SLAB))
    sos_kmem_cache_free((sos_vaddr_t)slab);

  /* Ok, the range is not bound to any slab anymore */
  sos_kmem_vmm_set_slab(range, NULL);

  /* Always delete the range now, unless we are told not to do so (see
     sos_kmem_cache_release_struct_range() below) */
  if (must_del_range_now)
    return sos_kmem_vmm_del_range(range);

  return SOS_OK;
}


/**
 * Helper function to create the initial cache of caches, with a very
 * first slab in it, so that new cache structures can be simply allocated.
 * @return the cache structure for the cache of caches
 */
static struct sos_kslab_cache *
create_cache_of_caches(sos_vaddr_t vaddr_first_slab_of_caches,
                       int nb_pages)
{
  /* The preliminary cache structure we need in order to allocate the
     first slab in the cache of caches (allocated on the stack !) */
  struct sos_kslab_cache fake_cache_of_caches;

  /* The real cache structure for the cache of caches */
  struct sos_kslab_cache *real_cache_of_caches;

  /* The kslab structure for this very first slab */
  struct sos_kslab       *slab_of_caches;

  /* Init the cache structure for the cache of caches */
  if (cache_initialize(& fake_cache_of_caches,
                       "Caches", sizeof(struct sos_kslab_cache),
                       nb_pages, 0, SOS_KSLAB_CREATE_MAP | ON_SLAB))
    /* Something wrong with the parameters */
    return NULL;

  memset((void*)vaddr_first_slab_of_caches, 0x0, nb_pages*SOS_PAGE_SIZE);

  /* Locate the slab structure at the end of the pages of this 1st slab
     of caches */
  slab_of_caches = (struct sos_kslab*)(vaddr_first_slab_of_caches
                                       + nb_pages*SOS_PAGE_SIZE
                                       - sizeof(struct sos_kslab));

  /* Add the above-mentioned 1st slab to the cache of caches */
  cache_add_slab(& fake_cache_of_caches,
                 vaddr_first_slab_of_caches,
                 slab_of_caches);

  /* Now we allocate a cache structure, which will be the real cache
     of caches, i.e. a cache structure allocated INSIDE the cache of
     caches, not on the stack */
  real_cache_of_caches
    = (struct sos_kslab_cache*) sos_kmem_cache_alloc(& fake_cache_of_caches,
                                                     0);
  /* We initialize it from the temporary structure */
  memcpy(real_cache_of_caches, & fake_cache_of_caches,
         sizeof(struct sos_kslab_cache));
  /* We need to update the slab's 'cache' field */
  slab_of_caches->cache = real_cache_of_caches;

  /* Add the cache to the list of slab caches */
  list_add_tail(kslab_cache_list, real_cache_of_caches);

  return real_cache_of_caches;
}


/**
 * Helper function to create the initial cache of ranges, with a very
 * first slab in it, so that new kmem_range structures can be simply
 * allocated.
 * @return the cache of kmem_range
 */
static struct sos_kslab_cache *
create_cache_of_ranges(sos_vaddr_t vaddr_first_slab_of_ranges,
                       sos_size_t  sizeof_struct_range,
                       int nb_pages)
{
  /* The cache structure for the cache of kmem_range */
  struct sos_kslab_cache *cache_of_ranges;

  /* The kslab structure for the very first slab of ranges */
  struct sos_kslab *slab_of_ranges;

  cache_of_ranges = (struct sos_kslab_cache*)
    sos_kmem_cache_alloc(cache_of_struct_kslab_cache,
                         0);
  if (! cache_of_ranges)
    return NULL;

  /* Init the cache structure for the cache of ranges with min free
     objects per slab = 2 !!! */
  if (cache_initialize(cache_of_ranges,
                       "struct kmem_range",
                       sizeof_struct_range,
                       nb_pages, 2, SOS_KSLAB_CREATE_MAP | ON_SLAB))
    /* Something wrong with the parameters */
    return NULL;

  /* Add the cache to the list of slab caches */
  list_add_tail(kslab_cache_list, cache_of_ranges);

  /*
   * Add the first slab for this cache
   */
  memset((void*)vaddr_first_slab_of_ranges, 0x0, nb_pages*SOS_PAGE_SIZE);

  /* Locate the slab structure at the end of the pages of this 1st slab
     of ranges */
  slab_of_ranges = (struct sos_kslab*)(vaddr_first_slab_of_ranges
                                       + nb_pages*SOS_PAGE_SIZE
                                       - sizeof(struct sos_kslab));

  cache_add_slab(cache_of_ranges,
                 vaddr_first_slab_of_ranges,
                 slab_of_ranges);

  return cache_of_ranges;
}


struct sos_kslab_cache *
sos_kmem_cache_subsystem_setup_prepare(sos_vaddr_t kernel_core_base,
                                       sos_vaddr_t kernel_core_top,
                                       sos_size_t  sizeof_struct_range,
                                       /* results */
                                       struct sos_kslab **first_struct_slab_of_caches,
                                       sos_vaddr_t *first_slab_of_caches_base,
                                       sos_count_t *first_slab_of_caches_nb_pages,
                                       struct sos_kslab **first_struct_slab_of_ranges,
                                       sos_vaddr_t *first_slab_of_ranges_base,
                                       sos_count_t *first_slab_of_ranges_nb_pages)
{
  int i;
  sos_ret_t   retval;
  sos_vaddr_t vaddr;

  /* The cache of ranges we are about to allocate */
  struct sos_kslab_cache *cache_of_ranges;

  /* In the beginning, there is no cache at all */
  kslab_cache_list = NULL;
  cache_of_struct_kslab = NULL;
  cache_of_struct_kslab_cache = NULL;

  /*
   * Create the cache of caches, initialised with 1 allocated slab
   */

  /* Allocate the pages needed for the 1st slab of caches, and map them
     in kernel space, right after the kernel */
  *first_slab_of_caches_base = SOS_PAGE_ALIGN_SUP(kernel_core_top);
  for (i = 0, vaddr = *first_slab_of_caches_base ;
       i < NB_PAGES_IN_SLAB_OF_CACHES ;
       i++, vaddr += SOS_PAGE_SIZE)
    {
      sos_paddr_t ppage_paddr;

      ppage_paddr
        = sos_physmem_ref_physpage_new(FALSE);
      SOS_ASSERT_FATAL(ppage_paddr != (sos_paddr_t)NULL);

      retval = sos_paging_map(ppage_paddr, vaddr,
                              FALSE,
                              SOS_VM_MAP_ATOMIC
                              | SOS_VM_MAP_PROT_READ
                              | SOS_VM_MAP_PROT_WRITE);
      SOS_ASSERT_FATAL(retval == SOS_OK);

      retval = sos_physmem_unref_physpage(ppage_paddr);
      SOS_ASSERT_FATAL(retval == FALSE);
    }

  /* Create the cache of caches */
  *first_slab_of_caches_nb_pages = NB_PAGES_IN_SLAB_OF_CACHES;
  cache_of_struct_kslab_cache
    = create_cache_of_caches(*first_slab_of_caches_base,
                             NB_PAGES_IN_SLAB_OF_CACHES);
  SOS_ASSERT_FATAL(cache_of_struct_kslab_cache != NULL);

  /* Retrieve the slab that should have been allocated */
  *first_struct_slab_of_caches
    = list_get_head(cache_of_struct_kslab_cache->slab_list);


  /*
   * Create the cache of ranges, initialised with 1 allocated slab
   */
  *first_slab_of_ranges_base = vaddr;
  /* Allocate the 1st slab */
  for (i = 0, vaddr = *first_slab_of_ranges_base ;
       i < NB_PAGES_IN_SLAB_OF_RANGES ;
       i++, vaddr += SOS_PAGE_SIZE)
    {
      sos_paddr_t ppage_paddr;

      ppage_paddr
        = sos_physmem_ref_physpage_new(FALSE);
      SOS_ASSERT_FATAL(ppage_paddr != (sos_paddr_t)NULL);

      retval = sos_paging_map(ppage_paddr, vaddr,
                              FALSE,
                              SOS_VM_MAP_ATOMIC
                              | SOS_VM_MAP_PROT_READ
                              | SOS_VM_MAP_PROT_WRITE);
      SOS_ASSERT_FATAL(retval == SOS_OK);

      retval = sos_physmem_unref_physpage(ppage_paddr);
      SOS_ASSERT_FATAL(retval == FALSE);
    }

  /* Create the cache of ranges */
  *first_slab_of_ranges_nb_pages = NB_PAGES_IN_SLAB_OF_RANGES;
  cache_of_ranges = create_cache_of_ranges(*first_slab_of_ranges_base,
                                           sizeof_struct_range,
                                           NB_PAGES_IN_SLAB_OF_RANGES);
  SOS_ASSERT_FATAL(cache_of_ranges != NULL);

  /* Retrieve the slab that should have been allocated */
  *first_struct_slab_of_ranges
    = list_get_head(cache_of_ranges->slab_list);

  /*
   * Create the cache of slabs, without any allocated slab yet
   */
  cache_of_struct_kslab
    = sos_kmem_cache_create("off-slab slab structures",
                            sizeof(struct sos_kslab),
                            1,
                            0,
                            SOS_KSLAB_CREATE_MAP);
  SOS_ASSERT_FATAL(cache_of_struct_kslab != NULL);

  return cache_of_ranges;
}


sos_ret_t
sos_kmem_cache_subsystem_setup_commit(struct sos_kslab *first_struct_slab_of_caches,
                                      struct sos_kmem_range *first_range_of_caches,
                                      struct sos_kslab *first_struct_slab_of_ranges,
                                      struct sos_kmem_range *first_range_of_ranges)
{
  first_struct_slab_of_caches->range = first_range_of_caches;
  first_struct_slab_of_ranges->range = first_range_of_ranges;
  return SOS_OK;
}
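
#if 0
/*
 * Sketch (illustrative only) of how the prepare/commit pair above is
 * meant to be driven by its caller, presumably the kmem_vmm setup code.
 * All local names below are hypothetical; only the two
 * sos_kmem_cache_subsystem_setup_* calls come from this file.
 */
static void example_bootstrap(sos_vaddr_t kernel_core_base,
                              sos_vaddr_t kernel_core_top)
{
  struct sos_kslab *slab_of_caches, *slab_of_ranges;
  sos_vaddr_t caches_base, ranges_base;
  sos_count_t caches_nb_pages, ranges_nb_pages;
  struct sos_kslab_cache *cache_of_ranges;
  struct sos_kmem_range *range_of_caches, *range_of_ranges;

  /* 1. Build the bootstrap caches before any kmem_range bookkeeping
        exists; the function reports where the two hand-made slabs live */
  cache_of_ranges =
    sos_kmem_cache_subsystem_setup_prepare(kernel_core_base, kernel_core_top,
                                           sizeof(struct sos_kmem_range),
                                           &slab_of_caches,
                                           &caches_base, &caches_nb_pages,
                                           &slab_of_ranges,
                                           &ranges_base, &ranges_nb_pages);

  /* 2. The caller builds the kmem_range structures describing those two
        bootstrap slabs (not shown here), then hands them back: */
  sos_kmem_cache_subsystem_setup_commit(slab_of_caches, range_of_caches,
                                        slab_of_ranges, range_of_ranges);
}
#endif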


struct sos_kslab_cache *
sos_kmem_cache_create(const char* name,
                      sos_size_t  obj_size,
                      sos_count_t pages_per_slab,
                      sos_count_t min_free_objs,
                      sos_ui32_t  cache_flags)
{
  struct sos_kslab_cache *new_cache;

  SOS_ASSERT_FATAL(obj_size > 0);

  /* Allocate the new cache */
  new_cache = (struct sos_kslab_cache*)
    sos_kmem_cache_alloc(cache_of_struct_kslab_cache,
                         0/* NOT ATOMIC */);
  if (! new_cache)
    return NULL;

  if (cache_initialize(new_cache, name, obj_size,
                       pages_per_slab, min_free_objs,
                       cache_flags))
    {
      /* Something was wrong */
      sos_kmem_cache_free((sos_vaddr_t)new_cache);
      return NULL;
    }

  /* Add the cache to the list of slab caches */
  list_add_tail(kslab_cache_list, new_cache);

  /* If min_free_objs is set, pre-allocate a slab */
  if (min_free_objs)
    {
      if (cache_grow(new_cache, 0 /* Not atomic */) != SOS_OK)
        {
          sos_kmem_cache_destroy(new_cache);
          return NULL; /* Not enough memory */
        }
    }

  return new_cache;
}
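
#if 0
/*
 * Minimal usage sketch of the public allocator API defined in this file.
 * The "foo" cache, its parameters and the example() function are purely
 * illustrative; only the sos_kmem_cache_* calls come from kmem_slab.h.
 */
struct foo { int x, y; };

static void example(void)
{
  struct sos_kslab_cache *foo_cache;
  struct foo *f;

  /* One page per slab, no pre-allocated reserve, objects zeroed on alloc */
  foo_cache = sos_kmem_cache_create("foo", sizeof(struct foo),
                                    1, 0, SOS_KSLAB_CREATE_ZERO);

  f = (struct foo*) sos_kmem_cache_alloc(foo_cache, 0 /* may block */);
  /* ... use f ... */
  sos_kmem_cache_free((sos_vaddr_t)f);

  sos_kmem_cache_destroy(foo_cache);
}
#endif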


sos_ret_t sos_kmem_cache_destroy(struct sos_kslab_cache *kslab_cache)
{
  int nb_slabs;
  struct sos_kslab *slab;

  if (! kslab_cache)
    return -SOS_EINVAL;

  /* Refuse to destroy the cache if there are any objects still
     allocated */
  list_foreach(kslab_cache->slab_list, slab, nb_slabs)
    {
      if (slab->nb_free != kslab_cache->nb_objects_per_slab)
        return -SOS_EBUSY;
    }

  /* Remove all the slabs */
  while ((slab = list_get_head(kslab_cache->slab_list)) != NULL)
    {
      cache_release_slab(slab, TRUE);
    }

  /* Remove the cache */
  return sos_kmem_cache_free((sos_vaddr_t)kslab_cache);
}


sos_vaddr_t sos_kmem_cache_alloc(struct sos_kslab_cache *kslab_cache,
                                 sos_ui32_t alloc_flags)
{
  sos_vaddr_t obj_vaddr;
  struct sos_kslab * slab_head;
#define ALLOC_RET return

  /* If the slab at the head of the slabs' list has no free object,
     then the other slabs don't either => need to allocate a new
     slab */
  if ((! kslab_cache->slab_list)
      || (! list_get_head(kslab_cache->slab_list)->free))
    {
      if (cache_grow(kslab_cache, alloc_flags) != SOS_OK)
        /* Not enough memory or blocking alloc */
        ALLOC_RET( (sos_vaddr_t)NULL);
    }

  /* Here: we are sure that list_get_head(kslab_cache->slab_list)
     exists *AND* that list_get_head(kslab_cache->slab_list)->free is
     NOT NULL */
  slab_head = list_get_head(kslab_cache->slab_list);
  SOS_ASSERT_FATAL(slab_head != NULL);

  /* Allocate the object at the head of the slab at the head of the
     slabs' list */
  obj_vaddr = (sos_vaddr_t)list_pop_head(slab_head->free);
  slab_head->nb_free --;
  kslab_cache->nb_free_objects --;

  /* If needed, reset object's contents */
  if (kslab_cache->flags & SOS_KSLAB_CREATE_ZERO)
    memset((void*)obj_vaddr, 0x0, kslab_cache->alloc_obj_size);

  /* Slab is now full ? */
  if (slab_head->free == NULL)
    {
      /* Transfer it to the tail of the slabs' list */
      struct sos_kslab *slab;
      slab = list_pop_head(kslab_cache->slab_list);
      list_add_tail(kslab_cache->slab_list, slab);
    }

  /*
   * For caches that require a minimum amount of free objects left,
   * allocate a slab if needed.
   *
   * Notice the "== min_free_objects - 1": we did not write
   * "< min_free_objects" because, for the cache of kmem_range
   * structures, this would lead to a chicken-and-egg problem:
   * cache_grow below would call cache_alloc again for the kmem_vmm
   * cache, so we would re-enter here with the same cache. If the test
   * were "< min_free_objects", we would call cache_grow for the
   * kmem_vmm cache again and again... until we reach the bottom of
   * our stack (infinite recursion). By testing precisely "==",
   * cache_grow is only called the first time the threshold is crossed.
   */
  if ((kslab_cache->min_free_objects > 0)
      && (kslab_cache->nb_free_objects == (kslab_cache->min_free_objects - 1)))
    {
      /* No: allocate a new slab now */
      if (cache_grow(kslab_cache, alloc_flags) != SOS_OK)
        {
          /* Not enough free memory or blocking alloc => undo the
             allocation */
          sos_kmem_cache_free(obj_vaddr);
          ALLOC_RET( (sos_vaddr_t)NULL);
        }
    }

  ALLOC_RET(obj_vaddr);
}
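
/*
 * Worked example of the "== min_free_objects - 1" test above, for a
 * cache with min_free_objects = 2 (2 being the value that
 * create_cache_of_ranges() actually uses):
 *
 *   nb_free_objects: 2 -> this alloc -> 1, and 1 == 2-1, so cache_grow()
 *   runs. cache_grow() may itself allocate from this very cache (e.g. a
 *   kmem_range for the new slab): that nested alloc brings
 *   nb_free_objects to 0, and 0 != 2-1, so it does NOT try to grow
 *   again. With a "< min_free_objects" test, the nested alloc (0 < 2)
 *   would recurse into cache_grow() indefinitely.
 */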


/**
 * Helper function to free the object located at the given address.
 *
 * @param empty_slab is the address of the slab to release, if removing
 * the object causes the slab to become empty.
 */
inline static
sos_ret_t
free_object(sos_vaddr_t vaddr,
            struct sos_kslab ** empty_slab)
{
  struct sos_kslab_cache *kslab_cache;

  /* Lookup the slab containing the object in the slabs' list */
  struct sos_kslab *slab = sos_kmem_vmm_resolve_slab(vaddr);

  /* By default, consider that the slab will not become empty */
  *empty_slab = NULL;

  /* Did not find the slab */
  if (! slab)
    return -SOS_EINVAL;

  SOS_ASSERT_FATAL(slab->cache);
  kslab_cache = slab->cache;

  /*
   * Check whether the address really could mark the start of an actual
   * allocated object
   */
  /* Address multiple of an object's size ? */
  if (( (vaddr - slab->first_object)
        % kslab_cache->alloc_obj_size) != 0)
    return -SOS_EINVAL;
  /* Address not too large ? */
  if (( (vaddr - slab->first_object)
        / kslab_cache->alloc_obj_size) >= kslab_cache->nb_objects_per_slab)
    return -SOS_EINVAL;

  /*
   * Ok: we now release the object
   */

  /* We found a full slab => it will not be full any more => move it
     to the head of the slabs' list */
  if (! slab->free)
    {
      list_delete(kslab_cache->slab_list, slab);
      list_add_head(kslab_cache->slab_list, slab);
    }

  /* Release the object */
  list_add_head(slab->free, (struct sos_kslab_free_object*)vaddr);
  slab->nb_free++;
  kslab_cache->nb_free_objects++;
  SOS_ASSERT_FATAL(slab->nb_free <= slab->cache->nb_objects_per_slab);

  /* Cause the slab to be released if it becomes empty, and if we are
     allowed to do it */
  if ((slab->nb_free >= kslab_cache->nb_objects_per_slab)
      && (kslab_cache->nb_free_objects - slab->nb_free
          >= kslab_cache->min_free_objects))
    {
      *empty_slab = slab;
    }

  return SOS_OK;
}
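
/*
 * Example of the validity checks above: for a slab with first_object = S
 * and alloc_obj_size = 32 (illustrative values, assuming the slab holds
 * more than 2 objects), vaddr = S + 64 passes both tests (64 is a
 * multiple of 32 and indexes object #2), whereas vaddr = S + 50 fails
 * the modulo test and is rejected with -SOS_EINVAL.
 */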


sos_ret_t sos_kmem_cache_free(sos_vaddr_t vaddr)
{
  sos_ret_t retval;
  struct sos_kslab *empty_slab;

  /* Remove the object from the slab */
  retval = free_object(vaddr, & empty_slab);
  if (retval != SOS_OK)
    return retval;

  /* Remove the slab and the underlying range if needed */
  if (empty_slab != NULL)
    return cache_release_slab(empty_slab, TRUE);

  return SOS_OK;
}


struct sos_kmem_range *
sos_kmem_cache_release_struct_range(struct sos_kmem_range *the_range)
{
  sos_ret_t retval;
  struct sos_kslab *empty_slab;

  /* Remove the object from the slab */
  retval = free_object((sos_vaddr_t)the_range, & empty_slab);
  if (retval != SOS_OK)
    return NULL;

  /* Remove the slab BUT NOT the underlying range if needed */
  if (empty_slab != NULL)
    {
      struct sos_kmem_range *empty_range = empty_slab->range;
      SOS_ASSERT_FATAL(cache_release_slab(empty_slab, FALSE) == SOS_OK);
      SOS_ASSERT_FATAL(empty_range != NULL);
      return empty_range;
    }

  return NULL;
}