SimpleOS
The LXR Cross Referencer for SOS

001 /* Copyright (C) 2000 Thomas Petazzoni
002    Copyright (C) 2004 David Decotigny
003 
004    This program is free software; you can redistribute it and/or
005    modify it under the terms of the GNU General Public License
006    as published by the Free Software Foundation; either version 2
007    of the License, or (at your option) any later version.
008    
009    This program is distributed in the hope that it will be useful,
010    but WITHOUT ANY WARRANTY; without even the implied warranty of
011    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
012    GNU General Public License for more details.
013    
014    You should have received a copy of the GNU General Public License
015    along with this program; if not, write to the Free Software
016    Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
017    USA. 
018 */
019 #include <sos/macros.h>
020 #include <sos/klibc.h>
021 #include <sos/list.h>
022 #include <sos/assert.h>
023 #include <hwcore/paging.h>
024 #include <sos/physmem.h>
025 #include <sos/kmem_vmm.h>
026 
027 #include "kmem_slab.h"
028 
029 /* Dimensioning constants */
030 #define NB_PAGES_IN_SLAB_OF_CACHES 1
031 #define NB_PAGES_IN_SLAB_OF_RANGES 1
032 
033 /** The structure of a slab cache */
034 struct sos_kslab_cache
035 {
036   char *name;
037 
038   /* Non-mutable characteristics of this cache */
039   sos_size_t  original_obj_size; /* requested object size */
040   sos_size_t  alloc_obj_size;    /* actual object size, taking the
041                                     alignment constraints into account */
042   sos_count_t nb_objects_per_slab;
043   sos_count_t nb_pages_per_slab;
044   sos_count_t min_free_objects;
045 
046 /* slab cache flags */
047 // #define SOS_KSLAB_CREATE_MAP  (1<<0) /* See kmem_slab.h */
048 // #define SOS_KSLAB_CREATE_ZERO (1<<1) /* " " " " " " " " */
049 #define ON_SLAB (1<<31) /* struct sos_kslab is included inside the slab */
050   sos_ui32_t  flags;
051 
052   /* Supervision data (updated at run-time) */
053   sos_count_t nb_free_objects;
054 
055   /* The lists of slabs owned by this cache */
056   struct sos_kslab *slab_list; /* head = non-full, tail = full */
057 
058   /* The caches are linked together on the kslab_cache_list */
059   struct sos_kslab_cache *prev, *next;
060 };
061 
062 
063 /** The structure of a slab */
064 struct sos_kslab
065 {
066   /** Number of free objects on this slab */
067   sos_count_t nb_free;
068 
069   /** The list of these free objects */
070   struct sos_kslab_free_object *free;
071 
072   /** The address of the associated range structure */
073   struct sos_kmem_range *range;
074 
075   /** Virtual start address of this range */
076   sos_vaddr_t first_object;
077   
078   /** Slab cache owning this slab */
079   struct sos_kslab_cache *cache;
080 
081   /** Links to the other slabs managed by the same cache */
082   struct sos_kslab *prev, *next;
083 };
084 
085 
086 /** The structure of the free objects in the slab */
087 struct sos_kslab_free_object
088 {
089   struct sos_kslab_free_object *prev, *next;
090 };
091 
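/*
 * Rough sketch of how the three structures above fit together for a
 * cache created with the ON_SLAB flag (illustrative only; the actual
 * address computations are in cache_grow() and cache_add_slab() below):
 *
 *   first_object                                    end of the range
 *   |                                                              |
 *   v                                                              v
 *   +-------+-------+-- ... --+-------+----------+-----------------+
 *   | obj 0 | obj 1 |         | obj N | (unused) | struct sos_kslab|
 *   +-------+-------+-- ... --+-------+----------+-----------------+
 *    each object is alloc_obj_size bytes; N+1 == nb_objects_per_slab
 *
 * A free object stores a struct sos_kslab_free_object in its first
 * bytes, so the 'free' list needs no storage of its own. For caches
 * without ON_SLAB, the struct sos_kslab is instead allocated from
 * cache_of_struct_kslab (see cache_grow() below).
 */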
092 /** The cache of slab caches */
093 static struct sos_kslab_cache *cache_of_struct_kslab_cache;
094 
095 /** The cache of slab structures for non-ON_SLAB caches */
096 static struct sos_kslab_cache *cache_of_struct_kslab;
097 
098 /** The list of slab caches */
099 static struct sos_kslab_cache *kslab_cache_list;
100 
101 /* Helper function to initialize a cache structure */
102 static sos_ret_t
103 cache_initialize(/*out*/struct sos_kslab_cache *the_cache,
104                  const char* name,
105                  sos_size_t  obj_size,
106                  sos_count_t pages_per_slab,
107                  sos_count_t min_free_objs,
108                  sos_ui32_t  cache_flags)
109 {
110   unsigned int space_left;
111   sos_size_t alloc_obj_size;
112 
113   if (obj_size <= 0)
114     return -SOS_EINVAL;
115 
116   /* Default allocation size is the requested one */
117   alloc_obj_size = obj_size;
118 
119   /* Make sure the requested size is large enough to store a
120      free_object structure */
121   if (alloc_obj_size < sizeof(struct sos_kslab_free_object))
122     alloc_obj_size = sizeof(struct sos_kslab_free_object);
123   
124   /* Align the allocation size on a 4-byte (sizeof(int)) boundary */
125   alloc_obj_size = SOS_ALIGN_SUP(alloc_obj_size, sizeof(int));
126 
127   /* Make sure the supplied number of pages per slab is consistent
128      with the actual allocated object size */
129   if (alloc_obj_size > pages_per_slab*SOS_PAGE_SIZE)
130     return -SOS_EINVAL;
131   
132   /* Refuse too large slabs */
133   if (pages_per_slab > MAX_PAGES_PER_SLAB)
134     return -SOS_ENOMEM;
135 
136   /* Fill in the cache structure */
137   memset(the_cache, 0x0, sizeof(struct sos_kslab_cache));
138   the_cache->name              = (char*)name;
139   the_cache->flags             = cache_flags;
140   the_cache->original_obj_size = obj_size;
141   the_cache->alloc_obj_size    = alloc_obj_size;
142   the_cache->min_free_objects  = min_free_objs;
143   the_cache->nb_pages_per_slab = pages_per_slab;
144   
145   /* Small-size objects => the slab structure is allocated directly
146      inside the slab */
147   if(alloc_obj_size <= sizeof(struct sos_kslab))
148     the_cache->flags |= ON_SLAB;
149   
150   /*
151    * Compute the space left once the maximum number of objects
152    * have been allocated in the slab
153    */
154   space_left = the_cache->nb_pages_per_slab*SOS_PAGE_SIZE;
155   if(the_cache->flags & ON_SLAB)
156     space_left -= sizeof(struct sos_kslab);
157   the_cache->nb_objects_per_slab = space_left / alloc_obj_size;
158   space_left -= the_cache->nb_objects_per_slab*alloc_obj_size;
159 
160   /* Make sure a single slab is large enough to contain the minimum
161      number of objects requested */
162   if (the_cache->nb_objects_per_slab < min_free_objs)
163     return -SOS_EINVAL;
164 
165   /* If there is enough space left over for the slab structure in
166      addition to the objects, then make the slab structure ON_SLAB */
167   if (space_left >= sizeof(struct sos_kslab))
168     the_cache->flags |= ON_SLAB;
169 
170   return SOS_OK;
171 }
172 
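/*
 * Worked example of the sizing performed by cache_initialize() above.
 * The figures are illustrative and assume an ia32-like setup:
 * SOS_PAGE_SIZE == 4096, 4-byte pointers and counters, hence
 * sizeof(struct sos_kslab) == 28 and
 * sizeof(struct sos_kslab_free_object) == 8.
 *
 *   obj_size = 24, pages_per_slab = 1:
 *     alloc_obj_size      = 24            (>= 8, already 4-byte aligned)
 *     24 <= 28                            => ON_SLAB
 *     space_left          = 4096 - 28     = 4068
 *     nb_objects_per_slab = 4068 / 24     = 169
 *     space_left          = 4068 - 169*24 = 12
 */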
173 
174 /** Helper function to add a new slab for the given cache. */
175 static sos_ret_t
176 cache_add_slab(struct sos_kslab_cache *kslab_cache,
177                sos_vaddr_t vaddr_slab,
178                struct sos_kslab *slab)
179 {
180   int i;
181 
182   /* Setup the slab structure */
183   memset(slab, 0x0, sizeof(struct sos_kslab));
184   slab->cache = kslab_cache;
185 
186   /* Establish the address of the first free object */
187   slab->first_object = vaddr_slab;
188 
189   /* Account for this new slab in the cache */
190   slab->nb_free = kslab_cache->nb_objects_per_slab;
191   kslab_cache->nb_free_objects += slab->nb_free;
192 
193   /* Build the list of free objects */
194   for (i = 0 ; i <  kslab_cache->nb_objects_per_slab ; i++)
195     {
196       sos_vaddr_t obj_vaddr;
197 
198       /* Set object's address */
199       obj_vaddr = slab->first_object + i*kslab_cache->alloc_obj_size;
200 
201       /* Add it to the list of free objects */
202       list_add_tail(slab->free,
203                     (struct sos_kslab_free_object *)obj_vaddr);
204     }
205 
206   /* Add the slab to the cache's slab list: add it at the head of the
207      list, since this slab is not full */
208   list_add_head(kslab_cache->slab_list, slab);
209 
210   return SOS_OK;
211 }
212 
213 
214 /** Helper function to allocate a new slab for the given kslab_cache */
215 static sos_ret_t
216 cache_grow(struct sos_kslab_cache *kslab_cache,
217            sos_ui32_t alloc_flags)
218 {
219   sos_ui32_t range_alloc_flags;
220 
221   struct sos_kmem_range *new_range;
222   sos_vaddr_t new_range_start;
223 
224   struct sos_kslab *new_slab;
225 
226   /*
227    * Setup the flags for the range allocation
228    */
229   range_alloc_flags = 0;
230 
231   /* Atomic ? */
232   if (alloc_flags & SOS_KSLAB_ALLOC_ATOMIC)
233     range_alloc_flags |= SOS_KMEM_VMM_ATOMIC;
234 
235   /* Need physical mapping NOW ? */
236   if (kslab_cache->flags & (SOS_KSLAB_CREATE_MAP
237                            | SOS_KSLAB_CREATE_ZERO))
238     range_alloc_flags |= SOS_KMEM_VMM_MAP;
239 
240   /* Allocate the range */
241   new_range = sos_kmem_vmm_new_range(kslab_cache->nb_pages_per_slab,
242                                      range_alloc_flags,
243                                      & new_range_start);
244   if (! new_range)
245     return -SOS_ENOMEM;
246 
247   /* Allocate the slab structure */
248   if (kslab_cache->flags & ON_SLAB)
249     {
250       /* Slab structure is ON the slab: simply set its address to the
251          end of the range */
252       sos_vaddr_t slab_vaddr
253         = new_range_start + kslab_cache->nb_pages_per_slab*SOS_PAGE_SIZE
254           - sizeof(struct sos_kslab);
255       new_slab = (struct sos_kslab*)slab_vaddr;
256     }
257   else
258     {
259       /* Slab structure is OFF the slab: allocate it from the cache of
260          slab structures */
261       sos_vaddr_t slab_vaddr
262         = sos_kmem_cache_alloc(cache_of_struct_kslab,
263                                alloc_flags);
264       if (! slab_vaddr)
265         {
266           sos_kmem_vmm_del_range(new_range);
267           return -SOS_ENOMEM;
268         }
269       new_slab = (struct sos_kslab*)slab_vaddr;
270     }
271 
272   cache_add_slab(kslab_cache, new_range_start, new_slab);
273   new_slab->range = new_range;
274 
275   /* Set the backlink from range to this slab */
276   sos_kmem_vmm_set_slab(new_range, new_slab);
277 
278   return SOS_OK;
279 }
280 
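/*
 * Placement sketch for the two cases handled by cache_grow() above,
 * with illustrative numbers (one 4096-byte page per slab and
 * sizeof(struct sos_kslab) == 28, as on ia32):
 *
 *   ON_SLAB:   new_range_start = 0x4000 (say)
 *              new_slab        = 0x4000 + 4096 - 28 = 0x4fe4
 *              i.e. the slab structure occupies the last bytes of its
 *              own range, which is why cache_initialize() deducted it
 *              from space_left.
 *
 *   off-slab:  new_slab is an ordinary object taken from
 *              cache_of_struct_kslab, and the whole range is left to
 *              the cache's own objects.
 */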
281 
282 /**
283  * Helper function to release a slab
284  *
285  * The corresponding range is always deleted, unless the
286  * must_del_range_now parameter is unset. That only happens when the
287  * function gets called from sos_kmem_cache_release_struct_range(), to
288  * avoid deep recursions.
289  */
290 static sos_ret_t
291 cache_release_slab(struct sos_kslab *slab,
292                    sos_bool_t must_del_range_now)
293 {
294   struct sos_kslab_cache *kslab_cache = slab->cache;
295   struct sos_kmem_range *range = slab->range;
296 
297   SOS_ASSERT_FATAL(kslab_cache != NULL);
298   SOS_ASSERT_FATAL(range != NULL);
299   SOS_ASSERT_FATAL(slab->nb_free == slab->cache->nb_objects_per_slab);
300 
301   /* First, remove the slab from the slabs' list of the cache */
302   list_delete(kslab_cache->slab_list, slab);
303   slab->cache->nb_free_objects -= slab->nb_free;
304 
305   /* Release the slab structure if it is OFF slab */
306   if (! (slab->cache->flags & ON_SLAB))
307     sos_kmem_cache_free((sos_vaddr_t)slab);
308 
309   /* Ok, the range is not bound to any slab anymore */
310   sos_kmem_vmm_set_slab(range, NULL);
311 
312   /* Always delete the range now, unless we are told not to do so (see
313      sos_kmem_cache_release_struct_range() below) */
314   if (must_del_range_now)
315     return sos_kmem_vmm_del_range(range);
316 
317   return SOS_OK;
318 }
319 
320 
321 /**
322  * Helper function to create the initial cache of caches, with a very
323  * first slab in it, so that new cache structures can be simply allocated.
324  * @return the cache structure for the cache of caches
325  */
326 static struct sos_kslab_cache *
327 create_cache_of_caches(sos_vaddr_t vaddr_first_slab_of_caches,
328                        int nb_pages)
329 {
330   /* The preliminary cache structure we need in order to allocate the
331      first slab in the cache of caches (allocated on the stack !) */
332   struct sos_kslab_cache fake_cache_of_caches;
333 
334   /* The real cache structure for the cache of caches */
335   struct sos_kslab_cache *real_cache_of_caches;
336 
337   /* The kslab structure for this very first slab */
338   struct sos_kslab       *slab_of_caches;
339 
340   /* Init the cache structure for the cache of caches */
341   if (cache_initialize(& fake_cache_of_caches,
342                        "Caches", sizeof(struct sos_kslab_cache),
343                        nb_pages, 0, SOS_KSLAB_CREATE_MAP | ON_SLAB))
344     /* Something wrong with the parameters */
345     return NULL;
346 
347   memset((void*)vaddr_first_slab_of_caches, 0x0, nb_pages*SOS_PAGE_SIZE);
348 
349   /* Add the pages for the 1st slab of caches */
350   slab_of_caches = (struct sos_kslab*)(vaddr_first_slab_of_caches
351                                        + nb_pages*SOS_PAGE_SIZE
352                                        - sizeof(struct sos_kslab));
353 
354   /* Add the abovementioned 1st slab to the cache of caches */
355   cache_add_slab(& fake_cache_of_caches,
356                  vaddr_first_slab_of_caches,
357                  slab_of_caches);
358 
359   /* Now we allocate a cache structure, which will be the real cache
360      of caches, i.e. a cache structure allocated INSIDE the cache of
361      caches, not on the stack */
362   real_cache_of_caches
363     = (struct sos_kslab_cache*) sos_kmem_cache_alloc(& fake_cache_of_caches,
364                                                      0);
365   /* We initialize it */
366   memcpy(real_cache_of_caches, & fake_cache_of_caches,
367          sizeof(struct sos_kslab_cache));
368   /* We need to update the slab's 'cache' field */
369   slab_of_caches->cache = real_cache_of_caches;
370   
371   /* Add the cache to the list of slab caches */
372   list_add_tail(kslab_cache_list, real_cache_of_caches);
373 
374   return real_cache_of_caches;
375 }
376 
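/*
 * Bootstrap summary for create_cache_of_caches() above: 1) build a
 * temporary cache descriptor on the stack, 2) hand it the one slab
 * whose pages were mapped by the caller, 3) allocate the definitive
 * descriptor from that very slab, 4) copy the temporary descriptor
 * into it and repoint the slab's 'cache' field. From then on the
 * cache of caches is self-hosting: its descriptor lives in one of its
 * own slabs.
 */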
377 
378 /**
379  * Helper function to create the initial cache of ranges, with a very
380  * first slab in it, so that new kmem_range structures can be simply
381  * allocated.
382  * @return the cache of kmem_range
383  */
384 static struct sos_kslab_cache *
385 create_cache_of_ranges(sos_vaddr_t vaddr_first_slab_of_ranges,
386                        sos_size_t  sizeof_struct_range,
387                        int nb_pages)
388 {
389   /* The cache structure for the cache of kmem_range */
390   struct sos_kslab_cache *cache_of_ranges;
391 
392   /* The kslab structure for the very first slab of ranges */
393   struct sos_kslab *slab_of_ranges;
394 
395   cache_of_ranges = (struct sos_kslab_cache*)
396     sos_kmem_cache_alloc(cache_of_struct_kslab_cache,
397                          0);
398   if (! cache_of_ranges)
399     return NULL;
400 
401   /* Init the cache structure for the cache of ranges, with a minimum
402      of 2 free objects per slab (min_free_objs = 2) */
403   if (cache_initialize(cache_of_ranges,
404                        "struct kmem_range",
405                        sizeof_struct_range,
406                        nb_pages, 2, SOS_KSLAB_CREATE_MAP | ON_SLAB))
407     /* Something wrong with the parameters */
408     return NULL;
409 
410   /* Add the cache to the list of slab caches */
411   list_add_tail(kslab_cache_list, cache_of_ranges);
412 
413   /*
414    * Add the first slab for this cache
415    */
416   memset((void*)vaddr_first_slab_of_ranges, 0x0, nb_pages*SOS_PAGE_SIZE);
417 
418   /* Add the pages for the 1st slab of ranges */
419   slab_of_ranges = (struct sos_kslab*)(vaddr_first_slab_of_ranges
420                                        + nb_pages*SOS_PAGE_SIZE
421                                        - sizeof(struct sos_kslab));
422 
423   cache_add_slab(cache_of_ranges,
424                  vaddr_first_slab_of_ranges,
425                  slab_of_ranges);
426 
427   return cache_of_ranges;
428 }
429 
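/*
 * Why a minimum of 2 free objects per slab for the cache of ranges:
 * growing any cache calls sos_kmem_vmm_new_range(), which may itself
 * need a fresh struct kmem_range from this very cache. Thanks to the
 * "== min_free_objects - 1" test in sos_kmem_cache_alloc() below, the
 * cache of ranges is grown while one free range structure is still
 * available for that nested allocation.
 */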
430 
431 struct sos_kslab_cache *
432 sos_kmem_cache_subsystem_setup_prepare(sos_vaddr_t kernel_core_base,
433                                        sos_vaddr_t kernel_core_top,
434                                        sos_size_t  sizeof_struct_range,
435                                        /* results */
436                                        struct sos_kslab **first_struct_slab_of_caches,
437                                        sos_vaddr_t *first_slab_of_caches_base,
438                                        sos_count_t *first_slab_of_caches_nb_pages,
439                                        struct sos_kslab **first_struct_slab_of_ranges,
440                                        sos_vaddr_t *first_slab_of_ranges_base,
441                                        sos_count_t *first_slab_of_ranges_nb_pages)
442 {
443   int i;
444   sos_ret_t   retval;
445   sos_vaddr_t vaddr;
446 
447   /* The cache of ranges we are about to allocate */
448   struct sos_kslab_cache *cache_of_ranges;
449 
450   /* In the beginning, there isn't any cache */
451   kslab_cache_list = NULL;
452   cache_of_struct_kslab = NULL;
453   cache_of_struct_kslab_cache = NULL;
454 
455   /*
456    * Create the cache of caches, initialised with 1 allocated slab
457    */
458 
459   /* Allocate the pages needed for the 1st slab of caches, and map them
460      in kernel space, right after the kernel */
461   *first_slab_of_caches_base = SOS_PAGE_ALIGN_SUP(kernel_core_top);
462   for (i = 0, vaddr = *first_slab_of_caches_base ;
463        i < NB_PAGES_IN_SLAB_OF_CACHES ;
464        i++, vaddr += SOS_PAGE_SIZE)
465     {
466       sos_paddr_t ppage_paddr;
467 
468       ppage_paddr
469         = sos_physmem_ref_physpage_new(FALSE);
470       SOS_ASSERT_FATAL(ppage_paddr != (sos_paddr_t)NULL);
471 
472       retval = sos_paging_map(ppage_paddr, vaddr,
473                               FALSE,
474                               SOS_VM_MAP_ATOMIC
475                               | SOS_VM_MAP_PROT_READ
476                               | SOS_VM_MAP_PROT_WRITE);
477       SOS_ASSERT_FATAL(retval == SOS_OK);
478 
479       retval = sos_physmem_unref_physpage(ppage_paddr);
480       SOS_ASSERT_FATAL(retval == FALSE);
481     }
482 
483   /* Create the cache of caches */
484   *first_slab_of_caches_nb_pages = NB_PAGES_IN_SLAB_OF_CACHES;
485   cache_of_struct_kslab_cache
486     = create_cache_of_caches(*first_slab_of_caches_base,
487                              NB_PAGES_IN_SLAB_OF_CACHES);
488   SOS_ASSERT_FATAL(cache_of_struct_kslab_cache != NULL);
489 
490   /* Retrieve the slab that should have been allocated */
491   *first_struct_slab_of_caches
492     = list_get_head(cache_of_struct_kslab_cache->slab_list);
493 
494   
495   /*
496    * Create the cache of ranges, initialised with 1 allocated slab
497    */
498   *first_slab_of_ranges_base = vaddr;
499   /* Allocate the 1st slab */
500   for (i = 0, vaddr = *first_slab_of_ranges_base ;
501        i < NB_PAGES_IN_SLAB_OF_RANGES ;
502        i++, vaddr += SOS_PAGE_SIZE)
503     {
504       sos_paddr_t ppage_paddr;
505 
506       ppage_paddr
507         = sos_physmem_ref_physpage_new(FALSE);
508       SOS_ASSERT_FATAL(ppage_paddr != (sos_paddr_t)NULL);
509 
510       retval = sos_paging_map(ppage_paddr, vaddr,
511                               FALSE,
512                               SOS_VM_MAP_ATOMIC
513                               | SOS_VM_MAP_PROT_READ
514                               | SOS_VM_MAP_PROT_WRITE);
515       SOS_ASSERT_FATAL(retval == SOS_OK);
516 
517       retval = sos_physmem_unref_physpage(ppage_paddr);
518       SOS_ASSERT_FATAL(retval == FALSE);
519     }
520 
521   /* Create the cache of ranges */
522   *first_slab_of_ranges_nb_pages = NB_PAGES_IN_SLAB_OF_RANGES;
523   cache_of_ranges = create_cache_of_ranges(*first_slab_of_ranges_base,
524                                            sizeof_struct_range,
525                                            NB_PAGES_IN_SLAB_OF_RANGES);
526   SOS_ASSERT_FATAL(cache_of_ranges != NULL);
527 
528   /* Retrieve the slab that should have been allocated */
529   *first_struct_slab_of_ranges
530     = list_get_head(cache_of_ranges->slab_list);
531 
532   /*
533    * Create the cache of slabs, without any allocated slab yet
534    */
535   cache_of_struct_kslab
536     = sos_kmem_cache_create("off-slab slab structures",
537                             sizeof(struct sos_kslab),
538                             1,
539                             0,
540                             SOS_KSLAB_CREATE_MAP);
541   SOS_ASSERT_FATAL(cache_of_struct_kslab != NULL);
542 
543   return cache_of_ranges;
544 }
545 
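/*
 * At this point three caches exist: the cache of cache structures
 * (cache_of_struct_kslab_cache), the cache of struct kmem_range
 * (returned to the caller above) and the cache of off-slab slab
 * structures (cache_of_struct_kslab). The first two received a
 * hand-mapped initial slab; sos_kmem_cache_subsystem_setup_commit()
 * below later attaches those slabs to the kmem ranges that the caller
 * (presumably the kmem_vmm setup code) creates to cover them.
 */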
546 
547 sos_ret_t
548 sos_kmem_cache_subsystem_setup_commit(struct sos_kslab *first_struct_slab_of_caches,
549                                       struct sos_kmem_range *first_range_of_caches,
550                                       struct sos_kslab *first_struct_slab_of_ranges,
551                                       struct sos_kmem_range *first_range_of_ranges)
552 {
553   first_struct_slab_of_caches->range = first_range_of_caches;
554   first_struct_slab_of_ranges->range = first_range_of_ranges;
555   return SOS_OK;
556 }
557 
558 
559 struct sos_kslab_cache *
560 sos_kmem_cache_create(const char* name,
561                       sos_size_t  obj_size,
562                       sos_count_t pages_per_slab,
563                       sos_count_t min_free_objs,
564                       sos_ui32_t  cache_flags)
565 {
566   struct sos_kslab_cache *new_cache;
567 
568   /* Allocate the new cache */
569   new_cache = (struct sos_kslab_cache*)
570     sos_kmem_cache_alloc(cache_of_struct_kslab_cache,
571                          0/* NOT ATOMIC */);
572   if (! new_cache)
573     return NULL;
574 
575   if (cache_initialize(new_cache, name, obj_size,
576                        pages_per_slab, min_free_objs,
577                        cache_flags))
578     {
579       /* Something was wrong */
580       sos_kmem_cache_free((sos_vaddr_t)new_cache);
581       return NULL;
582     }
583 
584   /* Add the cache to the list of slab caches */
585   list_add_tail(kslab_cache_list, new_cache);
586   
587   /* If min_free_objs is set, pre-allocate a slab */
588   if (min_free_objs)
589     {
590       if (cache_grow(new_cache, 0 /* Not atomic */) != SOS_OK)
591         {
592           sos_kmem_cache_destroy(new_cache);
593           return NULL; /* Not enough memory */
594         }
595     }
596 
597   return new_cache;  
598 }
599 
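/*
 * Typical usage sketch of the cache API (sos_kmem_cache_create()
 * above, alloc/free/destroy below). Illustrative only: "struct foo"
 * and the variable names are hypothetical, not part of SOS. The
 * arguments to sos_kmem_cache_create() are the cache name, the object
 * size, the number of pages per slab, the minimum number of free
 * objects to keep around, and the creation flags.
 *
 *   struct sos_kslab_cache *foo_cache
 *     = sos_kmem_cache_create("struct foo", sizeof(struct foo),
 *                             1, 0, SOS_KSLAB_CREATE_MAP);
 *   sos_vaddr_t foo = sos_kmem_cache_alloc(foo_cache, 0);
 *   ... use the object at address foo ...
 *   sos_kmem_cache_free(foo);
 *   sos_kmem_cache_destroy(foo_cache);
 */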
600   
601 sos_ret_t sos_kmem_cache_destroy(struct sos_kslab_cache *kslab_cache)
602 {
603   int nb_slabs;
604   struct sos_kslab *slab;
605 
606   if (! kslab_cache)
607     return -SOS_EINVAL;
608 
609   /* Refuse to destroy the cache if there are any objects still
610      allocated */
611   list_foreach(kslab_cache->slab_list, slab, nb_slabs)
612     {
613       if (slab->nb_free != kslab_cache->nb_objects_per_slab)
614         return -SOS_EBUSY;
615     }
616 
617   /* Remove all the slabs */
618   while ((slab = list_get_head(kslab_cache->slab_list)) != NULL)
619     {
620       cache_release_slab(slab, TRUE);
621     }
622 
623   /* Remove the cache */
624   return sos_kmem_cache_free((sos_vaddr_t)kslab_cache);
625 }
626 
627 
628 sos_vaddr_t sos_kmem_cache_alloc(struct sos_kslab_cache *kslab_cache,
629                                  sos_ui32_t alloc_flags)
630 {
631   sos_vaddr_t obj_vaddr;
632   struct sos_kslab * slab_head;
633 #define ALLOC_RET return
634 
635   /* If the slab at the head of the slabs' list has no free object,
636      then the other slabs don't either => need to allocate a new
637      slab */
638   if ((! kslab_cache->slab_list)
639       || (! list_get_head(kslab_cache->slab_list)->free))
640     {
641       if (cache_grow(kslab_cache, alloc_flags) != SOS_OK)
642         /* Not enough memory or blocking alloc */
643         ALLOC_RET( (sos_vaddr_t)NULL);
644     }
645 
646   /* Here: we are sure that list_get_head(kslab_cache->slab_list)
647      exists *AND* that list_get_head(kslab_cache->slab_list)->free is
648      NOT NULL */
649   slab_head = list_get_head(kslab_cache->slab_list);
650   SOS_ASSERT_FATAL(slab_head != NULL);
651 
652   /* Allocate the object at the head of the slab at the head of the
653      slabs' list */
654   obj_vaddr = (sos_vaddr_t)list_pop_head(slab_head->free);
655   slab_head->nb_free --;
656   kslab_cache->nb_free_objects --;
657 
658   /* If needed, reset object's contents */
659   if (kslab_cache->flags & SOS_KSLAB_CREATE_ZERO)
660     memset((void*)obj_vaddr, 0x0, kslab_cache->alloc_obj_size);
661 
662   /* Slab is now full ? */
663   if (slab_head->free == NULL)
664     {
665       /* Transfer it at the tail of the slabs' list */
666       struct sos_kslab *slab;
667       slab = list_pop_head(kslab_cache->slab_list);
668       list_add_tail(kslab_cache->slab_list, slab);
669     }
670   
671   /*
672    * For caches that require a minimum number of free objects left,
673    * allocate a new slab if needed.
674    *
675    * Notice the "== min_free_objects - 1": we did not write "<
676    * min_free_objects" because, for the cache of kmem_range structures
677    * used by kmem_vmm, that would lead to a chicken-and-egg problem:
678    * cache_grow below calls cache_alloc again for that very same
679    * cache, so we would re-enter this function with it. If the test
680    * were "< min_free_objects", cache_grow would then be called for it
681    * again and again... until we reach the bottom of our stack
682    * (infinite recursion). By testing precisely "==", cache_grow is
683    * only called the first time.
684    */
685   if ((kslab_cache->min_free_objects > 0)
686       && (kslab_cache->nb_free_objects == (kslab_cache->min_free_objects - 1)))
687     {
688       /* No: allocate a new slab now */
689       if (cache_grow(kslab_cache, alloc_flags) != SOS_OK)
690         {
691           /* Not enough free memory or blocking alloc => undo the
692              allocation */
693           sos_kmem_cache_free(obj_vaddr);
694           ALLOC_RET( (sos_vaddr_t)NULL);
695         }
696     }
697 
698   ALLOC_RET(obj_vaddr);
699 }
700 
701 
702 /**
703  * Helper function to free the object located at the given address.
704  *
705  * @param empty_slab is the address of the slab to release, if removing
706  * the object causes the slab to become empty.
707  */
708 inline static
709 sos_ret_t
710 free_object(sos_vaddr_t vaddr,
711             struct sos_kslab ** empty_slab)
712 {
713   struct sos_kslab_cache *kslab_cache;
714 
715   /* Lookup the slab containing the object in the slabs' list */
716   struct sos_kslab *slab = sos_kmem_vmm_resolve_slab(vaddr);
717 
718   /* By default, consider that the slab will not become empty */
719   *empty_slab = NULL;
720 
721   /* Did not find the slab */
722   if (! slab)
723     return -SOS_EINVAL;
724 
725   SOS_ASSERT_FATAL(slab->cache);
726   kslab_cache = slab->cache;
727 
728   /*
729    * Check whether the address really could mark the start of an actual
730    * allocated object
731    */
732   /* Address multiple of an object's size ? */
733   if (( (vaddr - slab->first_object)
734         % kslab_cache->alloc_obj_size) != 0)
735     return -SOS_EINVAL;
736   /* Address not too large ? */
737   if (( (vaddr - slab->first_object)
738         / kslab_cache->alloc_obj_size) >= kslab_cache->nb_objects_per_slab)
739     return -SOS_EINVAL;
740 
741   /*
742    * Ok: we now release the object
743    */
744 
745   /* We found a full slab => it will not be full any more => move it
746      to the head of the slabs' list */
747   if (! slab->free)
748     {
749       list_delete(kslab_cache->slab_list, slab);
750       list_add_head(kslab_cache->slab_list, slab);
751     }
752 
753   /* Release the object */
754   list_add_head(slab->free, (struct sos_kslab_free_object*)vaddr);
755   slab->nb_free++;
756   kslab_cache->nb_free_objects++;
757   SOS_ASSERT_FATAL(slab->nb_free <= slab->cache->nb_objects_per_slab);
758 
759   /* Cause the slab to be released if it becomes empty, and if we are
760      allowed to do it */
761   if ((slab->nb_free >= kslab_cache->nb_objects_per_slab)
762       && (kslab_cache->nb_free_objects - slab->nb_free
763           >= kslab_cache->min_free_objects))
764     {
765       *empty_slab = slab;
766     }
767 
768   return SOS_OK;
769 }
770 
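/*
 * Example of the two validity checks in free_object() above, with
 * made-up numbers: for a slab with first_object = 0x4000,
 * alloc_obj_size = 24 and nb_objects_per_slab = 169, freeing
 * vaddr = 0x4048 gives an offset of 72; 72 % 24 == 0 and
 * 72 / 24 == 3 < 169, so 0x4048 is accepted as the start of object
 * number 3 of that slab.
 */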
771 
772 sos_ret_t sos_kmem_cache_free(sos_vaddr_t vaddr)
773 {
774   sos_ret_t retval;
775   struct sos_kslab *empty_slab;
776 
777   /* Remove the object from the slab */
778   retval = free_object(vaddr, & empty_slab);
779   if (retval != SOS_OK)
780     return retval;
781 
782   /* Remove the slab and the underlying range if needed */
783   if (empty_slab != NULL)
784     return cache_release_slab(empty_slab, TRUE);
785 
786   return SOS_OK;
787 }
788 
789 
790 struct sos_kmem_range *
791 sos_kmem_cache_release_struct_range(struct sos_kmem_range *the_range)
792 {
793   sos_ret_t retval;
794   struct sos_kslab *empty_slab;
795 
796   /* Remove the object from the slab */
797   retval = free_object((sos_vaddr_t)the_range, & empty_slab);
798   if (retval != SOS_OK)
799     return NULL;
800 
801   /* Remove the slab BUT NOT the underlying range if needed */
802   if (empty_slab != NULL)
803     {
804       struct sos_kmem_range *empty_range = empty_slab->range;
805       SOS_ASSERT_FATAL(cache_release_slab(empty_slab, FALSE) == SOS_OK);
806       SOS_ASSERT_FATAL(empty_range != NULL);
807       return empty_range;
808     }
809 
810   return NULL;
811 }
812 
