SimpleOS

LXR

Navigation



Site hébergé par : enix

The LXR Cross Referencer for SOS

[ source navigation ]
[ diff markup ]
[ identifier search ]
[ general search ]
 
 
Article: [ 1 ] [ 2 ] [ 3 ] [ 4 ] [ 5 ] [ 6 ] [ 6.5 ] [ 7 ] [ 7.5 ] [ 8 ] [ 9 ] [ 9.5 ]

Diff markup

Differences between /sos/kmem_slab.c (Article 9.5) and /sos/kmem_slab.c (Article 7.5)


001 /* Copyright (C) 2000 Thomas Petazzoni            001 /* Copyright (C) 2000 Thomas Petazzoni
002    Copyright (C) 2004 David Decotigny             002    Copyright (C) 2004 David Decotigny
003                                                   003 
004    This program is free software; you can redi    004    This program is free software; you can redistribute it and/or
005    modify it under the terms of the GNU Genera    005    modify it under the terms of the GNU General Public License
006    as published by the Free Software Foundatio    006    as published by the Free Software Foundation; either version 2
007    of the License, or (at your option) any lat    007    of the License, or (at your option) any later version.
008                                                   008    
009    This program is distributed in the hope tha    009    This program is distributed in the hope that it will be useful,
010    but WITHOUT ANY WARRANTY; without even the     010    but WITHOUT ANY WARRANTY; without even the implied warranty of
011    MERCHANTABILITY or FITNESS FOR A PARTICULAR    011    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
012    GNU General Public License for more details    012    GNU General Public License for more details.
013                                                   013    
014    You should have received a copy of the GNU     014    You should have received a copy of the GNU General Public License
015    along with this program; if not, write to t    015    along with this program; if not, write to the Free Software
016    Foundation, Inc., 59 Temple Place - Suite 3    016    Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
017    USA.                                           017    USA. 
018 */                                                018 */
019 #include <sos/macros.h>                           019 #include <sos/macros.h>
020 #include <sos/klibc.h>                            020 #include <sos/klibc.h>
021 #include <sos/list.h>                             021 #include <sos/list.h>
022 #include <sos/assert.h>                           022 #include <sos/assert.h>
023 #include <hwcore/paging.h>                        023 #include <hwcore/paging.h>
024 #include <sos/physmem.h>                          024 #include <sos/physmem.h>
025 #include <sos/kmem_vmm.h>                         025 #include <sos/kmem_vmm.h>
026                                                   026 
027 #include "kmem_slab.h"                            027 #include "kmem_slab.h"
028                                                   028 
029 /* Dimensioning constants */                      029 /* Dimensioning constants */
030 #define NB_PAGES_IN_SLAB_OF_CACHES 1              030 #define NB_PAGES_IN_SLAB_OF_CACHES 1
031 #define NB_PAGES_IN_SLAB_OF_RANGES 1              031 #define NB_PAGES_IN_SLAB_OF_RANGES 1
032                                                   032 
033 /** The structure of a slab cache */              033 /** The structure of a slab cache */
034 struct sos_kslab_cache                            034 struct sos_kslab_cache
035 {                                                 035 {
036   char const* name;                            !! 036   char *name;
037                                                   037 
038   /* non mutable characteristics of this slab     038   /* non mutable characteristics of this slab */
039   sos_size_t  original_obj_size; /* asked obje    039   sos_size_t  original_obj_size; /* asked object size */
040   sos_size_t  alloc_obj_size;    /* actual obj    040   sos_size_t  alloc_obj_size;    /* actual object size, taking the
041                                     alignment     041                                     alignment constraints into account */
042   sos_count_t nb_objects_per_slab;                042   sos_count_t nb_objects_per_slab;
043   sos_count_t nb_pages_per_slab;                  043   sos_count_t nb_pages_per_slab;
044   sos_count_t min_free_objects;                   044   sos_count_t min_free_objects;
045                                                   045 
046 /* slab cache flags */                            046 /* slab cache flags */
047 // #define SOS_KSLAB_CREATE_MAP  (1<<0) /* See    047 // #define SOS_KSLAB_CREATE_MAP  (1<<0) /* See kmem_slab.h */
048 // #define SOS_KSLAB_CREATE_ZERO (1<<1) /* " "    048 // #define SOS_KSLAB_CREATE_ZERO (1<<1) /* " " " " " " " " */
049 #define ON_SLAB (1<<31) /* struct sos_kslab is    049 #define ON_SLAB (1<<31) /* struct sos_kslab is included inside the slab */
050   sos_ui32_t  flags;                              050   sos_ui32_t  flags;
051                                                   051 
052   /* Supervision data (updated at run-time) */    052   /* Supervision data (updated at run-time) */
053   sos_count_t nb_free_objects;                    053   sos_count_t nb_free_objects;
054                                                   054 
055   /* The lists of slabs owned by this cache */    055   /* The lists of slabs owned by this cache */
056   struct sos_kslab *slab_list; /* head = non f    056   struct sos_kslab *slab_list; /* head = non full, tail = full */
057                                                   057 
058   /* The caches are linked together on the ksl    058   /* The caches are linked together on the kslab_cache_list */
059   struct sos_kslab_cache *prev, *next;            059   struct sos_kslab_cache *prev, *next;
060 };                                                060 };
061                                                   061 
062                                                   062 
063 /** The structure of a slab */                    063 /** The structure of a slab */
064 struct sos_kslab                                  064 struct sos_kslab
065 {                                                 065 {
066   /** Number of free objects on this slab */      066   /** Number of free objects on this slab */
067   sos_count_t nb_free;                            067   sos_count_t nb_free;
068                                                   068 
069   /** The list of these free objects */           069   /** The list of these free objects */
070   struct sos_kslab_free_object *free;             070   struct sos_kslab_free_object *free;
071                                                   071 
072   /** The address of the associated range stru    072   /** The address of the associated range structure */
073   struct sos_kmem_range *range;                   073   struct sos_kmem_range *range;
074                                                   074 
075   /** Virtual start address of this range */      075   /** Virtual start address of this range */
076   sos_vaddr_t first_object;                       076   sos_vaddr_t first_object;
077                                                   077   
078   /** Slab cache owning this slab */              078   /** Slab cache owning this slab */
079   struct sos_kslab_cache *cache;                  079   struct sos_kslab_cache *cache;
080                                                   080 
081   /** Links to the other slabs managed by the     081   /** Links to the other slabs managed by the same cache */
082   struct sos_kslab *prev, *next;                  082   struct sos_kslab *prev, *next;
083 };                                                083 };
084                                                   084 
085                                                   085 
086 /** The structure of the free objects in the s    086 /** The structure of the free objects in the slab */
087 struct sos_kslab_free_object                      087 struct sos_kslab_free_object
088 {                                                 088 {
089   struct sos_kslab_free_object *prev, *next;      089   struct sos_kslab_free_object *prev, *next;
090 };                                                090 };
091                                                   091 
092 /** The cache of slab caches */                   092 /** The cache of slab caches */
093 static struct sos_kslab_cache *cache_of_struct    093 static struct sos_kslab_cache *cache_of_struct_kslab_cache;
094                                                   094 
095 /** The cache of slab structures for non-ON_SL    095 /** The cache of slab structures for non-ON_SLAB caches */
096 static struct sos_kslab_cache *cache_of_struct    096 static struct sos_kslab_cache *cache_of_struct_kslab;
097                                                   097 
098 /** The list of slab caches */                    098 /** The list of slab caches */
099 static struct sos_kslab_cache *kslab_cache_lis    099 static struct sos_kslab_cache *kslab_cache_list;
100                                                   100 
101 /* Helper function to initialize a cache struc    101 /* Helper function to initialize a cache structure */
102 static sos_ret_t                                  102 static sos_ret_t
103 cache_initialize(/*out*/struct sos_kslab_cache    103 cache_initialize(/*out*/struct sos_kslab_cache *the_cache,
104                  const char* name,                104                  const char* name,
105                  sos_size_t  obj_size,            105                  sos_size_t  obj_size,
106                  sos_count_t pages_per_slab,      106                  sos_count_t pages_per_slab,
107                  sos_count_t min_free_objs,       107                  sos_count_t min_free_objs,
108                  sos_ui32_t  cache_flags)         108                  sos_ui32_t  cache_flags)
109 {                                                 109 {
110   unsigned int space_left;                        110   unsigned int space_left;
111   sos_size_t alloc_obj_size;                      111   sos_size_t alloc_obj_size;
112                                                   112 
113   if (obj_size <= 0)                              113   if (obj_size <= 0)
114     return -SOS_EINVAL;                           114     return -SOS_EINVAL;
115                                                   115 
116   /* Default allocation size is the requested     116   /* Default allocation size is the requested one */
117   alloc_obj_size = obj_size;                      117   alloc_obj_size = obj_size;
118                                                   118 
119   /* Make sure the requested size is large eno    119   /* Make sure the requested size is large enough to store a
120      free_object structure */                     120      free_object structure */
121   if (alloc_obj_size < sizeof(struct sos_kslab    121   if (alloc_obj_size < sizeof(struct sos_kslab_free_object))
122     alloc_obj_size = sizeof(struct sos_kslab_f    122     alloc_obj_size = sizeof(struct sos_kslab_free_object);
123                                                   123   
124   /* Align obj_size on 4 bytes */                 124   /* Align obj_size on 4 bytes */
125   alloc_obj_size = SOS_ALIGN_SUP(alloc_obj_siz    125   alloc_obj_size = SOS_ALIGN_SUP(alloc_obj_size, sizeof(int));
126                                                   126 
127   /* Make sure supplied number of pages per sl    127   /* Make sure supplied number of pages per slab is consistent with
128      actual allocated object size */              128      actual allocated object size */
129   if (alloc_obj_size > pages_per_slab*SOS_PAGE    129   if (alloc_obj_size > pages_per_slab*SOS_PAGE_SIZE)
130     return -SOS_EINVAL;                           130     return -SOS_EINVAL;
131                                                   131   
132   /* Refuse too large slabs */                    132   /* Refuse too large slabs */
133   if (pages_per_slab > MAX_PAGES_PER_SLAB)        133   if (pages_per_slab > MAX_PAGES_PER_SLAB)
134     return -SOS_ENOMEM;                           134     return -SOS_ENOMEM;
135                                                   135 
136   /* Fills in the cache structure */              136   /* Fills in the cache structure */
137   memset(the_cache, 0x0, sizeof(struct sos_ksl    137   memset(the_cache, 0x0, sizeof(struct sos_kslab_cache));
138   the_cache->name              = name;         !! 138   the_cache->name              = (char*)name;
139   the_cache->flags             = cache_flags;     139   the_cache->flags             = cache_flags;
140   the_cache->original_obj_size = obj_size;        140   the_cache->original_obj_size = obj_size;
141   the_cache->alloc_obj_size    = alloc_obj_siz    141   the_cache->alloc_obj_size    = alloc_obj_size;
142   the_cache->min_free_objects  = min_free_objs    142   the_cache->min_free_objects  = min_free_objs;
143   the_cache->nb_pages_per_slab = pages_per_sla    143   the_cache->nb_pages_per_slab = pages_per_slab;
144                                                   144   
145   /* Small size objets => the slab structure i    145   /* Small size objets => the slab structure is allocated directly in
146      the slab */                                  146      the slab */
147   if(alloc_obj_size <= sizeof(struct sos_kslab    147   if(alloc_obj_size <= sizeof(struct sos_kslab))
148     the_cache->flags |= ON_SLAB;                  148     the_cache->flags |= ON_SLAB;
149                                                   149   
150   /*                                              150   /*
151    * Compute the space left once the maximum n    151    * Compute the space left once the maximum number of objects
152    * have been allocated in the slab              152    * have been allocated in the slab
153    */                                             153    */
154   space_left = the_cache->nb_pages_per_slab*SO    154   space_left = the_cache->nb_pages_per_slab*SOS_PAGE_SIZE;
155   if(the_cache->flags & ON_SLAB)                  155   if(the_cache->flags & ON_SLAB)
156     space_left -= sizeof(struct sos_kslab);       156     space_left -= sizeof(struct sos_kslab);
157   the_cache->nb_objects_per_slab = space_left     157   the_cache->nb_objects_per_slab = space_left / alloc_obj_size;
158   space_left -= the_cache->nb_objects_per_slab    158   space_left -= the_cache->nb_objects_per_slab*alloc_obj_size;
159                                                   159 
160   /* Make sure a single slab is large enough t    160   /* Make sure a single slab is large enough to contain the minimum
161      number of objects requested */               161      number of objects requested */
162   if (the_cache->nb_objects_per_slab < min_fre    162   if (the_cache->nb_objects_per_slab < min_free_objs)
163     return -SOS_EINVAL;                           163     return -SOS_EINVAL;
164                                                   164 
165   /* If there is now enough place for both the    165   /* If there is now enough place for both the objects and the slab
166      structure, then make the slab structure O    166      structure, then make the slab structure ON_SLAB */
167   if (space_left >= sizeof(struct sos_kslab))     167   if (space_left >= sizeof(struct sos_kslab))
168     the_cache->flags |= ON_SLAB;                  168     the_cache->flags |= ON_SLAB;
169                                                   169 
170   return SOS_OK;                                  170   return SOS_OK;
171 }                                                 171 }
172                                                   172 
173                                                   173 
174 /** Helper function to add a new slab for the     174 /** Helper function to add a new slab for the given cache. */
175 static sos_ret_t                                  175 static sos_ret_t
176 cache_add_slab(struct sos_kslab_cache *kslab_c    176 cache_add_slab(struct sos_kslab_cache *kslab_cache,
177                sos_vaddr_t vaddr_slab,            177                sos_vaddr_t vaddr_slab,
178                struct sos_kslab *slab)            178                struct sos_kslab *slab)
179 {                                                 179 {
180   unsigned int i;                              !! 180   int i;
181                                                   181 
182   /* Setup the slab structure */                  182   /* Setup the slab structure */
183   memset(slab, 0x0, sizeof(struct sos_kslab));    183   memset(slab, 0x0, sizeof(struct sos_kslab));
184   slab->cache = kslab_cache;                      184   slab->cache = kslab_cache;
185                                                   185 
186   /* Establish the address of the first free o    186   /* Establish the address of the first free object */
187   slab->first_object = vaddr_slab;                187   slab->first_object = vaddr_slab;
188                                                   188 
189   /* Account for this new slab in the cache */    189   /* Account for this new slab in the cache */
190   slab->nb_free = kslab_cache->nb_objects_per_    190   slab->nb_free = kslab_cache->nb_objects_per_slab;
191   kslab_cache->nb_free_objects += slab->nb_fre    191   kslab_cache->nb_free_objects += slab->nb_free;
192                                                   192 
193   /* Build the list of free objects */            193   /* Build the list of free objects */
194   for (i = 0 ; i <  kslab_cache->nb_objects_pe    194   for (i = 0 ; i <  kslab_cache->nb_objects_per_slab ; i++)
195     {                                             195     {
196       sos_vaddr_t obj_vaddr;                      196       sos_vaddr_t obj_vaddr;
197                                                   197 
198       /* Set object's address */                  198       /* Set object's address */
199       obj_vaddr = slab->first_object + i*kslab    199       obj_vaddr = slab->first_object + i*kslab_cache->alloc_obj_size;
200                                                   200 
201       /* Add it to the list of free objects */    201       /* Add it to the list of free objects */
202       list_add_tail(slab->free,                   202       list_add_tail(slab->free,
203                     (struct sos_kslab_free_obj    203                     (struct sos_kslab_free_object *)obj_vaddr);
204     }                                             204     }
205                                                   205 
206   /* Add the slab to the cache's slab list: ad    206   /* Add the slab to the cache's slab list: add the head of the list
207      since this slab is non full */               207      since this slab is non full */
208   list_add_head(kslab_cache->slab_list, slab);    208   list_add_head(kslab_cache->slab_list, slab);
209                                                   209 
210   return SOS_OK;                                  210   return SOS_OK;
211 }                                                 211 }
212                                                   212 
213                                                   213 
214 /** Helper function to allocate a new slab for    214 /** Helper function to allocate a new slab for the given kslab_cache */
215 static sos_ret_t                                  215 static sos_ret_t
216 cache_grow(struct sos_kslab_cache *kslab_cache    216 cache_grow(struct sos_kslab_cache *kslab_cache,
217            sos_ui32_t alloc_flags)                217            sos_ui32_t alloc_flags)
218 {                                                 218 {
219   sos_ui32_t range_alloc_flags;                   219   sos_ui32_t range_alloc_flags;
220                                                   220 
221   struct sos_kmem_range *new_range;               221   struct sos_kmem_range *new_range;
222   sos_vaddr_t new_range_start;                    222   sos_vaddr_t new_range_start;
223                                                   223 
224   struct sos_kslab *new_slab;                     224   struct sos_kslab *new_slab;
225                                                   225 
226   /*                                              226   /*
227    * Setup the flags for the range allocation     227    * Setup the flags for the range allocation
228    */                                             228    */
229   range_alloc_flags = 0;                          229   range_alloc_flags = 0;
230                                                   230 
231   /* Atomic ? */                                  231   /* Atomic ? */
232   if (alloc_flags & SOS_KSLAB_ALLOC_ATOMIC)       232   if (alloc_flags & SOS_KSLAB_ALLOC_ATOMIC)
233     range_alloc_flags |= SOS_KMEM_VMM_ATOMIC;     233     range_alloc_flags |= SOS_KMEM_VMM_ATOMIC;
234                                                   234 
235   /* Need physical mapping NOW ? */               235   /* Need physical mapping NOW ? */
236   if (kslab_cache->flags & (SOS_KSLAB_CREATE_M    236   if (kslab_cache->flags & (SOS_KSLAB_CREATE_MAP
237                            | SOS_KSLAB_CREATE_    237                            | SOS_KSLAB_CREATE_ZERO))
238     range_alloc_flags |= SOS_KMEM_VMM_MAP;        238     range_alloc_flags |= SOS_KMEM_VMM_MAP;
239                                                   239 
240   /* Allocate the range */                        240   /* Allocate the range */
241   new_range = sos_kmem_vmm_new_range(kslab_cac    241   new_range = sos_kmem_vmm_new_range(kslab_cache->nb_pages_per_slab,
242                                      range_all    242                                      range_alloc_flags,
243                                      & new_ran    243                                      & new_range_start);
244   if (! new_range)                                244   if (! new_range)
245     return -SOS_ENOMEM;                           245     return -SOS_ENOMEM;
246                                                   246 
247   /* Allocate the slab structure */               247   /* Allocate the slab structure */
248   if (kslab_cache->flags & ON_SLAB)               248   if (kslab_cache->flags & ON_SLAB)
249     {                                             249     {
250       /* Slab structure is ON the slab: simply    250       /* Slab structure is ON the slab: simply set its address to the
251          end of the range */                      251          end of the range */
252       sos_vaddr_t slab_vaddr                      252       sos_vaddr_t slab_vaddr
253         = new_range_start + kslab_cache->nb_pa    253         = new_range_start + kslab_cache->nb_pages_per_slab*SOS_PAGE_SIZE
254           - sizeof(struct sos_kslab);             254           - sizeof(struct sos_kslab);
255       new_slab = (struct sos_kslab*)slab_vaddr    255       new_slab = (struct sos_kslab*)slab_vaddr;
256     }                                             256     }
257   else                                            257   else
258     {                                             258     {
259       /* Slab structure is OFF the slab: alloc    259       /* Slab structure is OFF the slab: allocate it from the cache of
260          slab structures */                       260          slab structures */
261       sos_vaddr_t slab_vaddr                      261       sos_vaddr_t slab_vaddr
262         = sos_kmem_cache_alloc(cache_of_struct    262         = sos_kmem_cache_alloc(cache_of_struct_kslab,
263                                alloc_flags);      263                                alloc_flags);
264       if (! slab_vaddr)                           264       if (! slab_vaddr)
265         {                                         265         {
266           sos_kmem_vmm_del_range(new_range);      266           sos_kmem_vmm_del_range(new_range);
267           return -SOS_ENOMEM;                     267           return -SOS_ENOMEM;
268         }                                         268         }
269       new_slab = (struct sos_kslab*)slab_vaddr    269       new_slab = (struct sos_kslab*)slab_vaddr;
270     }                                             270     }
271                                                   271 
272   cache_add_slab(kslab_cache, new_range_start,    272   cache_add_slab(kslab_cache, new_range_start, new_slab);
273   new_slab->range = new_range;                    273   new_slab->range = new_range;
274                                                   274 
275   /* Set the backlink from range to this slab     275   /* Set the backlink from range to this slab */
276   sos_kmem_vmm_set_slab(new_range, new_slab);     276   sos_kmem_vmm_set_slab(new_range, new_slab);
277                                                   277 
278   return SOS_OK;                                  278   return SOS_OK;
279 }                                                 279 }
280                                                   280 
281                                                   281 
282 /**                                               282 /**
283  * Helper function to release a slab              283  * Helper function to release a slab
284  *                                                284  *
285  * The corresponding range is always deleted,     285  * The corresponding range is always deleted, except when the @param
286  * must_del_range_now is not set. This happens    286  * must_del_range_now is not set. This happens only when the function
287  * gets called from sos_kmem_cache_release_str    287  * gets called from sos_kmem_cache_release_struct_range(), to avoid
288  * large recursions.                              288  * large recursions.
289  */                                               289  */
290 static sos_ret_t                                  290 static sos_ret_t
291 cache_release_slab(struct sos_kslab *slab,        291 cache_release_slab(struct sos_kslab *slab,
292                    sos_bool_t must_del_range_n    292                    sos_bool_t must_del_range_now)
293 {                                                 293 {
294   struct sos_kslab_cache *kslab_cache = slab->    294   struct sos_kslab_cache *kslab_cache = slab->cache;
295   struct sos_kmem_range *range = slab->range;     295   struct sos_kmem_range *range = slab->range;
296                                                   296 
297   SOS_ASSERT_FATAL(kslab_cache != NULL);          297   SOS_ASSERT_FATAL(kslab_cache != NULL);
298   SOS_ASSERT_FATAL(range != NULL);                298   SOS_ASSERT_FATAL(range != NULL);
299   SOS_ASSERT_FATAL(slab->nb_free == slab->cach    299   SOS_ASSERT_FATAL(slab->nb_free == slab->cache->nb_objects_per_slab);
300                                                   300 
301   /* First, remove the slab from the slabs' li    301   /* First, remove the slab from the slabs' list of the cache */
302   list_delete(kslab_cache->slab_list, slab);      302   list_delete(kslab_cache->slab_list, slab);
303   slab->cache->nb_free_objects -= slab->nb_fre    303   slab->cache->nb_free_objects -= slab->nb_free;
304                                                   304 
305   /* Release the slab structure if it is OFF s    305   /* Release the slab structure if it is OFF slab */
306   if (! (slab->cache->flags & ON_SLAB))           306   if (! (slab->cache->flags & ON_SLAB))
307     sos_kmem_cache_free((sos_vaddr_t)slab);       307     sos_kmem_cache_free((sos_vaddr_t)slab);
308                                                   308 
309   /* Ok, the range is not bound to any slab an    309   /* Ok, the range is not bound to any slab anymore */
310   sos_kmem_vmm_set_slab(range, NULL);             310   sos_kmem_vmm_set_slab(range, NULL);
311                                                   311 
312   /* Always delete the range now, unless we ar    312   /* Always delete the range now, unless we are told not to do so (see
313      sos_kmem_cache_release_struct_range() bel    313      sos_kmem_cache_release_struct_range() below) */
314   if (must_del_range_now)                         314   if (must_del_range_now)
315     return sos_kmem_vmm_del_range(range);         315     return sos_kmem_vmm_del_range(range);
316                                                   316 
317   return SOS_OK;                                  317   return SOS_OK;
318 }                                                 318 }
319                                                   319 
320                                                   320 
321 /**                                               321 /**
322  * Helper function to create the initial cache    322  * Helper function to create the initial cache of caches, with a very
323  * first slab in it, so that new cache structu    323  * first slab in it, so that new cache structures can be simply allocated.
324  * @return the cache structure for the cache o    324  * @return the cache structure for the cache of caches
325  */                                               325  */
326 static struct sos_kslab_cache *                   326 static struct sos_kslab_cache *
327 create_cache_of_caches(sos_vaddr_t vaddr_first    327 create_cache_of_caches(sos_vaddr_t vaddr_first_slab_of_caches,
328                        int nb_pages)              328                        int nb_pages)
329 {                                                 329 {
330   /* The preliminary cache structure we need i    330   /* The preliminary cache structure we need in order to allocate the
331      first slab in the cache of caches (alloca    331      first slab in the cache of caches (allocated on the stack !) */
332   struct sos_kslab_cache fake_cache_of_caches;    332   struct sos_kslab_cache fake_cache_of_caches;
333                                                   333 
334   /* The real cache structure for the cache of    334   /* The real cache structure for the cache of caches */
335   struct sos_kslab_cache *real_cache_of_caches    335   struct sos_kslab_cache *real_cache_of_caches;
336                                                   336 
337   /* The kslab structure for this very first s    337   /* The kslab structure for this very first slab */
338   struct sos_kslab       *slab_of_caches;         338   struct sos_kslab       *slab_of_caches;
339                                                   339 
340   /* Init the cache structure for the cache of    340   /* Init the cache structure for the cache of caches */
341   if (cache_initialize(& fake_cache_of_caches,    341   if (cache_initialize(& fake_cache_of_caches,
342                        "Caches", sizeof(struct    342                        "Caches", sizeof(struct sos_kslab_cache),
343                        nb_pages, 0, SOS_KSLAB_    343                        nb_pages, 0, SOS_KSLAB_CREATE_MAP | ON_SLAB))
344     /* Something wrong with the parameters */     344     /* Something wrong with the parameters */
345     return NULL;                                  345     return NULL;
346                                                   346 
347   memset((void*)vaddr_first_slab_of_caches, 0x    347   memset((void*)vaddr_first_slab_of_caches, 0x0, nb_pages*SOS_PAGE_SIZE);
348                                                   348 
349   /* Add the pages for the 1st slab of caches     349   /* Add the pages for the 1st slab of caches */
350   slab_of_caches = (struct sos_kslab*)(vaddr_f    350   slab_of_caches = (struct sos_kslab*)(vaddr_first_slab_of_caches
351                                        + nb_pa    351                                        + nb_pages*SOS_PAGE_SIZE
352                                        - sizeo    352                                        - sizeof(struct sos_kslab));
353                                                   353 
354   /* Add the abovementioned 1st slab to the ca    354   /* Add the abovementioned 1st slab to the cache of caches */
355   cache_add_slab(& fake_cache_of_caches,          355   cache_add_slab(& fake_cache_of_caches,
356                  vaddr_first_slab_of_caches,      356                  vaddr_first_slab_of_caches,
357                  slab_of_caches);                 357                  slab_of_caches);
358                                                   358 
359   /* Now we allocate a cache structure, which     359   /* Now we allocate a cache structure, which will be the real cache
360      of caches, ie a cache structure allocated    360      of caches, ie a cache structure allocated INSIDE the cache of
361      caches, not inside the stack */              361      caches, not inside the stack */
362   real_cache_of_caches                            362   real_cache_of_caches
363     = (struct sos_kslab_cache*) sos_kmem_cache    363     = (struct sos_kslab_cache*) sos_kmem_cache_alloc(& fake_cache_of_caches,
364                                                   364                                                      0);
365   /* We initialize it */                          365   /* We initialize it */
366   memcpy(real_cache_of_caches, & fake_cache_of    366   memcpy(real_cache_of_caches, & fake_cache_of_caches,
367          sizeof(struct sos_kslab_cache));         367          sizeof(struct sos_kslab_cache));
368   /* We need to update the slab's 'cache' fiel    368   /* We need to update the slab's 'cache' field */
369   slab_of_caches->cache = real_cache_of_caches    369   slab_of_caches->cache = real_cache_of_caches;
370                                                   370   
371   /* Add the cache to the list of slab caches     371   /* Add the cache to the list of slab caches */
372   list_add_tail(kslab_cache_list, real_cache_o    372   list_add_tail(kslab_cache_list, real_cache_of_caches);
373                                                   373 
374   return real_cache_of_caches;                    374   return real_cache_of_caches;
375 }                                                 375 }
376                                                   376 
377                                                   377 
378 /**                                               378 /**
379  * Helper function to create the initial cache    379  * Helper function to create the initial cache of ranges, with a very
380  * first slab in it, so that new kmem_range st    380  * first slab in it, so that new kmem_range structures can be simply
381  * allocated.                                     381  * allocated.
382  * @return the cache of kmem_range                382  * @return the cache of kmem_range
383  */                                               383  */
384 static struct sos_kslab_cache *                   384 static struct sos_kslab_cache *
385 create_cache_of_ranges(sos_vaddr_t vaddr_first    385 create_cache_of_ranges(sos_vaddr_t vaddr_first_slab_of_ranges,
386                        sos_size_t  sizeof_stru    386                        sos_size_t  sizeof_struct_range,
387                        int nb_pages)              387                        int nb_pages)
388 {                                                 388 {
389   /* The cache structure for the cache of kmem    389   /* The cache structure for the cache of kmem_range */
390   struct sos_kslab_cache *cache_of_ranges;        390   struct sos_kslab_cache *cache_of_ranges;
391                                                   391 
392   /* The kslab structure for the very first sl    392   /* The kslab structure for the very first slab of ranges */
393   struct sos_kslab *slab_of_ranges;               393   struct sos_kslab *slab_of_ranges;
394                                                   394 
395   cache_of_ranges = (struct sos_kslab_cache*)     395   cache_of_ranges = (struct sos_kslab_cache*)
396     sos_kmem_cache_alloc(cache_of_struct_kslab    396     sos_kmem_cache_alloc(cache_of_struct_kslab_cache,
397                          0);                      397                          0);
398   if (! cache_of_ranges)                          398   if (! cache_of_ranges)
399     return NULL;                                  399     return NULL;
400                                                   400 
401   /* Init the cache structure for the cache of    401   /* Init the cache structure for the cache of ranges with min objects
402      per slab = 2 !!! */                          402      per slab = 2 !!! */
403   if (cache_initialize(cache_of_ranges,           403   if (cache_initialize(cache_of_ranges,
404                        "struct kmem_range",       404                        "struct kmem_range",
405                        sizeof_struct_range,       405                        sizeof_struct_range,
406                        nb_pages, 2, SOS_KSLAB_    406                        nb_pages, 2, SOS_KSLAB_CREATE_MAP | ON_SLAB))
407     /* Something wrong with the parameters */     407     /* Something wrong with the parameters */
408     return NULL;                                  408     return NULL;
409                                                   409 
410   /* Add the cache to the list of slab caches     410   /* Add the cache to the list of slab caches */
411   list_add_tail(kslab_cache_list, cache_of_ran    411   list_add_tail(kslab_cache_list, cache_of_ranges);
412                                                   412 
413   /*                                              413   /*
414    * Add the first slab for this cache            414    * Add the first slab for this cache
415    */                                             415    */
416   memset((void*)vaddr_first_slab_of_ranges, 0x    416   memset((void*)vaddr_first_slab_of_ranges, 0x0, nb_pages*SOS_PAGE_SIZE);
417                                                   417 
418   /* Add the pages for the 1st slab of ranges     418   /* Add the pages for the 1st slab of ranges */
419   slab_of_ranges = (struct sos_kslab*)(vaddr_f    419   slab_of_ranges = (struct sos_kslab*)(vaddr_first_slab_of_ranges
420                                        + nb_pa    420                                        + nb_pages*SOS_PAGE_SIZE
421                                        - sizeo    421                                        - sizeof(struct sos_kslab));
422                                                   422 
423   cache_add_slab(cache_of_ranges,                 423   cache_add_slab(cache_of_ranges,
424                  vaddr_first_slab_of_ranges,      424                  vaddr_first_slab_of_ranges,
425                  slab_of_ranges);                 425                  slab_of_ranges);
426                                                   426 
427   return cache_of_ranges;                         427   return cache_of_ranges;
428 }                                                 428 }
429                                                   429 
430                                                   430 
/**
 * First half of the kmem_cache subsystem bootstrap.
 *
 * Creates, in order: the cache of caches (with one hand-mapped slab),
 * the cache of kmem_range structures (with one hand-mapped slab), and
 * an empty cache for off-slab kslab structures.  The pages backing the
 * two bootstrap slabs are allocated and mapped manually here because
 * the kmem_vmm range subsystem is not available yet; the caller later
 * legitimates these mappings via sos_kmem_cache_subsystem_setup_commit().
 *
 * @param kernel_core_base Not read in this function body — presumably
 *        kept for interface symmetry; only kernel_core_top is used to
 *        place the bootstrap slabs (TODO confirm against callers)
 * @param kernel_core_top  Top of the kernel image; bootstrap slabs are
 *        mapped right above it (page-aligned upwards)
 * @param sizeof_struct_range Size of a struct kmem_range (opaque here)
 * @param first_struct_slab_of_caches   [out] kslab of the 1st slab of caches
 * @param first_slab_of_caches_base     [out] base vaddr of that slab
 * @param first_slab_of_caches_nb_pages [out] its size, in pages
 * @param first_struct_slab_of_ranges   [out] kslab of the 1st slab of ranges
 * @param first_slab_of_ranges_base     [out] base vaddr of that slab
 * @param first_slab_of_ranges_nb_pages [out] its size, in pages
 * @return the cache of kmem_range structures
 */
struct sos_kslab_cache *
sos_kmem_cache_subsystem_setup_prepare(sos_vaddr_t kernel_core_base,
                                       sos_vaddr_t kernel_core_top,
                                       sos_size_t  sizeof_struct_range,
                                       /* results */
                                       struct sos_kslab **first_struct_slab_of_caches,
                                       sos_vaddr_t *first_slab_of_caches_base,
                                       sos_count_t *first_slab_of_caches_nb_pages,
                                       struct sos_kslab **first_struct_slab_of_ranges,
                                       sos_vaddr_t *first_slab_of_ranges_base,
                                       sos_count_t *first_slab_of_ranges_nb_pages)
{
  int i;
  sos_ret_t   retval;
  sos_vaddr_t vaddr;

  /* The cache of ranges we are about to allocate */
  struct sos_kslab_cache *cache_of_ranges;

  /* In the beginning, there isn't any cache */
  kslab_cache_list = NULL;
  cache_of_struct_kslab = NULL;
  cache_of_struct_kslab_cache = NULL;

  /*
   * Create the cache of caches, initialised with 1 allocated slab
   */

  /* Allocate the pages needed for the 1st slab of caches, and map them
     in kernel space, right after the kernel */
  *first_slab_of_caches_base = SOS_PAGE_ALIGN_SUP(kernel_core_top);
  for (i = 0, vaddr = *first_slab_of_caches_base ;
       i < NB_PAGES_IN_SLAB_OF_CACHES ;
       i++, vaddr += SOS_PAGE_SIZE)
    {
      sos_paddr_t ppage_paddr;

      /* Grab a fresh physical page (must succeed this early in boot) */
      ppage_paddr
        = sos_physmem_ref_physpage_new(FALSE);
      SOS_ASSERT_FATAL(ppage_paddr != (sos_paddr_t)NULL);

      retval = sos_paging_map(ppage_paddr, vaddr,
                              FALSE,
                              SOS_VM_MAP_ATOMIC
                              | SOS_VM_MAP_PROT_READ
                              | SOS_VM_MAP_PROT_WRITE);
      SOS_ASSERT_FATAL(retval == SOS_OK);

      /* Drop our explicit reference: the mapping keeps the page alive.
         NOTE(review): asserting == FALSE presumably means "page not
         actually freed" — confirm sos_physmem_unref_physpage's return
         convention */
      retval = sos_physmem_unref_physpage(ppage_paddr);
      SOS_ASSERT_FATAL(retval == FALSE);
    }

  /* Create the cache of caches */
  *first_slab_of_caches_nb_pages = NB_PAGES_IN_SLAB_OF_CACHES;
  cache_of_struct_kslab_cache
    = create_cache_of_caches(*first_slab_of_caches_base,
                             NB_PAGES_IN_SLAB_OF_CACHES);
  SOS_ASSERT_FATAL(cache_of_struct_kslab_cache != NULL);

  /* Retrieve the slab that should have been allocated */
  *first_struct_slab_of_caches
    = list_get_head(cache_of_struct_kslab_cache->slab_list);


  /*
   * Create the cache of ranges, initialised with 1 allocated slab.
   * Note: 'vaddr' was left by the previous loop pointing just past the
   * slab of caches, so the two bootstrap slabs are contiguous.
   */
  *first_slab_of_ranges_base = vaddr;
  /* Allocate the 1st slab */
  for (i = 0, vaddr = *first_slab_of_ranges_base ;
       i < NB_PAGES_IN_SLAB_OF_RANGES ;
       i++, vaddr += SOS_PAGE_SIZE)
    {
      sos_paddr_t ppage_paddr;

      ppage_paddr
        = sos_physmem_ref_physpage_new(FALSE);
      SOS_ASSERT_FATAL(ppage_paddr != (sos_paddr_t)NULL);

      retval = sos_paging_map(ppage_paddr, vaddr,
                              FALSE,
                              SOS_VM_MAP_ATOMIC
                              | SOS_VM_MAP_PROT_READ
                              | SOS_VM_MAP_PROT_WRITE);
      SOS_ASSERT_FATAL(retval == SOS_OK);

      retval = sos_physmem_unref_physpage(ppage_paddr);
      SOS_ASSERT_FATAL(retval == FALSE);
    }

  /* Create the cache of ranges */
  *first_slab_of_ranges_nb_pages = NB_PAGES_IN_SLAB_OF_RANGES;
  cache_of_ranges = create_cache_of_ranges(*first_slab_of_ranges_base,
                                           sizeof_struct_range,
                                           NB_PAGES_IN_SLAB_OF_RANGES);
  SOS_ASSERT_FATAL(cache_of_ranges != NULL);

  /* Retrieve the slab that should have been allocated */
  *first_struct_slab_of_ranges
    = list_get_head(cache_of_ranges->slab_list);

  /*
   * Create the cache of slabs, without any allocated slab yet: it can
   * grow on demand now that the caches above are operational
   */
  cache_of_struct_kslab
    = sos_kmem_cache_create("off-slab slab structures",
                            sizeof(struct sos_kslab),
                            1,
                            0,
                            SOS_KSLAB_CREATE_MAP);
  SOS_ASSERT_FATAL(cache_of_struct_kslab != NULL);

  return cache_of_ranges;
}
545                                                   545 
546                                                   546 
547 sos_ret_t                                         547 sos_ret_t
548 sos_kmem_cache_subsystem_setup_commit(struct s    548 sos_kmem_cache_subsystem_setup_commit(struct sos_kslab *first_struct_slab_of_caches,
549                                       struct s    549                                       struct sos_kmem_range *first_range_of_caches,
550                                       struct s    550                                       struct sos_kslab *first_struct_slab_of_ranges,
551                                       struct s    551                                       struct sos_kmem_range *first_range_of_ranges)
552 {                                                 552 {
553   first_struct_slab_of_caches->range = first_r    553   first_struct_slab_of_caches->range = first_range_of_caches;
554   first_struct_slab_of_ranges->range = first_r    554   first_struct_slab_of_ranges->range = first_range_of_ranges;
555   return SOS_OK;                                  555   return SOS_OK;
556 }                                                 556 }
557                                                   557 
558                                                   558 
559 struct sos_kslab_cache *                          559 struct sos_kslab_cache *
560 sos_kmem_cache_create(const char* name,           560 sos_kmem_cache_create(const char* name,
561                       sos_size_t  obj_size,       561                       sos_size_t  obj_size,
562                       sos_count_t pages_per_sl    562                       sos_count_t pages_per_slab,
563                       sos_count_t min_free_obj    563                       sos_count_t min_free_objs,
564                       sos_ui32_t  cache_flags)    564                       sos_ui32_t  cache_flags)
565 {                                                 565 {
566   struct sos_kslab_cache *new_cache;              566   struct sos_kslab_cache *new_cache;
567                                                << 
568   SOS_ASSERT_FATAL(obj_size > 0);              << 
569                                                   567 
570   /* Allocate the new cache */                    568   /* Allocate the new cache */
571   new_cache = (struct sos_kslab_cache*)           569   new_cache = (struct sos_kslab_cache*)
572     sos_kmem_cache_alloc(cache_of_struct_kslab    570     sos_kmem_cache_alloc(cache_of_struct_kslab_cache,
573                          0/* NOT ATOMIC */);      571                          0/* NOT ATOMIC */);
574   if (! new_cache)                                572   if (! new_cache)
575     return NULL;                                  573     return NULL;
576                                                   574 
577   if (cache_initialize(new_cache, name, obj_si    575   if (cache_initialize(new_cache, name, obj_size,
578                        pages_per_slab, min_fre    576                        pages_per_slab, min_free_objs,
579                        cache_flags))              577                        cache_flags))
580     {                                             578     {
581       /* Something was wrong */                   579       /* Something was wrong */
582       sos_kmem_cache_free((sos_vaddr_t)new_cac    580       sos_kmem_cache_free((sos_vaddr_t)new_cache);
583       return NULL;                                581       return NULL;
584     }                                             582     }
585                                                   583 
586   /* Add the cache to the list of slab caches     584   /* Add the cache to the list of slab caches */
587   list_add_tail(kslab_cache_list, new_cache);     585   list_add_tail(kslab_cache_list, new_cache);
588                                                   586   
589   /* if the min_free_objs is set, pre-allocate    587   /* if the min_free_objs is set, pre-allocate a slab */
590   if (min_free_objs)                              588   if (min_free_objs)
591     {                                             589     {
592       if (cache_grow(new_cache, 0 /* Not atomi    590       if (cache_grow(new_cache, 0 /* Not atomic */) != SOS_OK)
593         {                                         591         {
594           sos_kmem_cache_destroy(new_cache);      592           sos_kmem_cache_destroy(new_cache);
595           return NULL; /* Not enough memory */    593           return NULL; /* Not enough memory */
596         }                                         594         }
597     }                                             595     }
598                                                   596 
599   return new_cache;                               597   return new_cache;  
600 }                                                 598 }
601                                                   599 
602                                                   600   
/**
 * Destroy a slab cache: release all its (fully free) slabs and give
 * the cache structure back to the cache of caches.
 *
 * @param kslab_cache The cache to destroy; may be NULL
 * @return SOS_OK on success, -SOS_EINVAL for a NULL cache,
 *         -SOS_EBUSY if any object of the cache is still allocated
 */
sos_ret_t sos_kmem_cache_destroy(struct sos_kslab_cache *kslab_cache)
{
  int nb_slabs;
  struct sos_kslab *slab;

  if (! kslab_cache)
    return -SOS_EINVAL;

  /* Refuse to destroy the cache if there are any objects still
     allocated: a slab whose nb_free is below the per-slab capacity
     still holds live objects */
  list_foreach(kslab_cache->slab_list, slab, nb_slabs)
    {
      if (slab->nb_free != kslab_cache->nb_objects_per_slab)
        return -SOS_EBUSY;
    }

  /* Remove all the slabs (second argument TRUE — presumably "also
     release the underlying memory"; see cache_release_slab) */
  while ((slab = list_get_head(kslab_cache->slab_list)) != NULL)
    {
      cache_release_slab(slab, TRUE);
    }

  /* Remove the cache.
     NOTE(review): the cache is not explicitly unlinked from
     kslab_cache_list here — verify that sos_kmem_cache_free takes
     care of it, otherwise the list would keep a dangling pointer */
  return sos_kmem_cache_free((sos_vaddr_t)kslab_cache);
}
628                                                   626 
629                                                   627 
/**
 * Allocate one object from the given cache.
 *
 * Grows the cache by one slab when no free object is available, and
 * also pre-grows it when the allocation would bring the number of free
 * objects below the cache's configured minimum.
 *
 * @param kslab_cache The cache to allocate from
 * @param alloc_flags Allocation flags, forwarded to cache_grow
 * @return the object's kernel virtual address, or NULL on failure
 */
sos_vaddr_t sos_kmem_cache_alloc(struct sos_kslab_cache *kslab_cache,
                                 sos_ui32_t alloc_flags)
{
  sos_vaddr_t obj_vaddr;
  struct sos_kslab * slab_head;
#define ALLOC_RET return

  /* If the slab at the head of the slabs' list has no free object,
     then the other slabs don't either => need to allocate a new
     slab (the list is kept ordered: full slabs are moved to the tail
     below) */
  if ((! kslab_cache->slab_list)
      || (! list_get_head(kslab_cache->slab_list)->free))
    {
      if (cache_grow(kslab_cache, alloc_flags) != SOS_OK)
        /* Not enough memory or blocking alloc */
        ALLOC_RET( (sos_vaddr_t)NULL);
    }

  /* Here: we are sure that list_get_head(kslab_cache->slab_list)
     exists *AND* that list_get_head(kslab_cache->slab_list)->free is
     NOT NULL */
  slab_head = list_get_head(kslab_cache->slab_list);
  SOS_ASSERT_FATAL(slab_head != NULL);

  /* Allocate the object at the head of the slab at the head of the
     slabs' list */
  obj_vaddr = (sos_vaddr_t)list_pop_head(slab_head->free);
  slab_head->nb_free --;
  kslab_cache->nb_free_objects --;

  /* If needed, reset object's contents */
  if (kslab_cache->flags & SOS_KSLAB_CREATE_ZERO)
    memset((void*)obj_vaddr, 0x0, kslab_cache->alloc_obj_size);

  /* Slab is now full ? */
  if (slab_head->free == NULL)
    {
      /* Transfer it at the tail of the slabs' list, preserving the
         invariant relied upon at the top of this function */
      struct sos_kslab *slab;
      slab = list_pop_head(kslab_cache->slab_list);
      list_add_tail(kslab_cache->slab_list, slab);
    }

  /*
   * For caches that require a minimum amount of free objects left,
   * allocate a slab if needed.
   *
   * Notice the "== min_objects - 1": we did not write " <
   * min_objects" because for the cache of kmem structure, this would
   * lead to a chicken-and-egg problem, since cache_grow below would
   * call cache_alloc again for the kmem_vmm cache, so we return here
   * with the same cache. If the test were " < min_objects", then we
   * would call cache_grow again for the kmem_vmm cache again and
   * again... until we reach the bottom of our stack (infinite
   * recursion). By telling precisely "==", then the cache_grow would
   * only be called the first time.
   */
  if ((kslab_cache->min_free_objects > 0)
      && (kslab_cache->nb_free_objects == (kslab_cache->min_free_objects - 1)))
    {
      /* No: allocate a new slab now */
      if (cache_grow(kslab_cache, alloc_flags) != SOS_OK)
        {
          /* Not enough free memory or blocking alloc => undo the
             allocation */
          sos_kmem_cache_free(obj_vaddr);
          ALLOC_RET( (sos_vaddr_t)NULL);
        }
    }

  ALLOC_RET(obj_vaddr);
}
702                                                   700 
703                                                   701 
704 /**                                               702 /**
705  * Helper function to free the object located     703  * Helper function to free the object located at the given address.
706  *                                                704  *
707  * @param empty_slab is the address of the sla    705  * @param empty_slab is the address of the slab to release, if removing
708  * the object causes the slab to become empty.    706  * the object causes the slab to become empty.
709  */                                               707  */
710 inline static                                     708 inline static
711 sos_ret_t                                         709 sos_ret_t
712 free_object(sos_vaddr_t vaddr,                    710 free_object(sos_vaddr_t vaddr,
713             struct sos_kslab ** empty_slab)       711             struct sos_kslab ** empty_slab)
714 {                                                 712 {
715   struct sos_kslab_cache *kslab_cache;            713   struct sos_kslab_cache *kslab_cache;
716                                                   714 
717   /* Lookup the slab containing the object in     715   /* Lookup the slab containing the object in the slabs' list */
718   struct sos_kslab *slab = sos_kmem_vmm_resolv    716   struct sos_kslab *slab = sos_kmem_vmm_resolve_slab(vaddr);
719                                                   717 
720   /* By default, consider that the slab will n    718   /* By default, consider that the slab will not become empty */
721   *empty_slab = NULL;                             719   *empty_slab = NULL;
722                                                   720 
723   /* Did not find the slab */                     721   /* Did not find the slab */
724   if (! slab)                                     722   if (! slab)
725     return -SOS_EINVAL;                           723     return -SOS_EINVAL;
726                                                   724 
727   SOS_ASSERT_FATAL(slab->cache);                  725   SOS_ASSERT_FATAL(slab->cache);
728   kslab_cache = slab->cache;                      726   kslab_cache = slab->cache;
729                                                   727 
730   /*                                              728   /*
731    * Check whether the address really could ma    729    * Check whether the address really could mark the start of an actual
732    * allocated object                             730    * allocated object
733    */                                             731    */
734   /* Address multiple of an object's size ? */    732   /* Address multiple of an object's size ? */
735   if (( (vaddr - slab->first_object)              733   if (( (vaddr - slab->first_object)
736         % kslab_cache->alloc_obj_size) != 0)      734         % kslab_cache->alloc_obj_size) != 0)
737     return -SOS_EINVAL;                           735     return -SOS_EINVAL;
738   /* Address not too large ? */                   736   /* Address not too large ? */
739   if (( (vaddr - slab->first_object)              737   if (( (vaddr - slab->first_object)
740         / kslab_cache->alloc_obj_size) >= ksla    738         / kslab_cache->alloc_obj_size) >= kslab_cache->nb_objects_per_slab)
741     return -SOS_EINVAL;                           739     return -SOS_EINVAL;
742                                                   740 
743   /*                                              741   /*
744    * Ok: we now release the object                742    * Ok: we now release the object
745    */                                             743    */
746                                                   744 
747   /* Did find a full slab => will not be full     745   /* Did find a full slab => will not be full any more => move it
748      to the head of the slabs' list */            746      to the head of the slabs' list */
749   if (! slab->free)                               747   if (! slab->free)
750     {                                             748     {
751       list_delete(kslab_cache->slab_list, slab    749       list_delete(kslab_cache->slab_list, slab);
752       list_add_head(kslab_cache->slab_list, sl    750       list_add_head(kslab_cache->slab_list, slab);
753     }                                             751     }
754                                                   752 
755   /* Release the object */                        753   /* Release the object */
756   list_add_head(slab->free, (struct sos_kslab_    754   list_add_head(slab->free, (struct sos_kslab_free_object*)vaddr);
757   slab->nb_free++;                                755   slab->nb_free++;
758   kslab_cache->nb_free_objects++;                 756   kslab_cache->nb_free_objects++;
759   SOS_ASSERT_FATAL(slab->nb_free <= slab->cach    757   SOS_ASSERT_FATAL(slab->nb_free <= slab->cache->nb_objects_per_slab);
760                                                   758 
761   /* Cause the slab to be released if it becom    759   /* Cause the slab to be released if it becomes empty, and if we are
762      allowed to do it */                          760      allowed to do it */
763   if ((slab->nb_free >= kslab_cache->nb_object    761   if ((slab->nb_free >= kslab_cache->nb_objects_per_slab)
764       && (kslab_cache->nb_free_objects - slab-    762       && (kslab_cache->nb_free_objects - slab->nb_free
765           >= kslab_cache->min_free_objects))      763           >= kslab_cache->min_free_objects))
766     {                                             764     {
767       *empty_slab = slab;                         765       *empty_slab = slab;
768     }                                             766     }
769                                                   767 
770   return SOS_OK;                                  768   return SOS_OK;
771 }                                                 769 }
772                                                   770 
773                                                   771 
774 sos_ret_t sos_kmem_cache_free(sos_vaddr_t vadd    772 sos_ret_t sos_kmem_cache_free(sos_vaddr_t vaddr)
775 {                                                 773 {
776   sos_ret_t retval;                               774   sos_ret_t retval;
777   struct sos_kslab *empty_slab;                   775   struct sos_kslab *empty_slab;
778                                                   776 
779   /* Remove the object from the slab */           777   /* Remove the object from the slab */
780   retval = free_object(vaddr, & empty_slab);      778   retval = free_object(vaddr, & empty_slab);
781   if (retval != SOS_OK)                           779   if (retval != SOS_OK)
782     return retval;                                780     return retval;
783                                                   781 
784   /* Remove the slab and the underlying range     782   /* Remove the slab and the underlying range if needed */
785   if (empty_slab != NULL)                         783   if (empty_slab != NULL)
786     return cache_release_slab(empty_slab, TRUE    784     return cache_release_slab(empty_slab, TRUE);
787                                                   785 
788   return SOS_OK;                                  786   return SOS_OK;
789 }                                                 787 }
790                                                   788 
791                                                   789 
792 struct sos_kmem_range *                           790 struct sos_kmem_range *
793 sos_kmem_cache_release_struct_range(struct sos    791 sos_kmem_cache_release_struct_range(struct sos_kmem_range *the_range)
794 {                                                 792 {
795   sos_ret_t retval;                               793   sos_ret_t retval;
796   struct sos_kslab *empty_slab;                   794   struct sos_kslab *empty_slab;
797                                                   795 
798   /* Remove the object from the slab */           796   /* Remove the object from the slab */
799   retval = free_object((sos_vaddr_t)the_range,    797   retval = free_object((sos_vaddr_t)the_range, & empty_slab);
800   if (retval != SOS_OK)                           798   if (retval != SOS_OK)
801     return NULL;                                  799     return NULL;
802                                                   800 
803   /* Remove the slab BUT NOT the underlying ra    801   /* Remove the slab BUT NOT the underlying range if needed */
804   if (empty_slab != NULL)                         802   if (empty_slab != NULL)
805     {                                             803     {
806       struct sos_kmem_range *empty_range = emp    804       struct sos_kmem_range *empty_range = empty_slab->range;
807       SOS_ASSERT_FATAL(cache_release_slab(empt    805       SOS_ASSERT_FATAL(cache_release_slab(empty_slab, FALSE) == SOS_OK);
808       SOS_ASSERT_FATAL(empty_range != NULL);      806       SOS_ASSERT_FATAL(empty_range != NULL);
809       return empty_range;                         807       return empty_range;
810     }                                             808     }
811                                                   809 
812   return NULL;                                    810   return NULL;
813 }                                                 811 }
814                                                   812 
                                                      

[ source navigation ] [ diff markup ] [ identifier search ] [ general search ]