diff -Npur sos-code-article5/sos/kmalloc.c sos-code-article5-modif/sos/kmalloc.c
--- sos-code-article5/sos/kmalloc.c	2004-12-18 21:01:54.000000000 +0100
+++ sos-code-article5-modif/sos/kmalloc.c	2005-09-27 16:03:59.000000000 +0200
@@ -31,22 +31,40 @@
 static struct {
   const char             *name;
   sos_size_t             object_size;
-  sos_count_t            pages_per_slab;
   struct sos_kslab_cache *cache;
 } kmalloc_cache[] =
   {
-    { "kmalloc 8B objects",     8,     1  },
-    { "kmalloc 16B objects",    16,    1  },
-    { "kmalloc 32B objects",    32,    1  },
-    { "kmalloc 64B objects",    64,    1  },
-    { "kmalloc 128B objects",   128,   1  },
-    { "kmalloc 256B objects",   256,   2  },
-    { "kmalloc 1024B objects",  1024,  2  },
-    { "kmalloc 2048B objects",  2048,  3  },
-    { "kmalloc 4096B objects",  4096,  4  },
-    { "kmalloc 8192B objects",  8192,  8  },
-    { "kmalloc 16384B objects", 16384, 12 },
-    { NULL, 0, 0, NULL }
+    { "kmalloc 8B objects",     8    },
+    { "kmalloc 12B objects",    12   },
+    { "kmalloc 16B objects",    16   },
+    { "kmalloc 20B objects",    20   },
+    { "kmalloc 24B objects",    24   },
+    { "kmalloc 28B objects",    28   },
+    { "kmalloc 32B objects",    32   },
+    { "kmalloc 40B objects",    40   },
+    { "kmalloc 46B objects",    46   },
+    { "kmalloc 52B objects",    52   },
+    { "kmalloc 64B objects",    64   },
+    { "kmalloc 80B objects",    80   },
+    { "kmalloc 96B objects",    96   },
+    { "kmalloc 112B objects",   112  },
+    { "kmalloc 128B objects",   128  },
+    { "kmalloc 160B objects",   160  },
+    { "kmalloc 200B objects",   200  },
+    { "kmalloc 256B objects",   256  },
+    { "kmalloc 340B objects",   340  },
+    { "kmalloc 426B objects",   426  },
+    { "kmalloc 512B objects",   512  },
+    { "kmalloc 768B objects",   768  },
+    { "kmalloc 1024B objects",  1024 },
+    { "kmalloc 1536B objects",  1536 },
+    { "kmalloc 2048B objects",  2048 },
+    { "kmalloc 3072B objects",  3072 },
+    { "kmalloc 4096B objects",  4096 },
+    { "kmalloc 6144B objects",  6144 },
+    { "kmalloc 8192B objects",  8192 },
+    { "kmalloc 9728B objects",  9728 },
+    { NULL, 0, 0 }
   };
 
 
@@ -58,10 +76,10 @@ sos_ret_t sos_kmalloc_setup()
       struct sos_kslab_cache *new_cache;
       new_cache = sos_kmem_cache_create(kmalloc_cache[i].name,
 					kmalloc_cache[i].object_size,
-					kmalloc_cache[i].pages_per_slab,
+          ALIGN_DEFAULT,
 					0,
-					SOS_KSLAB_CREATE_MAP
-					);
+					SOS_KSLAB_CREATE_MAP,
+					NULL, NULL);
       SOS_ASSERT_FATAL(new_cache != NULL);
       kmalloc_cache[i].cache = new_cache;
     }
@@ -76,7 +94,7 @@ sos_vaddr_t sos_kmalloc(sos_size_t size,
   for (i = 0 ; kmalloc_cache[i].object_size != 0 ; i ++)
     {
       if (kmalloc_cache[i].object_size >= size)
-	return sos_kmem_cache_alloc(kmalloc_cache[i].cache,
+        return sos_kmem_cache_alloc(kmalloc_cache[i].cache,
 				    (flags
 				     & SOS_KMALLOC_ATOMIC)?
 				    SOS_KSLAB_ALLOC_ATOMIC:0);
@@ -101,9 +119,9 @@ sos_ret_t sos_kfree(sos_vaddr_t vaddr)
   
   /* We first pretend this object is allocated in a pre-allocated
      kmalloc cache */
-  if (! sos_kmem_cache_free(vaddr))
+  if (sos_kmem_cache_free(vaddr) == SOS_OK)
     return SOS_OK; /* Great ! We guessed right ! */
-    
+  
   /* Here we're wrong: it appears not to be an object in a
      pre-allocated kmalloc cache. So we try to pretend this is a
      kmem_vmm area */
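
The patched sos_kmalloc() simply walks the kmalloc_cache[] table above and picks the first cache whose object_size is at least the requested size; anything larger than the last entry falls back to a raw kmem_vmm range. Below is a minimal, standalone sketch of that lookup, outside SOS: the table only mirrors a few of the sizes added by the patch, and find_size_class() is a hypothetical helper, not part of the SOS API.

#include <stddef.h>

/* A few of the size classes declared in the patched kmalloc_cache[] table */
static const size_t kmalloc_sizes[] = { 8, 12, 16, 20, 24, 28, 32, 40, 0 };

/* Return the index of the first class able to hold 'size' bytes, or -1 if
   the request is too large and must be served by a dedicated kmem_vmm range */
static int find_size_class(size_t size)
{
  int i;
  for (i = 0; kmalloc_sizes[i] != 0; i++)
    if (kmalloc_sizes[i] >= size)
      return i;
  return -1;
}
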
diff -Npur sos-code-article5/sos/kmem_slab.c sos-code-article5-modif/sos/kmem_slab.c
--- sos-code-article5/sos/kmem_slab.c	2004-12-18 21:01:54.000000000 +0100
+++ sos-code-article5-modif/sos/kmem_slab.c	2005-10-02 16:57:59.000000000 +0200
@@ -27,8 +27,7 @@
 #include "kmem_slab.h"
 
 /* Dimensioning constants */
-#define NB_PAGES_IN_SLAB_OF_CACHES 1
-#define NB_PAGES_IN_SLAB_OF_RANGES 1
+#define MIN_OBJECT_PER_SLAB 8
 
 /** The structure of a slab cache */
 struct sos_kslab_cache
@@ -42,18 +41,29 @@ struct sos_kslab_cache
   sos_count_t nb_objects_per_slab;
   sos_count_t nb_pages_per_slab;
   sos_count_t min_free_objects;
+  
+  sos_count_t space_left;   /* Unused bytes left in each slab, used to bound the coloring offset */
+  sos_count_t cache_color;  /* Coloring offset (in bytes) to give to the next slab */
 
 /* slab cache flags */
 // #define SOS_KSLAB_CREATE_MAP  (1<<0) /* See kmem_slab.h */
 // #define SOS_KSLAB_CREATE_ZERO (1<<1) /* " " " " " " " " */
 #define ON_SLAB (1<<31) /* struct sos_kslab is included inside the slab */
+#define ON_ONE_PAGE (1<<30) /* the free-list link (struct sos_kslab_free_object) is stored inside each buffer, so no bufctl is needed (implies ON_SLAB) */
   sos_ui32_t  flags;
+  
+  /* Ctor/Dtor routines */
+  sos_constructor_handler_t constructor;
+  sos_constructor_handler_t destructor;
 
   /* Supervision data (updated at run-time) */
   sos_count_t nb_free_objects;
 
   /* The lists of slabs owned by this cache */
-  struct sos_kslab *slab_list; /* head = non full, tail = full */
+  struct sos_kslab *slab_list; /* head = full, tail = empty or partially full */
+  
+  /* Pointer to the first slab of the list whose free list is not empty */
+  struct sos_kslab *first_not_empty_slab;
 
   /* The caches are linked together on the kslab_cache_list */
   struct sos_kslab_cache *prev, *next;
@@ -63,14 +73,20 @@ struct sos_kslab_cache
 /** The structure of a slab */
 struct sos_kslab
 {
-  /** Number of free objects on this slab */
-  sos_count_t nb_free;
+  /** Number of used objects on this slab */
+  sos_count_t nb_used;
 
   /** The list of these free objects */
-  struct sos_kslab_free_object *free;
+  union {
+    struct sos_kslab_free_object *free_obj;
+    struct sos_kslab_bufctl *bufctl;
+  } free;
 
   /** The address of the associated range structure */
   struct sos_kmem_range *range;
+  
+  /** Coloring offset (in bytes) applied to this slab's objects */
+  sos_count_t slab_color;
 
   /** Virtual start address of this range */
   sos_vaddr_t first_object;
@@ -82,11 +98,23 @@ struct sos_kslab
   struct sos_kslab *prev, *next;
 };
 
-
 /** The structure of the free objects in the slab */
 struct sos_kslab_free_object
 {
-  struct sos_kslab_free_object *prev, *next;
+  struct sos_kslab_free_object *next;
+};
+
+/** The structure describing a free buffer when the free list is kept outside the slab */
+struct sos_kslab_bufctl
+{
+  /** Link to the other buffer controllers of the same slab */
+  struct sos_kslab_bufctl *next;
+  
+  /** Virtual address of the buffer */
+  sos_vaddr_t buf_vaddr;
+  
+  /** Slab owning this buffer controller */
+  struct sos_kslab *slab;
 };
 
 /** The cache of slab caches */
@@ -95,35 +123,70 @@ static struct sos_kslab_cache *cache_of_
 /** The cache of slab structures for non-ON_SLAB caches */
 static struct sos_kslab_cache *cache_of_struct_kslab;
 
+/** The cache of bufctl structures for non-ON_ONE_PAGE caches */
+static struct sos_kslab_cache *cache_of_struct_kbufctl;
+
 /** The list of slab caches */
 static struct sos_kslab_cache *kslab_cache_list;
 
+/* Helper function to compute the number of pages per slab and the actual allocated object size. */
+static sos_ret_t compute_nb_pages_per_slab(sos_size_t         original_obj_size,
+                                           sos_count_t        align,
+                                           sos_ui32_t         cache_flags,
+                                           sos_constructor_handler_t constructor,
+                                           /*out*/sos_count_t *pages_per_slab,
+                                           /*out*/sos_size_t  *alloc_obj_size) {
+  /* Default allocation size is the requested one */
+  *alloc_obj_size = original_obj_size;
+  
+  if (constructor != NULL) {
+    /* Reserve room for the free-list link at the end of the buffer, so the constructed object is preserved while free */
+    *alloc_obj_size += sizeof(struct sos_kslab_free_object);
+  } else {
+    /* Make sure the requested size is large enough to store a
+       free_object structure */
+    if (*alloc_obj_size < sizeof(struct sos_kslab_free_object))
+      *alloc_obj_size = sizeof(struct sos_kslab_free_object);
+  }
+  
+  /* Align obj_size on align bytes */
+  *alloc_obj_size = SOS_ALIGN_SUP(*alloc_obj_size, align);
+  
+  /* Compute number of pages per slab */
+  *pages_per_slab = ((MIN_OBJECT_PER_SLAB * *alloc_obj_size) / SOS_PAGE_SIZE) + 1;
+  /* More than one page per slab => bufctls will be used, so no room is needed for the free-list link */
+  if (constructor != NULL && *pages_per_slab > 1)
+    *alloc_obj_size = SOS_ALIGN_SUP(original_obj_size, align);
+  
+  return SOS_OK;
+}
+
 /* Helper function to initialize a cache structure */
 static sos_ret_t
 cache_initialize(/*out*/struct sos_kslab_cache *the_cache,
 		 const char* name,
 		 sos_size_t  obj_size,
-		 sos_count_t pages_per_slab,
+                 sos_count_t align,
 		 sos_count_t min_free_objs,
-		 sos_ui32_t  cache_flags)
+		 sos_ui32_t  cache_flags,
+                 sos_constructor_handler_t constructor,
+                 sos_constructor_handler_t destructor)
 {
-  unsigned int space_left;
+  sos_count_t pages_per_slab;
+  sos_count_t space_left;
   sos_size_t alloc_obj_size;
 
   if (obj_size <= 0)
     return -SOS_EINVAL;
-
-  /* Default allocation size is the requested one */
-  alloc_obj_size = obj_size;
-
-  /* Make sure the requested size is large enough to store a
-     free_object structure */
-  if (alloc_obj_size < sizeof(struct sos_kslab_free_object))
-    alloc_obj_size = sizeof(struct sos_kslab_free_object);
   
-  /* Align obj_size on 4 bytes */
-  alloc_obj_size = SOS_ALIGN_SUP(alloc_obj_size, sizeof(int));
-
+  if (compute_nb_pages_per_slab(obj_size,
+                            align,
+                            cache_flags,
+                            constructor,
+                            &pages_per_slab,
+                            &alloc_obj_size) != SOS_OK)
+    return -SOS_EINVAL;
+  
   /* Make sure supplied number of pages per slab is consistent with
      actual allocated object size */
   if (alloc_obj_size > pages_per_slab*SOS_PAGE_SIZE)
@@ -141,6 +204,17 @@ cache_initialize(/*out*/struct sos_kslab
   the_cache->alloc_obj_size    = alloc_obj_size;
   the_cache->min_free_objects  = min_free_objs;
   the_cache->nb_pages_per_slab = pages_per_slab;
+  the_cache->constructor       = constructor;
+  the_cache->destructor        = destructor;
+  
+  /* One page per slab => the free list is stored directly inside
+     the slab's buffers via struct sos_kslab_free_object */
+  if (pages_per_slab == 1)
+    the_cache->flags |= ON_ONE_PAGE;
+  
+  /* Free-list links stored inside the buffers => store the slab structure on the slab too */
+  if (the_cache->flags & ON_ONE_PAGE)
+    the_cache->flags |= ON_SLAB;
   
   /* Small size objets => the slab structure is allocated directly in
      the slab */
@@ -152,7 +226,7 @@ cache_initialize(/*out*/struct sos_kslab
    * have been allocated in the slab
    */
   space_left = the_cache->nb_pages_per_slab*SOS_PAGE_SIZE;
-  if(the_cache->flags & ON_SLAB)
+  if (the_cache->flags & ON_SLAB)
     space_left -= sizeof(struct sos_kslab);
   the_cache->nb_objects_per_slab = space_left / alloc_obj_size;
   space_left -= the_cache->nb_objects_per_slab*alloc_obj_size;
@@ -164,8 +238,12 @@ cache_initialize(/*out*/struct sos_kslab
 
   /* If there is now enough place for both the objects and the slab
      structure, then make the slab structure ON_SLAB */
-  if (space_left >= sizeof(struct sos_kslab))
+  if (((the_cache->flags & ON_SLAB) == 0)
+      && space_left >= sizeof(struct sos_kslab)) {
     the_cache->flags |= ON_SLAB;
+    space_left -= sizeof(struct sos_kslab);
+  }
+  the_cache->space_left = space_left;
 
   return SOS_OK;
 }
@@ -187,26 +265,58 @@ cache_add_slab(struct sos_kslab_cache *k
   slab->first_object = vaddr_slab;
 
   /* Account for this new slab in the cache */
-  slab->nb_free = kslab_cache->nb_objects_per_slab;
-  kslab_cache->nb_free_objects += slab->nb_free;
-
+  kslab_cache->nb_free_objects += kslab_cache->nb_objects_per_slab;
+  
+  /* Compute slab color */
+  slab->slab_color = kslab_cache->cache_color;
+  
+  /* Increment cache color for next slab */
+  kslab_cache->cache_color = (kslab_cache->cache_color + 8);
+  if (kslab_cache->cache_color > kslab_cache->space_left)
+    kslab_cache->cache_color = 0;
+  
   /* Build the list of free objects */
   for (i = 0 ; i <  kslab_cache->nb_objects_per_slab ; i++)
-    {
-      sos_vaddr_t obj_vaddr;
-
-      /* Set object's address */
-      obj_vaddr = slab->first_object + i*kslab_cache->alloc_obj_size;
-
-      /* Add it to the list of free objects */
-      list_add_tail(slab->free,
-		    (struct sos_kslab_free_object *)obj_vaddr);
+    { 
+      sos_vaddr_t obj_vaddr = slab->first_object +
+                              slab->slab_color +
+                              i*kslab_cache->alloc_obj_size;
+      if (kslab_cache->constructor != NULL)
+        kslab_cache->constructor(obj_vaddr, kslab_cache->original_obj_size);
+      
+      if (kslab_cache->flags & ON_ONE_PAGE) {
+        struct sos_kslab_free_object *free_obj = (struct sos_kslab_free_object *)
+                    (slab->first_object +
+                    slab->slab_color +
+                    (i+1)*kslab_cache->alloc_obj_size -
+                    sizeof(struct sos_kslab_free_object));
+        
+        /* Add it to the list of free objects */
+        free_obj->next = slab->free.free_obj;
+        slab->free.free_obj = free_obj;
+      } else {
+        struct sos_kslab_bufctl *bufctl = (struct sos_kslab_bufctl *)
+                sos_kmem_cache_alloc(cache_of_struct_kbufctl, 0);
+        if (! bufctl)
+          return -SOS_ENOMEM;
+        bufctl->slab = slab;
+        /* Set object's address */
+        bufctl->buf_vaddr = obj_vaddr;
+        
+        /* Add it to the list of free objects */
+        bufctl->next = slab->free.bufctl;
+        slab->free.bufctl = bufctl;
+      }
     }
-
-  /* Add the slab to the cache's slab list: add the head of the list
-     since this slab is non full */
-  list_add_head(kslab_cache->slab_list, slab);
-
+  
+  /* Add the slab at the tail of the cache's slab list, since this
+     slab is empty (all of its objects are free) */
+  list_add_tail(kslab_cache->slab_list, slab);
+  /* Make first_not_empty_slab point to this slab if it is unset or currently points to a full slab */
+  if ((kslab_cache->first_not_empty_slab == NULL)
+      || (kslab_cache->first_not_empty_slab->free.free_obj == NULL))
+    kslab_cache->first_not_empty_slab = slab;
+    
   return SOS_OK;
 }
 
@@ -220,9 +330,10 @@ cache_grow(struct sos_kslab_cache *kslab
 
   struct sos_kmem_range *new_range;
   sos_vaddr_t new_range_start;
+  sos_vaddr_t slab_vaddr;
 
   struct sos_kslab *new_slab;
-
+  
   /*
    * Setup the flags for the range allocation
    */
@@ -236,45 +347,42 @@ cache_grow(struct sos_kslab_cache *kslab
   if (kslab_cache->flags & (SOS_KSLAB_CREATE_MAP
 			   | SOS_KSLAB_CREATE_ZERO))
     range_alloc_flags |= SOS_KMEM_VMM_MAP;
-
+  
   /* Allocate the range */
   new_range = sos_kmem_vmm_new_range(kslab_cache->nb_pages_per_slab,
 				     range_alloc_flags,
 				     & new_range_start);
   if (! new_range)
     return -SOS_ENOMEM;
-
+  
   /* Allocate the slab structure */
   if (kslab_cache->flags & ON_SLAB)
     {
       /* Slab structure is ON the slab: simply set its address to the
-	 end of the range */
-      sos_vaddr_t slab_vaddr
-	= new_range_start + kslab_cache->nb_pages_per_slab*SOS_PAGE_SIZE
-	  - sizeof(struct sos_kslab);
-      new_slab = (struct sos_kslab*)slab_vaddr;
+        end of the range */
+      slab_vaddr
+        = new_range_start + kslab_cache->nb_pages_per_slab*SOS_PAGE_SIZE
+          - sizeof(struct sos_kslab);
     }
   else
     {
       /* Slab structure is OFF the slab: allocate it from the cache of
-	 slab structures */
-      sos_vaddr_t slab_vaddr
-	= sos_kmem_cache_alloc(cache_of_struct_kslab,
-			       alloc_flags);
+        slab structures */
+      slab_vaddr = sos_kmem_cache_alloc(cache_of_struct_kslab, alloc_flags);
       if (! slab_vaddr)
-	{
-	  sos_kmem_vmm_del_range(new_range);
-	  return -SOS_ENOMEM;
-	}
-      new_slab = (struct sos_kslab*)slab_vaddr;
+        {
+           sos_kmem_vmm_del_range(new_range);
+           return -SOS_ENOMEM;
+        }
     }
-
+  new_slab = (struct sos_kslab*)slab_vaddr;
+  
   cache_add_slab(kslab_cache, new_range_start, new_slab);
   new_slab->range = new_range;
 
   /* Set the backlink from range to this slab */
   sos_kmem_vmm_set_slab(new_range, new_slab);
-
+  
   return SOS_OK;
 }
 
@@ -282,50 +390,71 @@ cache_grow(struct sos_kslab_cache *kslab
 /**
  * Helper function to release a slab
  *
- * The corresponding range is always deleted, except when the @param
- * must_del_range_now is not set. This happens only when the function
- * gets called from sos_kmem_cache_release_struct_range(), to avoid
- * large recursions.
+ * The corresponding range is always deleted.
  */
 static sos_ret_t
-cache_release_slab(struct sos_kslab *slab,
-		   sos_bool_t must_del_range_now)
+cache_release_slab(struct sos_kslab *slab)
 {
   struct sos_kslab_cache *kslab_cache = slab->cache;
   struct sos_kmem_range *range = slab->range;
 
   SOS_ASSERT_FATAL(kslab_cache != NULL);
   SOS_ASSERT_FATAL(range != NULL);
-  SOS_ASSERT_FATAL(slab->nb_free == slab->cache->nb_objects_per_slab);
-
+  SOS_ASSERT_FATAL(slab->nb_used == 0);
+  
   /* First, remove the slab from the slabs' list of the cache */
+  if (kslab_cache->first_not_empty_slab == slab)
+    kslab_cache->first_not_empty_slab = NULL;
   list_delete(kslab_cache->slab_list, slab);
-  slab->cache->nb_free_objects -= slab->nb_free;
-
+  kslab_cache->nb_free_objects -= kslab_cache->nb_objects_per_slab;
+  
+  /* Release the bufctl structures if they live off-slab, and run the destructor on the objects if one was provided */
+  if (! (kslab_cache->flags & ON_ONE_PAGE)) {
+    struct sos_kslab_bufctl *bufctl;
+    while (slab->free.bufctl) {
+      bufctl = slab->free.bufctl;
+      slab->free.bufctl = slab->free.bufctl->next;
+      
+      if (kslab_cache->destructor != NULL)
+        kslab_cache->destructor(bufctl->buf_vaddr, kslab_cache->original_obj_size);
+      
+      sos_kmem_cache_free((sos_vaddr_t) bufctl);
+    }
+  } else {
+    if (kslab_cache->destructor != NULL) {
+      struct sos_kslab_free_object *free_obj;
+      while (slab->free.free_obj) {
+        free_obj = slab->free.free_obj;
+        slab->free.free_obj = slab->free.free_obj->next;
+        
+        kslab_cache->destructor(((unsigned int) free_obj
+                                - kslab_cache->alloc_obj_size
+                                + sizeof(struct sos_kslab_free_object)),
+                                kslab_cache->original_obj_size);
+      }
+    }
+  }
+  
   /* Release the slab structure if it is OFF slab */
-  if (! (slab->cache->flags & ON_SLAB))
+  if (! (kslab_cache->flags & ON_SLAB))
     sos_kmem_cache_free((sos_vaddr_t)slab);
 
   /* Ok, the range is not bound to any slab anymore */
   sos_kmem_vmm_set_slab(range, NULL);
-
-  /* Always delete the range now, unless we are told not to do so (see
-     sos_kmem_cache_release_struct_range() below) */
-  if (must_del_range_now)
-    return sos_kmem_vmm_del_range(range);
-
+  
+  /* Always delete the range now */
+  return sos_kmem_vmm_del_range(range);
+  
   return SOS_OK;
 }
 
-
 /**
  * Helper function to create the initial cache of caches, with a very
  * first slab in it, so that new cache structures can be simply allocated.
  * @return the cache structure for the cache of caches
  */
 static struct sos_kslab_cache *
-create_cache_of_caches(sos_vaddr_t vaddr_first_slab_of_caches,
-		       int nb_pages)
+create_cache_of_caches(sos_vaddr_t vaddr_first_slab_of_caches, int nb_pages)
 {
   /* The preliminary cache structure we need in order to allocate the
      first slab in the cache of caches (allocated on the stack !) */
@@ -336,11 +465,12 @@ create_cache_of_caches(sos_vaddr_t vaddr
 
   /* The kslab structure for this very first slab */
   struct sos_kslab       *slab_of_caches;
-
+  
   /* Init the cache structure for the cache of caches */
   if (cache_initialize(& fake_cache_of_caches,
-		       "Caches", sizeof(struct sos_kslab_cache),
-		       nb_pages, 0, SOS_KSLAB_CREATE_MAP | ON_SLAB))
+		       "Caches", sizeof(struct sos_kslab_cache), ALIGN_DEFAULT,
+		       0, SOS_KSLAB_CREATE_MAP | ON_SLAB,
+                       NULL, NULL))
     /* Something wrong with the parameters */
     return NULL;
 
@@ -350,27 +480,26 @@ create_cache_of_caches(sos_vaddr_t vaddr
   slab_of_caches = (struct sos_kslab*)(vaddr_first_slab_of_caches
 				       + nb_pages*SOS_PAGE_SIZE
 				       - sizeof(struct sos_kslab));
-
+  
   /* Add the abovementioned 1st slab to the cache of caches */
   cache_add_slab(& fake_cache_of_caches,
 		 vaddr_first_slab_of_caches,
 		 slab_of_caches);
-
+  
   /* Now we allocate a cache structure, which will be the real cache
      of caches, ie a cache structure allocated INSIDE the cache of
      caches, not inside the stack */
   real_cache_of_caches
-    = (struct sos_kslab_cache*) sos_kmem_cache_alloc(& fake_cache_of_caches,
-						     0);
+    = (struct sos_kslab_cache*) sos_kmem_cache_alloc(& fake_cache_of_caches, 0);
+  
   /* We initialize it */
-  memcpy(real_cache_of_caches, & fake_cache_of_caches,
-	 sizeof(struct sos_kslab_cache));
+  memcpy(real_cache_of_caches, & fake_cache_of_caches, sizeof(struct sos_kslab_cache));
   /* We need to update the slab's 'cache' field */
   slab_of_caches->cache = real_cache_of_caches;
   
   /* Add the cache to the list of slab caches */
   list_add_tail(kslab_cache_list, real_cache_of_caches);
-
+  
   return real_cache_of_caches;
 }
 
@@ -402,13 +531,14 @@ create_cache_of_ranges(sos_vaddr_t vaddr
      per slab = 2 !!! */
   if (cache_initialize(cache_of_ranges,
 		       "struct kmem_range",
-		       sizeof_struct_range,
-		       nb_pages, 2, SOS_KSLAB_CREATE_MAP | ON_SLAB))
+		       sizeof_struct_range, ALIGN_DEFAULT,
+		       2, SOS_KSLAB_CREATE_MAP | ON_SLAB,
+           NULL, NULL))
     /* Something wrong with the parameters */
     return NULL;
 
   /* Add the cache to the list of slab caches */
-  list_add_tail(kslab_cache_list, cache_of_ranges);
+  list_add_head(kslab_cache_list, cache_of_ranges);
 
   /*
    * Add the first slab for this cache
@@ -428,6 +558,21 @@ create_cache_of_ranges(sos_vaddr_t vaddr
 }
 
 
+static void slab_constructor(sos_vaddr_t buf, sos_size_t size_of_buf) {
+  struct sos_kslab *slab = (struct sos_kslab *) buf;
+  
+  slab->nb_used = 0;
+  slab->cache = NULL;
+}
+
+static void slab_destructor(sos_vaddr_t buf, sos_size_t size_of_buf) {
+  struct sos_kslab *slab = (struct sos_kslab *) buf;
+  
+  SOS_ASSERT_FATAL(slab->cache == NULL);
+  SOS_ASSERT_FATAL(slab->nb_used == 0);
+}
+
+
 struct sos_kslab_cache *
 sos_kmem_cache_setup_prepare(sos_vaddr_t kernel_core_base,
 			     sos_vaddr_t kernel_core_top,
@@ -443,30 +588,37 @@ sos_kmem_cache_setup_prepare(sos_vaddr_t
   int i;
   sos_ret_t   retval;
   sos_vaddr_t vaddr;
+  sos_count_t pages_per_slab;
+  sos_size_t  alloc_obj_size;
 
   /* The cache of ranges we are about to allocate */
   struct sos_kslab_cache *cache_of_ranges;
 
   /* In the begining, there isn't any cache */
-  kslab_cache_list = NULL;
+  list_init(kslab_cache_list);
+  cache_of_struct_kbufctl = NULL;
   cache_of_struct_kslab = NULL;
   cache_of_struct_kslab_cache = NULL;
-
+  
   /*
    * Create the cache of caches, initialised with 1 allocated slab
    */
-
   /* Allocate the pages needed for the 1st slab of caches, and map them
      in kernel space, right after the kernel */
+  compute_nb_pages_per_slab(sizeof(struct sos_kslab_cache),
+                            ALIGN_DEFAULT,
+                            SOS_KSLAB_CREATE_MAP | ON_SLAB,
+                            NULL,
+                            &pages_per_slab,
+                            &alloc_obj_size);
   *first_slab_of_caches_base = SOS_PAGE_ALIGN_SUP(kernel_core_top);
   for (i = 0, vaddr = *first_slab_of_caches_base ;
-       i < NB_PAGES_IN_SLAB_OF_CACHES ;
+       i < pages_per_slab ;
        i++, vaddr += SOS_PAGE_SIZE)
     {
       sos_paddr_t ppage_paddr;
 
-      ppage_paddr
-	= sos_physmem_ref_physpage_new(FALSE);
+      ppage_paddr = sos_physmem_ref_physpage_new(FALSE);
       SOS_ASSERT_FATAL(ppage_paddr != (sos_paddr_t)NULL);
 
       retval = sos_paging_map(ppage_paddr, vaddr,
@@ -479,32 +631,36 @@ sos_kmem_cache_setup_prepare(sos_vaddr_t
       retval = sos_physmem_unref_physpage(ppage_paddr);
       SOS_ASSERT_FATAL(retval == FALSE);
     }
-
+  
   /* Create the cache of caches */
-  *first_slab_of_caches_nb_pages = NB_PAGES_IN_SLAB_OF_CACHES;
+  *first_slab_of_caches_nb_pages = pages_per_slab;
   cache_of_struct_kslab_cache
     = create_cache_of_caches(*first_slab_of_caches_base,
-			     NB_PAGES_IN_SLAB_OF_CACHES);
+			     pages_per_slab);
   SOS_ASSERT_FATAL(cache_of_struct_kslab_cache != NULL);
 
   /* Retrieve the slab that should have been allocated */
   *first_struct_slab_of_caches
     = list_get_head(cache_of_struct_kslab_cache->slab_list);
-
   
   /*
    * Create the cache of ranges, initialised with 1 allocated slab
    */
+  compute_nb_pages_per_slab(sizeof_struct_range,
+                            ALIGN_DEFAULT,
+                            SOS_KSLAB_CREATE_MAP | ON_SLAB,
+                            NULL,
+                            &pages_per_slab,
+                            &alloc_obj_size);
   *first_slab_of_ranges_base = vaddr;
   /* Allocate the 1st slab */
   for (i = 0, vaddr = *first_slab_of_ranges_base ;
-       i < NB_PAGES_IN_SLAB_OF_RANGES ;
+       i < pages_per_slab ;
        i++, vaddr += SOS_PAGE_SIZE)
     {
       sos_paddr_t ppage_paddr;
 
-      ppage_paddr
-	= sos_physmem_ref_physpage_new(FALSE);
+      ppage_paddr = sos_physmem_ref_physpage_new(FALSE);
       SOS_ASSERT_FATAL(ppage_paddr != (sos_paddr_t)NULL);
 
       retval = sos_paging_map(ppage_paddr, vaddr,
@@ -517,29 +673,30 @@ sos_kmem_cache_setup_prepare(sos_vaddr_t
       retval = sos_physmem_unref_physpage(ppage_paddr);
       SOS_ASSERT_FATAL(retval == FALSE);
     }
-
+  
   /* Create the cache of ranges */
-  *first_slab_of_ranges_nb_pages = NB_PAGES_IN_SLAB_OF_RANGES;
+  *first_slab_of_ranges_nb_pages = pages_per_slab;
   cache_of_ranges = create_cache_of_ranges(*first_slab_of_ranges_base,
 					   sizeof_struct_range,
-					   NB_PAGES_IN_SLAB_OF_RANGES);
+					   pages_per_slab);
   SOS_ASSERT_FATAL(cache_of_ranges != NULL);
-
+  
   /* Retrieve the slab that should have been allocated */
   *first_struct_slab_of_ranges
     = list_get_head(cache_of_ranges->slab_list);
-
+  
   /*
-   * Create the cache of slabs, without any allocated slab yet
+   * Create the cache of slab structures, without any allocated slab yet
    */
   cache_of_struct_kslab
     = sos_kmem_cache_create("off-slab slab structures",
 			    sizeof(struct sos_kslab),
-			    1,
+                            ALIGN_DEFAULT,
 			    0,
-			    SOS_KSLAB_CREATE_MAP);
+			    SOS_KSLAB_CREATE_MAP | ON_SLAB,
+                            slab_constructor, slab_destructor);
   SOS_ASSERT_FATAL(cache_of_struct_kslab != NULL);
-
+  
   return cache_of_ranges;
 }
 
@@ -552,6 +709,19 @@ sos_kmem_cache_setup_commit(struct sos_k
 {
   first_struct_slab_of_caches->range = first_range_of_caches;
   first_struct_slab_of_ranges->range = first_range_of_ranges;
+  
+  /*
+   * Create the cache of bufctl structures, initialised with 1 allocated slab
+   */
+  cache_of_struct_kbufctl
+    = sos_kmem_cache_create("off-slab bufctl structures",
+			    sizeof(struct sos_kslab_bufctl),
+                            ALIGN_DEFAULT,
+			    1,
+			    SOS_KSLAB_CREATE_MAP,
+                            NULL, NULL);
+  SOS_ASSERT_FATAL(cache_of_struct_kbufctl != NULL);
+  
   return SOS_OK;
 }
 
@@ -559,28 +729,30 @@ sos_kmem_cache_setup_commit(struct sos_k
 struct sos_kslab_cache *
 sos_kmem_cache_create(const char* name,
 		      sos_size_t  obj_size,
-		      sos_count_t pages_per_slab,
+                      sos_count_t align,
 		      sos_count_t min_free_objs,
-		      sos_ui32_t  cache_flags)
+		      sos_ui32_t  cache_flags,
+                      sos_constructor_handler_t constructor,
+                      sos_constructor_handler_t destructor)
 {
   struct sos_kslab_cache *new_cache;
-
+  
   /* Allocate the new cache */
   new_cache = (struct sos_kslab_cache*)
     sos_kmem_cache_alloc(cache_of_struct_kslab_cache,
 			 0/* NOT ATOMIC */);
   if (! new_cache)
     return NULL;
-
-  if (cache_initialize(new_cache, name, obj_size,
-		       pages_per_slab, min_free_objs,
-		       cache_flags))
+  
+  if (cache_initialize(new_cache, name, obj_size, align,
+		       min_free_objs, cache_flags,
+                       constructor, destructor))
     {
       /* Something was wrong */
       sos_kmem_cache_free((sos_vaddr_t)new_cache);
       return NULL;
     }
-
+  
   /* Add the cache to the list of slab caches */
   list_add_tail(kslab_cache_list, new_cache);
   
@@ -588,12 +760,12 @@ sos_kmem_cache_create(const char* name,
   if (min_free_objs)
     {
       if (cache_grow(new_cache, 0 /* Not atomic */) != SOS_OK)
-	{
-	  sos_kmem_cache_destroy(new_cache);
-	  return NULL; /* Not enough memory */
-	}
+        {
+          sos_kmem_cache_destroy(new_cache);
+          return NULL; /* Not enough memory */
+        }
     }
-
+  
   return new_cache;  
 }
 
@@ -605,21 +777,25 @@ sos_ret_t sos_kmem_cache_destroy(struct 
 
   if (! kslab_cache)
     return -SOS_EINVAL;
-
+  
   /* Refuse to destroy the cache if there are any objects still
      allocated */
   list_foreach(kslab_cache->slab_list, slab, nb_slabs)
     {
-      if (slab->nb_free != kslab_cache->nb_objects_per_slab)
-	return -SOS_EBUSY;
+      if (slab->nb_used != 0)
+        return -SOS_EBUSY;
     }
-
+  
   /* Remove all the slabs */
-  while ((slab = list_get_head(kslab_cache->slab_list)) != NULL)
+  while(kslab_cache->slab_list)
     {
-      cache_release_slab(slab, TRUE);
+      slab = kslab_cache->slab_list;
+      cache_release_slab(slab);
     }
-
+  
+  /* Remove the cache from the list of slab caches */
+  list_delete(kslab_cache_list, kslab_cache);
+  
   /* Remove the cache */
   return sos_kmem_cache_free((sos_vaddr_t)kslab_cache);
 }
@@ -631,41 +807,55 @@ sos_vaddr_t sos_kmem_cache_alloc(struct 
   sos_vaddr_t obj_vaddr;
   struct sos_kslab * slab_head;
 #define ALLOC_RET return
-
-  /* If the slab at the head of the slabs' list has no free object,
-     then the other slabs don't either => need to allocate a new
-     slab */
-  if ((! kslab_cache->slab_list)
-      || (! list_get_head(kslab_cache->slab_list)->free))
+  
+  /* If the cache has no free object left, we need to allocate a new slab */
+  if (kslab_cache->nb_free_objects == 0)
     {
       if (cache_grow(kslab_cache, alloc_flags) != SOS_OK)
-	/* Not enough memory or blocking alloc */
-	ALLOC_RET( (sos_vaddr_t)NULL);
+        /* Not enough memory or blocking alloc */
+        ALLOC_RET( (sos_vaddr_t)NULL);
     }
-
-  /* Here: we are sure that list_get_head(kslab_cache->slab_list)
-     exists *AND* that list_get_head(kslab_cache->slab_list)->free is
-     NOT NULL */
-  slab_head = list_get_head(kslab_cache->slab_list);
+  
+  /* Here: we are sure that kslab_cache->first_not_empty_slab
+     exists *AND* that kslab_cache->first_not_empty_slab->free is NOT NULL */
+  slab_head = kslab_cache->first_not_empty_slab;
   SOS_ASSERT_FATAL(slab_head != NULL);
-
+  SOS_ASSERT_FATAL(slab_head->free.free_obj != NULL);
+  
   /* Allocate the object at the head of the slab at the head of the
      slabs' list */
-  obj_vaddr = (sos_vaddr_t)list_pop_head(slab_head->free);
-  slab_head->nb_free --;
-  kslab_cache->nb_free_objects --;
-
+  if (kslab_cache->flags & ON_ONE_PAGE) {
+    struct sos_kslab_free_object *free_obj;
+    
+    free_obj = slab_head->free.free_obj;
+    slab_head->free.free_obj = free_obj->next;
+    
+    obj_vaddr = ((unsigned int) free_obj
+                - kslab_cache->alloc_obj_size
+                + sizeof(struct sos_kslab_free_object));
+  } else {
+    struct sos_kslab_bufctl *bufctl;
+    
+    bufctl = slab_head->free.bufctl;
+    slab_head->free.bufctl = bufctl->next;
+    obj_vaddr = bufctl->buf_vaddr;
+    sos_kmem_cache_free((sos_vaddr_t) bufctl);
+  }
+ 
+  slab_head->nb_used++;
+  kslab_cache->nb_free_objects--;
+  
   /* If needed, reset object's contents */
   if (kslab_cache->flags & SOS_KSLAB_CREATE_ZERO)
     memset((void*)obj_vaddr, 0x0, kslab_cache->alloc_obj_size);
-
+  
   /* Slab is now full ? */
-  if (slab_head->free == NULL)
+  if (slab_head->nb_used == kslab_cache->nb_objects_per_slab)
     {
-      /* Transfer it at the tail of the slabs' list */
-      struct sos_kslab *slab;
-      slab = list_pop_head(kslab_cache->slab_list);
-      list_add_tail(kslab_cache->slab_list, slab);
+      /* Transfer it to the head of the slabs' list */
+      kslab_cache->first_not_empty_slab = kslab_cache->first_not_empty_slab->next;
+      list_delete(kslab_cache->slab_list, slab_head);
+      list_add_head(kslab_cache->slab_list, slab_head);
     }
   
   /*
@@ -687,37 +877,25 @@ sos_vaddr_t sos_kmem_cache_alloc(struct 
     {
       /* No: allocate a new slab now */
       if (cache_grow(kslab_cache, alloc_flags) != SOS_OK)
-	{
-	  /* Not enough free memory or blocking alloc => undo the
-	     allocation */
-	  sos_kmem_cache_free(obj_vaddr);
-	  ALLOC_RET( (sos_vaddr_t)NULL);
-	}
+      {
+        /* Not enough free memory or blocking alloc => undo the
+           allocation */
+        sos_kmem_cache_free(obj_vaddr);
+        ALLOC_RET( (sos_vaddr_t)NULL);
+      }
     }
-
+  
   ALLOC_RET(obj_vaddr);
 }
 
 
-/**
- * Helper function to free the object located at the given address.
- *
- * @param empty_slab is the address of the slab to release, if removing
- * the object causes the slab to become empty.
- */
-inline static
-sos_ret_t
-free_object(sos_vaddr_t vaddr,
-	    struct sos_kslab ** empty_slab)
+sos_ret_t sos_kmem_cache_free(sos_vaddr_t vaddr)
 {
   struct sos_kslab_cache *kslab_cache;
 
   /* Lookup the slab containing the object in the slabs' list */
   struct sos_kslab *slab = sos_kmem_vmm_resolve_slab(vaddr);
 
-  /* By default, consider that the slab will not become empty */
-  *empty_slab = NULL;
-
   /* Did not find the slab */
   if (! slab)
     return -SOS_EINVAL;
@@ -730,83 +908,92 @@ free_object(sos_vaddr_t vaddr,
    * allocated object
    */
   /* Address multiple of an object's size ? */
-  if (( (vaddr - slab->first_object)
+  if (( (vaddr - slab->first_object - slab->slab_color)
 	% kslab_cache->alloc_obj_size) != 0)
     return -SOS_EINVAL;
   /* Address not too large ? */
-  if (( (vaddr - slab->first_object)
+  if (( (vaddr - slab->first_object - slab->slab_color)
 	/ kslab_cache->alloc_obj_size) >= kslab_cache->nb_objects_per_slab)
     return -SOS_EINVAL;
 
   /*
    * Ok: we now release the object
    */
-
+  
   /* Did find a full slab => will not be full any more => move it
-     to the head of the slabs' list */
-  if (! slab->free)
+     just before the first slab of the list that still has free objects */
+  if (slab->free.free_obj == NULL)
     {
       list_delete(kslab_cache->slab_list, slab);
-      list_add_head(kslab_cache->slab_list, slab);
+      if ((kslab_cache->first_not_empty_slab == NULL) || (kslab_cache->first_not_empty_slab->free.free_obj == NULL))
+        list_add_tail(kslab_cache->slab_list, slab);
+      else
+        list_insert_before(kslab_cache->slab_list, kslab_cache->first_not_empty_slab, slab);
+      kslab_cache->first_not_empty_slab = slab;
     }
-
+  
   /* Release the object */
-  list_add_head(slab->free, (struct sos_kslab_free_object*)vaddr);
-  slab->nb_free++;
+  if (kslab_cache->flags & ON_ONE_PAGE) {
+    struct sos_kslab_free_object *free_obj = (struct sos_kslab_free_object *)
+        (vaddr + kslab_cache->alloc_obj_size - sizeof(struct sos_kslab_free_object));
+    
+    /* Add it to the list of free objects */
+    free_obj->next = slab->free.free_obj;
+    slab->free.free_obj = free_obj;
+  } else {
+    struct sos_kslab_bufctl *bufctl = (struct sos_kslab_bufctl *)
+        sos_kmem_cache_alloc(cache_of_struct_kbufctl, 0);
+    if (! bufctl)
+      return -SOS_ENOMEM;
+    
+    bufctl->buf_vaddr = vaddr;
+    bufctl->slab = slab;
+    
+    /* Add it to the list of free objects */
+    bufctl->next = slab->free.bufctl;
+    slab->free.bufctl = bufctl;
+  }
+  SOS_ASSERT_FATAL(slab->free.free_obj != NULL);
+  
+  slab->nb_used--;
   kslab_cache->nb_free_objects++;
-  SOS_ASSERT_FATAL(slab->nb_free <= slab->cache->nb_objects_per_slab);
-
-  /* Cause the slab to be released if it becomes empty, and if we are
-     allowed to do it */
-  if ((slab->nb_free >= kslab_cache->nb_objects_per_slab)
-      && (kslab_cache->nb_free_objects - slab->nb_free
-	  >= kslab_cache->min_free_objects))
+  SOS_ASSERT_FATAL(slab->nb_used >= 0);
+  
+  /* Move the slab to the tail of the slab list if it becomes empty */
+  if (slab->nb_used == 0)
     {
-      *empty_slab = slab;
+      if (kslab_cache->first_not_empty_slab == slab)
+        kslab_cache->first_not_empty_slab = kslab_cache->first_not_empty_slab->next;
+      list_delete(kslab_cache->slab_list, slab);
+      list_add_tail(kslab_cache->slab_list, slab);
+      if ((kslab_cache->first_not_empty_slab == NULL) || (kslab_cache->first_not_empty_slab->free.free_obj == NULL))
+        kslab_cache->first_not_empty_slab = slab;
     }
 
   return SOS_OK;
 }
 
 
-sos_ret_t sos_kmem_cache_free(sos_vaddr_t vaddr)
-{
-  sos_ret_t retval;
-  struct sos_kslab *empty_slab;
-
-  /* Remove the object from the slab */
-  retval = free_object(vaddr, & empty_slab);
-  if (retval != SOS_OK)
-    return retval;
-
-  /* Remove the slab and the underlying range if needed */
-  if (empty_slab != NULL)
-    return cache_release_slab(empty_slab, TRUE);
-
-  return SOS_OK;
-}
-
-
-struct sos_kmem_range *
-sos_kmem_cache_release_struct_range(struct sos_kmem_range *the_range)
-{
-  sos_ret_t retval;
-  struct sos_kslab *empty_slab;
-
-  /* Remove the object from the slab */
-  retval = free_object((sos_vaddr_t)the_range, & empty_slab);
-  if (retval != SOS_OK)
-    return NULL;
-
-  /* Remove the slab BUT NOT the underlying range if needed */
-  if (empty_slab != NULL)
-    {
-      struct sos_kmem_range *empty_range = empty_slab->range;
-      SOS_ASSERT_FATAL(cache_release_slab(empty_slab, FALSE) == SOS_OK);
-      SOS_ASSERT_FATAL(empty_range != NULL);
-      return empty_range;
+sos_ret_t sos_kmem_cache_reap() {
+  struct sos_kslab_cache *kslab_cache;
+  struct sos_kslab *slab;
+  sos_count_t nb_cache;
+  sos_ret_t retval = -SOS_ENOMEM;
+  
+  /* For each cache, release the empty slabs at the tail of its slab list, keeping at least min_free_objects free objects */
+  list_foreach(kslab_cache_list, kslab_cache, nb_cache) {
+    while (kslab_cache->slab_list) {
+      slab = kslab_cache->slab_list->prev;
+      if ((slab->nb_used == 0)
+         && (kslab_cache->nb_free_objects - kslab_cache->nb_objects_per_slab
+              >= kslab_cache->min_free_objects)) {
+        retval = cache_release_slab(slab);
+      } else {
+        break;
+      }
     }
-
-  return NULL;
+  }
+  
+  return retval;
 }
 
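
Two address computations recur throughout the patched kmem_slab.c: the i-th object of a slab starts at first_object + slab_color + i*alloc_obj_size, and, for ON_ONE_PAGE caches, the free-list link lives in the last sizeof(struct sos_kslab_free_object) bytes of each buffer (extra room is reserved when a constructor is given, so the constructed object is not overwritten). A small standalone sketch of that arithmetic follows; uintptr_t stands in for sos_vaddr_t, and struct free_link only mimics struct sos_kslab_free_object.

#include <stddef.h>
#include <stdint.h>

struct free_link { struct free_link *next; };   /* mimics struct sos_kslab_free_object */

/* Address of object i in a colored slab */
static uintptr_t object_addr(uintptr_t first_object, uintptr_t slab_color,
                             size_t alloc_obj_size, unsigned i)
{
  return first_object + slab_color + (uintptr_t)i * alloc_obj_size;
}

/* Free-list link stored at the end of the buffer (used by cache_add_slab
   and sos_kmem_cache_free in the patch)... */
static struct free_link *link_of_object(uintptr_t obj_vaddr, size_t alloc_obj_size)
{
  return (struct free_link *)(obj_vaddr + alloc_obj_size - sizeof(struct free_link));
}

/* ...and the inverse mapping used by sos_kmem_cache_alloc */
static uintptr_t object_of_link(struct free_link *link, size_t alloc_obj_size)
{
  return (uintptr_t)link - alloc_obj_size + sizeof(struct free_link);
}
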
diff -Npur sos-code-article5/sos/kmem_slab.h sos-code-article5-modif/sos/kmem_slab.h
--- sos-code-article5/sos/kmem_slab.h	2004-12-18 21:01:54.000000000 +0100
+++ sos-code-article5-modif/sos/kmem_slab.h	2005-10-01 20:06:46.000000000 +0200
@@ -40,17 +40,6 @@
  * range allocation before being urged to allocate a new slab of
  * ranges, which would require the allocation of a new range.
  *
- * Compared to Bonwick's recommendations, we don't handle ctor/dtor
- * routines on the objects, so that we can alter the objects once they
- * are set free. Thus, the list of free object is stored in the free
- * objects themselves, not alongside the objects (this also implies that
- * the SOS_KSLAB_CREATE_MAP flag below is meaningless). We also don't
- * implement the cache colouring (trivial to add, but we omit it for
- * readability reasons), and the only alignment constraint we respect
- * is that allocated objects are aligned on a 4B boundary: for other
- * alignment constraints, the user must integrate them in the
- * "object_size" parameter to "sos_kmem_cache_create()".
- *
  * References :
  * - J. Bonwick's paper, "The slab allocator: An object-caching kernel
  *   memory allocator", In USENIX Summer 1994 Technical Conference
@@ -76,6 +65,8 @@ struct sos_kslab;
 /** The maximum  allowed pages for each slab */
 #define MAX_PAGES_PER_SLAB 32 /* 128 kB */
 
+/** The default object alignment (in bytes) */
+#define ALIGN_DEFAULT (sizeof(int))
 
 /**
  * Initialize the slab cache of slab caches, and prepare the cache of
@@ -149,6 +140,14 @@ sos_kmem_cache_setup_commit(struct sos_k
 #define SOS_KSLAB_CREATE_ZERO (1<<1)
 
 /**
+ * This type defines the handler used to construct or destruct objects in a cache.
+ * @param buf address of the buffer to construct or destruct.
+ * @param size_of_buf The constructor and destructor take a size argument so
+ * that they can support families of similar caches.
+ */
+typedef void (*sos_constructor_handler_t)(sos_vaddr_t buf, sos_size_t size_of_buf);
+
+/**
  * @note this function MAY block (involved allocations are not atomic)
  * @param name must remain valid during the whole cache's life
  *             (shallow copy) !
@@ -157,9 +156,11 @@ sos_kmem_cache_setup_commit(struct sos_k
 struct sos_kslab_cache *
 sos_kmem_cache_create(const char* name,
 		      sos_size_t  object_size,
-		      sos_count_t pages_per_slab,
+          sos_count_t align,
 		      sos_count_t min_free_objects,
-		      sos_ui32_t  cache_flags);
+		      sos_ui32_t  cache_flags,
+          sos_constructor_handler_t constructor,
+          sos_constructor_handler_t destructor);
 
 sos_ret_t sos_kmem_cache_destroy(struct sos_kslab_cache *kslab_cache);
 
@@ -187,20 +188,10 @@ sos_ret_t sos_kmem_cache_free(sos_vaddr_
 
 
 /*
- * Function reserved to kmem_vmm.c. Does almost everything
- * sos_kmem_cache_free() does, except it does not call
- * sos_kmem_vmm_del_range() if it needs to. This is aimed at avoiding
- * large recursion when a range is freed with
- * sos_kmem_vmm_del_range().
- *
- * @param the_range The range structure to free
- *
- * @return NULL when the range containing 'the_range' still contains
- * other ranges, or the address of the range which owned 'the_range'
- * if it becomes empty.
+ * Function reserved to kmem_vmm.c.
+ * 
+ * This function releases the completely free (empty) slabs of all the caches.
  */
-struct sos_kmem_range *
-sos_kmem_cache_release_struct_range(struct sos_kmem_range *the_range);
-
+sos_ret_t sos_kmem_cache_reap();
 
 #endif /* _SOS_KMEM_SLAB_H_ */
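
With the new prototype above, a client passes an alignment plus optional constructor/destructor handlers. The following is a hedged usage sketch: only the sos_kmem_cache_create() prototype, ALIGN_DEFAULT, SOS_KSLAB_CREATE_MAP and SOS_ASSERT_FATAL come from SOS; struct foo, foo_ctor, foo_dtor and foo_subsystem_setup are hypothetical names introduced for illustration.

#include "kmem_slab.h"

struct foo { int refcount; sos_vaddr_t buffer; };

/* Put freshly allocated objects in a known state */
static void foo_ctor(sos_vaddr_t buf, sos_size_t size_of_buf)
{
  struct foo *f = (struct foo *)buf;
  f->refcount = 0;
  f->buffer   = (sos_vaddr_t)NULL;
}

/* Objects must come back to the cache in their constructed state */
static void foo_dtor(sos_vaddr_t buf, sos_size_t size_of_buf)
{
  SOS_ASSERT_FATAL(((struct foo *)buf)->refcount == 0);
}

static struct sos_kslab_cache *foo_cache;

static sos_ret_t foo_subsystem_setup(void)
{
  foo_cache = sos_kmem_cache_create("foo objects", sizeof(struct foo),
                                    ALIGN_DEFAULT,
                                    0 /* no minimum reserve of free objects */,
                                    SOS_KSLAB_CREATE_MAP,
                                    foo_ctor, foo_dtor);
  SOS_ASSERT_FATAL(foo_cache != NULL);
  return SOS_OK;
}
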
diff -Npur sos-code-article5/sos/kmem_vmm.c sos-code-article5-modif/sos/kmem_vmm.c
--- sos-code-article5/sos/kmem_vmm.c	2004-12-18 21:01:54.000000000 +0100
+++ sos-code-article5-modif/sos/kmem_vmm.c	2005-10-02 17:31:26.000000000 +0200
@@ -60,7 +60,7 @@ get_closest_preceding_kmem_range(struct 
   list_foreach(the_list, a_range, nb_elements)
     {
       if (vaddr < a_range->base_vaddr)
-	return ret_range;
+        return ret_range;
       ret_range = a_range;
     }
 
@@ -77,13 +77,15 @@ static struct sos_kmem_range *find_suita
 {
   int nb_elements;
   struct sos_kmem_range *r;
-
+  
   list_foreach(kmem_free_range_list, r, nb_elements)
   {
     if (r->nb_pages >= nb_pages)
       return r;
   }
-
+  /* No suitable free range: reap the slab caches so that a later attempt may succeed */
+  sos_kmem_cache_reap();
+  
   return NULL;
 }
 
@@ -193,9 +195,9 @@ create_range(sos_bool_t  is_free,
 	   vaddr < top_vaddr ;
 	   vaddr += SOS_PAGE_SIZE)
       {
-	sos_paddr_t ppage_paddr = sos_paging_get_paddr(vaddr);
-	SOS_ASSERT_FATAL((void*)ppage_paddr != NULL);
-	sos_physmem_set_kmem_range(ppage_paddr, range);
+        sos_paddr_t ppage_paddr = sos_paging_get_paddr(vaddr);
+        SOS_ASSERT_FATAL((void*)ppage_paddr != NULL);
+        sos_physmem_set_kmem_range(ppage_paddr, range);
       }
     }
 
@@ -219,7 +221,7 @@ sos_ret_t sos_kmem_vmm_setup(sos_vaddr_t
 
   list_init(kmem_free_range_list);
   list_init(kmem_used_range_list);
-
+  
   kmem_range_cache
     = sos_kmem_cache_setup_prepare(kernel_core_base,
 				   kernel_core_top,
@@ -302,7 +304,7 @@ sos_ret_t sos_kmem_vmm_setup(sos_vaddr_t
 	       + first_slab_of_ranges_nb_pages*SOS_PAGE_SIZE,
 	       SOS_KMEM_VMM_TOP,
 	       NULL);
-
+  
   /* Update the cache subsystem so that the artificially-created
      caches of caches and ranges really behave like *normal* caches (ie
      those allocated by the normal slab API) */
@@ -325,15 +327,16 @@ struct sos_kmem_range *sos_kmem_vmm_new_
 					      sos_vaddr_t * range_start)
 {
   struct sos_kmem_range *free_range, *new_range;
-
+  sos_ret_t retval;
+  
   if (nb_pages <= 0)
     return NULL;
-
+  
   /* Find a suitable free range to hold the size-sized object */
   free_range = find_suitable_free_range(nb_pages);
   if (free_range == NULL)
     return NULL;
-
+  
   /* If range has exactly the requested size, just move it to the
      "used" list */
   if(free_range->nb_pages == nb_pages)
@@ -352,11 +355,11 @@ struct sos_kmem_range *sos_kmem_vmm_new_
     {
       /* free_range split in { new_range | free_range } */
       new_range = (struct sos_kmem_range*)
-	sos_kmem_cache_alloc(kmem_range_cache,
+        sos_kmem_cache_alloc(kmem_range_cache,
 			     (flags & SOS_KMEM_VMM_ATOMIC)?
 			     SOS_KSLAB_ALLOC_ATOMIC:0);
       if (! new_range)
-	return NULL;
+        return NULL;
 
       new_range->base_vaddr   = free_range->base_vaddr;
       new_range->nb_pages     = nb_pages;
@@ -374,48 +377,54 @@ struct sos_kmem_range *sos_kmem_vmm_new_
 
   /* If mapping of physical pages is needed, map them now */
   if (flags & SOS_KMEM_VMM_MAP)
-    {
+    {  
       int i;
-      for (i = 0 ; i < nb_pages ; i ++)
-	{
-	  /* Get a new physical page */
-	  sos_paddr_t ppage_paddr
-	    = sos_physmem_ref_physpage_new(! (flags & SOS_KMEM_VMM_ATOMIC));
-	  
-	  /* Map the page in kernel space */
-	  if (ppage_paddr)
-	    {
-	      if (sos_paging_map(ppage_paddr,
-				 new_range->base_vaddr
-				   + i * SOS_PAGE_SIZE,
-				 FALSE /* Not a user page */,
-				 ((flags & SOS_KMEM_VMM_ATOMIC)?
-				  SOS_VM_MAP_ATOMIC:0)
-				 | SOS_VM_MAP_PROT_READ
-				 | SOS_VM_MAP_PROT_WRITE))
-		{
-		  /* Failed => force unallocation, see below */
-		  sos_physmem_unref_physpage(ppage_paddr);
-		  ppage_paddr = (sos_paddr_t)NULL;
-		}
-	      else
-		{
-		  /* Success : page can be unreferenced since it is
-		     now mapped */
-		  sos_physmem_unref_physpage(ppage_paddr);
-		}
-	    }
-
-	  /* Undo the allocation if failed to allocate or map a new page */
-	  if (! ppage_paddr)
-	    {
-	      sos_kmem_vmm_del_range(new_range);
-	      return NULL;
-	    }
-
-	  /* Ok, set the range owner for this page */
-	  sos_physmem_set_kmem_range(ppage_paddr, new_range);
-	}
+      for (i = 0 ; i < nb_pages ; i++)
+      {
+        /* Get a new physical page */
+        sos_paddr_t ppage_paddr
+          = sos_physmem_ref_physpage_new(! (flags & SOS_KMEM_VMM_ATOMIC));
+        
+        /* Map the page in kernel space */
+        if (ppage_paddr)
+          {
+            if (sos_paging_map(ppage_paddr,
+                               new_range->base_vaddr
+                                 + i * SOS_PAGE_SIZE,
+                               FALSE /* Not a user page */,
+                               ((flags & SOS_KMEM_VMM_ATOMIC)?
+                                SOS_VM_MAP_ATOMIC:0)
+                               | SOS_VM_MAP_PROT_READ
+                               | SOS_VM_MAP_PROT_WRITE))
+            {
+              /* Failed => force unallocation, see below */
+              sos_physmem_unref_physpage(ppage_paddr);
+              ppage_paddr = (sos_paddr_t)NULL;
+            }
+            else
+            {
+              /* Success : page can be unreferenced since it is
+                 now mapped */
+              sos_physmem_unref_physpage(ppage_paddr);
+            }
+          }
+    
+        /* Failed to get or map a page: reap the slab caches and retry */
+        if (! ppage_paddr)
+          {
+            retval = sos_kmem_cache_reap();
+            if (retval != SOS_OK) {
+              /* Nothing could be reclaimed => undo the whole allocation */
+              sos_kmem_vmm_del_range(new_range);
+              return NULL;
+            }
+            /* Restart this iteration */
+            i--;
+            continue;
+          }
+        /* Ok, set the range owner for this page */
+        sos_physmem_set_kmem_range(ppage_paddr, new_range);
+      }
     }
 
   /* Otherwise we need a correct page fault handler to support
@@ -425,7 +434,7 @@ struct sos_kmem_range *sos_kmem_vmm_new_
 
   if (range_start)
     *range_start = new_range->base_vaddr;
-
+  
   return new_range;
 }
 
@@ -433,84 +442,51 @@ struct sos_kmem_range *sos_kmem_vmm_new_
 sos_ret_t sos_kmem_vmm_del_range(struct sos_kmem_range *range)
 {
   int i;
-  struct sos_kmem_range *ranges_to_free;
-  list_init(ranges_to_free);
 
   SOS_ASSERT_FATAL(range != NULL);
   SOS_ASSERT_FATAL(range->slab == NULL);
-
+  
   /* Remove the range from the 'USED' list now */
   list_delete(kmem_used_range_list, range);
 
-  /*
-   * The following do..while() loop is here to avoid an indirect
-   * recursion: if we call directly kmem_cache_free() from inside the
-   * current function, we take the risk to re-enter the current function
-   * (sos_kmem_vmm_del_range()) again, which may cause problem if it
-   * in turn calls kmem_slab again and sos_kmem_vmm_del_range again,
-   * and again and again. This may happen while freeing ranges of
-   * struct sos_kslab...
-   *
-   * To avoid this,we choose to call a special function of kmem_slab
-   * doing almost the same as sos_kmem_cache_free(), but which does
-   * NOT call us (ie sos_kmem_vmm_del_range()): instead WE add the
-   * range that is to be freed to a list, and the do..while() loop is
-   * here to process this list ! The recursion is replaced by
-   * classical iterations.
-   */
-  do
-    {
-      /* Ok, we got the range. Now, insert this range in the free list */
-      kmem_free_range_list = insert_range(kmem_free_range_list, range);
-
-      /* Unmap the physical pages */
-      for (i = 0 ; i < range->nb_pages ; i ++)
-	{
-	  /* This will work even if no page is mapped at this address */
-	  sos_paging_unmap(range->base_vaddr + i*SOS_PAGE_SIZE);
-	}
+  /* Ok, we got the range. Now, insert this range in the free list */
+  kmem_free_range_list = insert_range(kmem_free_range_list, range);
+  
+  /* Unmap the physical pages */
+  for (i = 0 ; i < range->nb_pages ; i ++)
+  {
+    /* This will work even if no page is mapped at this address */
+    sos_paging_unmap(range->base_vaddr + i*SOS_PAGE_SIZE);
+  }
       
-      /* Eventually coalesce it with prev/next free ranges (there is
-	 always a valid prev/next link since the list is circular). Note:
-	 the tests below will lead to correct behaviour even if the list
-	 is limited to the 'range' singleton, at least as long as the
-	 range is not zero-sized */
-      /* Merge with preceding one ? */
-      if (range->prev->base_vaddr + range->prev->nb_pages*SOS_PAGE_SIZE
-	  == range->base_vaddr)
-	{
-	  struct sos_kmem_range *empty_range_of_ranges = NULL;
-	  struct sos_kmem_range *prec_free = range->prev;
-	  
-	  /* Merge them */
-	  prec_free->nb_pages += range->nb_pages;
-	  list_delete(kmem_free_range_list, range);
-	  
-	  /* Mark the range as free. This may cause the slab owning
-	     the range to become empty */
-	  empty_range_of_ranges = 
-	    sos_kmem_cache_release_struct_range(range);
-
-	  /* If this causes the slab owning the range to become empty,
-	     add the range corresponding to the slab at the end of the
-	     list of the ranges to be freed: it will be actually freed
-	     in one of the next iterations of the do{} loop. */
-	  if (empty_range_of_ranges != NULL)
-	    {
-	      list_delete(kmem_used_range_list, empty_range_of_ranges);
-	      list_add_tail(ranges_to_free, empty_range_of_ranges);
-	    }
-	  
-	  /* Set range to the beginning of this coelescion */
-	  range = prec_free;
-	}
+  /* Eventually coalesce it with prev/next free ranges (there is
+	always a valid prev/next link since the list is circular). Note:
+	the tests below will lead to correct behaviour even if the list
+	is limited to the 'range' singleton, at least as long as the
+	range is not zero-sized */
+  /* Merge with preceding one ? */
+  if (range->prev->base_vaddr + range->prev->nb_pages*SOS_PAGE_SIZE
+	 == range->base_vaddr)
+  {
+    struct sos_kmem_range *prec_free = range->prev;
       
-      /* Merge with next one ? [NO 'else' since range may be the result of
+    /* Merge them */
+    prec_free->nb_pages += range->nb_pages;
+    list_delete(kmem_free_range_list, range);
+    
+    /* Mark the range as free. This may cause the slab owning
+       the range to become empty */
+    sos_kmem_cache_free((sos_vaddr_t) range);
+    
+    /* Set range to the beginning of this coelescion */
+    range = prec_free;
+  }
+      
+  /* Merge with next one ? [NO 'else' since range may be the result of
 	 the merge above] */
-      if (range->base_vaddr + range->nb_pages*SOS_PAGE_SIZE
+  if (range->base_vaddr + range->nb_pages*SOS_PAGE_SIZE
 	  == range->next->base_vaddr)
 	{
-	  struct sos_kmem_range *empty_range_of_ranges = NULL;
 	  struct sos_kmem_range *next_range = range->next;
 	  
 	  /* Merge them */
@@ -519,33 +495,9 @@ sos_ret_t sos_kmem_vmm_del_range(struct 
 	  
 	  /* Mark the next_range as free. This may cause the slab
 	     owning the next_range to become empty */
-	  empty_range_of_ranges = 
-	    sos_kmem_cache_release_struct_range(next_range);
-
-	  /* If this causes the slab owning the next_range to become
-	     empty, add the range corresponding to the slab at the end
-	     of the list of the ranges to be freed: it will be
-	     actually freed in one of the next iterations of the
-	     do{} loop. */
-	  if (empty_range_of_ranges != NULL)
-	    {
-	      list_delete(kmem_used_range_list, empty_range_of_ranges);
-	      list_add_tail(ranges_to_free, empty_range_of_ranges);
-	    }
+    sos_kmem_cache_free((sos_vaddr_t) next_range);
 	}
-      
-
-      /* If deleting the range(s) caused one or more range(s) to be
-	 freed, get the next one to free */
-      if (list_is_empty(ranges_to_free))
-	range = NULL; /* No range left to free */
-      else
-	range = list_pop_head(ranges_to_free);
-
-    }
-  /* Stop when there is no range left to be freed for now */
-  while (range != NULL);
-
+  
   return SOS_OK;
 }
 

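
The kmem_vmm.c changes replace the old sos_kmem_cache_release_struct_range() indirection with a single recovery path: when physical pages or free ranges run out, call sos_kmem_cache_reap() and retry. The sketch below condenses that pattern outside SOS; try_get_resource() and reap() are hypothetical stand-ins for sos_physmem_ref_physpage_new()/sos_paging_map() and sos_kmem_cache_reap().

#include <stdbool.h>

/* Hypothetical stand-ins for the SOS primitives used in the patch */
extern bool try_get_resource(void);   /* e.g. get and map one physical page */
extern bool reap(void);               /* e.g. sos_kmem_cache_reap(); true if something was freed */

/* Acquire 'n' resources, reclaiming memory and retrying on failure,
   mirroring the retry loop added to sos_kmem_vmm_new_range() */
static bool acquire_all(int n)
{
  int i;
  for (i = 0; i < n; i++)
    {
      if (try_get_resource())
        continue;          /* got this one, go on to the next */
      if (! reap())
        return false;      /* nothing left to reclaim: give up, caller undoes the work */
      i--;                 /* something was freed: retry the same iteration */
    }
  return true;
}
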