Diff markup
001 001
002 002
003 003
004 004
005 005
006 006
007 007
008 008
009 009
010 010
011 011
012 012
013 013
014 014
015 015
016 016
017 017
018 018
#include <sos/macros.h>
#include <sos/klibc.h>
#include <sos/list.h>
#include <sos/assert.h>
#include <hwcore/paging.h>
#include <sos/physmem.h>
#include <sos/kmem_vmm.h>

#include "kmem_slab.h"


/* Number of pages in the two bootstrap slabs created at setup time. */
#define NB_PAGES_IN_SLAB_OF_CACHES 1
#define NB_PAGES_IN_SLAB_OF_RANGES 1
033 033
034 struct sos_kslab_cache 034 struct sos_kslab_cache
035 { 035 {
036 char *name; 036 char *name;
037 037
038 038
039 sos_size_t original_obj_size; 039 sos_size_t original_obj_size;
040 sos_size_t alloc_obj_size; 040 sos_size_t alloc_obj_size;
041 041
042 sos_count_t nb_objects_per_slab; 042 sos_count_t nb_objects_per_slab;
043 sos_count_t nb_pages_per_slab; 043 sos_count_t nb_pages_per_slab;
044 sos_count_t min_free_objects; 044 sos_count_t min_free_objects;
045 045
046 046
047 047
048 048
049 #define ON_SLAB (1<<31) 049 #define ON_SLAB (1<<31)
050 sos_ui32_t flags; 050 sos_ui32_t flags;
051 051
052 052
053 sos_count_t nb_free_objects; 053 sos_count_t nb_free_objects;
054 054
055 055
056 struct sos_kslab *slab_list; 056 struct sos_kslab *slab_list;
057 057
058 058
059 struct sos_kslab_cache *prev, *next; 059 struct sos_kslab_cache *prev, *next;
060 }; 060 };
061 061
062 062
063 063
064 struct sos_kslab 064 struct sos_kslab
065 { 065 {
066 066
067 sos_count_t nb_free; 067 sos_count_t nb_free;
068 068
069 069
070 struct sos_kslab_free_object *free; 070 struct sos_kslab_free_object *free;
071 071
072 072
073 struct sos_kmem_range *range; 073 struct sos_kmem_range *range;
074 074
075 075
076 sos_vaddr_t first_object; 076 sos_vaddr_t first_object;
077 077
078 078
079 struct sos_kslab_cache *cache; 079 struct sos_kslab_cache *cache;
080 080
081 081
082 struct sos_kslab *prev, *next; 082 struct sos_kslab *prev, *next;
083 }; 083 };
084 084
085 085
086 086
/* Free-list link stored INSIDE each free object: this is why
   cache_initialize() forces alloc_obj_size to be at least this big. */
struct sos_kslab_free_object
{
  struct sos_kslab_free_object *prev, *next;
};
091 091
092 092
/* The "cache of caches": allocates the struct sos_kslab_cache
   descriptors themselves. */
static struct sos_kslab_cache *cache_of_struct_kslab_cache;

/* Cache used for off-slab struct sos_kslab descriptors. */
static struct sos_kslab_cache *cache_of_struct_kslab;

/* Global list of every slab cache in the system. */
static struct sos_kslab_cache *kslab_cache_list;
100 100
101 101
102 static sos_ret_t 102 static sos_ret_t
103 cache_initialize(struct sos_kslab_cache 103 cache_initialize(struct sos_kslab_cache *the_cache,
104 const char* name, 104 const char* name,
105 sos_size_t obj_size, 105 sos_size_t obj_size,
106 sos_count_t pages_per_slab, 106 sos_count_t pages_per_slab,
107 sos_count_t min_free_objs, 107 sos_count_t min_free_objs,
108 sos_ui32_t cache_flags) 108 sos_ui32_t cache_flags)
109 { 109 {
110 unsigned int space_left; 110 unsigned int space_left;
111 sos_size_t alloc_obj_size; 111 sos_size_t alloc_obj_size;
112 112
113 if (obj_size <= 0) 113 if (obj_size <= 0)
114 return -SOS_EINVAL; 114 return -SOS_EINVAL;
115 115
116 116
117 alloc_obj_size = obj_size; 117 alloc_obj_size = obj_size;
118 118
119 119
120 120
121 if (alloc_obj_size < sizeof(struct sos_kslab 121 if (alloc_obj_size < sizeof(struct sos_kslab_free_object))
122 alloc_obj_size = sizeof(struct sos_kslab_f 122 alloc_obj_size = sizeof(struct sos_kslab_free_object);
123 123
124 124
125 alloc_obj_size = SOS_ALIGN_SUP(alloc_obj_siz 125 alloc_obj_size = SOS_ALIGN_SUP(alloc_obj_size, sizeof(int));
126 126
127 127
128 128
129 if (alloc_obj_size > pages_per_slab*SOS_PAGE 129 if (alloc_obj_size > pages_per_slab*SOS_PAGE_SIZE)
130 return -SOS_EINVAL; 130 return -SOS_EINVAL;
131 131
132 132
133 if (pages_per_slab > MAX_PAGES_PER_SLAB) 133 if (pages_per_slab > MAX_PAGES_PER_SLAB)
134 return -SOS_ENOMEM; 134 return -SOS_ENOMEM;
135 135
136 136
137 memset(the_cache, 0x0, sizeof(struct sos_ksl 137 memset(the_cache, 0x0, sizeof(struct sos_kslab_cache));
138 the_cache->name = (char*)name; 138 the_cache->name = (char*)name;
139 the_cache->flags = cache_flags; 139 the_cache->flags = cache_flags;
140 the_cache->original_obj_size = obj_size; 140 the_cache->original_obj_size = obj_size;
141 the_cache->alloc_obj_size = alloc_obj_siz 141 the_cache->alloc_obj_size = alloc_obj_size;
142 the_cache->min_free_objects = min_free_objs 142 the_cache->min_free_objects = min_free_objs;
143 the_cache->nb_pages_per_slab = pages_per_sla 143 the_cache->nb_pages_per_slab = pages_per_slab;
144 144
145 145
146 146
147 if(alloc_obj_size <= sizeof(struct sos_kslab 147 if(alloc_obj_size <= sizeof(struct sos_kslab))
148 the_cache->flags |= ON_SLAB; 148 the_cache->flags |= ON_SLAB;
149 149
150 150
151 151
152 152
153 153
154 space_left = the_cache->nb_pages_per_slab*SO 154 space_left = the_cache->nb_pages_per_slab*SOS_PAGE_SIZE;
155 if(the_cache->flags & ON_SLAB) 155 if(the_cache->flags & ON_SLAB)
156 space_left -= sizeof(struct sos_kslab); 156 space_left -= sizeof(struct sos_kslab);
157 the_cache->nb_objects_per_slab = space_left 157 the_cache->nb_objects_per_slab = space_left / alloc_obj_size;
158 space_left -= the_cache->nb_objects_per_slab 158 space_left -= the_cache->nb_objects_per_slab*alloc_obj_size;
159 159
160 160
161 161
162 if (the_cache->nb_objects_per_slab < min_fre 162 if (the_cache->nb_objects_per_slab < min_free_objs)
163 return -SOS_EINVAL; 163 return -SOS_EINVAL;
164 164
165 165
166 166
167 if (space_left >= sizeof(struct sos_kslab)) 167 if (space_left >= sizeof(struct sos_kslab))
168 the_cache->flags |= ON_SLAB; 168 the_cache->flags |= ON_SLAB;
169 169
170 return SOS_OK; 170 return SOS_OK;
171 } 171 }
172 172
173 173
174 174
175 static sos_ret_t 175 static sos_ret_t
176 cache_add_slab(struct sos_kslab_cache *kslab_c 176 cache_add_slab(struct sos_kslab_cache *kslab_cache,
177 sos_vaddr_t vaddr_slab, 177 sos_vaddr_t vaddr_slab,
178 struct sos_kslab *slab) 178 struct sos_kslab *slab)
179 { 179 {
180 int i; 180 int i;
181 181
182 182
183 memset(slab, 0x0, sizeof(struct sos_kslab)); 183 memset(slab, 0x0, sizeof(struct sos_kslab));
184 slab->cache = kslab_cache; 184 slab->cache = kslab_cache;
185 185
186 186
187 slab->first_object = vaddr_slab; 187 slab->first_object = vaddr_slab;
188 188
189 189
190 slab->nb_free = kslab_cache->nb_objects_per_ 190 slab->nb_free = kslab_cache->nb_objects_per_slab;
191 kslab_cache->nb_free_objects += slab->nb_fre 191 kslab_cache->nb_free_objects += slab->nb_free;
192 192
193 193
194 for (i = 0 ; i < kslab_cache->nb_objects_pe 194 for (i = 0 ; i < kslab_cache->nb_objects_per_slab ; i++)
195 { 195 {
196 sos_vaddr_t obj_vaddr; 196 sos_vaddr_t obj_vaddr;
197 197
198 198
199 obj_vaddr = slab->first_object + i*kslab 199 obj_vaddr = slab->first_object + i*kslab_cache->alloc_obj_size;
200 200
201 201
202 list_add_tail(slab->free, 202 list_add_tail(slab->free,
203 (struct sos_kslab_free_obj 203 (struct sos_kslab_free_object *)obj_vaddr);
204 } 204 }
205 205
206 206
207 207
208 list_add_head(kslab_cache->slab_list, slab); 208 list_add_head(kslab_cache->slab_list, slab);
209 209
210 return SOS_OK; 210 return SOS_OK;
211 } 211 }
212 212
213 213
214 214
215 static sos_ret_t 215 static sos_ret_t
216 cache_grow(struct sos_kslab_cache *kslab_cache 216 cache_grow(struct sos_kslab_cache *kslab_cache,
217 sos_ui32_t alloc_flags) 217 sos_ui32_t alloc_flags)
218 { 218 {
219 sos_ui32_t range_alloc_flags; 219 sos_ui32_t range_alloc_flags;
220 220
221 struct sos_kmem_range *new_range; 221 struct sos_kmem_range *new_range;
222 sos_vaddr_t new_range_start; 222 sos_vaddr_t new_range_start;
223 223
224 struct sos_kslab *new_slab; 224 struct sos_kslab *new_slab;
225 225
226 226
227 227
228 228
229 range_alloc_flags = 0; 229 range_alloc_flags = 0;
230 230
231 231
232 if (alloc_flags & SOS_KSLAB_ALLOC_ATOMIC) 232 if (alloc_flags & SOS_KSLAB_ALLOC_ATOMIC)
233 range_alloc_flags |= SOS_KMEM_VMM_ATOMIC; 233 range_alloc_flags |= SOS_KMEM_VMM_ATOMIC;
234 234
235 235
236 if (kslab_cache->flags & (SOS_KSLAB_CREATE_M 236 if (kslab_cache->flags & (SOS_KSLAB_CREATE_MAP
237 | SOS_KSLAB_CREATE_ 237 | SOS_KSLAB_CREATE_ZERO))
238 range_alloc_flags |= SOS_KMEM_VMM_MAP; 238 range_alloc_flags |= SOS_KMEM_VMM_MAP;
239 239
240 240
241 new_range = sos_kmem_vmm_new_range(kslab_cac 241 new_range = sos_kmem_vmm_new_range(kslab_cache->nb_pages_per_slab,
242 range_all 242 range_alloc_flags,
243 & new_ran 243 & new_range_start);
244 if (! new_range) 244 if (! new_range)
245 return -SOS_ENOMEM; 245 return -SOS_ENOMEM;
246 246
247 247
248 if (kslab_cache->flags & ON_SLAB) 248 if (kslab_cache->flags & ON_SLAB)
249 { 249 {
250 250
251 251
252 sos_vaddr_t slab_vaddr 252 sos_vaddr_t slab_vaddr
253 = new_range_start + kslab_cache->nb_pa 253 = new_range_start + kslab_cache->nb_pages_per_slab*SOS_PAGE_SIZE
254 - sizeof(struct sos_kslab); 254 - sizeof(struct sos_kslab);
255 new_slab = (struct sos_kslab*)slab_vaddr 255 new_slab = (struct sos_kslab*)slab_vaddr;
256 } 256 }
257 else 257 else
258 { 258 {
259 259
260 260
261 sos_vaddr_t slab_vaddr 261 sos_vaddr_t slab_vaddr
262 = sos_kmem_cache_alloc(cache_of_struct 262 = sos_kmem_cache_alloc(cache_of_struct_kslab,
263 alloc_flags); 263 alloc_flags);
264 if (! slab_vaddr) 264 if (! slab_vaddr)
265 { 265 {
266 sos_kmem_vmm_del_range(new_range); 266 sos_kmem_vmm_del_range(new_range);
267 return -SOS_ENOMEM; 267 return -SOS_ENOMEM;
268 } 268 }
269 new_slab = (struct sos_kslab*)slab_vaddr 269 new_slab = (struct sos_kslab*)slab_vaddr;
270 } 270 }
271 271
272 cache_add_slab(kslab_cache, new_range_start, 272 cache_add_slab(kslab_cache, new_range_start, new_slab);
273 new_slab->range = new_range; 273 new_slab->range = new_range;
274 274
275 275
276 sos_kmem_vmm_set_slab(new_range, new_slab); 276 sos_kmem_vmm_set_slab(new_range, new_slab);
277 277
278 return SOS_OK; 278 return SOS_OK;
279 } 279 }
280 280
281 281
282 282
283 283
284 284
285 285
286 286
287 287
288 288
289 289
290 static sos_ret_t 290 static sos_ret_t
291 cache_release_slab(struct sos_kslab *slab, 291 cache_release_slab(struct sos_kslab *slab,
292 sos_bool_t must_del_range_n 292 sos_bool_t must_del_range_now)
293 { 293 {
294 struct sos_kslab_cache *kslab_cache = slab-> 294 struct sos_kslab_cache *kslab_cache = slab->cache;
295 struct sos_kmem_range *range = slab->range; 295 struct sos_kmem_range *range = slab->range;
296 296
297 SOS_ASSERT_FATAL(kslab_cache != NULL); 297 SOS_ASSERT_FATAL(kslab_cache != NULL);
298 SOS_ASSERT_FATAL(range != NULL); 298 SOS_ASSERT_FATAL(range != NULL);
299 SOS_ASSERT_FATAL(slab->nb_free == slab->cach 299 SOS_ASSERT_FATAL(slab->nb_free == slab->cache->nb_objects_per_slab);
300 300
301 301
302 list_delete(kslab_cache->slab_list, slab); 302 list_delete(kslab_cache->slab_list, slab);
303 slab->cache->nb_free_objects -= slab->nb_fre 303 slab->cache->nb_free_objects -= slab->nb_free;
304 304
305 305
306 if (! (slab->cache->flags & ON_SLAB)) 306 if (! (slab->cache->flags & ON_SLAB))
307 sos_kmem_cache_free((sos_vaddr_t)slab); 307 sos_kmem_cache_free((sos_vaddr_t)slab);
308 308
309 309
310 sos_kmem_vmm_set_slab(range, NULL); 310 sos_kmem_vmm_set_slab(range, NULL);
311 311
312 312
313 313
314 if (must_del_range_now) 314 if (must_del_range_now)
315 return sos_kmem_vmm_del_range(range); 315 return sos_kmem_vmm_del_range(range);
316 316
317 return SOS_OK; 317 return SOS_OK;
318 } 318 }
319 319
320 320
321 321
322 322
323 323
324 324
325 325
326 static struct sos_kslab_cache * 326 static struct sos_kslab_cache *
327 create_cache_of_caches(sos_vaddr_t vaddr_first 327 create_cache_of_caches(sos_vaddr_t vaddr_first_slab_of_caches,
328 int nb_pages) 328 int nb_pages)
329 { 329 {
330 330
331 331
332 struct sos_kslab_cache fake_cache_of_caches; 332 struct sos_kslab_cache fake_cache_of_caches;
333 333
334 334
335 struct sos_kslab_cache *real_cache_of_caches 335 struct sos_kslab_cache *real_cache_of_caches;
336 336
337 337
338 struct sos_kslab *slab_of_caches; 338 struct sos_kslab *slab_of_caches;
339 339
340 340
341 if (cache_initialize(& fake_cache_of_caches, 341 if (cache_initialize(& fake_cache_of_caches,
342 "Caches", sizeof(struct 342 "Caches", sizeof(struct sos_kslab_cache),
343 nb_pages, 0, SOS_KSLAB_ 343 nb_pages, 0, SOS_KSLAB_CREATE_MAP | ON_SLAB))
344 344
345 return NULL; 345 return NULL;
346 346
347 memset((void*)vaddr_first_slab_of_caches, 0x 347 memset((void*)vaddr_first_slab_of_caches, 0x0, nb_pages*SOS_PAGE_SIZE);
348 348
349 349
350 slab_of_caches = (struct sos_kslab*)(vaddr_f 350 slab_of_caches = (struct sos_kslab*)(vaddr_first_slab_of_caches
351 + nb_pa 351 + nb_pages*SOS_PAGE_SIZE
352 - sizeo 352 - sizeof(struct sos_kslab));
353 353
354 354
355 cache_add_slab(& fake_cache_of_caches, 355 cache_add_slab(& fake_cache_of_caches,
356 vaddr_first_slab_of_caches, 356 vaddr_first_slab_of_caches,
357 slab_of_caches); 357 slab_of_caches);
358 358
359 359
360 360
361 361
362 real_cache_of_caches 362 real_cache_of_caches
363 = (struct sos_kslab_cache*) sos_kmem_cache 363 = (struct sos_kslab_cache*) sos_kmem_cache_alloc(& fake_cache_of_caches,
364 364 0);
365 365
366 memcpy(real_cache_of_caches, & fake_cache_of 366 memcpy(real_cache_of_caches, & fake_cache_of_caches,
367 sizeof(struct sos_kslab_cache)); 367 sizeof(struct sos_kslab_cache));
368 368
369 slab_of_caches->cache = real_cache_of_caches 369 slab_of_caches->cache = real_cache_of_caches;
370 370
371 371
372 list_add_tail(kslab_cache_list, real_cache_o 372 list_add_tail(kslab_cache_list, real_cache_of_caches);
373 373
374 return real_cache_of_caches; 374 return real_cache_of_caches;
375 } 375 }
376 376
377 377
378 378
379 379
380 380
381 381
382 382
383 383
384 static struct sos_kslab_cache * 384 static struct sos_kslab_cache *
385 create_cache_of_ranges(sos_vaddr_t vaddr_first 385 create_cache_of_ranges(sos_vaddr_t vaddr_first_slab_of_ranges,
386 sos_size_t sizeof_stru 386 sos_size_t sizeof_struct_range,
387 int nb_pages) 387 int nb_pages)
388 { 388 {
389 389
390 struct sos_kslab_cache *cache_of_ranges; 390 struct sos_kslab_cache *cache_of_ranges;
391 391
392 392
393 struct sos_kslab *slab_of_ranges; 393 struct sos_kslab *slab_of_ranges;
394 394
395 cache_of_ranges = (struct sos_kslab_cache*) 395 cache_of_ranges = (struct sos_kslab_cache*)
396 sos_kmem_cache_alloc(cache_of_struct_kslab 396 sos_kmem_cache_alloc(cache_of_struct_kslab_cache,
397 0); 397 0);
398 if (! cache_of_ranges) 398 if (! cache_of_ranges)
399 return NULL; 399 return NULL;
400 400
401 401
402 402
403 if (cache_initialize(cache_of_ranges, 403 if (cache_initialize(cache_of_ranges,
404 "struct kmem_range", 404 "struct kmem_range",
405 sizeof_struct_range, 405 sizeof_struct_range,
406 nb_pages, 2, SOS_KSLAB_ 406 nb_pages, 2, SOS_KSLAB_CREATE_MAP | ON_SLAB))
407 407
408 return NULL; 408 return NULL;
409 409
410 410
411 list_add_tail(kslab_cache_list, cache_of_ran 411 list_add_tail(kslab_cache_list, cache_of_ranges);
412 412
413 413
414 414
415 415
416 memset((void*)vaddr_first_slab_of_ranges, 0x 416 memset((void*)vaddr_first_slab_of_ranges, 0x0, nb_pages*SOS_PAGE_SIZE);
417 417
418 418
419 slab_of_ranges = (struct sos_kslab*)(vaddr_f 419 slab_of_ranges = (struct sos_kslab*)(vaddr_first_slab_of_ranges
420 + nb_pa 420 + nb_pages*SOS_PAGE_SIZE
421 - sizeo 421 - sizeof(struct sos_kslab));
422 422
423 cache_add_slab(cache_of_ranges, 423 cache_add_slab(cache_of_ranges,
424 vaddr_first_slab_of_ranges, 424 vaddr_first_slab_of_ranges,
425 slab_of_ranges); 425 slab_of_ranges);
426 426
427 return cache_of_ranges; 427 return cache_of_ranges;
428 } 428 }
429 429
430 430
431 struct sos_kslab_cache * 431 struct sos_kslab_cache *
432 sos_kmem_cache_subsystem_setup_prepare(sos_vad 432 sos_kmem_cache_subsystem_setup_prepare(sos_vaddr_t kernel_core_base,
433 sos_vad 433 sos_vaddr_t kernel_core_top,
434 sos_siz 434 sos_size_t sizeof_struct_range,
435 435
436 struct 436 struct sos_kslab **first_struct_slab_of_caches,
437 sos_vad 437 sos_vaddr_t *first_slab_of_caches_base,
438 sos_cou 438 sos_count_t *first_slab_of_caches_nb_pages,
439 struct 439 struct sos_kslab **first_struct_slab_of_ranges,
440 sos_vad 440 sos_vaddr_t *first_slab_of_ranges_base,
441 sos_cou 441 sos_count_t *first_slab_of_ranges_nb_pages)
442 { 442 {
443 int i; 443 int i;
444 sos_ret_t retval; 444 sos_ret_t retval;
445 sos_vaddr_t vaddr; 445 sos_vaddr_t vaddr;
446 446
447 447
448 struct sos_kslab_cache *cache_of_ranges; 448 struct sos_kslab_cache *cache_of_ranges;
449 449
450 450
451 kslab_cache_list = NULL; 451 kslab_cache_list = NULL;
452 cache_of_struct_kslab = NULL; 452 cache_of_struct_kslab = NULL;
453 cache_of_struct_kslab_cache = NULL; 453 cache_of_struct_kslab_cache = NULL;
454 454
455 455
456 456
457 457
458 458
459 459
460 460
461 *first_slab_of_caches_base = SOS_PAGE_ALIGN_ 461 *first_slab_of_caches_base = SOS_PAGE_ALIGN_SUP(kernel_core_top);
462 for (i = 0, vaddr = *first_slab_of_caches_ba 462 for (i = 0, vaddr = *first_slab_of_caches_base ;
463 i < NB_PAGES_IN_SLAB_OF_CACHES ; 463 i < NB_PAGES_IN_SLAB_OF_CACHES ;
464 i++, vaddr += SOS_PAGE_SIZE) 464 i++, vaddr += SOS_PAGE_SIZE)
465 { 465 {
466 sos_paddr_t ppage_paddr; 466 sos_paddr_t ppage_paddr;
467 467
468 ppage_paddr 468 ppage_paddr
469 = sos_physmem_ref_physpage_new(FALSE); 469 = sos_physmem_ref_physpage_new(FALSE);
470 SOS_ASSERT_FATAL(ppage_paddr != (sos_pad 470 SOS_ASSERT_FATAL(ppage_paddr != (sos_paddr_t)NULL);
471 471
472 retval = sos_paging_map(ppage_paddr, vad 472 retval = sos_paging_map(ppage_paddr, vaddr,
473 FALSE, 473 FALSE,
474 SOS_VM_MAP_ATOMI 474 SOS_VM_MAP_ATOMIC
475 | SOS_VM_MAP_PRO 475 | SOS_VM_MAP_PROT_READ
476 | SOS_VM_MAP_PRO 476 | SOS_VM_MAP_PROT_WRITE);
477 SOS_ASSERT_FATAL(retval == SOS_OK); 477 SOS_ASSERT_FATAL(retval == SOS_OK);
478 478
479 retval = sos_physmem_unref_physpage(ppag 479 retval = sos_physmem_unref_physpage(ppage_paddr);
480 SOS_ASSERT_FATAL(retval == FALSE); 480 SOS_ASSERT_FATAL(retval == FALSE);
481 } 481 }
482 482
483 483
484 *first_slab_of_caches_nb_pages = NB_PAGES_IN 484 *first_slab_of_caches_nb_pages = NB_PAGES_IN_SLAB_OF_CACHES;
485 cache_of_struct_kslab_cache 485 cache_of_struct_kslab_cache
486 = create_cache_of_caches(*first_slab_of_ca 486 = create_cache_of_caches(*first_slab_of_caches_base,
487 NB_PAGES_IN_SLAB_ 487 NB_PAGES_IN_SLAB_OF_CACHES);
488 SOS_ASSERT_FATAL(cache_of_struct_kslab_cache 488 SOS_ASSERT_FATAL(cache_of_struct_kslab_cache != NULL);
489 489
490 490
491 *first_struct_slab_of_caches 491 *first_struct_slab_of_caches
492 = list_get_head(cache_of_struct_kslab_cach 492 = list_get_head(cache_of_struct_kslab_cache->slab_list);
493 493
494 494
495 495
496 496
497 497
498 *first_slab_of_ranges_base = vaddr; 498 *first_slab_of_ranges_base = vaddr;
499 499
500 for (i = 0, vaddr = *first_slab_of_ranges_ba 500 for (i = 0, vaddr = *first_slab_of_ranges_base ;
501 i < NB_PAGES_IN_SLAB_OF_RANGES ; 501 i < NB_PAGES_IN_SLAB_OF_RANGES ;
502 i++, vaddr += SOS_PAGE_SIZE) 502 i++, vaddr += SOS_PAGE_SIZE)
503 { 503 {
504 sos_paddr_t ppage_paddr; 504 sos_paddr_t ppage_paddr;
505 505
506 ppage_paddr 506 ppage_paddr
507 = sos_physmem_ref_physpage_new(FALSE); 507 = sos_physmem_ref_physpage_new(FALSE);
508 SOS_ASSERT_FATAL(ppage_paddr != (sos_pad 508 SOS_ASSERT_FATAL(ppage_paddr != (sos_paddr_t)NULL);
509 509
510 retval = sos_paging_map(ppage_paddr, vad 510 retval = sos_paging_map(ppage_paddr, vaddr,
511 FALSE, 511 FALSE,
512 SOS_VM_MAP_ATOMI 512 SOS_VM_MAP_ATOMIC
513 | SOS_VM_MAP_PRO 513 | SOS_VM_MAP_PROT_READ
514 | SOS_VM_MAP_PRO 514 | SOS_VM_MAP_PROT_WRITE);
515 SOS_ASSERT_FATAL(retval == SOS_OK); 515 SOS_ASSERT_FATAL(retval == SOS_OK);
516 516
517 retval = sos_physmem_unref_physpage(ppag 517 retval = sos_physmem_unref_physpage(ppage_paddr);
518 SOS_ASSERT_FATAL(retval == FALSE); 518 SOS_ASSERT_FATAL(retval == FALSE);
519 } 519 }
520 520
521 521
522 *first_slab_of_ranges_nb_pages = NB_PAGES_IN 522 *first_slab_of_ranges_nb_pages = NB_PAGES_IN_SLAB_OF_RANGES;
523 cache_of_ranges = create_cache_of_ranges(*fi 523 cache_of_ranges = create_cache_of_ranges(*first_slab_of_ranges_base,
524 siz 524 sizeof_struct_range,
525 NB_ 525 NB_PAGES_IN_SLAB_OF_RANGES);
526 SOS_ASSERT_FATAL(cache_of_ranges != NULL); 526 SOS_ASSERT_FATAL(cache_of_ranges != NULL);
527 527
528 528
529 *first_struct_slab_of_ranges 529 *first_struct_slab_of_ranges
530 = list_get_head(cache_of_ranges->slab_list 530 = list_get_head(cache_of_ranges->slab_list);
531 531
532 532
533 533
534 534
535 cache_of_struct_kslab 535 cache_of_struct_kslab
536 = sos_kmem_cache_create("off-slab slab str 536 = sos_kmem_cache_create("off-slab slab structures",
537 sizeof(struct sos_ 537 sizeof(struct sos_kslab),
538 1, 538 1,
539 0, 539 0,
540 SOS_KSLAB_CREATE_M 540 SOS_KSLAB_CREATE_MAP);
541 SOS_ASSERT_FATAL(cache_of_struct_kslab != NU 541 SOS_ASSERT_FATAL(cache_of_struct_kslab != NULL);
542 542
543 return cache_of_ranges; 543 return cache_of_ranges;
544 } 544 }
545 545
546 546
547 sos_ret_t 547 sos_ret_t
548 sos_kmem_cache_subsystem_setup_commit(struct s 548 sos_kmem_cache_subsystem_setup_commit(struct sos_kslab *first_struct_slab_of_caches,
549 struct s 549 struct sos_kmem_range *first_range_of_caches,
550 struct s 550 struct sos_kslab *first_struct_slab_of_ranges,
551 struct s 551 struct sos_kmem_range *first_range_of_ranges)
552 { 552 {
553 first_struct_slab_of_caches->range = first_r 553 first_struct_slab_of_caches->range = first_range_of_caches;
554 first_struct_slab_of_ranges->range = first_r 554 first_struct_slab_of_ranges->range = first_range_of_ranges;
555 return SOS_OK; 555 return SOS_OK;
556 } 556 }
557 557
558 558
559 struct sos_kslab_cache * 559 struct sos_kslab_cache *
560 sos_kmem_cache_create(const char* name, 560 sos_kmem_cache_create(const char* name,
561 sos_size_t obj_size, 561 sos_size_t obj_size,
562 sos_count_t pages_per_sl 562 sos_count_t pages_per_slab,
563 sos_count_t min_free_obj 563 sos_count_t min_free_objs,
564 sos_ui32_t cache_flags) 564 sos_ui32_t cache_flags)
565 { 565 {
566 struct sos_kslab_cache *new_cache; 566 struct sos_kslab_cache *new_cache;
567 567
568 568
569 new_cache = (struct sos_kslab_cache*) 569 new_cache = (struct sos_kslab_cache*)
570 sos_kmem_cache_alloc(cache_of_struct_kslab 570 sos_kmem_cache_alloc(cache_of_struct_kslab_cache,
571 0); 571 0);
572 if (! new_cache) 572 if (! new_cache)
573 return NULL; 573 return NULL;
574 574
575 if (cache_initialize(new_cache, name, obj_si 575 if (cache_initialize(new_cache, name, obj_size,
576 pages_per_slab, min_fre 576 pages_per_slab, min_free_objs,
577 cache_flags)) 577 cache_flags))
578 { 578 {
579 579
580 sos_kmem_cache_free((sos_vaddr_t)new_cac 580 sos_kmem_cache_free((sos_vaddr_t)new_cache);
581 return NULL; 581 return NULL;
582 } 582 }
583 583
584 584
585 list_add_tail(kslab_cache_list, new_cache); 585 list_add_tail(kslab_cache_list, new_cache);
586 586
587 587
588 if (min_free_objs) 588 if (min_free_objs)
589 { 589 {
590 if (cache_grow(new_cache, 0 590 if (cache_grow(new_cache, 0 ) != SOS_OK)
591 { 591 {
592 sos_kmem_cache_destroy(new_cache); 592 sos_kmem_cache_destroy(new_cache);
593 return NULL; 593 return NULL;
594 } 594 }
595 } 595 }
596 596
597 return new_cache; 597 return new_cache;
598 } 598 }
599 599
600 600
601 sos_ret_t sos_kmem_cache_destroy(struct sos_ks 601 sos_ret_t sos_kmem_cache_destroy(struct sos_kslab_cache *kslab_cache)
602 { 602 {
603 int nb_slabs; 603 int nb_slabs;
604 struct sos_kslab *slab; 604 struct sos_kslab *slab;
605 605
606 if (! kslab_cache) 606 if (! kslab_cache)
607 return -SOS_EINVAL; 607 return -SOS_EINVAL;
608 608
609 609
610 610
611 list_foreach(kslab_cache->slab_list, slab, n 611 list_foreach(kslab_cache->slab_list, slab, nb_slabs)
612 { 612 {
613 if (slab->nb_free != kslab_cache->nb_obj 613 if (slab->nb_free != kslab_cache->nb_objects_per_slab)
614 return -SOS_EBUSY; 614 return -SOS_EBUSY;
615 } 615 }
616 616
617 617
618 while ((slab = list_get_head(kslab_cache->sl 618 while ((slab = list_get_head(kslab_cache->slab_list)) != NULL)
619 { 619 {
620 cache_release_slab(slab, TRUE); 620 cache_release_slab(slab, TRUE);
621 } 621 }
622 622
623 623
624 return sos_kmem_cache_free((sos_vaddr_t)ksla 624 return sos_kmem_cache_free((sos_vaddr_t)kslab_cache);
625 } 625 }
626 626
627 627
628 sos_vaddr_t sos_kmem_cache_alloc(struct sos_ks 628 sos_vaddr_t sos_kmem_cache_alloc(struct sos_kslab_cache *kslab_cache,
629 sos_ui32_t al 629 sos_ui32_t alloc_flags)
630 { 630 {
631 sos_vaddr_t obj_vaddr; 631 sos_vaddr_t obj_vaddr;
632 struct sos_kslab * slab_head; 632 struct sos_kslab * slab_head;
633 #define ALLOC_RET return 633 #define ALLOC_RET return
634 634
635 635
636 636
637 637
638 if ((! kslab_cache->slab_list) 638 if ((! kslab_cache->slab_list)
639 || (! list_get_head(kslab_cache->slab_li 639 || (! list_get_head(kslab_cache->slab_list)->free))
640 { 640 {
641 if (cache_grow(kslab_cache, alloc_flags) 641 if (cache_grow(kslab_cache, alloc_flags) != SOS_OK)
642 642
643 ALLOC_RET( (sos_vaddr_t)NULL); 643 ALLOC_RET( (sos_vaddr_t)NULL);
644 } 644 }
645 645
646 646
647 647
648 648
649 slab_head = list_get_head(kslab_cache->slab_ 649 slab_head = list_get_head(kslab_cache->slab_list);
650 SOS_ASSERT_FATAL(slab_head != NULL); 650 SOS_ASSERT_FATAL(slab_head != NULL);
651 651
652 652
653 653
654 obj_vaddr = (sos_vaddr_t)list_pop_head(slab_ 654 obj_vaddr = (sos_vaddr_t)list_pop_head(slab_head->free);
655 slab_head->nb_free --; 655 slab_head->nb_free --;
656 kslab_cache->nb_free_objects --; 656 kslab_cache->nb_free_objects --;
657 657
658 658
659 if (kslab_cache->flags & SOS_KSLAB_CREATE_ZE 659 if (kslab_cache->flags & SOS_KSLAB_CREATE_ZERO)
660 memset((void*)obj_vaddr, 0x0, kslab_cache- 660 memset((void*)obj_vaddr, 0x0, kslab_cache->alloc_obj_size);
661 661
662 662
663 if (slab_head->free == NULL) 663 if (slab_head->free == NULL)
664 { 664 {
665 665
666 struct sos_kslab *slab; 666 struct sos_kslab *slab;
667 slab = list_pop_head(kslab_cache->slab_l 667 slab = list_pop_head(kslab_cache->slab_list);
668 list_add_tail(kslab_cache->slab_list, sl 668 list_add_tail(kslab_cache->slab_list, slab);
669 } 669 }
670 670
671 671
672 672
673 673
674 674
675 675
676 676
677 677
678 678
679 679
680 680
681 681
682 682
683 683
684 684
685 if ((kslab_cache->min_free_objects > 0) 685 if ((kslab_cache->min_free_objects > 0)
686 && (kslab_cache->nb_free_objects == (ksl 686 && (kslab_cache->nb_free_objects == (kslab_cache->min_free_objects - 1)))
687 { 687 {
688 688
689 if (cache_grow(kslab_cache, alloc_flags) 689 if (cache_grow(kslab_cache, alloc_flags) != SOS_OK)
690 { 690 {
691 691
692 692
693 sos_kmem_cache_free(obj_vaddr); 693 sos_kmem_cache_free(obj_vaddr);
694 ALLOC_RET( (sos_vaddr_t)NULL); 694 ALLOC_RET( (sos_vaddr_t)NULL);
695 } 695 }
696 } 696 }
697 697
698 ALLOC_RET(obj_vaddr); 698 ALLOC_RET(obj_vaddr);
699 } 699 }
700 700
701 701
702 702
703 703
704 704
705 705
706 706
707 707
708 inline static 708 inline static
709 sos_ret_t 709 sos_ret_t
710 free_object(sos_vaddr_t vaddr, 710 free_object(sos_vaddr_t vaddr,
711 struct sos_kslab ** empty_slab) 711 struct sos_kslab ** empty_slab)
712 { 712 {
713 struct sos_kslab_cache *kslab_cache; 713 struct sos_kslab_cache *kslab_cache;
714 714
715 715
716 struct sos_kslab *slab = sos_kmem_vmm_resolv 716 struct sos_kslab *slab = sos_kmem_vmm_resolve_slab(vaddr);
717 717
718 718
719 *empty_slab = NULL; 719 *empty_slab = NULL;
720 720
721 721
722 if (! slab) 722 if (! slab)
723 return -SOS_EINVAL; 723 return -SOS_EINVAL;
724 724
725 SOS_ASSERT_FATAL(slab->cache); 725 SOS_ASSERT_FATAL(slab->cache);
726 kslab_cache = slab->cache; 726 kslab_cache = slab->cache;
727 727
728 728
729 729
730 730
731 731
732 732
733 if (( (vaddr - slab->first_object) 733 if (( (vaddr - slab->first_object)
734 % kslab_cache->alloc_obj_size) != 0) 734 % kslab_cache->alloc_obj_size) != 0)
735 return -SOS_EINVAL; 735 return -SOS_EINVAL;
736 736
737 if (( (vaddr - slab->first_object) 737 if (( (vaddr - slab->first_object)
738 / kslab_cache->alloc_obj_size) >= ksla 738 / kslab_cache->alloc_obj_size) >= kslab_cache->nb_objects_per_slab)
739 return -SOS_EINVAL; 739 return -SOS_EINVAL;
740 740
741 741
742 742
743 743
744 744
745 745
746 746
747 if (! slab->free) 747 if (! slab->free)
748 { 748 {
749 list_delete(kslab_cache->slab_list, slab 749 list_delete(kslab_cache->slab_list, slab);
750 list_add_head(kslab_cache->slab_list, sl 750 list_add_head(kslab_cache->slab_list, slab);
751 } 751 }
752 752
753 753
754 list_add_head(slab->free, (struct sos_kslab_ 754 list_add_head(slab->free, (struct sos_kslab_free_object*)vaddr);
755 slab->nb_free++; 755 slab->nb_free++;
756 kslab_cache->nb_free_objects++; 756 kslab_cache->nb_free_objects++;
757 SOS_ASSERT_FATAL(slab->nb_free <= slab->cach 757 SOS_ASSERT_FATAL(slab->nb_free <= slab->cache->nb_objects_per_slab);
758 758
759 759
760 760
761 if ((slab->nb_free >= kslab_cache->nb_object 761 if ((slab->nb_free >= kslab_cache->nb_objects_per_slab)
762 && (kslab_cache->nb_free_objects - slab- 762 && (kslab_cache->nb_free_objects - slab->nb_free
763 >= kslab_cache->min_free_objects)) 763 >= kslab_cache->min_free_objects))
764 { 764 {
765 *empty_slab = slab; 765 *empty_slab = slab;
766 } 766 }
767 767
768 return SOS_OK; 768 return SOS_OK;
769 } 769 }
770 770
771 771
772 sos_ret_t sos_kmem_cache_free(sos_vaddr_t vadd 772 sos_ret_t sos_kmem_cache_free(sos_vaddr_t vaddr)
773 { 773 {
774 sos_ret_t retval; 774 sos_ret_t retval;
775 struct sos_kslab *empty_slab; 775 struct sos_kslab *empty_slab;
776 776
777 777
778 retval = free_object(vaddr, & empty_slab); 778 retval = free_object(vaddr, & empty_slab);
779 if (retval != SOS_OK) 779 if (retval != SOS_OK)
780 return retval; 780 return retval;
781 781
782 782
783 if (empty_slab != NULL) 783 if (empty_slab != NULL)
784 return cache_release_slab(empty_slab, TRUE 784 return cache_release_slab(empty_slab, TRUE);
785 785
786 return SOS_OK; 786 return SOS_OK;
787 } 787 }
788 788
789 789
790 struct sos_kmem_range * 790 struct sos_kmem_range *
791 sos_kmem_cache_release_struct_range(struct sos 791 sos_kmem_cache_release_struct_range(struct sos_kmem_range *the_range)
792 { 792 {
793 sos_ret_t retval; 793 sos_ret_t retval;
794 struct sos_kslab *empty_slab; 794 struct sos_kslab *empty_slab;
795 795
796 796
797 retval = free_object((sos_vaddr_t)the_range, 797 retval = free_object((sos_vaddr_t)the_range, & empty_slab);
798 if (retval != SOS_OK) 798 if (retval != SOS_OK)
799 return NULL; 799 return NULL;
800 800
801 801
802 if (empty_slab != NULL) 802 if (empty_slab != NULL)
803 { 803 {
804 struct sos_kmem_range *empty_range = emp 804 struct sos_kmem_range *empty_range = empty_slab->range;
805 SOS_ASSERT_FATAL(cache_release_slab(empt 805 SOS_ASSERT_FATAL(cache_release_slab(empty_slab, FALSE) == SOS_OK);
806 SOS_ASSERT_FATAL(empty_range != NULL); 806 SOS_ASSERT_FATAL(empty_range != NULL);
807 return empty_range; 807 return empty_range;
808 } 808 }
809 809
810 return NULL; 810 return NULL;
811 } 811 }
812 812