Diff markup rendering: each line shows the old and new line numbers followed by the file content; `<<` marks a deleted line, `>>` an inserted one.
001 001
002 002
003 003
004 004
005 005
006 006
007 007
008 008
009 009
010 010
011 011
012 012
013 013
014 014
015 015
016 016
017 017
018 018
019 019
020 #include <sos/list.h> 020 #include <sos/list.h>
021 #include <sos/physmem.h> 021 #include <sos/physmem.h>
022 #include <hwcore/paging.h> 022 #include <hwcore/paging.h>
023 #include <sos/assert.h> 023 #include <sos/assert.h>
024 024
025 #include "kmem_vmm.h" 025 #include "kmem_vmm.h"
026 026
027 027
/**
 * Descriptor of one contiguous range of kernel virtual addresses.
 * Ranges are kept in two sorted doubly-linked lists (free / used),
 * chained through the prev/next fields below.
 */
struct sos_kmem_range
{
  sos_vaddr_t base_vaddr;  /* First virtual address of the range (page-aligned) */
  sos_count_t nb_pages;    /* Length of the range, in pages */

  /* The slab owning this range, or NULL (set for ranges that back a
     kslab; see sos_kmem_vmm_set_slab / create_range) */
  struct sos_kslab *slab;

  /* Links in the free- or used-range list */
  struct sos_kmem_range *prev, *next;
};

/* Exported so other subsystems can know the descriptor size without
   seeing the structure definition */
const int sizeof_struct_sos_kmem_range = sizeof(struct sos_kmem_range);
039 039
040 040
/* The two range lists, each kept sorted by increasing base_vaddr:
   free ranges available for allocation, and ranges currently in use */
static struct sos_kmem_range *kmem_free_range_list, *kmem_used_range_list;

/* Slab cache from which struct sos_kmem_range descriptors are allocated */
static struct sos_kslab_cache *kmem_range_cache;
045 045
046 046
047 047
048 048
049 049
/**
 * Return the last range in the (sorted) list whose base_vaddr is
 * <= vaddr, or NULL when every range in the list starts above vaddr
 * (or the list is empty).
 *
 * @param the_list  Range list sorted by increasing base_vaddr
 * @param vaddr     Virtual address to locate
 */
static struct sos_kmem_range *
get_closest_preceding_kmem_range(struct sos_kmem_range *the_list,
                                 sos_vaddr_t vaddr)
{
  int nb_elements;
  struct sos_kmem_range *a_range, *ret_range;

  /* Linear scan: stop at the first range starting beyond vaddr and
     return its predecessor */
  ret_range = NULL;
  list_foreach(the_list, a_range, nb_elements)
    {
      if (vaddr < a_range->base_vaddr)
        return ret_range;
      ret_range = a_range;
    }

  /* vaddr is >= the base of every range: the last one is the closest */
  return ret_range;
}
070 070
071 071
072 072
073 073
074 074
075 075
/**
 * First-fit search: return the first free range large enough to hold
 * nb_pages pages, or NULL when no free range is big enough.
 */
static struct sos_kmem_range *find_suitable_free_range(sos_count_t nb_pages)
{
  int nb_elements;
  struct sos_kmem_range *r;

  list_foreach(kmem_free_range_list, r, nb_elements)
    {
      if (r->nb_pages >= nb_pages)
        return r;
    }

  return NULL;
}
089 089
090 090
091 091
092 092
093 093
094 094
095 095
096 static struct sos_kmem_range *insert_range(str 096 static struct sos_kmem_range *insert_range(struct sos_kmem_range *the_list,
097 str 097 struct sos_kmem_range *a_range)
098 { 098 {
099 struct sos_kmem_range *prec_used; 099 struct sos_kmem_range *prec_used;
100 100
101 101
102 prec_used = get_closest_preceding_kmem_range 102 prec_used = get_closest_preceding_kmem_range(the_list,
103 103 a_range->base_vaddr);
104 104
105 if (prec_used != NULL) 105 if (prec_used != NULL)
106 list_insert_after(the_list, prec_used, a_r 106 list_insert_after(the_list, prec_used, a_range);
107 else 107 else
108 list_add_head(the_list, a_range); 108 list_add_head(the_list, a_range);
109 109
110 return the_list; 110 return the_list;
111 } 111 }
112 112
113 113
114 114
115 115
116 116
117 117
118 static struct sos_kmem_range *lookup_range(sos 118 static struct sos_kmem_range *lookup_range(sos_vaddr_t vaddr)
119 { 119 {
120 struct sos_kmem_range *range; 120 struct sos_kmem_range *range;
121 121
122 122
123 sos_paddr_t ppage_paddr = SOS_PAGE_ALIGN_INF 123 sos_paddr_t ppage_paddr = SOS_PAGE_ALIGN_INF(sos_paging_get_paddr(vaddr));
124 124
125 if (ppage_paddr) 125 if (ppage_paddr)
126 { 126 {
127 range = sos_physmem_get_kmem_range(ppage 127 range = sos_physmem_get_kmem_range(ppage_paddr);
128 128
129 129
130 130
131 SOS_ASSERT_FATAL(range != NULL); 131 SOS_ASSERT_FATAL(range != NULL);
132 } 132 }
133 133
134 134
135 135
136 else 136 else
137 { 137 {
138 range = get_closest_preceding_kmem_range 138 range = get_closest_preceding_kmem_range(kmem_used_range_list,
139 139 vaddr);
140 140
141 if (! range) 141 if (! range)
142 return NULL; 142 return NULL;
143 143
144 144
145 if ( (vaddr < range->base_vaddr) 145 if ( (vaddr < range->base_vaddr)
146 || (vaddr >= (range->base_vaddr + r 146 || (vaddr >= (range->base_vaddr + range->nb_pages*SOS_PAGE_SIZE)) )
147 return NULL; 147 return NULL;
148 } 148 }
149 149
150 return range; 150 return range;
151 } 151 }
152 152
153 153
154 154
155 155
156 156
157 157
158 158
/**
 * Allocate and register a new range descriptor covering
 * [base_vaddr .. top_vaddr), inserting it at the tail of the free or
 * used list. Used only at subsystem setup time, before the normal
 * allocation paths are available.
 *
 * @param is_free          TRUE to register in the free list, FALSE in the used list
 * @param base_vaddr       Start of the range (must be page-aligned)
 * @param top_vaddr        End of the range, exclusive (must be page-aligned)
 * @param associated_slab  Slab backing the range, or NULL (used ranges only)
 * @return the new descriptor, or NULL when the range is smaller than a page
 */
static struct sos_kmem_range *
create_range(sos_bool_t is_free,
             sos_vaddr_t base_vaddr,
             sos_vaddr_t top_vaddr,
             struct sos_kslab *associated_slab)
{
  struct sos_kmem_range *range;

  SOS_ASSERT_FATAL(SOS_IS_PAGE_ALIGNED(base_vaddr));
  SOS_ASSERT_FATAL(SOS_IS_PAGE_ALIGNED(top_vaddr));

  /* Degenerate (empty) range: nothing to register */
  if ((top_vaddr - base_vaddr) < SOS_PAGE_SIZE)
    return NULL;

  range = (struct sos_kmem_range*)sos_kmem_cache_alloc(kmem_range_cache,
                                                       SOS_KSLAB_ALLOC_ATOMIC);
  SOS_ASSERT_FATAL(range != NULL);

  range->base_vaddr = base_vaddr;
  range->nb_pages   = (top_vaddr - base_vaddr) / SOS_PAGE_SIZE;

  if (is_free)
    {
      /* NOTE(review): range->slab is left unset on this path — callers
         appear to rely on free ranges never being queried for their
         slab; confirm before depending on it */
      list_add_tail(kmem_free_range_list,
                    range);
    }
  else
    {
      sos_vaddr_t vaddr;
      range->slab = associated_slab;
      list_add_tail(kmem_used_range_list,
                    range);

      /* Record, on each already-mapped physical page of the range, a
         back-pointer to this descriptor (enables lookup_range's fast path) */
      for (vaddr = base_vaddr ;
           vaddr < top_vaddr ;
           vaddr += SOS_PAGE_SIZE)
        {
          sos_paddr_t ppage_paddr = sos_paging_get_paddr(vaddr);
          SOS_ASSERT_FATAL((void*)ppage_paddr != NULL);
          sos_physmem_set_kmem_range(ppage_paddr, range);
        }
    }

  return range;
}
205 205
206 206
/**
 * Bootstrap the kernel virtual-memory range subsystem.
 *
 * Carves the kernel address space [SOS_KMEM_VMM_BASE .. SOS_KMEM_VMM_TOP)
 * into the initial free/used ranges (BIOS+video area, kernel code/data,
 * bootstrap stack, first slabs), in cooperation with the kslab cache
 * subsystem (prepare/commit two-phase setup).
 *
 * @param kernel_core_base             First address of the kernel image
 * @param kernel_core_top              First address past the kernel image
 * @param bootstrap_stack_bottom_vaddr Bottom of the bootstrap CPU stack
 * @param bootstrap_stack_top_vaddr    Top of the bootstrap CPU stack
 * @return SOS_OK
 */
sos_ret_t
sos_kmem_vmm_subsystem_setup(sos_vaddr_t kernel_core_base,
                             sos_vaddr_t kernel_core_top,
                             sos_vaddr_t bootstrap_stack_bottom_vaddr,
                             sos_vaddr_t bootstrap_stack_top_vaddr)
{
  struct sos_kslab *first_struct_slab_of_caches,
    *first_struct_slab_of_ranges;
  /* NOTE(review): the *_nb_pages variables are declared sos_vaddr_t,
     not sos_count_t — presumably an integer type of the same width;
     confirm against the prepare() prototype */
  sos_vaddr_t first_slab_of_caches_base,
    first_slab_of_caches_nb_pages,
    first_slab_of_ranges_base,
    first_slab_of_ranges_nb_pages;
  struct sos_kmem_range *first_range_of_caches,
    *first_range_of_ranges;

  list_init(kmem_free_range_list);
  list_init(kmem_used_range_list);

  /* Phase 1: let the kslab subsystem build its first caches/slabs and
     tell us where it placed them */
  kmem_range_cache
    = sos_kmem_cache_subsystem_setup_prepare(kernel_core_base,
                                             kernel_core_top,
                                             sizeof(struct sos_kmem_range),
                                             & first_struct_slab_of_caches,
                                             & first_slab_of_caches_base,
                                             & first_slab_of_caches_nb_pages,
                                             & first_struct_slab_of_ranges,
                                             & first_slab_of_ranges_base,
                                             & first_slab_of_ranges_nb_pages);
  SOS_ASSERT_FATAL(kmem_range_cache != NULL);

  /* Free: bottom of kernel space up to the BIOS/video area */
  create_range(TRUE,
               SOS_KMEM_VMM_BASE,
               SOS_PAGE_ALIGN_INF(BIOS_N_VIDEO_START),
               NULL);

  /* Used (identity-mapped, never freed): BIOS + video memory */
  create_range(FALSE,
               SOS_PAGE_ALIGN_INF(BIOS_N_VIDEO_START),
               SOS_PAGE_ALIGN_SUP(BIOS_N_VIDEO_END),
               NULL);

  /* Free: hole between the video area and the kernel image */
  create_range(TRUE,
               SOS_PAGE_ALIGN_SUP(BIOS_N_VIDEO_END),
               SOS_PAGE_ALIGN_INF(kernel_core_base),
               NULL);

  /* Used: kernel code/data below the bootstrap stack */
  create_range(FALSE,
               SOS_PAGE_ALIGN_INF(kernel_core_base),
               bootstrap_stack_bottom_vaddr,
               NULL);

  /* Used: the bootstrap stack itself */
  create_range(FALSE,
               bootstrap_stack_bottom_vaddr,
               bootstrap_stack_top_vaddr,
               NULL);

  /* Used: remainder of the kernel image above the stack */
  create_range(FALSE,
               bootstrap_stack_top_vaddr,
               SOS_PAGE_ALIGN_SUP(kernel_core_top),
               NULL);

  /* The first slab of caches is expected to sit immediately after the
     kernel image */
  SOS_ASSERT_FATAL(SOS_PAGE_ALIGN_SUP(kernel_core_top)
                   == first_slab_of_caches_base);
  SOS_ASSERT_FATAL(first_struct_slab_of_caches != NULL);
  first_range_of_caches
    = create_range(FALSE,
                   first_slab_of_caches_base,
                   first_slab_of_caches_base
                   + first_slab_of_caches_nb_pages*SOS_PAGE_SIZE,
                   first_struct_slab_of_caches);

  /* ... and the first slab of ranges immediately after that */
  SOS_ASSERT_FATAL((first_slab_of_caches_base
                    + first_slab_of_caches_nb_pages*SOS_PAGE_SIZE)
                   == first_slab_of_ranges_base);
  SOS_ASSERT_FATAL(first_struct_slab_of_ranges != NULL);
  first_range_of_ranges
    = create_range(FALSE,
                   first_slab_of_ranges_base,
                   first_slab_of_ranges_base
                   + first_slab_of_ranges_nb_pages*SOS_PAGE_SIZE,
                   first_struct_slab_of_ranges);

  /* Free: everything above the initial slabs, up to the top of the
     kernel address space */
  create_range(TRUE,
               first_slab_of_ranges_base
               + first_slab_of_ranges_nb_pages*SOS_PAGE_SIZE,
               SOS_KMEM_VMM_TOP,
               NULL);

  /* Phase 2: hand the freshly-created range descriptors back to the
     kslab subsystem so it can finish its own bookkeeping */
  sos_kmem_cache_subsystem_setup_commit(first_struct_slab_of_caches,
                                        first_range_of_caches,
                                        first_struct_slab_of_ranges,
                                        first_range_of_ranges);

  return SOS_OK;
}
318 318
319 319
320 320
321 321
322 322
323 323
324 324
/**
 * Allocate a range of nb_pages contiguous kernel virtual pages.
 *
 * @param nb_pages     Number of pages requested (must be > 0)
 * @param flags        SOS_KMEM_VMM_ATOMIC (never block) and/or
 *                     SOS_KMEM_VMM_MAP (back the range with physical pages)
 * @param range_start  Out (may be NULL): receives the range's base address
 * @return the new used range, or NULL on failure
 */
struct sos_kmem_range *sos_kmem_vmm_new_range(sos_count_t nb_pages,
                                              sos_ui32_t  flags,
                                              sos_vaddr_t * range_start)
{
  struct sos_kmem_range *free_range, *new_range;

  if (nb_pages <= 0)
    return NULL;

  /* First-fit search in the free list */
  free_range = find_suitable_free_range(nb_pages);
  if (free_range == NULL)
    return NULL;

  /* Exact fit: move the whole free range to the used list as-is */
  if(free_range->nb_pages == nb_pages)
    {
      list_delete(kmem_free_range_list, free_range);
      kmem_used_range_list = insert_range(kmem_used_range_list,
                                          free_range);

      new_range = free_range;
    }

  /* Larger than needed: split off the head of the free range into a
     fresh descriptor and shrink the free range in place */
  else
    {
      new_range = (struct sos_kmem_range*)
        sos_kmem_cache_alloc(kmem_range_cache,
                             (flags & SOS_KMEM_VMM_ATOMIC)?
                             SOS_KSLAB_ALLOC_ATOMIC:0);
      if (! new_range)
        return NULL;

      new_range->base_vaddr   = free_range->base_vaddr;
      new_range->nb_pages     = nb_pages;
      free_range->base_vaddr += nb_pages*SOS_PAGE_SIZE;
      free_range->nb_pages   -= nb_pages;

      /* free_range stays in the free list (still sorted: its base only
         moved forward); only the new descriptor goes to the used list */
      kmem_used_range_list = insert_range(kmem_used_range_list,
                                          new_range);
    }

  /* Not backing any slab (a slab may claim it later via set_slab) */
  new_range->slab = NULL;

  /* Optionally map a fresh physical page behind each virtual page */
  if (flags & SOS_KMEM_VMM_MAP)
    {
      unsigned int i;
      for (i = 0 ; i < nb_pages ; i ++)
        {
          /* Grab a new physical page; may block unless ATOMIC */
          sos_paddr_t ppage_paddr
            = sos_physmem_ref_physpage_new(! (flags & SOS_KMEM_VMM_ATOMIC));

          if (ppage_paddr)
            {
              /* Non-zero return from sos_paging_map means failure */
              if (sos_paging_map(ppage_paddr,
                                 new_range->base_vaddr
                                   + i * SOS_PAGE_SIZE,
                                 FALSE /* not a user page */,
                                 ((flags & SOS_KMEM_VMM_ATOMIC)?
                                  SOS_VM_MAP_ATOMIC:0)
                                 | SOS_VM_MAP_PROT_READ
                                 | SOS_VM_MAP_PROT_WRITE))
                {
                  /* Mapping failed: give the page back and record failure */
                  sos_physmem_unref_physpage(ppage_paddr);
                  ppage_paddr = (sos_paddr_t)NULL;
                }
              else
                {
                  /* Mapping succeeded: drop our own reference — presumably
                     sos_paging_map took a reference of its own, leaving the
                     page pinned by the mapping (TODO confirm in paging.c) */
                  sos_physmem_unref_physpage(ppage_paddr);
                }
            }

          /* Allocation or mapping failed: roll back the whole range
             (del_range unmaps the pages mapped so far) */
          if (! ppage_paddr)
            {
              sos_kmem_vmm_del_range(new_range);
              return NULL;
            }

          /* Back-pointer for lookup_range's fast path */
          sos_physmem_set_kmem_range(ppage_paddr, new_range);
        }
    }

  if (range_start)
    *range_start = new_range->base_vaddr;

  return new_range;
}
429 429
430 430
/**
 * Release a used range: unmap its pages, return it to the free list,
 * and coalesce it with adjacent free neighbours.
 *
 * Coalescing may empty a slab of the range cache; the descriptor ranges
 * backing such emptied slabs are queued on ranges_to_free and released
 * through the same loop (hence the do/while worklist instead of
 * recursion).
 *
 * @param range  A range from the used list, with no owning slab
 * @return SOS_OK
 */
sos_ret_t sos_kmem_vmm_del_range(struct sos_kmem_range *range)
{
  struct sos_kmem_range *ranges_to_free;
  list_init(ranges_to_free);

  SOS_ASSERT_FATAL(range != NULL);
  SOS_ASSERT_FATAL(range->slab == NULL);

  /* Remove the range from the bookkeeping of used ranges */
  list_delete(kmem_used_range_list, range);

  do
    {
      unsigned int i;

      /* Hand the range back to the free list (kept sorted) */
      kmem_free_range_list = insert_range(kmem_free_range_list, range);

      /* Unmap every page of the range (no-op for pages never mapped) */
      for (i = 0 ; i < range->nb_pages ; i ++)
        {
          sos_paging_unmap(range->base_vaddr + i*SOS_PAGE_SIZE);
        }

      /* Coalesce with the preceding free range when they are contiguous.
         NOTE(review): prev/next are only meaningful free-list neighbours
         here because the list is sorted and range was just inserted */
      if (range->prev->base_vaddr + range->prev->nb_pages*SOS_PAGE_SIZE
          == range->base_vaddr)
        {
          struct sos_kmem_range *empty_range_of_ranges = NULL;
          struct sos_kmem_range *prec_free = range->prev;

          /* Grow the predecessor and retire this descriptor */
          prec_free->nb_pages += range->nb_pages;
          list_delete(kmem_free_range_list, range);

          /* Returning the descriptor to its cache may empty a slab; in
             that case the slab's own backing range is handed back to us */
          empty_range_of_ranges =
            sos_kmem_cache_release_struct_range(range);

          if (empty_range_of_ranges != NULL)
            {
              /* Queue the now-unused backing range for deletion too */
              list_delete(kmem_used_range_list, empty_range_of_ranges);
              list_add_tail(ranges_to_free, empty_range_of_ranges);
            }

          /* Continue coalescing from the merged predecessor */
          range = prec_free;
        }

      /* Coalesce with the following free range when contiguous */
      if (range->base_vaddr + range->nb_pages*SOS_PAGE_SIZE
          == range->next->base_vaddr)
        {
          struct sos_kmem_range *empty_range_of_ranges = NULL;
          struct sos_kmem_range *next_range = range->next;

          /* Absorb the successor and retire its descriptor */
          range->nb_pages += next_range->nb_pages;
          list_delete(kmem_free_range_list, next_range);

          /* Same slab-emptying possibility as above */
          empty_range_of_ranges =
            sos_kmem_cache_release_struct_range(next_range);

          if (empty_range_of_ranges != NULL)
            {
              list_delete(kmem_used_range_list, empty_range_of_ranges);
              list_add_tail(ranges_to_free, empty_range_of_ranges);
            }
        }

      /* Pull the next queued range, if any, and repeat */
      if (list_is_empty(ranges_to_free))
        range = NULL;
      else
        range = list_pop_head(ranges_to_free);

    }
  while (range != NULL);

  return SOS_OK;
}
549 550
550 551
551 sos_vaddr_t sos_kmem_vmm_alloc(sos_count_t nb_ 552 sos_vaddr_t sos_kmem_vmm_alloc(sos_count_t nb_pages,
552 sos_ui32_t fla 553 sos_ui32_t flags)
553 { 554 {
554 struct sos_kmem_range *range 555 struct sos_kmem_range *range
555 = sos_kmem_vmm_new_range(nb_pages, 556 = sos_kmem_vmm_new_range(nb_pages,
556 flags, 557 flags,
557 NULL); 558 NULL);
558 if (! range) 559 if (! range)
559 return (sos_vaddr_t)NULL; 560 return (sos_vaddr_t)NULL;
560 561
561 return range->base_vaddr; 562 return range->base_vaddr;
562 } 563 }
563 564
564 565
565 sos_ret_t sos_kmem_vmm_free(sos_vaddr_t vaddr) 566 sos_ret_t sos_kmem_vmm_free(sos_vaddr_t vaddr)
566 { 567 {
567 struct sos_kmem_range *range = lookup_range( 568 struct sos_kmem_range *range = lookup_range(vaddr);
568 569
569 570
570 571
571 if (!range || (range->base_vaddr != vaddr)) 572 if (!range || (range->base_vaddr != vaddr))
572 return -SOS_EINVAL; 573 return -SOS_EINVAL;
573 574
574 575
575 if (range->slab != NULL) 576 if (range->slab != NULL)
576 return -SOS_EBUSY; 577 return -SOS_EBUSY;
577 578
578 return sos_kmem_vmm_del_range(range); 579 return sos_kmem_vmm_del_range(range);
579 } 580 }
580 581
581 582
582 sos_ret_t sos_kmem_vmm_set_slab(struct sos_kme 583 sos_ret_t sos_kmem_vmm_set_slab(struct sos_kmem_range *range,
583 struct sos_ksl 584 struct sos_kslab *slab)
584 { 585 {
585 if (! range) 586 if (! range)
586 return -SOS_EINVAL; 587 return -SOS_EINVAL;
587 588
588 range->slab = slab; 589 range->slab = slab;
589 return SOS_OK; 590 return SOS_OK;
590 } 591 }
591 592
592 struct sos_kslab * sos_kmem_vmm_resolve_slab(s 593 struct sos_kslab * sos_kmem_vmm_resolve_slab(sos_vaddr_t vaddr)
593 { 594 {
594 struct sos_kmem_range *range = lookup_range( 595 struct sos_kmem_range *range = lookup_range(vaddr);
595 if (! range) 596 if (! range)
596 return NULL; 597 return NULL;
597 598
598 return range->slab; 599 return range->slab;
599 } 600 }
600 601
601 602
602 sos_bool_t sos_kmem_vmm_is_valid_vaddr(sos_vad 603 sos_bool_t sos_kmem_vmm_is_valid_vaddr(sos_vaddr_t vaddr)
603 { 604 {
604 struct sos_kmem_range *range = lookup_range( 605 struct sos_kmem_range *range = lookup_range(vaddr);
605 return (range != NULL); 606 return (range != NULL);
606 } 607 }