Diff markup
001 001
002 002
003 003
004 004
005 005
006 006
007 007
008 008
009 009
010 010
011 011
012 012
013 013
014 014
015 015
016 016
017 017
018 018
019 019
020 #include <sos/list.h> 020 #include <sos/list.h>
021 #include <sos/physmem.h> 021 #include <sos/physmem.h>
022 #include <hwcore/paging.h> 022 #include <hwcore/paging.h>
023 #include <sos/assert.h> 023 #include <sos/assert.h>
024 024
025 #include "kmem_vmm.h" 025 #include "kmem_vmm.h"
026 026
027 027
/*
 * A contiguous range of kernel virtual pages.  Ranges are chained in
 * ascending base_vaddr order on exactly one of the two file-scope lists
 * (free or used) via the prev/next links.
 */
struct sos_kmem_range
{
  sos_vaddr_t base_vaddr;  /* Page-aligned start of the range */
  sos_count_t nb_pages;    /* Length of the range, in pages */

  /* Slab owning this range when it backs a kmem cache; NULL for
     ranges handed out directly through sos_kmem_vmm_new_range() */
  struct sos_kslab *slab;

  /* Links for the circular free/used range lists (project list.h macros) */
  struct sos_kmem_range *prev, *next;
};
/* Exported so code without access to this definition (e.g. the slab
   allocator sizing its cache) can know the structure's size. */
const int sizeof_struct_sos_kmem_range = sizeof(struct sos_kmem_range);
039 039
040 040
/* The two range lists: free (available virtual space) and used
   (allocated ranges), each kept sorted by base_vaddr. */
static struct sos_kmem_range *kmem_free_range_list, *kmem_used_range_list;

/* Slab cache from which struct sos_kmem_range descriptors are allocated. */
static struct sos_kslab_cache *kmem_range_cache;
045 045
046 046
047 047
048 048
049 049
/*
 * Return the last range in the_list whose base_vaddr is <= vaddr, or
 * NULL when every range starts above vaddr (or the list is empty).
 * Relies on the_list being sorted by ascending base_vaddr.
 */
static struct sos_kmem_range *
get_closest_preceding_kmem_range(struct sos_kmem_range *the_list,
				 sos_vaddr_t vaddr)
{
  int nb_elements;
  struct sos_kmem_range *a_range, *ret_range;

  /* Walk the sorted list; stop at the first range starting past vaddr
     and return its predecessor. */
  ret_range = NULL;
  list_foreach(the_list, a_range, nb_elements)
    {
      if (vaddr < a_range->base_vaddr)
	return ret_range;
      ret_range = a_range;
    }

  /* vaddr lies at/after the start of the last range (or list empty). */
  return ret_range;
}
070 070
071 071
072 072
073 073
074 074
075 075
/*
 * First-fit search of the free list: return the first free range large
 * enough to hold nb_pages, or NULL when none is.
 */
static struct sos_kmem_range *find_suitable_free_range(sos_count_t nb_pages)
{
  int nb_elements;
  struct sos_kmem_range *r;

  list_foreach(kmem_free_range_list, r, nb_elements)
    {
      if (r->nb_pages >= nb_pages)
	return r;
    }

  return NULL;
}
089 089
090 090
091 091
092 092
093 093
094 094
095 095
/*
 * Insert a_range into the_list, keeping the list sorted by ascending
 * base_vaddr.  Returns the (possibly updated) list head; callers must
 * assign it back, since inserting before the first element changes the
 * head.
 */
static struct sos_kmem_range *insert_range(struct sos_kmem_range *the_list,
					   struct sos_kmem_range *a_range)
{
  struct sos_kmem_range *prec_used;

  /* Find the range after which a_range belongs. */
  prec_used = get_closest_preceding_kmem_range(the_list,
					       a_range->base_vaddr);

  if (prec_used != NULL)
    list_insert_after(the_list, prec_used, a_range);
  else
    /* No predecessor: a_range becomes the new first element. */
    list_add_head(the_list, a_range);

  return the_list;
}
112 112
113 113
114 114
115 115
116 116
117 117
/*
 * Find the used range covering vaddr, or NULL when vaddr is not inside
 * any used range.  Two strategies: if vaddr is mapped, the physical
 * page descriptor stores a back-pointer to its owning range; otherwise
 * fall back to a search of the sorted used-range list.
 */
static struct sos_kmem_range *lookup_range(sos_vaddr_t vaddr)
{
  struct sos_kmem_range *range;

  /* Physical page backing vaddr, if any (0 when unmapped). */
  sos_paddr_t ppage_paddr = SOS_PAGE_ALIGN_INF(sos_paging_get_paddr(vaddr));

  if (ppage_paddr)
    {
      /* Mapped page: physmem remembers which kmem range owns it. */
      range = sos_physmem_get_kmem_range(ppage_paddr);

      /* A mapped kernel page must always belong to a range. */
      SOS_ASSERT_FATAL(range != NULL);
    }

  /* Unmapped address: it may still belong to an unmapped used range
     (e.g. allocated without SOS_KMEM_VMM_MAP), so search the list. */
  else
    {
      range = get_closest_preceding_kmem_range(kmem_used_range_list,
					       vaddr);

      if (! range)
	return NULL;

      /* Reject vaddr if it falls outside the candidate range. */
      if ( (vaddr < range->base_vaddr)
	   || (vaddr >= (range->base_vaddr + range->nb_pages*SOS_PAGE_SIZE)) )
	return NULL;
    }

  return range;
}
151 152
152 153
153 154
154 155
155 156
156 157
157 158
/*
 * Allocate and register a new range descriptor covering
 * [base_vaddr .. top_vaddr).  Both bounds must be page aligned.  A free
 * range is appended to the free list; a used range is appended to the
 * used list, tagged with associated_slab, and every already-mapped page
 * in it gets its physmem back-pointer set to the new range.
 *
 * Returns the new range, or NULL when the interval is under one page.
 * NOTE(review): append via list_add_tail assumes ranges are created in
 * ascending address order (true during subsystem setup) — confirm for
 * any other caller.
 */
static struct sos_kmem_range *
create_range(sos_bool_t is_free,
	     sos_vaddr_t base_vaddr,
	     sos_vaddr_t top_vaddr,
	     struct sos_kslab *associated_slab)
{
  struct sos_kmem_range *range;

  SOS_ASSERT_FATAL(SOS_IS_PAGE_ALIGNED(base_vaddr));
  SOS_ASSERT_FATAL(SOS_IS_PAGE_ALIGNED(top_vaddr));

  /* Empty or sub-page interval: nothing to describe. */
  if ((top_vaddr - base_vaddr) < SOS_PAGE_SIZE)
    return NULL;

  /* Atomic allocation: this runs during setup / under constraints where
     blocking is not acceptable. */
  range = (struct sos_kmem_range*)sos_kmem_cache_alloc(kmem_range_cache,
						       SOS_KSLAB_ALLOC_ATOMIC);
  SOS_ASSERT_FATAL(range != NULL);

  range->base_vaddr = base_vaddr;
  range->nb_pages   = (top_vaddr - base_vaddr) / SOS_PAGE_SIZE;

  if (is_free)
    {
      list_add_tail(kmem_free_range_list,
		    range);
    }
  else
    {
      sos_vaddr_t vaddr;
      range->slab = associated_slab;
      list_add_tail(kmem_used_range_list,
		    range);

      /* Record, for every physical page backing this range, that it
	 belongs to this range (used later by lookup_range). */
      for (vaddr = base_vaddr ;
	   vaddr < top_vaddr ;
	   vaddr += SOS_PAGE_SIZE)
	{
	  sos_paddr_t ppage_paddr = sos_paging_get_paddr(vaddr);
	  /* A used range created here is expected to be fully mapped. */
	  SOS_ASSERT_FATAL((void*)ppage_paddr != NULL);
	  sos_physmem_set_kmem_range(ppage_paddr, range);
	}
    }

  return range;
}
204 205
205 206
/*
 * Bootstrap the kernel VMM: prime the slab subsystem so that range
 * descriptors can be allocated, then describe the initial layout of the
 * kernel virtual space as an alternating sequence of free/used ranges:
 *
 *   [SOS_KMEM_VMM_BASE .. BIOS/video)       free
 *   [BIOS/video area]                        used (must stay mapped)
 *   [end of BIOS/video .. kernel code)       free
 *   [kernel code .. bootstrap stack)         used
 *   [bootstrap stack]                        used
 *   [stack top .. kernel_core_top]           used
 *   [first slab of caches][first slab of ranges]  used (slab-owned)
 *   [end of slabs .. SOS_KMEM_VMM_TOP]       free
 *
 * Finally commit the bootstrap slabs/ranges back to the cache subsystem.
 * Returns SOS_OK (fatal asserts fire on any inconsistency).
 */
sos_ret_t
sos_kmem_vmm_subsystem_setup(sos_vaddr_t kernel_core_base,
			     sos_vaddr_t kernel_core_top,
			     sos_vaddr_t bootstrap_stack_bottom_vaddr,
			     sos_vaddr_t bootstrap_stack_top_vaddr)
{
  struct sos_kslab *first_struct_slab_of_caches,
    *first_struct_slab_of_ranges;
  sos_vaddr_t first_slab_of_caches_base,
    first_slab_of_caches_nb_pages,
    first_slab_of_ranges_base,
    first_slab_of_ranges_nb_pages;
  struct sos_kmem_range *first_range_of_caches,
    *first_range_of_ranges;

  list_init(kmem_free_range_list);
  list_init(kmem_used_range_list);

  /* Chicken-and-egg: range descriptors come from a slab cache, but the
     slab allocator needs ranges.  The cache subsystem's "prepare" step
     hand-builds the first slabs and reports where it placed them. */
  kmem_range_cache
    = sos_kmem_cache_subsystem_setup_prepare(kernel_core_base,
					     kernel_core_top,
					     sizeof(struct sos_kmem_range),
					     & first_struct_slab_of_caches,
					     & first_slab_of_caches_base,
					     & first_slab_of_caches_nb_pages,
					     & first_struct_slab_of_ranges,
					     & first_slab_of_ranges_base,
					     & first_slab_of_ranges_nb_pages);
  SOS_ASSERT_FATAL(kmem_range_cache != NULL);

  /* Free space below the BIOS/video area. */
  create_range(TRUE,
	       SOS_KMEM_VMM_BASE,
	       SOS_PAGE_ALIGN_INF(BIOS_N_VIDEO_START),
	       NULL);

  /* BIOS/video memory: reserved, never handed out. */
  create_range(FALSE,
	       SOS_PAGE_ALIGN_INF(BIOS_N_VIDEO_START),
	       SOS_PAGE_ALIGN_SUP(BIOS_N_VIDEO_END),
	       NULL);

  /* Free space between BIOS/video area and the kernel image. */
  create_range(TRUE,
	       SOS_PAGE_ALIGN_SUP(BIOS_N_VIDEO_END),
	       SOS_PAGE_ALIGN_INF(kernel_core_base),
	       NULL);

  /* Kernel code/data up to the bootstrap stack. */
  create_range(FALSE,
	       SOS_PAGE_ALIGN_INF(kernel_core_base),
	       bootstrap_stack_bottom_vaddr,
	       NULL);

  /* The bootstrap stack itself (kept as its own range so it can be
     reclaimed independently later). */
  create_range(FALSE,
	       bootstrap_stack_bottom_vaddr,
	       bootstrap_stack_top_vaddr,
	       NULL);

  /* Remainder of the kernel image above the stack. */
  create_range(FALSE,
	       bootstrap_stack_top_vaddr,
	       SOS_PAGE_ALIGN_SUP(kernel_core_top),
	       NULL);

  /* The prepare step placed the first slab of caches immediately after
     the kernel image — verify, then register it as a used range owned
     by that slab. */
  SOS_ASSERT_FATAL(SOS_PAGE_ALIGN_SUP(kernel_core_top)
		   == first_slab_of_caches_base);
  SOS_ASSERT_FATAL(first_struct_slab_of_caches != NULL);
  first_range_of_caches
    = create_range(FALSE,
		   first_slab_of_caches_base,
		   first_slab_of_caches_base
		   + first_slab_of_caches_nb_pages*SOS_PAGE_SIZE,
		   first_struct_slab_of_caches);

  /* Likewise the first slab of ranges, directly after the slab of
     caches. */
  SOS_ASSERT_FATAL((first_slab_of_caches_base
		    + first_slab_of_caches_nb_pages*SOS_PAGE_SIZE)
		   == first_slab_of_ranges_base);
  SOS_ASSERT_FATAL(first_struct_slab_of_ranges != NULL);
  first_range_of_ranges
    = create_range(FALSE,
		   first_slab_of_ranges_base,
		   first_slab_of_ranges_base
		   + first_slab_of_ranges_nb_pages*SOS_PAGE_SIZE,
		   first_struct_slab_of_ranges);

  /* Everything above the bootstrap slabs is free. */
  create_range(TRUE,
	       first_slab_of_ranges_base
	       + first_slab_of_ranges_nb_pages*SOS_PAGE_SIZE,
	       SOS_KMEM_VMM_TOP,
	       NULL);

  /* Tell the cache subsystem which ranges now back its bootstrap
     slabs, completing the bootstrap. */
  sos_kmem_cache_subsystem_setup_commit(first_struct_slab_of_caches,
					first_range_of_caches,
					first_struct_slab_of_ranges,
					first_range_of_ranges);

  return SOS_OK;
}
317 318
318 319
319 320
320 321
321 322
322 323
323 324
/*
 * Allocate a used range of nb_pages kernel virtual pages.  With
 * SOS_KMEM_VMM_MAP in flags, also back every page with a fresh physical
 * page mapped read/write.  On success returns the range and, when
 * range_start is non-NULL, stores its base address there; returns NULL
 * on failure (no space, descriptor allocation failure, or mapping
 * failure — in which case everything is rolled back via
 * sos_kmem_vmm_del_range).
 */
struct sos_kmem_range *sos_kmem_vmm_new_range(sos_count_t nb_pages,
					      sos_ui32_t  flags,
					      sos_vaddr_t * range_start)
{
  struct sos_kmem_range *free_range, *new_range;

  if (nb_pages <= 0)
    return NULL;

  /* First-fit over the free list. */
  free_range = find_suitable_free_range(nb_pages);
  if (free_range == NULL)
    return NULL;

  /* Exact fit: move the whole free range to the used list; no new
     descriptor needed. */
  if(free_range->nb_pages == nb_pages)
    {
      list_delete(kmem_free_range_list, free_range);
      kmem_used_range_list = insert_range(kmem_used_range_list,
					  free_range);

      new_range = free_range;
    }

  /* Otherwise split: carve the head of free_range into a new used
     range and shrink free_range in place. */
  else
    {
      /* Descriptor allocation may itself need kernel memory; honour the
	 caller's atomicity constraint. */
      new_range = (struct sos_kmem_range*)
	sos_kmem_cache_alloc(kmem_range_cache,
			     (flags & SOS_KMEM_VMM_ATOMIC)?
			     SOS_KSLAB_ALLOC_ATOMIC:0);
      if (! new_range)
	return NULL;

      new_range->base_vaddr   = free_range->base_vaddr;
      new_range->nb_pages     = nb_pages;
      free_range->base_vaddr += nb_pages*SOS_PAGE_SIZE;
      free_range->nb_pages   -= nb_pages;

      /* free_range stays where it is (its base only grew), but the new
	 used range must be inserted in sorted position. */
      kmem_used_range_list = insert_range(kmem_used_range_list,
					  new_range);
    }

  /* Not owned by any slab (yet — see sos_kmem_vmm_set_slab). */
  new_range->slab = NULL;

  /* Optionally map the pages right away. */
  if (flags & SOS_KMEM_VMM_MAP)
    {
      unsigned int i;
      for (i = 0 ; i < nb_pages ; i ++)
	{
	  /* Grab a fresh physical page (may block unless ATOMIC). */
	  sos_paddr_t ppage_paddr
	    = sos_physmem_ref_physpage_new(! (flags & SOS_KMEM_VMM_ATOMIC));

	  if (ppage_paddr)
	    {
	      if (sos_paging_map(ppage_paddr,
				 new_range->base_vaddr
				   + i * SOS_PAGE_SIZE,
				 FALSE ,
				 ((flags & SOS_KMEM_VMM_ATOMIC)?
				  SOS_VM_MAP_ATOMIC:0)
				 | SOS_VM_MAP_PROT_READ
				 | SOS_VM_MAP_PROT_WRITE))
		{
		  /* Mapping failed: give the page back and flag the
		     failure for the rollback below. */
		  sos_physmem_unref_physpage(ppage_paddr);
		  ppage_paddr = (sos_paddr_t)NULL;
		}
	      else
		{
		  /* Mapping succeeded: drop our own reference; the page
		     table now holds the reference that keeps the page
		     alive. */
		  sos_physmem_unref_physpage(ppage_paddr);
		}
	    }

	  /* No physical page or mapping failed: undo everything done so
	     far (del_range unmaps the pages already mapped). */
	  if (! ppage_paddr)
	    {
	      sos_kmem_vmm_del_range(new_range);
	      return NULL;
	    }

	  /* Back-pointer for lookup_range's fast path. */
	  sos_physmem_set_kmem_range(ppage_paddr, new_range);
	}
    }

  if (range_start)
    *range_start = new_range->base_vaddr;

  return new_range;
}
428 429
429 430
/*
 * Release a used range: unmap its pages, move it to the free list, and
 * coalesce it with adjacent free ranges.  Merging a range may empty a
 * slab of the range-descriptor cache; the cache then hands back the
 * range that backed that slab, which must itself be freed — hence the
 * ranges_to_free work list and the outer do/while, which keeps going
 * until no more cascaded ranges appear.
 *
 * range must be non-NULL and must not be slab-owned (asserted).
 * Returns SOS_OK.
 */
sos_ret_t sos_kmem_vmm_del_range(struct sos_kmem_range *range)
{
  /* Ranges whose freeing was triggered by releasing descriptors. */
  struct sos_kmem_range *ranges_to_free;
  list_init(ranges_to_free);

  SOS_ASSERT_FATAL(range != NULL);
  SOS_ASSERT_FATAL(range->slab == NULL);

  /* Detach from the used list before recycling. */
  list_delete(kmem_used_range_list, range);

  do
    {
      unsigned int i;

      /* Put the range on the free list, in sorted position. */
      kmem_free_range_list = insert_range(kmem_free_range_list, range);

      /* Unmap every page; sos_paging_unmap tolerance for pages that
	 were never mapped is assumed here — TODO confirm. */
      for (i = 0 ; i < range->nb_pages ; i ++)
	{
	  sos_paging_unmap(range->base_vaddr + i*SOS_PAGE_SIZE);
	}

      /* Coalesce with the preceding free range when contiguous.
	 NOTE(review): prev/next wrap around on a circular list — the
	 address-contiguity test is what prevents a bogus wrap-around
	 merge. */
      if (range->prev->base_vaddr + range->prev->nb_pages*SOS_PAGE_SIZE
	  == range->base_vaddr)
	{
	  struct sos_kmem_range *empty_range_of_ranges = NULL;
	  struct sos_kmem_range *prec_free = range->prev;

	  /* Grow the predecessor and retire this descriptor. */
	  prec_free->nb_pages += range->nb_pages;
	  list_delete(kmem_free_range_list, range);

	  /* Returning the descriptor may empty a slab, in which case
	     the cache returns the range backing that slab. */
	  empty_range_of_ranges =
	    sos_kmem_cache_release_struct_range(range);

	  if (empty_range_of_ranges != NULL)
	    {
	      list_delete(kmem_used_range_list, empty_range_of_ranges);
	      list_add_tail(ranges_to_free, empty_range_of_ranges);
	    }

	  /* Continue coalescing from the merged range. */
	  range = prec_free;
	}

      /* Coalesce with the following free range when contiguous. */
      if (range->base_vaddr + range->nb_pages*SOS_PAGE_SIZE
	  == range->next->base_vaddr)
	{
	  struct sos_kmem_range *empty_range_of_ranges = NULL;
	  struct sos_kmem_range *next_range = range->next;

	  /* Absorb the successor and retire its descriptor. */
	  range->nb_pages += next_range->nb_pages;
	  list_delete(kmem_free_range_list, next_range);

	  /* Same cascaded-slab handling as above. */
	  empty_range_of_ranges =
	    sos_kmem_cache_release_struct_range(next_range);

	  if (empty_range_of_ranges != NULL)
	    {
	      list_delete(kmem_used_range_list, empty_range_of_ranges);
	      list_add_tail(ranges_to_free, empty_range_of_ranges);
	    }
	}

      /* Process the next cascaded range, if any. */
      if (list_is_empty(ranges_to_free))
	range = NULL;
      else
	range = list_pop_head(ranges_to_free);

    }
  while (range != NULL);

  return SOS_OK;
}
548 550
549 551
550 sos_vaddr_t sos_kmem_vmm_alloc(sos_count_t nb_ 552 sos_vaddr_t sos_kmem_vmm_alloc(sos_count_t nb_pages,
551 sos_ui32_t fla 553 sos_ui32_t flags)
552 { 554 {
553 struct sos_kmem_range *range 555 struct sos_kmem_range *range
554 = sos_kmem_vmm_new_range(nb_pages, 556 = sos_kmem_vmm_new_range(nb_pages,
555 flags, 557 flags,
556 NULL); 558 NULL);
557 if (! range) 559 if (! range)
558 return (sos_vaddr_t)NULL; 560 return (sos_vaddr_t)NULL;
559 561
560 return range->base_vaddr; 562 return range->base_vaddr;
561 } 563 }
562 564
563 565
564 sos_ret_t sos_kmem_vmm_free(sos_vaddr_t vaddr) 566 sos_ret_t sos_kmem_vmm_free(sos_vaddr_t vaddr)
565 { 567 {
566 struct sos_kmem_range *range = lookup_range( 568 struct sos_kmem_range *range = lookup_range(vaddr);
567 569
568 570
569 571
570 if (!range || (range->base_vaddr != vaddr)) 572 if (!range || (range->base_vaddr != vaddr))
571 return -SOS_EINVAL; 573 return -SOS_EINVAL;
572 574
573 575
574 if (range->slab != NULL) 576 if (range->slab != NULL)
575 return -SOS_EBUSY; 577 return -SOS_EBUSY;
576 578
577 return sos_kmem_vmm_del_range(range); 579 return sos_kmem_vmm_del_range(range);
578 } 580 }
579 581
580 582
581 sos_ret_t sos_kmem_vmm_set_slab(struct sos_kme 583 sos_ret_t sos_kmem_vmm_set_slab(struct sos_kmem_range *range,
582 struct sos_ksl 584 struct sos_kslab *slab)
583 { 585 {
584 if (! range) 586 if (! range)
585 return -SOS_EINVAL; 587 return -SOS_EINVAL;
586 588
587 range->slab = slab; 589 range->slab = slab;
588 return SOS_OK; 590 return SOS_OK;
589 } 591 }
590 592
591 struct sos_kslab * sos_kmem_vmm_resolve_slab(s 593 struct sos_kslab * sos_kmem_vmm_resolve_slab(sos_vaddr_t vaddr)
592 { 594 {
593 struct sos_kmem_range *range = lookup_range( 595 struct sos_kmem_range *range = lookup_range(vaddr);
594 if (! range) 596 if (! range)
595 return NULL; 597 return NULL;
596 598
597 return range->slab; 599 return range->slab;
598 } 600 }
599 601
600 602
601 sos_bool_t sos_kmem_vmm_is_valid_vaddr(sos_vad 603 sos_bool_t sos_kmem_vmm_is_valid_vaddr(sos_vaddr_t vaddr)
602 { 604 {
603 struct sos_kmem_range *range = lookup_range( 605 struct sos_kmem_range *range = lookup_range(vaddr);
604 return (range != NULL); 606 return (range != NULL);
605 } 607 }