Diff markup
001 001
002 002
003 003
004 004
005 005
006 006
007 007
008 008
009 009
010 010
011 011
012 012
013 013
014 014
015 015
016 016
017 017
018 018
019 019
020 #include <sos/list.h> 020 #include <sos/list.h>
021 #include <sos/physmem.h> 021 #include <sos/physmem.h>
022 #include <hwcore/paging.h> 022 #include <hwcore/paging.h>
023 #include <sos/assert.h> 023 #include <sos/assert.h>
024 024
025 #include "kmem_vmm.h" 025 #include "kmem_vmm.h"
026 026
027 027
/* A contiguous region of kernel virtual address space, counted in
   whole pages.  Ranges live on sorted doubly-linked lists (free list
   or used list) via the prev/next fields and the sos/list.h macros. */
struct sos_kmem_range
{
  sos_vaddr_t base_vaddr;   /* Page-aligned start address of the range */
  sos_count_t nb_pages;     /* Length of the range, in pages */

  /* Slab this range backs, or NULL when the range does not belong to
     a slab (plain sos_kmem_vmm_alloc allocations, free ranges) */
  struct sos_kslab *slab;

  struct sos_kmem_range *prev, *next;
};
/* Size of the structure, exported as a plain int — presumably so that
   modules which only see an opaque forward declaration can still know
   its size; TODO confirm against the users of this symbol. */
const int sizeof_struct_sos_kmem_range = sizeof(struct sos_kmem_range);
039 039
040 040
041 static struct sos_kmem_range *kmem_free_range_ 041 static struct sos_kmem_range *kmem_free_range_list, *kmem_used_range_list;
042 042
043 043
044 static struct sos_kslab_cache *kmem_range_cach 044 static struct sos_kslab_cache *kmem_range_cache;
045 045
046 046
047 047
048 048
049 049
050 static struct sos_kmem_range * 050 static struct sos_kmem_range *
051 get_closest_preceding_kmem_range(struct sos_km 051 get_closest_preceding_kmem_range(struct sos_kmem_range *the_list,
052 sos_vaddr_t v 052 sos_vaddr_t vaddr)
053 { 053 {
054 int nb_elements; 054 int nb_elements;
055 struct sos_kmem_range *a_range, *ret_range; 055 struct sos_kmem_range *a_range, *ret_range;
056 056
057 057
058 058
059 ret_range = NULL; 059 ret_range = NULL;
060 list_foreach(the_list, a_range, nb_elements) 060 list_foreach(the_list, a_range, nb_elements)
061 { 061 {
062 if (vaddr < a_range->base_vaddr) 062 if (vaddr < a_range->base_vaddr)
063 return ret_range; 063 return ret_range;
064 ret_range = a_range; 064 ret_range = a_range;
065 } 065 }
066 066
067 067
068 return ret_range; 068 return ret_range;
069 } 069 }
070 070
071 071
072 072
073 073
074 074
075 075
076 static struct sos_kmem_range *find_suitable_fr 076 static struct sos_kmem_range *find_suitable_free_range(sos_count_t nb_pages)
077 { 077 {
078 int nb_elements; 078 int nb_elements;
079 struct sos_kmem_range *r; 079 struct sos_kmem_range *r;
080 080
081 list_foreach(kmem_free_range_list, r, nb_ele 081 list_foreach(kmem_free_range_list, r, nb_elements)
082 { 082 {
083 if (r->nb_pages >= nb_pages) 083 if (r->nb_pages >= nb_pages)
084 return r; 084 return r;
085 } 085 }
086 086
087 return NULL; 087 return NULL;
088 } 088 }
089 089
090 090
091 091
092 092
093 093
094 094
095 095
096 static struct sos_kmem_range *insert_range(str 096 static struct sos_kmem_range *insert_range(struct sos_kmem_range *the_list,
097 str 097 struct sos_kmem_range *a_range)
098 { 098 {
099 struct sos_kmem_range *prec_used; 099 struct sos_kmem_range *prec_used;
100 100
101 101
102 prec_used = get_closest_preceding_kmem_range 102 prec_used = get_closest_preceding_kmem_range(the_list,
103 103 a_range->base_vaddr);
104 104
105 if (prec_used != NULL) 105 if (prec_used != NULL)
106 list_insert_after(the_list, prec_used, a_r 106 list_insert_after(the_list, prec_used, a_range);
107 else 107 else
108 list_add_head(the_list, a_range); 108 list_add_head(the_list, a_range);
109 109
110 return the_list; 110 return the_list;
111 } 111 }
112 112
113 113
114 114
115 115
116 116
117 117
/*
 * Find the used range covering vaddr, or NULL when vaddr lies in no
 * used range.  Two lookup paths: mapped addresses go through the
 * physical page's back-pointer, unmapped ones through a scan of the
 * sorted used list.
 */
static struct sos_kmem_range *lookup_range(sos_vaddr_t vaddr)
{
  struct sos_kmem_range *range;

  /* Physical page backing vaddr, or 0 when vaddr is not mapped */
  sos_paddr_t ppage_paddr = SOS_PAGE_ALIGN_INF(sos_paging_get_paddr(vaddr));

  if (ppage_paddr)
    {
      /* Mapped page: physmem records which kmem range owns it */
      range = sos_physmem_get_kmem_range(ppage_paddr);

      /* A mapped kernel page must always belong to a range */
      SOS_ASSERT_FATAL(range != NULL);
    }

  /* The page is not mapped: fall back to searching the used list for
     a range whose [base, base + nb_pages*PAGE_SIZE) span covers vaddr */
  else
    {
      range = get_closest_preceding_kmem_range(kmem_used_range_list,
                                               vaddr);

      if (! range)
        return NULL;

      /* Predecessor found, but vaddr may still fall in the gap after it */
      if ( (vaddr < range->base_vaddr)
           || (vaddr >= (range->base_vaddr + range->nb_pages*SOS_PAGE_SIZE)) )
        return NULL;
    }

  return range;
}
152 151
153 152
154 153
155 154
156 155
157 156
158 157
/*
 * Boot-time helper: carve out the range [base_vaddr, top_vaddr) and
 * register it as free or used.  Both bounds must be page aligned; an
 * empty (< one page) span yields NULL.  Used ranges additionally get
 * their physical pages' back-pointers set, so lookup_range can find
 * them via physmem.
 */
static struct sos_kmem_range *
create_range(sos_bool_t is_free,
             sos_vaddr_t base_vaddr,
             sos_vaddr_t top_vaddr,
             struct sos_kslab *associated_slab)
{
  struct sos_kmem_range *range;

  SOS_ASSERT_FATAL(SOS_IS_PAGE_ALIGNED(base_vaddr));
  SOS_ASSERT_FATAL(SOS_IS_PAGE_ALIGNED(top_vaddr));

  if ((top_vaddr - base_vaddr) < SOS_PAGE_SIZE)
    return NULL;

  /* ATOMIC: this runs at setup time, blocking is not an option */
  range = (struct sos_kmem_range*)sos_kmem_cache_alloc(kmem_range_cache,
                                                       SOS_KSLAB_ALLOC_ATOMIC);
  SOS_ASSERT_FATAL(range != NULL);

  range->base_vaddr = base_vaddr;
  range->nb_pages   = (top_vaddr - base_vaddr) / SOS_PAGE_SIZE;

  if (is_free)
    {
      /* NOTE(review): range->slab is left unset on this path; callers
         appear to rely on used-range conversion setting it later —
         confirm no reader sees a free range's slab field. */
      list_add_tail(kmem_free_range_list,
                    range);
    }
  else
    {
      sos_vaddr_t vaddr;
      range->slab = associated_slab;
      list_add_tail(kmem_used_range_list,
                    range);

      /* Record, for each physical page in the range, which range owns it */
      for (vaddr = base_vaddr ;
           vaddr < top_vaddr ;
           vaddr += SOS_PAGE_SIZE)
        {
          sos_paddr_t ppage_paddr = sos_paging_get_paddr(vaddr);
          /* Used boot ranges are expected to be already mapped */
          SOS_ASSERT_FATAL((void*)ppage_paddr != NULL);
          sos_physmem_set_kmem_range(ppage_paddr, range);
        }
    }

  return range;
}
205 204
206 205
/*
 * Initialize the kernel virtual memory allocator.  Bootstraps the
 * range slab cache, then describes the whole kernel virtual space
 * [SOS_KMEM_VMM_BASE, SOS_KMEM_VMM_TOP) as an alternation of free and
 * used ranges: BIOS/video area, kernel code/data, bootstrap stack,
 * and the two initial slabs created by the cache-subsystem prepare
 * step.  Returns SOS_OK (all failures are fatal assertions).
 */
sos_ret_t
sos_kmem_vmm_subsystem_setup(sos_vaddr_t kernel_core_base,
                             sos_vaddr_t kernel_core_top,
                             sos_vaddr_t bootstrap_stack_bottom_vaddr,
                             sos_vaddr_t bootstrap_stack_top_vaddr)
{
  struct sos_kslab *first_struct_slab_of_caches,
    *first_struct_slab_of_ranges;
  sos_vaddr_t first_slab_of_caches_base,
    first_slab_of_caches_nb_pages,
    first_slab_of_ranges_base,
    first_slab_of_ranges_nb_pages;
  struct sos_kmem_range *first_range_of_caches,
    *first_range_of_ranges;

  list_init(kmem_free_range_list);
  list_init(kmem_used_range_list);

  /* Bootstrap the slab cache that range structures come from; the
     prepare step reports where it placed its two initial slabs so we
     can describe them as ranges below. */
  kmem_range_cache
    = sos_kmem_cache_subsystem_setup_prepare(kernel_core_base,
                                             kernel_core_top,
                                             sizeof(struct sos_kmem_range),
                                             & first_struct_slab_of_caches,
                                             & first_slab_of_caches_base,
                                             & first_slab_of_caches_nb_pages,
                                             & first_struct_slab_of_ranges,
                                             & first_slab_of_ranges_base,
                                             & first_slab_of_ranges_nb_pages);
  SOS_ASSERT_FATAL(kmem_range_cache != NULL);

  /* Free: from the bottom of kernel space up to the BIOS/video area */
  create_range(TRUE,
               SOS_KMEM_VMM_BASE,
               SOS_PAGE_ALIGN_INF(BIOS_N_VIDEO_START),
               NULL);

  /* Used: the BIOS/video memory hole */
  create_range(FALSE,
               SOS_PAGE_ALIGN_INF(BIOS_N_VIDEO_START),
               SOS_PAGE_ALIGN_SUP(BIOS_N_VIDEO_END),
               NULL);

  /* Free: gap between the BIOS/video area and the kernel image */
  create_range(TRUE,
               SOS_PAGE_ALIGN_SUP(BIOS_N_VIDEO_END),
               SOS_PAGE_ALIGN_INF(kernel_core_base),
               NULL);

  /* Used: kernel image, up to the bootstrap stack */
  create_range(FALSE,
               SOS_PAGE_ALIGN_INF(kernel_core_base),
               bootstrap_stack_bottom_vaddr,
               NULL);

  /* Used: the bootstrap stack, kept as its own range */
  create_range(FALSE,
               bootstrap_stack_bottom_vaddr,
               bootstrap_stack_top_vaddr,
               NULL);

  /* Used: remainder of the kernel core above the stack */
  create_range(FALSE,
               bootstrap_stack_top_vaddr,
               SOS_PAGE_ALIGN_SUP(kernel_core_top),
               NULL);

  /* The first slab of caches must sit immediately after the kernel
     core: register it as a used range owned by that slab */
  SOS_ASSERT_FATAL(SOS_PAGE_ALIGN_SUP(kernel_core_top)
                   == first_slab_of_caches_base);
  SOS_ASSERT_FATAL(first_struct_slab_of_caches != NULL);
  first_range_of_caches
    = create_range(FALSE,
                   first_slab_of_caches_base,
                   first_slab_of_caches_base
                   + first_slab_of_caches_nb_pages*SOS_PAGE_SIZE,
                   first_struct_slab_of_caches);

  /* Likewise the first slab of ranges, immediately after the slab of
     caches */
  SOS_ASSERT_FATAL((first_slab_of_caches_base
                    + first_slab_of_caches_nb_pages*SOS_PAGE_SIZE)
                   == first_slab_of_ranges_base);
  SOS_ASSERT_FATAL(first_struct_slab_of_ranges != NULL);
  first_range_of_ranges
    = create_range(FALSE,
                   first_slab_of_ranges_base,
                   first_slab_of_ranges_base
                   + first_slab_of_ranges_nb_pages*SOS_PAGE_SIZE,
                   first_struct_slab_of_ranges);

  /* Free: everything above the initial slabs, up to the top of kernel
     space */
  create_range(TRUE,
               first_slab_of_ranges_base
               + first_slab_of_ranges_nb_pages*SOS_PAGE_SIZE,
               SOS_KMEM_VMM_TOP,
               NULL);

  /* Hand the range descriptors back to the slab subsystem so it can
     finish its own initialization */
  sos_kmem_cache_subsystem_setup_commit(first_struct_slab_of_caches,
                                        first_range_of_caches,
                                        first_struct_slab_of_ranges,
                                        first_range_of_ranges);

  return SOS_OK;
}
318 317
319 318
320 319
321 320
322 321
323 322
324 323
/*
 * Allocate a range of nb_pages pages of kernel virtual addresses.
 * flags: SOS_KMEM_VMM_ATOMIC forbids blocking; SOS_KMEM_VMM_MAP also
 * maps fresh physical pages into the range.  On success returns the
 * range (and stores its start address into *range_start when non-NULL);
 * returns NULL on failure.
 */
struct sos_kmem_range *sos_kmem_vmm_new_range(sos_count_t nb_pages,
                                              sos_ui32_t flags,
                                              sos_vaddr_t * range_start)
{
  struct sos_kmem_range *free_range, *new_range;

  if (nb_pages <= 0)
    return NULL;

  /* First-fit search for a free range big enough */
  free_range = find_suitable_free_range(nb_pages);
  if (free_range == NULL)
    return NULL;

  /* Exact fit: move the whole free range to the used list */
  if(free_range->nb_pages == nb_pages)
    {
      list_delete(kmem_free_range_list, free_range);
      kmem_used_range_list = insert_range(kmem_used_range_list,
                                          free_range);

      new_range = free_range;
    }

  /* Larger than needed: split off the head of the free range into a
     freshly allocated descriptor, shrink the free range in place */
  else
    {
      new_range = (struct sos_kmem_range*)
        sos_kmem_cache_alloc(kmem_range_cache,
                             (flags & SOS_KMEM_VMM_ATOMIC)?
                             SOS_KSLAB_ALLOC_ATOMIC:0);
      if (! new_range)
        return NULL;

      new_range->base_vaddr   = free_range->base_vaddr;
      new_range->nb_pages     = nb_pages;
      free_range->base_vaddr += nb_pages*SOS_PAGE_SIZE;
      free_range->nb_pages   -= nb_pages;

      kmem_used_range_list = insert_range(kmem_used_range_list,
                                          new_range);
    }

  /* Not backing any slab (the slab code sets this later if needed) */
  new_range->slab = NULL;

  if (flags & SOS_KMEM_VMM_MAP)
    {
      int i;
      for (i = 0 ; i < nb_pages ; i ++)
        {
          /* Grab a fresh physical page; may block unless ATOMIC */
          sos_paddr_t ppage_paddr
            = sos_physmem_ref_physpage_new(! (flags & SOS_KMEM_VMM_ATOMIC));

          if (ppage_paddr)
            {
              if (sos_paging_map(ppage_paddr,
                                 new_range->base_vaddr
                                   + i * SOS_PAGE_SIZE,
                                 FALSE ,
                                 ((flags & SOS_KMEM_VMM_ATOMIC)?
                                  SOS_VM_MAP_ATOMIC:0)
                                 | SOS_VM_MAP_PROT_READ
                                 | SOS_VM_MAP_PROT_WRITE))
                {
                  /* Mapping failed: drop our reference and record the
                     failure through ppage_paddr = 0 */
                  sos_physmem_unref_physpage(ppage_paddr);
                  ppage_paddr = (sos_paddr_t)NULL;
                }
              else
                {
                  /* Mapping succeeded: the page table now holds its
                     own reference, so release ours */
                  sos_physmem_unref_physpage(ppage_paddr);
                }
            }

          /* Allocation or mapping failed: roll the whole range back */
          if (! ppage_paddr)
            {
              sos_kmem_vmm_del_range(new_range);
              return NULL;
            }

          /* Back-pointer so lookup_range can resolve mapped addresses */
          sos_physmem_set_kmem_range(ppage_paddr, new_range);
        }
    }

  if (range_start)
    *range_start = new_range->base_vaddr;

  return new_range;
}
429 428
430 429
/*
 * Release a used range: unmap its pages, return it to the free list,
 * and coalesce it with adjacent free ranges.  Coalescing may empty a
 * slab of the range cache, which in turn frees that slab's own range;
 * such cascaded ranges are queued on ranges_to_free and processed by
 * the same loop.  The range must not belong to a slab.
 */
sos_ret_t sos_kmem_vmm_del_range(struct sos_kmem_range *range)
{
  int i;
  struct sos_kmem_range *ranges_to_free;
  list_init(ranges_to_free);

  SOS_ASSERT_FATAL(range != NULL);
  SOS_ASSERT_FATAL(range->slab == NULL);

  list_delete(kmem_used_range_list, range);

  do
    {
      /* Put the range back among the free ranges (sorted insert) */
      kmem_free_range_list = insert_range(kmem_free_range_list, range);

      /* Unmap every page of the range (no-op pages are handled by
         sos_paging_unmap itself; its return value is ignored here) */
      for (i = 0 ; i < range->nb_pages ; i ++)
        {
          sos_paging_unmap(range->base_vaddr + i*SOS_PAGE_SIZE);
        }

      /* Coalesce with the preceding free range when contiguous.
         NOTE(review): range->prev/next are used unconditionally —
         this relies on the list being circular (prev/next never NULL);
         confirm against sos/list.h. */
      if (range->prev->base_vaddr + range->prev->nb_pages*SOS_PAGE_SIZE
          == range->base_vaddr)
        {
          struct sos_kmem_range *empty_range_of_ranges = NULL;
          struct sos_kmem_range *prec_free = range->prev;

          /* Grow the predecessor and drop the now-redundant range */
          prec_free->nb_pages += range->nb_pages;
          list_delete(kmem_free_range_list, range);

          /* Returning the descriptor may empty a slab of the range
             cache; if so we get back the range backing that slab */
          empty_range_of_ranges =
            sos_kmem_cache_release_struct_range(range);

          if (empty_range_of_ranges != NULL)
            {
              list_delete(kmem_used_range_list, empty_range_of_ranges);
              list_add_tail(ranges_to_free, empty_range_of_ranges);
            }

          /* Continue working with the merged range */
          range = prec_free;
        }

      /* Coalesce with the following free range when contiguous */
      if (range->base_vaddr + range->nb_pages*SOS_PAGE_SIZE
          == range->next->base_vaddr)
        {
          struct sos_kmem_range *empty_range_of_ranges = NULL;
          struct sos_kmem_range *next_range = range->next;

          /* Absorb the successor into range */
          range->nb_pages += next_range->nb_pages;
          list_delete(kmem_free_range_list, next_range);

          /* Same cascading-release handling as above */
          empty_range_of_ranges =
            sos_kmem_cache_release_struct_range(next_range);

          if (empty_range_of_ranges != NULL)
            {
              list_delete(kmem_used_range_list, empty_range_of_ranges);
              list_add_tail(ranges_to_free, empty_range_of_ranges);
            }
        }

      /* Process any range freed as a side effect, until none remain */
      if (list_is_empty(ranges_to_free))
        range = NULL;
      else
        range = list_pop_head(ranges_to_free);

    }

  while (range != NULL);

  return SOS_OK;
}
549 548
550 549
551 sos_vaddr_t sos_kmem_vmm_alloc(sos_count_t nb_ 550 sos_vaddr_t sos_kmem_vmm_alloc(sos_count_t nb_pages,
552 sos_ui32_t fla 551 sos_ui32_t flags)
553 { 552 {
554 struct sos_kmem_range *range 553 struct sos_kmem_range *range
555 = sos_kmem_vmm_new_range(nb_pages, 554 = sos_kmem_vmm_new_range(nb_pages,
556 flags, 555 flags,
557 NULL); 556 NULL);
558 if (! range) 557 if (! range)
559 return (sos_vaddr_t)NULL; 558 return (sos_vaddr_t)NULL;
560 559
561 return range->base_vaddr; 560 return range->base_vaddr;
562 } 561 }
563 562
564 563
565 sos_ret_t sos_kmem_vmm_free(sos_vaddr_t vaddr) 564 sos_ret_t sos_kmem_vmm_free(sos_vaddr_t vaddr)
566 { 565 {
567 struct sos_kmem_range *range = lookup_range( 566 struct sos_kmem_range *range = lookup_range(vaddr);
568 567
569 568
570 569
571 if (!range || (range->base_vaddr != vaddr)) 570 if (!range || (range->base_vaddr != vaddr))
572 return -SOS_EINVAL; 571 return -SOS_EINVAL;
573 572
574 573
575 if (range->slab != NULL) 574 if (range->slab != NULL)
576 return -SOS_EBUSY; 575 return -SOS_EBUSY;
577 576
578 return sos_kmem_vmm_del_range(range); 577 return sos_kmem_vmm_del_range(range);
579 } 578 }
580 579
581 580
582 sos_ret_t sos_kmem_vmm_set_slab(struct sos_kme 581 sos_ret_t sos_kmem_vmm_set_slab(struct sos_kmem_range *range,
583 struct sos_ksl 582 struct sos_kslab *slab)
584 { 583 {
585 if (! range) 584 if (! range)
586 return -SOS_EINVAL; 585 return -SOS_EINVAL;
587 586
588 range->slab = slab; 587 range->slab = slab;
589 return SOS_OK; 588 return SOS_OK;
590 } 589 }
591 590
592 struct sos_kslab * sos_kmem_vmm_resolve_slab(s 591 struct sos_kslab * sos_kmem_vmm_resolve_slab(sos_vaddr_t vaddr)
593 { 592 {
594 struct sos_kmem_range *range = lookup_range( 593 struct sos_kmem_range *range = lookup_range(vaddr);
595 if (! range) 594 if (! range)
596 return NULL; 595 return NULL;
597 596
598 return range->slab; 597 return range->slab;
599 } 598 }
600 599
601 600
602 sos_bool_t sos_kmem_vmm_is_valid_vaddr(sos_vad 601 sos_bool_t sos_kmem_vmm_is_valid_vaddr(sos_vaddr_t vaddr)
603 { 602 {
604 struct sos_kmem_range *range = lookup_range( 603 struct sos_kmem_range *range = lookup_range(vaddr);
605 return (range != NULL); 604 return (range != NULL);
606 } 605 }