/* Copyright (C) 2000 Thomas Petazzoni
   Copyright (C) 2004 David Decotigny

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License
   as published by the Free Software Foundation; either version 2
   of the License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
   USA.
*/

#include <sos/list.h>
#include <sos/physmem.h>
#include <hwcore/paging.h>
#include <sos/assert.h>

#include "kmem_vmm.h"

/** The structure of a range of kernel-space virtual addresses */
struct sos_kmem_range
{
  sos_vaddr_t base_vaddr;
  sos_count_t nb_pages;

  /* The slab owning this range, or NULL */
  struct sos_kslab *slab;

  struct sos_kmem_range *prev, *next;
};
const int sizeof_struct_sos_kmem_range = sizeof(struct sos_kmem_range);

/** The ranges are kept SORTED by (strictly) ascending base address */
static struct sos_kmem_range *kmem_free_range_list, *kmem_used_range_list;

/** The slab cache for the kmem ranges */
static struct sos_kslab_cache *kmem_range_cache;


/** Helper function to get the closest preceding or containing
    range for the given virtual address */
static struct sos_kmem_range *
get_closest_preceding_kmem_range(struct sos_kmem_range *the_list,
                                 sos_vaddr_t vaddr)
{
  int nb_elements;
  struct sos_kmem_range *a_range, *ret_range;

  /* The kmem_range list is kept SORTED, so we can stop as soon as we
     reach a range whose base address lies above vaddr */
  ret_range = NULL;
  list_foreach(the_list, a_range, nb_elements)
    {
      if (vaddr < a_range->base_vaddr)
        return ret_range;
      ret_range = a_range;
    }

  /* This will always be the LAST range in the kmem area */
  return ret_range;
}
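
/*
 * Worked example (added for illustration, the addresses are made up):
 * if the list holds ranges based at 0x4000, 0xb8000 and 0x200000, then
 * looking up 0xc0000 returns the 0xb8000 range (the closest preceding
 * one), looking up 0x300000 returns the last range (0x200000), and
 * looking up 0x1000 returns NULL since no range precedes it.
 */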


/**
 * Helper function to lookup a free range large enough to hold nb_pages
 * pages (first fit)
 */
static struct sos_kmem_range *find_suitable_free_range(sos_count_t nb_pages)
{
  int nb_elements;
  struct sos_kmem_range *r;

  list_foreach(kmem_free_range_list, r, nb_elements)
  {
    if (r->nb_pages >= nb_pages)
      return r;
  }

  return NULL;
}


/**
 * Helper function to add a_range to the_list, keeping the list in
 * strictly ascending order of base addresses.
 *
 * @return The (possibly) new head of the_list
 */
static struct sos_kmem_range *insert_range(struct sos_kmem_range *the_list,
                                           struct sos_kmem_range *a_range)
{
  struct sos_kmem_range *prec_used;

  /* Look for any preceding range */
  prec_used = get_closest_preceding_kmem_range(the_list,
                                               a_range->base_vaddr);
  /* Insert a_range /after/ this prec_used */
  if (prec_used != NULL)
    list_insert_after(the_list, prec_used, a_range);
  else /* Insert at the beginning of the list */
    list_add_head(the_list, a_range);

  return the_list;
}


/**
 * Helper function to retrieve the range owning the given vaddr: it
 * first asks the physical memory subsystem whether vaddr is mapped in
 * RAM, and falls back to scanning the list of used ranges otherwise
 */
static struct sos_kmem_range *lookup_range(sos_vaddr_t vaddr)
{
  struct sos_kmem_range *range;

  /* First: try to retrieve the physical page mapped at this address */
  sos_paddr_t ppage_paddr = SOS_PAGE_ALIGN_INF(sos_paging_get_paddr(vaddr));

  if (ppage_paddr)
    {
      range = sos_physmem_get_kmem_range(ppage_paddr);

      /* If a page is mapped at this address, it is EXPECTED that it
         is really associated with a range */
      SOS_ASSERT_FATAL(range != NULL);
    }

  /* Otherwise scan the list of used ranges, looking for the range
     owning the address */
  else
    {
      range = get_closest_preceding_kmem_range(kmem_used_range_list,
                                               vaddr);
      /* Not found */
      if (! range)
        return NULL;

      /* vaddr not covered by this range */
      if ( (vaddr < range->base_vaddr)
           || (vaddr >= (range->base_vaddr + range->nb_pages*SOS_PAGE_SIZE)) )
        return NULL;
    }

  return range;
}


/**
 * Helper function for sos_kmem_vmm_subsystem_setup() to initialize a
 * new range that maps a given area as free or as already used.
 * This function either succeeds or halts the whole system.
 */
static struct sos_kmem_range *
create_range(sos_bool_t  is_free,
             sos_vaddr_t base_vaddr,
             sos_vaddr_t top_vaddr,
             struct sos_kslab *associated_slab)
{
  struct sos_kmem_range *range;

  SOS_ASSERT_FATAL(SOS_IS_PAGE_ALIGNED(base_vaddr));
  SOS_ASSERT_FATAL(SOS_IS_PAGE_ALIGNED(top_vaddr));

  if ((top_vaddr - base_vaddr) < SOS_PAGE_SIZE)
    return NULL;

  range = (struct sos_kmem_range*)sos_kmem_cache_alloc(kmem_range_cache,
                                                       SOS_KSLAB_ALLOC_ATOMIC);
  SOS_ASSERT_FATAL(range != NULL);

  range->base_vaddr = base_vaddr;
  range->nb_pages   = (top_vaddr - base_vaddr) / SOS_PAGE_SIZE;

  if (is_free)
    {
      list_add_tail(kmem_free_range_list,
                    range);
    }
  else
    {
      sos_vaddr_t vaddr;
      range->slab = associated_slab;
      list_add_tail(kmem_used_range_list,
                    range);

      /* Ok, set the range owner for the pages in this range */
      for (vaddr = base_vaddr ;
           vaddr < top_vaddr ;
           vaddr += SOS_PAGE_SIZE)
      {
        sos_paddr_t ppage_paddr = sos_paging_get_paddr(vaddr);
        SOS_ASSERT_FATAL((void*)ppage_paddr != NULL);
        sos_physmem_set_kmem_range(ppage_paddr, range);
      }
    }

  return range;
}


sos_ret_t
sos_kmem_vmm_subsystem_setup(sos_vaddr_t kernel_core_base,
                             sos_vaddr_t kernel_core_top,
                             sos_vaddr_t bootstrap_stack_bottom_vaddr,
                             sos_vaddr_t bootstrap_stack_top_vaddr)
{
  struct sos_kslab *first_struct_slab_of_caches,
    *first_struct_slab_of_ranges;
  sos_vaddr_t first_slab_of_caches_base,
    first_slab_of_caches_nb_pages,
    first_slab_of_ranges_base,
    first_slab_of_ranges_nb_pages;
  struct sos_kmem_range *first_range_of_caches,
    *first_range_of_ranges;

  list_init(kmem_free_range_list);
  list_init(kmem_used_range_list);

  kmem_range_cache
    = sos_kmem_cache_subsystem_setup_prepare(kernel_core_base,
                                             kernel_core_top,
                                             sizeof(struct sos_kmem_range),
                                             & first_struct_slab_of_caches,
                                             & first_slab_of_caches_base,
                                             & first_slab_of_caches_nb_pages,
                                             & first_struct_slab_of_ranges,
                                             & first_slab_of_ranges_base,
                                             & first_slab_of_ranges_nb_pages);
  SOS_ASSERT_FATAL(kmem_range_cache != NULL);

  /* Mark virtual addresses 16kB - Video as FREE */
  create_range(TRUE,
               SOS_KMEM_VMM_BASE,
               SOS_PAGE_ALIGN_INF(BIOS_N_VIDEO_START),
               NULL);

  /* Mark virtual addresses in the Video hardware mapping as NOT FREE */
  create_range(FALSE,
               SOS_PAGE_ALIGN_INF(BIOS_N_VIDEO_START),
               SOS_PAGE_ALIGN_SUP(BIOS_N_VIDEO_END),
               NULL);

  /* Mark virtual addresses Video - Kernel as FREE */
  create_range(TRUE,
               SOS_PAGE_ALIGN_SUP(BIOS_N_VIDEO_END),
               SOS_PAGE_ALIGN_INF(kernel_core_base),
               NULL);

  /* Mark virtual addresses in Kernel code/data up to the bootstrap stack
     as NOT FREE */
  create_range(FALSE,
               SOS_PAGE_ALIGN_INF(kernel_core_base),
               bootstrap_stack_bottom_vaddr,
               NULL);

  /* Mark virtual addresses in the bootstrap stack as NOT FREE too,
     but as a separate vmm range, so that it can be unallocated later */
  create_range(FALSE,
               bootstrap_stack_bottom_vaddr,
               bootstrap_stack_top_vaddr,
               NULL);

  /* Mark the remaining virtual addresses in Kernel code/data after
     the bootstrap stack as NOT FREE */
  create_range(FALSE,
               bootstrap_stack_top_vaddr,
               SOS_PAGE_ALIGN_SUP(kernel_core_top),
               NULL);

  /* Mark virtual addresses in the first slab of the cache of caches
     as NOT FREE */
  SOS_ASSERT_FATAL(SOS_PAGE_ALIGN_SUP(kernel_core_top)
                   == first_slab_of_caches_base);
  SOS_ASSERT_FATAL(first_struct_slab_of_caches != NULL);
  first_range_of_caches
    = create_range(FALSE,
                   first_slab_of_caches_base,
                   first_slab_of_caches_base
                   + first_slab_of_caches_nb_pages*SOS_PAGE_SIZE,
                   first_struct_slab_of_caches);

  /* Mark virtual addresses in the first slab of the cache of ranges
     as NOT FREE */
  SOS_ASSERT_FATAL((first_slab_of_caches_base
                    + first_slab_of_caches_nb_pages*SOS_PAGE_SIZE)
                   == first_slab_of_ranges_base);
  SOS_ASSERT_FATAL(first_struct_slab_of_ranges != NULL);
  first_range_of_ranges
    = create_range(FALSE,
                   first_slab_of_ranges_base,
                   first_slab_of_ranges_base
                   + first_slab_of_ranges_nb_pages*SOS_PAGE_SIZE,
                   first_struct_slab_of_ranges);

  /* Mark virtual addresses after these slabs as FREE */
  create_range(TRUE,
               first_slab_of_ranges_base
               + first_slab_of_ranges_nb_pages*SOS_PAGE_SIZE,
               SOS_KMEM_VMM_TOP,
               NULL);

  /* Update the cache subsystem so that the artificially-created
     caches of caches and ranges really behave like *normal* caches (i.e.
     those allocated through the normal slab API) */
  sos_kmem_cache_subsystem_setup_commit(first_struct_slab_of_caches,
                                        first_range_of_caches,
                                        first_struct_slab_of_ranges,
                                        first_range_of_ranges);

  return SOS_OK;
}


/**
 * Allocate a new kernel area spanning one or multiple pages.
 *
 * @return the new range structure
 */
struct sos_kmem_range *sos_kmem_vmm_new_range(sos_count_t nb_pages,
                                              sos_ui32_t  flags,
                                              sos_vaddr_t * range_start)
{
  struct sos_kmem_range *free_range, *new_range;

  if (nb_pages <= 0)
    return NULL;

  /* Find a free range large enough to hold nb_pages pages (first fit) */
  free_range = find_suitable_free_range(nb_pages);
  if (free_range == NULL)
    return NULL;

  /* If the range has exactly the requested size, just move it to the
     "used" list */
  if(free_range->nb_pages == nb_pages)
    {
      list_delete(kmem_free_range_list, free_range);
      kmem_used_range_list = insert_range(kmem_used_range_list,
                                          free_range);
      /* The new_range is exactly the free_range */
      new_range = free_range;
    }

  /* Otherwise the range is bigger than the requested size: split it.
     This involves shrinking it and allocating a new range, which is
     then added to the "used" list */
  else
    {
      /* free_range is split into { new_range | free_range } */
      new_range = (struct sos_kmem_range*)
        sos_kmem_cache_alloc(kmem_range_cache,
                             (flags & SOS_KMEM_VMM_ATOMIC)?
                             SOS_KSLAB_ALLOC_ATOMIC:0);
      if (! new_range)
        return NULL;

      new_range->base_vaddr   = free_range->base_vaddr;
      new_range->nb_pages     = nb_pages;
      free_range->base_vaddr += nb_pages*SOS_PAGE_SIZE;
      free_range->nb_pages   -= nb_pages;

      /* free_range is still at the same place in the list */
      /* Insert new_range in the used list */
      kmem_used_range_list = insert_range(kmem_used_range_list,
                                          new_range);
    }

  /* By default, the range is not associated with any slab */
  new_range->slab = NULL;

  /* If mapping of physical pages is needed, map them now */
  if (flags & SOS_KMEM_VMM_MAP)
    {
      int i;
      for (i = 0 ; i < nb_pages ; i ++)
        {
          /* Get a new physical page */
          sos_paddr_t ppage_paddr
            = sos_physmem_ref_physpage_new(! (flags & SOS_KMEM_VMM_ATOMIC));

          /* Map the page in kernel space */
          if (ppage_paddr)
            {
              if (sos_paging_map(ppage_paddr,
                                 new_range->base_vaddr
                                   + i * SOS_PAGE_SIZE,
                                 FALSE /* Not a user page */,
                                 ((flags & SOS_KMEM_VMM_ATOMIC)?
                                  SOS_VM_MAP_ATOMIC:0)
                                 | SOS_VM_MAP_PROT_READ
                                 | SOS_VM_MAP_PROT_WRITE))
                {
                  /* Failed => force unallocation, see below */
                  sos_physmem_unref_physpage(ppage_paddr);
                  ppage_paddr = (sos_paddr_t)NULL;
                }
              else
                {
                  /* Success: the page can be unreferenced since it is
                     now mapped */
                  sos_physmem_unref_physpage(ppage_paddr);
                }
            }

          /* Undo the allocation if we failed to allocate or map a new page */
          if (! ppage_paddr)
            {
              sos_kmem_vmm_del_range(new_range);
              return NULL;
            }

          /* Ok, set the range owner for this page */
          sos_physmem_set_kmem_range(ppage_paddr, new_range);
        }
    }
  /* ... Otherwise: Demand Paging will do the job */

  if (range_start)
    *range_start = new_range->base_vaddr;

  return new_range;
}
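
/*
 * Minimal usage sketch (added for illustration, not part of the
 * original file; the helper name is hypothetical): how a caller could
 * request a 4-page kernel area with its physical pages mapped right
 * away, retrieving the base address through the range_start parameter.
 */
static struct sos_kmem_range *example_new_mapped_area(sos_vaddr_t *out_base)
{
  /* Without SOS_KMEM_VMM_MAP, the physical pages would instead be
     provided lazily by demand paging */
  return sos_kmem_vmm_new_range(4 /* nb_pages */,
                                SOS_KMEM_VMM_MAP,
                                out_base /* receives the base vaddr */);
}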


sos_ret_t sos_kmem_vmm_del_range(struct sos_kmem_range *range)
{
  int i;
  struct sos_kmem_range *ranges_to_free;
  list_init(ranges_to_free);

  SOS_ASSERT_FATAL(range != NULL);
  SOS_ASSERT_FATAL(range->slab == NULL);

  /* Remove the range from the 'USED' list now */
  list_delete(kmem_used_range_list, range);

  /*
   * The following do..while() loop is here to avoid an indirect
   * recursion: if we called sos_kmem_cache_free() directly from inside
   * this function, we would risk re-entering the current function
   * (sos_kmem_vmm_del_range()), which could in turn call into
   * kmem_slab and sos_kmem_vmm_del_range() again, and again and again.
   * This may happen while freeing ranges of struct sos_kslab...
   *
   * To avoid this, we call a special kmem_slab function
   * (sos_kmem_cache_release_struct_range()) that does almost the same
   * as sos_kmem_cache_free(), but which does NOT call us back (ie
   * sos_kmem_vmm_del_range()): instead, WE add the range that is to be
   * freed to a list, and the do..while() loop is here to process this
   * list! The recursion is thus replaced by plain iteration.
   */
  do
    {
      /* Ok, we got the range. Now, insert this range in the free list */
      kmem_free_range_list = insert_range(kmem_free_range_list, range);

      /* Unmap the physical pages */
      for (i = 0 ; i < range->nb_pages ; i ++)
        {
          /* This will work even if no page is mapped at this address */
          sos_paging_unmap(range->base_vaddr + i*SOS_PAGE_SIZE);
        }

      /* If possible, coalesce it with the prev/next free ranges (there
         is always a valid prev/next link since the list is circular).
         Note: the tests below behave correctly even if the list is
         reduced to the 'range' singleton, at least as long as the
         range is not zero-sized */
      /* Merge with the preceding one? */
      if (range->prev->base_vaddr + range->prev->nb_pages*SOS_PAGE_SIZE
          == range->base_vaddr)
        {
          struct sos_kmem_range *empty_range_of_ranges = NULL;
          struct sos_kmem_range *prec_free = range->prev;

          /* Merge them */
          prec_free->nb_pages += range->nb_pages;
          list_delete(kmem_free_range_list, range);

          /* Mark the range as free. This may cause the slab owning
             the range to become empty */
          empty_range_of_ranges =
            sos_kmem_cache_release_struct_range(range);

          /* If this causes the slab owning the range to become empty,
             add the range corresponding to the slab at the end of the
             list of the ranges to be freed: it will actually be freed
             in one of the next iterations of the do{} loop. */
          if (empty_range_of_ranges != NULL)
            {
              list_delete(kmem_used_range_list, empty_range_of_ranges);
              list_add_tail(ranges_to_free, empty_range_of_ranges);
            }

          /* Set range to the beginning of the coalesced area */
          range = prec_free;
        }

      /* Merge with the next one? [NO 'else' since range may be the
         result of the merge above] */
      if (range->base_vaddr + range->nb_pages*SOS_PAGE_SIZE
          == range->next->base_vaddr)
        {
          struct sos_kmem_range *empty_range_of_ranges = NULL;
          struct sos_kmem_range *next_range = range->next;

          /* Merge them */
          range->nb_pages += next_range->nb_pages;
          list_delete(kmem_free_range_list, next_range);

          /* Mark the next_range as free. This may cause the slab
             owning the next_range to become empty */
          empty_range_of_ranges =
            sos_kmem_cache_release_struct_range(next_range);

          /* If this causes the slab owning the next_range to become
             empty, add the range corresponding to the slab at the end
             of the list of the ranges to be freed: it will actually be
             freed in one of the next iterations of the do{} loop. */
          if (empty_range_of_ranges != NULL)
            {
              list_delete(kmem_used_range_list, empty_range_of_ranges);
              list_add_tail(ranges_to_free, empty_range_of_ranges);
            }
        }

      /* If deleting the range(s) caused one or more range(s) to be
         freed, get the next one to free */
      if (list_is_empty(ranges_to_free))
        range = NULL; /* No range left to free */
      else
        range = list_pop_head(ranges_to_free);

    }
  /* Stop when there is no range left to be freed for now */
  while (range != NULL);

  return SOS_OK;
}


sos_vaddr_t sos_kmem_vmm_alloc(sos_count_t nb_pages,
                               sos_ui32_t  flags)
{
  struct sos_kmem_range *range
    = sos_kmem_vmm_new_range(nb_pages,
                             flags,
                             NULL);
  if (! range)
    return (sos_vaddr_t)NULL;

  return range->base_vaddr;
}


sos_ret_t sos_kmem_vmm_free(sos_vaddr_t vaddr)
{
  struct sos_kmem_range *range = lookup_range(vaddr);

  /* We expect that the given address is the base address of the
     range */
  if (!range || (range->base_vaddr != vaddr))
    return -SOS_EINVAL;

  /* We expect that this range is not held by any cache */
  if (range->slab != NULL)
    return -SOS_EBUSY;

  return sos_kmem_vmm_del_range(range);
}


sos_ret_t sos_kmem_vmm_set_slab(struct sos_kmem_range *range,
                                struct sos_kslab *slab)
{
  if (! range)
    return -SOS_EINVAL;

  range->slab = slab;
  return SOS_OK;
}

struct sos_kslab * sos_kmem_vmm_resolve_slab(sos_vaddr_t vaddr)
{
  struct sos_kmem_range *range = lookup_range(vaddr);
  if (! range)
    return NULL;

  return range->slab;
}


sos_bool_t sos_kmem_vmm_is_valid_vaddr(sos_vaddr_t vaddr)
{
  struct sos_kmem_range *range = lookup_range(vaddr);
  return (range != NULL);
}
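
/*
 * Minimal usage sketch (added for illustration, not part of the
 * original file; the function name is hypothetical): a typical
 * allocate/use/release cycle using the public API defined above.
 */
static void example_alloc_free_cycle(void)
{
  /* Allocate 2 pages of kernel virtual memory, mapped immediately */
  sos_vaddr_t vaddr = sos_kmem_vmm_alloc(2, SOS_KMEM_VMM_MAP);
  if (! vaddr)
    return; /* Allocation failed */

  /* Any address inside the returned range is now considered valid */
  SOS_ASSERT_FATAL(sos_kmem_vmm_is_valid_vaddr(vaddr + SOS_PAGE_SIZE));

  /* Release it: the address passed back must be the base of the range */
  SOS_ASSERT_FATAL(SOS_OK == sos_kmem_vmm_free(vaddr));
}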
