SimpleOS

The LXR Cross Referencer for SOS


Diff markup

Differences between /sos/kmem_vmm.c (Article 5) and /sos/kmem_vmm.c (Article 8)


001 /* Copyright (C) 2000 Thomas Petazzoni            001 /* Copyright (C) 2000 Thomas Petazzoni
002    Copyright (C) 2004 David Decotigny             002    Copyright (C) 2004 David Decotigny
003                                                   003 
004    This program is free software; you can redi    004    This program is free software; you can redistribute it and/or
005    modify it under the terms of the GNU Genera    005    modify it under the terms of the GNU General Public License
006    as published by the Free Software Foundatio    006    as published by the Free Software Foundation; either version 2
007    of the License, or (at your option) any lat    007    of the License, or (at your option) any later version.
008                                                   008    
009    This program is distributed in the hope tha    009    This program is distributed in the hope that it will be useful,
010    but WITHOUT ANY WARRANTY; without even the     010    but WITHOUT ANY WARRANTY; without even the implied warranty of
011    MERCHANTABILITY or FITNESS FOR A PARTICULAR    011    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
012    GNU General Public License for more details    012    GNU General Public License for more details.
013                                                   013    
014    You should have received a copy of the GNU     014    You should have received a copy of the GNU General Public License
015    along with this program; if not, write to t    015    along with this program; if not, write to the Free Software
016    Foundation, Inc., 59 Temple Place - Suite 3    016    Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
017    USA.                                           017    USA. 
018 */                                                018 */
019                                                   019 
020 #include <sos/list.h>                             020 #include <sos/list.h>
021 #include <sos/physmem.h>                          021 #include <sos/physmem.h>
022 #include <hwcore/paging.h>                        022 #include <hwcore/paging.h>
023 #include <sos/assert.h>                           023 #include <sos/assert.h>
024                                                   024 
025 #include "kmem_vmm.h"                             025 #include "kmem_vmm.h"
026                                                   026 
027 /** The structure of a range of kernel-space v    027 /** The structure of a range of kernel-space virtual addresses */
028 struct sos_kmem_range                             028 struct sos_kmem_range
029 {                                                 029 {
030   sos_vaddr_t base_vaddr;                         030   sos_vaddr_t base_vaddr;
031   sos_count_t nb_pages;                           031   sos_count_t nb_pages;
032                                                   032 
033   /* The slab owning this range, or NULL */       033   /* The slab owning this range, or NULL */
034   struct sos_kslab *slab;                         034   struct sos_kslab *slab;
035                                                   035 
036   struct sos_kmem_range *prev, *next;             036   struct sos_kmem_range *prev, *next;
037 };                                                037 };
038 const int sizeof_struct_sos_kmem_range = sizeo    038 const int sizeof_struct_sos_kmem_range = sizeof(struct sos_kmem_range);
039                                                   039 
040 /** The ranges are SORTED in (strictly) ascend    040 /** The ranges are SORTED in (strictly) ascending base addresses */
041 static struct sos_kmem_range *kmem_free_range_    041 static struct sos_kmem_range *kmem_free_range_list, *kmem_used_range_list;
042                                                   042 
043 /** The slab cache for the kmem ranges */         043 /** The slab cache for the kmem ranges */
044 static struct sos_kslab_cache *kmem_range_cach    044 static struct sos_kslab_cache *kmem_range_cache;
045                                                   045 
046                                                   046 
047                                                   047 
048 /** Helper function to get the closest precedi    048 /** Helper function to get the closest preceding or containing
049     range for the given virtual address */        049     range for the given virtual address */
050 static struct sos_kmem_range *                    050 static struct sos_kmem_range *
051 get_closest_preceding_kmem_range(struct sos_km    051 get_closest_preceding_kmem_range(struct sos_kmem_range *the_list,
052                                  sos_vaddr_t v    052                                  sos_vaddr_t vaddr)
053 {                                                 053 {
054   int nb_elements;                                054   int nb_elements;
055   struct sos_kmem_range *a_range, *ret_range;     055   struct sos_kmem_range *a_range, *ret_range;
056                                                   056 
057   /* kmem_range list is kept SORTED, so we exi    057   /* kmem_range list is kept SORTED, so we exit as soon as we reach
058      a range whose base address is > vaddr */     058      a range whose base address is > vaddr */
059   ret_range = NULL;                               059   ret_range = NULL;
060   list_foreach(the_list, a_range, nb_elements)    060   list_foreach(the_list, a_range, nb_elements)
061     {                                             061     {
062       if (vaddr < a_range->base_vaddr)            062       if (vaddr < a_range->base_vaddr)
063         return ret_range;                         063         return ret_range;
064       ret_range = a_range;                        064       ret_range = a_range;
065     }                                             065     }
066                                                   066 
067   /* This will always be the LAST range in the    067   /* This will always be the LAST range in the kmem area */
068   return ret_range;                               068   return ret_range;
069 }                                                 069 }
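
A short worked example may help (hypothetical addresses, not part of the listing): with a sorted list holding ranges based at 0xA000, 0x10000 and 0x20000, a lookup for vaddr 0x12345 walks past the first two entries, stops at 0x20000 (the first base address above vaddr) and returns the 0x10000 range. A lookup below 0xA000 returns NULL, and a lookup above 0x20000 falls off the end of the loop and returns the last range, as the final comment notes.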
070                                                   070 
071                                                   071 
072 /**                                               072 /**
073  * Helper function to lookup a free range larg    073  * Helper function to lookup a free range large enough to hold nb_pages
074  * pages (first fit)                              074  * pages (first fit)
075  */                                               075  */
076 static struct sos_kmem_range *find_suitable_fr    076 static struct sos_kmem_range *find_suitable_free_range(sos_count_t nb_pages)
077 {                                                 077 {
078   int nb_elements;                                078   int nb_elements;
079   struct sos_kmem_range *r;                       079   struct sos_kmem_range *r;
080                                                   080 
081   list_foreach(kmem_free_range_list, r, nb_ele    081   list_foreach(kmem_free_range_list, r, nb_elements)
082   {                                               082   {
083     if (r->nb_pages >= nb_pages)                  083     if (r->nb_pages >= nb_pages)
084       return r;                                   084       return r;
085   }                                               085   }
086                                                   086 
087   return NULL;                                    087   return NULL;
088 }                                                 088 }
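
The lookup above is first fit: the scan stops at the first free range large enough, which is fast but tends to fragment the low end of the address space. For comparison, a best-fit variant would scan the whole list and keep the smallest range that still fits. A minimal sketch reusing the same list_foreach idiom (this function is not part of SOS):

    static struct sos_kmem_range *find_best_fit_free_range(sos_count_t nb_pages)
    {
      int nb_elements;
      struct sos_kmem_range *r, *best = NULL;

      list_foreach(kmem_free_range_list, r, nb_elements)
      {
        /* Keep the smallest range that is still large enough */
        if ((r->nb_pages >= nb_pages)
            && ((best == NULL) || (r->nb_pages < best->nb_pages)))
          best = r;
      }

      return best; /* NULL when no free range is large enough */
    }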
089                                                   089 
090                                                   090 
091 /**                                               091 /**
092  * Helper function to add a_range in the_list,    092  * Helper function to add a_range in the_list, in strictly ascending order.
093  *                                                093  *
094  * @return The (possibly) new head of the_list    094  * @return The (possibly) new head of the_list
095  */                                               095  */
096 static struct sos_kmem_range *insert_range(str    096 static struct sos_kmem_range *insert_range(struct sos_kmem_range *the_list,
097                                            str    097                                            struct sos_kmem_range *a_range)
098 {                                                 098 {
099   struct sos_kmem_range *prec_used;               099   struct sos_kmem_range *prec_used;
100                                                   100 
101   /** Look for any preceding range */             101   /** Look for any preceding range */
102   prec_used = get_closest_preceding_kmem_range    102   prec_used = get_closest_preceding_kmem_range(the_list,
103                                                   103                                                a_range->base_vaddr);
104   /** insert a_range /after/ this prec_used */    104   /** insert a_range /after/ this prec_used */
105   if (prec_used != NULL)                          105   if (prec_used != NULL)
106     list_insert_after(the_list, prec_used, a_r    106     list_insert_after(the_list, prec_used, a_range);
107   else /* Insert at the beginning of the list     107   else /* Insert at the beginning of the list */
108     list_add_head(the_list, a_range);             108     list_add_head(the_list, a_range);
109                                                   109 
110   return the_list;                                110   return the_list;
111 }                                                 111 }
112                                                   112 
113                                                   113 
114 /**                                               114 /**
115  * Helper function to retrieve the range ownin    115  * Helper function to retrieve the range owning the given vaddr, by
116  * scanning the physical memory first if vaddr    116  * scanning the physical memory first if vaddr is mapped in RAM
117  */                                               117  */
118 static struct sos_kmem_range *lookup_range(sos    118 static struct sos_kmem_range *lookup_range(sos_vaddr_t vaddr)
119 {                                                 119 {
120   struct sos_kmem_range *range;                   120   struct sos_kmem_range *range;
121                                                   121 
122   /* First: try to retrieve the physical page     122   /* First: try to retrieve the physical page mapped at this address */
123   sos_paddr_t ppage_paddr = sos_paging_get_pad !! 123   sos_paddr_t ppage_paddr = SOS_PAGE_ALIGN_INF(sos_paging_get_paddr(vaddr));
124   if (! ppage_paddr)                           !! 124 
                                                   >> 125   if (ppage_paddr)
125     {                                             126     {
126       range = sos_physmem_get_kmem_range(ppage    127       range = sos_physmem_get_kmem_range(ppage_paddr);
                                                   >> 128 
127       /* If a page is mapped at this address,     129       /* If a page is mapped at this address, it is EXPECTED that it
128          is really associated with a range */     130          is really associated with a range */
129       SOS_ASSERT_FATAL(range != NULL);            131       SOS_ASSERT_FATAL(range != NULL);
130     }                                             132     }
131                                                   133 
132   /* Otherwise scan the list of used ranges, l    134   /* Otherwise scan the list of used ranges, looking for the range
133      owning the address */                        135      owning the address */
134   else                                            136   else
135     {                                             137     {
136       range = get_closest_preceding_kmem_range    138       range = get_closest_preceding_kmem_range(kmem_used_range_list,
137                                                   139                                                vaddr);
138       /* Not found */                             140       /* Not found */
139       if (! range)                                141       if (! range)
140         return NULL;                              142         return NULL;
                                                   >> 143 
                                                   >> 144       /* vaddr not covered by this range */
                                                   >> 145       if ( (vaddr < range->base_vaddr)
                                                   >> 146            || (vaddr >= (range->base_vaddr + range->nb_pages*SOS_PAGE_SIZE)) )
                                                   >> 147         return NULL;
141     }                                             148     }
142                                                   149 
143   return range;                                   150   return range;
144 }                                                 151 }
145                                                   152 
146                                                   153 
147 /**                                               154 /**
148  * Helper function for sos_kmem_vmm_setup() to    155  * Helper function for sos_kmem_vmm_setup() to initialize a new range
149  * that maps a given area as free or as alread    156  * that maps a given area as free or as already used.
150  * This function either succeeds or halts the     157  * This function either succeeds or halts the whole system.
151  */                                               158  */
152 static struct sos_kmem_range *                    159 static struct sos_kmem_range *
153 create_range(sos_bool_t  is_free,                 160 create_range(sos_bool_t  is_free,
154              sos_vaddr_t base_vaddr,              161              sos_vaddr_t base_vaddr,
155              sos_vaddr_t top_addr,             !! 162              sos_vaddr_t top_vaddr,
156              struct sos_kslab *associated_slab    163              struct sos_kslab *associated_slab)
157 {                                                 164 {
158   struct sos_kmem_range *range;                   165   struct sos_kmem_range *range;
                                                   >> 166 
                                                   >> 167   SOS_ASSERT_FATAL(SOS_IS_PAGE_ALIGNED(base_vaddr));
                                                   >> 168   SOS_ASSERT_FATAL(SOS_IS_PAGE_ALIGNED(top_vaddr));
                                                   >> 169 
                                                   >> 170   if ((top_vaddr - base_vaddr) < SOS_PAGE_SIZE)
                                                   >> 171     return NULL;
                                                   >> 172 
159   range = (struct sos_kmem_range*)sos_kmem_cac    173   range = (struct sos_kmem_range*)sos_kmem_cache_alloc(kmem_range_cache,
160                                                   174                                                        SOS_KSLAB_ALLOC_ATOMIC);
161   SOS_ASSERT_FATAL(range != NULL);                175   SOS_ASSERT_FATAL(range != NULL);
162                                                   176 
163   range->base_vaddr = base_vaddr;                 177   range->base_vaddr = base_vaddr;
164   range->nb_pages   = (top_addr - base_vaddr)  !! 178   range->nb_pages   = (top_vaddr - base_vaddr) / SOS_PAGE_SIZE;
165                                                   179 
166   if (is_free)                                    180   if (is_free)
167     {                                             181     {
168       list_add_tail(kmem_free_range_list,         182       list_add_tail(kmem_free_range_list,
169                     range);                       183                     range);
170     }                                             184     }
171   else                                            185   else
172     {                                             186     {
173       sos_vaddr_t vaddr;                          187       sos_vaddr_t vaddr;
174       range->slab = associated_slab;              188       range->slab = associated_slab;
175       list_add_tail(kmem_used_range_list,         189       list_add_tail(kmem_used_range_list,
176                     range);                       190                     range);
177                                                   191 
178       /* Ok, set the range owner for the pages    192       /* Ok, set the range owner for the pages in this range */
179       for (vaddr = base_vaddr ;                   193       for (vaddr = base_vaddr ;
180            vaddr < top_addr ;                  !! 194            vaddr < top_vaddr ;
181            vaddr += SOS_PAGE_SIZE)                195            vaddr += SOS_PAGE_SIZE)
182       {                                           196       {
183         sos_paddr_t ppage_paddr = sos_paging_g    197         sos_paddr_t ppage_paddr = sos_paging_get_paddr(vaddr);
184         SOS_ASSERT_FATAL((void*)ppage_paddr !=    198         SOS_ASSERT_FATAL((void*)ppage_paddr != NULL);
185         sos_physmem_set_kmem_range(ppage_paddr    199         sos_physmem_set_kmem_range(ppage_paddr, range);
186       }                                           200       }
187     }                                             201     }
188                                                   202 
189   return range;                                   203   return range;
190 }                                                 204 }
191                                                   205 
192                                                   206 
193 sos_ret_t sos_kmem_vmm_setup(sos_vaddr_t kerne !! 207 sos_ret_t
194                              sos_vaddr_t kerne !! 208 sos_kmem_vmm_subsystem_setup(sos_vaddr_t kernel_core_base,
                                                   >> 209                              sos_vaddr_t kernel_core_top,
                                                   >> 210                              sos_vaddr_t bootstrap_stack_bottom_vaddr,
                                                   >> 211                              sos_vaddr_t bootstrap_stack_top_vaddr)
195 {                                                 212 {
196   struct sos_kslab *first_struct_slab_of_cache    213   struct sos_kslab *first_struct_slab_of_caches,
197     *first_struct_slab_of_ranges;                 214     *first_struct_slab_of_ranges;
198   sos_vaddr_t first_slab_of_caches_base,          215   sos_vaddr_t first_slab_of_caches_base,
199     first_slab_of_caches_nb_pages,                216     first_slab_of_caches_nb_pages,
200     first_slab_of_ranges_base,                    217     first_slab_of_ranges_base,
201     first_slab_of_ranges_nb_pages;                218     first_slab_of_ranges_nb_pages;
202   struct sos_kmem_range *first_range_of_caches    219   struct sos_kmem_range *first_range_of_caches,
203     *first_range_of_ranges;                       220     *first_range_of_ranges;
204                                                   221 
205   list_init(kmem_free_range_list);                222   list_init(kmem_free_range_list);
206   list_init(kmem_used_range_list);                223   list_init(kmem_used_range_list);
207                                                   224 
208   kmem_range_cache                                225   kmem_range_cache
209     = sos_kmem_cache_setup_prepare(kernel_core !! 226     = sos_kmem_cache_subsystem_setup_prepare(kernel_core_base,
210                                    kernel_core !! 227                                              kernel_core_top,
211                                    sizeof(stru !! 228                                              sizeof(struct sos_kmem_range),
212                                    & first_str !! 229                                              & first_struct_slab_of_caches,
213                                    & first_sla !! 230                                              & first_slab_of_caches_base,
214                                    & first_sla !! 231                                              & first_slab_of_caches_nb_pages,
215                                    & first_str !! 232                                              & first_struct_slab_of_ranges,
216                                    & first_sla !! 233                                              & first_slab_of_ranges_base,
217                                    & first_sla !! 234                                              & first_slab_of_ranges_nb_pages);
218   SOS_ASSERT_FATAL(kmem_range_cache != NULL);     235   SOS_ASSERT_FATAL(kmem_range_cache != NULL);
219                                                   236 
220   /* Mark virtual addresses 16kB - Video as FR    237   /* Mark virtual addresses 16kB - Video as FREE */
221   create_range(TRUE,                              238   create_range(TRUE,
222                SOS_KMEM_VMM_BASE,                 239                SOS_KMEM_VMM_BASE,
223                SOS_PAGE_ALIGN_INF(BIOS_N_VIDEO    240                SOS_PAGE_ALIGN_INF(BIOS_N_VIDEO_START),
224                NULL);                             241                NULL);
225                                                   242   
226   /* Mark virtual addresses in Video hardware     243   /* Mark virtual addresses in Video hardware mapping as NOT FREE */
227   create_range(FALSE,                             244   create_range(FALSE,
228                SOS_PAGE_ALIGN_INF(BIOS_N_VIDEO    245                SOS_PAGE_ALIGN_INF(BIOS_N_VIDEO_START),
229                SOS_PAGE_ALIGN_SUP(BIOS_N_VIDEO    246                SOS_PAGE_ALIGN_SUP(BIOS_N_VIDEO_END),
230                NULL);                             247                NULL);
231                                                   248   
232   /* Mark virtual addresses Video - Kernel as     249   /* Mark virtual addresses Video - Kernel as FREE */
233   create_range(TRUE,                              250   create_range(TRUE,
234                SOS_PAGE_ALIGN_SUP(BIOS_N_VIDEO    251                SOS_PAGE_ALIGN_SUP(BIOS_N_VIDEO_END),
235                SOS_PAGE_ALIGN_INF(kernel_core_    252                SOS_PAGE_ALIGN_INF(kernel_core_base),
236                NULL);                             253                NULL);
237                                                   254   
238   /* Mark virtual addresses in Kernel code/dat !! 255   /* Mark virtual addresses in Kernel code/data up to the bootstrap stack
                                                   >> 256      as NOT FREE */
239   create_range(FALSE,                             257   create_range(FALSE,
240                SOS_PAGE_ALIGN_INF(kernel_core_    258                SOS_PAGE_ALIGN_INF(kernel_core_base),
                                                   >> 259                bootstrap_stack_bottom_vaddr,
                                                   >> 260                NULL);
                                                   >> 261 
                                                   >> 262   /* Mark virtual addresses in the bootstrap stack as NOT FREE too,
                                                   >> 263      but in another vmm region in order to be unallocated later */
                                                   >> 264   create_range(FALSE,
                                                   >> 265                bootstrap_stack_bottom_vaddr,
                                                   >> 266                bootstrap_stack_top_vaddr,
                                                   >> 267                NULL);
                                                   >> 268 
                                                   >> 269   /* Mark the remaining virtual addresses in Kernel code/data after
                                                   >> 270      the bootstrap stack as NOT FREE */
                                                   >> 271   create_range(FALSE,
                                                   >> 272                bootstrap_stack_top_vaddr,
241                SOS_PAGE_ALIGN_SUP(kernel_core_    273                SOS_PAGE_ALIGN_SUP(kernel_core_top),
242                NULL);                             274                NULL);
243                                                   275 
244   /* Mark virtual addresses in the first slab     276   /* Mark virtual addresses in the first slab of the cache of caches
245      as NOT FREE */                               277      as NOT FREE */
246   SOS_ASSERT_FATAL(SOS_PAGE_ALIGN_SUP(kernel_c    278   SOS_ASSERT_FATAL(SOS_PAGE_ALIGN_SUP(kernel_core_top)
247                    == first_slab_of_caches_bas    279                    == first_slab_of_caches_base);
248   SOS_ASSERT_FATAL(first_struct_slab_of_caches    280   SOS_ASSERT_FATAL(first_struct_slab_of_caches != NULL);
249   first_range_of_caches                           281   first_range_of_caches
250     = create_range(FALSE,                         282     = create_range(FALSE,
251                    first_slab_of_caches_base,     283                    first_slab_of_caches_base,
252                    first_slab_of_caches_base      284                    first_slab_of_caches_base
253                    + first_slab_of_caches_nb_p    285                    + first_slab_of_caches_nb_pages*SOS_PAGE_SIZE,
254                    first_struct_slab_of_caches    286                    first_struct_slab_of_caches);
255                                                   287 
256   /* Mark virtual addresses in the first slab     288   /* Mark virtual addresses in the first slab of the cache of ranges
257      as NOT FREE */                               289      as NOT FREE */
258   SOS_ASSERT_FATAL((first_slab_of_caches_base     290   SOS_ASSERT_FATAL((first_slab_of_caches_base
259                     + first_slab_of_caches_nb_    291                     + first_slab_of_caches_nb_pages*SOS_PAGE_SIZE)
260                    == first_slab_of_ranges_bas    292                    == first_slab_of_ranges_base);
261   SOS_ASSERT_FATAL(first_struct_slab_of_ranges    293   SOS_ASSERT_FATAL(first_struct_slab_of_ranges != NULL);
262   first_range_of_ranges                           294   first_range_of_ranges
263     = create_range(FALSE,                         295     = create_range(FALSE,
264                    first_slab_of_ranges_base,     296                    first_slab_of_ranges_base,
265                    first_slab_of_ranges_base      297                    first_slab_of_ranges_base
266                    + first_slab_of_ranges_nb_p    298                    + first_slab_of_ranges_nb_pages*SOS_PAGE_SIZE,
267                    first_struct_slab_of_ranges    299                    first_struct_slab_of_ranges);
268                                                   300   
269   /* Mark virtual addresses after these slabs     301   /* Mark virtual addresses after these slabs as FREE */
270   create_range(TRUE,                              302   create_range(TRUE,
271                first_slab_of_ranges_base          303                first_slab_of_ranges_base
272                + first_slab_of_ranges_nb_pages    304                + first_slab_of_ranges_nb_pages*SOS_PAGE_SIZE,
273                SOS_KMEM_VMM_TOP,                  305                SOS_KMEM_VMM_TOP,
274                NULL);                             306                NULL);
275                                                   307 
276   /* Update the cache subsystem so that the ar    308   /* Update the cache subsystem so that the artificially-created
277      caches of caches and ranges really behave    309      caches of caches and ranges really behave like *normal* caches (ie
278      those allocated by the normal slab API) *    310      those allocated by the normal slab API) */
279   sos_kmem_cache_setup_commit(first_struct_sla !! 311   sos_kmem_cache_subsystem_setup_commit(first_struct_slab_of_caches,
280                               first_range_of_c !! 312                                         first_range_of_caches,
281                               first_struct_sla !! 313                                         first_struct_slab_of_ranges,
282                               first_range_of_r !! 314                                         first_range_of_ranges);
283                                                   315 
284   return SOS_OK;                                  316   return SOS_OK;
285 }                                                 317 }
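
To summarize what the calls above build, here is the kernel-space layout right after setup in the Article 8 version, in ascending addresses (a sketch reconstructed from the create_range() calls):

    SOS_KMEM_VMM_BASE (16kB) .. BIOS_N_VIDEO_START    FREE
    BIOS_N_VIDEO_START .. BIOS_N_VIDEO_END            USED (video hardware mapping)
    BIOS_N_VIDEO_END .. kernel_core_base              FREE
    kernel_core_base .. bootstrap_stack_bottom_vaddr  USED (kernel code/data)
    bootstrap stack bottom .. top                     USED (its own range)
    bootstrap_stack_top_vaddr .. kernel_core_top      USED (rest of kernel code/data)
    first slab of the cache of caches                 USED
    first slab of the cache of ranges                 USED
    end of that slab .. SOS_KMEM_VMM_TOP              FREE

The Article 5 version covered kernel_core_base .. kernel_core_top with a single used range; carving the bootstrap stack into its own range is what allows it to be unallocated later, as the comment above notes.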
286                                                   318 
287                                                   319 
288 /**                                               320 /**
289  * Allocate a new kernel area spanning one or     321  * Allocate a new kernel area spanning one or multiple pages.
290  *                                                322  *
291  * @return a new range structure                  323  * @return a new range structure
292  */                                               324  */
293 struct sos_kmem_range *sos_kmem_vmm_new_range(    325 struct sos_kmem_range *sos_kmem_vmm_new_range(sos_count_t nb_pages,
294                                                   326                                               sos_ui32_t  flags,
295                                                   327                                               sos_vaddr_t * range_start)
296 {                                                 328 {
297   struct sos_kmem_range *free_range, *new_rang    329   struct sos_kmem_range *free_range, *new_range;
298                                                   330 
299   if (nb_pages <= 0)                              331   if (nb_pages <= 0)
300     return NULL;                                  332     return NULL;
301                                                   333 
302   /* Find a suitable free range to hold the re    334   /* Find a suitable free range to hold the requested nb_pages */
303   free_range = find_suitable_free_range(nb_pag    335   free_range = find_suitable_free_range(nb_pages);
304   if (free_range == NULL)                         336   if (free_range == NULL)
305     return NULL;                                  337     return NULL;
306                                                   338 
307   /* If range has exactly the requested size,     339   /* If range has exactly the requested size, just move it to the
308      "used" list */                               340      "used" list */
309   if(free_range->nb_pages == nb_pages)            341   if(free_range->nb_pages == nb_pages)
310     {                                             342     {
311       list_delete(kmem_free_range_list, free_r    343       list_delete(kmem_free_range_list, free_range);
312       kmem_used_range_list = insert_range(kmem    344       kmem_used_range_list = insert_range(kmem_used_range_list,
313                                           free    345                                           free_range);
314       /* The new_range is exactly the free_ran    346       /* The new_range is exactly the free_range */
315       new_range = free_range;                     347       new_range = free_range;
316     }                                             348     }
317                                                   349 
318   /* Otherwise the range is bigger than the re    350   /* Otherwise the range is bigger than the requested size, split it.
319      This involves reducing its size, and allo    351      This involves reducing its size, and allocating a new range, which
320      is going to be added to the "used" list *    352      is going to be added to the "used" list */
321   else                                            353   else
322     {                                             354     {
323       /* free_range split in { new_range | fre    355       /* free_range split in { new_range | free_range } */
324       new_range = (struct sos_kmem_range*)        356       new_range = (struct sos_kmem_range*)
325         sos_kmem_cache_alloc(kmem_range_cache,    357         sos_kmem_cache_alloc(kmem_range_cache,
326                              (flags & SOS_KMEM    358                              (flags & SOS_KMEM_VMM_ATOMIC)?
327                              SOS_KSLAB_ALLOC_A    359                              SOS_KSLAB_ALLOC_ATOMIC:0);
328       if (! new_range)                            360       if (! new_range)
329         return NULL;                              361         return NULL;
330                                                   362 
331       new_range->base_vaddr   = free_range->ba    363       new_range->base_vaddr   = free_range->base_vaddr;
332       new_range->nb_pages     = nb_pages;         364       new_range->nb_pages     = nb_pages;
333       free_range->base_vaddr += nb_pages*SOS_P    365       free_range->base_vaddr += nb_pages*SOS_PAGE_SIZE;
334       free_range->nb_pages   -= nb_pages;         366       free_range->nb_pages   -= nb_pages;
335                                                   367 
336       /* free_range is still at the same place    368       /* free_range is still at the same place in the list */
337       /* insert new_range in the used list */     369       /* insert new_range in the used list */
338       kmem_used_range_list = insert_range(kmem    370       kmem_used_range_list = insert_range(kmem_used_range_list,
339                                           new_    371                                           new_range);
340     }                                             372     }
341                                                   373 
342   /* By default, the range is not associated w    374   /* By default, the range is not associated with any slab */
343   new_range->slab = NULL;                         375   new_range->slab = NULL;
344                                                   376 
345   /* If mapping of physical pages is needed, m    377   /* If mapping of physical pages is needed, map them now */
346   if (flags & SOS_KMEM_VMM_MAP)                   378   if (flags & SOS_KMEM_VMM_MAP)
347     {                                             379     {
348       int i;                                      380       int i;
349       for (i = 0 ; i < nb_pages ; i ++)           381       for (i = 0 ; i < nb_pages ; i ++)
350         {                                         382         {
351           /* Get a new physical page */           383           /* Get a new physical page */
352           sos_paddr_t ppage_paddr                 384           sos_paddr_t ppage_paddr
353             = sos_physmem_ref_physpage_new(! (    385             = sos_physmem_ref_physpage_new(! (flags & SOS_KMEM_VMM_ATOMIC));
354                                                   386           
355           /* Map the page in kernel space */      387           /* Map the page in kernel space */
356           if (ppage_paddr)                        388           if (ppage_paddr)
357             {                                     389             {
358               if (sos_paging_map(ppage_paddr,     390               if (sos_paging_map(ppage_paddr,
359                                  new_range->ba    391                                  new_range->base_vaddr
360                                    + i * SOS_P    392                                    + i * SOS_PAGE_SIZE,
361                                  FALSE /* Not     393                                  FALSE /* Not a user page */,
362                                  ((flags & SOS    394                                  ((flags & SOS_KMEM_VMM_ATOMIC)?
363                                   SOS_VM_MAP_A    395                                   SOS_VM_MAP_ATOMIC:0)
364                                  | SOS_VM_MAP_    396                                  | SOS_VM_MAP_PROT_READ
365                                  | SOS_VM_MAP_    397                                  | SOS_VM_MAP_PROT_WRITE))
366                 {                                 398                 {
367                   /* Failed => force unallocat    399                   /* Failed => force unallocation, see below */
368                   sos_physmem_unref_physpage(p    400                   sos_physmem_unref_physpage(ppage_paddr);
369                   ppage_paddr = (sos_paddr_t)N    401                   ppage_paddr = (sos_paddr_t)NULL;
370                 }                                 402                 }
371               else                                403               else
372                 {                                 404                 {
373                   /* Success: page can be unre    405                   /* Success: page can be unreferenced since it is
374                      now mapped */                406                      now mapped */
375                   sos_physmem_unref_physpage(p    407                   sos_physmem_unref_physpage(ppage_paddr);
376                 }                                 408                 }
377             }                                     409             }
378                                                   410 
379           /* Undo the allocation if failed to     411           /* Undo the allocation if failed to allocate or map a new page */
380           if (! ppage_paddr)                      412           if (! ppage_paddr)
381             {                                     413             {
382               sos_kmem_vmm_del_range(new_range    414               sos_kmem_vmm_del_range(new_range);
383               return NULL;                        415               return NULL;
384             }                                     416             }
385                                                   417 
386           /* Ok, set the range owner for this     418           /* Ok, set the range owner for this page */
387           sos_physmem_set_kmem_range(ppage_pad    419           sos_physmem_set_kmem_range(ppage_paddr, new_range);
388         }                                         420         }
389     }                                             421     }
390                                                !! 422   /* ... Otherwise: Demand Paging will do the job */
391 /* Otherwise we need a correct page fault handler to support <<
392    deferred mapping (aka demand paging) of ranges */         <<
393 else                                                         <<
394   SOS_ASSERT_FATAL(! "No demand paging yet");                <<
395                                                   423 
396   if (range_start)                                424   if (range_start)
397     *range_start = new_range->base_vaddr;         425     *range_start = new_range->base_vaddr;
398                                                   426 
399   return new_range;                               427   return new_range;
400 }                                                 428 }
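
As a usage sketch (hypothetical caller, not part of the listing), a subsystem needing a 4-page kernel area mapped right away would do:

    struct sos_kmem_range *r;
    sos_vaddr_t start;

    /* Reserve 4 pages of kernel virtual space and map fresh
       physical pages behind them immediately */
    r = sos_kmem_vmm_new_range(4, SOS_KMEM_VMM_MAP, &start);
    if (r != NULL)
      {
        /* ... use the 4*SOS_PAGE_SIZE bytes at 'start' ... */
        sos_kmem_vmm_del_range(r); /* unmap and release the range */
      }

Without SOS_KMEM_VMM_MAP, the Article 8 version now leaves the pages unmapped for the page fault handler to map on first access (demand paging), where the Article 5 version simply halted with "No demand paging yet".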
401                                                   429 
402                                                   430 
403 sos_vaddr_t sos_kmem_vmm_del_range(struct sos_ !! 431 sos_ret_t sos_kmem_vmm_del_range(struct sos_kmem_range *range)
404 {                                                 432 {
405   int i;                                          433   int i;
406   struct sos_kmem_range *ranges_to_free;          434   struct sos_kmem_range *ranges_to_free;
407   list_init(ranges_to_free);                      435   list_init(ranges_to_free);
408                                                   436 
409   SOS_ASSERT_FATAL(range != NULL);                437   SOS_ASSERT_FATAL(range != NULL);
410   SOS_ASSERT_FATAL(range->slab == NULL);          438   SOS_ASSERT_FATAL(range->slab == NULL);
411                                                   439 
412   /* Remove the range from the 'USED' list now    440   /* Remove the range from the 'USED' list now */
413   list_delete(kmem_used_range_list, range);       441   list_delete(kmem_used_range_list, range);
414                                                   442 
415   /*                                              443   /*
416    * The following do..while() loop is here to    444    * The following do..while() loop is here to avoid an indirect
417    * recursion: if we call directly kmem_cache    445    * recursion: if we call directly kmem_cache_free() from inside the
418    * current function, we risk re-entering the     446    * current function, we risk re-entering the current function
419    * (sos_kmem_vmm_del_range()) again, which m    447    * (sos_kmem_vmm_del_range()) again, which may cause problems if it
420    * in turn calls kmem_slab again and sos_kme    448    * in turn calls kmem_slab again and sos_kmem_vmm_del_range again,
421    * and again and again. This may happen whil    449    * and again and again. This may happen while freeing ranges of
422    * struct sos_kslab...                          450    * struct sos_kslab...
423    *                                              451    *
424    * To avoid this, we choose to call a speci    452    * To avoid this, we choose to call a special function of kmem_slab
425    * doing almost the same as sos_kmem_cache_f    453    * doing almost the same as sos_kmem_cache_free(), but which does
426    * NOT call us (ie sos_kmem_vmm_del_range())    454    * NOT call us (ie sos_kmem_vmm_del_range()): instead WE add the
427    * range that is to be freed to a list, and     455    * range that is to be freed to a list, and the do..while() loop is
428    * here to process this list! The recursion     456    * here to process this list! The recursion is replaced by
429    * classical iterations.                        457    * classical iterations.
430    */                                             458    */
431   do                                              459   do
432     {                                             460     {
433       /* Ok, we got the range. Now, insert thi    461       /* Ok, we got the range. Now, insert this range in the free list */
434       kmem_free_range_list = insert_range(kmem    462       kmem_free_range_list = insert_range(kmem_free_range_list, range);
435                                                   463 
436       /* Unmap the physical pages */              464       /* Unmap the physical pages */
437       for (i = 0 ; i < range->nb_pages ; i ++)    465       for (i = 0 ; i < range->nb_pages ; i ++)
438         {                                         466         {
439           /* This will work even if no page is    467           /* This will work even if no page is mapped at this address */
440           sos_paging_unmap(range->base_vaddr +    468           sos_paging_unmap(range->base_vaddr + i*SOS_PAGE_SIZE);
441         }                                         469         }
442                                                   470       
443       /* If possible, coalesce it with prev/ne    471       /* If possible, coalesce it with prev/next free ranges (there is
444          always a valid prev/next link since t    472          always a valid prev/next link since the list is circular). Note:
445          the tests below will lead to correct     473          the tests below will lead to correct behaviour even if the list
446          is limited to the 'range' singleton,     474          is limited to the 'range' singleton, at least as long as the
447          range is not zero-sized */               475          range is not zero-sized */
448       /* Merge with preceding one ? */            476       /* Merge with preceding one ? */
449       if (range->prev->base_vaddr + range->pre    477       if (range->prev->base_vaddr + range->prev->nb_pages*SOS_PAGE_SIZE
450           == range->base_vaddr)                   478           == range->base_vaddr)
451         {                                         479         {
452           struct sos_kmem_range *empty_range_o    480           struct sos_kmem_range *empty_range_of_ranges = NULL;
453           struct sos_kmem_range *prec_free = r    481           struct sos_kmem_range *prec_free = range->prev;
454                                                   482           
455           /* Merge them */                        483           /* Merge them */
456           prec_free->nb_pages += range->nb_pag    484           prec_free->nb_pages += range->nb_pages;
457           list_delete(kmem_free_range_list, ra    485           list_delete(kmem_free_range_list, range);
458                                                   486           
459           /* Mark the range as free. This may     487           /* Mark the range as free. This may cause the slab owning
460              the range to become empty */         488              the range to become empty */
461           empty_range_of_ranges =                 489           empty_range_of_ranges = 
462             sos_kmem_cache_release_struct_rang    490             sos_kmem_cache_release_struct_range(range);
463                                                   491 
464           /* If this causes the slab owning th    492           /* If this causes the slab owning the range to become empty,
465              add the range corresponding to th    493              add the range corresponding to the slab at the end of the
466              list of the ranges to be freed: i    494              list of the ranges to be freed: it will be actually freed
467              in one of the next iterations of     495              in one of the next iterations of the do{} loop. */
468           if (empty_range_of_ranges != NULL)      496           if (empty_range_of_ranges != NULL)
469             {                                     497             {
470               list_delete(kmem_used_range_list    498               list_delete(kmem_used_range_list, empty_range_of_ranges);
471               list_add_tail(ranges_to_free, em    499               list_add_tail(ranges_to_free, empty_range_of_ranges);
472             }                                     500             }
473                                                   501           
474           /* Set range to the beginning of thi    502           /* Set range to the beginning of this coalesced range */
475           range = prec_free;                      503           range = prec_free;
476         }                                         504         }
477                                                   505       
478       /* Merge with next one ? [NO 'else' sinc    506       /* Merge with next one ? [NO 'else' since range may be the result of
479          the merge above] */                      507          the merge above] */
480       if (range->base_vaddr + range->nb_pages*    508       if (range->base_vaddr + range->nb_pages*SOS_PAGE_SIZE
481           == range->next->base_vaddr)             509           == range->next->base_vaddr)
482         {                                         510         {
483           struct sos_kmem_range *empty_range_o    511           struct sos_kmem_range *empty_range_of_ranges = NULL;
484           struct sos_kmem_range *next_range =     512           struct sos_kmem_range *next_range = range->next;
485                                                   513           
486           /* Merge them */                        514           /* Merge them */
487           range->nb_pages += next_range->nb_pa    515           range->nb_pages += next_range->nb_pages;
488           list_delete(kmem_free_range_list, ne    516           list_delete(kmem_free_range_list, next_range);
489                                                   517           
490           /* Mark the next_range as free. This    518           /* Mark the next_range as free. This may cause the slab
491              owning the next_range to become e    519              owning the next_range to become empty */
492           empty_range_of_ranges =                 520           empty_range_of_ranges = 
493             sos_kmem_cache_release_struct_rang    521             sos_kmem_cache_release_struct_range(next_range);
494                                                   522 
495           /* If this causes the slab owning th    523           /* If this causes the slab owning the next_range to become
496              empty, add the range correspondin    524              empty, add the range corresponding to the slab at the end
497              of the list of the ranges to be f    525              of the list of the ranges to be freed: it will be
498              actually freed in one of the next    526              actually freed in one of the next iterations of the
499              do{} loop. */                        527              do{} loop. */
500           if (empty_range_of_ranges != NULL)      528           if (empty_range_of_ranges != NULL)
501             {                                     529             {
502               list_delete(kmem_used_range_list    530               list_delete(kmem_used_range_list, empty_range_of_ranges);
503               list_add_tail(ranges_to_free, em    531               list_add_tail(ranges_to_free, empty_range_of_ranges);
504             }                                     532             }
505         }                                         533         }
506                                                   534       
507                                                   535 
508       /* If deleting the range(s) caused one o    536       /* If deleting the range(s) caused one or more range(s) to be
509          freed, get the next one to free */       537          freed, get the next one to free */
510       if (list_is_empty(ranges_to_free))          538       if (list_is_empty(ranges_to_free))
511         range = NULL; /* No range left to free    539         range = NULL; /* No range left to free */
512       else                                        540       else
513         range = list_pop_head(ranges_to_free);    541         range = list_pop_head(ranges_to_free);
514                                                   542 
515     }                                             543     }
516   /* Stop when there is no range left to be fr    544   /* Stop when there is no range left to be freed for now */
517   while (range != NULL);                          545   while (range != NULL);
518                                                   546 
519   return SOS_OK;                                  547   return SOS_OK;
520 }                                                 548 }
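
A concrete coalescing scenario (hypothetical addresses): suppose the free list holds A = [0x8000, 0xA000) and C = [0xC000, 0x10000), and the used range B = [0xA000, 0xC000) is deleted. Once insert_range() places B between A and C, the first test finds that A ends exactly where B begins, so A absorbs B's pages; the second test then finds that the grown A ends where C begins, so it absorbs C as well, leaving the single free range [0x8000, 0x10000). Any struct sos_kmem_range released along the way that empties its slab is queued on ranges_to_free and dismantled by a later iteration of the do..while() loop instead of by recursion.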
521                                                   549 
522                                                   550 
523 sos_vaddr_t sos_kmem_vmm_alloc(sos_count_t nb_    551 sos_vaddr_t sos_kmem_vmm_alloc(sos_count_t nb_pages,
524                                sos_ui32_t  fla    552                                sos_ui32_t  flags)
525 {                                                 553 {
526   struct sos_kmem_range *range                    554   struct sos_kmem_range *range
527     = sos_kmem_vmm_new_range(nb_pages,            555     = sos_kmem_vmm_new_range(nb_pages,
528                              flags,               556                              flags,
529                              NULL);               557                              NULL);
530   if (! range)                                    558   if (! range)
531     return (sos_vaddr_t)NULL;                     559     return (sos_vaddr_t)NULL;
532                                                   560   
533   return range->base_vaddr;                       561   return range->base_vaddr;
534 }                                                 562 }
535                                                   563 
536                                                   564 
537 sos_vaddr_t sos_kmem_vmm_free(sos_vaddr_t vadd !! 565 sos_ret_t sos_kmem_vmm_free(sos_vaddr_t vaddr)
538 {                                                 566 {
539   struct sos_kmem_range *range = lookup_range(    567   struct sos_kmem_range *range = lookup_range(vaddr);
540                                                   568 
541   /* We expect that the given address is the b    569   /* We expect that the given address is the base address of the
542      range */                                     570      range */
543   if (!range || (range->base_vaddr != vaddr))     571   if (!range || (range->base_vaddr != vaddr))
544     return -SOS_EINVAL;                           572     return -SOS_EINVAL;
545                                                   573 
546   /* We expect that this range is not held by     574   /* We expect that this range is not held by any cache */
547   if (range->slab != NULL)                        575   if (range->slab != NULL)
548     return -SOS_EBUSY;                            576     return -SOS_EBUSY;
549                                                   577 
550   return sos_kmem_vmm_del_range(range);           578   return sos_kmem_vmm_del_range(range);
551 }                                                 579 }
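
The two wrappers above pair up like a page-granular kernel malloc/free; a minimal sketch of a caller (hypothetical, not part of the listing):

    /* Grab two mapped pages, use them, release them. The address
       handed back to sos_kmem_vmm_free() must be the base address
       of the range, and the range must not belong to a slab. */
    sos_vaddr_t buf = sos_kmem_vmm_alloc(2, SOS_KMEM_VMM_MAP);
    if (buf)
      {
        /* ... 2*SOS_PAGE_SIZE bytes available at 'buf' ... */
        sos_kmem_vmm_free(buf);
      }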
552                                                   580 
553                                                   581 
554 sos_ret_t sos_kmem_vmm_set_slab(struct sos_kme    582 sos_ret_t sos_kmem_vmm_set_slab(struct sos_kmem_range *range,
555                                 struct sos_ksl    583                                 struct sos_kslab *slab)
556 {                                                 584 {
557   if (! range)                                    585   if (! range)
558     return -SOS_EINVAL;                           586     return -SOS_EINVAL;
559                                                   587 
560   range->slab = slab;                             588   range->slab = slab;
561   return SOS_OK;                                  589   return SOS_OK;
562 }                                                 590 }
563                                                   591 
564 struct sos_kslab * sos_kmem_vmm_resolve_slab(s    592 struct sos_kslab * sos_kmem_vmm_resolve_slab(sos_vaddr_t vaddr)
565 {                                                 593 {
566   struct sos_kmem_range *range = lookup_range(    594   struct sos_kmem_range *range = lookup_range(vaddr);
567   if (! range)                                    595   if (! range)
568     return NULL;                                  596     return NULL;
569                                                   597 
570   return range->slab;                             598   return range->slab;
571 }                                                 599 }
572                                                   600 
                                                   >> 601 
                                                   >> 602 sos_bool_t sos_kmem_vmm_is_valid_vaddr(sos_vaddr_t vaddr)
                                                   >> 603 {
                                                   >> 604   struct sos_kmem_range *range = lookup_range(vaddr);
                                                   >> 605   return (range != NULL);
                                                   >> 606 }
                                                      
