SimpleOS

The LXR Cross Referencer for SOS


Diff markup

Differences between /sos/kmem_vmm.c (Article 9.5) and /sos/kmem_vmm.c (Article 6)


Unchanged code is shown once below; a leading '-' marks a line present only
in the Article 6 version, and a leading '+' marks a line present only in the
Article 9.5 version.

--- /sos/kmem_vmm.c (Article 6)
+++ /sos/kmem_vmm.c (Article 9.5)

/* Copyright (C) 2000 Thomas Petazzoni
   Copyright (C) 2004 David Decotigny

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License
   as published by the Free Software Foundation; either version 2
   of the License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
   USA.
*/

#include <sos/list.h>
#include <sos/physmem.h>
#include <hwcore/paging.h>
#include <sos/assert.h>

#include "kmem_vmm.h"
/** The structure of a range of kernel-space virtual addresses */
struct sos_kmem_range
{
  sos_vaddr_t base_vaddr;
  sos_count_t nb_pages;

  /* The slab owning this range, or NULL */
  struct sos_kslab *slab;

  struct sos_kmem_range *prev, *next;
};
const int sizeof_struct_sos_kmem_range = sizeof(struct sos_kmem_range);

/** The ranges are SORTED in (strictly) ascending base addresses */
static struct sos_kmem_range *kmem_free_range_list, *kmem_used_range_list;

/** The slab cache for the kmem ranges */
static struct sos_kslab_cache *kmem_range_cache;



/** Helper function to get the closest preceding or containing
    range for the given virtual address */
static struct sos_kmem_range *
get_closest_preceding_kmem_range(struct sos_kmem_range *the_list,
                                 sos_vaddr_t vaddr)
{
  int nb_elements;
  struct sos_kmem_range *a_range, *ret_range;

  /* kmem_range list is kept SORTED, so we exit as soon as vaddr >= a
     range base address */
  ret_range = NULL;
  list_foreach(the_list, a_range, nb_elements)
    {
      if (vaddr < a_range->base_vaddr)
        return ret_range;
      ret_range = a_range;
    }

  /* This will always be the LAST range in the kmem area */
  return ret_range;
}
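
An informal example of this helper's contract (hypothetical addresses): with
ranges based at 0x1000, 0x4000 and 0x8000 in the list, looking up 0x5000
returns the 0x4000 range (the closest preceding or containing one), looking
up 0x500 returns NULL (no preceding range), and looking up 0x9000 returns the
last range.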

/**
 * Helper function to lookup a free range large enough to hold nb_pages
 * pages (first fit)
 */
static struct sos_kmem_range *find_suitable_free_range(sos_count_t nb_pages)
{
  int nb_elements;
  struct sos_kmem_range *r;

  list_foreach(kmem_free_range_list, r, nb_elements)
  {
    if (r->nb_pages >= nb_pages)
      return r;
  }

  return NULL;
}


/**
 * Helper function to add a_range in the_list, in strictly ascending order.
 *
 * @return The (possibly) new head of the_list
 */
static struct sos_kmem_range *insert_range(struct sos_kmem_range *the_list,
                                           struct sos_kmem_range *a_range)
{
  struct sos_kmem_range *prec_used;

  /** Look for any preceding range */
  prec_used = get_closest_preceding_kmem_range(the_list,
                                               a_range->base_vaddr);
  /** insert a_range /after/ this prec_used */
  if (prec_used != NULL)
    list_insert_after(the_list, prec_used, a_range);
  else /* Insert at the beginning of the list */
    list_add_head(the_list, a_range);

  return the_list;
}


/**
 * Helper function to retrieve the range owning the given vaddr, by
 * scanning the physical memory first if vaddr is mapped in RAM
 */
static struct sos_kmem_range *lookup_range(sos_vaddr_t vaddr)
{
  struct sos_kmem_range *range;

  /* First: try to retrieve the physical page mapped at this address */
  sos_paddr_t ppage_paddr = SOS_PAGE_ALIGN_INF(sos_paging_get_paddr(vaddr));
+
  if (ppage_paddr)
    {
      range = sos_physmem_get_kmem_range(ppage_paddr);

      /* If a page is mapped at this address, it is EXPECTED that it
         is really associated with a range */
      SOS_ASSERT_FATAL(range != NULL);
    }

  /* Otherwise scan the list of used ranges, looking for the range
     owning the address */
  else
    {
      range = get_closest_preceding_kmem_range(kmem_used_range_list,
                                               vaddr);
      /* Not found */
      if (! range)
        return NULL;

      /* vaddr not covered by this range */
      if ( (vaddr < range->base_vaddr)
           || (vaddr >= (range->base_vaddr + range->nb_pages*SOS_PAGE_SIZE)) )
        return NULL;
    }

  return range;
}


/**
 * Helper function for sos_kmem_vmm_subsystem_setup() to initialize a new
 * range that maps a given area as free or as already used.
 * This function either succeeds or halts the whole system.
 */
static struct sos_kmem_range *
create_range(sos_bool_t  is_free,
             sos_vaddr_t base_vaddr,
             sos_vaddr_t top_vaddr,
             struct sos_kslab *associated_slab)
{
  struct sos_kmem_range *range;

  SOS_ASSERT_FATAL(SOS_IS_PAGE_ALIGNED(base_vaddr));
  SOS_ASSERT_FATAL(SOS_IS_PAGE_ALIGNED(top_vaddr));

  if ((top_vaddr - base_vaddr) < SOS_PAGE_SIZE)
    return NULL;

  range = (struct sos_kmem_range*)sos_kmem_cache_alloc(kmem_range_cache,
                                                       SOS_KSLAB_ALLOC_ATOMIC);
  SOS_ASSERT_FATAL(range != NULL);

  range->base_vaddr = base_vaddr;
  range->nb_pages   = (top_vaddr - base_vaddr) / SOS_PAGE_SIZE;

  if (is_free)
    {
      list_add_tail(kmem_free_range_list,
                    range);
    }
  else
    {
      sos_vaddr_t vaddr;
      range->slab = associated_slab;
      list_add_tail(kmem_used_range_list,
                    range);

      /* Ok, set the range owner for the pages in this range */
      for (vaddr = base_vaddr ;
           vaddr < top_vaddr ;
           vaddr += SOS_PAGE_SIZE)
      {
        sos_paddr_t ppage_paddr = sos_paging_get_paddr(vaddr);
        SOS_ASSERT_FATAL((void*)ppage_paddr != NULL);
        sos_physmem_set_kmem_range(ppage_paddr, range);
      }
    }

  return range;
}


sos_ret_t
sos_kmem_vmm_subsystem_setup(sos_vaddr_t kernel_core_base,
                             sos_vaddr_t kernel_core_top,
                             sos_vaddr_t bootstrap_stack_bottom_vaddr,
                             sos_vaddr_t bootstrap_stack_top_vaddr)
{
  struct sos_kslab *first_struct_slab_of_caches,
    *first_struct_slab_of_ranges;
  sos_vaddr_t first_slab_of_caches_base,
    first_slab_of_caches_nb_pages,
    first_slab_of_ranges_base,
    first_slab_of_ranges_nb_pages;
  struct sos_kmem_range *first_range_of_caches,
    *first_range_of_ranges;

  list_init(kmem_free_range_list);
  list_init(kmem_used_range_list);

  kmem_range_cache
    = sos_kmem_cache_subsystem_setup_prepare(kernel_core_base,
                                             kernel_core_top,
                                             sizeof(struct sos_kmem_range),
                                             & first_struct_slab_of_caches,
                                             & first_slab_of_caches_base,
                                             & first_slab_of_caches_nb_pages,
                                             & first_struct_slab_of_ranges,
                                             & first_slab_of_ranges_base,
                                             & first_slab_of_ranges_nb_pages);
  SOS_ASSERT_FATAL(kmem_range_cache != NULL);

  /* Mark virtual addresses 16kB - Video as FREE */
  create_range(TRUE,
               SOS_KMEM_VMM_BASE,
               SOS_PAGE_ALIGN_INF(BIOS_N_VIDEO_START),
               NULL);

  /* Mark virtual addresses in Video hardware mapping as NOT FREE */
  create_range(FALSE,
               SOS_PAGE_ALIGN_INF(BIOS_N_VIDEO_START),
               SOS_PAGE_ALIGN_SUP(BIOS_N_VIDEO_END),
               NULL);

  /* Mark virtual addresses Video - Kernel as FREE */
  create_range(TRUE,
               SOS_PAGE_ALIGN_SUP(BIOS_N_VIDEO_END),
               SOS_PAGE_ALIGN_INF(kernel_core_base),
               NULL);

  /* Mark virtual addresses in Kernel code/data up to the bootstrap stack
     as NOT FREE */
  create_range(FALSE,
               SOS_PAGE_ALIGN_INF(kernel_core_base),
               bootstrap_stack_bottom_vaddr,
               NULL);

  /* Mark virtual addresses in the bootstrap stack as NOT FREE too,
     but in another vmm region in order to be un-allocated later */
  create_range(FALSE,
               bootstrap_stack_bottom_vaddr,
               bootstrap_stack_top_vaddr,
               NULL);

  /* Mark the remaining virtual addresses in Kernel code/data after
     the bootstrap stack as NOT FREE */
  create_range(FALSE,
               bootstrap_stack_top_vaddr,
               SOS_PAGE_ALIGN_SUP(kernel_core_top),
               NULL);

  /* Mark virtual addresses in the first slab of the cache of caches
     as NOT FREE */
  SOS_ASSERT_FATAL(SOS_PAGE_ALIGN_SUP(kernel_core_top)
                   == first_slab_of_caches_base);
  SOS_ASSERT_FATAL(first_struct_slab_of_caches != NULL);
  first_range_of_caches
    = create_range(FALSE,
                   first_slab_of_caches_base,
                   first_slab_of_caches_base
                   + first_slab_of_caches_nb_pages*SOS_PAGE_SIZE,
                   first_struct_slab_of_caches);

  /* Mark virtual addresses in the first slab of the cache of ranges
     as NOT FREE */
  SOS_ASSERT_FATAL((first_slab_of_caches_base
                    + first_slab_of_caches_nb_pages*SOS_PAGE_SIZE)
                   == first_slab_of_ranges_base);
  SOS_ASSERT_FATAL(first_struct_slab_of_ranges != NULL);
  first_range_of_ranges
    = create_range(FALSE,
                   first_slab_of_ranges_base,
                   first_slab_of_ranges_base
                   + first_slab_of_ranges_nb_pages*SOS_PAGE_SIZE,
                   first_struct_slab_of_ranges);

  /* Mark virtual addresses after these slabs as FREE */
  create_range(TRUE,
               first_slab_of_ranges_base
               + first_slab_of_ranges_nb_pages*SOS_PAGE_SIZE,
               SOS_KMEM_VMM_TOP,
               NULL);

  /* Update the cache subsystem so that the artificially-created
     caches of caches and ranges really behave like *normal* caches (ie
     those allocated by the normal slab API) */
  sos_kmem_cache_subsystem_setup_commit(first_struct_slab_of_caches,
                                        first_range_of_caches,
                                        first_struct_slab_of_ranges,
                                        first_range_of_ranges);

  return SOS_OK;
}
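
As a reading aid, here is the layout that the create_range() calls above
establish (a sketch inferred from this code; the exact boundaries depend on
the kernel image and on the BIOS/video constants):

  SOS_KMEM_VMM_BASE (16kB) .. video start    FREE
  video start .. video end                   used (video hardware mapping)
  video end .. kernel_core_base              FREE
  kernel_core_base .. stack bottom           used (kernel code/data)
  stack bottom .. stack top                  used (bootstrap stack, own range)
  stack top .. kernel_core_top               used (kernel code/data)
  kernel_core_top .. +caches slab            used (first slab, cache of caches)
  .. +ranges slab                            used (first slab, cache of ranges)
  end of ranges slab .. SOS_KMEM_VMM_TOP     FREE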

/**
 * Allocate a new kernel area spanning one or multiple pages.
 *
 * @return a new range structure
 */
struct sos_kmem_range *sos_kmem_vmm_new_range(sos_count_t nb_pages,
                                              sos_ui32_t  flags,
                                              sos_vaddr_t * range_start)
{
  struct sos_kmem_range *free_range, *new_range;

  if (nb_pages <= 0)
    return NULL;

  /* Find a suitable free range to hold the requested number of pages */
  free_range = find_suitable_free_range(nb_pages);
  if (free_range == NULL)
    return NULL;

  /* If the range has exactly the requested size, just move it to the
     "used" list */
  if(free_range->nb_pages == nb_pages)
    {
      list_delete(kmem_free_range_list, free_range);
      kmem_used_range_list = insert_range(kmem_used_range_list,
                                          free_range);
      /* The new_range is exactly the free_range */
      new_range = free_range;
    }

  /* Otherwise the range is bigger than the requested size: split it.
     This involves reducing its size and allocating a new range, which
     is then added to the "used" list */
  else
    {
      /* free_range split in { new_range | free_range } */
      new_range = (struct sos_kmem_range*)
        sos_kmem_cache_alloc(kmem_range_cache,
                             (flags & SOS_KMEM_VMM_ATOMIC)?
                             SOS_KSLAB_ALLOC_ATOMIC:0);
      if (! new_range)
        return NULL;

      new_range->base_vaddr   = free_range->base_vaddr;
      new_range->nb_pages     = nb_pages;
      free_range->base_vaddr += nb_pages*SOS_PAGE_SIZE;
      free_range->nb_pages   -= nb_pages;

      /* free_range is still at the same place in the list */
      /* insert new_range in the used list */
      kmem_used_range_list = insert_range(kmem_used_range_list,
                                          new_range);
    }

  /* By default, the range is not associated with any slab */
  new_range->slab = NULL;

  /* If mapping of physical pages is needed, map them now */
  if (flags & SOS_KMEM_VMM_MAP)
    {
-      int i;
+      unsigned int i;
      for (i = 0 ; i < nb_pages ; i ++)
        {
          /* Get a new physical page */
          sos_paddr_t ppage_paddr
            = sos_physmem_ref_physpage_new(! (flags & SOS_KMEM_VMM_ATOMIC));

          /* Map the page in kernel space */
          if (ppage_paddr)
            {
              if (sos_paging_map(ppage_paddr,
                                 new_range->base_vaddr
                                   + i * SOS_PAGE_SIZE,
                                 FALSE /* Not a user page */,
                                 ((flags & SOS_KMEM_VMM_ATOMIC)?
                                  SOS_VM_MAP_ATOMIC:0)
                                 | SOS_VM_MAP_PROT_READ
                                 | SOS_VM_MAP_PROT_WRITE))
                {
                  /* Failed => force unallocation, see below */
                  sos_physmem_unref_physpage(ppage_paddr);
                  ppage_paddr = (sos_paddr_t)NULL;
                }
              else
                {
                  /* Success: page can be unreferenced since it is
                     now mapped */
                  sos_physmem_unref_physpage(ppage_paddr);
                }
            }

          /* Undo the allocation if we failed to allocate or map a new page */
          if (! ppage_paddr)
            {
              sos_kmem_vmm_del_range(new_range);
              return NULL;
            }

          /* Ok, set the range owner for this page */
          sos_physmem_set_kmem_range(ppage_paddr, new_range);
        }
    }
  /* ... Otherwise: Demand Paging will do the job */

  if (range_start)
    *range_start = new_range->base_vaddr;

  return new_range;
}
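
To make the split above concrete, here is a small standalone sketch of the
arithmetic with hypothetical numbers (plain C types in place of the SOS
ones, 4 kB pages assumed; this program is not part of SOS):

  #include <stdio.h>

  #define PAGE_SIZE 4096u

  int main(void)
  {
    /* an 8-page free range at a hypothetical base serves a 3-page request */
    unsigned long free_base  = 0x40000000ul;
    unsigned int  free_pages = 8, nb_pages = 3;

    /* new_range takes over the head of the free range... */
    unsigned long new_base  = free_base;
    unsigned int  new_pages = nb_pages;

    /* ...and free_range shrinks from the front, staying sorted in place */
    free_base  += nb_pages * PAGE_SIZE;
    free_pages -= nb_pages;

    printf("used range: base=%#lx, %u pages\n", new_base, new_pages);
    printf("free range: base=%#lx, %u pages\n", free_base, free_pages);
    return 0;
  }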

sos_ret_t sos_kmem_vmm_del_range(struct sos_kmem_range *range)
{
-  int i;
  struct sos_kmem_range *ranges_to_free;
  list_init(ranges_to_free);

  SOS_ASSERT_FATAL(range != NULL);
  SOS_ASSERT_FATAL(range->slab == NULL);

  /* Remove the range from the 'USED' list now */
  list_delete(kmem_used_range_list, range);

  /*
   * The following do..while() loop is here to avoid an indirect
   * recursion: if we called kmem_cache_free() directly from inside the
   * current function, we would risk re-entering the current function
   * (sos_kmem_vmm_del_range()), which may cause problems if it in turn
   * calls kmem_slab again and sos_kmem_vmm_del_range again, and again
   * and again. This may happen while freeing ranges of
   * struct sos_kslab...
   *
   * To avoid this, we choose to call a special function of kmem_slab
   * doing almost the same as sos_kmem_cache_free(), but which does
   * NOT call us (ie sos_kmem_vmm_del_range()): instead WE add the
   * range that is to be freed to a list, and the do..while() loop is
   * here to process this list! The recursion is replaced by
   * classical iterations.
   */
  do
    {
+      unsigned int i;
+
      /* Ok, we got the range. Now, insert this range in the free list */
      kmem_free_range_list = insert_range(kmem_free_range_list, range);

      /* Unmap the physical pages */
      for (i = 0 ; i < range->nb_pages ; i ++)
        {
          /* This will work even if no page is mapped at this address */
          sos_paging_unmap(range->base_vaddr + i*SOS_PAGE_SIZE);
        }

      /* Possibly coalesce it with the prev/next free ranges (there is
         always a valid prev/next link since the list is circular). Note:
         the tests below will lead to correct behaviour even if the list
         is limited to the 'range' singleton, at least as long as the
         range is not zero-sized */
      /* Merge with preceding one ? */
      if (range->prev->base_vaddr + range->prev->nb_pages*SOS_PAGE_SIZE
          == range->base_vaddr)
        {
          struct sos_kmem_range *empty_range_of_ranges = NULL;
          struct sos_kmem_range *prec_free = range->prev;

          /* Merge them */
          prec_free->nb_pages += range->nb_pages;
          list_delete(kmem_free_range_list, range);

          /* Mark the range as free. This may cause the slab owning
             the range to become empty */
          empty_range_of_ranges =
            sos_kmem_cache_release_struct_range(range);

          /* If this causes the slab owning the range to become empty,
             add the range corresponding to the slab at the end of the
             list of the ranges to be freed: it will be actually freed
             in one of the next iterations of the do{} loop. */
          if (empty_range_of_ranges != NULL)
            {
              list_delete(kmem_used_range_list, empty_range_of_ranges);
              list_add_tail(ranges_to_free, empty_range_of_ranges);
            }

          /* Set range to the beginning of this coalesced area */
          range = prec_free;
        }

      /* Merge with next one ? [NO 'else' since range may be the result of
         the merge above] */
      if (range->base_vaddr + range->nb_pages*SOS_PAGE_SIZE
          == range->next->base_vaddr)
        {
          struct sos_kmem_range *empty_range_of_ranges = NULL;
          struct sos_kmem_range *next_range = range->next;

          /* Merge them */
          range->nb_pages += next_range->nb_pages;
          list_delete(kmem_free_range_list, next_range);

          /* Mark the next_range as free. This may cause the slab
             owning the next_range to become empty */
          empty_range_of_ranges =
            sos_kmem_cache_release_struct_range(next_range);

          /* If this causes the slab owning the next_range to become
             empty, add the range corresponding to the slab at the end
             of the list of the ranges to be freed: it will be
             actually freed in one of the next iterations of the
             do{} loop. */
          if (empty_range_of_ranges != NULL)
            {
              list_delete(kmem_used_range_list, empty_range_of_ranges);
              list_add_tail(ranges_to_free, empty_range_of_ranges);
            }
        }


      /* If deleting the range(s) caused one or more range(s) to be
         freed, get the next one to free */
      if (list_is_empty(ranges_to_free))
        range = NULL; /* No range left to free */
      else
        range = list_pop_head(ranges_to_free);

    }
  /* Stop when there is no range left to be freed for now */
  while (range != NULL);

  return SOS_OK;
}
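
An informal sketch of what the two merge tests above achieve: when the range
being freed is contiguous with its predecessor and/or successor in the sorted
free list, the ranges collapse into one, and each released
struct sos_kmem_range descriptor goes back to its slab, possibly queueing
that slab's own range on ranges_to_free:

  before:  [ free A ][ R (being freed) ][ free B ]
  after:   [ free A+R+B                          ]   (one descriptor left)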

sos_vaddr_t sos_kmem_vmm_alloc(sos_count_t nb_pages,
                               sos_ui32_t  flags)
{
  struct sos_kmem_range *range
    = sos_kmem_vmm_new_range(nb_pages,
                             flags,
                             NULL);
  if (! range)
    return (sos_vaddr_t)NULL;

  return range->base_vaddr;
}


sos_ret_t sos_kmem_vmm_free(sos_vaddr_t vaddr)
{
  struct sos_kmem_range *range = lookup_range(vaddr);

  /* We expect that the given address is the base address of the
     range */
  if (!range || (range->base_vaddr != vaddr))
    return -SOS_EINVAL;

  /* We expect that this range is not held by any cache */
  if (range->slab != NULL)
    return -SOS_EBUSY;

  return sos_kmem_vmm_del_range(range);
}
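
To show how the two calls above fit together, here is a hypothetical caller
(a minimal sketch assuming the declarations from "kmem_vmm.h"; the example
function is not part of SOS):

  #include "kmem_vmm.h"

  static void kmem_vmm_usage_example(void)
  {
    /* Allocate 4 kernel pages, mapped right away (SOS_KMEM_VMM_MAP)
       instead of relying on demand paging */
    sos_vaddr_t vaddr = sos_kmem_vmm_alloc(4, SOS_KMEM_VMM_MAP);
    if (! vaddr)
      return;  /* no suitable free range, or physical memory exhausted */

    /* ... use the 4 pages starting at vaddr ... */

    /* Release: the address passed back must be the base of the range */
    sos_kmem_vmm_free(vaddr);
  }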

sos_ret_t sos_kmem_vmm_set_slab(struct sos_kmem_range *range,
                                struct sos_kslab *slab)
{
  if (! range)
    return -SOS_EINVAL;

  range->slab = slab;
  return SOS_OK;
}

struct sos_kslab * sos_kmem_vmm_resolve_slab(sos_vaddr_t vaddr)
{
  struct sos_kmem_range *range = lookup_range(vaddr);
  if (! range)
    return NULL;

  return range->slab;
}


sos_bool_t sos_kmem_vmm_is_valid_vaddr(sos_vaddr_t vaddr)
{
  struct sos_kmem_range *range = lookup_range(vaddr);
  return (range != NULL);
}
