The LXR Cross Referencer for SOS


Diff markup

Differences between /sos/kmem_vmm.c (Article 6) and /sos/kmem_vmm.c (Article 5)


(Key: the LXR side-by-side columns are rendered below as a single listing, with the source line numbers omitted. Unmarked lines are common to both versions; lines marked "<<" appear only in the newer file (Article 6), lines marked ">>" only in the older file (Article 5). A changed line appears as a ">>"/"<<" pair.)

/* Copyright (C) 2000 Thomas Petazzoni
   Copyright (C) 2004 David Decotigny

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License
   as published by the Free Software Foundation; either version 2
   of the License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
   USA.
*/

#include <sos/list.h>
#include <sos/physmem.h>
#include <hwcore/paging.h>
#include <sos/assert.h>

#include "kmem_vmm.h"

/** The structure of a range of kernel-space virtual addresses */
struct sos_kmem_range
{
  sos_vaddr_t base_vaddr;
  sos_count_t nb_pages;

  /* The slab owning this range, or NULL */
  struct sos_kslab *slab;

  struct sos_kmem_range *prev, *next;
};
const int sizeof_struct_sos_kmem_range = sizeof(struct sos_kmem_range);

/** The ranges are SORTED in (strictly) ascending base addresses */
static struct sos_kmem_range *kmem_free_range_list, *kmem_used_range_list;

/** The slab cache for the kmem ranges */
static struct sos_kslab_cache *kmem_range_cache;



/** Helper function to get the closest preceding or containing
    range for the given virtual address */
static struct sos_kmem_range *
get_closest_preceding_kmem_range(struct sos_kmem_range *the_list,
                                 sos_vaddr_t vaddr)
{
  int nb_elements;
  struct sos_kmem_range *a_range, *ret_range;

  /* kmem_range list is kept SORTED, so we exit as soon as vaddr >= a
     range base address */
  ret_range = NULL;
  list_foreach(the_list, a_range, nb_elements)
    {
      if (vaddr < a_range->base_vaddr)
        return ret_range;
      ret_range = a_range;
    }

  /* This will always be the LAST range in the kmem area */
  return ret_range;
}


/**
 * Helper function to lookup a free range large enough to hold nb_pages
 * pages (first fit)
 */
static struct sos_kmem_range *find_suitable_free_range(sos_count_t nb_pages)
{
  int nb_elements;
  struct sos_kmem_range *r;

  list_foreach(kmem_free_range_list, r, nb_elements)
  {
    if (r->nb_pages >= nb_pages)
      return r;
  }

  return NULL;
}


/**
 * Helper function to add a_range in the_list, in strictly ascending order.
 *
 * @return The (possibly) new head of the_list
 */
static struct sos_kmem_range *insert_range(struct sos_kmem_range *the_list,
                                           struct sos_kmem_range *a_range)
{
  struct sos_kmem_range *prec_used;

  /** Look for any preceding range */
  prec_used = get_closest_preceding_kmem_range(the_list,
                                               a_range->base_vaddr);
  /** insert a_range /after/ this prec_used */
  if (prec_used != NULL)
    list_insert_after(the_list, prec_used, a_range);
  else /* Insert at the beginning of the list */
    list_add_head(the_list, a_range);

  return the_list;
}


/**
 * Helper function to retrieve the range owning the given vaddr, by
 * scanning the physical memory first if vaddr is mapped in RAM
 */
static struct sos_kmem_range *lookup_range(sos_vaddr_t vaddr)
{
  struct sos_kmem_range *range;

  /* First: try to retrieve the physical page mapped at this address */
>>   sos_paddr_t ppage_paddr = sos_paging_get_paddr(vaddr);
>>   if (! ppage_paddr)
<<   sos_paddr_t ppage_paddr = SOS_PAGE_ALIGN_INF(sos_paging_get_paddr(vaddr));
<<   if (ppage_paddr)
    {
      range = sos_physmem_get_kmem_range(ppage_paddr);
<<
      /* If a page is mapped at this address, it is EXPECTED that it
         is really associated with a range */
      SOS_ASSERT_FATAL(range != NULL);
    }

  /* Otherwise scan the list of used ranges, looking for the range
     owning the address */
  else
    {
      range = get_closest_preceding_kmem_range(kmem_used_range_list,
                                               vaddr);
      /* Not found */
      if (! range)
        return NULL;
<<
<<       /* vaddr not covered by this range */
<<       if ( (vaddr < range->base_vaddr)
<<            || (vaddr >= (range->base_vaddr + range->nb_pages*SOS_PAGE_SIZE)) )
<<         return NULL;
    }

  return range;
}


/**
 * Helper function for sos_kmem_vmm_setup() to initialize a new range
 * that maps a given area as free or as already used.
 * This function either succeeds or halts the whole system.
 */
static struct sos_kmem_range *
create_range(sos_bool_t  is_free,
             sos_vaddr_t base_vaddr,
>>              sos_vaddr_t top_addr,
<<              sos_vaddr_t top_vaddr,
             struct sos_kslab *associated_slab)
{
  struct sos_kmem_range *range;
<<
<<   SOS_ASSERT_FATAL(SOS_IS_PAGE_ALIGNED(base_vaddr));
<<   SOS_ASSERT_FATAL(SOS_IS_PAGE_ALIGNED(top_vaddr));
<<
<<   if ((top_vaddr - base_vaddr) < SOS_PAGE_SIZE)
<<     return NULL;
<<
  range = (struct sos_kmem_range*)sos_kmem_cache_alloc(kmem_range_cache,
                                                       SOS_KSLAB_ALLOC_ATOMIC);
  SOS_ASSERT_FATAL(range != NULL);

  range->base_vaddr = base_vaddr;
>>   range->nb_pages   = (top_addr - base_vaddr) / SOS_PAGE_SIZE;
<<   range->nb_pages   = (top_vaddr - base_vaddr) / SOS_PAGE_SIZE;

  if (is_free)
    {
      list_add_tail(kmem_free_range_list,
                    range);
    }
  else
    {
      sos_vaddr_t vaddr;
      range->slab = associated_slab;
      list_add_tail(kmem_used_range_list,
                    range);

      /* Ok, set the range owner for the pages in this range */
      for (vaddr = base_vaddr ;
>>            vaddr < top_addr ;
<<            vaddr < top_vaddr ;
           vaddr += SOS_PAGE_SIZE)
      {
        sos_paddr_t ppage_paddr = sos_paging_get_paddr(vaddr);
        SOS_ASSERT_FATAL((void*)ppage_paddr != NULL);
        sos_physmem_set_kmem_range(ppage_paddr, range);
      }
    }

  return range;
}

>> sos_ret_t sos_kmem_vmm_setup(sos_vaddr_t kernel_core_base,
>>                              sos_vaddr_t kernel_core_top)
<< sos_ret_t
<< sos_kmem_vmm_subsystem_setup(sos_vaddr_t kernel_core_base,
<<                              sos_vaddr_t kernel_core_top,
<<                              sos_vaddr_t bootstrap_stack_bottom_vaddr,
<<                              sos_vaddr_t bootstrap_stack_top_vaddr)
{
  struct sos_kslab *first_struct_slab_of_caches,
    *first_struct_slab_of_ranges;
  sos_vaddr_t first_slab_of_caches_base,
    first_slab_of_caches_nb_pages,
    first_slab_of_ranges_base,
    first_slab_of_ranges_nb_pages;
  struct sos_kmem_range *first_range_of_caches,
    *first_range_of_ranges;

  list_init(kmem_free_range_list);
  list_init(kmem_used_range_list);

  kmem_range_cache
>>     = sos_kmem_cache_setup_prepare(kernel_core_base,
>>                                    kernel_core_top,
>>                                    sizeof(struct sos_kmem_range),
>>                                    & first_struct_slab_of_caches,
>>                                    & first_slab_of_caches_base,
>>                                    & first_slab_of_caches_nb_pages,
>>                                    & first_struct_slab_of_ranges,
>>                                    & first_slab_of_ranges_base,
>>                                    & first_slab_of_ranges_nb_pages);
<<     = sos_kmem_cache_subsystem_setup_prepare(kernel_core_base,
<<                                              kernel_core_top,
<<                                              sizeof(struct sos_kmem_range),
<<                                              & first_struct_slab_of_caches,
<<                                              & first_slab_of_caches_base,
<<                                              & first_slab_of_caches_nb_pages,
<<                                              & first_struct_slab_of_ranges,
<<                                              & first_slab_of_ranges_base,
<<                                              & first_slab_of_ranges_nb_pages);
  SOS_ASSERT_FATAL(kmem_range_cache != NULL);

  /* Mark virtual addresses 16kB - Video as FREE */
  create_range(TRUE,
               SOS_KMEM_VMM_BASE,
               SOS_PAGE_ALIGN_INF(BIOS_N_VIDEO_START),
               NULL);

  /* Mark virtual addresses in Video hardware mapping as NOT FREE */
  create_range(FALSE,
               SOS_PAGE_ALIGN_INF(BIOS_N_VIDEO_START),
               SOS_PAGE_ALIGN_SUP(BIOS_N_VIDEO_END),
               NULL);

  /* Mark virtual addresses Video - Kernel as FREE */
  create_range(TRUE,
               SOS_PAGE_ALIGN_SUP(BIOS_N_VIDEO_END),
               SOS_PAGE_ALIGN_INF(kernel_core_base),
               NULL);

>>   /* Mark virtual addresses in Kernel code/data as NOT FREE */
>>   create_range(FALSE,
>>                SOS_PAGE_ALIGN_INF(kernel_core_base),
>>                SOS_PAGE_ALIGN_SUP(kernel_core_top),
>>                NULL);
<<   /* Mark virtual addresses in Kernel code/data up to the bootstrap stack
<<      as NOT FREE */
<<   create_range(FALSE,
<<                SOS_PAGE_ALIGN_INF(kernel_core_base),
<<                bootstrap_stack_bottom_vaddr,
<<                NULL);
<<
<<   /* Mark virtual addresses in the bootstrap stack as NOT FREE,
<<      but in another vmm region in order to be un-allocated later */
<<   create_range(FALSE,
<<                bootstrap_stack_bottom_vaddr,
<<                bootstrap_stack_top_vaddr,
<<                NULL);
<<
<<   /* Mark the remaining virtual addresses in Kernel code/data after
<<      the bootstrap stack as NOT FREE */
<<   create_range(FALSE,
<<                bootstrap_stack_top_vaddr,
<<                SOS_PAGE_ALIGN_SUP(kernel_core_top),
<<                NULL);

  /* Mark virtual addresses in the first slab of the cache of caches
     as NOT FREE */
  SOS_ASSERT_FATAL(SOS_PAGE_ALIGN_SUP(kernel_core_top)
                   == first_slab_of_caches_base);
  SOS_ASSERT_FATAL(first_struct_slab_of_caches != NULL);
  first_range_of_caches
    = create_range(FALSE,
                   first_slab_of_caches_base,
                   first_slab_of_caches_base
                   + first_slab_of_caches_nb_pages*SOS_PAGE_SIZE,
                   first_struct_slab_of_caches);

  /* Mark virtual addresses in the first slab of the cache of ranges
     as NOT FREE */
  SOS_ASSERT_FATAL((first_slab_of_caches_base
                    + first_slab_of_caches_nb_pages*SOS_PAGE_SIZE)
                   == first_slab_of_ranges_base);
  SOS_ASSERT_FATAL(first_struct_slab_of_ranges != NULL);
  first_range_of_ranges
    = create_range(FALSE,
                   first_slab_of_ranges_base,
                   first_slab_of_ranges_base
                   + first_slab_of_ranges_nb_pages*SOS_PAGE_SIZE,
                   first_struct_slab_of_ranges);

  /* Mark virtual addresses after these slabs as FREE */
  create_range(TRUE,
               first_slab_of_ranges_base
               + first_slab_of_ranges_nb_pages*SOS_PAGE_SIZE,
               SOS_KMEM_VMM_TOP,
               NULL);

  /* Update the cache subsystem so that the artificially-created
     caches of caches and ranges really behave like *normal* caches (ie
     those allocated by the normal slab API) */
>>   sos_kmem_cache_setup_commit(first_struct_slab_of_caches,
>>                               first_range_of_caches,
>>                               first_struct_slab_of_ranges,
>>                               first_range_of_ranges);
<<   sos_kmem_cache_subsystem_setup_commit(first_struct_slab_of_caches,
<<                                         first_range_of_caches,
<<                                         first_struct_slab_of_ranges,
<<                                         first_range_of_ranges);

  return SOS_OK;
}


/**
 * Allocate a new kernel area spanning one or multiple pages.
 *
 * @return a new range structure
 */
struct sos_kmem_range *sos_kmem_vmm_new_range(sos_count_t nb_pages,
                                              sos_ui32_t  flags,
                                              sos_vaddr_t * range_start)
{
  struct sos_kmem_range *free_range, *new_range;

  if (nb_pages <= 0)
    return NULL;

  /* Find a suitable free range to hold the size-sized object */
  free_range = find_suitable_free_range(nb_pages);
  if (free_range == NULL)
    return NULL;

  /* If range has exactly the requested size, just move it to the
     "used" list */
  if(free_range->nb_pages == nb_pages)
    {
      list_delete(kmem_free_range_list, free_range);
      kmem_used_range_list = insert_range(kmem_used_range_list,
                                          free_range);
      /* The new_range is exactly the free_range */
      new_range = free_range;
    }

  /* Otherwise the range is bigger than the requested size, split it.
     This involves reducing its size, and allocating a new range, which
     is going to be added to the "used" list */
  else
    {
      /* free_range split in { new_range | free_range } */
      new_range = (struct sos_kmem_range*)
        sos_kmem_cache_alloc(kmem_range_cache,
                             (flags & SOS_KMEM_VMM_ATOMIC)?
                             SOS_KSLAB_ALLOC_ATOMIC:0);
      if (! new_range)
        return NULL;

      new_range->base_vaddr   = free_range->base_vaddr;
      new_range->nb_pages     = nb_pages;
      free_range->base_vaddr += nb_pages*SOS_PAGE_SIZE;
      free_range->nb_pages   -= nb_pages;

      /* free_range is still at the same place in the list */
      /* insert new_range in the used list */
      kmem_used_range_list = insert_range(kmem_used_range_list,
                                          new_range);
    }

  /* By default, the range is not associated with any slab */
  new_range->slab = NULL;

  /* If mapping of physical pages is needed, map them now */
  if (flags & SOS_KMEM_VMM_MAP)
    {
      int i;
      for (i = 0 ; i < nb_pages ; i ++)
        {
          /* Get a new physical page */
          sos_paddr_t ppage_paddr
            = sos_physmem_ref_physpage_new(! (flags & SOS_KMEM_VMM_ATOMIC));

          /* Map the page in kernel space */
          if (ppage_paddr)
            {
              if (sos_paging_map(ppage_paddr,
                                 new_range->base_vaddr
                                   + i * SOS_PAGE_SIZE,
                                 FALSE /* Not a user page */,
                                 ((flags & SOS_KMEM_VMM_ATOMIC)?
                                  SOS_VM_MAP_ATOMIC:0)
                                 | SOS_VM_MAP_PROT_READ
                                 | SOS_VM_MAP_PROT_WRITE))
                {
                  /* Failed => force unallocation, see below */
                  sos_physmem_unref_physpage(ppage_paddr);
                  ppage_paddr = (sos_paddr_t)NULL;
                }
              else
                {
                  /* Success: page can be unreferenced since it is
                     now mapped */
                  sos_physmem_unref_physpage(ppage_paddr);
                }
            }

          /* Undo the allocation if failed to allocate or map a new page */
          if (! ppage_paddr)
            {
              sos_kmem_vmm_del_range(new_range);
              return NULL;
            }

          /* Ok, set the range owner for this page */
          sos_physmem_set_kmem_range(ppage_paddr, new_range);
        }
    }
>>
>>   /* Otherwise we need a correct page fault handler to support
>>      deferred mapping (aka demand paging) of ranges */
>>   else
>>     SOS_ASSERT_FATAL(! "No demand paging yet");
<<   /* ... Otherwise: Demand Paging will do the job */

  if (range_start)
    *range_start = new_range->base_vaddr;

  return new_range;
}

>> sos_vaddr_t sos_kmem_vmm_del_range(struct sos_kmem_range *range)
<< sos_ret_t sos_kmem_vmm_del_range(struct sos_kmem_range *range)
{
  int i;
  struct sos_kmem_range *ranges_to_free;
  list_init(ranges_to_free);

  SOS_ASSERT_FATAL(range != NULL);
  SOS_ASSERT_FATAL(range->slab == NULL);

  /* Remove the range from the 'USED' list now */
  list_delete(kmem_used_range_list, range);

  /*
   * The following do..while() loop is here to avoid an indirect
   * recursion: if we call directly kmem_cache_free() from inside the
   * current function, we take the risk to re-enter the current function
   * (sos_kmem_vmm_del_range()) again, which may cause problem if it
   * in turn calls kmem_slab again and sos_kmem_vmm_del_range again,
   * and again and again. This may happen while freeing ranges of
   * struct sos_kslab...
   *
   * To avoid this, we choose to call a special function of kmem_slab
   * doing almost the same as sos_kmem_cache_free(), but which does
   * NOT call us (ie sos_kmem_vmm_del_range()): instead WE add the
   * range that is to be freed to a list, and the do..while() loop is
   * here to process this list ! The recursion is replaced by
   * classical iterations.
   */
  do
    {
      /* Ok, we got the range. Now, insert this range in the free list */
      kmem_free_range_list = insert_range(kmem_free_range_list, range);

      /* Unmap the physical pages */
      for (i = 0 ; i < range->nb_pages ; i ++)
        {
          /* This will work even if no page is mapped at this address */
          sos_paging_unmap(range->base_vaddr + i*SOS_PAGE_SIZE);
        }

      /* If possible, coalesce it with prev/next free ranges (there is
         always a valid prev/next link since the list is circular). Note:
         the tests below will lead to correct behaviour even if the list
         is limited to the 'range' singleton, at least as long as the
         range is not zero-sized */
      /* Merge with preceding one ? */
      if (range->prev->base_vaddr + range->prev->nb_pages*SOS_PAGE_SIZE
          == range->base_vaddr)
        {
          struct sos_kmem_range *empty_range_of_ranges = NULL;
          struct sos_kmem_range *prec_free = range->prev;

          /* Merge them */
          prec_free->nb_pages += range->nb_pages;
          list_delete(kmem_free_range_list, range);

          /* Mark the range as free. This may cause the slab owning
             the range to become empty */
          empty_range_of_ranges =
            sos_kmem_cache_release_struct_range(range);

          /* If this causes the slab owning the range to become empty,
             add the range corresponding to the slab at the end of the
             list of the ranges to be freed: it will be actually freed
             in one of the next iterations of the do{} loop. */
          if (empty_range_of_ranges != NULL)
            {
              list_delete(kmem_used_range_list, empty_range_of_ranges);
              list_add_tail(ranges_to_free, empty_range_of_ranges);
            }

          /* Set range to the beginning of the coalesced area */
          range = prec_free;
        }

      /* Merge with next one ? [NO 'else' since range may be the result of
         the merge above] */
      if (range->base_vaddr + range->nb_pages*SOS_PAGE_SIZE
          == range->next->base_vaddr)
        {
          struct sos_kmem_range *empty_range_of_ranges = NULL;
          struct sos_kmem_range *next_range = range->next;

          /* Merge them */
          range->nb_pages += next_range->nb_pages;
          list_delete(kmem_free_range_list, next_range);

          /* Mark the next_range as free. This may cause the slab
             owning the next_range to become empty */
          empty_range_of_ranges =
            sos_kmem_cache_release_struct_range(next_range);

          /* If this causes the slab owning the next_range to become
             empty, add the range corresponding to the slab at the end
             of the list of the ranges to be freed: it will be
             actually freed in one of the next iterations of the
             do{} loop. */
          if (empty_range_of_ranges != NULL)
            {
              list_delete(kmem_used_range_list, empty_range_of_ranges);
              list_add_tail(ranges_to_free, empty_range_of_ranges);
            }
        }


      /* If deleting the range(s) caused one or more range(s) to be
         freed, get the next one to free */
      if (list_is_empty(ranges_to_free))
        range = NULL; /* No range left to free */
      else
        range = list_pop_head(ranges_to_free);

    }
  /* Stop when there is no range left to be freed for now */
  while (range != NULL);

  return SOS_OK;
}

sos_vaddr_t sos_kmem_vmm_alloc(sos_count_t nb_pages,
                               sos_ui32_t  flags)
{
  struct sos_kmem_range *range
    = sos_kmem_vmm_new_range(nb_pages,
                             flags,
                             NULL);
  if (! range)
    return (sos_vaddr_t)NULL;

  return range->base_vaddr;
}


>> sos_vaddr_t sos_kmem_vmm_free(sos_vaddr_t vaddr)
<< sos_ret_t sos_kmem_vmm_free(sos_vaddr_t vaddr)
{
  struct sos_kmem_range *range = lookup_range(vaddr);

  /* We expect that the given address is the base address of the
     range */
  if (!range || (range->base_vaddr != vaddr))
    return -SOS_EINVAL;

  /* We expect that this range is not held by any cache */
  if (range->slab != NULL)
    return -SOS_EBUSY;

  return sos_kmem_vmm_del_range(range);
}


sos_ret_t sos_kmem_vmm_set_slab(struct sos_kmem_range *range,
                                struct sos_kslab *slab)
{
  if (! range)
    return -SOS_EINVAL;

  range->slab = slab;
  return SOS_OK;
}

struct sos_kslab * sos_kmem_vmm_resolve_slab(sos_vaddr_t vaddr)
{
  struct sos_kmem_range *range = lookup_range(vaddr);
  if (! range)
    return NULL;

  return range->slab;
}

<<
<< sos_bool_t sos_kmem_vmm_is_valid_vaddr(sos_vaddr_t vaddr)
<< {
<<   struct sos_kmem_range *range = lookup_range(vaddr);
<<   return (range != NULL);
<< }

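A note on the interface shown above: the newer column renames the setup entry point to sos_kmem_vmm_subsystem_setup(), registers the bootstrap stack as a separate range so that it can be released later, turns sos_kmem_vmm_del_range() and sos_kmem_vmm_free() into sos_ret_t functions, adds sos_kmem_vmm_is_valid_vaddr(), and drops the "No demand paging yet" assertion, leaving unmapped ranges to the page fault handler. The following minimal sketch shows how a caller might exercise the allocator side of this API; it is hypothetical (the include path and the test function are assumptions), but it only uses identifiers that appear in the diff.

/* Hypothetical smoke test for the kmem_vmm allocator (sketch only,
   not part of the diffed file) */
#include <sos/kmem_vmm.h>    /* assumed install path for "kmem_vmm.h" */
#include <sos/assert.h>

static void kmem_vmm_smoke_test(void)
{
  /* Ask for 4 pages and map them immediately (SOS_KMEM_VMM_MAP).
     Without this flag, the newer code defers mapping to demand
     paging, where the older code asserted "No demand paging yet". */
  sos_vaddr_t area = sos_kmem_vmm_alloc(4, SOS_KMEM_VMM_MAP);
  SOS_ASSERT_FATAL(area != (sos_vaddr_t)NULL);

  /* The pages are mapped, so the area is immediately usable */
  ((char*)area)[0] = 42;

  /* Newer version only: any address inside the range is valid */
  SOS_ASSERT_FATAL(sos_kmem_vmm_is_valid_vaddr(area + 123));

  /* Freeing requires the base address of a range not owned by a
     slab; otherwise -SOS_EINVAL / -SOS_EBUSY is returned */
  SOS_ASSERT_FATAL(sos_kmem_vmm_free(area) == SOS_OK);
}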