SimpleOS
The LXR Cross Referencer for SOS

/* Copyright (C) 2005  David Decotigny

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License
   as published by the Free Software Foundation; either version 2
   of the License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
   USA.
*/

#include <hwcore/paging.h>
#include <hwcore/irq.h>

#include <sos/assert.h>
#include <sos/list.h>
#include <sos/klibc.h>
#include <sos/physmem.h>
#include <sos/kmem_slab.h>
#include <sos/kmem_vmm.h>

#include "mm_context.h"


/**
 * Definition of an MMU context.
 */
struct sos_mm_context
{
  /** Physical address of the PD for this MMU context */
  sos_paddr_t paddr_PD;

  /** Virtual address where it is mapped into the Kernel space */
  sos_vaddr_t vaddr_PD;

  /** Reference counter for this mm_context */
  sos_ui32_t  ref_cnt;

  /** List of MMU contexts in the system */
  struct sos_mm_context *prev, *next;
};


/**
 * The cache of mm_context structures
 */
struct sos_kslab_cache * cache_struct_mm_context;


/**
 * The current MMU context corresponding to the current configuration
 * of the MMU.
 */
static struct sos_mm_context *current_mm_context = NULL;


/**
 * System-wide list of all the mm_contexts in the system
 */
static struct sos_mm_context *list_mm_context = NULL;
/* The "= NULL" here is FUNDAMENTAL, because paging.c must work
   correctly, ie synch_PDE below must behave reasonably (eg do
   nothing), until the mm_context subsystem has been initialized. */


sos_ret_t sos_mm_context_subsystem_setup()
{
  struct sos_mm_context * initial_mm_context;
  sos_ret_t retval;

  /* Create the new mm_context cache */
  cache_struct_mm_context = sos_kmem_cache_create("struct mm_context",
                                                  sizeof(struct sos_mm_context),
                                                  1, 0,
                                                  SOS_KSLAB_CREATE_MAP);
  if (NULL == cache_struct_mm_context)
    return -SOS_ENOMEM;

  /*
   * Allocate the initial mm_context structure
   */
  initial_mm_context
    = (struct sos_mm_context*) sos_kmem_cache_alloc(cache_struct_mm_context,
                                                    SOS_KSLAB_ALLOC_ATOMIC);
  if (NULL == initial_mm_context)
    return -SOS_ENOMEM;

  /* Retrieve the physical address of the page where the current PD lies */
  initial_mm_context->paddr_PD = sos_paging_get_current_PD_paddr();

  /*
   * Map it somewhere in kernel virtual memory
   */

  /* Allocate 1 page of kernel virtual memory */
  initial_mm_context->vaddr_PD = sos_kmem_vmm_alloc(1, 0);
  if (initial_mm_context->vaddr_PD == 0)
    return -SOS_ENOMEM;

  /* Map the PD at this virtual address: it will thus be mapped twice
     (once through the mirroring, once for the mm_context) ! */
  retval = sos_paging_map(initial_mm_context->paddr_PD,
                          initial_mm_context->vaddr_PD,
                          FALSE,
                          SOS_VM_MAP_PROT_READ
                          | SOS_VM_MAP_PROT_WRITE);
  if (SOS_OK != retval)
    return retval;

  /* Initialize the initial list of mm_contexts */
  list_singleton(list_mm_context, initial_mm_context);

  /* We just created this mm_context: mark it as "referenced" */
  initial_mm_context->ref_cnt ++;

  /* We are actually already using it ! */
  initial_mm_context->ref_cnt ++; /* ie reference it a 2nd time ! */
  current_mm_context = initial_mm_context;

  return SOS_OK;
}


struct sos_mm_context * sos_mm_context_create(void)
{
  sos_ui32_t flags;
  struct sos_mm_context *mmctxt;

  /*
   * Allocate the mm_context structure
   */
  mmctxt = (struct sos_mm_context*) sos_kmem_cache_alloc(cache_struct_mm_context, 0);
  if (NULL == mmctxt)
    return NULL;

  /* Allocate a new page for the new PD and map it into the kernel */
  mmctxt->vaddr_PD = sos_kmem_vmm_alloc(1, SOS_KMEM_VMM_MAP);
  if (mmctxt->vaddr_PD == 0)
    {
      sos_kmem_cache_free((sos_vaddr_t) mmctxt);
      return NULL;
    }

  /* Retrieve its physical address */
  mmctxt->paddr_PD = sos_paging_get_paddr(mmctxt->vaddr_PD);
  if (mmctxt->paddr_PD == 0)
    {
      /* vaddr_PD was allocated with sos_kmem_vmm_alloc, so release it
         with sos_kmem_vmm_free, not with the slab cache */
      sos_kmem_vmm_free(mmctxt->vaddr_PD);
      sos_kmem_cache_free((sos_vaddr_t) mmctxt);
      return NULL;
    }

  /* Copy the current hardware MMU address translation tables */
  if (SOS_OK != sos_paging_copy_kernel_space(mmctxt->vaddr_PD,
                                             current_mm_context->vaddr_PD))
    {
      sos_kmem_vmm_free(mmctxt->vaddr_PD);
      sos_kmem_cache_free((sos_vaddr_t) mmctxt);
      return NULL;
    }

  /* Mark the mm_context as "referenced" */
  mmctxt->ref_cnt = 1;

  /* Add it to the list of MMU contexts */
  sos_disable_IRQs(flags);
  list_add_tail(list_mm_context, mmctxt);
  sos_restore_IRQs(flags);

  return mmctxt;
}


struct sos_mm_context *
sos_mm_context_duplicate(const struct sos_mm_context *model)
{
  struct sos_mm_context *mmctxt;

  /* Create an mm_context; the kernel space will be copied into it */
  mmctxt = sos_mm_context_create();
  if (NULL == mmctxt)
    return NULL;

  /* Copy the user-space configuration of the MMU */
  if (SOS_OK != sos_paging_copy_user_space(mmctxt->vaddr_PD,
                                           model->vaddr_PD))
    {
      sos_mm_context_unref(mmctxt);
      return NULL;
    }

  return mmctxt;
}


sos_ret_t sos_mm_context_unref(struct sos_mm_context *mmctxt)
{
  sos_ui32_t flags;

  sos_disable_IRQs(flags);

  /* A valid mmctxt is one which is not yet unreferenced */
  SOS_ASSERT_FATAL(mmctxt->ref_cnt > 0);

  /* Unreference it */
  mmctxt->ref_cnt --;

  /* If somebody is still using it, don't release it now */
  if (mmctxt->ref_cnt > 0)
    {
      sos_restore_IRQs(flags);
      return SOS_OK;
    }

  /* If nobody uses it, then it cannot be the current mm_context ! */
  SOS_ASSERT_FATAL(mmctxt != current_mm_context);

  /* Remove it from the list of mm_contexts */
  list_delete(list_mm_context, mmctxt);

  sos_restore_IRQs(flags);

  /* Remove all user mappings (if any) */
  sos_paging_dispose(mmctxt->vaddr_PD);

  /* Unmap the PD from the kernel */
  sos_kmem_vmm_free(mmctxt->vaddr_PD);

  memset(mmctxt, 0x0, sizeof(*mmctxt));

  /* Release the mm_context structure back to its slab cache */
  sos_kmem_cache_free((sos_vaddr_t) mmctxt);

  return SOS_OK;
}


sos_ret_t sos_mm_context_ref(struct sos_mm_context *mmctxt)
{
  sos_ui32_t flags;

  sos_disable_IRQs(flags);

  /* A valid mmctxt is one which is not yet unreferenced */
  SOS_ASSERT_FATAL(mmctxt->ref_cnt > 0);

  /* Reference it once again */
  mmctxt->ref_cnt ++;

  sos_restore_IRQs(flags);

  return SOS_OK;
}


sos_ret_t sos_mm_context_switch_to(struct sos_mm_context *mmctxt)
{
  SOS_ASSERT_FATAL(NULL != mmctxt);
  SOS_ASSERT_FATAL(mmctxt->ref_cnt > 0);
  SOS_ASSERT_FATAL(current_mm_context->ref_cnt > 0);
  if (mmctxt != current_mm_context)
    {
      sos_ui32_t flags;
      struct sos_mm_context * prev_mm_context = current_mm_context;

      /* This is the most dangerous part of the whole thing. If we set
         the wrong MMU configuration in mmctxt, this will hang or
         reboot the machine... */
      sos_paging_set_current_PD_paddr(mmctxt->paddr_PD);

      /* Exchange the mm_contexts */
      current_mm_context = mmctxt;

      /* Update the reference counts */
      sos_disable_IRQs(flags);
      mmctxt->ref_cnt ++;
      sos_mm_context_unref(prev_mm_context);
      sos_restore_IRQs(flags);
    }

  return SOS_OK;
}


struct sos_mm_context *get_current_mm_context()
{
  SOS_ASSERT_FATAL(current_mm_context->ref_cnt > 0);
  return current_mm_context;
}
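
/*
 * Illustrative usage sketch: how client code, e.g. a fork-like process
 * creation path, might be expected to use the public mm_context API above.
 * The names create_child_address_space and child_mmctxt are hypothetical,
 * not part of the SOS API.
 *
 *   struct sos_mm_context * create_child_address_space(void)
 *   {
 *     // Clone the current address space: sos_mm_context_create() sets up
 *     // the kernel-space part, sos_mm_context_duplicate() then copies the
 *     // user-space part of the model
 *     struct sos_mm_context * child_mmctxt
 *       = sos_mm_context_duplicate(get_current_mm_context());
 *     if (NULL == child_mmctxt)
 *       return NULL;           // allocation or copy failed
 *
 *     return child_mmctxt;     // ref_cnt == 1, owned by the caller
 *   }
 *
 * Later, switching to the child address space references it and drops the
 * previously current one:
 *
 *   sos_mm_context_switch_to(child_mmctxt);
 *
 * and when the owner gives up its own reference:
 *
 *   sos_mm_context_unref(child_mmctxt);   // released once ref_cnt reaches 0
 */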


/* ******************************************************
 * Reserved functions
 */


sos_ret_t sos_mm_context_synch_kernel_PDE(unsigned int index_in_pd,
                                          sos_ui32_t pde)
{
  sos_ui32_t flags;
  struct sos_mm_context * dest_mm_context;
  int nb_mm_contexts;

  sos_disable_IRQs(flags);
  list_foreach_forward(list_mm_context, dest_mm_context, nb_mm_contexts)
    {
      sos_ui32_t * dest_pd;

      SOS_ASSERT_FATAL(dest_mm_context->ref_cnt > 0);

      dest_pd = (sos_ui32_t*) dest_mm_context->vaddr_PD;
      dest_pd[index_in_pd] = pde;
    }
  sos_restore_IRQs(flags);

  return SOS_OK;
}
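
/*
 * Illustrative sketch of the intended caller side: sos_mm_context_synch_kernel_PDE()
 * is reserved for paging.c. Whenever the paging code installs or removes a
 * page table for a kernel virtual address, the corresponding PD entry must be
 * replicated into the PD of every mm_context so that the kernel area stays
 * identical in all address spaces. The names kernel_vaddr, current_pd and
 * index below are hypothetical; the actual hook lives in paging.c and may
 * differ in its details.
 *
 *   // After updating the PD entry for a kernel address in the current PD:
 *   unsigned int index   = kernel_vaddr >> 22;   // top 10 bits select the PDE (x86, 4 kB pages)
 *   sos_ui32_t   new_pde = current_pd[index];    // the PDE value just written
 *   sos_mm_context_synch_kernel_PDE(index, new_pde);
 *
 * Note that before sos_mm_context_subsystem_setup() has run, list_mm_context
 * is still NULL, so the list_foreach_forward above iterates over nothing and
 * the call is a harmless no-op, as required by the comment next to the
 * declaration of list_mm_context.
 */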
