001
002
003
004
005
006
007
008
009
010
011
012
013
014
015
016
017
018
019 #include <hwcore/paging.h>
020 #include <hwcore/irq.h>
021
022 #include <sos/assert.h>
023 #include <sos/list.h>
024 #include <sos/klibc.h>
025 #include <sos/physmem.h>
026 #include <sos/kmem_slab.h>
027 #include <sos/kmem_vmm.h>
028
029 #include "mm_context.h"
030
031
032
033
034
/**
 * An MMU context: one page directory (PD), reference-counted and
 * chained in the kernel-global list of all MMU contexts.
 */
struct sos_mm_context
{
  /* Physical address of the PD: the value programmed into the MMU
     (via sos_paging_set_current_PD_paddr) when this context becomes
     the current one */
  sos_paddr_t paddr_PD;

  /* Kernel virtual address where the PD is mapped, so that the
     kernel can read/update its entries (see
     sos_mm_context_synch_kernel_PDE) */
  sos_vaddr_t vaddr_PD;

  /* Number of owners of this context (list membership counts as one
     reference, being the current context counts as another — see
     sos_mm_context_subsystem_setup).  The context is destroyed when
     this drops to 0 in sos_mm_context_unref */
  sos_ui32_t ref_cnt;

  /* Linkage in the kernel-global list_mm_context list (managed with
     the list_* macros from sos/list.h) */
  struct sos_mm_context *prev, *next;
};
049
050
051
052
053
/* Slab cache from which struct sos_mm_context descriptors are
   allocated (created in sos_mm_context_subsystem_setup).
   NOTE(review): not declared static — presumably referenced from
   another compilation unit; confirm before narrowing linkage. */
struct sos_kslab_cache * cache_struct_mm_context;


/* The MMU context currently loaded in the MMU.  Never NULL once
   sos_mm_context_subsystem_setup has run, and always holds at least
   one reference (asserted throughout this file). */
static struct sos_mm_context *current_mm_context = NULL;


/* Head of the kernel-global list of all MMU contexts; traversed by
   sos_mm_context_synch_kernel_PDE to propagate kernel-space PD
   entries to every context. */
static struct sos_mm_context *list_mm_context = NULL;
068
069
070
071
072
073 sos_ret_t sos_mm_context_subsystem_setup()
074 {
075 struct sos_mm_context * initial_mm_context;
076 sos_ret_t retval;
077
078
079 cache_struct_mm_context = sos_kmem_cache_create("struct mm_context",
080 sizeof(struct sos_mm_context),
081 1, 0,
082 SOS_KSLAB_CREATE_MAP);
083 if (NULL == cache_struct_mm_context)
084 return -SOS_ENOMEM;
085
086
087
088
089 initial_mm_context
090 = (struct sos_mm_context*) sos_kmem_cache_alloc(cache_struct_mm_context,
091 SOS_KSLAB_ALLOC_ATOMIC);
092 if (NULL == initial_mm_context)
093 return -SOS_ENOMEM;
094
095
096 initial_mm_context->paddr_PD = sos_paging_get_current_PD_paddr();
097
098
099
100
101
102
103 initial_mm_context->vaddr_PD = sos_kmem_vmm_alloc(1, 0);
104 if (initial_mm_context->vaddr_PD == 0)
105 return -SOS_ENOMEM;
106
107
108
109 retval = sos_paging_map(initial_mm_context->paddr_PD,
110 initial_mm_context->vaddr_PD,
111 FALSE,
112 SOS_VM_MAP_PROT_READ
113 | SOS_VM_MAP_PROT_WRITE);
114 if (SOS_OK != retval)
115 return retval;
116
117
118 list_singleton(list_mm_context, initial_mm_context);
119
120
121 initial_mm_context->ref_cnt ++;
122
123
124 initial_mm_context->ref_cnt ++;
125 current_mm_context = initial_mm_context;
126
127 return SOS_OK;
128 }
129
130
131 struct sos_mm_context * sos_mm_context_create(void)
132 {
133 sos_ui32_t flags;
134 struct sos_mm_context *mmctxt;
135
136
137
138
139 mmctxt = (struct sos_mm_context*) sos_kmem_cache_alloc(cache_struct_mm_context, 0);
140 if (NULL == mmctxt)
141 return NULL;
142
143
144 mmctxt->vaddr_PD = sos_kmem_vmm_alloc(1, SOS_KMEM_VMM_MAP);
145 if (mmctxt->vaddr_PD == 0)
146 {
147 sos_kmem_cache_free((sos_vaddr_t) mmctxt);
148 return NULL;
149 }
150
151
152 mmctxt->paddr_PD = sos_paging_get_paddr(mmctxt->vaddr_PD);
153 if (mmctxt->paddr_PD == 0)
154 {
155 sos_kmem_cache_free((sos_vaddr_t) mmctxt->vaddr_PD);
156 sos_kmem_cache_free((sos_vaddr_t) mmctxt);
157 return NULL;
158 }
159
160
161 if (SOS_OK != sos_paging_copy_kernel_space(mmctxt->vaddr_PD,
162 current_mm_context->vaddr_PD))
163 {
164 sos_kmem_cache_free((sos_vaddr_t) mmctxt->vaddr_PD);
165 sos_kmem_cache_free((sos_vaddr_t) mmctxt);
166 return NULL;
167 }
168
169
170 mmctxt->ref_cnt = 1;
171
172
173 sos_disable_IRQs(flags);
174 list_add_tail(list_mm_context, mmctxt);
175 sos_restore_IRQs(flags);
176
177 return mmctxt;
178 }
179
180
181 struct sos_mm_context *
182 sos_mm_context_duplicate(const struct sos_mm_context *model)
183 {
184 struct sos_mm_context *mmctxt;
185
186
187 mmctxt = sos_mm_context_create();
188 if (NULL == mmctxt)
189 return NULL;
190
191
192 if (SOS_OK != sos_paging_copy_user_space(mmctxt->vaddr_PD,
193 model->vaddr_PD))
194 {
195 sos_mm_context_unref(mmctxt);
196 return NULL;
197 }
198
199 return mmctxt;
200 }
201
202
203 sos_ret_t sos_mm_context_unref(struct sos_mm_context *mmctxt)
204 {
205 sos_ui32_t flags;
206
207 sos_disable_IRQs(flags);
208
209
210 SOS_ASSERT_FATAL(mmctxt->ref_cnt > 0);
211
212
213 mmctxt->ref_cnt --;
214
215
216 if (mmctxt->ref_cnt > 0)
217 {
218 sos_restore_IRQs(flags);
219 return SOS_OK;
220 }
221
222
223 SOS_ASSERT_FATAL(mmctxt != current_mm_context);
224
225
226 list_delete(list_mm_context, mmctxt);
227
228 sos_restore_IRQs(flags);
229
230
231 sos_paging_dispose(mmctxt->vaddr_PD);
232
233
234 sos_kmem_vmm_free(mmctxt->vaddr_PD);
235
236 memset(mmctxt, 0x0, sizeof(*mmctxt));
237
238 return SOS_OK;
239 }
240
241
242 sos_ret_t sos_mm_context_ref(struct sos_mm_context *mmctxt)
243 {
244 sos_ui32_t flags;
245
246 sos_disable_IRQs(flags);
247
248
249 SOS_ASSERT_FATAL(mmctxt->ref_cnt > 0);
250
251
252 mmctxt->ref_cnt ++;
253
254 sos_restore_IRQs(flags);
255
256 return SOS_OK;
257 }
258
259
/**
 * Make the given context the current one: load its PD into the MMU
 * and transfer the "current context" reference from the previous
 * context to it.  No-op when it is already current.
 *
 * @param mmctxt the context to switch to (must hold >= 1 reference)
 * @return SOS_OK
 *
 * NOTE: the ordering below matters — the MMU is reprogrammed first,
 * then current_mm_context is updated, and only then are the refcounts
 * adjusted (so the unref of the previous context cannot see it as
 * current, and cannot destroy the PD the CPU still runs on).
 */
sos_ret_t sos_mm_context_switch_to(struct sos_mm_context *mmctxt)
{
  SOS_ASSERT_FATAL(NULL != mmctxt);
  SOS_ASSERT_FATAL(mmctxt->ref_cnt > 0);
  SOS_ASSERT_FATAL(current_mm_context->ref_cnt > 0);
  if (mmctxt != current_mm_context)
    {
      sos_ui32_t flags;
      struct sos_mm_context * prev_mm_context = current_mm_context;

      /* Program the MMU with the new page directory */
      sos_paging_set_current_PD_paddr(mmctxt->paddr_PD);

      /* Publish the new current context */
      current_mm_context = mmctxt;

      /* Transfer the "current" reference: take one on the new
	 context, drop one on the previous (which may destroy it if
	 that was its last reference) */
      sos_disable_IRQs(flags);
      mmctxt->ref_cnt ++;
      sos_mm_context_unref(prev_mm_context);
      sos_restore_IRQs(flags);
    }

  return SOS_OK;
}
287
288
289 struct sos_mm_context *get_current_mm_context()
290 {
291 SOS_ASSERT_FATAL(current_mm_context->ref_cnt > 0);
292 return current_mm_context;
293 }
294
295
296
297
298
299
300
/**
 * Propagate a kernel-space page-directory entry to every MMU context,
 * so that kernel mappings stay identical across all address spaces.
 * Called by the paging code when a kernel PDE changes.
 *
 * @param index_in_pd index of the entry to update in each PD
 * @param pde         the new PDE value to install
 * @return SOS_OK
 */
sos_ret_t sos_mm_context_synch_kernel_PDE(unsigned int index_in_pd,
					  sos_ui32_t pde)
{
  sos_ui32_t flags;
  struct sos_mm_context * dest_mm_context;
  int nb_mm_contexts;

  /* Walk the whole context list with IRQs disabled so no context is
     added/removed while we update the PDs */
  sos_disable_IRQs(flags);
  list_foreach_forward(list_mm_context, dest_mm_context, nb_mm_contexts)
    {
      sos_ui32_t * dest_pd;

      SOS_ASSERT_FATAL(dest_mm_context->ref_cnt > 0);

      /* Write the entry through the kernel mapping of this PD */
      dest_pd = (sos_ui32_t*) dest_mm_context->vaddr_PD;
      dest_pd[index_in_pd] = pde;
    }
  sos_restore_IRQs(flags);

  return SOS_OK;
}