SimpleOS

LXR

Navigation



Site hébergé par : enix

The LXR Cross Referencer for SOS

source navigation ]
diff markup ]
identifier search ]
general search ]
 
 
Article:1 ] [ 2 ] [ 3 ] [ 4 ] [ 5 ] [ 6 ] [ 6.5 ] [ 7 ] [ 7.5 ] [ 8 ] [ 9 ] [ 9.5 ]

001 /* Copyright (C) 2005 David Decotigny
002 
003    This program is free software; you can redistribute it and/or
004    modify it under the terms of the GNU General Public License
005    as published by the Free Software Foundation; either version 2
006    of the License, or (at your option) any later version.
007    
008    This program is distributed in the hope that it will be useful,
009    but WITHOUT ANY WARRANTY; without even the implied warranty of
010    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
011    GNU General Public License for more details.
012    
013    You should have received a copy of the GNU General Public License
014    along with this program; if not, write to the Free Software
015    Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
016    USA. 
017 */
018 
019 #include <sos/assert.h>
020 #include <sos/list.h>
021 #include <sos/physmem.h>
022 #include <sos/kmem_slab.h>
023 #include <drivers/bochs.h>
024 #include <hwcore/mm_context.h>
025 #include <hwcore/paging.h>
026 #include <drivers/zero.h>
027 
028 #include "umem_vmm.h"
029 
030 
/**
 * A user address space ("AS"): the set of virtual regions (VRs)
 * mapped in user land for one process, its MMU configuration, and
 * memory-usage bookkeeping.
 */
struct sos_umem_vmm_as
{
  /** The process that owns this address space */
  struct sos_process     * process;

  /** The MMU configuration of this address space */
  struct sos_mm_context  * mm_context;

  /** The list of VRs in this address space (circular list linked
      through the VRs' prev_in_as/next_in_as fields; see
      sos_umem_vmm_map(), which keeps it ordered by user address) */
  struct sos_umem_vmm_vr * list_vr;

  /** Heap location */
  sos_uaddr_t heap_start;
  sos_size_t  heap_size; /**< Updated by sos_umem_vmm_brk() */

  /* Memory usage statistics */
  sos_size_t phys_total; /* shared + private */
  struct vm_usage
  {
    sos_size_t overall;
    sos_size_t ro, rw, code /* all: non readable, read and read/write */;
  } vm_total, vm_shrd; /* totals for all VRs / for shared VRs only */

  /* Page fault counters */
  sos_size_t pgflt_cow;     /* copy-on-write faults */
  sos_size_t pgflt_page_in; /* faults resolved by paging data in */
  sos_size_t pgflt_invalid; /* invalid (unresolvable) faults */
};
059 
060 
/**
 * A virtual region ("VR"): one contiguous, page-aligned interval of a
 * user address space mapping (part of) a resource with uniform access
 * rights and flags.
 */
struct sos_umem_vmm_vr
{
  /** The address space owning this VR */
  struct sos_umem_vmm_as *address_space;

  /** The location of the mapping in user space (start is
      page-aligned, size is a whole number of pages) */
  sos_uaddr_t start;
  sos_size_t  size;

  /** What accesses are allowed (read, write, exec): @see
      SOS_VM_MAP_PROT_* flags in hwcore/paging.h */
  sos_ui32_t  access_rights;

  /** Flags of the VR. Allowed flags:
   *  - SOS_VR_MAP_SHARED
   */
  sos_ui32_t  flags;

  /**
   * The callbacks for the VR called along map/unmapping of the
   * resource
   */
  struct sos_umem_vmm_vr_ops *ops;

  /** Description of the resource being mapped, if any, and where
      inside that resource this VR starts */
  struct sos_umem_vmm_mapped_resource *mapped_resource;
  sos_luoffset_t offset_in_resource;

  /** The VRs of an AS are linked together (circular list) and are
      accessible by way of as->list_vr */
  struct sos_umem_vmm_vr *prev_in_as, *next_in_as;

  /** The VRs mapping a given resource are linked together and are
      accessible by way of mapped_resource->list_vr */
  struct sos_umem_vmm_vr *prev_in_mapped_resource, *next_in_mapped_resource;
};
097 
098 
/*
 * We use special slab caches to allocate AS and VR data structures.
 * Objects taken from these caches with sos_kmem_cache_alloc() must be
 * released with sos_kmem_cache_free().
 */
static struct sos_kslab_cache * cache_of_as; /* struct sos_umem_vmm_as objects */
static struct sos_kslab_cache * cache_of_vr; /* struct sos_umem_vmm_vr objects */
104 
105 
/** Temporary function to debug: list the VRs of the given AS on the
    Bochs console, together with its memory statistics and page fault
    counters. @param str is a free-form tag echoed in the output so
    successive dumps can be told apart */
void sos_dump_as(const struct sos_umem_vmm_as * as, const char *str)
{
  struct sos_umem_vmm_vr *vr;
  int nb_vr;

  sos_bochs_printf("AS %p - %s:\n", as, str);
  sos_bochs_printf("   physical mem: %x\n",
                   as->phys_total);
  sos_bochs_printf("   VM (all/ro+rw/exec) tot:%x/%x+%x/%x shrd:%x/%x+%x/%x\n",
                   as->vm_total.overall,
                   as->vm_total.ro, as->vm_total.rw, as->vm_total.code,
                   as->vm_shrd.overall,
                   as->vm_shrd.ro, as->vm_shrd.rw, as->vm_shrd.code);
  sos_bochs_printf("   pgflt cow=%d pgin=%d inv=%d\n",
                   as->pgflt_cow, as->pgflt_page_in, as->pgflt_invalid);
  /* Walk the (circular) list of VRs attached to this AS */
  list_foreach_named(as->list_vr, vr, nb_vr, prev_in_as, next_in_as)
    {
      sos_bochs_printf("  VR[%d]=%x: [%x,%x[ (sz=%x) mr=(%x)+%llx %c%c%c fl=%x\n",
                       nb_vr, (unsigned)vr,
                       vr->start, vr->start + vr->size, vr->size,
                       (unsigned)vr->mapped_resource,
                       vr->offset_in_resource,
                       (vr->access_rights & SOS_VM_MAP_PROT_READ)?'r':'-',
                       (vr->access_rights & SOS_VM_MAP_PROT_WRITE)?'w':'-',
                       (vr->access_rights & SOS_VM_MAP_PROT_EXEC)?'x':'-',
                       (unsigned)vr->flags);
    }
  sos_bochs_printf("FIN (%s)\n", str);
}
136 
137 
/**
 * Physical address of THE page (full of 0s) used for anonymous
 * mappings. Allocated and referenced once, at
 * sos_umem_vmm_subsystem_setup() time.
 */
sos_paddr_t sos_zero_page = 0 /* Initial value prior to allocation */;
143 
144 
/*
 * Helper functions defined at the bottom of the file
 */

/**
 * Helper function to retrieve the first VR whose end address
 * (vr->start + vr->size) is >= uaddr, ie either the VR enclosing
 * uaddr or, failing that, the first VR located after uaddr
 */
static struct sos_umem_vmm_vr *
find_enclosing_or_next_vr(struct sos_umem_vmm_as * as,
                          sos_uaddr_t uaddr);


/**
 * Helper function to retrieve the first VR that overlaps the given
 * [start_uaddr .. start_uaddr + size[ interval, if any
 */
static struct sos_umem_vmm_vr *
find_first_intersecting_vr(struct sos_umem_vmm_as * as,
                           sos_uaddr_t start_uaddr, sos_size_t size);


/**
 * Helper function to find the first user address where there is
 * enough space. Begin to look for such an interval at or after the
 * given address
 *
 * @param hint_uaddr The address where to begin the scan, or NULL
 */
static sos_uaddr_t
find_first_free_interval(struct sos_umem_vmm_as * as,
                         sos_uaddr_t hint_uaddr, sos_size_t size);


/** Called each time a VR of the AS changes (creation, removal or
    change of access rights), to keep the vm_total/vm_shrd statistics
    up to date. Doesn't cope with any underlying physical
    mapping/unmapping, COW, etc... */
static void
as_account_change_of_vr_protection(struct sos_umem_vmm_as * as,
                                   sos_bool_t is_shared,
                                   sos_size_t size,
                                   sos_ui32_t prev_access_rights,
                                   sos_ui32_t new_access_rights);
186 
187 
188 sos_ret_t sos_umem_vmm_subsystem_setup()
189 {
190   sos_vaddr_t vaddr_zero_page;
191 
192   /* Allocate a new kernel physical page mapped into kernel space and
193      reset it with 0s */
194   vaddr_zero_page = sos_kmem_vmm_alloc(1, SOS_KMEM_VMM_MAP);
195   if (vaddr_zero_page == (sos_vaddr_t)NULL)
196     return -SOS_ENOMEM;
197   memset((void*)vaddr_zero_page, 0x0, SOS_PAGE_SIZE);
198   
199   /* Keep a reference to the underlying pphysical page... */
200   sos_zero_page = sos_paging_get_paddr(vaddr_zero_page);
201   SOS_ASSERT_FATAL(NULL != (void*)sos_zero_page);
202   sos_physmem_ref_physpage_at(sos_zero_page);
203 
204   /* ... but it is not needed in kernel space anymore, so we can
205      safely unmap it from kernel space */
206   sos_paging_unmap(vaddr_zero_page);
207 
208   /* Allocate the VR/AS caches */
209   cache_of_as
210     = sos_kmem_cache_create("Address space structures",
211                             sizeof(struct sos_umem_vmm_as),
212                             1, 0,
213                             SOS_KSLAB_CREATE_MAP
214                             | SOS_KSLAB_CREATE_ZERO);
215   if (! cache_of_as)
216     {
217       sos_physmem_unref_physpage(sos_zero_page);
218       return -SOS_ENOMEM;
219     }
220 
221   cache_of_vr
222     = sos_kmem_cache_create("Virtual Region structures",
223                             sizeof(struct sos_umem_vmm_vr),
224                             1, 0,
225                             SOS_KSLAB_CREATE_MAP
226                             | SOS_KSLAB_CREATE_ZERO);
227   if (! cache_of_vr)
228     {
229       sos_physmem_unref_physpage(sos_zero_page);
230       sos_kmem_cache_destroy(cache_of_as);
231       return -SOS_ENOMEM;
232     }
233 
234   return SOS_OK;
235 }
236 
237 
238 struct sos_umem_vmm_as *
239 sos_umem_vmm_create_empty_as(struct sos_process *owner)
240 {
241   struct sos_umem_vmm_as * as
242     = (struct sos_umem_vmm_as *) sos_kmem_cache_alloc(cache_of_as, 0);
243   if (! as)
244     return NULL;
245 
246   as->mm_context = sos_mm_context_create();
247   if (NULL == as->mm_context)
248     {
249       /* Error */
250       sos_kmem_cache_free((sos_vaddr_t)as);
251       return NULL;
252     }
253 
254   as->process = owner;
255   return as;
256 }
257 
258 
/**
 * Duplicate the address space of the current thread's process into a
 * new AS owned by 'owner' (fork-style semantics): the VR list is
 * copied, private writable mappings are prepared for copy-on-write,
 * and the MMU configuration is duplicated.
 *
 * @return The new AS, or NULL upon memory shortage (in which case the
 *         partially-built AS is torn down via sos_umem_vmm_delete_as)
 */
struct sos_umem_vmm_as *
sos_umem_vmm_duplicate_current_thread_as(struct sos_process *owner)
{
  __label__ undo_creation; /* gcc local label: error-exit target */
  struct sos_umem_vmm_as * my_as;
  struct sos_umem_vmm_vr * model_vr;
  int nb_vr;

  struct sos_umem_vmm_as * new_as
    = (struct sos_umem_vmm_as *) sos_kmem_cache_alloc(cache_of_as, 0);
  if (! new_as)
    return NULL;

  my_as = sos_process_get_address_space(sos_thread_get_current()->process);
  new_as->process = owner;
  list_init_named(new_as->list_vr, prev_in_as, next_in_as);

  /*
   * Switch to the current threads' mm_context, as duplicating it implies
   * being able to configure some of its mappings as read-only (for
   * COW)
   */
  SOS_ASSERT_FATAL(SOS_OK
                   == sos_thread_prepare_user_space_access(my_as,
                                                           (sos_vaddr_t)
                                                             NULL));

  /* Copy the virtual regions */
  list_foreach_named(my_as->list_vr, model_vr, nb_vr, prev_in_as, next_in_as)
    {
      struct sos_umem_vmm_vr * vr;

      /* Prepare COW on the read/write private mappings */
      if ( !(model_vr->flags & SOS_VR_MAP_SHARED)
           && (model_vr->access_rights & SOS_VM_MAP_PROT_WRITE) )
        {
          /* Mark the underlying physical pages (if any) as
             read-only */
          SOS_ASSERT_FATAL(SOS_OK
                           == sos_paging_prepare_COW(model_vr->start,
                                                     model_vr->size));
        }

      /* Allocate a new virtual region and copy the 'model' into it */
      vr = (struct sos_umem_vmm_vr *) sos_kmem_cache_alloc(cache_of_vr, 0);
      if (! vr)
        goto undo_creation;
      memcpy(vr, model_vr, sizeof(*vr));
      vr->address_space = new_as;

      /* Signal the "new" mapping to the underlying VR mapper */
      if (vr->ops && vr->ops->ref)
        vr->ops->ref(vr);

      /* Insert the new VR into the new AS (same order as the model) */
      list_add_tail_named(new_as->list_vr, vr, prev_in_as, next_in_as);

      /* Insert the new VR into the list of mappings of the resource
         (NOTE(review): assumes every VR has a non-NULL
         mapped_resource — confirm against sos_umem_vmm_map) */
      list_add_tail_named(model_vr->mapped_resource->list_vr, vr,
                          prev_in_mapped_resource,
                          next_in_mapped_resource);
    }

  /* Now copy the current MMU configuration */
  new_as->mm_context = sos_mm_context_duplicate(my_as->mm_context);
  if (NULL == new_as->mm_context)
    goto undo_creation;

  /* Correct behavior: copy heap location and usage statistics */
  new_as->heap_start = my_as->heap_start;
  new_as->heap_size  = my_as->heap_size;
  new_as->phys_total = my_as->phys_total;
  memcpy(& new_as->vm_total, & my_as->vm_total, sizeof(struct vm_usage));
  memcpy(& new_as->vm_shrd, & my_as->vm_shrd, sizeof(struct vm_usage));
  SOS_ASSERT_FATAL(SOS_OK == sos_thread_end_user_space_access());
  return new_as;

  /* Handle erroneous behavior: release user-space access and tear
     down whatever part of the new AS was built so far */
 undo_creation:
  SOS_ASSERT_FATAL(SOS_OK == sos_thread_end_user_space_access());
  sos_umem_vmm_delete_as(new_as);
  return NULL;
}
342 
343 
/**
 * Destroy an address space: unmap and release every VR it contains,
 * release its MMU configuration (if any) and free the AS structure
 * itself.
 *
 * @return SOS_OK (always succeeds)
 */
sos_ret_t
sos_umem_vmm_delete_as(struct sos_umem_vmm_as * as)
{
  /* Drain the VR list, head first */
  while(! list_is_empty_named(as->list_vr, prev_in_as, next_in_as))
    {
      struct sos_umem_vmm_vr * vr;
      vr = list_get_head_named(as->list_vr, prev_in_as, next_in_as);

      /* Remove the vr from the lists */
      list_pop_head_named(as->list_vr, prev_in_as, next_in_as);
      list_delete_named(vr->mapped_resource->list_vr, vr,
                        prev_in_mapped_resource,
                        next_in_mapped_resource);

      /* Signal to the underlying VR mapper that the mapping is
         suppressed */
      if (vr->ops)
        {
          if (vr->ops->unmap)
            vr->ops->unmap(vr, vr->start, vr->size);
          if (vr->ops->unref)
            vr->ops->unref(vr);
        }

      /* Return the VR object to its slab cache */
      sos_kmem_cache_free((sos_vaddr_t)vr);
    }
  
  /* Release MMU configuration (may be NULL if AS creation failed
     half-way, see sos_umem_vmm_duplicate_current_thread_as) */
  if (as->mm_context)
    sos_mm_context_unref(as->mm_context);

  /* Now unallocate main address space construct */
  sos_kmem_cache_free((sos_vaddr_t)as);

  return SOS_OK;
}
380 
381 
/** Accessor: the process owning the given address space */
struct sos_process *
sos_umem_vmm_get_process(struct sos_umem_vmm_as * as)
{
  return as->process;
}


/** Accessor: the MMU configuration of the given address space */
struct sos_mm_context *
sos_umem_vmm_get_mm_context(struct sos_umem_vmm_as * as)
{
  return as->mm_context;
}
394 
395 
396 struct sos_umem_vmm_vr *
397 sos_umem_vmm_get_vr_at_address(struct sos_umem_vmm_as * as,
398                                sos_uaddr_t uaddr)
399 {
400   struct sos_umem_vmm_vr * vr;
401   vr = find_enclosing_or_next_vr(as, uaddr);
402   if (! vr)
403     return NULL;
404 
405   /* Ok uaddr <= vr->end, but do we have uaddr > vr->start ? */
406   if (uaddr < vr->start)
407     return NULL;
408 
409   return vr;
410 }
411 
412 
/** Accessor: the address space owning the given VR */
struct sos_umem_vmm_as *
sos_umem_vmm_get_as_of_vr(struct sos_umem_vmm_vr * vr)
{
  return vr->address_space;
}


/** Accessor: the map/unmap callbacks attached to the given VR */
struct sos_umem_vmm_vr_ops *
sos_umem_vmm_get_ops_of_vr(struct sos_umem_vmm_vr * vr)
{
  return vr->ops;
}


/** Accessor: the SOS_VM_MAP_PROT_* access rights of the given VR */
sos_ui32_t sos_umem_vmm_get_prot_of_vr(struct sos_umem_vmm_vr * vr)
{
  return vr->access_rights;
}


/** Accessor: the SOS_VR_MAP_* flags of the given VR */
sos_ui32_t sos_umem_vmm_get_flags_of_vr(struct sos_umem_vmm_vr * vr)
{
  return vr->flags;
}


/** Accessor: the resource mapped by the given VR */
struct sos_umem_vmm_mapped_resource *
sos_umem_vmm_get_mapped_resource_of_vr(struct sos_umem_vmm_vr * vr)
{
  return vr->mapped_resource;
}


/** Accessor: the user-space start address of the given VR */
sos_uaddr_t sos_umem_vmm_get_start_of_vr(struct sos_umem_vmm_vr * vr)
{
  return vr->start;
}


/** Accessor: the size, in bytes, of the given VR */
sos_size_t sos_umem_vmm_get_size_of_vr(struct sos_umem_vmm_vr * vr)
{
  return vr->size;
}


/** Accessor: the offset inside the resource where the VR's mapping
    starts */
sos_luoffset_t sos_umem_vmm_get_offset_in_resource(struct sos_umem_vmm_vr * vr)
{
  return vr->offset_in_resource;
}
462 
463 
/**
 * Attach the map/unmap callbacks to a VR. Meant to be called once,
 * from the resource's mmap method.
 *
 * @return SOS_OK (a second call on the same VR is a fatal error)
 */
sos_ret_t
sos_umem_vmm_set_ops_of_vr(struct sos_umem_vmm_vr * vr,
                           struct sos_umem_vmm_vr_ops * ops)
{
  /* Don't allow to overwrite any preceding VR ops */
  SOS_ASSERT_FATAL(NULL == vr->ops);

  vr->ops = ops;
  return SOS_OK;
}
474 
475 
/**
 * Internal flag passed in sos_umem_vmm_map()'s 'flags' argument when
 * the call comes from a remap operation: when resize asks to map the
 * resource elsewhere, make sure not to overwrite the
 * offset_in_resource field. Deliberately outside the public
 * SOS_VR_MAP_* range.
 */
#define INTERNAL_MAP_CALLED_FROM_MREMAP (1 << 8)
481 
482 sos_ret_t
483 sos_umem_vmm_map(struct sos_umem_vmm_as * as,
484                  sos_uaddr_t * /*in/out*/uaddr, sos_size_t size,
485                  sos_ui32_t access_rights,
486                  sos_ui32_t flags,
487                  struct sos_umem_vmm_mapped_resource * resource,
488                  sos_luoffset_t offset_in_resource)
489 {
490   __label__ return_mmap;
491   sos_uaddr_t hint_uaddr;
492   struct sos_umem_vmm_vr *prev_vr, *next_vr, *vr, *preallocated_vr;
493   sos_bool_t merge_with_preceding, merge_with_next, used_preallocated_vr;
494   sos_bool_t internal_map_called_from_mremap
495     = (flags & INTERNAL_MAP_CALLED_FROM_MREMAP);
496 
497   sos_ret_t retval     = SOS_OK;
498   used_preallocated_vr = FALSE;
499   hint_uaddr           = *uaddr;
500 
501   /* Default mapping address is NULL */
502   *uaddr = (sos_vaddr_t)NULL;
503 
504   if (! resource)
505     return -SOS_EINVAL;
506   if (! resource->mmap)
507     return -SOS_EPERM;
508 
509   if (! SOS_IS_PAGE_ALIGNED(hint_uaddr))
510     return -SOS_EINVAL;
511 
512   if (size <= 0)
513     return -SOS_EINVAL;
514   size = SOS_PAGE_ALIGN_SUP(size);
515 
516   if (flags & SOS_VR_MAP_SHARED)
517     {
518       /* Make sure the mapped resource allows the required protection flags */
519       if ( ( (access_rights & SOS_VM_MAP_PROT_READ)
520              && !(resource->allowed_access_rights & SOS_VM_MAP_PROT_READ) )
521            || ( (access_rights & SOS_VM_MAP_PROT_WRITE)
522                 && !(resource->allowed_access_rights & SOS_VM_MAP_PROT_WRITE) )
523            || ( (access_rights & SOS_VM_MAP_PROT_EXEC)
524                 && !(resource->allowed_access_rights & SOS_VM_MAP_PROT_EXEC)) )
525         return -SOS_EPERM;
526     }
527 
528   /* Sanity checks over the offset_in_resource parameter */
529   if ( !internal_map_called_from_mremap
530        && ( resource->flags & SOS_MAPPED_RESOURCE_ANONYMOUS ) )
531     /* Initial offset ignored for anonymous mappings */
532     {
533       /* Nothing to check */
534     }
535 
536   /* Make sure that the offset in resource won't overflow */
537   else if (offset_in_resource + size <= offset_in_resource)
538     return -SOS_EINVAL;
539 
540   /* Filter out unsupported flags */
541   access_rights &= (SOS_VM_MAP_PROT_READ
542                     | SOS_VM_MAP_PROT_WRITE
543                     | SOS_VM_MAP_PROT_EXEC);
544   flags &= (SOS_VR_MAP_SHARED
545             | SOS_VR_MAP_FIXED);
546 
547   /* Pre-allocate a new VR. Because once we found a valid slot inside
548      the VR list, we don't want the list to be altered by another
549      process */
550   preallocated_vr
551     = (struct sos_umem_vmm_vr *)sos_kmem_cache_alloc(cache_of_vr, 0);
552   if (! preallocated_vr)
553     return -SOS_ENOMEM;
554 
555   /* Compute the user address of the new mapping */
556   if (flags & SOS_VR_MAP_FIXED)
557     {
558       /*
559        * The address is imposed
560        */
561 
562       /* Make sure the hint_uaddr hint is valid */
563       if (hint_uaddr < SOS_PAGING_BASE_USER_ADDRESS)
564         { retval = -SOS_EINVAL; goto return_mmap; }
565       if (hint_uaddr > SOS_PAGING_TOP_USER_ADDRESS - size)
566         { retval = -SOS_EINVAL; goto return_mmap; }
567 
568       /* Unmap any overlapped VR */
569       retval = sos_umem_vmm_unmap(as, hint_uaddr, size);
570       if (SOS_OK != retval)
571         { goto return_mmap; }
572     }
573   else
574     {
575       /*
576        * A free range has to be determined
577        */
578 
579       /* Find a suitable free VR */
580       hint_uaddr = find_first_free_interval(as, hint_uaddr, size);
581       if (! hint_uaddr)
582         { retval = -SOS_ENOMEM; goto return_mmap; }
583     }
584 
585   /* For anonymous resource mappings, set the initial
586      offset_in_resource to the initial virtual start address in user
587      space */
588   if ( !internal_map_called_from_mremap
589        && (resource->flags & SOS_MAPPED_RESOURCE_ANONYMOUS ) )
590     offset_in_resource = hint_uaddr;
591 
592   /* Lookup next and previous VR, if any. This will allow us to merge
593      the regions, when possible */
594   next_vr = find_enclosing_or_next_vr(as, hint_uaddr);
595   if (next_vr)
596     {
597       /* Find previous VR, if any */
598       prev_vr = next_vr->prev_in_as;
599       /* The list is curcular: it may happen that we looped over the
600          tail of the list (ie the list is a singleton) */
601       if (prev_vr->start > hint_uaddr)
602         prev_vr = NULL; /* No preceding VR */
603     }
604   else
605     {
606       /* Otherwise we went beyond the last VR */
607       prev_vr = list_get_tail_named(as->list_vr, prev_in_as, next_in_as);
608     }
609 
610   /* Merge with preceding VR ? */
611   merge_with_preceding
612     = ( (NULL != prev_vr)
613         && (prev_vr->mapped_resource == resource)
614         && (prev_vr->offset_in_resource + prev_vr->size == offset_in_resource)
615         && (prev_vr->start + prev_vr->size == hint_uaddr)
616         && (prev_vr->flags == flags)
617         && (prev_vr->access_rights == access_rights) );
618 
619   /* Merge with next VR ? */
620   merge_with_next
621     = ( (NULL != next_vr)
622         && (next_vr->mapped_resource == resource)
623         && (offset_in_resource + size == next_vr->offset_in_resource)
624         && (hint_uaddr + size == next_vr->start)
625         && (next_vr->flags == flags)
626         && (next_vr->access_rights == access_rights) );
627 
628   if (merge_with_preceding && merge_with_next)
629     {
630       /* Widen the prev_vr VR to encompass both the new VR and the next_vr */
631       vr = prev_vr;
632       vr->size += size + next_vr->size;
633       
634       /* Remove the next_vr VR */
635       list_delete_named(as->list_vr, next_vr, prev_in_as, next_in_as);
636       list_delete_named(next_vr->mapped_resource->list_vr, next_vr,
637                         prev_in_mapped_resource, next_in_mapped_resource);
638 
639       if (next_vr->ops && next_vr->ops->unref)
640         next_vr->ops->unref(next_vr);
641 
642       sos_kmem_vmm_free((sos_vaddr_t) next_vr);
643     }
644   else if (merge_with_preceding)
645     {
646       /* Widen the prev_vr VR to encompass the new VR */
647       vr = prev_vr;
648       vr->size += size;
649     }
650   else if (merge_with_next)
651     {
652       /* Widen the next_vr VR to encompass the new VR */
653       vr = next_vr;
654       vr->start -= size;
655       vr->size  += size;
656     }
657   else
658     {
659       /* Allocate a brand new VR and insert it into the list */
660 
661       vr = preallocated_vr;
662       used_preallocated_vr = TRUE;
663 
664       vr->start              = hint_uaddr;
665       vr->size               = size;
666       vr->access_rights      = access_rights;
667       vr->flags              = flags;
668       vr->mapped_resource    = resource;
669       vr->offset_in_resource = offset_in_resource;
670 
671       /* Insert VR in address space */
672       vr->address_space      = as;
673       if (prev_vr)
674         list_insert_after_named(as->list_vr, prev_vr, vr,
675                                 prev_in_as, next_in_as);
676       else
677         list_add_head_named(as->list_vr, vr, prev_in_as, next_in_as);
678       list_add_tail_named(vr->mapped_resource->list_vr, vr,
679                           prev_in_mapped_resource,
680                           next_in_mapped_resource);
681       
682       /* Signal the resource we are mapping it */
683       if (resource && resource->mmap)
684         {
685           retval = resource->mmap(vr);
686           if (SOS_OK != retval)
687             {
688               retval = sos_umem_vmm_unmap(as, vr->start, vr->size);
689               goto return_mmap;
690             }
691 
692           /* The page_in method is MANDATORY for mapped resources */
693           SOS_ASSERT_FATAL(vr->ops && vr->ops->page_in);
694         }
695 
696       if (vr->ops && vr->ops->ref)
697         vr->ops->ref(vr);
698     }
699 
700   /* Ok, fine, we got it right ! Return the address to the caller */
701   *uaddr = hint_uaddr;
702   as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED,
703                                      size, 0, vr->access_rights);
704   retval = SOS_OK;
705 
706  return_mmap:
707   if (! used_preallocated_vr)
708     sos_kmem_vmm_free((sos_vaddr_t)preallocated_vr);
709     
710   return retval;
711 }
712 
713 
714 sos_ret_t
715 sos_umem_vmm_unmap(struct sos_umem_vmm_as * as,
716                    sos_uaddr_t uaddr, sos_size_t size)
717 {
718   struct sos_umem_vmm_vr *vr, *preallocated_vr;
719   sos_bool_t need_to_setup_mmu;
720   sos_bool_t used_preallocated_vr;
721 
722   if (! SOS_IS_PAGE_ALIGNED(uaddr))
723     return -SOS_EINVAL;
724   if (size <= 0)
725     return -SOS_EINVAL;
726   size = SOS_PAGE_ALIGN_SUP(size);
727 
728   /* Make sure the uaddr is valid */
729   if (uaddr < SOS_PAGING_BASE_USER_ADDRESS)
730     return -SOS_EINVAL;
731   if (uaddr > SOS_PAGING_TOP_USER_ADDRESS - size)
732     return -SOS_EINVAL;
733 
734   /* In some cases, the unmapping might imply a VR to be split into
735      2. Actually, allocating a new VR can be a blocking operation, but
736      actually we can block now, it won't do no harm. But we must be
737      careful not to block later, while altering the VR lists: that's
738      why we pre-allocate now. */
739   used_preallocated_vr = FALSE;
740   preallocated_vr
741     = (struct sos_umem_vmm_vr *)sos_kmem_cache_alloc(cache_of_vr, 0);
742   if (! preallocated_vr)
743     return -SOS_ENOMEM;
744 
745   /* Find any VR intersecting with the given interval */
746   vr = find_first_intersecting_vr(as, uaddr, size);
747 
748   /* Unmap (part of) the VR covered by [uaddr .. uaddr+size[ */
749   while (NULL != vr)
750     {
751       /* Went past the end of the *circular* list => back at the
752          beginning ? */
753       if (vr->start + vr->size <= uaddr)
754         /* Yes, stop now */
755         break;
756 
757       /* Went beyond the region to unmap ? */
758       if (uaddr + size <= vr->start)
759         /* Yes, stop now */
760         break;
761 
762       /* VR totally unmapped ? */
763       if ((vr->start >= uaddr)
764           && (vr->start + vr->size <= uaddr + size))
765         {
766           struct sos_umem_vmm_vr *next_vr;
767 
768           /* Yes: signal we remove it completely */
769           if (vr->ops && vr->ops->unmap)
770             vr->ops->unmap(vr, vr->start, vr->size);
771 
772           /* Remove it from the AS list now */
773           next_vr = vr->next_in_as;
774           if (next_vr == vr) /* singleton ? */
775             next_vr = NULL;
776           list_delete_named(as->list_vr, vr, prev_in_as, next_in_as);
777 
778           /* Remove from the list of VRs mapping the resource */
779           list_delete_named(vr->mapped_resource->list_vr, vr,
780                             prev_in_mapped_resource,
781                             next_in_mapped_resource);
782 
783           if (vr->ops && vr->ops->unref)
784             vr->ops->unref(vr);
785           
786           as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED,
787                                              vr->size, vr->access_rights, 0);
788           sos_kmem_vmm_free((sos_vaddr_t)vr);
789       
790           /* Prepare next iteration */
791           vr = next_vr;
792           continue;
793         }
794 
795       /* unmapped region lies completely INSIDE the the VR */
796       else if ( (vr->start < uaddr)
797                 && (vr->start + vr->size > uaddr + size) )
798         {
799           /* VR has to be split into 2 */
800 
801           /* Use the preallocated VR and copy the VR into it */
802           used_preallocated_vr = TRUE;
803           memcpy(preallocated_vr, vr, sizeof(*vr));
804 
805           /* Adjust the start/size of both VRs */
806           preallocated_vr->start = uaddr + size;
807           preallocated_vr->size  = vr->start + vr->size - (uaddr + size);
808           preallocated_vr->offset_in_resource += uaddr + size - vr->start;
809           vr->size                             = uaddr - vr->start;
810 
811           /* Insert the new VR into the list */
812           list_insert_after_named(as->list_vr, vr, preallocated_vr,
813                                   prev_in_as, next_in_as);
814           list_add_tail_named(vr->mapped_resource->list_vr, preallocated_vr,
815                               prev_in_mapped_resource,
816                               next_in_mapped_resource);
817 
818           /* Signal the changes to the underlying resource */
819           if (vr->ops && vr->ops->unmap)
820             vr->ops->unmap(vr, uaddr, size);
821           if (preallocated_vr->ops && preallocated_vr->ops->ref)
822             preallocated_vr->ops->ref(preallocated_vr);
823 
824           /* Account for change in VRs */
825           as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED,
826                                              size, vr->access_rights, 0);
827 
828           /* No need to go further */
829           break;
830         }
831 
832       /* Unmapped region only affects the START address of the VR */
833       else if (uaddr <= vr->start)
834         {
835           sos_size_t translation = uaddr + size - vr->start;
836 
837           /* Shift the VR */
838           vr->size               -= translation;
839           vr->offset_in_resource += translation;
840           vr->start              += translation;
841           
842           /* Signal unmapping */
843           if (vr->ops && vr->ops->unmap)
844             vr->ops->unmap(vr, uaddr + size,
845                            translation);
846           
847           /* Account for change in VRs */
848           as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED,
849                                              translation,
850                                              vr->access_rights, 0);
851 
852           /* No need to go further, we reached the last VR that
853              overlaps the unmapped region */
854           break;
855         }
856 
857       /* Unmapped region only affects the ENDING address of the VR */
858       else if (uaddr + size >= vr->start + vr->size)
859         {
860           sos_size_t unmapped_size = vr->start + vr->size - uaddr;
861 
862           /* Resize VR */
863           vr->size = uaddr - vr->start;
864           
865           /* Signal unmapping */
866           if (vr->ops && vr->ops->unmap)
867             vr->ops->unmap(vr, uaddr, unmapped_size);
868 
869           /* Account for change in VRs */
870           as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED,
871                                              unmapped_size,
872                                              vr->access_rights, 0);
873           
874           vr = vr->next_in_as;
875           continue;
876         }
877 
878       sos_display_fatal_error("BUG uaddr=%x sz=%x vr_start=%x, vr_sz=%x",
879                               uaddr, size, vr->start, vr->size);
880     }
881 
882   need_to_setup_mmu = (sos_thread_get_current()->squatted_mm_context
883                        != as->mm_context);
884   if (need_to_setup_mmu)
885     SOS_ASSERT_FATAL(SOS_OK
886                      == sos_thread_prepare_user_space_access(as,
887                                                              (sos_vaddr_t)
888                                                                NULL));
889   {
890     sos_size_t sz_unmapped = sos_paging_unmap_interval(uaddr, size);
891     SOS_ASSERT_FATAL(sz_unmapped >= 0);
892     as->phys_total -= sz_unmapped;
893   }
894   if (need_to_setup_mmu)
895     SOS_ASSERT_FATAL(SOS_OK == sos_thread_end_user_space_access());
896 
897   if (! used_preallocated_vr)
898     sos_kmem_vmm_free((sos_vaddr_t)preallocated_vr);
899 
900   return SOS_OK;
901 }
902 
903 
904 sos_ret_t
905 sos_umem_vmm_chprot(struct sos_umem_vmm_as * as,
906                     sos_uaddr_t uaddr, sos_size_t size,
907                     sos_ui32_t new_access_rights)
908 {
909   struct sos_umem_vmm_vr *start_vr, *vr,
910     *preallocated_middle_vr, *preallocated_right_vr;
911   sos_bool_t used_preallocated_middle_vr, used_preallocated_right_vr;
912 
913   if (! SOS_IS_PAGE_ALIGNED(uaddr))
914     return -SOS_EINVAL;
915   if (size <= 0)
916     return -SOS_EINVAL;
917   size = SOS_PAGE_ALIGN_SUP(size);
918 
919   /* Make sure the uaddr is valid */
920   if (uaddr < SOS_PAGING_BASE_USER_ADDRESS)
921     return -SOS_EINVAL;
922   if (uaddr > SOS_PAGING_TOP_USER_ADDRESS - size)
923     return -SOS_EINVAL;
924 
925   /* Pre-allocate 2 new VRs (same reason as for unmap). Because chprot
926      may imply at most 2 regions to be split */
927   used_preallocated_middle_vr = FALSE;
928   used_preallocated_right_vr  = FALSE;
929   preallocated_middle_vr
930     = (struct sos_umem_vmm_vr *)sos_kmem_cache_alloc(cache_of_vr, 0);
931   if (! preallocated_middle_vr)
932     return -SOS_ENOMEM;
933   preallocated_right_vr
934     = (struct sos_umem_vmm_vr *)sos_kmem_cache_alloc(cache_of_vr, 0);
935   if (! preallocated_right_vr)
936     {
937       sos_kmem_vmm_free((sos_vaddr_t)preallocated_middle_vr);
938       return -SOS_ENOMEM;
939     }
940 
941   /* Find any VR intersecting with the given interval */
942   start_vr = find_first_intersecting_vr(as, uaddr, size);
943   if (NULL == start_vr)
944     return SOS_OK;
945 
946   /* First of all: make sure we are allowed to change the access
947      rights of all the VRs concerned by the chprot */
948   vr = start_vr;
949   while (TRUE)
950     {
951       /* Went past the end of the *circular* list => back at the
952          begining ? */
953       if (vr->start + vr->size <= uaddr)
954         /* Yes, stop now */
955         break;
956 
957       /* Went beyond the region to chprot ? */
958       if (uaddr + size < vr->start)
959         /* Yes, stop now */
960         break;
961 
962       if (vr->flags & SOS_VR_MAP_SHARED)
963         {
964           /* Make sure the mapped resource allows the required
965              protection flags */
966           if ( ( (new_access_rights & SOS_VM_MAP_PROT_READ)
967                  && !(vr->mapped_resource->allowed_access_rights
968                       & SOS_VM_MAP_PROT_READ) )
969                || ( (new_access_rights & SOS_VM_MAP_PROT_WRITE)
970                     && !(vr->mapped_resource->allowed_access_rights
971                          & SOS_VM_MAP_PROT_WRITE) )
972                || ( (new_access_rights & SOS_VM_MAP_PROT_EXEC)
973                     && !(vr->mapped_resource->allowed_access_rights
974                          & SOS_VM_MAP_PROT_EXEC) ) )
975             return -SOS_EPERM;
976         }
977 
978       vr = vr->next_in_as;
979     }
980 
981   /* Change the access rights of the VRs covered by [uaddr
982      .. uaddr+size[ */
983   vr = start_vr;
984   while (TRUE)
985     {
986 
987       /* Went past the end of the *circular* list => back at the
988          begining ? */
989       if (vr->start + vr->size <= uaddr)
990         /* Yes, stop now */
991         break;
992 
993       /* Went beyond the region to chprot ? */
994       if (uaddr + size <= vr->start)
995         /* Yes, stop now */
996         break;
997 
998       /* Access rights unchanged ? */
999       if (vr->access_rights == new_access_rights)
1000         /* nop */
1001         {
1002           vr = vr->next_in_as;
1003           continue;
1004         }
1005 
1006       /* VR totally chprot ? */
1007       if ((vr->start >= uaddr)
1008           && (vr->start + vr->size <= uaddr + size))
1009         {
1010           /* Account for change in VRs */
1011           as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED,
1012                                              vr->size, vr->access_rights,
1013                                              new_access_rights);
1014           vr->access_rights = new_access_rights;
1015 
1016           if (vr->flags & SOS_VR_MAP_SHARED)
1017             /* For shared mappings: effectively change the access
1018                rights of the physical pages  */
1019             sos_paging_set_prot_of_interval(vr->start, vr->size,
1020                                             new_access_rights);
1021           else
1022             /* Private mapping */
1023             {
1024               /* For private mappings, we set the new access_rights
1025                  only if it becomes read-only. For private mappings
1026                  that become writable, we don't do anything: we keep
1027                  the access rights unchanged to preserve the COW
1028                  semantics */
1029               if (! (new_access_rights & SOS_VM_MAP_PROT_WRITE))
1030                 sos_paging_set_prot_of_interval(vr->start, vr->size,
1031                                                 new_access_rights);
1032             }
1033 
1034           vr = vr->next_in_as;
1035           continue;
1036         }
1037 
1038       /* chprot region lies completely INSIDE the VR */
1039       else if ( (vr->start < uaddr)
1040                 && (vr->start + vr->size > uaddr + size) )
1041         {
1042           /* VR has to be split into 3 */
1043 
1044           /* Use the preallocated VRs and copy the VR into them */
1045           SOS_ASSERT_FATAL(! used_preallocated_middle_vr);
1046           SOS_ASSERT_FATAL(! used_preallocated_right_vr);
1047           used_preallocated_middle_vr = TRUE;
1048           memcpy(preallocated_middle_vr, vr, sizeof(*vr));
1049           used_preallocated_right_vr = TRUE;
1050           memcpy(preallocated_right_vr, vr, sizeof(*vr));
1051 
1052           /* Adjust the start/size of the VRs */
1053           preallocated_middle_vr->start = uaddr;
1054           preallocated_middle_vr->size  = size;
1055           preallocated_right_vr->start  = uaddr + size;
1056           preallocated_right_vr->size   = vr->start + vr->size
1057                                             - (uaddr + size);
1058           preallocated_middle_vr->offset_in_resource
1059             += uaddr - vr->start;
1060           preallocated_right_vr->offset_in_resource
1061             += uaddr + size - vr->start;
1062           vr->size = uaddr - vr->start;
1063 
1064           /* Account for change in VRs */
1065           preallocated_middle_vr->access_rights = new_access_rights;
1066           as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED,
1067                                              size, vr->access_rights,
1068                                              new_access_rights);
1069 
1070           /* Insert the new VRs into the lists */
1071           list_insert_after_named(as->list_vr, vr, preallocated_middle_vr,
1072                                   prev_in_as, next_in_as);
1073           list_insert_after_named(as->list_vr, preallocated_middle_vr,
1074                                   preallocated_right_vr,
1075                                   prev_in_as, next_in_as);
1076 
1077           list_add_tail_named(vr->mapped_resource->list_vr,
1078                               preallocated_middle_vr,
1079                               prev_in_mapped_resource,
1080                               next_in_mapped_resource);
1081           list_add_tail_named(vr->mapped_resource->list_vr,
1082                               preallocated_right_vr,
1083                               prev_in_mapped_resource,
1084                               next_in_mapped_resource);
1085 
1086           /* Effectively change the access rights of the physical pages */
1087           if (!(preallocated_middle_vr->flags & SOS_VR_MAP_SHARED)
1088               && (new_access_rights & SOS_VM_MAP_PROT_WRITE))
1089             /* For private mappings with write access, prepare for COW */
1090             sos_paging_prepare_COW(preallocated_middle_vr->start,
1091                                    preallocated_middle_vr->size);
1092           else
1093             sos_paging_set_prot_of_interval(preallocated_middle_vr->start,
1094                                             preallocated_middle_vr->size,
1095                                             new_access_rights);
1096 
1097           if (preallocated_right_vr->ops && preallocated_right_vr->ops->ref)
1098             preallocated_right_vr->ops->ref(preallocated_right_vr);
1099           if (preallocated_middle_vr->ops && preallocated_middle_vr->ops->ref)
1100             preallocated_middle_vr->ops->ref(preallocated_middle_vr);
1101 
1102           /* No need to go further */
1103           break;
1104         }
1105 
1106       /* Chprot region only affects the START address of the VR */
1107       else if (uaddr <= vr->start)
1108         {
1109           /* Split the region into 2 */
1110           sos_uoffset_t offset_in_region = uaddr + size - vr->start;
1111 
1112           /* Use the preallocated VRs and copy the VR into them */
1113           SOS_ASSERT_FATAL(! used_preallocated_middle_vr);
1114           used_preallocated_middle_vr = TRUE;
1115           memcpy(preallocated_middle_vr, vr, sizeof(*vr));
1116 
1117           /* Adjust the start/size of the VRs */
1118           preallocated_middle_vr->start += offset_in_region;
1119           preallocated_middle_vr->size  -= offset_in_region;
1120           vr->size                       = offset_in_region;
1121           preallocated_middle_vr->offset_in_resource += offset_in_region;
1122 
1123           /* Account for change in VRs */
1124           as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED,
1125                                              vr->size,
1126                                              vr->access_rights,
1127                                              new_access_rights);
1128           vr->access_rights = new_access_rights;
1129 
1130           /* Insert the new VR into the lists */
1131           list_insert_after_named(as->list_vr, vr,
1132                                   preallocated_middle_vr,
1133                                   prev_in_as, next_in_as);
1134           list_add_tail_named(vr->mapped_resource->list_vr,
1135                               preallocated_middle_vr,
1136                               prev_in_mapped_resource,
1137                               next_in_mapped_resource);
1138 
1139           /* Effectively change the access rights of the physical pages */
1140           if (!(vr->flags & SOS_VR_MAP_SHARED)
1141               && (new_access_rights & SOS_VM_MAP_PROT_WRITE))
1142             /* For private mappings with write access, prepare for COW */
1143             sos_paging_prepare_COW(vr->start, vr->size);
1144           else
1145             sos_paging_set_prot_of_interval(vr->start, vr->size,
1146                                             new_access_rights);
1147 
1148           if (preallocated_middle_vr->ops && preallocated_middle_vr->ops->ref)
1149             preallocated_middle_vr->ops->ref(preallocated_middle_vr);
1150 
1151           /* Ne need to go further (we reached the last VR that
1152              overlaps the given interval to chprot) */
1153           break;
1154         }
1155 
1156       /* Chprot region only affects the ENDING address of the VR */
1157       else if (uaddr + size >= vr->start + vr->size)
1158         {
1159           /* Split the region into 2 */
1160           sos_uoffset_t offset_in_region = uaddr - vr->start;
1161 
1162           /* Use the preallocated VRs and copy the VR into them */
1163           SOS_ASSERT_FATAL(! used_preallocated_right_vr);
1164           used_preallocated_right_vr = TRUE;
1165           memcpy(preallocated_right_vr, vr, sizeof(*vr));
1166 
1167           /* Adjust the start/size of the VRs */
1168           preallocated_right_vr->start        += offset_in_region;
1169           preallocated_right_vr->size         -= offset_in_region;
1170           vr->size                             = offset_in_region;
1171           preallocated_right_vr->offset_in_resource += offset_in_region;
1172 
1173           /* Account for change in VRs */
1174           as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED,
1175                                              preallocated_right_vr->size,
1176                                              vr->access_rights,
1177                                              new_access_rights);
1178           preallocated_right_vr->access_rights = new_access_rights;
1179 
1180           /* Insert the new VR into the lists */
1181           list_insert_after_named(as->list_vr, vr,
1182                                   preallocated_right_vr,
1183                                   prev_in_as, next_in_as);
1184           list_add_tail_named(vr->mapped_resource->list_vr,
1185                               preallocated_right_vr,
1186                               prev_in_mapped_resource,
1187                               next_in_mapped_resource);
1188 
1189           /* Effectively change the access rights of the physical pages */
1190           if (!(preallocated_right_vr->flags & SOS_VR_MAP_SHARED)
1191               && (new_access_rights & SOS_VM_MAP_PROT_WRITE))
1192             /* For private mappings with write access, prepare for COW */
1193             sos_paging_prepare_COW(preallocated_right_vr->start,
1194                                    preallocated_right_vr->size);
1195           else
1196             sos_paging_set_prot_of_interval(preallocated_right_vr->start,
1197                                             preallocated_right_vr->size,
1198                                             new_access_rights);
1199 
1200           if (preallocated_right_vr->ops && preallocated_right_vr->ops->ref)
1201             preallocated_right_vr->ops->ref(preallocated_right_vr);
1202 
1203           vr = vr->next_in_as;
1204           continue;
1205         }
1206 
1207       sos_display_fatal_error("BUG");
1208     }
1209 
1210   if (! used_preallocated_middle_vr)
1211     sos_kmem_vmm_free((sos_vaddr_t)preallocated_middle_vr);
1212   if (! used_preallocated_right_vr)
1213     sos_kmem_vmm_free((sos_vaddr_t)preallocated_right_vr);
1214 
1215   return SOS_OK;
1216 }
1217 
1218 
1219 sos_ret_t
1220 sos_umem_vmm_resize(struct sos_umem_vmm_as * as,
1221                     sos_uaddr_t old_uaddr, sos_size_t old_size,
1222                     sos_uaddr_t *new_uaddr, sos_size_t new_size,
1223                     sos_ui32_t flags)
1224 {
1225   sos_luoffset_t new_offset_in_resource;
1226   sos_bool_t must_move_vr = FALSE;
1227   struct sos_umem_vmm_vr *vr, *prev_vr, *next_vr;
1228 
1229   /* Make sure the new uaddr is valid */
1230   if (*new_uaddr < SOS_PAGING_BASE_USER_ADDRESS)
1231     return -SOS_EINVAL;
1232   if (*new_uaddr > SOS_PAGING_TOP_USER_ADDRESS - new_size)
1233     return -SOS_EINVAL;
1234 
1235   old_uaddr = SOS_PAGE_ALIGN_INF(old_uaddr);
1236   old_size  = SOS_PAGE_ALIGN_SUP(old_size);
1237   if (! SOS_IS_PAGE_ALIGNED(*new_uaddr))
1238     return -SOS_EINVAL;
1239   if (new_size <= 0)
1240     return -SOS_EINVAL;
1241   new_size = SOS_PAGE_ALIGN_SUP(new_size);
1242   
1243   /* Lookup a VR overlapping the address range */
1244   vr = find_first_intersecting_vr(as, old_uaddr, old_size);
1245   if (! vr)
1246     return -SOS_EINVAL;
1247   
1248   /* Make sure there is exactly ONE VR overlapping the area */
1249   if ( (vr->start > old_uaddr)
1250        || (vr->start + vr->size < old_uaddr + old_size) )
1251     return -SOS_EINVAL;
1252 
1253   /* Retrieve the prev/next VR if they exist (the VR are on circular
1254      list) */
1255   prev_vr = vr->prev_in_as;
1256   if (prev_vr->start >= vr->start)
1257     prev_vr = NULL;
1258   next_vr = vr->prev_in_as;
1259   if (next_vr->start <= vr->start)
1260     next_vr = NULL;
1261 
1262   /*
1263    * Compute new offset inside the mapped resource, if any
1264    */
1265 
1266   /* Don't allow to resize if the uaddr goes beyond the 'offset 0' of
1267      the resource */
1268   if ( (*new_uaddr < vr->start)
1269        && (vr->start - *new_uaddr > vr->offset_in_resource) )
1270     return -SOS_EINVAL;
1271   
1272   /* Compute new offset in the resource (overflow-safe) */
1273   if (vr->start > *new_uaddr)
1274     new_offset_in_resource
1275       = vr->offset_in_resource
1276       - (vr->start - *new_uaddr);
1277   else
1278     new_offset_in_resource
1279       = vr->offset_in_resource
1280       + (*new_uaddr - vr->start);
1281 
1282   /* If other VRs would be affected by this resizing, then the VR must
1283      be moved */
1284   if (prev_vr && (prev_vr->start + prev_vr->size > *new_uaddr))
1285     must_move_vr |= TRUE;
1286   if (next_vr && (next_vr->start < *new_uaddr + new_size))
1287     must_move_vr |= TRUE;
1288 
1289   /* If VR would be out-of-user-space, it must be moved */
1290   if (*new_uaddr < SOS_PAGING_BASE_USER_ADDRESS)
1291     must_move_vr |= TRUE;
1292   if (*new_uaddr > SOS_PAGING_TOP_USER_ADDRESS - new_size)
1293     must_move_vr |= TRUE;
1294 
1295   /* The VR must be moved but the user forbids it */
1296   if ( must_move_vr && !(flags & SOS_VR_REMAP_MAYMOVE) )
1297     return -SOS_EINVAL;
1298 
1299   /* If the VR must be moved, we simply map the resource elsewhere and
1300      unmap the current VR */
1301   if (must_move_vr)
1302     {
1303       sos_uaddr_t uaddr, result_uaddr;
1304       sos_ret_t retval;
1305 
1306       result_uaddr = *new_uaddr;
1307       retval = sos_umem_vmm_map(as, & result_uaddr, new_size,
1308                                 vr->access_rights,
1309                                 vr->flags | INTERNAL_MAP_CALLED_FROM_MREMAP,
1310                                 vr->mapped_resource,
1311                                 new_offset_in_resource);
1312       if (SOS_OK != retval)
1313         return retval;
1314 
1315       /* Remap the physical pages at their new address */
1316       for (uaddr = vr->start ;
1317            uaddr < vr->start + vr->size ;
1318            uaddr += SOS_PAGE_SIZE)
1319         {
1320           sos_paddr_t paddr;
1321           sos_ui32_t  prot;
1322           sos_uaddr_t vaddr;
1323           
1324           if (uaddr < *new_uaddr)
1325             continue;
1326           if (uaddr > *new_uaddr + new_size)
1327             continue;
1328 
1329           /* Compute destination virtual address (should be
1330              overflow-safe) */
1331           if (vr->start >= *new_uaddr)
1332             vaddr = result_uaddr
1333               + (uaddr - vr->start)
1334               + (vr->start - *new_uaddr);
1335           else
1336             vaddr = result_uaddr
1337               + (uaddr - vr->start)
1338               - (*new_uaddr - vr->start);
1339 
1340           paddr = sos_paging_get_paddr(uaddr);
1341           if (! paddr)
1342             /* No physical page mapped at this address yet */
1343             continue;
1344 
1345           prot  = sos_paging_get_prot(uaddr);
1346           SOS_ASSERT_FATAL(prot);
1347 
1348           /* Remap it at its destination address */
1349           retval = sos_paging_map(paddr, vaddr, TRUE, prot);
1350           if (SOS_OK != retval)
1351             {
1352               sos_umem_vmm_unmap(as, result_uaddr, new_size);
1353               return retval;
1354             }
1355         }
1356 
1357       retval = sos_umem_vmm_unmap(as, vr->start, vr->size);
1358       if (SOS_OK != retval)
1359         {
1360           sos_umem_vmm_unmap(as, result_uaddr, new_size);
1361           return retval;
1362         }
1363 
1364       *new_uaddr = result_uaddr;
1365       return retval;
1366     }
1367 
1368   /* Otherwise we simply resize the VR, taking care of unmapping
1369      what's been unmapped  */
1370 
1371   if (*new_uaddr + new_size < vr->start + vr->size)
1372     sos_umem_vmm_unmap(as, *new_uaddr + new_size,
1373                        vr->start + vr->size - (*new_uaddr + new_size));
1374   else
1375     {
1376       as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED,
1377                                          *new_uaddr + new_size
1378                                            - (vr->start + vr->size),
1379                                          0, vr->access_rights);
1380       vr->size += *new_uaddr + new_size - (vr->start + vr->size);
1381     }
1382   
1383   if (*new_uaddr > vr->start)
1384     sos_umem_vmm_unmap(as, vr->start, *new_uaddr - vr->start);
1385   else
1386     {
1387       as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED,
1388                                          vr->start - *new_uaddr,
1389                                          0, vr->access_rights);
1390       vr->size  += vr->start - *new_uaddr;
1391       vr->start  = *new_uaddr;
1392       vr->offset_in_resource = new_offset_in_resource; 
1393     }
1394 
1395   SOS_ASSERT_FATAL(vr->start == *new_uaddr);
1396   SOS_ASSERT_FATAL(vr->size  == new_size);
1397   SOS_ASSERT_FATAL(vr->offset_in_resource == new_offset_in_resource);
1398 
1399   return SOS_OK;
1400 }
1401 
1402 
1403 sos_ret_t sos_umem_vmm_try_resolve_page_fault(sos_uaddr_t uaddr,
1404                                               sos_bool_t write_access,
1405                                               sos_bool_t user_access)
1406 {
1407   struct sos_process     *process = sos_thread_get_current()->process;
1408   struct sos_umem_vmm_as *as;
1409   struct sos_umem_vmm_vr *vr;
1410 
1411   if (! process)
1412     return -SOS_EFAULT;
1413 
1414   as = sos_process_get_address_space(process);
1415   if (! as)
1416     return -SOS_EFAULT;
1417 
1418   vr = find_first_intersecting_vr(as, uaddr, 1);
1419   if (! vr)
1420     return -SOS_EFAULT;
1421 
1422   /* Write on a read-only VR */
1423   if (write_access && !(vr->access_rights & SOS_VM_MAP_PROT_WRITE))
1424     return -SOS_EFAULT;
1425 
1426   /* Write on a COW VR */
1427   if (write_access && !(vr->flags & SOS_VR_MAP_SHARED))
1428     {
1429       if (SOS_OK == sos_paging_try_resolve_COW(uaddr))
1430         {
1431           as->pgflt_cow ++;
1432           return SOS_OK;
1433         }
1434     }
1435 
1436   /* Ask the underlying resource to resolve the page fault */
1437   if (SOS_OK != vr->ops->page_in(vr, uaddr, write_access))
1438     {
1439       as->pgflt_invalid ++;
1440       return -SOS_EFAULT;
1441     }
1442 
1443   as->phys_total += SOS_PAGE_SIZE;
1444   as->pgflt_page_in ++;
1445 
1446   /* For a private mapping, keep the mapping read-only */
1447   if (!(vr->flags & SOS_VR_MAP_SHARED))
1448     {
1449       sos_paging_prepare_COW(SOS_PAGE_ALIGN_INF(uaddr),
1450                              SOS_PAGE_SIZE);
1451     }
1452 
1453   return SOS_OK;
1454 }
1455 
1456 
1457 sos_ret_t
1458 sos_umem_vmm_init_heap(struct sos_umem_vmm_as * as,
1459                        sos_uaddr_t heap_start)
1460 {
1461   SOS_ASSERT_FATAL(! as->heap_start);
1462 
1463   as->heap_start = heap_start;
1464   as->heap_size  = 0;
1465   return SOS_OK;
1466 }
1467 
1468 
1469 sos_uaddr_t
1470 sos_umem_vmm_brk(struct sos_umem_vmm_as * as,
1471                  sos_uaddr_t new_top_uaddr)
1472 {
1473   sos_uaddr_t new_start;
1474   sos_size_t  new_size;
1475   SOS_ASSERT_FATAL(as->heap_start);
1476 
1477   if (! new_top_uaddr)
1478     return as->heap_start + as->heap_size;
1479 
1480   if (new_top_uaddr == as->heap_start + as->heap_size)
1481     return as->heap_start + as->heap_size;
1482  
1483   if (new_top_uaddr < as->heap_start)
1484     return (sos_uaddr_t)NULL;
1485 
1486   new_top_uaddr = SOS_PAGE_ALIGN_SUP(new_top_uaddr);
1487   new_start = as->heap_start;
1488   new_size  = new_top_uaddr - as->heap_start;
1489 
1490   /* First call to brk: we must map /dev/zero */
1491   if (! as->heap_size)
1492     {
1493       if (SOS_OK != sos_dev_zero_map(as, & as->heap_start,
1494                                      new_size,
1495                                      SOS_VM_MAP_PROT_READ
1496                                      | SOS_VM_MAP_PROT_WRITE,
1497                                      0 /* private non-fixed */))
1498         return (sos_uaddr_t)NULL;
1499 
1500       as->heap_size = new_size;
1501       return as->heap_start + as->heap_size;
1502     }
1503 
1504   /* Otherwise we just have to unmap or resize the region */
1505   if (new_size <= 0)
1506     {
1507       if (SOS_OK != sos_umem_vmm_unmap(as,
1508                                        as->heap_start, as->heap_size))
1509         return (sos_uaddr_t)NULL;
1510     }
1511   else
1512     {
1513       if (SOS_OK != sos_umem_vmm_resize(as,
1514                                         as->heap_start, as->heap_size,
1515                                         & new_start, new_size,
1516                                         0))
1517         return (sos_uaddr_t)NULL;
1518     }
1519 
1520   SOS_ASSERT_FATAL(new_start == as->heap_start);
1521   as->heap_size = new_size;
1522   return new_top_uaddr;
1523 }
1524 
1525 
1526 static struct sos_umem_vmm_vr *
1527 find_enclosing_or_next_vr(struct sos_umem_vmm_as * as,
1528                           sos_uaddr_t uaddr)
1529 {
1530   struct sos_umem_vmm_vr *vr;
1531   int nb_vr;
1532 
1533   if (uaddr < SOS_PAGING_BASE_USER_ADDRESS)
1534     return NULL;
1535   if (uaddr > SOS_PAGING_TOP_USER_ADDRESS)
1536     return NULL;
1537 
1538   list_foreach_named(as->list_vr, vr, nb_vr, prev_in_as, next_in_as)
1539     {
1540       /* Equivalent to "if (uaddr < vr->start + vr->size)" but more
1541          robust (resilient to integer overflows) */
1542       if (uaddr <= vr->start + (vr->size - 1))
1543         return vr;
1544     }
1545 
1546   return NULL;
1547 }
1548 
1549 
1550 static struct sos_umem_vmm_vr *
1551 find_first_intersecting_vr(struct sos_umem_vmm_as * as,
1552                            sos_uaddr_t start_uaddr, sos_size_t size)
1553 {
1554   struct sos_umem_vmm_vr * vr;
1555   vr = find_enclosing_or_next_vr(as, start_uaddr);
1556   if (! vr)
1557     return NULL;
1558 
1559   if (start_uaddr + size <= vr->start)
1560     return NULL;
1561 
1562   return vr;
1563 }
1564 
1565 
/**
 * Look for a hole of at least 'size' bytes in the address space,
 * starting the search at hint_uaddr (clamped into user space).
 * Return the start address of the hole, or (sos_uaddr_t)NULL when no
 * suitable hole exists. The search wraps around to the bottom of
 * user space when it reaches the last VR.
 */
static sos_uaddr_t
find_first_free_interval(struct sos_umem_vmm_as * as,
                         sos_uaddr_t hint_uaddr, sos_size_t size)
{
  struct sos_umem_vmm_vr * initial_vr, * vr;

  /* Clamp the hint up into user space */
  if (hint_uaddr < SOS_PAGING_BASE_USER_ADDRESS)
    hint_uaddr = SOS_PAGING_BASE_USER_ADDRESS;

  /* An interval of 'size' bytes starting at the hint would overrun
     the top of user space: fail right away */
  if (hint_uaddr > SOS_PAGING_TOP_USER_ADDRESS - size + 1)
    return (sos_uaddr_t)NULL;

  initial_vr = vr = find_enclosing_or_next_vr(as, hint_uaddr);
  if (! vr)
    /* Great, there is nothing after ! */
    return hint_uaddr;

  /* Scan the remaining VRs in the list (the list is circular, so the
     loop terminates when we are back at the VR we started from) */
  do
    {
      /* Is there enough space /before/ that VR ? */
      if (hint_uaddr + size <= vr->start)
        /* Great ! */
        return hint_uaddr;

      /* Is there any VR /after/ this one, or do we have to wrap back
         at the begining of the user space ? */
      if (vr->next_in_as->start >= hint_uaddr)
        /* Ok, the next VR is really after us */
        hint_uaddr = vr->start + vr->size;
      else
        {
          /* No: wrapping up */

          /* Is there any space before the end of user space ? */
          if (hint_uaddr <= SOS_PAGING_TOP_USER_ADDRESS - size)
            return hint_uaddr;

          hint_uaddr = SOS_PAGING_BASE_USER_ADDRESS;
        }

      /* Prepare to look after this VR */
      vr = vr->next_in_as;
    }
  while (vr != initial_vr);

  /* Reached the end of the list and did not find anything ?... Look
     at the space after the last VR */
  /* NOTE(review): despite the comment above, this path reports
     failure; the space after the last VR is handled by the wrap-up
     branch inside the loop — confirm this is intentional */

  return (sos_uaddr_t)NULL;
}
1617 
1618 
1619 static void
1620 as_account_change_of_vr_protection(struct sos_umem_vmm_as * as,
1621                                    sos_bool_t is_shared,
1622                                    sos_size_t size,
1623                                    sos_ui32_t prev_access_rights,
1624                                    sos_ui32_t new_access_rights)
1625 {
1626   if (prev_access_rights == new_access_rights)
1627     return;
1628 
1629 #define _UPDATE_VMSTAT(field,is_increment) \
1630   ({ if (is_increment > 0) \
1631        as->field += size; \
1632      else \
1633        { SOS_ASSERT_FATAL(as->field >= size); as->field -= size; } })
1634 #define UPDATE_VMSTAT(field,is_increment) \
1635   ({ if (is_shared) _UPDATE_VMSTAT(vm_shrd.field, is_increment); \
1636      _UPDATE_VMSTAT(vm_total.field, is_increment); \
1637      SOS_ASSERT_FATAL(as->vm_total.field >= as->vm_shrd.field); })
1638 
1639   if ( (new_access_rights & SOS_VM_MAP_PROT_WRITE)
1640        && !(prev_access_rights & SOS_VM_MAP_PROT_WRITE))
1641     {
1642       UPDATE_VMSTAT(rw, +1);
1643       if (prev_access_rights & SOS_VM_MAP_PROT_READ)
1644         UPDATE_VMSTAT(ro, -1);
1645     }
1646   else if ( !(new_access_rights & SOS_VM_MAP_PROT_WRITE)
1647             && (prev_access_rights & SOS_VM_MAP_PROT_WRITE))
1648     {
1649       if (new_access_rights & SOS_VM_MAP_PROT_READ)
1650         UPDATE_VMSTAT(ro, +1);
1651       UPDATE_VMSTAT(rw, -1);
1652     }
1653   else if (new_access_rights & SOS_VM_MAP_PROT_READ)
1654     UPDATE_VMSTAT(ro, +1);
1655   else if (!(new_access_rights & SOS_VM_MAP_PROT_READ))
1656     UPDATE_VMSTAT(ro, -1);
1657 
1658   if ( (new_access_rights & SOS_VM_MAP_PROT_EXEC)
1659        && !(prev_access_rights & SOS_VM_MAP_PROT_EXEC))
1660     {
1661       UPDATE_VMSTAT(code, +1);
1662     }
1663   else if ( !(new_access_rights & SOS_VM_MAP_PROT_EXEC)
1664             && (prev_access_rights & SOS_VM_MAP_PROT_EXEC))
1665     {
1666       UPDATE_VMSTAT(code, -1);
1667     }
1668 
1669   if (new_access_rights && !prev_access_rights)
1670     UPDATE_VMSTAT(overall, +1);
1671   else if (!new_access_rights && prev_access_rights)
1672     UPDATE_VMSTAT(overall, -1);
1673 
1674 }

source navigation ] diff markup ] identifier search ] general search ]