SimpleOS

LXR

Navigation



Site hébergé par : enix

The LXR Cross Referencer for SOS

source navigation ]
diff markup ]
identifier search ]
general search ]
 
 
Article:1 ] [ 2 ] [ 3 ] [ 4 ] [ 5 ] [ 6 ] [ 6.5 ] [ 7 ] [ 7.5 ] [ 8 ] [ 9 ] [ 9.5 ]

001 /* Copyright (C) 2005 David Decotigny
002 
003    This program is free software; you can redistribute it and/or
004    modify it under the terms of the GNU General Public License
005    as published by the Free Software Foundation; either version 2
006    of the License, or (at your option) any later version.
007    
008    This program is distributed in the hope that it will be useful,
009    but WITHOUT ANY WARRANTY; without even the implied warranty of
010    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
011    GNU General Public License for more details.
012    
013    You should have received a copy of the GNU General Public License
014    along with this program; if not, write to the Free Software
015    Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
016    USA. 
017 */
018 
019 #include <sos/assert.h>
020 #include <sos/list.h>
021 #include <sos/physmem.h>
022 #include <sos/kmem_slab.h>
023 #include <drivers/bochs.h>
024 #include <hwcore/mm_context.h>
025 #include <hwcore/paging.h>
026 #include <drivers/zero.h>
027 
028 #include "umem_vmm.h"
029 
030 
/**
 * An address space (AS): the user-space memory layout of one process,
 * ie the set of virtual regions (VR) mapped into it, plus the MMU
 * configuration that backs them, plus accounting counters.
 */
struct sos_umem_vmm_as
{
  /** The process that owns this address space */
  struct sos_process     * process;

  /** The MMU configuration of this address space */
  struct sos_mm_context  * mm_context;

  /** The (circular) list of VRs in this address space, linked through
      the VRs' prev_in_as/next_in_as fields */
  struct sos_umem_vmm_vr * list_vr;

  /** Heap location */
  sos_uaddr_t heap_start;
  sos_size_t  heap_size; /**< Updated by sos_umem_vmm_brk() */

  /* Memory usage statistics */
  sos_size_t phys_total; /* shared + private */
  struct vm_usage
  {
    sos_size_t overall;
    sos_size_t ro, rw, code /* all: non readable, read and read/write */;
  } vm_total /**< all mappings */, vm_shrd /**< shared mappings only */;

  /* Page fault counters */
  sos_size_t pgflt_cow;     /**< copy-on-write faults resolved */
  sos_size_t pgflt_page_in; /**< faults resolved by paging data in */
  sos_size_t pgflt_invalid; /**< invalid (unresolvable) faults */
};
059 
060 
/**
 * A virtual region (VR): one contiguous, page-aligned interval of a
 * user address space, mapping (part of) a resource with uniform
 * access rights and flags.
 */
struct sos_umem_vmm_vr
{
  /** The address space owning this VR */
  struct sos_umem_vmm_as *address_space;

  /** The location of the mapping in user space (page-aligned) */
  sos_uaddr_t start;
  sos_size_t  size;

  /** What accesses are allowed (read, write, exec): @see
      SOS_VM_MAP_PROT_* flags in hwcore/paging.h */
  sos_ui32_t  access_rights;

  /** Flags of the VR. Allowed flags:
   *  - SOS_VR_MAP_SHARED
   */
  sos_ui32_t  flags;

  /**
   * The callbacks for the VR called along map/unmapping of the
   * resource
   */
  struct sos_umem_vmm_vr_ops *ops;

  /** Description of the resource being mapped, if any, and the
      offset inside that resource where this VR starts */
  struct sos_umem_vmm_mapped_resource *mapped_resource;
  sos_luoffset_t offset_in_resource;

  /** The VRs of an AS are linked together (circular list) and are
      accessible by way of as->list_vr */
  struct sos_umem_vmm_vr *prev_in_as, *next_in_as;

  /** The VRs mapping a given resource are linked together and are
      accessible by way of mapped_resource->list_vr */
  struct sos_umem_vmm_vr *prev_in_mapped_resource, *next_in_mapped_resource;
};
097 
098 
099 /*
100  * We use special slab caches to allocate AS and VR data structures
101  */
102 static struct sos_kslab_cache * cache_of_as;
103 static struct sos_kslab_cache * cache_of_vr;
104 
105 
106 /** Temporary function to debug: list the VRs of the given As */
107 void sos_dump_as(const struct sos_umem_vmm_as * as, const char *str)
108 {
109   struct sos_umem_vmm_vr *vr;
110   int nb_vr;
111 
112   sos_bochs_printf("AS %p - %s:\n", as, str);
113   sos_bochs_printf("   physical mem: %x\n",
114                    as->phys_total);
115   sos_bochs_printf("   VM (all/ro+rw/exec) tot:%x/%x+%x/%x shrd:%x/%x+%x/%x\n",
116                    as->vm_total.overall,
117                    as->vm_total.ro, as->vm_total.rw, as->vm_total.code,
118                    as->vm_shrd.overall,
119                    as->vm_shrd.ro, as->vm_shrd.rw, as->vm_shrd.code);
120   sos_bochs_printf("   pgflt cow=%d pgin=%d inv=%d\n",
121                    as->pgflt_cow, as->pgflt_page_in, as->pgflt_invalid);
122   list_foreach_named(as->list_vr, vr, nb_vr, prev_in_as, next_in_as)
123     {
124       sos_bochs_printf("  VR[%d]=%x: [%x,%x[ (sz=%x) mr=(%x)+%llx %c%c%c fl=%x\n",
125                        nb_vr, (unsigned)vr,
126                        vr->start, vr->start + vr->size, vr->size,
127                        (unsigned)vr->mapped_resource,
128                        vr->offset_in_resource,
129                        (vr->access_rights & SOS_VM_MAP_PROT_READ)?'r':'-',
130                        (vr->access_rights & SOS_VM_MAP_PROT_WRITE)?'w':'-',
131                        (vr->access_rights & SOS_VM_MAP_PROT_EXEC)?'x':'-',
132                        (unsigned)vr->flags);
133     }
134   sos_bochs_printf("FIN (%s)\n", str);
135 }
136 
137 
138 /**
139  * Physical address of THE page (full of 0s) used for anonymous
140  * mappings
141  */
142 sos_paddr_t sos_zero_physpage = 0 /* Initial value prior to allocation */;
143 sos_vaddr_t sos_zero_kernelpage = 0 /* Initial value prior to allocation */;
144 
145 
146 /*
147  * Helper functions defined at the bottom of the file
148  */
149 
150 /**
151  * Helper function to retrieve the first VR to have a vr->end >= uaddr
152  */
153 static struct sos_umem_vmm_vr *
154 find_enclosing_or_next_vr(struct sos_umem_vmm_as * as,
155                           sos_uaddr_t uaddr);
156 
157 
158 /**
159  * Helper function to retrieve the first VR that overlaps the given
160  * interval, if any
161  */
162 static struct sos_umem_vmm_vr *
163 find_first_intersecting_vr(struct sos_umem_vmm_as * as,
164                            sos_uaddr_t start_uaddr, sos_size_t size);
165 
166 
167 /**
168  * Helper function to find first address where there is enough
169  * space. Begin to look for such an interval at or after the given
170  * address
171  *
172  * @param hint_addr The address where to begin the scan, or NULL
173  */
174 static sos_uaddr_t
175 find_first_free_interval(struct sos_umem_vmm_as * as,
176                          sos_uaddr_t hint_uaddr, sos_size_t size);
177 
178 
/** Called each time a VR of the AS changes. Does not cope with any
    underlying physical mapping/unmapping, COW, etc... */
181 static void
182 as_account_change_of_vr_protection(struct sos_umem_vmm_as * as,
183                                    sos_bool_t is_shared,
184                                    sos_size_t size,
185                                    sos_ui32_t prev_access_rights,
186                                    sos_ui32_t new_access_rights);
187 
188 
189 sos_ret_t sos_umem_vmm_subsystem_setup()
190 {
191   /* Allocate a new kernel physical page mapped into kernel space and
192      reset it with 0s */
193   sos_zero_kernelpage = sos_kmem_vmm_alloc(1, SOS_KMEM_VMM_MAP);
194   if (sos_zero_kernelpage == (sos_vaddr_t)NULL)
195     return -SOS_ENOMEM;
196   memset((void*)sos_zero_kernelpage, 0x0, SOS_PAGE_SIZE);
197   
198   /* Keep a reference to the underlying pphysical page... */
199   sos_zero_physpage = sos_paging_get_paddr(sos_zero_kernelpage);
200   SOS_ASSERT_FATAL(NULL != (void*)sos_zero_physpage);
201   sos_physmem_ref_physpage_at(sos_zero_physpage);
202 
203   /* Allocate the VR/AS caches */
204   cache_of_as
205     = sos_kmem_cache_create("Address space structures",
206                             sizeof(struct sos_umem_vmm_as),
207                             1, 0,
208                             SOS_KSLAB_CREATE_MAP
209                             | SOS_KSLAB_CREATE_ZERO);
210   if (! cache_of_as)
211     {
212       sos_physmem_unref_physpage(sos_zero_physpage);
213       return -SOS_ENOMEM;
214     }
215 
216   cache_of_vr
217     = sos_kmem_cache_create("Virtual Region structures",
218                             sizeof(struct sos_umem_vmm_vr),
219                             1, 0,
220                             SOS_KSLAB_CREATE_MAP
221                             | SOS_KSLAB_CREATE_ZERO);
222   if (! cache_of_vr)
223     {
224       sos_physmem_unref_physpage(sos_zero_physpage);
225       sos_kmem_cache_destroy(cache_of_as);
226       return -SOS_ENOMEM;
227     }
228 
229   return SOS_OK;
230 }
231 
232 
233 struct sos_umem_vmm_as *
234 sos_umem_vmm_create_empty_as(struct sos_process *owner)
235 {
236   struct sos_umem_vmm_as * as
237     = (struct sos_umem_vmm_as *) sos_kmem_cache_alloc(cache_of_as, 0);
238   if (! as)
239     return NULL;
240 
241   as->mm_context = sos_mm_context_create();
242   if (NULL == as->mm_context)
243     {
244       /* Error */
245       sos_kmem_cache_free((sos_vaddr_t)as);
246       return NULL;
247     }
248 
249   as->process = owner;
250   return as;
251 }
252 
253 
/**
 * Clone the current thread's address space into a new AS owned by
 * 'owner' (fork-style duplication): copies all VRs, prepares COW on
 * private writable mappings, and duplicates the MMU configuration.
 *
 * @param owner The process that will own the new address space
 * @return The new AS, or NULL on allocation failure (everything
 *         partially built is torn down through undo_creation)
 */
struct sos_umem_vmm_as *
sos_umem_vmm_duplicate_current_thread_as(struct sos_process *owner)
{
  __label__ undo_creation;
  struct sos_umem_vmm_as * my_as;
  struct sos_umem_vmm_vr * model_vr;
  int nb_vr;

  /* cache_of_as is created with SOS_KSLAB_CREATE_ZERO, so all fields
     (mm_context included) start out NULL/0 */
  struct sos_umem_vmm_as * new_as
    = (struct sos_umem_vmm_as *) sos_kmem_cache_alloc(cache_of_as, 0);
  if (! new_as)
    return NULL;

  my_as = sos_process_get_address_space(sos_thread_get_current()->process);
  new_as->process = owner;
  list_init_named(new_as->list_vr, prev_in_as, next_in_as);

  /*
   * Switch to the current threads' mm_context, as duplicating it implies
   * being able to configure some of its mappings as read-only (for
   * COW)
   */
  SOS_ASSERT_FATAL(SOS_OK
                   == sos_thread_prepare_user_space_access(my_as,
                                                           (sos_vaddr_t)
                                                             NULL));

  /* Copy the virtual regions */
  list_foreach_named(my_as->list_vr, model_vr, nb_vr, prev_in_as, next_in_as)
    {
      struct sos_umem_vmm_vr * vr;

      /* Prepare COW on the read/write private mappings */
      if ( !(model_vr->flags & SOS_VR_MAP_SHARED)
           && (model_vr->access_rights & SOS_VM_MAP_PROT_WRITE) )
        {
          /* Mark the underlying physical pages (if any) as
             read-only */
          SOS_ASSERT_FATAL(SOS_OK
                           == sos_paging_prepare_COW(model_vr->start,
                                                     model_vr->size));
        }

      /* Allocate a new virtual region and copy the 'model' into it */
      vr = (struct sos_umem_vmm_vr *) sos_kmem_cache_alloc(cache_of_vr, 0);
      if (! vr)
        goto undo_creation;
      memcpy(vr, model_vr, sizeof(*vr));
      vr->address_space = new_as;

      /* Signal the "new" mapping to the underlying VR mapper */
      if (vr->ops && vr->ops->ref)
        vr->ops->ref(vr);

      /* Insert the new VR into the new AS */
      list_add_tail_named(new_as->list_vr, vr, prev_in_as, next_in_as);

      /* Insert the new VR into the list of mappings of the resource */
      list_add_tail_named(model_vr->mapped_resource->list_vr, vr,
                          prev_in_mapped_resource,
                          next_in_mapped_resource);
    }

  /* Now copy the current MMU configuration */
  new_as->mm_context = sos_mm_context_duplicate(my_as->mm_context);
  if (NULL == new_as->mm_context)
    goto undo_creation;

  /* Correct behavior: copy heap bounds and accounting counters */
  new_as->heap_start = my_as->heap_start;
  new_as->heap_size  = my_as->heap_size;
  new_as->phys_total = my_as->phys_total;
  memcpy(& new_as->vm_total, & my_as->vm_total, sizeof(struct vm_usage));
  memcpy(& new_as->vm_shrd, & my_as->vm_shrd, sizeof(struct vm_usage));
  SOS_ASSERT_FATAL(SOS_OK == sos_thread_end_user_space_access());
  return new_as;

  /* Handle erroneous behavior: delete_as copes with a NULL
     mm_context, so this is safe at any point above */
 undo_creation:
  SOS_ASSERT_FATAL(SOS_OK == sos_thread_end_user_space_access());
  sos_umem_vmm_delete_as(new_as);
  return NULL;
}
337 
338 
339 sos_ret_t
340 sos_umem_vmm_delete_as(struct sos_umem_vmm_as * as)
341 {
342   while(! list_is_empty_named(as->list_vr, prev_in_as, next_in_as))
343     {
344       struct sos_umem_vmm_vr * vr;
345       vr = list_get_head_named(as->list_vr, prev_in_as, next_in_as);
346 
347       /* Remove the vr from the lists */
348       list_pop_head_named(as->list_vr, prev_in_as, next_in_as);
349       list_delete_named(vr->mapped_resource->list_vr, vr,
350                         prev_in_mapped_resource,
351                         next_in_mapped_resource);
352 
353       /* Signal to the underlying VR mapper that the mapping is
354          suppressed */
355       if (vr->ops)
356         {
357           if (vr->ops->unmap)
358             vr->ops->unmap(vr, vr->start, vr->size);
359           if (vr->ops->unref)
360             vr->ops->unref(vr);
361         }
362 
363       sos_kmem_cache_free((sos_vaddr_t)vr);
364     }
365   
366   /* Release MMU configuration */
367   if (as->mm_context)
368     sos_mm_context_unref(as->mm_context);
369 
370   /* Now unallocate main address space construct */
371   sos_kmem_cache_free((sos_vaddr_t)as);
372 
373   return SOS_OK;
374 }
375 
376 
/** Accessor: the process owning the given address space */
struct sos_process *
sos_umem_vmm_get_process(struct sos_umem_vmm_as * as)
{
  return as->process;
}
382 
383 
/** Accessor: the MMU configuration of the given address space */
struct sos_mm_context *
sos_umem_vmm_get_mm_context(struct sos_umem_vmm_as * as)
{
  return as->mm_context;
}
389 
390 
391 struct sos_umem_vmm_vr *
392 sos_umem_vmm_get_vr_at_address(struct sos_umem_vmm_as * as,
393                                sos_uaddr_t uaddr)
394 {
395   struct sos_umem_vmm_vr * vr;
396   vr = find_enclosing_or_next_vr(as, uaddr);
397   if (! vr)
398     return NULL;
399 
400   /* Ok uaddr <= vr->end, but do we have uaddr > vr->start ? */
401   if (uaddr < vr->start)
402     return NULL;
403 
404   return vr;
405 }
406 
407 
/** Accessor: the address space a VR belongs to */
struct sos_umem_vmm_as *
sos_umem_vmm_get_as_of_vr(struct sos_umem_vmm_vr * vr)
{
  return vr->address_space;
}
413 
414 
/** Accessor: the mapper callbacks attached to a VR (may be NULL) */
struct sos_umem_vmm_vr_ops *
sos_umem_vmm_get_ops_of_vr(struct sos_umem_vmm_vr * vr)
{
  return vr->ops;
}
420 
421 
/** Accessor: the SOS_VM_MAP_PROT_* access rights of a VR */
sos_ui32_t sos_umem_vmm_get_prot_of_vr(struct sos_umem_vmm_vr * vr)
{
  return vr->access_rights;
}
426 
427 
/** Accessor: the SOS_VR_MAP_* flags of a VR */
sos_ui32_t sos_umem_vmm_get_flags_of_vr(struct sos_umem_vmm_vr * vr)
{
  return vr->flags;
}
432 
433 
/** Accessor: the resource mapped by a VR */
struct sos_umem_vmm_mapped_resource *
sos_umem_vmm_get_mapped_resource_of_vr(struct sos_umem_vmm_vr * vr)
{
  return vr->mapped_resource;
}
439 
440 
/** Accessor: the user-space start address of a VR */
sos_uaddr_t sos_umem_vmm_get_start_of_vr(struct sos_umem_vmm_vr * vr)
{
  return vr->start;
}
445 
446 
/** Accessor: the size (in bytes) of a VR */
sos_size_t sos_umem_vmm_get_size_of_vr(struct sos_umem_vmm_vr * vr)
{
  return vr->size;
}
451 
452 
/** Accessor: the offset inside the mapped resource where the VR starts */
sos_luoffset_t sos_umem_vmm_get_offset_in_resource(struct sos_umem_vmm_vr * vr)
{
  return vr->offset_in_resource;
}
457 
458 
/**
 * Attach the mapper callbacks to a VR. May be done only once per VR:
 * a second attempt is a fatal error.
 *
 * @return SOS_OK
 */
sos_ret_t
sos_umem_vmm_set_ops_of_vr(struct sos_umem_vmm_vr * vr,
                           struct sos_umem_vmm_vr_ops * ops)
{
  /* Don't allow to overwrite any preceding VR ops */
  SOS_ASSERT_FATAL(NULL == vr->ops);

  vr->ops = ops;
  return SOS_OK;
}
469 
470 
471 /**
472  * When resize asks to map the resource elsewhere, make sure not to
473  * overwrite the offset_in_resource field
474  */
475 #define INTERNAL_MAP_CALLED_FROM_MREMAP (1 << 8)
476 
477 sos_ret_t
478 sos_umem_vmm_map(struct sos_umem_vmm_as * as,
479                  sos_uaddr_t * /*in/out*/uaddr, sos_size_t size,
480                  sos_ui32_t access_rights,
481                  sos_ui32_t flags,
482                  struct sos_umem_vmm_mapped_resource * resource,
483                  sos_luoffset_t offset_in_resource)
484 {
485   __label__ return_mmap;
486   sos_uaddr_t hint_uaddr;
487   struct sos_umem_vmm_vr *prev_vr, *next_vr, *vr, *preallocated_vr;
488   sos_bool_t merge_with_preceding, merge_with_next, used_preallocated_vr;
489   sos_bool_t internal_map_called_from_mremap
490     = (flags & INTERNAL_MAP_CALLED_FROM_MREMAP);
491 
492   sos_ret_t retval     = SOS_OK;
493   used_preallocated_vr = FALSE;
494   hint_uaddr           = *uaddr;
495 
496   /* Default mapping address is NULL */
497   *uaddr = (sos_vaddr_t)NULL;
498 
499   if (! resource)
500     return -SOS_EINVAL;
501   if (! resource->mmap)
502     return -SOS_EPERM;
503 
504   if (! SOS_IS_PAGE_ALIGNED(hint_uaddr))
505     return -SOS_EINVAL;
506 
507   if (size <= 0)
508     return -SOS_EINVAL;
509   size = SOS_PAGE_ALIGN_SUP(size);
510 
511   if (flags & SOS_VR_MAP_SHARED)
512     {
513       /* Make sure the mapped resource allows the required protection flags */
514       if ( ( (access_rights & SOS_VM_MAP_PROT_READ)
515              && !(resource->allowed_access_rights & SOS_VM_MAP_PROT_READ) )
516            || ( (access_rights & SOS_VM_MAP_PROT_WRITE)
517                 && !(resource->allowed_access_rights & SOS_VM_MAP_PROT_WRITE) )
518            || ( (access_rights & SOS_VM_MAP_PROT_EXEC)
519                 && !(resource->allowed_access_rights & SOS_VM_MAP_PROT_EXEC)) )
520         return -SOS_EPERM;
521     }
522 
523   /* Sanity checks over the offset_in_resource parameter */
524   if ( !internal_map_called_from_mremap
525        && ( resource->flags & SOS_MAPPED_RESOURCE_ANONYMOUS ) )
526     /* Initial offset ignored for anonymous mappings */
527     {
528       /* Nothing to check */
529     }
530 
531   /* Make sure that the offset in resource won't overflow */
532   else if (offset_in_resource + size <= offset_in_resource)
533     return -SOS_EINVAL;
534 
535   /* Filter out unsupported flags */
536   access_rights &= (SOS_VM_MAP_PROT_READ
537                     | SOS_VM_MAP_PROT_WRITE
538                     | SOS_VM_MAP_PROT_EXEC);
539   flags &= (SOS_VR_MAP_SHARED
540             | SOS_VR_MAP_FIXED);
541 
542   /* Pre-allocate a new VR. Because once we found a valid slot inside
543      the VR list, we don't want the list to be altered by another
544      process */
545   preallocated_vr
546     = (struct sos_umem_vmm_vr *)sos_kmem_cache_alloc(cache_of_vr, 0);
547   if (! preallocated_vr)
548     return -SOS_ENOMEM;
549 
550   /* Compute the user address of the new mapping */
551   if (flags & SOS_VR_MAP_FIXED)
552     {
553       /*
554        * The address is imposed
555        */
556 
557       /* Make sure the hint_uaddr hint is valid */
558       if (hint_uaddr < SOS_PAGING_BASE_USER_ADDRESS)
559         { retval = -SOS_EINVAL; goto return_mmap; }
560       if (hint_uaddr > SOS_PAGING_TOP_USER_ADDRESS - size)
561         { retval = -SOS_EINVAL; goto return_mmap; }
562 
563       /* Unmap any overlapped VR */
564       retval = sos_umem_vmm_unmap(as, hint_uaddr, size);
565       if (SOS_OK != retval)
566         { goto return_mmap; }
567     }
568   else
569     {
570       /*
571        * A free range has to be determined
572        */
573 
574       /* Find a suitable free VR */
575       hint_uaddr = find_first_free_interval(as, hint_uaddr, size);
576       if (! hint_uaddr)
577         { retval = -SOS_ENOMEM; goto return_mmap; }
578     }
579 
580   /* For anonymous resource mappings, set the initial
581      offset_in_resource to the initial virtual start address in user
582      space */
583   if ( !internal_map_called_from_mremap
584        && (resource->flags & SOS_MAPPED_RESOURCE_ANONYMOUS ) )
585     offset_in_resource = hint_uaddr;
586 
587   /* Lookup next and previous VR, if any. This will allow us to merge
588      the regions, when possible */
589   next_vr = find_enclosing_or_next_vr(as, hint_uaddr);
590   if (next_vr)
591     {
592       /* Find previous VR, if any */
593       prev_vr = next_vr->prev_in_as;
594       /* The list is curcular: it may happen that we looped over the
595          tail of the list (ie the list is a singleton) */
596       if (prev_vr->start > hint_uaddr)
597         prev_vr = NULL; /* No preceding VR */
598     }
599   else
600     {
601       /* Otherwise we went beyond the last VR */
602       prev_vr = list_get_tail_named(as->list_vr, prev_in_as, next_in_as);
603     }
604 
605   /* Merge with preceding VR ? */
606   merge_with_preceding
607     = ( (NULL != prev_vr)
608         && (prev_vr->mapped_resource == resource)
609         && (prev_vr->offset_in_resource + prev_vr->size == offset_in_resource)
610         && (prev_vr->start + prev_vr->size == hint_uaddr)
611         && (prev_vr->flags == flags)
612         && (prev_vr->access_rights == access_rights) );
613 
614   /* Merge with next VR ? */
615   merge_with_next
616     = ( (NULL != next_vr)
617         && (next_vr->mapped_resource == resource)
618         && (offset_in_resource + size == next_vr->offset_in_resource)
619         && (hint_uaddr + size == next_vr->start)
620         && (next_vr->flags == flags)
621         && (next_vr->access_rights == access_rights) );
622 
623   if (merge_with_preceding && merge_with_next)
624     {
625       /* Widen the prev_vr VR to encompass both the new VR and the next_vr */
626       vr = prev_vr;
627       vr->size += size + next_vr->size;
628       
629       /* Remove the next_vr VR */
630       list_delete_named(as->list_vr, next_vr, prev_in_as, next_in_as);
631       list_delete_named(next_vr->mapped_resource->list_vr, next_vr,
632                         prev_in_mapped_resource, next_in_mapped_resource);
633 
634       if (next_vr->ops && next_vr->ops->unref)
635         next_vr->ops->unref(next_vr);
636 
637       sos_kmem_vmm_free((sos_vaddr_t) next_vr);
638     }
639   else if (merge_with_preceding)
640     {
641       /* Widen the prev_vr VR to encompass the new VR */
642       vr = prev_vr;
643       vr->size += size;
644     }
645   else if (merge_with_next)
646     {
647       /* Widen the next_vr VR to encompass the new VR */
648       vr = next_vr;
649       vr->start -= size;
650       vr->size  += size;
651     }
652   else
653     {
654       /* Allocate a brand new VR and insert it into the list */
655 
656       vr = preallocated_vr;
657       used_preallocated_vr = TRUE;
658 
659       vr->start              = hint_uaddr;
660       vr->size               = size;
661       vr->access_rights      = access_rights;
662       vr->flags              = flags;
663       vr->mapped_resource    = resource;
664       vr->offset_in_resource = offset_in_resource;
665 
666       /* Insert VR in address space */
667       vr->address_space      = as;
668       if (prev_vr)
669         list_insert_after_named(as->list_vr, prev_vr, vr,
670                                 prev_in_as, next_in_as);
671       else
672         list_add_head_named(as->list_vr, vr, prev_in_as, next_in_as);
673       list_add_tail_named(vr->mapped_resource->list_vr, vr,
674                           prev_in_mapped_resource,
675                           next_in_mapped_resource);
676       
677       /* Signal the resource we are mapping it */
678       if (resource && resource->mmap)
679         {
680           retval = resource->mmap(vr);
681           if (SOS_OK != retval)
682             {
683               retval = sos_umem_vmm_unmap(as, vr->start, vr->size);
684               goto return_mmap;
685             }
686 
687           /* The page_in method is MANDATORY for mapped resources */
688           SOS_ASSERT_FATAL(vr->ops && vr->ops->page_in);
689         }
690 
691       if (vr->ops && vr->ops->ref)
692         vr->ops->ref(vr);
693     }
694 
695   /* Ok, fine, we got it right ! Return the address to the caller */
696   *uaddr = hint_uaddr;
697   as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED,
698                                      size, 0, vr->access_rights);
699   retval = SOS_OK;
700 
701  return_mmap:
702   if (! used_preallocated_vr)
703     sos_kmem_vmm_free((sos_vaddr_t)preallocated_vr);
704     
705   return retval;
706 }
707 
708 
709 sos_ret_t
710 sos_umem_vmm_unmap(struct sos_umem_vmm_as * as,
711                    sos_uaddr_t uaddr, sos_size_t size)
712 {
713   struct sos_umem_vmm_vr *vr, *preallocated_vr;
714   sos_bool_t need_to_setup_mmu;
715   sos_bool_t used_preallocated_vr;
716 
717   if (! SOS_IS_PAGE_ALIGNED(uaddr))
718     return -SOS_EINVAL;
719   if (size <= 0)
720     return -SOS_EINVAL;
721   size = SOS_PAGE_ALIGN_SUP(size);
722 
723   /* Make sure the uaddr is valid */
724   if (uaddr < SOS_PAGING_BASE_USER_ADDRESS)
725     return -SOS_EINVAL;
726   if (uaddr > SOS_PAGING_TOP_USER_ADDRESS - size)
727     return -SOS_EINVAL;
728 
729   /* In some cases, the unmapping might imply a VR to be split into
730      2. Actually, allocating a new VR can be a blocking operation, but
731      actually we can block now, it won't do no harm. But we must be
732      careful not to block later, while altering the VR lists: that's
733      why we pre-allocate now. */
734   used_preallocated_vr = FALSE;
735   preallocated_vr
736     = (struct sos_umem_vmm_vr *)sos_kmem_cache_alloc(cache_of_vr, 0);
737   if (! preallocated_vr)
738     return -SOS_ENOMEM;
739 
740   /* Find any VR intersecting with the given interval */
741   vr = find_first_intersecting_vr(as, uaddr, size);
742 
743   /* Unmap (part of) the VR covered by [uaddr .. uaddr+size[ */
744   while (NULL != vr)
745     {
746       /* Went past the end of the *circular* list => back at the
747          beginning ? */
748       if (vr->start + vr->size <= uaddr)
749         /* Yes, stop now */
750         break;
751 
752       /* Went beyond the region to unmap ? */
753       if (uaddr + size <= vr->start)
754         /* Yes, stop now */
755         break;
756 
757       /* VR totally unmapped ? */
758       if ((vr->start >= uaddr)
759           && (vr->start + vr->size <= uaddr + size))
760         {
761           struct sos_umem_vmm_vr *next_vr;
762 
763           /* Yes: signal we remove it completely */
764           if (vr->ops && vr->ops->unmap)
765             vr->ops->unmap(vr, vr->start, vr->size);
766 
767           /* Remove it from the AS list now */
768           next_vr = vr->next_in_as;
769           if (next_vr == vr) /* singleton ? */
770             next_vr = NULL;
771           list_delete_named(as->list_vr, vr, prev_in_as, next_in_as);
772 
773           /* Remove from the list of VRs mapping the resource */
774           list_delete_named(vr->mapped_resource->list_vr, vr,
775                             prev_in_mapped_resource,
776                             next_in_mapped_resource);
777 
778           if (vr->ops && vr->ops->unref)
779             vr->ops->unref(vr);
780           
781           as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED,
782                                              vr->size, vr->access_rights, 0);
783           sos_kmem_vmm_free((sos_vaddr_t)vr);
784       
785           /* Prepare next iteration */
786           vr = next_vr;
787           continue;
788         }
789 
790       /* unmapped region lies completely INSIDE the the VR */
791       else if ( (vr->start < uaddr)
792                 && (vr->start + vr->size > uaddr + size) )
793         {
794           /* VR has to be split into 2 */
795 
796           /* Use the preallocated VR and copy the VR into it */
797           used_preallocated_vr = TRUE;
798           memcpy(preallocated_vr, vr, sizeof(*vr));
799 
800           /* Adjust the start/size of both VRs */
801           preallocated_vr->start = uaddr + size;
802           preallocated_vr->size  = vr->start + vr->size - (uaddr + size);
803           preallocated_vr->offset_in_resource += uaddr + size - vr->start;
804           vr->size                             = uaddr - vr->start;
805 
806           /* Insert the new VR into the list */
807           list_insert_after_named(as->list_vr, vr, preallocated_vr,
808                                   prev_in_as, next_in_as);
809           list_add_tail_named(vr->mapped_resource->list_vr, preallocated_vr,
810                               prev_in_mapped_resource,
811                               next_in_mapped_resource);
812 
813           /* Signal the changes to the underlying resource */
814           if (vr->ops && vr->ops->unmap)
815             vr->ops->unmap(vr, uaddr, size);
816           if (preallocated_vr->ops && preallocated_vr->ops->ref)
817             preallocated_vr->ops->ref(preallocated_vr);
818 
819           /* Account for change in VRs */
820           as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED,
821                                              size, vr->access_rights, 0);
822 
823           /* No need to go further */
824           break;
825         }
826 
827       /* Unmapped region only affects the START address of the VR */
828       else if (uaddr <= vr->start)
829         {
830           sos_size_t translation = uaddr + size - vr->start;
831 
832           /* Shift the VR */
833           vr->size               -= translation;
834           vr->offset_in_resource += translation;
835           vr->start              += translation;
836           
837           /* Signal unmapping */
838           if (vr->ops && vr->ops->unmap)
839             vr->ops->unmap(vr, uaddr + size,
840                            translation);
841           
842           /* Account for change in VRs */
843           as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED,
844                                              translation,
845                                              vr->access_rights, 0);
846 
847           /* No need to go further, we reached the last VR that
848              overlaps the unmapped region */
849           break;
850         }
851 
852       /* Unmapped region only affects the ENDING address of the VR */
853       else if (uaddr + size >= vr->start + vr->size)
854         {
855           sos_size_t unmapped_size = vr->start + vr->size - uaddr;
856 
857           /* Resize VR */
858           vr->size = uaddr - vr->start;
859           
860           /* Signal unmapping */
861           if (vr->ops && vr->ops->unmap)
862             vr->ops->unmap(vr, uaddr, unmapped_size);
863 
864           /* Account for change in VRs */
865           as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED,
866                                              unmapped_size,
867                                              vr->access_rights, 0);
868           
869           vr = vr->next_in_as;
870           continue;
871         }
872 
873       sos_display_fatal_error("BUG uaddr=%x sz=%x vr_start=%x, vr_sz=%x",
874                               uaddr, size, vr->start, vr->size);
875     }
876 
877   need_to_setup_mmu = (sos_thread_get_current()->squatted_mm_context
878                        != as->mm_context);
879   if (need_to_setup_mmu)
880     SOS_ASSERT_FATAL(SOS_OK
881                      == sos_thread_prepare_user_space_access(as,
882                                                              (sos_vaddr_t)
883                                                                NULL));
884   {
885     sos_size_t sz_unmapped = sos_paging_unmap_interval(uaddr, size);
886     SOS_ASSERT_FATAL(sz_unmapped >= 0);
887     as->phys_total -= sz_unmapped;
888   }
889   if (need_to_setup_mmu)
890     SOS_ASSERT_FATAL(SOS_OK == sos_thread_end_user_space_access());
891 
892   if (! used_preallocated_vr)
893     sos_kmem_vmm_free((sos_vaddr_t)preallocated_vr);
894 
895   return SOS_OK;
896 }
897 
898 
899 sos_ret_t
900 sos_umem_vmm_chprot(struct sos_umem_vmm_as * as,
901                     sos_uaddr_t uaddr, sos_size_t size,
902                     sos_ui32_t new_access_rights)
903 {
904   struct sos_umem_vmm_vr *start_vr, *vr,
905     *preallocated_middle_vr, *preallocated_right_vr;
906   sos_bool_t used_preallocated_middle_vr, used_preallocated_right_vr;
907 
908   if (! SOS_IS_PAGE_ALIGNED(uaddr))
909     return -SOS_EINVAL;
910   if (size <= 0)
911     return -SOS_EINVAL;
912   size = SOS_PAGE_ALIGN_SUP(size);
913 
914   /* Make sure the uaddr is valid */
915   if (uaddr < SOS_PAGING_BASE_USER_ADDRESS)
916     return -SOS_EINVAL;
917   if (uaddr > SOS_PAGING_TOP_USER_ADDRESS - size)
918     return -SOS_EINVAL;
919 
920   /* Pre-allocate 2 new VRs (same reason as for unmap). Because chprot
921      may imply at most 2 regions to be split */
922   used_preallocated_middle_vr = FALSE;
923   used_preallocated_right_vr  = FALSE;
924   preallocated_middle_vr
925     = (struct sos_umem_vmm_vr *)sos_kmem_cache_alloc(cache_of_vr, 0);
926   if (! preallocated_middle_vr)
927     return -SOS_ENOMEM;
928   preallocated_right_vr
929     = (struct sos_umem_vmm_vr *)sos_kmem_cache_alloc(cache_of_vr, 0);
930   if (! preallocated_right_vr)
931     {
932       sos_kmem_vmm_free((sos_vaddr_t)preallocated_middle_vr);
933       return -SOS_ENOMEM;
934     }
935 
936   /* Find any VR intersecting with the given interval */
937   start_vr = find_first_intersecting_vr(as, uaddr, size);
938   if (NULL == start_vr)
939     return SOS_OK;
940 
941   /* First of all: make sure we are allowed to change the access
942      rights of all the VRs concerned by the chprot */
943   vr = start_vr;
944   while (TRUE)
945     {
946       /* Went past the end of the *circular* list => back at the
947          begining ? */
948       if (vr->start + vr->size <= uaddr)
949         /* Yes, stop now */
950         break;
951 
952       /* Went beyond the region to chprot ? */
953       if (uaddr + size < vr->start)
954         /* Yes, stop now */
955         break;
956 
957       if (vr->flags & SOS_VR_MAP_SHARED)
958         {
959           /* Make sure the mapped resource allows the required
960              protection flags */
961           if ( ( (new_access_rights & SOS_VM_MAP_PROT_READ)
962                  && !(vr->mapped_resource->allowed_access_rights
963                       & SOS_VM_MAP_PROT_READ) )
964                || ( (new_access_rights & SOS_VM_MAP_PROT_WRITE)
965                     && !(vr->mapped_resource->allowed_access_rights
966                          & SOS_VM_MAP_PROT_WRITE) )
967                || ( (new_access_rights & SOS_VM_MAP_PROT_EXEC)
968                     && !(vr->mapped_resource->allowed_access_rights
969                          & SOS_VM_MAP_PROT_EXEC) ) )
970             return -SOS_EPERM;
971         }
972 
973       vr = vr->next_in_as;
974     }
975 
976   /* Change the access rights of the VRs covered by [uaddr
977      .. uaddr+size[ */
978   vr = start_vr;
979   while (TRUE)
980     {
981 
982       /* Went past the end of the *circular* list => back at the
983          begining ? */
984       if (vr->start + vr->size <= uaddr)
985         /* Yes, stop now */
986         break;
987 
988       /* Went beyond the region to chprot ? */
989       if (uaddr + size <= vr->start)
990         /* Yes, stop now */
991         break;
992 
993       /* Access rights unchanged ? */
994       if (vr->access_rights == new_access_rights)
995         /* nop */
996         {
997           vr = vr->next_in_as;
998           continue;
999         }
1000 
1001       /* VR totally chprot ? */
1002       if ((vr->start >= uaddr)
1003           && (vr->start + vr->size <= uaddr + size))
1004         {
1005           /* Account for change in VRs */
1006           as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED,
1007                                              vr->size, vr->access_rights,
1008                                              new_access_rights);
1009           vr->access_rights = new_access_rights;
1010 
1011           if (vr->flags & SOS_VR_MAP_SHARED)
1012             /* For shared mappings: effectively change the access
1013                rights of the physical pages  */
1014             sos_paging_set_prot_of_interval(vr->start, vr->size,
1015                                             new_access_rights);
1016           else
1017             /* Private mapping */
1018             {
1019               /* For private mappings, we set the new access_rights
1020                  only if it becomes read-only. For private mappings
1021                  that become writable, we don't do anything: we keep
1022                  the access rights unchanged to preserve the COW
1023                  semantics */
1024               if (! (new_access_rights & SOS_VM_MAP_PROT_WRITE))
1025                 sos_paging_set_prot_of_interval(vr->start, vr->size,
1026                                                 new_access_rights);
1027             }
1028 
1029           vr = vr->next_in_as;
1030           continue;
1031         }
1032 
1033       /* chprot region lies completely INSIDE the VR */
1034       else if ( (vr->start < uaddr)
1035                 && (vr->start + vr->size > uaddr + size) )
1036         {
1037           /* VR has to be split into 3 */
1038 
1039           /* Use the preallocated VRs and copy the VR into them */
1040           SOS_ASSERT_FATAL(! used_preallocated_middle_vr);
1041           SOS_ASSERT_FATAL(! used_preallocated_right_vr);
1042           used_preallocated_middle_vr = TRUE;
1043           memcpy(preallocated_middle_vr, vr, sizeof(*vr));
1044           used_preallocated_right_vr = TRUE;
1045           memcpy(preallocated_right_vr, vr, sizeof(*vr));
1046 
1047           /* Adjust the start/size of the VRs */
1048           preallocated_middle_vr->start = uaddr;
1049           preallocated_middle_vr->size  = size;
1050           preallocated_right_vr->start  = uaddr + size;
1051           preallocated_right_vr->size   = vr->start + vr->size
1052                                             - (uaddr + size);
1053           preallocated_middle_vr->offset_in_resource
1054             += uaddr - vr->start;
1055           preallocated_right_vr->offset_in_resource
1056             += uaddr + size - vr->start;
1057           vr->size = uaddr - vr->start;
1058 
1059           /* Account for change in VRs */
1060           preallocated_middle_vr->access_rights = new_access_rights;
1061           as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED,
1062                                              size, vr->access_rights,
1063                                              new_access_rights);
1064 
1065           /* Insert the new VRs into the lists */
1066           list_insert_after_named(as->list_vr, vr, preallocated_middle_vr,
1067                                   prev_in_as, next_in_as);
1068           list_insert_after_named(as->list_vr, preallocated_middle_vr,
1069                                   preallocated_right_vr,
1070                                   prev_in_as, next_in_as);
1071 
1072           list_add_tail_named(vr->mapped_resource->list_vr,
1073                               preallocated_middle_vr,
1074                               prev_in_mapped_resource,
1075                               next_in_mapped_resource);
1076           list_add_tail_named(vr->mapped_resource->list_vr,
1077                               preallocated_right_vr,
1078                               prev_in_mapped_resource,
1079                               next_in_mapped_resource);
1080 
1081           /* Effectively change the access rights of the physical pages */
1082           if (!(preallocated_middle_vr->flags & SOS_VR_MAP_SHARED)
1083               && (new_access_rights & SOS_VM_MAP_PROT_WRITE))
1084             /* For private mappings with write access, prepare for COW */
1085             sos_paging_prepare_COW(preallocated_middle_vr->start,
1086                                    preallocated_middle_vr->size);
1087           else
1088             sos_paging_set_prot_of_interval(preallocated_middle_vr->start,
1089                                             preallocated_middle_vr->size,
1090                                             new_access_rights);
1091 
1092           if (preallocated_right_vr->ops && preallocated_right_vr->ops->ref)
1093             preallocated_right_vr->ops->ref(preallocated_right_vr);
1094           if (preallocated_middle_vr->ops && preallocated_middle_vr->ops->ref)
1095             preallocated_middle_vr->ops->ref(preallocated_middle_vr);
1096 
1097           /* No need to go further */
1098           break;
1099         }
1100 
1101       /* Chprot region only affects the START address of the VR */
1102       else if (uaddr <= vr->start)
1103         {
1104           /* Split the region into 2 */
1105           sos_uoffset_t offset_in_region = uaddr + size - vr->start;
1106 
1107           /* Use the preallocated VRs and copy the VR into them */
1108           SOS_ASSERT_FATAL(! used_preallocated_middle_vr);
1109           used_preallocated_middle_vr = TRUE;
1110           memcpy(preallocated_middle_vr, vr, sizeof(*vr));
1111 
1112           /* Adjust the start/size of the VRs */
1113           preallocated_middle_vr->start += offset_in_region;
1114           preallocated_middle_vr->size  -= offset_in_region;
1115           vr->size                       = offset_in_region;
1116           preallocated_middle_vr->offset_in_resource += offset_in_region;
1117 
1118           /* Account for change in VRs */
1119           as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED,
1120                                              vr->size,
1121                                              vr->access_rights,
1122                                              new_access_rights);
1123           vr->access_rights = new_access_rights;
1124 
1125           /* Insert the new VR into the lists */
1126           list_insert_after_named(as->list_vr, vr,
1127                                   preallocated_middle_vr,
1128                                   prev_in_as, next_in_as);
1129           list_add_tail_named(vr->mapped_resource->list_vr,
1130                               preallocated_middle_vr,
1131                               prev_in_mapped_resource,
1132                               next_in_mapped_resource);
1133 
1134           /* Effectively change the access rights of the physical pages */
1135           if (!(vr->flags & SOS_VR_MAP_SHARED)
1136               && (new_access_rights & SOS_VM_MAP_PROT_WRITE))
1137             /* For private mappings with write access, prepare for COW */
1138             sos_paging_prepare_COW(vr->start, vr->size);
1139           else
1140             sos_paging_set_prot_of_interval(vr->start, vr->size,
1141                                             new_access_rights);
1142 
1143           if (preallocated_middle_vr->ops && preallocated_middle_vr->ops->ref)
1144             preallocated_middle_vr->ops->ref(preallocated_middle_vr);
1145 
1146           /* Ne need to go further (we reached the last VR that
1147              overlaps the given interval to chprot) */
1148           break;
1149         }
1150 
1151       /* Chprot region only affects the ENDING address of the VR */
1152       else if (uaddr + size >= vr->start + vr->size)
1153         {
1154           /* Split the region into 2 */
1155           sos_uoffset_t offset_in_region = uaddr - vr->start;
1156 
1157           /* Use the preallocated VRs and copy the VR into them */
1158           SOS_ASSERT_FATAL(! used_preallocated_right_vr);
1159           used_preallocated_right_vr = TRUE;
1160           memcpy(preallocated_right_vr, vr, sizeof(*vr));
1161 
1162           /* Adjust the start/size of the VRs */
1163           preallocated_right_vr->start        += offset_in_region;
1164           preallocated_right_vr->size         -= offset_in_region;
1165           vr->size                             = offset_in_region;
1166           preallocated_right_vr->offset_in_resource += offset_in_region;
1167 
1168           /* Account for change in VRs */
1169           as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED,
1170                                              preallocated_right_vr->size,
1171                                              vr->access_rights,
1172                                              new_access_rights);
1173           preallocated_right_vr->access_rights = new_access_rights;
1174 
1175           /* Insert the new VR into the lists */
1176           list_insert_after_named(as->list_vr, vr,
1177                                   preallocated_right_vr,
1178                                   prev_in_as, next_in_as);
1179           list_add_tail_named(vr->mapped_resource->list_vr,
1180                               preallocated_right_vr,
1181                               prev_in_mapped_resource,
1182                               next_in_mapped_resource);
1183 
1184           /* Effectively change the access rights of the physical pages */
1185           if (!(preallocated_right_vr->flags & SOS_VR_MAP_SHARED)
1186               && (new_access_rights & SOS_VM_MAP_PROT_WRITE))
1187             /* For private mappings with write access, prepare for COW */
1188             sos_paging_prepare_COW(preallocated_right_vr->start,
1189                                    preallocated_right_vr->size);
1190           else
1191             sos_paging_set_prot_of_interval(preallocated_right_vr->start,
1192                                             preallocated_right_vr->size,
1193                                             new_access_rights);
1194 
1195           if (preallocated_right_vr->ops && preallocated_right_vr->ops->ref)
1196             preallocated_right_vr->ops->ref(preallocated_right_vr);
1197 
1198           vr = vr->next_in_as;
1199           continue;
1200         }
1201 
1202       sos_display_fatal_error("BUG");
1203     }
1204 
1205   if (! used_preallocated_middle_vr)
1206     sos_kmem_vmm_free((sos_vaddr_t)preallocated_middle_vr);
1207   if (! used_preallocated_right_vr)
1208     sos_kmem_vmm_free((sos_vaddr_t)preallocated_right_vr);
1209 
1210   return SOS_OK;
1211 }
1212 
1213 
1214 sos_ret_t
1215 sos_umem_vmm_resize(struct sos_umem_vmm_as * as,
1216                     sos_uaddr_t old_uaddr, sos_size_t old_size,
1217                     sos_uaddr_t *new_uaddr, sos_size_t new_size,
1218                     sos_ui32_t flags)
1219 {
1220   sos_luoffset_t new_offset_in_resource;
1221   sos_bool_t must_move_vr = FALSE;
1222   struct sos_umem_vmm_vr *vr, *prev_vr, *next_vr;
1223 
1224   /* Make sure the new uaddr is valid */
1225   if (*new_uaddr < SOS_PAGING_BASE_USER_ADDRESS)
1226     return -SOS_EINVAL;
1227   if (*new_uaddr > SOS_PAGING_TOP_USER_ADDRESS - new_size)
1228     return -SOS_EINVAL;
1229 
1230   old_uaddr = SOS_PAGE_ALIGN_INF(old_uaddr);
1231   old_size  = SOS_PAGE_ALIGN_SUP(old_size);
1232   if (! SOS_IS_PAGE_ALIGNED(*new_uaddr))
1233     return -SOS_EINVAL;
1234   if (new_size <= 0)
1235     return -SOS_EINVAL;
1236   new_size = SOS_PAGE_ALIGN_SUP(new_size);
1237   
1238   /* Lookup a VR overlapping the address range */
1239   vr = find_first_intersecting_vr(as, old_uaddr, old_size);
1240   if (! vr)
1241     return -SOS_EINVAL;
1242   
1243   /* Make sure there is exactly ONE VR overlapping the area */
1244   if ( (vr->start > old_uaddr)
1245        || (vr->start + vr->size < old_uaddr + old_size) )
1246     return -SOS_EINVAL;
1247 
1248   /* Retrieve the prev/next VR if they exist (the VR are on circular
1249      list) */
1250   prev_vr = vr->prev_in_as;
1251   if (prev_vr->start >= vr->start)
1252     prev_vr = NULL;
1253   next_vr = vr->prev_in_as;
1254   if (next_vr->start <= vr->start)
1255     next_vr = NULL;
1256 
1257   /*
1258    * Compute new offset inside the mapped resource, if any
1259    */
1260 
1261   /* Don't allow to resize if the uaddr goes beyond the 'offset 0' of
1262      the resource */
1263   if ( (*new_uaddr < vr->start)
1264        && (vr->start - *new_uaddr > vr->offset_in_resource) )
1265     return -SOS_EINVAL;
1266   
1267   /* Compute new offset in the resource (overflow-safe) */
1268   if (vr->start > *new_uaddr)
1269     new_offset_in_resource
1270       = vr->offset_in_resource
1271       - (vr->start - *new_uaddr);
1272   else
1273     new_offset_in_resource
1274       = vr->offset_in_resource
1275       + (*new_uaddr - vr->start);
1276 
1277   /* If other VRs would be affected by this resizing, then the VR must
1278      be moved */
1279   if (prev_vr && (prev_vr->start + prev_vr->size > *new_uaddr))
1280     must_move_vr |= TRUE;
1281   if (next_vr && (next_vr->start < *new_uaddr + new_size))
1282     must_move_vr |= TRUE;
1283 
1284   /* If VR would be out-of-user-space, it must be moved */
1285   if (*new_uaddr < SOS_PAGING_BASE_USER_ADDRESS)
1286     must_move_vr |= TRUE;
1287   if (*new_uaddr > SOS_PAGING_TOP_USER_ADDRESS - new_size)
1288     must_move_vr |= TRUE;
1289 
1290   /* The VR must be moved but the user forbids it */
1291   if ( must_move_vr && !(flags & SOS_VR_REMAP_MAYMOVE) )
1292     return -SOS_EINVAL;
1293 
1294   /* If the VR must be moved, we simply map the resource elsewhere and
1295      unmap the current VR */
1296   if (must_move_vr)
1297     {
1298       sos_uaddr_t uaddr, result_uaddr;
1299       sos_ret_t retval;
1300 
1301       result_uaddr = *new_uaddr;
1302       retval = sos_umem_vmm_map(as, & result_uaddr, new_size,
1303                                 vr->access_rights,
1304                                 vr->flags | INTERNAL_MAP_CALLED_FROM_MREMAP,
1305                                 vr->mapped_resource,
1306                                 new_offset_in_resource);
1307       if (SOS_OK != retval)
1308         return retval;
1309 
1310       /* Remap the physical pages at their new address */
1311       for (uaddr = vr->start ;
1312            uaddr < vr->start + vr->size ;
1313            uaddr += SOS_PAGE_SIZE)
1314         {
1315           sos_paddr_t paddr;
1316           sos_ui32_t  prot;
1317           sos_uaddr_t vaddr;
1318           
1319           if (uaddr < *new_uaddr)
1320             continue;
1321           if (uaddr > *new_uaddr + new_size)
1322             continue;
1323 
1324           /* Compute destination virtual address (should be
1325              overflow-safe) */
1326           if (vr->start >= *new_uaddr)
1327             vaddr = result_uaddr
1328               + (uaddr - vr->start)
1329               + (vr->start - *new_uaddr);
1330           else
1331             vaddr = result_uaddr
1332               + (uaddr - vr->start)
1333               - (*new_uaddr - vr->start);
1334 
1335           paddr = sos_paging_get_paddr(uaddr);
1336           if (! paddr)
1337             /* No physical page mapped at this address yet */
1338             continue;
1339 
1340           prot  = sos_paging_get_prot(uaddr);
1341           SOS_ASSERT_FATAL(prot);
1342 
1343           /* Remap it at its destination address */
1344           retval = sos_paging_map(paddr, vaddr, TRUE, prot);
1345           if (SOS_OK != retval)
1346             {
1347               sos_umem_vmm_unmap(as, result_uaddr, new_size);
1348               return retval;
1349             }
1350         }
1351 
1352       retval = sos_umem_vmm_unmap(as, vr->start, vr->size);
1353       if (SOS_OK != retval)
1354         {
1355           sos_umem_vmm_unmap(as, result_uaddr, new_size);
1356           return retval;
1357         }
1358 
1359       *new_uaddr = result_uaddr;
1360       return retval;
1361     }
1362 
1363   /* Otherwise we simply resize the VR, taking care of unmapping
1364      what's been unmapped  */
1365 
1366   if (*new_uaddr + new_size < vr->start + vr->size)
1367     sos_umem_vmm_unmap(as, *new_uaddr + new_size,
1368                        vr->start + vr->size - (*new_uaddr + new_size));
1369   else
1370     {
1371       as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED,
1372                                          *new_uaddr + new_size
1373                                            - (vr->start + vr->size),
1374                                          0, vr->access_rights);
1375       vr->size += *new_uaddr + new_size - (vr->start + vr->size);
1376     }
1377   
1378   if (*new_uaddr > vr->start)
1379     sos_umem_vmm_unmap(as, vr->start, *new_uaddr - vr->start);
1380   else
1381     {
1382       as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED,
1383                                          vr->start - *new_uaddr,
1384                                          0, vr->access_rights);
1385       vr->size  += vr->start - *new_uaddr;
1386       vr->start  = *new_uaddr;
1387       vr->offset_in_resource = new_offset_in_resource; 
1388     }
1389 
1390   SOS_ASSERT_FATAL(vr->start == *new_uaddr);
1391   SOS_ASSERT_FATAL(vr->size  == new_size);
1392   SOS_ASSERT_FATAL(vr->offset_in_resource == new_offset_in_resource);
1393 
1394   return SOS_OK;
1395 }
1396 
1397 
1398 sos_ret_t sos_umem_vmm_try_resolve_page_fault(sos_uaddr_t uaddr,
1399                                               sos_bool_t write_access,
1400                                               sos_bool_t user_access)
1401 {
1402   struct sos_process     *process = sos_thread_get_current()->process;
1403   struct sos_umem_vmm_as *as;
1404   struct sos_umem_vmm_vr *vr;
1405 
1406   if (! process)
1407     return -SOS_EFAULT;
1408 
1409   as = sos_process_get_address_space(process);
1410   if (! as)
1411     return -SOS_EFAULT;
1412 
1413   vr = find_first_intersecting_vr(as, uaddr, 1);
1414   if (! vr)
1415     return -SOS_EFAULT;
1416 
1417   /* Write on a read-only VR */
1418   if (write_access && !(vr->access_rights & SOS_VM_MAP_PROT_WRITE))
1419     return -SOS_EFAULT;
1420 
1421   /* Write on a COW VR */
1422   if (write_access && !(vr->flags & SOS_VR_MAP_SHARED))
1423     {
1424       if (SOS_OK == sos_paging_try_resolve_COW(uaddr))
1425         {
1426           as->pgflt_cow ++;
1427           return SOS_OK;
1428         }
1429     }
1430 
1431   /* Ask the underlying resource to resolve the page fault */
1432   if (SOS_OK != vr->ops->page_in(vr, uaddr, write_access))
1433     {
1434       as->pgflt_invalid ++;
1435       return -SOS_EFAULT;
1436     }
1437 
1438   as->phys_total += SOS_PAGE_SIZE;
1439   as->pgflt_page_in ++;
1440 
1441   /* For a private mapping, keep the mapping read-only */
1442   if (!(vr->flags & SOS_VR_MAP_SHARED))
1443     {
1444       sos_paging_prepare_COW(SOS_PAGE_ALIGN_INF(uaddr),
1445                              SOS_PAGE_SIZE);
1446     }
1447 
1448   return SOS_OK;
1449 }
1450 
1451 
1452 sos_ret_t
1453 sos_umem_vmm_init_heap(struct sos_umem_vmm_as * as,
1454                        sos_uaddr_t heap_start)
1455 {
1456   SOS_ASSERT_FATAL(! as->heap_start);
1457 
1458   as->heap_start = heap_start;
1459   as->heap_size  = 0;
1460   return SOS_OK;
1461 }
1462 
1463 
1464 sos_uaddr_t
1465 sos_umem_vmm_brk(struct sos_umem_vmm_as * as,
1466                  sos_uaddr_t new_top_uaddr)
1467 {
1468   sos_uaddr_t new_start;
1469   sos_size_t  new_size;
1470   SOS_ASSERT_FATAL(as->heap_start);
1471 
1472   if (! new_top_uaddr)
1473     return as->heap_start + as->heap_size;
1474 
1475   if (new_top_uaddr == as->heap_start + as->heap_size)
1476     return as->heap_start + as->heap_size;
1477  
1478   if (new_top_uaddr < as->heap_start)
1479     return (sos_uaddr_t)NULL;
1480 
1481   new_top_uaddr = SOS_PAGE_ALIGN_SUP(new_top_uaddr);
1482   new_start = as->heap_start;
1483   new_size  = new_top_uaddr - as->heap_start;
1484 
1485   /* First call to brk: we must map /dev/zero */
1486   if (! as->heap_size)
1487     {
1488       if (SOS_OK != sos_dev_zero_map(as, & as->heap_start,
1489                                      new_size,
1490                                      SOS_VM_MAP_PROT_READ
1491                                      | SOS_VM_MAP_PROT_WRITE,
1492                                      0 /* private non-fixed */))
1493         return (sos_uaddr_t)NULL;
1494 
1495       as->heap_size = new_size;
1496       return as->heap_start + as->heap_size;
1497     }
1498 
1499   /* Otherwise we just have to unmap or resize the region */
1500   if (new_size <= 0)
1501     {
1502       if (SOS_OK != sos_umem_vmm_unmap(as,
1503                                        as->heap_start, as->heap_size))
1504         return (sos_uaddr_t)NULL;
1505     }
1506   else
1507     {
1508       if (SOS_OK != sos_umem_vmm_resize(as,
1509                                         as->heap_start, as->heap_size,
1510                                         & new_start, new_size,
1511                                         0))
1512         return (sos_uaddr_t)NULL;
1513     }
1514 
1515   SOS_ASSERT_FATAL(new_start == as->heap_start);
1516   as->heap_size = new_size;
1517   return new_top_uaddr;
1518 }
1519 
1520 
1521 static struct sos_umem_vmm_vr *
1522 find_enclosing_or_next_vr(struct sos_umem_vmm_as * as,
1523                           sos_uaddr_t uaddr)
1524 {
1525   struct sos_umem_vmm_vr *vr;
1526   int nb_vr;
1527 
1528   if (uaddr < SOS_PAGING_BASE_USER_ADDRESS)
1529     return NULL;
1530   if (uaddr > SOS_PAGING_TOP_USER_ADDRESS)
1531     return NULL;
1532 
1533   list_foreach_named(as->list_vr, vr, nb_vr, prev_in_as, next_in_as)
1534     {
1535       /* Equivalent to "if (uaddr < vr->start + vr->size)" but more
1536          robust (resilient to integer overflows) */
1537       if (uaddr <= vr->start + (vr->size - 1))
1538         return vr;
1539     }
1540 
1541   return NULL;
1542 }
1543 
1544 
1545 static struct sos_umem_vmm_vr *
1546 find_first_intersecting_vr(struct sos_umem_vmm_as * as,
1547                            sos_uaddr_t start_uaddr, sos_size_t size)
1548 {
1549   struct sos_umem_vmm_vr * vr;
1550   vr = find_enclosing_or_next_vr(as, start_uaddr);
1551   if (! vr)
1552     return NULL;
1553 
1554   if (start_uaddr + size <= vr->start)
1555     return NULL;
1556 
1557   return vr;
1558 }
1559 
1560 
static sos_uaddr_t
find_first_free_interval(struct sos_umem_vmm_as * as,
                         sos_uaddr_t hint_uaddr, sos_size_t size)
{
  /* Find the start of a free interval of 'size' bytes in user space,
     scanning the (circular, address-ordered) VR list starting at
     hint_uaddr and wrapping around once. Returns the address found,
     or NULL when no hole is large enough. */
  struct sos_umem_vmm_vr * initial_vr, * vr;

  /* Clamp the hint into user space */
  if (hint_uaddr < SOS_PAGING_BASE_USER_ADDRESS)
    hint_uaddr = SOS_PAGING_BASE_USER_ADDRESS;

  /* Hint so high that [hint .. hint+size[ cannot fit in user space */
  if (hint_uaddr > SOS_PAGING_TOP_USER_ADDRESS - size + 1)
    return (sos_uaddr_t)NULL;

  initial_vr = vr = find_enclosing_or_next_vr(as, hint_uaddr);
  if (! vr)
    /* Great, there is nothing after ! */
    return hint_uaddr;

  /* Scan the remaining VRs in the list */
  do
    {
      /* Is there enough space /before/ that VR ? */
      if (hint_uaddr + size <= vr->start)
        /* Great ! */
        return hint_uaddr;

      /* Is there any VR /after/ this one, or do we have to wrap back
         at the begining of the user space ? */
      if (vr->next_in_as->start >= hint_uaddr)
        /* Ok, the next VR is really after us */
        hint_uaddr = vr->start + vr->size;
      else
        {
          /* No: wrapping up */

          /* Is there any space before the end of user space ? */
          if (hint_uaddr <= SOS_PAGING_TOP_USER_ADDRESS - size)
            return hint_uaddr;

          /* Restart the search from the bottom of user space */
          hint_uaddr = SOS_PAGING_BASE_USER_ADDRESS;
        }

      /* Prepare to look after this VR */
      vr = vr->next_in_as;
    }
  while (vr != initial_vr);

  /* Reached the end of the list and did not find anything ?... Look
     at the space after the last VR */

  return (sos_uaddr_t)NULL;
}
1612 
1613 
1614 static void
1615 as_account_change_of_vr_protection(struct sos_umem_vmm_as * as,
1616                                    sos_bool_t is_shared,
1617                                    sos_size_t size,
1618                                    sos_ui32_t prev_access_rights,
1619                                    sos_ui32_t new_access_rights)
1620 {
1621   if (prev_access_rights == new_access_rights)
1622     return;
1623 
1624 #define _UPDATE_VMSTAT(field,is_increment) \
1625   ({ if (is_increment > 0) \
1626        as->field += size; \
1627      else \
1628        { SOS_ASSERT_FATAL(as->field >= size); as->field -= size; } })
1629 #define UPDATE_VMSTAT(field,is_increment) \
1630   ({ if (is_shared) _UPDATE_VMSTAT(vm_shrd.field, is_increment); \
1631      _UPDATE_VMSTAT(vm_total.field, is_increment); \
1632      SOS_ASSERT_FATAL(as->vm_total.field >= as->vm_shrd.field); })
1633 
1634   if ( (new_access_rights & SOS_VM_MAP_PROT_WRITE)
1635        && !(prev_access_rights & SOS_VM_MAP_PROT_WRITE))
1636     {
1637       UPDATE_VMSTAT(rw, +1);
1638       if (prev_access_rights & SOS_VM_MAP_PROT_READ)
1639         UPDATE_VMSTAT(ro, -1);
1640     }
1641   else if ( !(new_access_rights & SOS_VM_MAP_PROT_WRITE)
1642             && (prev_access_rights & SOS_VM_MAP_PROT_WRITE))
1643     {
1644       if (new_access_rights & SOS_VM_MAP_PROT_READ)
1645         UPDATE_VMSTAT(ro, +1);
1646       UPDATE_VMSTAT(rw, -1);
1647     }
1648   else if (new_access_rights & SOS_VM_MAP_PROT_READ)
1649     UPDATE_VMSTAT(ro, +1);
1650   else if (!(new_access_rights & SOS_VM_MAP_PROT_READ))
1651     UPDATE_VMSTAT(ro, -1);
1652 
1653   if ( (new_access_rights & SOS_VM_MAP_PROT_EXEC)
1654        && !(prev_access_rights & SOS_VM_MAP_PROT_EXEC))
1655     {
1656       UPDATE_VMSTAT(code, +1);
1657     }
1658   else if ( !(new_access_rights & SOS_VM_MAP_PROT_EXEC)
1659             && (prev_access_rights & SOS_VM_MAP_PROT_EXEC))
1660     {
1661       UPDATE_VMSTAT(code, -1);
1662     }
1663 
1664   if (new_access_rights && !prev_access_rights)
1665     UPDATE_VMSTAT(overall, +1);
1666   else if (!new_access_rights && prev_access_rights)
1667     UPDATE_VMSTAT(overall, -1);
1668 
1669 }

source navigation ] diff markup ] identifier search ] general search ]