SimpleOS

LXR

Navigation



Site hébergé par : enix

The LXR Cross Referencer for SOS

source navigation ]
diff markup ]
identifier search ]
general search ]
 
 
Article:1 ] [ 2 ] [ 3 ] [ 4 ] [ 5 ] [ 6 ] [ 6.5 ] [ 7 ] [ 7.5 ] [ 8 ] [ 9 ] [ 9.5 ]

001 /* Copyright (C) 2005,2006 David Decotigny
002 
003    This program is free software; you can redistribute it and/or
004    modify it under the terms of the GNU General Public License
005    as published by the Free Software Foundation; either version 2
006    of the License, or (at your option) any later version.
007    
008    This program is distributed in the hope that it will be useful,
009    but WITHOUT ANY WARRANTY; without even the implied warranty of
010    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
011    GNU General Public License for more details.
012    
013    You should have received a copy of the GNU General Public License
014    along with this program; if not, write to the Free Software
015    Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
016    USA. 
017 */
018 
019 #include <sos/assert.h>
020 #include <sos/list.h>
021 #include <sos/physmem.h>
022 #include <sos/kmem_slab.h>
023 #include <drivers/bochs.h>
024 #include <hwcore/mm_context.h>
025 #include <hwcore/paging.h>
026 #include <drivers/zero.h>
027 
028 #include "umem_vmm.h"
029 
030 
/**
 * An address space (AS): the user-space virtual memory layout of one
 * process. Gathers the MMU configuration, the circular list of
 * virtual regions (VR), the heap location and bookkeeping counters.
 */
struct sos_umem_vmm_as
{
  /** The process that owns this address space */
  struct sos_process     * process;

  /** The MMU configuration of this address space */
  struct sos_mm_context  * mm_context;

  /** The list of VRs in this address space (circular, linked through
      the VRs' prev_in_as/next_in_as fields) */
  struct sos_umem_vmm_vr * list_vr;

  /** Heap location */
  sos_uaddr_t heap_start;
  sos_size_t  heap_size; /**< Updated by sos_umem_vmm_brk() */

  /* Memory usage statistics */
  sos_size_t phys_total; /* shared + private */
  struct vm_usage
  {
    sos_size_t overall;
    sos_size_t ro, rw, code /* all: non readable, read and read/write */;
  } vm_total, vm_shrd; /* total VM vs shared-only VM, kept up to date
                          by as_account_change_of_vr_protection() */

  /* Page fault counters */
  sos_size_t pgflt_cow;     /* copy-on-write faults resolved */
  sos_size_t pgflt_page_in; /* demand-paging faults resolved */
  sos_size_t pgflt_invalid; /* faults with no valid resolution */
};
059 
060 
/**
 * A virtual region (VR): one contiguous, page-aligned interval of
 * user addresses mapping (part of) a resource with uniform access
 * rights and flags.
 */
struct sos_umem_vmm_vr
{
  /** The address space owning this VR */
  struct sos_umem_vmm_as *address_space;

  /** The location of the mapping in user space */
  sos_uaddr_t start;
  sos_size_t  size;

  /** What accesses are allowed (read, write, exec): @see
      SOS_VM_MAP_PROT_* flags in hwcore/paging.h */
  sos_ui32_t  access_rights;

  /** Flags of the VR. Allowed flags:
   *  - SOS_VR_MAP_SHARED
   */
  sos_ui32_t  flags;

  /**
   * The callbacks for the VR called along map/unmapping of the
   * resource
   */
  struct sos_umem_vmm_vr_ops *ops;

  /** Description of the resource being mapped, if any */
  struct sos_umem_vmm_mapped_resource *mapped_resource;
  sos_luoffset_t offset_in_resource; /**< where in the resource the
                                          mapping starts */

  /** The VRs of an AS are linked together and are accessible by way
      of as->list_vr (circular list) */
  struct sos_umem_vmm_vr *prev_in_as, *next_in_as;

  /** The VRs mapping a given resource are linked together and are
      accessible by way of mapped_resource->list_vr */
  struct sos_umem_vmm_vr *prev_in_mapped_resource, *next_in_mapped_resource;
};
097 
098 
/*
 * We use special slab caches to allocate AS and VR data structures
 * (created in sos_umem_vmm_subsystem_setup())
 */
static struct sos_kslab_cache * cache_of_as; /**< slab of struct sos_umem_vmm_as */
static struct sos_kslab_cache * cache_of_vr; /**< slab of struct sos_umem_vmm_vr */
104 
105 
/**
 * Temporary function to debug: list the VRs of the given AS.
 *
 * Dumps the AS statistics and every VR (address range, backing
 * resource, rights, flags) to the Bochs debug console. Debug-only:
 * output format is not a stable interface.
 *
 * NOTE(review): the %x/%d specifiers assume sos_size_t and the
 * counters are int-sized, and pointers are printed through an
 * (unsigned) cast — fine on ia32, TODO confirm for other targets.
 */
void sos_dump_as(const struct sos_umem_vmm_as * as, const char *str)
{
  struct sos_umem_vmm_vr *vr;
  int nb_vr;

  sos_bochs_printf("AS %p - %s:\n", as, str);
  sos_bochs_printf("   physical mem: %x\n",
                   as->phys_total);
  sos_bochs_printf("   VM (all/ro+rw/exec) tot:%x/%x+%x/%x shrd:%x/%x+%x/%x\n",
                   as->vm_total.overall,
                   as->vm_total.ro, as->vm_total.rw, as->vm_total.code,
                   as->vm_shrd.overall,
                   as->vm_shrd.ro, as->vm_shrd.rw, as->vm_shrd.code);
  sos_bochs_printf("   pgflt cow=%d pgin=%d inv=%d\n",
                   as->pgflt_cow, as->pgflt_page_in, as->pgflt_invalid);
  /* Walk the circular VR list attached to the AS */
  list_foreach_named(as->list_vr, vr, nb_vr, prev_in_as, next_in_as)
    {
      sos_bochs_printf("  VR[%d]=%x: [%x,%x[ (sz=%x) mr=(%x)+%llx %c%c%c fl=%x\n",
                       nb_vr, (unsigned)vr,
                       vr->start, vr->start + vr->size, vr->size,
                       (unsigned)vr->mapped_resource,
                       vr->offset_in_resource,
                       (vr->access_rights & SOS_VM_MAP_PROT_READ)?'r':'-',
                       (vr->access_rights & SOS_VM_MAP_PROT_WRITE)?'w':'-',
                       (vr->access_rights & SOS_VM_MAP_PROT_EXEC)?'x':'-',
                       (unsigned)vr->flags);
    }
  sos_bochs_printf("FIN (%s)\n", str);
}
136 
137 
/**
 * Physical address of THE page (full of 0s) used for anonymous
 * mappings. Allocated once in sos_umem_vmm_subsystem_setup().
 */
sos_paddr_t sos_zero_physpage = 0 /* Initial value prior to allocation */;
/** Kernel-space virtual address at which the zero page is mapped
    (obtained via sos_kmem_vmm_alloc in subsystem setup) */
sos_vaddr_t sos_zero_kernelpage = 0 /* Initial value prior to allocation */;
144 
145 
146 /*
147  * Helper functions defined at the bottom of the file
148  */
149 
150 /**
151  * Helper function to retrieve the first VR to have a vr->end >= uaddr
152  */
153 static struct sos_umem_vmm_vr *
154 find_enclosing_or_next_vr(struct sos_umem_vmm_as * as,
155                           sos_uaddr_t uaddr);
156 
157 
158 /**
159  * Helper function to retrieve the first VR that overlaps the given
160  * interval, if any
161  */
162 static struct sos_umem_vmm_vr *
163 find_first_intersecting_vr(struct sos_umem_vmm_as * as,
164                            sos_uaddr_t start_uaddr, sos_size_t size);
165 
166 
167 /**
168  * Helper function to find first address where there is enough
169  * space. Begin to look for such an interval at or after the given
170  * address
171  *
172  * @param hint_addr The address where to begin the scan, or NULL
173  */
174 static sos_uaddr_t
175 find_first_free_interval(struct sos_umem_vmm_as * as,
176                          sos_uaddr_t hint_uaddr, sos_size_t size);
177 
178 
179 /** Called each time a VR of the AS changes. Don't cope with any
180     underlying physcal mapping/unmapping, COW, etc... */
181 static void
182 as_account_change_of_vr_protection(struct sos_umem_vmm_as * as,
183                                    sos_bool_t is_shared,
184                                    sos_size_t size,
185                                    sos_ui32_t prev_access_rights,
186                                    sos_ui32_t new_access_rights);
187 
188 
189 sos_ret_t sos_umem_vmm_subsystem_setup()
190 {
191   /* Allocate a new kernel physical page mapped into kernel space and
192      reset it with 0s */
193   sos_zero_kernelpage = sos_kmem_vmm_alloc(1, SOS_KMEM_VMM_MAP);
194   if (sos_zero_kernelpage == (sos_vaddr_t)NULL)
195     return -SOS_ENOMEM;
196   memset((void*)sos_zero_kernelpage, 0x0, SOS_PAGE_SIZE);
197   
198   /* Keep a reference to the underlying pphysical page... */
199   sos_zero_physpage = sos_paging_get_paddr(sos_zero_kernelpage);
200   SOS_ASSERT_FATAL(NULL != (void*)sos_zero_physpage);
201   sos_physmem_ref_physpage_at(sos_zero_physpage);
202 
203   /* Allocate the VR/AS caches */
204   cache_of_as
205     = sos_kmem_cache_create("Address space structures",
206                             sizeof(struct sos_umem_vmm_as),
207                             1, 0,
208                             SOS_KSLAB_CREATE_MAP
209                             | SOS_KSLAB_CREATE_ZERO);
210   if (! cache_of_as)
211     {
212       sos_physmem_unref_physpage(sos_zero_physpage);
213       return -SOS_ENOMEM;
214     }
215 
216   cache_of_vr
217     = sos_kmem_cache_create("Virtual Region structures",
218                             sizeof(struct sos_umem_vmm_vr),
219                             1, 0,
220                             SOS_KSLAB_CREATE_MAP
221                             | SOS_KSLAB_CREATE_ZERO);
222   if (! cache_of_vr)
223     {
224       sos_physmem_unref_physpage(sos_zero_physpage);
225       sos_kmem_cache_destroy(cache_of_as);
226       return -SOS_ENOMEM;
227     }
228 
229   return SOS_OK;
230 }
231 
232 
233 struct sos_umem_vmm_as *
234 sos_umem_vmm_create_empty_as(struct sos_process *owner)
235 {
236   struct sos_umem_vmm_as * as
237     = (struct sos_umem_vmm_as *) sos_kmem_cache_alloc(cache_of_as, 0);
238   if (! as)
239     return NULL;
240 
241   as->mm_context = sos_mm_context_create();
242   if (NULL == as->mm_context)
243     {
244       /* Error */
245       sos_kmem_cache_free((sos_vaddr_t)as);
246       return NULL;
247     }
248 
249   as->process = owner;
250   return as;
251 }
252 
253 
/**
 * Duplicate the current thread's address space into a new AS owned by
 * @owner (fork-style duplication with copy-on-write).
 *
 * Every VR of the current AS is cloned; private writable mappings are
 * first switched to read-only so that later writes fault and trigger
 * COW. Finally the MMU configuration itself is duplicated.
 *
 * @return the new AS, or NULL on allocation failure (everything
 *         built so far is torn down again)
 */
struct sos_umem_vmm_as *
sos_umem_vmm_duplicate_current_thread_as(struct sos_process *owner)
{
  __label__ undo_creation; /* gcc local-label extension: error-unwinding target */
  struct sos_umem_vmm_as * my_as;
  struct sos_umem_vmm_vr * model_vr;
  int nb_vr;

  /* NOTE(review): if we jump to undo_creation before mm_context is
     assigned below, sos_umem_vmm_delete_as() reads new_as->mm_context;
     this relies on cache_of_as being created with SOS_KSLAB_CREATE_ZERO
     so the field is NULL — TODO confirm */
  struct sos_umem_vmm_as * new_as
    = (struct sos_umem_vmm_as *) sos_kmem_cache_alloc(cache_of_as, 0);
  if (! new_as)
    return NULL;

  my_as = sos_process_get_address_space(sos_thread_get_current()->process);
  new_as->process = owner;
  list_init_named(new_as->list_vr, prev_in_as, next_in_as);

  /*
   * Switch to the current threads' mm_context, as duplicating it implies
   * being able to configure some of its mappings as read-only (for
   * COW)
   */
  SOS_ASSERT_FATAL(SOS_OK
                   == sos_thread_prepare_user_space_access(my_as,
                                                           (sos_vaddr_t)
                                                             NULL));

  /* Copy the virtual regions */
  list_foreach_named(my_as->list_vr, model_vr, nb_vr, prev_in_as, next_in_as)
    {
      struct sos_umem_vmm_vr * vr;

      /* Prepare COW on the read/write private mappings */
      if ( !(model_vr->flags & SOS_VR_MAP_SHARED)
           && (model_vr->access_rights & SOS_VM_MAP_PROT_WRITE) )
        {
          /* Mark the underlying physical pages (if any) as
             read-only, so that the first write in either AS faults */
          SOS_ASSERT_FATAL(SOS_OK
                           == sos_paging_prepare_COW(model_vr->start,
                                                     model_vr->size));
        }

      /* Allocate a new virtual region and copy the 'model' into it */
      vr = (struct sos_umem_vmm_vr *) sos_kmem_cache_alloc(cache_of_vr, 0);
      if (! vr)
        goto undo_creation;
      memcpy(vr, model_vr, sizeof(*vr));
      vr->address_space = new_as;

      /* Signal the "new" mapping to the underlying VR mapper */
      if (vr->ops && vr->ops->ref)
        vr->ops->ref(vr);

      /* Insert the new VR into the new AS */
      list_add_tail_named(new_as->list_vr, vr, prev_in_as, next_in_as);

      /* Insert the new VR into the list of mappings of the resource */
      list_add_tail_named(model_vr->mapped_resource->list_vr, vr,
                          prev_in_mapped_resource,
                          next_in_mapped_resource);
    }

  /* Now copy the current MMU configuration */
  new_as->mm_context = sos_mm_context_duplicate(my_as->mm_context);
  if (NULL == new_as->mm_context)
    goto undo_creation;

  /* Correct behavior: copy heap bounds and accounting statistics */
  new_as->heap_start = my_as->heap_start;
  new_as->heap_size  = my_as->heap_size;
  new_as->phys_total = my_as->phys_total;
  memcpy(& new_as->vm_total, & my_as->vm_total, sizeof(struct vm_usage));
  memcpy(& new_as->vm_shrd, & my_as->vm_shrd, sizeof(struct vm_usage));
  SOS_ASSERT_FATAL(SOS_OK == sos_thread_end_user_space_access());
  return new_as;

  /* Handle erroneous behavior: drop the partially-built AS */
 undo_creation:
  SOS_ASSERT_FATAL(SOS_OK == sos_thread_end_user_space_access());
  sos_umem_vmm_delete_as(new_as);
  return NULL;
}
337 
338 
339 sos_ret_t
340 sos_umem_vmm_delete_as(struct sos_umem_vmm_as * as)
341 {
342   while(! list_is_empty_named(as->list_vr, prev_in_as, next_in_as))
343     {
344       struct sos_umem_vmm_vr * vr;
345       vr = list_get_head_named(as->list_vr, prev_in_as, next_in_as);
346 
347       /* Remove the vr from the lists */
348       list_pop_head_named(as->list_vr, prev_in_as, next_in_as);
349       list_delete_named(vr->mapped_resource->list_vr, vr,
350                         prev_in_mapped_resource,
351                         next_in_mapped_resource);
352 
353       /* Signal to the underlying VR mapper that the mapping is
354          suppressed */
355       if (vr->ops)
356         {
357           if (vr->ops->unmap)
358             vr->ops->unmap(vr, vr->start, vr->size);
359           if (vr->ops->unref)
360             vr->ops->unref(vr);
361         }
362 
363       sos_kmem_cache_free((sos_vaddr_t)vr);
364     }
365   
366   /* Release MMU configuration */
367   if (as->mm_context)
368     sos_mm_context_unref(as->mm_context);
369 
370   /* Now unallocate main address space construct */
371   sos_kmem_cache_free((sos_vaddr_t)as);
372 
373   return SOS_OK;
374 }
375 
376 
377 struct sos_process *
378 sos_umem_vmm_get_process(struct sos_umem_vmm_as * as)
379 {
380   return as->process;
381 }
382 
383 
384 struct sos_mm_context *
385 sos_umem_vmm_get_mm_context(struct sos_umem_vmm_as * as)
386 {
387   return as->mm_context;
388 }
389 
390 
391 struct sos_umem_vmm_vr *
392 sos_umem_vmm_get_vr_at_address(struct sos_umem_vmm_as * as,
393                                sos_uaddr_t uaddr)
394 {
395   struct sos_umem_vmm_vr * vr;
396   vr = find_enclosing_or_next_vr(as, uaddr);
397   if (! vr)
398     return NULL;
399 
400   /* Ok uaddr <= vr->end, but do we have uaddr > vr->start ? */
401   if (uaddr < vr->start)
402     return NULL;
403 
404   return vr;
405 }
406 
407 
408 struct sos_umem_vmm_as *
409 sos_umem_vmm_get_as_of_vr(struct sos_umem_vmm_vr * vr)
410 {
411   return vr->address_space;
412 }
413 
414 
415 struct sos_umem_vmm_vr_ops *
416 sos_umem_vmm_get_ops_of_vr(struct sos_umem_vmm_vr * vr)
417 {
418   return vr->ops;
419 }
420 
421 
422 sos_ui32_t sos_umem_vmm_get_prot_of_vr(struct sos_umem_vmm_vr * vr)
423 {
424   return vr->access_rights;
425 }
426 
427 
428 sos_ui32_t sos_umem_vmm_get_flags_of_vr(struct sos_umem_vmm_vr * vr)
429 {
430   return vr->flags;
431 }
432 
433 
434 struct sos_umem_vmm_mapped_resource *
435 sos_umem_vmm_get_mapped_resource_of_vr(struct sos_umem_vmm_vr * vr)
436 {
437   return vr->mapped_resource;
438 }
439 
440 
441 sos_uaddr_t sos_umem_vmm_get_start_of_vr(struct sos_umem_vmm_vr * vr)
442 {
443   return vr->start;
444 }
445 
446 
447 sos_size_t sos_umem_vmm_get_size_of_vr(struct sos_umem_vmm_vr * vr)
448 {
449   return vr->size;
450 }
451 
452 
453 sos_luoffset_t sos_umem_vmm_get_offset_in_resource(struct sos_umem_vmm_vr * vr)
454 {
455   return vr->offset_in_resource;
456 }
457 
458 
459 sos_ret_t
460 sos_umem_vmm_set_ops_of_vr(struct sos_umem_vmm_vr * vr,
461                            struct sos_umem_vmm_vr_ops * ops)
462 {
463   /* Don't allow to overwrite any preceding VR ops */
464   SOS_ASSERT_FATAL(NULL == vr->ops);
465 
466   vr->ops = ops;
467   return SOS_OK;
468 }
469 
470 
/**
 * When resize asks to map the resource elsewhere, make sure not to
 * overwrite the offset_in_resource field
 */
#define INTERNAL_MAP_CALLED_FROM_MREMAP (1 << 8)

/**
 * Map (part of) @resource into the address space @as.
 *
 * @param as       the target address space
 * @param uaddr    in: the hint/imposed user address (page aligned);
 *                 out: the address actually used (NULL on failure)
 * @param size     length of the mapping (rounded up to a page)
 * @param access_rights  SOS_VM_MAP_PROT_* rights requested
 * @param flags    SOS_VR_MAP_SHARED and/or SOS_VR_MAP_FIXED (plus the
 *                 internal mremap marker)
 * @param resource the resource to map; its mmap method is mandatory
 * @param offset_in_resource  where in the resource the mapping starts
 *                 (ignored/overwritten for anonymous mappings)
 *
 * @return SOS_OK, or -SOS_EINVAL / -SOS_EPERM / -SOS_ENOMEM
 */
sos_ret_t
sos_umem_vmm_map(struct sos_umem_vmm_as * as,
                 sos_uaddr_t * /*in/out*/uaddr, sos_size_t size,
                 sos_ui32_t access_rights,
                 sos_ui32_t flags,
                 struct sos_umem_vmm_mapped_resource * resource,
                 sos_luoffset_t offset_in_resource)
{
  __label__ return_mmap; /* gcc local-label extension: common exit path */
  sos_uaddr_t hint_uaddr;
  struct sos_umem_vmm_vr *prev_vr, *next_vr, *vr, *preallocated_vr;
  sos_bool_t merge_with_preceding, merge_with_next, used_preallocated_vr;
  sos_bool_t internal_map_called_from_mremap
    = (flags & INTERNAL_MAP_CALLED_FROM_MREMAP);

  sos_ret_t retval     = SOS_OK;
  used_preallocated_vr = FALSE;
  hint_uaddr           = *uaddr;

  /* Default mapping address is NULL */
  *uaddr = (sos_vaddr_t)NULL;

  if (! resource)
    return -SOS_EINVAL;
  if (! resource->mmap)
    return -SOS_EPERM;

  if (! SOS_IS_PAGE_ALIGNED(hint_uaddr))
    return -SOS_EINVAL;

  /* NOTE(review): if sos_size_t is unsigned this test only rejects
     size == 0 — confirm that is the intent */
  if (size <= 0)
    return -SOS_EINVAL;
  size = SOS_PAGE_ALIGN_SUP(size);

  if (flags & SOS_VR_MAP_SHARED)
    {
      /* Make sure the mapped resource allows the required protection flags */
      if ( ( (access_rights & SOS_VM_MAP_PROT_READ)
             && !(resource->allowed_access_rights & SOS_VM_MAP_PROT_READ) )
           || ( (access_rights & SOS_VM_MAP_PROT_WRITE)
                && !(resource->allowed_access_rights & SOS_VM_MAP_PROT_WRITE) )
           || ( (access_rights & SOS_VM_MAP_PROT_EXEC)
                && !(resource->allowed_access_rights & SOS_VM_MAP_PROT_EXEC)) )
        return -SOS_EPERM;
    }

  /* Sanity checks over the offset_in_resource parameter */
  if ( !internal_map_called_from_mremap
       && ( resource->flags & SOS_MAPPED_RESOURCE_ANONYMOUS ) )
    /* Initial offset ignored for anonymous mappings */
    {
      /* Nothing to check */
    }

  /* Make sure that the offset in resource won't overflow (wrap-around
     detection on offset_in_resource + size) */
  else if (offset_in_resource + size <= offset_in_resource)
    return -SOS_EINVAL;

  /* Filter out unsupported flags */
  access_rights &= (SOS_VM_MAP_PROT_READ
                    | SOS_VM_MAP_PROT_WRITE
                    | SOS_VM_MAP_PROT_EXEC);
  flags &= (SOS_VR_MAP_SHARED
            | SOS_VR_MAP_FIXED);

  /* Pre-allocate a new VR. Because once we found a valid slot inside
     the VR list, we don't want the list to be altered by another
     process (allocation may block; list manipulation must not) */
  preallocated_vr
    = (struct sos_umem_vmm_vr *)sos_kmem_cache_alloc(cache_of_vr, 0);
  if (! preallocated_vr)
    return -SOS_ENOMEM;

  /* Compute the user address of the new mapping */
  if (flags & SOS_VR_MAP_FIXED)
    {
      /*
       * The address is imposed
       */

      /* Make sure the hint_uaddr hint is valid */
      if (hint_uaddr < SOS_PAGING_BASE_USER_ADDRESS)
        { retval = -SOS_EINVAL; goto return_mmap; }
      if (hint_uaddr > SOS_PAGING_TOP_USER_ADDRESS - size)
        { retval = -SOS_EINVAL; goto return_mmap; }

      /* Unmap any overlapped VR */
      retval = sos_umem_vmm_unmap(as, hint_uaddr, size);
      if (SOS_OK != retval)
        { goto return_mmap; }
    }
  else
    {
      /*
       * A free range has to be determined
       */

      /* Find a suitable free VR */
      hint_uaddr = find_first_free_interval(as, hint_uaddr, size);
      if (! hint_uaddr)
        { retval = -SOS_ENOMEM; goto return_mmap; }
    }

  /* For anonymous resource mappings, set the initial
     offset_in_resource to the initial virtual start address in user
     space */
  if ( !internal_map_called_from_mremap
       && (resource->flags & SOS_MAPPED_RESOURCE_ANONYMOUS ) )
    offset_in_resource = hint_uaddr;

  /* Lookup next and previous VR, if any. This will allow us to merge
     the regions, when possible */
  next_vr = find_enclosing_or_next_vr(as, hint_uaddr);
  if (next_vr)
    {
      /* Find previous VR, if any */
      prev_vr = next_vr->prev_in_as;
      /* The list is circular: it may happen that we looped over the
         tail of the list (ie the list is a singleton) */
      if (prev_vr->start > hint_uaddr)
        prev_vr = NULL; /* No preceding VR */
    }
  else
    {
      /* Otherwise we went beyond the last VR */
      prev_vr = list_get_tail_named(as->list_vr, prev_in_as, next_in_as);
    }

  /* Merge with preceding VR ? (same resource, contiguous both in user
     space and in the resource, identical flags/rights) */
  merge_with_preceding
    = ( (NULL != prev_vr)
        && (prev_vr->mapped_resource == resource)
        && (prev_vr->offset_in_resource + prev_vr->size == offset_in_resource)
        && (prev_vr->start + prev_vr->size == hint_uaddr)
        && (prev_vr->flags == flags)
        && (prev_vr->access_rights == access_rights) );

  /* Merge with next VR ? (same criteria, on the other side) */
  merge_with_next
    = ( (NULL != next_vr)
        && (next_vr->mapped_resource == resource)
        && (offset_in_resource + size == next_vr->offset_in_resource)
        && (hint_uaddr + size == next_vr->start)
        && (next_vr->flags == flags)
        && (next_vr->access_rights == access_rights) );

  if (merge_with_preceding && merge_with_next)
    {
      /* Widen the prev_vr VR to encompass both the new VR and the next_vr */
      vr = prev_vr;
      vr->size += size + next_vr->size;
      
      /* Remove the next_vr VR */
      list_delete_named(as->list_vr, next_vr, prev_in_as, next_in_as);
      list_delete_named(next_vr->mapped_resource->list_vr, next_vr,
                        prev_in_mapped_resource, next_in_mapped_resource);

      if (next_vr->ops && next_vr->ops->unref)
        next_vr->ops->unref(next_vr);

      /* NOTE(review): next_vr was allocated from cache_of_vr but is
         released with sos_kmem_vmm_free, not sos_kmem_cache_free (cf.
         sos_umem_vmm_delete_as) — confirm the two are interchangeable
         here */
      sos_kmem_vmm_free((sos_vaddr_t) next_vr);
    }
  else if (merge_with_preceding)
    {
      /* Widen the prev_vr VR to encompass the new VR */
      vr = prev_vr;
      vr->size += size;
    }
  else if (merge_with_next)
    {
      /* Widen the next_vr VR to encompass the new VR */
      vr = next_vr;
      vr->start -= size;
      vr->size  += size;
    }
  else
    {
      /* Allocate a brand new VR and insert it into the list */

      vr = preallocated_vr;
      used_preallocated_vr = TRUE;

      vr->start              = hint_uaddr;
      vr->size               = size;
      vr->access_rights      = access_rights;
      vr->flags              = flags;
      vr->mapped_resource    = resource;
      vr->offset_in_resource = offset_in_resource;

      /* Insert VR in address space */
      vr->address_space      = as;
      if (prev_vr)
        list_insert_after_named(as->list_vr, prev_vr, vr,
                                prev_in_as, next_in_as);
      else
        list_add_head_named(as->list_vr, vr, prev_in_as, next_in_as);

      list_add_tail_named(vr->mapped_resource->list_vr, vr,
                          prev_in_mapped_resource,
                          next_in_mapped_resource);
      
      /* Signal the resource we are mapping it.
         NOTE(review): resource was already checked non-NULL with a
         non-NULL mmap method at the top — this re-test is redundant */
      if (resource && resource->mmap)
        {
          retval = resource->mmap(vr);
          if (SOS_OK != retval)
            {
              /* NOTE(review): the mmap() error code is overwritten by
                 the unmap() result here — confirm that is intended */
              retval = sos_umem_vmm_unmap(as, vr->start, vr->size);
              goto return_mmap;
            }

          /* The page_in method is MANDATORY for mapped resources */
          SOS_ASSERT_FATAL(vr->ops && vr->ops->page_in);
        }

      if (vr->ops && vr->ops->ref)
        vr->ops->ref(vr);
    }

  /* Ok, fine, we got it right ! Return the address to the caller */
  *uaddr = hint_uaddr;
  as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED,
                                     size, 0, vr->access_rights);
  retval = SOS_OK;

 return_mmap:
  /* NOTE(review): same cache-alloc/vmm-free mismatch as above —
     confirm sos_kmem_vmm_free handles slab objects */
  if (! used_preallocated_vr)
    sos_kmem_vmm_free((sos_vaddr_t)preallocated_vr);
    
  return retval;
}
708 
709 
/**
 * Unmap the user interval [uaddr .. uaddr+size[ of @as.
 *
 * Every VR overlapping the interval is removed, trimmed, or split in
 * two (a VR strictly containing the interval), then the underlying
 * physical pages of the interval are unmapped from the MMU.
 *
 * @param uaddr page-aligned start of the interval (user space)
 * @param size  length; rounded up to a page boundary
 * @return SOS_OK, or -SOS_EINVAL / -SOS_ENOMEM
 */
sos_ret_t
sos_umem_vmm_unmap(struct sos_umem_vmm_as * as,
                   sos_uaddr_t uaddr, sos_size_t size)
{
  struct sos_umem_vmm_vr *vr, *preallocated_vr;
  sos_bool_t need_to_setup_mmu;
  sos_bool_t used_preallocated_vr;

  if (! SOS_IS_PAGE_ALIGNED(uaddr))
    return -SOS_EINVAL;
  /* NOTE(review): if sos_size_t is unsigned this only rejects 0 */
  if (size <= 0)
    return -SOS_EINVAL;
  size = SOS_PAGE_ALIGN_SUP(size);

  /* Make sure the uaddr is valid */
  if (uaddr < SOS_PAGING_BASE_USER_ADDRESS)
    return -SOS_EINVAL;
  if (uaddr > SOS_PAGING_TOP_USER_ADDRESS - size)
    return -SOS_EINVAL;

  /* In some cases, the unmapping might imply a VR to be split into
     2. Actually, allocating a new VR can be a blocking operation, but
     actually we can block now, it won't do no harm. But we must be
     careful not to block later, while altering the VR lists: that's
     why we pre-allocate now. */
  used_preallocated_vr = FALSE;
  preallocated_vr
    = (struct sos_umem_vmm_vr *)sos_kmem_cache_alloc(cache_of_vr, 0);
  if (! preallocated_vr)
    return -SOS_ENOMEM;

  /* Find any VR intersecting with the given interval */
  vr = find_first_intersecting_vr(as, uaddr, size);

  /* Unmap (part of) the VR covered by [uaddr .. uaddr+size[ */
  while (NULL != vr)
    {
      /* Went past the end of the *circular* list => back at the
         beginning ? */
      if (vr->start + vr->size <= uaddr)
        /* Yes, stop now */
        break;

      /* Went beyond the region to unmap ? */
      if (uaddr + size <= vr->start)
        /* Yes, stop now */
        break;

      /* Case 1: VR totally unmapped ? */
      if ((vr->start >= uaddr)
          && (vr->start + vr->size <= uaddr + size))
        {
          struct sos_umem_vmm_vr *next_vr;

          /* Yes: signal we remove it completely */
          if (vr->ops && vr->ops->unmap)
            vr->ops->unmap(vr, vr->start, vr->size);

          /* Remove it from the AS list now */
          next_vr = vr->next_in_as;
          if (next_vr == vr) /* singleton ? */
            next_vr = NULL;
          list_delete_named(as->list_vr, vr, prev_in_as, next_in_as);

          /* Remove from the list of VRs mapping the resource */
          list_delete_named(vr->mapped_resource->list_vr, vr,
                            prev_in_mapped_resource,
                            next_in_mapped_resource);

          if (vr->ops && vr->ops->unref)
            vr->ops->unref(vr);
          
          as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED,
                                             vr->size, vr->access_rights, 0);
          /* NOTE(review): vr was allocated from cache_of_vr but is
             released with sos_kmem_vmm_free, not sos_kmem_cache_free
             (cf. sos_umem_vmm_delete_as) — confirm equivalence */
          sos_kmem_vmm_free((sos_vaddr_t)vr);
      
          /* Prepare next iteration */
          vr = next_vr;
          continue;
        }

      /* Case 2: unmapped region lies completely INSIDE the VR */
      else if ( (vr->start < uaddr)
                && (vr->start + vr->size > uaddr + size) )
        {
          /* VR has to be split into 2 */

          /* Use the preallocated VR and copy the VR into it */
          used_preallocated_vr = TRUE;
          memcpy(preallocated_vr, vr, sizeof(*vr));

          /* Adjust the start/size of both VRs: 'vr' keeps the left
             part, 'preallocated_vr' becomes the right part */
          preallocated_vr->start = uaddr + size;
          preallocated_vr->size  = vr->start + vr->size - (uaddr + size);
          preallocated_vr->offset_in_resource += uaddr + size - vr->start;
          vr->size                             = uaddr - vr->start;

          /* Insert the new VR into the list */
          list_insert_after_named(as->list_vr, vr, preallocated_vr,
                                  prev_in_as, next_in_as);
          list_add_tail_named(vr->mapped_resource->list_vr, preallocated_vr,
                              prev_in_mapped_resource,
                              next_in_mapped_resource);

          /* Signal the changes to the underlying resource */
          if (vr->ops && vr->ops->unmap)
            vr->ops->unmap(vr, uaddr, size);
          if (preallocated_vr->ops && preallocated_vr->ops->ref)
            preallocated_vr->ops->ref(preallocated_vr);

          /* Account for change in VRs */
          as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED,
                                             size, vr->access_rights, 0);

          /* No need to go further */
          break;
        }

      /* Case 3: unmapped region only affects the START address of the VR */
      else if (uaddr <= vr->start)
        {
          sos_size_t translation = uaddr + size - vr->start;

          /* Shift the VR */
          vr->size               -= translation;
          vr->offset_in_resource += translation;
          vr->start              += translation;
          
          /* Signal unmapping.
             NOTE(review): the removed chunk was [old vr->start ..
             uaddr+size[, yet uaddr+size (the NEW start) is passed as
             the region's start — confirm the intended ops->unmap
             argument convention */
          if (vr->ops && vr->ops->unmap)
            vr->ops->unmap(vr, uaddr + size,
                           translation);
          
          /* Account for change in VRs */
          as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED,
                                             translation,
                                             vr->access_rights, 0);

          /* No need to go further, we reached the last VR that
             overlaps the unmapped region */
          break;
        }

      /* Case 4: unmapped region only affects the ENDING address of the VR */
      else if (uaddr + size >= vr->start + vr->size)
        {
          sos_size_t unmapped_size = vr->start + vr->size - uaddr;

          /* Resize VR */
          vr->size = uaddr - vr->start;
          
          /* Signal unmapping */
          if (vr->ops && vr->ops->unmap)
            vr->ops->unmap(vr, uaddr, unmapped_size);

          /* Account for change in VRs */
          as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED,
                                             unmapped_size,
                                             vr->access_rights, 0);
          
          vr = vr->next_in_as;
          continue;
        }

      /* The four cases above are exhaustive: reaching here is a bug */
      sos_display_fatal_error("BUG uaddr=%x sz=%x vr_start=%x, vr_sz=%x",
                              uaddr, size, vr->start, vr->size);
    }

  /* Switch to the target AS's MMU configuration if it is not already
     the one squatted by the current thread */
  need_to_setup_mmu = (sos_thread_get_current()->squatted_mm_context
                       != as->mm_context);
  if (need_to_setup_mmu)
    SOS_ASSERT_FATAL(SOS_OK
                     == sos_thread_prepare_user_space_access(as,
                                                             (sos_vaddr_t)
                                                               NULL));
  {
    /* Tear down the page-table entries of the whole interval and
       account the released physical memory */
    sos_ret_t sz_unmapped = sos_paging_unmap_interval(uaddr, size);
    SOS_ASSERT_FATAL(sz_unmapped >= 0);
    as->phys_total -= sz_unmapped;
  }
  if (need_to_setup_mmu)
    SOS_ASSERT_FATAL(SOS_OK == sos_thread_end_user_space_access());

  /* NOTE(review): same cache-alloc/vmm-free mismatch as above */
  if (! used_preallocated_vr)
    sos_kmem_vmm_free((sos_vaddr_t)preallocated_vr);

  return SOS_OK;
}
898 
899 
900 sos_ret_t
901 sos_umem_vmm_chprot(struct sos_umem_vmm_as * as,
902                     sos_uaddr_t uaddr, sos_size_t size,
903                     sos_ui32_t new_access_rights)
904 {
905   struct sos_umem_vmm_vr *start_vr, *vr,
906     *preallocated_middle_vr, *preallocated_right_vr;
907   sos_bool_t used_preallocated_middle_vr, used_preallocated_right_vr;
908 
909   if (! SOS_IS_PAGE_ALIGNED(uaddr))
910     return -SOS_EINVAL;
911   if (size <= 0)
912     return -SOS_EINVAL;
913   size = SOS_PAGE_ALIGN_SUP(size);
914 
915   /* Make sure the uaddr is valid */
916   if (uaddr < SOS_PAGING_BASE_USER_ADDRESS)
917     return -SOS_EINVAL;
918   if (uaddr > SOS_PAGING_TOP_USER_ADDRESS - size)
919     return -SOS_EINVAL;
920 
921   /* Pre-allocate 2 new VRs (same reason as for unmap). Because chprot
922      may imply at most 2 regions to be split */
923   used_preallocated_middle_vr = FALSE;
924   used_preallocated_right_vr  = FALSE;
925   preallocated_middle_vr
926     = (struct sos_umem_vmm_vr *)sos_kmem_cache_alloc(cache_of_vr, 0);
927   if (! preallocated_middle_vr)
928     return -SOS_ENOMEM;
929   preallocated_right_vr
930     = (struct sos_umem_vmm_vr *)sos_kmem_cache_alloc(cache_of_vr, 0);
931   if (! preallocated_right_vr)
932     {
933       sos_kmem_vmm_free((sos_vaddr_t)preallocated_middle_vr);
934       return -SOS_ENOMEM;
935     }
936 
937   /* Find any VR intersecting with the given interval */
938   start_vr = find_first_intersecting_vr(as, uaddr, size);
939   if (NULL == start_vr)
940     return SOS_OK;
941 
942   /* First of all: make sure we are allowed to change the access
943      rights of all the VRs concerned by the chprot */
944   vr = start_vr;
945   while (TRUE)
946     {
947       /* Went past the end of the *circular* list => back at the
948          begining ? */
949       if (vr->start + vr->size <= uaddr)
950         /* Yes, stop now */
951         break;
952 
953       /* Went beyond the region to chprot ? */
954       if (uaddr + size < vr->start)
955         /* Yes, stop now */
956         break;
957 
958       if (vr->flags & SOS_VR_MAP_SHARED)
959         {
960           /* Make sure the mapped resource allows the required
961              protection flags */
962           if ( ( (new_access_rights & SOS_VM_MAP_PROT_READ)
963                  && !(vr->mapped_resource->allowed_access_rights
964                       & SOS_VM_MAP_PROT_READ) )
965                || ( (new_access_rights & SOS_VM_MAP_PROT_WRITE)
966                     && !(vr->mapped_resource->allowed_access_rights
967                          & SOS_VM_MAP_PROT_WRITE) )
968                || ( (new_access_rights & SOS_VM_MAP_PROT_EXEC)
969                     && !(vr->mapped_resource->allowed_access_rights
970                          & SOS_VM_MAP_PROT_EXEC) ) )
971             return -SOS_EPERM;
972         }
973 
974       vr = vr->next_in_as;
975     }
976 
977   /* Change the access rights of the VRs covered by [uaddr
978      .. uaddr+size[ */
979   vr = start_vr;
980   while (TRUE)
981     {
982 
983       /* Went past the end of the *circular* list => back at the
984          begining ? */
985       if (vr->start + vr->size <= uaddr)
986         /* Yes, stop now */
987         break;
988 
989       /* Went beyond the region to chprot ? */
990       if (uaddr + size <= vr->start)
991         /* Yes, stop now */
992         break;
993 
994       /* Access rights unchanged ? */
995       if (vr->access_rights == new_access_rights)
996         /* nop */
997         {
998           vr = vr->next_in_as;
999           continue;
1000         }
1001 
1002       /* VR totally chprot ? */
1003       if ((vr->start >= uaddr)
1004           && (vr->start + vr->size <= uaddr + size))
1005         {
1006           /* Account for change in VRs */
1007           as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED,
1008                                              vr->size, vr->access_rights,
1009                                              new_access_rights);
1010           vr->access_rights = new_access_rights;
1011 
1012           if (vr->flags & SOS_VR_MAP_SHARED)
1013             /* For shared mappings: effectively change the access
1014                rights of the physical pages  */
1015             sos_paging_set_prot_of_interval(vr->start, vr->size,
1016                                             new_access_rights);
1017           else
1018             /* Private mapping */
1019             {
1020               /* For private mappings, we set the new access_rights
1021                  only if it becomes read-only. For private mappings
1022                  that become writable, we don't do anything: we keep
1023                  the access rights unchanged to preserve the COW
1024                  semantics */
1025               if (! (new_access_rights & SOS_VM_MAP_PROT_WRITE))
1026                 sos_paging_set_prot_of_interval(vr->start, vr->size,
1027                                                 new_access_rights);
1028             }
1029 
1030           vr = vr->next_in_as;
1031           continue;
1032         }
1033 
1034       /* chprot region lies completely INSIDE the VR */
1035       else if ( (vr->start < uaddr)
1036                 && (vr->start + vr->size > uaddr + size) )
1037         {
1038           /* VR has to be split into 3 */
1039 
1040           /* Use the preallocated VRs and copy the VR into them */
1041           SOS_ASSERT_FATAL(! used_preallocated_middle_vr);
1042           SOS_ASSERT_FATAL(! used_preallocated_right_vr);
1043           used_preallocated_middle_vr = TRUE;
1044           memcpy(preallocated_middle_vr, vr, sizeof(*vr));
1045           used_preallocated_right_vr = TRUE;
1046           memcpy(preallocated_right_vr, vr, sizeof(*vr));
1047 
1048           /* Adjust the start/size of the VRs */
1049           preallocated_middle_vr->start = uaddr;
1050           preallocated_middle_vr->size  = size;
1051           preallocated_right_vr->start  = uaddr + size;
1052           preallocated_right_vr->size   = vr->start + vr->size
1053                                             - (uaddr + size);
1054           preallocated_middle_vr->offset_in_resource
1055             += uaddr - vr->start;
1056           preallocated_right_vr->offset_in_resource
1057             += uaddr + size - vr->start;
1058           vr->size = uaddr - vr->start;
1059 
1060           /* Account for change in VRs */
1061           preallocated_middle_vr->access_rights = new_access_rights;
1062           as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED,
1063                                              size, vr->access_rights,
1064                                              new_access_rights);
1065 
1066           /* Insert the new VRs into the lists */
1067           list_insert_after_named(as->list_vr, vr, preallocated_middle_vr,
1068                                   prev_in_as, next_in_as);
1069           list_insert_after_named(as->list_vr, preallocated_middle_vr,
1070                                   preallocated_right_vr,
1071                                   prev_in_as, next_in_as);
1072 
1073           list_add_tail_named(vr->mapped_resource->list_vr,
1074                               preallocated_middle_vr,
1075                               prev_in_mapped_resource,
1076                               next_in_mapped_resource);
1077           list_add_tail_named(vr->mapped_resource->list_vr,
1078                               preallocated_right_vr,
1079                               prev_in_mapped_resource,
1080                               next_in_mapped_resource);
1081 
1082           /* Effectively change the access rights of the physical pages */
1083           if (!(preallocated_middle_vr->flags & SOS_VR_MAP_SHARED)
1084               && (new_access_rights & SOS_VM_MAP_PROT_WRITE))
1085             /* For private mappings with write access, prepare for COW */
1086             sos_paging_prepare_COW(preallocated_middle_vr->start,
1087                                    preallocated_middle_vr->size);
1088           else
1089             sos_paging_set_prot_of_interval(preallocated_middle_vr->start,
1090                                             preallocated_middle_vr->size,
1091                                             new_access_rights);
1092 
1093           if (preallocated_right_vr->ops && preallocated_right_vr->ops->ref)
1094             preallocated_right_vr->ops->ref(preallocated_right_vr);
1095           if (preallocated_middle_vr->ops && preallocated_middle_vr->ops->ref)
1096             preallocated_middle_vr->ops->ref(preallocated_middle_vr);
1097 
1098           /* No need to go further */
1099           break;
1100         }
1101 
1102       /* Chprot region only affects the START address of the VR */
1103       else if (uaddr <= vr->start)
1104         {
1105           /* Split the region into 2 */
1106           sos_uoffset_t offset_in_region = uaddr + size - vr->start;
1107 
1108           /* Use the preallocated VRs and copy the VR into them */
1109           SOS_ASSERT_FATAL(! used_preallocated_middle_vr);
1110           used_preallocated_middle_vr = TRUE;
1111           memcpy(preallocated_middle_vr, vr, sizeof(*vr));
1112 
1113           /* Adjust the start/size of the VRs */
1114           preallocated_middle_vr->start += offset_in_region;
1115           preallocated_middle_vr->size  -= offset_in_region;
1116           vr->size                       = offset_in_region;
1117           preallocated_middle_vr->offset_in_resource += offset_in_region;
1118 
1119           /* Account for change in VRs */
1120           as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED,
1121                                              vr->size,
1122                                              vr->access_rights,
1123                                              new_access_rights);
1124           vr->access_rights = new_access_rights;
1125 
1126           /* Insert the new VR into the lists */
1127           list_insert_after_named(as->list_vr, vr,
1128                                   preallocated_middle_vr,
1129                                   prev_in_as, next_in_as);
1130           list_add_tail_named(vr->mapped_resource->list_vr,
1131                               preallocated_middle_vr,
1132                               prev_in_mapped_resource,
1133                               next_in_mapped_resource);
1134 
1135           /* Effectively change the access rights of the physical pages */
1136           if (!(vr->flags & SOS_VR_MAP_SHARED)
1137               && (new_access_rights & SOS_VM_MAP_PROT_WRITE))
1138             /* For private mappings with write access, prepare for COW */
1139             sos_paging_prepare_COW(vr->start, vr->size);
1140           else
1141             sos_paging_set_prot_of_interval(vr->start, vr->size,
1142                                             new_access_rights);
1143 
1144           if (preallocated_middle_vr->ops && preallocated_middle_vr->ops->ref)
1145             preallocated_middle_vr->ops->ref(preallocated_middle_vr);
1146 
1147           /* Ne need to go further (we reached the last VR that
1148              overlaps the given interval to chprot) */
1149           break;
1150         }
1151 
1152       /* Chprot region only affects the ENDING address of the VR */
1153       else if (uaddr + size >= vr->start + vr->size)
1154         {
1155           /* Split the region into 2 */
1156           sos_uoffset_t offset_in_region = uaddr - vr->start;
1157 
1158           /* Use the preallocated VRs and copy the VR into them */
1159           SOS_ASSERT_FATAL(! used_preallocated_right_vr);
1160           used_preallocated_right_vr = TRUE;
1161           memcpy(preallocated_right_vr, vr, sizeof(*vr));
1162 
1163           /* Adjust the start/size of the VRs */
1164           preallocated_right_vr->start        += offset_in_region;
1165           preallocated_right_vr->size         -= offset_in_region;
1166           vr->size                             = offset_in_region;
1167           preallocated_right_vr->offset_in_resource += offset_in_region;
1168 
1169           /* Account for change in VRs */
1170           as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED,
1171                                              preallocated_right_vr->size,
1172                                              vr->access_rights,
1173                                              new_access_rights);
1174           preallocated_right_vr->access_rights = new_access_rights;
1175 
1176           /* Insert the new VR into the lists */
1177           list_insert_after_named(as->list_vr, vr,
1178                                   preallocated_right_vr,
1179                                   prev_in_as, next_in_as);
1180           list_add_tail_named(vr->mapped_resource->list_vr,
1181                               preallocated_right_vr,
1182                               prev_in_mapped_resource,
1183                               next_in_mapped_resource);
1184 
1185           /* Effectively change the access rights of the physical pages */
1186           if (!(preallocated_right_vr->flags & SOS_VR_MAP_SHARED)
1187               && (new_access_rights & SOS_VM_MAP_PROT_WRITE))
1188             /* For private mappings with write access, prepare for COW */
1189             sos_paging_prepare_COW(preallocated_right_vr->start,
1190                                    preallocated_right_vr->size);
1191           else
1192             sos_paging_set_prot_of_interval(preallocated_right_vr->start,
1193                                             preallocated_right_vr->size,
1194                                             new_access_rights);
1195 
1196           if (preallocated_right_vr->ops && preallocated_right_vr->ops->ref)
1197             preallocated_right_vr->ops->ref(preallocated_right_vr);
1198 
1199           vr = vr->next_in_as;
1200           continue;
1201         }
1202 
1203       sos_display_fatal_error("BUG");
1204     }
1205 
1206   if (! used_preallocated_middle_vr)
1207     sos_kmem_vmm_free((sos_vaddr_t)preallocated_middle_vr);
1208   if (! used_preallocated_right_vr)
1209     sos_kmem_vmm_free((sos_vaddr_t)preallocated_right_vr);
1210 
1211   return SOS_OK;
1212 }
1213 
1214 
1215 sos_ret_t
1216 sos_umem_vmm_sync(struct sos_umem_vmm_as * as,
1217                   sos_uaddr_t uaddr, sos_size_t size,
1218                   sos_ui32_t flags)
1219 {
1220   if (! SOS_IS_PAGE_ALIGNED(uaddr))
1221     return -SOS_EINVAL;
1222   if (size <= 0)
1223     return -SOS_EINVAL;
1224   size = SOS_PAGE_ALIGN_SUP(size);
1225 
1226   /* Make sure the uaddr is valid */
1227   if (uaddr < SOS_PAGING_BASE_USER_ADDRESS)
1228     return -SOS_EINVAL;
1229   if (uaddr > SOS_PAGING_TOP_USER_ADDRESS - size)
1230     return -SOS_EINVAL;
1231 
1232   /* Go from page to page, and for each dirty page in the region, call
1233      the sync_page method */
1234   while (TRUE)
1235     {
1236       struct sos_umem_vmm_vr *vr;
1237 
1238       if (size <= 0)
1239         break;
1240 
1241       /* Find any VR intersecting with the given interval */
1242       vr = find_first_intersecting_vr(as, uaddr, size);
1243       if (NULL == vr)
1244         break;
1245 
1246       /* For private or anonymous mappings => no backing store */
1247       if ( !(vr->flags & SOS_VR_MAP_SHARED)
1248            || (vr->mapped_resource->flags & SOS_MAPPED_RESOURCE_ANONYMOUS)
1249 
1250            /* Likewise for non msync-able regions */
1251            || ! vr->ops->sync_page )
1252         {
1253           if (size <= vr->size)
1254             break;
1255 
1256           uaddr += vr->size;
1257           size  -= vr->size;
1258         }
1259 
1260       /* Find the next dirty page in this VR */
1261       for ( ; (size > 0)
1262               && (uaddr - vr->start < vr->size) ;
1263             uaddr += SOS_PAGE_SIZE,
1264               size -= SOS_PAGE_SIZE)
1265         if (sos_paging_is_dirty(uaddr))
1266           {
1267             /* Synchronize it with its backing store */
1268             vr->ops->sync_page(vr, uaddr, flags);
1269             uaddr += SOS_PAGE_SIZE;
1270             size  -= SOS_PAGE_SIZE;
1271             break;
1272           }
1273     }
1274 
1275   return SOS_OK;
1276 }
1277 
1278 
1279 sos_ret_t
1280 sos_umem_vmm_resize(struct sos_umem_vmm_as * as,
1281                     sos_uaddr_t old_uaddr, sos_size_t old_size,
1282                     sos_uaddr_t *new_uaddr, sos_size_t new_size,
1283                     sos_ui32_t flags)
1284 {
1285   sos_luoffset_t new_offset_in_resource;
1286   sos_bool_t must_move_vr = FALSE;
1287   struct sos_umem_vmm_vr *vr, *prev_vr, *next_vr;
1288 
1289   /* Make sure the new uaddr is valid */
1290   if (*new_uaddr < SOS_PAGING_BASE_USER_ADDRESS)
1291     return -SOS_EINVAL;
1292   if (*new_uaddr > SOS_PAGING_TOP_USER_ADDRESS - new_size)
1293     return -SOS_EINVAL;
1294 
1295   old_uaddr = SOS_PAGE_ALIGN_INF(old_uaddr);
1296   old_size  = SOS_PAGE_ALIGN_SUP(old_size);
1297   if (! SOS_IS_PAGE_ALIGNED(*new_uaddr))
1298     return -SOS_EINVAL;
1299   if (new_size <= 0)
1300     return -SOS_EINVAL;
1301   new_size = SOS_PAGE_ALIGN_SUP(new_size);
1302   
1303   /* Lookup a VR overlapping the address range */
1304   vr = find_first_intersecting_vr(as, old_uaddr, old_size);
1305   if (! vr)
1306     return -SOS_EINVAL;
1307   
1308   /* Make sure there is exactly ONE VR overlapping the area */
1309   if ( (vr->start > old_uaddr)
1310        || (vr->start + vr->size < old_uaddr + old_size) )
1311     return -SOS_EINVAL;
1312 
1313   /* Retrieve the prev/next VR if they exist (the VR are on circular
1314      list) */
1315   prev_vr = vr->prev_in_as;
1316   if (prev_vr->start >= vr->start)
1317     prev_vr = NULL;
1318   next_vr = vr->prev_in_as;
1319   if (next_vr->start <= vr->start)
1320     next_vr = NULL;
1321 
1322   /*
1323    * Compute new offset inside the mapped resource, if any
1324    */
1325 
1326   /* Don't allow to resize if the uaddr goes beyond the 'offset 0' of
1327      the resource */
1328   if ( (*new_uaddr < vr->start)
1329        && (vr->start - *new_uaddr > vr->offset_in_resource) )
1330     return -SOS_EINVAL;
1331   
1332   /* Compute new offset in the resource (overflow-safe) */
1333   if (vr->start > *new_uaddr)
1334     new_offset_in_resource
1335       = vr->offset_in_resource
1336       - (vr->start - *new_uaddr);
1337   else
1338     new_offset_in_resource
1339       = vr->offset_in_resource
1340       + (*new_uaddr - vr->start);
1341 
1342   /* If other VRs would be affected by this resizing, then the VR must
1343      be moved */
1344   if (prev_vr && (prev_vr->start + prev_vr->size > *new_uaddr))
1345     must_move_vr |= TRUE;
1346   if (next_vr && (next_vr->start < *new_uaddr + new_size))
1347     must_move_vr |= TRUE;
1348 
1349   /* If VR would be out-of-user-space, it must be moved */
1350   if (*new_uaddr < SOS_PAGING_BASE_USER_ADDRESS)
1351     must_move_vr |= TRUE;
1352   if (*new_uaddr > SOS_PAGING_TOP_USER_ADDRESS - new_size)
1353     must_move_vr |= TRUE;
1354 
1355   /* The VR must be moved but the user forbids it */
1356   if ( must_move_vr && !(flags & SOS_VR_REMAP_MAYMOVE) )
1357     return -SOS_EINVAL;
1358 
1359   /* If the VR must be moved, we simply map the resource elsewhere and
1360      unmap the current VR */
1361   if (must_move_vr)
1362     {
1363       sos_uaddr_t uaddr, result_uaddr;
1364       sos_ret_t retval;
1365 
1366       result_uaddr = *new_uaddr;
1367       retval = sos_umem_vmm_map(as, & result_uaddr, new_size,
1368                                 vr->access_rights,
1369                                 vr->flags | INTERNAL_MAP_CALLED_FROM_MREMAP,
1370                                 vr->mapped_resource,
1371                                 new_offset_in_resource);
1372       if (SOS_OK != retval)
1373         return retval;
1374 
1375       /* Remap the physical pages at their new address */
1376       for (uaddr = vr->start ;
1377            uaddr < vr->start + vr->size ;
1378            uaddr += SOS_PAGE_SIZE)
1379         {
1380           sos_paddr_t paddr;
1381           sos_ui32_t  prot;
1382           sos_uaddr_t vaddr;
1383           
1384           if (uaddr < *new_uaddr)
1385             continue;
1386           if (uaddr > *new_uaddr + new_size)
1387             continue;
1388 
1389           /* Compute destination virtual address (should be
1390              overflow-safe) */
1391           if (vr->start >= *new_uaddr)
1392             vaddr = result_uaddr
1393               + (uaddr - vr->start)
1394               + (vr->start - *new_uaddr);
1395           else
1396             vaddr = result_uaddr
1397               + (uaddr - vr->start)
1398               - (*new_uaddr - vr->start);
1399 
1400           paddr = sos_paging_get_paddr(uaddr);
1401           if (! paddr)
1402             /* No physical page mapped at this address yet */
1403             continue;
1404 
1405           prot  = sos_paging_get_prot(uaddr);
1406           SOS_ASSERT_FATAL(prot);
1407 
1408           /* Remap it at its destination address */
1409           retval = sos_paging_map(paddr, vaddr, TRUE, prot);
1410           if (SOS_OK != retval)
1411             {
1412               sos_umem_vmm_unmap(as, result_uaddr, new_size);
1413               return retval;
1414             }
1415         }
1416 
1417       retval = sos_umem_vmm_unmap(as, vr->start, vr->size);
1418       if (SOS_OK != retval)
1419         {
1420           sos_umem_vmm_unmap(as, result_uaddr, new_size);
1421           return retval;
1422         }
1423 
1424       *new_uaddr = result_uaddr;
1425       return retval;
1426     }
1427 
1428   /* Otherwise we simply resize the VR, taking care of unmapping
1429      what's been unmapped  */
1430 
1431   if (*new_uaddr + new_size < vr->start + vr->size)
1432     sos_umem_vmm_unmap(as, *new_uaddr + new_size,
1433                        vr->start + vr->size - (*new_uaddr + new_size));
1434   else
1435     {
1436       as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED,
1437                                          *new_uaddr + new_size
1438                                            - (vr->start + vr->size),
1439                                          0, vr->access_rights);
1440       vr->size += *new_uaddr + new_size - (vr->start + vr->size);
1441     }
1442   
1443   if (*new_uaddr > vr->start)
1444     sos_umem_vmm_unmap(as, vr->start, *new_uaddr - vr->start);
1445   else
1446     {
1447       as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED,
1448                                          vr->start - *new_uaddr,
1449                                          0, vr->access_rights);
1450       vr->size  += vr->start - *new_uaddr;
1451       vr->start  = *new_uaddr;
1452       vr->offset_in_resource = new_offset_in_resource; 
1453     }
1454 
1455   SOS_ASSERT_FATAL(vr->start == *new_uaddr);
1456   SOS_ASSERT_FATAL(vr->size  == new_size);
1457   SOS_ASSERT_FATAL(vr->offset_in_resource == new_offset_in_resource);
1458 
1459   return SOS_OK;
1460 }
1461 
1462 
/**
 * Page-fault resolution entry point: try to resolve a fault at uaddr
 * for the current thread's process.
 *
 * Resolution order: (1) reject faults outside any VR or write faults
 * on read-only VRs; (2) for write faults on private mappings, try the
 * COW machinery; (3) otherwise delegate to the VR's page_in method and
 * account the new page.
 *
 * @param uaddr        Faulting user-space address
 * @param write_access TRUE when the faulting access was a write
 * @param user_access  Unused in this implementation
 *
 * @return SOS_OK when the fault was resolved, -SOS_EFAULT otherwise.
 */
sos_ret_t sos_umem_vmm_try_resolve_page_fault(sos_uaddr_t uaddr,
                                              sos_bool_t write_access,
                                              sos_bool_t user_access)
{
  struct sos_process     *process = sos_thread_get_current()->process;
  struct sos_umem_vmm_as *as;
  struct sos_umem_vmm_vr *vr;

  /* No process context (e.g. pure kernel thread) => nothing to resolve */
  if (! process)
    return -SOS_EFAULT;

  as = sos_process_get_address_space(process);
  if (! as)
    return -SOS_EFAULT;

  /* Locate the VR containing the faulting address (interval of 1 byte) */
  vr = find_first_intersecting_vr(as, uaddr, 1);
  if (! vr)
    return -SOS_EFAULT;

  /* Write on a read-only VR */
  if (write_access && !(vr->access_rights & SOS_VM_MAP_PROT_WRITE))
    return -SOS_EFAULT;

  /* Write on a COW VR */
  if (write_access && !(vr->flags & SOS_VR_MAP_SHARED))
    {
      if (SOS_OK == sos_paging_try_resolve_COW(uaddr))
        {
          as->pgflt_cow ++;
          return SOS_OK;
        }
    }

  /* Ask the underlying resource to resolve the page fault */
  /* NOTE(review): vr->ops is dereferenced without a NULL check here,
     unlike the "vr->ops && vr->ops->..." tests elsewhere in this file
     — presumably page_in is mandatory for every VR; confirm */
  if (SOS_OK != vr->ops->page_in(vr, uaddr, write_access))
    {
      as->pgflt_invalid ++;
      return -SOS_EFAULT;
    }

  /* One physical page has been brought in: update the statistics */
  as->phys_total += SOS_PAGE_SIZE;
  as->pgflt_page_in ++;

  /* For a private mapping, keep the mapping read-only */
  if (!(vr->flags & SOS_VR_MAP_SHARED))
    {
      sos_paging_prepare_COW(SOS_PAGE_ALIGN_INF(uaddr),
                             SOS_PAGE_SIZE);
    }

  return SOS_OK;
}
1515 
1516 
1517 sos_ret_t
1518 sos_umem_vmm_init_heap(struct sos_umem_vmm_as * as,
1519                        sos_uaddr_t heap_start)
1520 {
1521   SOS_ASSERT_FATAL(! as->heap_start);
1522 
1523   as->heap_start = heap_start;
1524   as->heap_size  = 0;
1525   return SOS_OK;
1526 }
1527 
1528 
/**
 * brk(): move the top of the process heap to new_top_uaddr.
 *
 * @param as            The target user address space (heap must have
 *                      been registered with sos_umem_vmm_init_heap)
 * @param new_top_uaddr Requested new heap top (rounded up to a page
 *                      boundary), or 0/NULL to query the current top
 *
 * @return The new heap top on success, the current top when queried or
 *         unchanged, (sos_uaddr_t)NULL on failure.
 */
sos_uaddr_t
sos_umem_vmm_brk(struct sos_umem_vmm_as * as,
                 sos_uaddr_t new_top_uaddr)
{
  sos_uaddr_t new_start;
  sos_size_t  new_size;
  SOS_ASSERT_FATAL(as->heap_start);

  /* NULL request => just report the current heap top */
  if (! new_top_uaddr)
    return as->heap_start + as->heap_size;

  /* No change requested */
  if (new_top_uaddr == as->heap_start + as->heap_size)
    return as->heap_start + as->heap_size;
 
  /* The heap cannot shrink below its own start address */
  if (new_top_uaddr < as->heap_start)
    return (sos_uaddr_t)NULL;

  new_top_uaddr = SOS_PAGE_ALIGN_SUP(new_top_uaddr);
  new_start = as->heap_start;
  new_size  = new_top_uaddr - as->heap_start;

  /* First call to brk: we must map /dev/zero */
  if (! as->heap_size)
    {
      /* Private, demand-zero, read/write mapping at (or near) heap_start;
         sos_dev_zero_map updates as->heap_start with the actual address */
      if (SOS_OK != sos_dev_zero_map(as, & as->heap_start,
                                     new_size,
                                     SOS_VM_MAP_PROT_READ
                                     | SOS_VM_MAP_PROT_WRITE,
                                     0 /* private non-fixed */))
        return (sos_uaddr_t)NULL;

      as->heap_size = new_size;
      return as->heap_start + as->heap_size;
    }

  /* Otherwise we just have to unmap or resize the region */
  if (new_size <= 0)
    {
      /* Heap shrunk to nothing: unmap the whole heap region */
      if (SOS_OK != sos_umem_vmm_unmap(as,
                                       as->heap_start, as->heap_size))
        return (sos_uaddr_t)NULL;
    }
  else
    {
      /* Grow/shrink in place (flags=0 => moving the VR is forbidden) */
      if (SOS_OK != sos_umem_vmm_resize(as,
                                        as->heap_start, as->heap_size,
                                        & new_start, new_size,
                                        0))
        return (sos_uaddr_t)NULL;
    }

  /* The heap must not have moved */
  SOS_ASSERT_FATAL(new_start == as->heap_start);
  as->heap_size = new_size;
  return new_top_uaddr;
}
1584 
1585 
1586 static struct sos_umem_vmm_vr *
1587 find_enclosing_or_next_vr(struct sos_umem_vmm_as * as,
1588                           sos_uaddr_t uaddr)
1589 {
1590   struct sos_umem_vmm_vr *vr;
1591   int nb_vr;
1592 
1593   if (uaddr < SOS_PAGING_BASE_USER_ADDRESS)
1594     return NULL;
1595   if (uaddr > SOS_PAGING_TOP_USER_ADDRESS)
1596     return NULL;
1597 
1598   list_foreach_named(as->list_vr, vr, nb_vr, prev_in_as, next_in_as)
1599     {
1600       /* Equivalent to "if (uaddr < vr->start + vr->size)" but more
1601          robust (resilient to integer overflows) */
1602       if (uaddr <= vr->start + (vr->size - 1))
1603         return vr;
1604     }
1605 
1606   return NULL;
1607 }
1608 
1609 
1610 static struct sos_umem_vmm_vr *
1611 find_first_intersecting_vr(struct sos_umem_vmm_as * as,
1612                            sos_uaddr_t start_uaddr, sos_size_t size)
1613 {
1614   struct sos_umem_vmm_vr * vr;
1615   vr = find_enclosing_or_next_vr(as, start_uaddr);
1616   if (! vr)
1617     return NULL;
1618 
1619   if (start_uaddr + size <= vr->start)
1620     return NULL;
1621 
1622   return vr;
1623 }
1624 
1625 
/**
 * Find a free interval of user-space addresses of the given size,
 * starting the search at hint_uaddr and wrapping around to the bottom
 * of user space if needed.
 *
 * @param as         The target user address space
 * @param hint_uaddr Preferred start address (clamped to user space)
 * @param size       Requested length of the free interval
 *
 * @return The start address of a suitable hole, or (sos_uaddr_t)NULL
 *         when the address space has no hole of that size.
 */
static sos_uaddr_t
find_first_free_interval(struct sos_umem_vmm_as * as,
                         sos_uaddr_t hint_uaddr, sos_size_t size)
{
  struct sos_umem_vmm_vr * initial_vr, * vr;

  /* Clamp the hint into the user-space window */
  if (hint_uaddr < SOS_PAGING_BASE_USER_ADDRESS)
    hint_uaddr = SOS_PAGING_BASE_USER_ADDRESS;

  /* Request too large to fit below the top of user space at all */
  if (hint_uaddr > SOS_PAGING_TOP_USER_ADDRESS - size + 1)
    return (sos_uaddr_t)NULL;

  initial_vr = vr = find_enclosing_or_next_vr(as, hint_uaddr);
  if (! vr)
    /* Great, there is nothing after ! */
    return hint_uaddr;

  /* Scan the remaining VRs in the list */
  do
    {
      /* Is there enough space /before/ that VR ? */
      if (hint_uaddr + size <= vr->start)
        /* Great ! */
        return hint_uaddr;

      /* Is there any VR /after/ this one, or do we have to wrap back
         at the begining of the user space ? */
      if (vr->next_in_as->start >= hint_uaddr)
        /* Ok, the next VR is really after us */
        hint_uaddr = vr->start + vr->size;
      else
        {
          /* No: wrapping up */

          /* Is there any space before the end of user space ? */
          if (hint_uaddr <= SOS_PAGING_TOP_USER_ADDRESS - size)
            return hint_uaddr;

          hint_uaddr = SOS_PAGING_BASE_USER_ADDRESS;
        }

      /* Prepare to look after this VR */
      vr = vr->next_in_as;
    }
  while (vr != initial_vr);

  /* Walked the whole circular list without finding a hole big
     enough. NOTE(review): the comment below about "the space after
     the last VR" does not match the code, which simply gives up —
     looks like a leftover; confirm */

  /* Reached the end of the list and did not find anything ?... Look
     at the space after the last VR */

  return (sos_uaddr_t)NULL;
}
1677 
1678 
/**
 * Update the per-address-space virtual-memory statistics (vm_total,
 * and vm_shrd for shared mappings) after 'size' bytes of a VR change
 * protection from prev_access_rights to new_access_rights.
 *
 * @param as                 The address space whose counters to update
 * @param is_shared          TRUE when the bytes belong to a shared VR
 * @param size               Number of bytes changing protection
 * @param prev_access_rights Old SOS_VM_MAP_PROT_* combination (0 when
 *                           the bytes are being mapped in)
 * @param new_access_rights  New SOS_VM_MAP_PROT_* combination (0 when
 *                           the bytes are being unmapped)
 */
static void
as_account_change_of_vr_protection(struct sos_umem_vmm_as * as,
                                   sos_bool_t is_shared,
                                   sos_size_t size,
                                   sos_ui32_t prev_access_rights,
                                   sos_ui32_t new_access_rights)
{
  /* Identical rights => all counters unchanged */
  if (prev_access_rights == new_access_rights)
    return;

/* Add/subtract 'size' from one counter, with an underflow sanity check */
#define _UPDATE_VMSTAT(field,is_increment) \
  ({ if (is_increment > 0) \
       as->field += size; \
     else \
       { SOS_ASSERT_FATAL(as->field >= size); as->field -= size; } })
/* Update vm_total.field always, and vm_shrd.field for shared VRs */
#define UPDATE_VMSTAT(field,is_increment) \
  ({ if (is_shared) _UPDATE_VMSTAT(vm_shrd.field, is_increment); \
     _UPDATE_VMSTAT(vm_total.field, is_increment); \
     SOS_ASSERT_FATAL(as->vm_total.field >= as->vm_shrd.field); })

  /* rw/ro bookkeeping: pages are counted in 'rw' when writable, in
     'ro' when readable but not writable */
  if ( (new_access_rights & SOS_VM_MAP_PROT_WRITE)
       && !(prev_access_rights & SOS_VM_MAP_PROT_WRITE))
    {
      UPDATE_VMSTAT(rw, +1);
      if (prev_access_rights & SOS_VM_MAP_PROT_READ)
        UPDATE_VMSTAT(ro, -1);
    }
  else if ( !(new_access_rights & SOS_VM_MAP_PROT_WRITE)
            && (prev_access_rights & SOS_VM_MAP_PROT_WRITE))
    {
      if (new_access_rights & SOS_VM_MAP_PROT_READ)
        UPDATE_VMSTAT(ro, +1);
      UPDATE_VMSTAT(rw, -1);
    }
  /* NOTE(review): when neither side is writable and both are readable
     (e.g. only the EXEC bit changed), this branch re-increments 'ro'
     although those bytes were already counted — looks like an
     accounting bug; confirm against the callers' transitions */
  else if (new_access_rights & SOS_VM_MAP_PROT_READ)
    UPDATE_VMSTAT(ro, +1);
  else if (!(new_access_rights & SOS_VM_MAP_PROT_READ))
    UPDATE_VMSTAT(ro, -1);

  /* 'code' counts executable bytes */
  if ( (new_access_rights & SOS_VM_MAP_PROT_EXEC)
       && !(prev_access_rights & SOS_VM_MAP_PROT_EXEC))
    {
      UPDATE_VMSTAT(code, +1);
    }
  else if ( !(new_access_rights & SOS_VM_MAP_PROT_EXEC)
            && (prev_access_rights & SOS_VM_MAP_PROT_EXEC))
    {
      UPDATE_VMSTAT(code, -1);
    }

  /* 'overall' counts bytes that are mapped with any rights at all */
  if (new_access_rights && !prev_access_rights)
    UPDATE_VMSTAT(overall, +1);
  else if (!new_access_rights && prev_access_rights)
    UPDATE_VMSTAT(overall, -1);

}

source navigation ] diff markup ] identifier search ] general search ]