001
002
003
004
005
006
007
008
009
010
011
012
013
014
015
016
017
018
019 #include <sos/assert.h>
020 #include <sos/list.h>
021 #include <sos/physmem.h>
022 #include <sos/kmem_slab.h>
023 #include <drivers/bochs.h>
024 #include <hwcore/mm_context.h>
025 #include <hwcore/paging.h>
026 #include <drivers/zero.h>
027
028 #include "umem_vmm.h"
029
030
/**
 * The VMM state of one user address space: the list of its virtual
 * regions (VRs) plus memory-usage and page-fault statistics.
 */
struct sos_umem_vmm_as
{
  /** The process owning this address space */
  struct sos_process * process;

  /** The MMU configuration (page tables) backing this address space */
  struct sos_mm_context * mm_context;

  /** Circular list of VRs, linked through prev_in_as/next_in_as */
  struct sos_umem_vmm_vr * list_vr;

  /** Heap location, managed by sos_umem_vmm_init_heap()/sos_umem_vmm_brk() */
  sos_uaddr_t heap_start;
  sos_size_t heap_size;

  /** Amount of physical memory currently mapped in this AS (bytes) */
  sos_size_t phys_total;
  /* Virtual-memory usage, split by protection; vm_total covers all
     VRs, vm_shrd only the SOS_VR_MAP_SHARED ones */
  struct vm_usage
  {
    sos_size_t overall;
    sos_size_t ro, rw, code ;
  } vm_total, vm_shrd;

  /* Page-fault counters (see sos_umem_vmm_try_resolve_page_fault) */
  sos_size_t pgflt_cow;      /* resolved copy-on-write faults */
  sos_size_t pgflt_page_in;  /* faults resolved by the resource's page_in */
  sos_size_t pgflt_invalid;  /* unresolvable (invalid) faults */
};
059
060
/**
 * A virtual region (VR): a contiguous interval of user addresses with
 * homogeneous protection/flags, mapping part of a resource.
 */
struct sos_umem_vmm_vr
{
  /** The address space this VR belongs to */
  struct sos_umem_vmm_as *address_space;

  /** Interval covered: [start .. start + size[ (page-aligned) */
  sos_uaddr_t start;
  sos_size_t size;

  /** Protection: OR of the SOS_VM_MAP_PROT_* flags */
  sos_ui32_t access_rights;

  /** Mapping flags: OR of the SOS_VR_MAP_* flags (SHARED/FIXED) */
  sos_ui32_t flags;

  /* Operations provided by the mapped resource at mmap time (page_in,
     ref/unref, unmap notifications) */
  struct sos_umem_vmm_vr_ops *ops;

  /** The resource backing this VR, and where in it the VR starts */
  struct sos_umem_vmm_mapped_resource *mapped_resource;
  sos_luoffset_t offset_in_resource;

  /* Links in the owning address space's circular VR list (kept sorted
     by start address) */
  struct sos_umem_vmm_vr *prev_in_as, *next_in_as;

  /* Links in the mapped resource's list of all VRs mapping it */
  struct sos_umem_vmm_vr *prev_in_mapped_resource, *next_in_mapped_resource;
};
097
098
099
100
101
/* Slab caches for the AS and VR structures, created by
   sos_umem_vmm_subsystem_setup(). Every AS/VR object in this file is
   allocated from (and must be returned to) these caches. */
static struct sos_kslab_cache * cache_of_as;
static struct sos_kslab_cache * cache_of_vr;
104
105
106
/**
 * Debugging helper: dump the state of the given address space (usage
 * statistics and every VR) on the Bochs console, tagged with the
 * caller-supplied string 'str'.
 */
void sos_dump_as(const struct sos_umem_vmm_as * as, const char *str)
{
  struct sos_umem_vmm_vr *vr;
  int nb_vr;

  sos_bochs_printf("AS %p - %s:\n", as, str);
  sos_bochs_printf("  physical mem: %x\n",
		   as->phys_total);
  sos_bochs_printf("  VM (all/ro+rw/exec) tot:%x/%x+%x/%x shrd:%x/%x+%x/%x\n",
		   as->vm_total.overall,
		   as->vm_total.ro, as->vm_total.rw, as->vm_total.code,
		   as->vm_shrd.overall,
		   as->vm_shrd.ro, as->vm_shrd.rw, as->vm_shrd.code);
  sos_bochs_printf("  pgflt cow=%d pgin=%d inv=%d\n",
		   as->pgflt_cow, as->pgflt_page_in, as->pgflt_invalid);
  /* Walk the circular VR list and print one line per region */
  list_foreach_named(as->list_vr, vr, nb_vr, prev_in_as, next_in_as)
    {
      sos_bochs_printf("  VR[%d]=%x: [%x,%x[ (sz=%x) mr=(%x)+%llx %c%c%c fl=%x\n",
		       nb_vr, (unsigned)vr,
		       vr->start, vr->start + vr->size, vr->size,
		       (unsigned)vr->mapped_resource,
		       vr->offset_in_resource,
		       (vr->access_rights & SOS_VM_MAP_PROT_READ)?'r':'-',
		       (vr->access_rights & SOS_VM_MAP_PROT_WRITE)?'w':'-',
		       (vr->access_rights & SOS_VM_MAP_PROT_EXEC)?'x':'-',
		       (unsigned)vr->flags);
    }
  sos_bochs_printf("FIN (%s)\n", str);
}
136
137
138
139
140
141
/* The "zero page": a single physical page filled with zeroes, kept
   permanently referenced and mapped in kernel space. It is shared
   read-only by anonymous mappings until a write fault copies it.
   Initialized by sos_umem_vmm_subsystem_setup(). */
sos_paddr_t sos_zero_physpage = 0 ;
sos_vaddr_t sos_zero_kernelpage = 0 ;
144
145
146
147
148
149
150
151
152
/* Helper prototypes — definitions appear later in this file. */

/** Return the VR enclosing uaddr, or failing that the first VR
    starting after uaddr, or NULL when no VR lies at or after uaddr */
static struct sos_umem_vmm_vr *
find_enclosing_or_next_vr(struct sos_umem_vmm_as * as,
			  sos_uaddr_t uaddr);

/** Return the first VR intersecting [start_uaddr .. start_uaddr+size[,
    or NULL when none does */
static struct sos_umem_vmm_vr *
find_first_intersecting_vr(struct sos_umem_vmm_as * as,
			   sos_uaddr_t start_uaddr, sos_size_t size);

/** Return the start of a free user-space interval of 'size' bytes,
    preferably at hint_uaddr, or NULL when no such hole exists */
static sos_uaddr_t
find_first_free_interval(struct sos_umem_vmm_as * as,
			 sos_uaddr_t hint_uaddr, sos_size_t size);

/** Update the vm_total/vm_shrd statistics of 'as' after 'size' bytes
    changed protection from prev_access_rights to new_access_rights
    (an access-rights value of 0 stands for "unmapped") */
static void
as_account_change_of_vr_protection(struct sos_umem_vmm_as * as,
				   sos_bool_t is_shared,
				   sos_size_t size,
				   sos_ui32_t prev_access_rights,
				   sos_ui32_t new_access_rights);
187
188
/**
 * Initialize the user-memory VMM subsystem: allocate the shared "zero
 * page" and create the slab caches for the AS/VR structures.
 * Returns SOS_OK, or -SOS_ENOMEM on allocation failure.
 */
sos_ret_t sos_umem_vmm_subsystem_setup()
{
  /* Allocate one kernel-mapped page and fill it with zeroes: it backs
     all untouched anonymous memory */
  sos_zero_kernelpage = sos_kmem_vmm_alloc(1, SOS_KMEM_VMM_MAP);
  if (sos_zero_kernelpage == (sos_vaddr_t)NULL)
    return -SOS_ENOMEM;
  memset((void*)sos_zero_kernelpage, 0x0, SOS_PAGE_SIZE);

  /* Take a reference on the underlying physical page so it is never
     reclaimed */
  sos_zero_physpage = sos_paging_get_paddr(sos_zero_kernelpage);
  SOS_ASSERT_FATAL(NULL != (void*)sos_zero_physpage);
  sos_physmem_ref_physpage_at(sos_zero_physpage);

  /* Slab cache for struct sos_umem_vmm_as (zeroed on allocation) */
  cache_of_as
    = sos_kmem_cache_create("Address space structures",
			    sizeof(struct sos_umem_vmm_as),
			    1, 0,
			    SOS_KSLAB_CREATE_MAP
			    | SOS_KSLAB_CREATE_ZERO);
  if (! cache_of_as)
    {
      /* NOTE(review): the kernel mapping sos_zero_kernelpage is not
	 released on the failure paths below — presumably acceptable
	 since a failed setup is fatal for the kernel; confirm */
      sos_physmem_unref_physpage(sos_zero_physpage);
      return -SOS_ENOMEM;
    }

  /* Slab cache for struct sos_umem_vmm_vr (zeroed on allocation) */
  cache_of_vr
    = sos_kmem_cache_create("Virtual Region structures",
			    sizeof(struct sos_umem_vmm_vr),
			    1, 0,
			    SOS_KSLAB_CREATE_MAP
			    | SOS_KSLAB_CREATE_ZERO);
  if (! cache_of_vr)
    {
      sos_physmem_unref_physpage(sos_zero_physpage);
      sos_kmem_cache_destroy(cache_of_as);
      return -SOS_ENOMEM;
    }

  return SOS_OK;
}
231
232
233 struct sos_umem_vmm_as *
234 sos_umem_vmm_create_empty_as(struct sos_process *owner)
235 {
236 struct sos_umem_vmm_as * as
237 = (struct sos_umem_vmm_as *) sos_kmem_cache_alloc(cache_of_as, 0);
238 if (! as)
239 return NULL;
240
241 as->mm_context = sos_mm_context_create();
242 if (NULL == as->mm_context)
243 {
244
245 sos_kmem_cache_free((sos_vaddr_t)as);
246 return NULL;
247 }
248
249 as->process = owner;
250 return as;
251 }
252
253
/**
 * Duplicate the current thread's address space for the process 'owner'
 * (fork semantics): every VR is copied, and private writable mappings
 * are turned copy-on-write in BOTH address spaces.
 * Returns the new address space, or NULL on failure.
 */
struct sos_umem_vmm_as *
sos_umem_vmm_duplicate_current_thread_as(struct sos_process *owner)
{
  __label__ undo_creation;
  struct sos_umem_vmm_as * my_as;
  struct sos_umem_vmm_vr * model_vr;
  int nb_vr;

  struct sos_umem_vmm_as * new_as
    = (struct sos_umem_vmm_as *) sos_kmem_cache_alloc(cache_of_as, 0);
  if (! new_as)
    return NULL;

  my_as = sos_process_get_address_space(sos_thread_get_current()->process);
  new_as->process = owner;
  list_init_named(new_as->list_vr, prev_in_as, next_in_as);

  /* Activate the source AS on the MMU while we walk it and prepare the
     COW mappings */
  SOS_ASSERT_FATAL(SOS_OK
		   == sos_thread_prepare_user_space_access(my_as,
							   (sos_vaddr_t)
							   NULL));

  /* Duplicate every VR of the current address space */
  list_foreach_named(my_as->list_vr, model_vr, nb_vr, prev_in_as, next_in_as)
    {
      struct sos_umem_vmm_vr * vr;

      /* Private writable mappings become copy-on-write in both parent
	 and child */
      if ( !(model_vr->flags & SOS_VR_MAP_SHARED)
	   && (model_vr->access_rights & SOS_VM_MAP_PROT_WRITE) )
	{
	  SOS_ASSERT_FATAL(SOS_OK
			   == sos_paging_prepare_COW(model_vr->start,
						     model_vr->size));
	}

      /* Copy the VR descriptor and re-home it in the new AS */
      vr = (struct sos_umem_vmm_vr *) sos_kmem_cache_alloc(cache_of_vr, 0);
      if (! vr)
	goto undo_creation;
      memcpy(vr, model_vr, sizeof(*vr));
      vr->address_space = new_as;

      /* Tell the underlying resource about the new reference */
      if (vr->ops && vr->ops->ref)
	vr->ops->ref(vr);

      /* Keep the new AS's VR list in the same (sorted) order */
      list_add_tail_named(new_as->list_vr, vr, prev_in_as, next_in_as);

      /* Register the copy in the resource's list of mappings */
      list_add_tail_named(model_vr->mapped_resource->list_vr, vr,
			  prev_in_mapped_resource,
			  next_in_mapped_resource);
    }

  /* Duplicate the MMU configuration (shares the physical pages) */
  new_as->mm_context = sos_mm_context_duplicate(my_as->mm_context);
  if (NULL == new_as->mm_context)
    goto undo_creation;

  /* Copy heap settings and usage statistics */
  new_as->heap_start = my_as->heap_start;
  new_as->heap_size = my_as->heap_size;
  new_as->phys_total = my_as->phys_total;
  memcpy(& new_as->vm_total, & my_as->vm_total, sizeof(struct vm_usage));
  memcpy(& new_as->vm_shrd, & my_as->vm_shrd, sizeof(struct vm_usage));
  SOS_ASSERT_FATAL(SOS_OK == sos_thread_end_user_space_access());
  return new_as;

  /* Failure: release whatever was copied so far (delete_as handles a
     partially built AS, including a NULL mm_context) */
 undo_creation:
  SOS_ASSERT_FATAL(SOS_OK == sos_thread_end_user_space_access());
  sos_umem_vmm_delete_as(new_as);
  return NULL;
}
337
338
/**
 * Destroy an address space: remove and free every VR (notifying the
 * underlying resources), release the MMU configuration, and free the
 * AS structure itself. Always returns SOS_OK.
 */
sos_ret_t
sos_umem_vmm_delete_as(struct sos_umem_vmm_as * as)
{
  /* Tear down the VRs one by one from the head of the list */
  while(! list_is_empty_named(as->list_vr, prev_in_as, next_in_as))
    {
      struct sos_umem_vmm_vr * vr;
      vr = list_get_head_named(as->list_vr, prev_in_as, next_in_as);

      /* Unlink from both the AS list and the resource's mapping list */
      list_pop_head_named(as->list_vr, prev_in_as, next_in_as);
      list_delete_named(vr->mapped_resource->list_vr, vr,
			prev_in_mapped_resource,
			next_in_mapped_resource);

      /* Notify the resource: region unmapped, reference dropped */
      if (vr->ops)
	{
	  if (vr->ops->unmap)
	    vr->ops->unmap(vr, vr->start, vr->size);
	  if (vr->ops->unref)
	    vr->ops->unref(vr);
	}

      sos_kmem_cache_free((sos_vaddr_t)vr);
    }

  /* Release the MMU configuration (may be NULL for a partially built
     AS — see sos_umem_vmm_duplicate_current_thread_as) */
  if (as->mm_context)
    sos_mm_context_unref(as->mm_context);

  /* Free the AS structure itself */
  sos_kmem_cache_free((sos_vaddr_t)as);

  return SOS_OK;
}
375
376
/** Return the process owning the given address space */
struct sos_process *
sos_umem_vmm_get_process(struct sos_umem_vmm_as * as)
{
  return as->process;
}

/** Return the MMU configuration of the given address space */
struct sos_mm_context *
sos_umem_vmm_get_mm_context(struct sos_umem_vmm_as * as)
{
  return as->mm_context;
}

/** Return the VR containing uaddr, or NULL when uaddr is unmapped */
struct sos_umem_vmm_vr *
sos_umem_vmm_get_vr_at_address(struct sos_umem_vmm_as * as,
			       sos_uaddr_t uaddr)
{
  struct sos_umem_vmm_vr * vr;
  vr = find_enclosing_or_next_vr(as, uaddr);
  if (! vr)
    return NULL;

  /* The VR found may start after uaddr: then uaddr is not mapped */
  if (uaddr < vr->start)
    return NULL;

  return vr;
}

/** Return the address space the given VR belongs to */
struct sos_umem_vmm_as *
sos_umem_vmm_get_as_of_vr(struct sos_umem_vmm_vr * vr)
{
  return vr->address_space;
}

/** Return the operations attached to the given VR (may be NULL) */
struct sos_umem_vmm_vr_ops *
sos_umem_vmm_get_ops_of_vr(struct sos_umem_vmm_vr * vr)
{
  return vr->ops;
}

/** Return the access rights (SOS_VM_MAP_PROT_*) of the given VR */
sos_ui32_t sos_umem_vmm_get_prot_of_vr(struct sos_umem_vmm_vr * vr)
{
  return vr->access_rights;
}

/** Return the mapping flags (SOS_VR_MAP_*) of the given VR */
sos_ui32_t sos_umem_vmm_get_flags_of_vr(struct sos_umem_vmm_vr * vr)
{
  return vr->flags;
}

/** Return the resource mapped by the given VR */
struct sos_umem_vmm_mapped_resource *
sos_umem_vmm_get_mapped_resource_of_vr(struct sos_umem_vmm_vr * vr)
{
  return vr->mapped_resource;
}

/** Return the start user address of the given VR */
sos_uaddr_t sos_umem_vmm_get_start_of_vr(struct sos_umem_vmm_vr * vr)
{
  return vr->start;
}

/** Return the size (in bytes) of the given VR */
sos_size_t sos_umem_vmm_get_size_of_vr(struct sos_umem_vmm_vr * vr)
{
  return vr->size;
}

/** Return the offset in the resource where the given VR starts */
sos_luoffset_t sos_umem_vmm_get_offset_in_resource(struct sos_umem_vmm_vr * vr)
{
  return vr->offset_in_resource;
}

/**
 * Attach the operations to a VR. May be called only once per VR
 * (typically from the resource's mmap callback): the ops must not
 * already be set.
 */
sos_ret_t
sos_umem_vmm_set_ops_of_vr(struct sos_umem_vmm_vr * vr,
			   struct sos_umem_vmm_vr_ops * ops)
{
  /* Don't allow overwriting already-set operations */
  SOS_ASSERT_FATAL(NULL == vr->ops);

  vr->ops = ops;
  return SOS_OK;
}
469
470
471
472
473
474
475 #define INTERNAL_MAP_CALLED_FROM_MREMAP (1 << 8)
476
477 sos_ret_t
478 sos_umem_vmm_map(struct sos_umem_vmm_as * as,
479 sos_uaddr_t * uaddr, sos_size_t size,
480 sos_ui32_t access_rights,
481 sos_ui32_t flags,
482 struct sos_umem_vmm_mapped_resource * resource,
483 sos_luoffset_t offset_in_resource)
484 {
485 __label__ return_mmap;
486 sos_uaddr_t hint_uaddr;
487 struct sos_umem_vmm_vr *prev_vr, *next_vr, *vr, *preallocated_vr;
488 sos_bool_t merge_with_preceding, merge_with_next, used_preallocated_vr;
489 sos_bool_t internal_map_called_from_mremap
490 = (flags & INTERNAL_MAP_CALLED_FROM_MREMAP);
491
492 sos_ret_t retval = SOS_OK;
493 used_preallocated_vr = FALSE;
494 hint_uaddr = *uaddr;
495
496
497 *uaddr = (sos_vaddr_t)NULL;
498
499 if (! resource)
500 return -SOS_EINVAL;
501 if (! resource->mmap)
502 return -SOS_EPERM;
503
504 if (! SOS_IS_PAGE_ALIGNED(hint_uaddr))
505 return -SOS_EINVAL;
506
507 if (size <= 0)
508 return -SOS_EINVAL;
509 size = SOS_PAGE_ALIGN_SUP(size);
510
511 if (flags & SOS_VR_MAP_SHARED)
512 {
513
514 if ( ( (access_rights & SOS_VM_MAP_PROT_READ)
515 && !(resource->allowed_access_rights & SOS_VM_MAP_PROT_READ) )
516 || ( (access_rights & SOS_VM_MAP_PROT_WRITE)
517 && !(resource->allowed_access_rights & SOS_VM_MAP_PROT_WRITE) )
518 || ( (access_rights & SOS_VM_MAP_PROT_EXEC)
519 && !(resource->allowed_access_rights & SOS_VM_MAP_PROT_EXEC)) )
520 return -SOS_EPERM;
521 }
522
523
524 if ( !internal_map_called_from_mremap
525 && ( resource->flags & SOS_MAPPED_RESOURCE_ANONYMOUS ) )
526
527 {
528
529 }
530
531
532 else if (offset_in_resource + size <= offset_in_resource)
533 return -SOS_EINVAL;
534
535
536 access_rights &= (SOS_VM_MAP_PROT_READ
537 | SOS_VM_MAP_PROT_WRITE
538 | SOS_VM_MAP_PROT_EXEC);
539 flags &= (SOS_VR_MAP_SHARED
540 | SOS_VR_MAP_FIXED);
541
542
543
544
545 preallocated_vr
546 = (struct sos_umem_vmm_vr *)sos_kmem_cache_alloc(cache_of_vr, 0);
547 if (! preallocated_vr)
548 return -SOS_ENOMEM;
549
550
551 if (flags & SOS_VR_MAP_FIXED)
552 {
553
554
555
556
557
558 if (hint_uaddr < SOS_PAGING_BASE_USER_ADDRESS)
559 { retval = -SOS_EINVAL; goto return_mmap; }
560 if (hint_uaddr > SOS_PAGING_TOP_USER_ADDRESS - size)
561 { retval = -SOS_EINVAL; goto return_mmap; }
562
563
564 retval = sos_umem_vmm_unmap(as, hint_uaddr, size);
565 if (SOS_OK != retval)
566 { goto return_mmap; }
567 }
568 else
569 {
570
571
572
573
574
575 hint_uaddr = find_first_free_interval(as, hint_uaddr, size);
576 if (! hint_uaddr)
577 { retval = -SOS_ENOMEM; goto return_mmap; }
578 }
579
580
581
582
583 if ( !internal_map_called_from_mremap
584 && (resource->flags & SOS_MAPPED_RESOURCE_ANONYMOUS ) )
585 offset_in_resource = hint_uaddr;
586
587
588
589 next_vr = find_enclosing_or_next_vr(as, hint_uaddr);
590 if (next_vr)
591 {
592
593 prev_vr = next_vr->prev_in_as;
594
595
596 if (prev_vr->start > hint_uaddr)
597 prev_vr = NULL;
598 }
599 else
600 {
601
602 prev_vr = list_get_tail_named(as->list_vr, prev_in_as, next_in_as);
603 }
604
605
606 merge_with_preceding
607 = ( (NULL != prev_vr)
608 && (prev_vr->mapped_resource == resource)
609 && (prev_vr->offset_in_resource + prev_vr->size == offset_in_resource)
610 && (prev_vr->start + prev_vr->size == hint_uaddr)
611 && (prev_vr->flags == flags)
612 && (prev_vr->access_rights == access_rights) );
613
614
615 merge_with_next
616 = ( (NULL != next_vr)
617 && (next_vr->mapped_resource == resource)
618 && (offset_in_resource + size == next_vr->offset_in_resource)
619 && (hint_uaddr + size == next_vr->start)
620 && (next_vr->flags == flags)
621 && (next_vr->access_rights == access_rights) );
622
623 if (merge_with_preceding && merge_with_next)
624 {
625
626 vr = prev_vr;
627 vr->size += size + next_vr->size;
628
629
630 list_delete_named(as->list_vr, next_vr, prev_in_as, next_in_as);
631 list_delete_named(next_vr->mapped_resource->list_vr, next_vr,
632 prev_in_mapped_resource, next_in_mapped_resource);
633
634 if (next_vr->ops && next_vr->ops->unref)
635 next_vr->ops->unref(next_vr);
636
637 sos_kmem_vmm_free((sos_vaddr_t) next_vr);
638 }
639 else if (merge_with_preceding)
640 {
641
642 vr = prev_vr;
643 vr->size += size;
644 }
645 else if (merge_with_next)
646 {
647
648 vr = next_vr;
649 vr->start -= size;
650 vr->size += size;
651 }
652 else
653 {
654
655
656 vr = preallocated_vr;
657 used_preallocated_vr = TRUE;
658
659 vr->start = hint_uaddr;
660 vr->size = size;
661 vr->access_rights = access_rights;
662 vr->flags = flags;
663 vr->mapped_resource = resource;
664 vr->offset_in_resource = offset_in_resource;
665
666
667 vr->address_space = as;
668 if (prev_vr)
669 list_insert_after_named(as->list_vr, prev_vr, vr,
670 prev_in_as, next_in_as);
671 else
672 list_add_head_named(as->list_vr, vr, prev_in_as, next_in_as);
673 list_add_tail_named(vr->mapped_resource->list_vr, vr,
674 prev_in_mapped_resource,
675 next_in_mapped_resource);
676
677
678 if (resource && resource->mmap)
679 {
680 retval = resource->mmap(vr);
681 if (SOS_OK != retval)
682 {
683 retval = sos_umem_vmm_unmap(as, vr->start, vr->size);
684 goto return_mmap;
685 }
686
687
688 SOS_ASSERT_FATAL(vr->ops && vr->ops->page_in);
689 }
690
691 if (vr->ops && vr->ops->ref)
692 vr->ops->ref(vr);
693 }
694
695
696 *uaddr = hint_uaddr;
697 as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED,
698 size, 0, vr->access_rights);
699 retval = SOS_OK;
700
701 return_mmap:
702 if (! used_preallocated_vr)
703 sos_kmem_vmm_free((sos_vaddr_t)preallocated_vr);
704
705 return retval;
706 }
707
708
709 sos_ret_t
710 sos_umem_vmm_unmap(struct sos_umem_vmm_as * as,
711 sos_uaddr_t uaddr, sos_size_t size)
712 {
713 struct sos_umem_vmm_vr *vr, *preallocated_vr;
714 sos_bool_t need_to_setup_mmu;
715 sos_bool_t used_preallocated_vr;
716
717 if (! SOS_IS_PAGE_ALIGNED(uaddr))
718 return -SOS_EINVAL;
719 if (size <= 0)
720 return -SOS_EINVAL;
721 size = SOS_PAGE_ALIGN_SUP(size);
722
723
724 if (uaddr < SOS_PAGING_BASE_USER_ADDRESS)
725 return -SOS_EINVAL;
726 if (uaddr > SOS_PAGING_TOP_USER_ADDRESS - size)
727 return -SOS_EINVAL;
728
729
730
731
732
733
734 used_preallocated_vr = FALSE;
735 preallocated_vr
736 = (struct sos_umem_vmm_vr *)sos_kmem_cache_alloc(cache_of_vr, 0);
737 if (! preallocated_vr)
738 return -SOS_ENOMEM;
739
740
741 vr = find_first_intersecting_vr(as, uaddr, size);
742
743
744 while (NULL != vr)
745 {
746
747
748 if (vr->start + vr->size <= uaddr)
749
750 break;
751
752
753 if (uaddr + size <= vr->start)
754
755 break;
756
757
758 if ((vr->start >= uaddr)
759 && (vr->start + vr->size <= uaddr + size))
760 {
761 struct sos_umem_vmm_vr *next_vr;
762
763
764 if (vr->ops && vr->ops->unmap)
765 vr->ops->unmap(vr, vr->start, vr->size);
766
767
768 next_vr = vr->next_in_as;
769 if (next_vr == vr)
770 next_vr = NULL;
771 list_delete_named(as->list_vr, vr, prev_in_as, next_in_as);
772
773
774 list_delete_named(vr->mapped_resource->list_vr, vr,
775 prev_in_mapped_resource,
776 next_in_mapped_resource);
777
778 if (vr->ops && vr->ops->unref)
779 vr->ops->unref(vr);
780
781 as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED,
782 vr->size, vr->access_rights, 0);
783 sos_kmem_vmm_free((sos_vaddr_t)vr);
784
785
786 vr = next_vr;
787 continue;
788 }
789
790
791 else if ( (vr->start < uaddr)
792 && (vr->start + vr->size > uaddr + size) )
793 {
794
795
796
797 used_preallocated_vr = TRUE;
798 memcpy(preallocated_vr, vr, sizeof(*vr));
799
800
801 preallocated_vr->start = uaddr + size;
802 preallocated_vr->size = vr->start + vr->size - (uaddr + size);
803 preallocated_vr->offset_in_resource += uaddr + size - vr->start;
804 vr->size = uaddr - vr->start;
805
806
807 list_insert_after_named(as->list_vr, vr, preallocated_vr,
808 prev_in_as, next_in_as);
809 list_add_tail_named(vr->mapped_resource->list_vr, preallocated_vr,
810 prev_in_mapped_resource,
811 next_in_mapped_resource);
812
813
814 if (vr->ops && vr->ops->unmap)
815 vr->ops->unmap(vr, uaddr, size);
816 if (preallocated_vr->ops && preallocated_vr->ops->ref)
817 preallocated_vr->ops->ref(preallocated_vr);
818
819
820 as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED,
821 size, vr->access_rights, 0);
822
823
824 break;
825 }
826
827
828 else if (uaddr <= vr->start)
829 {
830 sos_size_t translation = uaddr + size - vr->start;
831
832
833 vr->size -= translation;
834 vr->offset_in_resource += translation;
835 vr->start += translation;
836
837
838 if (vr->ops && vr->ops->unmap)
839 vr->ops->unmap(vr, uaddr + size,
840 translation);
841
842
843 as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED,
844 translation,
845 vr->access_rights, 0);
846
847
848
849 break;
850 }
851
852
853 else if (uaddr + size >= vr->start + vr->size)
854 {
855 sos_size_t unmapped_size = vr->start + vr->size - uaddr;
856
857
858 vr->size = uaddr - vr->start;
859
860
861 if (vr->ops && vr->ops->unmap)
862 vr->ops->unmap(vr, uaddr, unmapped_size);
863
864
865 as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED,
866 unmapped_size,
867 vr->access_rights, 0);
868
869 vr = vr->next_in_as;
870 continue;
871 }
872
873 sos_display_fatal_error("BUG uaddr=%x sz=%x vr_start=%x, vr_sz=%x",
874 uaddr, size, vr->start, vr->size);
875 }
876
877 need_to_setup_mmu = (sos_thread_get_current()->squatted_mm_context
878 != as->mm_context);
879 if (need_to_setup_mmu)
880 SOS_ASSERT_FATAL(SOS_OK
881 == sos_thread_prepare_user_space_access(as,
882 (sos_vaddr_t)
883 NULL));
884 {
885 sos_size_t sz_unmapped = sos_paging_unmap_interval(uaddr, size);
886 SOS_ASSERT_FATAL(sz_unmapped >= 0);
887 as->phys_total -= sz_unmapped;
888 }
889 if (need_to_setup_mmu)
890 SOS_ASSERT_FATAL(SOS_OK == sos_thread_end_user_space_access());
891
892 if (! used_preallocated_vr)
893 sos_kmem_vmm_free((sos_vaddr_t)preallocated_vr);
894
895 return SOS_OK;
896 }
897
898
899 sos_ret_t
900 sos_umem_vmm_chprot(struct sos_umem_vmm_as * as,
901 sos_uaddr_t uaddr, sos_size_t size,
902 sos_ui32_t new_access_rights)
903 {
904 struct sos_umem_vmm_vr *start_vr, *vr,
905 *preallocated_middle_vr, *preallocated_right_vr;
906 sos_bool_t used_preallocated_middle_vr, used_preallocated_right_vr;
907
908 if (! SOS_IS_PAGE_ALIGNED(uaddr))
909 return -SOS_EINVAL;
910 if (size <= 0)
911 return -SOS_EINVAL;
912 size = SOS_PAGE_ALIGN_SUP(size);
913
914
915 if (uaddr < SOS_PAGING_BASE_USER_ADDRESS)
916 return -SOS_EINVAL;
917 if (uaddr > SOS_PAGING_TOP_USER_ADDRESS - size)
918 return -SOS_EINVAL;
919
920
921
922 used_preallocated_middle_vr = FALSE;
923 used_preallocated_right_vr = FALSE;
924 preallocated_middle_vr
925 = (struct sos_umem_vmm_vr *)sos_kmem_cache_alloc(cache_of_vr, 0);
926 if (! preallocated_middle_vr)
927 return -SOS_ENOMEM;
928 preallocated_right_vr
929 = (struct sos_umem_vmm_vr *)sos_kmem_cache_alloc(cache_of_vr, 0);
930 if (! preallocated_right_vr)
931 {
932 sos_kmem_vmm_free((sos_vaddr_t)preallocated_middle_vr);
933 return -SOS_ENOMEM;
934 }
935
936
937 start_vr = find_first_intersecting_vr(as, uaddr, size);
938 if (NULL == start_vr)
939 return SOS_OK;
940
941
942
943 vr = start_vr;
944 while (TRUE)
945 {
946
947
948 if (vr->start + vr->size <= uaddr)
949
950 break;
951
952
953 if (uaddr + size < vr->start)
954
955 break;
956
957 if (vr->flags & SOS_VR_MAP_SHARED)
958 {
959
960
961 if ( ( (new_access_rights & SOS_VM_MAP_PROT_READ)
962 && !(vr->mapped_resource->allowed_access_rights
963 & SOS_VM_MAP_PROT_READ) )
964 || ( (new_access_rights & SOS_VM_MAP_PROT_WRITE)
965 && !(vr->mapped_resource->allowed_access_rights
966 & SOS_VM_MAP_PROT_WRITE) )
967 || ( (new_access_rights & SOS_VM_MAP_PROT_EXEC)
968 && !(vr->mapped_resource->allowed_access_rights
969 & SOS_VM_MAP_PROT_EXEC) ) )
970 return -SOS_EPERM;
971 }
972
973 vr = vr->next_in_as;
974 }
975
976
977
978 vr = start_vr;
979 while (TRUE)
980 {
981
982
983
984 if (vr->start + vr->size <= uaddr)
985
986 break;
987
988
989 if (uaddr + size <= vr->start)
990
991 break;
992
993
994 if (vr->access_rights == new_access_rights)
995
996 {
997 vr = vr->next_in_as;
998 continue;
999 }
1000
1001
1002 if ((vr->start >= uaddr)
1003 && (vr->start + vr->size <= uaddr + size))
1004 {
1005
1006 as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED,
1007 vr->size, vr->access_rights,
1008 new_access_rights);
1009 vr->access_rights = new_access_rights;
1010
1011 if (vr->flags & SOS_VR_MAP_SHARED)
1012
1013
1014 sos_paging_set_prot_of_interval(vr->start, vr->size,
1015 new_access_rights);
1016 else
1017
1018 {
1019
1020
1021
1022
1023
1024 if (! (new_access_rights & SOS_VM_MAP_PROT_WRITE))
1025 sos_paging_set_prot_of_interval(vr->start, vr->size,
1026 new_access_rights);
1027 }
1028
1029 vr = vr->next_in_as;
1030 continue;
1031 }
1032
1033
1034 else if ( (vr->start < uaddr)
1035 && (vr->start + vr->size > uaddr + size) )
1036 {
1037
1038
1039
1040 SOS_ASSERT_FATAL(! used_preallocated_middle_vr);
1041 SOS_ASSERT_FATAL(! used_preallocated_right_vr);
1042 used_preallocated_middle_vr = TRUE;
1043 memcpy(preallocated_middle_vr, vr, sizeof(*vr));
1044 used_preallocated_right_vr = TRUE;
1045 memcpy(preallocated_right_vr, vr, sizeof(*vr));
1046
1047
1048 preallocated_middle_vr->start = uaddr;
1049 preallocated_middle_vr->size = size;
1050 preallocated_right_vr->start = uaddr + size;
1051 preallocated_right_vr->size = vr->start + vr->size
1052 - (uaddr + size);
1053 preallocated_middle_vr->offset_in_resource
1054 += uaddr - vr->start;
1055 preallocated_right_vr->offset_in_resource
1056 += uaddr + size - vr->start;
1057 vr->size = uaddr - vr->start;
1058
1059
1060 preallocated_middle_vr->access_rights = new_access_rights;
1061 as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED,
1062 size, vr->access_rights,
1063 new_access_rights);
1064
1065
1066 list_insert_after_named(as->list_vr, vr, preallocated_middle_vr,
1067 prev_in_as, next_in_as);
1068 list_insert_after_named(as->list_vr, preallocated_middle_vr,
1069 preallocated_right_vr,
1070 prev_in_as, next_in_as);
1071
1072 list_add_tail_named(vr->mapped_resource->list_vr,
1073 preallocated_middle_vr,
1074 prev_in_mapped_resource,
1075 next_in_mapped_resource);
1076 list_add_tail_named(vr->mapped_resource->list_vr,
1077 preallocated_right_vr,
1078 prev_in_mapped_resource,
1079 next_in_mapped_resource);
1080
1081
1082 if (!(preallocated_middle_vr->flags & SOS_VR_MAP_SHARED)
1083 && (new_access_rights & SOS_VM_MAP_PROT_WRITE))
1084
1085 sos_paging_prepare_COW(preallocated_middle_vr->start,
1086 preallocated_middle_vr->size);
1087 else
1088 sos_paging_set_prot_of_interval(preallocated_middle_vr->start,
1089 preallocated_middle_vr->size,
1090 new_access_rights);
1091
1092 if (preallocated_right_vr->ops && preallocated_right_vr->ops->ref)
1093 preallocated_right_vr->ops->ref(preallocated_right_vr);
1094 if (preallocated_middle_vr->ops && preallocated_middle_vr->ops->ref)
1095 preallocated_middle_vr->ops->ref(preallocated_middle_vr);
1096
1097
1098 break;
1099 }
1100
1101
1102 else if (uaddr <= vr->start)
1103 {
1104
1105 sos_uoffset_t offset_in_region = uaddr + size - vr->start;
1106
1107
1108 SOS_ASSERT_FATAL(! used_preallocated_middle_vr);
1109 used_preallocated_middle_vr = TRUE;
1110 memcpy(preallocated_middle_vr, vr, sizeof(*vr));
1111
1112
1113 preallocated_middle_vr->start += offset_in_region;
1114 preallocated_middle_vr->size -= offset_in_region;
1115 vr->size = offset_in_region;
1116 preallocated_middle_vr->offset_in_resource += offset_in_region;
1117
1118
1119 as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED,
1120 vr->size,
1121 vr->access_rights,
1122 new_access_rights);
1123 vr->access_rights = new_access_rights;
1124
1125
1126 list_insert_after_named(as->list_vr, vr,
1127 preallocated_middle_vr,
1128 prev_in_as, next_in_as);
1129 list_add_tail_named(vr->mapped_resource->list_vr,
1130 preallocated_middle_vr,
1131 prev_in_mapped_resource,
1132 next_in_mapped_resource);
1133
1134
1135 if (!(vr->flags & SOS_VR_MAP_SHARED)
1136 && (new_access_rights & SOS_VM_MAP_PROT_WRITE))
1137
1138 sos_paging_prepare_COW(vr->start, vr->size);
1139 else
1140 sos_paging_set_prot_of_interval(vr->start, vr->size,
1141 new_access_rights);
1142
1143 if (preallocated_middle_vr->ops && preallocated_middle_vr->ops->ref)
1144 preallocated_middle_vr->ops->ref(preallocated_middle_vr);
1145
1146
1147
1148 break;
1149 }
1150
1151
1152 else if (uaddr + size >= vr->start + vr->size)
1153 {
1154
1155 sos_uoffset_t offset_in_region = uaddr - vr->start;
1156
1157
1158 SOS_ASSERT_FATAL(! used_preallocated_right_vr);
1159 used_preallocated_right_vr = TRUE;
1160 memcpy(preallocated_right_vr, vr, sizeof(*vr));
1161
1162
1163 preallocated_right_vr->start += offset_in_region;
1164 preallocated_right_vr->size -= offset_in_region;
1165 vr->size = offset_in_region;
1166 preallocated_right_vr->offset_in_resource += offset_in_region;
1167
1168
1169 as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED,
1170 preallocated_right_vr->size,
1171 vr->access_rights,
1172 new_access_rights);
1173 preallocated_right_vr->access_rights = new_access_rights;
1174
1175
1176 list_insert_after_named(as->list_vr, vr,
1177 preallocated_right_vr,
1178 prev_in_as, next_in_as);
1179 list_add_tail_named(vr->mapped_resource->list_vr,
1180 preallocated_right_vr,
1181 prev_in_mapped_resource,
1182 next_in_mapped_resource);
1183
1184
1185 if (!(preallocated_right_vr->flags & SOS_VR_MAP_SHARED)
1186 && (new_access_rights & SOS_VM_MAP_PROT_WRITE))
1187
1188 sos_paging_prepare_COW(preallocated_right_vr->start,
1189 preallocated_right_vr->size);
1190 else
1191 sos_paging_set_prot_of_interval(preallocated_right_vr->start,
1192 preallocated_right_vr->size,
1193 new_access_rights);
1194
1195 if (preallocated_right_vr->ops && preallocated_right_vr->ops->ref)
1196 preallocated_right_vr->ops->ref(preallocated_right_vr);
1197
1198 vr = vr->next_in_as;
1199 continue;
1200 }
1201
1202 sos_display_fatal_error("BUG");
1203 }
1204
1205 if (! used_preallocated_middle_vr)
1206 sos_kmem_vmm_free((sos_vaddr_t)preallocated_middle_vr);
1207 if (! used_preallocated_right_vr)
1208 sos_kmem_vmm_free((sos_vaddr_t)preallocated_right_vr);
1209
1210 return SOS_OK;
1211 }
1212
1213
1214 sos_ret_t
1215 sos_umem_vmm_resize(struct sos_umem_vmm_as * as,
1216 sos_uaddr_t old_uaddr, sos_size_t old_size,
1217 sos_uaddr_t *new_uaddr, sos_size_t new_size,
1218 sos_ui32_t flags)
1219 {
1220 sos_luoffset_t new_offset_in_resource;
1221 sos_bool_t must_move_vr = FALSE;
1222 struct sos_umem_vmm_vr *vr, *prev_vr, *next_vr;
1223
1224
1225 if (*new_uaddr < SOS_PAGING_BASE_USER_ADDRESS)
1226 return -SOS_EINVAL;
1227 if (*new_uaddr > SOS_PAGING_TOP_USER_ADDRESS - new_size)
1228 return -SOS_EINVAL;
1229
1230 old_uaddr = SOS_PAGE_ALIGN_INF(old_uaddr);
1231 old_size = SOS_PAGE_ALIGN_SUP(old_size);
1232 if (! SOS_IS_PAGE_ALIGNED(*new_uaddr))
1233 return -SOS_EINVAL;
1234 if (new_size <= 0)
1235 return -SOS_EINVAL;
1236 new_size = SOS_PAGE_ALIGN_SUP(new_size);
1237
1238
1239 vr = find_first_intersecting_vr(as, old_uaddr, old_size);
1240 if (! vr)
1241 return -SOS_EINVAL;
1242
1243
1244 if ( (vr->start > old_uaddr)
1245 || (vr->start + vr->size < old_uaddr + old_size) )
1246 return -SOS_EINVAL;
1247
1248
1249
1250 prev_vr = vr->prev_in_as;
1251 if (prev_vr->start >= vr->start)
1252 prev_vr = NULL;
1253 next_vr = vr->prev_in_as;
1254 if (next_vr->start <= vr->start)
1255 next_vr = NULL;
1256
1257
1258
1259
1260
1261
1262
1263 if ( (*new_uaddr < vr->start)
1264 && (vr->start - *new_uaddr > vr->offset_in_resource) )
1265 return -SOS_EINVAL;
1266
1267
1268 if (vr->start > *new_uaddr)
1269 new_offset_in_resource
1270 = vr->offset_in_resource
1271 - (vr->start - *new_uaddr);
1272 else
1273 new_offset_in_resource
1274 = vr->offset_in_resource
1275 + (*new_uaddr - vr->start);
1276
1277
1278
1279 if (prev_vr && (prev_vr->start + prev_vr->size > *new_uaddr))
1280 must_move_vr |= TRUE;
1281 if (next_vr && (next_vr->start < *new_uaddr + new_size))
1282 must_move_vr |= TRUE;
1283
1284
1285 if (*new_uaddr < SOS_PAGING_BASE_USER_ADDRESS)
1286 must_move_vr |= TRUE;
1287 if (*new_uaddr > SOS_PAGING_TOP_USER_ADDRESS - new_size)
1288 must_move_vr |= TRUE;
1289
1290
1291 if ( must_move_vr && !(flags & SOS_VR_REMAP_MAYMOVE) )
1292 return -SOS_EINVAL;
1293
1294
1295
1296 if (must_move_vr)
1297 {
1298 sos_uaddr_t uaddr, result_uaddr;
1299 sos_ret_t retval;
1300
1301 result_uaddr = *new_uaddr;
1302 retval = sos_umem_vmm_map(as, & result_uaddr, new_size,
1303 vr->access_rights,
1304 vr->flags | INTERNAL_MAP_CALLED_FROM_MREMAP,
1305 vr->mapped_resource,
1306 new_offset_in_resource);
1307 if (SOS_OK != retval)
1308 return retval;
1309
1310
1311 for (uaddr = vr->start ;
1312 uaddr < vr->start + vr->size ;
1313 uaddr += SOS_PAGE_SIZE)
1314 {
1315 sos_paddr_t paddr;
1316 sos_ui32_t prot;
1317 sos_uaddr_t vaddr;
1318
1319 if (uaddr < *new_uaddr)
1320 continue;
1321 if (uaddr > *new_uaddr + new_size)
1322 continue;
1323
1324
1325
1326 if (vr->start >= *new_uaddr)
1327 vaddr = result_uaddr
1328 + (uaddr - vr->start)
1329 + (vr->start - *new_uaddr);
1330 else
1331 vaddr = result_uaddr
1332 + (uaddr - vr->start)
1333 - (*new_uaddr - vr->start);
1334
1335 paddr = sos_paging_get_paddr(uaddr);
1336 if (! paddr)
1337
1338 continue;
1339
1340 prot = sos_paging_get_prot(uaddr);
1341 SOS_ASSERT_FATAL(prot);
1342
1343
1344 retval = sos_paging_map(paddr, vaddr, TRUE, prot);
1345 if (SOS_OK != retval)
1346 {
1347 sos_umem_vmm_unmap(as, result_uaddr, new_size);
1348 return retval;
1349 }
1350 }
1351
1352 retval = sos_umem_vmm_unmap(as, vr->start, vr->size);
1353 if (SOS_OK != retval)
1354 {
1355 sos_umem_vmm_unmap(as, result_uaddr, new_size);
1356 return retval;
1357 }
1358
1359 *new_uaddr = result_uaddr;
1360 return retval;
1361 }
1362
1363
1364
1365
1366 if (*new_uaddr + new_size < vr->start + vr->size)
1367 sos_umem_vmm_unmap(as, *new_uaddr + new_size,
1368 vr->start + vr->size - (*new_uaddr + new_size));
1369 else
1370 {
1371 as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED,
1372 *new_uaddr + new_size
1373 - (vr->start + vr->size),
1374 0, vr->access_rights);
1375 vr->size += *new_uaddr + new_size - (vr->start + vr->size);
1376 }
1377
1378 if (*new_uaddr > vr->start)
1379 sos_umem_vmm_unmap(as, vr->start, *new_uaddr - vr->start);
1380 else
1381 {
1382 as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED,
1383 vr->start - *new_uaddr,
1384 0, vr->access_rights);
1385 vr->size += vr->start - *new_uaddr;
1386 vr->start = *new_uaddr;
1387 vr->offset_in_resource = new_offset_in_resource;
1388 }
1389
1390 SOS_ASSERT_FATAL(vr->start == *new_uaddr);
1391 SOS_ASSERT_FATAL(vr->size == new_size);
1392 SOS_ASSERT_FATAL(vr->offset_in_resource == new_offset_in_resource);
1393
1394 return SOS_OK;
1395 }
1396
1397
/**
 * Page-fault resolution entry point: called when the current thread
 * touched user address uaddr and the MMU raised a fault.
 *
 * Resolution order matters: 1) reject faults outside any process/AS/VR,
 * 2) reject write faults on non-writable VRs, 3) try to resolve a
 * copy-on-write fault on private mappings, 4) otherwise ask the VR's
 * backing resource to page the data in.
 *
 * @param uaddr        faulting user-space address
 * @param write_access TRUE when the faulting access was a write
 * @param user_access  TRUE when the fault came from user-mode code.
 *                     NOTE(review): currently unused here — no per-VR
 *                     user/kernel check is performed; confirm intended.
 * @return SOS_OK when the fault was resolved, -SOS_EFAULT otherwise
 */
sos_ret_t sos_umem_vmm_try_resolve_page_fault(sos_uaddr_t uaddr,
					      sos_bool_t write_access,
					      sos_bool_t user_access)
{
  struct sos_process *process = sos_thread_get_current()->process;
  struct sos_umem_vmm_as *as;
  struct sos_umem_vmm_vr *vr;

  /* Fault raised outside any user process context: cannot be solved */
  if (! process)
    return -SOS_EFAULT;

  as = sos_process_get_address_space(process);
  if (! as)
    return -SOS_EFAULT;

  /* Locate the VR covering the faulting address (an interval of 1 byte
     is enough to test intersection) */
  vr = find_first_intersecting_vr(as, uaddr, 1);
  if (! vr)
    return -SOS_EFAULT;

  /* Write fault on a region that is not mapped writable: invalid.
     (Read permission is not checked here — presumably page_in rejects
     reads on unreadable regions; TODO confirm) */
  if (write_access && !(vr->access_rights & SOS_VM_MAP_PROT_WRITE))
    return -SOS_EFAULT;

  /* Private writable mapping: first try to resolve the fault as a
     copy-on-write, before involving the backing resource */
  if (write_access && !(vr->flags & SOS_VR_MAP_SHARED))
    {
      if (SOS_OK == sos_paging_try_resolve_COW(uaddr))
	{
	  as->pgflt_cow ++;
	  return SOS_OK;
	}
    }

  /* Ask the mapped resource to bring the page in */
  if (SOS_OK != vr->ops->page_in(vr, uaddr, write_access))
    {
      as->pgflt_invalid ++;
      return -SOS_EFAULT;
    }

  /* A physical page was mapped in: account it */
  as->phys_total += SOS_PAGE_SIZE;
  as->pgflt_page_in ++;

  /* Private mappings: mark the freshly mapped page copy-on-write so a
     future write gets a private copy */
  if (!(vr->flags & SOS_VR_MAP_SHARED))
    {
      sos_paging_prepare_COW(SOS_PAGE_ALIGN_INF(uaddr),
			     SOS_PAGE_SIZE);
    }

  return SOS_OK;
}
1450
1451
1452 sos_ret_t
1453 sos_umem_vmm_init_heap(struct sos_umem_vmm_as * as,
1454 sos_uaddr_t heap_start)
1455 {
1456 SOS_ASSERT_FATAL(! as->heap_start);
1457
1458 as->heap_start = heap_start;
1459 as->heap_size = 0;
1460 return SOS_OK;
1461 }
1462
1463
/**
 * brk()/sbrk() backend: move the top of the process heap to
 * new_top_uaddr (rounded up to a page boundary).
 *
 * @param as           the address space (heap must be initialized)
 * @param new_top_uaddr requested new heap top; NULL is a pure query
 * @return the new heap top on success, the current top for a query,
 *         (sos_uaddr_t)NULL on failure
 */
sos_uaddr_t
sos_umem_vmm_brk(struct sos_umem_vmm_as * as,
		 sos_uaddr_t new_top_uaddr)
{
  sos_uaddr_t new_start;
  sos_size_t new_size;

  /* sos_umem_vmm_init_heap() must have been called first */
  SOS_ASSERT_FATAL(as->heap_start);

  /* NULL request: simply report the current heap top */
  if (! new_top_uaddr)
    return as->heap_start + as->heap_size;

  /* No change requested */
  if (new_top_uaddr == as->heap_start + as->heap_size)
    return as->heap_start + as->heap_size;

  /* Refuse to move the top below the heap base */
  if (new_top_uaddr < as->heap_start)
    return (sos_uaddr_t)NULL;

  new_top_uaddr = SOS_PAGE_ALIGN_SUP(new_top_uaddr);
  new_start = as->heap_start;
  new_size = new_top_uaddr - as->heap_start;

  /* First-ever growth: create the heap mapping, backed by the zero
     device (demand-zero memory) */
  if (! as->heap_size)
    {
      if (SOS_OK != sos_dev_zero_map(as, & as->heap_start,
				     new_size,
				     SOS_VM_MAP_PROT_READ
				     | SOS_VM_MAP_PROT_WRITE,
				     0 ))
	return (sos_uaddr_t)NULL;

      as->heap_size = new_size;
      return as->heap_start + as->heap_size;
    }

  /* Shrink back to the heap base: remove the whole heap mapping
     (new_size is presumably unsigned, so "<= 0" means "== 0") */
  if (new_size <= 0)
    {
      if (SOS_OK != sos_umem_vmm_unmap(as,
				       as->heap_start, as->heap_size))
	return (sos_uaddr_t)NULL;
    }
  else
    {
      /* Grow or shrink the existing heap mapping in place */
      if (SOS_OK != sos_umem_vmm_resize(as,
					as->heap_start, as->heap_size,
					& new_start, new_size,
					0))
	return (sos_uaddr_t)NULL;
    }

  /* The heap mapping is not expected to move */
  SOS_ASSERT_FATAL(new_start == as->heap_start);
  as->heap_size = new_size;
  return new_top_uaddr;
}
1519
1520
1521 static struct sos_umem_vmm_vr *
1522 find_enclosing_or_next_vr(struct sos_umem_vmm_as * as,
1523 sos_uaddr_t uaddr)
1524 {
1525 struct sos_umem_vmm_vr *vr;
1526 int nb_vr;
1527
1528 if (uaddr < SOS_PAGING_BASE_USER_ADDRESS)
1529 return NULL;
1530 if (uaddr > SOS_PAGING_TOP_USER_ADDRESS)
1531 return NULL;
1532
1533 list_foreach_named(as->list_vr, vr, nb_vr, prev_in_as, next_in_as)
1534 {
1535
1536
1537 if (uaddr <= vr->start + (vr->size - 1))
1538 return vr;
1539 }
1540
1541 return NULL;
1542 }
1543
1544
1545 static struct sos_umem_vmm_vr *
1546 find_first_intersecting_vr(struct sos_umem_vmm_as * as,
1547 sos_uaddr_t start_uaddr, sos_size_t size)
1548 {
1549 struct sos_umem_vmm_vr * vr;
1550 vr = find_enclosing_or_next_vr(as, start_uaddr);
1551 if (! vr)
1552 return NULL;
1553
1554 if (start_uaddr + size <= vr->start)
1555 return NULL;
1556
1557 return vr;
1558 }
1559
1560
/**
 * Find the start address of a free interval of `size' bytes in user
 * space, beginning the search at hint_uaddr (clamped to the base of
 * the user area).  The VR list is walked at most one full turn.
 *
 * @return a suitable start address, or (sos_uaddr_t)NULL when no
 *         large-enough hole exists
 */
static sos_uaddr_t
find_first_free_interval(struct sos_umem_vmm_as * as,
			 sos_uaddr_t hint_uaddr, sos_size_t size)
{
  struct sos_umem_vmm_vr * initial_vr, * vr;

  /* Clamp the hint into the user address window */
  if (hint_uaddr < SOS_PAGING_BASE_USER_ADDRESS)
    hint_uaddr = SOS_PAGING_BASE_USER_ADDRESS;

  /* Interval starting at the hint would overflow the top of user space */
  if (hint_uaddr > SOS_PAGING_TOP_USER_ADDRESS - size + 1)
    return (sos_uaddr_t)NULL;

  initial_vr = vr = find_enclosing_or_next_vr(as, hint_uaddr);
  if (! vr)
    /* No VR at or above the hint: everything up there is free */
    return hint_uaddr;

  /* Walk the circular next_in_as list until we come back to the
     starting VR */
  do
    {
      /* Enough room between the current hint and this VR ? */
      if (hint_uaddr + size <= vr->start)
	return hint_uaddr;

      /* A next VR starting below the hint means the circular list
	 wrapped around: vr is the highest region */
      if (vr->next_in_as->start >= hint_uaddr)
	/* Next VR is higher: continue right after the current VR */
	hint_uaddr = vr->start + vr->size;
      else
	{
	  /* Wrapped: try the space between the hint and the top of
	     user space.
	     NOTE(review): if hint_uaddr still lies inside this
	     (highest) VR, the returned address could overlap it —
	     TODO confirm callers never reach this case */
	  if (hint_uaddr <= SOS_PAGING_TOP_USER_ADDRESS - size)
	    return hint_uaddr;

	  /* Does not fit at the top: restart from the bottom */
	  hint_uaddr = SOS_PAGING_BASE_USER_ADDRESS;
	}

      vr = vr->next_in_as;
    }
  while (vr != initial_vr);

  /* Went round the whole list without finding a hole */
  return (sos_uaddr_t)NULL;
}
1612
1613
1614 static void
1615 as_account_change_of_vr_protection(struct sos_umem_vmm_as * as,
1616 sos_bool_t is_shared,
1617 sos_size_t size,
1618 sos_ui32_t prev_access_rights,
1619 sos_ui32_t new_access_rights)
1620 {
1621 if (prev_access_rights == new_access_rights)
1622 return;
1623
1624 #define _UPDATE_VMSTAT(field,is_increment) \
1625 ({ if (is_increment > 0) \
1626 as->field += size; \
1627 else \
1628 { SOS_ASSERT_FATAL(as->field >= size); as->field -= size; } })
1629 #define UPDATE_VMSTAT(field,is_increment) \
1630 ({ if (is_shared) _UPDATE_VMSTAT(vm_shrd.field, is_increment); \
1631 _UPDATE_VMSTAT(vm_total.field, is_increment); \
1632 SOS_ASSERT_FATAL(as->vm_total.field >= as->vm_shrd.field); })
1633
1634 if ( (new_access_rights & SOS_VM_MAP_PROT_WRITE)
1635 && !(prev_access_rights & SOS_VM_MAP_PROT_WRITE))
1636 {
1637 UPDATE_VMSTAT(rw, +1);
1638 if (prev_access_rights & SOS_VM_MAP_PROT_READ)
1639 UPDATE_VMSTAT(ro, -1);
1640 }
1641 else if ( !(new_access_rights & SOS_VM_MAP_PROT_WRITE)
1642 && (prev_access_rights & SOS_VM_MAP_PROT_WRITE))
1643 {
1644 if (new_access_rights & SOS_VM_MAP_PROT_READ)
1645 UPDATE_VMSTAT(ro, +1);
1646 UPDATE_VMSTAT(rw, -1);
1647 }
1648 else if (new_access_rights & SOS_VM_MAP_PROT_READ)
1649 UPDATE_VMSTAT(ro, +1);
1650 else if (!(new_access_rights & SOS_VM_MAP_PROT_READ))
1651 UPDATE_VMSTAT(ro, -1);
1652
1653 if ( (new_access_rights & SOS_VM_MAP_PROT_EXEC)
1654 && !(prev_access_rights & SOS_VM_MAP_PROT_EXEC))
1655 {
1656 UPDATE_VMSTAT(code, +1);
1657 }
1658 else if ( !(new_access_rights & SOS_VM_MAP_PROT_EXEC)
1659 && (prev_access_rights & SOS_VM_MAP_PROT_EXEC))
1660 {
1661 UPDATE_VMSTAT(code, -1);
1662 }
1663
1664 if (new_access_rights && !prev_access_rights)
1665 UPDATE_VMSTAT(overall, +1);
1666 else if (!new_access_rights && prev_access_rights)
1667 UPDATE_VMSTAT(overall, -1);
1668
1669 }