001
002
003
004
005
006
007
008
009
010
011
012
013
014
015
016
017
018
019 #include <sos/assert.h>
020 #include <sos/list.h>
021 #include <sos/physmem.h>
022 #include <sos/kmem_slab.h>
023 #include <drivers/bochs.h>
024 #include <hwcore/mm_context.h>
025 #include <hwcore/paging.h>
026 #include <drivers/zero.h>
027
028 #include "umem_vmm.h"
029
030
/*
 * A user address space: the set of virtual regions (VRs) of one
 * process, plus its MMU configuration and accounting counters.
 */
struct sos_umem_vmm_as
{
  /* Process owning this address space. */
  struct sos_process * process;

  /* Hardware MMU configuration (page directory/tables) of this AS. */
  struct sos_mm_context * mm_context;

  /* Doubly-linked (circular) list of the VRs of this AS, linked
     through prev_in_as/next_in_as.  Kept sorted by start address —
     see the insertion logic in sos_umem_vmm_map(). */
  struct sos_umem_vmm_vr * list_vr;

  /* User heap managed by the brk-style interface (see
     sos_umem_vmm_init_heap / sos_umem_vmm_brk below). */
  sos_uaddr_t heap_start;
  sos_size_t heap_size;

  /* Accounting: physical memory currently mapped into this AS. */
  sos_size_t phys_total;
  /* Accounting: virtual memory, overall and per protection class. */
  struct vm_usage
  {
    sos_size_t overall;
    sos_size_t ro, rw, code ;
  } vm_total, vm_shrd; /* all mappings / shared mappings only */

  /* Page fault statistics, updated by
     sos_umem_vmm_try_resolve_page_fault(). */
  sos_size_t pgflt_cow;      /* resolved copy-on-write faults */
  sos_size_t pgflt_page_in;  /* resolved demand-paging faults */
  sos_size_t pgflt_invalid;  /* unresolvable (invalid) faults */
};
059
060
/*
 * A virtual region (VR): a contiguous interval of user virtual
 * addresses mapping part of a resource with uniform access rights
 * and flags.
 */
struct sos_umem_vmm_vr
{
  /* The address space this VR belongs to. */
  struct sos_umem_vmm_as *address_space;

  /* Interval covered: [start, start + size[.  Both are page-aligned
     (enforced by sos_umem_vmm_map()). */
  sos_uaddr_t start;
  sos_size_t size;

  /* SOS_VM_MAP_PROT_READ/WRITE/EXEC bitmask. */
  sos_ui32_t access_rights;

  /* SOS_VR_MAP_SHARED / SOS_VR_MAP_FIXED bitmask. */
  sos_ui32_t flags;

  /* Callbacks of the mapping (ref/unref/unmap/page_in...), installed
     once via sos_umem_vmm_set_ops_of_vr(). */
  struct sos_umem_vmm_vr_ops *ops;

  /* The resource backing this VR, and the offset inside it that
     corresponds to 'start'. */
  struct sos_umem_vmm_mapped_resource *mapped_resource;
  sos_luoffset_t offset_in_resource;

  /* Links in the address space's VR list (as->list_vr). */
  struct sos_umem_vmm_vr *prev_in_as, *next_in_as;

  /* Links in the resource's list of VRs mapping it
     (mapped_resource->list_vr). */
  struct sos_umem_vmm_vr *prev_in_mapped_resource, *next_in_mapped_resource;
};
097
098
099
100
101
/* Slab caches for the AS and VR structures above; created once in
   sos_umem_vmm_subsystem_setup().  Objects allocated from these caches
   must be released with sos_kmem_cache_free(). */
static struct sos_kslab_cache * cache_of_as;
static struct sos_kslab_cache * cache_of_vr;
104
105
106
107 void sos_dump_as(const struct sos_umem_vmm_as * as, const char *str)
108 {
109 struct sos_umem_vmm_vr *vr;
110 int nb_vr;
111
112 sos_bochs_printf("AS %p - %s:\n", as, str);
113 sos_bochs_printf(" physical mem: %x\n",
114 as->phys_total);
115 sos_bochs_printf(" VM (all/ro+rw/exec) tot:%x/%x+%x/%x shrd:%x/%x+%x/%x\n",
116 as->vm_total.overall,
117 as->vm_total.ro, as->vm_total.rw, as->vm_total.code,
118 as->vm_shrd.overall,
119 as->vm_shrd.ro, as->vm_shrd.rw, as->vm_shrd.code);
120 sos_bochs_printf(" pgflt cow=%d pgin=%d inv=%d\n",
121 as->pgflt_cow, as->pgflt_page_in, as->pgflt_invalid);
122 list_foreach_named(as->list_vr, vr, nb_vr, prev_in_as, next_in_as)
123 {
124 sos_bochs_printf(" VR[%d]=%x: [%x,%x[ (sz=%x) mr=(%x)+%llx %c%c%c fl=%x\n",
125 nb_vr, (unsigned)vr,
126 vr->start, vr->start + vr->size, vr->size,
127 (unsigned)vr->mapped_resource,
128 vr->offset_in_resource,
129 (vr->access_rights & SOS_VM_MAP_PROT_READ)?'r':'-',
130 (vr->access_rights & SOS_VM_MAP_PROT_WRITE)?'w':'-',
131 (vr->access_rights & SOS_VM_MAP_PROT_EXEC)?'x':'-',
132 (unsigned)vr->flags);
133 }
134 sos_bochs_printf("FIN (%s)\n", str);
135 }
136
137
138
139
140
141
142 sos_paddr_t sos_zero_page = 0 ;
143
144
145
146
147
148
149
150
151
/* Return the VR of 'as' that encloses 'uaddr', or, failing that, the
   first VR starting after 'uaddr'; NULL when there is none.
   (Definition appears later in this file.) */
static struct sos_umem_vmm_vr *
find_enclosing_or_next_vr(struct sos_umem_vmm_as * as,
                          sos_uaddr_t uaddr);

/* Return the first VR of 'as' intersecting the interval
   [start_uaddr, start_uaddr + size[, or NULL if none does. */
static struct sos_umem_vmm_vr *
find_first_intersecting_vr(struct sos_umem_vmm_as * as,
                           sos_uaddr_t start_uaddr, sos_size_t size);

/* Find a free interval of 'size' bytes in 'as', trying 'hint_uaddr'
   first.  Returns its start address, or 0 (NULL) on failure — see the
   -SOS_ENOMEM path in sos_umem_vmm_map(). */
static sos_uaddr_t
find_first_free_interval(struct sos_umem_vmm_as * as,
                         sos_uaddr_t hint_uaddr, sos_size_t size);

/* Update the vm_total/vm_shrd accounting of 'as' after 'size' bytes
   of (shared iff 'is_shared') mapping change protection from
   'prev_access_rights' to 'new_access_rights' (0 means unmapped). */
static void
as_account_change_of_vr_protection(struct sos_umem_vmm_as * as,
                                   sos_bool_t is_shared,
                                   sos_size_t size,
                                   sos_ui32_t prev_access_rights,
                                   sos_ui32_t new_access_rights);
186
187
188 sos_ret_t sos_umem_vmm_subsystem_setup()
189 {
190 sos_vaddr_t vaddr_zero_page;
191
192
193
194 vaddr_zero_page = sos_kmem_vmm_alloc(1, SOS_KMEM_VMM_MAP);
195 if (vaddr_zero_page == (sos_vaddr_t)NULL)
196 return -SOS_ENOMEM;
197 memset((void*)vaddr_zero_page, 0x0, SOS_PAGE_SIZE);
198
199
200 sos_zero_page = sos_paging_get_paddr(vaddr_zero_page);
201 SOS_ASSERT_FATAL(NULL != (void*)sos_zero_page);
202 sos_physmem_ref_physpage_at(sos_zero_page);
203
204
205
206 sos_paging_unmap(vaddr_zero_page);
207
208
209 cache_of_as
210 = sos_kmem_cache_create("Address space structures",
211 sizeof(struct sos_umem_vmm_as),
212 1, 0,
213 SOS_KSLAB_CREATE_MAP
214 | SOS_KSLAB_CREATE_ZERO);
215 if (! cache_of_as)
216 {
217 sos_physmem_unref_physpage(sos_zero_page);
218 return -SOS_ENOMEM;
219 }
220
221 cache_of_vr
222 = sos_kmem_cache_create("Virtual Region structures",
223 sizeof(struct sos_umem_vmm_vr),
224 1, 0,
225 SOS_KSLAB_CREATE_MAP
226 | SOS_KSLAB_CREATE_ZERO);
227 if (! cache_of_vr)
228 {
229 sos_physmem_unref_physpage(sos_zero_page);
230 sos_kmem_cache_destroy(cache_of_as);
231 return -SOS_ENOMEM;
232 }
233
234 return SOS_OK;
235 }
236
237
238 struct sos_umem_vmm_as *
239 sos_umem_vmm_create_empty_as(struct sos_process *owner)
240 {
241 struct sos_umem_vmm_as * as
242 = (struct sos_umem_vmm_as *) sos_kmem_cache_alloc(cache_of_as, 0);
243 if (! as)
244 return NULL;
245
246 as->mm_context = sos_mm_context_create();
247 if (NULL == as->mm_context)
248 {
249
250 sos_kmem_cache_free((sos_vaddr_t)as);
251 return NULL;
252 }
253
254 as->process = owner;
255 return as;
256 }
257
258
/*
 * Fork-style duplication of the current thread's address space for
 * process 'owner': clone every VR, switch private writable regions to
 * copy-on-write, and duplicate the MMU context.  Returns the new AS,
 * or NULL on failure (partial work is undone).
 */
struct sos_umem_vmm_as *
sos_umem_vmm_duplicate_current_thread_as(struct sos_process *owner)
{
  __label__ undo_creation;
  struct sos_umem_vmm_as * my_as;
  struct sos_umem_vmm_vr * model_vr;
  int nb_vr;

  struct sos_umem_vmm_as * new_as
    = (struct sos_umem_vmm_as *) sos_kmem_cache_alloc(cache_of_as, 0);
  if (! new_as)
    return NULL;

  my_as = sos_process_get_address_space(sos_thread_get_current()->process);
  new_as->process = owner;
  list_init_named(new_as->list_vr, prev_in_as, next_in_as);

  /* Make sure the source AS is configured on the MMU while we walk it
     and mark its pages COW. */
  SOS_ASSERT_FATAL(SOS_OK
                   == sos_thread_prepare_user_space_access(my_as,
                                                           (sos_vaddr_t)
                                                           NULL));

  /* Clone each VR of the model address space. */
  list_foreach_named(my_as->list_vr, model_vr, nb_vr, prev_in_as, next_in_as)
    {
      struct sos_umem_vmm_vr * vr;

      /* Private writable mappings become copy-on-write (affects both
         the parent and the child, which will share the frames until
         one of them writes). */
      if ( !(model_vr->flags & SOS_VR_MAP_SHARED)
           && (model_vr->access_rights & SOS_VM_MAP_PROT_WRITE) )
        {
          SOS_ASSERT_FATAL(SOS_OK
                           == sos_paging_prepare_COW(model_vr->start,
                                                     model_vr->size));
        }

      /* Shallow-copy the VR and re-parent it to the new AS. */
      vr = (struct sos_umem_vmm_vr *) sos_kmem_cache_alloc(cache_of_vr, 0);
      if (! vr)
        goto undo_creation;
      memcpy(vr, model_vr, sizeof(*vr));
      vr->address_space = new_as;

      /* Take a reference on behalf of the new mapping. */
      if (vr->ops && vr->ops->ref)
        vr->ops->ref(vr);

      /* Insert the clone in the new AS's VR list... */
      list_add_tail_named(new_as->list_vr, vr, prev_in_as, next_in_as);

      /* ...and register it with the backing resource too. */
      list_add_tail_named(model_vr->mapped_resource->list_vr, vr,
                          prev_in_mapped_resource,
                          next_in_mapped_resource);
    }

  /* Duplicate the MMU configuration (page tables) of the source AS. */
  new_as->mm_context = sos_mm_context_duplicate(my_as->mm_context);
  if (NULL == new_as->mm_context)
    goto undo_creation;

  /* Copy the heap bounds and the accounting counters verbatim. */
  new_as->heap_start = my_as->heap_start;
  new_as->heap_size = my_as->heap_size;
  new_as->phys_total = my_as->phys_total;
  memcpy(& new_as->vm_total, & my_as->vm_total, sizeof(struct vm_usage));
  memcpy(& new_as->vm_shrd, & my_as->vm_shrd, sizeof(struct vm_usage));
  SOS_ASSERT_FATAL(SOS_OK == sos_thread_end_user_space_access());
  return new_as;

  /* Failure path: restore the MMU setup and tear down the partially
     built copy (delete_as handles a partial VR list). */
 undo_creation:
  SOS_ASSERT_FATAL(SOS_OK == sos_thread_end_user_space_access());
  sos_umem_vmm_delete_as(new_as);
  return NULL;
}
342
343
344 sos_ret_t
345 sos_umem_vmm_delete_as(struct sos_umem_vmm_as * as)
346 {
347 while(! list_is_empty_named(as->list_vr, prev_in_as, next_in_as))
348 {
349 struct sos_umem_vmm_vr * vr;
350 vr = list_get_head_named(as->list_vr, prev_in_as, next_in_as);
351
352
353 list_pop_head_named(as->list_vr, prev_in_as, next_in_as);
354 list_delete_named(vr->mapped_resource->list_vr, vr,
355 prev_in_mapped_resource,
356 next_in_mapped_resource);
357
358
359
360 if (vr->ops)
361 {
362 if (vr->ops->unmap)
363 vr->ops->unmap(vr, vr->start, vr->size);
364 if (vr->ops->unref)
365 vr->ops->unref(vr);
366 }
367
368 sos_kmem_cache_free((sos_vaddr_t)vr);
369 }
370
371
372 if (as->mm_context)
373 sos_mm_context_unref(as->mm_context);
374
375
376 sos_kmem_cache_free((sos_vaddr_t)as);
377
378 return SOS_OK;
379 }
380
381
/* Accessor: the process owning address space 'as'. */
struct sos_process *
sos_umem_vmm_get_process(struct sos_umem_vmm_as * as)
{
  return as->process;
}
387
388
/* Accessor: the MMU context (page tables) of address space 'as'. */
struct sos_mm_context *
sos_umem_vmm_get_mm_context(struct sos_umem_vmm_as * as)
{
  return as->mm_context;
}
394
395
396 struct sos_umem_vmm_vr *
397 sos_umem_vmm_get_vr_at_address(struct sos_umem_vmm_as * as,
398 sos_uaddr_t uaddr)
399 {
400 struct sos_umem_vmm_vr * vr;
401 vr = find_enclosing_or_next_vr(as, uaddr);
402 if (! vr)
403 return NULL;
404
405
406 if (uaddr < vr->start)
407 return NULL;
408
409 return vr;
410 }
411
412
/* Accessor: the address space that VR 'vr' belongs to. */
struct sos_umem_vmm_as *
sos_umem_vmm_get_as_of_vr(struct sos_umem_vmm_vr * vr)
{
  return vr->address_space;
}
418
419
/* Accessor: the operation table of VR 'vr' (may be NULL until
   sos_umem_vmm_set_ops_of_vr() is called). */
struct sos_umem_vmm_vr_ops *
sos_umem_vmm_get_ops_of_vr(struct sos_umem_vmm_vr * vr)
{
  return vr->ops;
}
425
426
/* Accessor: the SOS_VM_MAP_PROT_* access rights of VR 'vr'. */
sos_ui32_t sos_umem_vmm_get_prot_of_vr(struct sos_umem_vmm_vr * vr)
{
  return vr->access_rights;
}
431
432
/* Accessor: the SOS_VR_MAP_* flags of VR 'vr'. */
sos_ui32_t sos_umem_vmm_get_flags_of_vr(struct sos_umem_vmm_vr * vr)
{
  return vr->flags;
}
437
438
/* Accessor: the resource backing VR 'vr'. */
struct sos_umem_vmm_mapped_resource *
sos_umem_vmm_get_mapped_resource_of_vr(struct sos_umem_vmm_vr * vr)
{
  return vr->mapped_resource;
}
444
445
/* Accessor: the first user address covered by VR 'vr'. */
sos_uaddr_t sos_umem_vmm_get_start_of_vr(struct sos_umem_vmm_vr * vr)
{
  return vr->start;
}
450
451
/* Accessor: the size in bytes of VR 'vr'. */
sos_size_t sos_umem_vmm_get_size_of_vr(struct sos_umem_vmm_vr * vr)
{
  return vr->size;
}
456
457
/* Accessor: the offset inside the backing resource corresponding to
   the start of VR 'vr'. */
sos_luoffset_t sos_umem_vmm_get_offset_in_resource(struct sos_umem_vmm_vr * vr)
{
  return vr->offset_in_resource;
}
462
463
464 sos_ret_t
465 sos_umem_vmm_set_ops_of_vr(struct sos_umem_vmm_vr * vr,
466 struct sos_umem_vmm_vr_ops * ops)
467 {
468
469 SOS_ASSERT_FATAL(NULL == vr->ops);
470
471 vr->ops = ops;
472 return SOS_OK;
473 }
474
475
476
477
478
479
480 #define INTERNAL_MAP_CALLED_FROM_MREMAP (1 << 8)
481
482 sos_ret_t
483 sos_umem_vmm_map(struct sos_umem_vmm_as * as,
484 sos_uaddr_t * uaddr, sos_size_t size,
485 sos_ui32_t access_rights,
486 sos_ui32_t flags,
487 struct sos_umem_vmm_mapped_resource * resource,
488 sos_luoffset_t offset_in_resource)
489 {
490 __label__ return_mmap;
491 sos_uaddr_t hint_uaddr;
492 struct sos_umem_vmm_vr *prev_vr, *next_vr, *vr, *preallocated_vr;
493 sos_bool_t merge_with_preceding, merge_with_next, used_preallocated_vr;
494 sos_bool_t internal_map_called_from_mremap
495 = (flags & INTERNAL_MAP_CALLED_FROM_MREMAP);
496
497 sos_ret_t retval = SOS_OK;
498 used_preallocated_vr = FALSE;
499 hint_uaddr = *uaddr;
500
501
502 *uaddr = (sos_vaddr_t)NULL;
503
504 if (! resource)
505 return -SOS_EINVAL;
506 if (! resource->mmap)
507 return -SOS_EPERM;
508
509 if (! SOS_IS_PAGE_ALIGNED(hint_uaddr))
510 return -SOS_EINVAL;
511
512 if (size <= 0)
513 return -SOS_EINVAL;
514 size = SOS_PAGE_ALIGN_SUP(size);
515
516 if (flags & SOS_VR_MAP_SHARED)
517 {
518
519 if ( ( (access_rights & SOS_VM_MAP_PROT_READ)
520 && !(resource->allowed_access_rights & SOS_VM_MAP_PROT_READ) )
521 || ( (access_rights & SOS_VM_MAP_PROT_WRITE)
522 && !(resource->allowed_access_rights & SOS_VM_MAP_PROT_WRITE) )
523 || ( (access_rights & SOS_VM_MAP_PROT_EXEC)
524 && !(resource->allowed_access_rights & SOS_VM_MAP_PROT_EXEC)) )
525 return -SOS_EPERM;
526 }
527
528
529 if ( !internal_map_called_from_mremap
530 && ( resource->flags & SOS_MAPPED_RESOURCE_ANONYMOUS ) )
531
532 {
533
534 }
535
536
537 else if (offset_in_resource + size <= offset_in_resource)
538 return -SOS_EINVAL;
539
540
541 access_rights &= (SOS_VM_MAP_PROT_READ
542 | SOS_VM_MAP_PROT_WRITE
543 | SOS_VM_MAP_PROT_EXEC);
544 flags &= (SOS_VR_MAP_SHARED
545 | SOS_VR_MAP_FIXED);
546
547
548
549
550 preallocated_vr
551 = (struct sos_umem_vmm_vr *)sos_kmem_cache_alloc(cache_of_vr, 0);
552 if (! preallocated_vr)
553 return -SOS_ENOMEM;
554
555
556 if (flags & SOS_VR_MAP_FIXED)
557 {
558
559
560
561
562
563 if (hint_uaddr < SOS_PAGING_BASE_USER_ADDRESS)
564 { retval = -SOS_EINVAL; goto return_mmap; }
565 if (hint_uaddr > SOS_PAGING_TOP_USER_ADDRESS - size)
566 { retval = -SOS_EINVAL; goto return_mmap; }
567
568
569 retval = sos_umem_vmm_unmap(as, hint_uaddr, size);
570 if (SOS_OK != retval)
571 { goto return_mmap; }
572 }
573 else
574 {
575
576
577
578
579
580 hint_uaddr = find_first_free_interval(as, hint_uaddr, size);
581 if (! hint_uaddr)
582 { retval = -SOS_ENOMEM; goto return_mmap; }
583 }
584
585
586
587
588 if ( !internal_map_called_from_mremap
589 && (resource->flags & SOS_MAPPED_RESOURCE_ANONYMOUS ) )
590 offset_in_resource = hint_uaddr;
591
592
593
594 next_vr = find_enclosing_or_next_vr(as, hint_uaddr);
595 if (next_vr)
596 {
597
598 prev_vr = next_vr->prev_in_as;
599
600
601 if (prev_vr->start > hint_uaddr)
602 prev_vr = NULL;
603 }
604 else
605 {
606
607 prev_vr = list_get_tail_named(as->list_vr, prev_in_as, next_in_as);
608 }
609
610
611 merge_with_preceding
612 = ( (NULL != prev_vr)
613 && (prev_vr->mapped_resource == resource)
614 && (prev_vr->offset_in_resource + prev_vr->size == offset_in_resource)
615 && (prev_vr->start + prev_vr->size == hint_uaddr)
616 && (prev_vr->flags == flags)
617 && (prev_vr->access_rights == access_rights) );
618
619
620 merge_with_next
621 = ( (NULL != next_vr)
622 && (next_vr->mapped_resource == resource)
623 && (offset_in_resource + size == next_vr->offset_in_resource)
624 && (hint_uaddr + size == next_vr->start)
625 && (next_vr->flags == flags)
626 && (next_vr->access_rights == access_rights) );
627
628 if (merge_with_preceding && merge_with_next)
629 {
630
631 vr = prev_vr;
632 vr->size += size + next_vr->size;
633
634
635 list_delete_named(as->list_vr, next_vr, prev_in_as, next_in_as);
636 list_delete_named(next_vr->mapped_resource->list_vr, next_vr,
637 prev_in_mapped_resource, next_in_mapped_resource);
638
639 if (next_vr->ops && next_vr->ops->unref)
640 next_vr->ops->unref(next_vr);
641
642 sos_kmem_vmm_free((sos_vaddr_t) next_vr);
643 }
644 else if (merge_with_preceding)
645 {
646
647 vr = prev_vr;
648 vr->size += size;
649 }
650 else if (merge_with_next)
651 {
652
653 vr = next_vr;
654 vr->start -= size;
655 vr->size += size;
656 }
657 else
658 {
659
660
661 vr = preallocated_vr;
662 used_preallocated_vr = TRUE;
663
664 vr->start = hint_uaddr;
665 vr->size = size;
666 vr->access_rights = access_rights;
667 vr->flags = flags;
668 vr->mapped_resource = resource;
669 vr->offset_in_resource = offset_in_resource;
670
671
672 vr->address_space = as;
673 if (prev_vr)
674 list_insert_after_named(as->list_vr, prev_vr, vr,
675 prev_in_as, next_in_as);
676 else
677 list_add_head_named(as->list_vr, vr, prev_in_as, next_in_as);
678 list_add_tail_named(vr->mapped_resource->list_vr, vr,
679 prev_in_mapped_resource,
680 next_in_mapped_resource);
681
682
683 if (resource && resource->mmap)
684 {
685 retval = resource->mmap(vr);
686 if (SOS_OK != retval)
687 {
688 retval = sos_umem_vmm_unmap(as, vr->start, vr->size);
689 goto return_mmap;
690 }
691
692
693 SOS_ASSERT_FATAL(vr->ops && vr->ops->page_in);
694 }
695
696 if (vr->ops && vr->ops->ref)
697 vr->ops->ref(vr);
698 }
699
700
701 *uaddr = hint_uaddr;
702 as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED,
703 size, 0, vr->access_rights);
704 retval = SOS_OK;
705
706 return_mmap:
707 if (! used_preallocated_vr)
708 sos_kmem_vmm_free((sos_vaddr_t)preallocated_vr);
709
710 return retval;
711 }
712
713
714 sos_ret_t
715 sos_umem_vmm_unmap(struct sos_umem_vmm_as * as,
716 sos_uaddr_t uaddr, sos_size_t size)
717 {
718 struct sos_umem_vmm_vr *vr, *preallocated_vr;
719 sos_bool_t need_to_setup_mmu;
720 sos_bool_t used_preallocated_vr;
721
722 if (! SOS_IS_PAGE_ALIGNED(uaddr))
723 return -SOS_EINVAL;
724 if (size <= 0)
725 return -SOS_EINVAL;
726 size = SOS_PAGE_ALIGN_SUP(size);
727
728
729 if (uaddr < SOS_PAGING_BASE_USER_ADDRESS)
730 return -SOS_EINVAL;
731 if (uaddr > SOS_PAGING_TOP_USER_ADDRESS - size)
732 return -SOS_EINVAL;
733
734
735
736
737
738
739 used_preallocated_vr = FALSE;
740 preallocated_vr
741 = (struct sos_umem_vmm_vr *)sos_kmem_cache_alloc(cache_of_vr, 0);
742 if (! preallocated_vr)
743 return -SOS_ENOMEM;
744
745
746 vr = find_first_intersecting_vr(as, uaddr, size);
747
748
749 while (NULL != vr)
750 {
751
752
753 if (vr->start + vr->size <= uaddr)
754
755 break;
756
757
758 if (uaddr + size <= vr->start)
759
760 break;
761
762
763 if ((vr->start >= uaddr)
764 && (vr->start + vr->size <= uaddr + size))
765 {
766 struct sos_umem_vmm_vr *next_vr;
767
768
769 if (vr->ops && vr->ops->unmap)
770 vr->ops->unmap(vr, vr->start, vr->size);
771
772
773 next_vr = vr->next_in_as;
774 if (next_vr == vr)
775 next_vr = NULL;
776 list_delete_named(as->list_vr, vr, prev_in_as, next_in_as);
777
778
779 list_delete_named(vr->mapped_resource->list_vr, vr,
780 prev_in_mapped_resource,
781 next_in_mapped_resource);
782
783 if (vr->ops && vr->ops->unref)
784 vr->ops->unref(vr);
785
786 as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED,
787 vr->size, vr->access_rights, 0);
788 sos_kmem_vmm_free((sos_vaddr_t)vr);
789
790
791 vr = next_vr;
792 continue;
793 }
794
795
796 else if ( (vr->start < uaddr)
797 && (vr->start + vr->size > uaddr + size) )
798 {
799
800
801
802 used_preallocated_vr = TRUE;
803 memcpy(preallocated_vr, vr, sizeof(*vr));
804
805
806 preallocated_vr->start = uaddr + size;
807 preallocated_vr->size = vr->start + vr->size - (uaddr + size);
808 preallocated_vr->offset_in_resource += uaddr + size - vr->start;
809 vr->size = uaddr - vr->start;
810
811
812 list_insert_after_named(as->list_vr, vr, preallocated_vr,
813 prev_in_as, next_in_as);
814 list_add_tail_named(vr->mapped_resource->list_vr, preallocated_vr,
815 prev_in_mapped_resource,
816 next_in_mapped_resource);
817
818
819 if (vr->ops && vr->ops->unmap)
820 vr->ops->unmap(vr, uaddr, size);
821 if (preallocated_vr->ops && preallocated_vr->ops->ref)
822 preallocated_vr->ops->ref(preallocated_vr);
823
824
825 as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED,
826 size, vr->access_rights, 0);
827
828
829 break;
830 }
831
832
833 else if (uaddr <= vr->start)
834 {
835 sos_size_t translation = uaddr + size - vr->start;
836
837
838 vr->size -= translation;
839 vr->offset_in_resource += translation;
840 vr->start += translation;
841
842
843 if (vr->ops && vr->ops->unmap)
844 vr->ops->unmap(vr, uaddr + size,
845 translation);
846
847
848 as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED,
849 translation,
850 vr->access_rights, 0);
851
852
853
854 break;
855 }
856
857
858 else if (uaddr + size >= vr->start + vr->size)
859 {
860 sos_size_t unmapped_size = vr->start + vr->size - uaddr;
861
862
863 vr->size = uaddr - vr->start;
864
865
866 if (vr->ops && vr->ops->unmap)
867 vr->ops->unmap(vr, uaddr, unmapped_size);
868
869
870 as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED,
871 unmapped_size,
872 vr->access_rights, 0);
873
874 vr = vr->next_in_as;
875 continue;
876 }
877
878 sos_display_fatal_error("BUG uaddr=%x sz=%x vr_start=%x, vr_sz=%x",
879 uaddr, size, vr->start, vr->size);
880 }
881
882 need_to_setup_mmu = (sos_thread_get_current()->squatted_mm_context
883 != as->mm_context);
884 if (need_to_setup_mmu)
885 SOS_ASSERT_FATAL(SOS_OK
886 == sos_thread_prepare_user_space_access(as,
887 (sos_vaddr_t)
888 NULL));
889 {
890 sos_size_t sz_unmapped = sos_paging_unmap_interval(uaddr, size);
891 SOS_ASSERT_FATAL(sz_unmapped >= 0);
892 as->phys_total -= sz_unmapped;
893 }
894 if (need_to_setup_mmu)
895 SOS_ASSERT_FATAL(SOS_OK == sos_thread_end_user_space_access());
896
897 if (! used_preallocated_vr)
898 sos_kmem_vmm_free((sos_vaddr_t)preallocated_vr);
899
900 return SOS_OK;
901 }
902
903
904 sos_ret_t
905 sos_umem_vmm_chprot(struct sos_umem_vmm_as * as,
906 sos_uaddr_t uaddr, sos_size_t size,
907 sos_ui32_t new_access_rights)
908 {
909 struct sos_umem_vmm_vr *start_vr, *vr,
910 *preallocated_middle_vr, *preallocated_right_vr;
911 sos_bool_t used_preallocated_middle_vr, used_preallocated_right_vr;
912
913 if (! SOS_IS_PAGE_ALIGNED(uaddr))
914 return -SOS_EINVAL;
915 if (size <= 0)
916 return -SOS_EINVAL;
917 size = SOS_PAGE_ALIGN_SUP(size);
918
919
920 if (uaddr < SOS_PAGING_BASE_USER_ADDRESS)
921 return -SOS_EINVAL;
922 if (uaddr > SOS_PAGING_TOP_USER_ADDRESS - size)
923 return -SOS_EINVAL;
924
925
926
927 used_preallocated_middle_vr = FALSE;
928 used_preallocated_right_vr = FALSE;
929 preallocated_middle_vr
930 = (struct sos_umem_vmm_vr *)sos_kmem_cache_alloc(cache_of_vr, 0);
931 if (! preallocated_middle_vr)
932 return -SOS_ENOMEM;
933 preallocated_right_vr
934 = (struct sos_umem_vmm_vr *)sos_kmem_cache_alloc(cache_of_vr, 0);
935 if (! preallocated_right_vr)
936 {
937 sos_kmem_vmm_free((sos_vaddr_t)preallocated_middle_vr);
938 return -SOS_ENOMEM;
939 }
940
941
942 start_vr = find_first_intersecting_vr(as, uaddr, size);
943 if (NULL == start_vr)
944 return SOS_OK;
945
946
947
948 vr = start_vr;
949 while (TRUE)
950 {
951
952
953 if (vr->start + vr->size <= uaddr)
954
955 break;
956
957
958 if (uaddr + size < vr->start)
959
960 break;
961
962 if (vr->flags & SOS_VR_MAP_SHARED)
963 {
964
965
966 if ( ( (new_access_rights & SOS_VM_MAP_PROT_READ)
967 && !(vr->mapped_resource->allowed_access_rights
968 & SOS_VM_MAP_PROT_READ) )
969 || ( (new_access_rights & SOS_VM_MAP_PROT_WRITE)
970 && !(vr->mapped_resource->allowed_access_rights
971 & SOS_VM_MAP_PROT_WRITE) )
972 || ( (new_access_rights & SOS_VM_MAP_PROT_EXEC)
973 && !(vr->mapped_resource->allowed_access_rights
974 & SOS_VM_MAP_PROT_EXEC) ) )
975 return -SOS_EPERM;
976 }
977
978 vr = vr->next_in_as;
979 }
980
981
982
983 vr = start_vr;
984 while (TRUE)
985 {
986
987
988
989 if (vr->start + vr->size <= uaddr)
990
991 break;
992
993
994 if (uaddr + size <= vr->start)
995
996 break;
997
998
999 if (vr->access_rights == new_access_rights)
1000
1001 {
1002 vr = vr->next_in_as;
1003 continue;
1004 }
1005
1006
1007 if ((vr->start >= uaddr)
1008 && (vr->start + vr->size <= uaddr + size))
1009 {
1010
1011 as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED,
1012 vr->size, vr->access_rights,
1013 new_access_rights);
1014 vr->access_rights = new_access_rights;
1015
1016 if (vr->flags & SOS_VR_MAP_SHARED)
1017
1018
1019 sos_paging_set_prot_of_interval(vr->start, vr->size,
1020 new_access_rights);
1021 else
1022
1023 {
1024
1025
1026
1027
1028
1029 if (! (new_access_rights & SOS_VM_MAP_PROT_WRITE))
1030 sos_paging_set_prot_of_interval(vr->start, vr->size,
1031 new_access_rights);
1032 }
1033
1034 vr = vr->next_in_as;
1035 continue;
1036 }
1037
1038
1039 else if ( (vr->start < uaddr)
1040 && (vr->start + vr->size > uaddr + size) )
1041 {
1042
1043
1044
1045 SOS_ASSERT_FATAL(! used_preallocated_middle_vr);
1046 SOS_ASSERT_FATAL(! used_preallocated_right_vr);
1047 used_preallocated_middle_vr = TRUE;
1048 memcpy(preallocated_middle_vr, vr, sizeof(*vr));
1049 used_preallocated_right_vr = TRUE;
1050 memcpy(preallocated_right_vr, vr, sizeof(*vr));
1051
1052
1053 preallocated_middle_vr->start = uaddr;
1054 preallocated_middle_vr->size = size;
1055 preallocated_right_vr->start = uaddr + size;
1056 preallocated_right_vr->size = vr->start + vr->size
1057 - (uaddr + size);
1058 preallocated_middle_vr->offset_in_resource
1059 += uaddr - vr->start;
1060 preallocated_right_vr->offset_in_resource
1061 += uaddr + size - vr->start;
1062 vr->size = uaddr - vr->start;
1063
1064
1065 preallocated_middle_vr->access_rights = new_access_rights;
1066 as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED,
1067 size, vr->access_rights,
1068 new_access_rights);
1069
1070
1071 list_insert_after_named(as->list_vr, vr, preallocated_middle_vr,
1072 prev_in_as, next_in_as);
1073 list_insert_after_named(as->list_vr, preallocated_middle_vr,
1074 preallocated_right_vr,
1075 prev_in_as, next_in_as);
1076
1077 list_add_tail_named(vr->mapped_resource->list_vr,
1078 preallocated_middle_vr,
1079 prev_in_mapped_resource,
1080 next_in_mapped_resource);
1081 list_add_tail_named(vr->mapped_resource->list_vr,
1082 preallocated_right_vr,
1083 prev_in_mapped_resource,
1084 next_in_mapped_resource);
1085
1086
1087 if (!(preallocated_middle_vr->flags & SOS_VR_MAP_SHARED)
1088 && (new_access_rights & SOS_VM_MAP_PROT_WRITE))
1089
1090 sos_paging_prepare_COW(preallocated_middle_vr->start,
1091 preallocated_middle_vr->size);
1092 else
1093 sos_paging_set_prot_of_interval(preallocated_middle_vr->start,
1094 preallocated_middle_vr->size,
1095 new_access_rights);
1096
1097 if (preallocated_right_vr->ops && preallocated_right_vr->ops->ref)
1098 preallocated_right_vr->ops->ref(preallocated_right_vr);
1099 if (preallocated_middle_vr->ops && preallocated_middle_vr->ops->ref)
1100 preallocated_middle_vr->ops->ref(preallocated_middle_vr);
1101
1102
1103 break;
1104 }
1105
1106
1107 else if (uaddr <= vr->start)
1108 {
1109
1110 sos_uoffset_t offset_in_region = uaddr + size - vr->start;
1111
1112
1113 SOS_ASSERT_FATAL(! used_preallocated_middle_vr);
1114 used_preallocated_middle_vr = TRUE;
1115 memcpy(preallocated_middle_vr, vr, sizeof(*vr));
1116
1117
1118 preallocated_middle_vr->start += offset_in_region;
1119 preallocated_middle_vr->size -= offset_in_region;
1120 vr->size = offset_in_region;
1121 preallocated_middle_vr->offset_in_resource += offset_in_region;
1122
1123
1124 as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED,
1125 vr->size,
1126 vr->access_rights,
1127 new_access_rights);
1128 vr->access_rights = new_access_rights;
1129
1130
1131 list_insert_after_named(as->list_vr, vr,
1132 preallocated_middle_vr,
1133 prev_in_as, next_in_as);
1134 list_add_tail_named(vr->mapped_resource->list_vr,
1135 preallocated_middle_vr,
1136 prev_in_mapped_resource,
1137 next_in_mapped_resource);
1138
1139
1140 if (!(vr->flags & SOS_VR_MAP_SHARED)
1141 && (new_access_rights & SOS_VM_MAP_PROT_WRITE))
1142
1143 sos_paging_prepare_COW(vr->start, vr->size);
1144 else
1145 sos_paging_set_prot_of_interval(vr->start, vr->size,
1146 new_access_rights);
1147
1148 if (preallocated_middle_vr->ops && preallocated_middle_vr->ops->ref)
1149 preallocated_middle_vr->ops->ref(preallocated_middle_vr);
1150
1151
1152
1153 break;
1154 }
1155
1156
1157 else if (uaddr + size >= vr->start + vr->size)
1158 {
1159
1160 sos_uoffset_t offset_in_region = uaddr - vr->start;
1161
1162
1163 SOS_ASSERT_FATAL(! used_preallocated_right_vr);
1164 used_preallocated_right_vr = TRUE;
1165 memcpy(preallocated_right_vr, vr, sizeof(*vr));
1166
1167
1168 preallocated_right_vr->start += offset_in_region;
1169 preallocated_right_vr->size -= offset_in_region;
1170 vr->size = offset_in_region;
1171 preallocated_right_vr->offset_in_resource += offset_in_region;
1172
1173
1174 as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED,
1175 preallocated_right_vr->size,
1176 vr->access_rights,
1177 new_access_rights);
1178 preallocated_right_vr->access_rights = new_access_rights;
1179
1180
1181 list_insert_after_named(as->list_vr, vr,
1182 preallocated_right_vr,
1183 prev_in_as, next_in_as);
1184 list_add_tail_named(vr->mapped_resource->list_vr,
1185 preallocated_right_vr,
1186 prev_in_mapped_resource,
1187 next_in_mapped_resource);
1188
1189
1190 if (!(preallocated_right_vr->flags & SOS_VR_MAP_SHARED)
1191 && (new_access_rights & SOS_VM_MAP_PROT_WRITE))
1192
1193 sos_paging_prepare_COW(preallocated_right_vr->start,
1194 preallocated_right_vr->size);
1195 else
1196 sos_paging_set_prot_of_interval(preallocated_right_vr->start,
1197 preallocated_right_vr->size,
1198 new_access_rights);
1199
1200 if (preallocated_right_vr->ops && preallocated_right_vr->ops->ref)
1201 preallocated_right_vr->ops->ref(preallocated_right_vr);
1202
1203 vr = vr->next_in_as;
1204 continue;
1205 }
1206
1207 sos_display_fatal_error("BUG");
1208 }
1209
1210 if (! used_preallocated_middle_vr)
1211 sos_kmem_vmm_free((sos_vaddr_t)preallocated_middle_vr);
1212 if (! used_preallocated_right_vr)
1213 sos_kmem_vmm_free((sos_vaddr_t)preallocated_right_vr);
1214
1215 return SOS_OK;
1216 }
1217
1218
1219 sos_ret_t
1220 sos_umem_vmm_resize(struct sos_umem_vmm_as * as,
1221 sos_uaddr_t old_uaddr, sos_size_t old_size,
1222 sos_uaddr_t *new_uaddr, sos_size_t new_size,
1223 sos_ui32_t flags)
1224 {
1225 sos_luoffset_t new_offset_in_resource;
1226 sos_bool_t must_move_vr = FALSE;
1227 struct sos_umem_vmm_vr *vr, *prev_vr, *next_vr;
1228
1229
1230 if (*new_uaddr < SOS_PAGING_BASE_USER_ADDRESS)
1231 return -SOS_EINVAL;
1232 if (*new_uaddr > SOS_PAGING_TOP_USER_ADDRESS - new_size)
1233 return -SOS_EINVAL;
1234
1235 old_uaddr = SOS_PAGE_ALIGN_INF(old_uaddr);
1236 old_size = SOS_PAGE_ALIGN_SUP(old_size);
1237 if (! SOS_IS_PAGE_ALIGNED(*new_uaddr))
1238 return -SOS_EINVAL;
1239 if (new_size <= 0)
1240 return -SOS_EINVAL;
1241 new_size = SOS_PAGE_ALIGN_SUP(new_size);
1242
1243
1244 vr = find_first_intersecting_vr(as, old_uaddr, old_size);
1245 if (! vr)
1246 return -SOS_EINVAL;
1247
1248
1249 if ( (vr->start > old_uaddr)
1250 || (vr->start + vr->size < old_uaddr + old_size) )
1251 return -SOS_EINVAL;
1252
1253
1254
1255 prev_vr = vr->prev_in_as;
1256 if (prev_vr->start >= vr->start)
1257 prev_vr = NULL;
1258 next_vr = vr->prev_in_as;
1259 if (next_vr->start <= vr->start)
1260 next_vr = NULL;
1261
1262
1263
1264
1265
1266
1267
1268 if ( (*new_uaddr < vr->start)
1269 && (vr->start - *new_uaddr > vr->offset_in_resource) )
1270 return -SOS_EINVAL;
1271
1272
1273 if (vr->start > *new_uaddr)
1274 new_offset_in_resource
1275 = vr->offset_in_resource
1276 - (vr->start - *new_uaddr);
1277 else
1278 new_offset_in_resource
1279 = vr->offset_in_resource
1280 + (*new_uaddr - vr->start);
1281
1282
1283
1284 if (prev_vr && (prev_vr->start + prev_vr->size > *new_uaddr))
1285 must_move_vr |= TRUE;
1286 if (next_vr && (next_vr->start < *new_uaddr + new_size))
1287 must_move_vr |= TRUE;
1288
1289
1290 if (*new_uaddr < SOS_PAGING_BASE_USER_ADDRESS)
1291 must_move_vr |= TRUE;
1292 if (*new_uaddr > SOS_PAGING_TOP_USER_ADDRESS - new_size)
1293 must_move_vr |= TRUE;
1294
1295
1296 if ( must_move_vr && !(flags & SOS_VR_REMAP_MAYMOVE) )
1297 return -SOS_EINVAL;
1298
1299
1300
1301 if (must_move_vr)
1302 {
1303 sos_uaddr_t uaddr, result_uaddr;
1304 sos_ret_t retval;
1305
1306 result_uaddr = *new_uaddr;
1307 retval = sos_umem_vmm_map(as, & result_uaddr, new_size,
1308 vr->access_rights,
1309 vr->flags | INTERNAL_MAP_CALLED_FROM_MREMAP,
1310 vr->mapped_resource,
1311 new_offset_in_resource);
1312 if (SOS_OK != retval)
1313 return retval;
1314
1315
1316 for (uaddr = vr->start ;
1317 uaddr < vr->start + vr->size ;
1318 uaddr += SOS_PAGE_SIZE)
1319 {
1320 sos_paddr_t paddr;
1321 sos_ui32_t prot;
1322 sos_uaddr_t vaddr;
1323
1324 if (uaddr < *new_uaddr)
1325 continue;
1326 if (uaddr > *new_uaddr + new_size)
1327 continue;
1328
1329
1330
1331 if (vr->start >= *new_uaddr)
1332 vaddr = result_uaddr
1333 + (uaddr - vr->start)
1334 + (vr->start - *new_uaddr);
1335 else
1336 vaddr = result_uaddr
1337 + (uaddr - vr->start)
1338 - (*new_uaddr - vr->start);
1339
1340 paddr = sos_paging_get_paddr(uaddr);
1341 if (! paddr)
1342
1343 continue;
1344
1345 prot = sos_paging_get_prot(uaddr);
1346 SOS_ASSERT_FATAL(prot);
1347
1348
1349 retval = sos_paging_map(paddr, vaddr, TRUE, prot);
1350 if (SOS_OK != retval)
1351 {
1352 sos_umem_vmm_unmap(as, result_uaddr, new_size);
1353 return retval;
1354 }
1355 }
1356
1357 retval = sos_umem_vmm_unmap(as, vr->start, vr->size);
1358 if (SOS_OK != retval)
1359 {
1360 sos_umem_vmm_unmap(as, result_uaddr, new_size);
1361 return retval;
1362 }
1363
1364 *new_uaddr = result_uaddr;
1365 return retval;
1366 }
1367
1368
1369
1370
1371 if (*new_uaddr + new_size < vr->start + vr->size)
1372 sos_umem_vmm_unmap(as, *new_uaddr + new_size,
1373 vr->start + vr->size - (*new_uaddr + new_size));
1374 else
1375 {
1376 as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED,
1377 *new_uaddr + new_size
1378 - (vr->start + vr->size),
1379 0, vr->access_rights);
1380 vr->size += *new_uaddr + new_size - (vr->start + vr->size);
1381 }
1382
1383 if (*new_uaddr > vr->start)
1384 sos_umem_vmm_unmap(as, vr->start, *new_uaddr - vr->start);
1385 else
1386 {
1387 as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED,
1388 vr->start - *new_uaddr,
1389 0, vr->access_rights);
1390 vr->size += vr->start - *new_uaddr;
1391 vr->start = *new_uaddr;
1392 vr->offset_in_resource = new_offset_in_resource;
1393 }
1394
1395 SOS_ASSERT_FATAL(vr->start == *new_uaddr);
1396 SOS_ASSERT_FATAL(vr->size == new_size);
1397 SOS_ASSERT_FATAL(vr->offset_in_resource == new_offset_in_resource);
1398
1399 return SOS_OK;
1400 }
1401
1402
/*
 * Try to resolve a page fault raised at user address uaddr on behalf
 * of the current thread's process.
 *
 * @param uaddr        faulting user-space address
 * @param write_access TRUE when the faulting access was a write
 * @param user_access  privilege level of the faulting access
 *                     (NOTE(review): not consulted anywhere in this
 *                     body — confirm callers expect that)
 *
 * @return SOS_OK when the fault was resolved (COW broken or page
 *         paged in), -SOS_EFAULT otherwise.
 */
sos_ret_t sos_umem_vmm_try_resolve_page_fault(sos_uaddr_t uaddr,
                                              sos_bool_t write_access,
                                              sos_bool_t user_access)
{
  struct sos_process *process = sos_thread_get_current()->process;
  struct sos_umem_vmm_as *as;
  struct sos_umem_vmm_vr *vr;

  /* A fault outside any process context cannot be resolved here */
  if (! process)
    return -SOS_EFAULT;

  as = sos_process_get_address_space(process);
  if (! as)
    return -SOS_EFAULT;

  /* Locate the VR covering the single faulting byte */
  vr = find_first_intersecting_vr(as, uaddr, 1);
  if (! vr)
    return -SOS_EFAULT;

  /* Write access to a region mapped without write permission: a
     genuine protection fault */
  if (write_access && !(vr->access_rights & SOS_VM_MAP_PROT_WRITE))
    return -SOS_EFAULT;

  /* Private (non-shared) writable mapping: the page may merely be
     write-protected for copy-on-write, so try to break the COW
     before asking the backing resource for the page */
  if (write_access && !(vr->flags & SOS_VR_MAP_SHARED))
    {
      if (SOS_OK == sos_paging_try_resolve_COW(uaddr))
        {
          as->pgflt_cow ++;
          return SOS_OK;
        }
    }

  /* Otherwise ask the resource backing this VR to page the data in */
  if (SOS_OK != vr->ops->page_in(vr, uaddr, write_access))
    {
      as->pgflt_invalid ++;
      return -SOS_EFAULT;
    }

  /* Account the one page the page_in callback just mapped */
  as->phys_total += SOS_PAGE_SIZE;
  as->pgflt_page_in ++;

  /* For private mappings, flag the freshly mapped page for COW so a
     later write gets a private copy (presumably what
     sos_paging_prepare_COW sets up — confirm in hwcore/paging.c) */
  if (!(vr->flags & SOS_VR_MAP_SHARED))
    {
      sos_paging_prepare_COW(SOS_PAGE_ALIGN_INF(uaddr),
                             SOS_PAGE_SIZE);
    }

  return SOS_OK;
}
1455
1456
1457 sos_ret_t
1458 sos_umem_vmm_init_heap(struct sos_umem_vmm_as * as,
1459 sos_uaddr_t heap_start)
1460 {
1461 SOS_ASSERT_FATAL(! as->heap_start);
1462
1463 as->heap_start = heap_start;
1464 as->heap_size = 0;
1465 return SOS_OK;
1466 }
1467
1468
/*
 * Unix brk() semantics for this address space: move the top of the
 * heap to new_top_uaddr (rounded up to a page boundary).
 *
 * @param as            address space; its heap must have been set up
 *                      with sos_umem_vmm_init_heap (fatal otherwise)
 * @param new_top_uaddr requested new heap top; 0 queries the current top
 *
 * @return the (page-aligned) new heap top on success, the current top
 *         for a query or a no-op request, (sos_uaddr_t)NULL on failure.
 */
sos_uaddr_t
sos_umem_vmm_brk(struct sos_umem_vmm_as * as,
                 sos_uaddr_t new_top_uaddr)
{
  sos_uaddr_t new_start;
  sos_size_t new_size;
  SOS_ASSERT_FATAL(as->heap_start);

  /* new_top_uaddr == 0: plain query of the current break */
  if (! new_top_uaddr)
    return as->heap_start + as->heap_size;

  /* No change requested */
  if (new_top_uaddr == as->heap_start + as->heap_size)
    return as->heap_start + as->heap_size;

  /* The break can never move below the start of the heap */
  if (new_top_uaddr < as->heap_start)
    return (sos_uaddr_t)NULL;

  /* The heap is managed with page granularity */
  new_top_uaddr = SOS_PAGE_ALIGN_SUP(new_top_uaddr);
  new_start = as->heap_start;
  new_size  = new_top_uaddr - as->heap_start;

  /* First growth of an empty heap: map demand-zero memory (via the
     zero device) over the whole initial extent */
  if (! as->heap_size)
    {
      if (SOS_OK != sos_dev_zero_map(as, & as->heap_start,
                                     new_size,
                                     SOS_VM_MAP_PROT_READ
                                     | SOS_VM_MAP_PROT_WRITE,
                                     0 /* flags */))
        return (sos_uaddr_t)NULL;

      as->heap_size = new_size;
      return as->heap_start + as->heap_size;
    }

  /* Shrink back to an empty heap: unmap the whole current extent.
     NOTE(review): new_size is a sos_size_t; "<= 0" is only meaningful
     as "== 0" if that type is unsigned — confirm the typedef */
  if (new_size <= 0)
    {
      if (SOS_OK != sos_umem_vmm_unmap(as,
                                       as->heap_start, as->heap_size))
        return (sos_uaddr_t)NULL;
    }
  else
    {
      /* Grow or shrink the existing heap mapping in place */
      if (SOS_OK != sos_umem_vmm_resize(as,
                                        as->heap_start, as->heap_size,
                                        & new_start, new_size,
                                        0))
        return (sos_uaddr_t)NULL;
    }

  /* The heap region must not have been relocated by the resize */
  SOS_ASSERT_FATAL(new_start == as->heap_start);
  as->heap_size = new_size;
  return new_top_uaddr;
}
1524
1525
/*
 * Return the VR enclosing uaddr or, failing that, the first VR lying
 * entirely above uaddr. Assumes the per-AS VR list is kept sorted by
 * increasing start address (NOTE(review): invariant maintained by the
 * VR insertion code elsewhere in this file — confirm).
 *
 * @return NULL when uaddr is outside the user address range or above
 *         every VR in the list.
 */
static struct sos_umem_vmm_vr *
find_enclosing_or_next_vr(struct sos_umem_vmm_as * as,
                          sos_uaddr_t uaddr)
{
  struct sos_umem_vmm_vr *vr;
  int nb_vr;

  /* Reject addresses outside user space altogether */
  if (uaddr < SOS_PAGING_BASE_USER_ADDRESS)
    return NULL;
  if (uaddr > SOS_PAGING_TOP_USER_ADDRESS)
    return NULL;

  list_foreach_named(as->list_vr, vr, nb_vr, prev_in_as, next_in_as)
    {
      /* First VR whose last byte (start + size - 1) is at or past
         uaddr: on a sorted list this is either the VR enclosing uaddr
         or the next one above it */
      if (uaddr <= vr->start + (vr->size - 1))
        return vr;
    }

  /* uaddr lies above all mapped VRs */
  return NULL;
}
1548
1549
1550 static struct sos_umem_vmm_vr *
1551 find_first_intersecting_vr(struct sos_umem_vmm_as * as,
1552 sos_uaddr_t start_uaddr, sos_size_t size)
1553 {
1554 struct sos_umem_vmm_vr * vr;
1555 vr = find_enclosing_or_next_vr(as, start_uaddr);
1556 if (! vr)
1557 return NULL;
1558
1559 if (start_uaddr + size <= vr->start)
1560 return NULL;
1561
1562 return vr;
1563 }
1564
1565
/*
 * Look for an unmapped interval of 'size' bytes in the address space,
 * starting the search at hint_uaddr (clamped to the user range).
 *
 * @return the start address of a suitable hole, or (sos_uaddr_t)NULL
 *         when no hole large enough exists.
 */
static sos_uaddr_t
find_first_free_interval(struct sos_umem_vmm_as * as,
                         sos_uaddr_t hint_uaddr, sos_size_t size)
{
  struct sos_umem_vmm_vr * initial_vr, * vr;

  /* Clamp the hint into user space */
  if (hint_uaddr < SOS_PAGING_BASE_USER_ADDRESS)
    hint_uaddr = SOS_PAGING_BASE_USER_ADDRESS;

  /* 'size' bytes starting at the hint must fit below the top of user
     space */
  if (hint_uaddr > SOS_PAGING_TOP_USER_ADDRESS - size + 1)
    return (sos_uaddr_t)NULL;

  initial_vr = vr = find_enclosing_or_next_vr(as, hint_uaddr);
  if (! vr)
    /* No VR at or above the hint: the whole tail of user space is free */
    return hint_uaddr;

  /* Walk the circular VR list at most once, pushing the candidate
     address past each VR that blocks it */
  do
    {
      /* Enough room between the candidate address and this VR ? */
      if (hint_uaddr + size <= vr->start)
        /* Found a hole */
        return hint_uaddr;

      /* Does the list continue upward (next VR starts at or above the
         candidate), or does it wrap back to low addresses ? */
      if (vr->next_in_as->start >= hint_uaddr)
        /* Continue upward: candidate moves just past this VR */
        hint_uaddr = vr->start + vr->size;
      else
        {
          /* The list wrapped: this was the topmost VR.
             NOTE(review): hint_uaddr has not been advanced past 'vr'
             on this iteration, so it may still overlap vr here —
             verify the single-VR and hint-inside-last-VR cases */
          if (hint_uaddr <= SOS_PAGING_TOP_USER_ADDRESS - size)
            return hint_uaddr;

          /* No room at the top: restart the search from the bottom of
             user space */
          hint_uaddr = SOS_PAGING_BASE_USER_ADDRESS;
        }

      /* Go on with the next VR */
      vr = vr->next_in_as;
    }
  while (vr != initial_vr);

  /* Scanned the whole list without finding a suitable hole */
  return (sos_uaddr_t)NULL;
}
1617
1618
1619 static void
1620 as_account_change_of_vr_protection(struct sos_umem_vmm_as * as,
1621 sos_bool_t is_shared,
1622 sos_size_t size,
1623 sos_ui32_t prev_access_rights,
1624 sos_ui32_t new_access_rights)
1625 {
1626 if (prev_access_rights == new_access_rights)
1627 return;
1628
1629 #define _UPDATE_VMSTAT(field,is_increment) \
1630 ({ if (is_increment > 0) \
1631 as->field += size; \
1632 else \
1633 { SOS_ASSERT_FATAL(as->field >= size); as->field -= size; } })
1634 #define UPDATE_VMSTAT(field,is_increment) \
1635 ({ if (is_shared) _UPDATE_VMSTAT(vm_shrd.field, is_increment); \
1636 _UPDATE_VMSTAT(vm_total.field, is_increment); \
1637 SOS_ASSERT_FATAL(as->vm_total.field >= as->vm_shrd.field); })
1638
1639 if ( (new_access_rights & SOS_VM_MAP_PROT_WRITE)
1640 && !(prev_access_rights & SOS_VM_MAP_PROT_WRITE))
1641 {
1642 UPDATE_VMSTAT(rw, +1);
1643 if (prev_access_rights & SOS_VM_MAP_PROT_READ)
1644 UPDATE_VMSTAT(ro, -1);
1645 }
1646 else if ( !(new_access_rights & SOS_VM_MAP_PROT_WRITE)
1647 && (prev_access_rights & SOS_VM_MAP_PROT_WRITE))
1648 {
1649 if (new_access_rights & SOS_VM_MAP_PROT_READ)
1650 UPDATE_VMSTAT(ro, +1);
1651 UPDATE_VMSTAT(rw, -1);
1652 }
1653 else if (new_access_rights & SOS_VM_MAP_PROT_READ)
1654 UPDATE_VMSTAT(ro, +1);
1655 else if (!(new_access_rights & SOS_VM_MAP_PROT_READ))
1656 UPDATE_VMSTAT(ro, -1);
1657
1658 if ( (new_access_rights & SOS_VM_MAP_PROT_EXEC)
1659 && !(prev_access_rights & SOS_VM_MAP_PROT_EXEC))
1660 {
1661 UPDATE_VMSTAT(code, +1);
1662 }
1663 else if ( !(new_access_rights & SOS_VM_MAP_PROT_EXEC)
1664 && (prev_access_rights & SOS_VM_MAP_PROT_EXEC))
1665 {
1666 UPDATE_VMSTAT(code, -1);
1667 }
1668
1669 if (new_access_rights && !prev_access_rights)
1670 UPDATE_VMSTAT(overall, +1);
1671 else if (!new_access_rights && prev_access_rights)
1672 UPDATE_VMSTAT(overall, -1);
1673
1674 }