001
002
003
004
005
006
007
008
009
010
011
012
013
014
015
016
017
018
019 #include <sos/assert.h>
020 #include <sos/list.h>
021 #include <sos/physmem.h>
022 #include <sos/kmem_slab.h>
023 #include <drivers/bochs.h>
024 #include <hwcore/mm_context.h>
025 #include <hwcore/paging.h>
026 #include <drivers/zero.h>
027
028 #include "umem_vmm.h"
029
030
/**
 * A user address space: the set of virtual regions (VRs) mapped for
 * one process, plus its MMU context and accounting counters.
 */
struct sos_umem_vmm_as
{
  /* Process owning this address space */
  struct sos_process * process;

  /* Hardware MMU configuration (page tables) for this address space */
  struct sos_mm_context * mm_context;

  /* Circular list of the VRs of this AS, chained through the
     prev_in_as/next_in_as fields of struct sos_umem_vmm_vr */
  struct sos_umem_vmm_vr * list_vr;

  /* Location of the heap (managed by the brk/sbrk-style API) */
  sos_uaddr_t heap_start;
  sos_size_t  heap_size;

  /* Accounting: amount of physical memory actually mapped, and
     virtual-memory usage broken down by protection */
  sos_size_t phys_total;
  struct vm_usage
  {
    sos_size_t overall;
    sos_size_t ro, rw, code ;
  } vm_total /* all mappings */, vm_shrd /* shared mappings only */;

  /* Page-fault statistics: copy-on-write faults, demand-paging
     faults, and invalid accesses */
  sos_size_t pgflt_cow;
  sos_size_t pgflt_page_in;
  sos_size_t pgflt_invalid;
};
059
060
/**
 * A virtual region (VR): a contiguous, page-aligned interval of a
 * user address space mapping a portion of a resource (file, device,
 * anonymous memory) with uniform access rights and flags.
 */
struct sos_umem_vmm_vr
{
  /* The address space this VR belongs to */
  struct sos_umem_vmm_as *address_space;

  /* Interval covered: [start, start+size[ — both page-aligned */
  sos_uaddr_t start;
  sos_size_t  size;

  /* SOS_VM_MAP_PROT_* bitmask (read/write/exec) */
  sos_ui32_t  access_rights;

  /* SOS_VR_MAP_* bitmask (e.g. SHARED, FIXED) */
  sos_ui32_t  flags;

  /* Callbacks (ref/unref/unmap/page_in/sync_page...) installed by the
     mapped resource's mmap method */
  struct sos_umem_vmm_vr_ops *ops;

  /* The resource backing this VR and the offset inside it that
     corresponds to address 'start' */
  struct sos_umem_vmm_mapped_resource *mapped_resource;
  sos_luoffset_t offset_in_resource;

  /* Links of the per-address-space circular VR list (sorted by
     start address) */
  struct sos_umem_vmm_vr *prev_in_as, *next_in_as;

  /* Links of the per-resource circular list of the VRs mapping it */
  struct sos_umem_vmm_vr *prev_in_mapped_resource, *next_in_mapped_resource;
};
097
098
099
100
101
/* Slab caches backing the allocation of AS and VR structures; both
   are created in sos_umem_vmm_subsystem_setup().  Objects taken from
   them MUST be released with sos_kmem_cache_free(). */
static struct sos_kslab_cache * cache_of_as;
static struct sos_kslab_cache * cache_of_vr;
104
105
106
/**
 * Debugging helper: dump the layout and accounting counters of an
 * address space on the Bochs console, then each VR with its interval,
 * backing resource, protection and flags.  @str tags the dump in the
 * output.  NOTE(review): the %x/%d specifiers assume sos_size_t and
 * pointers fit in an unsigned int — debug-only code.
 */
void sos_dump_as(const struct sos_umem_vmm_as * as, const char *str)
{
  struct sos_umem_vmm_vr *vr;
  int nb_vr;

  sos_bochs_printf("AS %p - %s:\n", as, str);
  sos_bochs_printf("  physical mem: %x\n",
                   as->phys_total);
  sos_bochs_printf("  VM (all/ro+rw/exec) tot:%x/%x+%x/%x shrd:%x/%x+%x/%x\n",
                   as->vm_total.overall,
                   as->vm_total.ro, as->vm_total.rw, as->vm_total.code,
                   as->vm_shrd.overall,
                   as->vm_shrd.ro, as->vm_shrd.rw, as->vm_shrd.code);
  sos_bochs_printf("  pgflt cow=%d pgin=%d inv=%d\n",
                   as->pgflt_cow, as->pgflt_page_in, as->pgflt_invalid);
  /* Walk the circular per-AS VR list */
  list_foreach_named(as->list_vr, vr, nb_vr, prev_in_as, next_in_as)
    {
      sos_bochs_printf("  VR[%d]=%x: [%x,%x[ (sz=%x) mr=(%x)+%llx %c%c%c fl=%x\n",
                       nb_vr, (unsigned)vr,
                       vr->start, vr->start + vr->size, vr->size,
                       (unsigned)vr->mapped_resource,
                       vr->offset_in_resource,
                       (vr->access_rights & SOS_VM_MAP_PROT_READ)?'r':'-',
                       (vr->access_rights & SOS_VM_MAP_PROT_WRITE)?'w':'-',
                       (vr->access_rights & SOS_VM_MAP_PROT_EXEC)?'x':'-',
                       (unsigned)vr->flags);
    }
  sos_bochs_printf("FIN (%s)\n", str);
}
136
137
138
139
140
141
/* The single zero-filled physical page shared by anonymous mappings,
   and its permanent kernel-space virtual mapping.  Both are
   initialized once in sos_umem_vmm_subsystem_setup(). */
sos_paddr_t sos_zero_physpage = 0 ;
sos_vaddr_t sos_zero_kernelpage = 0 ;
144
145
146
147
148
149
150
151
152
153 static struct sos_umem_vmm_vr *
154 find_enclosing_or_next_vr(struct sos_umem_vmm_as * as,
155 sos_uaddr_t uaddr);
156
157
158
159
160
161
162 static struct sos_umem_vmm_vr *
163 find_first_intersecting_vr(struct sos_umem_vmm_as * as,
164 sos_uaddr_t start_uaddr, sos_size_t size);
165
166
167
168
169
170
171
172
173
174 static sos_uaddr_t
175 find_first_free_interval(struct sos_umem_vmm_as * as,
176 sos_uaddr_t hint_uaddr, sos_size_t size);
177
178
179
180
181 static void
182 as_account_change_of_vr_protection(struct sos_umem_vmm_as * as,
183 sos_bool_t is_shared,
184 sos_size_t size,
185 sos_ui32_t prev_access_rights,
186 sos_ui32_t new_access_rights);
187
188
189 sos_ret_t sos_umem_vmm_subsystem_setup()
190 {
191
192
193 sos_zero_kernelpage = sos_kmem_vmm_alloc(1, SOS_KMEM_VMM_MAP);
194 if (sos_zero_kernelpage == (sos_vaddr_t)NULL)
195 return -SOS_ENOMEM;
196 memset((void*)sos_zero_kernelpage, 0x0, SOS_PAGE_SIZE);
197
198
199 sos_zero_physpage = sos_paging_get_paddr(sos_zero_kernelpage);
200 SOS_ASSERT_FATAL(NULL != (void*)sos_zero_physpage);
201 sos_physmem_ref_physpage_at(sos_zero_physpage);
202
203
204 cache_of_as
205 = sos_kmem_cache_create("Address space structures",
206 sizeof(struct sos_umem_vmm_as),
207 1, 0,
208 SOS_KSLAB_CREATE_MAP
209 | SOS_KSLAB_CREATE_ZERO);
210 if (! cache_of_as)
211 {
212 sos_physmem_unref_physpage(sos_zero_physpage);
213 return -SOS_ENOMEM;
214 }
215
216 cache_of_vr
217 = sos_kmem_cache_create("Virtual Region structures",
218 sizeof(struct sos_umem_vmm_vr),
219 1, 0,
220 SOS_KSLAB_CREATE_MAP
221 | SOS_KSLAB_CREATE_ZERO);
222 if (! cache_of_vr)
223 {
224 sos_physmem_unref_physpage(sos_zero_physpage);
225 sos_kmem_cache_destroy(cache_of_as);
226 return -SOS_ENOMEM;
227 }
228
229 return SOS_OK;
230 }
231
232
233 struct sos_umem_vmm_as *
234 sos_umem_vmm_create_empty_as(struct sos_process *owner)
235 {
236 struct sos_umem_vmm_as * as
237 = (struct sos_umem_vmm_as *) sos_kmem_cache_alloc(cache_of_as, 0);
238 if (! as)
239 return NULL;
240
241 as->mm_context = sos_mm_context_create();
242 if (NULL == as->mm_context)
243 {
244
245 sos_kmem_cache_free((sos_vaddr_t)as);
246 return NULL;
247 }
248
249 as->process = owner;
250 return as;
251 }
252
253
/**
 * Duplicate the address space of the current thread's process into a
 * new AS for @owner (fork-style).  Private writable VRs of the model
 * AS are marked copy-on-write in BOTH address spaces; the MMU context
 * is then duplicated so the child shares the parent's physical pages.
 *
 * @return the new AS, or NULL on failure (partial state is destroyed
 *         through sos_umem_vmm_delete_as()).
 */
struct sos_umem_vmm_as *
sos_umem_vmm_duplicate_current_thread_as(struct sos_process *owner)
{
  __label__ undo_creation;
  struct sos_umem_vmm_as * my_as;
  struct sos_umem_vmm_vr * model_vr;
  int nb_vr;

  struct sos_umem_vmm_as * new_as
    = (struct sos_umem_vmm_as *) sos_kmem_cache_alloc(cache_of_as, 0);
  if (! new_as)
    return NULL;

  my_as = sos_process_get_address_space(sos_thread_get_current()->process);
  new_as->process = owner;
  list_init_named(new_as->list_vr, prev_in_as, next_in_as);

  /* Switch to the model AS so that sos_paging_prepare_COW below
     operates on the right page tables */
  SOS_ASSERT_FATAL(SOS_OK
                   == sos_thread_prepare_user_space_access(my_as,
                                                           (sos_vaddr_t)
                                                           NULL));

  /* Copy each VR of the model AS into the new AS */
  list_foreach_named(my_as->list_vr, model_vr, nb_vr, prev_in_as, next_in_as)
    {
      struct sos_umem_vmm_vr * vr;

      /* Private writable mappings must become copy-on-write so both
         processes can keep sharing the physical pages for now */
      if ( !(model_vr->flags & SOS_VR_MAP_SHARED)
           && (model_vr->access_rights & SOS_VM_MAP_PROT_WRITE) )
        {
          SOS_ASSERT_FATAL(SOS_OK
                           == sos_paging_prepare_COW(model_vr->start,
                                                     model_vr->size));
        }

      /* Clone the VR descriptor and re-parent it to the new AS */
      vr = (struct sos_umem_vmm_vr *) sos_kmem_cache_alloc(cache_of_vr, 0);
      if (! vr)
        goto undo_creation;
      memcpy(vr, model_vr, sizeof(*vr));
      vr->address_space = new_as;

      /* The clone holds its own reference on the mapped resource */
      if (vr->ops && vr->ops->ref)
        vr->ops->ref(vr);

      /* Keep the per-AS list in the same (sorted) order as the model */
      list_add_tail_named(new_as->list_vr, vr, prev_in_as, next_in_as);

      /* Register the clone with the resource it maps */
      list_add_tail_named(model_vr->mapped_resource->list_vr, vr,
                          prev_in_mapped_resource,
                          next_in_mapped_resource);
    }

  /* Duplicate the page tables AFTER the COW preparation above */
  new_as->mm_context = sos_mm_context_duplicate(my_as->mm_context);
  if (NULL == new_as->mm_context)
    goto undo_creation;

  /* Copy heap location and accounting counters */
  new_as->heap_start = my_as->heap_start;
  new_as->heap_size  = my_as->heap_size;
  new_as->phys_total = my_as->phys_total;
  memcpy(& new_as->vm_total, & my_as->vm_total, sizeof(struct vm_usage));
  memcpy(& new_as->vm_shrd, & my_as->vm_shrd, sizeof(struct vm_usage));
  SOS_ASSERT_FATAL(SOS_OK == sos_thread_end_user_space_access());
  return new_as;

  /* Failure path: tear down whatever was built so far */
 undo_creation:
  SOS_ASSERT_FATAL(SOS_OK == sos_thread_end_user_space_access());
  sos_umem_vmm_delete_as(new_as);
  return NULL;
}
337
338
/**
 * Destroy an address space: unmap and release every VR, drop the
 * reference on the MMU context, and free the AS descriptor.
 *
 * @return SOS_OK always.
 */
sos_ret_t
sos_umem_vmm_delete_as(struct sos_umem_vmm_as * as)
{
  while(! list_is_empty_named(as->list_vr, prev_in_as, next_in_as))
    {
      struct sos_umem_vmm_vr * vr;
      vr = list_get_head_named(as->list_vr, prev_in_as, next_in_as);

      /* Detach the VR from both lists BEFORE calling its callbacks */
      list_pop_head_named(as->list_vr, prev_in_as, next_in_as);
      list_delete_named(vr->mapped_resource->list_vr, vr,
                        prev_in_mapped_resource,
                        next_in_mapped_resource);

      /* Notify the resource: region unmapped, reference dropped */
      if (vr->ops)
        {
          if (vr->ops->unmap)
            vr->ops->unmap(vr, vr->start, vr->size);
          if (vr->ops->unref)
            vr->ops->unref(vr);
        }

      /* VRs come from cache_of_vr: return them to the slab cache */
      sos_kmem_cache_free((sos_vaddr_t)vr);
    }

  /* Release the page tables (shared contexts are refcounted) */
  if (as->mm_context)
    sos_mm_context_unref(as->mm_context);

  /* Finally release the AS descriptor itself */
  sos_kmem_cache_free((sos_vaddr_t)as);

  return SOS_OK;
}
375
376
377 struct sos_process *
378 sos_umem_vmm_get_process(struct sos_umem_vmm_as * as)
379 {
380 return as->process;
381 }
382
383
384 struct sos_mm_context *
385 sos_umem_vmm_get_mm_context(struct sos_umem_vmm_as * as)
386 {
387 return as->mm_context;
388 }
389
390
391 struct sos_umem_vmm_vr *
392 sos_umem_vmm_get_vr_at_address(struct sos_umem_vmm_as * as,
393 sos_uaddr_t uaddr)
394 {
395 struct sos_umem_vmm_vr * vr;
396 vr = find_enclosing_or_next_vr(as, uaddr);
397 if (! vr)
398 return NULL;
399
400
401 if (uaddr < vr->start)
402 return NULL;
403
404 return vr;
405 }
406
407
408 struct sos_umem_vmm_as *
409 sos_umem_vmm_get_as_of_vr(struct sos_umem_vmm_vr * vr)
410 {
411 return vr->address_space;
412 }
413
414
415 struct sos_umem_vmm_vr_ops *
416 sos_umem_vmm_get_ops_of_vr(struct sos_umem_vmm_vr * vr)
417 {
418 return vr->ops;
419 }
420
421
422 sos_ui32_t sos_umem_vmm_get_prot_of_vr(struct sos_umem_vmm_vr * vr)
423 {
424 return vr->access_rights;
425 }
426
427
428 sos_ui32_t sos_umem_vmm_get_flags_of_vr(struct sos_umem_vmm_vr * vr)
429 {
430 return vr->flags;
431 }
432
433
434 struct sos_umem_vmm_mapped_resource *
435 sos_umem_vmm_get_mapped_resource_of_vr(struct sos_umem_vmm_vr * vr)
436 {
437 return vr->mapped_resource;
438 }
439
440
441 sos_uaddr_t sos_umem_vmm_get_start_of_vr(struct sos_umem_vmm_vr * vr)
442 {
443 return vr->start;
444 }
445
446
447 sos_size_t sos_umem_vmm_get_size_of_vr(struct sos_umem_vmm_vr * vr)
448 {
449 return vr->size;
450 }
451
452
453 sos_luoffset_t sos_umem_vmm_get_offset_in_resource(struct sos_umem_vmm_vr * vr)
454 {
455 return vr->offset_in_resource;
456 }
457
458
459 sos_ret_t
460 sos_umem_vmm_set_ops_of_vr(struct sos_umem_vmm_vr * vr,
461 struct sos_umem_vmm_vr_ops * ops)
462 {
463
464 SOS_ASSERT_FATAL(NULL == vr->ops);
465
466 vr->ops = ops;
467 return SOS_OK;
468 }
469
470
471
472
473
474
475 #define INTERNAL_MAP_CALLED_FROM_MREMAP (1 << 8)
476
477 sos_ret_t
478 sos_umem_vmm_map(struct sos_umem_vmm_as * as,
479 sos_uaddr_t * uaddr, sos_size_t size,
480 sos_ui32_t access_rights,
481 sos_ui32_t flags,
482 struct sos_umem_vmm_mapped_resource * resource,
483 sos_luoffset_t offset_in_resource)
484 {
485 __label__ return_mmap;
486 sos_uaddr_t hint_uaddr;
487 struct sos_umem_vmm_vr *prev_vr, *next_vr, *vr, *preallocated_vr;
488 sos_bool_t merge_with_preceding, merge_with_next, used_preallocated_vr;
489 sos_bool_t internal_map_called_from_mremap
490 = (flags & INTERNAL_MAP_CALLED_FROM_MREMAP);
491
492 sos_ret_t retval = SOS_OK;
493 used_preallocated_vr = FALSE;
494 hint_uaddr = *uaddr;
495
496
497 *uaddr = (sos_vaddr_t)NULL;
498
499 if (! resource)
500 return -SOS_EINVAL;
501 if (! resource->mmap)
502 return -SOS_EPERM;
503
504 if (! SOS_IS_PAGE_ALIGNED(hint_uaddr))
505 return -SOS_EINVAL;
506
507 if (size <= 0)
508 return -SOS_EINVAL;
509 size = SOS_PAGE_ALIGN_SUP(size);
510
511 if (flags & SOS_VR_MAP_SHARED)
512 {
513
514 if ( ( (access_rights & SOS_VM_MAP_PROT_READ)
515 && !(resource->allowed_access_rights & SOS_VM_MAP_PROT_READ) )
516 || ( (access_rights & SOS_VM_MAP_PROT_WRITE)
517 && !(resource->allowed_access_rights & SOS_VM_MAP_PROT_WRITE) )
518 || ( (access_rights & SOS_VM_MAP_PROT_EXEC)
519 && !(resource->allowed_access_rights & SOS_VM_MAP_PROT_EXEC)) )
520 return -SOS_EPERM;
521 }
522
523
524 if ( !internal_map_called_from_mremap
525 && ( resource->flags & SOS_MAPPED_RESOURCE_ANONYMOUS ) )
526
527 {
528
529 }
530
531
532 else if (offset_in_resource + size <= offset_in_resource)
533 return -SOS_EINVAL;
534
535
536 access_rights &= (SOS_VM_MAP_PROT_READ
537 | SOS_VM_MAP_PROT_WRITE
538 | SOS_VM_MAP_PROT_EXEC);
539 flags &= (SOS_VR_MAP_SHARED
540 | SOS_VR_MAP_FIXED);
541
542
543
544
545 preallocated_vr
546 = (struct sos_umem_vmm_vr *)sos_kmem_cache_alloc(cache_of_vr, 0);
547 if (! preallocated_vr)
548 return -SOS_ENOMEM;
549
550
551 if (flags & SOS_VR_MAP_FIXED)
552 {
553
554
555
556
557
558 if (hint_uaddr < SOS_PAGING_BASE_USER_ADDRESS)
559 { retval = -SOS_EINVAL; goto return_mmap; }
560 if (hint_uaddr > SOS_PAGING_TOP_USER_ADDRESS - size)
561 { retval = -SOS_EINVAL; goto return_mmap; }
562
563
564 retval = sos_umem_vmm_unmap(as, hint_uaddr, size);
565 if (SOS_OK != retval)
566 { goto return_mmap; }
567 }
568 else
569 {
570
571
572
573
574
575 hint_uaddr = find_first_free_interval(as, hint_uaddr, size);
576 if (! hint_uaddr)
577 { retval = -SOS_ENOMEM; goto return_mmap; }
578 }
579
580
581
582
583 if ( !internal_map_called_from_mremap
584 && (resource->flags & SOS_MAPPED_RESOURCE_ANONYMOUS ) )
585 offset_in_resource = hint_uaddr;
586
587
588
589 next_vr = find_enclosing_or_next_vr(as, hint_uaddr);
590 if (next_vr)
591 {
592
593 prev_vr = next_vr->prev_in_as;
594
595
596 if (prev_vr->start > hint_uaddr)
597 prev_vr = NULL;
598 }
599 else
600 {
601
602 prev_vr = list_get_tail_named(as->list_vr, prev_in_as, next_in_as);
603 }
604
605
606 merge_with_preceding
607 = ( (NULL != prev_vr)
608 && (prev_vr->mapped_resource == resource)
609 && (prev_vr->offset_in_resource + prev_vr->size == offset_in_resource)
610 && (prev_vr->start + prev_vr->size == hint_uaddr)
611 && (prev_vr->flags == flags)
612 && (prev_vr->access_rights == access_rights) );
613
614
615 merge_with_next
616 = ( (NULL != next_vr)
617 && (next_vr->mapped_resource == resource)
618 && (offset_in_resource + size == next_vr->offset_in_resource)
619 && (hint_uaddr + size == next_vr->start)
620 && (next_vr->flags == flags)
621 && (next_vr->access_rights == access_rights) );
622
623 if (merge_with_preceding && merge_with_next)
624 {
625
626 vr = prev_vr;
627 vr->size += size + next_vr->size;
628
629
630 list_delete_named(as->list_vr, next_vr, prev_in_as, next_in_as);
631 list_delete_named(next_vr->mapped_resource->list_vr, next_vr,
632 prev_in_mapped_resource, next_in_mapped_resource);
633
634 if (next_vr->ops && next_vr->ops->unref)
635 next_vr->ops->unref(next_vr);
636
637 sos_kmem_vmm_free((sos_vaddr_t) next_vr);
638 }
639 else if (merge_with_preceding)
640 {
641
642 vr = prev_vr;
643 vr->size += size;
644 }
645 else if (merge_with_next)
646 {
647
648 vr = next_vr;
649 vr->start -= size;
650 vr->size += size;
651 }
652 else
653 {
654
655
656 vr = preallocated_vr;
657 used_preallocated_vr = TRUE;
658
659 vr->start = hint_uaddr;
660 vr->size = size;
661 vr->access_rights = access_rights;
662 vr->flags = flags;
663 vr->mapped_resource = resource;
664 vr->offset_in_resource = offset_in_resource;
665
666
667 vr->address_space = as;
668 if (prev_vr)
669 list_insert_after_named(as->list_vr, prev_vr, vr,
670 prev_in_as, next_in_as);
671 else
672 list_add_head_named(as->list_vr, vr, prev_in_as, next_in_as);
673
674 list_add_tail_named(vr->mapped_resource->list_vr, vr,
675 prev_in_mapped_resource,
676 next_in_mapped_resource);
677
678
679 if (resource && resource->mmap)
680 {
681 retval = resource->mmap(vr);
682 if (SOS_OK != retval)
683 {
684 retval = sos_umem_vmm_unmap(as, vr->start, vr->size);
685 goto return_mmap;
686 }
687
688
689 SOS_ASSERT_FATAL(vr->ops && vr->ops->page_in);
690 }
691
692 if (vr->ops && vr->ops->ref)
693 vr->ops->ref(vr);
694 }
695
696
697 *uaddr = hint_uaddr;
698 as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED,
699 size, 0, vr->access_rights);
700 retval = SOS_OK;
701
702 return_mmap:
703 if (! used_preallocated_vr)
704 sos_kmem_vmm_free((sos_vaddr_t)preallocated_vr);
705
706 return retval;
707 }
708
709
710 sos_ret_t
711 sos_umem_vmm_unmap(struct sos_umem_vmm_as * as,
712 sos_uaddr_t uaddr, sos_size_t size)
713 {
714 struct sos_umem_vmm_vr *vr, *preallocated_vr;
715 sos_bool_t need_to_setup_mmu;
716 sos_bool_t used_preallocated_vr;
717
718 if (! SOS_IS_PAGE_ALIGNED(uaddr))
719 return -SOS_EINVAL;
720 if (size <= 0)
721 return -SOS_EINVAL;
722 size = SOS_PAGE_ALIGN_SUP(size);
723
724
725 if (uaddr < SOS_PAGING_BASE_USER_ADDRESS)
726 return -SOS_EINVAL;
727 if (uaddr > SOS_PAGING_TOP_USER_ADDRESS - size)
728 return -SOS_EINVAL;
729
730
731
732
733
734
735 used_preallocated_vr = FALSE;
736 preallocated_vr
737 = (struct sos_umem_vmm_vr *)sos_kmem_cache_alloc(cache_of_vr, 0);
738 if (! preallocated_vr)
739 return -SOS_ENOMEM;
740
741
742 vr = find_first_intersecting_vr(as, uaddr, size);
743
744
745 while (NULL != vr)
746 {
747
748
749 if (vr->start + vr->size <= uaddr)
750
751 break;
752
753
754 if (uaddr + size <= vr->start)
755
756 break;
757
758
759 if ((vr->start >= uaddr)
760 && (vr->start + vr->size <= uaddr + size))
761 {
762 struct sos_umem_vmm_vr *next_vr;
763
764
765 if (vr->ops && vr->ops->unmap)
766 vr->ops->unmap(vr, vr->start, vr->size);
767
768
769 next_vr = vr->next_in_as;
770 if (next_vr == vr)
771 next_vr = NULL;
772 list_delete_named(as->list_vr, vr, prev_in_as, next_in_as);
773
774
775 list_delete_named(vr->mapped_resource->list_vr, vr,
776 prev_in_mapped_resource,
777 next_in_mapped_resource);
778
779 if (vr->ops && vr->ops->unref)
780 vr->ops->unref(vr);
781
782 as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED,
783 vr->size, vr->access_rights, 0);
784 sos_kmem_vmm_free((sos_vaddr_t)vr);
785
786
787 vr = next_vr;
788 continue;
789 }
790
791
792 else if ( (vr->start < uaddr)
793 && (vr->start + vr->size > uaddr + size) )
794 {
795
796
797
798 used_preallocated_vr = TRUE;
799 memcpy(preallocated_vr, vr, sizeof(*vr));
800
801
802 preallocated_vr->start = uaddr + size;
803 preallocated_vr->size = vr->start + vr->size - (uaddr + size);
804 preallocated_vr->offset_in_resource += uaddr + size - vr->start;
805 vr->size = uaddr - vr->start;
806
807
808 list_insert_after_named(as->list_vr, vr, preallocated_vr,
809 prev_in_as, next_in_as);
810 list_add_tail_named(vr->mapped_resource->list_vr, preallocated_vr,
811 prev_in_mapped_resource,
812 next_in_mapped_resource);
813
814
815 if (vr->ops && vr->ops->unmap)
816 vr->ops->unmap(vr, uaddr, size);
817 if (preallocated_vr->ops && preallocated_vr->ops->ref)
818 preallocated_vr->ops->ref(preallocated_vr);
819
820
821 as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED,
822 size, vr->access_rights, 0);
823
824
825 break;
826 }
827
828
829 else if (uaddr <= vr->start)
830 {
831 sos_size_t translation = uaddr + size - vr->start;
832
833
834 vr->size -= translation;
835 vr->offset_in_resource += translation;
836 vr->start += translation;
837
838
839 if (vr->ops && vr->ops->unmap)
840 vr->ops->unmap(vr, uaddr + size,
841 translation);
842
843
844 as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED,
845 translation,
846 vr->access_rights, 0);
847
848
849
850 break;
851 }
852
853
854 else if (uaddr + size >= vr->start + vr->size)
855 {
856 sos_size_t unmapped_size = vr->start + vr->size - uaddr;
857
858
859 vr->size = uaddr - vr->start;
860
861
862 if (vr->ops && vr->ops->unmap)
863 vr->ops->unmap(vr, uaddr, unmapped_size);
864
865
866 as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED,
867 unmapped_size,
868 vr->access_rights, 0);
869
870 vr = vr->next_in_as;
871 continue;
872 }
873
874 sos_display_fatal_error("BUG uaddr=%x sz=%x vr_start=%x, vr_sz=%x",
875 uaddr, size, vr->start, vr->size);
876 }
877
878 need_to_setup_mmu = (sos_thread_get_current()->squatted_mm_context
879 != as->mm_context);
880 if (need_to_setup_mmu)
881 SOS_ASSERT_FATAL(SOS_OK
882 == sos_thread_prepare_user_space_access(as,
883 (sos_vaddr_t)
884 NULL));
885 {
886 sos_ret_t sz_unmapped = sos_paging_unmap_interval(uaddr, size);
887 SOS_ASSERT_FATAL(sz_unmapped >= 0);
888 as->phys_total -= sz_unmapped;
889 }
890 if (need_to_setup_mmu)
891 SOS_ASSERT_FATAL(SOS_OK == sos_thread_end_user_space_access());
892
893 if (! used_preallocated_vr)
894 sos_kmem_vmm_free((sos_vaddr_t)preallocated_vr);
895
896 return SOS_OK;
897 }
898
899
900 sos_ret_t
901 sos_umem_vmm_chprot(struct sos_umem_vmm_as * as,
902 sos_uaddr_t uaddr, sos_size_t size,
903 sos_ui32_t new_access_rights)
904 {
905 struct sos_umem_vmm_vr *start_vr, *vr,
906 *preallocated_middle_vr, *preallocated_right_vr;
907 sos_bool_t used_preallocated_middle_vr, used_preallocated_right_vr;
908
909 if (! SOS_IS_PAGE_ALIGNED(uaddr))
910 return -SOS_EINVAL;
911 if (size <= 0)
912 return -SOS_EINVAL;
913 size = SOS_PAGE_ALIGN_SUP(size);
914
915
916 if (uaddr < SOS_PAGING_BASE_USER_ADDRESS)
917 return -SOS_EINVAL;
918 if (uaddr > SOS_PAGING_TOP_USER_ADDRESS - size)
919 return -SOS_EINVAL;
920
921
922
923 used_preallocated_middle_vr = FALSE;
924 used_preallocated_right_vr = FALSE;
925 preallocated_middle_vr
926 = (struct sos_umem_vmm_vr *)sos_kmem_cache_alloc(cache_of_vr, 0);
927 if (! preallocated_middle_vr)
928 return -SOS_ENOMEM;
929 preallocated_right_vr
930 = (struct sos_umem_vmm_vr *)sos_kmem_cache_alloc(cache_of_vr, 0);
931 if (! preallocated_right_vr)
932 {
933 sos_kmem_vmm_free((sos_vaddr_t)preallocated_middle_vr);
934 return -SOS_ENOMEM;
935 }
936
937
938 start_vr = find_first_intersecting_vr(as, uaddr, size);
939 if (NULL == start_vr)
940 return SOS_OK;
941
942
943
944 vr = start_vr;
945 while (TRUE)
946 {
947
948
949 if (vr->start + vr->size <= uaddr)
950
951 break;
952
953
954 if (uaddr + size < vr->start)
955
956 break;
957
958 if (vr->flags & SOS_VR_MAP_SHARED)
959 {
960
961
962 if ( ( (new_access_rights & SOS_VM_MAP_PROT_READ)
963 && !(vr->mapped_resource->allowed_access_rights
964 & SOS_VM_MAP_PROT_READ) )
965 || ( (new_access_rights & SOS_VM_MAP_PROT_WRITE)
966 && !(vr->mapped_resource->allowed_access_rights
967 & SOS_VM_MAP_PROT_WRITE) )
968 || ( (new_access_rights & SOS_VM_MAP_PROT_EXEC)
969 && !(vr->mapped_resource->allowed_access_rights
970 & SOS_VM_MAP_PROT_EXEC) ) )
971 return -SOS_EPERM;
972 }
973
974 vr = vr->next_in_as;
975 }
976
977
978
979 vr = start_vr;
980 while (TRUE)
981 {
982
983
984
985 if (vr->start + vr->size <= uaddr)
986
987 break;
988
989
990 if (uaddr + size <= vr->start)
991
992 break;
993
994
995 if (vr->access_rights == new_access_rights)
996
997 {
998 vr = vr->next_in_as;
999 continue;
1000 }
1001
1002
1003 if ((vr->start >= uaddr)
1004 && (vr->start + vr->size <= uaddr + size))
1005 {
1006
1007 as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED,
1008 vr->size, vr->access_rights,
1009 new_access_rights);
1010 vr->access_rights = new_access_rights;
1011
1012 if (vr->flags & SOS_VR_MAP_SHARED)
1013
1014
1015 sos_paging_set_prot_of_interval(vr->start, vr->size,
1016 new_access_rights);
1017 else
1018
1019 {
1020
1021
1022
1023
1024
1025 if (! (new_access_rights & SOS_VM_MAP_PROT_WRITE))
1026 sos_paging_set_prot_of_interval(vr->start, vr->size,
1027 new_access_rights);
1028 }
1029
1030 vr = vr->next_in_as;
1031 continue;
1032 }
1033
1034
1035 else if ( (vr->start < uaddr)
1036 && (vr->start + vr->size > uaddr + size) )
1037 {
1038
1039
1040
1041 SOS_ASSERT_FATAL(! used_preallocated_middle_vr);
1042 SOS_ASSERT_FATAL(! used_preallocated_right_vr);
1043 used_preallocated_middle_vr = TRUE;
1044 memcpy(preallocated_middle_vr, vr, sizeof(*vr));
1045 used_preallocated_right_vr = TRUE;
1046 memcpy(preallocated_right_vr, vr, sizeof(*vr));
1047
1048
1049 preallocated_middle_vr->start = uaddr;
1050 preallocated_middle_vr->size = size;
1051 preallocated_right_vr->start = uaddr + size;
1052 preallocated_right_vr->size = vr->start + vr->size
1053 - (uaddr + size);
1054 preallocated_middle_vr->offset_in_resource
1055 += uaddr - vr->start;
1056 preallocated_right_vr->offset_in_resource
1057 += uaddr + size - vr->start;
1058 vr->size = uaddr - vr->start;
1059
1060
1061 preallocated_middle_vr->access_rights = new_access_rights;
1062 as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED,
1063 size, vr->access_rights,
1064 new_access_rights);
1065
1066
1067 list_insert_after_named(as->list_vr, vr, preallocated_middle_vr,
1068 prev_in_as, next_in_as);
1069 list_insert_after_named(as->list_vr, preallocated_middle_vr,
1070 preallocated_right_vr,
1071 prev_in_as, next_in_as);
1072
1073 list_add_tail_named(vr->mapped_resource->list_vr,
1074 preallocated_middle_vr,
1075 prev_in_mapped_resource,
1076 next_in_mapped_resource);
1077 list_add_tail_named(vr->mapped_resource->list_vr,
1078 preallocated_right_vr,
1079 prev_in_mapped_resource,
1080 next_in_mapped_resource);
1081
1082
1083 if (!(preallocated_middle_vr->flags & SOS_VR_MAP_SHARED)
1084 && (new_access_rights & SOS_VM_MAP_PROT_WRITE))
1085
1086 sos_paging_prepare_COW(preallocated_middle_vr->start,
1087 preallocated_middle_vr->size);
1088 else
1089 sos_paging_set_prot_of_interval(preallocated_middle_vr->start,
1090 preallocated_middle_vr->size,
1091 new_access_rights);
1092
1093 if (preallocated_right_vr->ops && preallocated_right_vr->ops->ref)
1094 preallocated_right_vr->ops->ref(preallocated_right_vr);
1095 if (preallocated_middle_vr->ops && preallocated_middle_vr->ops->ref)
1096 preallocated_middle_vr->ops->ref(preallocated_middle_vr);
1097
1098
1099 break;
1100 }
1101
1102
1103 else if (uaddr <= vr->start)
1104 {
1105
1106 sos_uoffset_t offset_in_region = uaddr + size - vr->start;
1107
1108
1109 SOS_ASSERT_FATAL(! used_preallocated_middle_vr);
1110 used_preallocated_middle_vr = TRUE;
1111 memcpy(preallocated_middle_vr, vr, sizeof(*vr));
1112
1113
1114 preallocated_middle_vr->start += offset_in_region;
1115 preallocated_middle_vr->size -= offset_in_region;
1116 vr->size = offset_in_region;
1117 preallocated_middle_vr->offset_in_resource += offset_in_region;
1118
1119
1120 as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED,
1121 vr->size,
1122 vr->access_rights,
1123 new_access_rights);
1124 vr->access_rights = new_access_rights;
1125
1126
1127 list_insert_after_named(as->list_vr, vr,
1128 preallocated_middle_vr,
1129 prev_in_as, next_in_as);
1130 list_add_tail_named(vr->mapped_resource->list_vr,
1131 preallocated_middle_vr,
1132 prev_in_mapped_resource,
1133 next_in_mapped_resource);
1134
1135
1136 if (!(vr->flags & SOS_VR_MAP_SHARED)
1137 && (new_access_rights & SOS_VM_MAP_PROT_WRITE))
1138
1139 sos_paging_prepare_COW(vr->start, vr->size);
1140 else
1141 sos_paging_set_prot_of_interval(vr->start, vr->size,
1142 new_access_rights);
1143
1144 if (preallocated_middle_vr->ops && preallocated_middle_vr->ops->ref)
1145 preallocated_middle_vr->ops->ref(preallocated_middle_vr);
1146
1147
1148
1149 break;
1150 }
1151
1152
1153 else if (uaddr + size >= vr->start + vr->size)
1154 {
1155
1156 sos_uoffset_t offset_in_region = uaddr - vr->start;
1157
1158
1159 SOS_ASSERT_FATAL(! used_preallocated_right_vr);
1160 used_preallocated_right_vr = TRUE;
1161 memcpy(preallocated_right_vr, vr, sizeof(*vr));
1162
1163
1164 preallocated_right_vr->start += offset_in_region;
1165 preallocated_right_vr->size -= offset_in_region;
1166 vr->size = offset_in_region;
1167 preallocated_right_vr->offset_in_resource += offset_in_region;
1168
1169
1170 as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED,
1171 preallocated_right_vr->size,
1172 vr->access_rights,
1173 new_access_rights);
1174 preallocated_right_vr->access_rights = new_access_rights;
1175
1176
1177 list_insert_after_named(as->list_vr, vr,
1178 preallocated_right_vr,
1179 prev_in_as, next_in_as);
1180 list_add_tail_named(vr->mapped_resource->list_vr,
1181 preallocated_right_vr,
1182 prev_in_mapped_resource,
1183 next_in_mapped_resource);
1184
1185
1186 if (!(preallocated_right_vr->flags & SOS_VR_MAP_SHARED)
1187 && (new_access_rights & SOS_VM_MAP_PROT_WRITE))
1188
1189 sos_paging_prepare_COW(preallocated_right_vr->start,
1190 preallocated_right_vr->size);
1191 else
1192 sos_paging_set_prot_of_interval(preallocated_right_vr->start,
1193 preallocated_right_vr->size,
1194 new_access_rights);
1195
1196 if (preallocated_right_vr->ops && preallocated_right_vr->ops->ref)
1197 preallocated_right_vr->ops->ref(preallocated_right_vr);
1198
1199 vr = vr->next_in_as;
1200 continue;
1201 }
1202
1203 sos_display_fatal_error("BUG");
1204 }
1205
1206 if (! used_preallocated_middle_vr)
1207 sos_kmem_vmm_free((sos_vaddr_t)preallocated_middle_vr);
1208 if (! used_preallocated_right_vr)
1209 sos_kmem_vmm_free((sos_vaddr_t)preallocated_right_vr);
1210
1211 return SOS_OK;
1212 }
1213
1214
/**
 * msync-style: write back the dirty pages of the shared, non-anonymous
 * mappings inside [uaddr, uaddr+size[ by calling the VRs' sync_page
 * callbacks.  VRs that are private, anonymous, or lack a sync_page
 * method are skipped.
 *
 * NOTE(review): the test `! vr->ops->sync_page` dereferences vr->ops
 * without a NULL check — relies on every VR having had ops installed
 * by its resource's mmap method; confirm against sos_umem_vmm_map.
 *
 * @return SOS_OK always.
 */
sos_ret_t
sos_umem_vmm_sync(struct sos_umem_vmm_as * as,
                  sos_uaddr_t uaddr, sos_size_t size,
                  sos_ui32_t flags)
{
  if (! SOS_IS_PAGE_ALIGNED(uaddr))
    return -SOS_EINVAL;
  if (size <= 0)
    return -SOS_EINVAL;
  size = SOS_PAGE_ALIGN_SUP(size);

  /* The interval must lie entirely inside user space */
  if (uaddr < SOS_PAGING_BASE_USER_ADDRESS)
    return -SOS_EINVAL;
  if (uaddr > SOS_PAGING_TOP_USER_ADDRESS - size)
    return -SOS_EINVAL;

  /* Outer loop: one iteration per VR (or per page actually synced) */
  while (TRUE)
    {
      struct sos_umem_vmm_vr *vr;

      if (size <= 0)
        break;

      /* Find the next VR overlapping what remains of the interval */
      vr = find_first_intersecting_vr(as, uaddr, size);
      if (NULL == vr)
        break;

      /* Skip VRs that are not sync-able: private mappings, anonymous
         resources, or resources without a sync_page method */
      if ( !(vr->flags & SOS_VR_MAP_SHARED)
           || (vr->mapped_resource->flags & SOS_MAPPED_RESOURCE_ANONYMOUS)
           || ! vr->ops->sync_page )
        {
          if (size <= vr->size)
            break;

          /* Advance past this VR.  Note: control then falls into the
             for-loop below, but its `uaddr - vr->start < vr->size`
             condition is immediately false, so nothing is synced */
          uaddr += vr->size;
          size -= vr->size;
        }

      /* Scan this VR page by page; on the first dirty page, sync it,
         advance past it and restart the outer loop */
      for ( ; (size > 0)
              && (uaddr - vr->start < vr->size) ;
            uaddr += SOS_PAGE_SIZE,
              size -= SOS_PAGE_SIZE)
        if (sos_paging_is_dirty(uaddr))
          {
            /* Delegate the write-back to the resource */
            vr->ops->sync_page(vr, uaddr, flags);
            uaddr += SOS_PAGE_SIZE;
            size -= SOS_PAGE_SIZE;
            break;
          }
    }

  return SOS_OK;
}
1277
1278
1279 sos_ret_t
1280 sos_umem_vmm_resize(struct sos_umem_vmm_as * as,
1281 sos_uaddr_t old_uaddr, sos_size_t old_size,
1282 sos_uaddr_t *new_uaddr, sos_size_t new_size,
1283 sos_ui32_t flags)
1284 {
1285 sos_luoffset_t new_offset_in_resource;
1286 sos_bool_t must_move_vr = FALSE;
1287 struct sos_umem_vmm_vr *vr, *prev_vr, *next_vr;
1288
1289
1290 if (*new_uaddr < SOS_PAGING_BASE_USER_ADDRESS)
1291 return -SOS_EINVAL;
1292 if (*new_uaddr > SOS_PAGING_TOP_USER_ADDRESS - new_size)
1293 return -SOS_EINVAL;
1294
1295 old_uaddr = SOS_PAGE_ALIGN_INF(old_uaddr);
1296 old_size = SOS_PAGE_ALIGN_SUP(old_size);
1297 if (! SOS_IS_PAGE_ALIGNED(*new_uaddr))
1298 return -SOS_EINVAL;
1299 if (new_size <= 0)
1300 return -SOS_EINVAL;
1301 new_size = SOS_PAGE_ALIGN_SUP(new_size);
1302
1303
1304 vr = find_first_intersecting_vr(as, old_uaddr, old_size);
1305 if (! vr)
1306 return -SOS_EINVAL;
1307
1308
1309 if ( (vr->start > old_uaddr)
1310 || (vr->start + vr->size < old_uaddr + old_size) )
1311 return -SOS_EINVAL;
1312
1313
1314
1315 prev_vr = vr->prev_in_as;
1316 if (prev_vr->start >= vr->start)
1317 prev_vr = NULL;
1318 next_vr = vr->prev_in_as;
1319 if (next_vr->start <= vr->start)
1320 next_vr = NULL;
1321
1322
1323
1324
1325
1326
1327
1328 if ( (*new_uaddr < vr->start)
1329 && (vr->start - *new_uaddr > vr->offset_in_resource) )
1330 return -SOS_EINVAL;
1331
1332
1333 if (vr->start > *new_uaddr)
1334 new_offset_in_resource
1335 = vr->offset_in_resource
1336 - (vr->start - *new_uaddr);
1337 else
1338 new_offset_in_resource
1339 = vr->offset_in_resource
1340 + (*new_uaddr - vr->start);
1341
1342
1343
1344 if (prev_vr && (prev_vr->start + prev_vr->size > *new_uaddr))
1345 must_move_vr |= TRUE;
1346 if (next_vr && (next_vr->start < *new_uaddr + new_size))
1347 must_move_vr |= TRUE;
1348
1349
1350 if (*new_uaddr < SOS_PAGING_BASE_USER_ADDRESS)
1351 must_move_vr |= TRUE;
1352 if (*new_uaddr > SOS_PAGING_TOP_USER_ADDRESS - new_size)
1353 must_move_vr |= TRUE;
1354
1355
1356 if ( must_move_vr && !(flags & SOS_VR_REMAP_MAYMOVE) )
1357 return -SOS_EINVAL;
1358
1359
1360
1361 if (must_move_vr)
1362 {
1363 sos_uaddr_t uaddr, result_uaddr;
1364 sos_ret_t retval;
1365
1366 result_uaddr = *new_uaddr;
1367 retval = sos_umem_vmm_map(as, & result_uaddr, new_size,
1368 vr->access_rights,
1369 vr->flags | INTERNAL_MAP_CALLED_FROM_MREMAP,
1370 vr->mapped_resource,
1371 new_offset_in_resource);
1372 if (SOS_OK != retval)
1373 return retval;
1374
1375
1376 for (uaddr = vr->start ;
1377 uaddr < vr->start + vr->size ;
1378 uaddr += SOS_PAGE_SIZE)
1379 {
1380 sos_paddr_t paddr;
1381 sos_ui32_t prot;
1382 sos_uaddr_t vaddr;
1383
1384 if (uaddr < *new_uaddr)
1385 continue;
1386 if (uaddr > *new_uaddr + new_size)
1387 continue;
1388
1389
1390
1391 if (vr->start >= *new_uaddr)
1392 vaddr = result_uaddr
1393 + (uaddr - vr->start)
1394 + (vr->start - *new_uaddr);
1395 else
1396 vaddr = result_uaddr
1397 + (uaddr - vr->start)
1398 - (*new_uaddr - vr->start);
1399
1400 paddr = sos_paging_get_paddr(uaddr);
1401 if (! paddr)
1402
1403 continue;
1404
1405 prot = sos_paging_get_prot(uaddr);
1406 SOS_ASSERT_FATAL(prot);
1407
1408
1409 retval = sos_paging_map(paddr, vaddr, TRUE, prot);
1410 if (SOS_OK != retval)
1411 {
1412 sos_umem_vmm_unmap(as, result_uaddr, new_size);
1413 return retval;
1414 }
1415 }
1416
1417 retval = sos_umem_vmm_unmap(as, vr->start, vr->size);
1418 if (SOS_OK != retval)
1419 {
1420 sos_umem_vmm_unmap(as, result_uaddr, new_size);
1421 return retval;
1422 }
1423
1424 *new_uaddr = result_uaddr;
1425 return retval;
1426 }
1427
1428
1429
1430
1431 if (*new_uaddr + new_size < vr->start + vr->size)
1432 sos_umem_vmm_unmap(as, *new_uaddr + new_size,
1433 vr->start + vr->size - (*new_uaddr + new_size));
1434 else
1435 {
1436 as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED,
1437 *new_uaddr + new_size
1438 - (vr->start + vr->size),
1439 0, vr->access_rights);
1440 vr->size += *new_uaddr + new_size - (vr->start + vr->size);
1441 }
1442
1443 if (*new_uaddr > vr->start)
1444 sos_umem_vmm_unmap(as, vr->start, *new_uaddr - vr->start);
1445 else
1446 {
1447 as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED,
1448 vr->start - *new_uaddr,
1449 0, vr->access_rights);
1450 vr->size += vr->start - *new_uaddr;
1451 vr->start = *new_uaddr;
1452 vr->offset_in_resource = new_offset_in_resource;
1453 }
1454
1455 SOS_ASSERT_FATAL(vr->start == *new_uaddr);
1456 SOS_ASSERT_FATAL(vr->size == new_size);
1457 SOS_ASSERT_FATAL(vr->offset_in_resource == new_offset_in_resource);
1458
1459 return SOS_OK;
1460 }
1461
1462
1463 sos_ret_t sos_umem_vmm_try_resolve_page_fault(sos_uaddr_t uaddr,
1464 sos_bool_t write_access,
1465 sos_bool_t user_access)
1466 {
1467 struct sos_process *process = sos_thread_get_current()->process;
1468 struct sos_umem_vmm_as *as;
1469 struct sos_umem_vmm_vr *vr;
1470
1471 if (! process)
1472 return -SOS_EFAULT;
1473
1474 as = sos_process_get_address_space(process);
1475 if (! as)
1476 return -SOS_EFAULT;
1477
1478 vr = find_first_intersecting_vr(as, uaddr, 1);
1479 if (! vr)
1480 return -SOS_EFAULT;
1481
1482
1483 if (write_access && !(vr->access_rights & SOS_VM_MAP_PROT_WRITE))
1484 return -SOS_EFAULT;
1485
1486
1487 if (write_access && !(vr->flags & SOS_VR_MAP_SHARED))
1488 {
1489 if (SOS_OK == sos_paging_try_resolve_COW(uaddr))
1490 {
1491 as->pgflt_cow ++;
1492 return SOS_OK;
1493 }
1494 }
1495
1496
1497 if (SOS_OK != vr->ops->page_in(vr, uaddr, write_access))
1498 {
1499 as->pgflt_invalid ++;
1500 return -SOS_EFAULT;
1501 }
1502
1503 as->phys_total += SOS_PAGE_SIZE;
1504 as->pgflt_page_in ++;
1505
1506
1507 if (!(vr->flags & SOS_VR_MAP_SHARED))
1508 {
1509 sos_paging_prepare_COW(SOS_PAGE_ALIGN_INF(uaddr),
1510 SOS_PAGE_SIZE);
1511 }
1512
1513 return SOS_OK;
1514 }
1515
1516
1517 sos_ret_t
1518 sos_umem_vmm_init_heap(struct sos_umem_vmm_as * as,
1519 sos_uaddr_t heap_start)
1520 {
1521 SOS_ASSERT_FATAL(! as->heap_start);
1522
1523 as->heap_start = heap_start;
1524 as->heap_size = 0;
1525 return SOS_OK;
1526 }
1527
1528
/*
 * Unix brk()-like call: move the top of the heap to new_top_uaddr
 * (rounded up to a page boundary).  Returns the resulting heap top
 * address, or NULL on failure.  A new_top_uaddr of 0 queries the
 * current heap top without changing anything.
 */
sos_uaddr_t
sos_umem_vmm_brk(struct sos_umem_vmm_as * as,
		 sos_uaddr_t new_top_uaddr)
{
  sos_uaddr_t new_start;
  sos_size_t new_size;
  /* sos_umem_vmm_init_heap() must have been called first */
  SOS_ASSERT_FATAL(as->heap_start);

  /* brk(0): return the current heap top */
  if (! new_top_uaddr)
    return as->heap_start + as->heap_size;

  /* No change requested */
  if (new_top_uaddr == as->heap_start + as->heap_size)
    return as->heap_start + as->heap_size;

  /* The heap cannot shrink below its own start address */
  if (new_top_uaddr < as->heap_start)
    return (sos_uaddr_t)NULL;

  new_top_uaddr = SOS_PAGE_ALIGN_SUP(new_top_uaddr);
  new_start = as->heap_start;
  new_size = new_top_uaddr - as->heap_start;

  /* First allocation: create the heap region by mapping the zero
     device (demand-zero memory) at the recorded heap start */
  if (! as->heap_size)
    {
      if (SOS_OK != sos_dev_zero_map(as, & as->heap_start,
				     new_size,
				     SOS_VM_MAP_PROT_READ
				     | SOS_VM_MAP_PROT_WRITE,
				     0 ))
	return (sos_uaddr_t)NULL;

      as->heap_size = new_size;
      return as->heap_start + as->heap_size;
    }

  /* Shrink-to-zero: unmap the whole heap region.  NOTE(review): if
     sos_size_t is unsigned, "<= 0" is effectively "== 0" here —
     confirm against the type's definition */
  if (new_size <= 0)
    {
      if (SOS_OK != sos_umem_vmm_unmap(as,
				       as->heap_start, as->heap_size))
	return (sos_uaddr_t)NULL;
    }
  else
    {
      /* Grow or shrink the existing region in place; the heap is
	 expected not to move (asserted below) */
      if (SOS_OK != sos_umem_vmm_resize(as,
					as->heap_start, as->heap_size,
					& new_start, new_size,
					0))
	return (sos_uaddr_t)NULL;
    }

  SOS_ASSERT_FATAL(new_start == as->heap_start);
  as->heap_size = new_size;
  return new_top_uaddr;
}
1584
1585
1586 static struct sos_umem_vmm_vr *
1587 find_enclosing_or_next_vr(struct sos_umem_vmm_as * as,
1588 sos_uaddr_t uaddr)
1589 {
1590 struct sos_umem_vmm_vr *vr;
1591 int nb_vr;
1592
1593 if (uaddr < SOS_PAGING_BASE_USER_ADDRESS)
1594 return NULL;
1595 if (uaddr > SOS_PAGING_TOP_USER_ADDRESS)
1596 return NULL;
1597
1598 list_foreach_named(as->list_vr, vr, nb_vr, prev_in_as, next_in_as)
1599 {
1600
1601
1602 if (uaddr <= vr->start + (vr->size - 1))
1603 return vr;
1604 }
1605
1606 return NULL;
1607 }
1608
1609
1610 static struct sos_umem_vmm_vr *
1611 find_first_intersecting_vr(struct sos_umem_vmm_as * as,
1612 sos_uaddr_t start_uaddr, sos_size_t size)
1613 {
1614 struct sos_umem_vmm_vr * vr;
1615 vr = find_enclosing_or_next_vr(as, start_uaddr);
1616 if (! vr)
1617 return NULL;
1618
1619 if (start_uaddr + size <= vr->start)
1620 return NULL;
1621
1622 return vr;
1623 }
1624
1625
/*
 * Find a free virtual interval of `size' bytes in user space,
 * starting the search at hint_uaddr (raised to the user-space base
 * when below it).  The search walks the address-ordered circular VR
 * list, wrapping around at most once; returns the start address of
 * the first suitable hole, or NULL when none is large enough.
 */
static sos_uaddr_t
find_first_free_interval(struct sos_umem_vmm_as * as,
			 sos_uaddr_t hint_uaddr, sos_size_t size)
{
  struct sos_umem_vmm_vr * initial_vr, * vr;

  if (hint_uaddr < SOS_PAGING_BASE_USER_ADDRESS)
    hint_uaddr = SOS_PAGING_BASE_USER_ADDRESS;

  /* Request cannot fit between the hint and the top of user space */
  if (hint_uaddr > SOS_PAGING_TOP_USER_ADDRESS - size + 1)
    return (sos_uaddr_t)NULL;

  initial_vr = vr = find_enclosing_or_next_vr(as, hint_uaddr);
  if (! vr)
    /* No VR at or above the hint: the area upward is entirely free */
    return hint_uaddr;

  /* Walk the VRs, pushing the candidate address past each one that
     stands in the way */
  do
    {
      /* Enough room between the candidate address and this VR? */
      if (hint_uaddr + size <= vr->start)
	/* Yes: done */
	return hint_uaddr;

      /* NOTE(review): this test distinguishes "next VR is further
	 up" from "we wrapped past the end of the circular list";
	 confirm that comparing next_in_as->start against hint_uaddr
	 (rather than vr->start) is the intended wrap detection */
      if (vr->next_in_as->start >= hint_uaddr)
	/* Advance the candidate past the current VR */
	hint_uaddr = vr->start + vr->size;
      else
	{
	  /* Reached the last VR: try the space above it, otherwise
	     wrap the search back to the bottom of user space */
	  if (hint_uaddr <= SOS_PAGING_TOP_USER_ADDRESS - size)
	    return hint_uaddr;

	  hint_uaddr = SOS_PAGING_BASE_USER_ADDRESS;
	}

      /* Move on to the next VR */
      vr = vr->next_in_as;
    }
  while (vr != initial_vr);

  /* Scanned the whole list without finding a large-enough hole */
  return (sos_uaddr_t)NULL;
}
1677
1678
1679 static void
1680 as_account_change_of_vr_protection(struct sos_umem_vmm_as * as,
1681 sos_bool_t is_shared,
1682 sos_size_t size,
1683 sos_ui32_t prev_access_rights,
1684 sos_ui32_t new_access_rights)
1685 {
1686 if (prev_access_rights == new_access_rights)
1687 return;
1688
1689 #define _UPDATE_VMSTAT(field,is_increment) \
1690 ({ if (is_increment > 0) \
1691 as->field += size; \
1692 else \
1693 { SOS_ASSERT_FATAL(as->field >= size); as->field -= size; } })
1694 #define UPDATE_VMSTAT(field,is_increment) \
1695 ({ if (is_shared) _UPDATE_VMSTAT(vm_shrd.field, is_increment); \
1696 _UPDATE_VMSTAT(vm_total.field, is_increment); \
1697 SOS_ASSERT_FATAL(as->vm_total.field >= as->vm_shrd.field); })
1698
1699 if ( (new_access_rights & SOS_VM_MAP_PROT_WRITE)
1700 && !(prev_access_rights & SOS_VM_MAP_PROT_WRITE))
1701 {
1702 UPDATE_VMSTAT(rw, +1);
1703 if (prev_access_rights & SOS_VM_MAP_PROT_READ)
1704 UPDATE_VMSTAT(ro, -1);
1705 }
1706 else if ( !(new_access_rights & SOS_VM_MAP_PROT_WRITE)
1707 && (prev_access_rights & SOS_VM_MAP_PROT_WRITE))
1708 {
1709 if (new_access_rights & SOS_VM_MAP_PROT_READ)
1710 UPDATE_VMSTAT(ro, +1);
1711 UPDATE_VMSTAT(rw, -1);
1712 }
1713 else if (new_access_rights & SOS_VM_MAP_PROT_READ)
1714 UPDATE_VMSTAT(ro, +1);
1715 else if (!(new_access_rights & SOS_VM_MAP_PROT_READ))
1716 UPDATE_VMSTAT(ro, -1);
1717
1718 if ( (new_access_rights & SOS_VM_MAP_PROT_EXEC)
1719 && !(prev_access_rights & SOS_VM_MAP_PROT_EXEC))
1720 {
1721 UPDATE_VMSTAT(code, +1);
1722 }
1723 else if ( !(new_access_rights & SOS_VM_MAP_PROT_EXEC)
1724 && (prev_access_rights & SOS_VM_MAP_PROT_EXEC))
1725 {
1726 UPDATE_VMSTAT(code, -1);
1727 }
1728
1729 if (new_access_rights && !prev_access_rights)
1730 UPDATE_VMSTAT(overall, +1);
1731 else if (!new_access_rights && prev_access_rights)
1732 UPDATE_VMSTAT(overall, -1);
1733
1734 }