/tmp/sos-code-article7/hwcore/cpu_context.c (2005-02-05 17:52:22.000000000 +0100) | ../sos-code-article7.5/hwcore/cpu_context.c (2005-04-27 20:17:15.000000000 +0200)
} | } |
| |
| |
sos_ret_t sos_cpu_ustate_init(struct sos_cpu_state **ctxt, | /** |
sos_uaddr_t user_start_PC, | * Helper function to create a new user thread context. When |
sos_ui32_t user_start_arg, | * model_uctxt is NON NULL, the new user context is the copy of |
sos_uaddr_t user_initial_SP, | * model_uctxt, otherwise the SP/PC registers are initialized to the |
sos_vaddr_t kernel_stack_bottom, | * user_initial_SP/PC arguments |
sos_size_t kernel_stack_size) | */ |
| static sos_ret_t cpu_ustate_init(struct sos_cpu_state **ctxt, |
| const struct sos_cpu_state *model_uctxt, |
| sos_uaddr_t user_start_PC, |
| sos_ui32_t user_start_arg1, |
| sos_ui32_t user_start_arg2, |
| sos_uaddr_t user_initial_SP, |
| sos_vaddr_t kernel_stack_bottom, |
| sos_size_t kernel_stack_size) |
/* We are initializing a User thread's context */ | /* We are initializing a User thread's context */ |
struct sos_cpu_ustate *uctxt; | struct sos_cpu_ustate *uctxt; |
|
|
- sizeof(struct sos_cpu_ustate); | - sizeof(struct sos_cpu_ustate); |
uctxt = (struct sos_cpu_ustate*)uctxt_vaddr; | uctxt = (struct sos_cpu_ustate*)uctxt_vaddr; |
| |
| if (model_uctxt && !sos_cpu_context_is_in_user_mode(model_uctxt)) |
| return -SOS_EINVAL; |
| |
/* If needed, poison the kernel stack */ | /* If needed, poison the kernel stack */ |
#ifdef SOS_CPU_STATE_DETECT_UNINIT_KERNEL_VARS | #ifdef SOS_CPU_STATE_DETECT_UNINIT_KERNEL_VARS |
memset((void*)kernel_stack_bottom, | memset((void*)kernel_stack_bottom, |
|
|
/* | /* |
* Setup the initial context structure, so that the CPU will restore | * Setup the initial context structure, so that the CPU will restore |
* the initial registers' value for the user thread. The | * the initial registers' value for the user thread. The |
* user thread argument is passed in the ax register. | * user thread argument is passed in the EAX register. |
| |
/* Initialize the CPU context structure */ | /* Initialize the CPU context structure */ |
memset(uctxt, 0x0, sizeof(struct sos_cpu_ustate)); | if (! model_uctxt) |
| { |
| memset(uctxt, 0x0, sizeof(struct sos_cpu_ustate)); |
/* Tell the CPU context structure that the first instruction to | /* Tell the CPU context structure that the first instruction to |
execute will be located at user_start_PC (in user space) */ | execute will be located at user_start_PC (in user space) */ |
uctxt->regs.eip = (sos_ui32_t)user_start_PC; | uctxt->regs.eip = (sos_ui32_t)user_start_PC; |
| |
| /* Tell the CPU where will be the user stack */ |
| uctxt->cpl3_esp = user_initial_SP; |
| } |
| else |
| memcpy(uctxt, model_uctxt, sizeof(struct sos_cpu_ustate)); |
/* The parameter to the start function is not passed by the stack to | /* The parameter to the start function is not passed by the stack to |
avoid a possible page fault */ | avoid a possible page fault */ |
uctxt->regs.eax = user_start_arg; | uctxt->regs.eax = user_start_arg1; |
/* Tell the CPU where will be the user stack */ | /* Optional additional argument for non-duplicated threads */ |
uctxt->cpl3_esp = user_initial_SP; | if (! model_uctxt) |
| uctxt->regs.ebx = user_start_arg2; |
/* Setup the segment registers */ | /* Setup the segment registers */ |
uctxt->regs.cs | uctxt->regs.cs |
|
|
} | } |
| |
| |
inline sos_ret_t | sos_ret_t sos_cpu_ustate_init(struct sos_cpu_state **ctxt, |
| sos_uaddr_t user_start_PC, |
| sos_ui32_t user_start_arg1, |
| sos_ui32_t user_start_arg2, |
| sos_uaddr_t user_initial_SP, |
| sos_vaddr_t kernel_stack_bottom, |
| sos_size_t kernel_stack_size) |
| { |
| return cpu_ustate_init(ctxt, NULL, |
| user_start_PC, |
| user_start_arg1, user_start_arg2, |
| user_initial_SP, |
| kernel_stack_bottom, kernel_stack_size); |
| } |
| |
| |
| sos_ret_t sos_cpu_ustate_duplicate(struct sos_cpu_state **ctxt, |
| const struct sos_cpu_state *model_uctxt, |
| sos_ui32_t user_retval, |
| sos_vaddr_t kernel_stack_bottom, |
| sos_size_t kernel_stack_size) |
| { |
| return cpu_ustate_init(ctxt, model_uctxt, |
| /* ignored */0, |
| user_retval, /* ignored */0, |
| /* ignored */0, |
| kernel_stack_bottom, kernel_stack_size); |
| } |
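The init/duplicate pair above is the CPU-level half of a fork() service: the child starts as a register-level copy of its parent, except that EAX, which carries the syscall return value, is forced to user_retval. A minimal sketch of a caller, where alloc_child_kernel_stack() and CHILD_KSTACK_SIZE are hypothetical placeholders; only the sos_cpu_ustate_duplicate() call comes from the code above:

    static sos_ret_t sketch_fork_cpu_side(const struct sos_cpu_state *parent_uctxt,
                                          struct sos_cpu_state **child_uctxt)
    {
      /* Hypothetical helper: allocate a fresh kernel stack for the child */
      sos_vaddr_t kstack = alloc_child_kernel_stack(CHILD_KSTACK_SIZE);
      if (! kstack)
        return -SOS_ENOMEM;

      /* Child = register-level copy of the parent, with EAX forced to 0
         so that fork() returns 0 in the child */
      return sos_cpu_ustate_duplicate(child_uctxt, parent_uctxt,
                                      /* user_retval */ 0,
                                      kstack, CHILD_KSTACK_SIZE);
    }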
| |
| |
| sos_ret_t |
{ | { |
/* An interrupted user thread has its CS register set to that of the | /* An interrupted user thread has its CS register set to that of the |
|
|
} | } |
| |
| |
| sos_ret_t |
| sos_cpu_context_set_EX_return_address(struct sos_cpu_state *ctxt, |
| sos_vaddr_t ret_vaddr) |
| { |
| ctxt->eip = ret_vaddr; |
| return SOS_OK; |
| } |
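This fixup hook is what lets user-access primitives such as sos_memcpy_from_user() (used below) survive page faults on user addresses: the page-fault handler redirects the interrupted kernel code to a recovery address instead of panicking. A schematic sketch of the expected usage, assuming GCC's address-of-label extension; the real implementation belongs to uaccess.c, which is not part of this diff:

    sos_ret_t sketch_memcpy_from_user(sos_vaddr_t dest, sos_uaddr_t src,
                                      sos_size_t size)
    {
      struct sos_thread *cur_thr = sos_thread_get_current();
      sos_size_t i;

      /* Arm the fixup: on an unresolved kernel-mode fault, pgflt_ex() will
         call sos_cpu_context_set_EX_return_address(ctxt, return_vaddr) */
      cur_thr->fixup_uaccess.return_vaddr = (sos_vaddr_t)&& uaccess_failed;

      for (i = 0 ; i < size ; i++)
        ((char*)dest)[i] = ((char*)src)[i];   /* may fault on src */

    uaccess_failed:
      /* Disarm the fixup and report how many bytes were actually copied */
      cur_thr->fixup_uaccess.return_vaddr = 0;
      return i;
    }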
| |
| |
void sos_cpu_context_dump(const struct sos_cpu_state *ctxt) | void sos_cpu_context_dump(const struct sos_cpu_state *ctxt) |
{ | { |
char buf[128]; | char buf[128]; |
| |
snprintf(buf, sizeof(buf), | snprintf(buf, sizeof(buf), |
"CPU: eip=%x esp=%x eflags=%x cs=%x ds=%x ss=%x err=%x", | "CPU: eip=%x esp0=%x eflags=%x cs=%x ds=%x ss0=%x err=%x", |
(unsigned)GET_CPU_CS_REGISTER_VALUE(ctxt->cs), (unsigned)ctxt->ds, | (unsigned)GET_CPU_CS_REGISTER_VALUE(ctxt->cs), (unsigned)ctxt->ds, |
(unsigned)ctxt->cpl0_ss, | (unsigned)ctxt->cpl0_ss, |
(unsigned)ctxt->error_code); | (unsigned)ctxt->error_code); |
| if (TRUE == sos_cpu_context_is_in_user_mode(ctxt)) |
| { |
| struct sos_cpu_ustate * uctxt = (struct sos_cpu_ustate*)ctxt; |
 | snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), |
 | " esp3=%x ss3=%x", |
 | (unsigned)uctxt->cpl3_esp, (unsigned)uctxt->cpl3_ss); |
| } |
| else |
| snprintf(buf, sizeof(buf), "%s [KERNEL MODE]", buf); |
| |
sos_bochs_putstring(buf); sos_bochs_putstring("\n"); | sos_bochs_putstring(buf); sos_bochs_putstring("\n"); |
sos_x86_videomem_putstring(23, 0, | sos_x86_videomem_putstring(23, 0, |
SOS_X86_VIDEO_FG_BLACK | SOS_X86_VIDEO_BG_LTGRAY, | SOS_X86_VIDEO_FG_BLACK | SOS_X86_VIDEO_BG_LTGRAY, |
buf); | buf); |
| |
| |
|
|
| |
/* Copy the array containing the remaining arguments from user | /* Copy the array containing the remaining arguments from user |
space */ | space */ |
retval = sos_copy_from_user((sos_vaddr_t)other_args, | retval = sos_memcpy_from_user((sos_vaddr_t)other_args, |
(sos_uaddr_t)uaddr_other_args, | (sos_uaddr_t)uaddr_other_args, |
sizeof(other_args)); | sizeof(other_args)); |
if (SOS_OK != retval) | if (sizeof(other_args) != retval) |
return retval; | return -SOS_EFAULT; |
*arg3 = other_args[0]; | *arg3 = other_args[0]; |
*arg4 = other_args[1]; | *arg4 = other_args[1]; |
return retval; | return SOS_OK; |
| |
| |
|
|
| |
/* Copy the array containing the remaining arguments from user | /* Copy the array containing the remaining arguments from user |
space */ | space */ |
retval = sos_copy_from_user((sos_vaddr_t)other_args, | retval = sos_memcpy_from_user((sos_vaddr_t)other_args, |
(sos_uaddr_t)uaddr_other_args, | (sos_uaddr_t)uaddr_other_args, |
sizeof(other_args)); | sizeof(other_args)); |
if (SOS_OK != retval) | if (sizeof(other_args) != retval) |
return retval; | return -SOS_EFAULT; |
*arg3 = other_args[0]; | *arg3 = other_args[0]; |
*arg4 = other_args[1]; | *arg4 = other_args[1]; |
*arg5 = other_args[2]; | *arg5 = other_args[2]; |
return retval; | return SOS_OK; |
| |
| |
|
|
| |
/* Copy the array containing the remaining arguments from user | /* Copy the array containing the remaining arguments from user |
space */ | space */ |
retval = sos_copy_from_user((sos_vaddr_t)other_args, | retval = sos_memcpy_from_user((sos_vaddr_t)other_args, |
(sos_uaddr_t)uaddr_other_args, | (sos_uaddr_t)uaddr_other_args, |
sizeof(other_args)); | sizeof(other_args)); |
| if (sizeof(other_args) != retval) |
| return -SOS_EFAULT; |
| |
| *arg3 = other_args[0]; |
| *arg4 = other_args[1]; |
| *arg5 = other_args[2]; |
| *arg6 = other_args[3]; |
| return SOS_OK; |
| } |
| |
| |
| sos_ret_t sos_syscall_get7args(const struct sos_cpu_state *user_ctxt, |
| /* out */unsigned int *arg1, |
| /* out */unsigned int *arg2, |
| /* out */unsigned int *arg3, |
| /* out */unsigned int *arg4, |
| /* out */unsigned int *arg5, |
| /* out */unsigned int *arg6, |
| /* out */unsigned int *arg7) |
| { |
| sos_uaddr_t uaddr_other_args; |
| unsigned int other_args[5]; |
| sos_ret_t retval; |
| |
| /* Retrieve the 3 arguments. The last one is an array containing the |
| remaining arguments */ |
| retval = sos_syscall_get3args(user_ctxt, arg1, arg2, |
| (unsigned int *)& uaddr_other_args); |
| if (SOS_OK != retval) |
| return retval; |
| |
| /* Copy the array containing the remaining arguments from user |
| space */ |
| retval = sos_memcpy_from_user((sos_vaddr_t)other_args, |
| (sos_uaddr_t)uaddr_other_args, |
| sizeof(other_args)); |
| if (sizeof(other_args) != retval) |
| return -SOS_EFAULT; |
| |
| *arg3 = other_args[0]; |
| *arg4 = other_args[1]; |
| *arg5 = other_args[2]; |
| *arg6 = other_args[3]; |
| *arg7 = other_args[4]; |
| return SOS_OK; |
| } |
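Seen from userland, the convention implemented by the sos_syscall_getNargs() helpers (N > 3) is symmetric: the first two arguments travel in registers, and arguments 3..N are packed into an array whose user-space address is passed as the third register argument. A sketch of the matching user-side stub for 7 arguments, where sos_syscall3() stands for the hypothetical 3-register syscall stub of the userland library (not part of this diff):

    static inline int sketch_syscall7(int id,
                                      unsigned arg1, unsigned arg2,
                                      unsigned arg3, unsigned arg4,
                                      unsigned arg5, unsigned arg6,
                                      unsigned arg7)
    {
      /* Arguments 3..7 travel through user memory, not registers */
      unsigned other_args[5] = { arg3, arg4, arg5, arg6, arg7 };
      return sos_syscall3(id, arg1, arg2, (unsigned)other_args);
    }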
| |
| |
| sos_ret_t sos_syscall_get8args(const struct sos_cpu_state *user_ctxt, |
| /* out */unsigned int *arg1, |
| /* out */unsigned int *arg2, |
| /* out */unsigned int *arg3, |
| /* out */unsigned int *arg4, |
| /* out */unsigned int *arg5, |
| /* out */unsigned int *arg6, |
| /* out */unsigned int *arg7, |
| /* out */unsigned int *arg8) |
| { |
| sos_uaddr_t uaddr_other_args; |
| unsigned int other_args[6]; |
| sos_ret_t retval; |
| |
| /* Retrieve the 3 arguments. The last one is an array containing the |
| remaining arguments */ |
| retval = sos_syscall_get3args(user_ctxt, arg1, arg2, |
| (unsigned int *)& uaddr_other_args); |
return retval; | return retval; |
| |
| /* Copy the array containing the remaining arguments from user |
| space */ |
| retval = sos_memcpy_from_user((sos_vaddr_t)other_args, |
| (sos_uaddr_t)uaddr_other_args, |
| sizeof(other_args)); |
| if (sizeof(other_args) != retval) |
| return -SOS_EFAULT; |
| |
*arg3 = other_args[0]; | *arg3 = other_args[0]; |
*arg4 = other_args[1]; | *arg4 = other_args[1]; |
*arg5 = other_args[2]; | *arg5 = other_args[2]; |
*arg6 = other_args[3]; | *arg6 = other_args[3]; |
return retval; | *arg7 = other_args[4]; |
| *arg8 = other_args[5]; |
| return SOS_OK; |
| |
| |
| |
/tmp/sos-code-article7/hwcore/paging.c (2005-02-05 17:52:22.000000000 +0100) | ../sos-code-article7.5/hwcore/paging.c (2005-04-27 20:17:14.000000000 +0200)
#include "paging.h" | #include "paging.h" |
| |
| |
| /* |
| * Important NOTICE concerning the use of the reference & occupation |
| * counters of the physical pages by the "paging" subsystem: |
 | * - All the kernel PTs are SHARED. This means that as soon as one |
 | * kernel PT belongs to one mm_context, it belongs to ALL the |
 | * mm_contexts. We don't update the real reference count of the PTs |
 | * in this respect, because that would require updating the |
 | * reference counts of ALL the kernel PTs as soon as a new |
 | * mm_context is created, or as soon as an mm_context is |
 | * destroyed. This way, the reference count stays constant, |
 | * independently of the actual number of PDs really sharing them. |
 | * - We do NOT maintain the occupation count of the PDs: this would |
 | * add a little overhead for no benefit |
 | * - We DO maintain the occupation count of ALL the PTs: it is the |
 | * number of PTEs allocated in each PT |
| */ |
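Condensed into code, the discipline described in this notice is the following (extracted from the map/unmap paths further below, where paddr_of_PT stands for pd[index_in_pd].pt_paddr << 12 and release_the_PT() abbreviates the PDE-reset sequence of sos_paging_unmap()):

    /* map(vpage -> ppage) in user space: */
    sos_physmem_inc_physpage_occupation(paddr_of_PT); /* one more PTE used in the PT */
    sos_physmem_ref_physpage_at(ppage);               /* the mapped page is truly referenced */

    /* unmap(vpage): */
    sos_physmem_unref_physpage(ppage);
    if (sos_physmem_dec_physpage_occupation(paddr_of_PT) > 0)
      release_the_PT(); /* last PTE gone: unreference the PT, reset its PDE */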
| |
| |
/** The structure of a page directory entry. See Intel vol 3 section | /** The structure of a page directory entry. See Intel vol 3 section |
3.6.4 */ | 3.6.4 */ |
struct x86_pde | struct x86_pde |
|
|
} __attribute__ ((packed)); | } __attribute__ ((packed)); |
| |
| |
| /** Intermediate type to speed up PDE copy */ |
| typedef union { |
| struct x86_pde pde; |
| sos_ui32_t ui32; |
| } x86_pde_val_t; |
| |
| |
/** The structure of a page table entry. See Intel vol 3 section | /** The structure of a page table entry. See Intel vol 3 section |
3.6.4 */ | 3.6.4 */ |
struct x86_pte | struct x86_pte |
|
|
} __attribute__ ((packed)); | } __attribute__ ((packed)); |
| |
| |
| /** Intermediate type to speed up PTE copy */ |
| typedef union { |
| struct x86_pte pte; |
| sos_ui32_t ui32; |
| } x86_pte_val_t; |
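These unions allow a whole PDE/PTE to be copied or cleared with a single 32-bit store instead of a field-by-field struct assignment, as done repeatedly in the new code below:

    dest_pd[index_in_pd].ui32 = src_pd[index_in_pd].ui32; /* copy a full PDE at once */
    pt[index_in_pt].ui32 = 0;                             /* reset a full PTE at once */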
| |
| |
/** Structure of the x86 CR3 register: the Page Directory Base | /** Structure of the x86 CR3 register: the Page Directory Base |
Register. See Intel x86 doc Vol 3 section 2.5 */ | Register. See Intel x86 doc Vol 3 section 2.5 */ |
struct x86_pdbr | struct x86_pdbr |
|
|
{ | { |
pt = (struct x86_pte*) (pd[index_in_pd].pt_paddr << 12); | pt = (struct x86_pte*) (pd[index_in_pd].pt_paddr << 12); |
| |
/* If we allocate a new entry in the PT, increase its reference | /* This test will always be TRUE here, since the setup routine |
count. This test will always be TRUE here, since the setup | scans the kernel pages in a strictly increasing order: at |
routine scans the kernel pages in a strictly increasing | each step, the map will result in the allocation of a new PT |
order: at each step, the map will result in the allocation of | entry. For the sake of clarity, we keep the test here. */ |
a new PT entry. For the sake of clarity, we keep the test | if (pt[index_in_pt].present) |
here. */ | |
if (! pt[index_in_pt].present) | |
sos_physmem_ref_physpage_at((sos_paddr_t)pt); | |
| |
/* The previous test should always be TRUE */ | |
else | |
} | } |
else | else |
|
|
pt[index_in_pt].user = 0; | pt[index_in_pt].user = 0; |
pt[index_in_pt].paddr = ppage >> 12; | pt[index_in_pt].paddr = ppage >> 12; |
| |
| /* Increase the PT's occupation count because we allocated a new PTE |
| inside it */ |
| sos_physmem_inc_physpage_occupation((sos_paddr_t)pt); |
| |
return SOS_OK; | return SOS_OK; |
} | } |
| |
|
|
struct x86_pte * pt = (struct x86_pte*) (SOS_PAGING_MIRROR_VADDR | struct x86_pte * pt = (struct x86_pte*) (SOS_PAGING_MIRROR_VADDR |
+ SOS_PAGE_SIZE*index_in_pd); | + SOS_PAGE_SIZE*index_in_pd); |
| |
| SOS_ASSERT_FATAL(SOS_IS_PAGE_ALIGNED(ppage_paddr)); |
| SOS_ASSERT_FATAL(SOS_IS_PAGE_ALIGNED(vpage_vaddr)); |
| |
| /* EXEC permission ignored on x86 */ |
| flags &= ~SOS_VM_MAP_PROT_EXEC; |
| |
/* The mapping of anywhere in the PD mirroring is FORBIDDEN ;) */ | /* The mapping of anywhere in the PD mirroring is FORBIDDEN ;) */ |
if ((vpage_vaddr >= SOS_PAGING_MIRROR_VADDR) | if ((vpage_vaddr >= SOS_PAGING_MIRROR_VADDR) |
&& (vpage_vaddr < SOS_PAGING_MIRROR_VADDR + SOS_PAGING_MIRROR_SIZE)) | && (vpage_vaddr < SOS_PAGING_MIRROR_VADDR + SOS_PAGING_MIRROR_SIZE)) |
|
|
/* Map a page for the PT if necessary */ | /* Map a page for the PT if necessary */ |
if (! pd[index_in_pd].present) | if (! pd[index_in_pd].present) |
{ | { |
union { struct x86_pde pde; sos_ui32_t ui32; } u; | x86_pde_val_t u; |
/* No : allocate a new one */ | /* No : allocate a new one */ |
sos_paddr_t pt_ppage | sos_paddr_t pt_ppage |
|
|
memset((void*)pt, 0x0, SOS_PAGE_SIZE); | memset((void*)pt, 0x0, SOS_PAGE_SIZE); |
} | } |
| |
/* If we allocate a new entry in the PT, increase its reference | /* If we allocate a new entry in the PT, increase its occupation |
else if (! pt[index_in_pt].present) | if (! pt[index_in_pt].present) |
sos_physmem_ref_physpage_at(pd[index_in_pd].pt_paddr << 12); | sos_physmem_inc_physpage_occupation(pd[index_in_pd].pt_paddr << 12); |
/* Otherwise, that means that a physical page is implicitly | /* Otherwise, that means that a physical page is implicitly |
unmapped */ | unmapped */ |
|
|
pt[index_in_pt].paddr = ppage_paddr >> 12; | pt[index_in_pt].paddr = ppage_paddr >> 12; |
sos_physmem_ref_physpage_at(ppage_paddr); | sos_physmem_ref_physpage_at(ppage_paddr); |
| |
| |
/* | /* |
* The page is now mapped in the current address space | * The page is now mapped in the current address space |
*/ | */ |
|
|
| |
sos_ret_t sos_paging_unmap(sos_vaddr_t vpage_vaddr) | sos_ret_t sos_paging_unmap(sos_vaddr_t vpage_vaddr) |
{ | { |
sos_ret_t pt_unref_retval; | sos_ret_t pt_dec_occupation_retval; |
/* Get the page directory entry and table entry index for this | /* Get the page directory entry and table entry index for this |
address */ | address */ |
|
|
struct x86_pte * pt = (struct x86_pte*) (SOS_PAGING_MIRROR_VADDR | struct x86_pte * pt = (struct x86_pte*) (SOS_PAGING_MIRROR_VADDR |
+ SOS_PAGE_SIZE*index_in_pd); | + SOS_PAGE_SIZE*index_in_pd); |
| |
| SOS_ASSERT_FATAL(SOS_IS_PAGE_ALIGNED(vpage_vaddr)); |
| |
/* No page mapped at this address ? */ | /* No page mapped at this address ? */ |
if (! pd[index_in_pd].present) | if (! pd[index_in_pd].present) |
return -SOS_EINVAL; | return -SOS_EINVAL; |
|
|
invlpg(vpage_vaddr); | invlpg(vpage_vaddr); |
| |
/* Reclaim this entry in the PT, which may free the PT */ | /* Reclaim this entry in the PT, which may free the PT */ |
pt_unref_retval = sos_physmem_unref_physpage(pd[index_in_pd].pt_paddr << 12); | pt_dec_occupation_retval |
SOS_ASSERT_FATAL(pt_unref_retval >= 0); | = sos_physmem_dec_physpage_occupation(pd[index_in_pd].pt_paddr << 12); |
if (pt_unref_retval > 0) | SOS_ASSERT_FATAL(pt_dec_occupation_retval >= 0); |
| if (pt_dec_occupation_retval > 0) |
{ | { |
union { struct x86_pde pde; sos_ui32_t ui32; } u; | x86_pde_val_t u; |
| |
| |
| /* |
| * The PT is not referenced by this PD anymore |
| */ |
| sos_physmem_unref_physpage(pd[index_in_pd].pt_paddr << 12); |
| |
/* | /* |
* Reset the PDE | * Reset the PDE |
|
|
} | } |
| |
| |
int sos_paging_get_prot(sos_vaddr_t vaddr) | sos_ret_t sos_paging_unmap_interval(sos_vaddr_t vaddr, |
| sos_size_t size) |
| { |
| sos_ret_t retval = 0; |
| |
| if (! SOS_IS_PAGE_ALIGNED(vaddr)) |
| return -SOS_EINVAL; |
| if (! SOS_IS_PAGE_ALIGNED(size)) |
| return -SOS_EINVAL; |
| |
| for ( ; |
| size >= SOS_PAGE_SIZE ; |
| vaddr += SOS_PAGE_SIZE, size -= SOS_PAGE_SIZE) |
| if (SOS_OK == sos_paging_unmap(vaddr)) |
| retval += SOS_PAGE_SIZE; |
| |
| return retval; |
| } |
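Note the return convention: sos_paging_unmap_interval() silently skips holes and returns the number of bytes it actually unmapped. A short usage sketch (the address and size are illustrative):

    /* Tear down 16 pages starting at vaddr; unmapped holes are tolerated */
    sos_ret_t nb_bytes = sos_paging_unmap_interval(vaddr, 16 * SOS_PAGE_SIZE);
    /* nb_bytes <= 16 * SOS_PAGE_SIZE: only pages that were mapped are counted */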
| |
| |
| sos_ui32_t sos_paging_get_prot(sos_vaddr_t vaddr) |
int retval; | sos_ui32_t retval; |
/* Get the page directory entry and table entry index for this | /* Get the page directory entry and table entry index for this |
address */ | address */ |
|
|
} | } |
| |
| |
| sos_ret_t sos_paging_set_prot(sos_vaddr_t vaddr, |
| sos_ui32_t new_prot) |
| { |
| /* Get the page directory entry and table entry index for this |
| address */ |
| unsigned index_in_pd = virt_to_pd_index(vaddr); |
| unsigned index_in_pt = virt_to_pt_index(vaddr); |
| |
| /* Get the PD of the current context */ |
| struct x86_pde *pd = (struct x86_pde*) |
| (SOS_PAGING_MIRROR_VADDR |
| + SOS_PAGE_SIZE*virt_to_pd_index(SOS_PAGING_MIRROR_VADDR)); |
| |
| /* Address of the PT in the mirroring */ |
| struct x86_pte * pt = (struct x86_pte*) (SOS_PAGING_MIRROR_VADDR |
| + SOS_PAGE_SIZE*index_in_pd); |
| |
| /* EXEC permission ignored on x86 */ |
| new_prot &= ~SOS_VM_MAP_PROT_EXEC; |
| |
| /* Check flags */ |
| if (new_prot & ~(SOS_VM_MAP_PROT_READ | SOS_VM_MAP_PROT_WRITE)) |
| return -SOS_EINVAL; |
| if (! (new_prot & SOS_VM_MAP_PROT_READ)) |
| /* x86 READ flag always set by default */ |
| return -SOS_ENOSUP; |
| |
| /* No page mapped at this address ? */ |
| if (! pd[index_in_pd].present) |
| return -SOS_EINVAL; |
| if (! pt[index_in_pt].present) |
| return -SOS_EINVAL; |
| |
| /* Update access rights */ |
| pt[index_in_pt].write = ((new_prot & SOS_VM_MAP_PROT_WRITE) != 0); |
| invlpg(vaddr); |
| |
| return SOS_OK; |
| } |
| |
| |
| sos_ret_t sos_paging_set_prot_of_interval(sos_vaddr_t vaddr, |
| sos_size_t size, |
| sos_ui32_t new_prot) |
| { |
| if (! SOS_IS_PAGE_ALIGNED(vaddr)) |
| return -SOS_EINVAL; |
| if (! SOS_IS_PAGE_ALIGNED(size)) |
| return -SOS_EINVAL; |
| |
| for ( ; size >= SOS_PAGE_SIZE ; vaddr += SOS_PAGE_SIZE, size -= SOS_PAGE_SIZE) |
| sos_paging_set_prot(vaddr, new_prot); |
| |
| return SOS_OK; |
| } |
| |
| |
sos_paddr_t sos_paging_get_paddr(sos_vaddr_t vaddr) | sos_paddr_t sos_paging_get_paddr(sos_vaddr_t vaddr) |
{ | { |
/* Get the page directory entry and table entry index for this | /* Get the page directory entry and table entry index for this |
|
|
} | } |
| |
| |
sos_ui32_t sos_paging_compute_kernel_pde_value(sos_paddr_t paddr_PT) | sos_ret_t sos_paging_dispose(sos_vaddr_t vaddr_PD) |
union { | x86_pde_val_t *pd = (x86_pde_val_t*) vaddr_PD; |
struct x86_pde pde; | x86_pte_val_t *pt; |
sos_ui32_t ui32; | int index_in_pd; |
} u; | |
| /* Allocate 1 page in kernel space to map the PTs in order to |
memset(& u, 0x0, sizeof(u)); | unreference the physical pages they reference */ |
u.pde.present = TRUE; | pt = (x86_pte_val_t *)sos_kmem_vmm_alloc(1, 0); |
u.pde.write = 1; | if (! pt) |
u.pde.user = 0; /* This is a KERNEL PDE */ | return -SOS_ENOMEM; |
u.pde.pt_paddr = (paddr_PT >> 12); | |
| /* (Nothing to do in kernel space) */ |
| |
| /* Reset all the PTs in user space */ |
| for (index_in_pd = (SOS_PAGING_BASE_USER_ADDRESS >> 22) ; |
| index_in_pd < 1024 ; /* 1 PDE = 1 PT |
| = 1024 Pages |
| = 4MB */ |
| index_in_pd ++) |
| { |
| sos_paddr_t paddr_pt = (pd[index_in_pd].pde.pt_paddr << 12); |
| int index_in_pt; |
| |
| /* Nothing to do if there is no PT */ |
| if (! pd[index_in_pd].pde.present) |
| { |
| pd[index_in_pd].ui32 = 0; |
| continue; |
| } |
| |
| /* Map this PT inside kernel */ |
| SOS_ASSERT_FATAL(SOS_OK |
| == sos_paging_map(paddr_pt, |
| (sos_vaddr_t)pt, FALSE, |
| SOS_VM_MAP_PROT_READ |
| | SOS_VM_MAP_PROT_WRITE)); |
| |
| /* Reset all the mappings in this PT */ |
| for (index_in_pt = 0 ; index_in_pt < 1024 ; index_in_pt ++) |
| { |
| /* Ignore unmapped PTE */ |
| if (! pt[index_in_pt].pte.present) |
| { |
| pt[index_in_pt].ui32 = 0; |
| continue; |
| } |
| |
| /* Unreference the associated page */ |
| sos_physmem_unref_physpage(pt[index_in_pt].pte.paddr << 12); |
| |
| /* Decrease occupation count of the PT */ |
| sos_physmem_dec_physpage_occupation(paddr_pt); |
| |
| /* Reset PTE */ |
| pt[index_in_pt].ui32 = 0; |
| } |
| |
| /* Unmap PT */ |
| SOS_ASSERT_FATAL(SOS_OK == sos_paging_unmap((sos_vaddr_t)pt)); |
| |
| /* Reset PDE */ |
| pd[index_in_pd].ui32 = 0; |
| |
| /* Unreference PT */ |
| sos_physmem_unref_physpage(paddr_pt); |
| } |
| |
| /* Unallocate kernel space used for the temporary PT */ |
| SOS_ASSERT_FATAL(SOS_OK == sos_kmem_vmm_free((sos_vaddr_t)pt)); |
return u.ui32; | return SOS_OK; |
| } |
| |
| |
| sos_ret_t sos_paging_copy_kernel_space(sos_vaddr_t dest_vaddr_PD, |
| sos_vaddr_t src_vaddr_PD) |
| { |
| x86_pde_val_t *src_pd = (x86_pde_val_t*) src_vaddr_PD; |
| x86_pde_val_t *dest_pd = (x86_pde_val_t*) dest_vaddr_PD; |
| sos_paddr_t dest_paddr_PD = sos_paging_get_paddr(dest_vaddr_PD); |
| x86_pde_val_t mirror_pde; |
| int index_in_pd; |
| |
| /* Fill destination PD with zeros */ |
| memset((void*)dest_vaddr_PD, 0x0, SOS_PAGE_SIZE); |
| |
| /* Synchronize it with the master Kernel MMU context. Stop just |
| before the mirroring ! */ |
| for (index_in_pd = 0 ; |
| index_in_pd < (SOS_PAGING_MIRROR_VADDR >> 22) ; /* 1 PDE = 1 PT |
| = 1024 Pages |
| = 4MB */ |
| index_in_pd ++) |
| { |
| /* Copy the master's configuration */ |
| dest_pd[index_in_pd].ui32 = src_pd[index_in_pd].ui32; |
| |
| /* We DON'T mark the underlying PT and pages as referenced |
| because all the PD are equivalent in the kernel space: as |
| soon as a page is mapped in the kernel, it is mapped by X |
| address spaces, and as soon as it is unmapped by 1 address |
| space, it is unmapped in all the others. So that for X |
| address spaces, the reference counter will be either 0 or X, |
| and not something else: using the reference counter correctly |
| won't be of any use and would consume some time in updating it. */ |
| } |
| |
| /* Setup the mirroring for the new address space */ |
| mirror_pde.ui32 = 0; |
| mirror_pde.pde.present = TRUE; |
| mirror_pde.pde.write = 1; |
| mirror_pde.pde.user = 0; /* This is a KERNEL PDE */ |
| mirror_pde.pde.pt_paddr = (dest_paddr_PD >> 12); |
| dest_pd[SOS_PAGING_MIRROR_VADDR >> 22].ui32 = mirror_pde.ui32; |
| |
| return SOS_OK; |
| } |
| |
| |
| sos_ret_t sos_paging_copy_user_space(sos_vaddr_t dest_vaddr_PD, |
| sos_vaddr_t src_vaddr_PD) |
| { |
| x86_pde_val_t *src_pd = (x86_pde_val_t*) src_vaddr_PD; |
| x86_pde_val_t *dest_pd = (x86_pde_val_t*) dest_vaddr_PD; |
| x86_pte_val_t *tmp_src_pt, *tmp_dest_pt; |
| int index_in_pd; |
| |
| /* Allocate 2 pages in kernel space to map the PT in order to |
| perform the copy of the PTs from source to destination */ |
| tmp_src_pt = (x86_pte_val_t *)sos_kmem_vmm_alloc(1, 0); |
| if (! tmp_src_pt) |
| return -SOS_ENOMEM; |
| |
| tmp_dest_pt = (x86_pte_val_t *)sos_kmem_vmm_alloc(1, 0); |
| if (! tmp_dest_pt) |
| { |
 | sos_kmem_vmm_free((sos_vaddr_t)tmp_src_pt); |
| return -SOS_ENOMEM; |
| } |
| |
| /* Copy each used PT from source to destination */ |
| for (index_in_pd = (SOS_PAGING_BASE_USER_ADDRESS >> 22) ; |
| index_in_pd < 1024 ; /* 1 PDE = 1 PT |
| = 1024 Pages |
| = 4MB */ |
| index_in_pd ++) |
| { |
| sos_paddr_t paddr_dest_pt; |
| int index_in_pt; |
| |
 | /* We first literally copy the source PDE into the destination |
 | PDE. However, please bear in mind that, in the end, both |
| won't reference the same physical PT: the destination PDE |
| will be updated (below) to match the address of its own new |
| PT */ |
| dest_pd[index_in_pd].ui32 = src_pd[index_in_pd].ui32; |
| |
| /* Ignore unused PTs */ |
| if (! src_pd[index_in_pd].pde.present) |
| continue; |
| |
| /* Allocate the destination PT */ |
| paddr_dest_pt = sos_physmem_ref_physpage_new(TRUE); |
| if (NULL == (void*)paddr_dest_pt) |
| { |
| sos_paging_dispose((sos_vaddr_t)dest_vaddr_PD); |
| |
| /* Unallocate temporary kernel space used for the copy */ |
| sos_kmem_vmm_free((sos_vaddr_t)tmp_src_pt); |
| sos_kmem_vmm_free((sos_vaddr_t)tmp_dest_pt); |
| return -SOS_ENOMEM; |
| } |
| |
| /* Map source and destination PT */ |
| SOS_ASSERT_FATAL(SOS_OK |
| == sos_paging_map(src_pd[index_in_pd].pde.pt_paddr << 12, |
| (sos_vaddr_t)tmp_src_pt, FALSE, |
| SOS_VM_MAP_PROT_READ)); |
| SOS_ASSERT_FATAL(SOS_OK |
| == sos_paging_map(paddr_dest_pt, |
| (sos_vaddr_t)tmp_dest_pt, FALSE, |
| SOS_VM_MAP_PROT_READ |
| | SOS_VM_MAP_PROT_WRITE)); |
| |
| /* Copy the contents of the source to the destination PT, |
| updating the reference counts of the pages */ |
| for (index_in_pt = 0 ; index_in_pt < 1024 ; index_in_pt ++) |
| { |
| /* Copy the source PTE */ |
| tmp_dest_pt[index_in_pt].ui32 = tmp_src_pt[index_in_pt].ui32; |
| |
| /* Ignore non-present pages */ |
| if (! tmp_dest_pt[index_in_pt].pte.present) |
| continue; |
| |
| /* Reset the dirty/accessed flags */ |
| tmp_dest_pt[index_in_pt].pte.accessed = 0; |
| tmp_dest_pt[index_in_pt].pte.dirty = 0; |
| |
| /* Increase the reference count of the destination page */ |
| sos_physmem_ref_physpage_at(tmp_src_pt[index_in_pt].pte.paddr << 12); |
| |
| /* Increase occupation count of the PT */ |
| sos_physmem_inc_physpage_occupation(paddr_dest_pt); |
| } |
| |
| /* Unmap the temporary PTs */ |
| SOS_ASSERT_FATAL(SOS_OK == sos_paging_unmap((sos_vaddr_t)tmp_src_pt)); |
| SOS_ASSERT_FATAL(SOS_OK == sos_paging_unmap((sos_vaddr_t)tmp_dest_pt)); |
| |
| /* Update the destination PDE */ |
| dest_pd[index_in_pd].pde.pt_paddr = (paddr_dest_pt >> 12); |
| |
| /* Reset the dirty/accessed flags */ |
| dest_pd[index_in_pd].pde.accessed = 0; |
| } |
| |
| |
| /* Unallocate temporary kernel space used for the copy */ |
| SOS_ASSERT_FATAL(SOS_OK == sos_kmem_vmm_free((sos_vaddr_t)tmp_src_pt)); |
| SOS_ASSERT_FATAL(SOS_OK == sos_kmem_vmm_free((sos_vaddr_t)tmp_dest_pt)); |
| |
| return SOS_OK; |
| } |
| |
| |
| sos_ret_t sos_paging_prepare_COW(sos_uaddr_t base_address, |
| sos_size_t length) |
| { |
| SOS_ASSERT_FATAL(SOS_IS_PAGE_ALIGNED(base_address)); |
| SOS_ASSERT_FATAL(SOS_IS_PAGE_ALIGNED(length)); |
| SOS_ASSERT_FATAL(SOS_PAGING_BASE_USER_ADDRESS <= base_address); |
| |
| /* Mark all the pages read-only, when already mapped in physical |
| memory */ |
| for ( ; |
| length > 0 ; |
| length -= SOS_PAGE_SIZE, base_address += SOS_PAGE_SIZE) |
| { |
| sos_paging_set_prot(base_address, |
| SOS_VM_MAP_PROT_READ); |
| } |
| |
| return SOS_OK; |
| } |
| |
| |
| sos_ret_t sos_paging_try_resolve_COW(sos_uaddr_t uaddr) |
| { |
| sos_ret_t refcnt; |
| |
| /* Get the page directory entry and table entry index for this |
| address */ |
| unsigned index_in_pd = virt_to_pd_index(uaddr); |
| unsigned index_in_pt = virt_to_pt_index(uaddr); |
| |
| /* Get the PD of the current context */ |
| struct x86_pde *pd = (struct x86_pde*) |
| (SOS_PAGING_MIRROR_VADDR |
| + SOS_PAGE_SIZE*virt_to_pd_index(SOS_PAGING_MIRROR_VADDR)); |
| |
| /* Address of the PT in the mirroring */ |
| struct x86_pte * pt = (struct x86_pte*) (SOS_PAGING_MIRROR_VADDR |
| + SOS_PAGE_SIZE*index_in_pd); |
| |
| /* No page mapped at this address ? */ |
| if (! pd[index_in_pd].present) |
| return -SOS_EFAULT; |
| if (! pt[index_in_pt].present) |
| return -SOS_EFAULT; |
| |
| /* Read-only PT not supported by kernel ! */ |
| if (! pd[index_in_pd].write) |
| return -SOS_EFAULT; |
| |
| /* Cannot understand a COW request if the page is already |
| read/write */ |
| SOS_ASSERT_FATAL(! pt[index_in_pt].write); |
| |
| /* We do a private copy of the page only if the current mapped page |
| is shared by more than 1 process */ |
| refcnt = sos_physmem_get_physpage_refcount(pt[index_in_pt].paddr << 12); |
| SOS_ASSERT_FATAL(refcnt > 0); |
| |
| if (refcnt == 1) |
| { |
| /* We are the only address space to reference this page, we can |
| safely turn it read/write now */ |
| pt[index_in_pt].write = 1; |
 | invlpg(SOS_PAGE_ALIGN_INF(uaddr)); /* invlpg expects a virtual address */ |
| } |
| |
| /* Otherwise we need to make a private copy of the page */ |
| else |
| { |
| sos_paddr_t new_ppage; |
| sos_vaddr_t vpage_src, tmp_dest; |
| |
| /* For that, we allocate the destination page inside the kernel |
| space to perform the copy. We will transfer it into its |
| final user-space address later */ |
| tmp_dest = sos_kmem_vmm_alloc(1, SOS_KMEM_VMM_MAP); |
| if (! tmp_dest) |
| return -SOS_ENOMEM; |
| |
| /* copy the contents of the page */ |
| vpage_src = SOS_PAGE_ALIGN_INF(uaddr); |
| memcpy((void*)tmp_dest, (void*)vpage_src, SOS_PAGE_SIZE); |
| |
| /* replace the original (read-only) mapping with a (read/write) |
| mapping to the new page. This will automatically unreference |
| the original page */ |
| new_ppage = sos_paging_get_paddr(tmp_dest); |
| SOS_ASSERT_FATAL(new_ppage != (sos_vaddr_t)NULL); |
| if (SOS_OK != sos_paging_map(new_ppage, vpage_src, |
| TRUE, |
| SOS_VM_MAP_PROT_READ |
| | SOS_VM_MAP_PROT_WRITE)) |
| { |
| sos_kmem_vmm_free(tmp_dest); |
| return -SOS_ENOMEM; |
| } |
| |
| /* We can now unmap the destination page from inside the |
| kernel and free the kernel VM range for it */ |
| SOS_ASSERT_FATAL(SOS_OK == sos_kmem_vmm_free(tmp_dest)); |
| } |
| |
| /* That's all, folks ! */ |
| return SOS_OK; |
| |
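Taken together, the two COW entry points above cooperate as follows (schematic; the umem_vmm code that actually issues these calls is not shown in this diff):

    /* At fork() time: demote the writable private mappings to read-only */
    sos_paging_prepare_COW(start_uaddr, size);

    /* Later, on a write fault at uaddr inside a logically writable region: */
    if (SOS_OK == sos_paging_try_resolve_COW(uaddr))
      return; /* page made private and/or writable again: retry the write */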
/tmp/sos-code-article7/sos/binfmt_elf32.c (1970-01-01 01:00:00.000000000 +0100) | ../sos-code-article7.5/sos/binfmt_elf32.c (2005-04-27 20:17:18.000000000 +0200)
| /* Copyright (C) 2005 David Decotigny |
| |
| This program is free software; you can redistribute it and/or |
| modify it under the terms of the GNU General Public License |
| as published by the Free Software Foundation; either version 2 |
| of the License, or (at your option) any later version. |
| |
| This program is distributed in the hope that it will be useful, |
| but WITHOUT ANY WARRANTY; without even the implied warranty of |
| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| GNU General Public License for more details. |
| |
| You should have received a copy of the GNU General Public License |
| along with this program; if not, write to the Free Software |
| Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, |
| USA. |
| */ |
| |
| #include <sos/kmalloc.h> |
| #include <sos/assert.h> |
| #include <sos/physmem.h> |
| #include <drivers/bochs.h> |
| #include <hwcore/paging.h> |
| #include <drivers/zero.h> |
| |
| #include "binfmt_elf32.h" |
| |
| |
| /** |
| * The "C" structure of a user program image in the kernel. Structures |
| * like this are created by the Makefile in the userland/ directory |
| */ |
| struct userprog_entry |
| { |
| const char *name; |
| sos_vaddr_t bottom_vaddr; |
| sos_vaddr_t top_vaddr; |
| }; |
| |
| |
| /** |
| * Symbol marking the start of the userprogs table, as setup by the |
| * ld script in the userland/ directory |
| */ |
| extern char _userprogs_table; |
| |
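For illustration, the table generated by the userland/ build is expected to look roughly like the sketch below; the program name and addresses are hypothetical, and only the NULL-name terminator, which lookup_userprog() below relies on, is essential:

    /* Hypothetical generated table (normally produced by the ld script): */
    struct userprog_entry sketch_userprogs_table[] = {
      { .name = "init", .bottom_vaddr = 0x200000, .top_vaddr = 0x208000 },
      { .name = NULL } /* terminator scanned by lookup_userprog() */
    };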
| |
| /** |
| * Structure of a mapped resource for an ELF32 program (ie a portion |
| * of the kernel space) |
| */ |
| struct elf32_mapped_program |
| { |
| sos_vaddr_t vaddr; |
| sos_size_t size; |
| int ref_cnt; |
| |
| struct sos_umem_vmm_mapped_resource mr; |
| }; |
| |
| |
| /** Called after the virtual region has been inserted inside its |
| address space */ |
| static void elf32prog_ref(struct sos_umem_vmm_vr * vr) |
| { |
| struct elf32_mapped_program * elf32prog_resource; |
 | elf32prog_resource |
 | = (struct elf32_mapped_program*) |
 | sos_umem_vmm_get_mapped_resource_of_vr(vr)->custom_data; |
| |
| elf32prog_resource->ref_cnt ++; |
| } |
| |
| |
| /** Called when the virtual region is removed from its address |
| space */ |
| static void elf32prog_unref(struct sos_umem_vmm_vr * vr) |
| { |
| struct elf32_mapped_program * elf32prog_resource; |
| elf32prog_resource |
| = (struct elf32_mapped_program*) |
| sos_umem_vmm_get_mapped_resource_of_vr(vr)->custom_data; |
| |
| elf32prog_resource->ref_cnt --; |
| SOS_ASSERT_FATAL(elf32prog_resource->ref_cnt >= 0); |
| |
| /* Free the resource if it becomes unused */ |
| if (elf32prog_resource->ref_cnt == 0) |
| sos_kfree((sos_vaddr_t)elf32prog_resource); |
| } |
| |
| |
 | /** Called when a legitimate page fault occurs in the VR */ |
| static sos_ret_t elf32prog_page_in(struct sos_umem_vmm_vr * vr, |
| sos_uaddr_t uaddr, |
| sos_bool_t write_access) |
| { |
| struct elf32_mapped_program * elf32prog_resource; |
| sos_ret_t retval = SOS_OK; |
| sos_paddr_t ppage_paddr; |
| sos_uaddr_t upage_uaddr = SOS_PAGE_ALIGN_INF(uaddr); |
| sos_uoffset_t offset_in_prog; |
| sos_size_t size_to_copy; |
| sos_ui32_t access_rights = sos_umem_vmm_get_prot_of_vr(vr); |
| |
| elf32prog_resource |
| = (struct elf32_mapped_program*) |
| sos_umem_vmm_get_mapped_resource_of_vr(vr)->custom_data; |
| |
| /* Compute the offset in program of the page, and the size to copy |
| in user space */ |
| offset_in_prog = upage_uaddr - sos_umem_vmm_get_start_of_vr(vr) |
| + sos_umem_vmm_get_offset_in_resource(vr); |
| size_to_copy = SOS_PAGE_SIZE; |
| if (offset_in_prog + size_to_copy > elf32prog_resource->size) |
| size_to_copy = elf32prog_resource->size - offset_in_prog; |
| |
| /* Allocate a new page that contains the code/data of the |
| program */ |
| ppage_paddr = sos_physmem_ref_physpage_new(FALSE); |
| if (! ppage_paddr) |
| return -SOS_ENOMEM; |
| |
| /* Map it in user space, in read/write mode for the kernel to copy |
| the data in the page */ |
| retval = sos_paging_map(ppage_paddr, |
| upage_uaddr, |
| TRUE, |
| access_rights | SOS_VM_MAP_PROT_WRITE); |
| SOS_ASSERT_FATAL(SOS_OK == retval); |
| sos_physmem_unref_physpage(ppage_paddr); |
| |
| /* Copy the program in it */ |
| memcpy((void*)upage_uaddr, |
| (void*)elf32prog_resource->vaddr + offset_in_prog, |
| size_to_copy); |
| if (size_to_copy < SOS_PAGE_SIZE) |
| memset((void*)(upage_uaddr + size_to_copy), 0x0, |
| SOS_PAGE_SIZE - size_to_copy); |
| |
| /* Change it read-only if needed */ |
| if (! (access_rights & SOS_VM_MAP_PROT_WRITE)) |
| return sos_paging_set_prot(upage_uaddr, |
| access_rights & ~SOS_VM_MAP_PROT_WRITE); |
| |
| return retval; |
| } |
| |
| |
| static struct sos_umem_vmm_vr_ops elf32prog_ops = (struct sos_umem_vmm_vr_ops) |
| { |
| .ref = elf32prog_ref, |
| .unref = elf32prog_unref, |
| .page_in = elf32prog_page_in, |
| .unmap = NULL /* ignored */ |
| }; |
| |
| |
| static sos_ret_t elf32prog_mmap(struct sos_umem_vmm_vr *vr) |
| { |
| return sos_umem_vmm_set_ops_of_vr(vr, &elf32prog_ops); |
| } |
| |
| |
| /* |
| * Local functions |
| */ |
| |
| |
| /** |
| * Function to locate the given user program image in the kernel memory |
| */ |
| static struct userprog_entry * lookup_userprog(const char *name); |
| |
| |
| sos_uaddr_t sos_binfmt_elf32_map(struct sos_umem_vmm_as * dest_as, |
| const char * progname) |
| { |
| int i; |
| |
| /** |
| * Typedefs, constants and structure definitions as given by the ELF |
| * standard specifications. |
| */ |
| typedef unsigned long Elf32_Addr; |
| typedef unsigned long Elf32_Word; |
| typedef unsigned short Elf32_Half; |
| typedef unsigned long Elf32_Off; |
| typedef signed long Elf32_Sword; |
| |
| /* Elf identification */ |
| |
| #define EI_NIDENT 16 |
| typedef struct { |
| unsigned char e_ident[EI_NIDENT]; |
| Elf32_Half e_type; |
| Elf32_Half e_machine; |
| Elf32_Word e_version; |
| Elf32_Addr e_entry; |
| Elf32_Off e_phoff; |
| Elf32_Off e_shoff; |
| Elf32_Word e_flags; |
| Elf32_Half e_ehsize; |
| Elf32_Half e_phentsize; |
| Elf32_Half e_phnum; |
| Elf32_Half e_shentsize; |
| Elf32_Half e_shnum; |
| Elf32_Half e_shstrndx; |
| } __attribute__((packed)) Elf32_Ehdr_t; |
| |
| /* e_ident value */ |
| #define ELFMAG0 0x7f |
| #define ELFMAG1 'E' |
| #define ELFMAG2 'L' |
| #define ELFMAG3 'F' |
| |
| /* e_ident offsets */ |
| #define EI_MAG0 0 |
| #define EI_MAG1 1 |
| #define EI_MAG2 2 |
| #define EI_MAG3 3 |
| #define EI_CLASS 4 |
| #define EI_DATA 5 |
| #define EI_VERSION 6 |
| #define EI_PAD 7 |
| |
| /* e_ident[EI_CLASS] */ |
| #define ELFCLASSNONE 0 |
| #define ELFCLASS32 1 |
| #define ELFCLASS64 2 |
| |
| /* e_ident[EI_DATA] */ |
| #define ELFDATANONE 0 |
| #define ELFDATA2LSB 1 |
| #define ELFDATA2MSB 2 |
| |
| /* e_type */ |
| #define ET_NONE 0 /* No file type */ |
| #define ET_REL 1 /* Relocatable file */ |
| #define ET_EXEC 2 /* Executable file */ |
| #define ET_DYN 3 /* Shared object file */ |
| #define ET_CORE 4 /* Core file */ |
| #define ET_LOPROC 0xff00 /* Processor-specific */ |
| #define ET_HIPROC 0xffff /* Processor-specific */ |
| |
| /* e_machine */ |
| #define EM_NONE 0 /* No machine */ |
| #define EM_M32 1 /* AT&T WE 32100 */ |
| #define EM_SPARC 2 /* SPARC */ |
| #define EM_386 3 /* Intel 80386 */ |
| #define EM_68K 4 /* Motorola 68000 */ |
| #define EM_88K 5 /* Motorola 88000 */ |
| #define EM_860 7 /* Intel 80860 */ |
| #define EM_MIPS 8 /* MIPS RS3000 */ |
| |
| /* e_version */ |
| #define EV_NONE 0 /* invalid version */ |
| #define EV_CURRENT 1 /* current version */ |
| |
| typedef struct { |
| Elf32_Word p_type; |
| Elf32_Off p_offset; |
| Elf32_Addr p_vaddr; |
| Elf32_Addr p_paddr; |
| Elf32_Word p_filesz; |
| Elf32_Word p_memsz; |
| Elf32_Word p_flags; |
| Elf32_Word p_align; |
| } __attribute__((packed)) Elf32_Phdr_t; |
| |
| /* Reserved segment types p_type */ |
| #define PT_NULL 0 |
| #define PT_LOAD 1 |
| #define PT_DYNAMIC 2 |
| #define PT_INTERP 3 |
| #define PT_NOTE 4 |
| #define PT_SHLIB 5 |
| #define PT_PHDR 6 |
| #define PT_LOPROC 0x70000000 |
| #define PT_HIPROC 0x7fffffff |
| |
| /* p_flags */ |
| #define PF_X 1 |
| #define PF_W 2 |
| #define PF_R 4 |
| |
| |
| Elf32_Ehdr_t *elf_hdr; |
| Elf32_Phdr_t *elf_phdrs; |
| |
| struct elf32_mapped_program * mapped_prog; |
| struct userprog_entry * prog; |
| sos_uaddr_t prog_top_user_address = 0; |
| |
| mapped_prog |
| = (struct elf32_mapped_program*) |
| sos_kmalloc(sizeof(struct elf32_mapped_program), 0); |
| if (! mapped_prog) |
| return -SOS_ENOMEM; |
| |
| prog = lookup_userprog(progname); |
| if (! prog) |
| { |
| sos_kfree((sos_vaddr_t)mapped_prog); |
| return 0; |
| } |
| |
| /* Initialize mapped resource */ |
| memset(mapped_prog, 0x0, sizeof(*mapped_prog)); |
| mapped_prog->mr.custom_data = mapped_prog; |
| mapped_prog->mr.mmap = elf32prog_mmap; |
| mapped_prog->mr.allowed_access_rights |
| = SOS_VM_MAP_PROT_READ |
| | SOS_VM_MAP_PROT_WRITE |
| | SOS_VM_MAP_PROT_EXEC; |
| mapped_prog->vaddr = prog->bottom_vaddr; |
| mapped_prog->size = prog->top_vaddr - prog->bottom_vaddr; |
| |
| elf_hdr = (Elf32_Ehdr_t*) prog->bottom_vaddr; |
| |
| /* Make sure the image is large enough to contain at least the ELF |
| header */ |
| if (prog->bottom_vaddr + sizeof(Elf32_Ehdr_t) > prog->top_vaddr) |
| { |
| sos_bochs_printf("ELF prog %s: incorrect header\n", prog->name); |
| return 0; |
| } |
| |
| /* Macro to check expected values for some fields in the ELF header */ |
| #define ELF_CHECK(hdr,field,expected_value) \ |
| ({ if ((hdr)->field != (expected_value)) \ |
| { \ |
| sos_bochs_printf("ELF prog %s: for %s, expected %x, got %x\n", \ |
| prog->name, \ |
| #field, \ |
| (unsigned)(expected_value), \ |
| (unsigned)(hdr)->field); \ |
| return 0; \ |
| } \ |
| }) |
| |
| ELF_CHECK(elf_hdr, e_ident[EI_MAG0], ELFMAG0); |
| ELF_CHECK(elf_hdr, e_ident[EI_MAG1], ELFMAG1); |
| ELF_CHECK(elf_hdr, e_ident[EI_MAG2], ELFMAG2); |
| ELF_CHECK(elf_hdr, e_ident[EI_MAG3], ELFMAG3); |
| ELF_CHECK(elf_hdr, e_ident[EI_CLASS], ELFCLASS32); |
| ELF_CHECK(elf_hdr, e_ident[EI_DATA], ELFDATA2LSB); |
| ELF_CHECK(elf_hdr, e_type, ET_EXEC); |
| ELF_CHECK(elf_hdr, e_version, EV_CURRENT); |
| |
 | /* Get the beginning of the program header table */ |
| elf_phdrs = (Elf32_Phdr_t*) (prog->bottom_vaddr + elf_hdr->e_phoff); |
| |
| /* Map the program segment in R/W mode. To make things clean, we |
| should iterate over the sections, not the program header */ |
| for (i = 0 ; i < elf_hdr->e_phnum ; i++) |
| { |
| sos_ui32_t prot_flags; |
| sos_uaddr_t uaddr; |
| |
| /* Ignore the empty program headers that are not marked "LOAD" */ |
| if (elf_phdrs[i].p_type != PT_LOAD) |
| { |
| if (elf_phdrs[i].p_memsz != 0) |
| { |
| sos_display_fatal_error("ELF: non-empty non-LOAD segments not supported yet"); |
| } |
| continue; |
| } |
| |
| if (elf_phdrs[i].p_vaddr < SOS_PAGING_BASE_USER_ADDRESS) |
| { |
| sos_display_fatal_error("User program has an incorrect address"); |
| } |
| |
| prot_flags = 0; |
 | /* Translate the ELF segment flags (PF_*) into SOS protection flags */ |
 | if (elf_phdrs[i].p_flags & PF_R) |
 | prot_flags |= SOS_VM_MAP_PROT_READ; |
 | if (elf_phdrs[i].p_flags & PF_W) |
 | prot_flags |= SOS_VM_MAP_PROT_WRITE; |
 | if (elf_phdrs[i].p_flags & PF_X) |
 | prot_flags |= SOS_VM_MAP_PROT_EXEC; |
| |
| uaddr = elf_phdrs[i].p_vaddr; |
| SOS_ASSERT_FATAL(SOS_IS_PAGE_ALIGNED(uaddr)); |
| |
| /* First of all: map the region of the phdr which is also |
| covered by the file */ |
| SOS_ASSERT_FATAL(SOS_OK |
| == sos_umem_vmm_map(dest_as, &uaddr, |
| SOS_PAGE_ALIGN_SUP(elf_phdrs[i].p_filesz), |
| prot_flags, |
| /* PRIVATE */ SOS_VR_MAP_FIXED, |
| & mapped_prog->mr, |
| elf_phdrs[i].p_offset)); |
| |
| /* Then map the remaining by a zero resource */ |
| uaddr += SOS_PAGE_ALIGN_SUP(elf_phdrs[i].p_filesz); |
| if (SOS_PAGE_ALIGN_SUP(elf_phdrs[i].p_filesz) |
| < SOS_PAGE_ALIGN_SUP(elf_phdrs[i].p_memsz)) |
| SOS_ASSERT_FATAL(SOS_OK |
| == sos_dev_zero_map(dest_as, &uaddr, |
| SOS_PAGE_ALIGN_SUP(elf_phdrs[i].p_memsz) |
| - SOS_PAGE_ALIGN_SUP(elf_phdrs[i].p_filesz), |
| prot_flags, |
| /* PRIVATE */ SOS_VR_MAP_FIXED)); |
| |
| if (prog_top_user_address |
| < uaddr + SOS_PAGE_ALIGN_SUP(elf_phdrs[i].p_memsz)) |
| prog_top_user_address |
| = uaddr + SOS_PAGE_ALIGN_SUP(elf_phdrs[i].p_memsz); |
| } |
| |
| /* Now prepare the heap */ |
| sos_umem_vmm_init_heap(dest_as, prog_top_user_address); |
| |
| return elf_hdr->e_entry; |
| } |
| |
| |
| /** |
| * Lookup a user program located inside the kernel's image |
| */ |
| static struct userprog_entry * lookup_userprog(const char *name) |
| { |
| struct userprog_entry *prog; |
| |
| if (! name) |
| return NULL; |
| |
| /* Walk through the table of user program description structures to |
| find the user program with the given name */ |
| for (prog = (struct userprog_entry*) & _userprogs_table ; |
| prog && (prog->name != NULL) ; |
| prog++) |
| { |
| if (0 == strcmp(name, prog->name)) |
| /* Found it ! */ |
| return prog; |
| } |
| |
| return NULL; |
| } |
| |
/tmp/sos-code-article7/sos/main.c (2005-02-05 17:52:23.000000000 +0100) | ../sos-code-article7.5/sos/main.c (2005-04-27 20:17:17.000000000 +0200)
#include <sos/time.h> | #include <sos/time.h> |
#include <sos/thread.h> | #include <sos/thread.h> |
#include <sos/process.h> | #include <sos/process.h> |
| #include <sos/umem_vmm.h> |
#include <sos/klibc.h> | #include <sos/klibc.h> |
#include <sos/assert.h> | #include <sos/assert.h> |
#include <drivers/x86_videomem.h> | #include <drivers/x86_videomem.h> |
#include <drivers/bochs.h> | #include <drivers/bochs.h> |
#include <sos/calcload.h> | #include <sos/calcload.h> |
| #include <sos/umem_vmm.h> |
| #include <sos/binfmt_elf32.h> |
| #include <drivers/zero.h> |
| |
| |
/* Helper function to display each bit of a 32-bit integer on the | /* Helper function to display each bit of a 32-bit integer on the |
|
|
* Page fault exception handling | * Page fault exception handling |
*/ | */ |
| |
/* Helper function to dump a backtrace on bochs and/or the console */ | |
static void dump_backtrace(const struct sos_cpu_state *cpu_state, | |
sos_vaddr_t stack_bottom, | |
sos_size_t stack_size, | |
sos_bool_t on_console, | |
sos_bool_t on_bochs) | |
{ | |
static void backtracer(sos_vaddr_t PC, | |
sos_vaddr_t params, | |
sos_ui32_t depth, | |
void *custom_arg) | |
{ | |
sos_ui32_t invalid = 0xffffffff, *arg1, *arg2, *arg3, *arg4; | |
| |
/* Get the address of the first 3 arguments from the | |
frame. Among these arguments, 0, 1, 2, 3 arguments might be | |
meaningful (depending on how many arguments the function may | |
take). */ | |
arg1 = (sos_ui32_t*)params; | |
arg2 = (sos_ui32_t*)(params+4); | |
arg3 = (sos_ui32_t*)(params+8); | |
arg4 = (sos_ui32_t*)(params+12); | |
| |
/* Make sure the addresses of these arguments fit inside the | |
stack boundaries */ | |
#define INTERVAL_OK(b,v,u) ( ((b) <= (sos_vaddr_t)(v)) \ | |
&& ((sos_vaddr_t)(v) < (u)) ) | |
if (!INTERVAL_OK(stack_bottom, arg1, stack_bottom + stack_size)) | |
arg1 = &invalid; | |
if (!INTERVAL_OK(stack_bottom, arg2, stack_bottom + stack_size)) | |
arg2 = &invalid; | |
if (!INTERVAL_OK(stack_bottom, arg3, stack_bottom + stack_size)) | |
arg3 = &invalid; | |
if (!INTERVAL_OK(stack_bottom, arg4, stack_bottom + stack_size)) | |
arg4 = &invalid; | |
| |
/* Print the function context for this frame */ | |
if (on_bochs) | |
sos_bochs_printf("[%d] PC=0x%x arg1=0x%x arg2=0x%x arg3=0x%x\n", | |
(unsigned)depth, (unsigned)PC, | |
(unsigned)*arg1, (unsigned)*arg2, | |
(unsigned)*arg3); | |
| |
if (on_console) | |
sos_x86_videomem_printf(23-depth, 3, | |
SOS_X86_VIDEO_BG_BLUE | |
| SOS_X86_VIDEO_FG_LTGREEN, | |
"[%d] PC=0x%x arg1=0x%x arg2=0x%x arg3=0x%x arg4=0x%x", | |
(unsigned)depth, PC, | |
(unsigned)*arg1, (unsigned)*arg2, | |
(unsigned)*arg3, (unsigned)*arg4); | |
| |
} | |
| |
sos_backtrace(cpu_state, 15, stack_bottom, stack_size, backtracer, NULL); | |
} | |
| |
/* Page fault exception handler with demand paging for the kernel */ | /* Page fault exception handler with demand paging for the kernel */ |
static void pgflt_ex(int intid, const struct sos_cpu_state *ctxt) | static void pgflt_ex(int intid, struct sos_cpu_state *ctxt) |
static sos_ui32_t demand_paging_count = 0; | static sos_ui32_t demand_paging_count = 0; |
sos_vaddr_t faulting_vaddr = sos_cpu_context_get_EX_faulting_vaddr(ctxt); | struct sos_thread * cur_thr = sos_thread_get_current(); |
| sos_vaddr_t faulting_vaddr = sos_cpu_context_get_EX_faulting_vaddr(ctxt); |
| |
if (sos_cpu_context_is_in_user_mode(ctxt)) | if (sos_cpu_context_is_in_user_mode(ctxt) |
| || (cur_thr->fixup_uaccess.return_vaddr)) |
/* User-mode page faults are considered unresolved for the | __label__ unforce_address_space; |
moment */ | sos_bool_t need_to_setup_mmu; |
| sos_ui32_t errcode = sos_cpu_context_get_EX_info(ctxt); |
| |
| /* Make sure to always stay in the interrupted thread's MMU |
| configuration */ |
| need_to_setup_mmu = (cur_thr->squatted_mm_context |
| != sos_process_get_mm_context(cur_thr->process)); |
| if (need_to_setup_mmu) |
| sos_thread_prepare_user_space_access(NULL, 0); |
| |
| if (SOS_OK == |
| sos_umem_vmm_try_resolve_page_fault(faulting_vaddr, |
| errcode & (1 << 1), |
| TRUE)) |
| goto unforce_address_space; |
| |
 | /* If the page fault occurred in kernel mode, return to the |
 | kernel at the fixup address */ |
| if (! sos_cpu_context_is_in_user_mode(ctxt)) |
| { |
| cur_thr->fixup_uaccess.faulted_uaddr = faulting_vaddr; |
| sos_cpu_context_set_EX_return_address(ctxt, |
| cur_thr->fixup_uaccess.return_vaddr); |
| goto unforce_address_space; |
| } |
| |
| if (need_to_setup_mmu) |
| sos_thread_end_user_space_access(); |
| |
sos_cpu_context_get_PC(ctxt), | sos_cpu_context_get_PC(ctxt), |
(unsigned)faulting_vaddr, | (unsigned)faulting_vaddr, |
(unsigned)sos_cpu_context_get_EX_info(ctxt)); | (unsigned)sos_cpu_context_get_EX_info(ctxt)); |
sos_bochs_printf("Terminating User thread\n"); | sos_bochs_printf("Terminating User thread\n"); |
sos_thread_exit(); | sos_thread_exit(); |
| |
| unforce_address_space: |
| if (need_to_setup_mmu) |
| sos_thread_end_user_space_access(); |
| return; |
} | } |
| |
/* Check if address is covered by any VMM range */ | /* Check if address is covered by any VMM range */ |
|
|
{ | { |
/* No: The page fault is out of any kernel virtual region. For | /* No: The page fault is out of any kernel virtual region. For |
the moment, we don't handle this. */ | the moment, we don't handle this. */ |
dump_backtrace(ctxt, | |
bootstrap_stack_bottom, | |
bootstrap_stack_size, | |
TRUE, TRUE); | |
sos_cpu_context_get_PC(ctxt), | sos_cpu_context_get_PC(ctxt), |
(unsigned)faulting_vaddr, | (unsigned)faulting_vaddr, |
|
|
| |
| |
/* | /* |
* Demand paging | * Demand paging in kernel space |
| |
/* Update the number of demand paging requests handled */ | /* Update the number of demand paging requests handled */ |
|
|
} | } |
| |
| |
/* ====================================================================== | |
* Demonstrate the use of SOS kernel threads | |
* - Kernel Threads are created with various priorities and their | |
* state is printed on both the console and the bochs' 0xe9 port | |
* - For tests regarding threads' synchronization, see mouse_sim.c | |
*/ | |
| |
struct thr_arg | |
{ | |
char character; | |
int color; | |
| |
int col; | |
int row; | |
}; | |
| |
| |
static void demo_thread(void *arg) | |
{ | |
struct thr_arg *thr_arg = (struct thr_arg*)arg; | |
int progress = 0; | |
| |
sos_bochs_printf("start %c", thr_arg->character); | |
while (1) | |
{ | |
progress ++; | |
display_bits(thr_arg->row, thr_arg->col+1, thr_arg->color, progress); | |
| |
sos_bochs_putchar(thr_arg->character); | |
| |
/* Yield the CPU to another thread sometimes... */ | |
if ((random() % 100) == 0) | |
{ | |
sos_bochs_printf("[37myield(%c)[m\n", thr_arg->character); | |
sos_x86_videomem_putchar(thr_arg->row, thr_arg->col, 0x1e, 'Y'); | |
SOS_ASSERT_FATAL(SOS_OK == sos_thread_yield()); | |
sos_x86_videomem_putchar(thr_arg->row, thr_arg->col, 0x1e, 'R'); | |
} | |
| |
/* Go to sleep some other times... */ | |
else if ((random() % 200) == 0) | |
{ | |
struct sos_time t = (struct sos_time){ .sec=0, .nanosec=50000000 }; | |
sos_bochs_printf("[37msleep1(%c)[m\n", thr_arg->character); | |
sos_x86_videomem_putchar(thr_arg->row, thr_arg->col, 0x1e, 's'); | |
SOS_ASSERT_FATAL(SOS_OK == sos_thread_sleep(& t)); | |
SOS_ASSERT_FATAL(sos_time_is_zero(& t)); | |
sos_x86_videomem_putchar(thr_arg->row, thr_arg->col, 0x1e, 'R'); | |
} | |
| |
/* Go to sleep for a longer time some other times... */ | |
else if ((random() % 300) == 0) | |
{ | |
struct sos_time t = (struct sos_time){ .sec=0, .nanosec=300000000 }; | |
sos_bochs_printf("[37msleep2(%c)[m\n", thr_arg->character); | |
sos_x86_videomem_putchar(thr_arg->row, thr_arg->col, 0x1e, 'S'); | |
SOS_ASSERT_FATAL(SOS_OK == sos_thread_sleep(& t)); | |
SOS_ASSERT_FATAL(sos_time_is_zero(& t)); | |
sos_x86_videomem_putchar(thr_arg->row, thr_arg->col, 0x1e, 'R'); | |
} | |
| |
/* Infinite loop otherwise */ | |
} | |
} | |
| |
| |
static void test_thread() | |
{ | |
/* "static" variables because we want them to remain even when the | |
function returns */ | |
static struct thr_arg arg_b, arg_c, arg_d, arg_e, arg_R, arg_S; | |
sos_ui32_t flags; | |
| |
sos_disable_IRQs(flags); | |
| |
arg_b = (struct thr_arg) { .character='b', .col=0, .row=21, .color=0x14 }; | |
sos_create_kernel_thread("YO[b]", demo_thread, (void*)&arg_b, SOS_SCHED_PRIO_TS_LOWEST); | |
| |
arg_c = (struct thr_arg) { .character='c', .col=46, .row=21, .color=0x14 }; | |
sos_create_kernel_thread("YO[c]", demo_thread, (void*)&arg_c, SOS_SCHED_PRIO_TS_LOWEST); | |
| |
arg_d = (struct thr_arg) { .character='d', .col=0, .row=20, .color=0x14 }; | |
sos_create_kernel_thread("YO[d]", demo_thread, (void*)&arg_d, SOS_SCHED_PRIO_TS_LOWEST-1); | |
| |
arg_e = (struct thr_arg) { .character='e', .col=0, .row=19, .color=0x14 }; | |
sos_create_kernel_thread("YO[e]", demo_thread, (void*)&arg_e, SOS_SCHED_PRIO_TS_LOWEST-2); | |
| |
arg_R = (struct thr_arg) { .character='R', .col=0, .row=17, .color=0x1c }; | |
sos_create_kernel_thread("YO[R]", demo_thread, (void*)&arg_R, SOS_SCHED_PRIO_RT_LOWEST); | |
| |
arg_S = (struct thr_arg) { .character='S', .col=0, .row=16, .color=0x1c }; | |
sos_create_kernel_thread("YO[S]", demo_thread, (void*)&arg_S, SOS_SCHED_PRIO_RT_LOWEST-1); | |
| |
sos_restore_IRQs(flags); | |
} | |
| |
/* ====================================================================== | /* ====================================================================== |
* An operating system MUST always have a ready thread ! Otherwise: | * An operating system MUST always have a ready thread ! Otherwise: |
|
|
| |
| |
/* ====================================================================== | /* ====================================================================== |
| * Start the "init" (userland) process |
| */ |
| static sos_ret_t start_init() |
| { |
| sos_ret_t retval; |
| struct sos_umem_vmm_as *as_init; |
| struct sos_process *proc_init; |
| struct sos_thread *new_thr; |
| sos_uaddr_t ustack, start_uaddr; |
| |
| /* Create the new process */ |
| proc_init = sos_process_create("init", FALSE); |
| if (! proc_init) |
| return -SOS_ENOMEM; |
| as_init = sos_process_get_address_space(proc_init); |
| |
| /* Map the 'init' program in user space */ |
| start_uaddr = sos_binfmt_elf32_map(as_init, "init"); |
| if (0 == start_uaddr) |
| { |
| sos_process_unref(proc_init); |
| return -SOS_ENOENT; |
| } |
| |
| /* Allocate the user stack */ |
| ustack = (SOS_PAGING_TOP_USER_ADDRESS - SOS_DEFAULT_USER_STACK_SIZE) + 1; |
| retval = sos_dev_zero_map(as_init, &ustack, SOS_DEFAULT_USER_STACK_SIZE, |
| SOS_VM_MAP_PROT_READ | SOS_VM_MAP_PROT_WRITE, |
| /* PRIVATE */ 0); |
| if (SOS_OK != retval) |
| { |
| sos_bochs_printf("ici 2\n"); |
| sos_process_unref(proc_init); |
| return -SOS_ENOMEM; |
| } |
| |
| /* Now create the user thread */ |
| new_thr = sos_create_user_thread(NULL, |
| proc_init, |
| start_uaddr, |
| 0, 0, |
| ustack + SOS_DEFAULT_USER_STACK_SIZE - 4, |
| SOS_SCHED_PRIO_TS_LOWEST); |
| if (! new_thr) |
| { |
| sos_bochs_printf("ici 3\n"); |
| sos_process_unref(proc_init); |
| return -SOS_ENOMEM; |
| } |
| |
| sos_process_unref(proc_init); |
| return SOS_OK; |
| } |
| |
| |
| /* ====================================================================== |
* The C entry point of our operating system | * The C entry point of our operating system |
*/ | */ |
void sos_main(unsigned long magic, unsigned long addr) | void sos_main(unsigned long magic, unsigned long addr) |
|
|
sos_x86_videomem_printf(1, 0, | sos_x86_videomem_printf(1, 0, |
SOS_X86_VIDEO_FG_YELLOW | SOS_X86_VIDEO_BG_BLUE, | SOS_X86_VIDEO_FG_YELLOW | SOS_X86_VIDEO_BG_BLUE, |
"Welcome From GRUB to %s%c RAM is %dMB (upper mem = 0x%x kB)", | "Welcome From GRUB to %s%c RAM is %dMB (upper mem = 0x%x kB)", |
"SOS article 7", ',', | "SOS article 7.5", ',', |
(unsigned)mbi->mem_upper); | (unsigned)mbi->mem_upper); |
else | else |
/* Not loaded with grub */ | /* Not loaded with grub */ |
sos_x86_videomem_printf(1, 0, | sos_x86_videomem_printf(1, 0, |
SOS_X86_VIDEO_FG_YELLOW | SOS_X86_VIDEO_BG_BLUE, | SOS_X86_VIDEO_FG_YELLOW | SOS_X86_VIDEO_BG_BLUE, |
"Welcome to SOS article 7"); | "Welcome to SOS article 7.5"); |
sos_bochs_putstring("Message in a bochs: This is SOS article 7.\n"); | sos_bochs_putstring("Message in a bochs: This is SOS article 7.5.\n"); |
/* Setup CPU segmentation and IRQ subsystem */ | /* Setup CPU segmentation and IRQ subsystem */ |
sos_gdt_subsystem_setup(); | sos_gdt_subsystem_setup(); |
|
|
| |
/* Declare the IDLE thread */ | /* Declare the IDLE thread */ |
SOS_ASSERT_FATAL(sos_create_kernel_thread("idle", idle_thread, NULL, | SOS_ASSERT_FATAL(sos_create_kernel_thread("idle", idle_thread, NULL, |
SOS_SCHED_PRIO_TS_LOWEST) != NULL); | SOS_SCHED_PRIO_TS_LOWEST) != NULL); |
/* Prepare the stats subsystem */ | /* Prepare the stats subsystem */ |
sos_load_subsystem_setup(); | sos_load_subsystem_setup(); |
|
|
| |
| |
/* | /* |
 | * Initialize user address space management subsystem |
| */ |
| sos_umem_vmm_subsystem_setup(); |
| sos_dev_zero_subsystem_setup(); |
| |
| /* |
* Initialize process stuff | * Initialize process stuff |
*/ | */ |
sos_process_subsystem_setup(); | sos_process_subsystem_setup(); |
|
|
interrupt call the scheduler */ | interrupt call the scheduler */ |
asm volatile ("sti\n"); | asm volatile ("sti\n"); |
| |
  /* Run some tests involving USER processes and threads */         | 	  /* Start the 'init' process, which in turn launches the other 
extern void test_art7(); | programs */ |
test_art7(); | start_init(); |
| |
/* Now run some Kernel threads just for fun ! */ | |
extern void MouseSim(); | |
MouseSim(); | |
test_thread(); | |
/* | /* |
* We can safely exit from this function now, for there is already | * We can safely exit from this function now, for there is already |
| |
/tmp/sos-code-article7/sos/syscall.c (2005-02-05 17:52:23.000000000 +0100
) |
|
../sos-code-article7.5/sos/syscall.c (2005-04-27 20:17:18.000000000 +0200
) |
|
|
|
#include <sos/kmalloc.h> | #include <sos/kmalloc.h> |
#include <sos/klibc.h> | #include <sos/klibc.h> |
#include <drivers/bochs.h> | #include <drivers/bochs.h> |
| #include <hwcore/paging.h> |
| #include <sos/physmem.h> |
| #include <sos/umem_vmm.h> |
| #include <drivers/zero.h> |
| #include <drivers/mem.h> |
| #include <sos/binfmt_elf32.h> |
| |
#include <hwcore/cpu_context.h> | #include <hwcore/cpu_context.h> |
#include <sos/uaccess.h> | #include <sos/uaccess.h> |
|
|
retval = sos_syscall_get1arg(user_ctxt, & status); | retval = sos_syscall_get1arg(user_ctxt, & status); |
if (SOS_OK != retval) | if (SOS_OK != retval) |
break; | break; |
/* sos_bochs_printf("Syscall: exit(%d)\n", status); */ | |
retval = -SOS_EFATAL; /* Not reached */ | retval = -SOS_EFATAL; /* Not reached */ |
} | } |
break; | break; |
| |
| case SOS_SYSCALL_ID_FORK: |
| { |
| struct sos_thread *cur_thr, *new_thr; |
| struct sos_process *new_proc; |
| |
| cur_thr = sos_thread_get_current(); |
| |
| /* Duplicate the current process (and its address space) */ |
| new_proc = sos_process_create(NULL, TRUE); |
| if (! new_proc) |
| { |
| retval = -SOS_ENOMEM; |
| break; |
| } |
| |
| 	     /* Create *the* thread in this new process, copy of the 
| current user thread (same registers, EXCEPT eax which is |
| set to 0) */ |
| new_thr = |
| sos_duplicate_user_thread(NULL, new_proc, |
| cur_thr, |
| user_ctxt, |
| 0); |
| if (! new_thr) |
| { |
| sos_process_unref(new_proc); |
| retval = -SOS_ENOMEM; |
| break; |
| } |
| |
| sos_process_unref(new_proc); |
| |
| /* Return to the "parent" thread with a value different from |
| 0. Unix says it should be the "PID" of the child. We don't |
| have such a "PID" notion for now */ |
| retval = (sos_ui32_t)new_proc; |
| } |
| break; |
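| 
| 	  /* 
| 	   * Userland view of this convention (a sketch; the fork() and 
| 	   * syscall0() names are hypothetical, not taken from the SOS 
| 	   * tree): 
| 	   * 
| 	   *   int rc = fork();      traps to SOS_SYSCALL_ID_FORK 
| 	   *   if (0 == rc) 
| 	   *     ...                 child: its EAX was forced to 0 above 
| 	   *   else if (rc > 0) 
| 	   *     ...                 parent: rc is the child process 
| 	   *                         address, standing in for a "PID" 
| 	   *   else 
| 	   *     ...                 fork failed (-SOS_ENOMEM) 
| 	   */ 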
| |
| case SOS_SYSCALL_ID_EXEC: |
| { |
| struct sos_thread *cur_thr, *new_thr; |
| struct sos_umem_vmm_as *new_as; |
| sos_uaddr_t user_str, ustack, start_uaddr; |
| sos_size_t len; |
| char * str; |
| |
| cur_thr = sos_thread_get_current(); |
| |
| /* Make sure the process has exactly 1 thread in it */ |
| if (sos_process_get_nb_threads(cur_thr->process) != 1) |
| { |
| retval = -SOS_EBUSY; |
| break; |
| } |
| |
| /* Get the user arguments */ |
| retval = sos_syscall_get2args(user_ctxt, & user_str, & len); |
| if (SOS_OK != retval) |
| break; |
| |
| 	    /* Copy the program name into kernel space */ 
| str = (char*)sos_kmalloc(len + 1, 0); |
| if (! str) |
| { |
| retval = -SOS_ENOMEM; |
| break; |
| } |
| retval = sos_strzcpy_from_user(str, user_str, len + 1); |
| if (retval < SOS_OK) |
| { |
| sos_kfree((sos_vaddr_t)str); |
| break; |
| } |
| |
| /* Create a new empty address space to map the program */ |
| new_as = sos_umem_vmm_create_empty_as(cur_thr->process); |
| if (! new_as) |
| { |
| sos_kfree((sos_vaddr_t)str); |
| retval = -SOS_ENOMEM; |
| break; |
| } |
| |
| /* Map the program in it */ |
| start_uaddr = sos_binfmt_elf32_map(new_as, str); |
| if (start_uaddr == (sos_uaddr_t)NULL) |
| { |
| sos_umem_vmm_delete_as(new_as); |
| sos_kfree((sos_vaddr_t)str); |
| retval = -SOS_ENOENT; |
| break; |
| } |
| |
| /* Allocate space for the user stack (8MB) */ |
| #define SOS_DEFAULT_USER_STACK_SIZE (8 << 20) |
| ustack = (SOS_PAGING_TOP_USER_ADDRESS - SOS_DEFAULT_USER_STACK_SIZE) |
| + 1; |
| retval = sos_dev_zero_map(new_as, &ustack, SOS_DEFAULT_USER_STACK_SIZE, |
| SOS_VM_MAP_PROT_READ | SOS_VM_MAP_PROT_WRITE, |
| /* PRIVATE */ 0); |
| if (SOS_OK != retval) |
| { |
| sos_umem_vmm_delete_as(new_as); |
| sos_kfree((sos_vaddr_t)str); |
| break; |
| } |
| |
| /* Now create the user thread */ |
| new_thr = sos_create_user_thread(NULL, |
| cur_thr->process, |
| start_uaddr, |
| 0, 0, |
| ustack + SOS_DEFAULT_USER_STACK_SIZE |
| - 4, |
| SOS_SCHED_PRIO_TS_LOWEST); |
| if (! new_thr) |
| { |
| sos_umem_vmm_delete_as(new_as); |
| sos_kfree((sos_vaddr_t)str); |
| retval = -SOS_ENOMEM; |
| break; |
| } |
| |
| sos_process_set_name(cur_thr->process, str); |
| |
| /* Switch to this address space */ |
| retval = sos_process_set_address_space(cur_thr->process, |
| new_as); |
| if (SOS_OK != retval) |
| { |
| sos_umem_vmm_delete_as(new_as); |
| sos_kfree((sos_vaddr_t)str); |
| break; |
| } |
| |
| /* The current thread must exit now */ |
| sos_kfree((sos_vaddr_t)str); |
| sos_thread_exit(); |
| retval = -SOS_EFATAL; |
| } |
| break; |
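| 
| 	  /* 
| 	   * Matching userland call (a sketch; the syscall2() stub name 
| 	   * is hypothetical). The name is passed as a (pointer, length) 
| 	   * pair so the kernel knows how many bytes to reserve before 
| 	   * sos_strzcpy_from_user() copies it: 
| 	   * 
| 	   *   syscall2(SOS_SYSCALL_ID_EXEC, 
| 	   *            (unsigned)"other_prog", strlen("other_prog")); 
| 	   * 
| 	   * On success the call never returns: the calling thread dies 
| 	   * in sos_thread_exit() and the new thread starts at the ELF 
| 	   * entry point of the freshly mapped program. 
| 	   */ 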
| |
| case SOS_SYSCALL_ID_MMAP: |
| { |
| sos_uaddr_t ptr_hint_uaddr; |
| sos_uaddr_t hint_uaddr; |
| sos_size_t length; |
| sos_ui32_t prot; |
| sos_ui32_t flags; |
| sos_uaddr_t name_user; |
| sos_ui32_t offs64_hi; |
| sos_ui32_t offs64_lo; |
| sos_luoffset_t offset_in_resource; |
| char name[256]; |
| struct sos_umem_vmm_as * my_as; |
| |
| retval = sos_syscall_get7args(user_ctxt, |
| (unsigned int*)& ptr_hint_uaddr, |
| (unsigned int*)& length, |
| (unsigned int*)& prot, |
| (unsigned int*)& flags, |
| (unsigned int*)& name_user, |
| (unsigned int*)& offs64_hi, |
| (unsigned int*)& offs64_lo); |
| if (SOS_OK != retval) |
| break; |
| |
| /* Compute 64 bits offset value */ |
| offset_in_resource = offs64_hi; |
| offset_in_resource <<= 32; |
| offset_in_resource |= offs64_lo; |
| |
| retval = sos_memcpy_from_user((sos_vaddr_t)& hint_uaddr, |
| ptr_hint_uaddr, |
| sizeof(hint_uaddr)); |
| if (sizeof(hint_uaddr) != retval) |
| { |
| retval = -SOS_EFAULT; |
| break; |
| } |
| |
| retval = sos_strzcpy_from_user(name, name_user, sizeof(name)); |
| if (SOS_OK != retval) |
| break; |
| |
| my_as |
| = sos_process_get_address_space(sos_thread_get_current()->process); |
| if ( (0 == strncmp(name, "/dev/zero", sizeof(name))) |
| || (0 == strncmp(name, "/dev/null", sizeof(name))) ) |
| retval = sos_dev_zero_map(my_as, & hint_uaddr, length, prot, flags); |
| else if (0 == strncmp(name, "/dev/mem", sizeof(name))) |
| retval = sos_dev_physmem_map(my_as, & hint_uaddr, length, |
| offset_in_resource, prot, flags); |
| else if (0 == strncmp(name, "/dev/kmem", sizeof(name))) |
| retval = sos_dev_kmem_map(my_as, & hint_uaddr, length, |
| offset_in_resource, prot, flags); |
| else |
| retval = -SOS_ENOENT; |
| |
| if (SOS_OK == retval) |
| { |
| if (sizeof(hint_uaddr) |
| != sos_memcpy_to_user(ptr_hint_uaddr, |
| (sos_vaddr_t)& hint_uaddr, |
| sizeof(hint_uaddr))) |
| { |
| sos_umem_vmm_unmap(my_as, hint_uaddr, length); |
| retval = -SOS_EFAULT; |
| } |
| } |
| |
| } |
| break; |
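| 
| 	  /* 
| 	   * Userland side of the 7-argument convention (a sketch; the 
| 	   * syscall7() stub name is hypothetical). The 64-bit resource 
| 	   * offset travels as two 32-bit halves, and the address is 
| 	   * passed by reference so the kernel can hand back the address 
| 	   * it actually chose: 
| 	   * 
| 	   *   sos_uaddr_t addr = 0;    (0: let the kernel choose) 
| 	   *   syscall7(SOS_SYSCALL_ID_MMAP, (unsigned)&addr, length, 
| 	   *            prot, flags, (unsigned)"/dev/zero", 
| 	   *            (unsigned)(off64 >> 32), (unsigned)off64); 
| 	   */ 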
| |
| case SOS_SYSCALL_ID_MUNMAP: |
| { |
| sos_uaddr_t start_uaddr; |
| sos_size_t size; |
| struct sos_umem_vmm_as * my_as; |
| |
| my_as |
| = sos_process_get_address_space(sos_thread_get_current()->process); |
| |
| retval = sos_syscall_get2args(user_ctxt, |
| (unsigned int*)& start_uaddr, |
| (unsigned int*)& size); |
| if (SOS_OK != retval) |
| break; |
| |
| retval = sos_umem_vmm_unmap(my_as, start_uaddr, size); |
| } |
| break; |
| |
| case SOS_SYSCALL_ID_MPROTECT: |
| { |
| sos_uaddr_t start_uaddr; |
| sos_size_t size; |
| sos_ui32_t new_access_rights; |
| struct sos_umem_vmm_as * my_as; |
| |
| my_as |
| = sos_process_get_address_space(sos_thread_get_current()->process); |
| |
| retval = sos_syscall_get3args(user_ctxt, |
| (unsigned int*)& start_uaddr, |
| (unsigned int*)& size, |
| (unsigned int*)& new_access_rights); |
| if (SOS_OK != retval) |
| break; |
| |
| retval = sos_thread_prepare_user_space_access(NULL, (sos_vaddr_t)NULL); |
| if (SOS_OK != retval) |
| break; |
| |
| retval = sos_umem_vmm_chprot(my_as, start_uaddr, size, |
| new_access_rights); |
| |
| sos_thread_end_user_space_access(); |
| } |
| break; |
| |
| case SOS_SYSCALL_ID_MRESIZE: |
| { |
| sos_uaddr_t old_uaddr; |
| sos_size_t old_size; |
| sos_uaddr_t *uptr_new_uaddr; |
| sos_uaddr_t new_uaddr; |
| sos_size_t new_size; |
| sos_ui32_t flags; |
| struct sos_umem_vmm_as * my_as; |
| |
| my_as |
| = sos_process_get_address_space(sos_thread_get_current()->process); |
| |
| retval = sos_syscall_get5args(user_ctxt, |
| (unsigned int*)& old_uaddr, |
| (unsigned int*)& old_size, |
| (unsigned int*)& uptr_new_uaddr, |
| (unsigned int*)& new_size, |
| (unsigned int*)& flags); |
| if (SOS_OK != retval) |
| break; |
| |
| if (sizeof(new_uaddr) != sos_memcpy_from_user((sos_vaddr_t)& new_uaddr, |
| (sos_uaddr_t) |
| uptr_new_uaddr, |
| sizeof(new_uaddr))) |
| { |
| retval = -SOS_EFAULT; |
| break; |
| } |
| |
| retval = sos_thread_prepare_user_space_access(NULL, (sos_vaddr_t)NULL); |
| if (SOS_OK != retval) |
| break; |
| |
| retval = sos_umem_vmm_resize(my_as, old_uaddr, old_size, |
| & new_uaddr, new_size, flags); |
| sos_thread_end_user_space_access(); |
| if (SOS_OK != retval) |
| break; |
| |
| if (sizeof(new_uaddr) |
| 	      != sos_memcpy_to_user((sos_uaddr_t)uptr_new_uaddr, 
| (sos_vaddr_t)&new_uaddr, |
| sizeof(new_uaddr))) |
| { |
| retval = -SOS_EFAULT; |
| break; |
| } |
| } |
| break; |
| |
| case SOS_SYSCALL_ID_NEW_THREAD: |
| { |
| sos_uaddr_t start_func; |
| sos_ui32_t start_arg1, start_arg2; |
| sos_size_t stack_size; |
| sos_uaddr_t stack_uaddr; |
| |
| struct sos_thread * new_thr; |
| struct sos_umem_vmm_as * my_as; |
| |
| my_as |
| = sos_process_get_address_space(sos_thread_get_current()->process); |
| |
| retval = sos_syscall_get4args(user_ctxt, |
| (unsigned int*)& start_func, |
| (unsigned int*)& start_arg1, |
| (unsigned int*)& start_arg2, |
| (unsigned int*)& stack_size); |
| if (SOS_OK != retval) |
| break; |
| |
| if (stack_size <= 0) |
| { |
| retval = -SOS_EINVAL; |
| break; |
| } |
| |
| /* Allocate the stack */ |
| stack_uaddr = 0; |
| stack_size = SOS_PAGE_ALIGN_SUP(stack_size); |
| retval = sos_dev_zero_map(my_as, & stack_uaddr, stack_size, |
| SOS_VM_MAP_PROT_READ | SOS_VM_MAP_PROT_WRITE, |
| /* PRIVATE */ 0); |
| if (SOS_OK != retval) |
| break; |
| |
| /* Now create the user thread */ |
| new_thr = sos_create_user_thread(NULL, |
| sos_thread_get_current()->process, |
| start_func, |
| start_arg1, |
| start_arg2, |
| stack_uaddr + stack_size - 4, |
| SOS_SCHED_PRIO_TS_LOWEST); |
| |
| if (! new_thr) |
| { |
| sos_umem_vmm_unmap(my_as, stack_uaddr, stack_size); |
| retval = -SOS_ENOMEM; |
| break; |
| } |
| } |
| break; |
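| 
| 	  /* 
| 	   * Note the stack arithmetic: the new thread's initial SP is 
| 	   * stack_uaddr + stack_size - 4, ie the last 32-bit word that 
| 	   * still lies inside the freshly mapped /dev/zero region. A 
| 	   * userland sketch (hypothetical syscall4() stub): 
| 	   * 
| 	   *   syscall4(SOS_SYSCALL_ID_NEW_THREAD, 
| 	   *            (unsigned)&thread_func, arg1, arg2, 32*4096); 
| 	   */ 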
| |
| case SOS_SYSCALL_ID_NANOSLEEP: |
| { |
| struct sos_time delay; |
| |
| retval = sos_syscall_get2args(user_ctxt, |
| (unsigned int*)& delay.sec, |
| (unsigned int*)& delay.nanosec); |
| if (SOS_OK != retval) |
| break; |
| |
| retval = sos_thread_sleep(& delay); |
| } |
| break; |
| |
| case SOS_SYSCALL_ID_BRK: |
| { |
| sos_uaddr_t new_top_heap; |
| struct sos_umem_vmm_as * my_as; |
| |
| my_as |
| = sos_process_get_address_space(sos_thread_get_current()->process); |
| |
| retval = sos_syscall_get1arg(user_ctxt, |
| (unsigned int*)& new_top_heap); |
| if (SOS_OK != retval) |
| break; |
| |
| retval = sos_thread_prepare_user_space_access(NULL, (sos_vaddr_t)NULL); |
| if (SOS_OK != retval) |
| break; |
| |
| retval = sos_umem_vmm_brk(my_as, new_top_heap); |
| sos_thread_end_user_space_access(); |
| } |
| break; |
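| 
| 	  /* 
| 	   * This single syscall is enough for a userland malloc() to 
| 	   * grow its arena. A sketch, assuming the Unix-like convention 
| 	   * that passing 0 merely queries the heap and that the syscall 
| 	   * reports the resulting heap top: 
| 	   * 
| 	   *   top = syscall1(SOS_SYSCALL_ID_BRK, 0); 
| 	   *   syscall1(SOS_SYSCALL_ID_BRK, top + needed); 
| 	   */ 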
| |
case SOS_SYSCALL_ID_BOCHS_WRITE: | case SOS_SYSCALL_ID_BOCHS_WRITE: |
{ | { |
sos_uaddr_t user_str; | sos_uaddr_t user_str; |
unsigned int len; | sos_size_t len; |
retval = sos_syscall_get2args(user_ctxt, & user_str, & len); | retval = sos_syscall_get2args(user_ctxt, & user_str, & len); |
if (SOS_OK != retval) | if (SOS_OK != retval) |
break; | break; |
| |
/* sos_bochs_printf("Syscall: bochs_write(0x%x, %d)\n", */ | |
/* user_str, len); */ | |
if (str) | if (str) |
{ | { |
              retval = sos_copy_from_user((sos_vaddr_t) str, user_str, len); |		  retval = sos_strzcpy_from_user(str, user_str, len+1); 
              if (SOS_OK != retval)                                   |		  if (retval >= SOS_OK) 
                retval = sos_bochs_putstring(str);                    |		    { 
              retval = len;                                           |		      sos_bochs_printf("THR 0x%x: ", 
                                                                      |				       (unsigned)sos_thread_get_current()); 
                                                                      |		      retval = sos_bochs_putstring(str); 
                                                                      |		      retval = len; 
                                                                      |		    } 
} | } |
else | else |
|
|
} | } |
break; | break; |
| |
| |
| /* *********************************************** |
| * Debug syscalls (will be removed in the future) |
| */ |
| |
| |
| /** |
| * Syscall 4012: hexdump of a user-space memory region |
| 	 * args: addr_start, size; retval=ignored 
| */ |
| case 4012: |
| { |
| sos_uaddr_t user_str; |
| unsigned int len; |
| unsigned char * str; |
| |
| retval = sos_syscall_get2args(user_ctxt, & user_str, & len); |
| if (SOS_OK != retval) |
| break; |
| |
| 	    str = (unsigned char*)sos_kmalloc(len + 1, 0); 
| if (str) |
| { |
| int i; |
| sos_bochs_printf("Hexdump(0x%x, %d):\n", user_str, len); |
| retval = sos_memcpy_from_user((sos_vaddr_t) str, user_str, len); |
| sos_bochs_printf(" (Successfully copied %d out of %d)\n", |
| retval, len); |
| |
| for (i = 0 ; i < retval ; i++) |
| { |
| if ((i % 32) == 0) |
| sos_bochs_printf("%x:", i); |
| sos_bochs_printf(" %x", str[i]); |
| if (((i+1) % 32) == 0) |
| sos_bochs_printf("\n"); |
| } |
| if (i % 32) |
| sos_bochs_printf("\n"); |
| |
| sos_kfree((sos_vaddr_t)str); |
| } |
| else |
| retval = -SOS_ENOMEM; |
| } |
| break; |
| |
| |
| /** |
| 	 * Syscall 4004: lists the VRs of the current thread's address space 
| * args: debug_string, retval=ignored |
| */ |
| case 4004: |
| { |
| sos_uaddr_t ustr; |
| char * kstr; |
| struct sos_umem_vmm_as * my_as; |
| |
| retval = sos_syscall_get1arg(user_ctxt, & ustr); |
| if (SOS_OK != retval) |
| break; |
| |
| retval = sos_strndup_from_user(& kstr, ustr, 256, 0); |
| if (SOS_OK != retval) |
| break; |
| |
| extern void sos_dump_as(const struct sos_umem_vmm_as *, const char *); |
| my_as |
| = sos_process_get_address_space(sos_thread_get_current()->process); |
| sos_dump_as(my_as, kstr); |
| sos_kfree((sos_vaddr_t)kstr); |
| } |
| break; |
| |
| |
| /** |
| * Syscall 4008: dump the list of processes in the system |
| * args: none, retval=ignored |
| */ |
| case 4008: |
| { |
| extern void sos_process_dumplist(void); |
| sos_process_dumplist(); |
| retval = SOS_OK; |
| } |
| break; |
| |
default: | default: |
sos_bochs_printf("Syscall: UNKNOWN[%d]\n", syscall_id); | sos_bochs_printf("Syscall: UNKNOWN[%d]\n", syscall_id); |
retval = -SOS_ENOSUP; | retval = -SOS_ENOSUP; |
} | } |
| |
} | } |
| |
/tmp/sos-code-article7/sos/test-art7.c (2005-02-05 17:52:23.000000000 +0100
) |
|
../sos-code-article7.5/sos/test-art7.c (1970-01-01 01:00:00.000000000 +0100
) |
|
|
|
/* Copyright (C) 2005 David Decotigny | |
Copyright (C) 1995 TIS Committee (ELF typedefs, constants and macros) | |
| |
This program is free software; you can redistribute it and/or | |
modify it under the terms of the GNU General Public License | |
as published by the Free Software Foundation; either version 2 | |
of the License, or (at your option) any later version. | |
| |
This program is distributed in the hope that it will be useful, | |
but WITHOUT ANY WARRANTY; without even the implied warranty of | |
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
GNU General Public License for more details. | |
| |
You should have received a copy of the GNU General Public License | |
along with this program; if not, write to the Free Software | |
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, | |
USA. | |
*/ | |
| |
#include <sos/types.h> | |
#include <sos/klibc.h> | |
#include <drivers/bochs.h> | |
#include <sos/physmem.h> | |
#include <sos/assert.h> | |
| |
#include <sos/process.h> | |
#include <sos/thread.h> | |
| |
| |
/** | |
* @file test-art7.c | |
* | |
* Basic tests for the user thread/process management API | |
*/ | |
| |
| |
/** | |
* The "C" structure of a user program image in the kernel. Structures | |
* like this are created by the Makefile in the userland/ directory | |
*/ | |
struct userprog_entry | |
{ | |
const char *name; | |
sos_vaddr_t bottom_vaddr; | |
sos_vaddr_t top_vaddr; | |
}; | |
| |
| |
/** | |
* Symbol marking the start of the userprogs table, as setup by the | |
* ld script in the userland/ directory | |
*/ | |
extern char _userprogs_table; | |
| |
| |
/* | |
* Local functions | |
*/ | |
| |
| |
/** | |
* Function to locate the given user program image in the kernel memory | |
*/ | |
static struct userprog_entry * lookup_userprog(const char *name); | |
| |
| |
/** | |
* Function to create a new process containing the given USER program | |
* image. This function automatically locates the destination addresses | |
 * of the program by examining its ELF header | 
* | |
* @return The address of the first instruction of the program, as | |
* given by its ELF header, or 0 when the program is not a correct ELF | |
* image. | |
*/ | |
static sos_uaddr_t load_elf_prog(const struct userprog_entry *prog); | |
| |
| |
/** | |
* Function that locates a USER program in the kernel image, creates a | |
* new USER process for it, and creates the given nb_uthreads USER | |
* threads inside it. | |
*/ | |
static sos_ret_t spawn_program(const char *progname, | |
unsigned nb_uthreads); | |
| |
| |
/** | |
* The main function for our tests | |
*/ | |
void test_art7() | |
{ | |
spawn_program("myprog5", 5); | |
spawn_program("myprog1", 10); | |
spawn_program("myprog5", 1); | |
spawn_program("myprog6", 12); | |
spawn_program("myprog2", 10); | |
spawn_program("myprog5", 1); | |
spawn_program("myprog3", 10); | |
spawn_program("myprog5", 1); | |
spawn_program("myprog1", 10); | |
spawn_program("myprog6", 12); | |
spawn_program("myprog5", 1); | |
spawn_program("myprog4", 10); | |
spawn_program("myprog5", 1); | |
spawn_program("myprog2", 10); | |
spawn_program("myprog6", 12); | |
spawn_program("myprog5", 1); | |
} | |
| |
| |
static sos_ret_t spawn_program(const char *progname, | |
unsigned nb_uthreads) | |
{ | |
int i; | |
| |
sos_uaddr_t prog_entry, stack_top_uaddr; | |
struct userprog_entry *prog; | |
struct sos_process *new_proc; | |
| |
prog = lookup_userprog(progname); | |
if (! prog) | |
return -SOS_EINVAL; | |
| |
new_proc = sos_process_create_empty(progname); | |
if (! new_proc) | |
return -SOS_ENOMEM; | |
| |
/* Squat this new process to map the user program into it */ | |
SOS_ASSERT_FATAL(SOS_OK | |
== sos_thread_change_current_mm_context(sos_process_get_mm_context(new_proc))); | |
| |
/* Load the user program image */ | |
prog_entry = load_elf_prog(prog); | |
if (! prog_entry) | |
{ | |
sos_process_unref(new_proc); | |
return -SOS_ENOMEM; | |
} | |
| |
/* Map the user stacks into it and create the user threads */ | |
/* By default, the first user stack will be located at the end of | |
the user address space (ie 4GB), the stacks of the other threads | |
will be located (12 pages) below */ | |
for (i = 0, stack_top_uaddr = 0xfffffffc ; | |
i < nb_uthreads ; | |
i++, stack_top_uaddr -= 12*SOS_PAGE_SIZE) | |
{ | |
char thrname[16]; | |
| |
/* Allocate 1 page for the stack */ | |
sos_ret_t retval; | |
sos_uaddr_t stack_base = SOS_PAGE_ALIGN_INF(stack_top_uaddr); | |
sos_paddr_t ppage; | |
| |
ppage = sos_physmem_ref_physpage_new(FALSE); | |
SOS_ASSERT_FATAL(ppage != 0); | |
| |
      /* Map it in the process space. Might fail if there is not | 
enough RAM (we don't support swap-out for the moment) */ | |
retval = sos_paging_map(ppage, stack_base, TRUE, | |
SOS_VM_MAP_PROT_READ | |
| SOS_VM_MAP_PROT_WRITE); | |
SOS_ASSERT_FATAL(retval == SOS_OK); | |
| |
retval = sos_physmem_unref_physpage(ppage); | |
SOS_ASSERT_FATAL(retval == SOS_OK); | |
| |
/* Poison the stack to detect the use of uninitialized | |
variables */ | |
memset((void*)stack_base, 0xa5, SOS_PAGE_SIZE); | |
| |
/* Create the user thread */ | |
snprintf(thrname, sizeof(thrname), "%s:%d", progname, i); | |
sos_bochs_printf("Spawning %s\n", thrname); | |
sos_create_user_thread(thrname, | |
new_proc, | |
prog_entry, | |
0, | |
stack_top_uaddr, | |
SOS_SCHED_PRIO_TS_LOWEST); | |
| |
sos_thread_yield(); | |
} | |
| |
/* Don't need the reference to the process anymore */ | |
sos_process_unref(new_proc); | |
| |
/* Revert to normal kernel thread's address space */ | |
SOS_ASSERT_FATAL(SOS_OK | |
== sos_thread_change_current_mm_context(NULL)); | |
| |
return SOS_OK; | |
} | |
| |
| |
/** | |
* Lookup a user program located inside the kernel's image | |
*/ | |
static struct userprog_entry * lookup_userprog(const char *name) | |
{ | |
struct userprog_entry *prog; | |
| |
if (! name) | |
return NULL; | |
| |
/* Walk through the table of user program description structures to | |
find the user program with the given name */ | |
for (prog = (struct userprog_entry*) & _userprogs_table ; | |
prog && (prog->name != NULL) ; | |
prog++) | |
{ | |
if (0 == strcmp(name, prog->name)) | |
/* Found it ! */ | |
return prog; | |
} | |
| |
return NULL; | |
} | |
| |
| |
/** | |
* Make sure the program is in a valid ELF format, map it into memory, | |
* and return the address of its entry point (ie _start function) | |
* | |
* @return 0 when the program is not a valid ELF | |
*/ | |
static sos_uaddr_t load_elf_prog(const struct userprog_entry *prog) | |
{ | |
int i; | |
| |
/** | |
* Typedefs, constants and structure definitions as given by the ELF | |
* standard specifications. | |
*/ | |
typedef unsigned long Elf32_Addr; | |
typedef unsigned long Elf32_Word; | |
typedef unsigned short Elf32_Half; | |
typedef unsigned long Elf32_Off; | |
typedef signed long Elf32_Sword; | |
| |
/* Elf identification */ | |
| |
#define EI_NIDENT 16 | |
typedef struct { | |
unsigned char e_ident[EI_NIDENT]; | |
Elf32_Half e_type; | |
Elf32_Half e_machine; | |
Elf32_Word e_version; | |
Elf32_Addr e_entry; | |
Elf32_Off e_phoff; | |
Elf32_Off e_shoff; | |
Elf32_Word e_flags; | |
Elf32_Half e_ehsize; | |
Elf32_Half e_phentsize; | |
Elf32_Half e_phnum; | |
Elf32_Half e_shentsize; | |
Elf32_Half e_shnum; | |
Elf32_Half e_shstrndx; | |
} __attribute__((packed)) Elf32_Ehdr_t; | |
| |
/* e_ident value */ | |
#define ELFMAG0 0x7f | |
#define ELFMAG1 'E' | |
#define ELFMAG2 'L' | |
#define ELFMAG3 'F' | |
| |
/* e_ident offsets */ | |
#define EI_MAG0 0 | |
#define EI_MAG1 1 | |
#define EI_MAG2 2 | |
#define EI_MAG3 3 | |
#define EI_CLASS 4 | |
#define EI_DATA 5 | |
#define EI_VERSION 6 | |
#define EI_PAD 7 | |
| |
/* e_ident[EI_CLASS] */ | |
#define ELFCLASSNONE 0 | |
#define ELFCLASS32 1 | |
#define ELFCLASS64 2 | |
| |
/* e_ident[EI_DATA] */ | |
#define ELFDATANONE 0 | |
#define ELFDATA2LSB 1 | |
#define ELFDATA2MSB 2 | |
| |
/* e_type */ | |
#define ET_NONE 0 /* No file type */ | |
#define ET_REL 1 /* Relocatable file */ | |
#define ET_EXEC 2 /* Executable file */ | |
#define ET_DYN 3 /* Shared object file */ | |
#define ET_CORE 4 /* Core file */ | |
#define ET_LOPROC 0xff00 /* Processor-specific */ | |
#define ET_HIPROC 0xffff /* Processor-specific */ | |
| |
/* e_machine */ | |
#define EM_NONE 0 /* No machine */ | |
#define EM_M32 1 /* AT&T WE 32100 */ | |
#define EM_SPARC 2 /* SPARC */ | |
#define EM_386 3 /* Intel 80386 */ | |
#define EM_68K 4 /* Motorola 68000 */ | |
#define EM_88K 5 /* Motorola 88000 */ | |
#define EM_860 7 /* Intel 80860 */ | |
#define EM_MIPS 8 /* MIPS RS3000 */ | |
| |
/* e_version */ | |
#define EV_NONE 0 /* invalid version */ | |
#define EV_CURRENT 1 /* current version */ | |
| |
typedef struct { | |
Elf32_Word p_type; | |
Elf32_Off p_offset; | |
Elf32_Addr p_vaddr; | |
Elf32_Addr p_paddr; | |
Elf32_Word p_filesz; | |
Elf32_Word p_memsz; | |
Elf32_Word p_flags; | |
Elf32_Word p_align; | |
} __attribute__((packed)) Elf32_Phdr_t; | |
| |
/* Reserved segment types p_type */ | |
#define PT_NULL 0 | |
#define PT_LOAD 1 | |
#define PT_DYNAMIC 2 | |
#define PT_INTERP 3 | |
#define PT_NOTE 4 | |
#define PT_SHLIB 5 | |
#define PT_PHDR 6 | |
#define PT_LOPROC 0x70000000 | |
#define PT_HIPROC 0x7fffffff | |
| |
/* p_flags */ | |
#define PF_X 1 | |
#define PF_W 2 | |
#define PF_R 4 | |
| |
| |
Elf32_Ehdr_t *elf_hdr = (Elf32_Ehdr_t*) prog->bottom_vaddr; | |
Elf32_Phdr_t *elf_phdrs; | |
| |
/* Make sure the image is large enough to contain at least the ELF | |
header */ | |
if (prog->bottom_vaddr + sizeof(Elf32_Ehdr_t) > prog->top_vaddr) | |
{ | |
sos_bochs_printf("ELF prog %s: incorrect header\n", prog->name); | |
return 0; | |
} | |
| |
/* Macro to check expected values for some fields in the ELF header */ | |
#define ELF_CHECK(hdr,field,expected_value) \ | |
({ if ((hdr)->field != (expected_value)) \ | |
{ \ | |
sos_bochs_printf("ELF prog %s: for %s, expected %x, got %x\n", \ | |
prog->name, \ | |
#field, \ | |
(unsigned)(expected_value), \ | |
(unsigned)(hdr)->field); \ | |
return 0; \ | |
} \ | |
}) | |
| |
ELF_CHECK(elf_hdr, e_ident[EI_MAG0], ELFMAG0); | |
ELF_CHECK(elf_hdr, e_ident[EI_MAG1], ELFMAG1); | |
ELF_CHECK(elf_hdr, e_ident[EI_MAG2], ELFMAG2); | |
ELF_CHECK(elf_hdr, e_ident[EI_MAG3], ELFMAG3); | |
ELF_CHECK(elf_hdr, e_ident[EI_CLASS], ELFCLASS32); | |
ELF_CHECK(elf_hdr, e_ident[EI_DATA], ELFDATA2LSB); | |
ELF_CHECK(elf_hdr, e_type, ET_EXEC); | |
ELF_CHECK(elf_hdr, e_version, EV_CURRENT); | |
| |
  /* Get the beginning of the program header table */ | 
elf_phdrs = (Elf32_Phdr_t*) (prog->bottom_vaddr + elf_hdr->e_phoff); | |
| |
/* Map the program segment in R/W mode. To make things clean, we | |
should iterate over the sections, not the program header */ | |
for (i = 0 ; i < elf_hdr->e_phnum ; i++) | |
{ | |
sos_uaddr_t uaddr; | |
| |
/* Ignore the empty program headers that are not marked "LOAD" */ | |
if (elf_phdrs[i].p_type != PT_LOAD) | |
{ | |
if (elf_phdrs[i].p_memsz != 0) | |
{ | |
SOS_FATAL_ERROR("ELF: non-empty non-LOAD segments not supported yet"); | |
} | |
continue; | |
} | |
| |
if (elf_phdrs[i].p_vaddr < SOS_PAGING_BASE_USER_ADDRESS) | |
{ | |
SOS_FATAL_ERROR("User program has an incorrect address"); | |
} | |
| |
/* Map pages of physical memory into user space */ | |
for (uaddr = SOS_PAGE_ALIGN_INF(elf_phdrs[i].p_vaddr) ; | |
uaddr < elf_phdrs[i].p_vaddr + elf_phdrs[i].p_memsz ; | |
uaddr += SOS_PAGE_SIZE) | |
{ | |
sos_ret_t retval; | |
sos_paddr_t ppage; | |
ppage = sos_physmem_ref_physpage_new(TRUE); | |
| |
retval = sos_paging_map(ppage, uaddr, TRUE, | |
SOS_VM_MAP_PROT_READ | |
| SOS_VM_MAP_PROT_WRITE); | |
SOS_ASSERT_FATAL(retval == SOS_OK); | |
| |
retval = sos_physmem_unref_physpage(ppage); | |
SOS_ASSERT_FATAL(retval == SOS_OK); | |
} | |
| |
/* Copy segment into memory */ | |
memcpy((void*) elf_phdrs[i].p_vaddr, | |
(void*) (prog->bottom_vaddr + elf_phdrs[i].p_offset), | |
elf_phdrs[i].p_filesz); | |
} | |
| |
return elf_hdr->e_entry; | |
} | |
/tmp/sos-code-article7/sos/thread.c (2005-02-05 17:52:23.000000000 +0100
) |
|
../sos-code-article7.5/sos/thread.c (2005-04-27 20:17:17.000000000 +0200
) |
|
|
|
#include <hwcore/mm_context.h> | #include <hwcore/mm_context.h> |
#include <sos/process.h> | #include <sos/process.h> |
| |
| #include <drivers/bochs.h> |
| #include <drivers/x86_videomem.h> |
| |
#include <hwcore/irq.h> | #include <hwcore/irq.h> |
| |
#include "thread.h" | #include "thread.h" |
|
|
static struct sos_kslab_cache *cache_thread; | static struct sos_kslab_cache *cache_thread; |
| |
| |
| /** |
|  * (Forward declaration) Helper function to change the MMU config of 
| * the current executing thread. Analogous to function |
| * sos_thread_change_current_mm_context() of article 7 |
| */ |
| static sos_ret_t change_current_mm_context(struct sos_mm_context *mm_ctxt); |
| |
| |
struct sos_thread *sos_thread_get_current() | struct sos_thread *sos_thread_get_current() |
{ | { |
SOS_ASSERT_FATAL(current_thread->state == SOS_THR_RUNNING); | SOS_ASSERT_FATAL(current_thread->state == SOS_THR_RUNNING); |
|
|
} | } |
| |
| |
struct sos_thread * | /** |
sos_create_user_thread(const char *name, | * Helper function to create a new user thread. If model_thread is |
struct sos_process *process, | * given, then the new thread will be the copy of this |
sos_uaddr_t user_initial_PC, | * thread. Otherwise the thread will have its initial SP/PC correctly |
sos_ui32_t user_start_arg, | * initialized with the user_initial_PC/SP arguments |
sos_uaddr_t user_initial_SP, | */ |
sos_sched_priority_t priority) | static struct sos_thread * |
| create_user_thread(const char *name, |
| struct sos_process *process, |
| const struct sos_thread * model_thread, |
| const struct sos_cpu_state * model_uctxt, |
| sos_uaddr_t user_initial_PC, |
| sos_ui32_t user_start_arg1, |
| sos_ui32_t user_start_arg2, |
| sos_uaddr_t user_initial_SP, |
| sos_sched_priority_t priority) |
__label__ undo_creation; | __label__ undo_creation; |
sos_ui32_t flags; | sos_ui32_t flags; |
struct sos_thread *new_thread; | struct sos_thread *new_thread; |
| |
if (! SOS_SCHED_PRIO_IS_VALID(priority)) | if (model_thread) |
return NULL; | { |
| SOS_ASSERT_FATAL(model_uctxt); |
| } |
| else |
| { |
| if (! SOS_SCHED_PRIO_IS_VALID(priority)) |
| return NULL; |
| } |
/* For a user thread, the process must be given */ | /* For a user thread, the process must be given */ |
if (! process) | if (! process) |
|
|
/* Initialize the thread attributes */ | /* Initialize the thread attributes */ |
strzcpy(new_thread->name, ((name)?name:"[NONAME]"), SOS_THR_MAX_NAMELEN); | strzcpy(new_thread->name, ((name)?name:"[NONAME]"), SOS_THR_MAX_NAMELEN); |
new_thread->state = SOS_THR_CREATED; | new_thread->state = SOS_THR_CREATED; |
new_thread->priority = priority; | if (model_thread) |
| new_thread->priority = model_thread->priority; |
| else |
| new_thread->priority = priority; |
/* Allocate the stack for the new thread */ | /* Allocate the stack for the new thread */ |
new_thread->kernel_stack_base_addr = sos_kmalloc(SOS_THREAD_KERNEL_STACK_SIZE, 0); | new_thread->kernel_stack_base_addr = sos_kmalloc(SOS_THREAD_KERNEL_STACK_SIZE, 0); |
|
|
goto undo_creation; | goto undo_creation; |
| |
/* Initialize the CPU context of the new thread */ | /* Initialize the CPU context of the new thread */ |
if (SOS_OK | if (model_thread) |
!= sos_cpu_ustate_init(& new_thread->cpu_state, | { |
user_initial_PC, | if (SOS_OK |
user_start_arg, | != sos_cpu_ustate_duplicate(& new_thread->cpu_state, |
user_initial_SP, | model_uctxt, |
new_thread->kernel_stack_base_addr, | user_start_arg1, |
new_thread->kernel_stack_size)) | new_thread->kernel_stack_base_addr, |
goto undo_creation; | new_thread->kernel_stack_size)) |
| goto undo_creation; |
| } |
| else |
| { |
| if (SOS_OK |
| != sos_cpu_ustate_init(& new_thread->cpu_state, |
| user_initial_PC, |
| user_start_arg1, |
| user_start_arg2, |
| user_initial_SP, |
| new_thread->kernel_stack_base_addr, |
| new_thread->kernel_stack_size)) |
| goto undo_creation; |
| } |
/* Attach the new thread to the process */ | /* Attach the new thread to the process */ |
if (SOS_OK != sos_process_register_thread(process, new_thread)) | if (SOS_OK != sos_process_register_thread(process, new_thread)) |
|
|
} | } |
| |
| |
| struct sos_thread * |
| sos_create_user_thread(const char *name, |
| struct sos_process *process, |
| sos_uaddr_t user_initial_PC, |
| sos_ui32_t user_start_arg1, |
| sos_ui32_t user_start_arg2, |
| sos_uaddr_t user_initial_SP, |
| sos_sched_priority_t priority) |
| { |
| return create_user_thread(name, process, NULL, NULL, |
| user_initial_PC, |
| user_start_arg1, |
| user_start_arg2, |
| user_initial_SP, |
| priority); |
| } |
| |
| |
| /** |
| * Create a new user thread, copy of the given user thread with the |
| * given user context |
| */ |
| struct sos_thread * |
| sos_duplicate_user_thread(const char *name, |
| struct sos_process *process, |
| const struct sos_thread * model_thread, |
| const struct sos_cpu_state * model_uctxt, |
| sos_ui32_t retval) |
| { |
| return create_user_thread(name, process, model_thread, model_uctxt, |
| 0, retval, 0, 0, 0); |
| } |
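| 
| /* 
|  * Note the argument shuffling above: the caller's retval rides in 
|  * the user_start_arg1 slot, which ends up in the copied context's 
|  * EAX. This is how the FORK syscall arranges for fork() to return 
|  * 0 in the child while every other register matches the parent's. 
|  */ 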
| |
| |
/** | /** |
* Helper function to switch to the correct MMU configuration to suit | * Helper function to switch to the correct MMU configuration to suit |
* the_thread's needs. | * the_thread's needs. |
|
|
| |
/* If the thread squats an address space, release it */ | /* If the thread squats an address space, release it */ |
if (thr->squatted_mm_context) | if (thr->squatted_mm_context) |
SOS_ASSERT_FATAL(SOS_OK == sos_thread_change_current_mm_context(NULL)); | SOS_ASSERT_FATAL(SOS_OK == change_current_mm_context(NULL)); |
/* For a user thread: remove the thread from the process threads' list */ | /* For a user thread: remove the thread from the process threads' list */ |
if (thr->process) | if (thr->process) |
|
|
} | } |
| |
| |
| void sos_thread_dump_backtrace(sos_bool_t on_console, |
| sos_bool_t on_bochs) |
| { |
| sos_vaddr_t stack_bottom = current_thread->kernel_stack_base_addr; |
| sos_size_t stack_size = current_thread->kernel_stack_size; |
| |
| static void backtracer(sos_vaddr_t PC, |
| sos_vaddr_t params, |
| sos_ui32_t depth, |
| void *custom_arg) |
| { |
| sos_ui32_t invalid = 0xffffffff, *arg1, *arg2, *arg3, *arg4; |
| |
| 	/* Get the addresses of the first 4 arguments from the 
| 	   frame. Depending on how many arguments the function actually 
| 	   takes, 0 to 4 of them are meaningful */ 
| arg1 = (sos_ui32_t*)params; |
| arg2 = (sos_ui32_t*)(params+4); |
| arg3 = (sos_ui32_t*)(params+8); |
| arg4 = (sos_ui32_t*)(params+12); |
| |
| /* Make sure the addresses of these arguments fit inside the |
| stack boundaries */ |
| #define INTERVAL_OK(b,v,u) ( ((b) <= (sos_vaddr_t)(v)) \ |
| && ((sos_vaddr_t)(v) < (u)) ) |
| if (!INTERVAL_OK(stack_bottom, arg1, stack_bottom + stack_size)) |
| arg1 = &invalid; |
| if (!INTERVAL_OK(stack_bottom, arg2, stack_bottom + stack_size)) |
| arg2 = &invalid; |
| if (!INTERVAL_OK(stack_bottom, arg3, stack_bottom + stack_size)) |
| arg3 = &invalid; |
| if (!INTERVAL_OK(stack_bottom, arg4, stack_bottom + stack_size)) |
| arg4 = &invalid; |
| |
| /* Print the function context for this frame */ |
| if (on_bochs) |
| sos_bochs_printf("[%d] PC=0x%x arg1=0x%x arg2=0x%x arg3=0x%x\n", |
| (unsigned)depth, (unsigned)PC, |
| (unsigned)*arg1, (unsigned)*arg2, |
| (unsigned)*arg3); |
| |
| if (on_console) |
| sos_x86_videomem_printf(23-depth, 3, |
| SOS_X86_VIDEO_BG_BLUE |
| | SOS_X86_VIDEO_FG_LTGREEN, |
| "[%d] PC=0x%x arg1=0x%x arg2=0x%x arg3=0x%x arg4=0x%x", |
| (unsigned)depth, PC, |
| (unsigned)*arg1, (unsigned)*arg2, |
| (unsigned)*arg3, (unsigned)*arg4); |
| |
| } |
| |
| sos_backtrace(NULL, 15, stack_bottom, stack_size, |
| backtracer, NULL); |
| } |
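| 
| /* 
|  * Frame layout the backtracer above relies on (standard ia32 
|  * calling convention; a sketch, higher addresses on top): 
|  * 
|  *   params + 12  ->  arg4 
|  *   params + 8   ->  arg3 
|  *   params + 4   ->  arg2 
|  *   params       ->  arg1 
|  *   params - 4   ->  return address 
|  *   params - 8   ->  saved EBP of the caller 
|  */ 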
| |
| |
| |
/* ********************************************** | /* ********************************************** |
* Restricted functions | * Restricted functions |
*/ | */ |
| |
sos_ret_t | |
sos_thread_change_current_mm_context(struct sos_mm_context *mm_ctxt) | |
{ | |
sos_ui32_t flags; | |
| static sos_ret_t |
| change_current_mm_context(struct sos_mm_context *mm_ctxt) |
| { |
/* Retrieve the previous mm context */ | /* Retrieve the previous mm context */ |
struct sos_mm_context * prev_mm_ctxt | struct sos_mm_context * prev_mm_ctxt |
= current_thread->squatted_mm_context; | = current_thread->squatted_mm_context; |
| |
/* We should either select a new squatted_mm_context or revert to | |
the default */ | |
if (mm_ctxt != NULL) | |
SOS_ASSERT_FATAL(prev_mm_ctxt == NULL); | |
else | |
SOS_ASSERT_FATAL(prev_mm_ctxt != NULL); | |
| |
sos_disable_IRQs(flags); | |
| |
current_thread->squatted_mm_context = mm_ctxt; | current_thread->squatted_mm_context = mm_ctxt; |
| |
|
|
the squatted_mm_context field of | the squatted_mm_context field of |
the thread any more */ | the thread any more */ |
| |
| return SOS_OK; |
| } |
| |
| |
| sos_ret_t |
| sos_thread_prepare_user_space_access(struct sos_umem_vmm_as * dest_as, |
| sos_vaddr_t fixup_retvaddr) |
| { |
| sos_ret_t retval; |
| sos_ui32_t flags; |
| |
| if (! dest_as) |
| { |
| /* Thread is not a user thread: do nothing */ |
| if (! current_thread->process) |
| return -SOS_EINVAL; |
| |
| dest_as = sos_process_get_address_space(current_thread->process); |
| } |
| else |
| 	/* Don't allow access to an address space other than that of 
| 	   the current thread when page faults are allowed ! */ 
| SOS_ASSERT_FATAL(! fixup_retvaddr); |
| |
| sos_disable_IRQs(flags); |
| SOS_ASSERT_FATAL(NULL == current_thread->squatted_mm_context); |
| SOS_ASSERT_FATAL(0 == current_thread->fixup_uaccess.return_vaddr); |
| |
| /* Change the MMU configuration and init the fixup return address */ |
| retval = change_current_mm_context(sos_umem_vmm_get_mm_context(dest_as)); |
| if (SOS_OK == retval) |
| { |
| current_thread->fixup_uaccess.return_vaddr = fixup_retvaddr; |
| current_thread->fixup_uaccess.faulted_uaddr = 0; |
| } |
| |
sos_restore_IRQs(flags); | sos_restore_IRQs(flags); |
| return retval; |
| } |
| |
return SOS_OK; | |
| sos_ret_t |
| sos_thread_end_user_space_access(void) |
| { |
| sos_ret_t retval; |
| sos_ui32_t flags; |
| |
| sos_disable_IRQs(flags); |
| SOS_ASSERT_FATAL(NULL != current_thread->squatted_mm_context); |
| |
| /* Don't impose anything regarding the current MMU configuration anymore */ |
| retval = change_current_mm_context(NULL); |
| current_thread->fixup_uaccess.return_vaddr = 0; |
| current_thread->fixup_uaccess.faulted_uaddr = 0; |
| |
| sos_restore_IRQs(flags); |
| return retval; |
| } 
| |
| |
/tmp/sos-code-article7/sos/umem_vmm.c (1970-01-01 01:00:00.000000000 +0100
) |
|
../sos-code-article7.5/sos/umem_vmm.c (2005-04-27 20:17:18.000000000 +0200
) |
|
|
|
| /* Copyright (C) 2005 David Decotigny |
| |
| This program is free software; you can redistribute it and/or |
| modify it under the terms of the GNU General Public License |
| as published by the Free Software Foundation; either version 2 |
| of the License, or (at your option) any later version. |
| |
| This program is distributed in the hope that it will be useful, |
| but WITHOUT ANY WARRANTY; without even the implied warranty of |
| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| GNU General Public License for more details. |
| |
| You should have received a copy of the GNU General Public License |
| along with this program; if not, write to the Free Software |
| Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, |
| USA. |
| */ |
| |
| #include <sos/assert.h> |
| #include <sos/list.h> |
| #include <sos/physmem.h> |
| #include <sos/kmem_slab.h> |
| #include <drivers/bochs.h> |
| #include <hwcore/mm_context.h> |
| #include <hwcore/paging.h> |
| #include <drivers/zero.h> |
| |
| #include "umem_vmm.h" |
| |
| |
| struct sos_umem_vmm_as |
| { |
| /** The process that owns this address space */ |
| struct sos_process * process; |
| |
| /** The MMU configuration of this address space */ |
| struct sos_mm_context * mm_context; |
| |
| /** The list of VRs in this address space */ |
| struct sos_umem_vmm_vr * list_vr; |
| |
| /** Heap location */ |
| sos_uaddr_t heap_start; |
| sos_size_t heap_size; /**< Updated by sos_umem_vmm_brk() */ |
| |
| /* Memory usage statistics */ |
| sos_size_t phys_total; /* shared + private */ |
| struct vm_usage |
| { |
| sos_size_t overall; |
|     sos_size_t ro, rw, code; /* read-only, read/write, executable */ 
| } vm_total, vm_shrd; |
| |
| /* Page fault counters */ |
| sos_size_t pgflt_cow; |
| sos_size_t pgflt_page_in; |
| sos_size_t pgflt_invalid; |
| }; |
| |
| |
| struct sos_umem_vmm_vr |
| { |
| /** The address space owning this VR */ |
| struct sos_umem_vmm_as *address_space; |
| |
| /** The location of the mapping in user space */ |
| sos_uaddr_t start; |
| sos_size_t size; |
| |
| /** What accesses are allowed (read, write, exec): @see |
| SOS_VM_MAP_PROT_* flags in hwcore/paging.h */ |
| sos_ui32_t access_rights; |
| |
| /** Flags of the VR. Allowed flags: |
| * - SOS_VR_MAP_SHARED |
| */ |
| sos_ui32_t flags; |
| |
| /** |
| * The callbacks for the VR called along map/unmapping of the |
| * resource |
| */ |
| struct sos_umem_vmm_vr_ops *ops; |
| |
| /** Description of the resource being mapped, if any */ |
| struct sos_umem_vmm_mapped_resource *mapped_resource; |
| sos_luoffset_t offset_in_resource; |
| |
| /** The VRs of an AS are linked together and are accessible by way |
| of as->list_vr */ |
| struct sos_umem_vmm_vr *prev_in_as, *next_in_as; |
| |
| /** The VRs mapping a given resource are linked together and are |
| accessible by way of mapped_resource->list_vr */ |
| struct sos_umem_vmm_vr *prev_in_mapped_resource, *next_in_mapped_resource; |
| }; |
| |
| |
| /* |
| * We use special slab caches to allocate AS and VR data structures |
| */ |
| static struct sos_kslab_cache * cache_of_as; |
| static struct sos_kslab_cache * cache_of_vr; |
| |
| |
| /** Temporary function to debug: list the VRs of the given As */ |
| void sos_dump_as(const struct sos_umem_vmm_as * as, const char *str) |
| { |
| struct sos_umem_vmm_vr *vr; |
| int nb_vr; |
| |
| sos_bochs_printf("AS %p - %s:\n", as, str); |
| sos_bochs_printf(" physical mem: %x\n", |
| as->phys_total); |
| sos_bochs_printf(" VM (all/ro+rw/exec) tot:%x/%x+%x/%x shrd:%x/%x+%x/%x\n", |
| as->vm_total.overall, |
| as->vm_total.ro, as->vm_total.rw, as->vm_total.code, |
| as->vm_shrd.overall, |
| as->vm_shrd.ro, as->vm_shrd.rw, as->vm_shrd.code); |
| sos_bochs_printf(" pgflt cow=%d pgin=%d inv=%d\n", |
| as->pgflt_cow, as->pgflt_page_in, as->pgflt_invalid); |
| list_foreach_named(as->list_vr, vr, nb_vr, prev_in_as, next_in_as) |
| { |
| sos_bochs_printf(" VR[%d]=%x: [%x,%x[ (sz=%x) mr=(%x)+%llx %c%c%c fl=%x\n", |
| nb_vr, (unsigned)vr, |
| vr->start, vr->start + vr->size, vr->size, |
| (unsigned)vr->mapped_resource, |
| vr->offset_in_resource, |
| (vr->access_rights & SOS_VM_MAP_PROT_READ)?'r':'-', |
| (vr->access_rights & SOS_VM_MAP_PROT_WRITE)?'w':'-', |
| (vr->access_rights & SOS_VM_MAP_PROT_EXEC)?'x':'-', |
| (unsigned)vr->flags); |
| } |
| sos_bochs_printf("FIN (%s)\n", str); |
| } |
| |
| |
| /** |
| * Physical address of THE page (full of 0s) used for anonymous |
| * mappings |
| */ |
| sos_paddr_t sos_zero_page = 0 /* Initial value prior to allocation */; |
| |
| |
| /* |
| * Helper functions defined at the bottom of the file |
| */ |
| |
| /** |
| * Helper function to retrieve the first VR to have a vr->end >= uaddr |
| */ |
| static struct sos_umem_vmm_vr * |
| find_enclosing_or_next_vr(struct sos_umem_vmm_as * as, |
| sos_uaddr_t uaddr); |
| |
| |
| /** |
| * Helper function to retrieve the first VR that overlaps the given |
| * interval, if any |
| */ |
| static struct sos_umem_vmm_vr * |
| find_first_intersecting_vr(struct sos_umem_vmm_as * as, |
| sos_uaddr_t start_uaddr, sos_size_t size); |
| |
| |
| /** |
| * Helper function to find first address where there is enough |
| * space. Begin to look for such an interval at or after the given |
| * address |
| * |
|  * @param hint_uaddr The address where to begin the scan, or NULL 
| */ |
| static sos_uaddr_t |
| find_first_free_interval(struct sos_umem_vmm_as * as, |
| sos_uaddr_t hint_uaddr, sos_size_t size); |
| |
| |
| /** Called each time a VR of the AS changes. Does not deal with any 
|     underlying physical mapping/unmapping, COW, etc. */ 
| static void |
| as_account_change_of_vr_protection(struct sos_umem_vmm_as * as, |
| sos_bool_t is_shared, |
| sos_size_t size, |
| sos_ui32_t prev_access_rights, |
| sos_ui32_t new_access_rights); |
| |
| |
| sos_ret_t sos_umem_vmm_subsystem_setup() |
| { |
| sos_vaddr_t vaddr_zero_page; |
| |
| /* Allocate a new kernel physical page mapped into kernel space and |
| reset it with 0s */ |
| vaddr_zero_page = sos_kmem_vmm_alloc(1, SOS_KMEM_VMM_MAP); |
| if (vaddr_zero_page == (sos_vaddr_t)NULL) |
| return -SOS_ENOMEM; |
| memset((void*)vaddr_zero_page, 0x0, SOS_PAGE_SIZE); |
| |
|   /* Keep a reference to the underlying physical page... */ 
| sos_zero_page = sos_paging_get_paddr(vaddr_zero_page); |
| SOS_ASSERT_FATAL(NULL != (void*)sos_zero_page); |
| sos_physmem_ref_physpage_at(sos_zero_page); |
| |
| /* ... but it is not needed in kernel space anymore, so we can |
| safely unmap it from kernel space */ |
| sos_paging_unmap(vaddr_zero_page); |
| |
| /* Allocate the VR/AS caches */ |
| cache_of_as |
| = sos_kmem_cache_create("Address space structures", |
| sizeof(struct sos_umem_vmm_as), |
| 1, 0, |
| SOS_KSLAB_CREATE_MAP |
| | SOS_KSLAB_CREATE_ZERO); |
| if (! cache_of_as) |
| { |
| sos_physmem_unref_physpage(sos_zero_page); |
| return -SOS_ENOMEM; |
| } |
| |
| cache_of_vr |
| = sos_kmem_cache_create("Virtual Region structures", |
| sizeof(struct sos_umem_vmm_vr), |
| 1, 0, |
| SOS_KSLAB_CREATE_MAP |
| | SOS_KSLAB_CREATE_ZERO); |
| if (! cache_of_vr) |
| { |
| sos_physmem_unref_physpage(sos_zero_page); |
| sos_kmem_cache_destroy(cache_of_as); |
| return -SOS_ENOMEM; |
| } |
| |
| return SOS_OK; |
| } |
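| 
| /* 
|  * Why the map/ref/unmap dance above (a sketch of the intent): 
|  * sos_paging_unmap() drops the reference that the kernel mapping 
|  * held on the frame, so without the explicit 
|  * sos_physmem_ref_physpage_at() the zeroed page would be reclaimed. 
|  * After setup, sos_zero_page is a physical frame with no virtual 
|  * mapping at all, kept alive by this single reference; the zero 
|  * device can then map it read-only behind every anonymous page 
|  * that has not been written to yet. 
|  */ 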
| |
| |
| struct sos_umem_vmm_as * |
| sos_umem_vmm_create_empty_as(struct sos_process *owner) |
| { |
| struct sos_umem_vmm_as * as |
| = (struct sos_umem_vmm_as *) sos_kmem_cache_alloc(cache_of_as, 0); |
| if (! as) |
| return NULL; |
| |
| as->mm_context = sos_mm_context_create(); |
| if (NULL == as->mm_context) |
| { |
| /* Error */ |
| sos_kmem_cache_free((sos_vaddr_t)as); |
| return NULL; |
| } |
| |
| as->process = owner; |
| return as; |
| } |
| |
| |
| struct sos_umem_vmm_as * |
| sos_umem_vmm_duplicate_current_thread_as(struct sos_process *owner) |
| { |
| __label__ undo_creation; |
| struct sos_umem_vmm_as * my_as; |
| struct sos_umem_vmm_vr * model_vr; |
| int nb_vr; |
| |
| struct sos_umem_vmm_as * new_as |
| = (struct sos_umem_vmm_as *) sos_kmem_cache_alloc(cache_of_as, 0); |
| if (! new_as) |
| return NULL; |
| |
| my_as = sos_process_get_address_space(sos_thread_get_current()->process); |
| new_as->process = owner; |
| list_init_named(new_as->list_vr, prev_in_as, next_in_as); |
| |
| /* |
|    * Switch to the current thread's mm_context, as duplicating it implies 
| * being able to configure some of its mappings as read-only (for |
| * COW) |
| */ |
| SOS_ASSERT_FATAL(SOS_OK |
| == sos_thread_prepare_user_space_access(my_as, |
| (sos_vaddr_t) |
| NULL)); |
| |
| /* Copy the virtual regions */ |
| list_foreach_named(my_as->list_vr, model_vr, nb_vr, prev_in_as, next_in_as) |
| { |
| struct sos_umem_vmm_vr * vr; |
| |
| /* Prepare COW on the read/write private mappings */ |
| if ( !(model_vr->flags & SOS_VR_MAP_SHARED) |
| && (model_vr->access_rights & SOS_VM_MAP_PROT_WRITE) ) |
| { |
| /* Mark the underlying physical pages (if any) as |
| read-only */ |
| SOS_ASSERT_FATAL(SOS_OK |
| == sos_paging_prepare_COW(model_vr->start, |
| model_vr->size)); |
| } |
| |
| /* Allocate a new virtual region and copy the 'model' into it */ |
| vr = (struct sos_umem_vmm_vr *) sos_kmem_cache_alloc(cache_of_vr, 0); |
| if (! vr) |
| goto undo_creation; |
| memcpy(vr, model_vr, sizeof(*vr)); |
| vr->address_space = new_as; |
| |
| /* Signal the "new" mapping to the underlying VR mapper */ |
| if (vr->ops && vr->ops->ref) |
| vr->ops->ref(vr); |
| |
| /* Insert the new VR into the new AS */ |
| list_add_tail_named(new_as->list_vr, vr, prev_in_as, next_in_as); |
| |
| /* Insert the new VR into the list of mappings of the resource */ |
| list_add_tail_named(model_vr->mapped_resource->list_vr, vr, |
| prev_in_mapped_resource, |
| next_in_mapped_resource); |
| } |
| |
| /* Now copy the current MMU configuration */ |
| new_as->mm_context = sos_mm_context_duplicate(my_as->mm_context); |
| if (NULL == new_as->mm_context) |
| goto undo_creation; |
| |
| /* Correct behavior */ |
| new_as->heap_start = my_as->heap_start; |
| new_as->heap_size = my_as->heap_size; |
| new_as->phys_total = my_as->phys_total; |
| memcpy(& new_as->vm_total, & my_as->vm_total, sizeof(struct vm_usage)); |
| memcpy(& new_as->vm_shrd, & my_as->vm_shrd, sizeof(struct vm_usage)); |
| SOS_ASSERT_FATAL(SOS_OK == sos_thread_end_user_space_access()); |
| return new_as; |
| |
| /* Handle erroneous behavior */ |
| undo_creation: |
| SOS_ASSERT_FATAL(SOS_OK == sos_thread_end_user_space_access()); |
| sos_umem_vmm_delete_as(new_as); |
| return NULL; |
| } |
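| 
| /* 
|  * Timeline of a fork() under this scheme (a sketch): right after 
|  * duplication, parent and child share every physical page of the 
|  * private read/write VRs, all remapped read-only by 
|  * sos_paging_prepare_COW(). The first write from either side then 
|  * raises a page fault; the handler gives the writer its own copy 
|  * of the page and restores write access (accounted in pgflt_cow). 
|  * Until that first write, duplicating the address space copies no 
|  * page contents at all. 
|  */ 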
| |
| |
| sos_ret_t |
| sos_umem_vmm_delete_as(struct sos_umem_vmm_as * as) |
| { |
| while(! list_is_empty_named(as->list_vr, prev_in_as, next_in_as)) |
| { |
| struct sos_umem_vmm_vr * vr; |
| vr = list_get_head_named(as->list_vr, prev_in_as, next_in_as); |
| |
| /* Remove the vr from the lists */ |
| list_pop_head_named(as->list_vr, prev_in_as, next_in_as); |
| list_delete_named(vr->mapped_resource->list_vr, vr, |
| prev_in_mapped_resource, |
| next_in_mapped_resource); |
| |
| 	/* Signal to the underlying VR mapper that the mapping is 
| 	   removed */ 
| if (vr->ops) |
| { |
| if (vr->ops->unmap) |
| vr->ops->unmap(vr, vr->start, vr->size); |
| if (vr->ops->unref) |
| vr->ops->unref(vr); |
| } |
| |
| sos_kmem_cache_free((sos_vaddr_t)vr); |
| } |
| |
| /* Release MMU configuration */ |
| if (as->mm_context) |
| sos_mm_context_unref(as->mm_context); |
| |
|   /* Now deallocate the main address space structure */ 
| sos_kmem_cache_free((sos_vaddr_t)as); |
| |
| return SOS_OK; |
| } |
| |
| |
| struct sos_process * |
| sos_umem_vmm_get_process(struct sos_umem_vmm_as * as) |
| { |
| return as->process; |
| } |
| |
| |
| struct sos_mm_context * |
| sos_umem_vmm_get_mm_context(struct sos_umem_vmm_as * as) |
| { |
| return as->mm_context; |
| } |
| |
| |
| struct sos_umem_vmm_vr * |
| sos_umem_vmm_get_vr_at_address(struct sos_umem_vmm_as * as, |
| sos_uaddr_t uaddr) |
| { |
| struct sos_umem_vmm_vr * vr; |
| vr = find_enclosing_or_next_vr(as, uaddr); |
| if (! vr) |
| return NULL; |
| |
|   /* Ok uaddr <= vr->end, but do we have uaddr >= vr->start ? */ 
| if (uaddr < vr->start) |
| return NULL; |
| |
| return vr; |
| } |
| |
| |
| struct sos_umem_vmm_as * |
| sos_umem_vmm_get_as_of_vr(struct sos_umem_vmm_vr * vr) |
| { |
| return vr->address_space; |
| } |
| |
| |
| struct sos_umem_vmm_vr_ops * |
| sos_umem_vmm_get_ops_of_vr(struct sos_umem_vmm_vr * vr) |
| { |
| return vr->ops; |
| } |
| |
| |
| sos_ui32_t sos_umem_vmm_get_prot_of_vr(struct sos_umem_vmm_vr * vr) |
| { |
| return vr->access_rights; |
| } |
| |
| |
| sos_ui32_t sos_umem_vmm_get_flags_of_vr(struct sos_umem_vmm_vr * vr) |
| { |
| return vr->flags; |
| } |
| |
| |
| struct sos_umem_vmm_mapped_resource * |
| sos_umem_vmm_get_mapped_resource_of_vr(struct sos_umem_vmm_vr * vr) |
| { |
| return vr->mapped_resource; |
| } |
| |
| |
| sos_uaddr_t sos_umem_vmm_get_start_of_vr(struct sos_umem_vmm_vr * vr) |
| { |
| return vr->start; |
| } |
| |
| |
| sos_size_t sos_umem_vmm_get_size_of_vr(struct sos_umem_vmm_vr * vr) |
| { |
| return vr->size; |
| } |
| |
| |
| sos_luoffset_t sos_umem_vmm_get_offset_in_resource(struct sos_umem_vmm_vr * vr) |
| { |
| return vr->offset_in_resource; |
| } |
| |
| |
| sos_ret_t |
| sos_umem_vmm_set_ops_of_vr(struct sos_umem_vmm_vr * vr, |
| struct sos_umem_vmm_vr_ops * ops) |
| { |
|   /* Don't allow overwriting any preceding VR ops */ 
| SOS_ASSERT_FATAL(NULL == vr->ops); |
| |
| vr->ops = ops; |
| return SOS_OK; |
| } |
| |
| |
| /** |
| * When resize asks to map the resource elsewhere, make sure not to |
| * overwrite the offset_in_resource field |
| */ |
| #define INTERNAL_MAP_CALLED_FROM_MREMAP (1 << 8) |
| |
| sos_ret_t |
| sos_umem_vmm_map(struct sos_umem_vmm_as * as, |
| sos_uaddr_t * /*in/out*/uaddr, sos_size_t size, |
| sos_ui32_t access_rights, |
| sos_ui32_t flags, |
| struct sos_umem_vmm_mapped_resource * resource, |
| sos_luoffset_t offset_in_resource) |
| { |
| __label__ return_mmap; |
| sos_uaddr_t hint_uaddr; |
| struct sos_umem_vmm_vr *prev_vr, *next_vr, *vr, *preallocated_vr; |
| sos_bool_t merge_with_preceding, merge_with_next, used_preallocated_vr; |
| sos_bool_t internal_map_called_from_mremap |
| = (flags & INTERNAL_MAP_CALLED_FROM_MREMAP); |
| |
| sos_ret_t retval = SOS_OK; |
| used_preallocated_vr = FALSE; |
| hint_uaddr = *uaddr; |
| |
| /* Default mapping address is NULL */ |
| *uaddr = (sos_vaddr_t)NULL; |
| |
| if (! resource) |
| return -SOS_EINVAL; |
| if (! resource->mmap) |
| return -SOS_EPERM; |
| |
| if (! SOS_IS_PAGE_ALIGNED(hint_uaddr)) |
| return -SOS_EINVAL; |
| |
| if (size <= 0) |
| return -SOS_EINVAL; |
| size = SOS_PAGE_ALIGN_SUP(size); |
| |
| if (flags & SOS_VR_MAP_SHARED) |
| { |
| /* Make sure the mapped resource allows the required protection flags */ |
| if ( ( (access_rights & SOS_VM_MAP_PROT_READ) |
| && !(resource->allowed_access_rights & SOS_VM_MAP_PROT_READ) ) |
| || ( (access_rights & SOS_VM_MAP_PROT_WRITE) |
| && !(resource->allowed_access_rights & SOS_VM_MAP_PROT_WRITE) ) |
| || ( (access_rights & SOS_VM_MAP_PROT_EXEC) |
| && !(resource->allowed_access_rights & SOS_VM_MAP_PROT_EXEC)) ) |
| return -SOS_EPERM; |
| } |
| |
| /* Sanity checks over the offset_in_resource parameter */ |
| if ( !internal_map_called_from_mremap |
| && ( resource->flags & SOS_MAPPED_RESOURCE_ANONYMOUS ) ) |
| /* Initial offset ignored for anonymous mappings */ |
| { |
| /* Nothing to check */ |
| } |
| |
| /* Make sure that the offset in resource won't overflow */ |
| else if (offset_in_resource + size <= offset_in_resource) |
| return -SOS_EINVAL; |
| |
| /* Filter out unsupported flags */ |
| access_rights &= (SOS_VM_MAP_PROT_READ |
| | SOS_VM_MAP_PROT_WRITE |
| | SOS_VM_MAP_PROT_EXEC); |
| flags &= (SOS_VR_MAP_SHARED |
| | SOS_VR_MAP_FIXED); |
| |
| /* Pre-allocate a new VR: once we have found a valid slot inside 
|    the VR list, we don't want the list to be altered by another 
|    process */ 
| preallocated_vr |
| = (struct sos_umem_vmm_vr *)sos_kmem_cache_alloc(cache_of_vr, 0); |
| if (! preallocated_vr) |
| return -SOS_ENOMEM; |
| |
| /* Compute the user address of the new mapping */ |
| if (flags & SOS_VR_MAP_FIXED) |
| { |
| /* |
| * The address is imposed |
| */ |
| |
| /* Make sure the hint_uaddr hint is valid */ |
| if (hint_uaddr < SOS_PAGING_BASE_USER_ADDRESS) |
| { retval = -SOS_EINVAL; goto return_mmap; } |
| if (hint_uaddr > SOS_PAGING_TOP_USER_ADDRESS - size) |
| { retval = -SOS_EINVAL; goto return_mmap; } |
| |
| /* Unmap any overlapped VR */ |
| retval = sos_umem_vmm_unmap(as, hint_uaddr, size); |
| if (SOS_OK != retval) |
| { goto return_mmap; } |
| } |
| else |
| { |
| /* |
| * A free range has to be determined |
| */ |
| |
| /* Find a suitable free VR */ |
| hint_uaddr = find_first_free_interval(as, hint_uaddr, size); |
| if (! hint_uaddr) |
| { retval = -SOS_ENOMEM; goto return_mmap; } |
| } |
| |
| /* For anonymous resource mappings, set the initial |
| offset_in_resource to the initial virtual start address in user |
| space */ |
| if ( !internal_map_called_from_mremap |
| && (resource->flags & SOS_MAPPED_RESOURCE_ANONYMOUS ) ) |
| offset_in_resource = hint_uaddr; |
| |
| /* Lookup next and previous VR, if any. This will allow us to merge |
| the regions, when possible */ |
| next_vr = find_enclosing_or_next_vr(as, hint_uaddr); |
| if (next_vr) |
| { |
| /* Find previous VR, if any */ |
| prev_vr = next_vr->prev_in_as; |
| 	 /* The list is circular: it may happen that we looped over the 
| tail of the list (ie the list is a singleton) */ |
| if (prev_vr->start > hint_uaddr) |
| prev_vr = NULL; /* No preceding VR */ |
| } |
| else |
| { |
| /* Otherwise we went beyond the last VR */ |
| prev_vr = list_get_tail_named(as->list_vr, prev_in_as, next_in_as); |
| } |
| |
| /* Merge with preceding VR ? */ |
| merge_with_preceding |
| = ( (NULL != prev_vr) |
| && (prev_vr->mapped_resource == resource) |
| && (prev_vr->offset_in_resource + prev_vr->size == offset_in_resource) |
| && (prev_vr->start + prev_vr->size == hint_uaddr) |
| && (prev_vr->flags == flags) |
| && (prev_vr->access_rights == access_rights) ); |
| |
| /* Merge with next VR ? */ |
| merge_with_next |
| = ( (NULL != next_vr) |
| && (next_vr->mapped_resource == resource) |
| && (offset_in_resource + size == next_vr->offset_in_resource) |
| && (hint_uaddr + size == next_vr->start) |
| && (next_vr->flags == flags) |
| && (next_vr->access_rights == access_rights) ); |
| |
| if (merge_with_preceding && merge_with_next) |
| { |
| /* Widen the prev_vr VR to encompass both the new VR and the next_vr */ |
| vr = prev_vr; |
| vr->size += size + next_vr->size; |
| |
| /* Remove the next_vr VR */ |
| list_delete_named(as->list_vr, next_vr, prev_in_as, next_in_as); |
| list_delete_named(next_vr->mapped_resource->list_vr, next_vr, |
| prev_in_mapped_resource, next_in_mapped_resource); |
| |
| if (next_vr->ops && next_vr->ops->unref) |
| next_vr->ops->unref(next_vr); |
| |
| 	  sos_kmem_cache_free((sos_vaddr_t) next_vr); 
| } |
| else if (merge_with_preceding) |
| { |
| /* Widen the prev_vr VR to encompass the new VR */ |
| vr = prev_vr; |
| vr->size += size; |
| } |
| else if (merge_with_next) |
| { |
| /* Widen the next_vr VR to encompass the new VR */ |
| vr = next_vr; |
| vr->start -= size; |
| vr->size += size; |
| } |
| else |
| { |
| /* Allocate a brand new VR and insert it into the list */ |
| |
| vr = preallocated_vr; |
| used_preallocated_vr = TRUE; |
| |
| vr->start = hint_uaddr; |
| vr->size = size; |
| vr->access_rights = access_rights; |
| vr->flags = flags; |
| vr->mapped_resource = resource; |
| vr->offset_in_resource = offset_in_resource; |
| |
| /* Insert VR in address space */ |
| vr->address_space = as; |
| if (prev_vr) |
| list_insert_after_named(as->list_vr, prev_vr, vr, |
| prev_in_as, next_in_as); |
| else |
| list_add_head_named(as->list_vr, vr, prev_in_as, next_in_as); |
| list_add_tail_named(vr->mapped_resource->list_vr, vr, |
| prev_in_mapped_resource, |
| next_in_mapped_resource); |
| |
| /* Signal the resource we are mapping it */ |
| if (resource && resource->mmap) |
| { |
| retval = resource->mmap(vr); |
| if (SOS_OK != retval) |
| { |
| retval = sos_umem_vmm_unmap(as, vr->start, vr->size); |
| goto return_mmap; |
| } |
| |
| /* The page_in method is MANDATORY for mapped resources */ |
| SOS_ASSERT_FATAL(vr->ops && vr->ops->page_in); |
| } |
| |
| if (vr->ops && vr->ops->ref) |
| vr->ops->ref(vr); |
| } |
| |
| /* Ok, fine, we got it right ! Return the address to the caller */ |
| *uaddr = hint_uaddr; |
| as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED, |
| size, 0, vr->access_rights); |
| retval = SOS_OK; |
| |
| return_mmap: |
| if (! used_preallocated_vr) |
| sos_kmem_vmm_free((sos_vaddr_t)preallocated_vr); |
| |
| return retval; |
| } |
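| /* |
| * A worked example of the merge logic above: suppose an anonymous |
| * VR already covers [0x40001000 .. 0x40003000[ with |
| * offset_in_resource = 0x40001000, and 2 more anonymous pages get |
| * mapped at hint_uaddr = 0x40003000 with the same resource, flags |
| * and access rights. Anonymous mappings take their initial |
| * offset_in_resource from the start address (see above), so |
| * prev_vr->offset_in_resource + prev_vr->size == offset_in_resource |
| * and prev_vr->start + prev_vr->size == hint_uaddr: |
| * merge_with_preceding holds, and the existing VR is simply widened |
| * to [0x40001000 .. 0x40005000[ instead of allocating a new one. |
| */ |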
| |
| |
| sos_ret_t |
| sos_umem_vmm_unmap(struct sos_umem_vmm_as * as, |
| sos_uaddr_t uaddr, sos_size_t size) |
| { |
| struct sos_umem_vmm_vr *vr, *preallocated_vr; |
| sos_bool_t need_to_setup_mmu; |
| sos_bool_t used_preallocated_vr; |
| |
| if (! SOS_IS_PAGE_ALIGNED(uaddr)) |
| return -SOS_EINVAL; |
| if (size <= 0) |
| return -SOS_EINVAL; |
| size = SOS_PAGE_ALIGN_SUP(size); |
| |
| /* Make sure the uaddr is valid */ |
| if (uaddr < SOS_PAGING_BASE_USER_ADDRESS) |
| return -SOS_EINVAL; |
| if (uaddr > SOS_PAGING_TOP_USER_ADDRESS - size) |
| return -SOS_EINVAL; |
| |
| /* In some cases, the unmapping might imply that a VR be split in |
| two. Allocating a new VR can be a blocking operation; blocking |
| right now is harmless, but we must be careful not to block later, |
| while altering the VR lists: that's why we pre-allocate now. */ |
| used_preallocated_vr = FALSE; |
| preallocated_vr |
| = (struct sos_umem_vmm_vr *)sos_kmem_cache_alloc(cache_of_vr, 0); |
| if (! preallocated_vr) |
| return -SOS_ENOMEM; |
| |
| /* Find any VR intersecting with the given interval */ |
| vr = find_first_intersecting_vr(as, uaddr, size); |
| |
| /* Unmap (part of) the VR covered by [uaddr .. uaddr+size[ */ |
| while (NULL != vr) |
| { |
| /* Went past the end of the *circular* list => back at the |
| beginning ? */ |
| if (vr->start + vr->size <= uaddr) |
| /* Yes, stop now */ |
| break; |
| |
| /* Went beyond the region to unmap ? */ |
| if (uaddr + size <= vr->start) |
| /* Yes, stop now */ |
| break; |
| |
| /* VR totally unmapped ? */ |
| if ((vr->start >= uaddr) |
| && (vr->start + vr->size <= uaddr + size)) |
| { |
| struct sos_umem_vmm_vr *next_vr; |
| |
| /* Yes: signal we remove it completely */ |
| if (vr->ops && vr->ops->unmap) |
| vr->ops->unmap(vr, vr->start, vr->size); |
| |
| /* Remove it from the AS list now */ |
| next_vr = vr->next_in_as; |
| if (next_vr == vr) /* singleton ? */ |
| next_vr = NULL; |
| list_delete_named(as->list_vr, vr, prev_in_as, next_in_as); |
| |
| /* Remove from the list of VRs mapping the resource */ |
| list_delete_named(vr->mapped_resource->list_vr, vr, |
| prev_in_mapped_resource, |
| next_in_mapped_resource); |
| |
| if (vr->ops && vr->ops->unref) |
| vr->ops->unref(vr); |
| |
| as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED, |
| vr->size, vr->access_rights, 0); |
| sos_kmem_vmm_free((sos_vaddr_t)vr); |
| |
| /* Prepare next iteration */ |
| vr = next_vr; |
| continue; |
| } |
| |
| /* unmapped region lies completely INSIDE the VR */ |
| else if ( (vr->start < uaddr) |
| && (vr->start + vr->size > uaddr + size) ) |
| { |
| /* VR has to be split into 2 */ |
| |
| /* Use the preallocated VR and copy the VR into it */ |
| used_preallocated_vr = TRUE; |
| memcpy(preallocated_vr, vr, sizeof(*vr)); |
| |
| /* Adjust the start/size of both VRs */ |
| preallocated_vr->start = uaddr + size; |
| preallocated_vr->size = vr->start + vr->size - (uaddr + size); |
| preallocated_vr->offset_in_resource += uaddr + size - vr->start; |
| vr->size = uaddr - vr->start; |
| |
| /* Insert the new VR into the list */ |
| list_insert_after_named(as->list_vr, vr, preallocated_vr, |
| prev_in_as, next_in_as); |
| list_add_tail_named(vr->mapped_resource->list_vr, preallocated_vr, |
| prev_in_mapped_resource, |
| next_in_mapped_resource); |
| |
| /* Signal the changes to the underlying resource */ |
| if (vr->ops && vr->ops->unmap) |
| vr->ops->unmap(vr, uaddr, size); |
| if (preallocated_vr->ops && preallocated_vr->ops->ref) |
| preallocated_vr->ops->ref(preallocated_vr); |
| |
| /* Account for change in VRs */ |
| as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED, |
| size, vr->access_rights, 0); |
| |
| /* No need to go further */ |
| break; |
| } |
| |
| /* Unmapped region only affects the START address of the VR */ |
| else if (uaddr <= vr->start) |
| { |
| sos_size_t translation = uaddr + size - vr->start; |
| |
| /* Shift the VR */ |
| vr->size -= translation; |
| vr->offset_in_resource += translation; |
| vr->start += translation; |
| |
| /* Signal unmapping */ |
| if (vr->ops && vr->ops->unmap) |
| vr->ops->unmap(vr, uaddr + size, |
| translation); |
| |
| /* Account for change in VRs */ |
| as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED, |
| translation, |
| vr->access_rights, 0); |
| |
| /* No need to go further, we reached the last VR that |
| overlaps the unmapped region */ |
| break; |
| } |
| |
| /* Unmapped region only affects the ENDING address of the VR */ |
| else if (uaddr + size >= vr->start + vr->size) |
| { |
| sos_size_t unmapped_size = vr->start + vr->size - uaddr; |
| |
| /* Resize VR */ |
| vr->size = uaddr - vr->start; |
| |
| /* Signal unmapping */ |
| if (vr->ops && vr->ops->unmap) |
| vr->ops->unmap(vr, uaddr, unmapped_size); |
| |
| /* Account for change in VRs */ |
| as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED, |
| unmapped_size, |
| vr->access_rights, 0); |
| |
| vr = vr->next_in_as; |
| continue; |
| } |
| |
| sos_display_fatal_error("BUG uaddr=%x sz=%x vr_start=%x, vr_sz=%x", |
| uaddr, size, vr->start, vr->size); |
| } |
| |
| need_to_setup_mmu = (sos_thread_get_current()->squatted_mm_context |
| != as->mm_context); |
| if (need_to_setup_mmu) |
| SOS_ASSERT_FATAL(SOS_OK |
| == sos_thread_prepare_user_space_access(as, |
| (sos_vaddr_t) |
| NULL)); |
| { |
| sos_size_t sz_unmapped = sos_paging_unmap_interval(uaddr, size); |
| SOS_ASSERT_FATAL(sz_unmapped >= 0); |
| as->phys_total -= sz_unmapped; |
| } |
| if (need_to_setup_mmu) |
| SOS_ASSERT_FATAL(SOS_OK == sos_thread_end_user_space_access()); |
| |
| if (! used_preallocated_vr) |
| sos_kmem_vmm_free((sos_vaddr_t)preallocated_vr); |
| |
| return SOS_OK; |
| } |
| |
| |
| sos_ret_t |
| sos_umem_vmm_chprot(struct sos_umem_vmm_as * as, |
| sos_uaddr_t uaddr, sos_size_t size, |
| sos_ui32_t new_access_rights) |
| { |
| struct sos_umem_vmm_vr *start_vr, *vr, |
| *preallocated_middle_vr, *preallocated_right_vr; |
| sos_bool_t used_preallocated_middle_vr, used_preallocated_right_vr; |
| |
| if (! SOS_IS_PAGE_ALIGNED(uaddr)) |
| return -SOS_EINVAL; |
| if (size <= 0) |
| return -SOS_EINVAL; |
| size = SOS_PAGE_ALIGN_SUP(size); |
| |
| /* Make sure the uaddr is valid */ |
| if (uaddr < SOS_PAGING_BASE_USER_ADDRESS) |
| return -SOS_EINVAL; |
| if (uaddr > SOS_PAGING_TOP_USER_ADDRESS - size) |
| return -SOS_EINVAL; |
| |
| /* Pre-allocate 2 new VRs (same reason as for unmap). Because chprot |
| may imply at most 2 regions to be split */ |
| used_preallocated_middle_vr = FALSE; |
| used_preallocated_right_vr = FALSE; |
| preallocated_middle_vr |
| = (struct sos_umem_vmm_vr *)sos_kmem_cache_alloc(cache_of_vr, 0); |
| if (! preallocated_middle_vr) |
| return -SOS_ENOMEM; |
| preallocated_right_vr |
| = (struct sos_umem_vmm_vr *)sos_kmem_cache_alloc(cache_of_vr, 0); |
| if (! preallocated_right_vr) |
| { |
| sos_kmem_vmm_free((sos_vaddr_t)preallocated_middle_vr); |
| return -SOS_ENOMEM; |
| } |
| |
| /* Find any VR intersecting with the given interval */ |
| start_vr = find_first_intersecting_vr(as, uaddr, size); |
| if (NULL == start_vr) |
| { |
| /* Nothing to chprot: release the pre-allocated VRs first */ |
| sos_kmem_vmm_free((sos_vaddr_t)preallocated_middle_vr); |
| sos_kmem_vmm_free((sos_vaddr_t)preallocated_right_vr); |
| return SOS_OK; |
| } |
| |
| /* First of all: make sure we are allowed to change the access |
| rights of all the VRs concerned by the chprot */ |
| vr = start_vr; |
| while (TRUE) |
| { |
| /* Went past the end of the *circular* list => back at the |
| beginning ? */ |
| if (vr->start + vr->size <= uaddr) |
| /* Yes, stop now */ |
| break; |
| |
| /* Went beyond the region to chprot ? */ |
| if (uaddr + size <= vr->start) |
| /* Yes, stop now */ |
| break; |
| |
| if (vr->flags & SOS_VR_MAP_SHARED) |
| { |
| /* Make sure the mapped resource allows the required |
| protection flags */ |
| if ( ( (new_access_rights & SOS_VM_MAP_PROT_READ) |
| && !(vr->mapped_resource->allowed_access_rights |
| & SOS_VM_MAP_PROT_READ) ) |
| || ( (new_access_rights & SOS_VM_MAP_PROT_WRITE) |
| && !(vr->mapped_resource->allowed_access_rights |
| & SOS_VM_MAP_PROT_WRITE) ) |
| || ( (new_access_rights & SOS_VM_MAP_PROT_EXEC) |
| && !(vr->mapped_resource->allowed_access_rights |
| & SOS_VM_MAP_PROT_EXEC) ) ) |
| { |
| /* Release the pre-allocated VRs before failing */ |
| sos_kmem_vmm_free((sos_vaddr_t)preallocated_middle_vr); |
| sos_kmem_vmm_free((sos_vaddr_t)preallocated_right_vr); |
| return -SOS_EPERM; |
| } |
| } |
| |
| vr = vr->next_in_as; |
| } |
| |
| /* Change the access rights of the VRs covered by [uaddr |
| .. uaddr+size[ */ |
| vr = start_vr; |
| while (TRUE) |
| { |
| |
| /* Went past the end of the *circular* list => back at the |
| beginning ? */ |
| if (vr->start + vr->size <= uaddr) |
| /* Yes, stop now */ |
| break; |
| |
| /* Went beyond the region to chprot ? */ |
| if (uaddr + size <= vr->start) |
| /* Yes, stop now */ |
| break; |
| |
| /* Access rights unchanged ? */ |
| if (vr->access_rights == new_access_rights) |
| /* nop */ |
| { |
| vr = vr->next_in_as; |
| continue; |
| } |
| |
| /* VR totally chprot ? */ |
| if ((vr->start >= uaddr) |
| && (vr->start + vr->size <= uaddr + size)) |
| { |
| /* Account for change in VRs */ |
| as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED, |
| vr->size, vr->access_rights, |
| new_access_rights); |
| vr->access_rights = new_access_rights; |
| |
| if (vr->flags & SOS_VR_MAP_SHARED) |
| /* For shared mappings: effectively change the access |
| rights of the physical pages */ |
| sos_paging_set_prot_of_interval(vr->start, vr->size, |
| new_access_rights); |
| else |
| /* Private mapping */ |
| { |
| /* For private mappings, we set the new access_rights |
| only if it becomes read-only. For private mappings |
| that become writable, we don't do anything: we keep |
| the access rights unchanged to preserve the COW |
| semantics */ |
| if (! (new_access_rights & SOS_VM_MAP_PROT_WRITE)) |
| sos_paging_set_prot_of_interval(vr->start, vr->size, |
| new_access_rights); |
| } |
| |
| vr = vr->next_in_as; |
| continue; |
| } |
| |
| /* chprot region lies completely INSIDE the VR */ |
| else if ( (vr->start < uaddr) |
| && (vr->start + vr->size > uaddr + size) ) |
| { |
| /* VR has to be split into 3 */ |
| |
| /* Use the preallocated VRs and copy the VR into them */ |
| SOS_ASSERT_FATAL(! used_preallocated_middle_vr); |
| SOS_ASSERT_FATAL(! used_preallocated_right_vr); |
| used_preallocated_middle_vr = TRUE; |
| memcpy(preallocated_middle_vr, vr, sizeof(*vr)); |
| used_preallocated_right_vr = TRUE; |
| memcpy(preallocated_right_vr, vr, sizeof(*vr)); |
| |
| /* Adjust the start/size of the VRs */ |
| preallocated_middle_vr->start = uaddr; |
| preallocated_middle_vr->size = size; |
| preallocated_right_vr->start = uaddr + size; |
| preallocated_right_vr->size = vr->start + vr->size |
| - (uaddr + size); |
| preallocated_middle_vr->offset_in_resource |
| += uaddr - vr->start; |
| preallocated_right_vr->offset_in_resource |
| += uaddr + size - vr->start; |
| vr->size = uaddr - vr->start; |
| |
| /* Account for change in VRs */ |
| preallocated_middle_vr->access_rights = new_access_rights; |
| as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED, |
| size, vr->access_rights, |
| new_access_rights); |
| |
| /* Insert the new VRs into the lists */ |
| list_insert_after_named(as->list_vr, vr, preallocated_middle_vr, |
| prev_in_as, next_in_as); |
| list_insert_after_named(as->list_vr, preallocated_middle_vr, |
| preallocated_right_vr, |
| prev_in_as, next_in_as); |
| |
| list_add_tail_named(vr->mapped_resource->list_vr, |
| preallocated_middle_vr, |
| prev_in_mapped_resource, |
| next_in_mapped_resource); |
| list_add_tail_named(vr->mapped_resource->list_vr, |
| preallocated_right_vr, |
| prev_in_mapped_resource, |
| next_in_mapped_resource); |
| |
| /* Effectively change the access rights of the physical pages */ |
| if (!(preallocated_middle_vr->flags & SOS_VR_MAP_SHARED) |
| && (new_access_rights & SOS_VM_MAP_PROT_WRITE)) |
| /* For private mappings with write access, prepare for COW */ |
| sos_paging_prepare_COW(preallocated_middle_vr->start, |
| preallocated_middle_vr->size); |
| else |
| sos_paging_set_prot_of_interval(preallocated_middle_vr->start, |
| preallocated_middle_vr->size, |
| new_access_rights); |
| |
| if (preallocated_right_vr->ops && preallocated_right_vr->ops->ref) |
| preallocated_right_vr->ops->ref(preallocated_right_vr); |
| if (preallocated_middle_vr->ops && preallocated_middle_vr->ops->ref) |
| preallocated_middle_vr->ops->ref(preallocated_middle_vr); |
| |
| /* No need to go further */ |
| break; |
| } |
| |
| /* Chprot region only affects the START address of the VR */ |
| else if (uaddr <= vr->start) |
| { |
| /* Split the region into 2 */ |
| sos_uoffset_t offset_in_region = uaddr + size - vr->start; |
| |
| /* Use the preallocated VRs and copy the VR into them */ |
| SOS_ASSERT_FATAL(! used_preallocated_middle_vr); |
| used_preallocated_middle_vr = TRUE; |
| memcpy(preallocated_middle_vr, vr, sizeof(*vr)); |
| |
| /* Adjust the start/size of the VRs */ |
| preallocated_middle_vr->start += offset_in_region; |
| preallocated_middle_vr->size -= offset_in_region; |
| vr->size = offset_in_region; |
| preallocated_middle_vr->offset_in_resource += offset_in_region; |
| |
| /* Account for change in VRs */ |
| as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED, |
| vr->size, |
| vr->access_rights, |
| new_access_rights); |
| vr->access_rights = new_access_rights; |
| |
| /* Insert the new VR into the lists */ |
| list_insert_after_named(as->list_vr, vr, |
| preallocated_middle_vr, |
| prev_in_as, next_in_as); |
| list_add_tail_named(vr->mapped_resource->list_vr, |
| preallocated_middle_vr, |
| prev_in_mapped_resource, |
| next_in_mapped_resource); |
| |
| /* Effectively change the access rights of the physical pages */ |
| if (!(vr->flags & SOS_VR_MAP_SHARED) |
| && (new_access_rights & SOS_VM_MAP_PROT_WRITE)) |
| /* For private mappings with write access, prepare for COW */ |
| sos_paging_prepare_COW(vr->start, vr->size); |
| else |
| sos_paging_set_prot_of_interval(vr->start, vr->size, |
| new_access_rights); |
| |
| if (preallocated_middle_vr->ops && preallocated_middle_vr->ops->ref) |
| preallocated_middle_vr->ops->ref(preallocated_middle_vr); |
| |
| /* No need to go further (we reached the last VR that |
| overlaps the given interval to chprot) */ |
| break; |
| } |
| |
| /* Chprot region only affects the ENDING address of the VR */ |
| else if (uaddr + size >= vr->start + vr->size) |
| { |
| /* Split the region into 2 */ |
| sos_uoffset_t offset_in_region = uaddr - vr->start; |
| |
| /* Use the preallocated VRs and copy the VR into them */ |
| SOS_ASSERT_FATAL(! used_preallocated_right_vr); |
| used_preallocated_right_vr = TRUE; |
| memcpy(preallocated_right_vr, vr, sizeof(*vr)); |
| |
| /* Adjust the start/size of the VRs */ |
| preallocated_right_vr->start += offset_in_region; |
| preallocated_right_vr->size -= offset_in_region; |
| vr->size = offset_in_region; |
| preallocated_right_vr->offset_in_resource += offset_in_region; |
| |
| /* Account for change in VRs */ |
| as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED, |
| preallocated_right_vr->size, |
| vr->access_rights, |
| new_access_rights); |
| preallocated_right_vr->access_rights = new_access_rights; |
| |
| /* Insert the new VR into the lists */ |
| list_insert_after_named(as->list_vr, vr, |
| preallocated_right_vr, |
| prev_in_as, next_in_as); |
| list_add_tail_named(vr->mapped_resource->list_vr, |
| preallocated_right_vr, |
| prev_in_mapped_resource, |
| next_in_mapped_resource); |
| |
| /* Effectively change the access rights of the physical pages */ |
| if (!(preallocated_right_vr->flags & SOS_VR_MAP_SHARED) |
| && (new_access_rights & SOS_VM_MAP_PROT_WRITE)) |
| /* For private mappings with write access, prepare for COW */ |
| sos_paging_prepare_COW(preallocated_right_vr->start, |
| preallocated_right_vr->size); |
| else |
| sos_paging_set_prot_of_interval(preallocated_right_vr->start, |
| preallocated_right_vr->size, |
| new_access_rights); |
| |
| if (preallocated_right_vr->ops && preallocated_right_vr->ops->ref) |
| preallocated_right_vr->ops->ref(preallocated_right_vr); |
| |
| vr = vr->next_in_as; |
| continue; |
| } |
| |
| sos_display_fatal_error("BUG"); |
| } |
| |
| if (! used_preallocated_middle_vr) |
| sos_kmem_vmm_free((sos_vaddr_t)preallocated_middle_vr); |
| if (! used_preallocated_right_vr) |
| sos_kmem_vmm_free((sos_vaddr_t)preallocated_right_vr); |
| |
| return SOS_OK; |
| } |
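| /* |
| * For illustration, the "chprot region lies completely INSIDE the |
| * VR" case above splits one VR into three: |
| * |
| * before: [............. vr (old rights) .............] |
| * after: [.. vr ..][ middle (new rights) ][.. right ..] |
| * |
| * which is why exactly two VRs are pre-allocated at the top of the |
| * function. |
| */ |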
| |
| |
| sos_ret_t |
| sos_umem_vmm_resize(struct sos_umem_vmm_as * as, |
| sos_uaddr_t old_uaddr, sos_size_t old_size, |
| sos_uaddr_t *new_uaddr, sos_size_t new_size, |
| sos_ui32_t flags) |
| { |
| sos_luoffset_t new_offset_in_resource; |
| sos_bool_t must_move_vr = FALSE; |
| struct sos_umem_vmm_vr *vr, *prev_vr, *next_vr; |
| |
| /* Make sure the new uaddr is valid */ |
| if (*new_uaddr < SOS_PAGING_BASE_USER_ADDRESS) |
| return -SOS_EINVAL; |
| if (*new_uaddr > SOS_PAGING_TOP_USER_ADDRESS - new_size) |
| return -SOS_EINVAL; |
| |
| old_uaddr = SOS_PAGE_ALIGN_INF(old_uaddr); |
| old_size = SOS_PAGE_ALIGN_SUP(old_size); |
| if (! SOS_IS_PAGE_ALIGNED(*new_uaddr)) |
| return -SOS_EINVAL; |
| if (new_size <= 0) |
| return -SOS_EINVAL; |
| new_size = SOS_PAGE_ALIGN_SUP(new_size); |
| |
| /* Lookup a VR overlapping the address range */ |
| vr = find_first_intersecting_vr(as, old_uaddr, old_size); |
| if (! vr) |
| return -SOS_EINVAL; |
| |
| /* Make sure there is exactly ONE VR overlapping the area */ |
| if ( (vr->start > old_uaddr) |
| || (vr->start + vr->size < old_uaddr + old_size) ) |
| return -SOS_EINVAL; |
| |
| /* Retrieve the prev/next VR if they exist (the VRs are on a |
| circular list) */ |
| prev_vr = vr->prev_in_as; |
| if (prev_vr->start >= vr->start) |
| prev_vr = NULL; |
| next_vr = vr->next_in_as; |
| if (next_vr->start <= vr->start) |
| next_vr = NULL; |
| |
| /* |
| * Compute new offset inside the mapped resource, if any |
| */ |
| |
| /* Don't allow resizing if the uaddr goes beyond the 'offset 0' of |
| the resource */ |
| if ( (*new_uaddr < vr->start) |
| && (vr->start - *new_uaddr > vr->offset_in_resource) ) |
| return -SOS_EINVAL; |
| |
| /* Compute new offset in the resource (overflow-safe) */ |
| if (vr->start > *new_uaddr) |
| new_offset_in_resource |
| = vr->offset_in_resource |
| - (vr->start - *new_uaddr); |
| else |
| new_offset_in_resource |
| = vr->offset_in_resource |
| + (*new_uaddr - vr->start); |
| |
| /* If other VRs would be affected by this resizing, then the VR must |
| be moved */ |
| if (prev_vr && (prev_vr->start + prev_vr->size > *new_uaddr)) |
| must_move_vr |= TRUE; |
| if (next_vr && (next_vr->start < *new_uaddr + new_size)) |
| must_move_vr |= TRUE; |
| |
| /* If VR would be out-of-user-space, it must be moved */ |
| if (*new_uaddr < SOS_PAGING_BASE_USER_ADDRESS) |
| must_move_vr |= TRUE; |
| if (*new_uaddr > SOS_PAGING_TOP_USER_ADDRESS - new_size) |
| must_move_vr |= TRUE; |
| |
| /* The VR must be moved but the user forbids it */ |
| if ( must_move_vr && !(flags & SOS_VR_REMAP_MAYMOVE) ) |
| return -SOS_EINVAL; |
| |
| /* If the VR must be moved, we simply map the resource elsewhere and |
| unmap the current VR */ |
| if (must_move_vr) |
| { |
| sos_uaddr_t uaddr, result_uaddr; |
| sos_ret_t retval; |
| |
| result_uaddr = *new_uaddr; |
| retval = sos_umem_vmm_map(as, & result_uaddr, new_size, |
| vr->access_rights, |
| vr->flags | INTERNAL_MAP_CALLED_FROM_MREMAP, |
| vr->mapped_resource, |
| new_offset_in_resource); |
| if (SOS_OK != retval) |
| return retval; |
| |
| /* Remap the physical pages at their new address */ |
| for (uaddr = vr->start ; |
| uaddr < vr->start + vr->size ; |
| uaddr += SOS_PAGE_SIZE) |
| { |
| sos_paddr_t paddr; |
| sos_ui32_t prot; |
| sos_uaddr_t vaddr; |
| |
| if (uaddr < *new_uaddr) |
| continue; |
| if (uaddr >= *new_uaddr + new_size) |
| continue; |
| |
| /* Compute destination virtual address (should be |
| overflow-safe) */ |
| if (vr->start >= *new_uaddr) |
| vaddr = result_uaddr |
| + (uaddr - vr->start) |
| + (vr->start - *new_uaddr); |
| else |
| vaddr = result_uaddr |
| + (uaddr - vr->start) |
| - (*new_uaddr - vr->start); |
| |
| paddr = sos_paging_get_paddr(uaddr); |
| if (! paddr) |
| /* No physical page mapped at this address yet */ |
| continue; |
| |
| prot = sos_paging_get_prot(uaddr); |
| SOS_ASSERT_FATAL(prot); |
| |
| /* Remap it at its destination address */ |
| retval = sos_paging_map(paddr, vaddr, TRUE, prot); |
| if (SOS_OK != retval) |
| { |
| sos_umem_vmm_unmap(as, result_uaddr, new_size); |
| return retval; |
| } |
| } |
| |
| retval = sos_umem_vmm_unmap(as, vr->start, vr->size); |
| if (SOS_OK != retval) |
| { |
| sos_umem_vmm_unmap(as, result_uaddr, new_size); |
| return retval; |
| } |
| |
| *new_uaddr = result_uaddr; |
| return retval; |
| } |
| |
| /* Otherwise we simply resize the VR, taking care of unmapping |
| what's been unmapped */ |
| |
| if (*new_uaddr + new_size < vr->start + vr->size) |
| sos_umem_vmm_unmap(as, *new_uaddr + new_size, |
| vr->start + vr->size - (*new_uaddr + new_size)); |
| else |
| { |
| as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED, |
| *new_uaddr + new_size |
| - (vr->start + vr->size), |
| 0, vr->access_rights); |
| vr->size += *new_uaddr + new_size - (vr->start + vr->size); |
| } |
| |
| if (*new_uaddr > vr->start) |
| sos_umem_vmm_unmap(as, vr->start, *new_uaddr - vr->start); |
| else |
| { |
| as_account_change_of_vr_protection(as, vr->flags & SOS_VR_MAP_SHARED, |
| vr->start - *new_uaddr, |
| 0, vr->access_rights); |
| vr->size += vr->start - *new_uaddr; |
| vr->start = *new_uaddr; |
| vr->offset_in_resource = new_offset_in_resource; |
| } |
| |
| SOS_ASSERT_FATAL(vr->start == *new_uaddr); |
| SOS_ASSERT_FATAL(vr->size == new_size); |
| SOS_ASSERT_FATAL(vr->offset_in_resource == new_offset_in_resource); |
| |
| return SOS_OK; |
| } |
| |
| |
| sos_ret_t sos_umem_vmm_try_resolve_page_fault(sos_uaddr_t uaddr, |
| sos_bool_t write_access, |
| sos_bool_t user_access) |
| { |
| struct sos_process *process = sos_thread_get_current()->process; |
| struct sos_umem_vmm_as *as; |
| struct sos_umem_vmm_vr *vr; |
| |
| if (! process) |
| return -SOS_EFAULT; |
| |
| as = sos_process_get_address_space(process); |
| if (! as) |
| return -SOS_EFAULT; |
| |
| vr = find_first_intersecting_vr(as, uaddr, 1); |
| if (! vr) |
| return -SOS_EFAULT; |
| |
| /* Write on a read-only VR */ |
| if (write_access && !(vr->access_rights & SOS_VM_MAP_PROT_WRITE)) |
| return -SOS_EFAULT; |
| |
| /* Write on a COW VR */ |
| if (write_access && !(vr->flags & SOS_VR_MAP_SHARED)) |
| { |
| if (SOS_OK == sos_paging_try_resolve_COW(uaddr)) |
| { |
| as->pgflt_cow ++; |
| return SOS_OK; |
| } |
| } |
| |
| /* Ask the underlying resource to resolve the page fault */ |
| if (SOS_OK != vr->ops->page_in(vr, uaddr, write_access)) |
| { |
| as->pgflt_invalid ++; |
| return -SOS_EFAULT; |
| } |
| |
| as->phys_total += SOS_PAGE_SIZE; |
| as->pgflt_page_in ++; |
| |
| /* For a private mapping, keep the mapping read-only */ |
| if (!(vr->flags & SOS_VR_MAP_SHARED)) |
| { |
| sos_paging_prepare_COW(SOS_PAGE_ALIGN_INF(uaddr), |
| SOS_PAGE_SIZE); |
| } |
| |
| return SOS_OK; |
| } |
| |
| |
| sos_ret_t |
| sos_umem_vmm_init_heap(struct sos_umem_vmm_as * as, |
| sos_uaddr_t heap_start) |
| { |
| SOS_ASSERT_FATAL(! as->heap_start); |
| |
| as->heap_start = heap_start; |
| as->heap_size = 0; |
| return SOS_OK; |
| } |
| |
| |
| sos_uaddr_t |
| sos_umem_vmm_brk(struct sos_umem_vmm_as * as, |
| sos_uaddr_t new_top_uaddr) |
| { |
| sos_uaddr_t new_start; |
| sos_size_t new_size; |
| SOS_ASSERT_FATAL(as->heap_start); |
| |
| if (! new_top_uaddr) |
| return as->heap_start + as->heap_size; |
| |
| if (new_top_uaddr == as->heap_start + as->heap_size) |
| return as->heap_start + as->heap_size; |
| |
| if (new_top_uaddr < as->heap_start) |
| return (sos_uaddr_t)NULL; |
| |
| new_top_uaddr = SOS_PAGE_ALIGN_SUP(new_top_uaddr); |
| new_start = as->heap_start; |
| new_size = new_top_uaddr - as->heap_start; |
| |
| /* First call to brk: we must map /dev/zero */ |
| if (! as->heap_size) |
| { |
| if (SOS_OK != sos_dev_zero_map(as, & as->heap_start, |
| new_size, |
| SOS_VM_MAP_PROT_READ |
| | SOS_VM_MAP_PROT_WRITE, |
| 0 /* private non-fixed */)) |
| return (sos_uaddr_t)NULL; |
| |
| as->heap_size = new_size; |
| return as->heap_start + as->heap_size; |
| } |
| |
| /* Otherwise we just have to unmap or resize the region */ |
| if (new_size <= 0) |
| { |
| if (SOS_OK != sos_umem_vmm_unmap(as, |
| as->heap_start, as->heap_size)) |
| return (sos_uaddr_t)NULL; |
| } |
| else |
| { |
| if (SOS_OK != sos_umem_vmm_resize(as, |
| as->heap_start, as->heap_size, |
| & new_start, new_size, |
| 0)) |
| return (sos_uaddr_t)NULL; |
| } |
| |
| SOS_ASSERT_FATAL(new_start == as->heap_start); |
| as->heap_size = new_size; |
| return new_top_uaddr; |
| } |
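| /* |
| * A minimal sketch of how a Unix-style sbrk() could be layered on |
| * top of sos_umem_vmm_brk(); the wrapper name and its "increment" |
| * parameter are hypothetical, not part of SOS: |
| */ |
| #if 0 /* illustration only */ |
| static sos_uaddr_t example_sbrk(struct sos_umem_vmm_as * as, |
| long increment) |
| { |
| /* brk(NULL) returns the current top of the heap */ |
| sos_uaddr_t old_top = sos_umem_vmm_brk(as, (sos_uaddr_t)NULL); |
| if (! increment) |
| return old_top; |
| /* Move the top; sos_umem_vmm_brk() returns NULL on failure */ |
| if (! sos_umem_vmm_brk(as, old_top + increment)) |
| return (sos_uaddr_t)NULL; |
| return old_top; |
| } |
| #endif |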
| |
| |
| static struct sos_umem_vmm_vr * |
| find_enclosing_or_next_vr(struct sos_umem_vmm_as * as, |
| sos_uaddr_t uaddr) |
| { |
| struct sos_umem_vmm_vr *vr; |
| int nb_vr; |
| |
| if (uaddr < SOS_PAGING_BASE_USER_ADDRESS) |
| return NULL; |
| if (uaddr > SOS_PAGING_TOP_USER_ADDRESS) |
| return NULL; |
| |
| list_foreach_named(as->list_vr, vr, nb_vr, prev_in_as, next_in_as) |
| { |
| /* Equivalent to "if (uaddr < vr->start + vr->size)" but more |
| robust (resilient to integer overflows) */ |
| if (uaddr <= vr->start + (vr->size - 1)) |
| return vr; |
| } |
| |
| return NULL; |
| } |
| |
| |
| static struct sos_umem_vmm_vr * |
| find_first_intersecting_vr(struct sos_umem_vmm_as * as, |
| sos_uaddr_t start_uaddr, sos_size_t size) |
| { |
| struct sos_umem_vmm_vr * vr; |
| vr = find_enclosing_or_next_vr(as, start_uaddr); |
| if (! vr) |
| return NULL; |
| |
| if (start_uaddr + size <= vr->start) |
| return NULL; |
| |
| return vr; |
| } |
| |
| |
| static sos_uaddr_t |
| find_first_free_interval(struct sos_umem_vmm_as * as, |
| sos_uaddr_t hint_uaddr, sos_size_t size) |
| { |
| struct sos_umem_vmm_vr * initial_vr, * vr; |
| |
| if (hint_uaddr < SOS_PAGING_BASE_USER_ADDRESS) |
| hint_uaddr = SOS_PAGING_BASE_USER_ADDRESS; |
| |
| if (hint_uaddr > SOS_PAGING_TOP_USER_ADDRESS - size + 1) |
| return (sos_uaddr_t)NULL; |
| |
| initial_vr = vr = find_enclosing_or_next_vr(as, hint_uaddr); |
| if (! vr) |
| /* Great, there is nothing after ! */ |
| return hint_uaddr; |
| |
| /* Scan the remaining VRs in the list */ |
| do |
| { |
| /* Is there enough space /before/ that VR ? */ |
| if (hint_uaddr + size <= vr->start) |
| /* Great ! */ |
| return hint_uaddr; |
| |
| /* Is there any VR /after/ this one, or do we have to wrap back |
| at the beginning of the user space ? */ |
| if (vr->next_in_as->start >= hint_uaddr) |
| /* Ok, the next VR is really after us */ |
| hint_uaddr = vr->start + vr->size; |
| else |
| { |
| /* No: wrapping up */ |
| |
| /* Is there any space before the end of user space ? */ |
| if (hint_uaddr <= SOS_PAGING_TOP_USER_ADDRESS - size) |
| return hint_uaddr; |
| |
| hint_uaddr = SOS_PAGING_BASE_USER_ADDRESS; |
| } |
| |
| /* Prepare to look after this VR */ |
| vr = vr->next_in_as; |
| } |
| while (vr != initial_vr); |
| |
| /* We wrapped around the whole list without finding any suitable |
| free interval */ |
| |
| return (sos_uaddr_t)NULL; |
| } |
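| /* |
| * A worked example of the first-fit scan above: with VRs at |
| * [1G+0x1000 .. 1G+0x3000[ and [1G+0x5000 .. 1G+0x6000[, a request |
| * for 0x2000 bytes with hint_uaddr = 1G first fails before the |
| * first VR (only 0x1000 bytes free there), moves hint_uaddr to |
| * 1G+0x3000, and succeeds there since 1G+0x3000 + 0x2000 <= |
| * 1G+0x5000. |
| */ |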
| |
| |
| static void |
| as_account_change_of_vr_protection(struct sos_umem_vmm_as * as, |
| sos_bool_t is_shared, |
| sos_size_t size, |
| sos_ui32_t prev_access_rights, |
| sos_ui32_t new_access_rights) |
| { |
| if (prev_access_rights == new_access_rights) |
| return; |
| |
| #define _UPDATE_VMSTAT(field,is_increment) \ |
| ({ if (is_increment > 0) \ |
| as->field += size; \ |
| else \ |
| { SOS_ASSERT_FATAL(as->field >= size); as->field -= size; } }) |
| #define UPDATE_VMSTAT(field,is_increment) \ |
| ({ if (is_shared) _UPDATE_VMSTAT(vm_shrd.field, is_increment); \ |
| _UPDATE_VMSTAT(vm_total.field, is_increment); \ |
| SOS_ASSERT_FATAL(as->vm_total.field >= as->vm_shrd.field); }) |
| |
| if ( (new_access_rights & SOS_VM_MAP_PROT_WRITE) |
| && !(prev_access_rights & SOS_VM_MAP_PROT_WRITE)) |
| { |
| UPDATE_VMSTAT(rw, +1); |
| if (prev_access_rights & SOS_VM_MAP_PROT_READ) |
| UPDATE_VMSTAT(ro, -1); |
| } |
| else if ( !(new_access_rights & SOS_VM_MAP_PROT_WRITE) |
| && (prev_access_rights & SOS_VM_MAP_PROT_WRITE)) |
| { |
| if (new_access_rights & SOS_VM_MAP_PROT_READ) |
| UPDATE_VMSTAT(ro, +1); |
| UPDATE_VMSTAT(rw, -1); |
| } |
| else if (new_access_rights & SOS_VM_MAP_PROT_READ) |
| UPDATE_VMSTAT(ro, +1); |
| else if (!(new_access_rights & SOS_VM_MAP_PROT_READ)) |
| UPDATE_VMSTAT(ro, -1); |
| |
| if ( (new_access_rights & SOS_VM_MAP_PROT_EXEC) |
| && !(prev_access_rights & SOS_VM_MAP_PROT_EXEC)) |
| { |
| UPDATE_VMSTAT(code, +1); |
| } |
| else if ( !(new_access_rights & SOS_VM_MAP_PROT_EXEC) |
| && (prev_access_rights & SOS_VM_MAP_PROT_EXEC)) |
| { |
| UPDATE_VMSTAT(code, -1); |
| } |
| |
| if (new_access_rights && !prev_access_rights) |
| UPDATE_VMSTAT(overall, +1); |
| else if (!new_access_rights && prev_access_rights) |
| UPDATE_VMSTAT(overall, -1); |
| |
| } |
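| /* |
| * A worked example of the accounting above: chprot'ing 2 shared |
| * pages (size = 0x2000) from PROT_READ to PROT_READ|PROT_WRITE |
| * takes the first branch: UPDATE_VMSTAT(rw, +1) adds 0x2000 to |
| * vm_shrd.rw and vm_total.rw, and UPDATE_VMSTAT(ro, -1) removes |
| * 0x2000 from vm_shrd.ro and vm_total.ro. The "overall" counters |
| * are unchanged, since both the previous and the new access rights |
| * are non-empty. |
| */ |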
| |
/tmp/sos-code-article7/sos/umem_vmm.h (1970-01-01 01:00:00.000000000 +0100
) |
|
../sos-code-article7.5/sos/umem_vmm.h (2005-04-27 20:17:18.000000000 +0200
) |
|
|
|
| /* Copyright (C) 2005 David Decotigny |
| |
| This program is free software; you can redistribute it and/or |
| modify it under the terms of the GNU General Public License |
| as published by the Free Software Foundation; either version 2 |
| of the License, or (at your option) any later version. |
| |
| This program is distributed in the hope that it will be useful, |
| but WITHOUT ANY WARRANTY; without even the implied warranty of |
| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| GNU General Public License for more details. |
| |
| You should have received a copy of the GNU General Public License |
| along with this program; if not, write to the Free Software |
| Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, |
| USA. |
| */ |
| #ifndef _SOS_UMEM_VMM_H_ |
| #define _SOS_UMEM_VMM_H_ |
| |
| /** |
| * @file umem_vmm.h |
| * |
| * Management of the address space of a process in SOS. The so-called |
| * "address space" of a process consists in the description of the |
| * virtual addresses that are valid in the user space of a process (in |
| * SOS: addresses 1G-4G). The kernel-space of a process is managed by |
| * the "kmem" subsystem, and is kept identical accross all the |
| * processes in the system. |
| * |
| * The umem_vmm subsystem handles the following features: |
| * - demand-mapping of resources (files: mmap): mapping in physical RAM |
| * will be delayed as much as possible, until the process really |
| * needs to access the mapped addresses |
| * - mprotect/mremap support |
| * - private and shared mappings |
| * - Copy-On-Write (COW) of the private mappings upon fork() to favour |
| * shared physical memory as much as possible |
| * - "heap" management (brk/sbrk) |
| * |
| * Swap is NOT supported (yet), which means that the following is NOT |
| * supported: |
| * - locked/reserved I/O pages (everything is locked in RAM) |
| * - "safe" demand-mapping of anonymous pages, ie conservative VMM |
| * allocation (alloc of anonymous pages on the swap) |
| * Other unsupported features: |
| * - dynamically-resizable regions (Linux's GROWUP/GROWDOWN vma): the |
| * user stack is expected to have a suitable virtual size from the |
| * beginning, or sos_umem_vmm_resize() must be used explicitly to |
| * resize it |
| * - no provision of "stack size" accounting, since there are |
| * multiple stacks (ie user threads) in a process: which stack to |
| * consider ??? |
| * |
| * The address space is divided into "virtual regions" (aka "VR") that |
| * describe a single mapping, aka a segment of contiguous pages in |
| * user-space virtual memory. Each such virtual region "maps" a |
| * "resource" and is characterised by: |
| * - its base address and length in user-space |
| * - the allowed accesses, aka "protection" (read-only or read/write) |
| * - the resource it maps in virtual memory |
| * |
| * A so-called resource is typically: |
| * - a file |
| * - a device |
| * - an area initially full of zeros (the VRs mapping it are called |
| * "anonymous mappings") |
| * |
| * The implementation is very close to that of Linux and Kos. This is |
| * a "simple" implementation, not the most elegant one, such as those |
| * based on "shadow objects" hierarchies as found in BSD 4.4 and Mach, |
| * or that of Solaris (based on the "anon" lists). Actually, this |
| * implementation does not use "shadow-objects"/anon list when a COW |
| * page of a shared mapping is made anonymous. This won't hurt the |
| * implementation of the basic demand-mapping mechanism; on the |
| * contrary, it will make things simpler. But this will largely impact |
| * the implementation of the swap-in/swap-out strategies, as these |
| * would require a non trivial intrication of low-level and higher |
| * level algorithms. |
| */ |
| |
| |
| /** |
| * Definition of an "address space" in Kos. This is an opaque |
| * structure defined in umem_vmm.c. Its main role is to list virtual |
| * regions. It mainly consists in: |
| * - a reference to the process owning it |
| * - maximum allowed protection (ie can it be mapped read-only or |
| * read/write ?) |
| * - the list of VRs mapping resources |
| * - a mm_context that reflects the configuration of the MMU |
| * - the location of the heap for this process |
| * - statistics |
| */ |
| struct sos_umem_vmm_as; |
| |
| |
| /** |
| * Definition of a "virtual region". Linux would call them "vma" |
| * (Virtual Memory Area), and Solaris: "segments". It mainly consists |
| * of: |
| * - the start/end addresses of the mapping |
| * - a pointer to the resource that it maps |
| * - the type of mapping (shared/private) |
| * - the actual protection flags (@see SOS_VM_MAP_PROT_* flags in |
| * hwcore/paging.h) |
| * - a set of callbacks (@see sos_umem_vmm_vr_ops below) automatically |
| * called by the umem_vmm subsystem each time the VR is modified |
| */ |
| struct sos_umem_vmm_vr; |
| |
| |
| /** VR flag: region can be shared between a process and its |
| children */ |
| #define SOS_VR_MAP_SHARED (1 << 0) |
| |
| |
| #include <sos/types.h> |
| #include <sos/process.h> |
| |
| |
| /** |
| * The callbacks applicable on a virtual region. Automatically called |
| * by the umem_vmm subsystem. |
| * |
| * Calling sequences: |
| * - duplicate_as() (aka fork()): |
| * vr->ops->ref() |
| * add vr to lists |
| * - delete_as() (aka exit()): |
| * vr->ops->unmap() |
| * remove vr from lists |
| * vr->ops->unref() |
| * - mmap(): |
| * -> left + new + right VRs can merge: |
| * remove right_vr from list |
| * right_vr->ops->unref() |
| * -> left + new VRs can merge: |
| * nothing |
| * -> new + right VRs can merge: |
| * nothing |
| * -> isolated: |
| * add new_vr to lists |
| * new_vr->map() |
| * new_vr->ops->ref() |
| * - munmap(): |
| * -> VR totally unmapped: |
| * vr->ops->unmap() |
| * remove vr from lists |
| * vr->ops->unref() |
| * -> VR unmapped in the middle (split into 2): |
| * add (new) right VR into the lists |
| * vr->unmap(middle_unmapped_area) |
| * right_vr->ops->ref() |
| * -> VR unmapped on its left: |
| * vr->ops->unmap(left_unmapped_area) |
| * -> VR unmapped on its right: |
| * vr->ops->unmap(right_unmapped_area) |
| * - chprot(): |
| * -> VR totally chprot: |
| * nothing |
| * -> VR chprot in the middle (split into 3): |
| * add (new) middle+right VRs into the lists |
| * middle_vr->ops->ref() |
| * right_vr->ops->ref() |
| * -> VR chprot on its left (split into 2): |
| * add (new) right VR into the lists |
| * right_vr->ops->ref() |
| * -> VR chprot on its right (split into 2): |
| * add (new) right VR into the lists |
| * right_vr->ops->ref() |
| * - resize(): |
| * -> if moving the VR: map/unmap |
| * -> otherwise: nothing |
| */ |
| struct sos_umem_vmm_vr_ops |
| { |
| /** |
| * Called after the virtual region has been inserted |
| * inside its address space. |
| * @note Optional |
| */ |
| void (*ref)(struct sos_umem_vmm_vr * vr); |
| |
| /** |
| * Called when the virtual region is removed from its |
| * address space |
| * @note Optional |
| */ |
| void (*unref)(struct sos_umem_vmm_vr * vr); |
| |
| /** |
| * Called when part or all a VR is unmapped |
| * @note Optional |
| */ |
| void (*unmap)(struct sos_umem_vmm_vr * vr, |
| sos_uaddr_t uaddr, sos_size_t size); |
| |
| /** |
| * Called by the page fault handler to map data at the given virtual |
| * address. In the Linux kernel, this callback is named "nopage". |
| * |
| * @note MANDATORY |
| */ |
| sos_ret_t (*page_in)(struct sos_umem_vmm_vr * vr, |
| sos_uaddr_t uaddr, |
| sos_bool_t write_access); |
| }; |
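| /* |
| * A minimal sketch of an operation set; the names and the page_in |
| * body are hypothetical: |
| */ |
| #if 0 /* illustration only */ |
| static sos_ret_t example_page_in(struct sos_umem_vmm_vr * vr, |
| sos_uaddr_t uaddr, |
| sos_bool_t write_access) |
| { |
| /* Allocate or look up a physical page and map it at |
| SOS_PAGE_ALIGN_INF(uaddr) with the protection of the VR */ |
| return SOS_OK; |
| } |
| 
| static struct sos_umem_vmm_vr_ops example_ops = { |
| .ref = NULL, /* optional */ |
| .unref = NULL, /* optional */ |
| .unmap = NULL, /* optional */ |
| .page_in = example_page_in /* MANDATORY */ |
| }; |
| #endif |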
| |
| |
| /** |
| * The definition of a mapped resource. Typically, a mapped resource |
| * is a file or a device: in both cases, only part of the resource is |
| * mapped by each VR, this part is given by the offset_in_resource |
| * field of the VR, and the size field of the VR. |
| */ |
| struct sos_umem_vmm_mapped_resource |
| { |
| /** Represents the maximum authorized SOS_VM_MAP_PROT_* for the VRs mapping |
| it */ |
| sos_ui32_t allowed_access_rights; |
| |
| /** Some flags associated with the resource. Currently only |
| SOS_MAPPED_RESOURCE_ANONYMOUS is supported */ |
| sos_ui32_t flags; |
| |
| /** List of VRs mapping this resource */ |
| struct sos_umem_vmm_vr * list_vr; |
| |
| /** |
| * MANDATORY callback function, called when a new VR mapping the |
| * resource is created. This callback is only allowed to modify the |
| * VR through the following restricted accessors: |
| * - sos_umem_vmm_set_ops_of_vr() |
| */ |
| sos_ret_t (*mmap)(struct sos_umem_vmm_vr *); |
| |
| /** |
| * Custom data that the user is free to define: the umem_vmm |
| * subsystem won't ever look at it or change it |
| */ |
| void *custom_data; |
| }; |
| |
| |
| /** Indicates that this resource is not backed by any physical |
| storage. This means that the "offset_in_resource" field of the |
| VRs will be computed by sos_umem_vmm_map() */ |
| #define SOS_MAPPED_RESOURCE_ANONYMOUS (1 << 0) |
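| /* |
| * A minimal sketch of a resource descriptor ready to be passed to |
| * sos_umem_vmm_map(), reusing the hypothetical example_ops above: |
| */ |
| #if 0 /* illustration only */ |
| static sos_ret_t example_mmap(struct sos_umem_vmm_vr * vr) |
| { |
| /* Register the operations of the VRs mapping this resource */ |
| return sos_umem_vmm_set_ops_of_vr(vr, & example_ops); |
| } |
| 
| static struct sos_umem_vmm_mapped_resource example_resource = { |
| .allowed_access_rights = SOS_VM_MAP_PROT_READ |
| | SOS_VM_MAP_PROT_WRITE, |
| .flags = SOS_MAPPED_RESOURCE_ANONYMOUS, |
| .mmap = example_mmap |
| }; |
| #endif |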
| |
| |
| /** |
| * Physical address of THE page (full of 0s) used for anonymous |
| * mappings. Anybody can map it provided it is ALWAYS in read-only |
| * mode |
| */ |
| extern sos_paddr_t sos_zero_page; |
| |
| |
| /** |
| * Setup the umem_vmm subsystem. |
| */ |
| sos_ret_t sos_umem_vmm_subsystem_setup(); |
| |
| |
| /** |
| * Create a new, empty, address space |
| * |
| * @param owner The process that will own the new address space |
| * |
| * @note no need to call |
| * sos_thread_prepare_user_space_access()/sos_thread_end_user_space_access() |
| */ |
| struct sos_umem_vmm_as * |
| sos_umem_vmm_create_empty_as(struct sos_process *owner); |
| |
| |
| /** |
| * Create a new address space, copy of the current thread's address |
| * space. All the translations belonging to private mappings are |
| * marked 'read-only' to activate the "copy-on-write" semantics. |
| * |
| * @param owner The process that will hold the new address space |
| * |
| * @note automatically calls |
| * sos_thread_prepare_user_space_access()/sos_thread_end_user_space_access() |
| */ |
| struct sos_umem_vmm_as * |
| sos_umem_vmm_duplicate_current_thread_as(struct sos_process *owner); |
| |
| |
| /** |
| * Called at process deletion time, to remove all mappings present in |
| * the address space. This function not only deletes all the VR data |
| * structures, it also calls the unmap()/unref() callbacks of these |
| * VRs. However, the physical pages mapped inside the address space |
| * won't be unmapped at this stage: they will be unmapped all in one |
| * go when the underlying mm_context becomes unused. |
| * |
| * @note no need to call |
| * sos_thread_prepare_user_space_access()/sos_thread_end_user_space_access() |
| */ |
| sos_ret_t |
| sos_umem_vmm_delete_as(struct sos_umem_vmm_as * as); |
| |
| |
| /* |
| * Accessor functions for the address space |
| */ |
| |
| /** Retrieve the pointer (NOT a new reference !) to the process owning |
| the given address space. */ |
| struct sos_process * |
| sos_umem_vmm_get_process(struct sos_umem_vmm_as * as); |
| |
| /** Retrieve the pointer (NOT a new reference !) to the MMU |
| configuration for the given address space */ |
| struct sos_mm_context * |
| sos_umem_vmm_get_mm_context(struct sos_umem_vmm_as * as); |
| |
| /** Retrieve a pointer to the VR that covers the given virtual address |
| in the given address space */ |
| struct sos_umem_vmm_vr * |
| sos_umem_vmm_get_vr_at_address(struct sos_umem_vmm_as * as, |
| sos_uaddr_t uaddr); |
| |
| |
| /* |
| * Accessor functions for the virtual regions |
| */ |
| |
| /** Retrieve the address space owning the given VR */ |
| struct sos_umem_vmm_as * |
| sos_umem_vmm_get_as_of_vr(struct sos_umem_vmm_vr * vr); |
| |
| /** Retrieve the set of callbacks of the given VR */ |
| struct sos_umem_vmm_vr_ops * |
| sos_umem_vmm_get_ops_of_vr(struct sos_umem_vmm_vr * vr); |
| |
| /** Retrieve the current protection of the given VR */ |
| sos_ui32_t sos_umem_vmm_get_prot_of_vr(struct sos_umem_vmm_vr * vr); |
| |
| /** Retrieve the flags of the given VR. One will especially be |
| interested in the SOS_VR_MAP_SHARED bit */ |
| sos_ui32_t sos_umem_vmm_get_flags_of_vr(struct sos_umem_vmm_vr * vr); |
| |
| /** Retrieve the resource mapped by the VR */ |
| struct sos_umem_vmm_mapped_resource * |
| sos_umem_vmm_get_mapped_resource_of_vr(struct sos_umem_vmm_vr * vr); |
| |
| /** Retrieve the start user address for the given mapping */ |
| sos_uaddr_t sos_umem_vmm_get_start_of_vr(struct sos_umem_vmm_vr * vr); |
| |
| /** Retrieve the size (in user space) of the given mapping */ |
| sos_size_t sos_umem_vmm_get_size_of_vr(struct sos_umem_vmm_vr * vr); |
| |
| /** Retrieve the offset in the resource of the mapping */ |
| sos_luoffset_t |
| sos_umem_vmm_get_offset_in_resource(struct sos_umem_vmm_vr * vr); |
| |
| |
| /* |
| * Restricted accessor functions. May only be called from inside the |
| * map() callback of a VR |
| */ |
| |
| /** |
| * Function that is not called directly by the umem_vmm subsystem: it MUST |
| * always be called by the mmap() callback of the resource being |
| * mapped (@see sos_umem_vmm_mapped_resource::mmap()). The mmap() |
| * method is called at VR creation time, automatically by |
| * sos_umem_vmm_map(). |
| * |
| * @note The VR MUST NOT already have a set of operations (fatal error) |
| */ |
| sos_ret_t sos_umem_vmm_set_ops_of_vr(struct sos_umem_vmm_vr * vr, |
| struct sos_umem_vmm_vr_ops * ops); |
| |
| |
| /* |
| * mmap API |
| */ |
| |
| |
| /** sos_umem_vmm_map() flag: the address given as parameter to |
| sos_umem_vmm_map() is not only a hint, it is where the VR is |
| expected to be mapped */ |
| #define SOS_VR_MAP_FIXED (1 << 31) |
| |
| |
| /** |
| * Add a new VR in the given address space, that maps the given |
| * resource. Its semantics follows that of the UNIX mmap() call |
| * (including SOS_VR_MAP_FIXED). Real mapping in physical memory will |
| * be delayed as much as possible (demand paging) and the physical |
| * pages will be shared among processes as much as possible (COW). |
| * |
| * @param *uaddr must be page-aligned, and can be NULL. It stores the |
| * address of the mapping, when successful |
| * |
| * @param size The size of the mapping in user space |
| * |
| * @param access_rights The allowed accesses to the mapped resource |
| * (@see SOS_VM_MAP_PROT_* flags in hwcore/paging.h) |
| * |
| * @param flags mainly: is it shared mapping (SOS_VR_MAP_SHARED) or not ? |
| * |
| * @param resource MUST be NON NULL, and its mmap() method must also |
| * be NON NULL |
| * |
| * @param offset_in_resource where inside the resource does the |
| * mapping start |
| * |
| * @return SOS_OK on success (address of the mapping stored in uaddr) |
| * |
| * @note no need to call |
| * sos_thread_prepare_user_space_access()/sos_thread_end_user_space_access() |
| */ |
| sos_ret_t |
| sos_umem_vmm_map(struct sos_umem_vmm_as * as, |
| sos_uaddr_t *uaddr, sos_size_t size, |
| sos_ui32_t access_rights, |
| sos_ui32_t flags, |
| struct sos_umem_vmm_mapped_resource * resource, |
| sos_luoffset_t offset_in_resource); |
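| /* |
| * A usage sketch, assuming the hypothetical example_resource above: |
| * map 4 private pages of it anywhere in user space. |
| */ |
| #if 0 /* illustration only */ |
| sos_uaddr_t uaddr = (sos_uaddr_t)NULL; /* no placement hint */ |
| sos_ret_t ret = sos_umem_vmm_map(as, & uaddr, 4*SOS_PAGE_SIZE, |
| SOS_VM_MAP_PROT_READ |
| | SOS_VM_MAP_PROT_WRITE, |
| 0 /* private, non-fixed */, |
| & example_resource, |
| 0 /* offset, recomputed for |
| anonymous resources */); |
| #endif |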
| |
| |
| /** |
| * Unmap the given address interval. This might imply the partial or |
| * complete unmapping of 0, 1 or more VRs. Same semantics as Unix |
| * munmap() |
| * |
| * @note automatically calls |
| * sos_thread_prepare_user_space_access()/sos_thread_end_user_space_access() |
| */ |
| sos_ret_t |
| sos_umem_vmm_unmap(struct sos_umem_vmm_as * as, |
| sos_uaddr_t uaddr, sos_size_t size); |
| |
| |
| /** |
| * Change the access rights of the given address interval. This might |
| * concern 0, 1 or more VRs, and result in 1 or 2 VRs being split |
| * if they are partially concerned by the change in protection. Same |
| * semantics as Unix mprotect() |
| * |
| * @param new_access_rights @see SOS_VM_MAP_PROT_* flags in hwcore/paging.h |
| * |
| * @note MAKE SURE YOU CALL |
| * sos_thread_prepare_user_space_access()/sos_thread_end_user_space_access() |
| */ |
| sos_ret_t |
| sos_umem_vmm_chprot(struct sos_umem_vmm_as * as, |
| sos_uaddr_t uaddr, sos_size_t size, |
| sos_ui32_t new_access_rights); |
| |
| |
| |
| /** |
| * Flag for sos_umem_vmm_resize() to indicate that the VR being |
| * resized can be moved elsewhere if there is not enough room to |
| * resize it in-place |
| */ |
| #define SOS_VR_REMAP_MAYMOVE (1 << 30) |
| |
| /** |
| * Lookup the region covering the old_uaddr/old_size interval, and |
| * resize it to match the *new_uaddr/new_size requirements. This is a |
| * variant of Unix's mremap() that allows resizing the VR by its |
| * low address (mremap only allows resizing a VR by its |
| * top address). |
| * |
| * @param old_uaddr Low address of the interval covered by the VR to resize |
| * |
| * @param old_size Size of the interval covered by the VR to resize |
| * |
| * @param new_uaddr MUST BE page-aligned ! Initially: the new start |
| * address of the VR, allowing to change the low-address. Once the |
| * function returns: the actual start address of the VR (which might |
| * be different, due to SOS_VR_REMAP_MAYMOVE flag, when set) |
| * |
| * @param new_size The size requested for the VR. Might be |
| * smaller/larger than the original VR size |
| * |
| * @param flags Essentially: 0 or SOS_VR_REMAP_MAYMOVE |
| * |
| * @note MAKE SURE YOU CALL |
| * sos_thread_prepare_user_space_access()/sos_thread_end_user_space_access() |
| */ |
| sos_ret_t |
| sos_umem_vmm_resize(struct sos_umem_vmm_as * as, |
| sos_uaddr_t old_uaddr, sos_size_t old_size, |
| sos_uaddr_t /* in/out */*new_uaddr, sos_size_t new_size, |
| sos_ui32_t flags); |
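| /* |
| * A usage sketch: grow a user stack by one page at its LOW end (the |
| * case plain mremap() cannot express). stack_start/stack_size are |
| * hypothetical variables describing the current stack VR: |
| */ |
| #if 0 /* illustration only */ |
| sos_uaddr_t new_start = stack_start - SOS_PAGE_SIZE; |
| sos_ret_t ret = sos_umem_vmm_resize(as, stack_start, stack_size, |
| & new_start, |
| stack_size + SOS_PAGE_SIZE, |
| 0 /* don't move the VR */); |
| #endif |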
| |
| |
| /* |
| * Heap management API (ie libc's malloc support) |
| */ |
| |
| /** |
| * Change the top address of the heap. |
| * |
| * @param new_top_uaddr When NULL, don't change anything. Otherwise: |
| * change the top address of the heap |
| * |
| * @return The top address of the heap after having been updated (if |
| * ever) |
| */ |
| sos_uaddr_t |
| sos_umem_vmm_brk(struct sos_umem_vmm_as * as, |
| sos_uaddr_t new_top_uaddr); |
| |
| |
| /* |
| * Reserved functions |
| */ |
| |
| /** |
| * Called by the main page fault handler when a physical page is not |
| * mapped for the given address of the current address space. This |
| * function is called only if: |
| * - The access (read / write) is allowed on this VR |
| * - no physical page is mapped yet |
| * This function first calls the sos_paging_try_resolve_COW() to |
| * resolve the COW if a COW access pattern is detected, and, if |
| * unsuccessful, the sos_umem_vmm_vr_ops::page_in() method of the VR. |
| * |
| * @param uaddr The address that was accessed, causing the fault. |
| * |
| * @param write_access Was it write access ? |
| * |
| * @param user_access Was it a user access ? Or a kernel access (by |
| * uaccess.h functions) ? |
| * |
| * @return SOS_OK when the fault could be solved, ie a page could be |
| * mapped for the given address. -SOS_EFAULT otherwise, meaning the |
| * faulting thread should be terminated or signalled (SIGSEGV) |
| * |
| * @note: The current mm_context MUST be that of the current thread |
| * (which caused the exception) ! |
| */ |
| sos_ret_t sos_umem_vmm_try_resolve_page_fault(sos_uaddr_t uaddr, |
| sos_bool_t write_access, |
| sos_bool_t user_access); |
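| /* |
| * A sketch of the expected call site in the page fault exception |
| * handler; the faulting address typically comes from |
| * sos_cpu_context_get_EX_faulting_vaddr() (hwcore), and the two |
| * booleans from the exception error code (simplified here): |
| */ |
| #if 0 /* illustration only */ |
| if (SOS_OK != sos_umem_vmm_try_resolve_page_fault(faulting_vaddr, |
| write_access, |
| user_access)) |
| { |
| /* -SOS_EFAULT: terminate or signal (SIGSEGV) the faulting |
| thread */ |
| } |
| #endif |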
| |
| |
| |
| /** |
| * Initialize the initial heap once the program code/data is mapped |
| * Called by the ELF32 program loader. |
| */ |
| sos_ret_t |
| sos_umem_vmm_init_heap(struct sos_umem_vmm_as * as, |
| sos_uaddr_t heap_start); |
| |
| #endif /* _SOS_UMEM_VMM_H_ */ |
| |
/tmp/sos-code-article7/sos.map (2005-04-27 20:08:31.000000000 +0200
) |
|
../sos-code-article7.5/sos.map (1970-01-01 01:00:00.000000000 +0100
) |
|
|
|
0020fda4 a.0 | |
002269e0 active_queue | |
0020a993 _add_action | |
0020ae62 add_in_ready_queue | |
0020dc46 AffectMovement | |
00226c14 arg_b.2 | |
00226c24 arg_c.3 | |
00226c34 arg_d.4 | |
00226c44 arg_e.5 | |
00226c54 arg_R.6 | |
00226c64 arg_S.7 | |
00209be9 backtracer.0 | |
00214000 _begin_userprog1 | |
00217000 _begin_userprog2 | |
0021a000 _begin_userprog3 | |
0021d000 _begin_userprog4 | |
00220000 _begin_userprog5 | |
00223000 _begin_userprog6 | |
0020e204 bindto_user_mm_context | |
00201000 __b_kernel | |
00200000 __b_load | |
0020fbf8 bootstrap_stack_bottom | |
0020fbfc bootstrap_stack_size | |
00206657 cache_add_slab | |
0020679a cache_grow | |
002064f8 cache_initialize | |
00226998 cache_of_struct_kslab | |
00226994 cache_of_struct_kslab_cache | |
00206895 cache_release_slab | |
00226d18 cache_struct_mm_context | |
00226f20 cache_struct_process | |
002269c4 cache_thread | |
0020e80a calcload_routine | |
00226cfc calcload_timeout | |
0020984a _change_waitq_priorities | |
00226c78 CheeseCount | |
0020d97a ChoosePosition | |
0020c2bc clk_it | |
00226c0c clock_count.0 | |
0020441a core_routine | |
00206a45 create_cache_of_caches | |
00206b6b create_cache_of_ranges | |
0020d17f CreateMap | |
0020dfcd CreateMouse | |
002056d0 create_range | |
00226f24 current_load_entry | |
00226980 current_mm_context | |
002269bc current_thread | |
0020942c delete_thread | |
00226c10 demand_paging_count.1 | |
0020c48e demo_thread | |
0020c238 display_bits | |
0020f570 __divdi3 | |
00226680 double_fault_alternate_stack | |
0020d259 DrawMap | |
0020cffc DrawPixel | |
0022b000 __e_kernel | |
0020d4b5 ElementInit | |
00225df8 __e_load | |
00216dc1 _end_userprog1 | |
00219d69 _end_userprog2 | |
0021cdd9 _end_userprog3 | |
0021fdd9 _end_userprog4 | |
00222d31 _end_userprog5 | |
00225da1 _end_userprog6 | |
0020db51 EvaluatePositions | |
002269e4 expired_queue | |
0020550f find_suitable_free_range | |
0020759a free_object | |
002269a4 free_ppage | |
0020fc00 gdt | |
002054ac get_closest_preceding_kmem_range | |
002053bd get_current_mm_context | |
002083c0 get_page_descr_at_paddr | |
0020ca6a idle_thread | |
00225e00 idt | |
0020d37b InitMapInput | |
0020d418 InitMapOutput | |
0020556c insert_range | |
0020ddf1 IsCollision | |
00226900 kernel_tss | |
0020fce0 kmalloc_cache | |
00226988 kmem_free_range_list | |
00226990 kmem_range_cache | |
0022698c kmem_used_range_list | |
0022699c kslab_cache_list | |
0020a252 _kwaitq_add_entry | |
0020a512 _kwaitq_remove_entry | |
002269d4 last_tick_time | |
00226984 list_mm_context | |
00226ce0 load_15mn | |
00226ca8 load_1mn | |
00226cc4 load_5mn | |
0020f0f2 load_elf_prog | |
00205613 lookup_range | |
0020f092 lookup_userprog | |
00201011 loop | |
00208481 memcmp | |
0020841c memcpy | |
00208455 memset | |
0020f700 __moddi3 | |
0020d697 Mouse | |
00226c74 MouseCount | |
0020df8b MouseCreator | |
0020d7a7 MouseMove | |
0020d032 MouseSim | |
0020fdb0 MouseSpeed | |
0020ffe0 msg_double_fault_not_supported | |
002100c0 msg_nested_level_overflow | |
00201000 multiboot_entry | |
00200000 multiboot_header | |
0020e26a nocheck_user_memcpy | |
0020e3fa nocheck_user_strzcpy | |
002269a8 nonfree_ppage | |
00226c7c ObstacleCount | |
00202ec4 paging_setup_map_helper | |
0020c2f9 pgflt_ex | |
002269a0 physical_page_descr_array | |
002269ac physmem_base | |
002269b8 physmem_nonfree_pages | |
002269b0 physmem_top | |
002269b4 physmem_total_pages | |
00226c80 pMap | |
0020936a _prepare_mm_context | |
00226c08 process_list | |
0020fda8 q.1 | |
0020fdac r.2 | |
002086a1 random | |
0020fda0 _random_seed | |
00226c9c recorded_loads | |
0020e59c _reinit_load_subsystem | |
0020abf6 _remove_action | |
00204e6c resume_pc | |
00226a00 sched_queue | |
00226c84 SemMap | |
00226c90 SemMouse | |
00209f99 _set_current | |
00210a80 sizeof_struct_sos_kmem_range | |
002099b0 sleep_timeout | |
00208e08 snprintf | |
00204d33 sos_backtrace | |
002042cf sos_bochs_hexdump | |
00204391 sos_bochs_printf | |
002040b6 sos_bochs_putchar | |
002040f7 sos_bochs_puthex | |
002040cf sos_bochs_putstring | |
002040ac sos_bochs_setup | |
00204862 sos_cpu_context_dump | |
00204e6d sos_cpu_context_exit_to | |
002049dc sos_cpu_context_get_EX_faulting_vaddr | |
00204997 sos_cpu_context_get_EX_info | |
002047a1 sos_cpu_context_get_PC | |
002047e6 sos_cpu_context_get_SP | |
00204612 sos_cpu_context_is_in_user_mode | |
0020484f sos_cpu_context_set_EX_return_address | |
002043dc sos_cpu_context_subsystem_setup | |
00204e20 sos_cpu_context_switch | |
00204dfc sos_cpu_context_update_kernel_tss | |
00204472 sos_cpu_kstate_init | |
002046bd sos_cpu_state_detect_kernel_stack_overflow | |
00204686 sos_cpu_state_prepare_detect_kernel_stack_overflow | |
0020455a sos_cpu_ustate_init | |
00208f95 sos_create_kernel_thread | |
00209173 sos_create_user_thread | |
0020c1a4 sos_display_fatal_error | |
0020bf2c sos_do_syscall | |
00201530 sos_exception_get_name | |
002014f2 sos_exception_get_routine | |
00226600 sos_exception_handler_array | |
00201454 sos_exception_set_routine | |
002013e7 sos_exception_subsystem_setup | |
00201560 sos_exception_wrapper_0 | |
002015cc sos_exception_wrapper_1 | |
00201fec sos_exception_wrapper_10 | |
00202058 sos_exception_wrapper_11 | |
002020c4 sos_exception_wrapper_12 | |
00202130 sos_exception_wrapper_13 | |
0020219c sos_exception_wrapper_14 | |
0020192c sos_exception_wrapper_15 | |
00201998 sos_exception_wrapper_16 | |
00202208 sos_exception_wrapper_17 | |
00201a04 sos_exception_wrapper_18 | |
00201a70 sos_exception_wrapper_19 | |
00201638 sos_exception_wrapper_2 | |
00201adc sos_exception_wrapper_20 | |
00201b48 sos_exception_wrapper_21 | |
00201bb4 sos_exception_wrapper_22 | |
00201c20 sos_exception_wrapper_23 | |
00201c8c sos_exception_wrapper_24 | |
00201cf8 sos_exception_wrapper_25 | |
00201d64 sos_exception_wrapper_26 | |
00201dd0 sos_exception_wrapper_27 | |
00201e3c sos_exception_wrapper_28 | |
00201ea8 sos_exception_wrapper_29 | |
002016a4 sos_exception_wrapper_3 | |
00201f14 sos_exception_wrapper_30 | |
00201f80 sos_exception_wrapper_31 | |
00201710 sos_exception_wrapper_4 | |
0020177c sos_exception_wrapper_5 | |
002017e8 sos_exception_wrapper_6 | |
00201854 sos_exception_wrapper_7 | |
00202274 sos_exception_wrapper_8 | |
002018c0 sos_exception_wrapper_9 | |
00210040 sos_exception_wrapper_array | |
00201217 sos_gdt_register_kernel_tss | |
002011e4 sos_gdt_subsystem_setup | |
0020134c sos_generic_ex | |
00203e2c sos_i8254_set_frequency | |
00202e75 sos_i8259_disable_irq_line | |
00202e24 sos_i8259_enable_irq_line | |
00202dd4 sos_i8259_subsystem_setup | |
00201172 sos_idt_get_handler | |
002010b7 sos_idt_set_handler | |
00201014 sos_idt_subsystem_setup | |
002023a7 sos_irq_get_nested_level | |
00202378 sos_irq_get_routine | |
00226880 sos_irq_handler_array | |
00226d14 sos_irq_nested_level_counter | |
002022a7 sos_irq_set_routine | |
00202290 sos_irq_subsystem_setup | |
002023b4 sos_irq_wrapper_0 | |
00202454 sos_irq_wrapper_1 | |
002029fc sos_irq_wrapper_10 | |
00202aa0 sos_irq_wrapper_11 | |
00202b44 sos_irq_wrapper_12 | |
00202be8 sos_irq_wrapper_13 | |
00202c8c sos_irq_wrapper_14 | |
00202d30 sos_irq_wrapper_15 | |
002024f4 sos_irq_wrapper_2 | |
00202594 sos_irq_wrapper_3 | |
00202634 sos_irq_wrapper_4 | |
002026d4 sos_irq_wrapper_5 | |
00202774 sos_irq_wrapper_6 | |
00202814 sos_irq_wrapper_7 | |
002028b4 sos_irq_wrapper_8 | |
00202958 sos_irq_wrapper_9 | |
00210100 sos_irq_wrapper_array | |
002079d5 sos_kfree | |
00207915 sos_kmalloc | |
00207854 sos_kmalloc_subsystem_setup | |
002071f9 sos_kmem_cache_alloc | |
00207050 sos_kmem_cache_create | |
0020715a sos_kmem_cache_destroy | |
00207474 sos_kmem_cache_free | |
002074c7 sos_kmem_cache_release_struct_range | |
00207034 sos_kmem_cache_subsystem_setup_commit | |
00206c81 sos_kmem_cache_subsystem_setup_prepare | |
002063e8 sos_kmem_vmm_alloc | |
00205ebf sos_kmem_vmm_del_range | |
00206427 sos_kmem_vmm_free | |
002064d6 sos_kmem_vmm_is_valid_vaddr | |
00205c5f sos_kmem_vmm_new_range | |
002064a5 sos_kmem_vmm_resolve_slab | |
0020647b sos_kmem_vmm_set_slab | |
00205913 sos_kmem_vmm_subsystem_setup | |
0020b759 sos_kmutex_dispose | |
0020b72c sos_kmutex_init | |
0020b76f sos_kmutex_lock | |
0020b7d8 sos_kmutex_trylock | |
0020b813 sos_kmutex_unlock | |
0020b660 sos_ksema_dispose | |
0020b676 sos_ksema_down | |
0020b634 sos_ksema_init | |
0020b6c1 sos_ksema_trydown | |
0020b6f2 sos_ksema_up | |
0020a0b6 sos_kwaitq_add_entry | |
0020a213 sos_kwaitq_change_priority | |
0020a02e sos_kwaitq_dispose | |
00209ff4 sos_kwaitq_init | |
0020a07f sos_kwaitq_init_entry | |
0020a05b sos_kwaitq_is_empty | |
0020a0f3 sos_kwaitq_remove_entry | |
0020a11c sos_kwaitq_wait | |
0020a194 sos_kwaitq_wakeup | |
0020e8db sos_load_do_timer_tick | |
0020ea78 sos_load_get_sload | |
0020ebde sos_load_get_sratio | |
0020e9ea sos_load_get_uload | |
0020eb5f sos_load_get_uratio | |
0020e630 sos_load_subsystem_setup | |
0020e91d sos_load_to_string | |
0020ccf5 sos_main | |
0020e2c7 sos_memcpy_from_user | |
0020e31e sos_memcpy_to_user | |
00204fc0 sos_mm_context_create | |
0020526b sos_mm_context_ref | |
00204ea0 sos_mm_context_subsystem_setup | |
002052c1 sos_mm_context_switch_to | |
00205406 sos_mm_context_synch_kernel_PDE | |
00205106 sos_mm_context_unref | |
00203d6d sos_paging_copy_kernel_space | |
00203b49 sos_paging_dispose | |
00203a78 sos_paging_get_current_PD_paddr | |
002039ba sos_paging_get_paddr | |
002037a7 sos_paging_get_prot | |
002031ca sos_paging_map | |
00203a8f sos_paging_set_current_PD_paddr | |
0020386d sos_paging_set_prot | |
0020394e sos_paging_set_prot_of_interval | |
00203077 sos_paging_subsystem_setup | |
002034cb sos_paging_unmap | |
0020372f sos_paging_unmap_interval | |
00208278 sos_physmem_dec_physpage_occupation | |
0020832d sos_physmem_get_kmem_range | |
002081cf sos_physmem_get_physpage_refcount | |
00208396 sos_physmem_get_state | |
00208200 sos_physmem_inc_physpage_occupation | |
00207ea0 sos_physmem_ref_physpage_at | |
00207cdd sos_physmem_ref_physpage_new | |
0020835e sos_physmem_set_kmem_range | |
00207a08 sos_physmem_subsystem_setup | |
0020803b sos_physmem_unref_physpage | |
0020ba55 sos_process_create_empty | |
0020b884 sos_process_dumplist | |
0020bb88 sos_process_get_mm_context | |
0020bb6b sos_process_get_nb_threads | |
0020bb4f sos_process_ref | |
0020bb93 sos_process_register_thread | |
0020bf02 sos_process_set_name | |
0020ba00 sos_process_subsystem_setup | |
0020bc90 sos_process_unref | |
0020bdd5 sos_process_unregister_thread | |
0020b284 sos_reschedule | |
0020b0bd sos_sched_change_priority | |
0020b4a6 sos_sched_do_timer_tick | |
0020b008 sos_sched_set_ready | |
0020ad98 sos_sched_subsystem_setup | |
0020e4fe sos_strndup_from_user | |
0020e375 sos_strnlen_from_user | |
0020e458 sos_strzcpy_from_user | |
0020e4ab sos_strzcpy_to_user | |
002012ac sos_swintr_subsystem_setup | |
00204a18 sos_syscall_get1arg | |
00204a40 sos_syscall_get2args | |
002049ed sos_syscall_get3args | |
00204a68 sos_syscall_get4args | |
00204ae7 sos_syscall_get5args | |
00204b6e sos_syscall_get6args | |
00204bfd sos_syscall_get7args | |
00204c94 sos_syscall_get8args | |
002012e0 sos_syscall_wrapper | |
00209dd8 sos_thread_change_current_mm_context | |
00209d58 sos_thread_dump_backtrace | |
00209582 sos_thread_exit | |
00209b80 sos_thread_force_unblock | |
00208e3c sos_thread_get_current | |
0020967e sos_thread_get_priority | |
00209698 sos_thread_get_state | |
00209ec2 sos_thread_prepare_exception_switch_back | |
00209ee2 sos_thread_prepare_irq_servicing | |
00209ef3 sos_thread_prepare_irq_switch_back | |
00209ea2 sos_thread_prepare_syscall_switch_back | |
002098e8 sos_thread_set_priority | |
00209a0d sos_thread_sleep | |
00208e87 sos_thread_subsystem_setup | |
0020998d sos_thread_yield | |
0020a81b sos_time_cmp | |
0020a6de sos_time_dec | |
0020ad22 sos_time_do_tick | |
0020a962 sos_time_get_now | |
0020a900 sos_time_get_tick_resolution | |
0020a66c sos_time_inc | |
0020a885 sos_time_is_zero | |
0020abb7 sos_time_register_action_absolute | |
0020ab78 sos_time_register_action_relative | |
0020a931 sos_time_set_tick_resolution | |
0020a8af sos_time_subsysem_setup | |
0020acfe sos_time_unregister_action | |
0020fc40 sos_x86_exnames | |
00203ec8 sos_x86_videomem_cls | |
00204038 sos_x86_videomem_printf | |
00203fb6 sos_x86_videomem_putchar | |
00203f17 sos_x86_videomem_putstring | |
00203ea8 sos_x86_videomem_setup | |
0020eda8 spawn_program | |
002086eb srandom | |
00227000 stack | |
00201000 start | |
00201000 _start | |
0020caa7 stat_thread | |
002085f7 strcmp | |
002084e1 strlen | |
0020863c strncmp | |
00208509 strnlen | |
002085a4 strzcat | |
0020853b strzcpy | |
002096b1 _switch_to_next_thread | |
0020ec60 test_art7 | |
0020c82b test_thread | |
0020b209 thread_expired_its_quantuum | |
002269c0 thread_list | |
002269cc tick_resolution | |
002269c8 timeout_action_list | |
00226d20 time_slice | |
0020f910 __udivdi3 | |
0020fa40 __umoddi3 | |
0020e6f6 update_sliding_window | |
00225dac _userprog1_entry | |
00225db8 _userprog2_entry | |
00225dc4 _userprog3_entry | |
00225dd0 _userprog4_entry | |
00225ddc _userprog5_entry | |
00225de8 _userprog6_entry | |
00225dac _userprogs_table | |
0020fcc0 video | |
002086f8 vsnprintf | |
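
The map above (one address/name pair per symbol, sorted by name) is the kind of table a kernel needs to turn a raw code address into a human-readable location, for instance when dumping a backtrace. As a minimal, illustrative sketch only (not SOS's actual sos_backtrace implementation, which is not shown in this listing), the following standalone C program resolves an address to the nearest preceding symbol; the three sample entries are copied verbatim from the map above and re-sorted by address, and the probe address 0x002045a0 is a hypothetical return address chosen to fall inside sos_cpu_ustate_init:

    #include <stdio.h>

    struct sym { unsigned long addr; const char *name; };

    /* Three entries copied from the symbol map, sorted by address */
    static const struct sym symtab[] = {
      { 0x00204472UL, "sos_cpu_kstate_init" },
      { 0x0020455aUL, "sos_cpu_ustate_init" },
      { 0x00204612UL, "sos_cpu_context_is_in_user_mode" },
    };
    static const int nsyms = sizeof(symtab) / sizeof(symtab[0]);

    /* Return the symbol with the greatest address <= pc, or NULL if
       pc lies below the first entry */
    static const struct sym *resolve(unsigned long pc)
    {
      const struct sym *best = NULL;
      int i;
      for (i = 0; i < nsyms; i++)
        {
          if (symtab[i].addr > pc)
            break; /* table is sorted: no later entry can match */
          best = &symtab[i];
        }
      return best;
    }

    int main(void)
    {
      unsigned long pc = 0x002045a0UL; /* hypothetical return address */
      const struct sym *s = resolve(pc);
      if (s)
        printf("%#lx = %s+%#lx\n", pc, s->name, pc - s->addr);
      return 0;
    }

Run on the sample data, this prints "0x2045a0 = sos_cpu_ustate_init+0x46". A real resolver would keep the full table sorted by address and binary-search it; the linear scan here only keeps the sketch short.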