/tmp/sos-code-article6.75/hwcore/cpu_context.c (2005-01-04 04:13:52.000000000 +0100) | ../sos-code-article7/hwcore/cpu_context.c (2005-02-05 17:52:19.000000000 +0100)

/* Copyright (C) 2000-2004, The KOS team | /* Copyright (C) 2005 David Decotigny |
Copyright (C) 1999 Free Software Foundation, Inc. | Copyright (C) 2000-2004, The KOS team |
This program is free software; you can redistribute it and/or | This program is free software; you can redistribute it and/or |
modify it under the terms of the GNU General Public License | modify it under the terms of the GNU General Public License |
|
|
#include <drivers/bochs.h> | #include <drivers/bochs.h> |
#include <drivers/x86_videomem.h> | #include <drivers/x86_videomem.h> |
#include <hwcore/segment.h> | #include <hwcore/segment.h> |
| #include <hwcore/gdt.h> |
| #include <sos/uaccess.h> |
| |
#include "cpu_context.h" | #include "cpu_context.h" |
| |
|
|
* the registers are stored on the stack in | * the registers are stored on the stack in |
* irq_wrappers.S/exception_wrappers.S !!! Hence the constraint above. | * irq_wrappers.S/exception_wrappers.S !!! Hence the constraint above. |
*/ | */ |
struct sos_cpu_kstate { | struct sos_cpu_state { |
| |
/* These are SOS convention */ | /* These are SOS convention */ |
|
|
sos_ui16_t fs; | sos_ui16_t fs; |
sos_ui16_t es; | sos_ui16_t es; |
sos_ui16_t ds; | sos_ui16_t ds; |
sos_ui16_t ss; | sos_ui16_t cpl0_ss; /* This is ALWAYS the Stack Segment of the |
| Kernel context (CPL0) of the interrupted |
| thread, even for a user thread */ |
sos_ui32_t eax; | sos_ui32_t eax; |
sos_ui32_t ebx; | sos_ui32_t ebx; |
|
|
/* MUST NEVER CHANGE (dependent on the IA32 iret instruction) */ | /* MUST NEVER CHANGE (dependent on the IA32 iret instruction) */ |
sos_ui32_t error_code; | sos_ui32_t error_code; |
sos_vaddr_t eip; | sos_vaddr_t eip; |
sos_ui32_t cs; | sos_ui32_t cs; /* 32bits according to the specs ! However, the CS |
| register is really 16bits long */ |
| |
/* (Higher addresses) */ | /* (Higher addresses) */ |
} __attribute__((packed)); | } __attribute__((packed)); |
| |
| |
| /** |
| * The CS value pushed on the stack by the CPU upon interrupt, and |
| * needed by the iret instruction, is 32bits long while the real CPU |
| * CS register is 16bits only: this macro simply retrieves the CPU |
| * "CS" register value from the CS value pushed on the stack by the |
| * CPU upon interrupt. |
| * |
| * The remaining 16bits pushed by the CPU should be considered |
| * "reserved" and architecture dependent. IMHO, the specs don't say |
 | * anything about them. Considering that some processors generate |
| * non-zero values for these 16bits (at least Cyrix), we'd better |
| * ignore them. |
| */ |
| #define GET_CPU_CS_REGISTER_VALUE(pushed_ui32_cs_value) \ |
| ( (pushed_ui32_cs_value) & 0xffff ) |
| |
| |
| /** |
| * Structure of an interrupted Kernel thread's context |
| */ |
| struct sos_cpu_kstate |
| { |
| struct sos_cpu_state regs; |
| } __attribute__((packed)); |
| |
| |
| /** |
| * Structure of an interrupted User thread's context. This is almost |
| * the same as a kernel context, except that 2 additional values are |
| * pushed on the stack before the eflags/cs/eip of the interrupted |
| * context: the stack configuration of the interrupted user context. |
| * |
| * @see Section 6.4.1 of Intel x86 vol 1 |
| */ |
| struct sos_cpu_ustate |
| { |
| struct sos_cpu_state regs; |
| struct |
| { |
| sos_ui32_t cpl3_esp; |
| sos_ui16_t cpl3_ss; |
| }; |
| } __attribute__((packed)); |
| |
| |
| /* |
| * Structure of a Task State Segment on the x86 Architecture. |
| * |
| * @see Intel x86 spec vol 3, figure 6-2 |
| * |
| * @note Such a data structure should not cross any page boundary (see |
| * end of section 6.2.1 of Intel spec vol 3). This is the reason why |
| * we tell gcc to align it on a 128B boundary (its size is 104B, which |
| * is <= 128). |
| */ |
| struct x86_tss { |
| |
| /** |
| * Intel provides a way for a task to switch to another in an |
| * automatic way (call gates). In this case, the back_link field |
 | stores the source TSS of the context switch. This makes it easy |
 | to implement coroutines, task backtracking, ... In SOS we don't |
 | use the TSS for context switching, so we always |
 | ignore this field. |
| * (+0) |
| */ |
| sos_ui16_t back_link; |
| |
| sos_ui16_t reserved1; |
| |
| /* CPL0 saved context. (+4) */ |
| sos_vaddr_t esp0; |
| sos_ui16_t ss0; |
| |
| sos_ui16_t reserved2; |
| |
| /* CPL1 saved context. (+12) */ |
| sos_vaddr_t esp1; |
| sos_ui16_t ss1; |
| |
| sos_ui16_t reserved3; |
| |
| /* CPL2 saved context. (+20) */ |
| sos_vaddr_t esp2; |
| sos_ui16_t ss2; |
| |
| sos_ui16_t reserved4; |
| |
| /* Interrupted context's saved registers. (+28) */ |
| sos_vaddr_t cr3; |
| sos_vaddr_t eip; |
| sos_ui32_t eflags; |
| sos_ui32_t eax; |
| sos_ui32_t ecx; |
| sos_ui32_t edx; |
| sos_ui32_t ebx; |
| sos_ui32_t esp; |
| sos_ui32_t ebp; |
| sos_ui32_t esi; |
| sos_ui32_t edi; |
| |
| /* +72 */ |
| sos_ui16_t es; |
| sos_ui16_t reserved5; |
| |
| /* +76 */ |
| sos_ui16_t cs; |
| sos_ui16_t reserved6; |
| |
| /* +80 */ |
| sos_ui16_t ss; |
| sos_ui16_t reserved7; |
| |
| /* +84 */ |
| sos_ui16_t ds; |
| sos_ui16_t reserved8; |
| |
| /* +88 */ |
| sos_ui16_t fs; |
| sos_ui16_t reserved9; |
| |
| /* +92 */ |
| sos_ui16_t gs; |
| sos_ui16_t reserved10; |
| |
| /* +96 */ |
| sos_ui16_t ldtr; |
| sos_ui16_t reserved11; |
| |
| /* +100 */ |
| sos_ui16_t debug_trap_flag :1; |
| sos_ui16_t reserved12 :15; |
| sos_ui16_t iomap_base_addr; |
| |
| /* 104 */ |
| } __attribute__((packed, aligned(128))); |
| |
| |
| static struct x86_tss kernel_tss; |
| |
| |
| sos_ret_t sos_cpu_context_subsystem_setup() |
| { |
| /* Reset the kernel TSS */ |
| memset(&kernel_tss, 0x0, sizeof(kernel_tss)); |
| |
| /** |
| * Now setup the kernel TSS. |
| * |
 | * Considering the privilege change method we chose (cpl3 -> cpl0 |
 | * through a software interrupt), we don't need to initialize a |
 | * full-fledged TSS. See section 6.4.1 of Intel x86 vol 1. Actually, |
 | * only correct values for the kernel esp and ss (aka the "esp0" and |
 | * "ss0" fields) are required. Since esp0 will have to be updated at |
 | * privilege change time, we don't have to set it up now. |
| */ |
| kernel_tss.ss0 = SOS_BUILD_SEGMENT_REG_VALUE(0, FALSE, SOS_SEG_KDATA); |
| |
| /* Register this TSS into the gdt */ |
| sos_gdt_register_kernel_tss((sos_vaddr_t) &kernel_tss); |
| |
| return SOS_OK; |
| } |
| |
| |
| /** |
| * THE main operation of a kernel thread. This routine calls the |
| * kernel thread function start_func and calls exit_func when |
| * start_func returns. |
| */ |
static void core_routine (sos_cpu_kstate_function_arg1_t *start_func, | static void core_routine (sos_cpu_kstate_function_arg1_t *start_func, |
sos_ui32_t start_arg, | sos_ui32_t start_arg, |
sos_cpu_kstate_function_arg1_t *exit_func, | sos_cpu_kstate_function_arg1_t *exit_func, |
|
|
} | } |
| |
| |
sos_ret_t sos_cpu_kstate_init(struct sos_cpu_kstate **ctxt, | sos_ret_t sos_cpu_kstate_init(struct sos_cpu_state **ctxt, |
sos_ui32_t start_arg, | sos_ui32_t start_arg, |
sos_vaddr_t stack_bottom, | sos_vaddr_t stack_bottom, |
|
|
sos_cpu_kstate_function_arg1_t *exit_func, | sos_cpu_kstate_function_arg1_t *exit_func, |
sos_ui32_t exit_arg) | sos_ui32_t exit_arg) |
{ | { |
| /* We are initializing a Kernel thread's context */ |
| struct sos_cpu_kstate *kctxt; |
| |
  /* This is a critical internal function, so it is assumed that the | /* This is a critical internal function, so it is assumed that the |
     caller knows what he does: we legitimately assume that the values | caller knows what he does: we legitimately assume that the values |
     for ctxt, start_func, stack_* and exit_func are always VALID ! */ | for ctxt, start_func, stack_* and exit_func are always VALID ! */ |
|
|
sos_ui32_t *stack = (sos_ui32_t*)tmp_vaddr; | sos_ui32_t *stack = (sos_ui32_t*)tmp_vaddr; |
| |
/* If needed, poison the stack */ | /* If needed, poison the stack */ |
#ifdef SOS_CPU_KSTATE_DETECT_UNINIT_VARS | #ifdef SOS_CPU_STATE_DETECT_UNINIT_KERNEL_VARS |
memset((void*)stack_bottom, SOS_CPU_KSTATE_STACK_POISON, stack_size); | memset((void*)stack_bottom, SOS_CPU_STATE_STACK_POISON, stack_size); |
#elif defined(SOS_CPU_KSTATE_DETECT_STACK_OVERFLOW) | #elif defined(SOS_CPU_STATE_DETECT_KERNEL_STACK_OVERFLOW) |
sos_cpu_kstate_prepare_detect_stack_overflow(stack_bottom, stack_size); | sos_cpu_state_prepare_detect_kernel_stack_overflow(stack_bottom, stack_size); |
| |
/* Simulate a call to the core_routine() function: prepare its | /* Simulate a call to the core_routine() function: prepare its |
|
|
/* Compute the base address of the structure, which must be located | /* Compute the base address of the structure, which must be located |
below the previous elements */ | below the previous elements */ |
tmp_vaddr = ((sos_vaddr_t)stack) - sizeof(struct sos_cpu_kstate); | tmp_vaddr = ((sos_vaddr_t)stack) - sizeof(struct sos_cpu_kstate); |
*ctxt = (struct sos_cpu_kstate*)tmp_vaddr; | kctxt = (struct sos_cpu_kstate*)tmp_vaddr; |
/* Initialize the CPU context structure */ | /* Initialize the CPU context structure */ |
memset(*ctxt, 0x0, sizeof(struct sos_cpu_kstate)); | memset(kctxt, 0x0, sizeof(struct sos_cpu_kstate)); |
/* Tell the CPU context structure that the first instruction to | /* Tell the CPU context structure that the first instruction to |
execute will be that of the core_routine() function */ | execute will be that of the core_routine() function */ |
(*ctxt)->eip = (sos_ui32_t)core_routine; | kctxt->regs.eip = (sos_ui32_t)core_routine; |
| |
| /* Setup the segment registers */ |
| kctxt->regs.cs |
| = SOS_BUILD_SEGMENT_REG_VALUE(0, FALSE, SOS_SEG_KCODE); /* Code */ |
| kctxt->regs.ds |
| = SOS_BUILD_SEGMENT_REG_VALUE(0, FALSE, SOS_SEG_KDATA); /* Data */ |
| kctxt->regs.es |
| = SOS_BUILD_SEGMENT_REG_VALUE(0, FALSE, SOS_SEG_KDATA); /* Data */ |
| kctxt->regs.cpl0_ss |
| = SOS_BUILD_SEGMENT_REG_VALUE(0, FALSE, SOS_SEG_KDATA); /* Stack */ |
| /* fs and gs unused for the moment. */ |
| |
| /* The newly created context is initially interruptible */ |
| kctxt->regs.eflags = (1 << 9); /* set IF bit */ |
| |
| /* Finally, update the generic kernel/user thread context */ |
| *ctxt = (struct sos_cpu_state*) kctxt; |
| |
| return SOS_OK; |
| } |
| |
| |
| sos_ret_t sos_cpu_ustate_init(struct sos_cpu_state **ctxt, |
| sos_uaddr_t user_start_PC, |
| sos_ui32_t user_start_arg, |
| sos_uaddr_t user_initial_SP, |
| sos_vaddr_t kernel_stack_bottom, |
| sos_size_t kernel_stack_size) |
| { |
| /* We are initializing a User thread's context */ |
| struct sos_cpu_ustate *uctxt; |
| |
 | /* This is a critical internal function, so it is assumed that the |
 | caller knows what he does: we legitimately assume that the values |
 | for ctxt, etc. are always VALID ! */ |
| |
| /* Compute the address of the CPU state to restore on CPU when |
| switching to this new user thread */ |
| sos_vaddr_t uctxt_vaddr = kernel_stack_bottom |
| + kernel_stack_size |
| - sizeof(struct sos_cpu_ustate); |
| uctxt = (struct sos_cpu_ustate*)uctxt_vaddr; |
| |
| /* If needed, poison the kernel stack */ |
| #ifdef SOS_CPU_STATE_DETECT_UNINIT_KERNEL_VARS |
| memset((void*)kernel_stack_bottom, |
| SOS_CPU_STATE_STACK_POISON, |
| kernel_stack_size); |
| #elif defined(SOS_CPU_STATE_DETECT_KERNEL_STACK_OVERFLOW) |
| sos_cpu_state_prepare_detect_kernel_stack_overflow(kernel_stack_bottom, |
| kernel_stack_size); |
| #endif |
| |
| /* |
| * Setup the initial context structure, so that the CPU will restore |
 | * the initial registers' values for the user thread. The |
 | * user thread argument is passed in the eax register. |
| */ |
| |
| /* Initialize the CPU context structure */ |
| memset(uctxt, 0x0, sizeof(struct sos_cpu_ustate)); |
| |
| /* Tell the CPU context structure that the first instruction to |
| execute will be located at user_start_PC (in user space) */ |
| uctxt->regs.eip = (sos_ui32_t)user_start_PC; |
| |
 | /* The parameter to the start function is not passed on the stack to |
| avoid a possible page fault */ |
| uctxt->regs.eax = user_start_arg; |
| |
| /* Tell the CPU where will be the user stack */ |
| uctxt->cpl3_esp = user_initial_SP; |
/* Setup the segment registers */ | /* Setup the segment registers */ |
(*ctxt)->cs = SOS_BUILD_SEGMENT_REG_VALUE(0, 0, SOS_SEG_KCODE); /* Code */ | uctxt->regs.cs |
(*ctxt)->ds = SOS_BUILD_SEGMENT_REG_VALUE(0, 0, SOS_SEG_KDATA); /* Data */ | = SOS_BUILD_SEGMENT_REG_VALUE(3, FALSE, SOS_SEG_UCODE); /* Code */ |
(*ctxt)->es = SOS_BUILD_SEGMENT_REG_VALUE(0, 0, SOS_SEG_KDATA); /* Data */ | uctxt->regs.ds |
(*ctxt)->ss = SOS_BUILD_SEGMENT_REG_VALUE(0, 0, SOS_SEG_KDATA); /* Stack */ | = SOS_BUILD_SEGMENT_REG_VALUE(3, FALSE, SOS_SEG_UDATA); /* Data */ |
| uctxt->regs.es |
| = SOS_BUILD_SEGMENT_REG_VALUE(3, FALSE, SOS_SEG_UDATA); /* Data */ |
| uctxt->cpl3_ss |
| = SOS_BUILD_SEGMENT_REG_VALUE(3, FALSE, SOS_SEG_UDATA); /* User Stack */ |
| |
 | /* We also need to set up the kernel stack segment. It will be |
 | used when this context is restored on the CPU: the thread will |
 | initially be executing in kernel mode and will switch |
 | immediately to user mode */ |
| uctxt->regs.cpl0_ss |
| = SOS_BUILD_SEGMENT_REG_VALUE(0, FALSE, SOS_SEG_KDATA); /* Kernel Stack */ |
| |
| |
/* The newly created context is initially interruptible */ | /* The newly created context is initially interruptible */ |
(*ctxt)->eflags = (1 << 9); /* set IF bit */ | uctxt->regs.eflags = (1 << 9); /* set IF bit */ |
| |
| /* Finally, update the generic kernel/user thread context */ |
| *ctxt = (struct sos_cpu_state*) uctxt; |
return SOS_OK; | return SOS_OK; |
} | } |
| |
| |
#if defined(SOS_CPU_KSTATE_DETECT_STACK_OVERFLOW) | inline sos_ret_t |
| sos_cpu_context_is_in_user_mode(const struct sos_cpu_state *ctxt) |
| { |
| /* An interrupted user thread has its CS register set to that of the |
| User code segment */ |
| switch (GET_CPU_CS_REGISTER_VALUE(ctxt->cs)) |
| { |
| case SOS_BUILD_SEGMENT_REG_VALUE(3, FALSE, SOS_SEG_UCODE): |
| return TRUE; |
| break; |
| |
| case SOS_BUILD_SEGMENT_REG_VALUE(0, FALSE, SOS_SEG_KCODE): |
| return FALSE; |
| break; |
| |
| default: |
| SOS_FATAL_ERROR("Invalid saved context Code segment register: 0x%x (k=%x, u=%x) !", |
| (unsigned) GET_CPU_CS_REGISTER_VALUE(ctxt->cs), |
| SOS_BUILD_SEGMENT_REG_VALUE(0, FALSE, SOS_SEG_KCODE), |
| SOS_BUILD_SEGMENT_REG_VALUE(3, FALSE, SOS_SEG_UCODE)); |
| break; |
| } |
| |
| /* Should never get here */ |
| return -SOS_EFATAL; |
| } |
| |
| |
| #if defined(SOS_CPU_STATE_DETECT_KERNEL_STACK_OVERFLOW) |
sos_cpu_kstate_prepare_detect_stack_overflow(const struct sos_cpu_kstate *ctxt, | sos_cpu_state_prepare_detect_kernel_stack_overflow(const struct sos_cpu_state *ctxt, |
sos_vaddr_t stack_bottom, | sos_vaddr_t stack_bottom, |
sos_size_t stack_size) | sos_size_t stack_size) |
sos_size_t poison_size = SOS_CPU_KSTATE_DETECT_STACK_OVERFLOW; | sos_size_t poison_size = SOS_CPU_STATE_DETECT_KERNEL_STACK_OVERFLOW; |
poison_size = stack_size; | poison_size = stack_size; |
| |
memset((void*)stack_bottom, SOS_CPU_KSTATE_STACK_POISON, poison_size); | memset((void*)stack_bottom, SOS_CPU_STATE_STACK_POISON, poison_size); |
| |
| |
void | void |
sos_cpu_kstate_detect_stack_overflow(const struct sos_cpu_kstate *ctxt, | sos_cpu_state_detect_kernel_stack_overflow(const struct sos_cpu_state *ctxt, |
sos_vaddr_t stack_bottom, | sos_vaddr_t stack_bottom, |
sos_size_t stack_size) | sos_size_t stack_size) |
unsigned char *c; | unsigned char *c; |
int i; | int i; |
| |
| /* On SOS, "ctxt" corresponds to the address of the esp register of |
| the saved context in Kernel mode (always, even for the interrupted |
| context of a user thread). Here we make sure that this stack |
| pointer is within the allowed stack area */ |
SOS_ASSERT_FATAL(((sos_vaddr_t)ctxt) >= stack_bottom); | SOS_ASSERT_FATAL(((sos_vaddr_t)ctxt) >= stack_bottom); |
SOS_ASSERT_FATAL(((sos_vaddr_t)ctxt) + sizeof(struct sos_cpu_kstate) | SOS_ASSERT_FATAL(((sos_vaddr_t)ctxt) + sizeof(struct sos_cpu_kstate) |
<= stack_bottom + stack_size); | <= stack_bottom + stack_size); |
| |
| /* Check that the bottom of the stack has not been altered */ |
for (c = (unsigned char*) stack_bottom, i = 0 ; | for (c = (unsigned char*) stack_bottom, i = 0 ; |
(i < SOS_CPU_KSTATE_DETECT_STACK_OVERFLOW) && (i < stack_size) ; | (i < SOS_CPU_STATE_DETECT_KERNEL_STACK_OVERFLOW) && (i < stack_size) ; |
{ | { |
SOS_ASSERT_FATAL(SOS_CPU_KSTATE_STACK_POISON == *c); | SOS_ASSERT_FATAL(SOS_CPU_STATE_STACK_POISON == *c); |
} | } |
#endif | #endif |
| |
| |
sos_vaddr_t sos_cpu_kstate_get_PC(const struct sos_cpu_kstate *ctxt) | /* ======================================================================= |
| * Public Accessor functions |
| */ |
| |
| |
| sos_vaddr_t sos_cpu_context_get_PC(const struct sos_cpu_state *ctxt) |
SOS_ASSERT_FATAL(NULL != ctxt); | SOS_ASSERT_FATAL(NULL != ctxt); |
| |
| /* This is the PC of the interrupted context (ie kernel or user |
| context). */ |
return ctxt->eip; | return ctxt->eip; |
} | } |
| |
| |
sos_vaddr_t sos_cpu_kstate_get_SP(const struct sos_cpu_kstate *ctxt) | sos_vaddr_t sos_cpu_context_get_SP(const struct sos_cpu_state *ctxt) |
SOS_ASSERT_FATAL(NULL != ctxt); | SOS_ASSERT_FATAL(NULL != ctxt); |
| |
| /* 'ctxt' corresponds to the SP of the interrupted context, in Kernel |
| mode. We have to test whether the original interrupted context |
| was that of a kernel or user thread */ |
| if (TRUE == sos_cpu_context_is_in_user_mode(ctxt)) |
| { |
| struct sos_cpu_ustate * uctxt = (struct sos_cpu_ustate*)ctxt; |
| return uctxt->cpl3_esp; |
| } |
| |
| /* On SOS, "ctxt" corresponds to the address of the esp register of |
| the saved context in Kernel mode (always, even for the interrupted |
| context of a user thread). */ |
return (sos_vaddr_t)ctxt; | return (sos_vaddr_t)ctxt; |
} | } |
| |
| |
void sos_cpu_kstate_dump(const struct sos_cpu_kstate *ctxt) | void sos_cpu_context_dump(const struct sos_cpu_state *ctxt) |
char buf[128]; | char buf[128]; |
snprintf(buf, sizeof(buf), | snprintf(buf, sizeof(buf), |
"CPU: eip=%x esp=%x eflags=%x cs=%x ds=%x ss=%x err=%x", | "CPU: eip=%x esp=%x eflags=%x cs=%x ds=%x ss=%x err=%x", |
(unsigned)ctxt->eip, (unsigned)ctxt, (unsigned)ctxt->eflags, | (unsigned)ctxt->eip, (unsigned)ctxt, (unsigned)ctxt->eflags, |
(unsigned)ctxt->cs, (unsigned)ctxt->ds, (unsigned)ctxt->ss, | (unsigned)GET_CPU_CS_REGISTER_VALUE(ctxt->cs), (unsigned)ctxt->ds, |
| (unsigned)ctxt->cpl0_ss, |
sos_bochs_putstring(buf); sos_bochs_putstring("\n"); | sos_bochs_putstring(buf); sos_bochs_putstring("\n"); |
sos_x86_videomem_putstring(23, 0, | sos_x86_videomem_putstring(23, 0, |
|
|
} | } |
| |
| |
sos_ui32_t sos_cpu_kstate_get_EX_info(const struct sos_cpu_kstate *ctxt) | /* ======================================================================= |
| * Public Accessor functions TO BE USED ONLY BY Exception handlers |
| */ |
| |
| |
| sos_ui32_t sos_cpu_context_get_EX_info(const struct sos_cpu_state *ctxt) |
SOS_ASSERT_FATAL(NULL != ctxt); | SOS_ASSERT_FATAL(NULL != ctxt); |
return ctxt->error_code; | return ctxt->error_code; |
|
|
| |
| |
sos_vaddr_t | sos_vaddr_t |
sos_cpu_kstate_get_EX_faulting_vaddr(const struct sos_cpu_kstate *ctxt) | sos_cpu_context_get_EX_faulting_vaddr(const struct sos_cpu_state *ctxt) |
sos_ui32_t cr2; | sos_ui32_t cr2; |
| |
  /* See Intel Vol 3 (section 5.14): the faulting virtual address | /* |
     of a page fault is stored in the cr2 register */ | * See Intel Vol 3 (section 5.14): the faulting virtual |
 | * address of a page fault is stored in the cr2 |
 | * register. |
| * |
| * Actually, we do not store the cr2 register in a saved |
| * kernel thread's context. So we retrieve the cr2's value directly |
| * from the processor. The value we retrieve in an exception handler |
| * is actually the correct one because an exception is synchronous |
| * with the code causing the fault, and cannot be interrupted since |
| * the IDT entries in SOS are "interrupt gates" (ie IRQ are |
| * disabled). |
| */ |
:"=r"(cr2) | :"=r"(cr2) |
: ); | : ); |
|
|
} | } |
| |
| |
sos_ui32_t sos_backtrace(const struct sos_cpu_kstate *cpu_kstate, | /* ======================================================================= |
| * Public Accessor functions TO BE USED ONLY BY the SYSCALL handler |
| */ |
| |
| |
| /* |
| * By convention, the USER SOS programs always pass 4 arguments to the |
 | * kernel syscall handler: in eax/../edx. For fewer arguments, the |
| * unused registers are filled with 0s. For more arguments, the 4th |
| * syscall parameter gives the address of the array containing the |
| * remaining arguments. In any case, eax corresponds to the syscall |
| * IDentifier. |
| */ |
| |
| |
| inline |
| sos_ret_t sos_syscall_get3args(const struct sos_cpu_state *user_ctxt, |
| /* out */unsigned int *arg1, |
| /* out */unsigned int *arg2, |
| /* out */unsigned int *arg3) |
| { |
| *arg1 = user_ctxt->ebx; |
| *arg2 = user_ctxt->ecx; |
| *arg3 = user_ctxt->edx; |
| return SOS_OK; |
| } |
| |
| |
| sos_ret_t sos_syscall_get1arg(const struct sos_cpu_state *user_ctxt, |
| /* out */unsigned int *arg1) |
| { |
| unsigned int unused; |
| return sos_syscall_get3args(user_ctxt, arg1, & unused, & unused); |
| } |
| |
| |
| sos_ret_t sos_syscall_get2args(const struct sos_cpu_state *user_ctxt, |
| /* out */unsigned int *arg1, |
| /* out */unsigned int *arg2) |
| { |
| unsigned int unused; |
| return sos_syscall_get3args(user_ctxt, arg1, arg2, & unused); |
| } |
| |
| |
| /* |
| * sos_syscall_get3args() is defined in cpu_context.c because it needs |
 | * to know the structure of a struct sos_cpu_state |
| */ |
| |
| sos_ret_t sos_syscall_get4args(const struct sos_cpu_state *user_ctxt, |
| /* out */unsigned int *arg1, |
| /* out */unsigned int *arg2, |
| /* out */unsigned int *arg3, |
| /* out */unsigned int *arg4) |
| { |
| sos_uaddr_t uaddr_other_args; |
| unsigned int other_args[2]; |
| sos_ret_t retval; |
| |
| /* Retrieve the 3 arguments. The last one is an array containing the |
| remaining arguments */ |
| retval = sos_syscall_get3args(user_ctxt, arg1, arg2, |
| (unsigned int *)& uaddr_other_args); |
| if (SOS_OK != retval) |
| return retval; |
| |
| /* Copy the array containing the remaining arguments from user |
| space */ |
| retval = sos_copy_from_user((sos_vaddr_t)other_args, |
| (sos_uaddr_t)uaddr_other_args, |
| sizeof(other_args)); |
| if (SOS_OK != retval) |
| return retval; |
| |
| *arg3 = other_args[0]; |
| *arg4 = other_args[1]; |
| return retval; |
| } |
| |
| |
| sos_ret_t sos_syscall_get5args(const struct sos_cpu_state *user_ctxt, |
| /* out */unsigned int *arg1, |
| /* out */unsigned int *arg2, |
| /* out */unsigned int *arg3, |
| /* out */unsigned int *arg4, |
| /* out */unsigned int *arg5) |
| { |
| sos_uaddr_t uaddr_other_args; |
| unsigned int other_args[3]; |
| sos_ret_t retval; |
| |
| /* Retrieve the 3 arguments. The last one is an array containing the |
| remaining arguments */ |
| retval = sos_syscall_get3args(user_ctxt, arg1, arg2, |
| (unsigned int *)& uaddr_other_args); |
| if (SOS_OK != retval) |
| return retval; |
| |
| /* Copy the array containing the remaining arguments from user |
| space */ |
| retval = sos_copy_from_user((sos_vaddr_t)other_args, |
| (sos_uaddr_t)uaddr_other_args, |
| sizeof(other_args)); |
| if (SOS_OK != retval) |
| return retval; |
| |
| *arg3 = other_args[0]; |
| *arg4 = other_args[1]; |
| *arg5 = other_args[2]; |
| return retval; |
| } |
| |
| |
| sos_ret_t sos_syscall_get6args(const struct sos_cpu_state *user_ctxt, |
| /* out */unsigned int *arg1, |
| /* out */unsigned int *arg2, |
| /* out */unsigned int *arg3, |
| /* out */unsigned int *arg4, |
| /* out */unsigned int *arg5, |
| /* out */unsigned int *arg6) |
| { |
| sos_uaddr_t uaddr_other_args; |
| unsigned int other_args[4]; |
| sos_ret_t retval; |
| |
| /* Retrieve the 3 arguments. The last one is an array containing the |
| remaining arguments */ |
| retval = sos_syscall_get3args(user_ctxt, arg1, arg2, |
| (unsigned int *)& uaddr_other_args); |
| if (SOS_OK != retval) |
| return retval; |
| |
| /* Copy the array containing the remaining arguments from user |
| space */ |
| retval = sos_copy_from_user((sos_vaddr_t)other_args, |
| (sos_uaddr_t)uaddr_other_args, |
| sizeof(other_args)); |
| if (SOS_OK != retval) |
| return retval; |
| |
| *arg3 = other_args[0]; |
| *arg4 = other_args[1]; |
| *arg5 = other_args[2]; |
| *arg6 = other_args[3]; |
| return retval; |
| } |
| |
| |
| /* ======================================================================= |
| * Backtrace facility. To be used for DEBUGging purpose ONLY. |
| */ |
| |
| |
| sos_ui32_t sos_backtrace(const struct sos_cpu_state *cpu_state, |
sos_vaddr_t stack_bottom, | sos_vaddr_t stack_bottom, |
sos_size_t stack_size, | sos_size_t stack_size, |
|
|
int depth; | int depth; |
sos_vaddr_t callee_PC, caller_frame; | sos_vaddr_t callee_PC, caller_frame; |
| |
| /* Cannot backtrace an interrupted user thread ! */ |
| if ((NULL != cpu_state) |
| && |
| (TRUE == sos_cpu_context_is_in_user_mode(cpu_state))) |
| { |
| return 0; |
| } |
| |
/* | /* |
* Layout of a frame on the x86 (compiler=gcc): | * Layout of a frame on the x86 (compiler=gcc): |
* | * |
|
|
* function will return -SOS_ENOSUP. | * function will return -SOS_ENOSUP. |
*/ | */ |
| |
if (cpu_kstate) | if (cpu_state) |
callee_PC = cpu_kstate->eip; | callee_PC = cpu_state->eip; |
caller_frame = cpu_kstate->ebp; | caller_frame = cpu_state->ebp; |
else | else |
{ | { |
|
|
| |
return depth; | return depth; |
} | } |
| |
| |
| /* ************************************************************* |
| * Function to manage the TSS. This function is not really "public": |
| * it is reserved to the assembler routines defined in |
| * cpu_context_switch.S |
| * |
| * Update the kernel stack address so that the IRQ, syscalls and |
| * exception return in a correct stack location when coming back into |
| * kernel mode. |
| */ |
| void |
| sos_cpu_context_update_kernel_tss(struct sos_cpu_state *next_ctxt) |
| { |
| /* next_ctxt corresponds to an interrupted user thread ? */ |
| if (sos_cpu_context_is_in_user_mode(next_ctxt)) |
| { |
| /* |
| * Yes: "next_ctxt" is an interrupted user thread => we are |
| * going to switch to user mode ! Setup the stack address so |
| * that the user thread "next_ctxt" can come back to the correct |
| * stack location when returning in kernel mode. |
| * |
| * This stack location corresponds to the SP of the next user |
| * thread once its context has been transferred on the CPU, ie |
| * once the CPU has executed all the pop/iret instruction of the |
| * context switch with privilege change. |
| */ |
| kernel_tss.esp0 = ((sos_vaddr_t)next_ctxt) |
| + sizeof(struct sos_cpu_ustate); |
 | /* Note: no need to protect this against IRQs because IRQs are not |
| allowed to update it by themselves, and they are not allowed |
| to block */ |
| } |
| else |
| { |
| /* No: No need to update kernel TSS when we stay in kernel |
| mode */ |
| } |
| } |
| |
/tmp/sos-code-article6.75/hwcore/cpu_context.h (2005-01-04 04:13:52.000000000 +0100) | ../sos-code-article7/hwcore/cpu_context.h (2005-02-05 17:52:18.000000000 +0100)

/* Copyright (C) 2000-2004, The KOS team | /* Copyright (C) 2005 David Decotigny |
Copyright (C) 1999 Free Software Foundation, Inc. | Copyright (C) 2000-2004, The KOS team |
This program is free software; you can redistribute it and/or | This program is free software; you can redistribute it and/or |
modify it under the terms of the GNU General Public License | modify it under the terms of the GNU General Public License |
|
|
| |
| |
/** | /** |
* Opaque structure storing the CPU context of an inactive kernel | * Prepare the system to deal with multiple CPU execution contexts |
* thread, as saved by the low level primitives below or by the | */ |
| sos_ret_t sos_cpu_context_subsystem_setup(); |
| |
| |
| /** |
| * Opaque structure storing the CPU context of an inactive kernel or |
| * user thread, as saved by the low level primitives below or by the |
* | * |
* @note This is an (architecture-independent) forward declaration: | * @note This is an (architecture-independent) forward declaration: |
* see cpu_context.c and the *.S files for its | * see cpu_context.c and the *.S files for its |
* (architecture-dependent) definition. | * (architecture-dependent) definition. |
*/ | */ |
struct sos_cpu_kstate; | struct sos_cpu_state; |
| |
/** | /** |
* The type of the functions passed as arguments below | * The type of the functions passed as arguments to the Kernel thread |
| * related functions. |
typedef void (sos_cpu_kstate_function_arg1_t(sos_ui32_t arg1)); | typedef void (sos_cpu_kstate_function_arg1_t(sos_ui32_t arg1)); |
| |
|
|
* start_func function returns, the function exit_func is called with | * start_func function returns, the function exit_func is called with |
* argument exit_arg. | * argument exit_arg. |
* | * |
* @param ctxt The kernel thread CPU context to initialize. The | * @param kctxt The kernel thread CPU context to initialize. The |
* address of the newly-initialized struct sos_cpu_kstate will be | * address of the newly-initialized struct sos_cpu_state will be |
* stored in this variable. The contents of this struct sos_cpu_kstate | * stored in this variable. The contents of this struct sos_cpu_state |
* | * |
* @param start_func The address of the first instruction that will be | * @param start_func The address of the first instruction that will be |
|
|
* | * |
* @note the newly created context is INTERRUPTIBLE by default ! | * @note the newly created context is INTERRUPTIBLE by default ! |
*/ | */ |
sos_ret_t sos_cpu_kstate_init(struct sos_cpu_kstate **ctxt, | sos_ret_t sos_cpu_kstate_init(struct sos_cpu_state **kctxt, |
sos_ui32_t start_arg, | sos_ui32_t start_arg, |
sos_vaddr_t stack_bottom, | sos_vaddr_t stack_bottom, |
|
|
| |
| |
/** | /** |
* Function that performs an immediate context-switch from one kernel | * Function to create an initial context for a user thread starting |
* thread to another one. It stores the current executing context in | * its execution at function user_start_PC with the user_start_arg |
* from_ctxt, and restores to_context on CPU. | * argument. The address of the user stack before any modification by |
 | * the ustate_init() function is given by user_initial_SP. The user |
| * thread starts in kernel space first and needs a kernel stack for |
| * the syscalls and for handling interrupts: the address of this |
| * kernel stack is given by the kernel_stack_* parameters. |
| * |
| * @param uctxt The user thread CPU context to initialize. The |
| * address of the newly-initialized struct sos_cpu_state will be |
| * stored in this variable. The contents of this struct sos_cpu_state |
| * are actually located /inside/ the kernel stack of the thread. |
| * |
| * @param user_start_PC The address of the first instruction that will |
| * be executed in user mode when this context will be first |
| * transferred on CPU. Practically speaking, this is the address of a |
| * function that is assumed to take 1 argument. |
| * |
 | * @param user_initial_SP The initial user stack address. |
| * |
| * @param user_start_arg The parameter passed to the initial user |
| * thread function. |
| * |
| * @param kernel_stack_bottom The lowest address of the kernel stack |
| * used to switch to user mode and to handle interrupts/exceptions. |
| * |
| * @param kernel_stack_size The size of the kernel stack (@see |
| * kernel_stack_bottom). |
| * |
 | * @note the newly created thread context is INTERRUPTIBLE ! |
| */ |
| sos_ret_t sos_cpu_ustate_init(struct sos_cpu_state **uctxt, |
| sos_uaddr_t user_start_PC, |
| sos_ui32_t user_start_arg, |
| sos_uaddr_t user_initial_SP, |
| sos_vaddr_t kernel_stack_bottom, |
| sos_size_t kernel_stack_size); |
| |
| |
| /** |
| * Function that performs an immediate context-switch from one |
| * kernel/user thread to another one. It stores the current executing |
| * context in from_ctxt, and restores to_context on CPU. |
* @param from_ctxt The address of the struct sos_cpu_kstate will be | * @param from_ctxt The address of the struct sos_cpu_state will be |
* | * |
* @param to_ctxt The CPU will resume its execution with the struct | * @param to_ctxt The CPU will resume its execution with the struct |
* sos_cpu_kstate located at this address. Must NOT be NULL. | * sos_cpu_state located at this address. Must NOT be NULL. |
void sos_cpu_kstate_switch(struct sos_cpu_kstate **from_ctxt, | void sos_cpu_context_switch(struct sos_cpu_state **from_ctxt, |
struct sos_cpu_kstate *to_ctxt); | struct sos_cpu_state *to_ctxt); |
| |
/* | /* |
* Switch to the new given context (of a kernel thread) without saving | * Switch to the new given context (of a kernel/user thread) without |
* the old context (of another kernel thread), and call the function | * saving the old context (of another kernel/user thread), and call |
 * reclaiming_func passing it the reclaiming_arg argument. The | * the function reclaiming_func passing it the reclaiming_arg |
 * reclaiming function is called from within the stack of the new | * argument. The reclaiming function is called from within the stack |
* context, so that it can (among other things) safely destroy the | * of the new context, so that it can (among other things) safely |
* stack of the former context. | * destroy the stack of the former context. |
* @param switch_to_ctxt The context that will be restored on the CPU | * @param switch_to_ctxt The context that will be restored on the CPU |
* | * |
|
|
* context to switch_to_ctxt. | * context to switch_to_ctxt. |
*/ | */ |
void | void |
sos_cpu_kstate_exit_to(struct sos_cpu_kstate *switch_to_ctxt, | sos_cpu_context_exit_to(struct sos_cpu_state *switch_to_ctxt, |
sos_cpu_kstate_function_arg1_t *reclaiming_func, | sos_cpu_kstate_function_arg1_t *reclaiming_func, |
sos_ui32_t reclaiming_arg) __attribute__((noreturn)); | sos_ui32_t reclaiming_arg) __attribute__((noreturn)); |
| |
/* ======================================================================= | /* ======================================================================= |
* Public Accessor functions | * Public Accessor functions |
*/ | */ |
| |
| |
| /** |
 | * Return whether the saved context is that of a kernel or a user thread |
 | * |
 | * @return TRUE when the context was interrupted while in user mode, |
 | * FALSE when in kernel mode, < 0 on error. |
| */ |
| sos_ret_t |
| sos_cpu_context_is_in_user_mode(const struct sos_cpu_state *ctxt); |
| |
| |
/** | /** |
* Return Program Counter stored in the saved context | * Return Program Counter stored in the saved kernel/user context |
sos_vaddr_t sos_cpu_kstate_get_PC(const struct sos_cpu_kstate *ctxt); | sos_vaddr_t sos_cpu_context_get_PC(const struct sos_cpu_state *ctxt); |
| |
/** | /** |
* Return Stack Pointer stored in the saved context | * Return Stack Pointer stored in the saved kernel/user context |
sos_vaddr_t sos_cpu_kstate_get_SP(const struct sos_cpu_kstate *ctxt); | sos_vaddr_t sos_cpu_context_get_SP(const struct sos_cpu_state *ctxt); |
| |
/** | /** |
* Dump the contents of the CPU context (bochs + x86_videomem) | * Dump the contents of the CPU context (bochs + x86_videomem) |
*/ | */ |
void sos_cpu_kstate_dump(const struct sos_cpu_kstate *ctxt); | void sos_cpu_context_dump(const struct sos_cpu_state *ctxt); |
| |
/* ======================================================================= | /* ======================================================================= |
|
|
* Return the argument passed by the CPU upon exception, as stored in the | * Return the argument passed by the CPU upon exception, as stored in the |
* saved context | * saved context |
*/ | */ |
sos_ui32_t sos_cpu_kstate_get_EX_info(const struct sos_cpu_kstate *ctxt); | sos_ui32_t sos_cpu_context_get_EX_info(const struct sos_cpu_state *ctxt); |
| |
/** | /** |
* Return the faulting address of the exception | * Return the faulting address of the exception |
*/ | */ |
sos_vaddr_t | sos_vaddr_t |
sos_cpu_kstate_get_EX_faulting_vaddr(const struct sos_cpu_kstate *ctxt); | sos_cpu_context_get_EX_faulting_vaddr(const struct sos_cpu_state *ctxt); |
| |
| |
| /* ======================================================================= |
| * Public Accessor functions TO BE USED ONLY BY the SYSCALL handler |
| */ |
| |
| /** |
| * Low-level functions used by the syscall handler. They are |
| * responsible for retrieving the arguments passed to the syscall when |
| * a user thread makes a syscall. Some of these arguments are |
| * available as registers' values in the user context, some of them |
| * are user-space addresses given by these registers. |
| */ |
| sos_ret_t sos_syscall_get1arg(const struct sos_cpu_state *user_ctxt, |
| /* out */unsigned int *arg1); |
| |
| sos_ret_t sos_syscall_get2args(const struct sos_cpu_state *user_ctxt, |
| /* out */unsigned int *arg1, |
| /* out */unsigned int *arg2); |
| |
| sos_ret_t sos_syscall_get3args(const struct sos_cpu_state *user_ctxt, |
| /* out */unsigned int *arg1, |
| /* out */unsigned int *arg2, |
| /* out */unsigned int *arg3); |
| |
| sos_ret_t sos_syscall_get4args(const struct sos_cpu_state *user_ctxt, |
| /* out */unsigned int *arg1, |
| /* out */unsigned int *arg2, |
| /* out */unsigned int *arg3, |
| /* out */unsigned int *arg4); |
| |
| sos_ret_t sos_syscall_get5args(const struct sos_cpu_state *user_ctxt, |
| /* out */unsigned int *arg1, |
| /* out */unsigned int *arg2, |
| /* out */unsigned int *arg3, |
| /* out */unsigned int *arg4, |
| /* out */unsigned int *arg5); |
| |
| sos_ret_t sos_syscall_get6args(const struct sos_cpu_state *user_ctxt, |
| /* out */unsigned int *arg1, |
| /* out */unsigned int *arg2, |
| /* out */unsigned int *arg3, |
| /* out */unsigned int *arg4, |
| /* out */unsigned int *arg5, |
| /* out */unsigned int *arg6); |
| |
/* ======================================================================= | /* ======================================================================= |
|
|
* - when the thread might have gone too deep in the stack | * - when the thread might have gone too deep in the stack |
*/ | */ |
/** The signature of the poison */ | /** The signature of the poison */ |
#define SOS_CPU_KSTATE_STACK_POISON 0xa5 | #define SOS_CPU_STATE_STACK_POISON 0xa5 |
/** | /** |
 * When set, means that the whole stack is poisoned to detect use of | * When set, means that the whole stack is poisoned to detect use of |
 * uninitialized variables | * uninitialized variables |
*/ | */ |
#define SOS_CPU_KSTATE_DETECT_UNINIT_VARS | #define SOS_CPU_STATE_DETECT_UNINIT_KERNEL_VARS |
/* #undef SOS_CPU_KSTATE_DETECT_UNINIT_VARS */ | /* #undef SOS_CPU_STATE_DETECT_UNINIT_KERNEL_VARS */ |
/** | /** |
 * When set, means that the bottom of the stack is poisoned to detect | * When set, means that the bottom of the stack is poisoned to detect |
* probable stack overflow. Its value indicates the number of bytes | * probable stack overflow. Its value indicates the number of bytes |
* used for this detection. | * used for this detection. |
*/ | */ |
#define SOS_CPU_KSTATE_DETECT_STACK_OVERFLOW 64 | #define SOS_CPU_STATE_DETECT_KERNEL_STACK_OVERFLOW 64 |
/* #undef SOS_CPU_KSTATE_DETECT_STACK_OVERFLOW */ | /* #undef SOS_CPU_STATE_DETECT_KERNEL_STACK_OVERFLOW */ |
#if defined(SOS_CPU_KSTATE_DETECT_STACK_OVERFLOW) | #if defined(SOS_CPU_STATE_DETECT_KERNEL_STACK_OVERFLOW) |
sos_cpu_kstate_prepare_detect_stack_overflow(const struct sos_cpu_kstate *ctxt, | sos_cpu_state_prepare_detect_kernel_stack_overflow(const struct sos_cpu_state *ctxt, |
sos_vaddr_t stack_bottom, | sos_vaddr_t kernel_stack_bottom, |
sos_size_t stack_size); | sos_size_t kernel_stack_size); |
void sos_cpu_kstate_detect_stack_overflow(const struct sos_cpu_kstate *ctxt, | void sos_cpu_state_detect_kernel_stack_overflow(const struct sos_cpu_state *ctxt, |
sos_vaddr_t stack_bottom, | sos_vaddr_t kernel_stack_bottom, |
sos_size_t stack_size); | sos_size_t kernel_stack_size); |
# define sos_cpu_kstate_prepare_detect_stack_overflow(ctxt,stkbottom,stksize) \ | # define sos_cpu_state_prepare_detect_kernel_stack_overflow(ctxt,stkbottom,stksize) \ |
# define sos_cpu_kstate_detect_stack_overflow(ctxt,stkbottom,stksize) \ | # define sos_cpu_state_detect_kernel_stack_overflow(ctxt,stkbottom,stksize) \ |
#endif | #endif |
| |
|
|
| |
| |
/** | /** |
* Call the backtracer callback on each frame stored in the cpu_kstate | * Call the backtracer callback on each frame stored in the cpu_state |
* @param cpu_kstate The CPU context we want to explore. NULL to | * @param cpu_state The CPU context we want to explore. MUST be the |
* backtrace the current CPU context. | * context of a thread in Kernel mode, or NULL. When NULL: backtrace |
| * the current CPU context. |
* @param max_depth The maximum number of frames to explore | * @param max_depth The maximum number of frames to explore |
* | * |
|
|
* @note Might be inaccurate when gcc's -fomit-frame-pointer has been | * @note Might be inaccurate when gcc's -fomit-frame-pointer has been |
* used. | * used. |
*/ | */ |
sos_ui32_t sos_backtrace(const struct sos_cpu_kstate *cpu_kstate, | sos_ui32_t sos_backtrace(const struct sos_cpu_state *cpu_state, |
sos_vaddr_t stack_bottom, | sos_vaddr_t stack_bottom, |
sos_size_t stack_size, | sos_size_t stack_size, |
| |
/tmp/sos-code-article6.75/hwcore/exception.c (2005-01-04 04:13:52.000000000 +0100) | ../sos-code-article7/hwcore/exception.c (2005-02-05 17:52:18.000000000 +0100)

/* Copyright (C) 2004 David Decotigny | /* Copyright (C) 2004 David Decotigny |
Copyright (C) 1999 Free Software Foundation, Inc. | |
This program is free software; you can redistribute it and/or | This program is free software; you can redistribute it and/or |
modify it under the terms of the GNU General Public License | modify it under the terms of the GNU General Public License |
|
|
#include "idt.h" | #include "idt.h" |
#include "irq.h" | #include "irq.h" |
| |
| #include <sos/assert.h> |
| #include <sos/thread.h> |
| |
#include "exception.h" | #include "exception.h" |
| |
/* array of exception wrappers, defined in exception_wrappers.S */ | /* array of exception wrappers, defined in exception_wrappers.S */ |
|
|
sos_exception_handler_t sos_exception_handler_array[SOS_EXCEPT_NUM] = | sos_exception_handler_t sos_exception_handler_array[SOS_EXCEPT_NUM] = |
{ NULL, }; | { NULL, }; |
| |
| /* List of exception names for the x86 architecture */ |
| static const char * sos_x86_exnames[] = { |
| [SOS_EXCEPT_DIVIDE_ERROR] = "Division by zero", |
| [SOS_EXCEPT_DEBUG] = "Debug", |
| [SOS_EXCEPT_NMI_INTERRUPT] = "Non Maskable Interrupt", |
| [SOS_EXCEPT_BREAKPOINT] = "Breakpoint", |
| [SOS_EXCEPT_OVERFLOW] = "Overflow", |
| [SOS_EXCEPT_BOUND_RANGE_EXCEDEED] = "Bound Range Exceeded", |
| [SOS_EXCEPT_INVALID_OPCODE] = "Invalid Opcode", |
| [SOS_EXCEPT_DEVICE_NOT_AVAILABLE] = "Device Unavailable", |
| [SOS_EXCEPT_DOUBLE_FAULT] = "Double Fault", |
| [SOS_EXCEPT_COPROCESSOR_SEGMENT_OVERRUN] = "Coprocessor Segment Overrun", |
| [SOS_EXCEPT_INVALID_TSS] = "Invalid TSS", |
| [SOS_EXCEPT_SEGMENT_NOT_PRESENT] = "Segment Not Present", |
| [SOS_EXCEPT_STACK_SEGMENT_FAULT] = "Stack Segfault", |
| [SOS_EXCEPT_GENERAL_PROTECTION] = "General Protection", |
| [SOS_EXCEPT_PAGE_FAULT] = "Page Fault", |
| [SOS_EXCEPT_INTEL_RESERVED_1] = "INTEL1", |
| [SOS_EXCEPT_FLOATING_POINT_ERROR] = "FP Error", |
| [SOS_EXCEPT_ALIGNEMENT_CHECK] = "Alignment Check", |
| [SOS_EXCEPT_MACHINE_CHECK] = "Machine Check", |
| [SOS_EXCEPT_INTEL_RESERVED_2] = "INTEL2", |
| [SOS_EXCEPT_INTEL_RESERVED_3] = "INTEL3", |
| [SOS_EXCEPT_INTEL_RESERVED_4] = "INTEL4", |
| [SOS_EXCEPT_INTEL_RESERVED_5] = "INTEL5", |
| [SOS_EXCEPT_INTEL_RESERVED_6] = "INTEL6", |
| [SOS_EXCEPT_INTEL_RESERVED_7] = "INTEL7", |
| [SOS_EXCEPT_INTEL_RESERVED_8] = "INTEL8", |
| [SOS_EXCEPT_INTEL_RESERVED_9] = "INTEL9", |
| [SOS_EXCEPT_INTEL_RESERVED_10] = "INTEL10", |
| [SOS_EXCEPT_INTEL_RESERVED_11] = "INTEL11", |
| [SOS_EXCEPT_INTEL_RESERVED_12] = "INTEL12", |
| [SOS_EXCEPT_INTEL_RESERVED_13] = "INTEL13", |
| [SOS_EXCEPT_INTEL_RESERVED_14] = "INTEL14" |
| }; |
| |
| |
| /* Catch-all exception handler */ |
| static void sos_generic_ex(int exid, const struct sos_cpu_state *ctxt) |
| { |
| const char *exname = sos_exception_get_name(exid); |
| |
| if (sos_cpu_context_is_in_user_mode(ctxt)) |
| { |
| /* Exception while in user mode */ |
| sos_bochs_printf("Exception %s in User mode at instruction 0x%x (info=%x)!\n", |
| exname, |
| sos_cpu_context_get_PC(ctxt), |
| (unsigned)sos_cpu_context_get_EX_info(ctxt)); |
| sos_bochs_printf("Terminating User thread\n"); |
| sos_thread_exit(); |
| } |
| else |
| sos_display_fatal_error("Exception %s in Kernel at instruction 0x%x (info=%x)!\n", |
| exname, |
| sos_cpu_context_get_PC(ctxt), |
| (unsigned)sos_cpu_context_get_EX_info(ctxt)); |
| } |
| |
| |
sos_ret_t sos_exception_subsystem_setup(void) | sos_ret_t sos_exception_subsystem_setup(void) |
{ | { |
| sos_ret_t retval; |
| int exid; |
| |
| /* Setup the generic exception handler by default for everybody |
| except for the double fault exception */ |
| for (exid = 0 ; exid < SOS_EXCEPT_NUM ; exid ++) |
| { |
| /* Skip double fault (see below) */ |
| if (exid == SOS_EXCEPT_DOUBLE_FAULT) |
| continue; |
| |
| retval = sos_exception_set_routine(exid, sos_generic_ex); |
| if (SOS_OK != retval) |
| return retval; |
| } |
| |
| |
  /* We indicate that the double fault exception handler is defined, | /* We indicate that the double fault exception handler is defined, |
     and give its address. This handler is a do-nothing handler (see | and give its address. This handler is a do-nothing handler (see |
     exception_wrappers.S), and it can NOT be overridden by the | exception_wrappers.S), and it can NOT be overridden by the |
|
|
/* Expected to be atomic */ | /* Expected to be atomic */ |
return sos_exception_handler_array[exception_number]; | return sos_exception_handler_array[exception_number]; |
} | } |
| |
| |
| const char * sos_exception_get_name(int exception_number) |
| { |
| if ((exception_number < 0) || (exception_number >= SOS_EXCEPT_NUM)) |
| return NULL; |
| |
| return sos_x86_exnames[exception_number]; |
| } |
| |
/tmp/sos-code-article6.75/hwcore/irq_wrappers.S (2005-01-04 04:13:52.000000000 +0100) | ../sos-code-article7/hwcore/irq_wrappers.S (2005-02-05 17:52:19.000000000 +0100)

/* Copyright (C) 2004 The KOS Team | /* Copyright (C) 2004 The KOS Team |
Copyright (C) 1999 Free Software Foundation, Inc. | |
This program is free software; you can redistribute it and/or | This program is free software; you can redistribute it and/or |
modify it under the terms of the GNU General Public License | modify it under the terms of the GNU General Public License |
|
|
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, | Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, |
USA. | USA. |
*/ | */ |
#define ASM_SOURCE 1 | |
| #include "segment.h" |
.file "irq_wrappers.S" | .file "irq_wrappers.S" |
| |
|
|
/** The variable holding the nested level of the IRQ handlers */ | /** The variable holding the nested level of the IRQ handlers */ |
.extern sos_irq_nested_level_counter | .extern sos_irq_nested_level_counter |
| |
| /** Update the interrupted current thread's CPU context |
| Its prototype is: |
| sos_thread_prepare_irq_servicing(struct sos_cpu_state *); |
| */ |
| .extern sos_thread_prepare_irq_servicing |
| |
| /** Update the kernel TSS in case we are switching to a thread in user |
| mode in order to come back into the correct kernel stack */ |
| .extern sos_cpu_context_update_kernel_tss |
| |
| /** Select a thread to set on CPU (this enables user-threads |
| preemption) and configure the MMU to match that of the destination |
| thread. |
| Its prototype is: |
| struct sos_cpu_state * // Selected CPU context |
| sos_thread_prepare_irq_switch_back(); |
| */ |
| .extern sos_thread_prepare_irq_switch_back |
| |
/* These pre-handlers are for IRQ (Master PIC) */ | /* These pre-handlers are for IRQ (Master PIC) */ |
.irp id, 0,1,2,3,4,5,6,7 | .irp id, 0,1,2,3,4,5,6,7 |
| |
|
|
pushw %fs | pushw %fs |
pushw %gs | pushw %gs |
| |
| /* Set correct kernel segment descriptors' value */ |
| movw $SOS_BUILD_SEGMENT_REG_VALUE(0, 0, SOS_SEG_KDATA), %di |
| pushw %di ; popw %ds |
| pushw %di ; popw %es |
| pushw %di ; popw %fs |
| pushw %di ; popw %gs |
| |
/* | /* |
* Increment IRQ nested level | * Increment IRQ nested level |
*/ | */ |
incl sos_irq_nested_level_counter | incl sos_irq_nested_level_counter |
| /* Outermost IRQ only: store the interrupted context |
| of the current thread */ |
| cmpl $1, sos_irq_nested_level_counter |
| jne 1f |
| pushl %esp |
| call sos_thread_prepare_irq_servicing |
| addl $4, %esp |
| |
| 1: |
| |
/* Send EOI to PIC. See Intel 8259 datasheet | /* Send EOI to PIC. See Intel 8259 datasheet |
available on Kos website */ | available on Kos website */ |
|
|
outb %al, $0x20 | outb %al, $0x20 |
| |
/* | /* |
* Call the handler with the IRQ number and the | * Call the handler with IRQ number as argument |
* address of the stored CPU context as arguments | |
pushl %esp | |
leal sos_irq_handler_array,%edi | leal sos_irq_handler_array,%edi |
call *\id*4(%edi) | call *\id*4(%edi) |
/* Unallocate the arguments passed to the handler */ | addl $4, %esp |
addl $8, %esp | |
/* | /* |
* Decrement IRQ nested level | * Decrement IRQ nested level |
|
|
| |
2: /* No: all right ! */ | 2: /* No: all right ! */ |
| |
| /* Was this the outermost IRQ handler ? */ |
| jnz 3f |
| |
| /* Yes: reschedule */ |
| call sos_thread_prepare_irq_switch_back |
| /* Establish new context: context switch ! */ |
| movl %eax, %esp |
| |
| /* Prepare kernel TSS in case we are switching to a |
| user thread: we make sure that we will come back |
| into the kernel at a correct stack location */ |
| pushl %esp /* Pass the location of the context we are |
| restoring to the function */ |
| call sos_cpu_context_update_kernel_tss |
| addl $4, %esp |
| 3: |
/* Restore the context */ | /* Restore the context */ |
popw %gs | popw %gs |
popw %fs | popw %fs |
|
|
pushw %fs | pushw %fs |
pushw %gs | pushw %gs |
| |
| /* Set correct kernel segment descriptors' value */ |
| movw $SOS_BUILD_SEGMENT_REG_VALUE(0, 0, SOS_SEG_KDATA), %di |
| pushw %di ; popw %ds |
| pushw %di ; popw %es |
| pushw %di ; popw %fs |
| pushw %di ; popw %gs |
| |
/* | /* |
* Increment IRQ nested level | * Increment IRQ nested level |
*/ | */ |
incl sos_irq_nested_level_counter | incl sos_irq_nested_level_counter |
| /* Outermost IRQ only: store the interrupted context |
| of the current thread */ |
| cmpl $1, sos_irq_nested_level_counter |
| jne 1f |
| pushl %esp |
| call sos_thread_prepare_irq_servicing |
| addl $4, %esp |
| |
| 1: |
| |
/* Send EOI to PIC. See Intel 8259 datasheet | /* Send EOI to PIC. See Intel 8259 datasheet |
available on Kos website */ | available on Kos website */ |
|
|
outb %al, $0x20 | outb %al, $0x20 |
| |
/* | /* |
* Call the handler with the IRQ number and the | * Call the handler with IRQ number as argument |
* address of the stored CPU context as arguments | |
pushl %esp | |
leal sos_irq_handler_array,%edi | leal sos_irq_handler_array,%edi |
call *\id*4(%edi) | call *\id*4(%edi) |
/* Unallocate the arguments passed to the handler */ | addl $4, %esp |
addl $8, %esp | |
/* | /* |
* Decrement IRQ nested level | * Decrement IRQ nested level |
|
|
| |
2: /* No: all right ! */ | 2: /* No: all right ! */ |
| |
| /* Was this the outermost IRQ handler ? */ |
| jnz 3f |
| |
| /* Yes: reschedule */ |
| call sos_thread_prepare_irq_switch_back |
| /* Establish new context: context switch ! */ |
| movl %eax, %esp |
| |
| /* Prepare kernel TSS in case we are switching to a |
| user thread: we make sure that we will come back |
| into the kernel at a correct stack location */ |
| pushl %esp /* Pass the location of the context we are |
| restoring to the function */ |
| call sos_cpu_context_update_kernel_tss |
| addl $4, %esp |
| 3: |
/* Restore the context */ | /* Restore the context */ |
popw %gs | popw %gs |
popw %fs | popw %fs |
| |
/tmp/sos-code-article6.75/hwcore/mm_context.c (1970-01-01 01:00:00.000000000 +0100) | ../sos-code-article7/hwcore/mm_context.c (2005-02-05 17:52:19.000000000 +0100)

| /* Copyright (C) 2005 David Decotigny |
| |
| This program is free software; you can redistribute it and/or |
| modify it under the terms of the GNU General Public License |
| as published by the Free Software Foundation; either version 2 |
| of the License, or (at your option) any later version. |
| |
| This program is distributed in the hope that it will be useful, |
| but WITHOUT ANY WARRANTY; without even the implied warranty of |
| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| GNU General Public License for more details. |
| |
| You should have received a copy of the GNU General Public License |
| along with this program; if not, write to the Free Software |
| Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, |
| USA. |
| */ |
| |
| #include <hwcore/paging.h> |
| #include <hwcore/irq.h> |
| |
| #include <sos/assert.h> |
| #include <sos/list.h> |
| #include <sos/klibc.h> |
| #include <sos/physmem.h> |
| #include <sos/kmem_slab.h> |
| #include <sos/kmem_vmm.h> |
| |
| #include "mm_context.h" |
| |
| |
| /** |
| * Definition of an MMU context. |
| */ |
| struct sos_mm_context |
| { |
| /** Physical address of the PD for this MMU context */ |
| sos_paddr_t paddr_PD; |
| |
| /** Virtual address where it is mapped into the Kernel space */ |
| sos_vaddr_t vaddr_PD; |
| |
| /** Reference counter for this mm_context */ |
| sos_ui32_t ref_cnt; |
| |
| /** List of MMU contexts in the system */ |
| struct sos_mm_context *prev, *next; |
| }; |
| |
| |
| /** |
| * The cache of mm_context structures |
| */ |
| struct sos_kslab_cache * cache_struct_mm_context; |
| |
| |
| /** |
| * The current MMU context corresponding to the current configuration |
| * of the MMU. |
| */ |
| static struct sos_mm_context *current_mm_context = NULL; |
| |
| |
| /** |
| * System-wide list of all the mm_contexts in the system |
| */ |
| static struct sos_mm_context *list_mm_context = NULL; |
| /* The "= NULL" here is FUNDAMENTAL, because paging.c must work |
| correctly, ie synch_PDE below must behave reasonably (eg do |
| nothing), until the mm_context subsystem has been initialized. */ |
| |
| |
| sos_ret_t sos_mm_context_subsystem_setup() |
| { |
| struct sos_mm_context * initial_mm_context; |
| sos_ret_t retval; |
| |
| /* Create the new mm_context cache */ |
| cache_struct_mm_context = sos_kmem_cache_create("struct mm_context", |
| sizeof(struct sos_mm_context), |
| 1, 0, |
| SOS_KSLAB_CREATE_MAP); |
| if (NULL == cache_struct_mm_context) |
| return -SOS_ENOMEM; |
| |
| /* |
| * Allocate the initial mm_context structure |
| */ |
| initial_mm_context |
| = (struct sos_mm_context*) sos_kmem_cache_alloc(cache_struct_mm_context, |
| SOS_KSLAB_ALLOC_ATOMIC); |
| if (NULL == initial_mm_context) |
| return -SOS_ENOMEM; |
| |
| /* Retrieve the address of the current page where the PD lies */ |
| initial_mm_context->paddr_PD = sos_paging_get_current_PD_paddr(); |
| |
| /* |
| * Map it somewhere in kernel virtual memory |
| */ |
| |
| /* Allocate 1 page of kernel Virtual memory */ |
| initial_mm_context->vaddr_PD = sos_kmem_vmm_alloc(1, 0); |
| if (initial_mm_context->vaddr_PD == 0) |
| return -SOS_ENOMEM; |
| |
| /* Map the PD at this virtual address: it will thus be mapped 2 |
| times (1 time for the mirroring, 1 time for mm_context) ! */ |
| retval = sos_paging_map(initial_mm_context->paddr_PD, |
| initial_mm_context->vaddr_PD, |
| FALSE, |
| SOS_VM_MAP_PROT_READ |
| | SOS_VM_MAP_PROT_WRITE); |
| if (SOS_OK != retval) |
| return retval; |
| |
| /* Initialize the initial list of mm_contexts */ |
| list_singleton(list_mm_context, initial_mm_context); |
| |
| /* We just created this mm_context: mark it as "referenced" */ |
| initial_mm_context->ref_cnt ++; |
| |
| /* We are actually already using it ! */ |
| initial_mm_context->ref_cnt ++; /* ie reference it a 2nd time ! */ |
| current_mm_context = initial_mm_context; |
| |
| return SOS_OK; |
| } |
| |
| |
| struct sos_mm_context * sos_mm_context_create(void) |
| { |
| sos_ui32_t flags; |
| struct sos_mm_context *mmctxt; |
| sos_ui32_t * master_pd = (sos_ui32_t*) current_mm_context->vaddr_PD; |
| sos_ui32_t * new_pd; |
| int index_in_pd; |
| |
| /* |
| * Allocate the initial mm_context structure |
| */ |
| mmctxt = (struct sos_mm_context*) sos_kmem_cache_alloc(cache_struct_mm_context, 0); |
| if (NULL == mmctxt) |
| return NULL; |
| |
| /* Allocate a new page for the new PD and map it into the kernel */ |
| mmctxt->vaddr_PD = sos_kmem_vmm_alloc(1, SOS_KMEM_VMM_MAP); |
| if (mmctxt->vaddr_PD == 0) |
| { |
| sos_kmem_cache_free((sos_vaddr_t) mmctxt); |
| return NULL; |
| } |
| |
| /* Retrieve its physical address */ |
| mmctxt->paddr_PD = sos_paging_get_paddr(mmctxt->vaddr_PD); |
| if (mmctxt->paddr_PD == 0) |
| { |
| sos_kmem_cache_free((sos_vaddr_t) mmctxt); |
| return NULL; |
| } |
| |
| /* Fill it with zeros */ |
| memset((void*)mmctxt->vaddr_PD, 0x0, SOS_PAGE_SIZE); |
| |
| |
| /* Synchronize it with the master Kernel MMU context. Stop just |
| before the mirroring ! */ |
| new_pd = (sos_ui32_t*) mmctxt->vaddr_PD; |
| for (index_in_pd = 0 ; |
| index_in_pd < (SOS_PAGING_MIRROR_VADDR >> 22) ; /* 1 PDE = 1 PT |
| = 1024 Pages |
| = 4MB */ |
| index_in_pd ++) |
| { |
| /* Copy the master's configuration */ |
| new_pd[index_in_pd] = master_pd[index_in_pd]; |
| |
| /* We DON'T mark the underlying PT and pages as referenced |
| because all the PD are equivalent in the kernel space: as |
| soon as a page is mapped in the kernel, it is mapped by X |
| address spaces, and as soon as it is unmapped by 1 address |
| space, it is unmapped in all the others. So that for X |
| address spaces, the reference counter will be either 0 or X, |
| and not something else: using the reference counter correctly |
| won't be of any use and would consume some time in updating it. */ |
| } |
| |
| /* Setup the mirroring for the new address space */ |
| new_pd[SOS_PAGING_MIRROR_VADDR >> 22] |
| = sos_paging_compute_kernel_pde_value(mmctxt->paddr_PD); |
| |
| /* Update the reference count of this PD because it is mapped a |
| second time through the mirroring */ |
| SOS_ASSERT_FATAL(sos_physmem_ref_physpage_at(mmctxt->paddr_PD) > 0); |
| |
| /* Mark the mm_context as "referenced" */ |
| mmctxt->ref_cnt = 1; |
| |
| /* Add it to the list of MMU contexts */ |
| sos_disable_IRQs(flags); |
| list_add_tail(list_mm_context, mmctxt); |
| sos_restore_IRQs(flags); |
| |
| return mmctxt; |
| } |
| |
| |
| sos_ret_t sos_mm_context_unref(struct sos_mm_context *mmctxt) |
| { |
| sos_ui32_t flags; |
| |
| sos_disable_IRQs(flags); |
| |
| /* A valid mmctxt is one which is not yet unreferenced */ |
| SOS_ASSERT_FATAL(mmctxt->ref_cnt > 0); |
| |
| /* Unreference it */ |
| mmctxt->ref_cnt --; |
| |
| /* If somebody is still using it, don't release it now */ |
| if (mmctxt->ref_cnt > 0) |
| { |
| sos_restore_IRQs(flags); |
| return SOS_OK; |
| } |
| |
| /* If nobody uses it, then it cannot be the current mm_context ! */ |
| SOS_ASSERT_FATAL(mmctxt != current_mm_context); |
| |
| /* Remove it from the list of mm_contexts */ |
| list_delete(list_mm_context, mmctxt); |
| |
| sos_restore_IRQs(flags); |
| |
| /* Undo the mirroring for the MMU context */ |
| SOS_ASSERT_FATAL(sos_physmem_unref_physpage(mmctxt->paddr_PD) == FALSE); |
| |
| /* Unmap it from the kernel */ |
| sos_kmem_vmm_free(mmctxt->vaddr_PD); |
| |
| memset(mmctxt, 0x0, sizeof(*mmctxt)); |
| |
| return SOS_OK; |
| } |
| |
| |
| sos_ret_t sos_mm_context_ref(struct sos_mm_context *mmctxt) |
| { |
| sos_ui32_t flags; |
| |
| sos_disable_IRQs(flags); |
| |
| /* A valid mmctxt is one which is not yet unreferenced */ |
| SOS_ASSERT_FATAL(mmctxt->ref_cnt > 0); |
| |
| /* Reference it once again */ |
| mmctxt->ref_cnt ++; |
| |
| sos_restore_IRQs(flags); |
| |
| return SOS_OK; |
| } |
| |
| |
| sos_ret_t sos_mm_context_switch_to(struct sos_mm_context *mmctxt) |
| { |
| SOS_ASSERT_FATAL(NULL != mmctxt); |
| SOS_ASSERT_FATAL(mmctxt->ref_cnt > 0); |
| SOS_ASSERT_FATAL(current_mm_context->ref_cnt > 0); |
| if (mmctxt != current_mm_context) |
| { |
| sos_ui32_t flags; |
| struct sos_mm_context * prev_mm_context = current_mm_context; |
| |
| /* This is the most dangerous part of the whole thing. If we set |
| the wrong MMU configuration in mmctxt, this will hang or |
| reboot the machine... */ |
| sos_paging_set_current_PD_paddr(mmctxt->paddr_PD); |
| |
| /* Exchange the mm_contexts */ |
| current_mm_context = mmctxt; |
| |
| /* Update the reference counts */ |
| sos_disable_IRQs(flags); |
| mmctxt->ref_cnt ++; |
| sos_mm_context_unref(prev_mm_context); |
| sos_restore_IRQs(flags); |
| } |
| |
| return SOS_OK; |
| } |
| |
| |
| struct sos_mm_context *get_current_mm_context() |
| { |
| SOS_ASSERT_FATAL(current_mm_context->ref_cnt > 0); |
| return current_mm_context; |
| } |
| |
| |
| /* ****************************************************** |
| * Reserved functions |
| */ |
| |
| |
| sos_ret_t sos_mm_context_synch_kernel_PDE(unsigned int index_in_pd, |
| sos_ui32_t pde) |
| { |
| sos_ui32_t flags; |
| struct sos_mm_context * dest_mm_context; |
| int nb_mm_contexts; |
| |
| sos_disable_IRQs(flags); |
| list_foreach_forward(list_mm_context, dest_mm_context, nb_mm_contexts) |
| { |
| sos_ui32_t * dest_pd; |
| |
| SOS_ASSERT_FATAL(dest_mm_context->ref_cnt > 0); |
| |
| dest_pd = (sos_ui32_t*) dest_mm_context->vaddr_PD; |
| dest_pd[index_in_pd] = pde; |
| } |
| sos_restore_IRQs(flags); |
| |
| return SOS_OK; |
| } |
| |
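To summarize how these primitives fit together, here is a minimal usage sketch (the caller shown is hypothetical and error handling is omitted; this is not code taken from SOS):

  /* Hypothetical caller: create a new address space, run in it, drop it */
  struct sos_mm_context *mmctxt = sos_mm_context_create();  /* ref_cnt = 1 */

  sos_mm_context_switch_to(mmctxt);  /* loads its PD into the MMU and takes
                                        a reference on it (ref_cnt = 2) */
  /* ... execute code inside the new address space ... */

  sos_mm_context_unref(mmctxt);      /* drop the creator's reference; the PD
                                        is actually released only once nobody
                                        (not even the MMU) references it */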
/tmp/sos-code-article6.75/sos/calcload.c (1970-01-01 01:00:00.000000000 +0100
) |
|
../sos-code-article7/sos/calcload.c (2005-02-05 17:52:20.000000000 +0100
) |
|
|
|
| /* Copyright (C) 2004 David Decotigny |
| |
| This program is free software; you can redistribute it and/or |
| modify it under the terms of the GNU General Public License |
| as published by the Free Software Foundation; either version 2 |
| of the License, or (at your option) any later version. |
| |
| This program is distributed in the hope that it will be useful, |
| but WITHOUT ANY WARRANTY; without even the implied warranty of |
| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| GNU General Public License for more details. |
| |
| You should have received a copy of the GNU General Public License |
| along with this program; if not, write to the Free Software |
| Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, |
| USA. |
| */ |
| |
| #include <hwcore/irq.h> |
| #include <sos/kmalloc.h> |
| #include <sos/assert.h> |
| #include <sos/calcload.h> |
| |
| |
| /** |
| * Multiplicative factor to display digits after the decimal dot. The |
| * higher the value, the higher the precision, but the higher the risk |
| * that the value you get is incorrect (integer overflow). |
| * |
| * The CPU ratios will be correctly displayed as long as: |
| * 2^32 > (900 * HZ * 100 * SOS_LOAD_DISPLAY_MULTIPLICATION_FACTOR) |
| * The "900" above means 900s because the load is computed over 15mn (900s). |
| * HZ is the frequency of the timer tick because the load is updated |
| * at each timer tick. |
| * The "100" above is the multiplication factor to get the ratio value |
| * between 0 and 100 (instead of 0-1). |
| * |
 | * The maximum sustainable CPU load that will be correctly displayed |
| * is given by the formula: |
| * (2^32 - 1) / (900 * HZ * SOS_LOAD_DISPLAY_MULTIPLICATION_FACTOR) |
| * With HZ=100, these maximum sustainable loads are respectively |
| * 47.721, 477.21 and 4772.1 with |
| * SOS_LOAD_DISPLAY_MULTIPLICATION_FACTOR being respectively 1000, 100 |
| * and 10. |
| * |
 | * Hence, among these formulas, the most restrictive one is the one |
| * concerning the CPU ratios (because of the "100" factor). Actually, |
| * for HZ=100, the only correct value is 10. |
| */ |
| #define SOS_LOAD_DISPLAY_MULTIPLICATION_FACTOR 10 /* 1/10 resolution */ |
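As a quick numeric check of the bounds above, here is an illustrative stand-alone snippet (not part of SOS); it assumes HZ = 100 and the factor of 10 defined above:

  /* Illustrative only: verify the overflow bounds discussed above,
     assuming HZ = 100 and a display factor of 10. */
  #include <stdio.h>

  int main(void)
  {
    const unsigned long long hz = 100, factor = 10;

    /* Worst case of the CPU-ratio computation: at most 900*HZ "running"
       ticks over 15mn, multiplied by 100 (percentage) and by the factor */
    const unsigned long long ratio_product = 900ULL * hz * 100 * factor;

    /* Highest sustainable load whose accumulated sum still fits in 32 bits */
    const unsigned long long max_load = 0xffffffffULL / (900ULL * hz * factor);

    printf("ratio product: %llu (must stay below 2^32 = 4294967296)\n",
           ratio_product);                            /* prints 90000000 */
    printf("max sustainable load: ~%llu\n", max_load); /* prints 4772 */
    return 0;
  }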
| |
| |
| /** |
| * To compute the load, at each clock tick we store the number of |
| * threads ready in kernel/user mode, and the kind of the thread that |
| * is executing (user or kernel mode): this is stored in |
| * current_load_entry. We then compute the sum of these numbers over 3 |
| * periods of time: 1 minute, 5 minutes, 15 minutes. This is the role |
| * of the sliding windows data structures. A "sliding window" is only |
| * the synthetic sum of these figures, not a real sliding window of |
 | load_entries. At each timer tick and every time the load is updated, |
| * the computations are in O(1). |
| * |
| * All the sliding windows share the main "recorded_load" array of |
| * load_entries for that; its role is to store the last 15mns of load |
| * data, which encompasses the data for the 1mn, 5mn and 15mn sliding |
| * windows. |
| */ |
| |
| /* Max number of seconds that we record (ie number of entries in |
| recorded_loads) */ |
| #define NB_SECS 900 |
| |
| |
| /* An entry in the longest sliding window */ |
| struct load_entry |
| { |
| sos_ui32_t nb_ticks; |
| |
| sos_ui32_t nb_user_running; |
| sos_ui32_t nb_kernel_running; |
| |
| sos_ui32_t nb_user_ready; |
| sos_ui32_t nb_kernel_ready; |
| }; |
| |
| struct load_entry current_load_entry; |
| |
| |
| /* The longest sliding window */ |
| struct recorded_loads |
| { |
| sos_ui32_t most_recent; |
| sos_ui32_t max_entries; |
| |
| struct load_entry *load_entries; |
| }; |
| |
| #define LOAD_GET_ENTRY(loads,age) \ |
| (&((loads).load_entries[( (loads).max_entries + (loads).most_recent - (age))\ |
| % ((loads).max_entries)])) |
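For instance (a purely illustrative trace): with max_entries = 900 and most_recent = 3, LOAD_GET_ENTRY(loads, 0) designates slot 3 (the most recent second), LOAD_GET_ENTRY(loads, 1) designates slot 2, and LOAD_GET_ENTRY(loads, 5) wraps around to slot (900 + 3 - 5) % 900 = 898: the array is used as a circular buffer whose head advances by one slot every second.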
| |
| /* A sliding window, we manage one for each time interval */ |
| struct sliding_window |
| { |
| sos_ui32_t max_entries; |
| sos_ui32_t nb_entries; |
| |
| sos_ui32_t sigma_nb_ticks; |
| sos_ui32_t sigma_nb_user_running; |
| sos_ui32_t sigma_nb_kernel_running; |
| sos_ui32_t sigma_nb_user_ready; |
| sos_ui32_t sigma_nb_kernel_ready; |
| }; |
| |
| |
| /* The main sliding window */ |
| static struct recorded_loads recorded_loads; |
| |
 | /* The sliding windows for 3 time intervals: 1min, 5min, 15min */ |
| static struct sliding_window load_1mn, load_5mn, load_15mn; |
| |
| /* Forward declaration */ |
| static struct sos_timeout_action calcload_timeout; |
| static void calcload_routine(struct sos_timeout_action *a); |
| |
| |
| static void _reinit_load_subsystem() |
| { |
| memset(& recorded_loads, 0x0, sizeof(recorded_loads)); |
| memset(& current_load_entry, 0x0, sizeof(struct load_entry)); |
| memset(& load_1mn, 0x0, sizeof(load_1mn)); |
| memset(& load_5mn, 0x0, sizeof(load_5mn)); |
| memset(& load_15mn, 0x0, sizeof(load_15mn)); |
| } |
| |
| |
| sos_ret_t sos_load_subsystem_setup(void) |
| { |
| struct sos_time period; |
| _reinit_load_subsystem(); |
| |
| if (recorded_loads.load_entries) |
| sos_kfree((sos_vaddr_t) recorded_loads.load_entries); |
| _reinit_load_subsystem(); |
| |
 | /* Allocate 900 entries to store 15mn of data (because 15 minutes = |
| 900s) */ |
| recorded_loads.max_entries = NB_SECS; |
| recorded_loads.load_entries |
| = (struct load_entry*) sos_kmalloc(NB_SECS * sizeof(struct load_entry), |
| 0); |
| if (! recorded_loads.load_entries) |
| { |
| return -SOS_ENOMEM; |
| } |
| |
| /* Compute the number of entries in each sliding window */ |
| load_1mn.max_entries = 60; |
| load_5mn.max_entries = 300; |
| load_15mn.max_entries = 900; |
| |
| /* Program the load computation action */ |
| sos_time_init_action(& calcload_timeout); |
| period.sec = 1; period.nanosec = 0; |
| return sos_time_register_action_relative(& calcload_timeout, |
| & period, |
| calcload_routine, |
| NULL); |
| } |
| |
| |
| /* Shift the given sliding window to record the current_load_entry */ |
| static void update_sliding_window(struct sliding_window *w) |
| { |
| /* |
| * Compute the value of the sum over the sliding window |
| */ |
| |
| /* Take the new value into account */ |
| w->sigma_nb_ticks += current_load_entry.nb_ticks; |
| w->sigma_nb_user_running += current_load_entry.nb_user_running; |
| w->sigma_nb_kernel_running += current_load_entry.nb_kernel_running; |
| w->sigma_nb_user_ready += current_load_entry.nb_user_ready; |
| w->sigma_nb_kernel_ready += current_load_entry.nb_kernel_ready; |
| |
| /* Remove the oldest entry, if it is going to be popped out of the |
| sliding window */ |
| if (w->nb_entries < w->max_entries) |
| { |
| w->nb_entries ++; |
| } |
| else |
| { |
| struct load_entry * oldest_entry; |
| oldest_entry = LOAD_GET_ENTRY(recorded_loads, w->nb_entries - 1); |
| w->sigma_nb_ticks -= oldest_entry->nb_ticks; |
| w->sigma_nb_user_running -= oldest_entry->nb_user_running; |
| w->sigma_nb_kernel_running -= oldest_entry->nb_kernel_running; |
| w->sigma_nb_user_ready -= oldest_entry->nb_user_ready; |
| w->sigma_nb_kernel_ready -= oldest_entry->nb_kernel_ready; |
| } |
| } |
| |
| |
| /* The timeout action responsible for computing the CPU load */ |
| static void calcload_routine(struct sos_timeout_action *a) |
| { |
| struct load_entry * new_head; |
| struct sos_time delay; |
| |
| if (! recorded_loads.load_entries) |
| return; |
| |
| /* Update the sliding windows */ |
| update_sliding_window(& load_1mn); |
| update_sliding_window(& load_5mn); |
| update_sliding_window(& load_15mn); |
| |
| /* Move the head of the list forward */ |
| recorded_loads.most_recent |
| = (recorded_loads.most_recent + 1) % recorded_loads.max_entries; |
| |
| /* Update the new head */ |
| new_head = & recorded_loads.load_entries[recorded_loads.most_recent]; |
| memcpy(new_head, & current_load_entry, sizeof(current_load_entry)); |
| |
| /* Reset the current load entry */ |
| memset(& current_load_entry, 0x0, sizeof(current_load_entry)); |
| |
 | /* Program the next occurrence of the action */ |
| delay.sec = 1; |
| delay.nanosec = 0; |
| sos_time_register_action_relative(a, & delay, calcload_routine, NULL); |
| } |
| |
| |
| sos_ret_t sos_load_do_timer_tick(sos_bool_t cur_is_user, |
| sos_ui32_t nb_user_ready, |
| sos_ui32_t nb_kernel_ready) |
| { |
| sos_ui32_t flags; |
| |
| sos_disable_IRQs(flags); |
| current_load_entry.nb_ticks ++; |
| current_load_entry.nb_user_ready += nb_user_ready; |
| current_load_entry.nb_kernel_ready += nb_kernel_ready; |
| if (cur_is_user) |
| current_load_entry.nb_user_running ++; |
| else |
| current_load_entry.nb_kernel_running ++; |
| sos_restore_IRQs(flags); |
| |
| return SOS_OK; |
| } |
| |
| |
| void sos_load_to_string(char dest[11], sos_ui32_t load_value) |
| { |
| sos_bool_t print0 = FALSE; |
| sos_ui32_t d; |
| |
| #define PUTCH(c) ({ *dest = (c); dest ++; }) |
| |
| for (d = 1000000000UL ; d > 0 ; d /= 10) |
| { |
| sos_ui32_t digit = (load_value / d) % 10; |
| |
| if (digit > 0) |
| { |
| PUTCH(digit + '0'); |
| print0 = TRUE; |
| } |
| else if (print0) |
| PUTCH('0'); |
| |
| if (d == SOS_LOAD_DISPLAY_MULTIPLICATION_FACTOR) |
| { |
| if (! print0) |
| PUTCH('0'); |
| |
| PUTCH('.'); |
| print0 = TRUE; |
| } |
| } |
| *dest = '\0'; |
| } |
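A minimal usage sketch (illustrative values only, assuming the factor of 10 defined above, since the load figures returned by the sos_load_get_*load() functions are already scaled by that factor):

  char buf[11];
  sos_load_to_string(buf, 23);    /* buf now holds "2.3"   */
  sos_load_to_string(buf, 4772);  /* buf now holds "477.2" */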
| |
| |
| void sos_load_get_uload(sos_ui32_t * _load_1mn, |
| sos_ui32_t * _load_5mn, |
| sos_ui32_t * _load_15mn) |
| { |
| sos_ui32_t flags; |
| |
| if (load_1mn.sigma_nb_ticks < 1) |
| return; |
| |
| sos_disable_IRQs(flags); |
| *_load_1mn = ( load_1mn.sigma_nb_user_ready |
| + load_1mn.sigma_nb_user_running) |
| * SOS_LOAD_DISPLAY_MULTIPLICATION_FACTOR |
| / load_1mn.sigma_nb_ticks; |
| *_load_5mn = ( load_5mn.sigma_nb_user_ready |
| + load_5mn.sigma_nb_user_running) |
| * SOS_LOAD_DISPLAY_MULTIPLICATION_FACTOR |
| / load_5mn.sigma_nb_ticks; |
| *_load_15mn = ( load_15mn.sigma_nb_user_ready |
| + load_15mn.sigma_nb_user_running) |
| * SOS_LOAD_DISPLAY_MULTIPLICATION_FACTOR |
| / load_15mn.sigma_nb_ticks; |
| sos_restore_IRQs(flags); |
| } |
| |
| |
| void sos_load_get_sload(sos_ui32_t * _load_1mn, |
| sos_ui32_t * _load_5mn, |
| sos_ui32_t * _load_15mn) |
| { |
| sos_ui32_t flags; |
| |
| if (load_1mn.sigma_nb_ticks < 1) |
| return; |
| |
| /* The "IDLE" thread is always either ready or running by definition */ |
| SOS_ASSERT_FATAL(load_1mn.sigma_nb_kernel_ready |
| + load_1mn.sigma_nb_kernel_running |
| >= load_1mn.sigma_nb_ticks); |
| |
| /* Remove the IDLE thread from the load calculation */ |
| sos_disable_IRQs(flags); |
| *_load_1mn = ( load_1mn.sigma_nb_kernel_ready |
| + load_1mn.sigma_nb_kernel_running |
| - load_1mn.sigma_nb_ticks) |
| * SOS_LOAD_DISPLAY_MULTIPLICATION_FACTOR |
| / load_1mn.sigma_nb_ticks; |
| *_load_5mn = ( load_5mn.sigma_nb_kernel_ready |
| + load_5mn.sigma_nb_kernel_running |
| - load_5mn.sigma_nb_ticks) |
| * SOS_LOAD_DISPLAY_MULTIPLICATION_FACTOR |
| / load_5mn.sigma_nb_ticks; |
| *_load_15mn = ( load_15mn.sigma_nb_kernel_ready |
| + load_15mn.sigma_nb_kernel_running |
| - load_15mn.sigma_nb_ticks) |
| * SOS_LOAD_DISPLAY_MULTIPLICATION_FACTOR |
| / load_15mn.sigma_nb_ticks; |
| sos_restore_IRQs(flags); |
| } |
| |
| |
| void sos_load_get_uratio(sos_ui32_t * _load_1mn, |
| sos_ui32_t * _load_5mn, |
| sos_ui32_t * _load_15mn) |
| { |
| sos_ui32_t flags; |
| |
| if (load_1mn.sigma_nb_ticks < 1) |
| return; |
| |
| sos_disable_IRQs(flags); |
| *_load_1mn = load_1mn.sigma_nb_user_running |
| * 100 * SOS_LOAD_DISPLAY_MULTIPLICATION_FACTOR |
| / load_1mn.sigma_nb_ticks; |
| *_load_5mn = load_5mn.sigma_nb_user_running |
| * 100 * SOS_LOAD_DISPLAY_MULTIPLICATION_FACTOR |
| / load_5mn.sigma_nb_ticks; |
| *_load_15mn = load_15mn.sigma_nb_user_running |
| * 100 * SOS_LOAD_DISPLAY_MULTIPLICATION_FACTOR |
| / load_15mn.sigma_nb_ticks; |
| sos_restore_IRQs(flags); |
| } |
| |
| |
| void sos_load_get_sratio(sos_ui32_t * _load_1mn, |
| sos_ui32_t * _load_5mn, |
| sos_ui32_t * _load_15mn) |
| { |
| sos_ui32_t flags; |
| |
| if (load_1mn.sigma_nb_ticks < 1) |
| return; |
| |
 | /* Don't remove the CPU occupation ratio of the IDLE thread |
| here... */ |
| sos_disable_IRQs(flags); |
| *_load_1mn = load_1mn.sigma_nb_kernel_running |
| * 100 * SOS_LOAD_DISPLAY_MULTIPLICATION_FACTOR |
| / load_1mn.sigma_nb_ticks; |
| *_load_5mn = load_5mn.sigma_nb_kernel_running |
| * 100 * SOS_LOAD_DISPLAY_MULTIPLICATION_FACTOR |
| / load_5mn.sigma_nb_ticks; |
| *_load_15mn = load_15mn.sigma_nb_kernel_running |
| * 100 * SOS_LOAD_DISPLAY_MULTIPLICATION_FACTOR |
| / load_15mn.sigma_nb_ticks; |
| sos_restore_IRQs(flags); |
| } |
| |
/tmp/sos-code-article6.75/sos/kthread.c (2005-01-04 04:13:53.000000000 +0100
) |
|
../sos-code-article7/sos/kthread.c (1970-01-01 01:00:00.000000000 +0100
) |
|
|
|
/* Copyright (C) 2004 David Decotigny | |
| |
This program is free software; you can redistribute it and/or | |
modify it under the terms of the GNU General Public License | |
as published by the Free Software Foundation; either version 2 | |
of the License, or (at your option) any later version. | |
| |
This program is distributed in the hope that it will be useful, | |
but WITHOUT ANY WARRANTY; without even the implied warranty of | |
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
GNU General Public License for more details. | |
| |
You should have received a copy of the GNU General Public License | |
along with this program; if not, write to the Free Software | |
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, | |
USA. | |
*/ | |
| |
#include <sos/physmem.h> | |
#include <sos/kmem_slab.h> | |
#include <sos/kmalloc.h> | |
#include <sos/klibc.h> | |
#include <sos/list.h> | |
#include <sos/assert.h> | |
| |
#include <hwcore/irq.h> | |
| |
#include "kthread.h" | |
| |
| |
/** | |
* The size of the stack of a kernel thread | |
*/ | |
#define SOS_KTHREAD_STACK_SIZE (1*SOS_PAGE_SIZE) | |
| |
| |
/** | |
* The identifier of the thread currently running on CPU. | |
* | |
* We only support a SINGLE processor, ie a SINGLE kernel thread | |
* running at any time in the system. This greatly simplifies the | |
* implementation of the system, since we don't have to complicate | |
* things in order to retrieve the identifier of the threads running | |
* on the CPU. On multiprocessor systems the current_kthread below is | |
* an array indexed by the id of the CPU, so that the challenge is to | |
* retrieve the identifier of the CPU. This is usually done based on | |
* the stack address (Linux implementation) or on some form of TLS | |
* ("Thread Local Storage": can be implemented by way of LDTs for the | |
* processes, accessed through the fs or gs registers). | |
*/ | |
static volatile struct sos_kthread *current_kthread = NULL; | |
| |
| |
/* | |
* The list of kernel threads currently in the system. | |
* | |
* @note We could have used current_kthread for that... | |
*/ | |
static struct sos_kthread *kthread_list = NULL; | |
| |
| |
/** | |
* The Cache of kthread structures | |
*/ | |
static struct sos_kslab_cache *cache_kthread; | |
| |
| |
struct sos_kthread *sos_kthread_get_current() | |
{ | |
SOS_ASSERT_FATAL(current_kthread->state == SOS_KTHR_RUNNING); | |
return (struct sos_kthread*)current_kthread; | |
} | |
| |
| |
inline static sos_ret_t _set_current(struct sos_kthread *thr) | |
{ | |
SOS_ASSERT_FATAL(thr->state == SOS_KTHR_READY); | |
current_kthread = thr; | |
current_kthread->state = SOS_KTHR_RUNNING; | |
return SOS_OK; | |
} | |
| |
| |
sos_ret_t sos_kthread_subsystem_setup(sos_vaddr_t init_thread_stack_base_addr, | |
sos_size_t init_thread_stack_size) | |
{ | |
struct sos_kthread *myself; | |
| |
/* Allocate the cache of kthreads */ | |
cache_kthread = sos_kmem_cache_create("kthread", | |
sizeof(struct sos_kthread), | |
2, | |
0, | |
SOS_KSLAB_CREATE_MAP | |
| SOS_KSLAB_CREATE_ZERO); | |
if (! cache_kthread) | |
return -SOS_ENOMEM; | |
| |
/* Allocate a new kthread structure for the current running thread */ | |
myself = (struct sos_kthread*) sos_kmem_cache_alloc(cache_kthread, | |
SOS_KSLAB_ALLOC_ATOMIC); | |
if (! myself) | |
return -SOS_ENOMEM; | |
| |
/* Initialize the thread attributes */ | |
strzcpy(myself->name, "[kinit]", SOS_KTHR_MAX_NAMELEN); | |
myself->state = SOS_KTHR_CREATED; | |
myself->priority = SOS_SCHED_PRIO_LOWEST; | |
myself->stack_base_addr = init_thread_stack_base_addr; | |
myself->stack_size = init_thread_stack_size; | |
| |
/* Do some stack poisoning on the bottom of the stack, if needed */ | |
sos_cpu_kstate_prepare_detect_stack_overflow(myself->cpu_kstate, | |
myself->stack_base_addr, | |
myself->stack_size); | |
| |
/* Add the thread in the global list */ | |
list_singleton_named(kthread_list, myself, gbl_prev, gbl_next); | |
| |
/* Ok, now pretend that the running thread is ourselves */ | |
myself->state = SOS_KTHR_READY; | |
_set_current(myself); | |
| |
return SOS_OK; | |
} | |
| |
| |
struct sos_kthread *sos_kthread_create(const char *name, | |
sos_kthread_start_routine_t start_func, | |
void *start_arg, | |
sos_sched_priority_t priority) | |
{ | |
__label__ undo_creation; | |
struct sos_kthread *new_thread; | |
| |
if (! start_func) | |
return NULL; | |
if (! SOS_SCHED_PRIO_IS_VALID(priority)) | |
return NULL; | |
| |
/* Allocate a new kthread structure for the current running thread */ | |
new_thread | |
= (struct sos_kthread*) sos_kmem_cache_alloc(cache_kthread, | |
SOS_KSLAB_ALLOC_ATOMIC); | |
if (! new_thread) | |
return NULL; | |
| |
/* Initialize the thread attributes */ | |
strzcpy(new_thread->name, ((name)?name:"[NONAME]"), SOS_KTHR_MAX_NAMELEN); | |
new_thread->state = SOS_KTHR_CREATED; | |
new_thread->priority = priority; | |
| |
/* Allocate the stack for the new thread */ | |
new_thread->stack_base_addr = sos_kmalloc(SOS_KTHREAD_STACK_SIZE, 0); | |
new_thread->stack_size = SOS_KTHREAD_STACK_SIZE; | |
if (! new_thread->stack_base_addr) | |
goto undo_creation; | |
| |
/* Initialize the CPU context of the new thread */ | |
if (SOS_OK | |
!= sos_cpu_kstate_init(& new_thread->cpu_kstate, | |
(sos_cpu_kstate_function_arg1_t*) start_func, | |
(sos_ui32_t) start_arg, | |
new_thread->stack_base_addr, | |
new_thread->stack_size, | |
(sos_cpu_kstate_function_arg1_t*) sos_kthread_exit, | |
(sos_ui32_t) NULL)) | |
goto undo_creation; | |
| |
/* Add the thread in the global list */ | |
list_add_tail_named(kthread_list, new_thread, gbl_prev, gbl_next); | |
| |
/* Mark the thread ready */ | |
if (SOS_OK != sos_sched_set_ready(new_thread)) | |
goto undo_creation; | |
| |
/* Normal non-erroneous end of function */ | |
return new_thread; | |
| |
undo_creation: | |
sos_kmem_cache_free((sos_vaddr_t) new_thread); | |
return NULL; | |
} | |
| |
| |
/** Function called after thr has terminated. Called from inside the context | |
of another thread, interrupts disabled */ | |
static void delete_thread(struct sos_kthread *thr) | |
{ | |
list_delete_named(kthread_list, thr, gbl_prev, gbl_next); | |
| |
sos_cpu_kstate_detect_stack_overflow(thr->cpu_kstate, | |
thr->stack_base_addr, | |
thr->stack_size); | |
| |
sos_kfree((sos_vaddr_t) thr->stack_base_addr); | |
memset(thr, 0x0, sizeof(struct sos_kthread)); | |
sos_kmem_cache_free((sos_vaddr_t) thr); | |
} | |
| |
| |
void sos_kthread_exit() | |
{ | |
sos_ui32_t flags; | |
struct sos_kthread *myself, *next_thread; | |
| |
myself = sos_kthread_get_current(); | |
| |
 /* Refuse to end the currently executing thread if it still holds a | |
resource ! */ | |
SOS_ASSERT_FATAL(list_is_empty_named(myself->kwaitq_list, | |
prev_entry_for_kthread, | |
next_entry_for_kthread)); | |
| |
/* Prepare to run the next thread */ | |
sos_disable_IRQs(flags); | |
myself->state = SOS_KTHR_ZOMBIE; | |
next_thread = sos_reschedule(myself, FALSE); | |
_set_current(next_thread); | |
| |
/* No need for sos_restore_IRQs() here because the IRQ flag will be | |
restored to that of the next thread upon context switch */ | |
| |
/* Immediate switch to next thread */ | |
sos_cpu_kstate_exit_to(next_thread->cpu_kstate, | |
(sos_cpu_kstate_function_arg1_t*) delete_thread, | |
(sos_ui32_t) myself); | |
} | |
| |
| |
sos_sched_priority_t sos_kthread_get_priority(struct sos_kthread *thr) | |
{ | |
if (! thr) | |
thr = (struct sos_kthread*)current_kthread; | |
| |
return thr->priority; | |
} | |
| |
| |
sos_kthread_state_t sos_kthread_get_state(struct sos_kthread *thr) | |
{ | |
if (! thr) | |
thr = (struct sos_kthread*)current_kthread; | |
| |
return thr->state; | |
} | |
| |
| |
typedef enum { YIELD_MYSELF, BLOCK_MYSELF } switch_type_t; | |
/** | |
* Helper function to initiate a context switch in case the current | |
* thread becomes blocked, waiting for a timeout, or calls yield. | |
*/ | |
static sos_ret_t _switch_to_next_thread(switch_type_t operation) | |
{ | |
struct sos_kthread *myself, *next_thread; | |
| |
SOS_ASSERT_FATAL(current_kthread->state == SOS_KTHR_RUNNING); | |
| |
/* Interrupt handlers are NOT allowed to block ! */ | |
SOS_ASSERT_FATAL(! sos_servicing_irq()); | |
| |
myself = (struct sos_kthread*)current_kthread; | |
| |
 /* Make sure that, if we are to be marked "BLOCKED", we actually | |
 have a reason to be blocked */ | |
if (BLOCK_MYSELF == operation) | |
{ | |
myself->state = SOS_KTHR_BLOCKED; | |
} | |
| |
/* Identify the next thread */ | |
next_thread = sos_reschedule(myself, YIELD_MYSELF == operation); | |
| |
/* Avoid context switch if the context does not change */ | |
if (myself != next_thread) | |
{ | |
/* Sanity checks for the next thread */ | |
sos_cpu_kstate_detect_stack_overflow(next_thread->cpu_kstate, | |
next_thread->stack_base_addr, | |
next_thread->stack_size); | |
| |
/* Actual context switch */ | |
_set_current(next_thread); | |
sos_cpu_kstate_switch(& myself->cpu_kstate, next_thread->cpu_kstate); | |
| |
/* Back here ! */ | |
SOS_ASSERT_FATAL(current_kthread == myself); | |
SOS_ASSERT_FATAL(current_kthread->state == SOS_KTHR_RUNNING); | |
} | |
else | |
{ | |
/* No context switch but still update ID of current thread */ | |
_set_current(next_thread); | |
} | |
| |
return SOS_OK; | |
} | |
| |
| |
/** | |
* Helper function to change the thread's priority in all the | |
* waitqueues associated with the thread. | |
*/ | |
static sos_ret_t _change_waitq_priorities(struct sos_kthread *thr, | |
sos_sched_priority_t priority) | |
{ | |
struct sos_kwaitq_entry *kwq_entry; | |
int nb_waitqs; | |
| |
list_foreach_forward_named(thr->kwaitq_list, kwq_entry, nb_waitqs, | |
prev_entry_for_kthread, next_entry_for_kthread) | |
{ | |
SOS_ASSERT_FATAL(SOS_OK == sos_kwaitq_change_priority(kwq_entry->kwaitq, | |
kwq_entry, | |
priority)); | |
} | |
| |
return SOS_OK; | |
} | |
| |
| |
sos_ret_t sos_kthread_set_priority(struct sos_kthread *thr, | |
sos_sched_priority_t priority) | |
{ | |
__label__ exit_set_prio; | |
sos_ui32_t flags; | |
sos_ret_t retval; | |
| |
| |
if (! SOS_SCHED_PRIO_IS_VALID(priority)) | |
return -SOS_EINVAL; | |
| |
if (! thr) | |
thr = (struct sos_kthread*)current_kthread; | |
| |
sos_disable_IRQs(flags); | |
| |
/* Signal kwaitq subsystem that the priority of the thread in all | |
the waitq it is waiting in should be updated */ | |
retval = _change_waitq_priorities(thr, priority); | |
if (SOS_OK != retval) | |
goto exit_set_prio; | |
| |
/* Signal scheduler that the thread, currently in a waiting list, | |
should take into account the change of priority */ | |
if (SOS_KTHR_READY == thr->state) | |
retval = sos_sched_change_priority(thr, priority); | |
| |
/* Update priority */ | |
thr->priority = priority; | |
| |
exit_set_prio: | |
sos_restore_IRQs(flags); | |
return retval; | |
} | |
| |
| |
sos_ret_t sos_kthread_yield() | |
{ | |
sos_ui32_t flags; | |
sos_ret_t retval; | |
| |
sos_disable_IRQs(flags); | |
| |
retval = _switch_to_next_thread(YIELD_MYSELF); | |
| |
sos_restore_IRQs(flags); | |
return retval; | |
} | |
| |
| |
/** | |
* Internal sleep timeout management | |
*/ | |
struct sleep_timeout_params | |
{ | |
struct sos_kthread *thread_to_wakeup; | |
sos_bool_t timeout_triggered; | |
}; | |
| |
| |
/** | |
* Callback called when a timeout happened | |
*/ | |
static void sleep_timeout(struct sos_timeout_action *act) | |
{ | |
struct sleep_timeout_params *sleep_timeout_params | |
= (struct sleep_timeout_params*) act->routine_data; | |
| |
/* Signal that we have been woken up by the timeout */ | |
sleep_timeout_params->timeout_triggered = TRUE; | |
| |
/* Mark the thread ready */ | |
SOS_ASSERT_FATAL(SOS_OK == | |
sos_kthread_force_unblock(sleep_timeout_params | |
->thread_to_wakeup)); | |
} | |
| |
| |
sos_ret_t sos_kthread_sleep(struct sos_time *timeout) | |
{ | |
sos_ui32_t flags; | |
struct sleep_timeout_params sleep_timeout_params; | |
struct sos_timeout_action timeout_action; | |
sos_ret_t retval; | |
| |
/* Block forever if no timeout is given */ | |
if (NULL == timeout) | |
{ | |
sos_disable_IRQs(flags); | |
retval = _switch_to_next_thread(BLOCK_MYSELF); | |
sos_restore_IRQs(flags); | |
| |
return retval; | |
} | |
| |
/* Initialize the timeout action */ | |
sos_time_init_action(& timeout_action); | |
| |
/* Prepare parameters used by the sleep timeout callback */ | |
sleep_timeout_params.thread_to_wakeup | |
= (struct sos_kthread*)current_kthread; | |
sleep_timeout_params.timeout_triggered = FALSE; | |
| |
sos_disable_IRQs(flags); | |
| |
/* Now program the timeout ! */ | |
SOS_ASSERT_FATAL(SOS_OK == | |
sos_time_register_action_relative(& timeout_action, | |
timeout, | |
sleep_timeout, | |
& sleep_timeout_params)); | |
| |
/* Prepare to block: wait for sleep_timeout() to wakeup us in the | |
timeout kwaitq, or for someone to wake us up in any other | |
waitq */ | |
retval = _switch_to_next_thread(BLOCK_MYSELF); | |
/* Unblocked by something ! */ | |
| |
/* Unblocked by timeout ? */ | |
if (sleep_timeout_params.timeout_triggered) | |
{ | |
/* Yes */ | |
SOS_ASSERT_FATAL(sos_time_is_zero(& timeout_action.timeout)); | |
retval = SOS_OK; | |
} | |
else | |
{ | |
/* No: We have probably been woken up while in some other | |
kwaitq */ | |
SOS_ASSERT_FATAL(SOS_OK == sos_time_unregister_action(& timeout_action)); | |
retval = -SOS_EINTR; | |
} | |
| |
sos_restore_IRQs(flags); | |
| |
/* Update the remaining timeout */ | |
memcpy(timeout, & timeout_action.timeout, sizeof(struct sos_time)); | |
| |
return retval; | |
} | |
| |
| |
sos_ret_t sos_kthread_force_unblock(struct sos_kthread *kthread) | |
{ | |
sos_ret_t retval; | |
sos_ui32_t flags; | |
| |
if (! kthread) | |
return -SOS_EINVAL; | |
| |
sos_disable_IRQs(flags); | |
| |
/* Thread already woken up ? */ | |
retval = SOS_OK; | |
switch(sos_kthread_get_state(kthread)) | |
{ | |
case SOS_KTHR_RUNNING: | |
case SOS_KTHR_READY: | |
/* Do nothing */ | |
break; | |
| |
case SOS_KTHR_ZOMBIE: | |
retval = -SOS_EFATAL; | |
break; | |
| |
default: | |
retval = sos_sched_set_ready(kthread); | |
break; | |
} | |
| |
sos_restore_IRQs(flags); | |
| |
return retval; | |
} | |
/tmp/sos-code-article6.75/sos/kthread.h (2005-01-04 04:13:53.000000000 +0100
) |
|
../sos-code-article7/sos/kthread.h (1970-01-01 01:00:00.000000000 +0100
) |
|
|
|
/* Copyright (C) 2004 David Decotigny | |
| |
This program is free software; you can redistribute it and/or | |
modify it under the terms of the GNU General Public License | |
as published by the Free Software Foundation; either version 2 | |
of the License, or (at your option) any later version. | |
| |
This program is distributed in the hope that it will be useful, | |
but WITHOUT ANY WARRANTY; without even the implied warranty of | |
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
GNU General Public License for more details. | |
| |
You should have received a copy of the GNU General Public License | |
along with this program; if not, write to the Free Software | |
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, | |
USA. | |
*/ | |
#ifndef _SOS_KTHREAD_H_ | |
#define _SOS_KTHREAD_H_ | |
| |
#include <sos/errno.h> | |
| |
/** | |
* @file kthread.h | |
* | |
* SOS Kernel thread management API | |
*/ | |
| |
| |
/* Forward declaration */ | |
struct sos_kthread; | |
| |
#include <hwcore/cpu_context.h> | |
#include <sos/sched.h> | |
#include <sos/kwaitq.h> | |
#include <sos/time.h> | |
| |
| |
/** | |
* The possible states of a valid kernel thread | |
*/ | |
typedef enum { SOS_KTHR_CREATED, /**< Thread created, not fully initialized */ | |
SOS_KTHR_READY, /**< Thread fully initialized or waiting | |
for CPU after having been blocked */ | |
SOS_KTHR_RUNNING, /**< Thread currently running on CPU */ | |
SOS_KTHR_BLOCKED, /**< Thread waiting for I/O (+ in at LEAST | |
one kwaitq) and/or sleeping (+ in NO | |
kwaitq) */ | |
SOS_KTHR_ZOMBIE, /**< Thread terminated execution, waiting to | |
be deleted by kernel */ | |
} sos_kthread_state_t; | |
| |
| |
/** | |
* TCB (Thread Control Block): structure describing a Kernel | |
* thread. Don't access these fields directly: prefer using the | |
* accessor functions below. | |
*/ | |
struct sos_kthread | |
{ | |
#define SOS_KTHR_MAX_NAMELEN 32 | |
char name[SOS_KTHR_MAX_NAMELEN]; | |
| |
sos_kthread_state_t state; | |
sos_sched_priority_t priority; | |
| |
/* The hardware context of the thread */ | |
struct sos_cpu_kstate *cpu_kstate; | |
sos_vaddr_t stack_base_addr; | |
sos_size_t stack_size; | |
| |
/* Data specific to each state */ | |
union | |
{ | |
struct | |
{ | |
struct sos_sched_queue *rdy_queue; | |
struct sos_kthread *rdy_prev, *rdy_next; | |
} ready; | |
 }; /* Anonymous union (gcc extension) */ | |
| |
| |
/* | |
* Data used by the kwaitq subsystem: list of kwaitqueues the thread | |
* is waiting for. | |
* | |
* @note: a RUNNING or READY thread might be in one or more | |
* waitqueues ! The only property we have is that, among these | |
* waitqueues (if any), _at least_ one has woken the thread. | |
*/ | |
struct sos_kwaitq_entry *kwaitq_list; | |
| |
| |
/** | |
* Chaining pointers for global ("gbl") list of threads (debug) | |
*/ | |
struct sos_kthread *gbl_prev, *gbl_next; | |
}; | |
| |
| |
/** | |
* Definition of the function executed by a kernel thread | |
*/ | |
typedef void (*sos_kthread_start_routine_t)(void *arg); | |
| |
| |
/** | |
* Initialize the subsystem responsible for kernel thread management | |
* | |
* Initialize primary kernel thread so that it can be handled the same | |
* way as an ordinary thread created by sos_kthread_create(). | |
*/ | |
sos_ret_t sos_kthread_subsystem_setup(sos_vaddr_t init_thread_stack_base_addr, | |
sos_size_t init_thread_stack_size); | |
| |
| |
/** | |
* Create a new kernel thread | |
*/ | |
struct sos_kthread *sos_kthread_create(const char *name, | |
sos_kthread_start_routine_t start_func, | |
void *start_arg, | |
sos_sched_priority_t priority); | |
| |
| |
/** | |
* Terminate the execution of the current thread. Called by default | |
* when the start routine returns. | |
*/ | |
void sos_kthread_exit() __attribute__((noreturn)); | |
| |
| |
/** | |
* Get the identifier of the thread currently running on CPU. Trivial | |
* function. | |
*/ | |
struct sos_kthread *sos_kthread_get_current(); | |
| |
| |
/** | |
* If thr == NULL, set the priority of the current thread. Trivial | |
* function. | |
* | |
* @note NOT protected against interrupts | |
*/ | |
sos_sched_priority_t sos_kthread_get_priority(struct sos_kthread *thr); | |
| |
| |
/** | |
* If thr == NULL, get the state of the current thread. Trivial | |
* function. | |
* | |
* @note NOT protected against interrupts | |
*/ | |
sos_kthread_state_t sos_kthread_get_state(struct sos_kthread *thr); | |
| |
| |
/** | |
* If thr == NULL, set the priority of the current thread | |
* | |
* @note NO context-switch ever occurs in this function ! | |
*/ | |
sos_ret_t sos_kthread_set_priority(struct sos_kthread *thr, | |
sos_sched_priority_t priority); | |
| |
| |
/** | |
* Yield CPU to another ready thread. | |
* | |
* @note This is a BLOCKING FUNCTION | |
*/ | |
sos_ret_t sos_kthread_yield(); | |
| |
| |
/** | |
* Release the CPU for (at least) the given delay. | |
* | |
* @param delay The delay to wait for. If delay == NULL then wait | |
 * forever, until some event wakes the thread up. | |
* | |
* @return SOS_OK when delay expired (and delay is reset to zero), | |
* -SOS_EINTR otherwise (and delay contains the amount of time | |
* remaining). | |
* | |
* @note This is a BLOCKING FUNCTION | |
*/ | |
sos_ret_t sos_kthread_sleep(/* in/out */struct sos_time *delay); | |
| |
| |
/** | |
* Mark the given thread as READY (if not already ready) even if it is | |
* blocked in a kwaitq or in a sleep ! As a result, the interrupted | |
* kwaitq/sleep function call of the thread will return with | |
* -SOS_EINTR. | |
* | |
* @return -SOS_EINVAL if thread does not exist, or -SOS_EFATAL if | |
* marked ZOMBIE. | |
* | |
* @note As a result, the semaphore/mutex/conditions/... functions | |
* return values SHOULD ALWAYS be checked ! If they are != SOS_OK, | |
 * then the caller should consider that the resource is not acquired | |
* because somebody woke the thread by some way. | |
*/ | |
sos_ret_t sos_kthread_force_unblock(struct sos_kthread *kthread); | |
| |
| |
#endif /* _SOS_KTHREAD_H_ */ | |
/tmp/sos-code-article6.75/sos/kwaitq.c (2005-01-04 04:13:53.000000000 +0100
) |
|
../sos-code-article7/sos/kwaitq.c (2005-02-05 17:52:19.000000000 +0100
) |
|
|
|
sos_ret_t sos_kwaitq_init_entry(struct sos_kwaitq_entry *kwq_entry) | sos_ret_t sos_kwaitq_init_entry(struct sos_kwaitq_entry *kwq_entry) |
{ | { |
memset(kwq_entry, 0x0, sizeof(struct sos_kwaitq_entry)); | memset(kwq_entry, 0x0, sizeof(struct sos_kwaitq_entry)); |
kwq_entry->kthread = sos_kthread_get_current(); | kwq_entry->thread = sos_thread_get_current(); |
} | } |
| |
|
|
SOS_ASSERT_FATAL(NULL == kwq_entry->kwaitq); | SOS_ASSERT_FATAL(NULL == kwq_entry->kwaitq); |
| |
/* sos_kwaitq_init_entry() has not been called ?! */ | /* sos_kwaitq_init_entry() has not been called ?! */ |
SOS_ASSERT_FATAL(NULL != kwq_entry->kthread); | SOS_ASSERT_FATAL(NULL != kwq_entry->thread); |
/* (Re-)Initialize wakeup status of the entry */ | /* (Re-)Initialize wakeup status of the entry */ |
kwq_entry->wakeup_triggered = FALSE; | kwq_entry->wakeup_triggered = FALSE; |
|
|
/* Does the thread we want to insert have higher priority than | /* Does the thread we want to insert have higher priority than |
the given thread in the queue ? */ | the given thread in the queue ? */ |
if (SOS_SCHED_PRIO_CMP(prio, | if (SOS_SCHED_PRIO_CMP(prio, |
sos_kthread_get_priority(entry->kthread)) | sos_thread_get_priority(entry->thread)) |
{ | { |
/* Yes: we insert before this given thread */ | /* Yes: we insert before this given thread */ |
|
|
} | } |
| |
/* Update the list of waitqueues for the thread */ | /* Update the list of waitqueues for the thread */ |
list_add_tail_named(kwq_entry->kthread->kwaitq_list, kwq_entry, | list_add_tail_named(kwq_entry->thread->kwaitq_list, kwq_entry, |
prev_entry_for_kthread, next_entry_for_kthread); | prev_entry_for_thread, next_entry_for_thread); |
kwq_entry->kwaitq = kwq; | kwq_entry->kwaitq = kwq; |
| |
|
|
| |
sos_disable_IRQs(flags); | sos_disable_IRQs(flags); |
retval = _kwaitq_add_entry(kwq, kwq_entry, | retval = _kwaitq_add_entry(kwq, kwq_entry, |
sos_kthread_get_priority(kwq_entry->kthread)); | sos_thread_get_priority(kwq_entry->thread)); |
| |
return retval; | return retval; |
|
|
list_delete_named(kwq->waiting_list, kwq_entry, | list_delete_named(kwq->waiting_list, kwq_entry, |
prev_entry_in_kwaitq, next_entry_in_kwaitq); | prev_entry_in_kwaitq, next_entry_in_kwaitq); |
| |
list_delete_named(kwq_entry->kthread->kwaitq_list, kwq_entry, | list_delete_named(kwq_entry->thread->kwaitq_list, kwq_entry, |
prev_entry_for_kthread, next_entry_for_kthread); | prev_entry_for_thread, next_entry_for_thread); |
kwq_entry->kwaitq = NULL; | kwq_entry->kwaitq = NULL; |
return SOS_OK; | return SOS_OK; |
|
|
sos_disable_IRQs(flags); | sos_disable_IRQs(flags); |
| |
retval = _kwaitq_add_entry(kwq, & kwq_entry, | retval = _kwaitq_add_entry(kwq, & kwq_entry, |
sos_kthread_get_priority(kwq_entry.kthread)); | sos_thread_get_priority(kwq_entry.thread)); |
/* Wait for wakeup or timeout */ | /* Wait for wakeup or timeout */ |
sos_kthread_sleep(timeout); | sos_thread_sleep(timeout); |
| |
/* Sleep delay elapsed ? */ | /* Sleep delay elapsed ? */ |
|
|
| |
| |
sos_ret_t sos_kwaitq_wakeup(struct sos_kwaitq *kwq, | sos_ret_t sos_kwaitq_wakeup(struct sos_kwaitq *kwq, |
unsigned int nb_kthreads, | unsigned int nb_threads, |
{ | { |
sos_ui32_t flags; | sos_ui32_t flags; |
|
|
sos_disable_IRQs(flags); | sos_disable_IRQs(flags); |
| |
 /* Wake up as many threads waiting in the waitqueue as possible (up to | /* Wake up as many threads waiting in the waitqueue as possible (up to |
nb_kthreads), scanning the list in FIFO/decreasing priority order | nb_threads), scanning the list in FIFO/decreasing priority order |
while (! list_is_empty_named(kwq->waiting_list, | while (! list_is_empty_named(kwq->waiting_list, |
prev_entry_in_kwaitq, next_entry_in_kwaitq)) | prev_entry_in_kwaitq, next_entry_in_kwaitq)) |
|
|
= list_get_head_named(kwq->waiting_list, | = list_get_head_named(kwq->waiting_list, |
prev_entry_in_kwaitq, next_entry_in_kwaitq); | prev_entry_in_kwaitq, next_entry_in_kwaitq); |
| |
/* Enough kthreads woken up ? */ | /* Enough threads woken up ? */ |
if (nb_kthreads <= 0) | if (nb_threads <= 0) |
| |
/* | /* |
|
|
*/ | */ |
| |
/* Thread already woken up ? */ | /* Thread already woken up ? */ |
if (SOS_KTHR_RUNNING == sos_kthread_get_state(kwq_entry->kthread)) | if (SOS_THR_RUNNING == sos_thread_get_state(kwq_entry->thread)) |
/* Yes => Do nothing because WE are that woken-up thread. In | /* Yes => Do nothing because WE are that woken-up thread. In |
particular: don't call set_ready() here because this | particular: don't call set_ready() here because this |
|
|
else | else |
{ | { |
/* No => wake it up now. */ | /* No => wake it up now. */ |
sos_sched_set_ready(kwq_entry->kthread); | sos_sched_set_ready(kwq_entry->thread); |
| |
/* Remove this waitq entry */ | /* Remove this waitq entry */ |
|
|
kwq_entry->wakeup_status = wakeup_status; | kwq_entry->wakeup_status = wakeup_status; |
| |
/* Next iteration... */ | /* Next iteration... */ |
nb_kthreads --; | nb_threads --; |
| |
sos_restore_IRQs(flags); | sos_restore_IRQs(flags); |
|
|
} | } |
| |
| |
/* Internal function (callback for kthread subsystem) */ | /* Internal function (callback for thread subsystem) */ |
struct sos_kwaitq_entry *kwq_entry, | struct sos_kwaitq_entry *kwq_entry, |
sos_sched_priority_t priority) | sos_sched_priority_t priority) |
| |
/tmp/sos-code-article6.75/sos/main.c (2005-01-04 04:13:53.000000000 +0100
) |
|
../sos-code-article7/sos/main.c (2005-02-05 17:52:19.000000000 +0100
) |
|
|
|
/* Copyright (C) 2004 The SOS Team | /* Copyright (C) 2004 The SOS Team |
Copyright (C) 1999 Free Software Foundation, Inc. | |
This program is free software; you can redistribute it and/or | This program is free software; you can redistribute it and/or |
modify it under the terms of the GNU General Public License | modify it under the terms of the GNU General Public License |
|
|
#include <sos/list.h> | #include <sos/list.h> |
#include <sos/physmem.h> | #include <sos/physmem.h> |
#include <hwcore/paging.h> | #include <hwcore/paging.h> |
| #include <hwcore/mm_context.h> |
| #include <hwcore/swintr.h> |
#include <sos/kmem_vmm.h> | #include <sos/kmem_vmm.h> |
#include <sos/kmalloc.h> | #include <sos/kmalloc.h> |
#include <sos/time.h> | #include <sos/time.h> |
#include <sos/kthread.h> | #include <sos/thread.h> |
| #include <sos/process.h> |
#include <sos/assert.h> | #include <sos/assert.h> |
#include <drivers/x86_videomem.h> | #include <drivers/x86_videomem.h> |
#include <drivers/bochs.h> | #include <drivers/bochs.h> |
| #include <sos/calcload.h> |
| |
| |
/* Helper function to display each bits of a 32bits integer on the | /* Helper function to display each bits of a 32bits integer on the |
|
|
| |
| |
/* Clock IRQ handler */ | /* Clock IRQ handler */ |
static void clk_it(int intid, | static void clk_it(int intid) |
const struct sos_cpu_kstate *cpu_kstate) | |
static sos_ui32_t clock_count = 0; | static sos_ui32_t clock_count = 0; |
| |
|
|
| |
/* Execute the expired timeout actions (if any) */ | /* Execute the expired timeout actions (if any) */ |
sos_time_do_tick(); | sos_time_do_tick(); |
| |
| /* Update scheduler statistics and status */ |
| sos_sched_do_timer_tick(); |
} | } |
| |
| |
|
|
*/ | */ |
| |
/* Helper function to dump a backtrace on bochs and/or the console */ | /* Helper function to dump a backtrace on bochs and/or the console */ |
static void dump_backtrace(const struct sos_cpu_kstate *cpu_kstate, | static void dump_backtrace(const struct sos_cpu_state *cpu_state, |
sos_size_t stack_size, | sos_size_t stack_size, |
sos_bool_t on_console, | sos_bool_t on_console, |
|
|
| |
} | } |
| |
sos_backtrace(cpu_kstate, 15, stack_bottom, stack_size, backtracer, NULL); | sos_backtrace(cpu_state, 15, stack_bottom, stack_size, backtracer, NULL); |
| |
| |
/* Page fault exception handler with demand paging for the kernel */ | /* Page fault exception handler with demand paging for the kernel */ |
static void pgflt_ex(int intid, const struct sos_cpu_kstate *ctxt) | static void pgflt_ex(int intid, const struct sos_cpu_state *ctxt) |
static sos_ui32_t demand_paging_count = 0; | static sos_ui32_t demand_paging_count = 0; |
sos_vaddr_t faulting_vaddr = sos_cpu_kstate_get_EX_faulting_vaddr(ctxt); | sos_vaddr_t faulting_vaddr = sos_cpu_context_get_EX_faulting_vaddr(ctxt); |
| |
| if (sos_cpu_context_is_in_user_mode(ctxt)) |
| { |
| /* User-mode page faults are considered unresolved for the |
| moment */ |
| sos_bochs_printf("Unresolved USER page Fault at instruction 0x%x on access to address 0x%x (info=%x)!\n", |
| sos_cpu_context_get_PC(ctxt), |
| (unsigned)faulting_vaddr, |
| (unsigned)sos_cpu_context_get_EX_info(ctxt)); |
| sos_bochs_printf("Terminating User thread\n"); |
| sos_thread_exit(); |
| } |
| |
/* Check if address is covered by any VMM range */ | /* Check if address is covered by any VMM range */ |
if (! sos_kmem_vmm_is_valid_vaddr(faulting_vaddr)) | if (! sos_kmem_vmm_is_valid_vaddr(faulting_vaddr)) |
{ | { |
|
|
bootstrap_stack_bottom, | bootstrap_stack_bottom, |
bootstrap_stack_size, | bootstrap_stack_size, |
TRUE, TRUE); | TRUE, TRUE); |
sos_display_fatal_error("Unresolved page Fault on access to address 0x%x (info=%x)!", | sos_display_fatal_error("Unresolved page Fault at instruction 0x%x on access to address 0x%x (info=%x)!", |
| sos_cpu_context_get_PC(ctxt), |
(unsigned)sos_cpu_kstate_get_EX_info(ctxt)); | (unsigned)sos_cpu_context_get_EX_info(ctxt)); |
} | } |
| |
|
|
{ | { |
sos_bochs_printf("[37myield(%c)[m\n", thr_arg->character); | sos_bochs_printf("[37myield(%c)[m\n", thr_arg->character); |
sos_x86_videomem_putchar(thr_arg->row, thr_arg->col, 0x1e, 'Y'); | sos_x86_videomem_putchar(thr_arg->row, thr_arg->col, 0x1e, 'Y'); |
SOS_ASSERT_FATAL(SOS_OK == sos_kthread_yield()); | SOS_ASSERT_FATAL(SOS_OK == sos_thread_yield()); |
} | } |
| |
|
|
struct sos_time t = (struct sos_time){ .sec=0, .nanosec=50000000 }; | struct sos_time t = (struct sos_time){ .sec=0, .nanosec=50000000 }; |
sos_bochs_printf("[37msleep1(%c)[m\n", thr_arg->character); | sos_bochs_printf("[37msleep1(%c)[m\n", thr_arg->character); |
sos_x86_videomem_putchar(thr_arg->row, thr_arg->col, 0x1e, 's'); | sos_x86_videomem_putchar(thr_arg->row, thr_arg->col, 0x1e, 's'); |
SOS_ASSERT_FATAL(SOS_OK == sos_kthread_sleep(& t)); | SOS_ASSERT_FATAL(SOS_OK == sos_thread_sleep(& t)); |
sos_x86_videomem_putchar(thr_arg->row, thr_arg->col, 0x1e, 'R'); | sos_x86_videomem_putchar(thr_arg->row, thr_arg->col, 0x1e, 'R'); |
} | } |
|
|
struct sos_time t = (struct sos_time){ .sec=0, .nanosec=300000000 }; | struct sos_time t = (struct sos_time){ .sec=0, .nanosec=300000000 }; |
sos_bochs_printf("[37msleep2(%c)[m\n", thr_arg->character); | sos_bochs_printf("[37msleep2(%c)[m\n", thr_arg->character); |
sos_x86_videomem_putchar(thr_arg->row, thr_arg->col, 0x1e, 'S'); | sos_x86_videomem_putchar(thr_arg->row, thr_arg->col, 0x1e, 'S'); |
SOS_ASSERT_FATAL(SOS_OK == sos_kthread_sleep(& t)); | SOS_ASSERT_FATAL(SOS_OK == sos_thread_sleep(& t)); |
sos_x86_videomem_putchar(thr_arg->row, thr_arg->col, 0x1e, 'R'); | sos_x86_videomem_putchar(thr_arg->row, thr_arg->col, 0x1e, 'R'); |
} | } |
|
|
} | } |
| |
| |
static void test_kthread() | static void test_thread() |
/* "static" variables because we want them to remain even when the | /* "static" variables because we want them to remain even when the |
function returns */ | function returns */ |
|
|
sos_disable_IRQs(flags); | sos_disable_IRQs(flags); |
| |
arg_b = (struct thr_arg) { .character='b', .col=0, .row=21, .color=0x14 }; | arg_b = (struct thr_arg) { .character='b', .col=0, .row=21, .color=0x14 }; |
sos_kthread_create("YO[b]", demo_thread, (void*)&arg_b, SOS_SCHED_PRIO_TS_LOWEST); | sos_create_kernel_thread("YO[b]", demo_thread, (void*)&arg_b, SOS_SCHED_PRIO_TS_LOWEST); |
arg_c = (struct thr_arg) { .character='c', .col=46, .row=21, .color=0x14 }; | arg_c = (struct thr_arg) { .character='c', .col=46, .row=21, .color=0x14 }; |
sos_kthread_create("YO[c]", demo_thread, (void*)&arg_c, SOS_SCHED_PRIO_TS_LOWEST); | sos_create_kernel_thread("YO[c]", demo_thread, (void*)&arg_c, SOS_SCHED_PRIO_TS_LOWEST); |
arg_d = (struct thr_arg) { .character='d', .col=0, .row=20, .color=0x14 }; | arg_d = (struct thr_arg) { .character='d', .col=0, .row=20, .color=0x14 }; |
sos_kthread_create("YO[d]", demo_thread, (void*)&arg_d, SOS_SCHED_PRIO_TS_LOWEST-1); | sos_create_kernel_thread("YO[d]", demo_thread, (void*)&arg_d, SOS_SCHED_PRIO_TS_LOWEST-1); |
arg_e = (struct thr_arg) { .character='e', .col=0, .row=19, .color=0x14 }; | arg_e = (struct thr_arg) { .character='e', .col=0, .row=19, .color=0x14 }; |
sos_kthread_create("YO[e]", demo_thread, (void*)&arg_e, SOS_SCHED_PRIO_TS_LOWEST-2); | sos_create_kernel_thread("YO[e]", demo_thread, (void*)&arg_e, SOS_SCHED_PRIO_TS_LOWEST-2); |
arg_R = (struct thr_arg) { .character='R', .col=0, .row=17, .color=0x1c }; | arg_R = (struct thr_arg) { .character='R', .col=0, .row=17, .color=0x1c }; |
sos_kthread_create("YO[R]", demo_thread, (void*)&arg_R, SOS_SCHED_PRIO_RT_LOWEST); | sos_create_kernel_thread("YO[R]", demo_thread, (void*)&arg_R, SOS_SCHED_PRIO_RT_LOWEST); |
arg_S = (struct thr_arg) { .character='S', .col=0, .row=16, .color=0x1c }; | arg_S = (struct thr_arg) { .character='S', .col=0, .row=16, .color=0x1c }; |
sos_kthread_create("YO[S]", demo_thread, (void*)&arg_S, SOS_SCHED_PRIO_RT_LOWEST-1); | sos_create_kernel_thread("YO[S]", demo_thread, (void*)&arg_S, SOS_SCHED_PRIO_RT_LOWEST-1); |
sos_restore_IRQs(flags); | sos_restore_IRQs(flags); |
} | } |
|
|
* An operating system MUST always have a ready thread ! Otherwise: | * An operating system MUST always have a ready thread ! Otherwise: |
* what would the CPU have to execute ?! | * what would the CPU have to execute ?! |
*/ | */ |
static void idle_kthread() | static void idle_thread() |
sos_ui32_t idle_twiddle = 0; | sos_ui32_t idle_twiddle = 0; |
| |
|
|
idle_twiddle); | idle_twiddle); |
| |
/* Lend the CPU to some other thread */ | /* Lend the CPU to some other thread */ |
sos_kthread_yield(); | sos_thread_yield(); |
| } |
| } |
| |
| |
| /* ====================================================================== |
| * Kernel thread showing some CPU usage statistics on the console every 1s |
| */ |
| static void stat_thread() |
| { |
| while (1) |
| { |
| sos_ui32_t flags; |
| sos_ui32_t load1, load5, load15; |
| char str1[11], str5[11], str15[11]; |
| struct sos_time t; |
| t.sec = 1; |
| t.nanosec = 0; |
| |
| sos_thread_sleep(& t); |
| |
| sos_disable_IRQs(flags); |
| |
 | /* The IDLE task is EXcluded from the following computation */ |
| sos_load_get_sload(&load1, &load5, &load15); |
| sos_load_to_string(str1, load1); |
| sos_load_to_string(str5, load5); |
| sos_load_to_string(str15, load15); |
| sos_x86_videomem_printf(16, 34, |
| SOS_X86_VIDEO_FG_YELLOW | SOS_X86_VIDEO_BG_BLUE, |
| "Kernel (- Idle): %s %s %s ", |
| str1, str5, str15); |
| |
| sos_load_get_uload(&load1, &load5, &load15); |
| sos_load_to_string(str1, load1); |
| sos_load_to_string(str5, load5); |
| sos_load_to_string(str15, load15); |
| sos_x86_videomem_printf(17, 34, |
| SOS_X86_VIDEO_FG_YELLOW | SOS_X86_VIDEO_BG_BLUE, |
| "User: %s %s %s ", |
| str1, str5, str15); |
| |
| sos_load_get_uratio(&load1, &load5, &load15); |
| sos_load_to_string(str1, load1); |
| sos_load_to_string(str5, load5); |
| sos_load_to_string(str15, load15); |
| sos_x86_videomem_printf(18, 34, |
| SOS_X86_VIDEO_FG_YELLOW | SOS_X86_VIDEO_BG_BLUE, |
| "User CPU %%: %s %s %s ", |
| str1, str5, str15); |
| |
| /* The IDLE task is INcluded in the following computation */ |
| sos_load_get_sratio(&load1, &load5, &load15); |
| sos_load_to_string(str1, load1); |
| sos_load_to_string(str5, load5); |
| sos_load_to_string(str15, load15); |
| sos_x86_videomem_printf(19, 34, |
| SOS_X86_VIDEO_FG_YELLOW | SOS_X86_VIDEO_BG_BLUE, |
| "Kernel CPU %% (+ Idle): %s %s %s ", |
| str1, str5, str15); |
| sos_restore_IRQs(flags); |
} | } |
| |
|
|
sos_x86_videomem_printf(1, 0, | sos_x86_videomem_printf(1, 0, |
SOS_X86_VIDEO_FG_YELLOW | SOS_X86_VIDEO_BG_BLUE, | SOS_X86_VIDEO_FG_YELLOW | SOS_X86_VIDEO_BG_BLUE, |
"Welcome From GRUB to %s%c RAM is %dMB (upper mem = 0x%x kB)", | "Welcome From GRUB to %s%c RAM is %dMB (upper mem = 0x%x kB)", |
"SOS", ',', | "SOS article 7", ',', |
(unsigned)mbi->mem_upper); | (unsigned)mbi->mem_upper); |
else | else |
/* Not loaded with grub */ | /* Not loaded with grub */ |
sos_x86_videomem_printf(1, 0, | sos_x86_videomem_printf(1, 0, |
SOS_X86_VIDEO_FG_YELLOW | SOS_X86_VIDEO_BG_BLUE, | SOS_X86_VIDEO_FG_YELLOW | SOS_X86_VIDEO_BG_BLUE, |
"Welcome to SOS"); | "Welcome to SOS article 7"); |
sos_bochs_putstring("Message in a bochs\n"); | sos_bochs_putstring("Message in a bochs: This is SOS article 7.\n"); |
/* Setup CPU segmentation and IRQ subsystem */ | /* Setup CPU segmentation and IRQ subsystem */ |
sos_gdt_subsystem_setup(); | sos_gdt_subsystem_setup(); |
|
|
| |
/* | /* |
* Setup kernel virtual memory allocator | * Setup kernel virtual memory allocator |
*/ | */ |
if (sos_kmem_vmm_subsystem_setup(sos_kernel_core_base_paddr, | if (sos_kmem_vmm_subsystem_setup(sos_kernel_core_base_paddr, |
sos_kernel_core_top_paddr, | sos_kernel_core_top_paddr, |
|
|
sos_bochs_printf("Could not setup the Kmalloc subsystem\n"); | sos_bochs_printf("Could not setup the Kmalloc subsystem\n"); |
| |
/* | /* |
| * Initialize the MMU context subsystem |
| */ |
| sos_mm_context_subsystem_setup(); |
| |
| /* |
| * Initialize the CPU context subsystem |
| */ |
| sos_cpu_context_subsystem_setup(); |
| |
| /* |
| * Bind the syscall handler to its software interrupt handler |
| */ |
| sos_swintr_subsystem_setup(); |
| |
| |
| /* |
* Initialize the Kernel thread and scheduler subsystems | * Initialize the Kernel thread and scheduler subsystems |
*/ | */ |
| |
/* Initialize kernel thread subsystem */ | /* Initialize kernel thread subsystem */ |
sos_kthread_subsystem_setup(bootstrap_stack_bottom, | sos_thread_subsystem_setup(bootstrap_stack_bottom, |
bootstrap_stack_size); | bootstrap_stack_size); |
/* Initialize the scheduler */ | /* Initialize the scheduler */ |
sos_sched_subsystem_setup(); | sos_sched_subsystem_setup(); |
| |
/* Declare the IDLE thread */ | /* Declare the IDLE thread */ |
SOS_ASSERT_FATAL(sos_kthread_create("idle", idle_kthread, NULL, | SOS_ASSERT_FATAL(sos_create_kernel_thread("idle", idle_thread, NULL, |
SOS_SCHED_PRIO_TS_LOWEST) != NULL); | SOS_SCHED_PRIO_TS_LOWEST) != NULL); |
| /* Prepare the stats subsystem */ |
| sos_load_subsystem_setup(); |
| |
| /* Declare a thread that prints some stats */ |
| SOS_ASSERT_FATAL(sos_create_kernel_thread("stat_thread", stat_thread, |
| NULL, |
| SOS_SCHED_PRIO_TS_LOWEST) != NULL); |
| |
/* Enabling the HW interrupts here, this will make the timer HW | |
interrupt call the scheduler */ | |
asm volatile ("sti\n"); | |
/* | /* |
* Force the idle thread to run at least once to force a context | * Initialize process stuff |
* switch. This way the "cpu_kstate" of the kernel thread for the | |
* sos_main thread gets a chance to be filled with the current CPU | |
* context. Useful only if we call sos_kthread_exit() too early from | |
* sos_main: a "stack overflow" will be wrongly detected simply | |
* because the "cpu_kstate" of the thread has not be correctly | |
* initialised. A context switch is a good way to initialise it. | |
sos_kthread_yield(); | sos_process_subsystem_setup(); |
| |
| |
| /* Enabling the HW interrupts here, this will make the timer HW |
| interrupt call the scheduler */ |
| asm volatile ("sti\n"); |
| /* Run some tests involving USER processes and threads */ |
| extern void test_art7(); |
| test_art7(); |
| |
/* Now run some Kernel threads just for fun ! */ | /* Now run some Kernel threads just for fun ! */ |
extern void MouseSim(); | extern void MouseSim(); |
MouseSim(); | MouseSim(); |
test_kthread(); | test_thread(); |
/* | /* |
* We can safely exit from this function now, for there is already | * We can safely exit from this function now, for there is already |
* an idle Kernel thread ready to make the CPU busy working... | * an idle Kernel thread ready to make the CPU busy working... |
* | * |
* However, we must EXPLICITLY call sos_kthread_exit() because a | * However, we must EXPLICITLY call sos_thread_exit() because a |
* was initialized by the Grub bootstrap stage, at a time when the | * was initialized by the Grub bootstrap stage, at a time when the |
* word "thread" did not exist. This means that the stack was not | * word "thread" did not exist. This means that the stack was not |
* setup in order for a return here to call sos_kthread_exit() | * setup in order for a return here to call sos_thread_exit() |
* kernel thread where we must do this manually. | * kernel thread where we must do this manually. |
*/ | */ |
sos_bochs_printf("Bye from primary thread !\n"); | sos_bochs_printf("Bye from primary thread !\n"); |
sos_kthread_exit(); | sos_thread_exit(); |
} | } |
| |
/tmp/sos-code-article6.75/sos/sched.c (2005-01-04 04:13:53.000000000 +0100) | ../sos-code-article7/sos/sched.c (2005-02-05 17:52:19.000000000 +0100)
|
|
|
USA. | USA. |
*/ | */ |
| |
| #include <sos/errno.h> |
#include <sos/klibc.h> | #include <sos/klibc.h> |
#include <sos/assert.h> | #include <sos/assert.h> |
#include <sos/list.h> | #include <sos/list.h> |
| #include <sos/calcload.h> |
| |
#include "sched.h" | #include "sched.h" |
| |
|
|
struct sos_sched_queue | struct sos_sched_queue |
{ | { |
unsigned int nr_threads; | unsigned int nr_threads; |
struct sos_kthread *kthread_list[SOS_SCHED_NUM_PRIO]; | struct sos_thread *thread_list[SOS_SCHED_NUM_PRIO]; |
| |
| |
|
|
static struct sos_sched_queue sched_queue[2]; | static struct sos_sched_queue sched_queue[2]; |
| |
| |
| /** |
| * The array giving the timeslice corresponding to each priority level |
| */ |
| struct sos_time time_slice[SOS_SCHED_NUM_PRIO]; |
| |
| |
sos_ret_t sos_sched_subsystem_setup() | sos_ret_t sos_sched_subsystem_setup() |
{ | { |
| sos_sched_priority_t prio; |
| |
memset(sched_queue, 0x0, sizeof(sched_queue)); | memset(sched_queue, 0x0, sizeof(sched_queue)); |
active_queue = & sched_queue[0]; | active_queue = & sched_queue[0]; |
expired_queue = & sched_queue[1]; | expired_queue = & sched_queue[1]; |
| |
| /* pre-compute time slices */ |
| for (prio = SOS_SCHED_PRIO_TS_HIGHEST ; |
| prio <= SOS_SCHED_PRIO_TS_LOWEST ; |
| prio ++) |
| { |
| unsigned int ms; |
| ms = SOS_TIME_SLICE_MIN |
| + (SOS_TIME_SLICE_MAX - SOS_TIME_SLICE_MIN) |
| * (prio - SOS_SCHED_PRIO_TS_HIGHEST) |
| / (SOS_SCHED_PRIO_TS_LOWEST - SOS_SCHED_PRIO_TS_HIGHEST); |
| time_slice[prio].sec = ms / 1000; |
| time_slice[prio].nanosec = 1000000UL * (ms % 1000); |
| } |
| |
return SOS_OK; | return SOS_OK; |
} | } |
| |
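The loop above gives each time-sharing priority level its own quantum, interpolated linearly between SOS_TIME_SLICE_MIN (for SOS_SCHED_PRIO_TS_HIGHEST) and SOS_TIME_SLICE_MAX (for SOS_SCHED_PRIO_TS_LOWEST). As a rough sketch only (the real MIN/MAX constants and the priority range live in sched.h and are not shown in this diff), here is the same computation with made-up numbers:

  /* Illustration only: hypothetical constants, NOT the sched.h values */
  #define EX_SLICE_MIN_MS   10   /* stands in for SOS_TIME_SLICE_MIN  */
  #define EX_SLICE_MAX_MS  100   /* stands in for SOS_TIME_SLICE_MAX  */
  #define EX_PRIO_LOWEST     7   /* time-sharing priorities 0..7      */

  /* Same linear interpolation as the loop above (integer division):
     prio 0 -> 10 ms, prio 3 -> 10 + 90*3/7 = 48 ms, prio 7 -> 100 ms */
  static unsigned int example_slice_ms(unsigned int prio)
  {
    return EX_SLICE_MIN_MS
      + (EX_SLICE_MAX_MS - EX_SLICE_MIN_MS) * prio / EX_PRIO_LOWEST;
  }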
|
|
* the ready list. Otherwise it is added at the head of it. | * the ready list. Otherwise it is added at the head of it. |
*/ | */ |
static sos_ret_t add_in_ready_queue(struct sos_sched_queue *q, | static sos_ret_t add_in_ready_queue(struct sos_sched_queue *q, |
struct sos_kthread *thr, | struct sos_thread *thr, |
{ | { |
sos_sched_priority_t prio; | sos_sched_priority_t prio; |
| |
SOS_ASSERT_FATAL( (SOS_KTHR_CREATED == thr->state) | SOS_ASSERT_FATAL( (SOS_THR_CREATED == thr->state) |
|| (SOS_KTHR_RUNNING == thr->state) /* Yield */ | || (SOS_THR_RUNNING == thr->state) /* Yield */ |
|| (SOS_KTHR_BLOCKED == thr->state) ); | || (SOS_THR_BLOCKED == thr->state) ); |
/* Add the thread to the CPU queue */ | /* Add the thread to the CPU queue */ |
prio = sos_kthread_get_priority(thr); | prio = sos_thread_get_priority(thr); |
list_add_tail_named(q->kthread_list[prio], thr, | list_add_tail_named(q->thread_list[prio], thr, |
else | else |
list_add_head_named(q->kthread_list[prio], thr, | list_add_head_named(q->thread_list[prio], thr, |
thr->ready.rdy_queue = q; | thr->ready.rdy_queue = q; |
q->nr_threads ++; | q->nr_threads ++; |
| |
/* Ok, thread is now really ready to be (re)started */ | /* Ok, thread is now really ready to be (re)started */ |
thr->state = SOS_KTHR_READY; | thr->state = SOS_THR_READY; |
return SOS_OK; | return SOS_OK; |
} | } |
| |
| |
sos_ret_t sos_sched_set_ready(struct sos_kthread *thr) | sos_ret_t sos_sched_set_ready(struct sos_thread *thr) |
sos_ret_t retval; | sos_ret_t retval; |
| |
/* Don't do anything for already ready threads */ | /* Don't do anything for already ready threads */ |
if (SOS_KTHR_READY == thr->state) | if (SOS_THR_READY == thr->state) |
| |
if (SOS_SCHED_PRIO_IS_RT(sos_kthread_get_priority(thr))) | /* Reset the CPU time used in the quantum */ |
| memset(& thr->running.user_time_spent_in_slice, 0x0, sizeof(struct sos_time)); |
| |
| if (SOS_SCHED_PRIO_IS_RT(sos_thread_get_priority(thr))) |
/* Real-time thread: schedule it for the present turn */ | /* Real-time thread: schedule it for the present turn */ |
retval = add_in_ready_queue(active_queue, thr, TRUE); | retval = add_in_ready_queue(active_queue, thr, TRUE); |
|
|
} | } |
| |
| |
sos_ret_t sos_sched_change_priority(struct sos_kthread *thr, | sos_ret_t sos_sched_change_priority(struct sos_thread *thr, |
{ | { |
struct sos_kthread *kthread_list; | struct sos_thread *thread_list; |
SOS_ASSERT_FATAL(SOS_KTHR_READY == thr->state); | SOS_ASSERT_FATAL(SOS_THR_READY == thr->state); |
/* Temp variable */ | /* Temp variable */ |
kthread_list | thread_list |
= thr->ready.rdy_queue->kthread_list[sos_kthread_get_priority(thr)]; | = thr->ready.rdy_queue->thread_list[sos_thread_get_priority(thr)]; |
list_delete_named(kthread_list, thr, ready.rdy_prev, ready.rdy_next); | list_delete_named(thread_list, thr, ready.rdy_prev, ready.rdy_next); |
/* Update lists */ | /* Update lists */ |
kthread_list = thr->ready.rdy_queue->kthread_list[priority]; | thread_list = thr->ready.rdy_queue->thread_list[priority]; |
list_add_tail_named(kthread_list, thr, ready.rdy_prev, ready.rdy_next); | list_add_tail_named(thread_list, thr, ready.rdy_prev, ready.rdy_next); |
thr->ready.rdy_queue->kthread_list[priority] = kthread_list; | thr->ready.rdy_queue->thread_list[priority] = thread_list; |
return SOS_OK; | return SOS_OK; |
} | } |
| |
| |
struct sos_kthread * sos_reschedule(struct sos_kthread *current_kthread, | /** |
sos_bool_t do_yield) | * Helper function to determine whether the current thread expired its |
| * time quantum |
| */ |
| static sos_bool_t |
| thread_expired_its_quantuum(struct sos_thread *thr) |
| { |
| sos_sched_priority_t prio = sos_thread_get_priority(thr); |
| |
| /* No timesharing/round-robin for "real-time" threads */ |
| if (SOS_SCHED_PRIO_IS_RT(prio)) |
| return FALSE; |
| |
| /* Current (user) thread expired its time quantum ? A kernel |
| thread never expires because sos_sched_do_timer_tick() below |
| won't update its user_time_spent_in_slice */ |
| if (sos_time_cmp(& thr->running.user_time_spent_in_slice, |
| & time_slice[prio]) >= 0) |
| return TRUE; |
| |
| return FALSE; |
| } |
| |
| |
| struct sos_thread * sos_reschedule(struct sos_thread *current_thread, |
| sos_bool_t do_yield) |
sos_sched_priority_t prio; | sos_sched_priority_t prio; |
| |
if (SOS_KTHR_ZOMBIE == current_kthread->state) | /* Force the current thread to release the CPU if it expired its |
| quantum */ |
| if (thread_expired_its_quantuum(current_thread)) |
| { |
| /* Reset the CPU time used in the quantum */ |
| memset(& current_thread->running.user_time_spent_in_slice, |
| 0x0, sizeof(struct sos_time)); |
| |
| do_yield = TRUE; |
| } |
| |
| if (SOS_THR_ZOMBIE == current_thread->state) |
/* Don't think of returning to this thread since it is | /* Don't think of returning to this thread since it is |
terminated */ | terminated */ |
/* Nop */ | /* Nop */ |
} | } |
else if (SOS_KTHR_BLOCKED != current_kthread->state) | else if (SOS_THR_BLOCKED != current_thread->state) |
/* Take into account the current executing thread unless it is | /* Take into account the current executing thread unless it is |
marked blocked */ | marked blocked */ |
if (do_yield) | if (do_yield) |
{ | { |
/* Ok, reserve it for next turn */ | /* Ok, reserve it for next turn */ |
if (SOS_SCHED_PRIO_IS_RT(sos_kthread_get_priority(current_kthread))) | if (SOS_SCHED_PRIO_IS_RT(sos_thread_get_priority(current_thread))) |
add_in_ready_queue(active_queue, current_kthread, TRUE); | add_in_ready_queue(active_queue, current_thread, TRUE); |
add_in_ready_queue(expired_queue, current_kthread, TRUE); | add_in_ready_queue(expired_queue, current_thread, TRUE); |
else | else |
{ | { |
/* Put it at the head of the active list */ | /* Put it at the head of the active list */ |
add_in_ready_queue(active_queue, current_kthread, FALSE); | add_in_ready_queue(active_queue, current_thread, FALSE); |
} | } |
| |
|
|
non-empty queue */ | non-empty queue */ |
for (prio = SOS_SCHED_PRIO_HIGHEST ; prio <= SOS_SCHED_PRIO_LOWEST ; prio ++) | for (prio = SOS_SCHED_PRIO_HIGHEST ; prio <= SOS_SCHED_PRIO_LOWEST ; prio ++) |
{ | { |
struct sos_kthread *next_thr; | struct sos_thread *next_thr; |
if (list_is_empty_named(active_queue->kthread_list[prio], | if (list_is_empty_named(active_queue->thread_list[prio], |
continue; | continue; |
| |
/* Queue is not empty: take the thread at its head */ | /* Queue is not empty: take the thread at its head */ |
next_thr = list_pop_head_named(active_queue->kthread_list[prio], | next_thr = list_pop_head_named(active_queue->thread_list[prio], |
active_queue->nr_threads --; | active_queue->nr_threads --; |
| |
|
|
SOS_FATAL_ERROR("No kernel thread ready ?!"); | SOS_FATAL_ERROR("No kernel thread ready ?!"); |
return NULL; | return NULL; |
} | } |
| |
| |
| sos_ret_t sos_sched_do_timer_tick() |
| { |
| struct sos_thread *interrupted_thread = sos_thread_get_current(); |
| struct sos_time tick_duration; |
| sos_bool_t cur_is_user; |
| sos_ui32_t nb_user_ready = 0; |
| sos_ui32_t nb_kernel_ready = 0; |
| int prio; |
| |
| sos_time_get_tick_resolution(& tick_duration); |
| |
| /* Update the timing statistics */ |
| if (sos_cpu_context_is_in_user_mode(interrupted_thread->cpu_state)) |
| { |
| cur_is_user = TRUE; |
| |
| /* User time */ |
| sos_time_inc(& interrupted_thread->rusage.ru_utime, |
| & tick_duration); |
| |
| /* Update time spent in the current timeslice ONLY for a user thread */ |
| sos_time_inc(& interrupted_thread->running.user_time_spent_in_slice, |
| & tick_duration); |
| } |
| else |
| { |
| cur_is_user = FALSE; |
| |
| /* System time */ |
| sos_time_inc(& interrupted_thread->rusage.ru_stime, |
| & tick_duration); |
| } |
| |
| |
| /* Update load stats */ |
| for (prio = SOS_SCHED_PRIO_HIGHEST ; prio <= SOS_SCHED_PRIO_LOWEST ; prio ++) |
| { |
| struct sos_thread *thr; |
| int nb_thrs; |
| |
| list_foreach_forward_named(active_queue->thread_list[prio], |
| thr, nb_thrs, |
| ready.rdy_prev, ready.rdy_next) |
| { |
| if (sos_cpu_context_is_in_user_mode(thr->cpu_state)) |
| nb_user_ready ++; |
| else |
| nb_kernel_ready ++; |
| } |
| |
| list_foreach_forward_named(expired_queue->thread_list[prio], |
| thr, nb_thrs, |
| ready.rdy_prev, ready.rdy_next) |
| { |
| if (sos_cpu_context_is_in_user_mode(thr->cpu_state)) |
| nb_user_ready ++; |
| else |
| nb_kernel_ready ++; |
| } |
| } |
| |
| sos_load_do_timer_tick(cur_is_user, |
| nb_user_ready, |
| nb_kernel_ready); |
| |
| return SOS_OK; |
| } |
| |
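To see how the pieces of this hunk fit together: sos_sched_do_timer_tick() grows user_time_spent_in_slice by one tick for every tick spent in user mode, and sos_reschedule() forces a yield as soon as thread_expired_its_quantuum() sees that this counter has reached time_slice[prio]. A small sketch with made-up numbers (the real tick period comes from sos_time_get_tick_resolution()):

  /* Illustration only: with a 10 ms tick and the 48 ms slice of the
     previous example, the 5th user-mode tick pushes the counter to
     50 ms >= 48 ms, so the thread is preempted at the next reschedule */
  static unsigned int ticks_before_forced_yield(unsigned int tick_ms,
                                                unsigned int slice_ms)
  {
    return (slice_ms + tick_ms - 1) / tick_ms;   /* ceil(48/10) == 5 */
  }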
/tmp/sos-code-article6.75/sos/test-art7.c (1970-01-01 01:00:00.000000000 +0100) | ../sos-code-article7/sos/test-art7.c (2005-02-05 17:52:19.000000000 +0100)
|
|
|
| /* Copyright (C) 2005 David Decotigny |
| Copyright (C) 1995 TIS Committee (ELF typedefs, constants and macros) |
| |
| This program is free software; you can redistribute it and/or |
| modify it under the terms of the GNU General Public License |
| as published by the Free Software Foundation; either version 2 |
| of the License, or (at your option) any later version. |
| |
| This program is distributed in the hope that it will be useful, |
| but WITHOUT ANY WARRANTY; without even the implied warranty of |
| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| GNU General Public License for more details. |
| |
| You should have received a copy of the GNU General Public License |
| along with this program; if not, write to the Free Software |
| Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, |
| USA. |
| */ |
| |
| #include <sos/types.h> |
| #include <sos/klibc.h> |
| #include <drivers/bochs.h> |
| #include <sos/physmem.h> |
| #include <sos/assert.h> |
| |
| #include <sos/process.h> |
| #include <sos/thread.h> |
| |
| |
| /** |
| * @file test-art7.c |
| * |
| * Basic tests for the user thread/process management API |
| */ |
| |
| |
| /** |
| * The "C" structure of a user program image in the kernel. Structures |
| * like this are created by the Makefile in the userland/ directory |
| */ |
| struct userprog_entry |
| { |
| const char *name; |
| sos_vaddr_t bottom_vaddr; |
| sos_vaddr_t top_vaddr; |
| }; |
| |
| |
| /** |
| * Symbol marking the start of the userprogs table, as set up by the |
| * ld script in the userland/ directory |
| */ |
| extern char _userprogs_table; |
| |
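For readability, here is a purely conceptual sketch of what the table starting at _userprogs_table contains once the userland/ Makefile and ld script have embedded the programs. The names and addresses below are made up; only the NULL-name terminator, relied upon by lookup_userprog() further down, is significant:

  /* Conceptual sketch only: the real table is generated at build time */
  static const struct userprog_entry example_userprogs_table[] = {
    { "myprog1", 0x00104000, 0x00108000 },  /* [bottom_vaddr, top_vaddr) */
    { "myprog2", 0x00108000, 0x0010c000 },
    { NULL,      0,          0          }   /* end marker: NULL name     */
  };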
| |
| /* |
| * Local functions |
| */ |
| |
| |
| /** |
| * Function to locate the given user program image in the kernel memory |
| */ |
| static struct userprog_entry * lookup_userprog(const char *name); |
| |
| |
| /** |
| * Function to create a new process containing the given USER program |
| * image. This function automatically locates the destination addresses |
| * of the program by examining its ELF header |
| * |
| * @return The address of the first instruction of the program, as |
| * given by its ELF header, or 0 when the program is not a correct ELF |
| * image. |
| */ |
| static sos_uaddr_t load_elf_prog(const struct userprog_entry *prog); |
| |
| |
| /** |
| * Function that locates a USER program in the kernel image, creates a |
| * new USER process for it, and creates the given nb_uthreads USER |
| * threads inside it. |
| */ |
| static sos_ret_t spawn_program(const char *progname, |
| unsigned nb_uthreads); |
| |
| |
| /** |
| * The main function for our tests |
| */ |
| void test_art7() |
| { |
| spawn_program("myprog5", 5); |
| spawn_program("myprog1", 10); |
| spawn_program("myprog5", 1); |
| spawn_program("myprog6", 12); |
| spawn_program("myprog2", 10); |
| spawn_program("myprog5", 1); |
| spawn_program("myprog3", 10); |
| spawn_program("myprog5", 1); |
| spawn_program("myprog1", 10); |
| spawn_program("myprog6", 12); |
| spawn_program("myprog5", 1); |
| spawn_program("myprog4", 10); |
| spawn_program("myprog5", 1); |
| spawn_program("myprog2", 10); |
| spawn_program("myprog6", 12); |
| spawn_program("myprog5", 1); |
| } |
| |
| |
| static sos_ret_t spawn_program(const char *progname, |
| unsigned nb_uthreads) |
| { |
| int i; |
| |
| sos_uaddr_t prog_entry, stack_top_uaddr; |
| struct userprog_entry *prog; |
| struct sos_process *new_proc; |
| |
| prog = lookup_userprog(progname); |
| if (! prog) |
| return -SOS_EINVAL; |
| |
| new_proc = sos_process_create_empty(progname); |
| if (! new_proc) |
| return -SOS_ENOMEM; |
| |
| /* Squat this new process to map the user program into it */ |
| SOS_ASSERT_FATAL(SOS_OK |
| == sos_thread_change_current_mm_context(sos_process_get_mm_context(new_proc))); |
| |
| /* Load the user program image */ |
| prog_entry = load_elf_prog(prog); |
| if (! prog_entry) |
| { |
| sos_process_unref(new_proc); |
| return -SOS_ENOMEM; |
| } |
| |
| /* Map the user stacks into it and create the user threads */ |
| /* By default, the first user stack will be located at the end of |
| the user address space (ie 4GB); the stacks of the other threads |
| will each be located (12 pages) below the previous one */ |
| for (i = 0, stack_top_uaddr = 0xfffffffc ; |
| i < nb_uthreads ; |
| i++, stack_top_uaddr -= 12*SOS_PAGE_SIZE) |
| { |
| char thrname[16]; |
| |
| /* Allocate 1 page for the stack */ |
| sos_ret_t retval; |
| sos_uaddr_t stack_base = SOS_PAGE_ALIGN_INF(stack_top_uaddr); |
| sos_paddr_t ppage; |
| |
| ppage = sos_physmem_ref_physpage_new(FALSE); |
| SOS_ASSERT_FATAL(ppage != 0); |
| |
| /* Map it in the process space. Might fail if there is not |
| enough RAM (we don't support swap-out for the moment) */ |
| retval = sos_paging_map(ppage, stack_base, TRUE, |
| SOS_VM_MAP_PROT_READ |
| | SOS_VM_MAP_PROT_WRITE); |
| SOS_ASSERT_FATAL(retval == SOS_OK); |
| |
| retval = sos_physmem_unref_physpage(ppage); |
| SOS_ASSERT_FATAL(retval == SOS_OK); |
| |
| /* Poison the stack to detect the use of uninitialized |
| variables */ |
| memset((void*)stack_base, 0xa5, SOS_PAGE_SIZE); |
| |
| /* Create the user thread */ |
| snprintf(thrname, sizeof(thrname), "%s:%d", progname, i); |
| sos_bochs_printf("Spawning %s\n", thrname); |
| sos_create_user_thread(thrname, |
| new_proc, |
| prog_entry, |
| 0, |
| stack_top_uaddr, |
| SOS_SCHED_PRIO_TS_LOWEST); |
| |
| sos_thread_yield(); |
| } |
| |
| /* Don't need the reference to the process anymore */ |
| sos_process_unref(new_proc); |
| |
| /* Revert to normal kernel thread's address space */ |
| SOS_ASSERT_FATAL(SOS_OK |
| == sos_thread_change_current_mm_context(NULL)); |
| |
| return SOS_OK; |
| } |
| |
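A small sketch of where the user stack tops computed by the loop above end up, assuming the usual 4 kB x86 page size (SOS_PAGE_SIZE itself is not shown in this diff, so the constant below is an assumption):

  /* Illustration only: thread i's stack top, 12 pages below the previous
     one, starting just under the 4 GB boundary as in spawn_program() */
  static sos_uaddr_t example_stack_top(unsigned int i)
  {
    return 0xfffffffc - i * 12 * 4096;  /* i=0 -> 0xfffffffc,
                                           i=1 -> 0xffff3ffc, ...       */
  }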
| |
| /** |
| * Lookup a user program located inside the kernel's image |
| */ |
| static struct userprog_entry * lookup_userprog(const char *name) |
| { |
| struct userprog_entry *prog; |
| |
| if (! name) |
| return NULL; |
| |
| /* Walk through the table of user program description structures to |
| find the user program with the given name */ |
| for (prog = (struct userprog_entry*) & _userprogs_table ; |
| prog && (prog->name != NULL) ; |
| prog++) |
| { |
| if (0 == strcmp(name, prog->name)) |
| /* Found it ! */ |
| return prog; |
| } |
| |
| return NULL; |
| } |
| |
| |
| /** |
| * Make sure the program is in a valid ELF format, map it into memory, |
| * and return the address of its entry point (ie _start function) |
| * |
| * @return 0 when the program is not a valid ELF |
| */ |
| static sos_uaddr_t load_elf_prog(const struct userprog_entry *prog) |
| { |
| int i; |
| |
| /** |
| * Typedefs, constants and structure definitions as given by the ELF |
| * standard specifications. |
| */ |
| typedef unsigned long Elf32_Addr; |
| typedef unsigned long Elf32_Word; |
| typedef unsigned short Elf32_Half; |
| typedef unsigned long Elf32_Off; |
| typedef signed long Elf32_Sword; |
| |
| /* Elf identification */ |
| |
| #define EI_NIDENT 16 |
| typedef struct { |
| unsigned char e_ident[EI_NIDENT]; |
| Elf32_Half e_type; |
| Elf32_Half e_machine; |
| Elf32_Word e_version; |
| Elf32_Addr e_entry; |
| Elf32_Off e_phoff; |
| Elf32_Off e_shoff; |
| Elf32_Word e_flags; |
| Elf32_Half e_ehsize; |
| Elf32_Half e_phentsize; |
| Elf32_Half e_phnum; |
| Elf32_Half e_shentsize; |
| Elf32_Half e_shnum; |
| Elf32_Half e_shstrndx; |
| } __attribute__((packed)) Elf32_Ehdr_t; |
| |
| /* e_ident value */ |
| #define ELFMAG0 0x7f |
| #define ELFMAG1 'E' |
| #define ELFMAG2 'L' |
| #define ELFMAG3 'F' |
| |
| /* e_ident offsets */ |
| #define EI_MAG0 0 |
| #define EI_MAG1 1 |
| #define EI_MAG2 2 |
| #define EI_MAG3 3 |
| #define EI_CLASS 4 |
| #define EI_DATA 5 |
| #define EI_VERSION 6 |
| #define EI_PAD 7 |
| |
| /* e_ident[EI_CLASS] */ |
| #define ELFCLASSNONE 0 |
| #define ELFCLASS32 1 |
| #define ELFCLASS64 2 |
| |
| /* e_ident[EI_DATA] */ |
| #define ELFDATANONE 0 |
| #define ELFDATA2LSB 1 |
| #define ELFDATA2MSB 2 |
| |
| /* e_type */ |
| #define ET_NONE 0 /* No file type */ |
| #define ET_REL 1 /* Relocatable file */ |
| #define ET_EXEC 2 /* Executable file */ |
| #define ET_DYN 3 /* Shared object file */ |
| #define ET_CORE 4 /* Core file */ |
| #define ET_LOPROC 0xff00 /* Processor-specific */ |
| #define ET_HIPROC 0xffff /* Processor-specific */ |
| |
| /* e_machine */ |
| #define EM_NONE 0 /* No machine */ |
| #define EM_M32 1 /* AT&T WE 32100 */ |
| #define EM_SPARC 2 /* SPARC */ |
| #define EM_386 3 /* Intel 80386 */ |
| #define EM_68K 4 /* Motorola 68000 */ |
| #define EM_88K 5 /* Motorola 88000 */ |
| #define EM_860 7 /* Intel 80860 */ |
| #define EM_MIPS 8 /* MIPS RS3000 */ |
| |
| /* e_version */ |
| #define EV_NONE 0 /* invalid version */ |
| #define EV_CURRENT 1 /* current version */ |
| |
| typedef struct { |
| Elf32_Word p_type; |
| Elf32_Off p_offset; |
| Elf32_Addr p_vaddr; |
| Elf32_Addr p_paddr; |
| Elf32_Word p_filesz; |
| Elf32_Word p_memsz; |
| Elf32_Word p_flags; |
| Elf32_Word p_align; |
| } __attribute__((packed)) Elf32_Phdr_t; |
| |
| /* Reserved segment types p_type */ |
| #define PT_NULL 0 |
| #define PT_LOAD 1 |
| #define PT_DYNAMIC 2 |
| #define PT_INTERP 3 |
| #define PT_NOTE 4 |
| #define PT_SHLIB 5 |
| #define PT_PHDR 6 |
| #define PT_LOPROC 0x70000000 |
| #define PT_HIPROC 0x7fffffff |
| |
| /* p_flags */ |
| #define PF_X 1 |
| #define PF_W 2 |
| #define PF_R 4 |
| |
| |
| Elf32_Ehdr_t *elf_hdr = (Elf32_Ehdr_t*) prog->bottom_vaddr; |
| Elf32_Phdr_t *elf_phdrs; |
| |
| /* Make sure the image is large enough to contain at least the ELF |
| header */ |
| if (prog->bottom_vaddr + sizeof(Elf32_Ehdr_t) > prog->top_vaddr) |
| { |
| sos_bochs_printf("ELF prog %s: incorrect header\n", prog->name); |
| return 0; |
| } |
| |
| /* Macro to check expected values for some fields in the ELF header */ |
| #define ELF_CHECK(hdr,field,expected_value) \ |
| ({ if ((hdr)->field != (expected_value)) \ |
| { \ |
| sos_bochs_printf("ELF prog %s: for %s, expected %x, got %x\n", \ |
| prog->name, \ |
| #field, \ |
| (unsigned)(expected_value), \ |
| (unsigned)(hdr)->field); \ |
| return 0; \ |
| } \ |
| }) |
| |
| ELF_CHECK(elf_hdr, e_ident[EI_MAG0], ELFMAG0); |
| ELF_CHECK(elf_hdr, e_ident[EI_MAG1], ELFMAG1); |
| ELF_CHECK(elf_hdr, e_ident[EI_MAG2], ELFMAG2); |
| ELF_CHECK(elf_hdr, e_ident[EI_MAG3], ELFMAG3); |
| ELF_CHECK(elf_hdr, e_ident[EI_CLASS], ELFCLASS32); |
| ELF_CHECK(elf_hdr, e_ident[EI_DATA], ELFDATA2LSB); |
| ELF_CHECK(elf_hdr, e_type, ET_EXEC); |
| ELF_CHECK(elf_hdr, e_version, EV_CURRENT); |
| |
| /* Get the beginning of the program header table */ |
| elf_phdrs = (Elf32_Phdr_t*) (prog->bottom_vaddr + elf_hdr->e_phoff); |
| |
| /* Map the program segment in R/W mode. To make things clean, we |
| should iterate over the sections, not the program headers */ |
| for (i = 0 ; i < elf_hdr->e_phnum ; i++) |
| { |
| sos_uaddr_t uaddr; |
| |
| /* Ignore the empty program headers that are not marked "LOAD" */ |
| if (elf_phdrs[i].p_type != PT_LOAD) |
| { |
| if (elf_phdrs[i].p_memsz != 0) |
| { |
| SOS_FATAL_ERROR("ELF: non-empty non-LOAD segments not supported yet"); |
| } |
| continue; |
| } |
| |
| if (elf_phdrs[i].p_vaddr < SOS_PAGING_BASE_USER_ADDRESS) |
| { |
| SOS_FATAL_ERROR("User program has an incorrect address"); |
| } |
| |
| /* Map pages of physical memory into user space */ |
| for (uaddr = SOS_PAGE_ALIGN_INF(elf_phdrs[i].p_vaddr) ; |
| uaddr < elf_phdrs[i].p_vaddr + elf_phdrs[i].p_memsz ; |
| uaddr += SOS_PAGE_SIZE) |
| { |
| sos_ret_t retval; |
| sos_paddr_t ppage; |
| ppage = sos_physmem_ref_physpage_new(TRUE); |
| |
| retval = sos_paging_map(ppage, uaddr, TRUE, |
| SOS_VM_MAP_PROT_READ |
| | SOS_VM_MAP_PROT_WRITE); |
| SOS_ASSERT_FATAL(retval == SOS_OK); |
| |
| retval = sos_physmem_unref_physpage(ppage); |
| SOS_ASSERT_FATAL(retval == SOS_OK); |
| } |
| |
| /* Copy segment into memory */ |
| memcpy((void*) elf_phdrs[i].p_vaddr, |
| (void*) (prog->bottom_vaddr + elf_phdrs[i].p_offset), |
| elf_phdrs[i].p_filesz); |
| } |
| |
| return elf_hdr->e_entry; |
| } |
| |
/tmp/sos-code-article6.75/sos/thread.c (1970-01-01 01:00:00.000000000 +0100) | ../sos-code-article7/sos/thread.c (2005-02-05 17:52:19.000000000 +0100)
|
|
|
| /* Copyright (C) 2004,2005 David Decotigny |
| |
| This program is free software; you can redistribute it and/or |
| modify it under the terms of the GNU General Public License |
| as published by the Free Software Foundation; either version 2 |
| of the License, or (at your option) any later version. |
| |
| This program is distributed in the hope that it will be useful, |
| but WITHOUT ANY WARRANTY; without even the implied warranty of |
| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| GNU General Public License for more details. |
| |
| You should have received a copy of the GNU General Public License |
| along with this program; if not, write to the Free Software |
| Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, |
| USA. |
| */ |
| |
| #include <sos/physmem.h> |
| #include <sos/kmem_slab.h> |
| #include <sos/kmalloc.h> |
| #include <sos/klibc.h> |
| #include <sos/list.h> |
| #include <sos/assert.h> |
| #include <hwcore/mm_context.h> |
| #include <sos/process.h> |
| |
| #include <hwcore/irq.h> |
| |
| #include "thread.h" |
| |
| |
| /** |
| * The size of the stack of a kernel thread |
| */ |
| #define SOS_THREAD_KERNEL_STACK_SIZE (1*SOS_PAGE_SIZE) |
| |
| |
| /** |
| * The identifier of the thread currently running on CPU. |
| * |
| * We only support a SINGLE processor, ie a SINGLE thread |
| * running at any time in the system. This greatly simplifies the |
| * implementation of the system, since we don't have to complicate |
| * things in order to retrieve the identifier of the threads running |
| * on the CPU. On multiprocessor systems the current_thread below is |
| * an array indexed by the id of the CPU, so that the challenge is to |
| * retrieve the identifier of the CPU. This is usually done based on |
| * the stack address (Linux implementation) or on some form of TLS |
| * ("Thread Local Storage": can be implemented by way of LDTs for the |
| * processes, accessed through the fs or gs registers). |
| */ |
| static volatile struct sos_thread *current_thread = NULL; |
| |
| |
| /* |
| * The list of threads currently in the system. |
| * |
| * @note We could have used current_thread for that... |
| */ |
| static struct sos_thread *thread_list = NULL; |
| |
| |
| /** |
| * The Cache of thread structures |
| */ |
| static struct sos_kslab_cache *cache_thread; |
| |
| |
| struct sos_thread *sos_thread_get_current() |
| { |
| SOS_ASSERT_FATAL(current_thread->state == SOS_THR_RUNNING); |
| return (struct sos_thread*)current_thread; |
| } |
| |
| |
| inline static sos_ret_t _set_current(struct sos_thread *thr) |
| { |
| SOS_ASSERT_FATAL(thr->state == SOS_THR_READY); |
| current_thread = thr; |
| current_thread->state = SOS_THR_RUNNING; |
| return SOS_OK; |
| } |
| |
| |
| sos_ret_t sos_thread_subsystem_setup(sos_vaddr_t init_thread_stack_base_addr, |
| sos_size_t init_thread_stack_size) |
| { |
| struct sos_thread *myself; |
| |
| /* Allocate the cache of threads */ |
| cache_thread = sos_kmem_cache_create("thread", |
| sizeof(struct sos_thread), |
| 2, |
| 0, |
| SOS_KSLAB_CREATE_MAP |
| | SOS_KSLAB_CREATE_ZERO); |
| if (! cache_thread) |
| return -SOS_ENOMEM; |
| |
| /* Allocate a new thread structure for the current running thread */ |
| myself = (struct sos_thread*) sos_kmem_cache_alloc(cache_thread, |
| SOS_KSLAB_ALLOC_ATOMIC); |
| if (! myself) |
| return -SOS_ENOMEM; |
| |
| /* Initialize the thread attributes */ |
| strzcpy(myself->name, "[kinit]", SOS_THR_MAX_NAMELEN); |
| myself->state = SOS_THR_CREATED; |
| myself->priority = SOS_SCHED_PRIO_LOWEST; |
| myself->kernel_stack_base_addr = init_thread_stack_base_addr; |
| myself->kernel_stack_size = init_thread_stack_size; |
| |
| /* Do some stack poisoning on the bottom of the stack, if needed */ |
| sos_cpu_state_prepare_detect_kernel_stack_overflow(myself->cpu_state, |
| myself->kernel_stack_base_addr, |
| myself->kernel_stack_size); |
| |
| /* Add the thread in the global list */ |
| list_singleton_named(thread_list, myself, gbl_prev, gbl_next); |
| |
| /* Ok, now pretend that the running thread is ourselves */ |
| myself->state = SOS_THR_READY; |
| _set_current(myself); |
| |
| return SOS_OK; |
| } |
| |
| |
| struct sos_thread * |
| sos_create_kernel_thread(const char *name, |
| sos_kernel_thread_start_routine_t start_func, |
| void *start_arg, |
| sos_sched_priority_t priority) |
| { |
| __label__ undo_creation; |
| sos_ui32_t flags; |
| struct sos_thread *new_thread; |
| |
| if (! start_func) |
| return NULL; |
| if (! SOS_SCHED_PRIO_IS_VALID(priority)) |
| return NULL; |
| |
| /* Allocate a thread structure for the new kernel thread */ |
| new_thread |
| = (struct sos_thread*) sos_kmem_cache_alloc(cache_thread, |
| SOS_KSLAB_ALLOC_ATOMIC); |
| if (! new_thread) |
| return NULL; |
| |
| /* Initialize the thread attributes */ |
| strzcpy(new_thread->name, ((name)?name:"[NONAME]"), SOS_THR_MAX_NAMELEN); |
| new_thread->state = SOS_THR_CREATED; |
| new_thread->priority = priority; |
| |
| /* Allocate the stack for the new thread */ |
| new_thread->kernel_stack_base_addr = sos_kmalloc(SOS_THREAD_KERNEL_STACK_SIZE, 0); |
| new_thread->kernel_stack_size = SOS_THREAD_KERNEL_STACK_SIZE; |
| if (! new_thread->kernel_stack_base_addr) |
| goto undo_creation; |
| |
| /* Initialize the CPU context of the new thread */ |
| if (SOS_OK |
| != sos_cpu_kstate_init(& new_thread->cpu_state, |
| (sos_cpu_kstate_function_arg1_t*) start_func, |
| (sos_ui32_t) start_arg, |
| new_thread->kernel_stack_base_addr, |
| new_thread->kernel_stack_size, |
| (sos_cpu_kstate_function_arg1_t*) sos_thread_exit, |
| (sos_ui32_t) NULL)) |
| goto undo_creation; |
| |
| /* Add the thread in the global list */ |
| sos_disable_IRQs(flags); |
| list_add_tail_named(thread_list, new_thread, gbl_prev, gbl_next); |
| sos_restore_IRQs(flags); |
| |
| /* Mark the thread ready */ |
| if (SOS_OK != sos_sched_set_ready(new_thread)) |
| goto undo_creation; |
| |
| /* Normal non-erroneous end of function */ |
| return new_thread; |
| |
| undo_creation: |
| if (new_thread->kernel_stack_base_addr) |
| sos_kfree((sos_vaddr_t) new_thread->kernel_stack_base_addr); |
| sos_kmem_cache_free((sos_vaddr_t) new_thread); |
| return NULL; |
| } |
| |
| |
| struct sos_thread * |
| sos_create_user_thread(const char *name, |
| struct sos_process *process, |
| sos_uaddr_t user_initial_PC, |
| sos_ui32_t user_start_arg, |
| sos_uaddr_t user_initial_SP, |
| sos_sched_priority_t priority) |
| { |
| __label__ undo_creation; |
| sos_ui32_t flags; |
| struct sos_thread *new_thread; |
| |
| if (! SOS_SCHED_PRIO_IS_VALID(priority)) |
| return NULL; |
| |
| /* For a user thread, the process must be given */ |
| if (! process) |
| return NULL; |
| |
| /* Allocate a thread structure for the new user thread */ |
| new_thread |
| = (struct sos_thread*) sos_kmem_cache_alloc(cache_thread, |
| SOS_KSLAB_ALLOC_ATOMIC); |
| if (! new_thread) |
| return NULL; |
| |
| /* Initialize the thread attributes */ |
| strzcpy(new_thread->name, ((name)?name:"[NONAME]"), SOS_THR_MAX_NAMELEN); |
| new_thread->state = SOS_THR_CREATED; |
| new_thread->priority = priority; |
| |
| /* Allocate the stack for the new thread */ |
| new_thread->kernel_stack_base_addr = sos_kmalloc(SOS_THREAD_KERNEL_STACK_SIZE, 0); |
| new_thread->kernel_stack_size = SOS_THREAD_KERNEL_STACK_SIZE; |
| if (! new_thread->kernel_stack_base_addr) |
| goto undo_creation; |
| |
| /* Initialize the CPU context of the new thread */ |
| if (SOS_OK |
| != sos_cpu_ustate_init(& new_thread->cpu_state, |
| user_initial_PC, |
| user_start_arg, |
| user_initial_SP, |
| new_thread->kernel_stack_base_addr, |
| new_thread->kernel_stack_size)) |
| goto undo_creation; |
| |
| /* Attach the new thread to the process */ |
| if (SOS_OK != sos_process_register_thread(process, new_thread)) |
| goto undo_creation; |
| |
| /* Add the thread in the global list */ |
| sos_disable_IRQs(flags); |
| list_add_tail_named(thread_list, new_thread, gbl_prev, gbl_next); |
| sos_restore_IRQs(flags); |
| |
| /* Mark the thread ready */ |
| if (SOS_OK != sos_sched_set_ready(new_thread)) |
| goto undo_creation; |
| |
| /* Normal non-erroneous end of function */ |
| return new_thread; |
| |
| undo_creation: |
| if (new_thread->kernel_stack_base_addr) |
| sos_kfree((sos_vaddr_t) new_thread->kernel_stack_base_addr); |
| sos_kmem_cache_free((sos_vaddr_t) new_thread); |
| return NULL; |
| } |
| |
| |
| /** |
| * Helper function to switch to the correct MMU configuration to suit |
| * the_thread's needs. |
| * - When switching to a user-mode thread, force the reconfiguration |
| * of the MMU |
| * - When switching to a kernel-mode thread, only change the MMU |
| * configuration if the thread was squatting someone else's space |
| */ |
| static void _prepare_mm_context(struct sos_thread *the_thread) |
| { |
| /* Going to restore a thread in user mode ? */ |
| if (sos_cpu_context_is_in_user_mode(the_thread->cpu_state) |
| == TRUE) |
| { |
| /* Yes: force the MMU to be correctly setup with the correct |
| user's address space */ |
| |
| /* The thread should be a user thread */ |
| SOS_ASSERT_FATAL(the_thread->process != NULL); |
| |
| /* It should not squat any other's address space */ |
| SOS_ASSERT_FATAL(the_thread->squatted_mm_context == NULL); |
| |
| /* Perform an MMU context switch if needed */ |
| sos_mm_context_switch_to(sos_process_get_mm_context(the_thread->process)); |
| } |
| |
| /* the_thread is a kernel thread squatting a precise address |
| space ? */ |
| else if (the_thread->squatted_mm_context != NULL) |
| sos_mm_context_switch_to(the_thread->squatted_mm_context); |
| } |
| |
| |
| /** Function called after thr has terminated. Called from inside the context |
| of another thread, interrupts disabled */ |
| static void delete_thread(struct sos_thread *thr) |
| { |
| sos_ui32_t flags; |
| |
| sos_disable_IRQs(flags); |
| list_delete_named(thread_list, thr, gbl_prev, gbl_next); |
| sos_restore_IRQs(flags); |
| |
| sos_kfree((sos_vaddr_t) thr->kernel_stack_base_addr); |
| |
| /* If the thread squats an address space, release it */ |
| if (thr->squatted_mm_context) |
| SOS_ASSERT_FATAL(SOS_OK == sos_thread_change_current_mm_context(NULL)); |
| |
| /* For a user thread: remove the thread from the process threads' list */ |
| if (thr->process) |
| SOS_ASSERT_FATAL(SOS_OK == sos_process_unregister_thread(thr)); |
| |
| memset(thr, 0x0, sizeof(struct sos_thread)); |
| sos_kmem_cache_free((sos_vaddr_t) thr); |
| } |
| |
| |
| void sos_thread_exit() |
| { |
| sos_ui32_t flags; |
| struct sos_thread *myself, *next_thread; |
| |
| /* Interrupt handlers are NOT allowed to exit the current thread ! */ |
| SOS_ASSERT_FATAL(! sos_servicing_irq()); |
| |
| myself = sos_thread_get_current(); |
| |
| /* Refuse to end the currently executing thread if it still holds a |
| resource ! */ |
| SOS_ASSERT_FATAL(list_is_empty_named(myself->kwaitq_list, |
| prev_entry_for_thread, |
| next_entry_for_thread)); |
| |
| /* Prepare to run the next thread */ |
| sos_disable_IRQs(flags); |
| myself->state = SOS_THR_ZOMBIE; |
| next_thread = sos_reschedule(myself, FALSE); |
| |
| /* Make sure that the next_thread is valid */ |
| sos_cpu_state_detect_kernel_stack_overflow(next_thread->cpu_state, |
| next_thread->kernel_stack_base_addr, |
| next_thread->kernel_stack_size); |
| |
| /* |
| * Perform an MMU context switch if needed |
| */ |
| _prepare_mm_context(next_thread); |
| |
| /* No need for sos_restore_IRQs() here because the IRQ flag will be |
| restored to that of the next thread upon context switch */ |
| |
| /* Immediate switch to next thread */ |
| _set_current(next_thread); |
| sos_cpu_context_exit_to(next_thread->cpu_state, |
| (sos_cpu_kstate_function_arg1_t*) delete_thread, |
| (sos_ui32_t) myself); |
| } |
| |
| |
| sos_sched_priority_t sos_thread_get_priority(struct sos_thread *thr) |
| { |
| if (! thr) |
| thr = (struct sos_thread*)current_thread; |
| |
| return thr->priority; |
| } |
| |
| |
| sos_thread_state_t sos_thread_get_state(struct sos_thread *thr) |
| { |
| if (! thr) |
| thr = (struct sos_thread*)current_thread; |
| |
| return thr->state; |
| } |
| |
| |
| typedef enum { YIELD_MYSELF, BLOCK_MYSELF } switch_type_t; |
| /** |
| * Helper function to initiate a context switch in case the current |
| * thread becomes blocked, waiting for a timeout, or calls yield. |
| */ |
| static sos_ret_t _switch_to_next_thread(switch_type_t operation) |
| { |
| struct sos_thread *myself, *next_thread; |
| |
| SOS_ASSERT_FATAL(current_thread->state == SOS_THR_RUNNING); |
| |
| /* Interrupt handlers are NOT allowed to block ! */ |
| SOS_ASSERT_FATAL(! sos_servicing_irq()); |
| |
| myself = (struct sos_thread*)current_thread; |
| |
| /* Make sure that if we are to be marked "BLOCKED", we actually |
| have a reason to be blocked */ |
| if (BLOCK_MYSELF == operation) |
| { |
| myself->state = SOS_THR_BLOCKED; |
| } |
| |
| /* Identify the next thread */ |
| next_thread = sos_reschedule(myself, YIELD_MYSELF == operation); |
| |
| /* Avoid context switch if the context does not change */ |
| if (myself != next_thread) |
| { |
| /* Sanity checks for the next thread */ |
| sos_cpu_state_detect_kernel_stack_overflow(next_thread->cpu_state, |
| next_thread->kernel_stack_base_addr, |
| next_thread->kernel_stack_size); |
| |
| /* |
| * Perform an MMU context switch if needed |
| */ |
| _prepare_mm_context(next_thread); |
| |
| /* |
| * Actual CPU context switch |
| */ |
| _set_current(next_thread); |
| sos_cpu_context_switch(& myself->cpu_state, next_thread->cpu_state); |
| |
| /* Back here ! */ |
| SOS_ASSERT_FATAL(current_thread == myself); |
| SOS_ASSERT_FATAL(current_thread->state == SOS_THR_RUNNING); |
| } |
| else |
| { |
| /* No context switch but still update ID of current thread */ |
| _set_current(next_thread); |
| } |
| |
| return SOS_OK; |
| } |
| |
| |
| /** |
| * Helper function to change the thread's priority in all the |
| * waitqueues associated with the thread. |
| */ |
| static sos_ret_t _change_waitq_priorities(struct sos_thread *thr, |
| sos_sched_priority_t priority) |
| { |
| struct sos_kwaitq_entry *kwq_entry; |
| int nb_waitqs; |
| |
| list_foreach_forward_named(thr->kwaitq_list, kwq_entry, nb_waitqs, |
| prev_entry_for_thread, next_entry_for_thread) |
| { |
| SOS_ASSERT_FATAL(SOS_OK == sos_kwaitq_change_priority(kwq_entry->kwaitq, |
| kwq_entry, |
| priority)); |
| } |
| |
| return SOS_OK; |
| } |
| |
| |
| sos_ret_t sos_thread_set_priority(struct sos_thread *thr, |
| sos_sched_priority_t priority) |
| { |
| __label__ exit_set_prio; |
| sos_ui32_t flags; |
| sos_ret_t retval; |
| |
| |
| if (! SOS_SCHED_PRIO_IS_VALID(priority)) |
| return -SOS_EINVAL; |
| |
| if (! thr) |
| thr = (struct sos_thread*)current_thread; |
| |
| sos_disable_IRQs(flags); |
| |
| /* Signal the kwaitq subsystem that the priority of the thread should |
| be updated in all the waitqs it is waiting in */ |
| retval = _change_waitq_priorities(thr, priority); |
| if (SOS_OK != retval) |
| goto exit_set_prio; |
| |
| /* Signal the scheduler that the thread, currently in a ready list, |
| should take into account the change of priority */ |
| if (SOS_THR_READY == thr->state) |
| retval = sos_sched_change_priority(thr, priority); |
| |
| /* Update priority */ |
| thr->priority = priority; |
| |
| exit_set_prio: |
| sos_restore_IRQs(flags); |
| return retval; |
| } |
| |
| |
| sos_ret_t sos_thread_yield() |
| { |
| sos_ui32_t flags; |
| sos_ret_t retval; |
| |
| sos_disable_IRQs(flags); |
| |
| retval = _switch_to_next_thread(YIELD_MYSELF); |
| |
| sos_restore_IRQs(flags); |
| return retval; |
| } |
| |
| |
| /** |
| * Internal sleep timeout management |
| */ |
| struct sleep_timeout_params |
| { |
| struct sos_thread *thread_to_wakeup; |
| sos_bool_t timeout_triggered; |
| }; |
| |
| |
| /** |
| * Callback called when a timeout happened |
| */ |
| static void sleep_timeout(struct sos_timeout_action *act) |
| { |
| struct sleep_timeout_params *sleep_timeout_params |
| = (struct sleep_timeout_params*) act->routine_data; |
| |
| /* Signal that we have been woken up by the timeout */ |
| sleep_timeout_params->timeout_triggered = TRUE; |
| |
| /* Mark the thread ready */ |
| SOS_ASSERT_FATAL(SOS_OK == |
| sos_thread_force_unblock(sleep_timeout_params |
| ->thread_to_wakeup)); |
| } |
| |
| |
| sos_ret_t sos_thread_sleep(struct sos_time *timeout) |
| { |
| sos_ui32_t flags; |
| struct sleep_timeout_params sleep_timeout_params; |
| struct sos_timeout_action timeout_action; |
| sos_ret_t retval; |
| |
| /* Block forever if no timeout is given */ |
| if (NULL == timeout) |
| { |
| sos_disable_IRQs(flags); |
| retval = _switch_to_next_thread(BLOCK_MYSELF); |
| sos_restore_IRQs(flags); |
| |
| return retval; |
| } |
| |
| /* Initialize the timeout action */ |
| sos_time_init_action(& timeout_action); |
| |
| /* Prepare parameters used by the sleep timeout callback */ |
| sleep_timeout_params.thread_to_wakeup |
| = (struct sos_thread*)current_thread; |
| sleep_timeout_params.timeout_triggered = FALSE; |
| |
| sos_disable_IRQs(flags); |
| |
| /* Now program the timeout ! */ |
| SOS_ASSERT_FATAL(SOS_OK == |
| sos_time_register_action_relative(& timeout_action, |
| timeout, |
| sleep_timeout, |
| & sleep_timeout_params)); |
| |
| /* Prepare to block: wait for sleep_timeout() to wake us up in the |
| timeout kwaitq, or for someone to wake us up in any other |
| waitq */ |
| retval = _switch_to_next_thread(BLOCK_MYSELF); |
| /* Unblocked by something ! */ |
| |
| /* Unblocked by timeout ? */ |
| if (sleep_timeout_params.timeout_triggered) |
| { |
| /* Yes */ |
| SOS_ASSERT_FATAL(sos_time_is_zero(& timeout_action.timeout)); |
| retval = SOS_OK; |
| } |
| else |
| { |
| /* No: We have probably been woken up while in some other |
| kwaitq */ |
| SOS_ASSERT_FATAL(SOS_OK == sos_time_unregister_action(& timeout_action)); |
| retval = -SOS_EINTR; |
| } |
| |
| sos_restore_IRQs(flags); |
| |
| /* Update the remaining timeout */ |
| memcpy(timeout, & timeout_action.timeout, sizeof(struct sos_time)); |
| |
| return retval; |
| } |
| |
| |
| sos_ret_t sos_thread_force_unblock(struct sos_thread *thread) |
| { |
| sos_ret_t retval; |
| sos_ui32_t flags; |
| |
| if (! thread) |
| return -SOS_EINVAL; |
| |
| sos_disable_IRQs(flags); |
| |
| /* Thread already woken up ? */ |
| retval = SOS_OK; |
| switch(sos_thread_get_state(thread)) |
| { |
| case SOS_THR_RUNNING: |
| case SOS_THR_READY: |
| /* Do nothing */ |
| break; |
| |
| case SOS_THR_ZOMBIE: |
| retval = -SOS_EFATAL; |
| break; |
| |
| default: |
| retval = sos_sched_set_ready(thread); |
| break; |
| } |
| |
| sos_restore_IRQs(flags); |
| |
| return retval; |
| } |
| |
| |
| |
| /* ********************************************** |
| * Restricted functions |
| */ |
| |
| sos_ret_t |
| sos_thread_change_current_mm_context(struct sos_mm_context *mm_ctxt) |
| { |
| sos_ui32_t flags; |
| |
| /* Retrieve the previous mm context */ |
| struct sos_mm_context * prev_mm_ctxt |
| = current_thread->squatted_mm_context; |
| |
| /* We should either select a new squatted_mm_context or revert to |
| the default */ |
| if (mm_ctxt != NULL) |
| SOS_ASSERT_FATAL(prev_mm_ctxt == NULL); |
| else |
| SOS_ASSERT_FATAL(prev_mm_ctxt != NULL); |
| |
| sos_disable_IRQs(flags); |
| |
| /* Update current thread's squatted mm context */ |
| current_thread->squatted_mm_context = mm_ctxt; |
| |
| /* Update the reference counts and switch the MMU configuration if |
| needed */ |
| if (mm_ctxt != NULL) |
| { |
| sos_mm_context_ref(mm_ctxt); /* Because it is now referenced as |
| the squatted_mm_context field of |
| the thread */ |
| sos_mm_context_switch_to(mm_ctxt); |
| } |
| else |
| sos_mm_context_unref(prev_mm_ctxt); /* Because it is not referenced as |
| the squatted_mm_context field of |
| the thread any more */ |
| |
| sos_restore_IRQs(flags); |
| |
| return SOS_OK; |
| } |
| |
| |
| void sos_thread_prepare_syscall_switch_back(struct sos_cpu_state *cpu_state) |
| { |
| /* Don't preempt the current thread */ |
| |
| /* |
| * Save the state of the interrupted context to make sure that: |
| * - The list of threads correctly reflects that the thread is back |
| * in user mode |
| * - _prepare_mm_context() deals with the correct mm_context |
| */ |
| current_thread->cpu_state = cpu_state; |
| |
| /* Perform an MMU context switch if needed */ |
| _prepare_mm_context((struct sos_thread*) current_thread); |
| } |
| |
| |
| void sos_thread_prepare_exception_switch_back(struct sos_cpu_state *cpu_state) |
| { |
| /* Don't preempt the current thread */ |
| |
| /* |
| * Save the state of the interrupted context to make sure that: |
| * - The list of threads correctly reflects that the thread is |
| * running in user or kernel mode |
| * - _prepare_mm_context() deals with the correct mm_context |
| */ |
| current_thread->cpu_state = cpu_state; |
| |
| /* Perform an MMU context switch if needed */ |
| _prepare_mm_context((struct sos_thread*) current_thread); |
| } |
| |
| |
| void |
| sos_thread_prepare_irq_servicing(struct sos_cpu_state *interrupted_state) |
| { |
| current_thread->cpu_state = interrupted_state; |
| } |
| |
| |
| struct sos_cpu_state * |
| sos_thread_prepare_irq_switch_back(void) |
| { |
| struct sos_thread *myself, *next_thread; |
| |
| /* In SOS, threads in kernel mode are NEVER preempted from the |
| interrupt handlers ! */ |
| if (! sos_cpu_context_is_in_user_mode(current_thread->cpu_state)) |
| return current_thread->cpu_state; |
| |
| /* |
| * Here we are dealing only with possible preemption of user threads |
| * in user context ! |
| */ |
| |
| /* Make sure the thread actually is a user thread */ |
| SOS_ASSERT_FATAL(current_thread->process != NULL); |
| |
| /* Save the state of the interrupted context */ |
| myself = (struct sos_thread*)current_thread; |
| |
| /* Select the next thread to run */ |
| next_thread = sos_reschedule(myself, FALSE); |
| |
| /* Perform an MMU context switch if needed */ |
| _prepare_mm_context(next_thread); |
| |
| /* Setup the next_thread's context into the CPU */ |
| _set_current(next_thread); |
| return next_thread->cpu_state; |
| } |
| |
/tmp/sos-code-article6.75/sos/thread.h (1970-01-01 01:00:00.000000000 +0100) | ../sos-code-article7/sos/thread.h (2005-02-05 17:52:20.000000000 +0100)
|
|
|
| /* Copyright (C) 2004,2005 David Decotigny |
| |
| This program is free software; you can redistribute it and/or |
| modify it under the terms of the GNU General Public License |
| as published by the Free Software Foundation; either version 2 |
| of the License, or (at your option) any later version. |
| |
| This program is distributed in the hope that it will be useful, |
| but WITHOUT ANY WARRANTY; without even the implied warranty of |
| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| GNU General Public License for more details. |
| |
| You should have received a copy of the GNU General Public License |
| along with this program; if not, write to the Free Software |
| Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, |
| USA. |
| */ |
| #ifndef _SOS_THREAD_H_ |
| #define _SOS_THREAD_H_ |
| |
| /** |
| * @file thread.h |
| * |
| * SOS Thread management API |
| */ |
| |
| #include <sos/errno.h> |
| |
| /* Forward declaration */ |
| struct sos_thread; |
| |
| #include <hwcore/cpu_context.h> |
| #include <sos/sched.h> |
| #include <sos/kwaitq.h> |
| #include <sos/time.h> |
| #include <sos/process.h> |
| |
| /** |
| * The possible states of a valid thread |
| */ |
| typedef enum { SOS_THR_CREATED, /**< Thread created, not fully initialized */ |
| SOS_THR_READY, /**< Thread fully initialized or |
| waiting for CPU after having been |
| blocked or preempted */ |
| SOS_THR_RUNNING, /**< Thread currently running on CPU */ |
| SOS_THR_BLOCKED, /**< Thread waiting for I/O (+ in at LEAST |
| one kwaitq) and/or sleeping (+ in NO |
| kwaitq) */ |
| SOS_THR_ZOMBIE, /**< Thread terminated execution, waiting to |
| be deleted by kernel */ |
| } sos_thread_state_t; |
| |
| |
| /** |
| * TCB (Thread Control Block): structure describing a thread. Don't |
| * access these fields directly: prefer using the accessor functions |
| * below. |
| */ |
| struct sos_thread |
| { |
| #define SOS_THR_MAX_NAMELEN 32 |
| char name[SOS_THR_MAX_NAMELEN]; |
| |
| sos_thread_state_t state; |
| sos_sched_priority_t priority; |
| |
| /** |
| * The hardware context of the thread. |
| * |
| * It will reflect the CPU state of the thread: |
| * - From an interrupt handler: the state of the thread at the time |
| * of the OUTERMOST irq. An IRQ is not allowed to make context |
| * switches, so this context will remain valid from the beginning of |
| * the outermost IRQ handler to the end of it, no matter if there |
| * are other IRQ handlers nesting in one another. You may safely |
| * use it from IRQ handlers to query the state of the interrupted |
| * thread, no matter if there has been other IRQ handlers |
| * executing meanwhile. |
| * - From normal kernel code, exceptions and syscall: the state of |
| * the thread the last time there was a context switch from this |
| * thread to another one. Thus this field WON'T reflect the |
| * current thread's cpu_state in these cases. So, in these cases, |
| * simply DO NOT USE IT outside thread.c ! Note: for syscall and |
| * exception handlers, the VALID state of the interrupted thread is |
| * passed as an argument to the handlers. |
| */ |
| struct sos_cpu_state *cpu_state; |
| |
| /* Kernel stack parameters */ |
| sos_vaddr_t kernel_stack_base_addr; |
| sos_size_t kernel_stack_size; |
| |
| /* Process this thread belongs to. Always NULL for a kernel |
| thread */ |
| struct sos_process *process; |
| |
| /** |
| * Address space currently "squatted" by the thread, or used to be |
| * active when the thread was interrupted/preempted. This is the MMU |
| * configuration expected before the cpu_state of the thread is |
| * restored on CPU. |
| * - For kernel threads: should normally be NULL, meaning that the |
| * thread will squat the current mm_context currently set in the |
| * MMU. Might be NON NULL when a kernel thread squats a given |
| * process to manipulate its address space. |
| * - For user threads: should normally be NULL. More precisely: |
| * - in user mode: the thread->process.mm_context is ALWAYS |
| * set on MMU. squatted_mm_context is ALWAYS NULL in this |
| * situation, meaning that the thread in user mode uses its |
| * process-space as expected |
| * - in kernel mode: NULL means that we keep on using the |
| * mm_context currently set on MMU, which might be the |
| * mm_context of another process. This is natural since a |
| * thread in kernel mode normally only uses data in kernel |
| * space. BTW, this limits the number of TLB flushes. However, |
| * there are exceptions where this squatted_mm_context will |
| * NOT be NULL. One is the copy_from/to_user API, which can |
| * force the effective mm_context so that the MMU will be |
| * (re)configured upon every context switch to the thread to match |
| * the squatted_mm_context. Another exception is when a parent |
| * thread creates the address space of a child process, in |
| * which case the parent thread might temporarily decide to |
| * switch to the child's process space. |
| * |
| * This is the SOS implementation of the Linux "Lazy TLB" and |
| * address-space loaning. |
| */ |
| struct sos_mm_context *squatted_mm_context; |
| |
| /* Data specific to each state */ |
| union |
| { |
| struct |
| { |
| struct sos_sched_queue *rdy_queue; |
| struct sos_thread *rdy_prev, *rdy_next; |
| } ready; |
| |
| struct |
| { |
| struct sos_time user_time_spent_in_slice; |
| } running; |
| }; /* Anonymous union (gcc extension) */ |
| |
| |
| /* |
| * Data used by the kwaitq subsystem: list of kwaitqueues the thread |
| * is waiting for. |
| * |
| * @note: a RUNNING or READY thread might be in one or more |
| * waitqueues ! The only property we have is that, among these |
| * waitqueues (if any), _at least_ one has woken the thread. |
| */ |
| struct sos_kwaitq_entry *kwaitq_list; |
| |
| |
| /** |
| * Some statistics |
| */ |
| struct rusage |
| { |
| /* Updated by sched.c */ |
| struct sos_time ru_utime; /* Time spent in user mode */ |
| struct sos_time ru_stime; /* Time spent in kernel mode */ |
| } rusage; |
| |
| |
| /** |
| * Chaining pointers for the list of threads in the parent process |
| */ |
| struct sos_thread *prev_in_process, *next_in_process; |
| |
| |
| /** |
| * Chaining pointers for global ("gbl") list of threads (debug) |
| */ |
| struct sos_thread *gbl_prev, *gbl_next; |
| }; |
| |
| |
| /** |
| * Definition of the function executed by a kernel thread |
| */ |
| typedef void (*sos_kernel_thread_start_routine_t)(void *arg); |
| |
| |
| /** |
| * Initialize the subsystem responsible for thread management |
| * |
| * Initialize the primary kernel thread so that it can be handled the |
| * same way as an ordinary thread created by sos_create_kernel_thread(). |
| */ |
| sos_ret_t sos_thread_subsystem_setup(sos_vaddr_t init_thread_stack_base_addr, |
| sos_size_t init_thread_stack_size); |
| |
| |
| /** |
| * Create a new kernel thread |
| */ |
| struct sos_thread * |
| sos_create_kernel_thread(const char *name, |
| sos_kernel_thread_start_routine_t start_func, |
| void *start_arg, |
| sos_sched_priority_t priority); |
| |
| |
| /** |
| * Create a new user thread |
| */ |
| struct sos_thread * |
| sos_create_user_thread(const char *name, |
| struct sos_process *process, |
| sos_uaddr_t user_initial_PC, |
| sos_ui32_t user_start_arg, |
| sos_uaddr_t user_initial_SP, |
| sos_sched_priority_t priority); |
| |
| |
| /** |
| * Terminate the execution of the current thread. For kernel threads, |
| * it is called by default when the start routine returns. |
| */ |
| void sos_thread_exit() __attribute__((noreturn)); |
| |
| |
| /** |
| * Get the identifier of the thread currently running on CPU. Trivial |
| * function. |
| */ |
| struct sos_thread *sos_thread_get_current(); |
| |
| |
| /** |
| * If thr == NULL, get the priority of the current thread. Trivial |
| * function. |
| * |
| * @note NOT protected against interrupts |
| */ |
| sos_sched_priority_t sos_thread_get_priority(struct sos_thread *thr); |
| |
| |
| /** |
| * If thr == NULL, get the state of the current thread. Trivial |
| * function. |
| * |
| * @note NOT protected against interrupts |
| */ |
| sos_thread_state_t sos_thread_get_state(struct sos_thread *thr); |
| |
| |
| /** |
| * If thr == NULL, set the priority of the current thread |
| * |
| * @note NO context-switch ever occurs in this function ! |
| */ |
| sos_ret_t sos_thread_set_priority(struct sos_thread *thr, |
| sos_sched_priority_t priority); |
| |
| |
| /** |
| * Yield CPU to another ready thread. |
| * |
| * @note This is a BLOCKING FUNCTION |
| */ |
| sos_ret_t sos_thread_yield(); |
| |
| |
| /** |
| * Release the CPU for (at least) the given delay. |
| * |
| * @param delay The delay to wait for. If delay == NULL then wait |
| * indefinitely, until some event wakes the thread up. |
| * |
| * @return SOS_OK when delay expired (and delay is reset to zero), |
| * -SOS_EINTR otherwise (and delay contains the amount of time |
| * remaining). |
| * |
| * @note This is a BLOCKING FUNCTION |
| */ |
| sos_ret_t sos_thread_sleep(/* in/out */struct sos_time *delay); |
| |
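A minimal usage sketch of the contract described above, to be run from a kernel thread (the 500 ms value is arbitrary):

  /* Sketch: sleep for ~500 ms; if something wakes us up earlier,
     'delay' holds the time that was still remaining */
  static void example_nap(void)
  {
    struct sos_time delay;
    delay.sec     = 0;
    delay.nanosec = 500000000UL;   /* 500 ms, arbitrary */

    if (-SOS_EINTR == sos_thread_sleep(& delay))
      sos_bochs_printf("sleep interrupted, %d ns were remaining\n",
                       (int) delay.nanosec);
  }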
| |
| /** |
| * Mark the given thread as READY (if not already ready) even if it is |
| * blocked in a kwaitq or in a sleep ! As a result, the interrupted |
| * kwaitq/sleep function call of the thread will return with |
| * -SOS_EINTR. |
| * |
| * @return -SOS_EINVAL if thread does not exist, or -SOS_EFATAL if |
| * marked ZOMBIE. |
| * |
| * @note As a result, the semaphore/mutex/conditions/... functions |
| * return values SHOULD ALWAYS be checked ! If they are != SOS_OK, |
| * then the caller should consider that the resource is not aquired |
| * because somebody woke the thread by some way. |
| */ |
| sos_ret_t sos_thread_force_unblock(struct sos_thread *thread); |
| |
| |
| /* ********************************************** |
| * Restricted functions |
| */ |
| |
| |
| /** |
| * Restricted function to change the current mm_context AND the |
| * squatted_mm_context of the current thread in order to access the data |
| * in this context |
| * |
| * @param mm_ctxt The mm_ctxt to restore. Might be NULL, meaning that: |
| * - for a Kernel thread: the current MMU configuration is never |
| * modified. The address space to use is limited to the kernel |
| * space, user space might change due to preemptions to other |
| * processes |
| * - for a User thread in kernel mode: same as for kernel threads |
| * - when a User thread will go back in user context: the MMU will |
| * be reconfigured to match the mm_context of the thread's |
| * process |
| * |
| * @note A non NULL parameter is allowed only if the |
| * squatted_mm_context is not already set. A NULL parameter is allowed |
| * only if the squatted_mm_context was already set. |
| * |
| * @note The use of this function is RESERVED to the syscall handler |
| * and the copy_from/to_user functions |
| */ |
| sos_ret_t |
| sos_thread_change_current_mm_context(struct sos_mm_context *mm_ctxt); |
| |
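The pattern this note reserves the function for is the one already used by spawn_program() in test-art7.c: a kernel-mode thread temporarily "squats" a process's address space, touches it, then reverts to NULL. A purely hypothetical sketch follows; this is NOT the actual uaccess.c code, and copy_into_process(), its parameters and the omitted user-range checks are made up:

  /* Hypothetical illustration of the squat / access / release pattern */
  static sos_ret_t copy_into_process(struct sos_process *dest_process,
                                     sos_uaddr_t dest_uaddr,
                                     const void *src, sos_size_t len)
  {
    /* Configure the MMU (and squatted_mm_context) for the target process */
    sos_ret_t retval = sos_thread_change_current_mm_context(
                           sos_process_get_mm_context(dest_process));
    if (SOS_OK != retval)
      return retval;

    /* dest_uaddr is now mapped through the squatted context. Real code
       would first check that [dest_uaddr, dest_uaddr+len) is user space */
    memcpy((void*) dest_uaddr, src, len);

    /* Revert to the default (NULL) squatted context */
    return sos_thread_change_current_mm_context(NULL);
  }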
| |
| /** |
| * Restricted callback called when a syscall goes back in user mode, |
| * to reconfigure the MMU to match that of the current thread's |
| * process MMU context. |
| * |
| * @note The use of this function is RESERVED to the syscall wrapper |
| */ |
| void sos_thread_prepare_syscall_switch_back(struct sos_cpu_state *cpu_state); |
| |
| |
| /** |
| * Restricted callback called when an exception handler goes back to |
| * the interrupted thread to reconfigure the MMU to match that of the |
| * current thread's process MMU context. |
| * |
| * @note The use of this function is RESERVED to the exception wrappers |
| */ |
| void sos_thread_prepare_exception_switch_back(struct sos_cpu_state *cpu_state); |
| |
| |
| /** |
| * Restricted callback called when an IRQ is entered while the CPU was |
| * NOT already servicing any other IRQ (ie the outermost IRQ handler |
| * is entered). This callback simply updates the "cpu_state" field so |
| * that IRQ handlers always know the state of the interrupted thread, |
| * even if they are nested inside other IRQ handlers. |
| * |
| * @note The use of this function is RESERVED to the irq wrappers |
| */ |
| void |
| sos_thread_prepare_irq_servicing(struct sos_cpu_state *interrupted_state); |
| |
| |
| /** |
| * Restricted callback called when the outermost IRQ handler returns, |
| * to select the thread to return to. This callback implements: |
| * - preemption of user threads in user mode (time sharing / FIFO) |
| * - non-preemption of user threads in kernel mode (interrupted thread |
| * is restored on CPU "as is") |
| * - non-preemption of kernel threads (same remark) |
| * The MMU is reconfigured correctly to match the address space of the |
| * selected thread. |
| * |
| * @return The CPU context of the thread to return to |
| * |
| * @note The use of this function is RESERVED to the irq wrappers |
| */ |
| struct sos_cpu_state * |
| sos_thread_prepare_irq_switch_back(void); |
| |
| |
| #endif /* _SOS_THREAD_H_ */ |
| |