/tmp/sos-code-article6.5/hwcore/cpu_context.c (2005-01-04 04:13:48.000000000 +0100
) |
|
../sos-code-article6.75/hwcore/cpu_context.c (2005-03-02 17:30:43.000000000 +0100
) |
|
|
|
/* Copyright (C) 2000-2004, The KOS team | /* Copyright (C) 2005 David Decotigny |
Copyright (C) 1999 Free Software Foundation, Inc. | Copyright (C) 2000-2004, The KOS team |
This program is free software; you can redistribute it and/or | This program is free software; you can redistribute it and/or |
modify it under the terms of the GNU General Public License | modify it under the terms of the GNU General Public License |
|
|
* the registers are stored on the stack in | * the registers are stored on the stack in |
* irq_wrappers.S/exception_wrappers.S !!! Hence the constraint above. | * irq_wrappers.S/exception_wrappers.S !!! Hence the constraint above. |
*/ | */ |
struct sos_cpu_kstate { | struct sos_cpu_state { |
| |
/* These are SOS convention */ | /* These are SOS convention */ |
|
|
sos_ui16_t fs; | sos_ui16_t fs; |
sos_ui16_t es; | sos_ui16_t es; |
sos_ui16_t ds; | sos_ui16_t ds; |
sos_ui16_t ss; | sos_ui16_t cpl0_ss; /* This is ALWAYS the Stack Segment of the |
| Kernel context (CPL0) of the interrupted |
| thread, even for a user thread */ |
sos_ui32_t eax; | sos_ui32_t eax; |
sos_ui32_t ebx; | sos_ui32_t ebx; |
|
|
/* MUST NEVER CHANGE (dependent on the IA32 iret instruction) */ | /* MUST NEVER CHANGE (dependent on the IA32 iret instruction) */ |
sos_ui32_t error_code; | sos_ui32_t error_code; |
sos_vaddr_t eip; | sos_vaddr_t eip; |
sos_ui32_t cs; | sos_ui32_t cs; /* 32bits according to the specs ! However, the CS |
| register is really 16bits long */ |
| |
/* (Higher addresses) */ | /* (Higher addresses) */ |
} __attribute__((packed)); | } __attribute__((packed)); |
| |
| |
| /** |
| * The CS value pushed on the stack by the CPU upon interrupt, and |
| * needed by the iret instruction, is 32bits long while the real CPU |
| * CS register is 16bits only: this macro simply retrieves the CPU |
| * "CS" register value from the CS value pushed on the stack by the |
| * CPU upon interrupt. |
| * |
| * The remaining 16bits pushed by the CPU should be considered |
| * "reserved" and architecture dependent. IMHO, the specs don't say |
| * anything about them. Considering that some architectures generate |
| * non-zero values for these 16bits (at least Cyrix), we'd better |
| * ignore them. |
| */ |
| #define GET_CPU_CS_REGISTER_VALUE(pushed_ui32_cs_value) \ |
| ( (pushed_ui32_cs_value) & 0xffff ) |
| |
| |
| /** |
| * Structure of an interrupted Kernel thread's context |
| */ |
| struct sos_cpu_kstate |
| { |
| struct sos_cpu_state regs; |
| } __attribute__((packed)); |
| |
| |
| /** |
| * THE main operation of a kernel thread. This routine calls the |
| * kernel thread function start_func and calls exit_func when |
| * start_func returns. |
| */ |
static void core_routine (sos_cpu_kstate_function_arg1_t *start_func, | static void core_routine (sos_cpu_kstate_function_arg1_t *start_func, |
sos_ui32_t start_arg, | sos_ui32_t start_arg, |
sos_cpu_kstate_function_arg1_t *exit_func, | sos_cpu_kstate_function_arg1_t *exit_func, |
|
|
} | } |
| |
| |
sos_ret_t sos_cpu_kstate_init(struct sos_cpu_kstate **ctxt, | sos_ret_t sos_cpu_kstate_init(struct sos_cpu_state **ctxt, |
sos_ui32_t start_arg, | sos_ui32_t start_arg, |
sos_vaddr_t stack_bottom, | sos_vaddr_t stack_bottom, |
|
|
sos_cpu_kstate_function_arg1_t *exit_func, | sos_cpu_kstate_function_arg1_t *exit_func, |
sos_ui32_t exit_arg) | sos_ui32_t exit_arg) |
{ | { |
| /* We are initializing a Kernel thread's context */ |
| struct sos_cpu_kstate *kctxt; |
| |
/* This is a critical internal function, so that it is assumed that | /* This is a critical internal function, so that it is assumed that |
the caller knows what he does: we legitimally assume that values | the caller knows what he does: we legitimally assume that values |
for ctxt, start_func, stack_* and exit_func are allways VALID ! */ | for ctxt, start_func, stack_* and exit_func are allways VALID ! */ |
|
|
sos_ui32_t *stack = (sos_ui32_t*)tmp_vaddr; | sos_ui32_t *stack = (sos_ui32_t*)tmp_vaddr; |
| |
/* If needed, poison the stack */ | /* If needed, poison the stack */ |
#ifdef SOS_CPU_KSTATE_DETECT_UNINIT_VARS | #ifdef SOS_CPU_STATE_DETECT_UNINIT_KERNEL_VARS |
memset((void*)stack_bottom, SOS_CPU_KSTATE_STACK_POISON, stack_size); | memset((void*)stack_bottom, SOS_CPU_STATE_STACK_POISON, stack_size); |
#elif defined(SOS_CPU_KSTATE_DETECT_STACK_OVERFLOW) | #elif defined(SOS_CPU_STATE_DETECT_KERNEL_STACK_OVERFLOW) |
sos_cpu_kstate_prepare_detect_stack_overflow(stack_bottom, stack_size); | sos_cpu_state_prepare_detect_kernel_stack_overflow(stack_bottom, stack_size); |
| |
/* Simulate a call to the core_routine() function: prepare its | /* Simulate a call to the core_routine() function: prepare its |
|
|
/* Compute the base address of the structure, which must be located | /* Compute the base address of the structure, which must be located |
below the previous elements */ | below the previous elements */ |
tmp_vaddr = ((sos_vaddr_t)stack) - sizeof(struct sos_cpu_kstate); | tmp_vaddr = ((sos_vaddr_t)stack) - sizeof(struct sos_cpu_kstate); |
*ctxt = (struct sos_cpu_kstate*)tmp_vaddr; | kctxt = (struct sos_cpu_kstate*)tmp_vaddr; |
/* Initialize the CPU context structure */ | /* Initialize the CPU context structure */ |
memset(*ctxt, 0x0, sizeof(struct sos_cpu_kstate)); | memset(kctxt, 0x0, sizeof(struct sos_cpu_kstate)); |
/* Tell the CPU context structure that the first instruction to | /* Tell the CPU context structure that the first instruction to |
execute will be that of the core_routine() function */ | execute will be that of the core_routine() function */ |
(*ctxt)->eip = (sos_ui32_t)core_routine; | kctxt->regs.eip = (sos_ui32_t)core_routine; |
/* Setup the segment registers */ | /* Setup the segment registers */ |
(*ctxt)->cs = SOS_BUILD_SEGMENT_REG_VALUE(0, 0, SOS_SEG_KCODE); /* Code */ | kctxt->regs.cs |
(*ctxt)->ds = SOS_BUILD_SEGMENT_REG_VALUE(0, 0, SOS_SEG_KDATA); /* Data */ | = SOS_BUILD_SEGMENT_REG_VALUE(0, FALSE, SOS_SEG_KCODE); /* Code */ |
(*ctxt)->es = SOS_BUILD_SEGMENT_REG_VALUE(0, 0, SOS_SEG_KDATA); /* Data */ | kctxt->regs.ds |
(*ctxt)->ss = SOS_BUILD_SEGMENT_REG_VALUE(0, 0, SOS_SEG_KDATA); /* Stack */ | = SOS_BUILD_SEGMENT_REG_VALUE(0, FALSE, SOS_SEG_KDATA); /* Data */ |
| kctxt->regs.es |
| = SOS_BUILD_SEGMENT_REG_VALUE(0, FALSE, SOS_SEG_KDATA); /* Data */ |
| kctxt->regs.cpl0_ss |
| = SOS_BUILD_SEGMENT_REG_VALUE(0, FALSE, SOS_SEG_KDATA); /* Stack */ |
| |
/* The newly created context is initially interruptible */ | /* The newly created context is initially interruptible */ |
(*ctxt)->eflags = (1 << 9); /* set IF bit */ | kctxt->regs.eflags = (1 << 9); /* set IF bit */ |
| |
| /* Finally, update the generic kernel/user thread context */ |
| *ctxt = (struct sos_cpu_state*) kctxt; |
return SOS_OK; | return SOS_OK; |
} | } |
| |
| |
#if defined(SOS_CPU_KSTATE_DETECT_STACK_OVERFLOW) | #if defined(SOS_CPU_STATE_DETECT_KERNEL_STACK_OVERFLOW) |
sos_cpu_kstate_prepare_detect_stack_overflow(const struct sos_cpu_kstate *ctxt, | sos_cpu_state_prepare_detect_kernel_stack_overflow(const struct sos_cpu_state *ctxt, |
sos_vaddr_t stack_bottom, | sos_vaddr_t stack_bottom, |
sos_size_t stack_size) | sos_size_t stack_size) |
sos_size_t poison_size = SOS_CPU_KSTATE_DETECT_STACK_OVERFLOW; | sos_size_t poison_size = SOS_CPU_STATE_DETECT_KERNEL_STACK_OVERFLOW; |
poison_size = stack_size; | poison_size = stack_size; |
| |
memset((void*)stack_bottom, SOS_CPU_KSTATE_STACK_POISON, poison_size); | memset((void*)stack_bottom, SOS_CPU_STATE_STACK_POISON, poison_size); |
| |
| |
void | void |
sos_cpu_kstate_detect_stack_overflow(const struct sos_cpu_kstate *ctxt, | sos_cpu_state_detect_kernel_stack_overflow(const struct sos_cpu_state *ctxt, |
sos_vaddr_t stack_bottom, | sos_vaddr_t stack_bottom, |
sos_size_t stack_size) | sos_size_t stack_size) |
unsigned char *c; | unsigned char *c; |
int i; | int i; |
| |
| /* On SOS, "ctxt" corresponds to the address of the esp register of |
| the saved context in Kernel mode (always, even for the interrupted |
| context of a user thread). Here we make sure that this stack |
| pointer is within the allowed stack area */ |
SOS_ASSERT_FATAL(((sos_vaddr_t)ctxt) >= stack_bottom); | SOS_ASSERT_FATAL(((sos_vaddr_t)ctxt) >= stack_bottom); |
SOS_ASSERT_FATAL(((sos_vaddr_t)ctxt) + sizeof(struct sos_cpu_kstate) | SOS_ASSERT_FATAL(((sos_vaddr_t)ctxt) + sizeof(struct sos_cpu_kstate) |
<= stack_bottom + stack_size); | <= stack_bottom + stack_size); |
| |
| /* Check that the bottom of the stack has not been altered */ |
for (c = (unsigned char*) stack_bottom, i = 0 ; | for (c = (unsigned char*) stack_bottom, i = 0 ; |
(i < SOS_CPU_KSTATE_DETECT_STACK_OVERFLOW) && (i < stack_size) ; | (i < SOS_CPU_STATE_DETECT_KERNEL_STACK_OVERFLOW) && (i < stack_size) ; |
{ | { |
SOS_ASSERT_FATAL(SOS_CPU_KSTATE_STACK_POISON == *c); | SOS_ASSERT_FATAL(SOS_CPU_STATE_STACK_POISON == *c); |
} | } |
#endif | #endif |
| |
| |
sos_vaddr_t sos_cpu_kstate_get_PC(const struct sos_cpu_kstate *ctxt) | /* ======================================================================= |
| * Public Accessor functions |
| */ |
| |
| |
| sos_vaddr_t sos_cpu_context_get_PC(const struct sos_cpu_state *ctxt) |
SOS_ASSERT_FATAL(NULL != ctxt); | SOS_ASSERT_FATAL(NULL != ctxt); |
| |
| /* This is the PC of the interrupted context (ie kernel or user |
| context). */ |
return ctxt->eip; | return ctxt->eip; |
} | } |
| |
| |
sos_vaddr_t sos_cpu_kstate_get_SP(const struct sos_cpu_kstate *ctxt) | sos_vaddr_t sos_cpu_context_get_SP(const struct sos_cpu_state *ctxt) |
SOS_ASSERT_FATAL(NULL != ctxt); | SOS_ASSERT_FATAL(NULL != ctxt); |
| |
| /* On SOS, "ctxt" corresponds to the address of the esp register of |
| the saved context in Kernel mode (always, even for the interrupted |
| context of a user thread). */ |
return (sos_vaddr_t)ctxt; | return (sos_vaddr_t)ctxt; |
} | } |
| |
| |
void sos_cpu_kstate_dump(const struct sos_cpu_kstate *ctxt) | void sos_cpu_context_dump(const struct sos_cpu_state *ctxt) |
char buf[128]; | char buf[128]; |
snprintf(buf, sizeof(buf), | snprintf(buf, sizeof(buf), |
"CPU: eip=%x esp=%x eflags=%x cs=%x ds=%x ss=%x err=%x", | "CPU: eip=%x esp=%x eflags=%x cs=%x ds=%x ss=%x err=%x", |
(unsigned)ctxt->eip, (unsigned)ctxt, (unsigned)ctxt->eflags, | (unsigned)ctxt->eip, (unsigned)ctxt, (unsigned)ctxt->eflags, |
(unsigned)ctxt->cs, (unsigned)ctxt->ds, (unsigned)ctxt->ss, | (unsigned)GET_CPU_CS_REGISTER_VALUE(ctxt->cs), (unsigned)ctxt->ds, |
| (unsigned)ctxt->cpl0_ss, |
sos_bochs_putstring(buf); sos_bochs_putstring("\n"); | sos_bochs_putstring(buf); sos_bochs_putstring("\n"); |
sos_x86_videomem_putstring(23, 0, | sos_x86_videomem_putstring(23, 0, |
|
|
} | } |
| |
| |
sos_ui32_t sos_cpu_kstate_get_EX_info(const struct sos_cpu_kstate *ctxt) | /* ======================================================================= |
| * Public Accessor functions TO BE USED ONLY BY Exception handlers |
| */ |
| |
| |
| sos_ui32_t sos_cpu_context_get_EX_info(const struct sos_cpu_state *ctxt) |
SOS_ASSERT_FATAL(NULL != ctxt); | SOS_ASSERT_FATAL(NULL != ctxt); |
return ctxt->error_code; | return ctxt->error_code; |
|
|
| |
| |
sos_vaddr_t | sos_vaddr_t |
sos_cpu_kstate_get_EX_faulting_vaddr(const struct sos_cpu_kstate *ctxt) | sos_cpu_context_get_EX_faulting_vaddr(const struct sos_cpu_state *ctxt) |
sos_ui32_t cr2; | sos_ui32_t cr2; |
| |
/* See Intel Vol 3 (section 5.14): the address of the faulting | /* |
virtual address of a page fault is stored in the cr2 register */ | * See Intel Vol 3 (section 5.14): the address of the faulting |
| * virtual address of a page fault is stored in the cr2 |
| * register. |
| * |
| * Actually, we do not store the cr2 register in a saved |
| * kernel thread's context. So we retrieve the cr2's value directly |
| * from the processor. The value we retrieve in an exception handler |
| * is actually the correct one because an exception is synchronous |
| * with the code causing the fault, and cannot be interrupted since |
| * the IDT entries in SOS are "interrupt gates" (ie IRQ are |
| * disabled). |
| */ |
:"=r"(cr2) | :"=r"(cr2) |
: ); | : ); |
|
|
} | } |
| |
| |
sos_ui32_t sos_backtrace(const struct sos_cpu_kstate *cpu_kstate, | /* ======================================================================= |
| * Backtrace facility. To be used for DEBUGging purpose ONLY. |
| */ |
| |
| |
| sos_ui32_t sos_backtrace(const struct sos_cpu_state *cpu_state, |
sos_vaddr_t stack_bottom, | sos_vaddr_t stack_bottom, |
sos_size_t stack_size, | sos_size_t stack_size, |
|
|
* function will return -SOS_ENOSUP. | * function will return -SOS_ENOSUP. |
*/ | */ |
| |
if (cpu_kstate) | if (cpu_state) |
callee_PC = cpu_kstate->eip; | callee_PC = cpu_state->eip; |
caller_frame = cpu_kstate->ebp; | caller_frame = cpu_state->ebp; |
else | else |
{ | { |
| |
/tmp/sos-code-article6.5/hwcore/cpu_context.h (2005-01-04 04:13:48.000000000 +0100
) |
|
../sos-code-article6.75/hwcore/cpu_context.h (2005-03-02 17:30:43.000000000 +0100
) |
|
|
|
/* Copyright (C) 2000-2004, The KOS team | /* Copyright (C) 2005 David Decotigny |
Copyright (C) 1999 Free Software Foundation, Inc. | Copyright (C) 2000-2004, The KOS team |
This program is free software; you can redistribute it and/or | This program is free software; you can redistribute it and/or |
modify it under the terms of the GNU General Public License | modify it under the terms of the GNU General Public License |
|
|
| |
| |
/** | /** |
* Opaque structure storing the CPU context of an inactive kernel | * Opaque structure storing the CPU context of an inactive kernel or |
* thread, as saved by the low level primitives below or by the | * user thread, as saved by the low level primitives below or by the |
* | * |
* @note This is an (architecture-independent) forward declaration: | * @note This is an (architecture-independent) forward declaration: |
* see cpu_context.c and the *.S files for its | * see cpu_context.c and the *.S files for its |
* (architecture-dependent) definition. | * (architecture-dependent) definition. |
*/ | */ |
struct sos_cpu_kstate; | struct sos_cpu_state; |
| |
/** | /** |
* The type of the functions passed as arguments below | * The type of the functions passed as arguments to the Kernel thread |
| * related functions. |
typedef void (sos_cpu_kstate_function_arg1_t(sos_ui32_t arg1)); | typedef void (sos_cpu_kstate_function_arg1_t(sos_ui32_t arg1)); |
| |
|
|
* start_func function returns, the function exit_func is called with | * start_func function returns, the function exit_func is called with |
* argument exit_arg. | * argument exit_arg. |
* | * |
* @param ctxt The kernel thread CPU context to initialize. The | * @param kctxt The kernel thread CPU context to initialize. The |
* address of the newly-initialized struct sos_cpu_kstate will be | * address of the newly-initialized struct sos_cpu_state will be |
* stored in this variable. The contents of this struct sos_cpu_kstate | * stored in this variable. The contents of this struct sos_cpu_state |
* | * |
* @param start_func The address of the first instruction that will be | * @param start_func The address of the first instruction that will be |
|
|
* | * |
* @note the newly created context is INTERRUPTIBLE by default ! | * @note the newly created context is INTERRUPTIBLE by default ! |
*/ | */ |
sos_ret_t sos_cpu_kstate_init(struct sos_cpu_kstate **ctxt, | sos_ret_t sos_cpu_kstate_init(struct sos_cpu_state **kctxt, |
sos_ui32_t start_arg, | sos_ui32_t start_arg, |
sos_vaddr_t stack_bottom, | sos_vaddr_t stack_bottom, |
|
|
| |
| |
/** | /** |
* Function that performs an immediate context-switch from one kernel | * Function that performs an immediate context-switch from one |
* thread to another one. It stores the current executing context in | * kernel/user thread to another one. It stores the current executing |
* from_ctxt, and restores to_context on CPU. | * context in from_ctxt, and restores to_context on CPU. |
* @param from_ctxt The address of the struct sos_cpu_kstate will be | * @param from_ctxt The address of the struct sos_cpu_state will be |
* | * |
* @param to_ctxt The CPU will resume its execution with the struct | * @param to_ctxt The CPU will resume its execution with the struct |
* sos_cpu_kstate located at this address. Must NOT be NULL. | * sos_cpu_state located at this address. Must NOT be NULL. |
void sos_cpu_kstate_switch(struct sos_cpu_kstate **from_ctxt, | void sos_cpu_context_switch(struct sos_cpu_state **from_ctxt, |
struct sos_cpu_kstate *to_ctxt); | struct sos_cpu_state *to_ctxt); |
| |
/* | /* |
* Switch to the new given context (of a kernel thread) without saving | * Switch to the new given context (of a kernel/user thread) without |
* the old context (of another kernel thread), and call the function | * saving the old context (of another kernel/user thread), and call |
* reclaiming_func passing it the recalining_arg argument. The | * the function reclaiming_func passing it the recalining_arg |
* reclaining function is called from within the stack of the new | * argument. The reclaining function is called from within the stack |
* context, so that it can (among other things) safely destroy the | * of the new context, so that it can (among other things) safely |
* stack of the former context. | * destroy the stack of the former context. |
* @param switch_to_ctxt The context that will be restored on the CPU | * @param switch_to_ctxt The context that will be restored on the CPU |
* | * |
|
|
* context to switch_to_ctxt. | * context to switch_to_ctxt. |
*/ | */ |
void | void |
sos_cpu_kstate_exit_to(struct sos_cpu_kstate *switch_to_ctxt, | sos_cpu_context_exit_to(struct sos_cpu_state *switch_to_ctxt, |
sos_cpu_kstate_function_arg1_t *reclaiming_func, | sos_cpu_kstate_function_arg1_t *reclaiming_func, |
sos_ui32_t reclaiming_arg) __attribute__((noreturn)); | sos_ui32_t reclaiming_arg) __attribute__((noreturn)); |
| |
/* ======================================================================= | /* ======================================================================= |
* Public Accessor functions | * Public Accessor functions |
*/ | */ |
| |
| |
/** | /** |
* Return Program Counter stored in the saved context | * Return Program Counter stored in the saved kernel/user context |
sos_vaddr_t sos_cpu_kstate_get_PC(const struct sos_cpu_kstate *ctxt); | sos_vaddr_t sos_cpu_context_get_PC(const struct sos_cpu_state *ctxt); |
| |
/** | /** |
* Return Stack Pointer stored in the saved context | * Return Stack Pointer stored in the saved kernel/user context |
sos_vaddr_t sos_cpu_kstate_get_SP(const struct sos_cpu_kstate *ctxt); | sos_vaddr_t sos_cpu_context_get_SP(const struct sos_cpu_state *ctxt); |
| |
/** | /** |
* Dump the contents of the CPU context (bochs + x86_videomem) | * Dump the contents of the CPU context (bochs + x86_videomem) |
*/ | */ |
void sos_cpu_kstate_dump(const struct sos_cpu_kstate *ctxt); | void sos_cpu_context_dump(const struct sos_cpu_state *ctxt); |
| |
/* ======================================================================= | /* ======================================================================= |
|
|
* Return the argument passed by the CPU upon exception, as stored in the | * Return the argument passed by the CPU upon exception, as stored in the |
* saved context | * saved context |
*/ | */ |
sos_ui32_t sos_cpu_kstate_get_EX_info(const struct sos_cpu_kstate *ctxt); | sos_ui32_t sos_cpu_context_get_EX_info(const struct sos_cpu_state *ctxt); |
| |
/** | /** |
* Return the faulting address of the exception | * Return the faulting address of the exception |
*/ | */ |
sos_vaddr_t | sos_vaddr_t |
sos_cpu_kstate_get_EX_faulting_vaddr(const struct sos_cpu_kstate *ctxt); | sos_cpu_context_get_EX_faulting_vaddr(const struct sos_cpu_state *ctxt); |
| |
/* ======================================================================= | /* ======================================================================= |
|
|
* - when the thread might have gone too deep in the stack | * - when the thread might have gone too deep in the stack |
*/ | */ |
/** The signature of the poison */ | /** The signature of the poison */ |
#define SOS_CPU_KSTATE_STACK_POISON 0xa5 | #define SOS_CPU_STATE_STACK_POISON 0xa5 |
/** | /** |
* When set, mean that the whole stack is poisoned to detect use of | * When set, mean that the whole stack is poisoned to detect use of |
* unititialized variables | * unititialized variables |
*/ | */ |
#define SOS_CPU_KSTATE_DETECT_UNINIT_VARS | #define SOS_CPU_STATE_DETECT_UNINIT_KERNEL_VARS |
/* #undef SOS_CPU_KSTATE_DETECT_UNINIT_VARS */ | /* #undef SOS_CPU_STATE_DETECT_UNINIT_KERNEL_VARS */ |
/** | /** |
* When set, mean that the bottom of the stack is poisoned to detect | * When set, mean that the bottom of the stack is poisoned to detect |
* probable stack overflow. Its value indicates the number of bytes | * probable stack overflow. Its value indicates the number of bytes |
* used for this detection. | * used for this detection. |
*/ | */ |
#define SOS_CPU_KSTATE_DETECT_STACK_OVERFLOW 64 | #define SOS_CPU_STATE_DETECT_KERNEL_STACK_OVERFLOW 64 |
/* #undef SOS_CPU_KSTATE_DETECT_STACK_OVERFLOW */ | /* #undef SOS_CPU_STATE_DETECT_KERNEL_STACK_OVERFLOW */ |
#if defined(SOS_CPU_KSTATE_DETECT_STACK_OVERFLOW) | #if defined(SOS_CPU_STATE_DETECT_KERNEL_STACK_OVERFLOW) |
sos_cpu_kstate_prepare_detect_stack_overflow(const struct sos_cpu_kstate *ctxt, | sos_cpu_state_prepare_detect_kernel_stack_overflow(const struct sos_cpu_state *ctxt, |
sos_vaddr_t stack_bottom, | sos_vaddr_t kernel_stack_bottom, |
sos_size_t stack_size); | sos_size_t kernel_stack_size); |
void sos_cpu_kstate_detect_stack_overflow(const struct sos_cpu_kstate *ctxt, | void sos_cpu_state_detect_kernel_stack_overflow(const struct sos_cpu_state *ctxt, |
sos_vaddr_t stack_bottom, | sos_vaddr_t kernel_stack_bottom, |
sos_size_t stack_size); | sos_size_t kernel_stack_size); |
# define sos_cpu_kstate_prepare_detect_stack_overflow(ctxt,stkbottom,stksize) \ | # define sos_cpu_state_prepare_detect_kernel_stack_overflow(ctxt,stkbottom,stksize) \ |
# define sos_cpu_kstate_detect_stack_overflow(ctxt,stkbottom,stksize) \ | # define sos_cpu_state_detect_kernel_stack_overflow(ctxt,stkbottom,stksize) \ |
#endif | #endif |
| |
|
|
| |
| |
/** | /** |
* Call the backtracer callback on each frame stored in the cpu_kstate | * Call the backtracer callback on each frame stored in the cpu_state |
* @param cpu_kstate The CPU context we want to explore. NULL to | * @param cpu_state The CPU context we want to explore. MUST be the |
* backtrace the current CPU context. | * context of a thread in Kernel mode, or NULL. When NULL: backtrace |
| * the current CPU context. |
* @param max_depth The maximum number of frames to explore | * @param max_depth The maximum number of frames to explore |
* | * |
|
|
* @note Might be inaccurate when gcc's -fomit-frame-pointer has been | * @note Might be inaccurate when gcc's -fomit-frame-pointer has been |
* used. | * used. |
*/ | */ |
sos_ui32_t sos_backtrace(const struct sos_cpu_kstate *cpu_kstate, | sos_ui32_t sos_backtrace(const struct sos_cpu_state *cpu_state, |
sos_vaddr_t stack_bottom, | sos_vaddr_t stack_bottom, |
sos_size_t stack_size, | sos_size_t stack_size, |
| |
/tmp/sos-code-article6.5/hwcore/exception.c (2005-01-04 04:13:48.000000000 +0100
) |
|
../sos-code-article6.75/hwcore/exception.c (2005-03-02 17:30:43.000000000 +0100
) |
|
|
|
/* Copyright (C) 2004 David Decotigny | /* Copyright (C) 2004 David Decotigny |
Copyright (C) 1999 Free Software Foundation, Inc. | |
This program is free software; you can redistribute it and/or | This program is free software; you can redistribute it and/or |
modify it under the terms of the GNU General Public License | modify it under the terms of the GNU General Public License |
|
|
#include "idt.h" | #include "idt.h" |
#include "irq.h" | #include "irq.h" |
| |
| #include <sos/assert.h> |
#include "exception.h" | #include "exception.h" |
| |
/* array of exception wrappers, defined in exception_wrappers.S */ | /* array of exception wrappers, defined in exception_wrappers.S */ |
|
|
sos_exception_handler_t sos_exception_handler_array[SOS_EXCEPT_NUM] = | sos_exception_handler_t sos_exception_handler_array[SOS_EXCEPT_NUM] = |
{ NULL, }; | { NULL, }; |
| |
| /* List of exception names for the x86 architecture */ |
| static const char * sos_x86_exnames[] = { |
| [SOS_EXCEPT_DIVIDE_ERROR] = "Division by zero", |
| [SOS_EXCEPT_DEBUG] = "Debug", |
| [SOS_EXCEPT_NMI_INTERRUPT] = "Non Maskable Interrupt", |
| [SOS_EXCEPT_BREAKPOINT] = "Breakpoint", |
| [SOS_EXCEPT_OVERFLOW] = "Overflow", |
| [SOS_EXCEPT_BOUND_RANGE_EXCEDEED] = "Bound Range Exceeded", |
| [SOS_EXCEPT_INVALID_OPCODE] = "Invalid Opcode", |
| [SOS_EXCEPT_DEVICE_NOT_AVAILABLE] = "Device Unavailable", |
| [SOS_EXCEPT_DOUBLE_FAULT] = "Double Fault", |
| [SOS_EXCEPT_COPROCESSOR_SEGMENT_OVERRUN] = "Coprocessor Segment Overrun", |
| [SOS_EXCEPT_INVALID_TSS] = "Invalid TSS", |
| [SOS_EXCEPT_SEGMENT_NOT_PRESENT] = "Segment Not Present", |
| [SOS_EXCEPT_STACK_SEGMENT_FAULT] = "Stack Segfault", |
| [SOS_EXCEPT_GENERAL_PROTECTION] = "General Protection", |
| [SOS_EXCEPT_PAGE_FAULT] = "Page Fault", |
| [SOS_EXCEPT_INTEL_RESERVED_1] = "INTEL1", |
| [SOS_EXCEPT_FLOATING_POINT_ERROR] = "FP Error", |
| [SOS_EXCEPT_ALIGNEMENT_CHECK] = "Alignment Check", |
| [SOS_EXCEPT_MACHINE_CHECK] = "Machine Check", |
| [SOS_EXCEPT_INTEL_RESERVED_2] = "INTEL2", |
| [SOS_EXCEPT_INTEL_RESERVED_3] = "INTEL3", |
| [SOS_EXCEPT_INTEL_RESERVED_4] = "INTEL4", |
| [SOS_EXCEPT_INTEL_RESERVED_5] = "INTEL5", |
| [SOS_EXCEPT_INTEL_RESERVED_6] = "INTEL6", |
| [SOS_EXCEPT_INTEL_RESERVED_7] = "INTEL7", |
| [SOS_EXCEPT_INTEL_RESERVED_8] = "INTEL8", |
| [SOS_EXCEPT_INTEL_RESERVED_9] = "INTEL9", |
| [SOS_EXCEPT_INTEL_RESERVED_10] = "INTEL10", |
| [SOS_EXCEPT_INTEL_RESERVED_11] = "INTEL11", |
| [SOS_EXCEPT_INTEL_RESERVED_12] = "INTEL12", |
| [SOS_EXCEPT_INTEL_RESERVED_13] = "INTEL13", |
| [SOS_EXCEPT_INTEL_RESERVED_14] = "INTEL14" |
| }; |
| |
| |
| /* Catch-all exception handler */ |
| static void sos_generic_ex(int exid, const struct sos_cpu_state *ctxt) |
| { |
| const char *exname = sos_exception_get_name(exid); |
| |
| sos_display_fatal_error("Exception %s in Kernel at instruction 0x%x (info=%x)!\n", |
| exname, |
| sos_cpu_context_get_PC(ctxt), |
| (unsigned)sos_cpu_context_get_EX_info(ctxt)); |
| } |
| |
| |
sos_ret_t sos_exception_subsystem_setup(void) | sos_ret_t sos_exception_subsystem_setup(void) |
{ | { |
| sos_ret_t retval; |
| int exid; |
| |
| /* Setup the generic exception handler by default for everybody |
| except for the double fault exception */ |
| for (exid = 0 ; exid < SOS_EXCEPT_NUM ; exid ++) |
| { |
| /* Skip double fault (see below) */ |
| if (exid == SOS_EXCEPT_DOUBLE_FAULT) |
| continue; |
| |
| retval = sos_exception_set_routine(exid, sos_generic_ex); |
| if (SOS_OK != retval) |
| return retval; |
| } |
| |
| |
/* We inidicate that the double fault exception handler is defined, | /* We inidicate that the double fault exception handler is defined, |
and give its address. this handler is a do-nothing handler (see | and give its address. this handler is a do-nothing handler (see |
exception_wrappers.S), and it can NOT be overriden by the | exception_wrappers.S), and it can NOT be overriden by the |
|
|
/* Expected to be atomic */ | /* Expected to be atomic */ |
return sos_exception_handler_array[exception_number]; | return sos_exception_handler_array[exception_number]; |
} | } |
| |
| |
| const char * sos_exception_get_name(int exception_number) |
| { |
| if ((exception_number < 0) || (exception_number >= SOS_EXCEPT_NUM)) |
| return NULL; |
| |
| return sos_x86_exnames[exception_number]; |
| } |
| |
/tmp/sos-code-article6.5/sos/ksynch.c (2005-01-04 04:13:49.000000000 +0100
) |
|
../sos-code-article6.75/sos/ksynch.c (2005-03-02 17:30:44.000000000 +0100
) |
|
|
|
| |
| |
sos_ret_t sos_ksema_init(struct sos_ksema *sema, const char *name, | sos_ret_t sos_ksema_init(struct sos_ksema *sema, const char *name, |
int initial_value) | int initial_value, |
| sos_kwaitq_ordering_t ordering) |
sema->value = initial_value; | sema->value = initial_value; |
return sos_kwaitq_init(& sema->kwaitq, name); | return sos_kwaitq_init(& sema->kwaitq, name, ordering); |
| |
| |
|
|
} | } |
| |
| |
sos_ret_t sos_kmutex_init(struct sos_kmutex *mutex, const char *name) | sos_ret_t sos_kmutex_init(struct sos_kmutex *mutex, const char *name, |
| sos_kwaitq_ordering_t ordering) |
mutex->owner = NULL; | mutex->owner = NULL; |
return sos_kwaitq_init(& mutex->kwaitq, name); | return sos_kwaitq_init(& mutex->kwaitq, name, ordering); |
| |
| |
|
|
if (NULL != mutex->owner) | if (NULL != mutex->owner) |
{ | { |
/* Owned by us or by someone else ? */ | /* Owned by us or by someone else ? */ |
if (sos_kthread_get_current() == mutex->owner) | if (sos_thread_get_current() == mutex->owner) |
/* Owned by us: do nothing */ | /* Owned by us: do nothing */ |
retval = -SOS_EBUSY; | retval = -SOS_EBUSY; |
|
|
} | } |
| |
/* Ok, the mutex is available to us: take it */ | /* Ok, the mutex is available to us: take it */ |
mutex->owner = sos_kthread_get_current(); | mutex->owner = sos_thread_get_current(); |
exit_kmutex_lock: | exit_kmutex_lock: |
sos_restore_IRQs(flags); | sos_restore_IRQs(flags); |
|
|
if (NULL == mutex->owner) | if (NULL == mutex->owner) |
{ | { |
/* Great ! Take it now */ | /* Great ! Take it now */ |
mutex->owner = sos_kthread_get_current(); | mutex->owner = sos_thread_get_current(); |
retval = SOS_OK; | retval = SOS_OK; |
} | } |
|
|
| |
sos_disable_IRQs(flags); | sos_disable_IRQs(flags); |
| |
if (sos_kthread_get_current() != mutex->owner) | if (sos_thread_get_current() != mutex->owner) |
| |
else if (sos_kwaitq_is_empty(& mutex->kwaitq)) | else if (sos_kwaitq_is_empty(& mutex->kwaitq)) |
{ | { |
| /* |
| * There is NOT ANY thread waiting => we really mark the mutex |
| * as FREE |
| */ |
mutex->owner = NULL; | mutex->owner = NULL; |
retval = SOS_OK; | retval = SOS_OK; |
} | } |
else | else |
retval = sos_kwaitq_wakeup(& mutex->kwaitq, 1, SOS_OK); | { |
| /* |
| * There is at least 1 thread waiting => we DO NOT mark the |
| * mutex as free ! |
| * Actually, we should have written: |
| * mutex->owner = thread_that_is_woken_up; |
| * But the real Id of the next thread owning the mutex is not |
| * that important. What is important here is that mutex->owner |
| * IS NOT NULL. Otherwise there will be a possibility for the |
| * thread woken up here to have the mutex stolen by a thread |
| * locking the mutex in the meantime. |
| */ |
| retval = sos_kwaitq_wakeup(& mutex->kwaitq, 1, SOS_OK); |
| } |
| |
return retval; | return retval; |
} | } |
| |
/tmp/sos-code-article6.5/sos/kthread.c (2005-01-04 04:13:49.000000000 +0100
) |
|
../sos-code-article6.75/sos/kthread.c (1970-01-01 01:00:00.000000000 +0100
) |
|
|
|
/* Copyright (C) 2004 David Decotigny | |
| |
This program is free software; you can redistribute it and/or | |
modify it under the terms of the GNU General Public License | |
as published by the Free Software Foundation; either version 2 | |
of the License, or (at your option) any later version. | |
| |
This program is distributed in the hope that it will be useful, | |
but WITHOUT ANY WARRANTY; without even the implied warranty of | |
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
GNU General Public License for more details. | |
| |
You should have received a copy of the GNU General Public License | |
along with this program; if not, write to the Free Software | |
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, | |
USA. | |
*/ | |
| |
#include <sos/physmem.h> | |
#include <sos/kmem_slab.h> | |
#include <sos/kmalloc.h> | |
#include <sos/klibc.h> | |
#include <sos/list.h> | |
#include <sos/assert.h> | |
| |
#include <hwcore/irq.h> | |
| |
#include "kthread.h" | |
| |
| |
/** | |
* The size of the stack of a kernel thread | |
*/ | |
#define SOS_KTHREAD_STACK_SIZE (1*SOS_PAGE_SIZE) | |
| |
| |
/** | |
* The identifier of the thread currently running on CPU. | |
* | |
* We only support a SINGLE processor, ie a SINGLE kernel thread | |
* running at any time in the system. This greatly simplifies the | |
* implementation of the system, since we don't have to complicate | |
* things in order to retrieve the identifier of the threads running | |
* on the CPU. On multiprocessor systems the current_kthread below is | |
* an array indexed by the id of the CPU, so that the challenge is to | |
* retrieve the identifier of the CPU. This is usually done based on | |
* the stack address (Linux implementation) or on some form of TLS | |
* ("Thread Local Storage": can be implemented by way of LDTs for the | |
* processes, accessed through the fs or gs registers). | |
*/ | |
static volatile struct sos_kthread *current_kthread = NULL; | |
| |
| |
/* | |
* The list of kernel threads currently in the system. | |
* | |
* @note We could have used current_kthread for that... | |
*/ | |
static struct sos_kthread *kthread_list = NULL; | |
| |
| |
/** | |
* The Cache of kthread structures | |
*/ | |
static struct sos_kslab_cache *cache_kthread; | |
| |
| |
struct sos_kthread *sos_kthread_get_current() | |
{ | |
SOS_ASSERT_FATAL(current_kthread->state == SOS_KTHR_RUNNING); | |
return (struct sos_kthread*)current_kthread; | |
} | |
| |
| |
inline static sos_ret_t _set_current(struct sos_kthread *thr) | |
{ | |
SOS_ASSERT_FATAL(thr->state == SOS_KTHR_READY); | |
current_kthread = thr; | |
current_kthread->state = SOS_KTHR_RUNNING; | |
return SOS_OK; | |
} | |
| |
| |
sos_ret_t sos_kthread_subsystem_setup(sos_vaddr_t init_thread_stack_base_addr, | |
sos_size_t init_thread_stack_size) | |
{ | |
struct sos_kthread *myself; | |
| |
/* Allocate the cache of kthreads */ | |
cache_kthread = sos_kmem_cache_create("kthread", | |
sizeof(struct sos_kthread), | |
2, | |
0, | |
SOS_KSLAB_CREATE_MAP | |
| SOS_KSLAB_CREATE_ZERO); | |
if (! cache_kthread) | |
return -SOS_ENOMEM; | |
| |
/* Allocate a new kthread structure for the current running thread */ | |
myself = (struct sos_kthread*) sos_kmem_cache_alloc(cache_kthread, | |
SOS_KSLAB_ALLOC_ATOMIC); | |
if (! myself) | |
return -SOS_ENOMEM; | |
| |
/* Initialize the thread attributes */ | |
strzcpy(myself->name, "[kinit]", SOS_KTHR_MAX_NAMELEN); | |
myself->state = SOS_KTHR_CREATED; | |
myself->stack_base_addr = init_thread_stack_base_addr; | |
myself->stack_size = init_thread_stack_size; | |
| |
/* Do some stack poisoning on the bottom of the stack, if needed */ | |
sos_cpu_kstate_prepare_detect_stack_overflow(myself->cpu_kstate, | |
myself->stack_base_addr, | |
myself->stack_size); | |
| |
/* Add the thread in the global list */ | |
list_singleton_named(kthread_list, myself, gbl_prev, gbl_next); | |
| |
/* Ok, now pretend that the running thread is ourselves */ | |
myself->state = SOS_KTHR_READY; | |
_set_current(myself); | |
| |
return SOS_OK; | |
} | |
| |
| |
struct sos_kthread *sos_kthread_create(const char *name, | |
sos_kthread_start_routine_t start_func, | |
void *start_arg) | |
{ | |
__label__ undo_creation; | |
struct sos_kthread *new_thread; | |
| |
if (! start_func) | |
return NULL; | |
| |
/* Allocate a new kthread structure for the current running thread */ | |
new_thread | |
= (struct sos_kthread*) sos_kmem_cache_alloc(cache_kthread, | |
SOS_KSLAB_ALLOC_ATOMIC); | |
if (! new_thread) | |
return NULL; | |
| |
/* Initialize the thread attributes */ | |
strzcpy(new_thread->name, ((name)?name:"[NONAME]"), SOS_KTHR_MAX_NAMELEN); | |
new_thread->state = SOS_KTHR_CREATED; | |
| |
/* Allocate the stack for the new thread */ | |
new_thread->stack_base_addr = sos_kmalloc(SOS_KTHREAD_STACK_SIZE, 0); | |
new_thread->stack_size = SOS_KTHREAD_STACK_SIZE; | |
if (! new_thread->stack_base_addr) | |
goto undo_creation; | |
| |
/* Initialize the CPU context of the new thread */ | |
if (SOS_OK | |
!= sos_cpu_kstate_init(& new_thread->cpu_kstate, | |
(sos_cpu_kstate_function_arg1_t*) start_func, | |
(sos_ui32_t) start_arg, | |
new_thread->stack_base_addr, | |
new_thread->stack_size, | |
(sos_cpu_kstate_function_arg1_t*) sos_kthread_exit, | |
(sos_ui32_t) NULL)) | |
goto undo_creation; | |
| |
/* Add the thread in the global list */ | |
list_add_tail_named(kthread_list, new_thread, gbl_prev, gbl_next); | |
| |
/* Mark the thread ready */ | |
if (SOS_OK != sos_sched_set_ready(new_thread)) | |
goto undo_creation; | |
| |
/* Normal non-erroneous end of function */ | |
return new_thread; | |
| |
undo_creation: | |
sos_kmem_cache_free((sos_vaddr_t) new_thread); | |
return NULL; | |
} | |
| |
| |
/** Function called after thr has terminated. Called from inside the context | |
of another thread, interrupts disabled */ | |
static void delete_thread(struct sos_kthread *thr) | |
{ | |
list_delete_named(kthread_list, thr, gbl_prev, gbl_next); | |
| |
sos_cpu_kstate_detect_stack_overflow(thr->cpu_kstate, | |
thr->stack_base_addr, | |
thr->stack_size); | |
| |
sos_kfree((sos_vaddr_t) thr->stack_base_addr); | |
memset(thr, 0x0, sizeof(struct sos_kthread)); | |
sos_kmem_cache_free((sos_vaddr_t) thr); | |
} | |
| |
| |
void sos_kthread_exit() | |
{ | |
sos_ui32_t flags; | |
struct sos_kthread *myself, *next_thread; | |
| |
myself = sos_kthread_get_current(); | |
| |
/* Refuse to end the current executing thread if it still holds a | |
resource ! */ | |
SOS_ASSERT_FATAL(list_is_empty_named(myself->kwaitq_list, | |
prev_entry_for_kthread, | |
next_entry_for_kthread)); | |
| |
/* Prepare to run the next thread */ | |
sos_disable_IRQs(flags); | |
myself->state = SOS_KTHR_ZOMBIE; | |
next_thread = sos_reschedule(myself, FALSE); | |
_set_current(next_thread); | |
| |
/* No need for sos_restore_IRQs() here because the IRQ flag will be | |
restored to that of the next thread upon context switch */ | |
| |
/* Immediate switch to next thread */ | |
sos_cpu_kstate_exit_to(next_thread->cpu_kstate, | |
(sos_cpu_kstate_function_arg1_t*) delete_thread, | |
(sos_ui32_t) myself); | |
} | |
| |
| |
sos_kthread_state_t sos_kthread_get_state(struct sos_kthread *thr) | |
{ | |
if (! thr) | |
thr = (struct sos_kthread*)current_kthread; | |
| |
return thr->state; | |
} | |
| |
| |
typedef enum { YIELD_MYSELF, BLOCK_MYSELF } switch_type_t; | |
/** | |
* Helper function to initiate a context switch in case the current | |
* thread becomes blocked, waiting for a timeout, or calls yield. | |
*/ | |
static sos_ret_t _switch_to_next_thread(switch_type_t operation) | |
{ | |
struct sos_kthread *myself, *next_thread; | |
| |
SOS_ASSERT_FATAL(current_kthread->state == SOS_KTHR_RUNNING); | |
| |
/* Interrupt handlers are NOT allowed to block ! */ | |
SOS_ASSERT_FATAL(! sos_servicing_irq()); | |
| |
myself = (struct sos_kthread*)current_kthread; | |
| |
/* Make sure that if we are to be marked "BLOCKED", we have any | |
reason of effectively being blocked */ | |
if (BLOCK_MYSELF == operation) | |
{ | |
myself->state = SOS_KTHR_BLOCKED; | |
} | |
| |
/* Identify the next thread */ | |
next_thread = sos_reschedule(myself, YIELD_MYSELF == operation); | |
| |
/* Avoid context switch if the context does not change */ | |
if (myself != next_thread) | |
{ | |
/* Sanity checks for the next thread */ | |
sos_cpu_kstate_detect_stack_overflow(next_thread->cpu_kstate, | |
next_thread->stack_base_addr, | |
next_thread->stack_size); | |
| |
/* Actual context switch */ | |
_set_current(next_thread); | |
sos_cpu_kstate_switch(& myself->cpu_kstate, next_thread->cpu_kstate); | |
| |
/* Back here ! */ | |
SOS_ASSERT_FATAL(current_kthread == myself); | |
SOS_ASSERT_FATAL(current_kthread->state == SOS_KTHR_RUNNING); | |
} | |
else | |
{ | |
/* No context switch but still update ID of current thread */ | |
_set_current(next_thread); | |
} | |
| |
return SOS_OK; | |
} | |
| |
| |
sos_ret_t sos_kthread_yield() | |
{ | |
sos_ui32_t flags; | |
sos_ret_t retval; | |
| |
sos_disable_IRQs(flags); | |
| |
retval = _switch_to_next_thread(YIELD_MYSELF); | |
| |
sos_restore_IRQs(flags); | |
return retval; | |
} | |
| |
| |
/** | |
* Internal sleep timeout management | |
*/ | |
struct sleep_timeout_params | |
{ | |
struct sos_kthread *thread_to_wakeup; | |
sos_bool_t timeout_triggered; | |
}; | |
| |
| |
/** | |
* Callback called when a timeout happened | |
*/ | |
static void sleep_timeout(struct sos_timeout_action *act) | |
{ | |
struct sleep_timeout_params *sleep_timeout_params | |
= (struct sleep_timeout_params*) act->routine_data; | |
| |
/* Signal that we have been woken up by the timeout */ | |
sleep_timeout_params->timeout_triggered = TRUE; | |
| |
/* Mark the thread ready */ | |
SOS_ASSERT_FATAL(SOS_OK == | |
sos_kthread_force_unblock(sleep_timeout_params | |
->thread_to_wakeup)); | |
} | |
| |
| |
sos_ret_t sos_kthread_sleep(struct sos_time *timeout) | |
{ | |
sos_ui32_t flags; | |
struct sleep_timeout_params sleep_timeout_params; | |
struct sos_timeout_action timeout_action; | |
sos_ret_t retval; | |
| |
/* Block forever if no timeout is given */ | |
if (NULL == timeout) | |
{ | |
sos_disable_IRQs(flags); | |
retval = _switch_to_next_thread(BLOCK_MYSELF); | |
sos_restore_IRQs(flags); | |
| |
return retval; | |
} | |
| |
/* Initialize the timeout action */ | |
sos_time_init_action(& timeout_action); | |
| |
/* Prepare parameters used by the sleep timeout callback */ | |
sleep_timeout_params.thread_to_wakeup | |
= (struct sos_kthread*)current_kthread; | |
sleep_timeout_params.timeout_triggered = FALSE; | |
| |
sos_disable_IRQs(flags); | |
| |
/* Now program the timeout ! */ | |
SOS_ASSERT_FATAL(SOS_OK == | |
sos_time_register_action_relative(& timeout_action, | |
timeout, | |
sleep_timeout, | |
& sleep_timeout_params)); | |
| |
/* Prepare to block: wait for sleep_timeout() to wakeup us in the | |
timeout kwaitq, or for someone to wake us up in any other | |
waitq */ | |
retval = _switch_to_next_thread(BLOCK_MYSELF); | |
/* Unblocked by something ! */ | |
| |
/* Unblocked by timeout ? */ | |
if (sleep_timeout_params.timeout_triggered) | |
{ | |
/* Yes */ | |
SOS_ASSERT_FATAL(sos_time_is_zero(& timeout_action.timeout)); | |
retval = SOS_OK; | |
} | |
else | |
{ | |
/* No: We have probably been woken up while in some other | |
kwaitq */ | |
SOS_ASSERT_FATAL(SOS_OK == sos_time_unregister_action(& timeout_action)); | |
retval = -SOS_EINTR; | |
} | |
| |
sos_restore_IRQs(flags); | |
| |
/* Update the remaining timeout */ | |
memcpy(timeout, & timeout_action.timeout, sizeof(struct sos_time)); | |
| |
return retval; | |
} | |
| |
| |
sos_ret_t sos_kthread_force_unblock(struct sos_kthread *kthread) | |
{ | |
sos_ret_t retval; | |
sos_ui32_t flags; | |
| |
if (! kthread) | |
return -SOS_EINVAL; | |
| |
sos_disable_IRQs(flags); | |
| |
/* Thread already woken up ? */ | |
retval = SOS_OK; | |
switch(sos_kthread_get_state(kthread)) | |
{ | |
case SOS_KTHR_RUNNING: | |
case SOS_KTHR_READY: | |
/* Do nothing */ | |
break; | |
| |
case SOS_KTHR_ZOMBIE: | |
retval = -SOS_EFATAL; | |
break; | |
| |
default: | |
retval = sos_sched_set_ready(kthread); | |
break; | |
} | |
| |
sos_restore_IRQs(flags); | |
| |
return retval; | |
} | |
/tmp/sos-code-article6.5/sos/kthread.h (2005-01-04 04:13:49.000000000 +0100
) |
|
../sos-code-article6.75/sos/kthread.h (1970-01-01 01:00:00.000000000 +0100
) |
|
|
|
/* Copyright (C) 2004 David Decotigny | |
| |
This program is free software; you can redistribute it and/or | |
modify it under the terms of the GNU General Public License | |
as published by the Free Software Foundation; either version 2 | |
of the License, or (at your option) any later version. | |
| |
This program is distributed in the hope that it will be useful, | |
but WITHOUT ANY WARRANTY; without even the implied warranty of | |
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
GNU General Public License for more details. | |
| |
You should have received a copy of the GNU General Public License | |
along with this program; if not, write to the Free Software | |
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, | |
USA. | |
*/ | |
#ifndef _SOS_KTHREAD_H_ | |
#define _SOS_KTHREAD_H_ | |
| |
#include <sos/errno.h> | |
| |
/** | |
* @file kthread.h | |
* | |
* SOS Kernel thread management API | |
*/ | |
| |
| |
/* Forward declaration */ | |
struct sos_kthread; | |
| |
#include <hwcore/cpu_context.h> | |
#include <sos/sched.h> | |
#include <sos/kwaitq.h> | |
#include <sos/time.h> | |
| |
| |
/**
 * The possible states of a valid kernel thread
 */
typedef enum { SOS_KTHR_CREATED, /**< Created, not yet fully initialized */
	       SOS_KTHR_READY,   /**< Fully initialized, or unblocked:
				      waiting for the CPU */
	       SOS_KTHR_RUNNING, /**< Currently executing on the CPU */
	       SOS_KTHR_BLOCKED, /**< Waiting for I/O (+ in at LEAST one
				      kwaitq) and/or sleeping (+ in NO
				      kwaitq) */
	       SOS_KTHR_ZOMBIE,  /**< Finished executing, waiting to be
				      deleted by the kernel */
	     } sos_kthread_state_t;
| |
| |
/** | |
* TCB (Thread Control Block): structure describing a Kernel | |
* thread. Don't access these fields directly: prefer using the | |
* accessor functions below. | |
*/ | |
struct sos_kthread | |
{ | |
#define SOS_KTHR_MAX_NAMELEN 32 | |
char name[SOS_KTHR_MAX_NAMELEN]; | |
| |
sos_kthread_state_t state; | |
| |
/* The hardware context of the thread */ | |
struct sos_cpu_kstate *cpu_kstate; | |
sos_vaddr_t stack_base_addr; | |
sos_size_t stack_size; | |
| |
/* Data specific to each state */ | |
union | |
{ | |
struct | |
{ | |
struct sos_kthread *rdy_prev, *rdy_next; | |
} ready; | |
}; /* Anonymous union (gcc extenion) */ | |
| |
| |
/* | |
* Data used by the kwaitq subsystem: list of kwaitqueues the thread | |
* is waiting for. | |
* | |
* @note: a RUNNING or READY thread might be in one or more | |
* waitqueues ! The only property we have is that, among these | |
* waitqueues (if any), _at least_ one has woken the thread. | |
*/ | |
struct sos_kwaitq_entry *kwaitq_list; | |
| |
| |
/** | |
* Chaining pointers for global ("gbl") list of threads (debug) | |
*/ | |
struct sos_kthread *gbl_prev, *gbl_next; | |
}; | |
| |
| |
/** | |
* Definition of the function executed by a kernel thread | |
*/ | |
typedef void (*sos_kthread_start_routine_t)(void *arg); | |
| |
| |
/** | |
* Initialize the subsystem responsible for kernel thread management | |
* | |
* Initialize primary kernel thread so that it can be handled the same | |
* way as an ordinary thread created by sos_kthread_create(). | |
*/ | |
sos_ret_t sos_kthread_subsystem_setup(sos_vaddr_t init_thread_stack_base_addr, | |
sos_size_t init_thread_stack_size); | |
| |
| |
/** | |
* Create a new kernel thread | |
*/ | |
struct sos_kthread *sos_kthread_create(const char *name, | |
sos_kthread_start_routine_t start_func, | |
void *start_arg); | |
| |
| |
/** | |
* Terminate the execution of the current thread. Called by default | |
* when the start routine returns. | |
*/ | |
void sos_kthread_exit() __attribute__((noreturn)); | |
| |
| |
/** | |
* Get the identifier of the thread currently running on CPU. Trivial | |
* function. | |
*/ | |
struct sos_kthread *sos_kthread_get_current(); | |
| |
| |
/** | |
* If thr == NULL, get the state of the current thread. Trivial | |
* function. | |
* | |
* @note NOT protected against interrupts | |
*/ | |
sos_kthread_state_t sos_kthread_get_state(struct sos_kthread *thr); | |
| |
| |
/** | |
* Yield CPU to another ready thread. | |
* | |
* @note This is a BLOCKING FUNCTION | |
*/ | |
sos_ret_t sos_kthread_yield(); | |
| |
| |
/** | |
* Release the CPU for (at least) the given delay. | |
* | |
* @param delay The delay to wait for. If delay == NULL then wait | |
* forever that any event occurs. | |
* | |
* @return SOS_OK when delay expired (and delay is reset to zero), | |
* -SOS_EINTR otherwise (and delay contains the amount of time | |
* remaining). | |
* | |
* @note This is a BLOCKING FUNCTION | |
*/ | |
sos_ret_t sos_kthread_sleep(/* in/out */struct sos_time *delay); | |
| |
| |
/** | |
* Mark the given thread as READY (if not already ready) even if it is | |
* blocked in a kwaitq or in a sleep ! As a result, the interrupted | |
* kwaitq/sleep function call of the thread will return with | |
* -SOS_EINTR. | |
* | |
* @return -SOS_EINVAL if thread does not exist, or -SOS_EFATAL if | |
* marked ZOMBIE. | |
* | |
* @note As a result, the semaphore/mutex/conditions/... functions | |
* return values SHOULD ALWAYS be checked ! If they are != SOS_OK, | |
 * then the caller should consider that the resource is not acquired | 
* because somebody woke the thread by some way. | |
*/ | |
sos_ret_t sos_kthread_force_unblock(struct sos_kthread *kthread); | |
| |
| |
#endif /* _SOS_KTHREAD_H_ */ | |
/tmp/sos-code-article6.5/sos/kwaitq.c (2005-01-04 04:13:49.000000000 +0100
) |
|
../sos-code-article6.75/sos/kwaitq.c (2005-03-02 17:30:44.000000000 +0100
) |
|
|
|
| |
| |
sos_ret_t sos_kwaitq_init(struct sos_kwaitq *kwq, | sos_ret_t sos_kwaitq_init(struct sos_kwaitq *kwq, |
const char *name) | const char *name, |
| sos_kwaitq_ordering_t ordering) |
memset(kwq, 0x0, sizeof(struct sos_kwaitq)); | memset(kwq, 0x0, sizeof(struct sos_kwaitq)); |
| |
|
|
name = "<unknown>"; | name = "<unknown>"; |
strzcpy(kwq->name, name, SOS_KWQ_DEBUG_MAX_NAMELEN); | strzcpy(kwq->name, name, SOS_KWQ_DEBUG_MAX_NAMELEN); |
#endif | #endif |
| kwq->ordering = ordering; |
list_init_named(kwq->waiting_list, | list_init_named(kwq->waiting_list, |
prev_entry_in_kwaitq, next_entry_in_kwaitq); | prev_entry_in_kwaitq, next_entry_in_kwaitq); |
| |
|
|
sos_ret_t sos_kwaitq_init_entry(struct sos_kwaitq_entry *kwq_entry) | sos_ret_t sos_kwaitq_init_entry(struct sos_kwaitq_entry *kwq_entry) |
{ | { |
memset(kwq_entry, 0x0, sizeof(struct sos_kwaitq_entry)); | memset(kwq_entry, 0x0, sizeof(struct sos_kwaitq_entry)); |
kwq_entry->kthread = sos_kthread_get_current(); | kwq_entry->thread = sos_thread_get_current(); |
} | } |
| |
| |
/** Internal helper function equivalent to sos_kwaitq_add_entry(), but | /** Internal helper function equivalent to sos_kwaitq_add_entry(), but |
without interrupt protection scheme, and explicit priority | without interrupt protection scheme */ |
ordering */ | inline static sos_ret_t |
inline static sos_ret_t _kwaitq_add_entry(struct sos_kwaitq *kwq, | _kwaitq_add_entry(struct sos_kwaitq *kwq, |
struct sos_kwaitq_entry *kwq_entry) | struct sos_kwaitq_entry *kwq_entry, |
| sos_sched_priority_t prio) |
| struct sos_kwaitq_entry *next_entry = NULL, *entry; |
| int nb_entries; |
| |
/* This entry is already added in the kwaitq ! */ | /* This entry is already added in the kwaitq ! */ |
SOS_ASSERT_FATAL(NULL == kwq_entry->kwaitq); | SOS_ASSERT_FATAL(NULL == kwq_entry->kwaitq); |
| |
/* sos_kwaitq_init_entry() has not been called ?! */ | /* sos_kwaitq_init_entry() has not been called ?! */ |
SOS_ASSERT_FATAL(NULL != kwq_entry->kthread); | SOS_ASSERT_FATAL(NULL != kwq_entry->thread); |
/* (Re-)Initialize wakeup status of the entry */ | /* (Re-)Initialize wakeup status of the entry */ |
kwq_entry->wakeup_triggered = FALSE; | kwq_entry->wakeup_triggered = FALSE; |
kwq_entry->wakeup_status = SOS_OK; | kwq_entry->wakeup_status = SOS_OK; |
| |
/* Add the thread in the list */ | /* Insert this entry in the kwaitq waiting list */ |
list_add_tail_named(kwq->waiting_list, kwq_entry, | switch (kwq->ordering) |
prev_entry_in_kwaitq, next_entry_in_kwaitq); | { |
| case SOS_KWQ_ORDER_FIFO: |
| /* Insertion in the list in FIFO order */ |
| { |
| /* Add the thread in the list */ |
| list_add_tail_named(kwq->waiting_list, kwq_entry, |
| prev_entry_in_kwaitq, next_entry_in_kwaitq); |
| } |
| break; |
| |
| case SOS_KWQ_ORDER_PRIO: |
| /* Priority-driven insertion in the list */ |
| { |
| /* Look for the place where to insert the thread in the queue (we |
| want to order them in order of increasing priorities) */ |
| list_foreach_forward_named(kwq->waiting_list, entry, nb_entries, |
| prev_entry_in_kwaitq, next_entry_in_kwaitq) |
| { |
| /* Does the thread we want to insert have higher priority than |
| the given thread in the queue ? */ |
| if (SOS_SCHED_PRIO_CMP(prio, |
| sos_thread_get_priority(entry->thread)) |
| > 0) |
| { |
| /* Yes: we insert before this given thread */ |
| next_entry = entry; |
| break; |
| } |
| } |
| |
| /* Actually insert the entry in the list */ |
| if (next_entry != NULL) |
| { |
| list_insert_before_named(kwq->waiting_list, kwq_entry, next_entry, |
| prev_entry_in_kwaitq, |
| next_entry_in_kwaitq); |
| } |
| else |
| { |
| /* The thread we want to insert has less priority than any |
| other in the list */ |
| list_add_tail_named(kwq->waiting_list, kwq_entry, |
| prev_entry_in_kwaitq, next_entry_in_kwaitq); |
| } |
| } |
| break; |
| |
| default: |
| SOS_FATAL_ERROR("Invalid kwq ordering %d !\n", kwq->ordering); |
| break; |
| } |
/* Update the list of waitqueues for the thread */ | /* Update the list of waitqueues for the thread */ |
list_add_tail_named(kwq_entry->kthread->kwaitq_list, kwq_entry, | list_add_tail_named(kwq_entry->thread->kwaitq_list, kwq_entry, |
prev_entry_for_kthread, next_entry_for_kthread); | prev_entry_for_thread, next_entry_for_thread); |
kwq_entry->kwaitq = kwq; | kwq_entry->kwaitq = kwq; |
| |
} | } |
| |
|
|
sos_ret_t retval; | sos_ret_t retval; |
| |
sos_disable_IRQs(flags); | sos_disable_IRQs(flags); |
retval = _kwaitq_add_entry(kwq, kwq_entry); | retval = _kwaitq_add_entry(kwq, kwq_entry, |
| sos_thread_get_priority(kwq_entry->thread)); |
| |
return retval; | return retval; |
|
|
list_delete_named(kwq->waiting_list, kwq_entry, | list_delete_named(kwq->waiting_list, kwq_entry, |
prev_entry_in_kwaitq, next_entry_in_kwaitq); | prev_entry_in_kwaitq, next_entry_in_kwaitq); |
| |
list_delete_named(kwq_entry->kthread->kwaitq_list, kwq_entry, | list_delete_named(kwq_entry->thread->kwaitq_list, kwq_entry, |
prev_entry_for_kthread, next_entry_for_kthread); | prev_entry_for_thread, next_entry_for_thread); |
kwq_entry->kwaitq = NULL; | kwq_entry->kwaitq = NULL; |
return SOS_OK; | return SOS_OK; |
|
|
| |
sos_disable_IRQs(flags); | sos_disable_IRQs(flags); |
| |
retval = _kwaitq_add_entry(kwq, & kwq_entry); | retval = _kwaitq_add_entry(kwq, & kwq_entry, |
| sos_thread_get_priority(kwq_entry.thread)); |
/* Wait for wakeup or timeout */ | /* Wait for wakeup or timeout */ |
sos_kthread_sleep(timeout); | sos_thread_sleep(timeout); |
| |
/* Sleep delay elapsed ? */ | /* Sleep delay elapsed ? */ |
|
|
| |
| |
sos_ret_t sos_kwaitq_wakeup(struct sos_kwaitq *kwq, | sos_ret_t sos_kwaitq_wakeup(struct sos_kwaitq *kwq, |
unsigned int nb_kthreads, | unsigned int nb_threads, |
{ | { |
sos_ui32_t flags; | sos_ui32_t flags; |
|
|
sos_disable_IRQs(flags); | sos_disable_IRQs(flags); |
| |
/* Wake up as much threads waiting in waitqueue as possible (up to | /* Wake up as much threads waiting in waitqueue as possible (up to |
nb_kthreads), scanning the list in FIFO order */ | nb_threads), scanning the list in FIFO/decreasing priority order |
| (depends on the kwaitq ordering) */ |
prev_entry_in_kwaitq, next_entry_in_kwaitq)) | prev_entry_in_kwaitq, next_entry_in_kwaitq)) |
{ | { |
|
|
= list_get_head_named(kwq->waiting_list, | = list_get_head_named(kwq->waiting_list, |
prev_entry_in_kwaitq, next_entry_in_kwaitq); | prev_entry_in_kwaitq, next_entry_in_kwaitq); |
| |
/* Enough kthreads woken up ? */ | /* Enough threads woken up ? */ |
if (nb_kthreads <= 0) | if (nb_threads <= 0) |
| |
/* | /* |
|
|
*/ | */ |
| |
/* Thread already woken up ? */ | /* Thread already woken up ? */ |
if (SOS_KTHR_RUNNING == sos_kthread_get_state(kwq_entry->kthread)) | if (SOS_THR_RUNNING == sos_thread_get_state(kwq_entry->thread)) |
/* Yes => Do nothing because WE are that woken-up thread. In | /* Yes => Do nothing because WE are that woken-up thread. In |
particular: don't call set_ready() here because this | particular: don't call set_ready() here because this |
|
|
else | else |
{ | { |
/* No => wake it up now. */ | /* No => wake it up now. */ |
sos_sched_set_ready(kwq_entry->kthread); | sos_sched_set_ready(kwq_entry->thread); |
| |
/* Remove this waitq entry */ | /* Remove this waitq entry */ |
|
|
kwq_entry->wakeup_status = wakeup_status; | kwq_entry->wakeup_status = wakeup_status; |
| |
/* Next iteration... */ | /* Next iteration... */ |
nb_kthreads --; | nb_threads --; |
| |
sos_restore_IRQs(flags); | sos_restore_IRQs(flags); |
| |
return SOS_OK; | return SOS_OK; |
} | } |
| |
| |
| /* Internal function (callback for thread subsystem) */ |
| sos_ret_t sos_kwaitq_change_priority(struct sos_kwaitq *kwq, |
| struct sos_kwaitq_entry *kwq_entry, |
| sos_sched_priority_t priority) |
| { |
| /* Reorder the waiting list */ |
| _kwaitq_remove_entry(kwq, kwq_entry); |
| _kwaitq_add_entry(kwq, kwq_entry, priority); |
| |
| return SOS_OK; |
| } |
| |
/tmp/sos-code-article6.5/sos/kwaitq.h (2005-01-04 04:13:49.000000000 +0100
) |
|
../sos-code-article6.75/sos/kwaitq.h (2005-03-02 17:30:44.000000000 +0100
) |
|
|
|
#define _SOS_KWAITQ_H_ | #define _SOS_KWAITQ_H_ |
| |
#include <sos/errno.h> | #include <sos/errno.h> |
#include <sos/kthread.h> | #include <sos/thread.h> |
#include <sos/sched.h> | |
| |
/** | /** |
* @kwaitq.h | * @kwaitq.h |
* | * |
* Low-level functions to manage queues of threads waiting for a | * Low-level functions to manage queues of threads waiting for a |
* resource. These functions are public. For higher-level | * resource. These functions are public, except |
* synchronization primitives such as mutex, semaphores, conditions, | * sos_kwaitq_change_priority() that is a callback for the thread |
* ... prefer looking at the corresponding libraries. | * subsystem. However, for higher-level synchronization primitives |
| * such as mutex, semaphores, conditions, ... prefer to look at the |
| * corresponding libraries. |
| |
| |
|
|
| |
| |
/** | /** |
* Definition of a waitqueue. In a kwaitq, the threads are ordererd in | * The threads in the kwaitqs can be ordered in FIFO or in decreasing |
* FIFO order. | * priority order. |
| */ |
| typedef enum { SOS_KWQ_ORDER_FIFO, SOS_KWQ_ORDER_PRIO } sos_kwaitq_ordering_t; |
| |
| |
| #include <sos/sched.h> |
| |
| |
| /** |
 | * Definition of a waitqueue. In a kwaitq, the threads can be ordered |
| * either in FIFO order (SOS_KWQ_ORDER_FIFO) or in decreasing priority |
| * order (SOS_KWQ_ORDER_PRIO ; with FIFO ordering for same-prio |
| * threads). |
| * |
| * A more efficient method to store the threads ordered by their |
| * priority would have been to use 1 list for each priority level. But |
| * we have to be careful to the size of a kwaitq structure here: |
| * potentially there are thousands of kwaitq in a running system |
| * (basically: 1 per opened file !). The algorithm we use to order the |
| * threads in the kwaitq in this case is highly under-optimal (naive |
| * linear insertion): as an exercise, one can implement a more |
| * efficient algorithm (think of a heap). |
struct sos_kwaitq | struct sos_kwaitq |
{ | { |
|
|
# define SOS_KWQ_DEBUG_MAX_NAMELEN 32 | # define SOS_KWQ_DEBUG_MAX_NAMELEN 32 |
char name[SOS_KWQ_DEBUG_MAX_NAMELEN]; | char name[SOS_KWQ_DEBUG_MAX_NAMELEN]; |
#endif | #endif |
| sos_kwaitq_ordering_t ordering; |
struct sos_kwaitq_entry *waiting_list; | struct sos_kwaitq_entry *waiting_list; |
}; | }; |
| |
|
|
struct sos_kwaitq_entry | struct sos_kwaitq_entry |
{ | { |
/** The thread associated with this entry */ | /** The thread associated with this entry */ |
struct sos_kthread *kthread; | struct sos_thread *thread; |
/** The kwaitqueue this entry belongs to */ | /** The kwaitqueue this entry belongs to */ |
struct sos_kwaitq *kwaitq; | struct sos_kwaitq *kwaitq; |
|
|
struct sos_kwaitq_entry *prev_entry_in_kwaitq, *next_entry_in_kwaitq; | struct sos_kwaitq_entry *prev_entry_in_kwaitq, *next_entry_in_kwaitq; |
| |
/** Other entries for the thread */ | /** Other entries for the thread */ |
struct sos_kwaitq_entry *prev_entry_for_kthread, *next_entry_for_kthread; | struct sos_kwaitq_entry *prev_entry_for_thread, *next_entry_for_thread; |
| |
| |
|
|
* copied]) | * copied]) |
*/ | */ |
sos_ret_t sos_kwaitq_init(struct sos_kwaitq *kwq, | sos_ret_t sos_kwaitq_init(struct sos_kwaitq *kwq, |
const char *name); | const char *name, |
| sos_kwaitq_ordering_t ordering); |
| |
/** | /** |
|
|
| |
/** | /** |
* Initialize a waitqueue entry. Mainly consists in updating the | * Initialize a waitqueue entry. Mainly consists in updating the |
* "kthread" field of the entry (set to current running thread), and | * "thread" field of the entry (set to current running thread), and |
* belong to any waitq. | * belong to any waitq. |
*/ | */ |
|
|
| |
| |
/** | /** |
* Wake up as much as nb_kthread threads (SOS_KWQ_WAKEUP_ALL to wake | * Wake up as much as nb_thread threads (SOS_KWQ_WAKEUP_ALL to wake |
* up all threads) in the kwaitq kwq, in FIFO order. | * up all threads) in the kwaitq kwq, in FIFO or decreasing priority |
| * order (depends on the ordering scheme selected at kwaitq |
| * initialization time). |
* @param wakeup_status The value returned by sos_kwaitq_wait() when | * @param wakeup_status The value returned by sos_kwaitq_wait() when |
* the thread will effectively woken up due to this wakeup. | * the thread will effectively woken up due to this wakeup. |
*/ | */ |
sos_ret_t sos_kwaitq_wakeup(struct sos_kwaitq *kwq, | sos_ret_t sos_kwaitq_wakeup(struct sos_kwaitq *kwq, |
unsigned int nb_kthreads, | unsigned int nb_threads, |
#define SOS_KWQ_WAKEUP_ALL (~((unsigned int)0)) | #define SOS_KWQ_WAKEUP_ALL (~((unsigned int)0)) |
| |
| |
| /** |
| * @note INTERNAL function (in particular: interrupts not disabled) ! |
| * |
| * @note: The use of this function is RESERVED (to thread.c). Do not |
| * call it directly: use sos_thread_set_priority() for that ! |
| */ |
| sos_ret_t sos_kwaitq_change_priority(struct sos_kwaitq *kwq, |
| struct sos_kwaitq_entry *kwq_entry, |
| sos_sched_priority_t priority); |
| |
#endif /* _SOS_KWAITQ_H_ */ | #endif /* _SOS_KWAITQ_H_ */ |
| |
/tmp/sos-code-article6.5/sos/main.c (2005-01-04 04:13:49.000000000 +0100
) |
|
../sos-code-article6.75/sos/main.c (2005-03-02 17:30:44.000000000 +0100
) |
|
|
|
/* Copyright (C) 2004 The SOS Team | /* Copyright (C) 2004 The SOS Team |
Copyright (C) 1999 Free Software Foundation, Inc. | |
This program is free software; you can redistribute it and/or | This program is free software; you can redistribute it and/or |
modify it under the terms of the GNU General Public License | modify it under the terms of the GNU General Public License |
|
|
#include <sos/kmem_vmm.h> | #include <sos/kmem_vmm.h> |
#include <sos/kmalloc.h> | #include <sos/kmalloc.h> |
#include <sos/time.h> | #include <sos/time.h> |
#include <sos/kthread.h> | #include <sos/thread.h> |
#include <sos/assert.h> | #include <sos/assert.h> |
#include <drivers/x86_videomem.h> | #include <drivers/x86_videomem.h> |
|
|
| |
| |
/* Clock IRQ handler */ | /* Clock IRQ handler */ |
static void clk_it(int intid, | static void clk_it(int intid) |
const struct sos_cpu_kstate *cpu_kstate) | |
static sos_ui32_t clock_count = 0; | static sos_ui32_t clock_count = 0; |
| |
|
|
*/ | */ |
| |
/* Helper function to dump a backtrace on bochs and/or the console */ | /* Helper function to dump a backtrace on bochs and/or the console */ |
static void dump_backtrace(const struct sos_cpu_kstate *cpu_kstate, | static void dump_backtrace(const struct sos_cpu_state *cpu_state, |
sos_size_t stack_size, | sos_size_t stack_size, |
sos_bool_t on_console, | sos_bool_t on_console, |
|
|
| |
} | } |
| |
sos_backtrace(cpu_kstate, 15, stack_bottom, stack_size, backtracer, NULL); | sos_backtrace(cpu_state, 15, stack_bottom, stack_size, backtracer, NULL); |
| |
| |
/* Page fault exception handler with demand paging for the kernel */ | /* Page fault exception handler with demand paging for the kernel */ |
static void pgflt_ex(int intid, const struct sos_cpu_kstate *ctxt) | static void pgflt_ex(int intid, const struct sos_cpu_state *ctxt) |
static sos_ui32_t demand_paging_count = 0; | static sos_ui32_t demand_paging_count = 0; |
sos_vaddr_t faulting_vaddr = sos_cpu_kstate_get_EX_faulting_vaddr(ctxt); | sos_vaddr_t faulting_vaddr = sos_cpu_context_get_EX_faulting_vaddr(ctxt); |
| |
/* Check if address is covered by any VMM range */ | /* Check if address is covered by any VMM range */ |
|
|
bootstrap_stack_bottom, | bootstrap_stack_bottom, |
bootstrap_stack_size, | bootstrap_stack_size, |
TRUE, TRUE); | TRUE, TRUE); |
sos_display_fatal_error("Unresolved page Fault on access to address 0x%x (info=%x)!", | sos_display_fatal_error("Unresolved page Fault at instruction 0x%x on access to address 0x%x (info=%x)!", |
| sos_cpu_context_get_PC(ctxt), |
(unsigned)sos_cpu_kstate_get_EX_info(ctxt)); | (unsigned)sos_cpu_context_get_EX_info(ctxt)); |
} | } |
| |
|
|
{ | { |
sos_bochs_printf("[37myield(%c)[m\n", thr_arg->character); | sos_bochs_printf("[37myield(%c)[m\n", thr_arg->character); |
sos_x86_videomem_putchar(thr_arg->row, thr_arg->col, 0x1e, 'Y'); | sos_x86_videomem_putchar(thr_arg->row, thr_arg->col, 0x1e, 'Y'); |
SOS_ASSERT_FATAL(SOS_OK == sos_kthread_yield()); | SOS_ASSERT_FATAL(SOS_OK == sos_thread_yield()); |
} | } |
| |
|
|
struct sos_time t = (struct sos_time){ .sec=0, .nanosec=50000000 }; | struct sos_time t = (struct sos_time){ .sec=0, .nanosec=50000000 }; |
sos_bochs_printf("[37msleep1(%c)[m\n", thr_arg->character); | sos_bochs_printf("[37msleep1(%c)[m\n", thr_arg->character); |
sos_x86_videomem_putchar(thr_arg->row, thr_arg->col, 0x1e, 's'); | sos_x86_videomem_putchar(thr_arg->row, thr_arg->col, 0x1e, 's'); |
SOS_ASSERT_FATAL(SOS_OK == sos_kthread_sleep(& t)); | SOS_ASSERT_FATAL(SOS_OK == sos_thread_sleep(& t)); |
sos_x86_videomem_putchar(thr_arg->row, thr_arg->col, 0x1e, 'R'); | sos_x86_videomem_putchar(thr_arg->row, thr_arg->col, 0x1e, 'R'); |
} | } |
|
|
struct sos_time t = (struct sos_time){ .sec=0, .nanosec=300000000 }; | struct sos_time t = (struct sos_time){ .sec=0, .nanosec=300000000 }; |
sos_bochs_printf("[37msleep2(%c)[m\n", thr_arg->character); | sos_bochs_printf("[37msleep2(%c)[m\n", thr_arg->character); |
sos_x86_videomem_putchar(thr_arg->row, thr_arg->col, 0x1e, 'S'); | sos_x86_videomem_putchar(thr_arg->row, thr_arg->col, 0x1e, 'S'); |
SOS_ASSERT_FATAL(SOS_OK == sos_kthread_sleep(& t)); | SOS_ASSERT_FATAL(SOS_OK == sos_thread_sleep(& t)); |
sos_x86_videomem_putchar(thr_arg->row, thr_arg->col, 0x1e, 'R'); | sos_x86_videomem_putchar(thr_arg->row, thr_arg->col, 0x1e, 'R'); |
} | } |
|
|
} | } |
| |
| |
static void test_kthread() | static void test_thread() |
/* "static" variables because we want them to remain even when the | /* "static" variables because we want them to remain even when the |
function returns */ | function returns */ |
|
|
sos_disable_IRQs(flags); | sos_disable_IRQs(flags); |
| |
arg_b = (struct thr_arg) { .character='b', .col=0, .row=21, .color=0x14 }; | arg_b = (struct thr_arg) { .character='b', .col=0, .row=21, .color=0x14 }; |
sos_kthread_create("YO[b]", demo_thread, (void*)&arg_b); | sos_create_kernel_thread("YO[b]", demo_thread, (void*)&arg_b, SOS_SCHED_PRIO_TS_LOWEST); |
arg_c = (struct thr_arg) { .character='c', .col=46, .row=21, .color=0x14 }; | arg_c = (struct thr_arg) { .character='c', .col=46, .row=21, .color=0x14 }; |
sos_kthread_create("YO[c]", demo_thread, (void*)&arg_c); | sos_create_kernel_thread("YO[c]", demo_thread, (void*)&arg_c, SOS_SCHED_PRIO_TS_LOWEST); |
arg_d = (struct thr_arg) { .character='d', .col=0, .row=20, .color=0x14 }; | arg_d = (struct thr_arg) { .character='d', .col=0, .row=20, .color=0x14 }; |
sos_kthread_create("YO[d]", demo_thread, (void*)&arg_d); | sos_create_kernel_thread("YO[d]", demo_thread, (void*)&arg_d, SOS_SCHED_PRIO_TS_LOWEST-1); |
arg_e = (struct thr_arg) { .character='e', .col=0, .row=19, .color=0x14 }; | arg_e = (struct thr_arg) { .character='e', .col=0, .row=19, .color=0x14 }; |
sos_kthread_create("YO[e]", demo_thread, (void*)&arg_e); | sos_create_kernel_thread("YO[e]", demo_thread, (void*)&arg_e, SOS_SCHED_PRIO_TS_LOWEST-2); |
arg_R = (struct thr_arg) { .character='R', .col=0, .row=17, .color=0x1c }; | arg_R = (struct thr_arg) { .character='R', .col=0, .row=17, .color=0x1c }; |
sos_kthread_create("YO[R]", demo_thread, (void*)&arg_R); | sos_create_kernel_thread("YO[R]", demo_thread, (void*)&arg_R, SOS_SCHED_PRIO_RT_LOWEST); |
arg_S = (struct thr_arg) { .character='S', .col=0, .row=16, .color=0x1c }; | arg_S = (struct thr_arg) { .character='S', .col=0, .row=16, .color=0x1c }; |
sos_kthread_create("YO[S]", demo_thread, (void*)&arg_S); | sos_create_kernel_thread("YO[S]", demo_thread, (void*)&arg_S, SOS_SCHED_PRIO_RT_LOWEST-1); |
sos_restore_IRQs(flags); | sos_restore_IRQs(flags); |
} | } |
|
|
* An operating system MUST always have a ready thread ! Otherwise: | * An operating system MUST always have a ready thread ! Otherwise: |
* what would the CPU have to execute ?! | * what would the CPU have to execute ?! |
*/ | */ |
static void idle_kthread() | static void idle_thread() |
sos_ui32_t idle_twiddle = 0; | sos_ui32_t idle_twiddle = 0; |
| |
|
|
idle_twiddle); | idle_twiddle); |
| |
/* Lend the CPU to some other thread */ | /* Lend the CPU to some other thread */ |
sos_kthread_yield(); | sos_thread_yield(); |
} | } |
| |
|
|
sos_x86_videomem_printf(1, 0, | sos_x86_videomem_printf(1, 0, |
SOS_X86_VIDEO_FG_YELLOW | SOS_X86_VIDEO_BG_BLUE, | SOS_X86_VIDEO_FG_YELLOW | SOS_X86_VIDEO_BG_BLUE, |
"Welcome From GRUB to %s%c RAM is %dMB (upper mem = 0x%x kB)", | "Welcome From GRUB to %s%c RAM is %dMB (upper mem = 0x%x kB)", |
"SOS", ',', | "SOS article 6.75", ',', |
(unsigned)mbi->mem_upper); | (unsigned)mbi->mem_upper); |
else | else |
/* Not loaded with grub */ | /* Not loaded with grub */ |
sos_x86_videomem_printf(1, 0, | sos_x86_videomem_printf(1, 0, |
SOS_X86_VIDEO_FG_YELLOW | SOS_X86_VIDEO_BG_BLUE, | SOS_X86_VIDEO_FG_YELLOW | SOS_X86_VIDEO_BG_BLUE, |
"Welcome to SOS"); | "Welcome to SOS article 6.75"); |
sos_bochs_putstring("Message in a bochs\n"); | sos_bochs_putstring("Message in a bochs: This is SOS article 6.75.\n"); |
/* Setup CPU segmentation and IRQ subsystem */ | /* Setup CPU segmentation and IRQ subsystem */ |
sos_gdt_subsystem_setup(); | sos_gdt_subsystem_setup(); |
|
|
| |
/* | /* |
* Setup kernel virtual memory allocator | * Setup kernel virtual memory allocator |
*/ | */ |
if (sos_kmem_vmm_subsystem_setup(sos_kernel_core_base_paddr, | if (sos_kmem_vmm_subsystem_setup(sos_kernel_core_base_paddr, |
sos_kernel_core_top_paddr, | sos_kernel_core_top_paddr, |
|
|
*/ | */ |
| |
/* Initialize kernel thread subsystem */ | /* Initialize kernel thread subsystem */ |
sos_kthread_subsystem_setup(bootstrap_stack_bottom, | sos_thread_subsystem_setup(bootstrap_stack_bottom, |
bootstrap_stack_size); | bootstrap_stack_size); |
/* Initialize the scheduler */ | /* Initialize the scheduler */ |
sos_sched_subsystem_setup(); | sos_sched_subsystem_setup(); |
| |
/* Declare the IDLE thread */ | /* Declare the IDLE thread */ |
SOS_ASSERT_FATAL(sos_kthread_create("idle", idle_kthread, NULL) != NULL); | SOS_ASSERT_FATAL(sos_create_kernel_thread("idle", idle_thread, NULL, |
| SOS_SCHED_PRIO_TS_LOWEST) != NULL); |
/* Enabling the HW interrupts here, this will make the timer HW | /* Enabling the HW interrupts here, this will make the timer HW |
interrupt call the scheduler */ | interrupt call the scheduler */ |
asm volatile ("sti\n"); | asm volatile ("sti\n"); |
| |
/* | |
* Force the idle thread to run at least once to force a context | |
* switch. This way the "cpu_kstate" of the kernel thread for the | |
* sos_main thread gets a chance to be filled with the current CPU | |
* context. Useful only if we call sos_kthread_exit() too early from | |
* sos_main: a "stack overflow" will be wrongly detected simply | |
* because the "cpu_kstate" of the thread has not be correctly | |
* initialised. A context switch is a good way to initialise it. | |
*/ | |
sos_kthread_yield(); | |
| |
/* Now run some Kernel threads just for fun ! */ | /* Now run some Kernel threads just for fun ! */ |
extern void MouseSim(); | extern void MouseSim(); |
MouseSim(); | MouseSim(); |
test_kthread(); | test_thread(); |
/* | /* |
* We can safely exit from this function now, for there is already | * We can safely exit from this function now, for there is already |
* an idle Kernel thread ready to make the CPU busy working... | * an idle Kernel thread ready to make the CPU busy working... |
* | * |
* However, we must EXPLICITELY call sos_kthread_exit() because a | * However, we must EXPLICITELY call sos_thread_exit() because a |
* was initialized by the Grub bootstrap stage, at a time when the | * was initialized by the Grub bootstrap stage, at a time when the |
* word "thread" did not exist. This means that the stack was not | * word "thread" did not exist. This means that the stack was not |
* setup in order for a return here to call sos_kthread_exit() | * setup in order for a return here to call sos_thread_exit() |
* kernel thread where we must do this manually. | * kernel thread where we must do this manually. |
*/ | */ |
sos_bochs_printf("Bye from primary thread !\n"); | sos_bochs_printf("Bye from primary thread !\n"); |
sos_kthread_exit(); | sos_thread_exit(); |
} | } |
| |
/tmp/sos-code-article6.5/sos/mouse_sim.c (2005-01-04 04:13:49.000000000 +0100
) |
|
../sos-code-article6.75/sos/mouse_sim.c (2005-03-02 17:30:43.000000000 +0100
) |
|
|
|
| |
#include <sos/assert.h> | #include <sos/assert.h> |
#include <sos/klibc.h> | #include <sos/klibc.h> |
#include <sos/kthread.h> | #include <sos/thread.h> |
#include <sos/kmalloc.h> | #include <sos/kmalloc.h> |
#include <drivers/x86_videomem.h> | #include <drivers/x86_videomem.h> |
|
|
sos_ui32_t Status; | sos_ui32_t Status; |
Color_t Color;//Couleur de l'element | Color_t Color;//Couleur de l'element |
Point_t P;//Coordonnees de l'element | Point_t P;//Coordonnees de l'element |
struct sos_kthread * ThreadID;//Thread associe a la souris | struct sos_thread * ThreadID;//Thread associe a la souris |
}; | }; |
| |
|
|
} | } |
| |
| |
void ThreadDelete(struct sos_kthread *tid) | |
{ | |
SOS_ASSERT_FATAL(! "TODO !"); | |
} | |
//***************************************************************************** | //***************************************************************************** |
// Point d'entre de la 'simulation' | // Point d'entre de la 'simulation' |
|
|
void MouseSim(void) | void MouseSim(void) |
{ | { |
//Creation du semaphore de protection de la carte | //Creation du semaphore de protection de la carte |
SOS_ASSERT_FATAL(SOS_OK == sos_ksema_init(& SemMap, "SemMap", 1)); | SOS_ASSERT_FATAL(SOS_OK == sos_ksema_init(& SemMap, "SemMap", 1, |
| SOS_KWQ_ORDER_FIFO)); |
//Creation du semaphore de creation de souris | //Creation du semaphore de creation de souris |
SOS_ASSERT_FATAL(SOS_OK == sos_ksema_init(& SemMouse, "SemMouse", 2)); | SOS_ASSERT_FATAL(SOS_OK == sos_ksema_init(& SemMouse, "SemMouse", 2, |
| SOS_KWQ_ORDER_FIFO)); |
//Creation de la carte | //Creation de la carte |
SOS_ASSERT_FATAL(SOS_OK == CreateMap()); | SOS_ASSERT_FATAL(SOS_OK == CreateMap()); |
| |
//Creation du thread createur de souris | //Creation du thread createur de souris |
SOS_ASSERT_FATAL(sos_kthread_create("MouseCreator", | SOS_ASSERT_FATAL(sos_create_kernel_thread("MouseCreator", |
(sos_kthread_start_routine_t)MouseCreator, | (sos_kernel_thread_start_routine_t)MouseCreator, |
0) != NULL); | 0, SOS_SCHED_PRIO_TS_LOWEST-1) != NULL); |
} | } |
| |
|
|
delay_ms = MOUSE_SPEED_MIN + (random() % MouseSpeed); | delay_ms = MOUSE_SPEED_MIN + (random() % MouseSpeed); |
delay.sec = delay_ms / 1000; | delay.sec = delay_ms / 1000; |
delay.nanosec = (delay_ms % 1000) * 1000000; | delay.nanosec = (delay_ms % 1000) * 1000000; |
sos_kthread_sleep(& delay); | sos_thread_sleep(& delay); |
| |
// Libere la structure associee | // Libere la structure associee |
|
|
pElement->Color = SOS_X86_VIDEO_FG_LTRED; | pElement->Color = SOS_X86_VIDEO_FG_LTRED; |
pElement->P = p; | pElement->P = p; |
pElement->Way = 0; | pElement->Way = 0; |
pElement->ThreadID = sos_kthread_create("Mouse", (sos_kthread_start_routine_t)Mouse, pElement); | pElement->ThreadID |
| = sos_create_kernel_thread("Mouse", |
| (sos_kernel_thread_start_routine_t)Mouse, |
| pElement, SOS_SCHED_PRIO_TS_LOWEST-1); |
{ | { |
sos_kfree((sos_vaddr_t)pElement); | sos_kfree((sos_vaddr_t)pElement); |
pElement = NULL; | pElement = NULL; |
| return -SOS_ENOMEM; |
} | } |
pMap[p.X + (p.Y * MAP_X)] = pElement; | pMap[p.X + (p.Y * MAP_X)] = pElement; |
MouseCount++; | MouseCount++; |
| |
/tmp/sos-code-article6.5/sos/sched.c (2005-01-04 04:13:49.000000000 +0100
) |
|
../sos-code-article6.75/sos/sched.c (2005-03-02 17:30:44.000000000 +0100
) |
|
|
|
USA. | USA. |
*/ | */ |
| |
| #include <sos/errno.h> |
#include <sos/klibc.h> | #include <sos/klibc.h> |
#include <sos/assert.h> | #include <sos/assert.h> |
#include <sos/list.h> | #include <sos/list.h> |
|
|
| |
/** | /** |
* The definition of the scheduler queue. We could have used a normal | * The definition of the scheduler queue. We could have used a normal |
* kwaitq here, it would have had the same properties. But, in the | * kwaitq here, it would have had the same properties (regarding |
* definitive version (O(1) scheduler), the structure has to be a bit | * priority ordering mainly). But we don't bother with size |
* more complicated. So, in order to keep the changes as small as | * considerations here (in kwaitq, we had better make the kwaitq |
* possible between this version and the definitive one, we don't use | * structure as small as possible because there are a lot of kwaitq in |
* kwaitq here. | * the system: at least 1 per opened file), so that we can implement a |
| * much faster way of handling the prioritized jobs. |
static struct | struct sos_sched_queue |
unsigned int nr_threads; | unsigned int nr_threads; |
struct sos_kthread *kthread_list; | struct sos_thread *thread_list[SOS_SCHED_NUM_PRIO]; |
} ready_queue; | }; |
| |
| |
| /** |
| * We manage 2 queues: a queue being scanned for ready threads |
| * (active_queue) and a queue to store the threads the threads having |
| * expired their time quantuum. |
| */ |
| static struct sos_sched_queue *active_queue, *expired_queue; |
| |
| |
| /** |
| * The instances for the active/expired queues |
| */ |
| static struct sos_sched_queue sched_queue[2]; |
| |
sos_ret_t sos_sched_subsystem_setup() | sos_ret_t sos_sched_subsystem_setup() |
{ | { |
memset(& ready_queue, 0x0, sizeof(ready_queue)); | memset(sched_queue, 0x0, sizeof(sched_queue)); |
| active_queue = & sched_queue[0]; |
| expired_queue = & sched_queue[1]; |
return SOS_OK; | return SOS_OK; |
} | } |
|
|
* @param insert_at_tail TRUE to tell to add the thread at the end of | * @param insert_at_tail TRUE to tell to add the thread at the end of |
* the ready list. Otherwise it is added at the head of it. | * the ready list. Otherwise it is added at the head of it. |
*/ | */ |
static sos_ret_t add_in_ready_queue(struct sos_kthread *thr, | static sos_ret_t add_in_ready_queue(struct sos_sched_queue *q, |
| struct sos_thread *thr, |
{ | { |
| sos_sched_priority_t prio; |
| |
SOS_ASSERT_FATAL( (SOS_KTHR_CREATED == thr->state) | SOS_ASSERT_FATAL( (SOS_THR_CREATED == thr->state) |
|| (SOS_KTHR_RUNNING == thr->state) /* Yield */ | || (SOS_THR_RUNNING == thr->state) /* Yield */ |
|| (SOS_KTHR_BLOCKED == thr->state) ); | || (SOS_THR_BLOCKED == thr->state) ); |
/* Add the thread to the CPU queue */ | /* Add the thread to the CPU queue */ |
| prio = sos_thread_get_priority(thr); |
if (insert_at_tail) | if (insert_at_tail) |
list_add_tail_named(ready_queue.kthread_list, thr, | list_add_tail_named(q->thread_list[prio], thr, |
else | else |
list_add_head_named(ready_queue.kthread_list, thr, | list_add_head_named(q->thread_list[prio], thr, |
ready_queue.nr_threads ++; | thr->ready.rdy_queue = q; |
| q->nr_threads ++; |
/* Ok, thread is now really ready to be (re)started */ | /* Ok, thread is now really ready to be (re)started */ |
thr->state = SOS_KTHR_READY; | thr->state = SOS_THR_READY; |
return SOS_OK; | return SOS_OK; |
} | } |
| |
| |
sos_ret_t sos_sched_set_ready(struct sos_kthread *thr) | sos_ret_t sos_sched_set_ready(struct sos_thread *thr) |
sos_ret_t retval; | sos_ret_t retval; |
| |
/* Don't do anything for already ready threads */ | /* Don't do anything for already ready threads */ |
if (SOS_KTHR_READY == thr->state) | if (SOS_THR_READY == thr->state) |
| |
/* Real-time thread: schedule it for the present turn */ | if (SOS_SCHED_PRIO_IS_RT(sos_thread_get_priority(thr))) |
retval = add_in_ready_queue(thr, TRUE); | { |
| /* Real-time thread: schedule it for the present turn */ |
| retval = add_in_ready_queue(active_queue, thr, TRUE); |
| } |
| else |
| { |
| /* Non real-time thread: schedule it for next turn */ |
| retval = add_in_ready_queue(expired_queue, thr, TRUE); |
| } |
return retval; | return retval; |
} | } |
| |
| |
struct sos_kthread * sos_reschedule(struct sos_kthread *current_kthread, | sos_ret_t sos_sched_change_priority(struct sos_thread *thr, |
sos_bool_t do_yield) | sos_sched_priority_t priority) |
| struct sos_thread *thread_list; |
| SOS_ASSERT_FATAL(SOS_THR_READY == thr->state); |
| |
if (SOS_KTHR_ZOMBIE == current_kthread->state) | /* Temp variable */ |
| thread_list |
| = thr->ready.rdy_queue->thread_list[sos_thread_get_priority(thr)]; |
| |
| list_delete_named(thread_list, thr, ready.rdy_prev, ready.rdy_next); |
| |
| /* Update lists */ |
| thread_list = thr->ready.rdy_queue->thread_list[priority]; |
| list_add_tail_named(thread_list, thr, ready.rdy_prev, ready.rdy_next); |
| thr->ready.rdy_queue->thread_list[priority] = thread_list; |
| |
| return SOS_OK; |
| } |
| |
| |
| struct sos_thread * sos_reschedule(struct sos_thread *current_thread, |
| sos_bool_t do_yield) |
| { |
| sos_sched_priority_t prio; |
| |
| if (SOS_THR_ZOMBIE == current_thread->state) |
/* Don't think of returning to this thread since it is | /* Don't think of returning to this thread since it is |
terminated */ | terminated */ |
/* Nop */ | /* Nop */ |
} | } |
else if (SOS_KTHR_BLOCKED != current_kthread->state) | else if (SOS_THR_BLOCKED != current_thread->state) |
/* Take into account the current executing thread unless it is | /* Take into account the current executing thread unless it is |
marked blocked */ | marked blocked */ |
if (do_yield) | if (do_yield) |
/* Ok, reserve it for next turn */ | { |
add_in_ready_queue(current_kthread, TRUE); | /* Ok, reserve it for next turn */ |
| if (SOS_SCHED_PRIO_IS_RT(sos_thread_get_priority(current_thread))) |
| add_in_ready_queue(active_queue, current_thread, TRUE); |
| else |
| add_in_ready_queue(expired_queue, current_thread, TRUE); |
| } |
/* Put it at the head of the active list */ | { |
add_in_ready_queue(current_kthread, FALSE); | /* Put it at the head of the active list */ |
| add_in_ready_queue(active_queue, current_thread, FALSE); |
| } |
| } |
| |
| |
| /* Active queue is empty ? */ |
| if (active_queue->nr_threads <= 0) |
| { |
| /* Yes: Exchange it with the expired queue */ |
| struct sos_sched_queue *q; |
| q = active_queue; |
| active_queue = expired_queue; |
| expired_queue = q; |
| |
/* The next thread is that at the head of the ready list */ | /* Now loop over the priorities in the active queue, looking for a |
if (ready_queue.nr_threads > 0) | non-empty queue */ |
| for (prio = SOS_SCHED_PRIO_HIGHEST ; prio <= SOS_SCHED_PRIO_LOWEST ; prio ++) |
struct sos_kthread *next_thr; | struct sos_thread *next_thr; |
| if (list_is_empty_named(active_queue->thread_list[prio], |
| ready.rdy_prev, ready.rdy_next)) |
| continue; |
| |
/* Queue is not empty: take the thread at its head */ | /* Queue is not empty: take the thread at its head */ |
next_thr = list_pop_head_named(ready_queue.kthread_list, | next_thr = list_pop_head_named(active_queue->thread_list[prio], |
ready_queue.nr_threads --; | active_queue->nr_threads --; |
return next_thr; | return next_thr; |
} | } |
| |
| |
SOS_FATAL_ERROR("No kernel thread ready ?!"); | SOS_FATAL_ERROR("No kernel thread ready ?!"); |
return NULL; | return NULL; |
} | } |
| |
/tmp/sos-code-article6.5/sos/sched.h (2005-01-04 04:13:49.000000000 +0100
) |
|
../sos-code-article6.75/sos/sched.h (2005-03-02 17:30:44.000000000 +0100
) |
|
|
|
/** | /** |
* @file sched.h | * @file sched.h |
* | * |
* A basic scheduler with simple FIFO threads' ordering. | * A basic scheduler inspired from the O(1) Linux scheduler. Supports |
| * 2 classes of thread priorities: |
| * - so-called 'real-time' threads scheduled according to a simple |
| * traditional static priority real-time scheduler. "Real-time" round |
| * robin scheduling is not supported. |
| * - "fair" time-sharing scheduling for non real-time threads. "fair" |
| * because no starvation among the non real-time threads is |
| * possible. Contrary to the original O(1) Linux scheduler, the |
| * on-line adjustment of the scheduling priorities to cope with |
| * interactive/non interactive threads discrimination is not |
| * supported: threads keep having the same priority as long as the |
| * user does not change it. |
* The functions below manage CPU queues, and are NEVER responsible | * The functions below manage CPU queues, and are NEVER responsible |
* for context switches (see kthread.h for that) or synchronizations | * for context switches (see thread.h for that) or synchronizations |
* ...] for that). | * ...] for that). |
* | * |
* @note IMPORTANT: all the functions below are meant to be called | * @note IMPORTANT: all the functions below are meant to be called |
* ONLY by the kthread/timer/kwaitq subsystems. DO NOT use them | * ONLY by the thread/timer/kwaitq subsystems. DO NOT use them |
* directly from anywhere else: use ONLY the kthread/kwaitq functions! | * directly from anywhere else: use ONLY the thread/kwaitq functions! |
* simply disable interrupts before clling them. | * simply disable interrupts before clling them. |
*/ | */ |
|
|
#include <sos/errno.h> | #include <sos/errno.h> |
| |
| |
#include <sos/kthread.h> | /** |
| * The definition of a priority |
| */ |
| typedef unsigned char sos_sched_priority_t; |
| |
| |
| #include <sos/thread.h> |
| |
| |
| /** |
| * Valid priority interval ("real-time" and non real-time threads altogether) |
| */ |
| #define SOS_SCHED_PRIO_HIGHEST 0 |
| #define SOS_SCHED_PRIO_LOWEST 63 |
| #define SOS_SCHED_NUM_PRIO 64 |
| |
| |
| /** |
| * Class-specific priorities |
| */ |
| #define SOS_SCHED_PRIO_RT_HIGHEST 0 /**< Highest 'real-time' static prio. */ |
| #define SOS_SCHED_PRIO_RT_LOWEST 15 /**< Lowest 'real-time' static priority */ |
| #define SOS_SCHED_PRIO_TS_HIGHEST 16 /**< Highest time-sharing priority */ |
| #define SOS_SCHED_PRIO_TS_LOWEST 63 /**< Lowest time-sharing priority */ |
| |
| #define SOS_SCHED_PRIO_DEFAULT 40 /**< Default priority */ |
| |
| |
| /** |
| * Helper macros (Yes, priorities ordered in decreasing numerical value) |
| * |
| * @note: The use of this function is RESERVED |
| */ |
| #define SOS_SCHED_PRIO_CMP(prio1,prio2) ((prio1) - (prio2)) |
| |
| #define SOS_SCHED_PRIO_IS_VALID(prio) \ |
| ({ int __prio = (int)(prio); \ |
| ((__prio) <= SOS_SCHED_PRIO_LOWEST) \ |
| && \ |
| ((__prio) >= SOS_SCHED_PRIO_HIGHEST); }) |
| |
| #define SOS_SCHED_PRIO_IS_RT(prio) \ |
| ({ int __prio = (int)(prio); \ |
| ((__prio) <= SOS_SCHED_PRIO_RT_LOWEST) \ |
| && \ |
| ((__prio) >= SOS_SCHED_PRIO_RT_HIGHEST); }) |
| |
/** | /** |
|
|
* | * |
* @note: The use of this function is RESERVED | * @note: The use of this function is RESERVED |
*/ | */ |
sos_ret_t sos_sched_set_ready(struct sos_kthread * thr); | sos_ret_t sos_sched_set_ready(struct sos_thread * thr); |
| |
/** | /** |
* Return the identifier of the next kthread to run. Also removes it | * Return the identifier of the next thread to run. Also removes it |
* from the ready list, but does NOT set is as current_kthread ! | * from the ready list, but does NOT set is as current_thread ! |
* @param current_kthread TCB of the thread calling the function | * @param current_thread TCB of the thread calling the function |
* @param do_yield When TRUE, put the current executing thread at the | * @param do_yield When TRUE, put the current executing thread at the |
* end of the ready list. Otherwise it is kept at the head of it. | * end of the ready list. Otherwise it is kept at the head of it. |
* | * |
* @note: The use of this function is RESERVED | * @note: The use of this function is RESERVED |
*/ | */ |
struct sos_kthread * sos_reschedule(struct sos_kthread * current_kthread, | struct sos_thread * sos_reschedule(struct sos_thread * current_thread, |
| |
| /** |
| * Called by thread subsystem each time a READY thread's priority is |
| * changed |
| * |
| * @note: The use of this function is RESERVED (to thread.c) |
| */ |
| sos_ret_t sos_sched_change_priority(struct sos_thread * thr, |
| sos_sched_priority_t priority); |
| |
| |
#endif /* _SOS_WAITQUEUE_H_ */ | #endif /* _SOS_WAITQUEUE_H_ */ |
| |
/tmp/sos-code-article6.5/sos/thread.c (1970-01-01 01:00:00.000000000 +0100
) |
|
../sos-code-article6.75/sos/thread.c (2005-03-02 17:30:44.000000000 +0100
) |
|
|
|
| /* Copyright (C) 2004,2005 David Decotigny |
| |
| This program is free software; you can redistribute it and/or |
| modify it under the terms of the GNU General Public License |
| as published by the Free Software Foundation; either version 2 |
| of the License, or (at your option) any later version. |
| |
| This program is distributed in the hope that it will be useful, |
| but WITHOUT ANY WARRANTY; without even the implied warranty of |
| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| GNU General Public License for more details. |
| |
| You should have received a copy of the GNU General Public License |
| along with this program; if not, write to the Free Software |
| Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, |
| USA. |
| */ |
| |
| #include <sos/physmem.h> |
| #include <sos/kmem_slab.h> |
| #include <sos/kmalloc.h> |
| #include <sos/klibc.h> |
| #include <sos/list.h> |
| #include <sos/assert.h> |
| |
| #include <hwcore/irq.h> |
| |
| #include "thread.h" |
| |
| |
| /** |
| * The size of the stack of a kernel thread |
| */ |
| #define SOS_THREAD_KERNEL_STACK_SIZE (1*SOS_PAGE_SIZE) |
| |
| |
| /** |
| * The identifier of the thread currently running on CPU. |
| * |
| * We only support a SINGLE processor, ie a SINGLE thread |
| * running at any time in the system. This greatly simplifies the |
| * implementation of the system, since we don't have to complicate |
| * things in order to retrieve the identifier of the threads running |
| * on the CPU. On multiprocessor systems the current_thread below is |
| * an array indexed by the id of the CPU, so that the challenge is to |
| * retrieve the identifier of the CPU. This is usually done based on |
| * the stack address (Linux implementation) or on some form of TLS |
| * ("Thread Local Storage": can be implemented by way of LDTs for the |
| * processes, accessed through the fs or gs registers). |
| */ |
| static volatile struct sos_thread *current_thread = NULL; |
| |
| |
| /* |
| * The list of threads currently in the system. |
| * |
| * @note We could have used current_thread for that... |
| */ |
| static struct sos_thread *thread_list = NULL; |
| |
| |
| /** |
| * The Cache of thread structures |
| */ |
| static struct sos_kslab_cache *cache_thread; |
| |
| |
| struct sos_thread *sos_thread_get_current() |
| { |
| SOS_ASSERT_FATAL(current_thread->state == SOS_THR_RUNNING); |
| return (struct sos_thread*)current_thread; |
| } |
| |
| |
| inline static sos_ret_t _set_current(struct sos_thread *thr) |
| { |
| SOS_ASSERT_FATAL(thr->state == SOS_THR_READY); |
| current_thread = thr; |
| current_thread->state = SOS_THR_RUNNING; |
| return SOS_OK; |
| } |
| |
| |
| sos_ret_t sos_thread_subsystem_setup(sos_vaddr_t init_thread_stack_base_addr, |
| sos_size_t init_thread_stack_size) |
| { |
| struct sos_thread *myself; |
| |
| /* Allocate the cache of threads */ |
| cache_thread = sos_kmem_cache_create("thread", |
| sizeof(struct sos_thread), |
| 2, |
| 0, |
| SOS_KSLAB_CREATE_MAP |
| | SOS_KSLAB_CREATE_ZERO); |
| if (! cache_thread) |
| return -SOS_ENOMEM; |
| |
| /* Allocate a new thread structure for the current running thread */ |
| myself = (struct sos_thread*) sos_kmem_cache_alloc(cache_thread, |
| SOS_KSLAB_ALLOC_ATOMIC); |
| if (! myself) |
| return -SOS_ENOMEM; |
| |
| /* Initialize the thread attributes */ |
| strzcpy(myself->name, "[kinit]", SOS_THR_MAX_NAMELEN); |
| myself->state = SOS_THR_CREATED; |
| myself->priority = SOS_SCHED_PRIO_LOWEST; |
| myself->kernel_stack_base_addr = init_thread_stack_base_addr; |
| myself->kernel_stack_size = init_thread_stack_size; |
| |
| /* Do some stack poisoning on the bottom of the stack, if needed */ |
| sos_cpu_state_prepare_detect_kernel_stack_overflow(myself->cpu_state, |
| myself->kernel_stack_base_addr, |
| myself->kernel_stack_size); |
| |
| /* Add the thread in the global list */ |
| list_singleton_named(thread_list, myself, gbl_prev, gbl_next); |
| |
| /* Ok, now pretend that the running thread is ourselves */ |
| myself->state = SOS_THR_READY; |
| _set_current(myself); |
| |
| return SOS_OK; |
| } |
| |
| |
| struct sos_thread * |
| sos_create_kernel_thread(const char *name, |
| sos_kernel_thread_start_routine_t start_func, |
| void *start_arg, |
| sos_sched_priority_t priority) |
| { |
| __label__ undo_creation; |
| sos_ui32_t flags; |
| struct sos_thread *new_thread; |
| |
| if (! start_func) |
| return NULL; |
| if (! SOS_SCHED_PRIO_IS_VALID(priority)) |
| return NULL; |
| |
| /* Allocate a new thread structure for the current running thread */ |
| new_thread |
| = (struct sos_thread*) sos_kmem_cache_alloc(cache_thread, |
| SOS_KSLAB_ALLOC_ATOMIC); |
| if (! new_thread) |
| return NULL; |
| |
| /* Initialize the thread attributes */ |
| strzcpy(new_thread->name, ((name)?name:"[NONAME]"), SOS_THR_MAX_NAMELEN); |
| new_thread->state = SOS_THR_CREATED; |
| new_thread->priority = priority; |
| |
| /* Allocate the stack for the new thread */ |
| new_thread->kernel_stack_base_addr = sos_kmalloc(SOS_THREAD_KERNEL_STACK_SIZE, 0); |
| new_thread->kernel_stack_size = SOS_THREAD_KERNEL_STACK_SIZE; |
| if (! new_thread->kernel_stack_base_addr) |
| goto undo_creation; |
| |
| /* Initialize the CPU context of the new thread */ |
| if (SOS_OK |
| != sos_cpu_kstate_init(& new_thread->cpu_state, |
| (sos_cpu_kstate_function_arg1_t*) start_func, |
| (sos_ui32_t) start_arg, |
| new_thread->kernel_stack_base_addr, |
| new_thread->kernel_stack_size, |
| (sos_cpu_kstate_function_arg1_t*) sos_thread_exit, |
| (sos_ui32_t) NULL)) |
| goto undo_creation; |
| |
| /* Add the thread in the global list */ |
| sos_disable_IRQs(flags); |
| list_add_tail_named(thread_list, new_thread, gbl_prev, gbl_next); |
| sos_restore_IRQs(flags); |
| |
| /* Mark the thread ready */ |
| if (SOS_OK != sos_sched_set_ready(new_thread)) |
| goto undo_creation; |
| |
| /* Normal non-erroneous end of function */ |
| return new_thread; |
| |
| undo_creation: |
| if (new_thread->kernel_stack_base_addr) |
| sos_kfree((sos_vaddr_t) new_thread->kernel_stack_base_addr); |
| sos_kmem_cache_free((sos_vaddr_t) new_thread); |
| return NULL; |
| } |
| |
| |
| /** Function called after thr has terminated. Called from inside the context |
| of another thread, interrupts disabled */ |
| static void delete_thread(struct sos_thread *thr) |
| { |
| sos_ui32_t flags; |
| |
| sos_disable_IRQs(flags); |
| list_delete_named(thread_list, thr, gbl_prev, gbl_next); |
| sos_restore_IRQs(flags); |
| |
| sos_kfree((sos_vaddr_t) thr->kernel_stack_base_addr); |
| memset(thr, 0x0, sizeof(struct sos_thread)); |
| sos_kmem_cache_free((sos_vaddr_t) thr); |
| } |
| |
| |
/**
 * Terminate the current thread: mark it ZOMBIE, elect the next
 * runnable thread and switch to it for good. The dying thread cannot
 * free its own kernel stack (it is still executing on it), so the
 * actual reclamation (delete_thread) is delegated to the destination
 * context via sos_cpu_context_exit_to(). Never returns.
 */
void sos_thread_exit()
{
  sos_ui32_t flags;
  struct sos_thread *myself, *next_thread;

  /* Interrupt handlers are NOT allowed to exit the current thread ! */
  SOS_ASSERT_FATAL(! sos_servicing_irq());

  myself = sos_thread_get_current();

  /* Refuse to end the current executing thread if it still holds a
     resource ! (ie it is still registered in at least one kwaitq) */
  SOS_ASSERT_FATAL(list_is_empty_named(myself->kwaitq_list,
                                       prev_entry_for_thread,
                                       next_entry_for_thread));

  /* Prepare to run the next thread */
  sos_disable_IRQs(flags);
  myself->state = SOS_THR_ZOMBIE;
  next_thread = sos_reschedule(myself, FALSE);

  /* Make sure that the next_thread is valid */
  sos_cpu_state_detect_kernel_stack_overflow(next_thread->cpu_state,
                                             next_thread->kernel_stack_base_addr,
                                             next_thread->kernel_stack_size);

  /* No need for sos_restore_IRQs() here because the IRQ flag will be
     restored to that of the next thread upon context switch */

  /* Immediate switch to next thread: delete_thread(myself) will run
     from the new context, where freeing our stack and TCB is safe */
  _set_current(next_thread);
  sos_cpu_context_exit_to(next_thread->cpu_state,
                          (sos_cpu_kstate_function_arg1_t*) delete_thread,
                          (sos_ui32_t) myself);
}
| |
| |
| sos_sched_priority_t sos_thread_get_priority(struct sos_thread *thr) |
| { |
| if (! thr) |
| thr = (struct sos_thread*)current_thread; |
| |
| return thr->priority; |
| } |
| |
| |
| sos_thread_state_t sos_thread_get_state(struct sos_thread *thr) |
| { |
| if (! thr) |
| thr = (struct sos_thread*)current_thread; |
| |
| return thr->state; |
| } |
| |
| |
typedef enum { YIELD_MYSELF, BLOCK_MYSELF } switch_type_t;
/**
 * Helper function to initiate a context switch in case the current
 * thread becomes blocked, waiting for a timeout, or calls yield.
 *
 * @param operation YIELD_MYSELF keeps the thread runnable (it is
 *   handed back to the scheduler as still-ready); BLOCK_MYSELF marks
 *   it BLOCKED so it will not run again until someone wakes it up.
 *
 * @return SOS_OK, but only once the thread is given the CPU back
 *   (or immediately when it is re-elected without a switch).
 *
 * @note Callers disable IRQs around this function (see
 *   sos_thread_yield / sos_thread_sleep).
 */
static sos_ret_t _switch_to_next_thread(switch_type_t operation)
{
  struct sos_thread *myself, *next_thread;

  SOS_ASSERT_FATAL(current_thread->state == SOS_THR_RUNNING);

  /* Interrupt handlers are NOT allowed to block ! */
  SOS_ASSERT_FATAL(! sos_servicing_irq());

  myself = (struct sos_thread*)current_thread;

  /* Make sure that if we are to be marked "BLOCKED", we have any
     reason of effectively being blocked */
  if (BLOCK_MYSELF == operation)
    {
      myself->state = SOS_THR_BLOCKED;
    }

  /* Identify the next thread */
  next_thread = sos_reschedule(myself, YIELD_MYSELF == operation);

  /* Avoid context switch if the context does not change */
  if (myself != next_thread)
    {
      /* Sanity checks for the next thread */
      sos_cpu_state_detect_kernel_stack_overflow(next_thread->cpu_state,
                                                 next_thread->kernel_stack_base_addr,
                                                 next_thread->kernel_stack_size);


      /*
       * Actual CPU context switch: execution resumes past this call
       * only when some other thread switches back to us
       */
      _set_current(next_thread);
      sos_cpu_context_switch(& myself->cpu_state, next_thread->cpu_state);

      /* Back here ! We are running again, by definition */
      SOS_ASSERT_FATAL(current_thread == myself);
      SOS_ASSERT_FATAL(current_thread->state == SOS_THR_RUNNING);
    }
  else
    {
      /* No context switch but still update ID of current thread */
      _set_current(next_thread);
    }

  return SOS_OK;
}
| |
| |
/**
 * Helper function to change the thread's priority in all the
 * waitqueues associated with the thread.
 *
 * @note The caller disables IRQs around this call (see
 *   sos_thread_set_priority): the kwaitq list is traversed without
 *   further protection.
 */
static sos_ret_t _change_waitq_priorities(struct sos_thread *thr,
                                          sos_sched_priority_t priority)
{
  struct sos_kwaitq_entry *kwq_entry;
  int nb_waitqs;

  /* Visit each kwaitq this thread is registered in and update its
     priority there */
  list_foreach_forward_named(thr->kwaitq_list, kwq_entry, nb_waitqs,
                             prev_entry_for_thread, next_entry_for_thread)
    {
      SOS_ASSERT_FATAL(SOS_OK == sos_kwaitq_change_priority(kwq_entry->kwaitq,
                                                            kwq_entry,
                                                            priority));
    }

  return SOS_OK;
}
| |
| |
| sos_ret_t sos_thread_set_priority(struct sos_thread *thr, |
| sos_sched_priority_t priority) |
| { |
| __label__ exit_set_prio; |
| sos_ui32_t flags; |
| sos_ret_t retval; |
| |
| |
| if (! SOS_SCHED_PRIO_IS_VALID(priority)) |
| return -SOS_EINVAL; |
| |
| if (! thr) |
| thr = (struct sos_thread*)current_thread; |
| |
| sos_disable_IRQs(flags); |
| |
| /* Signal kwaitq subsystem that the priority of the thread in all |
| the waitq it is waiting in should be updated */ |
| retval = _change_waitq_priorities(thr, priority); |
| if (SOS_OK != retval) |
| goto exit_set_prio; |
| |
| /* Signal scheduler that the thread, currently in a waiting list, |
| should take into account the change of priority */ |
| if (SOS_THR_READY == thr->state) |
| retval = sos_sched_change_priority(thr, priority); |
| |
| /* Update priority */ |
| thr->priority = priority; |
| |
| exit_set_prio: |
| sos_restore_IRQs(flags); |
| return retval; |
| } |
| |
| |
| sos_ret_t sos_thread_yield() |
| { |
| sos_ui32_t flags; |
| sos_ret_t retval; |
| |
| sos_disable_IRQs(flags); |
| |
| retval = _switch_to_next_thread(YIELD_MYSELF); |
| |
| sos_restore_IRQs(flags); |
| return retval; |
| } |
| |
| |
/**
 * Internal sleep timeout management: passed as the private data of
 * the timeout action, so that the sleep_timeout() callback can tell
 * the sleeping thread how it was woken up.
 */
struct sleep_timeout_params
{
  /* The thread to force back to READY when the delay expires */
  struct sos_thread *thread_to_wakeup;

  /* TRUE when the wakeup came from the timeout itself (and not from
     a third party, eg sos_thread_force_unblock) */
  sos_bool_t timeout_triggered;
};
| |
| |
| /** |
| * Callback called when a timeout happened |
| */ |
| static void sleep_timeout(struct sos_timeout_action *act) |
| { |
| struct sleep_timeout_params *sleep_timeout_params |
| = (struct sleep_timeout_params*) act->routine_data; |
| |
| /* Signal that we have been woken up by the timeout */ |
| sleep_timeout_params->timeout_triggered = TRUE; |
| |
| /* Mark the thread ready */ |
| SOS_ASSERT_FATAL(SOS_OK == |
| sos_thread_force_unblock(sleep_timeout_params |
| ->thread_to_wakeup)); |
| } |
| |
| |
/**
 * Make the current thread sleep for (at least) the given delay, or
 * forever when timeout is NULL.
 *
 * @param timeout In: requested delay. Out: remaining delay (zero when
 *   the full delay elapsed). Left untouched when NULL.
 *
 * @return SOS_OK when the delay expired, -SOS_EINTR when woken up
 *   earlier by a third party (eg sos_thread_force_unblock).
 */
sos_ret_t sos_thread_sleep(struct sos_time *timeout)
{
  sos_ui32_t flags;
  struct sleep_timeout_params sleep_timeout_params;
  struct sos_timeout_action timeout_action;
  sos_ret_t retval;

  /* Block forever if no timeout is given */
  if (NULL == timeout)
    {
      sos_disable_IRQs(flags);
      retval = _switch_to_next_thread(BLOCK_MYSELF);
      sos_restore_IRQs(flags);

      return retval;
    }

  /* Initialize the timeout action */
  sos_time_init_action(& timeout_action);

  /* Prepare parameters used by the sleep timeout callback */
  sleep_timeout_params.thread_to_wakeup
    = (struct sos_thread*)current_thread;
  sleep_timeout_params.timeout_triggered = FALSE;

  sos_disable_IRQs(flags);

  /* Now program the timeout ! sleep_timeout() will be called with
     our sleep_timeout_params when the delay expires */
  SOS_ASSERT_FATAL(SOS_OK ==
                   sos_time_register_action_relative(& timeout_action,
                                                     timeout,
                                                     sleep_timeout,
                                                     & sleep_timeout_params));

  /* Prepare to block: wait for sleep_timeout() to wakeup us in the
     timeout kwaitq, or for someone to wake us up in any other
     waitq */
  retval = _switch_to_next_thread(BLOCK_MYSELF);
  /* Unblocked by something ! */

  /* Unblocked by timeout ? */
  if (sleep_timeout_params.timeout_triggered)
    {
      /* Yes: the action already fired, nothing to unregister */
      SOS_ASSERT_FATAL(sos_time_is_zero(& timeout_action.timeout));
      retval = SOS_OK;
    }
  else
    {
      /* No: We have probably been woken up while in some other
         kwaitq: cancel the still-pending timeout action */
      SOS_ASSERT_FATAL(SOS_OK == sos_time_unregister_action(& timeout_action));
      retval = -SOS_EINTR;
    }

  sos_restore_IRQs(flags);

  /* Update the remaining timeout (zero when the delay fully
     elapsed) */
  memcpy(timeout, & timeout_action.timeout, sizeof(struct sos_time));

  return retval;
}
| |
| |
| sos_ret_t sos_thread_force_unblock(struct sos_thread *thread) |
| { |
| sos_ret_t retval; |
| sos_ui32_t flags; |
| |
| if (! thread) |
| return -SOS_EINVAL; |
| |
| sos_disable_IRQs(flags); |
| |
| /* Thread already woken up ? */ |
| retval = SOS_OK; |
| switch(sos_thread_get_state(thread)) |
| { |
| case SOS_THR_RUNNING: |
| case SOS_THR_READY: |
| /* Do nothing */ |
| break; |
| |
| case SOS_THR_ZOMBIE: |
| retval = -SOS_EFATAL; |
| break; |
| |
| default: |
| retval = sos_sched_set_ready(thread); |
| break; |
| } |
| |
| sos_restore_IRQs(flags); |
| |
| return retval; |
| } |
| |
/tmp/sos-code-article6.5/sos/thread.h (1970-01-01 01:00:00.000000000 +0100
) |
|
../sos-code-article6.75/sos/thread.h (2005-03-02 17:30:44.000000000 +0100
) |
|
|
|
| /* Copyright (C) 2004,2005 David Decotigny |
| |
| This program is free software; you can redistribute it and/or |
| modify it under the terms of the GNU General Public License |
| as published by the Free Software Foundation; either version 2 |
| of the License, or (at your option) any later version. |
| |
| This program is distributed in the hope that it will be useful, |
| but WITHOUT ANY WARRANTY; without even the implied warranty of |
| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| GNU General Public License for more details. |
| |
| You should have received a copy of the GNU General Public License |
| along with this program; if not, write to the Free Software |
| Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, |
| USA. |
| */ |
| #ifndef _SOS_THREAD_H_ |
| #define _SOS_THREAD_H_ |
| |
| /** |
| * @file thread.h |
| * |
| * SOS Thread management API |
| */ |
| |
| #include <sos/errno.h> |
| |
| /* Forward declaration */ |
| struct sos_thread; |
| |
| #include <hwcore/cpu_context.h> |
| #include <sos/sched.h> |
| #include <sos/kwaitq.h> |
| #include <sos/time.h> |
| |
/**
 * The possible states of a valid thread (see the accessor
 * sos_thread_get_state() below)
 */
typedef enum { SOS_THR_CREATED, /**< Thread created, not fully initialized */
               SOS_THR_READY,   /**< Thread fully initialized or
                                     waiting for CPU after having been
                                     blocked or preempted */
               SOS_THR_RUNNING, /**< Thread currently running on CPU */
               SOS_THR_BLOCKED, /**< Thread waiting for I/O (+ in at LEAST
                                     one kwaitq) and/or sleeping (+ in NO
                                     kwaitq) */
               SOS_THR_ZOMBIE,  /**< Thread terminated execution, waiting to
                                     be deleted by kernel */
             } sos_thread_state_t;
| |
| |
/**
 * TCB (Thread Control Block): structure describing a thread. Don't
 * access these fields directly: prefer using the accessor functions
 * below.
 */
struct sos_thread
{
#define SOS_THR_MAX_NAMELEN 32
  /* Human-readable thread name (debugging aid) */
  char name[SOS_THR_MAX_NAMELEN];

  sos_thread_state_t state;     /* See sos_thread_state_t above */
  sos_sched_priority_t priority;

  /**
   * The hardware context of the thread.
   *
   * It will reflect the CPU state of the thread:
   * - From an interrupt handler: the state of the thread at the time
   *   of the OUTERMOST irq. An IRQ is not allowed to make context
   *   switches, so this context will remain valid from the begining of
   *   the outermost IRQ handler to the end of it, no matter if there
   *   are other IRQ handlers nesting in one another. You may safely
   *   use it from IRQ handlers to query the state of the interrupted
   *   thread, no matter if there has been other IRQ handlers
   *   executing meanwhile.
   * - From normal kernel code, exceptions and syscall: the state of
   *   the thread the last time there was a context switch from this
   *   thread to another one. Thus this field WON'T reflect the
   *   current's thread cpu_state in these cases. So, in these cases,
   *   simply DO NOT USE IT outside thread.c ! Note: for syscall and
   *   exception handlers, the VALID state of the interrupted thread is
   *   passed as an argument to the handlers.
   */
  struct sos_cpu_state *cpu_state;

  /* Kernel stack parameters */
  sos_vaddr_t kernel_stack_base_addr;
  sos_size_t kernel_stack_size;

  /* Data specific to each state */
  union
  {
    struct
    {
      struct sos_sched_queue *rdy_queue;
      struct sos_thread *rdy_prev, *rdy_next;
    } ready;
  }; /* Anonymous union (gcc extension) */


  /*
   * Data used by the kwaitq subsystem: list of kwaitqueues the thread
   * is waiting for.
   *
   * @note: a RUNNING or READY thread might be in one or more
   * waitqueues ! The only property we have is that, among these
   * waitqueues (if any), _at least_ one has woken the thread.
   */
  struct sos_kwaitq_entry *kwaitq_list;


  /**
   * Chaining pointers for global ("gbl") list of threads (debug)
   */
  struct sos_thread *gbl_prev, *gbl_next;
};
| |
| |
/**
 * Definition of the function executed by a kernel thread: receives
 * the start_arg that was passed to sos_create_kernel_thread()
 */
typedef void (*sos_kernel_thread_start_routine_t)(void *arg);
| |
| |
| /** |
| * Initialize the subsystem responsible for thread management |
| * |
| * Initialize the primary kernel thread so that it can be handled the |
| * same way as an ordinary thread created by sos_thread_create(). |
| */ |
| sos_ret_t sos_thread_subsystem_setup(sos_vaddr_t init_thread_stack_base_addr, |
| sos_size_t init_thread_stack_size); |
| |
| |
| /** |
| * Create a new kernel thread |
| */ |
| struct sos_thread * |
| sos_create_kernel_thread(const char *name, |
| sos_kernel_thread_start_routine_t start_func, |
| void *start_arg, |
| sos_sched_priority_t priority); |
| |
| |
| /** |
| * Terminate the execution of the current thread. For kernel threads, |
| * it is called by default when the start routine returns. |
| */ |
| void sos_thread_exit() __attribute__((noreturn)); |
| |
| |
| /** |
| * Get the identifier of the thread currently running on CPU. Trivial |
| * function. |
| */ |
| struct sos_thread *sos_thread_get_current(); |
| |
| |
| /** |
| * If thr == NULL, set the priority of the current thread. Trivial |
| * function. |
| * |
| * @note NOT protected against interrupts |
| */ |
| sos_sched_priority_t sos_thread_get_priority(struct sos_thread *thr); |
| |
| |
| /** |
| * If thr == NULL, get the state of the current thread. Trivial |
| * function. |
| * |
| * @note NOT protected against interrupts |
| */ |
| sos_thread_state_t sos_thread_get_state(struct sos_thread *thr); |
| |
| |
| /** |
| * If thr == NULL, set the priority of the current thread |
| * |
| * @note NO context-switch ever occurs in this function ! |
| */ |
| sos_ret_t sos_thread_set_priority(struct sos_thread *thr, |
| sos_sched_priority_t priority); |
| |
| |
| /** |
| * Yield CPU to another ready thread. |
| * |
| * @note This is a BLOCKING FUNCTION |
| */ |
| sos_ret_t sos_thread_yield(); |
| |
| |
| /** |
| * Release the CPU for (at least) the given delay. |
| * |
| * @param delay The delay to wait for. If delay == NULL then wait |
| * forever that any event occurs. |
| * |
| * @return SOS_OK when delay expired (and delay is reset to zero), |
| * -SOS_EINTR otherwise (and delay contains the amount of time |
| * remaining). |
| * |
| * @note This is a BLOCKING FUNCTION |
| */ |
| sos_ret_t sos_thread_sleep(/* in/out */struct sos_time *delay); |
| |
| |
| /** |
| * Mark the given thread as READY (if not already ready) even if it is |
| * blocked in a kwaitq or in a sleep ! As a result, the interrupted |
| * kwaitq/sleep function call of the thread will return with |
| * -SOS_EINTR. |
| * |
| * @return -SOS_EINVAL if thread does not exist, or -SOS_EFATAL if |
| * marked ZOMBIE. |
| * |
| * @note As a result, the semaphore/mutex/conditions/... functions |
| * return values SHOULD ALWAYS be checked ! If they are != SOS_OK, |
| * then the caller should consider that the resource is not aquired |
| * because somebody woke the thread by some way. |
| */ |
| sos_ret_t sos_thread_force_unblock(struct sos_thread *thread); |
| |
| |
| #endif /* _SOS_THREAD_H_ */ |
| |