The LXR Cross Referencer for SOS


Diff markup

Differences between /sos/thread.c (Article 7) and /sos/thread.c (Article 6.5)


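The left-hand column below is /sos/thread.c as of Article 7, the right-hand column as of Article 6.5; lines marked << on the right exist only in the Article 7 version, and !! marks a changed line. Relative to Article 6.5, Article 7 adds scheduling priorities, user-mode threads attached to a process, MMU context (mm_context) handling around context switches, and a backtrace dump helper. Because the left column is cut at a fixed width, here is a sketch of the main new entry points reconstructed from the visible fragments; exact prototypes may differ slightly from the real Article 7 header:

  struct sos_thread *
  sos_create_kernel_thread(const char *name,
                           sos_kernel_thread_start_routine_t start_func,
                           void *start_arg,
                           sos_sched_priority_t priority);

  struct sos_thread *
  sos_create_user_thread(const char *name,
                         struct sos_process *process,
                         sos_uaddr_t user_initial_PC,
                         sos_ui32_t  user_start_arg1,
                         sos_ui32_t  user_start_arg2,
                         sos_uaddr_t user_initial_SP,
                         sos_sched_priority_t priority);

  sos_sched_priority_t sos_thread_get_priority(struct sos_thread *thr);

  sos_ret_t sos_thread_set_priority(struct sos_thread *thr,
                                    sos_sched_priority_t priority);

  sos_ret_t sos_thread_change_current_mm_context(struct sos_mm_context *mm_ctxt);

  void sos_thread_dump_backtrace(sos_bool_t on_console, sos_bool_t on_bochs);

In Article 6.5 (right column) sos_create_kernel_thread() took only the name, the start routine and its argument; the priority parameter, and everything dealing with struct sos_process and struct sos_mm_context, is new in Article 7.
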
001 /* Copyright (C) 2004,2005 David Decotigny        001 /* Copyright (C) 2004,2005 David Decotigny
002                                                   002 
003    This program is free software; you can redi    003    This program is free software; you can redistribute it and/or
004    modify it under the terms of the GNU Genera    004    modify it under the terms of the GNU General Public License
005    as published by the Free Software Foundatio    005    as published by the Free Software Foundation; either version 2
006    of the License, or (at your option) any lat    006    of the License, or (at your option) any later version.
007                                                   007    
008    This program is distributed in the hope tha    008    This program is distributed in the hope that it will be useful,
009    but WITHOUT ANY WARRANTY; without even the     009    but WITHOUT ANY WARRANTY; without even the implied warranty of
010    MERCHANTABILITY or FITNESS FOR A PARTICULAR    010    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
011    GNU General Public License for more details    011    GNU General Public License for more details.
012                                                   012    
013    You should have received a copy of the GNU     013    You should have received a copy of the GNU General Public License
014    along with this program; if not, write to t    014    along with this program; if not, write to the Free Software
015    Foundation, Inc., 59 Temple Place - Suite 3    015    Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
016    USA.                                           016    USA. 
017 */                                                017 */
018                                                   018 
019 #include <sos/physmem.h>                          019 #include <sos/physmem.h>
020 #include <sos/kmem_slab.h>                        020 #include <sos/kmem_slab.h>
021 #include <sos/kmalloc.h>                          021 #include <sos/kmalloc.h>
022 #include <sos/klibc.h>                            022 #include <sos/klibc.h>
023 #include <sos/list.h>                             023 #include <sos/list.h>
024 #include <sos/assert.h>                           024 #include <sos/assert.h>
025 #include <hwcore/mm_context.h>                 << 
026 #include <sos/process.h>                       << 
027                                                << 
028 #include <drivers/bochs.h>                     << 
029 #include <drivers/x86_videomem.h>              << 
030                                                   025 
031 #include <hwcore/irq.h>                           026 #include <hwcore/irq.h>
032                                                   027 
033 #include "thread.h"                               028 #include "thread.h"
034                                                   029 
035                                                   030 
036 /**                                               031 /**
037  * The size of the stack of a kernel thread       032  * The size of the stack of a kernel thread
038  */                                               033  */
039 #define SOS_THREAD_KERNEL_STACK_SIZE (1*SOS_PA    034 #define SOS_THREAD_KERNEL_STACK_SIZE (1*SOS_PAGE_SIZE)
040                                                   035 
041                                                   036 
042 /**                                               037 /**
043  * The identifier of the thread currently runn    038  * The identifier of the thread currently running on CPU.
044  *                                                039  *
045  * We only support a SINGLE processor, ie a SI    040  * We only support a SINGLE processor, ie a SINGLE thread
046  * running at any time in the system. This gre    041  * running at any time in the system. This greatly simplifies the
047  * implementation of the system, since we don'    042  * implementation of the system, since we don't have to complicate
048  * things in order to retrieve the identifier     043  * things in order to retrieve the identifier of the threads running
049  * on the CPU. On multiprocessor systems the c    044  * on the CPU. On multiprocessor systems the current_thread below is
050  * an array indexed by the id of the CPU, so t    045  * an array indexed by the id of the CPU, so that the challenge is to
051  * retrieve the identifier of the CPU. This is    046  * retrieve the identifier of the CPU. This is usually done based on
052  * the stack address (Linux implementation) or    047  * the stack address (Linux implementation) or on some form of TLS
053  * ("Thread Local Storage": can be implemented    048  * ("Thread Local Storage": can be implemented by way of LDTs for the
054  * processes, accessed through the fs or gs re    049  * processes, accessed through the fs or gs registers).
055  */                                               050  */
056 static volatile struct sos_thread *current_thr    051 static volatile struct sos_thread *current_thread = NULL;
057                                                   052 
058                                                   053 
059 /*                                                054 /*
060  * The list of threads currently in the system    055  * The list of threads currently in the system.
061  *                                                056  *
062  * @note We could have used current_thread for    057  * @note We could have used current_thread for that...
063  */                                               058  */
064 static struct sos_thread *thread_list = NULL;     059 static struct sos_thread *thread_list = NULL;
065                                                   060 
066                                                   061 
067 /**                                               062 /**
068  * The Cache of thread structures                 063  * The Cache of thread structures
069  */                                               064  */
070 static struct sos_kslab_cache *cache_thread;      065 static struct sos_kslab_cache *cache_thread;
071                                                   066 
072                                                   067 
073 struct sos_thread *sos_thread_get_current()       068 struct sos_thread *sos_thread_get_current()
074 {                                                 069 {
075   SOS_ASSERT_FATAL(current_thread->state == SO    070   SOS_ASSERT_FATAL(current_thread->state == SOS_THR_RUNNING);
076   return (struct sos_thread*)current_thread;      071   return (struct sos_thread*)current_thread;
077 }                                                 072 }
078                                                   073 
079                                                   074 
080 inline static sos_ret_t _set_current(struct so    075 inline static sos_ret_t _set_current(struct sos_thread *thr)
081 {                                                 076 {
082   SOS_ASSERT_FATAL(thr->state == SOS_THR_READY    077   SOS_ASSERT_FATAL(thr->state == SOS_THR_READY);
083   current_thread = thr;                           078   current_thread = thr;
084   current_thread->state = SOS_THR_RUNNING;        079   current_thread->state = SOS_THR_RUNNING;
085   return SOS_OK;                                  080   return SOS_OK;
086 }                                                 081 }
087                                                   082 
088                                                   083 
089 sos_ret_t sos_thread_subsystem_setup(sos_vaddr    084 sos_ret_t sos_thread_subsystem_setup(sos_vaddr_t init_thread_stack_base_addr,
090                                      sos_size_    085                                      sos_size_t init_thread_stack_size)
091 {                                                 086 {
092   struct sos_thread *myself;                      087   struct sos_thread *myself;
093                                                   088 
094   /* Allocate the cache of threads */             089   /* Allocate the cache of threads */
095   cache_thread = sos_kmem_cache_create("thread    090   cache_thread = sos_kmem_cache_create("thread",
096                                        sizeof(    091                                        sizeof(struct sos_thread),
097                                        2,         092                                        2,
098                                        0,         093                                        0,
099                                        SOS_KSL    094                                        SOS_KSLAB_CREATE_MAP
100                                        | SOS_K    095                                        | SOS_KSLAB_CREATE_ZERO);
101   if (! cache_thread)                             096   if (! cache_thread)
102     return -SOS_ENOMEM;                           097     return -SOS_ENOMEM;
103                                                   098 
104   /* Allocate a new thread structure for the c    099   /* Allocate a new thread structure for the current running thread */
105   myself = (struct sos_thread*) sos_kmem_cache    100   myself = (struct sos_thread*) sos_kmem_cache_alloc(cache_thread,
106                                                   101                                                      SOS_KSLAB_ALLOC_ATOMIC);
107   if (! myself)                                   102   if (! myself)
108     return -SOS_ENOMEM;                           103     return -SOS_ENOMEM;
109                                                   104 
110   /* Initialize the thread attributes */          105   /* Initialize the thread attributes */
111   strzcpy(myself->name, "[kinit]", SOS_THR_MAX    106   strzcpy(myself->name, "[kinit]", SOS_THR_MAX_NAMELEN);
112   myself->state           = SOS_THR_CREATED;      107   myself->state           = SOS_THR_CREATED;
113   myself->priority        = SOS_SCHED_PRIO_LOW << 
114   myself->kernel_stack_base_addr = init_thread    108   myself->kernel_stack_base_addr = init_thread_stack_base_addr;
115   myself->kernel_stack_size      = init_thread    109   myself->kernel_stack_size      = init_thread_stack_size;
116                                                   110 
117   /* Do some stack poisoning on the bottom of     111   /* Do some stack poisoning on the bottom of the stack, if needed */
118   sos_cpu_state_prepare_detect_kernel_stack_ov    112   sos_cpu_state_prepare_detect_kernel_stack_overflow(myself->cpu_state,
119                                                   113                                                      myself->kernel_stack_base_addr,
120                                                   114                                                      myself->kernel_stack_size);
121                                                   115 
122   /* Add the thread in the global list */         116   /* Add the thread in the global list */
123   list_singleton_named(thread_list, myself, gb    117   list_singleton_named(thread_list, myself, gbl_prev, gbl_next);
124                                                   118 
125   /* Ok, now pretend that the running thread i    119   /* Ok, now pretend that the running thread is ourselves */
126   myself->state = SOS_THR_READY;                  120   myself->state = SOS_THR_READY;
127   _set_current(myself);                           121   _set_current(myself);
128                                                   122 
129   return SOS_OK;                                  123   return SOS_OK;
130 }                                                 124 }
131                                                   125 
132                                                   126 
133 struct sos_thread *                               127 struct sos_thread *
134 sos_create_kernel_thread(const char *name,        128 sos_create_kernel_thread(const char *name,
135                          sos_kernel_thread_sta    129                          sos_kernel_thread_start_routine_t start_func,
136                          void *start_arg,      !! 130                          void *start_arg)
137                          sos_sched_priority_t  << 
138 {                                                 131 {
139   __label__ undo_creation;                        132   __label__ undo_creation;
140   sos_ui32_t flags;                               133   sos_ui32_t flags;
141   struct sos_thread *new_thread;                  134   struct sos_thread *new_thread;
142                                                   135 
143   if (! start_func)                               136   if (! start_func)
144     return NULL;                                  137     return NULL;
145   if (! SOS_SCHED_PRIO_IS_VALID(priority))     << 
146     return NULL;                               << 
147                                                   138 
148   /* Allocate a new thread structure for the c    139   /* Allocate a new thread structure for the current running thread */
149   new_thread                                      140   new_thread
150     = (struct sos_thread*) sos_kmem_cache_allo    141     = (struct sos_thread*) sos_kmem_cache_alloc(cache_thread,
151                                                   142                                                 SOS_KSLAB_ALLOC_ATOMIC);
152   if (! new_thread)                               143   if (! new_thread)
153     return NULL;                                  144     return NULL;
154                                                   145 
155   /* Initialize the thread attributes */          146   /* Initialize the thread attributes */
156   strzcpy(new_thread->name, ((name)?name:"[NON    147   strzcpy(new_thread->name, ((name)?name:"[NONAME]"), SOS_THR_MAX_NAMELEN);
157   new_thread->state    = SOS_THR_CREATED;         148   new_thread->state    = SOS_THR_CREATED;
158   new_thread->priority = priority;             << 
159                                                   149 
160   /* Allocate the stack for the new thread */     150   /* Allocate the stack for the new thread */
161   new_thread->kernel_stack_base_addr = sos_kma    151   new_thread->kernel_stack_base_addr = sos_kmalloc(SOS_THREAD_KERNEL_STACK_SIZE, 0);
162   new_thread->kernel_stack_size      = SOS_THR    152   new_thread->kernel_stack_size      = SOS_THREAD_KERNEL_STACK_SIZE;
163   if (! new_thread->kernel_stack_base_addr)       153   if (! new_thread->kernel_stack_base_addr)
164     goto undo_creation;                           154     goto undo_creation;
165                                                   155 
166   /* Initialize the CPU context of the new thr    156   /* Initialize the CPU context of the new thread */
167   if (SOS_OK                                      157   if (SOS_OK
168       != sos_cpu_kstate_init(& new_thread->cpu    158       != sos_cpu_kstate_init(& new_thread->cpu_state,
169                              (sos_cpu_kstate_f    159                              (sos_cpu_kstate_function_arg1_t*) start_func,
170                              (sos_ui32_t) star    160                              (sos_ui32_t) start_arg,
171                              new_thread->kerne    161                              new_thread->kernel_stack_base_addr,
172                              new_thread->kerne    162                              new_thread->kernel_stack_size,
173                              (sos_cpu_kstate_f    163                              (sos_cpu_kstate_function_arg1_t*) sos_thread_exit,
174                              (sos_ui32_t) NULL    164                              (sos_ui32_t) NULL))
175     goto undo_creation;                           165     goto undo_creation;
176                                                   166 
177   /* Add the thread in the global list */         167   /* Add the thread in the global list */
178   sos_disable_IRQs(flags);                        168   sos_disable_IRQs(flags);
179   list_add_tail_named(thread_list, new_thread,    169   list_add_tail_named(thread_list, new_thread, gbl_prev, gbl_next);
180   sos_restore_IRQs(flags);                        170   sos_restore_IRQs(flags);
181                                                   171 
182   /* Mark the thread ready */                     172   /* Mark the thread ready */
183   if (SOS_OK != sos_sched_set_ready(new_thread    173   if (SOS_OK != sos_sched_set_ready(new_thread))
184     goto undo_creation;                           174     goto undo_creation;
185                                                   175 
186   /* Normal non-erroneous end of function */      176   /* Normal non-erroneous end of function */
187   return new_thread;                              177   return new_thread;
188                                                   178 
189  undo_creation:                                   179  undo_creation:
190   if (new_thread->kernel_stack_base_addr)         180   if (new_thread->kernel_stack_base_addr)
191     sos_kfree((sos_vaddr_t) new_thread->kernel    181     sos_kfree((sos_vaddr_t) new_thread->kernel_stack_base_addr);
192   sos_kmem_cache_free((sos_vaddr_t) new_thread    182   sos_kmem_cache_free((sos_vaddr_t) new_thread);
193   return NULL;                                    183   return NULL;
194 }                                                 184 }
195                                                   185 
196                                                   186 
197 struct sos_thread *                            << 
198 sos_create_user_thread(const char *name,       << 
199                        struct sos_process *pro << 
200                        sos_uaddr_t user_initia << 
201                        sos_ui32_t  user_start_ << 
202                        sos_ui32_t  user_start_ << 
203                        sos_uaddr_t user_initia << 
204                        sos_sched_priority_t pr << 
205 {                                              << 
206   __label__ undo_creation;                     << 
207   sos_ui32_t flags;                            << 
208   struct sos_thread *new_thread;               << 
209                                                << 
210   if (! SOS_SCHED_PRIO_IS_VALID(priority))     << 
211     return NULL;                               << 
212                                                << 
213   /* For a user thread, the process must be gi << 
214   if (! process)                               << 
215     return NULL;                               << 
216                                                << 
217   /* Allocate a new thread structure for the c << 
218   new_thread                                   << 
219     = (struct sos_thread*) sos_kmem_cache_allo << 
220                                                << 
221   if (! new_thread)                            << 
222     return NULL;                               << 
223                                                << 
224   /* Initialize the thread attributes */       << 
225   strzcpy(new_thread->name, ((name)?name:"[NON << 
226   new_thread->state    = SOS_THR_CREATED;      << 
227   new_thread->priority = priority;             << 
228                                                << 
229   /* Allocate the stack for the new thread */  << 
230   new_thread->kernel_stack_base_addr = sos_kma << 
231   new_thread->kernel_stack_size      = SOS_THR << 
232   if (! new_thread->kernel_stack_base_addr)    << 
233     goto undo_creation;                        << 
234                                                << 
235   if (SOS_OK                                   << 
236       != sos_cpu_ustate_init(& new_thread->cpu << 
237                              user_initial_PC,  << 
238                              user_start_arg1,  << 
239                              user_start_arg2,  << 
240                              user_initial_SP,  << 
241                              new_thread->kerne << 
242                              new_thread->kerne << 
243     goto undo_creation;                        << 
244                                                << 
245   /* Attach the new thread to the process */   << 
246   if (SOS_OK != sos_process_register_thread(pr << 
247     goto undo_creation;                        << 
248                                                << 
249   /* Add the thread in the global list */      << 
250   sos_disable_IRQs(flags);                     << 
251   list_add_tail_named(thread_list, new_thread, << 
252   sos_restore_IRQs(flags);                     << 
253                                                << 
254   /* Mark the thread ready */                  << 
255   if (SOS_OK != sos_sched_set_ready(new_thread << 
256     goto undo_creation;                        << 
257                                                << 
258   /* Normal non-erroneous end of function */   << 
259   return new_thread;                           << 
260                                                << 
261  undo_creation:                                << 
262   if (new_thread->kernel_stack_base_addr)      << 
263     sos_kfree((sos_vaddr_t) new_thread->kernel << 
264   sos_kmem_cache_free((sos_vaddr_t) new_thread << 
265   return NULL;                                 << 
266 }                                              << 
267                                                << 
268                                                << 
269 /**                                            << 
270  * Helper function to switch to the correct MM << 
271  * the_thread's needs.                         << 
272  *   - When switching to a user-mode thread, f << 
273  *     of the MMU                              << 
274  *   - When switching to a kernel-mode thread, << 
275  *     configuration if the thread was squatti << 
276  */                                            << 
277 static void _prepare_mm_context(struct sos_thr << 
278 {                                              << 
279   /* Going to restore a thread in user mode ?  << 
280   if (sos_cpu_context_is_in_user_mode(the_thre << 
281       == TRUE)                                 << 
282     {                                          << 
283       /* Yes: force the MMU to be correctly se << 
284          user's address space */               << 
285                                                << 
286       /* The thread should be a user thread */ << 
287       SOS_ASSERT_FATAL(the_thread->process !=  << 
288                                                << 
289       /* It should not squat any other's addre << 
290       SOS_ASSERT_FATAL(the_thread->squatted_mm << 
291                                                << 
292       /* Perform an MMU context switch if need << 
293       sos_mm_context_switch_to(sos_process_get << 
294     }                                          << 
295                                                << 
296   /* the_thread is a kernel thread squatting a << 
297      space ? */                                << 
298   else if (the_thread->squatted_mm_context !=  << 
299     sos_mm_context_switch_to(the_thread->squat << 
300 }                                              << 
301                                                << 
302                                                << 
303 /** Function called after thr has terminated.     187 /** Function called after thr has terminated. Called from inside the context
304     of another thread, interrupts disabled */     188     of another thread, interrupts disabled */
305 static void delete_thread(struct sos_thread *t    189 static void delete_thread(struct sos_thread *thr)
306 {                                                 190 {
307   sos_ui32_t flags;                               191   sos_ui32_t flags;
308                                                   192 
309   sos_disable_IRQs(flags);                        193   sos_disable_IRQs(flags);
310   list_delete_named(thread_list, thr, gbl_prev    194   list_delete_named(thread_list, thr, gbl_prev, gbl_next);
311   sos_restore_IRQs(flags);                        195   sos_restore_IRQs(flags);
312                                                   196 
313   sos_kfree((sos_vaddr_t) thr->kernel_stack_ba    197   sos_kfree((sos_vaddr_t) thr->kernel_stack_base_addr);
314                                                << 
315   /* If the thread squats an address space, re << 
316   if (thr->squatted_mm_context)                << 
317     SOS_ASSERT_FATAL(SOS_OK == sos_thread_chan << 
318                                                << 
319   /* For a user thread: remove the thread from << 
320   if (thr->process)                            << 
321     SOS_ASSERT_FATAL(SOS_OK == sos_process_unr << 
322                                                << 
323   memset(thr, 0x0, sizeof(struct sos_thread));    198   memset(thr, 0x0, sizeof(struct sos_thread));
324   sos_kmem_cache_free((sos_vaddr_t) thr);         199   sos_kmem_cache_free((sos_vaddr_t) thr);
325 }                                                 200 }
326                                                   201 
327                                                   202 
328 void sos_thread_exit()                            203 void sos_thread_exit()
329 {                                                 204 {
330   sos_ui32_t flags;                               205   sos_ui32_t flags;
331   struct sos_thread *myself, *next_thread;        206   struct sos_thread *myself, *next_thread;
332                                                   207 
333   /* Interrupt handlers are NOT allowed to exi    208   /* Interrupt handlers are NOT allowed to exit the current thread ! */
334   SOS_ASSERT_FATAL(! sos_servicing_irq());        209   SOS_ASSERT_FATAL(! sos_servicing_irq());
335                                                   210 
336   myself = sos_thread_get_current();              211   myself = sos_thread_get_current();
337                                                   212 
338   /* Refuse to end the current executing threa    213   /* Refuse to end the current executing thread if it still holds a
339      resource ! */                                214      resource ! */
340   SOS_ASSERT_FATAL(list_is_empty_named(myself-    215   SOS_ASSERT_FATAL(list_is_empty_named(myself->kwaitq_list,
341                                        prev_en    216                                        prev_entry_for_thread,
342                                        next_en    217                                        next_entry_for_thread));
343                                                   218 
344   /* Prepare to run the next thread */            219   /* Prepare to run the next thread */
345   sos_disable_IRQs(flags);                        220   sos_disable_IRQs(flags);
346   myself->state = SOS_THR_ZOMBIE;                 221   myself->state = SOS_THR_ZOMBIE;
347   next_thread = sos_reschedule(myself, FALSE);    222   next_thread = sos_reschedule(myself, FALSE);
348                                                   223 
349   /* Make sure that the next_thread is valid *    224   /* Make sure that the next_thread is valid */
350   sos_cpu_state_detect_kernel_stack_overflow(n    225   sos_cpu_state_detect_kernel_stack_overflow(next_thread->cpu_state,
351                                              n    226                                              next_thread->kernel_stack_base_addr,
352                                              n    227                                              next_thread->kernel_stack_size);
353                                                   228 
354   /*                                           << 
355    * Perform an MMU context switch if needed   << 
356    */                                          << 
357   _prepare_mm_context(next_thread);            << 
358                                                << 
359   /* No need for sos_restore_IRQs() here becau    229   /* No need for sos_restore_IRQs() here because the IRQ flag will be
360      restored to that of the next thread upon     230      restored to that of the next thread upon context switch */
361                                                   231 
362   /* Immediate switch to next thread */           232   /* Immediate switch to next thread */
363   _set_current(next_thread);                      233   _set_current(next_thread);
364   sos_cpu_context_exit_to(next_thread->cpu_sta    234   sos_cpu_context_exit_to(next_thread->cpu_state,
365                           (sos_cpu_kstate_func    235                           (sos_cpu_kstate_function_arg1_t*) delete_thread,
366                           (sos_ui32_t) myself)    236                           (sos_ui32_t) myself);
367 }                                                 237 }
368                                                   238 
369                                                   239 
370 sos_sched_priority_t sos_thread_get_priority(s << 
371 {                                              << 
372   if (! thr)                                   << 
373     thr = (struct sos_thread*)current_thread;  << 
374                                                << 
375   return thr->priority;                        << 
376 }                                              << 
377                                                << 
378                                                << 
379 sos_thread_state_t sos_thread_get_state(struct    240 sos_thread_state_t sos_thread_get_state(struct sos_thread *thr)
380 {                                                 241 {
381   if (! thr)                                      242   if (! thr)
382     thr = (struct sos_thread*)current_thread;     243     thr = (struct sos_thread*)current_thread;
383                                                   244 
384   return thr->state;                              245   return thr->state;
385 }                                                 246 }
386                                                   247 
387                                                   248 
388 typedef enum { YIELD_MYSELF, BLOCK_MYSELF } sw    249 typedef enum { YIELD_MYSELF, BLOCK_MYSELF } switch_type_t;
389 /**                                               250 /**
390  * Helper function to initiate a context switc    251  * Helper function to initiate a context switch in case the current
391  * thread becomes blocked, waiting for a timeo    252  * thread becomes blocked, waiting for a timeout, or calls yield.
392  */                                               253  */
393 static sos_ret_t _switch_to_next_thread(switch    254 static sos_ret_t _switch_to_next_thread(switch_type_t operation)
394 {                                                 255 {
395   struct sos_thread *myself, *next_thread;        256   struct sos_thread *myself, *next_thread;
396                                                   257 
397   SOS_ASSERT_FATAL(current_thread->state == SO    258   SOS_ASSERT_FATAL(current_thread->state == SOS_THR_RUNNING);
398                                                   259 
399   /* Interrupt handlers are NOT allowed to blo    260   /* Interrupt handlers are NOT allowed to block ! */
400   SOS_ASSERT_FATAL(! sos_servicing_irq());        261   SOS_ASSERT_FATAL(! sos_servicing_irq());
401                                                   262 
402   myself = (struct sos_thread*)current_thread;    263   myself = (struct sos_thread*)current_thread;
403                                                   264 
404   /* Make sure that if we are to be marked "BL    265   /* Make sure that if we are to be marked "BLOCKED", we have any
405      reason of effectively being blocked */       266      reason of effectively being blocked */
406   if (BLOCK_MYSELF == operation)                  267   if (BLOCK_MYSELF == operation)
407     {                                             268     {
408       myself->state = SOS_THR_BLOCKED;            269       myself->state = SOS_THR_BLOCKED;
409     }                                             270     }
410                                                   271 
411   /* Identify the next thread */                  272   /* Identify the next thread */
412   next_thread = sos_reschedule(myself, YIELD_M    273   next_thread = sos_reschedule(myself, YIELD_MYSELF == operation);
413                                                   274 
414   /* Avoid context switch if the context does     275   /* Avoid context switch if the context does not change */
415   if (myself != next_thread)                      276   if (myself != next_thread)
416     {                                             277     {
417       /* Sanity checks for the next thread */     278       /* Sanity checks for the next thread */
418       sos_cpu_state_detect_kernel_stack_overfl    279       sos_cpu_state_detect_kernel_stack_overflow(next_thread->cpu_state,
419                                                   280                                                  next_thread->kernel_stack_base_addr,
420                                                   281                                                  next_thread->kernel_stack_size);
421                                                   282 
422       /*                                       << 
423        * Perform an MMU context switch if need << 
424        */                                      << 
425       _prepare_mm_context(next_thread);        << 
426                                                   283 
427       /*                                          284       /*
428        * Actual CPU context switch                285        * Actual CPU context switch
429        */                                         286        */
430       _set_current(next_thread);                  287       _set_current(next_thread);
431       sos_cpu_context_switch(& myself->cpu_sta    288       sos_cpu_context_switch(& myself->cpu_state, next_thread->cpu_state);
432                                                   289       
433       /* Back here ! */                           290       /* Back here ! */
434       SOS_ASSERT_FATAL(current_thread == mysel    291       SOS_ASSERT_FATAL(current_thread == myself);
435       SOS_ASSERT_FATAL(current_thread->state =    292       SOS_ASSERT_FATAL(current_thread->state == SOS_THR_RUNNING);
436     }                                             293     }
437   else                                            294   else
438     {                                             295     {
439       /* No context switch but still update ID    296       /* No context switch but still update ID of current thread */
440       _set_current(next_thread);                  297       _set_current(next_thread);
441     }                                             298     }
442                                                   299 
443   return SOS_OK;                                  300   return SOS_OK;
444 }                                                 301 }
445                                                   302 
446                                                   303 
447 /**                                            << 
448  * Helper function to change the thread's prio << 
449  * waitqueues associated with the thread.      << 
450  */                                            << 
451 static sos_ret_t _change_waitq_priorities(stru << 
452                                           sos_ << 
453 {                                              << 
454   struct sos_kwaitq_entry *kwq_entry;          << 
455   int nb_waitqs;                               << 
456                                                << 
457   list_foreach_forward_named(thr->kwaitq_list, << 
458                              prev_entry_for_th << 
459     {                                          << 
460       SOS_ASSERT_FATAL(SOS_OK == sos_kwaitq_ch << 
461                                                << 
462                                                << 
463     }                                          << 
464                                                << 
465   return SOS_OK;                               << 
466 }                                              << 
467                                                << 
468                                                << 
469 sos_ret_t sos_thread_set_priority(struct sos_t << 
470                                    sos_sched_p << 
471 {                                              << 
472   __label__ exit_set_prio;                     << 
473   sos_ui32_t flags;                            << 
474   sos_ret_t retval;                            << 
475                                                << 
476                                                << 
477   if (! SOS_SCHED_PRIO_IS_VALID(priority))     << 
478     return -SOS_EINVAL;                        << 
479                                                << 
480   if (! thr)                                   << 
481     thr = (struct sos_thread*)current_thread;  << 
482                                                << 
483   sos_disable_IRQs(flags);                     << 
484                                                << 
485   /* Signal kwaitq subsystem that the priority << 
486      the waitq it is waiting in should be upda << 
487   retval = _change_waitq_priorities(thr, prior << 
488   if (SOS_OK != retval)                        << 
489     goto exit_set_prio;                        << 
490                                                << 
491   /* Signal scheduler that the thread, current << 
492      should take into account the change of pr << 
493   if (SOS_THR_READY == thr->state)             << 
494     retval = sos_sched_change_priority(thr, pr << 
495                                                << 
496   /* Update priority */                        << 
497   thr->priority = priority;                    << 
498                                                << 
499  exit_set_prio:                                << 
500   sos_restore_IRQs(flags);                     << 
501   return retval;                               << 
502 }                                              << 
503                                                << 
504                                                << 
505 sos_ret_t sos_thread_yield()                      304 sos_ret_t sos_thread_yield()
506 {                                                 305 {
507   sos_ui32_t flags;                               306   sos_ui32_t flags;
508   sos_ret_t retval;                               307   sos_ret_t retval;
509                                                   308 
510   sos_disable_IRQs(flags);                        309   sos_disable_IRQs(flags);
511                                                   310 
512   retval = _switch_to_next_thread(YIELD_MYSELF    311   retval = _switch_to_next_thread(YIELD_MYSELF);
513                                                   312 
514   sos_restore_IRQs(flags);                        313   sos_restore_IRQs(flags);
515   return retval;                                  314   return retval;
516 }                                                 315 }
517                                                   316 
518                                                   317 
519 /**                                               318 /**
520  * Internal sleep timeout management              319  * Internal sleep timeout management
521  */                                               320  */
522 struct sleep_timeout_params                       321 struct sleep_timeout_params
523 {                                                 322 {
524   struct sos_thread *thread_to_wakeup;            323   struct sos_thread *thread_to_wakeup;
525   sos_bool_t timeout_triggered;                   324   sos_bool_t timeout_triggered;
526 };                                                325 };
527                                                   326 
528                                                   327 
529 /**                                               328 /**
530  * Callback called when a timeout happened        329  * Callback called when a timeout happened
531  */                                               330  */
532 static void sleep_timeout(struct sos_timeout_a    331 static void sleep_timeout(struct sos_timeout_action *act)
533 {                                                 332 {
534   struct sleep_timeout_params *sleep_timeout_p    333   struct sleep_timeout_params *sleep_timeout_params
535     = (struct sleep_timeout_params*) act->rout    334     = (struct sleep_timeout_params*) act->routine_data;
536                                                   335 
537   /* Signal that we have been woken up by the     336   /* Signal that we have been woken up by the timeout */
538   sleep_timeout_params->timeout_triggered = TR    337   sleep_timeout_params->timeout_triggered = TRUE;
539                                                   338 
540   /* Mark the thread ready */                     339   /* Mark the thread ready */
541   SOS_ASSERT_FATAL(SOS_OK ==                      340   SOS_ASSERT_FATAL(SOS_OK ==
542                    sos_thread_force_unblock(sl    341                    sos_thread_force_unblock(sleep_timeout_params
543                                              -    342                                              ->thread_to_wakeup));
544 }                                                 343 }
545                                                   344 
546                                                   345 
547 sos_ret_t sos_thread_sleep(struct sos_time *ti    346 sos_ret_t sos_thread_sleep(struct sos_time *timeout)
548 {                                                 347 {
549   sos_ui32_t flags;                               348   sos_ui32_t flags;
550   struct sleep_timeout_params sleep_timeout_pa    349   struct sleep_timeout_params sleep_timeout_params;
551   struct sos_timeout_action timeout_action;       350   struct sos_timeout_action timeout_action;
552   sos_ret_t retval;                               351   sos_ret_t retval;
553                                                   352 
554   /* Block forever if no timeout is given */      353   /* Block forever if no timeout is given */
555   if (NULL == timeout)                            354   if (NULL == timeout)
556     {                                             355     {
557       sos_disable_IRQs(flags);                    356       sos_disable_IRQs(flags);
558       retval = _switch_to_next_thread(BLOCK_MY    357       retval = _switch_to_next_thread(BLOCK_MYSELF);
559       sos_restore_IRQs(flags);                    358       sos_restore_IRQs(flags);
560                                                   359 
561       return retval;                              360       return retval;
562     }                                             361     }
563                                                   362 
564   /* Initialize the timeout action */             363   /* Initialize the timeout action */
565   sos_time_init_action(& timeout_action);         364   sos_time_init_action(& timeout_action);
566                                                   365 
567   /* Prepare parameters used by the sleep time    366   /* Prepare parameters used by the sleep timeout callback */
568   sleep_timeout_params.thread_to_wakeup           367   sleep_timeout_params.thread_to_wakeup 
569     = (struct sos_thread*)current_thread;         368     = (struct sos_thread*)current_thread;
570   sleep_timeout_params.timeout_triggered = FAL    369   sleep_timeout_params.timeout_triggered = FALSE;
571                                                   370 
572   sos_disable_IRQs(flags);                        371   sos_disable_IRQs(flags);
573                                                   372 
574   /* Now program the timeout ! */                 373   /* Now program the timeout ! */
575   SOS_ASSERT_FATAL(SOS_OK ==                      374   SOS_ASSERT_FATAL(SOS_OK ==
576                    sos_time_register_action_re    375                    sos_time_register_action_relative(& timeout_action,
577                                                   376                                                      timeout,
578                                                   377                                                      sleep_timeout,
579                                                   378                                                      & sleep_timeout_params));
580                                                   379 
581   /* Prepare to block: wait for sleep_timeout(    380   /* Prepare to block: wait for sleep_timeout() to wakeup us in the
582      timeout kwaitq, or for someone to wake us    381      timeout kwaitq, or for someone to wake us up in any other
583      waitq */                                     382      waitq */
584   retval = _switch_to_next_thread(BLOCK_MYSELF    383   retval = _switch_to_next_thread(BLOCK_MYSELF);
585   /* Unblocked by something ! */                  384   /* Unblocked by something ! */
586                                                   385 
587   /* Unblocked by timeout ? */                    386   /* Unblocked by timeout ? */
588   if (sleep_timeout_params.timeout_triggered)     387   if (sleep_timeout_params.timeout_triggered)
589     {                                             388     {
590       /* Yes */                                   389       /* Yes */
591       SOS_ASSERT_FATAL(sos_time_is_zero(& time    390       SOS_ASSERT_FATAL(sos_time_is_zero(& timeout_action.timeout));
592       retval = SOS_OK;                            391       retval = SOS_OK;
593     }                                             392     }
594   else                                            393   else
595     {                                             394     {
596       /* No: We have probably been woken up wh    395       /* No: We have probably been woken up while in some other
597          kwaitq */                                396          kwaitq */
598       SOS_ASSERT_FATAL(SOS_OK == sos_time_unre    397       SOS_ASSERT_FATAL(SOS_OK == sos_time_unregister_action(& timeout_action));
599       retval = -SOS_EINTR;                        398       retval = -SOS_EINTR;
600     }                                             399     }
601                                                   400 
602   sos_restore_IRQs(flags);                        401   sos_restore_IRQs(flags);
603                                                   402 
604   /* Update the remaining timeout */              403   /* Update the remaining timeout */
605   memcpy(timeout, & timeout_action.timeout, si    404   memcpy(timeout, & timeout_action.timeout, sizeof(struct sos_time));
606                                                   405 
607   return retval;                                  406   return retval;
608 }                                                 407 }
609                                                   408 
610                                                   409 
611 sos_ret_t sos_thread_force_unblock(struct sos_    410 sos_ret_t sos_thread_force_unblock(struct sos_thread *thread)
612 {                                                 411 {
613   sos_ret_t retval;                               412   sos_ret_t retval;
614   sos_ui32_t flags;                               413   sos_ui32_t flags;
615                                                   414 
616   if (! thread)                                   415   if (! thread)
617     return -SOS_EINVAL;                           416     return -SOS_EINVAL;
618                                                   417   
619   sos_disable_IRQs(flags);                        418   sos_disable_IRQs(flags);
620                                                   419 
621   /* Thread already woken up ? */                 420   /* Thread already woken up ? */
622   retval = SOS_OK;                                421   retval = SOS_OK;
623   switch(sos_thread_get_state(thread))            422   switch(sos_thread_get_state(thread))
624     {                                             423     {
625     case SOS_THR_RUNNING:                         424     case SOS_THR_RUNNING:
626     case SOS_THR_READY:                           425     case SOS_THR_READY:
627       /* Do nothing */                            426       /* Do nothing */
628       break;                                      427       break;
629                                                   428 
630     case SOS_THR_ZOMBIE:                          429     case SOS_THR_ZOMBIE:
631       retval = -SOS_EFATAL;                       430       retval = -SOS_EFATAL;
632       break;                                      431       break;
633                                                   432 
634     default:                                      433     default:
635       retval = sos_sched_set_ready(thread);       434       retval = sos_sched_set_ready(thread);
636       break;                                      435       break;
637     }                                             436     }
638                                                   437 
639   sos_restore_IRQs(flags);                        438   sos_restore_IRQs(flags);
640                                                   439 
641   return retval;                                  440   return retval;
642 }                                              << 
643                                                << 
644                                                << 
645 void sos_thread_dump_backtrace(sos_bool_t on_c << 
646                                sos_bool_t on_b << 
647 {                                              << 
648   sos_vaddr_t stack_bottom = current_thread->k << 
649   sos_size_t stack_size    = current_thread->k << 
650                                                << 
651   static void backtracer(sos_vaddr_t PC,       << 
652                          sos_vaddr_t params,   << 
653                          sos_ui32_t depth,     << 
654                          void *custom_arg)     << 
655     {                                          << 
656       sos_ui32_t invalid = 0xffffffff, *arg1,  << 
657                                                << 
658       /* Get the address of the first 3 argume << 
659          frame. Among these arguments, 0, 1, 2 << 
660          meaningful (depending on how many arg << 
661          take). */                             << 
662       arg1 = (sos_ui32_t*)params;              << 
663       arg2 = (sos_ui32_t*)(params+4);          << 
664       arg3 = (sos_ui32_t*)(params+8);          << 
665       arg4 = (sos_ui32_t*)(params+12);         << 
666                                                << 
667       /* Make sure the addresses of these argu << 
668          stack boundaries */                   << 
669 #define INTERVAL_OK(b,v,u) ( ((b) <= (sos_vadd << 
670                              && ((sos_vaddr_t) << 
671       if (!INTERVAL_OK(stack_bottom, arg1, sta << 
672         arg1 = &invalid;                       << 
673       if (!INTERVAL_OK(stack_bottom, arg2, sta << 
674         arg2 = &invalid;                       << 
675       if (!INTERVAL_OK(stack_bottom, arg3, sta << 
676         arg3 = &invalid;                       << 
677       if (!INTERVAL_OK(stack_bottom, arg4, sta << 
678         arg4 = &invalid;                       << 
679                                                << 
680       /* Print the function context for this f << 
681       if (on_bochs)                            << 
682         sos_bochs_printf("[%d] PC=0x%x arg1=0x << 
683                          (unsigned)depth, (uns << 
684                          (unsigned)*arg1, (uns << 
685                          (unsigned)*arg3);     << 
686                                                << 
687       if (on_console)                          << 
688         sos_x86_videomem_printf(23-depth, 3,   << 
689                                 SOS_X86_VIDEO_ << 
690                                   | SOS_X86_VI << 
691                                 "[%d] PC=0x%x  << 
692                                 (unsigned)dept << 
693                                 (unsigned)*arg << 
694                                 (unsigned)*arg << 
695                                                << 
696     }                                          << 
697                                                << 
698   sos_backtrace(NULL, 15, stack_bottom, stack_ << 
699                 backtracer, NULL);             << 
700 }                                              << 
701                                                << 
702                                                << 
703                                                << 
704 /* ******************************************* << 
705  * Restricted functions                        << 
706  */                                            << 
707                                                << 
708                                                << 
709 sos_ret_t                                      << 
710 sos_thread_change_current_mm_context(struct so << 
711 {                                              << 
712   sos_ui32_t flags;                            << 
713                                                << 
714   /* Retrieve the previous mm context */       << 
715   struct sos_mm_context * prev_mm_ctxt         << 
716     = current_thread->squatted_mm_context;     << 
717                                                << 
718   /* We should either select a new squatted_mm << 
719      the default */                            << 
720   if (mm_ctxt != NULL)                         << 
721     SOS_ASSERT_FATAL(prev_mm_ctxt == NULL);    << 
722   else                                         << 
723     SOS_ASSERT_FATAL(prev_mm_ctxt != NULL);    << 
724                                                << 
725   sos_disable_IRQs(flags);                     << 
726                                                << 
727   /* Update current thread's squatted mm conte << 
728   current_thread->squatted_mm_context = mm_ctx << 
729                                                << 
730   /* Update the reference counts and switch th << 
731      needed */                                 << 
732   if (mm_ctxt != NULL)                         << 
733     {                                          << 
734       sos_mm_context_ref(mm_ctxt); /* Because  << 
735                                       the squa << 
736                                       the thre << 
737       sos_mm_context_switch_to(mm_ctxt);       << 
738     }                                          << 
739   else                                         << 
740     sos_mm_context_unref(prev_mm_ctxt); /* Bec << 
741                                            the << 
742                                            the << 
743                                                << 
744   sos_restore_IRQs(flags);                     << 
745                                                << 
746   return SOS_OK;                               << 
747 }                                              << 
748                                                << 
749                                                << 
750 void sos_thread_prepare_syscall_switch_back(st << 
751 {                                              << 
752   /* Don't preempt the current thread */       << 
753                                                << 
754   /*                                           << 
755    * Save the state of the interrupted context << 
756    *   - The list of threads correctly reflect << 
757    *     in user mode                          << 
758    *   - _prepare_mm_context() deals with the  << 
759    */                                          << 
760   current_thread->cpu_state = cpu_state;       << 
761                                                << 
762   /* Perform an MMU context switch if needed * << 
763   _prepare_mm_context((struct sos_thread*) cur << 
764 }                                              << 
765                                                << 
766                                                << 
767 void sos_thread_prepare_exception_switch_back( << 
768 {                                              << 
769   /* Don't preempt the current thread */       << 
770                                                << 
771   /*                                           << 
772    * Save the state of the interrupted context << 
773    *   - The list of threads correctly reflect << 
774    *     running in user or kernel mode        << 
775    *   - _prepare_mm_context() deals with the  << 
776    */                                          << 
777   current_thread->cpu_state = cpu_state;       << 
778                                                << 
779   /* Perform an MMU context switch if needed * << 
780   _prepare_mm_context((struct sos_thread*) cur << 
781 }                                              << 
782                                                << 
783                                                << 
784 void                                           << 
785 sos_thread_prepare_irq_servicing(struct sos_cp << 
786 {                                              << 
787   current_thread->cpu_state = interrupted_stat << 
788 }                                              << 
789                                                << 
790                                                << 
791 struct sos_cpu_state *                         << 
792 sos_thread_prepare_irq_switch_back(void)       << 
793 {                                              << 
794   struct sos_thread *myself, *next_thread;     << 
795                                                << 
796   /* In SOS, threads in kernel mode are NEVER  << 
797      interrupt handlers ! */                   << 
798   if (! sos_cpu_context_is_in_user_mode(curren << 
799     return current_thread->cpu_state;          << 
800                                                << 
801   /*                                           << 
802    * Here we are dealing only with possible pr << 
803    * in user context !                         << 
804    */                                          << 
805                                                << 
806   /* Make sure the thread actually is a user t << 
807   SOS_ASSERT_FATAL(current_thread->process !=  << 
808                                                << 
809   /* Save the state of the interrupted context << 
810   myself = (struct sos_thread*)current_thread; << 
811                                                << 
812   /* Select the next thread to run */          << 
813   next_thread = sos_reschedule(myself, FALSE); << 
814                                                << 
815   /* Perform an MMU context switch if needed * << 
816   _prepare_mm_context(next_thread);            << 
817                                                << 
818   /* Setup the next_thread's context into the  << 
819   _set_current(next_thread);                   << 
820   return next_thread->cpu_state;               << 
821 }                                                 441 }
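
As a closing usage sketch of the kernel-thread creation change (the start routine, the sos_bochs_printf error path and the priority constant are illustrative assumptions, not taken from the diff):

  /* A minimal kernel-thread body; sos_kernel_thread_start_routine_t is
     assumed here to be compatible with void (*)(void *). */
  static void demo_thread(void *arg)
  {
    /* ... do some work, then simply return: sos_cpu_kstate_init() above
       arranges for sos_thread_exit() to be called on return ... */
  }

  void spawn_demo(void)
  {
    /* Article 6.5 (right column) would be:
         sos_create_kernel_thread("demo", demo_thread, NULL);
       Article 7 (left column) adds a scheduling priority and returns NULL
       when that priority is not valid.  SOS_SCHED_PRIO_LOWEST is an assumed
       constant name (the diff truncates it to SOS_SCHED_PRIO_LOW...). */
    struct sos_thread *t =
      sos_create_kernel_thread("demo", demo_thread, NULL,
                               SOS_SCHED_PRIO_LOWEST);
    if (! t)
      sos_bochs_printf("demo thread creation failed\n");
  }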
                                                      
