SimpleOS

The LXR Cross Referencer for SOS


Diff markup

Differences between /sos/thread.c (Article 9.5, left column) and /sos/thread.c (Article 6.5, right column). Lines marked "<<" exist only in the Article 9.5 version; lines marked "!!" differ between the two versions. The left column is clipped to a fixed width, so the full text of unchanged lines appears only on the right.
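
In summary, the Article 9.5 thread.c adds user threads (create_user_thread(), sos_create_user_thread(), sos_duplicate_user_thread()), MM-context management (_prepare_mm_context(), change_current_mm_context(), sos_thread_prepare_user_space_access()), per-thread scheduling priorities (sos_thread_get_priority(), sos_thread_set_priority()) and a backtrace dumper (sos_thread_dump_backtrace()). The most visible change for existing callers is that sos_create_kernel_thread() gains a sos_sched_priority_t parameter. A minimal caller-side sketch of that change follows; demo_thread, spawn_demo and SOS_SCHED_PRIO_DEFAULT are illustrative names only, and the start routine is assumed to have the usual void (*)(void *) shape.

  /* Caller-side sketch (illustration only, not part of thread.c).
   * Article 6.5 creates a kernel thread from a name, a start routine and
   * its argument; Article 9.5 additionally takes a scheduling priority,
   * which the function validates with SOS_SCHED_PRIO_IS_VALID(). */
  static void demo_thread(void *unused_arg)
  {
    for ( ; ; )
      sos_thread_yield();                  /* available in both versions */
  }

  static void spawn_demo(void)
  {
    struct sos_thread *th;

    /* Article 6.5 call form:
     *   th = sos_create_kernel_thread("demo", demo_thread, NULL); */

    /* Article 9.5 call form: an extra sos_sched_priority_t argument */
    th = sos_create_kernel_thread("demo", demo_thread, NULL,
                                  SOS_SCHED_PRIO_DEFAULT);
    if (! th)
      sos_bochs_printf("demo: thread creation failed\n");
  }
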


001 /* Copyright (C) 2004,2005 David Decotigny        001 /* Copyright (C) 2004,2005 David Decotigny
002                                                   002 
003    This program is free software; you can redi    003    This program is free software; you can redistribute it and/or
004    modify it under the terms of the GNU Genera    004    modify it under the terms of the GNU General Public License
005    as published by the Free Software Foundatio    005    as published by the Free Software Foundation; either version 2
006    of the License, or (at your option) any lat    006    of the License, or (at your option) any later version.
007                                                   007    
008    This program is distributed in the hope tha    008    This program is distributed in the hope that it will be useful,
009    but WITHOUT ANY WARRANTY; without even the     009    but WITHOUT ANY WARRANTY; without even the implied warranty of
010    MERCHANTABILITY or FITNESS FOR A PARTICULAR    010    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
011    GNU General Public License for more details    011    GNU General Public License for more details.
012                                                   012    
013    You should have received a copy of the GNU     013    You should have received a copy of the GNU General Public License
014    along with this program; if not, write to t    014    along with this program; if not, write to the Free Software
015    Foundation, Inc., 59 Temple Place - Suite 3    015    Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
016    USA.                                           016    USA. 
017 */                                                017 */
018                                                   018 
019 #include <sos/physmem.h>                          019 #include <sos/physmem.h>
020 #include <sos/kmem_slab.h>                        020 #include <sos/kmem_slab.h>
021 #include <sos/kmalloc.h>                          021 #include <sos/kmalloc.h>
022 #include <sos/klibc.h>                            022 #include <sos/klibc.h>
023 #include <sos/list.h>                             023 #include <sos/list.h>
024 #include <sos/assert.h>                           024 #include <sos/assert.h>
025 #include <hwcore/mm_context.h>                 << 
026 #include <sos/process.h>                       << 
027                                                << 
028 #include <drivers/bochs.h>                     << 
029 #include <drivers/x86_videomem.h>              << 
030                                                   025 
031 #include <hwcore/irq.h>                           026 #include <hwcore/irq.h>
032                                                   027 
033 #include "thread.h"                               028 #include "thread.h"
034                                                   029 
035                                                   030 
036 /**                                               031 /**
037  * The size of the stack of a kernel thread       032  * The size of the stack of a kernel thread
038  */                                               033  */
039 #define SOS_THREAD_KERNEL_STACK_SIZE (1*SOS_PA    034 #define SOS_THREAD_KERNEL_STACK_SIZE (1*SOS_PAGE_SIZE)
040                                                   035 
041                                                   036 
042 /**                                               037 /**
043  * The identifier of the thread currently runn    038  * The identifier of the thread currently running on CPU.
044  *                                                039  *
045  * We only support a SINGLE processor, ie a SI    040  * We only support a SINGLE processor, ie a SINGLE thread
046  * running at any time in the system. This gre    041  * running at any time in the system. This greatly simplifies the
047  * implementation of the system, since we don'    042  * implementation of the system, since we don't have to complicate
048  * things in order to retrieve the identifier     043  * things in order to retrieve the identifier of the threads running
049  * on the CPU. On multiprocessor systems the c    044  * on the CPU. On multiprocessor systems the current_thread below is
050  * an array indexed by the id of the CPU, so t    045  * an array indexed by the id of the CPU, so that the challenge is to
051  * retrieve the identifier of the CPU. This is    046  * retrieve the identifier of the CPU. This is usually done based on
052  * the stack address (Linux implementation) or    047  * the stack address (Linux implementation) or on some form of TLS
053  * ("Thread Local Storage": can be implemented    048  * ("Thread Local Storage": can be implemented by way of LDTs for the
054  * processes, accessed through the fs or gs re    049  * processes, accessed through the fs or gs registers).
055  */                                               050  */
056 static volatile struct sos_thread *current_thr    051 static volatile struct sos_thread *current_thread = NULL;
057                                                   052 
058                                                   053 
059 /*                                                054 /*
060  * The list of threads currently in the system    055  * The list of threads currently in the system.
061  *                                                056  *
062  * @note We could have used current_thread for    057  * @note We could have used current_thread for that...
063  */                                               058  */
064 static struct sos_thread *thread_list = NULL;     059 static struct sos_thread *thread_list = NULL;
065                                                   060 
066                                                   061 
067 /**                                               062 /**
068  * The Cache of thread structures                 063  * The Cache of thread structures
069  */                                               064  */
070 static struct sos_kslab_cache *cache_thread;      065 static struct sos_kslab_cache *cache_thread;
071                                                   066 
072                                                   067 
073 /**                                            << 
074  * (Forwad declaration) Helper function to cha << 
075  * the current executing thread. Analogous to  << 
076  * sos_thread_change_current_mm_context() of a << 
077  */                                            << 
078 static sos_ret_t change_current_mm_context(str << 
079                                                << 
080                                                << 
081 struct sos_thread *sos_thread_get_current()       068 struct sos_thread *sos_thread_get_current()
082 {                                                 069 {
083   SOS_ASSERT_FATAL(current_thread->state == SO    070   SOS_ASSERT_FATAL(current_thread->state == SOS_THR_RUNNING);
084   return (struct sos_thread*)current_thread;      071   return (struct sos_thread*)current_thread;
085 }                                                 072 }
086                                                   073 
087                                                   074 
088 inline static sos_ret_t _set_current(struct so    075 inline static sos_ret_t _set_current(struct sos_thread *thr)
089 {                                                 076 {
090   SOS_ASSERT_FATAL(thr->state == SOS_THR_READY    077   SOS_ASSERT_FATAL(thr->state == SOS_THR_READY);
091   current_thread = thr;                           078   current_thread = thr;
092   current_thread->state = SOS_THR_RUNNING;        079   current_thread->state = SOS_THR_RUNNING;
093   return SOS_OK;                                  080   return SOS_OK;
094 }                                                 081 }
095                                                   082 
096                                                   083 
097 sos_ret_t sos_thread_subsystem_setup(sos_vaddr    084 sos_ret_t sos_thread_subsystem_setup(sos_vaddr_t init_thread_stack_base_addr,
098                                      sos_size_    085                                      sos_size_t init_thread_stack_size)
099 {                                                 086 {
100   struct sos_thread *myself;                      087   struct sos_thread *myself;
101                                                   088 
102   /* Allocate the cache of threads */             089   /* Allocate the cache of threads */
103   cache_thread = sos_kmem_cache_create("thread    090   cache_thread = sos_kmem_cache_create("thread",
104                                        sizeof(    091                                        sizeof(struct sos_thread),
105                                        2,         092                                        2,
106                                        0,         093                                        0,
107                                        SOS_KSL    094                                        SOS_KSLAB_CREATE_MAP
108                                        | SOS_K    095                                        | SOS_KSLAB_CREATE_ZERO);
109   if (! cache_thread)                             096   if (! cache_thread)
110     return -SOS_ENOMEM;                           097     return -SOS_ENOMEM;
111                                                   098 
112   /* Allocate a new thread structure for the c    099   /* Allocate a new thread structure for the current running thread */
113   myself = (struct sos_thread*) sos_kmem_cache    100   myself = (struct sos_thread*) sos_kmem_cache_alloc(cache_thread,
114                                                   101                                                      SOS_KSLAB_ALLOC_ATOMIC);
115   if (! myself)                                   102   if (! myself)
116     return -SOS_ENOMEM;                           103     return -SOS_ENOMEM;
117                                                   104 
118   /* Initialize the thread attributes */          105   /* Initialize the thread attributes */
119   strzcpy(myself->name, "[kinit]", SOS_THR_MAX    106   strzcpy(myself->name, "[kinit]", SOS_THR_MAX_NAMELEN);
120   myself->state           = SOS_THR_CREATED;      107   myself->state           = SOS_THR_CREATED;
121   myself->priority        = SOS_SCHED_PRIO_LOW << 
122   myself->kernel_stack_base_addr = init_thread    108   myself->kernel_stack_base_addr = init_thread_stack_base_addr;
123   myself->kernel_stack_size      = init_thread    109   myself->kernel_stack_size      = init_thread_stack_size;
124                                                   110 
125   /* Do some stack poisoning on the bottom of     111   /* Do some stack poisoning on the bottom of the stack, if needed */
126   sos_cpu_state_prepare_detect_kernel_stack_ov    112   sos_cpu_state_prepare_detect_kernel_stack_overflow(myself->cpu_state,
127                                                   113                                                      myself->kernel_stack_base_addr,
128                                                   114                                                      myself->kernel_stack_size);
129                                                   115 
130   /* Add the thread in the global list */         116   /* Add the thread in the global list */
131   list_singleton_named(thread_list, myself, gb    117   list_singleton_named(thread_list, myself, gbl_prev, gbl_next);
132                                                   118 
133   /* Ok, now pretend that the running thread i    119   /* Ok, now pretend that the running thread is ourselves */
134   myself->state = SOS_THR_READY;                  120   myself->state = SOS_THR_READY;
135   _set_current(myself);                           121   _set_current(myself);
136                                                   122 
137   return SOS_OK;                                  123   return SOS_OK;
138 }                                                 124 }
139                                                   125 
140                                                   126 
141 struct sos_thread *                               127 struct sos_thread *
142 sos_create_kernel_thread(const char *name,        128 sos_create_kernel_thread(const char *name,
143                          sos_kernel_thread_sta    129                          sos_kernel_thread_start_routine_t start_func,
144                          void *start_arg,      !! 130                          void *start_arg)
145                          sos_sched_priority_t  << 
146 {                                                 131 {
147   __label__ undo_creation;                        132   __label__ undo_creation;
148   sos_ui32_t flags;                               133   sos_ui32_t flags;
149   struct sos_thread *new_thread;                  134   struct sos_thread *new_thread;
150                                                   135 
151   if (! start_func)                               136   if (! start_func)
152     return NULL;                                  137     return NULL;
153   if (! SOS_SCHED_PRIO_IS_VALID(priority))     << 
154     return NULL;                               << 
155                                                   138 
156   /* Allocate a new thread structure for the c    139   /* Allocate a new thread structure for the current running thread */
157   new_thread                                      140   new_thread
158     = (struct sos_thread*) sos_kmem_cache_allo    141     = (struct sos_thread*) sos_kmem_cache_alloc(cache_thread,
159                                                   142                                                 SOS_KSLAB_ALLOC_ATOMIC);
160   if (! new_thread)                               143   if (! new_thread)
161     return NULL;                                  144     return NULL;
162                                                   145 
163   /* Initialize the thread attributes */          146   /* Initialize the thread attributes */
164   strzcpy(new_thread->name, ((name)?name:"[NON    147   strzcpy(new_thread->name, ((name)?name:"[NONAME]"), SOS_THR_MAX_NAMELEN);
165   new_thread->state    = SOS_THR_CREATED;         148   new_thread->state    = SOS_THR_CREATED;
166   new_thread->priority = priority;             << 
167                                                   149 
168   /* Allocate the stack for the new thread */     150   /* Allocate the stack for the new thread */
169   new_thread->kernel_stack_base_addr = sos_kma    151   new_thread->kernel_stack_base_addr = sos_kmalloc(SOS_THREAD_KERNEL_STACK_SIZE, 0);
170   new_thread->kernel_stack_size      = SOS_THR    152   new_thread->kernel_stack_size      = SOS_THREAD_KERNEL_STACK_SIZE;
171   if (! new_thread->kernel_stack_base_addr)       153   if (! new_thread->kernel_stack_base_addr)
172     goto undo_creation;                           154     goto undo_creation;
173                                                   155 
174   /* Initialize the CPU context of the new thr    156   /* Initialize the CPU context of the new thread */
175   if (SOS_OK                                      157   if (SOS_OK
176       != sos_cpu_kstate_init(& new_thread->cpu    158       != sos_cpu_kstate_init(& new_thread->cpu_state,
177                              (sos_cpu_kstate_f    159                              (sos_cpu_kstate_function_arg1_t*) start_func,
178                              (sos_ui32_t) star    160                              (sos_ui32_t) start_arg,
179                              new_thread->kerne    161                              new_thread->kernel_stack_base_addr,
180                              new_thread->kerne    162                              new_thread->kernel_stack_size,
181                              (sos_cpu_kstate_f    163                              (sos_cpu_kstate_function_arg1_t*) sos_thread_exit,
182                              (sos_ui32_t) NULL    164                              (sos_ui32_t) NULL))
183     goto undo_creation;                           165     goto undo_creation;
184                                                   166 
185   /* Add the thread in the global list */         167   /* Add the thread in the global list */
186   sos_disable_IRQs(flags);                        168   sos_disable_IRQs(flags);
187   list_add_tail_named(thread_list, new_thread,    169   list_add_tail_named(thread_list, new_thread, gbl_prev, gbl_next);
188   sos_restore_IRQs(flags);                        170   sos_restore_IRQs(flags);
189                                                   171 
190   /* Mark the thread ready */                     172   /* Mark the thread ready */
191   if (SOS_OK != sos_sched_set_ready(new_thread    173   if (SOS_OK != sos_sched_set_ready(new_thread))
192     goto undo_creation;                           174     goto undo_creation;
193                                                   175 
194   /* Normal non-erroneous end of function */      176   /* Normal non-erroneous end of function */
195   return new_thread;                              177   return new_thread;
196                                                   178 
197  undo_creation:                                   179  undo_creation:
198   if (new_thread->kernel_stack_base_addr)         180   if (new_thread->kernel_stack_base_addr)
199     sos_kfree((sos_vaddr_t) new_thread->kernel    181     sos_kfree((sos_vaddr_t) new_thread->kernel_stack_base_addr);
200   sos_kmem_cache_free((sos_vaddr_t) new_thread    182   sos_kmem_cache_free((sos_vaddr_t) new_thread);
201   return NULL;                                    183   return NULL;
202 }                                                 184 }
203                                                   185 
204                                                   186 
205 /**                                            << 
206  * Helper function to create a new user thread << 
207  * given, then the new thread will be the copy << 
208  * thread. Otherwise the thread will have its  << 
209  * initialized with the user_initial_PC/SP arg << 
210  */                                            << 
211 static struct sos_thread *                     << 
212 create_user_thread(const char *name,           << 
213                    struct sos_process *process << 
214                    const struct sos_thread * m << 
215                    const struct sos_cpu_state  << 
216                    sos_uaddr_t user_initial_PC << 
217                    sos_ui32_t  user_start_arg1 << 
218                    sos_ui32_t  user_start_arg2 << 
219                    sos_uaddr_t user_initial_SP << 
220                    sos_sched_priority_t priori << 
221 {                                              << 
222   __label__ undo_creation;                     << 
223   sos_ui32_t flags;                            << 
224   struct sos_thread *new_thread;               << 
225                                                << 
226   if (model_thread)                            << 
227     {                                          << 
228       SOS_ASSERT_FATAL(model_uctxt);           << 
229     }                                          << 
230   else                                         << 
231     {                                          << 
232       if (! SOS_SCHED_PRIO_IS_VALID(priority)) << 
233         return NULL;                           << 
234     }                                          << 
235                                                << 
236   /* For a user thread, the process must be gi << 
237   if (! process)                               << 
238     return NULL;                               << 
239                                                << 
240   /* Allocate a new thread structure for the c << 
241   new_thread                                   << 
242     = (struct sos_thread*) sos_kmem_cache_allo << 
243                                                << 
244   if (! new_thread)                            << 
245     return NULL;                               << 
246                                                << 
247   /* Initialize the thread attributes */       << 
248   strzcpy(new_thread->name, ((name)?name:"[NON << 
249   new_thread->state    = SOS_THR_CREATED;      << 
250   if (model_thread)                            << 
251     new_thread->priority = model_thread->prior << 
252   else                                         << 
253     new_thread->priority = priority;           << 
254                                                << 
255   /* Allocate the stack for the new thread */  << 
256   new_thread->kernel_stack_base_addr = sos_kma << 
257   new_thread->kernel_stack_size      = SOS_THR << 
258   if (! new_thread->kernel_stack_base_addr)    << 
259     goto undo_creation;                        << 
260                                                << 
261   /* Initialize the CPU context of the new thr << 
262   if (model_thread)                            << 
263     {                                          << 
264       if (SOS_OK                               << 
265           != sos_cpu_ustate_duplicate(& new_th << 
266                                       model_uc << 
267                                       user_sta << 
268                                       new_thre << 
269                                       new_thre << 
270         goto undo_creation;                    << 
271     }                                          << 
272   else                                         << 
273     {                                          << 
274       if (SOS_OK                               << 
275           != sos_cpu_ustate_init(& new_thread- << 
276                                  user_initial_ << 
277                                  user_start_ar << 
278                                  user_start_ar << 
279                                  user_initial_ << 
280                                  new_thread->k << 
281                                  new_thread->k << 
282         goto undo_creation;                    << 
283     }                                          << 
284                                                << 
285   /* Attach the new thread to the process */   << 
286   if (SOS_OK != sos_process_register_thread(pr << 
287     goto undo_creation;                        << 
288                                                << 
289   /* Add the thread in the global list */      << 
290   sos_disable_IRQs(flags);                     << 
291   list_add_tail_named(thread_list, new_thread, << 
292   sos_restore_IRQs(flags);                     << 
293                                                << 
294   /* Mark the thread ready */                  << 
295   if (SOS_OK != sos_sched_set_ready(new_thread << 
296     goto undo_creation;                        << 
297                                                << 
298   /* Normal non-erroneous end of function */   << 
299   return new_thread;                           << 
300                                                << 
301  undo_creation:                                << 
302   if (new_thread->kernel_stack_base_addr)      << 
303     sos_kfree((sos_vaddr_t) new_thread->kernel << 
304   sos_kmem_cache_free((sos_vaddr_t) new_thread << 
305   return NULL;                                 << 
306 }                                              << 
307                                                << 
308                                                << 
309 struct sos_thread *                            << 
310 sos_create_user_thread(const char *name,       << 
311                        struct sos_process *pro << 
312                        sos_uaddr_t user_initia << 
313                        sos_ui32_t  user_start_ << 
314                        sos_ui32_t  user_start_ << 
315                        sos_uaddr_t user_initia << 
316                        sos_sched_priority_t pr << 
317 {                                              << 
318   return create_user_thread(name, process, NUL << 
319                             user_initial_PC,   << 
320                             user_start_arg1,   << 
321                             user_start_arg2,   << 
322                             user_initial_SP,   << 
323                             priority);         << 
324 }                                              << 
325                                                << 
326                                                << 
327 /**                                            << 
328  * Create a new user thread, copy of the given << 
329  * given user context                          << 
330  */                                            << 
331 struct sos_thread *                            << 
332 sos_duplicate_user_thread(const char *name,    << 
333                           struct sos_process * << 
334                           const struct sos_thr << 
335                           const struct sos_cpu << 
336                           sos_ui32_t retval)   << 
337 {                                              << 
338   return create_user_thread(name, process, mod << 
339                             0, retval, 0, 0, 0 << 
340 }                                              << 
341                                                << 
342                                                << 
343 /**                                            << 
344  * Helper function to switch to the correct MM << 
345  * the_thread's needs.                         << 
346  *   - When switching to a user-mode thread, f << 
347  *     of the MMU                              << 
348  *   - When switching to a kernel-mode thread, << 
349  *     configuration if the thread was squatti << 
350  */                                            << 
351 static void _prepare_mm_context(struct sos_thr << 
352 {                                              << 
353   /* Going to restore a thread in user mode ?  << 
354   if (sos_cpu_context_is_in_user_mode(the_thre << 
355       == TRUE)                                 << 
356     {                                          << 
357       /* Yes: force the MMU to be correctly se << 
358          user's address space */               << 
359                                                << 
360       /* The thread should be a user thread */ << 
361       SOS_ASSERT_FATAL(the_thread->process !=  << 
362                                                << 
363       /* It should not squat any other's addre << 
364       SOS_ASSERT_FATAL(the_thread->squatted_mm << 
365                                                << 
366       /* Perform an MMU context switch if need << 
367       sos_mm_context_switch_to(sos_process_get << 
368     }                                          << 
369                                                << 
370   /* the_thread is a kernel thread squatting a << 
371      space ? */                                << 
372   else if (the_thread->squatted_mm_context !=  << 
373     sos_mm_context_switch_to(the_thread->squat << 
374 }                                              << 
375                                                << 
376                                                << 
377 /** Function called after thr has terminated.     187 /** Function called after thr has terminated. Called from inside the context
378     of another thread, interrupts disabled */     188     of another thread, interrupts disabled */
379 static void delete_thread(struct sos_thread *t    189 static void delete_thread(struct sos_thread *thr)
380 {                                                 190 {
381   sos_ui32_t flags;                               191   sos_ui32_t flags;
382                                                   192 
383   sos_disable_IRQs(flags);                        193   sos_disable_IRQs(flags);
384   list_delete_named(thread_list, thr, gbl_prev    194   list_delete_named(thread_list, thr, gbl_prev, gbl_next);
385   sos_restore_IRQs(flags);                        195   sos_restore_IRQs(flags);
386                                                   196 
387   sos_kfree((sos_vaddr_t) thr->kernel_stack_ba    197   sos_kfree((sos_vaddr_t) thr->kernel_stack_base_addr);
388                                                << 
389   /* If the thread squats an address space, re << 
390   if (thr->squatted_mm_context)                << 
391     SOS_ASSERT_FATAL(SOS_OK == change_current_ << 
392                                                << 
393   /* For a user thread: remove the thread from << 
394   if (thr->process)                            << 
395     SOS_ASSERT_FATAL(SOS_OK == sos_process_unr << 
396                                                << 
397   memset(thr, 0x0, sizeof(struct sos_thread));    198   memset(thr, 0x0, sizeof(struct sos_thread));
398   sos_kmem_cache_free((sos_vaddr_t) thr);         199   sos_kmem_cache_free((sos_vaddr_t) thr);
399 }                                                 200 }
400                                                   201 
401                                                   202 
402 void sos_thread_exit()                            203 void sos_thread_exit()
403 {                                                 204 {
404   sos_ui32_t flags;                               205   sos_ui32_t flags;
405   struct sos_thread *myself, *next_thread;        206   struct sos_thread *myself, *next_thread;
406                                                   207 
407   /* Interrupt handlers are NOT allowed to exi    208   /* Interrupt handlers are NOT allowed to exit the current thread ! */
408   SOS_ASSERT_FATAL(! sos_servicing_irq());        209   SOS_ASSERT_FATAL(! sos_servicing_irq());
409                                                   210 
410   myself = sos_thread_get_current();              211   myself = sos_thread_get_current();
411                                                   212 
412   /* Refuse to end the current executing threa    213   /* Refuse to end the current executing thread if it still holds a
413      resource ! */                                214      resource ! */
414   SOS_ASSERT_FATAL(list_is_empty_named(myself-    215   SOS_ASSERT_FATAL(list_is_empty_named(myself->kwaitq_list,
415                                        prev_en    216                                        prev_entry_for_thread,
416                                        next_en    217                                        next_entry_for_thread));
417                                                   218 
418   /* Prepare to run the next thread */            219   /* Prepare to run the next thread */
419   sos_disable_IRQs(flags);                        220   sos_disable_IRQs(flags);
420   myself->state = SOS_THR_ZOMBIE;                 221   myself->state = SOS_THR_ZOMBIE;
421   next_thread = sos_reschedule(myself, FALSE);    222   next_thread = sos_reschedule(myself, FALSE);
422                                                   223 
423   /* Make sure that the next_thread is valid *    224   /* Make sure that the next_thread is valid */
424   sos_cpu_state_detect_kernel_stack_overflow(n    225   sos_cpu_state_detect_kernel_stack_overflow(next_thread->cpu_state,
425                                              n    226                                              next_thread->kernel_stack_base_addr,
426                                              n    227                                              next_thread->kernel_stack_size);
427                                                   228 
428   /*                                           << 
429    * Perform an MMU context switch if needed   << 
430    */                                          << 
431   _prepare_mm_context(next_thread);            << 
432                                                << 
433   /* No need for sos_restore_IRQs() here becau    229   /* No need for sos_restore_IRQs() here because the IRQ flag will be
434      restored to that of the next thread upon     230      restored to that of the next thread upon context switch */
435                                                   231 
436   /* Immediate switch to next thread */           232   /* Immediate switch to next thread */
437   _set_current(next_thread);                      233   _set_current(next_thread);
438   sos_cpu_context_exit_to(next_thread->cpu_sta    234   sos_cpu_context_exit_to(next_thread->cpu_state,
439                           (sos_cpu_kstate_func    235                           (sos_cpu_kstate_function_arg1_t*) delete_thread,
440                           (sos_ui32_t) myself)    236                           (sos_ui32_t) myself);
441 }                                                 237 }
442                                                   238 
443                                                   239 
444 sos_sched_priority_t sos_thread_get_priority(s << 
445 {                                              << 
446   if (! thr)                                   << 
447     thr = (struct sos_thread*)current_thread;  << 
448                                                << 
449   return thr->priority;                        << 
450 }                                              << 
451                                                << 
452                                                << 
453 sos_thread_state_t sos_thread_get_state(struct    240 sos_thread_state_t sos_thread_get_state(struct sos_thread *thr)
454 {                                                 241 {
455   if (! thr)                                      242   if (! thr)
456     thr = (struct sos_thread*)current_thread;     243     thr = (struct sos_thread*)current_thread;
457                                                   244 
458   return thr->state;                              245   return thr->state;
459 }                                                 246 }
460                                                   247 
461                                                   248 
462 typedef enum { YIELD_MYSELF, BLOCK_MYSELF } sw    249 typedef enum { YIELD_MYSELF, BLOCK_MYSELF } switch_type_t;
463 /**                                               250 /**
464  * Helper function to initiate a context switc    251  * Helper function to initiate a context switch in case the current
465  * thread becomes blocked, waiting for a timeo    252  * thread becomes blocked, waiting for a timeout, or calls yield.
466  */                                               253  */
467 static sos_ret_t _switch_to_next_thread(switch    254 static sos_ret_t _switch_to_next_thread(switch_type_t operation)
468 {                                                 255 {
469   struct sos_thread *myself, *next_thread;        256   struct sos_thread *myself, *next_thread;
470                                                   257 
471   SOS_ASSERT_FATAL(current_thread->state == SO    258   SOS_ASSERT_FATAL(current_thread->state == SOS_THR_RUNNING);
472                                                   259 
473   /* Interrupt handlers are NOT allowed to blo    260   /* Interrupt handlers are NOT allowed to block ! */
474   SOS_ASSERT_FATAL(! sos_servicing_irq());        261   SOS_ASSERT_FATAL(! sos_servicing_irq());
475                                                   262 
476   myself = (struct sos_thread*)current_thread;    263   myself = (struct sos_thread*)current_thread;
477                                                   264 
478   /* Make sure that if we are to be marked "BL    265   /* Make sure that if we are to be marked "BLOCKED", we have any
479      reason of effectively being blocked */       266      reason of effectively being blocked */
480   if (BLOCK_MYSELF == operation)                  267   if (BLOCK_MYSELF == operation)
481     {                                             268     {
482       myself->state = SOS_THR_BLOCKED;            269       myself->state = SOS_THR_BLOCKED;
483     }                                             270     }
484                                                   271 
485   /* Identify the next thread */                  272   /* Identify the next thread */
486   next_thread = sos_reschedule(myself, YIELD_M    273   next_thread = sos_reschedule(myself, YIELD_MYSELF == operation);
487                                                   274 
488   /* Avoid context switch if the context does     275   /* Avoid context switch if the context does not change */
489   if (myself != next_thread)                      276   if (myself != next_thread)
490     {                                             277     {
491       /* Sanity checks for the next thread */     278       /* Sanity checks for the next thread */
492       sos_cpu_state_detect_kernel_stack_overfl    279       sos_cpu_state_detect_kernel_stack_overflow(next_thread->cpu_state,
493                                                   280                                                  next_thread->kernel_stack_base_addr,
494                                                   281                                                  next_thread->kernel_stack_size);
495                                                   282 
496       /*                                       << 
497        * Perform an MMU context switch if need << 
498        */                                      << 
499       _prepare_mm_context(next_thread);        << 
500                                                   283 
501       /*                                          284       /*
502        * Actual CPU context switch                285        * Actual CPU context switch
503        */                                         286        */
504       _set_current(next_thread);                  287       _set_current(next_thread);
505       sos_cpu_context_switch(& myself->cpu_sta    288       sos_cpu_context_switch(& myself->cpu_state, next_thread->cpu_state);
506                                                   289       
507       /* Back here ! */                           290       /* Back here ! */
508       SOS_ASSERT_FATAL(current_thread == mysel    291       SOS_ASSERT_FATAL(current_thread == myself);
509       SOS_ASSERT_FATAL(current_thread->state =    292       SOS_ASSERT_FATAL(current_thread->state == SOS_THR_RUNNING);
510     }                                             293     }
511   else                                            294   else
512     {                                             295     {
513       /* No context switch but still update ID    296       /* No context switch but still update ID of current thread */
514       _set_current(next_thread);                  297       _set_current(next_thread);
515     }                                             298     }
516                                                   299 
517   return SOS_OK;                                  300   return SOS_OK;
518 }                                                 301 }
519                                                   302 
520                                                   303 
521 /**                                            << 
522  * Helper function to change the thread's prio << 
523  * waitqueues associated with the thread.      << 
524  */                                            << 
525 static sos_ret_t _change_waitq_priorities(stru << 
526                                           sos_ << 
527 {                                              << 
528   struct sos_kwaitq_entry *kwq_entry;          << 
529   int nb_waitqs;                               << 
530                                                << 
531   list_foreach_forward_named(thr->kwaitq_list, << 
532                              prev_entry_for_th << 
533     {                                          << 
534       SOS_ASSERT_FATAL(SOS_OK == sos_kwaitq_ch << 
535                                                << 
536                                                << 
537     }                                          << 
538                                                << 
539   return SOS_OK;                               << 
540 }                                              << 
541                                                << 
542                                                << 
543 sos_ret_t sos_thread_set_priority(struct sos_t << 
544                                    sos_sched_p << 
545 {                                              << 
546   __label__ exit_set_prio;                     << 
547   sos_ui32_t flags;                            << 
548   sos_ret_t retval;                            << 
549                                                << 
550                                                << 
551   if (! SOS_SCHED_PRIO_IS_VALID(priority))     << 
552     return -SOS_EINVAL;                        << 
553                                                << 
554   if (! thr)                                   << 
555     thr = (struct sos_thread*)current_thread;  << 
556                                                << 
557   sos_disable_IRQs(flags);                     << 
558                                                << 
559   /* Signal kwaitq subsystem that the priority << 
560      the waitq it is waiting in should be upda << 
561   retval = _change_waitq_priorities(thr, prior << 
562   if (SOS_OK != retval)                        << 
563     goto exit_set_prio;                        << 
564                                                << 
565   /* Signal scheduler that the thread, current << 
566      should take into account the change of pr << 
567   if (SOS_THR_READY == thr->state)             << 
568     retval = sos_sched_change_priority(thr, pr << 
569                                                << 
570   /* Update priority */                        << 
571   thr->priority = priority;                    << 
572                                                << 
573  exit_set_prio:                                << 
574   sos_restore_IRQs(flags);                     << 
575   return retval;                               << 
576 }                                              << 
577                                                << 
578                                                << 
579 sos_ret_t sos_thread_yield()                      304 sos_ret_t sos_thread_yield()
580 {                                                 305 {
581   sos_ui32_t flags;                               306   sos_ui32_t flags;
582   sos_ret_t retval;                               307   sos_ret_t retval;
583                                                   308 
584   sos_disable_IRQs(flags);                        309   sos_disable_IRQs(flags);
585                                                   310 
586   retval = _switch_to_next_thread(YIELD_MYSELF    311   retval = _switch_to_next_thread(YIELD_MYSELF);
587                                                   312 
588   sos_restore_IRQs(flags);                        313   sos_restore_IRQs(flags);
589   return retval;                                  314   return retval;
590 }                                                 315 }
591                                                   316 
592                                                   317 
593 /**                                               318 /**
594  * Internal sleep timeout management              319  * Internal sleep timeout management
595  */                                               320  */
596 struct sleep_timeout_params                       321 struct sleep_timeout_params
597 {                                                 322 {
598   struct sos_thread *thread_to_wakeup;            323   struct sos_thread *thread_to_wakeup;
599   sos_bool_t timeout_triggered;                   324   sos_bool_t timeout_triggered;
600 };                                                325 };
601                                                   326 
602                                                   327 
603 /**                                               328 /**
604  * Callback called when a timeout happened        329  * Callback called when a timeout happened
605  */                                               330  */
606 static void sleep_timeout(struct sos_timeout_a    331 static void sleep_timeout(struct sos_timeout_action *act)
607 {                                                 332 {
608   struct sleep_timeout_params *sleep_timeout_p    333   struct sleep_timeout_params *sleep_timeout_params
609     = (struct sleep_timeout_params*) act->rout    334     = (struct sleep_timeout_params*) act->routine_data;
610                                                   335 
611   /* Signal that we have been woken up by the     336   /* Signal that we have been woken up by the timeout */
612   sleep_timeout_params->timeout_triggered = TR    337   sleep_timeout_params->timeout_triggered = TRUE;
613                                                   338 
614   /* Mark the thread ready */                     339   /* Mark the thread ready */
615   SOS_ASSERT_FATAL(SOS_OK ==                      340   SOS_ASSERT_FATAL(SOS_OK ==
616                    sos_thread_force_unblock(sl    341                    sos_thread_force_unblock(sleep_timeout_params
617                                              -    342                                              ->thread_to_wakeup));
618 }                                                 343 }
619                                                   344 
620                                                   345 
621 sos_ret_t sos_thread_sleep(struct sos_time *ti    346 sos_ret_t sos_thread_sleep(struct sos_time *timeout)
622 {                                                 347 {
623   sos_ui32_t flags;                               348   sos_ui32_t flags;
624   struct sleep_timeout_params sleep_timeout_pa    349   struct sleep_timeout_params sleep_timeout_params;
625   struct sos_timeout_action timeout_action;       350   struct sos_timeout_action timeout_action;
626   sos_ret_t retval;                               351   sos_ret_t retval;
627                                                   352 
628   /* Block forever if no timeout is given */      353   /* Block forever if no timeout is given */
629   if (NULL == timeout)                            354   if (NULL == timeout)
630     {                                             355     {
631       sos_disable_IRQs(flags);                    356       sos_disable_IRQs(flags);
632       retval = _switch_to_next_thread(BLOCK_MY    357       retval = _switch_to_next_thread(BLOCK_MYSELF);
633       sos_restore_IRQs(flags);                    358       sos_restore_IRQs(flags);
634                                                   359 
635       return retval;                              360       return retval;
636     }                                             361     }
637                                                   362 
638   /* Initialize the timeout action */             363   /* Initialize the timeout action */
639   sos_time_init_action(& timeout_action);         364   sos_time_init_action(& timeout_action);
640                                                   365 
641   /* Prepare parameters used by the sleep time    366   /* Prepare parameters used by the sleep timeout callback */
642   sleep_timeout_params.thread_to_wakeup           367   sleep_timeout_params.thread_to_wakeup 
643     = (struct sos_thread*)current_thread;         368     = (struct sos_thread*)current_thread;
644   sleep_timeout_params.timeout_triggered = FAL    369   sleep_timeout_params.timeout_triggered = FALSE;
645                                                   370 
646   sos_disable_IRQs(flags);                        371   sos_disable_IRQs(flags);
647                                                   372 
648   /* Now program the timeout ! */                 373   /* Now program the timeout ! */
649   SOS_ASSERT_FATAL(SOS_OK ==                      374   SOS_ASSERT_FATAL(SOS_OK ==
650                    sos_time_register_action_re    375                    sos_time_register_action_relative(& timeout_action,
651                                                   376                                                      timeout,
652                                                   377                                                      sleep_timeout,
653                                                   378                                                      & sleep_timeout_params));
654                                                   379 
655   /* Prepare to block: wait for sleep_timeout(    380   /* Prepare to block: wait for sleep_timeout() to wakeup us in the
656      timeout kwaitq, or for someone to wake us    381      timeout kwaitq, or for someone to wake us up in any other
657      waitq */                                     382      waitq */
658   retval = _switch_to_next_thread(BLOCK_MYSELF    383   retval = _switch_to_next_thread(BLOCK_MYSELF);
659   /* Unblocked by something ! */                  384   /* Unblocked by something ! */
660                                                   385 
661   /* Unblocked by timeout ? */                    386   /* Unblocked by timeout ? */
662   if (sleep_timeout_params.timeout_triggered)     387   if (sleep_timeout_params.timeout_triggered)
663     {                                             388     {
664       /* Yes */                                   389       /* Yes */
665       SOS_ASSERT_FATAL(sos_time_is_zero(& time    390       SOS_ASSERT_FATAL(sos_time_is_zero(& timeout_action.timeout));
666       retval = SOS_OK;                            391       retval = SOS_OK;
667     }                                             392     }
668   else                                            393   else
669     {                                             394     {
670       /* No: We have probably been woken up wh    395       /* No: We have probably been woken up while in some other
671          kwaitq */                                396          kwaitq */
672       SOS_ASSERT_FATAL(SOS_OK == sos_time_unre    397       SOS_ASSERT_FATAL(SOS_OK == sos_time_unregister_action(& timeout_action));
673       retval = -SOS_EINTR;                        398       retval = -SOS_EINTR;
674     }                                             399     }
675                                                   400 
676   sos_restore_IRQs(flags);                        401   sos_restore_IRQs(flags);
677                                                   402 
678   /* Update the remaining timeout */              403   /* Update the remaining timeout */
679   memcpy(timeout, & timeout_action.timeout, si    404   memcpy(timeout, & timeout_action.timeout, sizeof(struct sos_time));
680                                                   405 
681   return retval;                                  406   return retval;
682 }                                                 407 }
683                                                   408 
684                                                   409 
685 sos_ret_t sos_thread_force_unblock(struct sos_    410 sos_ret_t sos_thread_force_unblock(struct sos_thread *thread)
686 {                                                 411 {
687   sos_ret_t retval;                               412   sos_ret_t retval;
688   sos_ui32_t flags;                               413   sos_ui32_t flags;
689                                                   414 
690   if (! thread)                                   415   if (! thread)
691     return -SOS_EINVAL;                           416     return -SOS_EINVAL;
692                                                   417   
693   sos_disable_IRQs(flags);                        418   sos_disable_IRQs(flags);
694                                                   419 
695   /* Thread already woken up ? */                 420   /* Thread already woken up ? */
696   retval = SOS_OK;                                421   retval = SOS_OK;
697   switch(sos_thread_get_state(thread))            422   switch(sos_thread_get_state(thread))
698     {                                             423     {
699     case SOS_THR_RUNNING:                         424     case SOS_THR_RUNNING:
700     case SOS_THR_READY:                           425     case SOS_THR_READY:
701       /* Do nothing */                            426       /* Do nothing */
702       break;                                      427       break;
703                                                   428 
704     case SOS_THR_ZOMBIE:                          429     case SOS_THR_ZOMBIE:
705       retval = -SOS_EFATAL;                       430       retval = -SOS_EFATAL;
706       break;                                      431       break;
707                                                   432 
708     default:                                      433     default:
709       retval = sos_sched_set_ready(thread);       434       retval = sos_sched_set_ready(thread);
710       break;                                      435       break;
711     }                                             436     }
712                                                   437 
713   sos_restore_IRQs(flags);                        438   sos_restore_IRQs(flags);
714                                                   439 
715   return retval;                                  440   return retval;
716 }                                              << 
717                                                << 
718                                                << 
void sos_thread_dump_backtrace(sos_bool_t on_console,
                               sos_bool_t on_bochs)
{
  sos_vaddr_t stack_bottom = current_thread->kernel_stack_base_addr;
  sos_size_t stack_size    = current_thread->kernel_stack_size;

  void backtracer(sos_vaddr_t PC,
                  sos_vaddr_t params,
                  sos_ui32_t depth,
                  void *custom_arg)
    {
      sos_ui32_t invalid = 0xffffffff, *arg1, *arg2, *arg3, *arg4;

      /* Get the address of the first 3 arguments from the stack
         frame. Among these arguments, 0, 1, 2 or 3 of them may be
         meaningful (depending on how many arguments the function may
         take). */
      arg1 = (sos_ui32_t*)params;
      arg2 = (sos_ui32_t*)(params+4);
      arg3 = (sos_ui32_t*)(params+8);
      arg4 = (sos_ui32_t*)(params+12);

      /* Make sure the addresses of these arguments are inside the
         stack boundaries */
#define INTERVAL_OK(b,v,u) ( ((b) <= (sos_vaddr_t)(v)) \
                             && ((sos_vaddr_t)(v) < (u)) )
      if (!INTERVAL_OK(stack_bottom, arg1, stack_bottom + stack_size))
        arg1 = &invalid;
      if (!INTERVAL_OK(stack_bottom, arg2, stack_bottom + stack_size))
        arg2 = &invalid;
      if (!INTERVAL_OK(stack_bottom, arg3, stack_bottom + stack_size))
        arg3 = &invalid;
      if (!INTERVAL_OK(stack_bottom, arg4, stack_bottom + stack_size))
        arg4 = &invalid;

      /* Print the function context for this frame */
      if (on_bochs)
        sos_bochs_printf("[%d] PC=0x%x arg1=0x%x arg2=0x%x arg3=0x%x\n",
                         (unsigned)depth, (unsigned)PC,
                         (unsigned)*arg1, (unsigned)*arg2,
                         (unsigned)*arg3);

      if (on_console)
        sos_x86_videomem_printf(23-depth, 3,
                                SOS_X86_VIDEO_FG_YELLOW
                                  | SOS_X86_VIDEO_BG_BLUE,
                                "[%d] PC=0x%x arg1=0x%x arg2=0x%x arg3=0x%x arg4=0x%x",
                                (unsigned)depth, (unsigned)PC,
                                (unsigned)*arg1, (unsigned)*arg2,
                                (unsigned)*arg3, (unsigned)*arg4);

    }

  sos_backtrace(NULL, 15, stack_bottom, stack_size,
                backtracer, NULL);
}
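/* Note: the 'backtracer' routine above is a GCC nested function (a
   GNU C extension): it captures stack_bottom, stack_size, on_console
   and on_bochs from the enclosing function, which is why it can be
   passed to sos_backtrace() with its custom_arg parameter left unused
   (NULL). */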


/* **********************************************
 * Restricted functions
 */

static sos_ret_t
change_current_mm_context(struct sos_mm_context *mm_ctxt)
{
  /* Retrieve the previous mm context */
  struct sos_mm_context * prev_mm_ctxt
    = current_thread->squatted_mm_context;

  /* Update current thread's squatted mm context */
  current_thread->squatted_mm_context = mm_ctxt;

  /* Update the reference counts and switch the MMU configuration if
     needed */
  if (mm_ctxt != NULL)
    {
      sos_mm_context_ref(mm_ctxt); /* Because it is now referenced as
                                      the squatted mm context of
                                      the thread */
      sos_mm_context_switch_to(mm_ctxt);
    }
  else
    sos_mm_context_unref(prev_mm_ctxt); /* Because it is not referenced as
                                           the squatted mm context of
                                           the thread anymore */

  return SOS_OK;
}

sos_ret_t
sos_thread_prepare_user_space_access(struct sos_umem_vmm_as * dest_as,
                                     sos_vaddr_t fixup_retvaddr)
{
  sos_ret_t  retval;
  sos_ui32_t flags;

  if (! dest_as)
    {
      /* Thread is not a user thread: do nothing */
      if (! current_thread->process)
        return -SOS_EINVAL;

      dest_as = sos_process_get_address_space(current_thread->process);
    }
  else
    /* Don't allow access to an address space different from that
       of the current thread if the page faults are to be fixed up */
    SOS_ASSERT_FATAL(! fixup_retvaddr);

  sos_disable_IRQs(flags);
  SOS_ASSERT_FATAL(NULL == current_thread->squatted_mm_context);
  SOS_ASSERT_FATAL(0 == current_thread->fixup_uaccess.return_vaddr);

  /* Change the MMU configuration and init the fixup return address */
  retval = change_current_mm_context(sos_umem_vmm_get_mm_context(dest_as));
  if (SOS_OK == retval)
    {
      current_thread->fixup_uaccess.return_vaddr  = fixup_retvaddr;
      current_thread->fixup_uaccess.faulted_uaddr = 0;
    }

  sos_restore_IRQs(flags);
  return retval;
}

sos_ret_t
sos_thread_end_user_space_access(void)
{
  sos_ret_t  retval;
  sos_ui32_t flags;

  sos_disable_IRQs(flags);
  SOS_ASSERT_FATAL(NULL != current_thread->squatted_mm_context);

  /* Don't impose anything regarding the current mm_context */
  retval = change_current_mm_context(NULL);
  current_thread->fixup_uaccess.return_vaddr  = 0;
  current_thread->fixup_uaccess.faulted_uaddr = 0;

  sos_restore_IRQs(flags);
  return retval;
}
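
/*
 * Illustrative sketch (not part of the original SOS code): the
 * expected bracketing when kernel code needs to touch the current
 * process' user-space memory through the two functions above. The
 * helper name and the use of memcpy() from <sos/klibc.h> are
 * assumptions; passing NULL as dest_as selects the current process'
 * address space, and passing 0 as fixup_retvaddr means that no
 * page-fault fixup is armed (a faulting user address would then be
 * fatal).
 *
 *   static sos_ret_t copy_to_current_user(sos_uaddr_t user_dst,
 *                                         const void *kernel_src,
 *                                         sos_size_t len)
 *   {
 *     sos_ret_t retval;
 *
 *     // Switch the MMU to the current process' mm_context
 *     retval = sos_thread_prepare_user_space_access(NULL, 0);
 *     if (SOS_OK != retval)
 *       return retval;
 *
 *     // Access user memory while the mm_context is "squatted"
 *     memcpy((void*)user_dst, kernel_src, len);
 *
 *     // Always release the squatted mm_context afterwards
 *     return sos_thread_end_user_space_access();
 *   }
 */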


void sos_thread_prepare_syscall_switch_back(struct sos_cpu_state *cpu_state)
{
  /* Don't preempt the current thread */

  /*
   * Save the state of the interrupted context to make sure that:
   *   - The list of threads correctly reflects that the thread is running
   *     in user mode
   *   - _prepare_mm_context() deals with the correct mm_context
   */
  current_thread->cpu_state = cpu_state;

  /* Perform an MMU context switch if needed */
  _prepare_mm_context((struct sos_thread*) current_thread);
}


void sos_thread_prepare_exception_switch_back(struct sos_cpu_state *cpu_state)
{
  /* Don't preempt the current thread */

  /*
   * Save the state of the interrupted context to make sure that:
   *   - The list of threads correctly reflects that the thread is
   *     running in user or kernel mode
   *   - _prepare_mm_context() deals with the correct mm_context
   */
  current_thread->cpu_state = cpu_state;

  /* Perform an MMU context switch if needed */
  _prepare_mm_context((struct sos_thread*) current_thread);
}
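/* Note: unlike the IRQ path below, the two switch-back routines above
   never trigger a reschedule: they only record the interrupted CPU
   state and let _prepare_mm_context() resynchronize the MMU
   configuration for the current thread. */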


void
sos_thread_prepare_irq_servicing(struct sos_cpu_state *interrupted_state)
{
  current_thread->cpu_state = interrupted_state;
}


struct sos_cpu_state *
sos_thread_prepare_irq_switch_back(void)
{
  struct sos_thread *myself, *next_thread;

  /* In SOS, threads in kernel mode are NEVER preempted from the
     interrupt handlers ! */
  if (! sos_cpu_context_is_in_user_mode(current_thread->cpu_state))
    return current_thread->cpu_state;

  /*
   * Here we are dealing only with possible preemption of user threads
   * in user context !
   */

  /* Make sure the thread actually is a user thread */
  SOS_ASSERT_FATAL(current_thread->process != NULL);

  /* Save the state of the interrupted context */
  myself = (struct sos_thread*)current_thread;

  /* Select the next thread to run */
  next_thread = sos_reschedule(myself, FALSE);

  /* Perform an MMU context switch if needed */
  _prepare_mm_context(next_thread);

  /* Setup the next_thread's context into the CPU */
  _set_current(next_thread);
  return next_thread->cpu_state;
}
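
/*
 * Illustrative note (not part of the original SOS code): the pair
 * sos_thread_prepare_irq_servicing() / sos_thread_prepare_irq_switch_back()
 * is meant to be driven by the low-level IRQ wrappers. A plausible
 * calling sequence, with the wrapper-side details assumed:
 *   1. On IRQ entry, once the interrupted CPU state has been saved on
 *      the kernel stack, call sos_thread_prepare_irq_servicing(state).
 *   2. Run the interrupt handler proper.
 *   3. On IRQ exit, call sos_thread_prepare_irq_switch_back() and
 *      restore the CPU state it returns; it may belong to another
 *      (user) thread when a reschedule took place.
 */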
                                                      
