The LXR Cross Referencer for SOS


Diff markup

Differences between /sos/thread.h (Article 7.5) and /sos/thread.h (Article 7)


001 /* Copyright (C) 2004,2005 David Decotigny        001 /* Copyright (C) 2004,2005 David Decotigny
002                                                   002 
003    This program is free software; you can redi    003    This program is free software; you can redistribute it and/or
004    modify it under the terms of the GNU Genera    004    modify it under the terms of the GNU General Public License
005    as published by the Free Software Foundatio    005    as published by the Free Software Foundation; either version 2
006    of the License, or (at your option) any lat    006    of the License, or (at your option) any later version.
007                                                   007    
008    This program is distributed in the hope tha    008    This program is distributed in the hope that it will be useful,
009    but WITHOUT ANY WARRANTY; without even the     009    but WITHOUT ANY WARRANTY; without even the implied warranty of
010    MERCHANTABILITY or FITNESS FOR A PARTICULAR    010    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
011    GNU General Public License for more details    011    GNU General Public License for more details.
012                                                   012    
013    You should have received a copy of the GNU     013    You should have received a copy of the GNU General Public License
014    along with this program; if not, write to t    014    along with this program; if not, write to the Free Software
015    Foundation, Inc., 59 Temple Place - Suite 3    015    Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
016    USA.                                           016    USA. 
017 */                                                017 */
018 #ifndef _SOS_THREAD_H_                            018 #ifndef _SOS_THREAD_H_
019 #define _SOS_THREAD_H_                            019 #define _SOS_THREAD_H_
020                                                   020 
021 /**                                               021 /**
022  * @file thread.h                                 022  * @file thread.h
023  *                                                023  *
024  * SOS Thread management API                      024  * SOS Thread management API
025  */                                               025  */
026                                                   026 
027 #include <sos/errno.h>                            027 #include <sos/errno.h>
028                                                   028 
029 /* Forward declaration */                         029 /* Forward declaration */
030 struct sos_thread;                                030 struct sos_thread;
031                                                   031 
032 #include <hwcore/cpu_context.h>                   032 #include <hwcore/cpu_context.h>
033 #include <sos/sched.h>                            033 #include <sos/sched.h>
034 #include <sos/kwaitq.h>                           034 #include <sos/kwaitq.h>
035 #include <sos/time.h>                             035 #include <sos/time.h>
036 #include <sos/process.h>                          036 #include <sos/process.h>
037 #include <sos/umem_vmm.h>                      << 
038                                                   037 
039 /**                                               038 /**
040  * The possible states of a valid thread          039  * The possible states of a valid thread
041  */                                               040  */
042 typedef enum { SOS_THR_CREATED, /**< Thread cr    041 typedef enum { SOS_THR_CREATED, /**< Thread created, not fully initialized */
043                SOS_THR_READY,   /**< Thread fu    042                SOS_THR_READY,   /**< Thread fully initialized or
044                                      waiting f    043                                      waiting for CPU after having been
045                                      blocked o    044                                      blocked or preempted */
046                SOS_THR_RUNNING, /**< Thread cu    045                SOS_THR_RUNNING, /**< Thread currently running on CPU */
047                SOS_THR_BLOCKED, /**< Thread wa    046                SOS_THR_BLOCKED, /**< Thread waiting for I/O (+ in at LEAST
048                                      one kwait    047                                      one kwaitq) and/or sleeping (+ in NO
049                                      kwaitq) *    048                                      kwaitq) */
050                SOS_THR_ZOMBIE,  /**< Thread te    049                SOS_THR_ZOMBIE,  /**< Thread terminated execution, waiting to
051                                      be delete    050                                      be deleted by kernel */
052              } sos_thread_state_t;                051              } sos_thread_state_t;
053                                                   052 
054                                                   053 
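For debugging output it can help to print these states by name. The helper below is a minimal sketch, not part of the SOS API; it assumes nothing beyond the enum constants listed above.

/* Illustrative helper (not declared by thread.h): map a thread state
   to a printable name, e.g. for console dumps. */
static const char * thread_state_name(sos_thread_state_t state)
{
  switch (state)
    {
    case SOS_THR_CREATED: return "CREATED";
    case SOS_THR_READY:   return "READY";
    case SOS_THR_RUNNING: return "RUNNING";
    case SOS_THR_BLOCKED: return "BLOCKED";
    case SOS_THR_ZOMBIE:  return "ZOMBIE";
    default:              return "(invalid)";
    }
}
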
055 /**                                               054 /**
056  * TCB (Thread Control Block): structure descr    055  * TCB (Thread Control Block): structure describing a thread. Don't
057  * access these fields directly: prefer using     056  * access these fields directly: prefer using the accessor functions
058  * below.                                         057  * below.
059  */                                               058  */
060 struct sos_thread                                 059 struct sos_thread
061 {                                                 060 {
062 #define SOS_THR_MAX_NAMELEN 32                    061 #define SOS_THR_MAX_NAMELEN 32
063   char name[SOS_THR_MAX_NAMELEN];                 062   char name[SOS_THR_MAX_NAMELEN];
064                                                   063 
065   sos_thread_state_t  state;                      064   sos_thread_state_t  state;
066   sos_sched_priority_t priority;                  065   sos_sched_priority_t priority;
067                                                   066 
068   /**                                             067   /**
069    * The hardware context of the thread.          068    * The hardware context of the thread.
070    *                                              069    *
071    * It will reflect the CPU state of the thre    070    * It will reflect the CPU state of the thread:
072    *  - From an interrupt handler: the state o    071    *  - From an interrupt handler: the state of the thread at the time
073    *    of the OUTERMOST irq. An IRQ is not al    072    *    of the OUTERMOST irq. An IRQ is not allowed to make context
074    *    switches, so this context will remain     073    *    switches, so this context will remain valid from the beginning of
075    *    the outermost IRQ handler to the end o    074    *    the outermost IRQ handler to the end of it, no matter if there
076    *    are other IRQ handlers nesting in one     075    *    are other IRQ handlers nesting in one another. You may safely
077    *    use it from IRQ handlers to query the     076    *    use it from IRQ handlers to query the state of the interrupted
078    *    thread, no matter if there has been ot    077    *    thread, no matter if there have been other IRQ handlers
079    *    executing meanwhile.                      078    *    executing meanwhile.
080    *  - From normal kernel code, exceptions an    079    *  - From normal kernel code, exceptions and syscall: the state of
081    *    the thread the last time there was a c    080    *    the thread the last time there was a context switch from this
082    *    thread to another one. Thus this field    081    *    thread to another one. Thus this field WON'T reflect the
083    *    current thread's cpu_state in these ca    082    *    current thread's cpu_state in these cases. So, in these cases,
084    *    simply DO NOT USE IT outside thread.c     083    *    simply DO NOT USE IT outside thread.c ! Note: for syscall and
085    *    exception handlers, the VALID state of    084    *    exception handlers, the VALID state of the interrupted thread is
086    *    passed as an argument to the handlers.    085    *    passed as an argument to the handlers.
087    */                                             086    */
088   struct sos_cpu_state *cpu_state;                087   struct sos_cpu_state *cpu_state;
089                                                   088 
090   /* Kernel stack parameters */                   089   /* Kernel stack parameters */
091   sos_vaddr_t kernel_stack_base_addr;             090   sos_vaddr_t kernel_stack_base_addr;
092   sos_size_t  kernel_stack_size;                  091   sos_size_t  kernel_stack_size;
093                                                   092 
094   /* Process this thread belongs to. Always NU    093   /* Process this thread belongs to. Always NULL for a kernel
095      thread */                                    094      thread */
096   struct sos_process *process;                    095   struct sos_process *process;
097                                                   096 
098   /**                                             097   /**
099    * Address space currently "squatted" by the    098    * Address space currently "squatted" by the thread, or the one that used to be
100    * active when the thread was interrupted/pr    099    * active when the thread was interrupted/preempted. This is the MMU
101    * configuration expected before the cpu_sta    100    * configuration expected before the cpu_state of the thread is
102    * restored on CPU.                             101    * restored on CPU.
103    *   - For kernel threads: should normally b    102    *   - For kernel threads: should normally be NULL, meaning that the
104    *     thread will squat the current mm_cont    103    *     thread will squat the current mm_context currently set in the
105    *     MMU. Might be NON NULL when a kernel     104    *     MMU. Might be NON NULL when a kernel thread squats a given
106    *     process to manipulate its address spa    105    *     process to manipulate its address space.
107    *   - For user threads: should normally be     106    *   - For user threads: should normally be NULL. More precisely:
108    *       - in user mode: the thread->process    107    *       - in user mode: the thread->process.mm_context is ALWAYS
109    *         set on MMU. squatted_mm_context i    108    *         set on MMU. squatted_mm_context is ALWAYS NULL in this
110    *         situation, meaning that the threa    109    *         situation, meaning that the thread in user mode uses its
111    *         process-space as expected            110    *         process-space as expected
112    *       - in kernel mode: NULL means that w    111    *       - in kernel mode: NULL means that we keep on using the
113    *         mm_context currently set on MMU,     112    *         mm_context currently set on MMU, which might be the
114    *         mm_context of another process. Th    113    *         mm_context of another process. This is natural since a
115    *         thread in kernel mode normally on    114    *         thread in kernel mode normally only uses data in kernel
116    *         space. BTW, this limits the numbe    115    *         space. BTW, this limits the number of TLB flushes. However,
117    *         there are exceptions where this s    116    *         there are exceptions where this squatted_mm_context will
118    *         NOT be NULL. One is the copy_from    117    *         NOT be NULL. One is the copy_from/to_user API, which can
119    *         force the effective mm_context so    118    *         force the effective mm_context so that the MMU will be
120    *         (re)configured upon every context    119    *         (re)configured upon every context switch to the thread to match
121    *         the squatted_mm_context. Another     120    *         the squatted_mm_context. Another exception is when a parent
122    *         thread creates the address space     121    *         thread creates the address space of a child process, in
123    *         which case the parent thread migh    122    *         which case the parent thread might temporarily decide to
124    *         switch to the child's process spa    123    *         switch to the child's process space.
125    *                                              124    *
126    * This is the SOS implementation of the Lin    125    * This is the SOS implementation of the Linux "Lazy TLB" and
127    * address-space loaning.                       126    * address-space loaning.
128    */                                             127    */
129   struct sos_mm_context *squatted_mm_context;     128   struct sos_mm_context *squatted_mm_context;
130                                                   129 
131   /* Data specific to each state */               130   /* Data specific to each state */
132   union                                           131   union
133   {                                               132   {
134     struct                                        133     struct
135     {                                             134     {
136       struct sos_sched_queue *rdy_queue;          135       struct sos_sched_queue *rdy_queue;
137       struct sos_thread     *rdy_prev, *rdy_ne    136       struct sos_thread     *rdy_prev, *rdy_next;
138     } ready;                                      137     } ready;
139                                                   138 
140     struct                                        139     struct
141     {                                             140     {
142       struct sos_time user_time_spent_in_slice    141       struct sos_time user_time_spent_in_slice;
143     } running;                                    142     } running;
144   }; /* Anonymous union (gcc extension) */        143   }; /* Anonymous union (gcc extension) */
145                                                   144 
146                                                   145 
147   /**                                          << 
148    * When a thread in kernel mode is accessing << 
149    * page fault in the usual way only if retur << 
150    * set. This structure holds information reg << 
151    * page fault from kernel into user space co << 
152    *                                           << 
153    * @note the fields below should be consider << 
154    * sos_thread_prepare_user_space_access() an << 
155    * sos_thread_end_user_space_access() to mod << 
156    */                                          << 
157   struct                                       << 
158   {                                            << 
159     /** This is the address (in kernel code) t << 
160         user-space page fault from a kernel-mo << 
161         resolved.  @see sos_thread_prepare_use << 
162     sos_vaddr_t return_vaddr;                  << 
163                                                << 
164     /** This is the address of the user-space  << 
165         unresolved page fault (set by the page << 
166     sos_uaddr_t faulted_uaddr;                 << 
167   } fixup_uaccess;                             << 
168                                                << 
169                                                << 
170   /*                                              146   /*
171    * Data used by the kwaitq subsystem: list o    147    * Data used by the kwaitq subsystem: list of kwaitqueues the thread
172    * is waiting for.                              148    * is waiting for.
173    *                                              149    *
174    * @note: a RUNNING or READY thread might be    150    * @note: a RUNNING or READY thread might be in one or more
175    * waitqueues ! The only property we have is    151    * waitqueues ! The only property we have is that, among these
176    * waitqueues (if any), _at least_ one has w    152    * waitqueues (if any), _at least_ one has woken the thread.
177    */                                             153    */
178   struct sos_kwaitq_entry *kwaitq_list;           154   struct sos_kwaitq_entry *kwaitq_list;
179                                                   155 
180                                                   156 
181   /**                                             157   /**
182    * Some statistics                              158    * Some statistics
183    */                                             159    */
184   struct rusage                                   160   struct rusage
185   {                                               161   {
186     /* Updated by sched.c */                      162     /* Updated by sched.c */
187     struct sos_time ru_utime; /* Time spent in    163     struct sos_time ru_utime; /* Time spent in user mode */
188     struct sos_time ru_stime; /* Time spent in    164     struct sos_time ru_stime; /* Time spent in kernel mode */
189   } rusage;                                       165   } rusage;
190                                                   166 
191                                                   167 
192   /**                                             168   /**
193    * Chaining pointers for the list of threads    169    * Chaining pointers for the list of threads in the parent process
194    */                                             170    */
195   struct sos_thread *prev_in_process, *next_in    171   struct sos_thread *prev_in_process, *next_in_process;
196                                                   172 
197                                                   173 
198   /**                                             174   /**
199    * Chaining pointers for global ("gbl") list    175    * Chaining pointers for global ("gbl") list of threads (debug)
200    */                                             176    */
201   struct sos_thread *gbl_prev, *gbl_next;         177   struct sos_thread *gbl_prev, *gbl_next;
202 };                                                178 };
203                                                   179 
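The per-state data at the end of the TCB lives in an anonymous union (a gcc extension), so its members are reached through the thread pointer without naming the union. The sketch below only illustrates that syntax, using the fields declared above; it is the kind of access thread.c or sched.c would do internally, while outside code should stick to the accessor functions as the comment on the structure says.

/* Illustrative only: read the state-specific data of a thread.
   Because the union is anonymous, 'ready' and 'running' are accessed
   as if they were direct members of struct sos_thread. */
static void inspect_thread(const struct sos_thread *thr)
{
  if (SOS_THR_READY == thr->state)
    {
      const struct sos_thread *next = thr->ready.rdy_next;
      /* rdy_prev/rdy_next link the thread inside thr->ready.rdy_queue */
      (void)next;
    }
  else if (SOS_THR_RUNNING == thr->state)
    {
      struct sos_time slice = thr->running.user_time_spent_in_slice;
      /* user time already spent in the current time slice */
      (void)slice;
    }
}
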
204                                                   180 
205 /**                                               181 /**
206  * Definition of the function executed by a ke    182  * Definition of the function executed by a kernel thread
207  */                                               183  */
208 typedef void (*sos_kernel_thread_start_routine    184 typedef void (*sos_kernel_thread_start_routine_t)(void *arg);
209                                                   185 
210                                                   186 
211 /**                                               187 /**
212  * Initialize the subsystem responsible for th    188  * Initialize the subsystem responsible for thread management
213  *                                                189  *
214  * Initialize the primary kernel thread so tha    190  * Initialize the primary kernel thread so that it can be handled the
215  * same way as an ordinary thread created by s    191  * same way as an ordinary thread created by sos_create_kernel_thread().
216  */                                               192  */
217 sos_ret_t sos_thread_subsystem_setup(sos_vaddr    193 sos_ret_t sos_thread_subsystem_setup(sos_vaddr_t init_thread_stack_base_addr,
218                                      sos_size_    194                                      sos_size_t init_thread_stack_size);
219                                                   195 
220                                                   196 
221 /**                                               197 /**
222  * Create a new kernel thread                     198  * Create a new kernel thread
223  */                                               199  */
224 struct sos_thread *                               200 struct sos_thread *
225 sos_create_kernel_thread(const char *name,        201 sos_create_kernel_thread(const char *name,
226                          sos_kernel_thread_sta    202                          sos_kernel_thread_start_routine_t start_func,
227                          void *start_arg,         203                          void *start_arg,
228                          sos_sched_priority_t     204                          sos_sched_priority_t priority);
229                                                   205 
230                                                   206 
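A minimal usage sketch for the two declarations above. The priority value is passed in by the caller because the actual sos_sched_priority_t constants live in sos/sched.h, which this diff does not show, and the error code returned on failure is only illustrative.

/* Illustrative kernel-thread body and creation call. */
static void demo_thread(void *arg)
{
  const char *msg = (const char *)arg;
  (void)msg;
  /* ... do some work, possibly calling sos_thread_yield() ... */
  /* Returning ends the thread: sos_thread_exit() is called by default. */
}

static sos_ret_t spawn_demo_thread(sos_sched_priority_t prio)
{
  struct sos_thread *thr
    = sos_create_kernel_thread("demo", demo_thread,
                               (void *)"hello from demo", prio);
  return (thr != NULL) ? SOS_OK : -SOS_ENOMEM; /* error code illustrative */
}
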
231 /**                                               207 /**
232  * Create a new user thread                       208  * Create a new user thread
233  */                                               209  */
234 struct sos_thread *                               210 struct sos_thread *
235 sos_create_user_thread(const char *name,          211 sos_create_user_thread(const char *name,
236                        struct sos_process *pro    212                        struct sos_process *process,
237                        sos_uaddr_t user_initia    213                        sos_uaddr_t user_initial_PC,
238                        sos_ui32_t  user_start_    214                        sos_ui32_t  user_start_arg1,
239                        sos_ui32_t  user_start_    215                        sos_ui32_t  user_start_arg2,
240                        sos_uaddr_t user_initia    216                        sos_uaddr_t user_initial_SP,
241                        sos_sched_priority_t pr    217                        sos_sched_priority_t priority);
242                                                   218 
243                                                   219 
244 /**                                               220 /**
245  * Create a new user thread, copy of the given << 
246  * given user context                          << 
247  */                                            << 
248 struct sos_thread *                            << 
249 sos_duplicate_user_thread(const char *name,    << 
250                           struct sos_process * << 
251                           const struct sos_thr << 
252                           const struct sos_cpu << 
253                           sos_ui32_t retval);  << 
254                                                << 
255                                                << 
256 /**                                            << 
257  * Terminate the execution of the current thre    221  * Terminate the execution of the current thread. For kernel threads,
258  * it is called by default when the start rout    222  * it is called by default when the start routine returns.
259  */                                               223  */
260 void sos_thread_exit() __attribute__((noreturn    224 void sos_thread_exit() __attribute__((noreturn));
261                                                   225 
262                                                   226 
263 /**                                               227 /**
264  * Get the identifier of the thread currently     228  * Get the identifier of the thread currently running on CPU. Trivial
265  * function.                                      229  * function.
266  */                                               230  */
267 struct sos_thread *sos_thread_get_current();      231 struct sos_thread *sos_thread_get_current();
268                                                   232 
269                                                   233 
270 /**                                               234 /**
271  * If thr == NULL, get the priority of the cur    235  * If thr == NULL, get the priority of the current thread. Trivial
272  * function.                                      236  * function.
273  *                                                237  *
274  * @note NOT protected against interrupts         238  * @note NOT protected against interrupts
275  */                                               239  */
276 sos_sched_priority_t sos_thread_get_priority(s    240 sos_sched_priority_t sos_thread_get_priority(struct sos_thread *thr);
277                                                   241 
278                                                   242 
279 /**                                               243 /**
280  * If thr == NULL, get the state of the curren    244  * If thr == NULL, get the state of the current thread. Trivial
281  * function.                                      245  * function.
282  *                                                246  *
283  * @note NOT protected against interrupts         247  * @note NOT protected against interrupts
284  */                                               248  */
285 sos_thread_state_t sos_thread_get_state(struct    249 sos_thread_state_t sos_thread_get_state(struct sos_thread *thr);
286                                                   250 
287                                                   251 
288 /**                                               252 /**
289  * If thr == NULL, set the priority of the cur    253  * If thr == NULL, set the priority of the current thread
290  *                                                254  *
291  * @note NO context-switch ever occurs in this    255  * @note NO context-switch ever occurs in this function !
292  */                                               256  */
293 sos_ret_t sos_thread_set_priority(struct sos_t    257 sos_ret_t sos_thread_set_priority(struct sos_thread *thr,
294                                   sos_sched_pr    258                                   sos_sched_priority_t priority);
295                                                   259 
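Since both the getter and the setter accept thr == NULL to mean the calling thread, a caller can save and restore its own priority around a piece of work. A small sketch, illustrative only:

/* Temporarily run fn() at a different priority, then restore the
   caller's original priority, using the thr == NULL convention of
   the two functions above. */
static void run_at_priority(void (*fn)(void), sos_sched_priority_t prio)
{
  sos_sched_priority_t saved = sos_thread_get_priority(NULL);

  sos_thread_set_priority(NULL, prio);
  fn();
  sos_thread_set_priority(NULL, saved);
}
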
296                                                   260 
297 /**                                               261 /**
298  * Yield CPU to another ready thread.             262  * Yield CPU to another ready thread.
299  *                                                263  *
300  * @note This is a BLOCKING FUNCTION              264  * @note This is a BLOCKING FUNCTION
301  */                                               265  */
302 sos_ret_t sos_thread_yield();                     266 sos_ret_t sos_thread_yield();
303                                                   267 
304                                                   268 
305 /**                                               269 /**
306  * Release the CPU for (at least) the given de    270  * Release the CPU for (at least) the given delay.
307  *                                                271  *
308  * @param delay The delay to wait for. If dela    272  * @param delay The delay to wait for. If delay == NULL then wait
309  * forever until some event occurs.               273  * forever until some event occurs.
310  *                                                274  *
311  * @return SOS_OK when delay expired (and dela    275  * @return SOS_OK when delay expired (and delay is reset to zero),
312  * -SOS_EINTR otherwise (and delay contains th    276  * -SOS_EINTR otherwise (and delay contains the amount of time
313  * remaining).                                    277  * remaining).
314  *                                                278  *
315  * @note This is a BLOCKING FUNCTION              279  * @note This is a BLOCKING FUNCTION
316  */                                               280  */
317 sos_ret_t sos_thread_sleep(/* in/out */struct     281 sos_ret_t sos_thread_sleep(/* in/out */struct sos_time *delay);
318                                                   282 
319                                                   283 
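Because sos_thread_sleep() can return early with -SOS_EINTR and leaves the remaining time in *delay, a caller that really wants to wait for the whole delay simply loops, as in this small sketch:

/* Sleep until the requested delay has fully elapsed, resuming the
   sleep whenever the thread is woken early (the remaining time is
   left in *delay by sos_thread_sleep() on -SOS_EINTR). */
static void sleep_full_delay(struct sos_time *delay)
{
  while (sos_thread_sleep(delay) != SOS_OK)
    /* Woken early: *delay now holds the time still to wait */
    continue;
}
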
320 /**                                               284 /**
321  * Mark the given thread as READY (if not alre    285  * Mark the given thread as READY (if not already ready) even if it is
322  * blocked in a kwaitq or in a sleep ! As a re    286  * blocked in a kwaitq or in a sleep ! As a result, the interrupted
323  * kwaitq/sleep function call of the thread wi    287  * kwaitq/sleep function call of the thread will return with
324  * -SOS_EINTR.                                    288  * -SOS_EINTR.
325  *                                                289  *
326  * @return -SOS_EINVAL if thread does not exis    290  * @return -SOS_EINVAL if thread does not exist, or -SOS_EFATAL if
327  * marked ZOMBIE.                                 291  * marked ZOMBIE.
328  *                                                292  *
329  * @note As a result, the semaphore/mutex/cond    293  * @note As a result, the semaphore/mutex/conditions/... functions
330  * return values SHOULD ALWAYS be checked ! If    294  * return values SHOULD ALWAYS be checked ! If they are != SOS_OK,
331  * then the caller should consider that the re    295  * then the caller should consider that the resource is not acquired
332  * because somebody woke the thread in some wa    296  * because somebody woke the thread in some way.
333  */                                               297  */
334 sos_ret_t sos_thread_force_unblock(struct sos_    298 sos_ret_t sos_thread_force_unblock(struct sos_thread *thread);
335                                                   299 
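The note above is worth illustrating: any blocking primitive built on kwaitq/sleep can be interrupted by sos_thread_force_unblock(), so its return value must be tested before the protected resource is touched. The semaphore calls below (sos_ksema_down()/sos_ksema_up()) are assumed from SOS's ksem API and are not declared in this header.

/* Illustrative pattern only (assumes <sos/ksem.h>): never assume the
   resource was acquired just because the blocking call returned. */
static sos_ret_t with_resource(struct sos_ksema *sema)
{
  sos_ret_t retval = sos_ksema_down(sema, /* no timeout */ NULL);
  if (SOS_OK != retval)
    /* Probably woken by sos_thread_force_unblock(): the semaphore
       was NOT acquired, so do not touch the resource. */
    return retval;

  /* ... safely use the resource here ... */

  return sos_ksema_up(sema);
}
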
336 /**                                               300 /**
337  * Dump the backtrace of the current thread to    301  * Dump the backtrace of the current thread to console and/or bochs
338  */                                               302  */
339 void sos_thread_dump_backtrace(sos_bool_t on_c    303 void sos_thread_dump_backtrace(sos_bool_t on_console,
340                                sos_bool_t on_b    304                                sos_bool_t on_bochs);
341                                                   305 
342                                                   306 
343 /* *******************************************    307 /* **********************************************
344  * Restricted functions                           308  * Restricted functions
345  */                                               309  */
346                                                   310 
347                                                   311 
348 /**                                               312 /**
349  * Restricted function to indicate that we are !! 313  * Restricted function to change the current mm_context AND the
350  * user address space from inside the kernel.  !! 314  * squatted_mm_context of the current thread in order to access the data
                                                   >> 315  * in this context
                                                   >> 316  *
                                                   >> 317  *   @param mm_ctxt The mm_ctxt to restore. Might be NULL, meaning that:
                                                   >> 318  *    - for a Kernel thread: the current MMU configuration is never
                                                   >> 319  *      modified. The address space to use is limited to the kernel
                                                   >> 320  *      space, user space might change due to preemptions to other
                                                   >> 321  *      processes
                                                   >> 322  *    - for a User thread in kernel mode: same as for kernel threads
                                                   >> 323  *    - when a User thread will go back in user context: the MMU will
                                                   >> 324  *      be reconfigured to match the mm_context of the thread's
                                                   >> 325  *      process
                                                   >> 326  *
                                                   >> 327  * @note A non NULL parameter is allowed only if the
                                                   >> 328  * squatted_mm_context is not already set. A NULL parameter is allowed
                                                   >> 329  * only if the squatted_mm_context was already set.
351  *                                                330  *
352  * @param dest_as The address space we want to !! 331  * @note The use of this function is RESERVED to the syscall handler
353  * access current thread's address space       !! 332  * and the copy_from/to_user functions
354  *                                             << 
355  * @param fixup_retvaddr When != 0, then dest_ << 
356  * don't allow controlled access from kernel i << 
357  * foreign thread). In this case, the page fau << 
358  * page faults from the kernel in user space,  << 
359  * usual way. The value in retvaddr is where t << 
360  * has to return to in case the page fault rem << 
361  * address of the faulting address is kept in  << 
362  * thread->fixup_uaccess.faulted_uaddr         << 
363  *                                             << 
364  * @note typical values for fixup_retvaddr are << 
365  * values" (see gcc's doc: operator "&&"). See << 
366  * code.                                       << 
367  */                                            << 
368 sos_ret_t                                      << 
369 sos_thread_prepare_user_space_access(struct so << 
370                                      sos_vaddr << 
371                                                << 
372                                                << 
373 /**                                            << 
374  * Restricted function to signal we are not ac << 
375  * space anymore                               << 
376  */                                               333  */
377 sos_ret_t                                         334 sos_ret_t
378 sos_thread_end_user_space_access(void);        !! 335 sos_thread_change_current_mm_context(struct sos_mm_context *mm_ctxt);
379                                                   336 
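The Article 7.5 side above refers to gcc's "labels as values" extension (the unary && operator) as the typical source of fixup_retvaddr. The full parameter list of sos_thread_prepare_user_space_access() is cut off in this diff, so the following is only an indicative sketch of the pattern, with the argument order and types assumed and an illustrative error code.

/* Indicative sketch only.  A kernel-mode access to user space is
   bracketed by prepare/end calls; &&uaccess_failed (a gcc extension)
   gives the kernel address to jump back to if the user-space page
   fault cannot be resolved. */
static sos_ret_t peek_user_byte(char *dst, sos_uaddr_t user_src)
{
  sos_ret_t retval = SOS_OK;

  /* Argument order/types assumed: target address space (NULL = the
     current thread's), then the fixup return address. */
  sos_thread_prepare_user_space_access(NULL,
                                       (sos_vaddr_t)&& uaccess_failed);

  *dst = *(const char *)user_src;   /* may page-fault into user space */
  goto uaccess_done;

 uaccess_failed:
  /* Unresolved fault: the faulting user address was recorded in the
     current thread's fixup_uaccess.faulted_uaddr field. */
  retval = -SOS_EFAULT;             /* error code illustrative */

 uaccess_done:
  sos_thread_end_user_space_access();
  return retval;
}
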
380                                                   337 
381 /**                                               338 /**
382  * Restricted callback called when a syscall g    339  * Restricted callback called when a syscall goes back in user mode,
383  * to reconfigure the MMU to match that of the    340  * to reconfigure the MMU to match that of the current thread's
384  * process MMU context.                           341  * process MMU context.
385  *                                                342  *
386  * @note The use of this function is RESERVED     343  * @note The use of this function is RESERVED to the syscall wrapper
387  */                                               344  */
388 void sos_thread_prepare_syscall_switch_back(st    345 void sos_thread_prepare_syscall_switch_back(struct sos_cpu_state *cpu_state);
389                                                   346 
390                                                   347 
391 /**                                               348 /**
392  * Restricted callback called when an exceptio    349  * Restricted callback called when an exception handler goes back to
393  * the interrupted thread to reconfigure the M    350  * the interrupted thread to reconfigure the MMU to match that of the
394  * current thread's process MMU context.          351  * current thread's process MMU context.
395  *                                                352  *
396  * @note The use of this function is RESERVED     353  * @note The use of this function is RESERVED to the exception wrappers
397  */                                               354  */
398 void sos_thread_prepare_exception_switch_back(    355 void sos_thread_prepare_exception_switch_back(struct sos_cpu_state *cpu_state);
399                                                   356 
400                                                   357 
401 /**                                               358 /**
402  * Restricted callback called when an IRQ is e    359  * Restricted callback called when an IRQ is entered while the CPU was
403  * NOT already servicing any other IRQ (ie the    360  * NOT already servicing any other IRQ (ie the outermost IRQ handler
404  * is entered). This callback simply updates t    361  * is entered). This callback simply updates the "cpu_state" field so
405  * that IRQ handlers always know the state of     362  * that IRQ handlers always know the state of the interrupted thread,
406  * even if they are imbricated in other IRQ ha    363  * even if they are imbricated in other IRQ handlers.
407  *                                                364  *
408  * @note The use of this function is RESERVED     365  * @note The use of this function is RESERVED to the irq wrappers
409  */                                               366  */
410 void                                              367 void
411 sos_thread_prepare_irq_servicing(struct sos_cp    368 sos_thread_prepare_irq_servicing(struct sos_cpu_state *interrupted_state);
412                                                   369 
413                                                   370 
414 /**                                               371 /**
415  * Restricted callback called when the outermo    372  * Restricted callback called when the outermost IRQ handler returns,
416  * to select the thread to return to. This cal    373  * to select the thread to return to. This callbacks implements:
417  *   - preemption of user threads in user mode    374  *   - preemption of user threads in user mode (time sharing / FIFO)
418  *   - non-preemption of user threads in kerne    375  *   - non-preemption of user threads in kernel mode (interrupted thread
419  *     is restored on CPU "as is")                376  *     is restored on CPU "as is")
420  *   - non-preemption of kernel threads (same     377  *   - non-preemption of kernel threads (same remark)
421  * The MMU is reconfigured correctly to match     378  * The MMU is reconfigured correctly to match the address space of the
422  * selected thread.                               379  * selected thread.
423  *                                                380  *
424  * @return The CPU context of the thread to re    381  * @return The CPU context of the thread to return to
425  *                                                382  *
426  * @note The use of this function is RESERVED     383  * @note The use of this function is RESERVED to the irq wrappers
427  */                                               384  */
428 struct sos_cpu_state *                            385 struct sos_cpu_state *
429 sos_thread_prepare_irq_switch_back(void);         386 sos_thread_prepare_irq_switch_back(void);
430                                                   387 
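To make the intended calling order concrete, here is a very schematic C rendition of what the two IRQ callbacks above expect from the outermost IRQ wrapper; the real wrappers are written in assembly under hwcore/ and are not part of this header.

/* Schematic only: the important points are the order of the two
   calls and the fact that the returned CPU context, not necessarily
   the interrupted one, is what gets restored. */
static struct sos_cpu_state *
outermost_irq_wrapper(struct sos_cpu_state *interrupted_state,
                      void (*handler)(int irq_level), int irq_level)
{
  /* 1. Record the interrupted thread's state so that (possibly
        nested) IRQ handlers can inspect it via the cpu_state field. */
  sos_thread_prepare_irq_servicing(interrupted_state);

  /* 2. Run the actual interrupt handler(s). */
  handler(irq_level);

  /* 3. Pick the thread to return to (a user thread running in user
        mode may have been preempted) and hand back the CPU context
        the low-level code must restore. */
  return sos_thread_prepare_irq_switch_back();
}
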
431                                                   388 
432 #endif /* _SOS_THREAD_H_ */                       389 #endif /* _SOS_THREAD_H_ */
                                                      
