The LXR Cross Referencer for SOS

Diff markup

Differences between /sos/thread.h (Article 7.5) and /sos/thread.h (Article 9.5)

Two things changed between the two articles: the user_time_spent_in_slice field moved out of the state-specific anonymous union (where it lived inside a "running" sub-structure) and became a plain member of struct sos_thread, and the prototypes of sos_thread_exit(), sos_thread_get_current() and sos_thread_yield() now spell out an explicit (void) parameter list. The full file is shown below as a unified diff: lines removed from Article 7.5 are prefixed with "-", lines added in Article 9.5 with "+", and everything else is common to both versions.

--- /sos/thread.h   (Article 7.5)
+++ /sos/thread.h   (Article 9.5)

 /* Copyright (C) 2004,2005 David Decotigny

    This program is free software; you can redistribute it and/or
    modify it under the terms of the GNU General Public License
    as published by the Free Software Foundation; either version 2
    of the License, or (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
    USA.
 */
 #ifndef _SOS_THREAD_H_
 #define _SOS_THREAD_H_

 /**
  * @file thread.h
  *
  * SOS Thread management API
  */

 #include <sos/errno.h>

 /* Forward declaration */
 struct sos_thread;

 #include <hwcore/cpu_context.h>
 #include <sos/sched.h>
 #include <sos/kwaitq.h>
 #include <sos/time.h>
 #include <sos/process.h>
 #include <sos/umem_vmm.h>

 /**
  * The possible states of a valid thread
  */
 typedef enum { SOS_THR_CREATED, /**< Thread created, not fully initialized */
                SOS_THR_READY,   /**< Thread fully initialized or
                                      waiting for CPU after having been
                                      blocked or preempted */
                SOS_THR_RUNNING, /**< Thread currently running on CPU */
                SOS_THR_BLOCKED, /**< Thread waiting for I/O (+ in at LEAST
                                      one kwaitq) and/or sleeping (+ in NO
                                      kwaitq) */
                SOS_THR_ZOMBIE,  /**< Thread terminated execution, waiting to
                                      be deleted by kernel */
              } sos_thread_state_t;


 /**
  * TCB (Thread Control Block): structure describing a thread. Don't
  * access these fields directly: prefer using the accessor functions
  * below.
  */
 struct sos_thread
 {
 #define SOS_THR_MAX_NAMELEN 32
   char name[SOS_THR_MAX_NAMELEN];

   sos_thread_state_t  state;
   sos_sched_priority_t priority;

   /**
    * The hardware context of the thread.
    *
    * It will reflect the CPU state of the thread:
    *  - From an interrupt handler: the state of the thread at the time
    *    of the OUTERMOST irq. An IRQ is not allowed to make context
    *    switches, so this context will remain valid from the begining of
    *    the outermost IRQ handler to the end of it, no matter if there
    *    are other IRQ handlers nesting in one another. You may safely
    *    use it from IRQ handlers to query the state of the interrupted
    *    thread, no matter if there has been other IRQ handlers
    *    executing meanwhile.
    *  - From normal kernel code, exceptions and syscall: the state of
    *    the thread the last time there was a context switch from this
    *    thread to another one. Thus this field WON'T reflect the
    *    current's thread cpu_state in these cases. So, in these cases,
    *    simply DO NOT USE IT outside thread.c ! Note: for syscall and
    *    exception handlers, the VALID state of the interrupted thread is
    *    passed as an argument to the handlers.
    */
   struct sos_cpu_state *cpu_state;

   /* Kernel stack parameters */
   sos_vaddr_t kernel_stack_base_addr;
   sos_size_t  kernel_stack_size;

   /* Process this thread belongs to. Always NULL for a kernel
      thread */
   struct sos_process *process;

   /**
    * Address space currently "squatted" by the thread, or used to be
    * active when the thread was interrupted/preempted. This is the MMU
    * configuration expected before the cpu_state of the thread is
    * restored on CPU.
    *   - For kernel threads: should normally be NULL, meaning that the
    *     thread will squat the current mm_context currently set in the
    *     MMU. Might be NON NULL when a kernel thread squats a given
    *     process to manipulate its address space.
    *   - For user threads: should normally be NULL. More precisely:
    *       - in user mode: the thread->process.mm_context is ALWAYS
    *         set on MMU. squatted_mm_context is ALWAYS NULL in this
    *         situation, meaning that the thread in user mode uses its
    *         process-space as expected
    *       - in kernel mode: NULL means that we keep on using the
    *         mm_context currently set on MMU, which might be the
    *         mm_context of another process. This is natural since a
    *         thread in kernel mode normally only uses data in kernel
    *         space. BTW, this limits the number of TLB flushes. However,
    *         there are exceptions where this squatted_mm_context will
    *         NOT be NULL. One is the copy_from/to_user API, which can
    *         force the effective mm_context so that the MMU will be
    *         (re)configured upon every context to the thread to match
    *         the squatted_mm_context. Another exception is when a parent
    *         thread creates the address space of a child process, in
    *         which case the parent thread might temporarilly decide to
    *         switch to the child's process space.
    *
    * This is the SOS implementation of the Linux "Lazy TLB" and
    * address-space loaning.
    */
   struct sos_mm_context *squatted_mm_context;

   /* Data specific to each state */
   union
   {
     struct
     {
       struct sos_sched_queue *rdy_queue;
       struct sos_thread     *rdy_prev, *rdy_next;
     } ready;
-
-    struct
-    {
-      struct sos_time user_time_spent_in_slice;
-    } running;
   }; /* Anonymous union (gcc extenion) */

+  struct sos_time user_time_spent_in_slice;
+

   /**
    * When a thread in kernel mode is accessing the user space, it may
    * page fault in the usual way only if return_vaddr below is
    * set. This structure holds information regarding what to do when a
    * page fault from kernel into user space could not be resolved.
    *
    * @note the fields below should be considered read-only. @see
    * sos_thread_prepare_user_space_access() and @see
    * sos_thread_end_user_space_access() to modify them.
    */
   struct
   {
     /** This is the address (in kernel code) to return to when a
         user-space page fault from a kernel-mode thread could not be
         resolved.  @see sos_thread_prepare_user_space_access() */
     sos_vaddr_t return_vaddr;

     /** This is the address of the user-space address that caused the
         unresolved page fault (set by the page fault handler) */
     sos_uaddr_t faulted_uaddr;
   } fixup_uaccess;


   /*
    * Data used by the kwaitq subsystem: list of kwaitqueues the thread
    * is waiting for.
    *
    * @note: a RUNNING or READY thread might be in one or more
    * waitqueues ! The only property we have is that, among these
    * waitqueues (if any), _at least_ one has woken the thread.
    */
   struct sos_kwaitq_entry *kwaitq_list;


   /**
    * Some statistics
    */
   struct rusage
   {
     /* Updated by sched.c */
     struct sos_time ru_utime; /* Time spent in user mode */
     struct sos_time ru_stime; /* Time spent in kernel mode */
   } rusage;


   /**
    * Chaining pointers for the list of threads in the parent process
    */
   struct sos_thread *prev_in_process, *next_in_process;


   /**
    * Chaining pointers for global ("gbl") list of threads (debug)
    */
   struct sos_thread *gbl_prev, *gbl_next;
 };


 /**
  * Definition of the function executed by a kernel thread
  */
 typedef void (*sos_kernel_thread_start_routine_t)(void *arg);


 /**
  * Initialize the subsystem responsible for thread management
  *
  * Initialize the primary kernel thread so that it can be handled the
  * same way as an ordinary thread created by sos_thread_create().
  */
 sos_ret_t sos_thread_subsystem_setup(sos_vaddr_t init_thread_stack_base_addr,
                                      sos_size_t init_thread_stack_size);


 /**
  * Create a new kernel thread
  */
 struct sos_thread *
 sos_create_kernel_thread(const char *name,
                          sos_kernel_thread_start_routine_t start_func,
                          void *start_arg,
                          sos_sched_priority_t priority);

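Editor's note: a minimal usage sketch for the kernel-thread creation API above. It is not taken from the SOS articles; the helper and thread names are made up, and the priority value is left to the caller since the valid sos_sched_priority_t constants live in sos/sched.h.

/* Illustrative sketch: spawn a kernel thread running demo_thread().
   Only declarations from <sos/thread.h> are used. */
#include <sos/thread.h>

static void demo_thread(void *arg)
{
  /* ... do some work with arg ... */
  /* Returning from the start routine implicitly calls sos_thread_exit() */
}

static struct sos_thread * spawn_demo_thread(sos_sched_priority_t prio)
{
  struct sos_thread *thr
    = sos_create_kernel_thread("demo", demo_thread, NULL, prio);

  /* A NULL return is assumed here to indicate a creation failure */
  return thr;
}
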

 /**
  * Create a new user thread
  */
 struct sos_thread *
 sos_create_user_thread(const char *name,
                        struct sos_process *process,
                        sos_uaddr_t user_initial_PC,
                        sos_ui32_t  user_start_arg1,
                        sos_ui32_t  user_start_arg2,
                        sos_uaddr_t user_initial_SP,
                        sos_sched_priority_t priority);


 /**
  * Create a new user thread, copy of the given user thread with the
  * given user context
  */
 struct sos_thread *
 sos_duplicate_user_thread(const char *name,
                           struct sos_process *process,
                           const struct sos_thread * model_thread,
                           const struct sos_cpu_state * model_uctxt,
                           sos_ui32_t retval);


 /**
  * Terminate the execution of the current thread. For kernel threads,
  * it is called by default when the start routine returns.
  */
-void sos_thread_exit() __attribute__((noreturn));
+void sos_thread_exit(void) __attribute__((noreturn));


 /**
  * Get the identifier of the thread currently running on CPU. Trivial
  * function.
  */
-struct sos_thread *sos_thread_get_current();
+struct sos_thread *sos_thread_get_current(void);


 /**
  * If thr == NULL, set the priority of the current thread. Trivial
  * function.
  *
  * @note NOT protected against interrupts
  */
 sos_sched_priority_t sos_thread_get_priority(struct sos_thread *thr);


 /**
  * If thr == NULL, get the state of the current thread. Trivial
  * function.
  *
  * @note NOT protected against interrupts
  */
 sos_thread_state_t sos_thread_get_state(struct sos_thread *thr);


 /**
  * If thr == NULL, set the priority of the current thread
  *
  * @note NO context-switch ever occurs in this function !
  */
 sos_ret_t sos_thread_set_priority(struct sos_thread *thr,
                                   sos_sched_priority_t priority);


 /**
  * Yield CPU to another ready thread.
  *
  * @note This is a BLOCKING FUNCTION
  */
-sos_ret_t sos_thread_yield();
+sos_ret_t sos_thread_yield(void);


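Editor's note: a small hedged sketch of the thr == NULL convention documented above; the helper name is made up.

/* Illustrative: change the current thread's priority (thr == NULL means
   "the current thread"), then yield so the scheduler can act on it. */
#include <sos/thread.h>

static sos_ret_t set_my_priority_and_yield(sos_sched_priority_t prio)
{
  sos_ret_t ret = sos_thread_set_priority(NULL /* current thread */, prio);

  if (SOS_OK == ret)
    /* sos_thread_yield() is a blocking call: another ready thread may
       run before it returns */
    sos_thread_yield();

  return ret;
}
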
 /**
  * Release the CPU for (at least) the given delay.
  *
  * @param delay The delay to wait for. If delay == NULL then wait
  * forever that any event occurs.
  *
  * @return SOS_OK when delay expired (and delay is reset to zero),
  * -SOS_EINTR otherwise (and delay contains the amount of time
  * remaining).
  *
  * @note This is a BLOCKING FUNCTION
  */
 sos_ret_t sos_thread_sleep(/* in/out */struct sos_time *delay);

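Editor's note: the in/out delay parameter makes a retry loop straightforward; here is a minimal sketch (not part of the header, the helper name is made up).

/* Illustrative: sleep for the whole requested delay, resuming the sleep
   when it gets interrupted (e.g. by sos_thread_force_unblock(), which is
   documented just below). */
#include <sos/thread.h>
#include <sos/time.h>

static sos_ret_t sleep_full_delay(struct sos_time *delay)
{
  sos_ret_t ret;

  do
    {
      /* On -SOS_EINTR, delay has been updated with the remaining time,
         so it can simply be passed to sos_thread_sleep() again. */
      ret = sos_thread_sleep(delay);
    }
  while (-SOS_EINTR == ret);

  return ret; /* SOS_OK: the delay fully expired and *delay is zero */
}
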

 /**
  * Mark the given thread as READY (if not already ready) even if it is
  * blocked in a kwaitq or in a sleep ! As a result, the interrupted
  * kwaitq/sleep function call of the thread will return with
  * -SOS_EINTR.
  *
  * @return -SOS_EINVAL if thread does not exist, or -SOS_EFATAL if
  * marked ZOMBIE.
  *
  * @note As a result, the semaphore/mutex/conditions/... functions
  * return values SHOULD ALWAYS be checked ! If they are != SOS_OK,
  * then the caller should consider that the resource is not aquired
  * because somebody woke the thread by some way.
  */
 sos_ret_t sos_thread_force_unblock(struct sos_thread *thread);

 /**
  * Dump the backtrace of the current thread to console and/or bochs
  */
 void sos_thread_dump_backtrace(sos_bool_t on_console,
                                sos_bool_t on_bochs);


 /* **********************************************
  * Restricted functions
  */


 /**
  * Restricted function to indicate that we are to access the given
  * user address space from inside the kernel.
  *
  * @param dest_as The address space we want to access, or NULL to
  * access current thread's address space
  *
  * @param fixup_retvaddr When != 0, then dest_as MUST BE NULL (we
  * don't allow controlled access from kernel into user space from a
  * foreign thread). In this case, the page fault handler should accept
  * page faults from the kernel in user space, and resolve them in the
  * usual way. The value in retvaddr is where the page fault handler
  * has to return to in case the page fault remains unresolved. The
  * address of the faulting address is kept in
  * thread->fixup_uaccess.faulted_uaddr
  *
  * @note typical values for fixup_retvaddr are obtained by "Labels as
  * values" (see gcc's doc: operator "&&"). See uaccess.c for example
  * code.
  */
 sos_ret_t
 sos_thread_prepare_user_space_access(struct sos_umem_vmm_as * dest_as,
                                      sos_vaddr_t fixup_retvaddr);


 /**
  * Restricted function to signal we are not accessing any user address
  * space anymore
  */
 sos_ret_t
 sos_thread_end_user_space_access(void);

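Editor's note: the real example code lives in uaccess.c; the following is only a hedged sketch of the fixup pattern described above, using gcc's "labels as values". The helper name and the -SOS_EFAULT error code are illustrative assumptions, not part of the SOS API.

/* Illustrative: read one byte from the current process' user space from
   kernel mode, falling back to the fixup label if the page fault on the
   user address cannot be resolved. */
#include <sos/thread.h>

static sos_ret_t peek_user_byte(sos_uaddr_t uaddr, unsigned char *out)
{
  sos_ret_t ret;

  /* dest_as == NULL: access the current thread's address space.
     fixup_retvaddr: where to resume if the fault stays unresolved. */
  ret = sos_thread_prepare_user_space_access(NULL, (sos_vaddr_t)&&fixup);
  if (SOS_OK != ret)
    return ret;

  *out = *(const unsigned char *)uaddr; /* may page fault, resolved normally */

  sos_thread_end_user_space_access();
  return SOS_OK;

 fixup:
  /* Unresolved fault: the faulting address has been recorded by the page
     fault handler in sos_thread_get_current()->fixup_uaccess.faulted_uaddr */
  sos_thread_end_user_space_access();
  return -SOS_EFAULT; /* assumed error constant, for illustration only */
}
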

 /**
  * Restricted callback called when a syscall goes back in user mode,
  * to reconfigure the MMU to match that of the current thread's
  * process MMU context.
  *
  * @note The use of this function is RESERVED to the syscall wrapper
  */
 void sos_thread_prepare_syscall_switch_back(struct sos_cpu_state *cpu_state);


 /**
  * Restricted callback called when an exception handler goes back to
  * the interrupted thread to reconfigure the MMU to match that of the
  * current thread's process MMU context.
  *
  * @note The use of this function is RESERVED to the exception wrappers
  */
 void sos_thread_prepare_exception_switch_back(struct sos_cpu_state *cpu_state);


 /**
  * Restricted callback called when an IRQ is entered while the CPU was
  * NOT already servicing any other IRQ (ie the outermost IRQ handler
  * is entered). This callback simply updates the "cpu_state" field so
  * that IRQ handlers always know the state of the interrupted thread,
  * even if they are imbricated in other IRQ handlers.
  *
  * @note The use of this function is RESERVED to the irq wrappers
  */
 void
 sos_thread_prepare_irq_servicing(struct sos_cpu_state *interrupted_state);


 /**
  * Restricted callback called when the outermost IRQ handler returns,
  * to select the thread to return to. This callbacks implements:
  *   - preemption of user threads in user mode (time sharing / FIFO)
  *   - non-preemption of user threads in kernel mode (interrupted thread
  *     is restored on CPU "as is")
  *   - non-preemption of kernel threads (same remark)
  * The MMU is reconfigured correctly to match the address space of the
  * selected thread.
  *
  * @return The CPU context of the thread to return to
  *
  * @note The use of this function is RESERVED to the irq wrappers
  */
 struct sos_cpu_state *
 sos_thread_prepare_irq_switch_back(void);


 #endif /* _SOS_THREAD_H_ */
                                                      
