SimpleOS

The LXR Cross Referencer for SOS


Diff markup

Differences between /sos/thread.h (Article 9.5) and /sos/thread.h (Article 7.5)

In the listing below, lines common to both versions appear once and unmarked;
lines marked "<<" appear only in the Article 9.5 version, and lines marked
">>" only in the Article 7.5 version.

/* Copyright (C) 2004,2005 David Decotigny

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License
   as published by the Free Software Foundation; either version 2
   of the License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
   USA.
*/
#ifndef _SOS_THREAD_H_
#define _SOS_THREAD_H_

/**
 * @file thread.h
 *
 * SOS Thread management API
 */

#include <sos/errno.h>

/* Forward declaration */
struct sos_thread;

#include <hwcore/cpu_context.h>
#include <sos/sched.h>
#include <sos/kwaitq.h>
#include <sos/time.h>
#include <sos/process.h>
#include <sos/umem_vmm.h>

/**
 * The possible states of a valid thread
 */
typedef enum { SOS_THR_CREATED, /**< Thread created, not fully initialized */
               SOS_THR_READY,   /**< Thread fully initialized or
                                     waiting for CPU after having been
                                     blocked or preempted */
               SOS_THR_RUNNING, /**< Thread currently running on CPU */
               SOS_THR_BLOCKED, /**< Thread waiting for I/O (+ in at LEAST
                                     one kwaitq) and/or sleeping (+ in NO
                                     kwaitq) */
               SOS_THR_ZOMBIE,  /**< Thread terminated execution, waiting to
                                     be deleted by kernel */
             } sos_thread_state_t;


/**
 * TCB (Thread Control Block): structure describing a thread. Don't
 * access these fields directly: prefer using the accessor functions
 * below.
 */
struct sos_thread
{
#define SOS_THR_MAX_NAMELEN 32
  char name[SOS_THR_MAX_NAMELEN];

  sos_thread_state_t  state;
  sos_sched_priority_t priority;

  /**
   * The hardware context of the thread.
   *
   * It will reflect the CPU state of the thread:
   *  - From an interrupt handler: the state of the thread at the time
   *    of the OUTERMOST irq. An IRQ is not allowed to make context
   *    switches, so this context will remain valid from the beginning of
   *    the outermost IRQ handler to the end of it, no matter if there
   *    are other IRQ handlers nesting in one another. You may safely
   *    use it from IRQ handlers to query the state of the interrupted
   *    thread, no matter if other IRQ handlers have been executing
   *    meanwhile.
   *  - From normal kernel code, exceptions and syscalls: the state of
   *    the thread the last time there was a context switch from this
   *    thread to another one. Thus this field WON'T reflect the
   *    current thread's cpu_state in these cases. So, in these cases,
   *    simply DO NOT USE IT outside thread.c ! Note: for syscall and
   *    exception handlers, the VALID state of the interrupted thread is
   *    passed as an argument to the handlers.
   */
  struct sos_cpu_state *cpu_state;

  /* Kernel stack parameters */
  sos_vaddr_t kernel_stack_base_addr;
  sos_size_t  kernel_stack_size;

  /* Process this thread belongs to. Always NULL for a kernel
     thread */
  struct sos_process *process;

  /**
   * Address space currently "squatted" by the thread, or the one that was
   * active when the thread was interrupted/preempted. This is the MMU
   * configuration expected before the cpu_state of the thread is
   * restored on CPU.
   *   - For kernel threads: should normally be NULL, meaning that the
   *     thread will squat the mm_context currently set in the
   *     MMU. Might be NON NULL when a kernel thread squats a given
   *     process to manipulate its address space.
   *   - For user threads: should normally be NULL. More precisely:
   *       - in user mode: the thread->process.mm_context is ALWAYS
   *         set on MMU. squatted_mm_context is ALWAYS NULL in this
   *         situation, meaning that the thread in user mode uses its
   *         process-space as expected
   *       - in kernel mode: NULL means that we keep on using the
   *         mm_context currently set on MMU, which might be the
   *         mm_context of another process. This is natural since a
   *         thread in kernel mode normally only uses data in kernel
   *         space. BTW, this limits the number of TLB flushes. However,
   *         there are exceptions where this squatted_mm_context will
   *         NOT be NULL. One is the copy_from/to_user API, which can
   *         force the effective mm_context so that the MMU will be
   *         (re)configured upon every context switch to the thread to
   *         match the squatted_mm_context. Another exception is when a
   *         parent thread creates the address space of a child process,
   *         in which case the parent thread might temporarily decide to
   *         switch to the child's process space.
   *
   * This is the SOS implementation of the Linux "Lazy TLB" and
   * address-space loaning.
   */
  struct sos_mm_context *squatted_mm_context;

  /* Data specific to each state */
  union
  {
    struct
    {
      struct sos_sched_queue *rdy_queue;
      struct sos_thread     *rdy_prev, *rdy_next;
    } ready;
<<  }; /* Anonymous union (gcc extension) */

<<  struct sos_time user_time_spent_in_slice;
>>    struct
>>    {
>>      struct sos_time user_time_spent_in_slice;
>>    } running;
>>  }; /* Anonymous union (gcc extension) */


  /**
   * When a thread in kernel mode is accessing the user space, it may
   * page fault in the usual way only if return_vaddr below is
   * set. This structure holds information regarding what to do when a
   * page fault from kernel into user space could not be resolved.
   *
   * @note the fields below should be considered read-only. @see
   * sos_thread_prepare_user_space_access() and @see
   * sos_thread_end_user_space_access() to modify them.
   */
  struct
  {
    /** This is the address (in kernel code) to return to when a
        user-space page fault from a kernel-mode thread could not be
        resolved.  @see sos_thread_prepare_user_space_access() */
    sos_vaddr_t return_vaddr;

    /** This is the user-space address that caused the
        unresolved page fault (set by the page fault handler) */
    sos_uaddr_t faulted_uaddr;
  } fixup_uaccess;


  /*
   * Data used by the kwaitq subsystem: list of kwaitqueues the thread
   * is waiting for.
   *
   * @note: a RUNNING or READY thread might be in one or more
   * waitqueues ! The only property we have is that, among these
   * waitqueues (if any), _at least_ one has woken the thread.
   */
  struct sos_kwaitq_entry *kwaitq_list;


  /**
   * Some statistics
   */
  struct rusage
  {
    /* Updated by sched.c */
    struct sos_time ru_utime; /* Time spent in user mode */
    struct sos_time ru_stime; /* Time spent in kernel mode */
  } rusage;


  /**
   * Chaining pointers for the list of threads in the parent process
   */
  struct sos_thread *prev_in_process, *next_in_process;


  /**
   * Chaining pointers for global ("gbl") list of threads (debug)
   */
  struct sos_thread *gbl_prev, *gbl_next;
};


/**
 * Definition of the function executed by a kernel thread
 */
typedef void (*sos_kernel_thread_start_routine_t)(void *arg);


/**
 * Initialize the subsystem responsible for thread management
 *
 * Initialize the primary kernel thread so that it can be handled the
 * same way as an ordinary thread created by sos_create_kernel_thread().
 */
sos_ret_t sos_thread_subsystem_setup(sos_vaddr_t init_thread_stack_base_addr,
                                     sos_size_t init_thread_stack_size);


/**
 * Create a new kernel thread
 */
struct sos_thread *
sos_create_kernel_thread(const char *name,
                         sos_kernel_thread_start_routine_t start_func,
                         void *start_arg,
                         sos_sched_priority_t priority);


/**
 * Create a new user thread
 */
struct sos_thread *
sos_create_user_thread(const char *name,
                       struct sos_process *process,
                       sos_uaddr_t user_initial_PC,
                       sos_ui32_t  user_start_arg1,
                       sos_ui32_t  user_start_arg2,
                       sos_uaddr_t user_initial_SP,
                       sos_sched_priority_t priority);


/**
 * Create a new user thread, a copy of the given user thread with the
 * given user context
 */
struct sos_thread *
sos_duplicate_user_thread(const char *name,
                          struct sos_process *process,
                          const struct sos_thread * model_thread,
                          const struct sos_cpu_state * model_uctxt,
                          sos_ui32_t retval);


/**
 * Terminate the execution of the current thread. For kernel threads,
 * it is called by default when the start routine returns.
 */
<< void sos_thread_exit(void) __attribute__((noreturn));
>> void sos_thread_exit() __attribute__((noreturn));


/**
 * Get the identifier of the thread currently running on CPU. Trivial
 * function.
 */
<< struct sos_thread *sos_thread_get_current(void);
>> struct sos_thread *sos_thread_get_current();


/**
 * If thr == NULL, get the priority of the current thread. Trivial
 * function.
 *
 * @note NOT protected against interrupts
 */
sos_sched_priority_t sos_thread_get_priority(struct sos_thread *thr);


/**
 * If thr == NULL, get the state of the current thread. Trivial
 * function.
 *
 * @note NOT protected against interrupts
 */
sos_thread_state_t sos_thread_get_state(struct sos_thread *thr);


/**
 * If thr == NULL, set the priority of the current thread
 *
 * @note NO context-switch ever occurs in this function !
 */
sos_ret_t sos_thread_set_priority(struct sos_thread *thr,
                                  sos_sched_priority_t priority);


/**
 * Yield CPU to another ready thread.
 *
 * @note This is a BLOCKING FUNCTION
 */
<< sos_ret_t sos_thread_yield(void);
>> sos_ret_t sos_thread_yield();


/**
 * Release the CPU for (at least) the given delay.
 *
 * @param delay The delay to wait for. If delay == NULL then wait
 * indefinitely, until some event wakes the thread.
 *
 * @return SOS_OK when delay expired (and delay is reset to zero),
 * -SOS_EINTR otherwise (and delay contains the amount of time
 * remaining).
 *
 * @note This is a BLOCKING FUNCTION
 */
sos_ret_t sos_thread_sleep(/* in/out */struct sos_time *delay);


/**
 * Mark the given thread as READY (if not already ready) even if it is
 * blocked in a kwaitq or in a sleep ! As a result, the interrupted
 * kwaitq/sleep function call of the thread will return with
 * -SOS_EINTR.
 *
 * @return -SOS_EINVAL if the thread does not exist, or -SOS_EFATAL if
 * it is marked ZOMBIE.
 *
 * @note As a result, the semaphore/mutex/condition/... functions'
 * return values SHOULD ALWAYS be checked ! If they are != SOS_OK,
 * then the caller should consider that the resource was not acquired
 * because somebody woke the thread by some other means.
 */
sos_ret_t sos_thread_force_unblock(struct sos_thread *thread);

/**
 * Dump the backtrace of the current thread to the console and/or bochs
 */
void sos_thread_dump_backtrace(sos_bool_t on_console,
                               sos_bool_t on_bochs);


/* **********************************************
 * Restricted functions
 */


/**
 * Restricted function to indicate that we are about to access the given
 * user address space from inside the kernel.
 *
 * @param dest_as The address space we want to access, or NULL to
 * access the current thread's address space
 *
 * @param fixup_retvaddr When != 0, then dest_as MUST BE NULL (we
 * don't allow controlled access from kernel into user space from a
 * foreign thread). In this case, the page fault handler should accept
 * page faults from the kernel in user space, and resolve them in the
 * usual way. The value in retvaddr is where the page fault handler
 * has to return to in case the page fault remains unresolved. The
 * faulting user-space address is kept in
 * thread->fixup_uaccess.faulted_uaddr
 *
 * @note typical values for fixup_retvaddr are obtained by "Labels as
 * values" (see gcc's doc: operator "&&"). See uaccess.c for example
 * code.
 */
sos_ret_t
sos_thread_prepare_user_space_access(struct sos_umem_vmm_as * dest_as,
                                     sos_vaddr_t fixup_retvaddr);


/**
 * Restricted function to signal that we are not accessing any user address
 * space anymore
 */
sos_ret_t
sos_thread_end_user_space_access(void);
376                                                   379 
377                                                   380 
378 /**                                               381 /**
379  * Restricted callback called when a syscall g    382  * Restricted callback called when a syscall goes back in user mode,
380  * to reconfigure the MMU to match that of the    383  * to reconfigure the MMU to match that of the current thread's
381  * process MMU context.                           384  * process MMU context.
382  *                                                385  *
383  * @note The use of this function is RESERVED     386  * @note The use of this function is RESERVED to the syscall wrapper
384  */                                               387  */
385 void sos_thread_prepare_syscall_switch_back(st    388 void sos_thread_prepare_syscall_switch_back(struct sos_cpu_state *cpu_state);
386                                                   389 
387                                                   390 
388 /**                                               391 /**
389  * Restricted callback called when an exceptio    392  * Restricted callback called when an exception handler goes back to
390  * the interrupted thread to reconfigure the M    393  * the interrupted thread to reconfigure the MMU to match that of the
391  * current thread's process MMU context.          394  * current thread's process MMU context.
392  *                                                395  *
393  * @note The use of this function is RESERVED     396  * @note The use of this function is RESERVED to the exception wrappers
394  */                                               397  */
395 void sos_thread_prepare_exception_switch_back(    398 void sos_thread_prepare_exception_switch_back(struct sos_cpu_state *cpu_state);
396                                                   399 
397                                                   400 
398 /**                                               401 /**
399  * Restricted callback called when an IRQ is e    402  * Restricted callback called when an IRQ is entered while the CPU was
400  * NOT already servicing any other IRQ (ie the    403  * NOT already servicing any other IRQ (ie the outermost IRQ handler
401  * is entered). This callback simply updates t    404  * is entered). This callback simply updates the "cpu_state" field so
402  * that IRQ handlers always know the state of     405  * that IRQ handlers always know the state of the interrupted thread,
403  * even if they are imbricated in other IRQ ha    406  * even if they are imbricated in other IRQ handlers.
404  *                                                407  *
405  * @note The use of this function is RESERVED     408  * @note The use of this function is RESERVED to the irq wrappers
406  */                                               409  */
407 void                                              410 void
408 sos_thread_prepare_irq_servicing(struct sos_cp    411 sos_thread_prepare_irq_servicing(struct sos_cpu_state *interrupted_state);
409                                                   412 
410                                                   413 
411 /**                                               414 /**
412  * Restricted callback called when the outermo    415  * Restricted callback called when the outermost IRQ handler returns,
413  * to select the thread to return to. This cal    416  * to select the thread to return to. This callbacks implements:
414  *   - preemption of user threads in user mode    417  *   - preemption of user threads in user mode (time sharing / FIFO)
415  *   - non-preemption of user threads in kerne    418  *   - non-preemption of user threads in kernel mode (interrupted thread
416  *     is restored on CPU "as is")                419  *     is restored on CPU "as is")
417  *   - non-preemption of kernel threads (same     420  *   - non-preemption of kernel threads (same remark)
418  * The MMU is reconfigured correctly to match     421  * The MMU is reconfigured correctly to match the address space of the
419  * selected thread.                               422  * selected thread.
420  *                                                423  *
421  * @return The CPU context of the thread to re    424  * @return The CPU context of the thread to return to
422  *                                                425  *
423  * @note The use of this function is RESERVED     426  * @note The use of this function is RESERVED to the irq wrappers
424  */                                               427  */
425 struct sos_cpu_state *                            428 struct sos_cpu_state *
426 sos_thread_prepare_irq_switch_back(void);         429 sos_thread_prepare_irq_switch_back(void);
427                                                   430 
428                                                   431 
429 #endif /* _SOS_THREAD_H_ */                       432 #endif /* _SOS_THREAD_H_ */
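
The main data-structure change between the two versions is the layout of the
per-state data: in Article 7.5, user_time_spent_in_slice lives inside a
"running" member of the anonymous union, while in Article 9.5 it is an
ordinary field of struct sos_thread. A minimal sketch of what this means for
code reading the field (the helper below is hypothetical, not part of SOS;
field names are those of the listing above):

/* Hypothetical helper, not part of SOS: read the time the thread has
   spent in user mode during its current time slice. */
static struct sos_time
get_user_time_in_slice(const struct sos_thread *thr)
{
  /* Article 7.5 layout: the field sits inside the "running" member of the
     anonymous union, so it shares storage with the "ready" chaining data
     and is only meaningful while the thread is SOS_THR_RUNNING:
         return thr->running.user_time_spent_in_slice;
     Article 9.5 layout: the field is a plain member of struct sos_thread,
     outside the union, so it no longer shares storage with the ready-queue
     links: */
  return thr->user_time_spent_in_slice;
}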
                                                      
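As a usage illustration of the kernel-thread API declared above, here is a
minimal, hypothetical sketch: the thread body, its name and the way the
priority is chosen are made up, and only the calls and types come from this
header.

#include <sos/thread.h>

/* Body of a throw-away kernel thread: give the CPU back to other ready
   threads a few times, then terminate explicitly. */
static void demo_thread_body(void *arg)
{
  int i;
  (void)arg;

  for (i = 0; i < 16; i++)
    sos_thread_yield();   /* BLOCKING: may switch to another ready thread */

  /* sos_thread_exit() is also called implicitly when a kernel thread's
     start routine returns (see the comment above its declaration). */
  sos_thread_exit();
}

/* Hypothetical setup helper: returns NULL on failure, as suggested by the
   pointer return type of sos_create_kernel_thread(). */
static struct sos_thread *spawn_demo_thread(sos_sched_priority_t prio)
{
  return sos_create_kernel_thread("demo", demo_thread_body, NULL, prio);
}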

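The fixup_uaccess field and the sos_thread_prepare_user_space_access()
documentation above describe a fixup mechanism based on gcc's "labels as
values" extension; the reference code lives in uaccess.c, which is not shown
here. The following is only a speculative sketch of that pattern: the helper
name and the -SOS_EFAULT error value are assumptions, not part of this header.

/* Speculative sketch of the kernel-to-user access pattern documented above;
   see uaccess.c for the real SOS code. */
static sos_ret_t example_peek_user_byte(sos_uaddr_t uaddr, char *out)
{
  sos_ret_t retval;

  /* Register the fixup return address using gcc's "labels as values"
     operator (&&label). dest_as is NULL because fixup_retvaddr != 0. */
  sos_thread_prepare_user_space_access(NULL,
                                       (sos_vaddr_t)&&uaccess_failed);

  *out   = *(const char *)uaddr;   /* may page fault into user space */
  retval = SOS_OK;
  goto done;

 uaccess_failed:
  /* The page fault handler transfers control here when the fault at
     fixup_uaccess.faulted_uaddr could not be resolved. */
  retval = -SOS_EFAULT;            /* assumed error code from <sos/errno.h> */

 done:
  sos_thread_end_user_space_access();
  return retval;
}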
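Finally, a small illustrative helper (not part of the SOS API) that maps the
sos_thread_state_t values declared at the top of the file to printable names,
for example when walking the global ("gbl") debug list of threads:

/* Illustrative only: printable name of a thread state. */
static const char *thread_state_name(sos_thread_state_t state)
{
  switch (state)
    {
    case SOS_THR_CREATED: return "CREATED"; /* created, not fully initialized */
    case SOS_THR_READY:   return "READY";   /* waiting for the CPU */
    case SOS_THR_RUNNING: return "RUNNING"; /* currently running on the CPU */
    case SOS_THR_BLOCKED: return "BLOCKED"; /* in kwaitq(s) and/or sleeping */
    case SOS_THR_ZOMBIE:  return "ZOMBIE";  /* terminated, awaiting deletion */
    }
  return "(invalid)";
}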