SimpleOS

LXR

Navigation



Site hébergé par : enix

The LXR Cross Referencer for SOS

source navigation ]
diff markup ]
identifier search ]
general search ]
 
 
Article:1 ] [ 2 ] [ 3 ] [ 4 ] [ 5 ] [ 6 ] [ 6.5 ] [ 7 ] [ 7.5 ] [ 8 ] [ 9 ] [ 9.5 ]

Diff markup

Differences between /sos/thread.h (Article 7) and /sos/thread.h (Article 7.5)


001 /* Copyright (C) 2004,2005 David Decotigny        001 /* Copyright (C) 2004,2005 David Decotigny
002                                                   002 
003    This program is free software; you can redi    003    This program is free software; you can redistribute it and/or
004    modify it under the terms of the GNU Genera    004    modify it under the terms of the GNU General Public License
005    as published by the Free Software Foundatio    005    as published by the Free Software Foundation; either version 2
006    of the License, or (at your option) any lat    006    of the License, or (at your option) any later version.
007                                                   007    
008    This program is distributed in the hope tha    008    This program is distributed in the hope that it will be useful,
009    but WITHOUT ANY WARRANTY; without even the     009    but WITHOUT ANY WARRANTY; without even the implied warranty of
010    MERCHANTABILITY or FITNESS FOR A PARTICULAR    010    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
011    GNU General Public License for more details    011    GNU General Public License for more details.
012                                                   012    
013    You should have received a copy of the GNU     013    You should have received a copy of the GNU General Public License
014    along with this program; if not, write to t    014    along with this program; if not, write to the Free Software
015    Foundation, Inc., 59 Temple Place - Suite 3    015    Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
016    USA.                                           016    USA. 
017 */                                                017 */
018 #ifndef _SOS_THREAD_H_                            018 #ifndef _SOS_THREAD_H_
019 #define _SOS_THREAD_H_                            019 #define _SOS_THREAD_H_
020                                                   020 
021 /**                                               021 /**
022  * @file thread.h                                 022  * @file thread.h
023  *                                                023  *
024  * SOS Thread management API                      024  * SOS Thread management API
025  */                                               025  */
026                                                   026 
027 #include <sos/errno.h>                            027 #include <sos/errno.h>
028                                                   028 
029 /* Forward declaration */                         029 /* Forward declaration */
030 struct sos_thread;                                030 struct sos_thread;
031                                                   031 
032 #include <hwcore/cpu_context.h>                   032 #include <hwcore/cpu_context.h>
033 #include <sos/sched.h>                            033 #include <sos/sched.h>
034 #include <sos/kwaitq.h>                           034 #include <sos/kwaitq.h>
035 #include <sos/time.h>                             035 #include <sos/time.h>
036 #include <sos/process.h>                          036 #include <sos/process.h>
                                                   >> 037 #include <sos/umem_vmm.h>
037                                                   038 
038 /**                                               039 /**
039  * The possible states of a valid thread          040  * The possible states of a valid thread
040  */                                               041  */
041 typedef enum { SOS_THR_CREATED, /**< Thread cr    042 typedef enum { SOS_THR_CREATED, /**< Thread created, not fully initialized */
042                SOS_THR_READY,   /**< Thread fu    043                SOS_THR_READY,   /**< Thread fully initialized or
043                                      waiting f    044                                      waiting for CPU after having been
044                                      blocked o    045                                      blocked or preempted */
045                SOS_THR_RUNNING, /**< Thread cu    046                SOS_THR_RUNNING, /**< Thread currently running on CPU */
046                SOS_THR_BLOCKED, /**< Thread wa    047                SOS_THR_BLOCKED, /**< Thread waiting for I/O (+ in at LEAST
047                                      one kwait    048                                      one kwaitq) and/or sleeping (+ in NO
048                                      kwaitq) *    049                                      kwaitq) */
049                SOS_THR_ZOMBIE,  /**< Thread te    050                SOS_THR_ZOMBIE,  /**< Thread terminated execution, waiting to
050                                      be delete    051                                      be deleted by kernel */
051              } sos_thread_state_t;                052              } sos_thread_state_t;
052                                                   053 
053                                                   054 
054 /**                                               055 /**
055  * TCB (Thread Control Block): structure descr    056  * TCB (Thread Control Block): structure describing a thread. Don't
056  * access these fields directly: prefer using     057  * access these fields directly: prefer using the accessor functions
057  * below.                                         058  * below.
058  */                                               059  */
059 struct sos_thread                                 060 struct sos_thread
060 {                                                 061 {
061 #define SOS_THR_MAX_NAMELEN 32                    062 #define SOS_THR_MAX_NAMELEN 32
062   char name[SOS_THR_MAX_NAMELEN];                 063   char name[SOS_THR_MAX_NAMELEN];
063                                                   064 
064   sos_thread_state_t  state;                      065   sos_thread_state_t  state;
065   sos_sched_priority_t priority;                  066   sos_sched_priority_t priority;
066                                                   067 
067   /**                                             068   /**
068    * The hardware context of the thread.          069    * The hardware context of the thread.
069    *                                              070    *
070    * It will reflect the CPU state of the thre    071    * It will reflect the CPU state of the thread:
071    *  - From an interrupt handler: the state o    072    *  - From an interrupt handler: the state of the thread at the time
072    *    of the OUTERMOST irq. An IRQ is not al    073    *    of the OUTERMOST irq. An IRQ is not allowed to make context
073    *    switches, so this context will remain     074    *    switches, so this context will remain valid from the beginning of
074    *    the outermost IRQ handler to the end o    075    *    the outermost IRQ handler to the end of it, no matter if there
075    *    are other IRQ handlers nesting in one     076    *    are other IRQ handlers nesting in one another. You may safely
076    *    use it from IRQ handlers to query the     077    *    use it from IRQ handlers to query the state of the interrupted
077    *    thread, no matter if there has been ot    078    *    thread, no matter if there has been other IRQ handlers
078    *    executing meanwhile.                      079    *    executing meanwhile.
079    *  - From normal kernel code, exceptions an    080    *  - From normal kernel code, exceptions and syscall: the state of
080    *    the thread the last time there was a c    081    *    the thread the last time there was a context switch from this
081    *    thread to another one. Thus this field    082    *    thread to another one. Thus this field WON'T reflect the
082    *    current thread's cpu_state in these ca    083    *    current thread's cpu_state in these cases. So, in these cases,
083    *    simply DO NOT USE IT outside thread.c     084    *    simply DO NOT USE IT outside thread.c ! Note: for syscall and
084    *    exception handlers, the VALID state of    085    *    exception handlers, the VALID state of the interrupted thread is
085    *    passed as an argument to the handlers.    086    *    passed as an argument to the handlers.
086    */                                             087    */
087   struct sos_cpu_state *cpu_state;                088   struct sos_cpu_state *cpu_state;
088                                                   089 
089   /* Kernel stack parameters */                   090   /* Kernel stack parameters */
090   sos_vaddr_t kernel_stack_base_addr;             091   sos_vaddr_t kernel_stack_base_addr;
091   sos_size_t  kernel_stack_size;                  092   sos_size_t  kernel_stack_size;
092                                                   093 
093   /* Process this thread belongs to. Always NU    094   /* Process this thread belongs to. Always NULL for a kernel
094      thread */                                    095      thread */
095   struct sos_process *process;                    096   struct sos_process *process;
096                                                   097 
097   /**                                             098   /**
098    * Address space currently "squatted" by the    099    * Address space currently "squatted" by the thread, or used to be
099    * active when the thread was interrupted/pr    100    * active when the thread was interrupted/preempted. This is the MMU
100    * configuration expected before the cpu_sta    101    * configuration expected before the cpu_state of the thread is
101    * restored on CPU.                             102    * restored on CPU.
102    *   - For kernel threads: should normally b    103    *   - For kernel threads: should normally be NULL, meaning that the
103    *     thread will squat the current mm_cont    104    *     thread will squat the current mm_context currently set in the
104    *     MMU. Might be NON NULL when a kernel     105    *     MMU. Might be NON NULL when a kernel thread squats a given
105    *     process to manipulate its address spa    106    *     process to manipulate its address space.
106    *   - For user threads: should normally be     107    *   - For user threads: should normally be NULL. More precisely:
107    *       - in user mode: the thread->process    108    *       - in user mode: the thread->process.mm_context is ALWAYS
108    *         set on MMU. squatted_mm_context i    109    *         set on MMU. squatted_mm_context is ALWAYS NULL in this
109    *         situation, meaning that the threa    110    *         situation, meaning that the thread in user mode uses its
110    *         process-space as expected            111    *         process-space as expected
111    *       - in kernel mode: NULL means that w    112    *       - in kernel mode: NULL means that we keep on using the
112    *         mm_context currently set on MMU,     113    *         mm_context currently set on MMU, which might be the
113    *         mm_context of another process. Th    114    *         mm_context of another process. This is natural since a
114    *         thread in kernel mode normally on    115    *         thread in kernel mode normally only uses data in kernel
115    *         space. BTW, this limits the numbe    116    *         space. BTW, this limits the number of TLB flushes. However,
116    *         there are exceptions where this s    117    *         there are exceptions where this squatted_mm_context will
117    *         NOT be NULL. One is the copy_from    118    *         NOT be NULL. One is the copy_from/to_user API, which can
118    *         force the effective mm_context so    119    *         force the effective mm_context so that the MMU will be
119    *         (re)configured upon every context    120    *         (re)configured upon every context switch to the thread to match
120    *         the squatted_mm_context. Another     121    *         the squatted_mm_context. Another exception is when a parent
121    *         thread creates the address space     122    *         thread creates the address space of a child process, in
122    *         which case the parent thread migh    123    *         which case the parent thread might temporarily decide to
123    *         switch to the child's process spa    124    *         switch to the child's process space.
124    *                                              125    *
125    * This is the SOS implementation of the Lin    126    * This is the SOS implementation of the Linux "Lazy TLB" and
126    * address-space loaning.                       127    * address-space loaning.
127    */                                             128    */
128   struct sos_mm_context *squatted_mm_context;     129   struct sos_mm_context *squatted_mm_context;
129                                                   130 
130   /* Data specific to each state */               131   /* Data specific to each state */
131   union                                           132   union
132   {                                               133   {
133     struct                                        134     struct
134     {                                             135     {
135       struct sos_sched_queue *rdy_queue;          136       struct sos_sched_queue *rdy_queue;
136       struct sos_thread     *rdy_prev, *rdy_ne    137       struct sos_thread     *rdy_prev, *rdy_next;
137     } ready;                                      138     } ready;
138                                                   139 
139     struct                                        140     struct
140     {                                             141     {
141       struct sos_time user_time_spent_in_slice    142       struct sos_time user_time_spent_in_slice;
142     } running;                                    143     } running;
143   }; /* Anonymous union (gcc extension) */        144   }; /* Anonymous union (gcc extension) */
144                                                   145 
145                                                   146 
                                                   >> 147   /**
                                                   >> 148    * When a thread in kernel mode is accessing the user space, it may
                                                   >> 149    * page fault in the usual way only if return_vaddr below is
                                                   >> 150    * set. This structure holds information regarding what to do when a
                                                   >> 151    * page fault from kernel into user space could not be resolved.
                                                   >> 152    *
                                                   >> 153    * @note the fields below should be considered read-only. @see
                                                   >> 154    * sos_thread_prepare_user_space_access() and @see
                                                   >> 155    * sos_thread_end_user_space_access() to modify them.
                                                   >> 156    */
                                                   >> 157   struct
                                                   >> 158   {
                                                   >> 159     /** This is the address (in kernel code) to return to when a
                                                   >> 160         user-space page fault from a kernel-mode thread could not be
                                                   >> 161         resolved.  @see sos_thread_prepare_user_space_access() */
                                                   >> 162     sos_vaddr_t return_vaddr;
                                                   >> 163 
                                                   >> 164     /** This is the address of the user-space address that caused the
                                                   >> 165         unresolved page fault (set by the page fault handler) */
                                                   >> 166     sos_uaddr_t faulted_uaddr;
                                                   >> 167   } fixup_uaccess;
                                                   >> 168 
                                                   >> 169 
146   /*                                              170   /*
147    * Data used by the kwaitq subsystem: list o    171    * Data used by the kwaitq subsystem: list of kwaitqueues the thread
148    * is waiting for.                              172    * is waiting for.
149    *                                              173    *
150    * @note: a RUNNING or READY thread might be    174    * @note: a RUNNING or READY thread might be in one or more
151    * waitqueues ! The only property we have is    175    * waitqueues ! The only property we have is that, among these
152    * waitqueues (if any), _at least_ one has w    176    * waitqueues (if any), _at least_ one has woken the thread.
153    */                                             177    */
154   struct sos_kwaitq_entry *kwaitq_list;           178   struct sos_kwaitq_entry *kwaitq_list;
155                                                   179 
156                                                   180 
157   /**                                             181   /**
158    * Some statistics                              182    * Some statistics
159    */                                             183    */
160   struct rusage                                   184   struct rusage
161   {                                               185   {
162     /* Updated by sched.c */                      186     /* Updated by sched.c */
163     struct sos_time ru_utime; /* Time spent in    187     struct sos_time ru_utime; /* Time spent in user mode */
164     struct sos_time ru_stime; /* Time spent in    188     struct sos_time ru_stime; /* Time spent in kernel mode */
165   } rusage;                                       189   } rusage;
166                                                   190 
167                                                   191 
168   /**                                             192   /**
169    * Chaining pointers for the list of threads    193    * Chaining pointers for the list of threads in the parent process
170    */                                             194    */
171   struct sos_thread *prev_in_process, *next_in    195   struct sos_thread *prev_in_process, *next_in_process;
172                                                   196 
173                                                   197 
174   /**                                             198   /**
175    * Chaining pointers for global ("gbl") list    199    * Chaining pointers for global ("gbl") list of threads (debug)
176    */                                             200    */
177   struct sos_thread *gbl_prev, *gbl_next;         201   struct sos_thread *gbl_prev, *gbl_next;
178 };                                                202 };
179                                                   203 
180                                                   204 
181 /**                                               205 /**
182  * Definition of the function executed by a ke    206  * Definition of the function executed by a kernel thread
183  */                                               207  */
184 typedef void (*sos_kernel_thread_start_routine    208 typedef void (*sos_kernel_thread_start_routine_t)(void *arg);
185                                                   209 
186                                                   210 
187 /**                                               211 /**
188  * Initialize the subsystem responsible for th    212  * Initialize the subsystem responsible for thread management
189  *                                                213  *
190  * Initialize the primary kernel thread so tha    214  * Initialize the primary kernel thread so that it can be handled the
191  * same way as an ordinary thread created by s    215  * same way as an ordinary thread created by sos_thread_create().
192  */                                               216  */
193 sos_ret_t sos_thread_subsystem_setup(sos_vaddr    217 sos_ret_t sos_thread_subsystem_setup(sos_vaddr_t init_thread_stack_base_addr,
194                                      sos_size_    218                                      sos_size_t init_thread_stack_size);
195                                                   219 
196                                                   220 
197 /**                                               221 /**
198  * Create a new kernel thread                     222  * Create a new kernel thread
199  */                                               223  */
200 struct sos_thread *                               224 struct sos_thread *
201 sos_create_kernel_thread(const char *name,        225 sos_create_kernel_thread(const char *name,
202                          sos_kernel_thread_sta    226                          sos_kernel_thread_start_routine_t start_func,
203                          void *start_arg,         227                          void *start_arg,
204                          sos_sched_priority_t     228                          sos_sched_priority_t priority);
205                                                   229 
206                                                   230 
207 /**                                               231 /**
208  * Create a new user thread                       232  * Create a new user thread
209  */                                               233  */
210 struct sos_thread *                               234 struct sos_thread *
211 sos_create_user_thread(const char *name,          235 sos_create_user_thread(const char *name,
212                        struct sos_process *pro    236                        struct sos_process *process,
213                        sos_uaddr_t user_initia    237                        sos_uaddr_t user_initial_PC,
214                        sos_ui32_t  user_start_    238                        sos_ui32_t  user_start_arg1,
215                        sos_ui32_t  user_start_    239                        sos_ui32_t  user_start_arg2,
216                        sos_uaddr_t user_initia    240                        sos_uaddr_t user_initial_SP,
217                        sos_sched_priority_t pr    241                        sos_sched_priority_t priority);
218                                                   242 
219                                                   243 
220 /**                                               244 /**
                                                   >> 245  * Create a new user thread, copy of the given user thread with the
                                                   >> 246  * given user context
                                                   >> 247  */
                                                   >> 248 struct sos_thread *
                                                   >> 249 sos_duplicate_user_thread(const char *name,
                                                   >> 250                           struct sos_process *process,
                                                   >> 251                           const struct sos_thread * model_thread,
                                                   >> 252                           const struct sos_cpu_state * model_uctxt,
                                                   >> 253                           sos_ui32_t retval);
                                                   >> 254 
                                                   >> 255 
                                                   >> 256 /**
221  * Terminate the execution of the current thre    257  * Terminate the execution of the current thread. For kernel threads,
222  * it is called by default when the start rout    258  * it is called by default when the start routine returns.
223  */                                               259  */
224 void sos_thread_exit() __attribute__((noreturn    260 void sos_thread_exit() __attribute__((noreturn));
225                                                   261 
226                                                   262 
227 /**                                               263 /**
228  * Get the identifier of the thread currently     264  * Get the identifier of the thread currently running on CPU. Trivial
229  * function.                                      265  * function.
230  */                                               266  */
231 struct sos_thread *sos_thread_get_current();      267 struct sos_thread *sos_thread_get_current();
232                                                   268 
233                                                   269 
234 /**                                               270 /**
235  * If thr == NULL, set the priority of the cur    271  * If thr == NULL, set the priority of the current thread. Trivial
236  * function.                                      272  * function.
237  *                                                273  *
238  * @note NOT protected against interrupts         274  * @note NOT protected against interrupts
239  */                                               275  */
240 sos_sched_priority_t sos_thread_get_priority(s    276 sos_sched_priority_t sos_thread_get_priority(struct sos_thread *thr);
241                                                   277 
242                                                   278 
243 /**                                               279 /**
244  * If thr == NULL, get the state of the curren    280  * If thr == NULL, get the state of the current thread. Trivial
245  * function.                                      281  * function.
246  *                                                282  *
247  * @note NOT protected against interrupts         283  * @note NOT protected against interrupts
248  */                                               284  */
249 sos_thread_state_t sos_thread_get_state(struct    285 sos_thread_state_t sos_thread_get_state(struct sos_thread *thr);
250                                                   286 
251                                                   287 
252 /**                                               288 /**
253  * If thr == NULL, set the priority of the cur    289  * If thr == NULL, set the priority of the current thread
254  *                                                290  *
255  * @note NO context-switch ever occurs in this    291  * @note NO context-switch ever occurs in this function !
256  */                                               292  */
257 sos_ret_t sos_thread_set_priority(struct sos_t    293 sos_ret_t sos_thread_set_priority(struct sos_thread *thr,
258                                   sos_sched_pr    294                                   sos_sched_priority_t priority);
259                                                   295 
260                                                   296 
261 /**                                               297 /**
262  * Yield CPU to another ready thread.             298  * Yield CPU to another ready thread.
263  *                                                299  *
264  * @note This is a BLOCKING FUNCTION              300  * @note This is a BLOCKING FUNCTION
265  */                                               301  */
266 sos_ret_t sos_thread_yield();                     302 sos_ret_t sos_thread_yield();
267                                                   303 
268                                                   304 
269 /**                                               305 /**
270  * Release the CPU for (at least) the given de    306  * Release the CPU for (at least) the given delay.
271  *                                                307  *
272  * @param delay The delay to wait for. If dela    308  * @param delay The delay to wait for. If delay == NULL then wait
273  * forever that any event occurs.                 309  * forever that any event occurs.
274  *                                                310  *
275  * @return SOS_OK when delay expired (and dela    311  * @return SOS_OK when delay expired (and delay is reset to zero),
276  * -SOS_EINTR otherwise (and delay contains th    312  * -SOS_EINTR otherwise (and delay contains the amount of time
277  * remaining).                                    313  * remaining).
278  *                                                314  *
279  * @note This is a BLOCKING FUNCTION              315  * @note This is a BLOCKING FUNCTION
280  */                                               316  */
281 sos_ret_t sos_thread_sleep(/* in/out */struct     317 sos_ret_t sos_thread_sleep(/* in/out */struct sos_time *delay);
282                                                   318 
283                                                   319 
284 /**                                               320 /**
285  * Mark the given thread as READY (if not alre    321  * Mark the given thread as READY (if not already ready) even if it is
286  * blocked in a kwaitq or in a sleep ! As a re    322  * blocked in a kwaitq or in a sleep ! As a result, the interrupted
287  * kwaitq/sleep function call of the thread wi    323  * kwaitq/sleep function call of the thread will return with
288  * -SOS_EINTR.                                    324  * -SOS_EINTR.
289  *                                                325  *
290  * @return -SOS_EINVAL if thread does not exis    326  * @return -SOS_EINVAL if thread does not exist, or -SOS_EFATAL if
291  * marked ZOMBIE.                                 327  * marked ZOMBIE.
292  *                                                328  *
293  * @note As a result, the semaphore/mutex/cond    329  * @note As a result, the semaphore/mutex/conditions/... functions
294  * return values SHOULD ALWAYS be checked ! If    330  * return values SHOULD ALWAYS be checked ! If they are != SOS_OK,
295  * then the caller should consider that the re    331  * then the caller should consider that the resource is not acquired
296  * because somebody woke the thread by some wa    332  * because somebody woke the thread by some way.
297  */                                               333  */
298 sos_ret_t sos_thread_force_unblock(struct sos_    334 sos_ret_t sos_thread_force_unblock(struct sos_thread *thread);
299                                                   335 
300 /**                                               336 /**
301  * Dump the backtrace of the current thread to    337  * Dump the backtrace of the current thread to console and/or bochs
302  */                                               338  */
303 void sos_thread_dump_backtrace(sos_bool_t on_c    339 void sos_thread_dump_backtrace(sos_bool_t on_console,
304                                sos_bool_t on_b    340                                sos_bool_t on_bochs);
305                                                   341 
306                                                   342 
307 /* *******************************************    343 /* **********************************************
308  * Restricted functions                           344  * Restricted functions
309  */                                               345  */
310                                                   346 
311                                                   347 
312 /**                                               348 /**
313  * Restricted function to change the current m !! 349  * Restricted function to indicate that we are to access the given
314  * squatted_mm_context of the current thread i !! 350  * user address space from inside the kernel.
315  * in this context                             << 
316  *                                             << 
317  *   @param mm_ctxt The mm_ctxt to restore. Mi << 
318  *    - for a Kernel thread: the current MMU c << 
319  *      modified. The address space to use is  << 
320  *      space, user space might change due to  << 
321  *      processes                              << 
322  *    - for a User thread in kernel mode: same << 
323  *    - when a User thread will go back in use << 
324  *      be reconfigured to match the mm_contex << 
325  *      process                                << 
326  *                                             << 
327  * @note A non NULL parameter is allowed only  << 
328  * squatted_mm_context is not already set. A N << 
329  * only if the squatted_mm_context was already << 
330  *                                                351  *
331  * @note The use of this function is RESERVED  !! 352  * @param dest_as The address space we want to access, or NULL to
332  * and the copy_from/to_user functions         !! 353  * access current thread's address space
                                                   >> 354  *
                                                   >> 355  * @param fixup_retvaddr When != 0, then dest_as MUST BE NULL (we
                                                   >> 356  * don't allow controlled access from kernel into user space from a
                                                   >> 357  * foreign thread). In this case, the page fault handler should accept
                                                   >> 358  * page faults from the kernel in user space, and resolve them in the
                                                   >> 359  * usual way. The value in retvaddr is where the page fault handler
                                                   >> 360  * has to return to in case the page fault remains unresolved. The
                                                   >> 361  * address of the faulting address is kept in
                                                   >> 362  * éthread->fixup_uaccess.faulted_uaddr
                                                   >> 363  *
                                                   >> 364  * @note typical values for fixup_retvaddr are obtained by "Labels as
                                                   >> 365  * values" (see gcc's doc: operator "&&"). See uaccess.c for example
                                                   >> 366  * code.
                                                   >> 367  */
                                                   >> 368 sos_ret_t
                                                   >> 369 sos_thread_prepare_user_space_access(struct sos_umem_vmm_as * dest_as,
                                                   >> 370                                      sos_vaddr_t fixup_retvaddr);
                                                   >> 371 
                                                   >> 372 
                                                   >> 373 /**
                                                   >> 374  * Restricted function to signal we are not accessing any user address
                                                   >> 375  * space anymore
333  */                                               376  */
334 sos_ret_t                                         377 sos_ret_t
335 sos_thread_change_current_mm_context(struct so !! 378 sos_thread_end_user_space_access(void);
336                                                   379 
337                                                   380 
338 /**                                               381 /**
339  * Restricted callback called when a syscall g    382  * Restricted callback called when a syscall goes back in user mode,
340  * to reconfigure the MMU to match that of the    383  * to reconfigure the MMU to match that of the current thread's
341  * process MMU context.                           384  * process MMU context.
342  *                                                385  *
343  * @note The use of this function is RESERVED     386  * @note The use of this function is RESERVED to the syscall wrapper
344  */                                               387  */
345 void sos_thread_prepare_syscall_switch_back(st    388 void sos_thread_prepare_syscall_switch_back(struct sos_cpu_state *cpu_state);
346                                                   389 
347                                                   390 
348 /**                                               391 /**
349  * Restricted callback called when an exceptio    392  * Restricted callback called when an exception handler goes back to
350  * the interrupted thread to reconfigure the M    393  * the interrupted thread to reconfigure the MMU to match that of the
351  * current thread's process MMU context.          394  * current thread's process MMU context.
352  *                                                395  *
353  * @note The use of this function is RESERVED     396  * @note The use of this function is RESERVED to the exception wrappers
354  */                                               397  */
355 void sos_thread_prepare_exception_switch_back(    398 void sos_thread_prepare_exception_switch_back(struct sos_cpu_state *cpu_state);
356                                                   399 
357                                                   400 
358 /**                                               401 /**
359  * Restricted callback called when an IRQ is e    402  * Restricted callback called when an IRQ is entered while the CPU was
360  * NOT already servicing any other IRQ (ie the    403  * NOT already servicing any other IRQ (ie the outermost IRQ handler
361  * is entered). This callback simply updates t    404  * is entered). This callback simply updates the "cpu_state" field so
362  * that IRQ handlers always know the state of     405  * that IRQ handlers always know the state of the interrupted thread,
363  * even if they are imbricated in other IRQ ha    406  * even if they are imbricated in other IRQ handlers.
364  *                                                407  *
365  * @note The use of this function is RESERVED     408  * @note The use of this function is RESERVED to the irq wrappers
366  */                                               409  */
367 void                                              410 void
368 sos_thread_prepare_irq_servicing(struct sos_cp    411 sos_thread_prepare_irq_servicing(struct sos_cpu_state *interrupted_state);
369                                                   412 
370                                                   413 
371 /**                                               414 /**
372  * Restricted callback called when the outermo    415  * Restricted callback called when the outermost IRQ handler returns,
373  * to select the thread to return to. This cal    416  * to select the thread to return to. This callbacks implements:
374  *   - preemption of user threads in user mode    417  *   - preemption of user threads in user mode (time sharing / FIFO)
375  *   - non-preemption of user threads in kerne    418  *   - non-preemption of user threads in kernel mode (interrupted thread
376  *     is restored on CPU "as is")                419  *     is restored on CPU "as is")
377  *   - non-preemption of kernel threads (same     420  *   - non-preemption of kernel threads (same remark)
378  * The MMU is reconfigured correctly to match     421  * The MMU is reconfigured correctly to match the address space of the
379  * selected thread.                               422  * selected thread.
380  *                                                423  *
381  * @return The CPU context of the thread to re    424  * @return The CPU context of the thread to return to
382  *                                                425  *
383  * @note The use of this function is RESERVED     426  * @note The use of this function is RESERVED to the irq wrappers
384  */                                               427  */
385 struct sos_cpu_state *                            428 struct sos_cpu_state *
386 sos_thread_prepare_irq_switch_back(void);         429 sos_thread_prepare_irq_switch_back(void);
387                                                   430 
388                                                   431 
389 #endif /* _SOS_THREAD_H_ */                       432 #endif /* _SOS_THREAD_H_ */
                                                      

source navigation ] diff markup ] identifier search ] general search ]