SimpleOS
The LXR Cross Referencer for SOS

/* Copyright (C) 2004,2005 David Decotigny

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License
   as published by the Free Software Foundation; either version 2
   of the License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
   USA. 
*/
#ifndef _SOS_THREAD_H_
#define _SOS_THREAD_H_

/**
 * @file thread.h
 *
 * SOS Thread management API
 */

#include <sos/errno.h>

/* Forward declaration */
struct sos_thread;

#include <hwcore/cpu_context.h>
#include <sos/sched.h>
#include <sos/kwaitq.h>
#include <sos/time.h>
#include <sos/process.h>

/**
 * The possible states of a valid thread
 */
typedef enum { SOS_THR_CREATED, /**< Thread created, not fully initialized */
               SOS_THR_READY,   /**< Thread fully initialized or
                                     waiting for CPU after having been
                                     blocked or preempted */
               SOS_THR_RUNNING, /**< Thread currently running on CPU */
               SOS_THR_BLOCKED, /**< Thread waiting for I/O (+ in at LEAST
                                     one kwaitq) and/or sleeping (+ in NO
                                     kwaitq) */
               SOS_THR_ZOMBIE,  /**< Thread terminated execution, waiting to
                                     be deleted by kernel */
             } sos_thread_state_t;


/**
 * TCB (Thread Control Block): structure describing a thread. Don't
 * access these fields directly: prefer using the accessor functions
 * below.
 */
struct sos_thread
{
#define SOS_THR_MAX_NAMELEN 32
  char name[SOS_THR_MAX_NAMELEN];

  sos_thread_state_t  state;
  sos_sched_priority_t priority;

  /**
   * The hardware context of the thread.
   *
   * It reflects the CPU state of the thread:
   *  - From an interrupt handler: the state of the thread at the time
   *    of the OUTERMOST IRQ. An IRQ is not allowed to make context
   *    switches, so this context remains valid from the beginning of
   *    the outermost IRQ handler to its end, even if other IRQ
   *    handlers are nested within it. You may safely use it from IRQ
   *    handlers to query the state of the interrupted thread, no
   *    matter how many other IRQ handlers have executed in the
   *    meantime.
   *  - From normal kernel code, exceptions and syscalls: the state of
   *    the thread the last time there was a context switch from this
   *    thread to another one. Thus this field WON'T reflect the
   *    current thread's cpu_state in these cases. So, in these cases,
   *    simply DO NOT USE IT outside thread.c! Note: for syscall and
   *    exception handlers, the VALID state of the interrupted thread
   *    is passed as an argument to the handlers.
   */
  struct sos_cpu_state *cpu_state;

  /* Kernel stack parameters */
  sos_vaddr_t kernel_stack_base_addr;
  sos_size_t  kernel_stack_size;

  /* Process this thread belongs to. Always NULL for a kernel
     thread */
  struct sos_process *process;

  /**
   * Address space currently "squatted" by the thread, or the one that
   * was active when the thread was interrupted/preempted. This is the
   * MMU configuration expected before the cpu_state of the thread is
   * restored on CPU.
   *   - For kernel threads: should normally be NULL, meaning that the
   *     thread squats whatever mm_context is currently set in the
   *     MMU. Might be NON NULL when a kernel thread squats a given
   *     process to manipulate its address space.
   *   - For user threads: should normally be NULL. More precisely:
   *       - in user mode: the thread->process->mm_context is ALWAYS
   *         set on the MMU. squatted_mm_context is ALWAYS NULL in this
   *         situation, meaning that the thread in user mode uses its
   *         process space as expected
   *       - in kernel mode: NULL means that we keep on using the
   *         mm_context currently set on the MMU, which might be the
   *         mm_context of another process. This is natural since a
   *         thread in kernel mode normally only uses data in kernel
   *         space. As a side benefit, this limits the number of TLB
   *         flushes. However, there are exceptions where this
   *         squatted_mm_context will NOT be NULL. One is the
   *         copy_from/to_user API, which can force the effective
   *         mm_context so that the MMU will be (re)configured upon
   *         every context switch to the thread to match the
   *         squatted_mm_context. Another exception is when a parent
   *         thread creates the address space of a child process, in
   *         which case the parent thread might temporarily decide to
   *         switch to the child's process space.
   *
   * This is the SOS implementation of the Linux "Lazy TLB" and
   * address-space loaning.
   */
  struct sos_mm_context *squatted_mm_context;

  /* Data specific to each state */
  union
  {
    struct
    {
      struct sos_sched_queue *rdy_queue;
      struct sos_thread     *rdy_prev, *rdy_next;
    } ready;

    struct
    {
      struct sos_time user_time_spent_in_slice;
    } running;
  }; /* Anonymous union (gcc extension) */


  /*
   * Data used by the kwaitq subsystem: list of kwaitqueues the thread
   * is waiting for.
   *
   * @note A RUNNING or READY thread might be in one or more
   * waitqueues! The only property we have is that, among these
   * waitqueues (if any), _at least_ one has woken the thread.
   */
  struct sos_kwaitq_entry *kwaitq_list;


  /**
   * Some statistics
   */
  struct rusage
  {
    /* Updated by sched.c */
    struct sos_time ru_utime; /* Time spent in user mode */
    struct sos_time ru_stime; /* Time spent in kernel mode */
  } rusage;


  /**
   * Chaining pointers for the list of threads in the parent process
   */
  struct sos_thread *prev_in_process, *next_in_process;


  /**
   * Chaining pointers for the global ("gbl") list of threads (debug)
   */
  struct sos_thread *gbl_prev, *gbl_next;
};


/**
 * Definition of the function executed by a kernel thread
 */
typedef void (*sos_kernel_thread_start_routine_t)(void *arg);


/**
 * Initialize the subsystem responsible for thread management
 *
 * Initialize the primary kernel thread so that it can be handled the
 * same way as an ordinary thread created by sos_create_kernel_thread().
 */
sos_ret_t sos_thread_subsystem_setup(sos_vaddr_t init_thread_stack_base_addr,
                                     sos_size_t init_thread_stack_size);
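
/*
 * Illustrative usage sketch (not part of the original SOS sources):
 * how the kernel boot code might hand the stack it is currently
 * running on over to the thread subsystem, so that the primary kernel
 * thread gets a regular TCB. The symbols bootstrap_stack_bottom and
 * bootstrap_stack_size are assumptions standing for whatever the boot
 * code actually uses.
 *
 * @code
 *   extern sos_vaddr_t bootstrap_stack_bottom;
 *   extern sos_size_t  bootstrap_stack_size;
 *
 *   if (sos_thread_subsystem_setup(bootstrap_stack_bottom,
 *                                  bootstrap_stack_size) != SOS_OK)
 *     {
 *       // The kernel cannot continue without the thread subsystem
 *     }
 * @endcode
 */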


/**
 * Create a new kernel thread
 */
struct sos_thread *
sos_create_kernel_thread(const char *name,
                         sos_kernel_thread_start_routine_t start_func,
                         void *start_arg,
                         sos_sched_priority_t priority);
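
/*
 * Illustrative usage sketch (not part of the original SOS sources):
 * spawning a kernel thread. demo_thread_func and the priority value
 * SOS_SCHED_PRIO_TS_LOWEST are assumptions; any function matching
 * sos_kernel_thread_start_routine_t and any valid sos_sched_priority_t
 * would do.
 *
 * @code
 *   static void demo_thread_func(void *arg)
 *   {
 *     // ... do some work with arg ...
 *     // Returning from here implicitly calls sos_thread_exit()
 *   }
 *
 *   struct sos_thread *t
 *     = sos_create_kernel_thread("demo", demo_thread_func, NULL,
 *                                SOS_SCHED_PRIO_TS_LOWEST);
 *   if (t == NULL)
 *     {
 *       // Thread creation failed (eg out of memory)
 *     }
 * @endcode
 */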


/**
 * Create a new user thread
 */
struct sos_thread *
sos_create_user_thread(const char *name,
                       struct sos_process *process,
                       sos_uaddr_t user_initial_PC,
                       sos_ui32_t  user_start_arg1,
                       sos_ui32_t  user_start_arg2,
                       sos_uaddr_t user_initial_SP,
                       sos_sched_priority_t priority);


/**
 * Terminate the execution of the current thread. For kernel threads,
 * it is called by default when the start routine returns.
 */
void sos_thread_exit() __attribute__((noreturn));


/**
 * Get the identifier of the thread currently running on CPU. Trivial
 * function.
 */
struct sos_thread *sos_thread_get_current();


/**
 * If thr == NULL, get the priority of the current thread. Trivial
 * function.
 *
 * @note NOT protected against interrupts
 */
sos_sched_priority_t sos_thread_get_priority(struct sos_thread *thr);


/**
 * If thr == NULL, get the state of the current thread. Trivial
 * function.
 *
 * @note NOT protected against interrupts
 */
sos_thread_state_t sos_thread_get_state(struct sos_thread *thr);
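
/*
 * Illustrative usage sketch (not part of the original SOS sources):
 * querying the current thread through the accessors above. Since
 * sos_thread_get_priority()/sos_thread_get_state() are not protected
 * against interrupts, the sketch brackets them with the
 * sos_disable_IRQs()/sos_restore_IRQs() pair, assumed here to be the
 * usual SOS interrupt-masking macros from hwcore/irq.h.
 *
 * @code
 *   sos_ui32_t            flags;
 *   struct sos_thread    *self;
 *   sos_sched_priority_t  prio;
 *   sos_thread_state_t    state;
 *
 *   sos_disable_IRQs(flags);
 *   self  = sos_thread_get_current();
 *   prio  = sos_thread_get_priority(NULL);
 *   state = sos_thread_get_state(self);
 *   sos_restore_IRQs(flags);
 * @endcode
 */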


/**
 * If thr == NULL, set the priority of the current thread
 *
 * @note NO context switch ever occurs in this function!
 */
sos_ret_t sos_thread_set_priority(struct sos_thread *thr,
                                  sos_sched_priority_t priority);


/**
 * Yield CPU to another ready thread.
 *
 * @note This is a BLOCKING FUNCTION
 */
sos_ret_t sos_thread_yield();


/**
 * Release the CPU for (at least) the given delay.
 *
 * @param delay The delay to wait for. If delay == NULL, wait
 * indefinitely until some event wakes the thread up.
 *
 * @return SOS_OK when the delay expired (and delay is reset to zero),
 * -SOS_EINTR otherwise (and delay contains the amount of time
 * remaining).
 *
 * @note This is a BLOCKING FUNCTION
 */
sos_ret_t sos_thread_sleep(/* in/out */ struct sos_time *delay);
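
/*
 * Illustrative usage sketch (not part of the original SOS sources):
 * sleeping for roughly one second and detecting an early wake-up. The
 * sec/nanosec field names of struct sos_time are assumed here to
 * match sos/time.h.
 *
 * @code
 *   struct sos_time delay;
 *
 *   delay.sec     = 1;
 *   delay.nanosec = 0;
 *   if (sos_thread_sleep(&delay) != SOS_OK)
 *     {
 *       // Woken up before the delay expired (eg by
 *       // sos_thread_force_unblock); "delay" holds the remaining time
 *     }
 * @endcode
 */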


/**
 * Mark the given thread as READY (if not already ready) even if it is
 * blocked in a kwaitq or in a sleep! As a result, the interrupted
 * kwaitq/sleep function call of the thread will return with
 * -SOS_EINTR.
 *
 * @return -SOS_EINVAL if the thread does not exist, or -SOS_EFATAL if
 * it is marked ZOMBIE.
 *
 * @note As a result, the return values of the semaphore/mutex/condition/...
 * functions SHOULD ALWAYS be checked! If they are != SOS_OK, the
 * caller should consider that the resource was not acquired because
 * somebody woke the thread up by some other means.
 */
sos_ret_t sos_thread_force_unblock(struct sos_thread *thread);
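
/*
 * Illustrative usage sketch (not part of the original SOS sources):
 * one thread forcibly waking another one that is blocked in
 * sos_thread_sleep(). The "target" pointer is an assumption standing
 * for a TCB obtained earlier (eg from sos_create_kernel_thread()).
 *
 * @code
 *   // Waker side:
 *   if (sos_thread_force_unblock(target) != SOS_OK)
 *     {
 *       // target does not exist or is already a ZOMBIE
 *     }
 *
 *   // Sleeper side: the interrupted call returns -SOS_EINTR, so its
 *   // return value must be checked before assuming the delay elapsed
 *   // (the same rule applies to semaphore/mutex/condition functions).
 * @endcode
 */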

/**
 * Dump the backtrace of the current thread to console and/or bochs
 */
void sos_thread_dump_backtrace(sos_bool_t on_console,
                               sos_bool_t on_bochs);


/* **********************************************
 * Restricted functions
 */


/**
 * Restricted function to change the current mm_context AND the
 * squatted_mm_context of the current thread in order to access the
 * data in this context.
 *
 * @param mm_ctxt The mm_ctxt to restore. Might be NULL, meaning that:
 *    - for a Kernel thread: the current MMU configuration is never
 *      modified. The address space to use is limited to the kernel
 *      space; the user space might change due to preemptions by other
 *      processes
 *    - for a User thread in kernel mode: same as for kernel threads
 *    - when a User thread goes back to user context: the MMU will
 *      be reconfigured to match the mm_context of the thread's
 *      process
 *
 * @note A non-NULL parameter is allowed only if the
 * squatted_mm_context is not already set. A NULL parameter is allowed
 * only if the squatted_mm_context was already set.
 *
 * @note The use of this function is RESERVED to the syscall handler
 * and the copy_from/to_user functions
 */
sos_ret_t
sos_thread_change_current_mm_context(struct sos_mm_context *mm_ctxt);
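
/*
 * Illustrative sketch (not part of the original SOS sources): the
 * squat/unsquat pattern that a copy_to_user-style helper could
 * follow. demo_copy_with_squat() is a hypothetical helper; the real
 * copy_from/to_user code lives in the user-access layer.
 *
 * @code
 *   static void demo_copy_with_squat(struct sos_mm_context *dest_mm_ctxt)
 *   {
 *     // Squat the destination address space: the MMU will be
 *     // (re)configured to it until we release it below
 *     sos_thread_change_current_mm_context(dest_mm_ctxt);
 *
 *     // ... access the user addresses mapped by dest_mm_ctxt here ...
 *
 *     // Un-squat: back to the default "lazy" behaviour. NULL is
 *     // allowed only because we set a non-NULL context just above.
 *     sos_thread_change_current_mm_context(NULL);
 *   }
 * @endcode
 */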


/**
 * Restricted callback called when a syscall goes back to user mode,
 * to reconfigure the MMU to match the MMU context of the current
 * thread's process.
 *
 * @note The use of this function is RESERVED to the syscall wrapper
 */
void sos_thread_prepare_syscall_switch_back(struct sos_cpu_state *cpu_state);


/**
 * Restricted callback called when an exception handler goes back to
 * the interrupted thread, to reconfigure the MMU to match the MMU
 * context of the current thread's process.
 *
 * @note The use of this function is RESERVED to the exception wrappers
 */
void sos_thread_prepare_exception_switch_back(struct sos_cpu_state *cpu_state);


/**
 * Restricted callback called when an IRQ is entered while the CPU was
 * NOT already servicing any other IRQ (ie the outermost IRQ handler
 * is entered). This callback simply updates the "cpu_state" field so
 * that IRQ handlers always know the state of the interrupted thread,
 * even if they are nested inside other IRQ handlers.
 *
 * @note The use of this function is RESERVED to the irq wrappers
 */
void
sos_thread_prepare_irq_servicing(struct sos_cpu_state *interrupted_state);


/**
 * Restricted callback called when the outermost IRQ handler returns,
 * to select the thread to return to. This callback implements:
 *   - preemption of user threads in user mode (time sharing / FIFO)
 *   - non-preemption of user threads in kernel mode (interrupted thread
 *     is restored on CPU "as is")
 *   - non-preemption of kernel threads (same remark)
 * The MMU is reconfigured correctly to match the address space of the
 * selected thread.
 *
 * @return The CPU context of the thread to return to
 *
 * @note The use of this function is RESERVED to the irq wrappers
 */
struct sos_cpu_state *
sos_thread_prepare_irq_switch_back(void);
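
/*
 * Illustrative sketch (not part of the original SOS sources): the
 * shape of an IRQ wrapper using the two callbacks above. In SOS the
 * real wrappers are written in assembly; this C outline only shows
 * the intended call sequence, and handle_the_device() stands for
 * whatever device-specific handler gets invoked.
 *
 * @code
 *   struct sos_cpu_state *demo_outermost_irq_wrapper(
 *                             struct sos_cpu_state *interrupted_state)
 *   {
 *     // Entering the outermost IRQ: publish the interrupted state so
 *     // that (possibly nested) IRQ handlers can query it safely
 *     sos_thread_prepare_irq_servicing(interrupted_state);
 *
 *     handle_the_device();
 *
 *     // Leaving the outermost IRQ: pick the thread to resume (maybe a
 *     // different one, if a user thread got preempted) and return the
 *     // CPU context to restore; the MMU is reconfigured accordingly
 *     return sos_thread_prepare_irq_switch_back();
 *   }
 * @endcode
 */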


#endif /* _SOS_THREAD_H_ */
