SimpleOS

LXR

Navigation



Site hébergé par : enix

The LXR Cross Referencer for SOS

source navigation ]
diff markup ]
identifier search ]
general search ]
 
 
Article:1 ] [ 2 ] [ 3 ] [ 4 ] [ 5 ] [ 6 ] [ 6.5 ] [ 7 ] [ 7.5 ] [ 8 ] [ 9 ] [ 9.5 ]

001 /* Copyright (C) 2004,2005 David Decotigny
002 
003    This program is free software; you can redistribute it and/or
004    modify it under the terms of the GNU General Public License
005    as published by the Free Software Foundation; either version 2
006    of the License, or (at your option) any later version.
007    
008    This program is distributed in the hope that it will be useful,
009    but WITHOUT ANY WARRANTY; without even the implied warranty of
010    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
011    GNU General Public License for more details.
012    
013    You should have received a copy of the GNU General Public License
014    along with this program; if not, write to the Free Software
015    Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
016    USA. 
017 */
018 #ifndef _SOS_THREAD_H_
019 #define _SOS_THREAD_H_
020 
021 /**
022  * @file thread.h
023  *
024  * SOS Thread management API
025  */
026 
027 #include <sos/errno.h>
028 
029 /* Forward declaration */
030 struct sos_thread;
031 
032 #include <hwcore/cpu_context.h>
033 #include <sos/sched.h>
034 #include <sos/kwaitq.h>
035 #include <sos/time.h>
036 #include <sos/process.h>
037 #include <sos/umem_vmm.h>
038 
/**
 * The possible states of a valid thread (its life cycle runs
 * CREATED -> READY <-> RUNNING/BLOCKED -> ZOMBIE)
 */
typedef enum { SOS_THR_CREATED, /**< Thread created, not fully initialized */
               SOS_THR_READY,   /**< Thread fully initialized or
                                     waiting for CPU after having been
                                     blocked or preempted */
               SOS_THR_RUNNING, /**< Thread currently running on CPU */
               SOS_THR_BLOCKED, /**< Thread waiting for I/O (+ in at LEAST
                                     one kwaitq) and/or sleeping (+ in NO
                                     kwaitq) */
               SOS_THR_ZOMBIE,  /**< Thread terminated execution, waiting to
                                     be deleted by kernel */
             } sos_thread_state_t;
053 
054 
/**
 * TCB (Thread Control Block): structure describing a thread. Don't
 * access these fields directly: prefer using the accessor functions
 * below.
 */
struct sos_thread
{
#define SOS_THR_MAX_NAMELEN 32
  char name[SOS_THR_MAX_NAMELEN]; /**< Human-readable thread name (debugging) */

  sos_thread_state_t  state;      /**< Current state, see sos_thread_state_t */
  sos_sched_priority_t priority;  /**< Scheduling priority of the thread */

  /**
   * The hardware context of the thread.
   *
   * It will reflect the CPU state of the thread:
   *  - From an interrupt handler: the state of the thread at the time
   *    of the OUTERMOST irq. An IRQ is not allowed to make context
   *    switches, so this context will remain valid from the beginning of
   *    the outermost IRQ handler to the end of it, no matter if there
   *    are other IRQ handlers nesting in one another. You may safely
   *    use it from IRQ handlers to query the state of the interrupted
   *    thread, no matter if there has been other IRQ handlers
   *    executing meanwhile.
   *  - From normal kernel code, exceptions and syscall: the state of
   *    the thread the last time there was a context switch from this
   *    thread to another one. Thus this field WON'T reflect the
   *    current thread's cpu_state in these cases. So, in these cases,
   *    simply DO NOT USE IT outside thread.c ! Note: for syscall and
   *    exception handlers, the VALID state of the interrupted thread is
   *    passed as an argument to the handlers.
   */
  struct sos_cpu_state *cpu_state;

  /* Kernel stack parameters */
  sos_vaddr_t kernel_stack_base_addr; /**< Lowest address of the kernel stack */
  sos_size_t  kernel_stack_size;      /**< Size (in bytes) of the kernel stack */

  /* Process this thread belongs to. Always NULL for a kernel
     thread */
  struct sos_process *process;

  /**
   * Address space currently "squatted" by the thread, or used to be
   * active when the thread was interrupted/preempted. This is the MMU
   * configuration expected before the cpu_state of the thread is
   * restored on CPU.
   *   - For kernel threads: should normally be NULL, meaning that the
   *     thread will squat the current mm_context currently set in the
   *     MMU. Might be NON NULL when a kernel thread squats a given
   *     process to manipulate its address space.
   *   - For user threads: should normally be NULL. More precisely:
   *       - in user mode: the thread->process.mm_context is ALWAYS
   *         set on MMU. squatted_mm_context is ALWAYS NULL in this
   *         situation, meaning that the thread in user mode uses its
   *         process-space as expected
   *       - in kernel mode: NULL means that we keep on using the
   *         mm_context currently set on MMU, which might be the
   *         mm_context of another process. This is natural since a
   *         thread in kernel mode normally only uses data in kernel
   *         space. BTW, this limits the number of TLB flushes. However,
   *         there are exceptions where this squatted_mm_context will
   *         NOT be NULL. One is the copy_from/to_user API, which can
   *         force the effective mm_context so that the MMU will be
   *         (re)configured upon every context switch to the thread to
   *         match the squatted_mm_context. Another exception is when a
   *         parent thread creates the address space of a child process,
   *         in which case the parent thread might temporarily decide to
   *         switch to the child's process space.
   *
   * This is the SOS implementation of the Linux "Lazy TLB" and
   * address-space loaning.
   */
  struct sos_mm_context *squatted_mm_context;

  /* Data specific to each state: only the member matching the current
     state is meaningful */
  union
  {
    struct
    {
      struct sos_sched_queue *rdy_queue;          /**< Ready queue the thread is linked in */
      struct sos_thread     *rdy_prev, *rdy_next; /**< Links inside that ready queue */
    } ready;
  }; /* Anonymous union (gcc extension) */

  /** User CPU time consumed in the current scheduling slice
      (presumably maintained by sched.c — see rusage below) */
  struct sos_time user_time_spent_in_slice;


  /**
   * When a thread in kernel mode is accessing the user space, it may
   * page fault in the usual way only if return_vaddr below is
   * set. This structure holds information regarding what to do when a
   * page fault from kernel into user space could not be resolved.
   *
   * @note the fields below should be considered read-only. @see
   * sos_thread_prepare_user_space_access() and @see
   * sos_thread_end_user_space_access() to modify them.
   */
  struct
  {
    /** This is the address (in kernel code) to return to when a
        user-space page fault from a kernel-mode thread could not be
        resolved.  @see sos_thread_prepare_user_space_access() */
    sos_vaddr_t return_vaddr;

    /** This is the address of the user-space address that caused the
        unresolved page fault (set by the page fault handler) */
    sos_uaddr_t faulted_uaddr;
  } fixup_uaccess;


  /*
   * Data used by the kwaitq subsystem: list of kwaitqueues the thread
   * is waiting for.
   *
   * @note: a RUNNING or READY thread might be in one or more
   * waitqueues ! The only property we have is that, among these
   * waitqueues (if any), _at least_ one has woken the thread.
   */
  struct sos_kwaitq_entry *kwaitq_list;


  /**
   * Some statistics
   */
  struct rusage
  {
    /* Updated by sched.c */
    struct sos_time ru_utime; /* Time spent in user mode */
    struct sos_time ru_stime; /* Time spent in kernel mode */
  } rusage;


  /**
   * Chaining pointers for the list of threads in the parent process
   */
  struct sos_thread *prev_in_process, *next_in_process;


  /**
   * Chaining pointers for global ("gbl") list of threads (debug)
   */
  struct sos_thread *gbl_prev, *gbl_next;
};
200 
201 
/**
 * Definition of the function executed by a kernel thread: called with
 * the start argument given to sos_create_kernel_thread()
 */
typedef void (*sos_kernel_thread_start_routine_t)(void *arg);
206 
207 
/**
 * Initialize the subsystem responsible for thread management
 *
 * Initialize the primary kernel thread so that it can be handled the
 * same way as an ordinary thread created by sos_thread_create().
 *
 * @param init_thread_stack_base_addr Base address of the stack already
 *        in use by the primary kernel thread
 * @param init_thread_stack_size Size (in bytes) of that stack
 */
sos_ret_t sos_thread_subsystem_setup(sos_vaddr_t init_thread_stack_base_addr,
                                     sos_size_t init_thread_stack_size);
216 
217 
/**
 * Create a new kernel thread running start_func(start_arg) at the
 * given scheduling priority
 *
 * @return The new thread, or NULL on failure (presumably — TODO
 *         confirm against thread.c)
 */
struct sos_thread *
sos_create_kernel_thread(const char *name,
                         sos_kernel_thread_start_routine_t start_func,
                         void *start_arg,
                         sos_sched_priority_t priority);
226 
227 
/**
 * Create a new user thread inside the given process
 *
 * @param user_initial_PC User-space address of the first instruction
 *        the thread will execute
 * @param user_start_arg1 First argument passed to the user entry point
 * @param user_start_arg2 Second argument passed to the user entry point
 * @param user_initial_SP User-space address of the initial stack pointer
 */
struct sos_thread *
sos_create_user_thread(const char *name,
                       struct sos_process *process,
                       sos_uaddr_t user_initial_PC,
                       sos_ui32_t  user_start_arg1,
                       sos_ui32_t  user_start_arg2,
                       sos_uaddr_t user_initial_SP,
                       sos_sched_priority_t priority);
239 
240 
/**
 * Create a new user thread, copy of the given user thread with the
 * given user context (typically used to implement fork-like semantics)
 *
 * @param model_thread The thread to duplicate
 * @param model_uctxt  The user CPU context the new thread starts from
 * @param retval       Value to be returned in the new thread's context
 *                     (presumably its "return value" from the duplicating
 *                     syscall — TODO confirm against thread.c)
 */
struct sos_thread *
sos_duplicate_user_thread(const char *name,
                          struct sos_process *process,
                          const struct sos_thread * model_thread,
                          const struct sos_cpu_state * model_uctxt,
                          sos_ui32_t retval);
251 
252 
/**
 * Terminate the execution of the current thread. For kernel threads,
 * it is called by default when the start routine returns.
 *
 * @note Never returns (marked noreturn)
 */
void sos_thread_exit(void) __attribute__((noreturn));
258 
259 
/**
 * Get the identifier of the thread currently running on CPU. Trivial
 * function.
 */
struct sos_thread *sos_thread_get_current(void);
265 
266 
/**
 * If thr == NULL, get the priority of the current thread. Trivial
 * function.
 *
 * @note NOT protected against interrupts
 */
sos_sched_priority_t sos_thread_get_priority(struct sos_thread *thr);
274 
275 
/**
 * If thr == NULL, get the state of the current thread. Trivial
 * function.
 *
 * @note NOT protected against interrupts
 */
sos_thread_state_t sos_thread_get_state(struct sos_thread *thr);
283 
284 
/**
 * If thr == NULL, set the priority of the current thread
 *
 * @note NO context-switch ever occurs in this function !
 */
sos_ret_t sos_thread_set_priority(struct sos_thread *thr,
                                  sos_sched_priority_t priority);
292 
293 
/**
 * Yield CPU to another ready thread.
 *
 * @note This is a BLOCKING FUNCTION
 */
sos_ret_t sos_thread_yield(void);
300 
301 
/**
 * Release the CPU for (at least) the given delay.
 *
 * @param delay The delay to wait for. If delay == NULL then wait
 * forever, until some event wakes the thread up.
 *
 * @return SOS_OK when delay expired (and delay is reset to zero),
 * -SOS_EINTR otherwise (and delay contains the amount of time
 * remaining).
 *
 * @note This is a BLOCKING FUNCTION
 */
sos_ret_t sos_thread_sleep(/* in/out */struct sos_time *delay);
315 
316 
/**
 * Mark the given thread as READY (if not already ready) even if it is
 * blocked in a kwaitq or in a sleep ! As a result, the interrupted
 * kwaitq/sleep function call of the thread will return with
 * -SOS_EINTR.
 *
 * @return -SOS_EINVAL if thread does not exist, or -SOS_EFATAL if
 * marked ZOMBIE.
 *
 * @note As a result, the semaphore/mutex/conditions/... functions
 * return values SHOULD ALWAYS be checked ! If they are != SOS_OK,
 * then the caller should consider that the resource is not acquired
 * because somebody woke the thread by some way.
 */
sos_ret_t sos_thread_force_unblock(struct sos_thread *thread);
332 
/**
 * Dump the backtrace of the current thread to console and/or bochs
 *
 * @param on_console Dump to the text console
 * @param on_bochs   Dump to the bochs emulator's debug output
 */
void sos_thread_dump_backtrace(sos_bool_t on_console,
                               sos_bool_t on_bochs);
338 
339 
340 /* **********************************************
341  * Restricted functions
342  */
343 
344 
/**
 * Restricted function to indicate that we are to access the given
 * user address space from inside the kernel.
 *
 * @param dest_as The address space we want to access, or NULL to
 * access current thread's address space
 *
 * @param fixup_retvaddr When != 0, then dest_as MUST BE NULL (we
 * don't allow controlled access from kernel into user space from a
 * foreign thread). In this case, the page fault handler should accept
 * page faults from the kernel in user space, and resolve them in the
 * usual way. The value in retvaddr is where the page fault handler
 * has to return to in case the page fault remains unresolved. The
 * address of the faulting address is kept in
 * thread->fixup_uaccess.faulted_uaddr
 *
 * @note typical values for fixup_retvaddr are obtained by "Labels as
 * values" (see gcc's doc: operator "&&"). See uaccess.c for example
 * code.
 */
sos_ret_t
sos_thread_prepare_user_space_access(struct sos_umem_vmm_as * dest_as,
                                     sos_vaddr_t fixup_retvaddr);
368 
369 
/**
 * Restricted function to signal we are not accessing any user address
 * space anymore (counterpart of sos_thread_prepare_user_space_access())
 */
sos_ret_t
sos_thread_end_user_space_access(void);
376 
377 
/**
 * Restricted callback called when a syscall goes back in user mode,
 * to reconfigure the MMU to match that of the current thread's
 * process MMU context.
 *
 * @note The use of this function is RESERVED to the syscall wrapper
 */
void sos_thread_prepare_syscall_switch_back(struct sos_cpu_state *cpu_state);
386 
387 
/**
 * Restricted callback called when an exception handler goes back to
 * the interrupted thread to reconfigure the MMU to match that of the
 * current thread's process MMU context.
 *
 * @note The use of this function is RESERVED to the exception wrappers
 */
void sos_thread_prepare_exception_switch_back(struct sos_cpu_state *cpu_state);
396 
397 
/**
 * Restricted callback called when an IRQ is entered while the CPU was
 * NOT already servicing any other IRQ (ie the outermost IRQ handler
 * is entered). This callback simply updates the "cpu_state" field so
 * that IRQ handlers always know the state of the interrupted thread,
 * even if they are nested in other IRQ handlers.
 *
 * @note The use of this function is RESERVED to the irq wrappers
 */
void
sos_thread_prepare_irq_servicing(struct sos_cpu_state *interrupted_state);
409 
410 
/**
 * Restricted callback called when the outermost IRQ handler returns,
 * to select the thread to return to. This callback implements:
 *   - preemption of user threads in user mode (time sharing / FIFO)
 *   - non-preemption of user threads in kernel mode (interrupted thread
 *     is restored on CPU "as is")
 *   - non-preemption of kernel threads (same remark)
 * The MMU is reconfigured correctly to match the address space of the
 * selected thread.
 *
 * @return The CPU context of the thread to return to
 *
 * @note The use of this function is RESERVED to the irq wrappers
 */
struct sos_cpu_state *
sos_thread_prepare_irq_switch_back(void);
427 
428 
429 #endif /* _SOS_THREAD_H_ */

source navigation ] diff markup ] identifier search ] general search ]