SimpleOS

LXR

Navigation



Site hébergé par : enix

The LXR Cross Referencer for SOS

[ source navigation ]
[ diff markup ]
[ identifier search ]
[ general search ]
 
 
Article:1 ] [ 2 ] [ 3 ] [ 4 ] [ 5 ] [ 6 ] [ 6.5 ] [ 7 ] [ 7.5 ] [ 8 ] [ 9 ] [ 9.5 ]

001 /* Copyright (C) 2004,2005 David Decotigny
002 
003    This program is free software; you can redistribute it and/or
004    modify it under the terms of the GNU General Public License
005    as published by the Free Software Foundation; either version 2
006    of the License, or (at your option) any later version.
007    
008    This program is distributed in the hope that it will be useful,
009    but WITHOUT ANY WARRANTY; without even the implied warranty of
010    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
011    GNU General Public License for more details.
012    
013    You should have received a copy of the GNU General Public License
014    along with this program; if not, write to the Free Software
015    Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
016    USA. 
017 */
018 
019 #include <sos/physmem.h>
020 #include <sos/kmem_slab.h>
021 #include <sos/kmalloc.h>
022 #include <sos/klibc.h>
023 #include <sos/list.h>
024 #include <sos/assert.h>
025 
026 #include <hwcore/irq.h>
027 
028 #include "thread.h"
029 
030 
/**
 * The size of the stack of a kernel thread (one physical page).
 */
#define SOS_THREAD_KERNEL_STACK_SIZE (1*SOS_PAGE_SIZE)


/**
 * The identifier of the thread currently running on CPU.
 *
 * We only support a SINGLE processor, ie a SINGLE thread
 * running at any time in the system. This greatly simplifies the
 * implementation of the system, since we don't have to complicate
 * things in order to retrieve the identifier of the threads running
 * on the CPU. On multiprocessor systems the current_thread below is
 * an array indexed by the id of the CPU, so that the challenge is to
 * retrieve the identifier of the CPU. This is usually done based on
 * the stack address (Linux implementation) or on some form of TLS
 * ("Thread Local Storage": can be implemented by way of LDTs for the
 * processes, accessed through the fs or gs registers).
 *
 * Declared volatile: it is updated across context switches and read
 * from several execution contexts.
 */
static volatile struct sos_thread *current_thread = NULL;


/*
 * The list of threads currently in the system, linked through the
 * gbl_prev/gbl_next fields of struct sos_thread.
 *
 * @note We could have used current_thread for that...
 */
static struct sos_thread *thread_list = NULL;


/**
 * The cache (slab allocator) used to allocate struct sos_thread objects.
 */
static struct sos_kslab_cache *cache_thread;
066 
067 
068 struct sos_thread *sos_thread_get_current()
069 {
070   SOS_ASSERT_FATAL(current_thread->state == SOS_THR_RUNNING);
071   return (struct sos_thread*)current_thread;
072 }
073 
074 
075 inline static sos_ret_t _set_current(struct sos_thread *thr)
076 {
077   SOS_ASSERT_FATAL(thr->state == SOS_THR_READY);
078   current_thread = thr;
079   current_thread->state = SOS_THR_RUNNING;
080   return SOS_OK;
081 }
082 
083 
084 sos_ret_t sos_thread_subsystem_setup(sos_vaddr_t init_thread_stack_base_addr,
085                                      sos_size_t init_thread_stack_size)
086 {
087   struct sos_thread *myself;
088 
089   /* Allocate the cache of threads */
090   cache_thread = sos_kmem_cache_create("thread",
091                                        sizeof(struct sos_thread),
092                                        2,
093                                        0,
094                                        SOS_KSLAB_CREATE_MAP
095                                        | SOS_KSLAB_CREATE_ZERO);
096   if (! cache_thread)
097     return -SOS_ENOMEM;
098 
099   /* Allocate a new thread structure for the current running thread */
100   myself = (struct sos_thread*) sos_kmem_cache_alloc(cache_thread,
101                                                      SOS_KSLAB_ALLOC_ATOMIC);
102   if (! myself)
103     return -SOS_ENOMEM;
104 
105   /* Initialize the thread attributes */
106   strzcpy(myself->name, "[kinit]", SOS_THR_MAX_NAMELEN);
107   myself->state           = SOS_THR_CREATED;
108   myself->kernel_stack_base_addr = init_thread_stack_base_addr;
109   myself->kernel_stack_size      = init_thread_stack_size;
110 
111   /* Do some stack poisoning on the bottom of the stack, if needed */
112   sos_cpu_state_prepare_detect_kernel_stack_overflow(myself->cpu_state,
113                                                      myself->kernel_stack_base_addr,
114                                                      myself->kernel_stack_size);
115 
116   /* Add the thread in the global list */
117   list_singleton_named(thread_list, myself, gbl_prev, gbl_next);
118 
119   /* Ok, now pretend that the running thread is ourselves */
120   myself->state = SOS_THR_READY;
121   _set_current(myself);
122 
123   return SOS_OK;
124 }
125 
126 
127 struct sos_thread *
128 sos_create_kernel_thread(const char *name,
129                          sos_kernel_thread_start_routine_t start_func,
130                          void *start_arg)
131 {
132   __label__ undo_creation;
133   sos_ui32_t flags;
134   struct sos_thread *new_thread;
135 
136   if (! start_func)
137     return NULL;
138 
139   /* Allocate a new thread structure for the current running thread */
140   new_thread
141     = (struct sos_thread*) sos_kmem_cache_alloc(cache_thread,
142                                                 SOS_KSLAB_ALLOC_ATOMIC);
143   if (! new_thread)
144     return NULL;
145 
146   /* Initialize the thread attributes */
147   strzcpy(new_thread->name, ((name)?name:"[NONAME]"), SOS_THR_MAX_NAMELEN);
148   new_thread->state    = SOS_THR_CREATED;
149 
150   /* Allocate the stack for the new thread */
151   new_thread->kernel_stack_base_addr = sos_kmalloc(SOS_THREAD_KERNEL_STACK_SIZE, 0);
152   new_thread->kernel_stack_size      = SOS_THREAD_KERNEL_STACK_SIZE;
153   if (! new_thread->kernel_stack_base_addr)
154     goto undo_creation;
155 
156   /* Initialize the CPU context of the new thread */
157   if (SOS_OK
158       != sos_cpu_kstate_init(& new_thread->cpu_state,
159                              (sos_cpu_kstate_function_arg1_t*) start_func,
160                              (sos_ui32_t) start_arg,
161                              new_thread->kernel_stack_base_addr,
162                              new_thread->kernel_stack_size,
163                              (sos_cpu_kstate_function_arg1_t*) sos_thread_exit,
164                              (sos_ui32_t) NULL))
165     goto undo_creation;
166 
167   /* Add the thread in the global list */
168   sos_disable_IRQs(flags);
169   list_add_tail_named(thread_list, new_thread, gbl_prev, gbl_next);
170   sos_restore_IRQs(flags);
171 
172   /* Mark the thread ready */
173   if (SOS_OK != sos_sched_set_ready(new_thread))
174     goto undo_creation;
175 
176   /* Normal non-erroneous end of function */
177   return new_thread;
178 
179  undo_creation:
180   if (new_thread->kernel_stack_base_addr)
181     sos_kfree((sos_vaddr_t) new_thread->kernel_stack_base_addr);
182   sos_kmem_cache_free((sos_vaddr_t) new_thread);
183   return NULL;
184 }
185 
186 
/** Reclaim all resources of a terminated thread: unlink it from the
    global thread list, free its kernel stack and its thread
    structure. Called from inside the context of ANOTHER thread (via
    sos_cpu_context_exit_to), interrupts disabled.
    @param thr the thread to destroy (must no longer be running) */
static void delete_thread(struct sos_thread *thr)
{
  sos_ui32_t flags;

  /* Unlink from the global thread list, protected against IRQs */
  sos_disable_IRQs(flags);
  list_delete_named(thread_list, thr, gbl_prev, gbl_next);
  sos_restore_IRQs(flags);

  /* Free the stack first, then scrub the structure (helps catch
     use-after-free) before returning it to the cache */
  sos_kfree((sos_vaddr_t) thr->kernel_stack_base_addr);
  memset(thr, 0x0, sizeof(struct sos_thread));
  sos_kmem_cache_free((sos_vaddr_t) thr);
}
201 
202 
/** Terminate the calling thread. Never returns: marks the caller as
    ZOMBIE, switches to the next schedulable thread, and arranges for
    delete_thread(myself) to be run from that next thread's context
    (the dying thread cannot free its own stack while running on it). */
void sos_thread_exit()
{
  sos_ui32_t flags;
  struct sos_thread *myself, *next_thread;

  /* Interrupt handlers are NOT allowed to exit the current thread ! */
  SOS_ASSERT_FATAL(! sos_servicing_irq());

  myself = sos_thread_get_current();

  /* Refuse to end the current executing thread if it still holds a
     resource ! (ie it is still registered in some kwaitq) */
  SOS_ASSERT_FATAL(list_is_empty_named(myself->kwaitq_list,
                                       prev_entry_for_thread,
                                       next_entry_for_thread));

  /* Prepare to run the next thread */
  sos_disable_IRQs(flags);
  myself->state = SOS_THR_ZOMBIE;
  next_thread = sos_reschedule(myself, FALSE);

  /* Make sure that the next_thread is valid */
  sos_cpu_state_detect_kernel_stack_overflow(next_thread->cpu_state,
                                             next_thread->kernel_stack_base_addr,
                                             next_thread->kernel_stack_size);

  /* No need for sos_restore_IRQs() here because the IRQ flag will be
     restored to that of the next thread upon context switch */

  /* Immediate switch to next thread; delete_thread(myself) is invoked
     from the next thread's context once our stack is no longer in use */
  _set_current(next_thread);
  sos_cpu_context_exit_to(next_thread->cpu_state,
                          (sos_cpu_kstate_function_arg1_t*) delete_thread,
                          (sos_ui32_t) myself);
}
238 
239 
240 sos_thread_state_t sos_thread_get_state(struct sos_thread *thr)
241 {
242   if (! thr)
243     thr = (struct sos_thread*)current_thread;
244 
245   return thr->state;
246 }
247 
248 
/** The two ways the current thread may give up the CPU: voluntarily
    (YIELD_MYSELF, thread stays schedulable) or because it must wait
    (BLOCK_MYSELF, thread is marked BLOCKED). */
typedef enum { YIELD_MYSELF, BLOCK_MYSELF } switch_type_t;
/**
 * Helper function to initiate a context switch in case the current
 * thread becomes blocked, waiting for a timeout, or calls yield.
 *
 * Must be called with interrupts disabled (callers use
 * sos_disable_IRQs before invoking it). Returns SOS_OK once this
 * thread is scheduled again.
 */
static sos_ret_t _switch_to_next_thread(switch_type_t operation)
{
  struct sos_thread *myself, *next_thread;

  SOS_ASSERT_FATAL(current_thread->state == SOS_THR_RUNNING);

  /* Interrupt handlers are NOT allowed to block ! */
  SOS_ASSERT_FATAL(! sos_servicing_irq());

  myself = (struct sos_thread*)current_thread;

  /* Make sure that if we are to be marked "BLOCKED", we have any
     reason of effectively being blocked */
  if (BLOCK_MYSELF == operation)
    {
      myself->state = SOS_THR_BLOCKED;
    }

  /* Identify the next thread (2nd arg: TRUE when yielding, so the
     scheduler keeps us schedulable) */
  next_thread = sos_reschedule(myself, YIELD_MYSELF == operation);

  /* Avoid context switch if the context does not change */
  if (myself != next_thread)
    {
      /* Sanity checks for the next thread */
      sos_cpu_state_detect_kernel_stack_overflow(next_thread->cpu_state,
                                                 next_thread->kernel_stack_base_addr,
                                                 next_thread->kernel_stack_size);


      /*
       * Actual CPU context switch: _set_current must happen BEFORE
       * the switch so that current_thread is correct when the next
       * thread resumes
       */
      _set_current(next_thread);
      sos_cpu_context_switch(& myself->cpu_state, next_thread->cpu_state);
      
      /* Back here ! Someone has _set_current(myself) and switched to us */
      SOS_ASSERT_FATAL(current_thread == myself);
      SOS_ASSERT_FATAL(current_thread->state == SOS_THR_RUNNING);
    }
  else
    {
      /* No context switch but still update ID of current thread
         (sos_reschedule left us in the READY state) */
      _set_current(next_thread);
    }

  return SOS_OK;
}
302 
303 
304 sos_ret_t sos_thread_yield()
305 {
306   sos_ui32_t flags;
307   sos_ret_t retval;
308 
309   sos_disable_IRQs(flags);
310 
311   retval = _switch_to_next_thread(YIELD_MYSELF);
312 
313   sos_restore_IRQs(flags);
314   return retval;
315 }
316 
317 
/**
 * Internal sleep timeout management: data shared between
 * sos_thread_sleep() (which allocates it on its stack) and the
 * sleep_timeout() callback.
 */
struct sleep_timeout_params
{
  struct sos_thread *thread_to_wakeup;  /* the sleeping thread to unblock */
  sos_bool_t timeout_triggered;         /* TRUE iff woken by the timeout */
};
326 
327 
328 /**
329  * Callback called when a timeout happened
330  */
331 static void sleep_timeout(struct sos_timeout_action *act)
332 {
333   struct sleep_timeout_params *sleep_timeout_params
334     = (struct sleep_timeout_params*) act->routine_data;
335 
336   /* Signal that we have been woken up by the timeout */
337   sleep_timeout_params->timeout_triggered = TRUE;
338 
339   /* Mark the thread ready */
340   SOS_ASSERT_FATAL(SOS_OK ==
341                    sos_thread_force_unblock(sleep_timeout_params
342                                              ->thread_to_wakeup));
343 }
344 
345 
/** Put the calling thread to sleep.
    @param timeout NULL => block forever (until someone unblocks us);
           otherwise the maximum time to sleep. On return it is
           updated with the remaining (unslept) time.
    @return SOS_OK when the full timeout elapsed, -SOS_EINTR when
            woken earlier by something else. */
sos_ret_t sos_thread_sleep(struct sos_time *timeout)
{
  sos_ui32_t flags;
  struct sleep_timeout_params sleep_timeout_params;
  struct sos_timeout_action timeout_action;
  sos_ret_t retval;

  /* Block forever if no timeout is given */
  if (NULL == timeout)
    {
      sos_disable_IRQs(flags);
      retval = _switch_to_next_thread(BLOCK_MYSELF);
      sos_restore_IRQs(flags);

      return retval;
    }

  /* Initialize the timeout action */
  sos_time_init_action(& timeout_action);

  /* Prepare parameters used by the sleep timeout callback */
  sleep_timeout_params.thread_to_wakeup 
    = (struct sos_thread*)current_thread;
  sleep_timeout_params.timeout_triggered = FALSE;

  /* IRQs stay disabled from here until after the wakeup cause has
     been examined, so the timeout callback cannot race with us */
  sos_disable_IRQs(flags);

  /* Now program the timeout ! */
  SOS_ASSERT_FATAL(SOS_OK ==
                   sos_time_register_action_relative(& timeout_action,
                                                     timeout,
                                                     sleep_timeout,
                                                     & sleep_timeout_params));

  /* Prepare to block: wait for sleep_timeout() to wakeup us in the
     timeout kwaitq, or for someone to wake us up in any other
     waitq */
  retval = _switch_to_next_thread(BLOCK_MYSELF);
  /* Unblocked by something ! */

  /* Unblocked by timeout ? */
  if (sleep_timeout_params.timeout_triggered)
    {
      /* Yes: the full delay elapsed, so no time remains */
      SOS_ASSERT_FATAL(sos_time_is_zero(& timeout_action.timeout));
      retval = SOS_OK;
    }
  else
    {
      /* No: We have probably been woken up while in some other
         kwaitq. Cancel the still-pending timeout action */
      SOS_ASSERT_FATAL(SOS_OK == sos_time_unregister_action(& timeout_action));
      retval = -SOS_EINTR;
    }

  sos_restore_IRQs(flags);

  /* Update the remaining timeout (zero if the timeout triggered) */
  memcpy(timeout, & timeout_action.timeout, sizeof(struct sos_time));

  return retval;
}
408 
409 
410 sos_ret_t sos_thread_force_unblock(struct sos_thread *thread)
411 {
412   sos_ret_t retval;
413   sos_ui32_t flags;
414 
415   if (! thread)
416     return -SOS_EINVAL;
417   
418   sos_disable_IRQs(flags);
419 
420   /* Thread already woken up ? */
421   retval = SOS_OK;
422   switch(sos_thread_get_state(thread))
423     {
424     case SOS_THR_RUNNING:
425     case SOS_THR_READY:
426       /* Do nothing */
427       break;
428 
429     case SOS_THR_ZOMBIE:
430       retval = -SOS_EFATAL;
431       break;
432 
433     default:
434       retval = sos_sched_set_ready(thread);
435       break;
436     }
437 
438   sos_restore_IRQs(flags);
439 
440   return retval;
441 }

source navigation ] diff markup ] identifier search ] general search ]