/* Copyright (C) 2004,2005 David Decotigny

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License
   as published by the Free Software Foundation; either version 2
   of the License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
   USA.
*/

#include <sos/physmem.h>
#include <sos/kmem_slab.h>
#include <sos/kmalloc.h>
#include <sos/klibc.h>
#include <sos/list.h>
#include <sos/assert.h>
#include <hwcore/mm_context.h>
#include <sos/process.h>

#include <drivers/bochs.h>
#include <drivers/x86_videomem.h>

#include <hwcore/irq.h>

#include "thread.h"


/**
 * The size of the stack of a kernel thread
 */
#define SOS_THREAD_KERNEL_STACK_SIZE (1*SOS_PAGE_SIZE)


/**
 * The identifier of the thread currently running on the CPU.
 *
 * We only support a SINGLE processor, i.e. a SINGLE thread running at
 * any time in the system. This greatly simplifies the implementation
 * of the system, since we don't have to complicate things in order to
 * retrieve the identifier of the thread running on the CPU. On
 * multiprocessor systems, the current_thread below would be an array
 * indexed by the id of the CPU, and the challenge would then be to
 * retrieve the identifier of the CPU. This is usually done based on
 * the stack address (Linux implementation) or on some form of TLS
 * ("Thread Local Storage": can be implemented by way of LDTs for the
 * processes, accessed through the fs or gs registers).
 */
static volatile struct sos_thread *current_thread = NULL;

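/*
 * Illustrative sketch (not part of SOS): on a multiprocessor, the
 * variable above would typically become a per-CPU array, e.g.:
 *
 *   static volatile struct sos_thread *current_thread[SOS_NR_CPUS];
 *   #define THREAD_CURRENT() (current_thread[sos_current_cpu_id()])
 *
 * SOS_NR_CPUS and sos_current_cpu_id() are hypothetical names;
 * sos_current_cpu_id() would derive the CPU id from the current stack
 * address or from a TLS segment, as explained above.
 */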

/*
 * The list of threads currently in the system.
 *
 * @note We could have used current_thread for that, since every
 * thread in the system is linked into this list anyway...
 */
static struct sos_thread *thread_list = NULL;


/**
 * The cache of thread structures
 */
static struct sos_kslab_cache *cache_thread;


struct sos_thread *sos_thread_get_current()
{
  SOS_ASSERT_FATAL(current_thread->state == SOS_THR_RUNNING);
  return (struct sos_thread*)current_thread;
}


inline static sos_ret_t _set_current(struct sos_thread *thr)
{
  SOS_ASSERT_FATAL(thr->state == SOS_THR_READY);
  current_thread = thr;
  current_thread->state = SOS_THR_RUNNING;
  return SOS_OK;
}


sos_ret_t sos_thread_subsystem_setup(sos_vaddr_t init_thread_stack_base_addr,
                                     sos_size_t init_thread_stack_size)
{
  struct sos_thread *myself;

  /* Allocate the cache of threads */
  cache_thread = sos_kmem_cache_create("thread",
                                       sizeof(struct sos_thread),
                                       2,
                                       0,
                                       SOS_KSLAB_CREATE_MAP
                                       | SOS_KSLAB_CREATE_ZERO);
  if (! cache_thread)
    return -SOS_ENOMEM;

  /* Allocate a thread structure for the currently running thread */
  myself = (struct sos_thread*) sos_kmem_cache_alloc(cache_thread,
                                                     SOS_KSLAB_ALLOC_ATOMIC);
  if (! myself)
    return -SOS_ENOMEM;

  /* Initialize the thread attributes */
  strzcpy(myself->name, "[kinit]", SOS_THR_MAX_NAMELEN);
  myself->state           = SOS_THR_CREATED;
  myself->priority        = SOS_SCHED_PRIO_LOWEST;
  myself->kernel_stack_base_addr = init_thread_stack_base_addr;
  myself->kernel_stack_size      = init_thread_stack_size;

  /* Do some stack poisoning on the bottom of the stack, if needed */
  sos_cpu_state_prepare_detect_kernel_stack_overflow(myself->cpu_state,
                                                     myself->kernel_stack_base_addr,
                                                     myself->kernel_stack_size);

  /* Add the thread to the global list */
  list_singleton_named(thread_list, myself, gbl_prev, gbl_next);

  /* Ok, now pretend that the running thread is ourselves */
  myself->state = SOS_THR_READY;
  _set_current(myself);

  return SOS_OK;
}
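
/*
 * Typical use of the function above (sketch; the exact call site in
 * SOS's init code may differ): the kernel boot code hands its own
 * stack over to the thread subsystem, so that the boot context itself
 * becomes the "[kinit]" thread:
 *
 *   sos_thread_subsystem_setup(bootstrap_stack_bottom,
 *                              bootstrap_stack_size);
 *
 * bootstrap_stack_bottom and bootstrap_stack_size are hypothetical
 * names for the stack area set up by the early boot code.
 */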


struct sos_thread *
sos_create_kernel_thread(const char *name,
                         sos_kernel_thread_start_routine_t start_func,
                         void *start_arg,
                         sos_sched_priority_t priority)
{
  __label__ undo_creation;
  sos_ui32_t flags;
  struct sos_thread *new_thread;

  if (! start_func)
    return NULL;
  if (! SOS_SCHED_PRIO_IS_VALID(priority))
    return NULL;

  /* Allocate a thread structure for the new thread */
  new_thread
    = (struct sos_thread*) sos_kmem_cache_alloc(cache_thread,
                                                SOS_KSLAB_ALLOC_ATOMIC);
  if (! new_thread)
    return NULL;

  /* Initialize the thread attributes */
  strzcpy(new_thread->name, ((name)?name:"[NONAME]"), SOS_THR_MAX_NAMELEN);
  new_thread->state    = SOS_THR_CREATED;
  new_thread->priority = priority;

  /* Allocate the stack for the new thread */
  new_thread->kernel_stack_base_addr = sos_kmalloc(SOS_THREAD_KERNEL_STACK_SIZE, 0);
  new_thread->kernel_stack_size      = SOS_THREAD_KERNEL_STACK_SIZE;
  if (! new_thread->kernel_stack_base_addr)
    goto undo_creation;

  /* Initialize the CPU context of the new thread */
  if (SOS_OK
      != sos_cpu_kstate_init(& new_thread->cpu_state,
                             (sos_cpu_kstate_function_arg1_t*) start_func,
                             (sos_ui32_t) start_arg,
                             new_thread->kernel_stack_base_addr,
                             new_thread->kernel_stack_size,
                             (sos_cpu_kstate_function_arg1_t*) sos_thread_exit,
                             (sos_ui32_t) NULL))
    goto undo_creation;

  /* Add the thread to the global list */
  sos_disable_IRQs(flags);
  list_add_tail_named(thread_list, new_thread, gbl_prev, gbl_next);
  sos_restore_IRQs(flags);

  /* Mark the thread ready */
  if (SOS_OK != sos_sched_set_ready(new_thread))
    {
      /* Remove the half-created thread from the global list before
         undoing its creation */
      sos_disable_IRQs(flags);
      list_delete_named(thread_list, new_thread, gbl_prev, gbl_next);
      sos_restore_IRQs(flags);
      goto undo_creation;
    }

  /* Normal, error-free end of function */
  return new_thread;

 undo_creation:
  if (new_thread->kernel_stack_base_addr)
    sos_kfree((sos_vaddr_t) new_thread->kernel_stack_base_addr);
  sos_kmem_cache_free((sos_vaddr_t) new_thread);
  return NULL;
}
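
/*
 * Example use (sketch; demo_thread and its assumed void* signature
 * are hypothetical, not part of SOS):
 *
 *   static void demo_thread(void *arg)
 *   {
 *     // ... do some work; when start_func returns, the thread
 *     // automatically enters sos_thread_exit() (see the
 *     // sos_cpu_kstate_init() call above) ...
 *   }
 *
 *   struct sos_thread *t
 *     = sos_create_kernel_thread("demo", demo_thread, NULL,
 *                                SOS_SCHED_PRIO_LOWEST);
 *   if (! t)
 *     ; // creation failed (out of memory, invalid priority, ...)
 */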


struct sos_thread *
sos_create_user_thread(const char *name,
                       struct sos_process *process,
                       sos_uaddr_t user_initial_PC,
                       sos_ui32_t  user_start_arg1,
                       sos_ui32_t  user_start_arg2,
                       sos_uaddr_t user_initial_SP,
                       sos_sched_priority_t priority)
{
  __label__ undo_creation;
  sos_ui32_t flags;
  struct sos_thread *new_thread;

  if (! SOS_SCHED_PRIO_IS_VALID(priority))
    return NULL;

  /* For a user thread, the process must be given */
  if (! process)
    return NULL;

  /* Allocate a thread structure for the new thread */
  new_thread
    = (struct sos_thread*) sos_kmem_cache_alloc(cache_thread,
                                                SOS_KSLAB_ALLOC_ATOMIC);
  if (! new_thread)
    return NULL;

  /* Initialize the thread attributes */
  strzcpy(new_thread->name, ((name)?name:"[NONAME]"), SOS_THR_MAX_NAMELEN);
  new_thread->state    = SOS_THR_CREATED;
  new_thread->priority = priority;

  /* Allocate the stack for the new thread */
  new_thread->kernel_stack_base_addr = sos_kmalloc(SOS_THREAD_KERNEL_STACK_SIZE, 0);
  new_thread->kernel_stack_size      = SOS_THREAD_KERNEL_STACK_SIZE;
  if (! new_thread->kernel_stack_base_addr)
    goto undo_creation;

  /* Initialize the CPU context of the new thread */
  if (SOS_OK
      != sos_cpu_ustate_init(& new_thread->cpu_state,
                             user_initial_PC,
                             user_start_arg1,
                             user_start_arg2,
                             user_initial_SP,
                             new_thread->kernel_stack_base_addr,
                             new_thread->kernel_stack_size))
    goto undo_creation;

  /* Attach the new thread to the process */
  if (SOS_OK != sos_process_register_thread(process, new_thread))
    goto undo_creation;

  /* Add the thread to the global list */
  sos_disable_IRQs(flags);
  list_add_tail_named(thread_list, new_thread, gbl_prev, gbl_next);
  sos_restore_IRQs(flags);

  /* Mark the thread ready */
  if (SOS_OK != sos_sched_set_ready(new_thread))
    {
      /* Detach the half-created thread from the global list and from
         its process before undoing its creation */
      sos_disable_IRQs(flags);
      list_delete_named(thread_list, new_thread, gbl_prev, gbl_next);
      sos_restore_IRQs(flags);
      SOS_ASSERT_FATAL(SOS_OK == sos_process_unregister_thread(new_thread));
      goto undo_creation;
    }

  /* Normal, error-free end of function */
  return new_thread;

 undo_creation:
  if (new_thread->kernel_stack_base_addr)
    sos_kfree((sos_vaddr_t) new_thread->kernel_stack_base_addr);
  sos_kmem_cache_free((sos_vaddr_t) new_thread);
  return NULL;
}
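
/*
 * Sketch of a call site (all names hypothetical): when a new process
 * is spawned, its first thread typically starts at the program's
 * entry point with a freshly allocated user stack:
 *
 *   sos_create_user_thread("start", proc,
 *                          elf_entry_point,   // initial user PC
 *                          arg1, arg2,        // start arguments
 *                          user_stack_top,    // initial user SP
 *                          SOS_SCHED_PRIO_LOWEST);
 */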


/**
 * Helper function to switch to the correct MMU configuration to suit
 * the_thread's needs:
 *   - When switching to a user-mode thread, force the reconfiguration
 *     of the MMU
 *   - When switching to a kernel-mode thread, only change the MMU
 *     configuration if the thread was squatting someone else's
 *     address space
 */
static void _prepare_mm_context(struct sos_thread *the_thread)
{
  /* Going to restore a thread in user mode? */
  if (sos_cpu_context_is_in_user_mode(the_thread->cpu_state)
      == TRUE)
    {
      /* Yes: force the MMU to be correctly set up with the correct
         user address space */

      /* The thread should be a user thread */
      SOS_ASSERT_FATAL(the_thread->process != NULL);

      /* It should not squat anyone else's address space */
      SOS_ASSERT_FATAL(the_thread->squatted_mm_context == NULL);

      /* Perform an MMU context switch if needed */
      sos_mm_context_switch_to(sos_process_get_mm_context(the_thread->process));
    }

  /* Is the_thread a kernel thread squatting a specific address
     space? */
  else if (the_thread->squatted_mm_context != NULL)
    sos_mm_context_switch_to(the_thread->squatted_mm_context);
}
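
/*
 * Summary of the cases handled above (informal):
 *
 *   thread type      squatted_mm_context   MMU action
 *   user thread      NULL (asserted)       switch to the process' mm context
 *   kernel thread    non-NULL              switch to the squatted mm context
 *   kernel thread    NULL                  leave the MMU configuration as-is
 */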


/** Function called after thr has terminated. Called from inside the
    context of another thread, with interrupts disabled */
static void delete_thread(struct sos_thread *thr)
{
  sos_ui32_t flags;

  sos_disable_IRQs(flags);
  list_delete_named(thread_list, thr, gbl_prev, gbl_next);
  sos_restore_IRQs(flags);

  sos_kfree((sos_vaddr_t) thr->kernel_stack_base_addr);

  /* If the thread squats an address space, release it */
  if (thr->squatted_mm_context)
    SOS_ASSERT_FATAL(SOS_OK == sos_thread_change_current_mm_context(NULL));

  /* For a user thread: remove the thread from the process' thread list */
  if (thr->process)
    SOS_ASSERT_FATAL(SOS_OK == sos_process_unregister_thread(thr));

  memset(thr, 0x0, sizeof(struct sos_thread));
  sos_kmem_cache_free((sos_vaddr_t) thr);
}


void sos_thread_exit()
{
  sos_ui32_t flags;
  struct sos_thread *myself, *next_thread;

  /* Interrupt handlers are NOT allowed to exit the current thread! */
  SOS_ASSERT_FATAL(! sos_servicing_irq());

  myself = sos_thread_get_current();

  /* Refuse to end the currently executing thread if it still holds a
     resource! */
  SOS_ASSERT_FATAL(list_is_empty_named(myself->kwaitq_list,
                                       prev_entry_for_thread,
                                       next_entry_for_thread));

  /* Prepare to run the next thread */
  sos_disable_IRQs(flags);
  myself->state = SOS_THR_ZOMBIE;
  next_thread = sos_reschedule(myself, FALSE);

  /* Make sure that the next_thread is valid */
  sos_cpu_state_detect_kernel_stack_overflow(next_thread->cpu_state,
                                             next_thread->kernel_stack_base_addr,
                                             next_thread->kernel_stack_size);

  /*
   * Perform an MMU context switch if needed
   */
  _prepare_mm_context(next_thread);

  /* No need for sos_restore_IRQs() here because the IRQ flag will be
     restored to that of the next thread upon context switch */

  /* Immediate switch to next thread */
  _set_current(next_thread);
  sos_cpu_context_exit_to(next_thread->cpu_state,
                          (sos_cpu_kstate_function_arg1_t*) delete_thread,
                          (sos_ui32_t) myself);
}
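
/*
 * Note on the exit path above (informal): sos_cpu_context_exit_to()
 * abandons the zombie's context and arranges for
 * delete_thread(myself) to run on behalf of next_thread; this is why
 * the zombie's kernel stack can be freed safely in delete_thread(),
 * which would be impossible while still executing on that stack.
 */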


sos_sched_priority_t sos_thread_get_priority(struct sos_thread *thr)
{
  if (! thr)
    thr = (struct sos_thread*)current_thread;

  return thr->priority;
}


sos_thread_state_t sos_thread_get_state(struct sos_thread *thr)
{
  if (! thr)
    thr = (struct sos_thread*)current_thread;

  return thr->state;
}


typedef enum { YIELD_MYSELF, BLOCK_MYSELF } switch_type_t;
/**
 * Helper function to initiate a context switch when the current
 * thread becomes blocked, waits for a timeout, or calls yield.
 */
static sos_ret_t _switch_to_next_thread(switch_type_t operation)
{
  struct sos_thread *myself, *next_thread;

  SOS_ASSERT_FATAL(current_thread->state == SOS_THR_RUNNING);

  /* Interrupt handlers are NOT allowed to block! */
  SOS_ASSERT_FATAL(! sos_servicing_irq());

  myself = (struct sos_thread*)current_thread;

  /* If we are going to block, mark ourselves BLOCKED before asking
     the scheduler for the next thread to run */
  if (BLOCK_MYSELF == operation)
    {
      myself->state = SOS_THR_BLOCKED;
    }

  /* Identify the next thread */
  next_thread = sos_reschedule(myself, YIELD_MYSELF == operation);

  /* Avoid a context switch if the next thread is ourselves */
  if (myself != next_thread)
    {
      /* Sanity checks for the next thread */
      sos_cpu_state_detect_kernel_stack_overflow(next_thread->cpu_state,
                                                 next_thread->kernel_stack_base_addr,
                                                 next_thread->kernel_stack_size);

      /*
       * Perform an MMU context switch if needed
       */
      _prepare_mm_context(next_thread);

      /*
       * Actual CPU context switch
       */
      _set_current(next_thread);
      sos_cpu_context_switch(& myself->cpu_state, next_thread->cpu_state);

      /* Back here! We have been rescheduled onto the CPU */
      SOS_ASSERT_FATAL(current_thread == myself);
      SOS_ASSERT_FATAL(current_thread->state == SOS_THR_RUNNING);
    }
  else
    {
      /* No context switch, but still update the ID of the current
         thread */
      _set_current(next_thread);
    }

  return SOS_OK;
}


/**
 * Helper function to change the thread's priority in all the
 * waitqueues associated with the thread.
 */
static sos_ret_t _change_waitq_priorities(struct sos_thread *thr,
                                          sos_sched_priority_t priority)
{
  struct sos_kwaitq_entry *kwq_entry;
  int nb_waitqs;

  list_foreach_forward_named(thr->kwaitq_list, kwq_entry, nb_waitqs,
                             prev_entry_for_thread, next_entry_for_thread)
    {
      SOS_ASSERT_FATAL(SOS_OK == sos_kwaitq_change_priority(kwq_entry->kwaitq,
                                                            kwq_entry,
                                                            priority));
    }

  return SOS_OK;
}


sos_ret_t sos_thread_set_priority(struct sos_thread *thr,
                                  sos_sched_priority_t priority)
{
  __label__ exit_set_prio;
  sos_ui32_t flags;
  sos_ret_t retval;

  if (! SOS_SCHED_PRIO_IS_VALID(priority))
    return -SOS_EINVAL;

  if (! thr)
    thr = (struct sos_thread*)current_thread;

  sos_disable_IRQs(flags);

  /* Signal the kwaitq subsystem that the priority of the thread
     should be updated in all the waitqs it is waiting in */
  retval = _change_waitq_priorities(thr, priority);
  if (SOS_OK != retval)
    goto exit_set_prio;

  /* Signal the scheduler that the thread, currently in the ready
     queue, should take the new priority into account */
  if (SOS_THR_READY == thr->state)
    retval = sos_sched_change_priority(thr, priority);

  /* Update the priority */
  thr->priority = priority;

 exit_set_prio:
  sos_restore_IRQs(flags);
  return retval;
}


sos_ret_t sos_thread_yield()
{
  sos_ui32_t flags;
  sos_ret_t retval;

  sos_disable_IRQs(flags);

  retval = _switch_to_next_thread(YIELD_MYSELF);

  sos_restore_IRQs(flags);
  return retval;
}


/**
 * Internal sleep timeout management
 */
struct sleep_timeout_params
{
  struct sos_thread *thread_to_wakeup;
  sos_bool_t timeout_triggered;
};


/**
 * Callback called when the sleep timeout expires
 */
static void sleep_timeout(struct sos_timeout_action *act)
{
  struct sleep_timeout_params *sleep_timeout_params
    = (struct sleep_timeout_params*) act->routine_data;

  /* Signal that we have been woken up by the timeout */
  sleep_timeout_params->timeout_triggered = TRUE;

  /* Mark the thread ready */
  SOS_ASSERT_FATAL(SOS_OK ==
                   sos_thread_force_unblock(sleep_timeout_params
                                             ->thread_to_wakeup));
}


sos_ret_t sos_thread_sleep(struct sos_time *timeout)
{
  sos_ui32_t flags;
  struct sleep_timeout_params sleep_timeout_params;
  struct sos_timeout_action timeout_action;
  sos_ret_t retval;

  /* Block forever if no timeout is given */
  if (NULL == timeout)
    {
      sos_disable_IRQs(flags);
      retval = _switch_to_next_thread(BLOCK_MYSELF);
      sos_restore_IRQs(flags);

      return retval;
    }

  /* Initialize the timeout action */
  sos_time_init_action(& timeout_action);

  /* Prepare the parameters used by the sleep timeout callback */
  sleep_timeout_params.thread_to_wakeup
    = (struct sos_thread*)current_thread;
  sleep_timeout_params.timeout_triggered = FALSE;

  sos_disable_IRQs(flags);

  /* Now program the timeout! */
  SOS_ASSERT_FATAL(SOS_OK ==
                   sos_time_register_action_relative(& timeout_action,
                                                     timeout,
                                                     sleep_timeout,
                                                     & sleep_timeout_params));

  /* Prepare to block: wait for sleep_timeout() to wake us up in the
     timeout kwaitq, or for someone to wake us up in any other
     waitq */
  retval = _switch_to_next_thread(BLOCK_MYSELF);
  /* Unblocked by something! */

  /* Unblocked by the timeout? */
  if (sleep_timeout_params.timeout_triggered)
    {
      /* Yes */
      SOS_ASSERT_FATAL(sos_time_is_zero(& timeout_action.timeout));
      retval = SOS_OK;
    }
  else
    {
      /* No: we have probably been woken up while in some other
         kwaitq */
      SOS_ASSERT_FATAL(SOS_OK == sos_time_unregister_action(& timeout_action));
      retval = -SOS_EINTR;
    }

  sos_restore_IRQs(flags);

  /* Update the remaining timeout */
  memcpy(timeout, & timeout_action.timeout, sizeof(struct sos_time));

  return retval;
}
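
/*
 * Example use (sketch; the sec/nanosec field names of struct sos_time
 * are an assumption about <sos/time.h>):
 *
 *   struct sos_time delay = { .sec = 1, .nanosec = 0 };
 *   if (-SOS_EINTR == sos_thread_sleep(& delay))
 *     ; // woken up early: delay now holds the remaining time
 */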


sos_ret_t sos_thread_force_unblock(struct sos_thread *thread)
{
  sos_ret_t retval;
  sos_ui32_t flags;

  if (! thread)
    return -SOS_EINVAL;

  sos_disable_IRQs(flags);

  /* Thread already woken up? */
  retval = SOS_OK;
  switch(sos_thread_get_state(thread))
    {
    case SOS_THR_RUNNING:
    case SOS_THR_READY:
      /* Do nothing */
      break;

    case SOS_THR_ZOMBIE:
      retval = -SOS_EFATAL;
      break;

    default:
      retval = sos_sched_set_ready(thread);
      break;
    }

  sos_restore_IRQs(flags);

  return retval;
}


void sos_thread_dump_backtrace(sos_bool_t on_console,
                               sos_bool_t on_bochs)
{
  sos_vaddr_t stack_bottom = current_thread->kernel_stack_base_addr;
  sos_size_t stack_size    = current_thread->kernel_stack_size;
  static void backtracer(sos_vaddr_t PC,
                         sos_vaddr_t params,
                         sos_ui32_t depth,
                         void *custom_arg)
    {
      sos_ui32_t invalid = 0xffffffff, *arg1, *arg2, *arg3, *arg4;

      /* Get the addresses of the first 4 arguments from the
         frame. Depending on how many arguments the function actually
         takes, only the first 0 to 4 of them are meaningful. */
      arg1 = (sos_ui32_t*)params;
      arg2 = (sos_ui32_t*)(params+4);
      arg3 = (sos_ui32_t*)(params+8);
      arg4 = (sos_ui32_t*)(params+12);

      /* Make sure the addresses of these arguments fit inside the
         stack boundaries */
#define INTERVAL_OK(b,v,u) ( ((b) <= (sos_vaddr_t)(v)) \
                             && ((sos_vaddr_t)(v) < (u)) )
      if (!INTERVAL_OK(stack_bottom, arg1, stack_bottom + stack_size))
        arg1 = &invalid;
      if (!INTERVAL_OK(stack_bottom, arg2, stack_bottom + stack_size))
        arg2 = &invalid;
      if (!INTERVAL_OK(stack_bottom, arg3, stack_bottom + stack_size))
        arg3 = &invalid;
      if (!INTERVAL_OK(stack_bottom, arg4, stack_bottom + stack_size))
        arg4 = &invalid;

      /* Print the function context for this frame */
      if (on_bochs)
        sos_bochs_printf("[%d] PC=0x%x arg1=0x%x arg2=0x%x arg3=0x%x\n",
                         (unsigned)depth, (unsigned)PC,
                         (unsigned)*arg1, (unsigned)*arg2,
                         (unsigned)*arg3);

      if (on_console)
        sos_x86_videomem_printf(23-depth, 3,
                                SOS_X86_VIDEO_BG_BLUE
                                  | SOS_X86_VIDEO_FG_LTGREEN,
                                "[%d] PC=0x%x arg1=0x%x arg2=0x%x arg3=0x%x arg4=0x%x",
                                (unsigned)depth, (unsigned)PC,
                                (unsigned)*arg1, (unsigned)*arg2,
                                (unsigned)*arg3, (unsigned)*arg4);
    }

  sos_backtrace(NULL, 15, stack_bottom, stack_size,
                backtracer, NULL);
}


/* **********************************************
 * Restricted functions
 */


sos_ret_t
sos_thread_change_current_mm_context(struct sos_mm_context *mm_ctxt)
{
  sos_ui32_t flags;

  /* Retrieve the previous mm context */
  struct sos_mm_context * prev_mm_ctxt
    = current_thread->squatted_mm_context;

  /* We should either select a new squatted_mm_context or revert to
     the default one */
  if (mm_ctxt != NULL)
    SOS_ASSERT_FATAL(prev_mm_ctxt == NULL);
  else
    SOS_ASSERT_FATAL(prev_mm_ctxt != NULL);

  sos_disable_IRQs(flags);

  /* Update the current thread's squatted mm context */
  current_thread->squatted_mm_context = mm_ctxt;

  /* Update the reference counts and switch the MMU configuration if
     needed */
  if (mm_ctxt != NULL)
    {
      sos_mm_context_ref(mm_ctxt); /* Because it is now referenced as
                                      the squatted_mm_context field of
                                      the thread */
      sos_mm_context_switch_to(mm_ctxt);
    }
  else
    sos_mm_context_unref(prev_mm_ctxt); /* Because it is not referenced as
                                           the squatted_mm_context field of
                                           the thread any more */

  sos_restore_IRQs(flags);

  return SOS_OK;
}
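
/*
 * Sketch of the intended usage pattern of the function above
 * (target_mm_ctxt is a hypothetical name): a kernel thread that must
 * temporarily work inside a given address space brackets its accesses
 * with:
 *
 *   sos_thread_change_current_mm_context(target_mm_ctxt);  // acquire
 *   // ... access the mappings of target_mm_ctxt ...
 *   sos_thread_change_current_mm_context(NULL);            // release
 *
 * The assertions above enforce this strict acquire/release pairing.
 */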


void sos_thread_prepare_syscall_switch_back(struct sos_cpu_state *cpu_state)
{
  /* Don't preempt the current thread */

  /*
   * Save the state of the interrupted context to make sure that:
   *   - The list of threads correctly reflects that the thread is back
   *     in user mode
   *   - _prepare_mm_context() deals with the correct mm_context
   */
  current_thread->cpu_state = cpu_state;

  /* Perform an MMU context switch if needed */
  _prepare_mm_context((struct sos_thread*) current_thread);
}


void sos_thread_prepare_exception_switch_back(struct sos_cpu_state *cpu_state)
{
  /* Don't preempt the current thread */

  /*
   * Save the state of the interrupted context to make sure that:
   *   - The list of threads correctly reflects that the thread is
   *     running in user or kernel mode
   *   - _prepare_mm_context() deals with the correct mm_context
   */
  current_thread->cpu_state = cpu_state;

  /* Perform an MMU context switch if needed */
  _prepare_mm_context((struct sos_thread*) current_thread);
}


void
sos_thread_prepare_irq_servicing(struct sos_cpu_state *interrupted_state)
{
  current_thread->cpu_state = interrupted_state;
}


struct sos_cpu_state *
sos_thread_prepare_irq_switch_back(void)
{
  struct sos_thread *myself, *next_thread;

  /* In SOS, threads in kernel mode are NEVER preempted from the
     interrupt handlers! */
  if (! sos_cpu_context_is_in_user_mode(current_thread->cpu_state))
    return current_thread->cpu_state;

  /*
   * Here we are dealing only with the possible preemption of user
   * threads running in user context!
   */

  /* Make sure the thread actually is a user thread */
  SOS_ASSERT_FATAL(current_thread->process != NULL);

  /* The state of the interrupted context was already saved by
     sos_thread_prepare_irq_servicing(); take a non-volatile reference
     on the current thread */
  myself = (struct sos_thread*)current_thread;

  /* Select the next thread to run */
  next_thread = sos_reschedule(myself, FALSE);

  /* Perform an MMU context switch if needed */
  _prepare_mm_context(next_thread);

  /* Install the next_thread's context on the CPU */
  _set_current(next_thread);
  return next_thread->cpu_state;
}
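
/*
 * Summary of the IRQ return path above (informal): a thread
 * interrupted in kernel mode is never preempted, so its saved state
 * is simply resumed; a thread interrupted in user mode may be
 * preempted, in which case the scheduler picks the next thread and
 * that thread's saved cpu_state is handed back to the low-level IRQ
 * epilogue for restoration.
 */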
