SimpleOS
The LXR Cross Referencer for SOS

/* Copyright (C) 2004,2005 David Decotigny

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License
   as published by the Free Software Foundation; either version 2
   of the License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
   USA.
*/

#include <sos/physmem.h>
#include <sos/kmem_slab.h>
#include <sos/kmalloc.h>
#include <sos/klibc.h>
#include <sos/list.h>
#include <sos/assert.h>
#include <hwcore/mm_context.h>
#include <sos/process.h>

#include <drivers/bochs.h>
#include <drivers/x86_videomem.h>

#include <hwcore/irq.h>

#include "thread.h"


/**
 * The size of the stack of a kernel thread
 */
#define SOS_THREAD_KERNEL_STACK_SIZE (1*SOS_PAGE_SIZE)

/**
 * The identifier of the thread currently running on the CPU.
 *
 * We only support a SINGLE processor, i.e. a SINGLE thread
 * running at any time in the system. This greatly simplifies the
 * implementation of the system, since we don't have to complicate
 * things in order to retrieve the identifier of the threads running
 * on the CPU. On multiprocessor systems the current_thread below is
 * an array indexed by the id of the CPU, so that the challenge is to
 * retrieve the identifier of the CPU. This is usually done based on
 * the stack address (Linux implementation) or on some form of TLS
 * ("Thread Local Storage": can be implemented by way of LDTs for the
 * processes, accessed through the fs or gs registers).
 */
static volatile struct sos_thread *current_thread = NULL;

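
/*
 * Editor's illustration (not part of SOS): the "stack address" trick
 * mentioned above. On an SMP kernel where every kernel stack has a
 * power-of-two size, is aligned on that size, and stores a pointer to
 * its thread descriptor at the stack base, the current thread can be
 * recovered by masking the stack pointer. All hyp_* names are
 * hypothetical; the block is disabled so the file builds unchanged.
 */
#if 0
#define HYP_KSTACK_SIZE 8192  /* assumption: power of two, size-aligned */
static inline struct sos_thread *hyp_smp_get_current(void)
{
  unsigned long sp;
  asm volatile ("movl %%esp, %0" : "=r" (sp)); /* x86, SOS's target CPU */
  /* The descriptor pointer sits at the base of the aligned stack */
  return *(struct sos_thread **)(sp & ~(HYP_KSTACK_SIZE - 1));
}
#endif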

/*
 * The list of threads currently in the system.
 *
 * @note We could have used current_thread for that...
 */
static struct sos_thread *thread_list = NULL;


/**
 * The cache of thread structures
 */
static struct sos_kslab_cache *cache_thread;


/**
 * (Forward declaration) Helper function to change the MMU config of
 * the currently executing thread. Analogous to the function
 * sos_thread_change_current_mm_context() of article 7
 */
static sos_ret_t change_current_mm_context(struct sos_mm_context *mm_ctxt);


struct sos_thread *sos_thread_get_current()
{
  SOS_ASSERT_FATAL(current_thread->state == SOS_THR_RUNNING);
  return (struct sos_thread*)current_thread;
}


inline static sos_ret_t _set_current(struct sos_thread *thr)
{
  SOS_ASSERT_FATAL(thr->state == SOS_THR_READY);
  current_thread = thr;
  current_thread->state = SOS_THR_RUNNING;
  return SOS_OK;
}


sos_ret_t sos_thread_subsystem_setup(sos_vaddr_t init_thread_stack_base_addr,
                                     sos_size_t init_thread_stack_size)
{
  struct sos_thread *myself;

  /* Allocate the cache of threads */
  cache_thread = sos_kmem_cache_create("thread",
                                       sizeof(struct sos_thread),
                                       2,
                                       0,
                                       SOS_KSLAB_CREATE_MAP
                                       | SOS_KSLAB_CREATE_ZERO);
  if (! cache_thread)
    return -SOS_ENOMEM;

  /* Allocate a new thread structure for the current running thread */
  myself = (struct sos_thread*) sos_kmem_cache_alloc(cache_thread,
                                                     SOS_KSLAB_ALLOC_ATOMIC);
  if (! myself)
    return -SOS_ENOMEM;

  /* Initialize the thread attributes */
  strzcpy(myself->name, "[kinit]", SOS_THR_MAX_NAMELEN);
  myself->state           = SOS_THR_CREATED;
  myself->priority        = SOS_SCHED_PRIO_LOWEST;
  myself->kernel_stack_base_addr = init_thread_stack_base_addr;
  myself->kernel_stack_size      = init_thread_stack_size;

  /* Do some stack poisoning on the bottom of the stack, if needed */
  sos_cpu_state_prepare_detect_kernel_stack_overflow(myself->cpu_state,
                                                     myself->kernel_stack_base_addr,
                                                     myself->kernel_stack_size);

  /* Add the thread to the global list */
  list_singleton_named(thread_list, myself, gbl_prev, gbl_next);

  /* Ok, now pretend that the running thread is ourselves */
  myself->state = SOS_THR_READY;
  _set_current(myself);

  return SOS_OK;
}
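
/*
 * Editor's illustration: how boot code is expected to call the setup
 * above, handing over the stack installed by the assembly bootstrap.
 * The hyp_* symbol names are assumptions, not necessarily those used
 * by SOS's main.c; the block is disabled on purpose.
 */
#if 0
extern sos_vaddr_t hyp_bootstrap_stack_bottom; /* hypothetical */
extern sos_size_t  hyp_bootstrap_stack_size;   /* hypothetical */

static void hyp_kernel_main(void)
{
  /* Must be called once, before any thread creation */
  SOS_ASSERT_FATAL(SOS_OK ==
                   sos_thread_subsystem_setup(hyp_bootstrap_stack_bottom,
                                              hyp_bootstrap_stack_size));
}
#endif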


struct sos_thread *
sos_create_kernel_thread(const char *name,
                         sos_kernel_thread_start_routine_t start_func,
                         void *start_arg,
                         sos_sched_priority_t priority)
{
  __label__ undo_creation;
  sos_ui32_t flags;
  struct sos_thread *new_thread;

  if (! start_func)
    return NULL;
  if (! SOS_SCHED_PRIO_IS_VALID(priority))
    return NULL;

  /* Allocate a thread structure for the new thread */
  new_thread
    = (struct sos_thread*) sos_kmem_cache_alloc(cache_thread,
                                                SOS_KSLAB_ALLOC_ATOMIC);
  if (! new_thread)
    return NULL;

  /* Initialize the thread attributes */
  strzcpy(new_thread->name, ((name)?name:"[NONAME]"), SOS_THR_MAX_NAMELEN);
  new_thread->state    = SOS_THR_CREATED;
  new_thread->priority = priority;

  /* Allocate the stack for the new thread */
  new_thread->kernel_stack_base_addr = sos_kmalloc(SOS_THREAD_KERNEL_STACK_SIZE, 0);
  new_thread->kernel_stack_size      = SOS_THREAD_KERNEL_STACK_SIZE;
  if (! new_thread->kernel_stack_base_addr)
    goto undo_creation;

  /* Initialize the CPU context of the new thread */
  if (SOS_OK
      != sos_cpu_kstate_init(& new_thread->cpu_state,
                             (sos_cpu_kstate_function_arg1_t*) start_func,
                             (sos_ui32_t) start_arg,
                             new_thread->kernel_stack_base_addr,
                             new_thread->kernel_stack_size,
                             (sos_cpu_kstate_function_arg1_t*) sos_thread_exit,
                             (sos_ui32_t) NULL))
    goto undo_creation;

  /* Add the thread to the global list */
  sos_disable_IRQs(flags);
  list_add_tail_named(thread_list, new_thread, gbl_prev, gbl_next);
  sos_restore_IRQs(flags);

  /* Mark the thread ready */
  if (SOS_OK != sos_sched_set_ready(new_thread))
    goto undo_creation;

  /* Normal non-erroneous end of function */
  return new_thread;

 undo_creation:
  if (new_thread->kernel_stack_base_addr)
    sos_kfree((sos_vaddr_t) new_thread->kernel_stack_base_addr);
  sos_kmem_cache_free((sos_vaddr_t) new_thread);
  return NULL;
}
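
/*
 * Editor's illustration: creating a kernel thread with the function
 * above. The start routine receives start_arg; returning from it (or
 * calling sos_thread_exit() explicitly) terminates the thread, since
 * sos_thread_exit is installed as its return address by
 * sos_cpu_kstate_init() above. This assumes
 * sos_kernel_thread_start_routine_t is void (*)(void *); hyp_* names
 * are hypothetical and the block is disabled on purpose.
 */
#if 0
static void hyp_ticker(void *arg)
{
  int i;
  for (i = 0; i < 10; i++)
    sos_bochs_printf("tick %d from %s\n", i, (const char *)arg);
  /* Implicit sos_thread_exit() upon return */
}

static void hyp_spawn_ticker(void)
{
  struct sos_thread *t
    = sos_create_kernel_thread("ticker", hyp_ticker, (void *)"ticker",
                               SOS_SCHED_PRIO_LOWEST);
  SOS_ASSERT_FATAL(t != NULL);
}
#endif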


/**
 * Helper function to create a new user thread. If model_thread is
 * given, the new thread will be a copy of that thread. Otherwise the
 * thread will have its initial PC/SP initialized from the
 * user_initial_PC/SP arguments
 */
static struct sos_thread *
create_user_thread(const char *name,
                   struct sos_process *process,
                   const struct sos_thread * model_thread,
                   const struct sos_cpu_state * model_uctxt,
                   sos_uaddr_t user_initial_PC,
                   sos_ui32_t  user_start_arg1,
                   sos_ui32_t  user_start_arg2,
                   sos_uaddr_t user_initial_SP,
                   sos_sched_priority_t priority)
{
  __label__ undo_creation;
  sos_ui32_t flags;
  struct sos_thread *new_thread;

  if (model_thread)
    {
      SOS_ASSERT_FATAL(model_uctxt);
    }
  else
    {
      if (! SOS_SCHED_PRIO_IS_VALID(priority))
        return NULL;
    }

  /* For a user thread, the process must be given */
  if (! process)
    return NULL;

  /* Allocate a thread structure for the new thread */
  new_thread
    = (struct sos_thread*) sos_kmem_cache_alloc(cache_thread,
                                                SOS_KSLAB_ALLOC_ATOMIC);
  if (! new_thread)
    return NULL;

  /* Initialize the thread attributes */
  strzcpy(new_thread->name, ((name)?name:"[NONAME]"), SOS_THR_MAX_NAMELEN);
  new_thread->state    = SOS_THR_CREATED;
  if (model_thread)
    new_thread->priority = model_thread->priority;
  else
    new_thread->priority = priority;

  /* Allocate the stack for the new thread */
  new_thread->kernel_stack_base_addr = sos_kmalloc(SOS_THREAD_KERNEL_STACK_SIZE, 0);
  new_thread->kernel_stack_size      = SOS_THREAD_KERNEL_STACK_SIZE;
  if (! new_thread->kernel_stack_base_addr)
    goto undo_creation;

  /* Initialize the CPU context of the new thread */
  if (model_thread)
    {
      if (SOS_OK
          != sos_cpu_ustate_duplicate(& new_thread->cpu_state,
                                      model_uctxt,
                                      user_start_arg1,
                                      new_thread->kernel_stack_base_addr,
                                      new_thread->kernel_stack_size))
        goto undo_creation;
    }
  else
    {
      if (SOS_OK
          != sos_cpu_ustate_init(& new_thread->cpu_state,
                                 user_initial_PC,
                                 user_start_arg1,
                                 user_start_arg2,
                                 user_initial_SP,
                                 new_thread->kernel_stack_base_addr,
                                 new_thread->kernel_stack_size))
        goto undo_creation;
    }

  /* Attach the new thread to the process */
  if (SOS_OK != sos_process_register_thread(process, new_thread))
    goto undo_creation;

  /* Add the thread to the global list */
  sos_disable_IRQs(flags);
  list_add_tail_named(thread_list, new_thread, gbl_prev, gbl_next);
  sos_restore_IRQs(flags);

  /* Mark the thread ready */
  if (SOS_OK != sos_sched_set_ready(new_thread))
    goto undo_creation;

  /* Normal non-erroneous end of function */
  return new_thread;

 undo_creation:
  if (new_thread->kernel_stack_base_addr)
    sos_kfree((sos_vaddr_t) new_thread->kernel_stack_base_addr);
  sos_kmem_cache_free((sos_vaddr_t) new_thread);
  return NULL;
}


struct sos_thread *
sos_create_user_thread(const char *name,
                       struct sos_process *process,
                       sos_uaddr_t user_initial_PC,
                       sos_ui32_t  user_start_arg1,
                       sos_ui32_t  user_start_arg2,
                       sos_uaddr_t user_initial_SP,
                       sos_sched_priority_t priority)
{
  return create_user_thread(name, process, NULL, NULL,
                            user_initial_PC,
                            user_start_arg1,
                            user_start_arg2,
                            user_initial_SP,
                            priority);
}


/**
 * Create a new user thread as a copy of the given user thread, using
 * the given user context
 */
struct sos_thread *
sos_duplicate_user_thread(const char *name,
                          struct sos_process *process,
                          const struct sos_thread * model_thread,
                          const struct sos_cpu_state * model_uctxt,
                          sos_ui32_t retval)
{
  return create_user_thread(name, process, model_thread, model_uctxt,
                            0, retval, 0, 0, 0);
}
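
/*
 * Editor's illustration: sos_duplicate_user_thread() is the natural
 * building block for a fork()-style syscall. The child thread resumes
 * from the parent's saved user context, with 'retval' (0 for the
 * child, by Unix convention) passed as user_start_arg1 so that the
 * interrupted syscall appears to return that value. hyp_do_fork and
 * its arguments are hypothetical; the block is disabled on purpose.
 */
#if 0
static struct sos_thread *
hyp_do_fork(struct sos_process *child_process,
            const struct sos_cpu_state *parent_user_ctxt)
{
  return sos_duplicate_user_thread("child", child_process,
                                   sos_thread_get_current(),
                                   parent_user_ctxt,
                                   0 /* child sees the syscall return 0 */);
}
#endif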


/**
 * Helper function to switch to the correct MMU configuration to suit
 * the_thread's needs:
 *   - When switching to a user-mode thread, force the reconfiguration
 *     of the MMU
 *   - When switching to a kernel-mode thread, only change the MMU
 *     configuration if the thread was squatting someone else's space
 */
static void _prepare_mm_context(struct sos_thread *the_thread)
{
  /* Going to restore a thread in user mode? */
  if (sos_cpu_context_is_in_user_mode(the_thread->cpu_state)
      == TRUE)
    {
      /* Yes: force the MMU to be correctly set up with the correct
         user address space */

      /* The thread should be a user thread */
      SOS_ASSERT_FATAL(the_thread->process != NULL);

      /* It should not squat any other thread's address space */
      SOS_ASSERT_FATAL(the_thread->squatted_mm_context == NULL);

      /* Perform an MMU context switch if needed */
      sos_mm_context_switch_to(sos_process_get_mm_context(the_thread->process));
    }

  /* Is the_thread a kernel thread squatting a specific address
     space? */
  else if (the_thread->squatted_mm_context != NULL)
    sos_mm_context_switch_to(the_thread->squatted_mm_context);
}


/** Function called after thr has terminated. Called from inside the context
    of another thread, with interrupts disabled */
static void delete_thread(struct sos_thread *thr)
{
  sos_ui32_t flags;

  sos_disable_IRQs(flags);
  list_delete_named(thread_list, thr, gbl_prev, gbl_next);
  sos_restore_IRQs(flags);

  sos_kfree((sos_vaddr_t) thr->kernel_stack_base_addr);

  /* If the thread squats an address space, release it */
  if (thr->squatted_mm_context)
    SOS_ASSERT_FATAL(SOS_OK == change_current_mm_context(NULL));

  /* For a user thread: remove the thread from the process' thread list */
  if (thr->process)
    SOS_ASSERT_FATAL(SOS_OK == sos_process_unregister_thread(thr));

  memset(thr, 0x0, sizeof(struct sos_thread));
  sos_kmem_cache_free((sos_vaddr_t) thr);
}


void sos_thread_exit()
{
  sos_ui32_t flags;
  struct sos_thread *myself, *next_thread;

  /* Interrupt handlers are NOT allowed to exit the current thread! */
  SOS_ASSERT_FATAL(! sos_servicing_irq());

  myself = sos_thread_get_current();

  /* Refuse to end the currently executing thread if it still holds a
     resource! */
  SOS_ASSERT_FATAL(list_is_empty_named(myself->kwaitq_list,
                                       prev_entry_for_thread,
                                       next_entry_for_thread));

  /* Prepare to run the next thread */
  sos_disable_IRQs(flags);
  myself->state = SOS_THR_ZOMBIE;
  next_thread = sos_reschedule(myself, FALSE);

  /* Make sure that the next_thread is valid */
  sos_cpu_state_detect_kernel_stack_overflow(next_thread->cpu_state,
                                             next_thread->kernel_stack_base_addr,
                                             next_thread->kernel_stack_size);

  /*
   * Perform an MMU context switch if needed
   */
  _prepare_mm_context(next_thread);

  /* No need for sos_restore_IRQs() here because the IRQ flag will be
     restored to that of the next thread upon context switch */

  /* Immediate switch to the next thread */
  _set_current(next_thread);
  sos_cpu_context_exit_to(next_thread->cpu_state,
                          (sos_cpu_kstate_function_arg1_t*) delete_thread,
                          (sos_ui32_t) myself);
}
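
/*
 * Editor's note on the exit path above: a zombie thread cannot free
 * its own kernel stack while still running on it.
 * sos_cpu_context_exit_to() therefore first switches to next_thread's
 * context and only then, executing on next_thread's stack, calls
 * delete_thread(myself). Recall also that sos_create_kernel_thread()
 * installs sos_thread_exit as the return address of every kernel
 * thread, so merely returning from a kernel thread's start routine
 * lands here.
 */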


sos_sched_priority_t sos_thread_get_priority(struct sos_thread *thr)
{
  if (! thr)
    thr = (struct sos_thread*)current_thread;

  return thr->priority;
}


sos_thread_state_t sos_thread_get_state(struct sos_thread *thr)
{
  if (! thr)
    thr = (struct sos_thread*)current_thread;

  return thr->state;
}


typedef enum { YIELD_MYSELF, BLOCK_MYSELF } switch_type_t;
/**
 * Helper function to initiate a context switch when the current
 * thread blocks, waits for a timeout, or yields.
 */
static sos_ret_t _switch_to_next_thread(switch_type_t operation)
{
  struct sos_thread *myself, *next_thread;

  SOS_ASSERT_FATAL(current_thread->state == SOS_THR_RUNNING);

  /* Interrupt handlers are NOT allowed to block! */
  SOS_ASSERT_FATAL(! sos_servicing_irq());

  myself = (struct sos_thread*)current_thread;

  /* If we are to block, mark ourselves "BLOCKED" so that there is an
     actual reason for being blocked */
  if (BLOCK_MYSELF == operation)
    {
      myself->state = SOS_THR_BLOCKED;
    }

  /* Identify the next thread */
  next_thread = sos_reschedule(myself, YIELD_MYSELF == operation);

  /* Avoid a context switch if the context does not change */
  if (myself != next_thread)
    {
      /* Sanity checks for the next thread */
      sos_cpu_state_detect_kernel_stack_overflow(next_thread->cpu_state,
                                                 next_thread->kernel_stack_base_addr,
                                                 next_thread->kernel_stack_size);

      /*
       * Perform an MMU context switch if needed
       */
      _prepare_mm_context(next_thread);

      /*
       * Actual CPU context switch
       */
      _set_current(next_thread);
      sos_cpu_context_switch(& myself->cpu_state, next_thread->cpu_state);

      /* Back here! */
      SOS_ASSERT_FATAL(current_thread == myself);
      SOS_ASSERT_FATAL(current_thread->state == SOS_THR_RUNNING);
    }
  else
    {
      /* No context switch, but still update the ID of the current thread */
      _set_current(next_thread);
    }

  return SOS_OK;
}


/**
 * Helper function to change the thread's priority in all the
 * waitqueues associated with the thread.
 */
static sos_ret_t _change_waitq_priorities(struct sos_thread *thr,
                                          sos_sched_priority_t priority)
{
  struct sos_kwaitq_entry *kwq_entry;
  int nb_waitqs;

  list_foreach_forward_named(thr->kwaitq_list, kwq_entry, nb_waitqs,
                             prev_entry_for_thread, next_entry_for_thread)
    {
      SOS_ASSERT_FATAL(SOS_OK == sos_kwaitq_change_priority(kwq_entry->kwaitq,
                                                            kwq_entry,
                                                            priority));
    }

  return SOS_OK;
}


sos_ret_t sos_thread_set_priority(struct sos_thread *thr,
                                  sos_sched_priority_t priority)
{
  __label__ exit_set_prio;
  sos_ui32_t flags;
  sos_ret_t retval;

  if (! SOS_SCHED_PRIO_IS_VALID(priority))
    return -SOS_EINVAL;

  if (! thr)
    thr = (struct sos_thread*)current_thread;

  sos_disable_IRQs(flags);

  /* Signal the kwaitq subsystem that the priority of the thread should
     be updated in all the waitqs it is waiting in */
  retval = _change_waitq_priorities(thr, priority);
  if (SOS_OK != retval)
    goto exit_set_prio;

  /* Signal the scheduler that the thread, currently in the ready list,
     should take the priority change into account */
  if (SOS_THR_READY == thr->state)
    retval = sos_sched_change_priority(thr, priority);

  /* Update the priority */
  thr->priority = priority;

 exit_set_prio:
  sos_restore_IRQs(flags);
  return retval;
}


sos_ret_t sos_thread_yield()
{
  sos_ui32_t flags;
  sos_ret_t retval;

  sos_disable_IRQs(flags);

  retval = _switch_to_next_thread(YIELD_MYSELF);

  sos_restore_IRQs(flags);
  return retval;
}
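
/*
 * Editor's illustration: since SOS never preempts a thread running in
 * kernel mode from the IRQ path (see
 * sos_thread_prepare_irq_switch_back() below), a long-running kernel
 * loop should yield explicitly. hyp_device_ready() is hypothetical;
 * the block is disabled on purpose.
 */
#if 0
extern sos_bool_t hyp_device_ready(void); /* hypothetical */

static void hyp_poll_until_ready(void)
{
  while (! hyp_device_ready())
    SOS_ASSERT_FATAL(SOS_OK == sos_thread_yield());
}
#endif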


/**
 * Internal sleep timeout management
 */
struct sleep_timeout_params
{
  struct sos_thread *thread_to_wakeup;
  sos_bool_t timeout_triggered;
};


/**
 * Callback called when the timeout expires
 */
static void sleep_timeout(struct sos_timeout_action *act)
{
  struct sleep_timeout_params *sleep_timeout_params
    = (struct sleep_timeout_params*) act->routine_data;

  /* Signal that we have been woken up by the timeout */
  sleep_timeout_params->timeout_triggered = TRUE;

  /* Mark the thread ready */
  SOS_ASSERT_FATAL(SOS_OK ==
                   sos_thread_force_unblock(sleep_timeout_params
                                             ->thread_to_wakeup));
}


sos_ret_t sos_thread_sleep(struct sos_time *timeout)
{
  sos_ui32_t flags;
  struct sleep_timeout_params sleep_timeout_params;
  struct sos_timeout_action timeout_action;
  sos_ret_t retval;

  /* Block forever if no timeout is given */
  if (NULL == timeout)
    {
      sos_disable_IRQs(flags);
      retval = _switch_to_next_thread(BLOCK_MYSELF);
      sos_restore_IRQs(flags);

      return retval;
    }

  /* Initialize the timeout action */
  sos_time_init_action(& timeout_action);

  /* Prepare the parameters used by the sleep timeout callback */
  sleep_timeout_params.thread_to_wakeup
    = (struct sos_thread*)current_thread;
  sleep_timeout_params.timeout_triggered = FALSE;

  sos_disable_IRQs(flags);

  /* Now program the timeout! */
  SOS_ASSERT_FATAL(SOS_OK ==
                   sos_time_register_action_relative(& timeout_action,
                                                     timeout,
                                                     sleep_timeout,
                                                     & sleep_timeout_params));

  /* Prepare to block: wait for sleep_timeout() to wake us up from the
     timeout kwaitq, or for someone to wake us up from any other
     waitq */
  retval = _switch_to_next_thread(BLOCK_MYSELF);
  /* Unblocked by something! */

  /* Unblocked by the timeout? */
  if (sleep_timeout_params.timeout_triggered)
    {
      /* Yes */
      SOS_ASSERT_FATAL(sos_time_is_zero(& timeout_action.timeout));
      retval = SOS_OK;
    }
  else
    {
      /* No: we have probably been woken up while in some other
         kwaitq */
      SOS_ASSERT_FATAL(SOS_OK == sos_time_unregister_action(& timeout_action));
      retval = -SOS_EINTR;
    }

  sos_restore_IRQs(flags);

  /* Update the remaining timeout */
  memcpy(timeout, & timeout_action.timeout, sizeof(struct sos_time));

  return retval;
}
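
/*
 * Editor's illustration: a bounded sleep. On -SOS_EINTR the thread was
 * woken from another waitq before the timeout expired, and 'delay' has
 * been updated with the time that remained. The sec/nanosec field
 * names of struct sos_time are assumptions; the block is disabled on
 * purpose.
 */
#if 0
static void hyp_nap_two_seconds(void)
{
  struct sos_time delay;
  delay.sec     = 2;  /* field names assumed */
  delay.nanosec = 0;
  if (-SOS_EINTR == sos_thread_sleep(& delay))
    sos_bochs_printf("woken early, %u.%09u s remained\n",
                     (unsigned)delay.sec, (unsigned)delay.nanosec);
}
#endif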


sos_ret_t sos_thread_force_unblock(struct sos_thread *thread)
{
  sos_ret_t retval;
  sos_ui32_t flags;

  if (! thread)
    return -SOS_EINVAL;

  sos_disable_IRQs(flags);

  /* Thread already woken up? */
  retval = SOS_OK;
  switch(sos_thread_get_state(thread))
    {
    case SOS_THR_RUNNING:
    case SOS_THR_READY:
      /* Do nothing */
      break;

    case SOS_THR_ZOMBIE:
      retval = -SOS_EFATAL;
      break;

    default:
      retval = sos_sched_set_ready(thread);
      break;
    }

  sos_restore_IRQs(flags);

  return retval;
}


void sos_thread_dump_backtrace(sos_bool_t on_console,
                               sos_bool_t on_bochs)
{
  sos_vaddr_t stack_bottom = current_thread->kernel_stack_base_addr;
  sos_size_t stack_size    = current_thread->kernel_stack_size;

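  /* Note for the reader: backtracer() below is a nested function, a
     GCC extension. It captures stack_bottom, stack_size, on_console
     and on_bochs from the enclosing function's scope. */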
  void backtracer(sos_vaddr_t PC,
                  sos_vaddr_t params,
                  sos_ui32_t depth,
                  void *custom_arg)
    {
      sos_ui32_t invalid = 0xffffffff, *arg1, *arg2, *arg3, *arg4;

      /* Get the addresses of the first 4 arguments from the stack
         frame. Of these, 0 to 4 arguments might actually be
         meaningful (depending on how many arguments the function
         takes). */
      arg1 = (sos_ui32_t*)params;
      arg2 = (sos_ui32_t*)(params+4);
      arg3 = (sos_ui32_t*)(params+8);
      arg4 = (sos_ui32_t*)(params+12);

      /* Make sure the addresses of these arguments fit inside the
         stack boundaries */
#define INTERVAL_OK(b,v,u) ( ((b) <= (sos_vaddr_t)(v)) \
                             && ((sos_vaddr_t)(v) < (u)) )
      if (!INTERVAL_OK(stack_bottom, arg1, stack_bottom + stack_size))
        arg1 = &invalid;
      if (!INTERVAL_OK(stack_bottom, arg2, stack_bottom + stack_size))
        arg2 = &invalid;
      if (!INTERVAL_OK(stack_bottom, arg3, stack_bottom + stack_size))
        arg3 = &invalid;
      if (!INTERVAL_OK(stack_bottom, arg4, stack_bottom + stack_size))
        arg4 = &invalid;

      /* Print the function context for this frame */
      if (on_bochs)
        sos_bochs_printf("[%d] PC=0x%x arg1=0x%x arg2=0x%x arg3=0x%x\n",
                         (unsigned)depth, (unsigned)PC,
                         (unsigned)*arg1, (unsigned)*arg2,
                         (unsigned)*arg3);

      if (on_console)
        sos_x86_videomem_printf(23-depth, 3,
                                SOS_X86_VIDEO_BG_BLUE
                                  | SOS_X86_VIDEO_FG_LTGREEN,
                                "[%d] PC=0x%x arg1=0x%x arg2=0x%x arg3=0x%x arg4=0x%x",
                                (unsigned)depth, (unsigned)PC,
                                (unsigned)*arg1, (unsigned)*arg2,
                                (unsigned)*arg3, (unsigned)*arg4);
    }

  sos_backtrace(NULL, 15, stack_bottom, stack_size,
                backtracer, NULL);
}
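
/*
 * Editor's illustration: dumping the current thread's call stack to
 * the Bochs console, e.g. from a debugging hook. hyp_debug_dump is
 * hypothetical; the block is disabled on purpose.
 */
#if 0
static void hyp_debug_dump(void)
{
  sos_thread_dump_backtrace(FALSE /* console */, TRUE /* bochs */);
}
#endif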



/* **********************************************
 * Restricted functions
 */


static sos_ret_t
change_current_mm_context(struct sos_mm_context *mm_ctxt)
{
  /* Retrieve the previous mm context */
  struct sos_mm_context * prev_mm_ctxt
    = current_thread->squatted_mm_context;

  /* Update the current thread's squatted mm context */
  current_thread->squatted_mm_context = mm_ctxt;

  /* Update the reference counts and switch the MMU configuration if
     needed */
  if (mm_ctxt != NULL)
    {
      sos_mm_context_ref(mm_ctxt); /* Because it is now referenced as
                                      the squatted_mm_context field of
                                      the thread */
      sos_mm_context_switch_to(mm_ctxt);
    }
  else
    sos_mm_context_unref(prev_mm_ctxt); /* Because it is no longer
                                           referenced as the
                                           squatted_mm_context field of
                                           the thread */

  return SOS_OK;
}


sos_ret_t
sos_thread_prepare_user_space_access(struct sos_umem_vmm_as * dest_as,
                                     sos_vaddr_t fixup_retvaddr)
{
  sos_ret_t  retval;
  sos_ui32_t flags;

  if (! dest_as)
    {
      /* The thread is not a user thread: do nothing */
      if (! current_thread->process)
        return -SOS_EINVAL;

      dest_as = sos_process_get_address_space(current_thread->process);
    }
  else
    /* Don't allow access to an address space other than that of the
       current thread if page faults are allowed! */
    SOS_ASSERT_FATAL(! fixup_retvaddr);

  sos_disable_IRQs(flags);
  SOS_ASSERT_FATAL(NULL == current_thread->squatted_mm_context);
  SOS_ASSERT_FATAL(0 == current_thread->fixup_uaccess.return_vaddr);

  /* Change the MMU configuration and init the fixup return address */
  retval = change_current_mm_context(sos_umem_vmm_get_mm_context(dest_as));
  if (SOS_OK == retval)
    {
      current_thread->fixup_uaccess.return_vaddr  = fixup_retvaddr;
      current_thread->fixup_uaccess.faulted_uaddr = 0;
    }

  sos_restore_IRQs(flags);
  return retval;
}


sos_ret_t
sos_thread_end_user_space_access(void)
{
  sos_ret_t  retval;
  sos_ui32_t flags;

  sos_disable_IRQs(flags);
  SOS_ASSERT_FATAL(NULL != current_thread->squatted_mm_context);

  /* Don't impose anything regarding the current MMU configuration anymore */
  retval = change_current_mm_context(NULL);
  current_thread->fixup_uaccess.return_vaddr  = 0;
  current_thread->fixup_uaccess.faulted_uaddr = 0;

  sos_restore_IRQs(flags);
  return retval;
}
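
/*
 * Editor's illustration: the expected bracketing of a user-space
 * access from kernel code, pairing the two functions above. Passing
 * dest_as == NULL selects the current thread's own address space;
 * fixup_retvaddr == 0 presumably means a faulting access is fatal
 * rather than fixed up. hyp_* names are hypothetical; the block is
 * disabled on purpose.
 */
#if 0
static void hyp_touch_user_byte(sos_uaddr_t uaddr)
{
  SOS_ASSERT_FATAL(SOS_OK ==
                   sos_thread_prepare_user_space_access(NULL, 0));
  /* ... it is now legal to dereference user addresses such as uaddr ... */
  SOS_ASSERT_FATAL(SOS_OK == sos_thread_end_user_space_access());
}
#endif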


void sos_thread_prepare_syscall_switch_back(struct sos_cpu_state *cpu_state)
{
  /* Don't preempt the current thread */

  /*
   * Save the state of the interrupted context to make sure that:
   *   - The list of threads correctly reflects that the thread is back
   *     in user mode
   *   - _prepare_mm_context() deals with the correct mm_context
   */
  current_thread->cpu_state = cpu_state;

  /* Perform an MMU context switch if needed */
  _prepare_mm_context((struct sos_thread*) current_thread);
}


void sos_thread_prepare_exception_switch_back(struct sos_cpu_state *cpu_state)
{
  /* Don't preempt the current thread */

  /*
   * Save the state of the interrupted context to make sure that:
   *   - The list of threads correctly reflects that the thread is
   *     running in user or kernel mode
   *   - _prepare_mm_context() deals with the correct mm_context
   */
  current_thread->cpu_state = cpu_state;

  /* Perform an MMU context switch if needed */
  _prepare_mm_context((struct sos_thread*) current_thread);
}


void
sos_thread_prepare_irq_servicing(struct sos_cpu_state *interrupted_state)
{
  current_thread->cpu_state = interrupted_state;
}


struct sos_cpu_state *
sos_thread_prepare_irq_switch_back(void)
{
  struct sos_thread *myself, *next_thread;

  /* In SOS, threads in kernel mode are NEVER preempted from the
     interrupt handlers! */
  if (! sos_cpu_context_is_in_user_mode(current_thread->cpu_state))
    return current_thread->cpu_state;

  /*
   * Here we are dealing only with the possible preemption of user
   * threads in user context!
   */

  /* Make sure the thread actually is a user thread */
  SOS_ASSERT_FATAL(current_thread->process != NULL);

  /* Save the state of the interrupted context */
  myself = (struct sos_thread*)current_thread;

  /* Select the next thread to run */
  next_thread = sos_reschedule(myself, FALSE);

  /* Perform an MMU context switch if needed */
  _prepare_mm_context(next_thread);

  /* Set up the next_thread's context on the CPU */
  _set_current(next_thread);
  return next_thread->cpu_state;
}
