001
002
003
004
005
006
007
008
009
010
011
012
013
014
015
016
017
018
019 #include <sos/physmem.h>
020 #include <sos/kmem_slab.h>
021 #include <sos/kmalloc.h>
022 #include <sos/klibc.h>
023 #include <sos/list.h>
024 #include <sos/assert.h>
025 #include <hwcore/mm_context.h>
026 #include <sos/process.h>
027
028 #include <drivers/bochs.h>
029 #include <drivers/x86_videomem.h>
030
031 #include <hwcore/irq.h>
032
033 #include "thread.h"
034
035
036
037
038
039 #define SOS_THREAD_KERNEL_STACK_SIZE (1*SOS_PAGE_SIZE)
040
041
042
043
044
045
046
047
048
049
050
051
052
053
054
055
056 static volatile struct sos_thread *current_thread = NULL;
057
058
059
060
061
062
063
064 static struct sos_thread *thread_list = NULL;
065
066
067
068
069
070 static struct sos_kslab_cache *cache_thread;
071
072
073
074
075
076
077
078 static sos_ret_t change_current_mm_context(struct sos_mm_context *mm_ctxt);
079
080
081 struct sos_thread *sos_thread_get_current()
082 {
083 SOS_ASSERT_FATAL(current_thread->state == SOS_THR_RUNNING);
084 return (struct sos_thread*)current_thread;
085 }
086
087
088 inline static sos_ret_t _set_current(struct sos_thread *thr)
089 {
090 SOS_ASSERT_FATAL(thr->state == SOS_THR_READY);
091 current_thread = thr;
092 current_thread->state = SOS_THR_RUNNING;
093 return SOS_OK;
094 }
095
096
/**
 * Initialize the thread subsystem: create the slab cache of thread
 * descriptors, then build a descriptor ("[kinit]") for the bootstrap
 * thread that is already executing this very code, and install it as
 * the current thread.
 *
 * @param init_thread_stack_base_addr base of the stack we are running on
 * @param init_thread_stack_size      size of that stack
 * @return SOS_OK, or -SOS_ENOMEM on allocation failure.
 */
sos_ret_t sos_thread_subsystem_setup(sos_vaddr_t init_thread_stack_base_addr,
                                     sos_size_t init_thread_stack_size)
{
  struct sos_thread *myself;

  /* Slab cache for all thread descriptors (mapped + zero-filled slabs) */
  cache_thread = sos_kmem_cache_create("thread",
                                       sizeof(struct sos_thread),
                                       2,
                                       0,
                                       SOS_KSLAB_CREATE_MAP
                                       | SOS_KSLAB_CREATE_ZERO);
  if (! cache_thread)
    return -SOS_ENOMEM;

  /* Allocate a descriptor for the thread we are running right now */
  myself = (struct sos_thread*) sos_kmem_cache_alloc(cache_thread,
                                                     SOS_KSLAB_ALLOC_ATOMIC);
  if (! myself)
    return -SOS_ENOMEM;

  /* Describe ourselves: lowest priority, stack supplied by the caller */
  strzcpy(myself->name, "[kinit]", SOS_THR_MAX_NAMELEN);
  myself->state = SOS_THR_CREATED;
  myself->priority = SOS_SCHED_PRIO_LOWEST;
  myself->kernel_stack_base_addr = init_thread_stack_base_addr;
  myself->kernel_stack_size = init_thread_stack_size;

  /* Arm kernel-stack overflow detection for this stack */
  sos_cpu_state_prepare_detect_kernel_stack_overflow(myself->cpu_state,
                                                     myself->kernel_stack_base_addr,
                                                     myself->kernel_stack_size);

  /* The global thread list initially contains only ourselves */
  list_singleton_named(thread_list, myself, gbl_prev, gbl_next);

  /* Transition CREATED -> READY -> RUNNING (_set_current asserts READY) */
  myself->state = SOS_THR_READY;
  _set_current(myself);

  return SOS_OK;
}
139
140
141 struct sos_thread *
142 sos_create_kernel_thread(const char *name,
143 sos_kernel_thread_start_routine_t start_func,
144 void *start_arg,
145 sos_sched_priority_t priority)
146 {
147 __label__ undo_creation;
148 sos_ui32_t flags;
149 struct sos_thread *new_thread;
150
151 if (! start_func)
152 return NULL;
153 if (! SOS_SCHED_PRIO_IS_VALID(priority))
154 return NULL;
155
156
157 new_thread
158 = (struct sos_thread*) sos_kmem_cache_alloc(cache_thread,
159 SOS_KSLAB_ALLOC_ATOMIC);
160 if (! new_thread)
161 return NULL;
162
163
164 strzcpy(new_thread->name, ((name)?name:"[NONAME]"), SOS_THR_MAX_NAMELEN);
165 new_thread->state = SOS_THR_CREATED;
166 new_thread->priority = priority;
167
168
169 new_thread->kernel_stack_base_addr = sos_kmalloc(SOS_THREAD_KERNEL_STACK_SIZE, 0);
170 new_thread->kernel_stack_size = SOS_THREAD_KERNEL_STACK_SIZE;
171 if (! new_thread->kernel_stack_base_addr)
172 goto undo_creation;
173
174
175 if (SOS_OK
176 != sos_cpu_kstate_init(& new_thread->cpu_state,
177 (sos_cpu_kstate_function_arg1_t*) start_func,
178 (sos_ui32_t) start_arg,
179 new_thread->kernel_stack_base_addr,
180 new_thread->kernel_stack_size,
181 (sos_cpu_kstate_function_arg1_t*) sos_thread_exit,
182 (sos_ui32_t) NULL))
183 goto undo_creation;
184
185
186 sos_disable_IRQs(flags);
187 list_add_tail_named(thread_list, new_thread, gbl_prev, gbl_next);
188 sos_restore_IRQs(flags);
189
190
191 if (SOS_OK != sos_sched_set_ready(new_thread))
192 goto undo_creation;
193
194
195 return new_thread;
196
197 undo_creation:
198 if (new_thread->kernel_stack_base_addr)
199 sos_kfree((sos_vaddr_t) new_thread->kernel_stack_base_addr);
200 sos_kmem_cache_free((sos_vaddr_t) new_thread);
201 return NULL;
202 }
203
204
205
206
207
208
209
210
211 static struct sos_thread *
212 create_user_thread(const char *name,
213 struct sos_process *process,
214 const struct sos_thread * model_thread,
215 const struct sos_cpu_state * model_uctxt,
216 sos_uaddr_t user_initial_PC,
217 sos_ui32_t user_start_arg1,
218 sos_ui32_t user_start_arg2,
219 sos_uaddr_t user_initial_SP,
220 sos_sched_priority_t priority)
221 {
222 __label__ undo_creation;
223 sos_ui32_t flags;
224 struct sos_thread *new_thread;
225
226 if (model_thread)
227 {
228 SOS_ASSERT_FATAL(model_uctxt);
229 }
230 else
231 {
232 if (! SOS_SCHED_PRIO_IS_VALID(priority))
233 return NULL;
234 }
235
236
237 if (! process)
238 return NULL;
239
240
241 new_thread
242 = (struct sos_thread*) sos_kmem_cache_alloc(cache_thread,
243 SOS_KSLAB_ALLOC_ATOMIC);
244 if (! new_thread)
245 return NULL;
246
247
248 strzcpy(new_thread->name, ((name)?name:"[NONAME]"), SOS_THR_MAX_NAMELEN);
249 new_thread->state = SOS_THR_CREATED;
250 if (model_thread)
251 new_thread->priority = model_thread->priority;
252 else
253 new_thread->priority = priority;
254
255
256 new_thread->kernel_stack_base_addr = sos_kmalloc(SOS_THREAD_KERNEL_STACK_SIZE, 0);
257 new_thread->kernel_stack_size = SOS_THREAD_KERNEL_STACK_SIZE;
258 if (! new_thread->kernel_stack_base_addr)
259 goto undo_creation;
260
261
262 if (model_thread)
263 {
264 if (SOS_OK
265 != sos_cpu_ustate_duplicate(& new_thread->cpu_state,
266 model_uctxt,
267 user_start_arg1,
268 new_thread->kernel_stack_base_addr,
269 new_thread->kernel_stack_size))
270 goto undo_creation;
271 }
272 else
273 {
274 if (SOS_OK
275 != sos_cpu_ustate_init(& new_thread->cpu_state,
276 user_initial_PC,
277 user_start_arg1,
278 user_start_arg2,
279 user_initial_SP,
280 new_thread->kernel_stack_base_addr,
281 new_thread->kernel_stack_size))
282 goto undo_creation;
283 }
284
285
286 if (SOS_OK != sos_process_register_thread(process, new_thread))
287 goto undo_creation;
288
289
290 sos_disable_IRQs(flags);
291 list_add_tail_named(thread_list, new_thread, gbl_prev, gbl_next);
292 sos_restore_IRQs(flags);
293
294
295 if (SOS_OK != sos_sched_set_ready(new_thread))
296 goto undo_creation;
297
298
299 return new_thread;
300
301 undo_creation:
302 if (new_thread->kernel_stack_base_addr)
303 sos_kfree((sos_vaddr_t) new_thread->kernel_stack_base_addr);
304 sos_kmem_cache_free((sos_vaddr_t) new_thread);
305 return NULL;
306 }
307
308
309 struct sos_thread *
310 sos_create_user_thread(const char *name,
311 struct sos_process *process,
312 sos_uaddr_t user_initial_PC,
313 sos_ui32_t user_start_arg1,
314 sos_ui32_t user_start_arg2,
315 sos_uaddr_t user_initial_SP,
316 sos_sched_priority_t priority)
317 {
318 return create_user_thread(name, process, NULL, NULL,
319 user_initial_PC,
320 user_start_arg1,
321 user_start_arg2,
322 user_initial_SP,
323 priority);
324 }
325
326
327
328
329
330
331 struct sos_thread *
332 sos_duplicate_user_thread(const char *name,
333 struct sos_process *process,
334 const struct sos_thread * model_thread,
335 const struct sos_cpu_state * model_uctxt,
336 sos_ui32_t retval)
337 {
338 return create_user_thread(name, process, model_thread, model_uctxt,
339 0, retval, 0, 0, 0);
340 }
341
342
343
344
345
346
347
348
349
350
/*
 * Make sure the MMU is configured correctly before (re)entering the
 * given thread:
 *  - thread resuming in user mode: switch to its process's MM context
 *    (it must not be squatting another address space at that point);
 *  - thread resuming in kernel mode that borrowed ("squatted") an MM
 *    context: switch to that borrowed context;
 *  - otherwise: leave the MMU configuration unchanged.
 */
static void _prepare_mm_context(struct sos_thread *the_thread)
{
  /* Resuming in user mode */
  if (sos_cpu_context_is_in_user_mode(the_thread->cpu_state)
      == TRUE)
    {
      /* A user-mode context always belongs to a process */
      SOS_ASSERT_FATAL(the_thread->process != NULL);

      /* A thread returning to user space cannot also be squatting a
         foreign address space */
      SOS_ASSERT_FATAL(the_thread->squatted_mm_context == NULL);

      /* Point the MMU at the process's address space */
      sos_mm_context_switch_to(sos_process_get_mm_context(the_thread->process));
    }

  /* Resuming in kernel mode while borrowing an address space */
  else if (the_thread->squatted_mm_context != NULL)
    sos_mm_context_switch_to(the_thread->squatted_mm_context);
}
375
376
377
378
/*
 * Final reclamation of a zombie thread's resources.  Invoked via
 * sos_cpu_context_exit_to() from sos_thread_exit(), ie it runs on the
 * NEXT thread's stack — never on the stack being freed here.
 *
 * @param thr the dead thread; its descriptor is zeroed and returned to
 *            the slab cache.
 */
static void delete_thread(struct sos_thread *thr)
{
  sos_ui32_t flags;

  /* Unlink from the global thread list (IRQs off around list update) */
  sos_disable_IRQs(flags);
  list_delete_named(thread_list, thr, gbl_prev, gbl_next);
  sos_restore_IRQs(flags);

  /* The kernel stack is no longer in use: we run on another one */
  sos_kfree((sos_vaddr_t) thr->kernel_stack_base_addr);

  /* NOTE(review): the guard tests thr->squatted_mm_context, but
     change_current_mm_context() operates on current_thread, which at
     this point is the NEXT thread — confirm this hand-off of the
     squatted context is intended. */
  if (thr->squatted_mm_context)
    SOS_ASSERT_FATAL(SOS_OK == change_current_mm_context(NULL));

  /* User threads are also registered in their process: detach now */
  if (thr->process)
    SOS_ASSERT_FATAL(SOS_OK == sos_process_unregister_thread(thr));

  /* Poison the descriptor and release it */
  memset(thr, 0x0, sizeof(struct sos_thread));
  sos_kmem_cache_free((sos_vaddr_t) thr);
}
400
401
/*
 * Terminate the calling thread.  Never returns.
 * The thread marks itself zombie and elects a successor; the actual
 * destruction (delete_thread) is deferred until execution has moved to
 * the successor's stack, via sos_cpu_context_exit_to() — a thread
 * cannot free the very stack it is running on.
 */
void sos_thread_exit()
{
  sos_ui32_t flags;
  struct sos_thread *myself, *next_thread;

  /* A thread exits in its own context, never from an IRQ handler */
  SOS_ASSERT_FATAL(! sos_servicing_irq());

  myself = sos_thread_get_current();

  /* The thread must have left every kernel wait queue by now */
  SOS_ASSERT_FATAL(list_is_empty_named(myself->kwaitq_list,
                                       prev_entry_for_thread,
                                       next_entry_for_thread));

  /* Mark ourselves dead and elect a successor */
  sos_disable_IRQs(flags);
  myself->state = SOS_THR_ZOMBIE;
  next_thread = sos_reschedule(myself, FALSE);

  /* Sanity: make sure the successor has not smashed its kernel stack */
  sos_cpu_state_detect_kernel_stack_overflow(next_thread->cpu_state,
                                             next_thread->kernel_stack_base_addr,
                                             next_thread->kernel_stack_size);

  /* Install the MM configuration the successor expects */
  _prepare_mm_context(next_thread);

  /* Switch to the successor; delete_thread(myself) then runs on the
     successor's stack and reclaims our resources */
  _set_current(next_thread);
  sos_cpu_context_exit_to(next_thread->cpu_state,
                          (sos_cpu_kstate_function_arg1_t*) delete_thread,
                          (sos_ui32_t) myself);
}
442
443
444 sos_sched_priority_t sos_thread_get_priority(struct sos_thread *thr)
445 {
446 if (! thr)
447 thr = (struct sos_thread*)current_thread;
448
449 return thr->priority;
450 }
451
452
453 sos_thread_state_t sos_thread_get_state(struct sos_thread *thr)
454 {
455 if (! thr)
456 thr = (struct sos_thread*)current_thread;
457
458 return thr->state;
459 }
460
461
462 typedef enum { YIELD_MYSELF, BLOCK_MYSELF } switch_type_t;
463
464
465
466
/*
 * Core context switch: elect the next thread and transfer the CPU to
 * it.  Returns (SOS_OK) in the calling thread's context when it is
 * next scheduled.
 *
 * @param operation YIELD_MYSELF: caller stays ready, just gives the
 *                  CPU away; BLOCK_MYSELF: caller is marked blocked
 *                  before electing a successor.
 * Called with IRQs disabled by both call sites (yield/sleep); never
 * legal from an IRQ handler.
 */
static sos_ret_t _switch_to_next_thread(switch_type_t operation)
{
  struct sos_thread *myself, *next_thread;

  SOS_ASSERT_FATAL(current_thread->state == SOS_THR_RUNNING);

  /* Context switching from interrupt context is forbidden */
  SOS_ASSERT_FATAL(! sos_servicing_irq());

  myself = (struct sos_thread*)current_thread;

  /* Blocking: leave the RUNNING state before electing a successor */
  if (BLOCK_MYSELF == operation)
    {
      myself->state = SOS_THR_BLOCKED;
    }

  /* Ask the scheduler for the next thread to run */
  next_thread = sos_reschedule(myself, YIELD_MYSELF == operation);

  /* Real switch only when somebody else was elected */
  if (myself != next_thread)
    {
      /* Sanity: the elected thread must not have smashed its stack */
      sos_cpu_state_detect_kernel_stack_overflow(next_thread->cpu_state,
                                                 next_thread->kernel_stack_base_addr,
                                                 next_thread->kernel_stack_size);

      /* Install the MM configuration the elected thread expects */
      _prepare_mm_context(next_thread);

      /* Save our context and resume the elected thread; execution
         returns past this call only when WE are scheduled again */
      _set_current(next_thread);
      sos_cpu_context_switch(& myself->cpu_state, next_thread->cpu_state);

      /* Back from the switch: we must be the running thread again */
      SOS_ASSERT_FATAL(current_thread == myself);
      SOS_ASSERT_FATAL(current_thread->state == SOS_THR_RUNNING);
    }
  else
    {
      /* We were re-elected: just restore the RUNNING state */
      _set_current(next_thread);
    }

  return SOS_OK;
}
519
520
521
522
523
524
/*
 * Helper for sos_thread_set_priority(): propagate the new priority to
 * every kernel wait queue the thread is currently registered in, so
 * each queue's priority ordering stays consistent.
 * Called with IRQs disabled by its only caller.
 */
static sos_ret_t _change_waitq_priorities(struct sos_thread *thr,
                                          sos_sched_priority_t priority)
{
  struct sos_kwaitq_entry *kwq_entry;
  int nb_waitqs;

  /* Walk the thread's kwaitq-entry list and update each queue */
  list_foreach_forward_named(thr->kwaitq_list, kwq_entry, nb_waitqs,
                             prev_entry_for_thread, next_entry_for_thread)
    {
      SOS_ASSERT_FATAL(SOS_OK == sos_kwaitq_change_priority(kwq_entry->kwaitq,
                                                            kwq_entry,
                                                            priority));
    }

  return SOS_OK;
}
541
542
/*
 * Change a thread's scheduling priority.
 * @param thr      target thread, or NULL for the current thread.
 * @param priority the new priority (must be valid).
 * @return SOS_OK, -SOS_EINVAL on invalid priority, or the error
 *         reported while updating the wait queues / scheduler.
 */
sos_ret_t sos_thread_set_priority(struct sos_thread *thr,
                                  sos_sched_priority_t priority)
{
  __label__ exit_set_prio;
  sos_ui32_t flags;
  sos_ret_t retval;

  if (! SOS_SCHED_PRIO_IS_VALID(priority))
    return -SOS_EINVAL;

  /* NULL designates the calling thread */
  if (! thr)
    thr = (struct sos_thread*)current_thread;

  sos_disable_IRQs(flags);

  /* First re-order any kernel wait queues the thread is blocked in */
  retval = _change_waitq_priorities(thr, priority);
  if (SOS_OK != retval)
    goto exit_set_prio;

  /* A READY thread also sits in the scheduler's queues: move it there */
  if (SOS_THR_READY == thr->state)
    retval = sos_sched_change_priority(thr, priority);

  /* Finally record the new priority in the descriptor */
  thr->priority = priority;

 exit_set_prio:
  sos_restore_IRQs(flags);
  return retval;
}
577
578
579 sos_ret_t sos_thread_yield()
580 {
581 sos_ui32_t flags;
582 sos_ret_t retval;
583
584 sos_disable_IRQs(flags);
585
586 retval = _switch_to_next_thread(YIELD_MYSELF);
587
588 sos_restore_IRQs(flags);
589 return retval;
590 }
591
592
593
594
595
/*
 * State shared between sos_thread_sleep() and its sleep_timeout()
 * callback.  Lives on the sleeping thread's stack for the whole sleep.
 */
struct sleep_timeout_params
{
  struct sos_thread *thread_to_wakeup; /* thread blocked in sos_thread_sleep() */
  sos_bool_t timeout_triggered;        /* set TRUE by sleep_timeout() */
};
601
602
603
604
605
/*
 * Timeout-action callback: wake the thread blocked in
 * sos_thread_sleep() when its delay expires.
 * @param act the expired action; act->routine_data points to the
 *            sleep_timeout_params block on the sleeper's stack.
 */
static void sleep_timeout(struct sos_timeout_action *act)
{
  struct sleep_timeout_params *sleep_timeout_params
    = (struct sleep_timeout_params*) act->routine_data;

  /* Flag the wakeup as timeout-caused BEFORE unblocking, so the
     sleeper can tell it apart from a forced unblock */
  sleep_timeout_params->timeout_triggered = TRUE;

  /* Put the sleeper back into the ready state */
  SOS_ASSERT_FATAL(SOS_OK ==
                   sos_thread_force_unblock(sleep_timeout_params
                                            ->thread_to_wakeup));
}
619
620
/*
 * Block the calling thread.
 * @param timeout NULL: block until sos_thread_force_unblock();
 *                otherwise block for at most *timeout.  On return,
 *                *timeout is updated with the remaining time (zero if
 *                the full delay elapsed).
 * @return SOS_OK when the timeout expired, -SOS_EINTR when the thread
 *         was woken before the timeout.
 */
sos_ret_t sos_thread_sleep(struct sos_time *timeout)
{
  sos_ui32_t flags;
  struct sleep_timeout_params sleep_timeout_params;
  struct sos_timeout_action timeout_action;
  sos_ret_t retval;

  /* No timeout: block indefinitely */
  if (NULL == timeout)
    {
      sos_disable_IRQs(flags);
      retval = _switch_to_next_thread(BLOCK_MYSELF);
      sos_restore_IRQs(flags);

      return retval;
    }

  /* The timeout action and its parameter block live on OUR stack: they
     stay valid because we remain blocked while they are in use */
  sos_time_init_action(& timeout_action);

  sleep_timeout_params.thread_to_wakeup
    = (struct sos_thread*)current_thread;
  sleep_timeout_params.timeout_triggered = FALSE;

  sos_disable_IRQs(flags);

  /* Program sleep_timeout() to fire after the requested delay */
  SOS_ASSERT_FATAL(SOS_OK ==
                   sos_time_register_action_relative(& timeout_action,
                                                     timeout,
                                                     sleep_timeout,
                                                     & sleep_timeout_params));

  /* Block; execution resumes here either on timeout expiry or on a
     forced unblock */
  retval = _switch_to_next_thread(BLOCK_MYSELF);

  if (sleep_timeout_params.timeout_triggered)
    {
      /* Woken by the timer: the action must have fully elapsed */
      SOS_ASSERT_FATAL(sos_time_is_zero(& timeout_action.timeout));
      retval = SOS_OK;
    }
  else
    {
      /* Woken early (forced unblock): disarm the pending action */
      SOS_ASSERT_FATAL(SOS_OK == sos_time_unregister_action(& timeout_action));
      retval = -SOS_EINTR;
    }

  sos_restore_IRQs(flags);

  /* Report the remaining time back to the caller */
  memcpy(timeout, & timeout_action.timeout, sizeof(struct sos_time));

  return retval;
}
683
684
685 sos_ret_t sos_thread_force_unblock(struct sos_thread *thread)
686 {
687 sos_ret_t retval;
688 sos_ui32_t flags;
689
690 if (! thread)
691 return -SOS_EINVAL;
692
693 sos_disable_IRQs(flags);
694
695
696 retval = SOS_OK;
697 switch(sos_thread_get_state(thread))
698 {
699 case SOS_THR_RUNNING:
700 case SOS_THR_READY:
701
702 break;
703
704 case SOS_THR_ZOMBIE:
705 retval = -SOS_EFATAL;
706 break;
707
708 default:
709 retval = sos_sched_set_ready(thread);
710 break;
711 }
712
713 sos_restore_IRQs(flags);
714
715 return retval;
716 }
717
718
/*
 * Dump a backtrace of the current thread's kernel stack on the console
 * and/or the Bochs debug port.
 * Implementation note: backtracer() is a GCC nested function so that it
 * can capture stack_bottom / stack_size and the two output flags.
 */
void sos_thread_dump_backtrace(sos_bool_t on_console,
                               sos_bool_t on_bochs)
{
  sos_vaddr_t stack_bottom = current_thread->kernel_stack_base_addr;
  sos_size_t stack_size = current_thread->kernel_stack_size;

  /* Per-frame callback: prints the PC and the first stack arguments */
  static void backtracer(sos_vaddr_t PC,
                         sos_vaddr_t params,
                         sos_ui32_t depth,
                         void *custom_arg)
  {
    sos_ui32_t invalid = 0xffffffff, *arg1, *arg2, *arg3, *arg4;

    /* Candidate addresses of the frame's first 4 arguments */
    arg1 = (sos_ui32_t*)params;
    arg2 = (sos_ui32_t*)(params+4);
    arg3 = (sos_ui32_t*)(params+8);
    arg4 = (sos_ui32_t*)(params+12);

    /* Only dereference argument slots that actually lie inside this
       thread's kernel stack; otherwise print a 0xffffffff marker */
#define INTERVAL_OK(b,v,u) ( ((b) <= (sos_vaddr_t)(v)) \
                             && ((sos_vaddr_t)(v) < (u)) )
    if (!INTERVAL_OK(stack_bottom, arg1, stack_bottom + stack_size))
      arg1 = &invalid;
    if (!INTERVAL_OK(stack_bottom, arg2, stack_bottom + stack_size))
      arg2 = &invalid;
    if (!INTERVAL_OK(stack_bottom, arg3, stack_bottom + stack_size))
      arg3 = &invalid;
    if (!INTERVAL_OK(stack_bottom, arg4, stack_bottom + stack_size))
      arg4 = &invalid;

    if (on_bochs)
      sos_bochs_printf("[%d] PC=0x%x arg1=0x%x arg2=0x%x arg3=0x%x\n",
                       (unsigned)depth, (unsigned)PC,
                       (unsigned)*arg1, (unsigned)*arg2,
                       (unsigned)*arg3);

    if (on_console)
      sos_x86_videomem_printf(23-depth, 3,
                              SOS_X86_VIDEO_BG_BLUE
                              | SOS_X86_VIDEO_FG_LTGREEN,
                              "[%d] PC=0x%x arg1=0x%x arg2=0x%x arg3=0x%x arg4=0x%x",
                              (unsigned)depth, PC,
                              (unsigned)*arg1, (unsigned)*arg2,
                              (unsigned)*arg3, (unsigned)*arg4);

  }

  /* Walk at most 15 frames of the current stack */
  sos_backtrace(NULL, 15, stack_bottom, stack_size,
                backtracer, NULL);
}
775
776
777
778
779
780
781
782
/*
 * Set or clear the MM context "squatted" by the current thread.
 * @param mm_ctxt context to borrow (ref-counted up, MMU switched to
 *                it), or NULL to release the previously borrowed one
 *                (ref-counted down; the MMU is not switched back here —
 *                the next context switch handles that).
 * Called with IRQs disabled by the callers in this file.
 */
static sos_ret_t
change_current_mm_context(struct sos_mm_context *mm_ctxt)
{
  /* Remember the context currently borrowed, if any */
  struct sos_mm_context * prev_mm_ctxt
    = current_thread->squatted_mm_context;

  current_thread->squatted_mm_context = mm_ctxt;

  if (mm_ctxt != NULL)
    {
      /* Keep the borrowed context alive while we use it */
      sos_mm_context_ref(mm_ctxt);

      /* And make the MMU use it right away */
      sos_mm_context_switch_to(mm_ctxt);
    }
  else
    /* NOTE(review): assumes a context was actually borrowed before
       (prev_mm_ctxt != NULL) — callers assert this beforehand */
    sos_mm_context_unref(prev_mm_ctxt);

  return SOS_OK;
}
809
810
811 sos_ret_t
812 sos_thread_prepare_user_space_access(struct sos_umem_vmm_as * dest_as,
813 sos_vaddr_t fixup_retvaddr)
814 {
815 sos_ret_t retval;
816 sos_ui32_t flags;
817
818 if (! dest_as)
819 {
820
821 if (! current_thread->process)
822 return -SOS_EINVAL;
823
824 dest_as = sos_process_get_address_space(current_thread->process);
825 }
826 else
827
828
829 SOS_ASSERT_FATAL(! fixup_retvaddr);
830
831 sos_disable_IRQs(flags);
832 SOS_ASSERT_FATAL(NULL == current_thread->squatted_mm_context);
833 SOS_ASSERT_FATAL(0 == current_thread->fixup_uaccess.return_vaddr);
834
835
836 retval = change_current_mm_context(sos_umem_vmm_get_mm_context(dest_as));
837 if (SOS_OK == retval)
838 {
839 current_thread->fixup_uaccess.return_vaddr = fixup_retvaddr;
840 current_thread->fixup_uaccess.faulted_uaddr = 0;
841 }
842
843 sos_restore_IRQs(flags);
844 return retval;
845 }
846
847
848 sos_ret_t
849 sos_thread_end_user_space_access(void)
850 {
851 sos_ret_t retval;
852 sos_ui32_t flags;
853
854 sos_disable_IRQs(flags);
855 SOS_ASSERT_FATAL(NULL != current_thread->squatted_mm_context);
856
857
858 retval = change_current_mm_context(NULL);
859 current_thread->fixup_uaccess.return_vaddr = 0;
860 current_thread->fixup_uaccess.faulted_uaddr = 0;
861
862 sos_restore_IRQs(flags);
863 return retval;
864 }
865
866
867 void sos_thread_prepare_syscall_switch_back(struct sos_cpu_state *cpu_state)
868 {
869
870
871
872
873
874
875
876
877 current_thread->cpu_state = cpu_state;
878
879
880 _prepare_mm_context((struct sos_thread*) current_thread);
881 }
882
883
884 void sos_thread_prepare_exception_switch_back(struct sos_cpu_state *cpu_state)
885 {
886
887
888
889
890
891
892
893
894 current_thread->cpu_state = cpu_state;
895
896
897 _prepare_mm_context((struct sos_thread*) current_thread);
898 }
899
900
/*
 * Hook run when entering IRQ servicing: remember where the interrupted
 * CPU context was saved, so the IRQ epilogue can restore or switch it.
 */
void
sos_thread_prepare_irq_servicing(struct sos_cpu_state *interrupted_state)
{
  current_thread->cpu_state = interrupted_state;
}
906
907
/*
 * Hook run at the end of IRQ servicing; returns the CPU context the
 * IRQ epilogue must restore.  If the IRQ interrupted kernel code,
 * resume it verbatim (no rescheduling).  If it interrupted user code,
 * this is a preemption point: elect a (possibly different) thread and
 * resume that one instead.
 */
struct sos_cpu_state *
sos_thread_prepare_irq_switch_back(void)
{
  struct sos_thread *myself, *next_thread;

  /* Interrupted kernel code: resume it as-is, no preemption */
  if (! sos_cpu_context_is_in_user_mode(current_thread->cpu_state))
    return current_thread->cpu_state;

  /* A user-mode context always belongs to a process */
  SOS_ASSERT_FATAL(current_thread->process != NULL);

  myself = (struct sos_thread*)current_thread;

  /* Elect the next thread to run */
  next_thread = sos_reschedule(myself, FALSE);

  /* Install the MM configuration the elected thread expects */
  _prepare_mm_context(next_thread);

  /* Make it current and tell the epilogue which context to restore */
  _set_current(next_thread);
  return next_thread->cpu_state;
}