001
002
003
004
005
006
007
008
009
010
011
012
013
014
015
016
017
018
019 #include <sos/physmem.h>
020 #include <sos/kmem_slab.h>
021 #include <sos/kmalloc.h>
022 #include <sos/klibc.h>
023 #include <sos/list.h>
024 #include <sos/assert.h>
025 #include <hwcore/mm_context.h>
026 #include <sos/process.h>
027
028 #include <drivers/bochs.h>
029 #include <drivers/x86_videomem.h>
030
031 #include <hwcore/irq.h>
032
033 #include "thread.h"
034
035
036
037
038
039 #define SOS_THREAD_KERNEL_STACK_SIZE (1*SOS_PAGE_SIZE)
040
041
042
043
044
045
046
047
048
049
050
051
052
053
054
055
/* The thread currently running on the CPU, if any.  Updated only via
   _set_current() (with the READY -> RUNNING transition); volatile
   because it changes across context switches. */
static volatile struct sos_thread *current_thread = NULL;


/* Global list of every thread in the system, linked through the
   gbl_prev/gbl_next fields of struct sos_thread. */
static struct sos_thread *thread_list = NULL;


/* Slab cache backing all struct sos_thread descriptors
   (created in sos_thread_subsystem_setup()). */
static struct sos_kslab_cache *cache_thread;
071
072
073 struct sos_thread *sos_thread_get_current()
074 {
075 SOS_ASSERT_FATAL(current_thread->state == SOS_THR_RUNNING);
076 return (struct sos_thread*)current_thread;
077 }
078
079
080 inline static sos_ret_t _set_current(struct sos_thread *thr)
081 {
082 SOS_ASSERT_FATAL(thr->state == SOS_THR_READY);
083 current_thread = thr;
084 current_thread->state = SOS_THR_RUNNING;
085 return SOS_OK;
086 }
087
088
/*
 * Initialize the thread subsystem and turn the code currently
 * executing (the kernel bootstrap) into the very first thread,
 * "[kinit]".
 *
 * @init_thread_stack_base_addr / @init_thread_stack_size describe the
 * stack the bootstrap code is already running on.
 *
 * @return SOS_OK on success, -SOS_ENOMEM when an allocation fails.
 */
sos_ret_t sos_thread_subsystem_setup(sos_vaddr_t init_thread_stack_base_addr,
				     sos_size_t init_thread_stack_size)
{
  struct sos_thread *myself;

  /* Create the slab cache backing all thread descriptors
     (mapped and zeroed slabs, 2 pages per slab). */
  cache_thread = sos_kmem_cache_create("thread",
				       sizeof(struct sos_thread),
				       2,
				       0,
				       SOS_KSLAB_CREATE_MAP
				       | SOS_KSLAB_CREATE_ZERO);
  if (! cache_thread)
    return -SOS_ENOMEM;

  /* Allocate a descriptor for the bootstrap thread itself. */
  myself = (struct sos_thread*) sos_kmem_cache_alloc(cache_thread,
						     SOS_KSLAB_ALLOC_ATOMIC);
  if (! myself)
    return -SOS_ENOMEM;

  /* Describe the context that is already running. */
  strzcpy(myself->name, "[kinit]", SOS_THR_MAX_NAMELEN);
  myself->state = SOS_THR_CREATED;
  myself->priority = SOS_SCHED_PRIO_LOWEST;
  myself->kernel_stack_base_addr = init_thread_stack_base_addr;
  myself->kernel_stack_size = init_thread_stack_size;

  /* Arm kernel-stack-overflow detection for the bootstrap stack. */
  sos_cpu_state_prepare_detect_kernel_stack_overflow(myself->cpu_state,
						     myself->kernel_stack_base_addr,
						     myself->kernel_stack_size);

  /* The global thread list starts out containing only ourselves. */
  list_singleton_named(thread_list, myself, gbl_prev, gbl_next);

  /* Promote ourselves to the (first) running thread. */
  myself->state = SOS_THR_READY;
  _set_current(myself);

  return SOS_OK;
}
131
132
133 struct sos_thread *
134 sos_create_kernel_thread(const char *name,
135 sos_kernel_thread_start_routine_t start_func,
136 void *start_arg,
137 sos_sched_priority_t priority)
138 {
139 __label__ undo_creation;
140 sos_ui32_t flags;
141 struct sos_thread *new_thread;
142
143 if (! start_func)
144 return NULL;
145 if (! SOS_SCHED_PRIO_IS_VALID(priority))
146 return NULL;
147
148
149 new_thread
150 = (struct sos_thread*) sos_kmem_cache_alloc(cache_thread,
151 SOS_KSLAB_ALLOC_ATOMIC);
152 if (! new_thread)
153 return NULL;
154
155
156 strzcpy(new_thread->name, ((name)?name:"[NONAME]"), SOS_THR_MAX_NAMELEN);
157 new_thread->state = SOS_THR_CREATED;
158 new_thread->priority = priority;
159
160
161 new_thread->kernel_stack_base_addr = sos_kmalloc(SOS_THREAD_KERNEL_STACK_SIZE, 0);
162 new_thread->kernel_stack_size = SOS_THREAD_KERNEL_STACK_SIZE;
163 if (! new_thread->kernel_stack_base_addr)
164 goto undo_creation;
165
166
167 if (SOS_OK
168 != sos_cpu_kstate_init(& new_thread->cpu_state,
169 (sos_cpu_kstate_function_arg1_t*) start_func,
170 (sos_ui32_t) start_arg,
171 new_thread->kernel_stack_base_addr,
172 new_thread->kernel_stack_size,
173 (sos_cpu_kstate_function_arg1_t*) sos_thread_exit,
174 (sos_ui32_t) NULL))
175 goto undo_creation;
176
177
178 sos_disable_IRQs(flags);
179 list_add_tail_named(thread_list, new_thread, gbl_prev, gbl_next);
180 sos_restore_IRQs(flags);
181
182
183 if (SOS_OK != sos_sched_set_ready(new_thread))
184 goto undo_creation;
185
186
187 return new_thread;
188
189 undo_creation:
190 if (new_thread->kernel_stack_base_addr)
191 sos_kfree((sos_vaddr_t) new_thread->kernel_stack_base_addr);
192 sos_kmem_cache_free((sos_vaddr_t) new_thread);
193 return NULL;
194 }
195
196
197 struct sos_thread *
198 sos_create_user_thread(const char *name,
199 struct sos_process *process,
200 sos_uaddr_t user_initial_PC,
201 sos_ui32_t user_start_arg1,
202 sos_ui32_t user_start_arg2,
203 sos_uaddr_t user_initial_SP,
204 sos_sched_priority_t priority)
205 {
206 __label__ undo_creation;
207 sos_ui32_t flags;
208 struct sos_thread *new_thread;
209
210 if (! SOS_SCHED_PRIO_IS_VALID(priority))
211 return NULL;
212
213
214 if (! process)
215 return NULL;
216
217
218 new_thread
219 = (struct sos_thread*) sos_kmem_cache_alloc(cache_thread,
220 SOS_KSLAB_ALLOC_ATOMIC);
221 if (! new_thread)
222 return NULL;
223
224
225 strzcpy(new_thread->name, ((name)?name:"[NONAME]"), SOS_THR_MAX_NAMELEN);
226 new_thread->state = SOS_THR_CREATED;
227 new_thread->priority = priority;
228
229
230 new_thread->kernel_stack_base_addr = sos_kmalloc(SOS_THREAD_KERNEL_STACK_SIZE, 0);
231 new_thread->kernel_stack_size = SOS_THREAD_KERNEL_STACK_SIZE;
232 if (! new_thread->kernel_stack_base_addr)
233 goto undo_creation;
234
235 if (SOS_OK
236 != sos_cpu_ustate_init(& new_thread->cpu_state,
237 user_initial_PC,
238 user_start_arg1,
239 user_start_arg2,
240 user_initial_SP,
241 new_thread->kernel_stack_base_addr,
242 new_thread->kernel_stack_size))
243 goto undo_creation;
244
245
246 if (SOS_OK != sos_process_register_thread(process, new_thread))
247 goto undo_creation;
248
249
250 sos_disable_IRQs(flags);
251 list_add_tail_named(thread_list, new_thread, gbl_prev, gbl_next);
252 sos_restore_IRQs(flags);
253
254
255 if (SOS_OK != sos_sched_set_ready(new_thread))
256 goto undo_creation;
257
258
259 return new_thread;
260
261 undo_creation:
262 if (new_thread->kernel_stack_base_addr)
263 sos_kfree((sos_vaddr_t) new_thread->kernel_stack_base_addr);
264 sos_kmem_cache_free((sos_vaddr_t) new_thread);
265 return NULL;
266 }
267
268
269
270
271
272
273
274
275
276
/*
 * Make sure the correct MM context is installed on the MMU before the
 * CPU switches to @the_thread.
 */
static void _prepare_mm_context(struct sos_thread *the_thread)
{
  /* User-mode thread: it must belong to a process, must not be
     squatting another address space, and runs inside its process's MM
     context. */
  if (sos_cpu_context_is_in_user_mode(the_thread->cpu_state)
      == TRUE)
    {
      SOS_ASSERT_FATAL(the_thread->process != NULL);

      SOS_ASSERT_FATAL(the_thread->squatted_mm_context == NULL);

      sos_mm_context_switch_to(sos_process_get_mm_context(the_thread->process));
    }

  /* Kernel-mode thread: switch only when it explicitly squats an
     address space; otherwise keep whatever MM context is current. */
  else if (the_thread->squatted_mm_context != NULL)
    sos_mm_context_switch_to(the_thread->squatted_mm_context);
}
301
302
303
304
/*
 * Reclaim all the resources of a terminated thread: unlink it from
 * the global thread list, free its kernel stack, detach it from any
 * squatted MM context and from its owning process, then return the
 * descriptor to the slab cache.  Called from sos_thread_exit(), on
 * the successor thread's stack.
 */
static void delete_thread(struct sos_thread *thr)
{
  sos_ui32_t flags;

  sos_disable_IRQs(flags);
  list_delete_named(thread_list, thr, gbl_prev, gbl_next);
  sos_restore_IRQs(flags);

  sos_kfree((sos_vaddr_t) thr->kernel_stack_base_addr);

  /* NOTE(review): the test is on thr->squatted_mm_context, but
     sos_thread_change_current_mm_context() operates on
     current_thread, which at this point is the successor — confirm
     the intended invariant (e.g. that a zombie's squatted context has
     already been handed over) before relying on this path. */
  if (thr->squatted_mm_context)
    SOS_ASSERT_FATAL(SOS_OK == sos_thread_change_current_mm_context(NULL));

  /* User threads only: detach from the owning process. */
  if (thr->process)
    SOS_ASSERT_FATAL(SOS_OK == sos_process_unregister_thread(thr));

  /* Poison the descriptor before releasing it, to catch
     use-after-free. */
  memset(thr, 0x0, sizeof(struct sos_thread));
  sos_kmem_cache_free((sos_vaddr_t) thr);
}
326
327
/*
 * Terminate the calling thread: mark it zombie, elect a successor and
 * exit to it.  The zombie's resources are reclaimed by
 * delete_thread() running on the successor's stack.  Never returns.
 */
void sos_thread_exit()
{
  sos_ui32_t flags;
  struct sos_thread *myself, *next_thread;

  /* Only a thread can exit itself; IRQ handlers cannot. */
  SOS_ASSERT_FATAL(! sos_servicing_irq());

  myself = sos_thread_get_current();

  /* The thread must no longer be registered on any kernel wait
     queue. */
  SOS_ASSERT_FATAL(list_is_empty_named(myself->kwaitq_list,
				       prev_entry_for_thread,
				       next_entry_for_thread));

  /* Become a zombie and elect a successor (FALSE: do not re-enter the
     ready queue ourselves). */
  sos_disable_IRQs(flags);
  myself->state = SOS_THR_ZOMBIE;
  next_thread = sos_reschedule(myself, FALSE);

  /* Paranoid check on the successor's stack before jumping onto it. */
  sos_cpu_state_detect_kernel_stack_overflow(next_thread->cpu_state,
					     next_thread->kernel_stack_base_addr,
					     next_thread->kernel_stack_size);

  /* Install the successor's MM context while still in a valid one. */
  _prepare_mm_context(next_thread);

  /* Switch to the successor and have it call delete_thread(myself) on
     its own stack, so that our stack can be safely freed. */
  _set_current(next_thread);
  sos_cpu_context_exit_to(next_thread->cpu_state,
			  (sos_cpu_kstate_function_arg1_t*) delete_thread,
			  (sos_ui32_t) myself);
}
368
369
370 sos_sched_priority_t sos_thread_get_priority(struct sos_thread *thr)
371 {
372 if (! thr)
373 thr = (struct sos_thread*)current_thread;
374
375 return thr->priority;
376 }
377
378
379 sos_thread_state_t sos_thread_get_state(struct sos_thread *thr)
380 {
381 if (! thr)
382 thr = (struct sos_thread*)current_thread;
383
384 return thr->state;
385 }
386
387
/* The two ways _switch_to_next_thread() can be entered: the caller
   either yields the CPU (stays schedulable) or blocks itself. */
typedef enum { YIELD_MYSELF, BLOCK_MYSELF } switch_type_t;


/*
 * Elect the next thread and context-switch to it.  Must be called by
 * a RUNNING thread (not an IRQ handler), with IRQs disabled by the
 * caller.  Returns SOS_OK once the caller is scheduled again.
 */
static sos_ret_t _switch_to_next_thread(switch_type_t operation)
{
  struct sos_thread *myself, *next_thread;

  SOS_ASSERT_FATAL(current_thread->state == SOS_THR_RUNNING);

  /* Context switches are forbidden from inside IRQ handlers. */
  SOS_ASSERT_FATAL(! sos_servicing_irq());

  myself = (struct sos_thread*)current_thread;

  /* When blocking, mark ourselves BLOCKED so the scheduler will not
     pick us again until someone wakes us up. */
  if (BLOCK_MYSELF == operation)
    {
      myself->state = SOS_THR_BLOCKED;
    }

  /* Ask the scheduler for a successor; on a yield we re-enter the
     ready queue ourselves. */
  next_thread = sos_reschedule(myself, YIELD_MYSELF == operation);

  /* An actual CPU switch is needed only if somebody else was
     elected. */
  if (myself != next_thread)
    {
      /* Paranoid check on the successor's stack before using it. */
      sos_cpu_state_detect_kernel_stack_overflow(next_thread->cpu_state,
						 next_thread->kernel_stack_base_addr,
						 next_thread->kernel_stack_size);

      /* Install the successor's MM context before switching. */
      _prepare_mm_context(next_thread);

      /* Save our CPU state and resume the successor's; execution
	 resumes after this call when we are scheduled again. */
      _set_current(next_thread);
      sos_cpu_context_switch(& myself->cpu_state, next_thread->cpu_state);

      /* Back from the switch: we must be the running thread again. */
      SOS_ASSERT_FATAL(current_thread == myself);
      SOS_ASSERT_FATAL(current_thread->state == SOS_THR_RUNNING);
    }
  else
    {
      /* We were re-elected: just flip READY back to RUNNING. */
      _set_current(next_thread);
    }

  return SOS_OK;
}
445
446
447
448
449
450
/*
 * Propagate a new priority to every kernel wait queue @thr is
 * currently registered on, so those queues stay correctly ordered.
 * Caller is expected to have IRQs disabled.
 */
static sos_ret_t _change_waitq_priorities(struct sos_thread *thr,
					  sos_sched_priority_t priority)
{
  struct sos_kwaitq_entry *kwq_entry;
  int nb_waitqs;

  list_foreach_forward_named(thr->kwaitq_list, kwq_entry, nb_waitqs,
			     prev_entry_for_thread, next_entry_for_thread)
    {
      SOS_ASSERT_FATAL(SOS_OK == sos_kwaitq_change_priority(kwq_entry->kwaitq,
							    kwq_entry,
							    priority));
    }

  return SOS_OK;
}
467
468
/*
 * Change the scheduling priority of @thr (current thread when NULL):
 * update the wait queues the thread sits on and, when the thread is
 * READY, its position in the scheduler's ready queue.
 *
 * @return SOS_OK on success, -SOS_EINVAL for an out-of-range
 * priority.
 */
sos_ret_t sos_thread_set_priority(struct sos_thread *thr,
				  sos_sched_priority_t priority)
{
  __label__ exit_set_prio;
  sos_ui32_t flags;
  sos_ret_t retval;

  if (! SOS_SCHED_PRIO_IS_VALID(priority))
    return -SOS_EINVAL;

  /* NULL means "the calling thread". */
  if (! thr)
    thr = (struct sos_thread*)current_thread;

  sos_disable_IRQs(flags);

  /* Keep the kwaitqs the thread is registered on consistent with the
     new priority. */
  retval = _change_waitq_priorities(thr, priority);
  if (SOS_OK != retval)
    goto exit_set_prio;

  /* A READY thread must also be re-inserted at its new priority in
     the scheduler's ready queue. */
  if (SOS_THR_READY == thr->state)
    retval = sos_sched_change_priority(thr, priority);

  /* Finally record the new nominal priority. */
  thr->priority = priority;

 exit_set_prio:
  sos_restore_IRQs(flags);
  return retval;
}
503
504
505 sos_ret_t sos_thread_yield()
506 {
507 sos_ui32_t flags;
508 sos_ret_t retval;
509
510 sos_disable_IRQs(flags);
511
512 retval = _switch_to_next_thread(YIELD_MYSELF);
513
514 sos_restore_IRQs(flags);
515 return retval;
516 }
517
518
519
520
521
/* Bookkeeping shared between sos_thread_sleep() and its timer
   callback sleep_timeout(). */
struct sleep_timeout_params
{
  struct sos_thread *thread_to_wakeup; /* thread blocked in sleep() */
  sos_bool_t timeout_triggered;        /* TRUE once the timer fired */
};
527
528
529
530
531
/*
 * Timeout callback for sos_thread_sleep(): flag the timeout as
 * triggered and forcibly wake the sleeping thread up.  Runs from the
 * timer machinery, not from the sleeping thread itself.
 */
static void sleep_timeout(struct sos_timeout_action *act)
{
  struct sleep_timeout_params *sleep_timeout_params
    = (struct sleep_timeout_params*) act->routine_data;

  /* Tell the sleeper it was woken by the timer, not by a third
     party. */
  sleep_timeout_params->timeout_triggered = TRUE;

  /* Unblock the sleeping thread. */
  SOS_ASSERT_FATAL(SOS_OK ==
		   sos_thread_force_unblock(sleep_timeout_params
					     ->thread_to_wakeup));
}
545
546
/*
 * Block the calling thread for at most @timeout.
 *
 * With a NULL @timeout the thread blocks indefinitely (until a third
 * party force-unblocks it).  Otherwise @timeout is updated on return
 * with the time remaining (zero when the full delay elapsed).
 *
 * @return SOS_OK when the timeout expired, -SOS_EINTR when the thread
 * was woken up early.
 */
sos_ret_t sos_thread_sleep(struct sos_time *timeout)
{
  sos_ui32_t flags;
  struct sleep_timeout_params sleep_timeout_params;
  struct sos_timeout_action timeout_action;
  sos_ret_t retval;

  /* No timeout: simply block until somebody wakes us up. */
  if (NULL == timeout)
    {
      sos_disable_IRQs(flags);
      retval = _switch_to_next_thread(BLOCK_MYSELF);
      sos_restore_IRQs(flags);

      return retval;
    }

  /* Prepare a one-shot timeout action... */
  sos_time_init_action(& timeout_action);

  /* ...whose callback will wake us up and tell us why. */
  sleep_timeout_params.thread_to_wakeup
    = (struct sos_thread*)current_thread;
  sleep_timeout_params.timeout_triggered = FALSE;

  sos_disable_IRQs(flags);

  /* Arm the timer before blocking (IRQs are off, so the callback
     cannot fire before we actually block). */
  SOS_ASSERT_FATAL(SOS_OK ==
		   sos_time_register_action_relative(& timeout_action,
						     timeout,
						     sleep_timeout,
						     & sleep_timeout_params));

  /* Block until woken up — either by the timer or by a third
     party. */
  retval = _switch_to_next_thread(BLOCK_MYSELF);

  /* Figure out who woke us up. */
  if (sleep_timeout_params.timeout_triggered)
    {
      /* The timer fired: the remaining delay must be zero. */
      SOS_ASSERT_FATAL(sos_time_is_zero(& timeout_action.timeout));
      retval = SOS_OK;
    }
  else
    {
      /* Woken early by a third party: disarm the still-pending timer
	 and report the interruption. */
      SOS_ASSERT_FATAL(SOS_OK == sos_time_unregister_action(& timeout_action));
      retval = -SOS_EINTR;
    }

  sos_restore_IRQs(flags);

  /* Report the remaining time back to the caller. */
  memcpy(timeout, & timeout_action.timeout, sizeof(struct sos_time));

  return retval;
}
609
610
611 sos_ret_t sos_thread_force_unblock(struct sos_thread *thread)
612 {
613 sos_ret_t retval;
614 sos_ui32_t flags;
615
616 if (! thread)
617 return -SOS_EINVAL;
618
619 sos_disable_IRQs(flags);
620
621
622 retval = SOS_OK;
623 switch(sos_thread_get_state(thread))
624 {
625 case SOS_THR_RUNNING:
626 case SOS_THR_READY:
627
628 break;
629
630 case SOS_THR_ZOMBIE:
631 retval = -SOS_EFATAL;
632 break;
633
634 default:
635 retval = sos_sched_set_ready(thread);
636 break;
637 }
638
639 sos_restore_IRQs(flags);
640
641 return retval;
642 }
643
644
/*
 * Print a backtrace of the current thread on the text console and/or
 * the Bochs debug port.  Uses a GCC nested function as the per-frame
 * callback so it can capture the current thread's stack bounds.
 */
void sos_thread_dump_backtrace(sos_bool_t on_console,
			       sos_bool_t on_bochs)
{
  sos_vaddr_t stack_bottom = current_thread->kernel_stack_base_addr;
  sos_size_t stack_size = current_thread->kernel_stack_size;

  /* Per-frame callback (GCC nested-function extension). */
  static void backtracer(sos_vaddr_t PC,
			 sos_vaddr_t params,
			 sos_ui32_t depth,
			 void *custom_arg)
  {
    sos_ui32_t invalid = 0xffffffff, *arg1, *arg2, *arg3, *arg4;

    /* Point at the first 4 potential argument slots of the frame
       (assumes they sit at params, params+4, ... — TODO confirm this
       matches the calling convention used by sos_backtrace()). */
    arg1 = (sos_ui32_t*)params;
    arg2 = (sos_ui32_t*)(params+4);
    arg3 = (sos_ui32_t*)(params+8);
    arg4 = (sos_ui32_t*)(params+12);

    /* Only dereference slots that actually lie inside the kernel
       stack; substitute a recognizable "invalid" marker otherwise. */
#define INTERVAL_OK(b,v,u) ( ((b) <= (sos_vaddr_t)(v)) \
                             && ((sos_vaddr_t)(v) < (u)) )
    if (!INTERVAL_OK(stack_bottom, arg1, stack_bottom + stack_size))
      arg1 = &invalid;
    if (!INTERVAL_OK(stack_bottom, arg2, stack_bottom + stack_size))
      arg2 = &invalid;
    if (!INTERVAL_OK(stack_bottom, arg3, stack_bottom + stack_size))
      arg3 = &invalid;
    if (!INTERVAL_OK(stack_bottom, arg4, stack_bottom + stack_size))
      arg4 = &invalid;

    /* Dump the frame on the requested output(s). */
    if (on_bochs)
      sos_bochs_printf("[%d] PC=0x%x arg1=0x%x arg2=0x%x arg3=0x%x\n",
		       (unsigned)depth, (unsigned)PC,
		       (unsigned)*arg1, (unsigned)*arg2,
		       (unsigned)*arg3);

    if (on_console)
      sos_x86_videomem_printf(23-depth, 3,
			      SOS_X86_VIDEO_BG_BLUE
			      | SOS_X86_VIDEO_FG_LTGREEN,
			      "[%d] PC=0x%x arg1=0x%x arg2=0x%x arg3=0x%x arg4=0x%x",
			      (unsigned)depth, PC,
			      (unsigned)*arg1, (unsigned)*arg2,
			      (unsigned)*arg3, (unsigned)*arg4);

  }

  /* Walk at most 15 frames of the current stack. */
  sos_backtrace(NULL, 15, stack_bottom, stack_size,
		backtracer, NULL);
}
701
702
703
704
705
706
707
708
/*
 * Attach (non-NULL @mm_ctxt) or detach (NULL) a "squatted" MM context
 * to the current thread — the mechanism by which a kernel thread may
 * temporarily borrow a user address space.  Attach and detach must
 * strictly alternate.
 *
 * @return SOS_OK (violations are fatal assertions).
 */
sos_ret_t
sos_thread_change_current_mm_context(struct sos_mm_context *mm_ctxt)
{
  sos_ui32_t flags;

  /* Remember the previously squatted context, if any. */
  struct sos_mm_context * prev_mm_ctxt
    = current_thread->squatted_mm_context;

  /* Enforce strict attach/detach alternation. */
  if (mm_ctxt != NULL)
    SOS_ASSERT_FATAL(prev_mm_ctxt == NULL);
  else
    SOS_ASSERT_FATAL(prev_mm_ctxt != NULL);

  sos_disable_IRQs(flags);

  /* Record the new squatted context (or clear it). */
  current_thread->squatted_mm_context = mm_ctxt;

  /* Attach: take a reference and install the context on the MMU.
     Detach: drop the reference on the previous context.
     NOTE(review): on detach no MM context is re-installed here —
     presumably the next context switch restores the right one;
     confirm. */
  if (mm_ctxt != NULL)
    {
      sos_mm_context_ref(mm_ctxt);

      sos_mm_context_switch_to(mm_ctxt);
    }
  else
    sos_mm_context_unref(prev_mm_ctxt);

  sos_restore_IRQs(flags);

  return SOS_OK;
}
748
749
/*
 * Called on the return path of a syscall: record where the user CPU
 * state was saved and re-install the thread's MM context before
 * going back to user mode.
 */
void sos_thread_prepare_syscall_switch_back(struct sos_cpu_state *cpu_state)
{
  /* Keep a handle to the saved state so the scheduler can suspend
     this thread later if needed. */
  current_thread->cpu_state = cpu_state;

  /* Make sure the right MM context is installed (it may have changed
     while inside the kernel). */
  _prepare_mm_context((struct sos_thread*) current_thread);
}
765
766
/*
 * Called on the return path of an exception handler: record where the
 * interrupted CPU state was saved and re-install the thread's MM
 * context before resuming it.
 */
void sos_thread_prepare_exception_switch_back(struct sos_cpu_state *cpu_state)
{
  /* Keep a handle to the saved state so the scheduler can suspend
     this thread later if needed. */
  current_thread->cpu_state = cpu_state;

  /* Make sure the right MM context is installed (it may have changed
     while inside the kernel). */
  _prepare_mm_context((struct sos_thread*) current_thread);
}
782
783
/*
 * Called at IRQ entry: record where the interrupted CPU state was
 * saved so the thread can be suspended/resumed from IRQ context.
 */
void
sos_thread_prepare_irq_servicing(struct sos_cpu_state *interrupted_state)
{
  current_thread->cpu_state = interrupted_state;
}
789
790
/*
 * Called at the end of IRQ handling: choose which CPU state to
 * resume.  A thread interrupted in kernel mode is always resumed
 * as-is (no kernel preemption); a thread interrupted in user mode may
 * be preempted, in which case the elected thread's state is returned.
 */
struct sos_cpu_state *
sos_thread_prepare_irq_switch_back(void)
{
  struct sos_thread *myself, *next_thread;

  /* Never preempt a thread that was interrupted while running in
     kernel mode. */
  if (! sos_cpu_context_is_in_user_mode(current_thread->cpu_state))
    return current_thread->cpu_state;

  /* From here on the interrupted thread was in user mode, so it must
     belong to a process. */
  SOS_ASSERT_FATAL(current_thread->process != NULL);

  myself = (struct sos_thread*)current_thread;

  /* Give the scheduler a chance to elect another thread (FALSE: we
     keep our place, the scheduler decides). */
  next_thread = sos_reschedule(myself, FALSE);

  /* Install the elected thread's MM context... */
  _prepare_mm_context(next_thread);

  /* ...and resume it. */
  _set_current(next_thread);
  return next_thread->cpu_state;
}