001
002
003
004
005
006
007
008
009
010
011
012
013
014
015
016
017
018
019
020
021 #include <sos/assert.h>
022 #include <sos/klibc.h>
023 #include <drivers/bochs.h>
024 #include <drivers/x86_videomem.h>
025 #include <hwcore/segment.h>
026 #include <hwcore/gdt.h>
027 #include <sos/uaccess.h>
028
029 #include "cpu_context.h"
030
031
032
033
034
035
036
037
038
039
040
041
042
043
044
/**
 * The saved state of a CPU, as stored on the kernel stack.
 *
 * NOTE(review): this layout is presumably mirrored by the
 * interrupt/exception wrappers and the context-switch assembly code
 * (hence "packed"); do not reorder fields without auditing that code.
 * Fields at lower offsets are those nearest the stack top.
 */
struct sos_cpu_state {

  /* Segment selector registers (16 bits each on x86) */

  sos_ui16_t gs;
  sos_ui16_t fs;
  sos_ui16_t es;
  sos_ui16_t ds;
  sos_ui16_t cpl0_ss;       /* Stack segment used while in kernel (CPL0) mode */

  /* Keeps the 32-bit fields below naturally aligned despite the odd
     number of 16-bit selectors above */
  sos_ui16_t alignment_padding;
  sos_ui32_t eax;
  sos_ui32_t ebx;
  sos_ui32_t ecx;
  sos_ui32_t edx;
  sos_ui32_t esi;
  sos_ui32_t edi;
  sos_ui32_t ebp;

  /* Exception error code (a filler value for interrupts that push none) */
  sos_ui32_t error_code;
  sos_vaddr_t eip;          /* Saved program counter */
  sos_ui32_t cs;            /* Pushed as 32 bits by the CPU; only the low
                               16 bits are meaningful, see
                               GET_CPU_CS_REGISTER_VALUE() */
  sos_ui32_t eflags;        /* Saved CPU flags (IF, etc.) */


} __attribute__((packed));
074
075
076
077
078
079
080
081
082
083
084
085
086
087
088
/* The CS selector is pushed on the stack as a 32-bit quantity, but a
   segment selector is only 16 bits wide: extract the meaningful part */
#define GET_CPU_CS_REGISTER_VALUE(pushed_ui32_cs_value) \
  ( (pushed_ui32_cs_value) & 0xffff )
091
092
093
094
095
/**
 * Saved context of a kernel-mode activity: nothing more than the
 * basic saved CPU state (no user stack pointer/segment needed, since
 * no privilege transition is involved).
 */
struct sos_cpu_kstate
{
  struct sos_cpu_state regs;
} __attribute__((packed));
100
101
102
103
104
105
106
107
108
109
/**
 * Saved context of a user-mode activity: the basic saved CPU state
 * plus the user-level stack pointer and stack segment, which the x86
 * CPU pushes/pops in addition upon a privilege-level change.
 */
struct sos_cpu_ustate
{
  struct sos_cpu_state regs;
  struct
  {
    sos_ui32_t cpl3_esp;    /* User (CPL3) stack pointer */
    sos_ui16_t cpl3_ss;     /* User (CPL3) stack segment */
  };
} __attribute__((packed));
119
120
121
122
123
124
125
126
127
128
129
130
/**
 * The x86 Task-State Segment, whose layout is mandated by the CPU
 * (see the Intel x86 architecture manuals).  SOS uses a single TSS,
 * and only its ss0/esp0 fields (kernel stack to load upon a
 * CPL3 -> CPL0 privilege transition); the remaining fields exist
 * solely because the hardware format requires them.
 */
struct x86_tss {

  /* Selector of the previous task's TSS (hardware task linking,
     unused by SOS) */
  sos_ui16_t back_link;

  sos_ui16_t reserved1;

  /* Stack pointer/segment to load when entering ring 0 */
  sos_vaddr_t esp0;
  sos_ui16_t ss0;

  sos_ui16_t reserved2;

  /* Stack pointer/segment for ring 1 (unused by SOS) */
  sos_vaddr_t esp1;
  sos_ui16_t ss1;

  sos_ui16_t reserved3;

  /* Stack pointer/segment for ring 2 (unused by SOS) */
  sos_vaddr_t esp2;
  sos_ui16_t ss2;

  sos_ui16_t reserved4;

  /* Registers saved/restored by hardware task switching (unused by
     SOS, which switches contexts in software) */
  sos_vaddr_t cr3;
  sos_vaddr_t eip;
  sos_ui32_t eflags;
  sos_ui32_t eax;
  sos_ui32_t ecx;
  sos_ui32_t edx;
  sos_ui32_t ebx;
  sos_ui32_t esp;
  sos_ui32_t ebp;
  sos_ui32_t esi;
  sos_ui32_t edi;

  /* Segment selectors (16 bits each, padded to 32 by the format) */
  sos_ui16_t es;
  sos_ui16_t reserved5;


  sos_ui16_t cs;
  sos_ui16_t reserved6;


  sos_ui16_t ss;
  sos_ui16_t reserved7;


  sos_ui16_t ds;
  sos_ui16_t reserved8;


  sos_ui16_t fs;
  sos_ui16_t reserved9;


  sos_ui16_t gs;
  sos_ui16_t reserved10;

  /* Local descriptor table selector (unused by SOS) */
  sos_ui16_t ldtr;
  sos_ui16_t reserved11;

  /* Debug trap flag and I/O permission bitmap base address */
  sos_ui16_t debug_trap_flag :1;
  sos_ui16_t reserved12 :15;
  sos_ui16_t iomap_base_addr;


} __attribute__((packed, aligned(128)));
212
213
214 static struct x86_tss kernel_tss;
215
216
217 sos_ret_t sos_cpu_context_subsystem_setup()
218 {
219
220 memset(&kernel_tss, 0x0, sizeof(kernel_tss));
221
222
223
224
225
226
227
228
229
230
231
232 kernel_tss.ss0 = SOS_BUILD_SEGMENT_REG_VALUE(0, FALSE, SOS_SEG_KDATA);
233
234
235 sos_gdt_register_kernel_tss((sos_vaddr_t) &kernel_tss);
236
237 return SOS_OK;
238 }
239
240
241
242
243
244
245
/**
 * Glue routine forming the outermost "frame" of every new kernel
 * thread.  sos_cpu_kstate_init() forges a stack so that, on the first
 * switch to the new context, execution "returns" into this function
 * with the four arguments below already pushed.  It never returns:
 * exit_func is expected to terminate the thread.
 */
static void core_routine (sos_cpu_kstate_function_arg1_t *start_func,
			  sos_ui32_t start_arg,
			  sos_cpu_kstate_function_arg1_t *exit_func,
			  sos_ui32_t exit_arg)
     __attribute__((noreturn));

static void core_routine (sos_cpu_kstate_function_arg1_t *start_func,
			  sos_ui32_t start_arg,
			  sos_cpu_kstate_function_arg1_t *exit_func,
			  sos_ui32_t exit_arg)
{
  /* Run the thread's body, then its termination routine */
  start_func(start_arg);
  exit_func(exit_arg);

  /* exit_func must never return; trap it loudly if it does */
  SOS_ASSERT_FATAL(! "The exit function of the thread should NOT return !");
  for(;;);
}
263
264
/**
 * Build the initial saved context of a new kernel thread on its own
 * stack, so that switching to it starts executing
 * core_routine(start_func, start_arg, exit_func, exit_arg).
 *
 * @param ctxt          (out) address of the forged context, inside the stack
 * @param start_func    thread body, called first by core_routine
 * @param start_arg     argument passed to start_func
 * @param stack_bottom  lowest address of the thread's kernel stack
 * @param stack_size    size in bytes of that stack
 * @param exit_func     called when start_func returns; must not return
 * @param exit_arg      argument passed to exit_func
 * @return SOS_OK
 */
sos_ret_t sos_cpu_kstate_init(struct sos_cpu_state **ctxt,
			      sos_cpu_kstate_function_arg1_t *start_func,
			      sos_ui32_t start_arg,
			      sos_vaddr_t stack_bottom,
			      sos_size_t stack_size,
			      sos_cpu_kstate_function_arg1_t *exit_func,
			      sos_ui32_t exit_arg)
{
  /* The forged context, located near the top of the stack */
  struct sos_cpu_kstate *kctxt;

  /* The x86 stack grows downward: start one past the stack's
     last byte */
  sos_vaddr_t tmp_vaddr = stack_bottom + stack_size;
  sos_ui32_t *stack = (sos_ui32_t*)tmp_vaddr;

  /* Optional debugging aids: poison the stack, either entirely (to
     spot reads of uninitialized stack memory) or only at its bottom
     (to detect overflows later) */
#ifdef SOS_CPU_STATE_DETECT_UNINIT_KERNEL_VARS
  memset((void*)stack_bottom, SOS_CPU_STATE_STACK_POISON, stack_size);
#elif defined(SOS_CPU_STATE_DETECT_KERNEL_STACK_OVERFLOW)
  sos_cpu_state_prepare_detect_kernel_stack_overflow(stack_bottom, stack_size);
#endif

  /* Simulate the call core_routine(start_func, start_arg, exit_func,
     exit_arg): push the 4 arguments right-to-left as the cdecl
     convention does, then a fake return address (0), since
     core_routine never returns */
  *(--stack) = exit_arg;
  *(--stack) = (sos_ui32_t)exit_func;
  *(--stack) = start_arg;
  *(--stack) = (sos_ui32_t)start_func;
  *(--stack) = 0;

  /* Reserve room for the saved CPU state just below the forged
     call frame */
  tmp_vaddr = ((sos_vaddr_t)stack) - sizeof(struct sos_cpu_kstate);
  kctxt = (struct sos_cpu_kstate*)tmp_vaddr;

  /* All registers start at 0 */
  memset(kctxt, 0x0, sizeof(struct sos_cpu_kstate));

  /* First instruction executed by the new thread */
  kctxt->regs.eip = (sos_ui32_t)core_routine;

  /* Kernel-mode segment selectors */
  kctxt->regs.cs
    = SOS_BUILD_SEGMENT_REG_VALUE(0, FALSE, SOS_SEG_KCODE);
  kctxt->regs.ds
    = SOS_BUILD_SEGMENT_REG_VALUE(0, FALSE, SOS_SEG_KDATA);
  kctxt->regs.es
    = SOS_BUILD_SEGMENT_REG_VALUE(0, FALSE, SOS_SEG_KDATA);
  kctxt->regs.cpl0_ss
    = SOS_BUILD_SEGMENT_REG_VALUE(0, FALSE, SOS_SEG_KDATA);

  /* eflags bit 9 is IF: the thread starts with interrupts enabled */
  kctxt->regs.eflags = (1 << 9);

  /* Hand the forged context back to the caller */
  *ctxt = (struct sos_cpu_state*) kctxt;

  return SOS_OK;
}
353
354
355
356
357
358
359
360
/**
 * Build the saved context of a user thread on its kernel stack,
 * either from scratch or by cloning an existing user context.
 *
 * @param ctxt                (out) address of the forged context
 * @param model_uctxt         context to clone, or NULL to create anew
 * @param user_start_PC       initial user eip (ignored when cloning)
 * @param user_start_arg1     initial eax value
 * @param user_start_arg2     initial ebx value (ignored when cloning)
 * @param user_initial_SP     initial user esp (ignored when cloning)
 * @param kernel_stack_bottom lowest address of the kernel stack
 * @param kernel_stack_size   size in bytes of that stack
 * @return SOS_OK, or -SOS_EINVAL if model_uctxt is not a user context
 */
static sos_ret_t cpu_ustate_init(struct sos_cpu_state **ctxt,
				 const struct sos_cpu_state *model_uctxt,
				 sos_uaddr_t user_start_PC,
				 sos_ui32_t user_start_arg1,
				 sos_ui32_t user_start_arg2,
				 sos_uaddr_t user_initial_SP,
				 sos_vaddr_t kernel_stack_bottom,
				 sos_size_t kernel_stack_size)
{
  /* The forged user context */
  struct sos_cpu_ustate *uctxt;

  /* Place the saved user context at the very top of the kernel
     stack (the stack grows downward) */
  sos_vaddr_t uctxt_vaddr = kernel_stack_bottom
                            + kernel_stack_size
                            - sizeof(struct sos_cpu_ustate);
  uctxt = (struct sos_cpu_ustate*)uctxt_vaddr;

  /* Only a user-mode context may serve as a model */
  if (model_uctxt && !sos_cpu_context_is_in_user_mode(model_uctxt))
    return -SOS_EINVAL;

  /* Optional debugging aids (see sos_cpu_kstate_init) */
#ifdef SOS_CPU_STATE_DETECT_UNINIT_KERNEL_VARS
  memset((void*)kernel_stack_bottom,
	 SOS_CPU_STATE_STACK_POISON,
	 kernel_stack_size);
#elif defined(SOS_CPU_STATE_DETECT_KERNEL_STACK_OVERFLOW)
  sos_cpu_state_prepare_detect_kernel_stack_overflow(kernel_stack_bottom,
						     kernel_stack_size);
#endif

  /* Initialize the saved registers: from scratch (all zero, then
     eip/esp as requested), or as an exact copy of the model */
  if (! model_uctxt)
    {
      memset(uctxt, 0x0, sizeof(struct sos_cpu_ustate));

      /* First user-mode instruction */
      uctxt->regs.eip = (sos_ui32_t)user_start_PC;

      /* Initial user stack pointer */
      uctxt->cpl3_esp = user_initial_SP;
    }
  else
    memcpy(uctxt, model_uctxt, sizeof(struct sos_cpu_ustate));

  /* First argument (or clone's return value) goes in eax */
  uctxt->regs.eax = user_start_arg1;

  /* Second argument in ebx, only for a from-scratch context */
  if (! model_uctxt)
    uctxt->regs.ebx = user_start_arg2;

  /* User-mode (ring 3) segment selectors */
  uctxt->regs.cs
    = SOS_BUILD_SEGMENT_REG_VALUE(3, FALSE, SOS_SEG_UCODE);
  uctxt->regs.ds
    = SOS_BUILD_SEGMENT_REG_VALUE(3, FALSE, SOS_SEG_UDATA);
  uctxt->regs.es
    = SOS_BUILD_SEGMENT_REG_VALUE(3, FALSE, SOS_SEG_UDATA);
  uctxt->cpl3_ss
    = SOS_BUILD_SEGMENT_REG_VALUE(3, FALSE, SOS_SEG_UDATA);

  /* Stack segment used while the thread runs inside the kernel */
  uctxt->regs.cpl0_ss
    = SOS_BUILD_SEGMENT_REG_VALUE(0, FALSE, SOS_SEG_KDATA);

  /* eflags bit 9 is IF: user threads run with interrupts enabled */
  uctxt->regs.eflags = (1 << 9);

  /* Hand the forged context back to the caller */
  *ctxt = (struct sos_cpu_state*) uctxt;

  return SOS_OK;
}
453
454
455 sos_ret_t sos_cpu_ustate_init(struct sos_cpu_state **ctxt,
456 sos_uaddr_t user_start_PC,
457 sos_ui32_t user_start_arg1,
458 sos_ui32_t user_start_arg2,
459 sos_uaddr_t user_initial_SP,
460 sos_vaddr_t kernel_stack_bottom,
461 sos_size_t kernel_stack_size)
462 {
463 return cpu_ustate_init(ctxt, NULL,
464 user_start_PC,
465 user_start_arg1, user_start_arg2,
466 user_initial_SP,
467 kernel_stack_bottom, kernel_stack_size);
468 }
469
470
471 sos_ret_t sos_cpu_ustate_duplicate(struct sos_cpu_state **ctxt,
472 const struct sos_cpu_state *model_uctxt,
473 sos_ui32_t user_retval,
474 sos_vaddr_t kernel_stack_bottom,
475 sos_size_t kernel_stack_size)
476 {
477 return cpu_ustate_init(ctxt, model_uctxt,
478 0,
479 user_retval, 0,
480 0,
481 kernel_stack_bottom, kernel_stack_size);
482 }
483
484
485 sos_ret_t
486 sos_cpu_context_is_in_user_mode(const struct sos_cpu_state *ctxt)
487 {
488
489
490 switch (GET_CPU_CS_REGISTER_VALUE(ctxt->cs))
491 {
492 case SOS_BUILD_SEGMENT_REG_VALUE(3, FALSE, SOS_SEG_UCODE):
493 return TRUE;
494 break;
495
496 case SOS_BUILD_SEGMENT_REG_VALUE(0, FALSE, SOS_SEG_KCODE):
497 return FALSE;
498 break;
499
500 default:
501 SOS_FATAL_ERROR("Invalid saved context Code segment register: 0x%x (k=%x, u=%x) !",
502 (unsigned) GET_CPU_CS_REGISTER_VALUE(ctxt->cs),
503 SOS_BUILD_SEGMENT_REG_VALUE(0, FALSE, SOS_SEG_KCODE),
504 SOS_BUILD_SEGMENT_REG_VALUE(3, FALSE, SOS_SEG_UCODE));
505 break;
506 }
507
508
509 return -SOS_EFATAL;
510 }
511
512
513 #if defined(SOS_CPU_STATE_DETECT_KERNEL_STACK_OVERFLOW)
514 void
515 sos_cpu_state_prepare_detect_kernel_stack_overflow(const struct sos_cpu_state *ctxt,
516 sos_vaddr_t stack_bottom,
517 sos_size_t stack_size)
518 {
519 sos_size_t poison_size = SOS_CPU_STATE_DETECT_KERNEL_STACK_OVERFLOW;
520 if (poison_size > stack_size)
521 poison_size = stack_size;
522
523 memset((void*)stack_bottom, SOS_CPU_STATE_STACK_POISON, poison_size);
524 }
525
526
527 void
528 sos_cpu_state_detect_kernel_stack_overflow(const struct sos_cpu_state *ctxt,
529 sos_vaddr_t stack_bottom,
530 sos_size_t stack_size)
531 {
532 unsigned char *c;
533 unsigned int i;
534
535
536
537
538
539 SOS_ASSERT_FATAL(((sos_vaddr_t)ctxt) >= stack_bottom);
540 SOS_ASSERT_FATAL(((sos_vaddr_t)ctxt) + sizeof(struct sos_cpu_kstate)
541 <= stack_bottom + stack_size);
542
543
544 for (c = (unsigned char*) stack_bottom, i = 0 ;
545 (i < SOS_CPU_STATE_DETECT_KERNEL_STACK_OVERFLOW) && (i < stack_size) ;
546 c++, i++)
547 {
548 SOS_ASSERT_FATAL(SOS_CPU_STATE_STACK_POISON == *c);
549 }
550 }
551 #endif
552
553
554
555
556
557
558
559 sos_vaddr_t sos_cpu_context_get_PC(const struct sos_cpu_state *ctxt)
560 {
561 SOS_ASSERT_FATAL(NULL != ctxt);
562
563
564
565 return ctxt->eip;
566 }
567
568
569 sos_vaddr_t sos_cpu_context_get_SP(const struct sos_cpu_state *ctxt)
570 {
571 SOS_ASSERT_FATAL(NULL != ctxt);
572
573
574
575
576 if (TRUE == sos_cpu_context_is_in_user_mode(ctxt))
577 {
578 struct sos_cpu_ustate const* uctxt = (struct sos_cpu_ustate const*)ctxt;
579 return uctxt->cpl3_esp;
580 }
581
582
583
584
585 return (sos_vaddr_t)ctxt;
586 }
587
588
589 sos_ret_t
590 sos_cpu_context_set_EX_return_address(struct sos_cpu_state *ctxt,
591 sos_vaddr_t ret_vaddr)
592 {
593 ctxt->eip = ret_vaddr;
594 return SOS_OK;
595 }
596
597
598 void sos_cpu_context_dump(const struct sos_cpu_state *ctxt)
599 {
600 char buf[128];
601
602 snprintf(buf, sizeof(buf),
603 "CPU: eip=%x esp0=%x eflags=%x cs=%x ds=%x ss0=%x err=%x",
604 (unsigned)ctxt->eip, (unsigned)ctxt, (unsigned)ctxt->eflags,
605 (unsigned)GET_CPU_CS_REGISTER_VALUE(ctxt->cs), (unsigned)ctxt->ds,
606 (unsigned)ctxt->cpl0_ss,
607 (unsigned)ctxt->error_code);
608 if (TRUE == sos_cpu_context_is_in_user_mode(ctxt))
609 {
610 struct sos_cpu_ustate const* uctxt = (struct sos_cpu_ustate const*)ctxt;
611 snprintf(buf, sizeof(buf),
612 "%s esp3=%x ss3=%x",
613 buf, (unsigned)uctxt->cpl3_esp, (unsigned)uctxt->cpl3_ss);
614 }
615 else
616 snprintf(buf, sizeof(buf), "%s [KERNEL MODE]", buf);
617
618 sos_bochs_putstring(buf); sos_bochs_putstring("\n");
619 sos_x86_videomem_putstring(23, 0,
620 SOS_X86_VIDEO_FG_BLACK | SOS_X86_VIDEO_BG_LTGRAY,
621 buf);
622 }
623
624
625
626
627
628
629
630 sos_ui32_t sos_cpu_context_get_EX_info(const struct sos_cpu_state *ctxt)
631 {
632 SOS_ASSERT_FATAL(NULL != ctxt);
633 return ctxt->error_code;
634 }
635
636
/**
 * Return the linear address that caused the page fault, read from the
 * cr2 control register (where the x86 CPU stores it on a page fault).
 *
 * NOTE(review): the ctxt parameter is not consulted — cr2 reflects
 * the most recent page fault, so this is only meaningful when called
 * from the page-fault handler before any further fault can occur.
 */
sos_vaddr_t
sos_cpu_context_get_EX_faulting_vaddr(const struct sos_cpu_state *ctxt)
{
  sos_ui32_t cr2;

  /* Read cr2 into a general-purpose register */
  asm volatile ("movl %%cr2, %0"
		:"=r"(cr2)
		: );

  return cr2;
}
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678 inline
679 sos_ret_t sos_syscall_get3args(const struct sos_cpu_state *user_ctxt,
680 unsigned int *arg1,
681 unsigned int *arg2,
682 unsigned int *arg3)
683 {
684 *arg1 = user_ctxt->ebx;
685 *arg2 = user_ctxt->ecx;
686 *arg3 = user_ctxt->edx;
687 return SOS_OK;
688 }
689
690
691 sos_ret_t sos_syscall_get1arg(const struct sos_cpu_state *user_ctxt,
692 unsigned int *arg1)
693 {
694 unsigned int unused;
695 return sos_syscall_get3args(user_ctxt, arg1, & unused, & unused);
696 }
697
698
699 sos_ret_t sos_syscall_get2args(const struct sos_cpu_state *user_ctxt,
700 unsigned int *arg1,
701 unsigned int *arg2)
702 {
703 unsigned int unused;
704 return sos_syscall_get3args(user_ctxt, arg1, arg2, & unused);
705 }
706
707
708
709
710
711
712
713 sos_ret_t sos_syscall_get4args(const struct sos_cpu_state *user_ctxt,
714 unsigned int *arg1,
715 unsigned int *arg2,
716 unsigned int *arg3,
717 unsigned int *arg4)
718 {
719 sos_uaddr_t uaddr_other_args;
720 unsigned int other_args[2];
721 sos_ret_t retval;
722
723
724
725 retval = sos_syscall_get3args(user_ctxt, arg1, arg2,
726 (unsigned int *)& uaddr_other_args);
727 if (SOS_OK != retval)
728 return retval;
729
730
731
732 retval = sos_memcpy_from_user((sos_vaddr_t)other_args,
733 (sos_uaddr_t)uaddr_other_args,
734 sizeof(other_args));
735 if (sizeof(other_args) != retval)
736 return -SOS_EFAULT;
737
738 *arg3 = other_args[0];
739 *arg4 = other_args[1];
740 return SOS_OK;
741 }
742
743
744 sos_ret_t sos_syscall_get5args(const struct sos_cpu_state *user_ctxt,
745 unsigned int *arg1,
746 unsigned int *arg2,
747 unsigned int *arg3,
748 unsigned int *arg4,
749 unsigned int *arg5)
750 {
751 sos_uaddr_t uaddr_other_args;
752 unsigned int other_args[3];
753 sos_ret_t retval;
754
755
756
757 retval = sos_syscall_get3args(user_ctxt, arg1, arg2,
758 (unsigned int *)& uaddr_other_args);
759 if (SOS_OK != retval)
760 return retval;
761
762
763
764 retval = sos_memcpy_from_user((sos_vaddr_t)other_args,
765 (sos_uaddr_t)uaddr_other_args,
766 sizeof(other_args));
767 if (sizeof(other_args) != retval)
768 return -SOS_EFAULT;
769
770 *arg3 = other_args[0];
771 *arg4 = other_args[1];
772 *arg5 = other_args[2];
773 return SOS_OK;
774 }
775
776
777 sos_ret_t sos_syscall_get6args(const struct sos_cpu_state *user_ctxt,
778 unsigned int *arg1,
779 unsigned int *arg2,
780 unsigned int *arg3,
781 unsigned int *arg4,
782 unsigned int *arg5,
783 unsigned int *arg6)
784 {
785 sos_uaddr_t uaddr_other_args;
786 unsigned int other_args[4];
787 sos_ret_t retval;
788
789
790
791 retval = sos_syscall_get3args(user_ctxt, arg1, arg2,
792 (unsigned int *)& uaddr_other_args);
793 if (SOS_OK != retval)
794 return retval;
795
796
797
798 retval = sos_memcpy_from_user((sos_vaddr_t)other_args,
799 (sos_uaddr_t)uaddr_other_args,
800 sizeof(other_args));
801 if (sizeof(other_args) != retval)
802 return -SOS_EFAULT;
803
804 *arg3 = other_args[0];
805 *arg4 = other_args[1];
806 *arg5 = other_args[2];
807 *arg6 = other_args[3];
808 return SOS_OK;
809 }
810
811
812 sos_ret_t sos_syscall_get7args(const struct sos_cpu_state *user_ctxt,
813 unsigned int *arg1,
814 unsigned int *arg2,
815 unsigned int *arg3,
816 unsigned int *arg4,
817 unsigned int *arg5,
818 unsigned int *arg6,
819 unsigned int *arg7)
820 {
821 sos_uaddr_t uaddr_other_args;
822 unsigned int other_args[5];
823 sos_ret_t retval;
824
825
826
827 retval = sos_syscall_get3args(user_ctxt, arg1, arg2,
828 (unsigned int *)& uaddr_other_args);
829 if (SOS_OK != retval)
830 return retval;
831
832
833
834 retval = sos_memcpy_from_user((sos_vaddr_t)other_args,
835 (sos_uaddr_t)uaddr_other_args,
836 sizeof(other_args));
837 if (sizeof(other_args) != retval)
838 return -SOS_EFAULT;
839
840 *arg3 = other_args[0];
841 *arg4 = other_args[1];
842 *arg5 = other_args[2];
843 *arg6 = other_args[3];
844 *arg7 = other_args[4];
845 return SOS_OK;
846 }
847
848
849 sos_ret_t sos_syscall_get8args(const struct sos_cpu_state *user_ctxt,
850 unsigned int *arg1,
851 unsigned int *arg2,
852 unsigned int *arg3,
853 unsigned int *arg4,
854 unsigned int *arg5,
855 unsigned int *arg6,
856 unsigned int *arg7,
857 unsigned int *arg8)
858 {
859 sos_uaddr_t uaddr_other_args;
860 unsigned int other_args[6];
861 sos_ret_t retval;
862
863
864
865 retval = sos_syscall_get3args(user_ctxt, arg1, arg2,
866 (unsigned int *)& uaddr_other_args);
867 if (SOS_OK != retval)
868 return retval;
869
870
871
872 retval = sos_memcpy_from_user((sos_vaddr_t)other_args,
873 (sos_uaddr_t)uaddr_other_args,
874 sizeof(other_args));
875 if (sizeof(other_args) != retval)
876 return -SOS_EFAULT;
877
878 *arg3 = other_args[0];
879 *arg4 = other_args[1];
880 *arg5 = other_args[2];
881 *arg6 = other_args[3];
882 *arg7 = other_args[4];
883 *arg8 = other_args[5];
884 return SOS_OK;
885 }
886
887
888
889
890
891
892
/**
 * Walk the kernel stack's frame-pointer chain, calling the given
 * callback once per frame, up to max_depth frames or until the chain
 * leaves the [stack_bottom, stack_bottom+stack_size) area.
 *
 * NOTE(review): this walk relies on the standard ebp chain (each
 * frame's ebp pointing to the caller's saved ebp, with the return
 * address just above) — i.e. code compiled with frame pointers
 * enabled.  TODO confirm build flags.
 *
 * @param cpu_state   saved context to start from, or NULL to start
 *                    from the current activation
 * @param max_depth   maximum number of frames to visit
 * @param stack_bottom/stack_size  bounds of the stack being walked
 * @param backtracer  callback invoked per frame
 * @param custom_arg  opaque argument forwarded to the callback
 * @return number of frames actually visited
 */
sos_ui32_t sos_backtrace(const struct sos_cpu_state *cpu_state,
			 sos_ui32_t max_depth,
			 sos_vaddr_t stack_bottom,
			 sos_size_t stack_size,
			 sos_backtrace_callback_t * backtracer,
			 void *custom_arg)
{
  unsigned int depth;
  sos_vaddr_t callee_PC, caller_frame;

  /* A user-mode context cannot be backtraced here: its stack frames
     do not live in this kernel stack area */
  if ((NULL != cpu_state)
      &&
      (TRUE == sos_cpu_context_is_in_user_mode(cpu_state)))
    {
      return 0;
    }

  /* Choose the starting point: either the saved eip/ebp of the given
     context, or — when no context is given — our own return address
     and our caller's frame, via the compiler builtins */
  if (cpu_state)
    {
      callee_PC = cpu_state->eip;
      caller_frame = cpu_state->ebp;
    }
  else
    {
      /* Frame 0 of the backtrace is our caller, not this function */
      callee_PC = (sos_vaddr_t)__builtin_return_address(0);
      caller_frame = (sos_vaddr_t)__builtin_frame_address(1);
    }

  for(depth=0 ; depth < max_depth ; depth ++)
    {
      /* Report this frame: its PC and the address of its arguments
	 (ebp + 8: past the saved ebp and the return address) */
      backtracer(callee_PC, caller_frame + 8, depth, custom_arg);

      /* Stop when the next frame slot would fall outside the stack */
      if ( (caller_frame < stack_bottom)
	   || (caller_frame + 4 >= stack_bottom + stack_size) )
	return depth;

      /* Move up one frame: return address at ebp+4, caller's saved
	 ebp at ebp */
      callee_PC = *((sos_vaddr_t*) (caller_frame + 4));
      caller_frame = *((sos_vaddr_t*) caller_frame);
    }

  return depth;
}
969
970
971
972
973
974
975
976
977
978
979
980 void
981 sos_cpu_context_update_kernel_tss(struct sos_cpu_state *next_ctxt)
982 {
983
984 if (sos_cpu_context_is_in_user_mode(next_ctxt))
985 {
986
987
988
989
990
991
992
993
994
995
996
997 kernel_tss.esp0 = ((sos_vaddr_t)next_ctxt)
998 + sizeof(struct sos_cpu_ustate);
999
1000
1001
1002 }
1003 else
1004 {
1005
1006
1007 }
1008 }