001
002
003
004
005
006
007
008
009
010
011
012
013
014
015
016
017
018
019
020
021 #include <sos/assert.h>
022 #include <sos/klibc.h>
023 #include <drivers/bochs.h>
024 #include <drivers/x86_videomem.h>
025 #include <hwcore/segment.h>
026 #include <hwcore/gdt.h>
027 #include <sos/uaccess.h>
028
029 #include "cpu_context.h"
030
031
032
033
034
035
036
037
038
039
040
041
042
043
044
/*
 * The saved CPU context.  This packed structure must mirror, field
 * for field, the frame built on the stack by the low-level interrupt
 * and exception entry code (not visible in this file) and consumed by
 * the corresponding return path.  Do NOT reorder, resize or unpack
 * these fields without updating that assembler code accordingly.
 */
struct sos_cpu_state {
  /* Data segment selectors (16 bits each) */
  sos_ui16_t gs;
  sos_ui16_t fs;
  sos_ui16_t es;
  sos_ui16_t ds;
  sos_ui16_t cpl0_ss;          /* Stack segment used while in ring 0 */

  /* Keeps the 32-bit fields below 4-byte aligned: there are five
     16-bit selectors above (5*2 + 2 = 12 bytes) */
  sos_ui16_t alignment_padding;

  /* General-purpose registers */
  sos_ui32_t eax;
  sos_ui32_t ebx;
  sos_ui32_t ecx;
  sos_ui32_t edx;
  sos_ui32_t esi;
  sos_ui32_t edi;
  sos_ui32_t ebp;

  /* Exception error code (a dummy value for vectors that have none --
     see sos_cpu_context_get_EX_info()) */
  sos_ui32_t error_code;

  /* Frame the CPU pushes automatically on interrupt/exception entry */
  sos_vaddr_t eip;
  sos_ui32_t cs;               /* Stored as 32 bits; only the low 16
                                  bits are meaningful -- see
                                  GET_CPU_CS_REGISTER_VALUE() */
  sos_ui32_t eflags;

} __attribute__((packed));
074
075
076
077
078
079
080
081
082
083
084
085
086
087
088
/*
 * Extract the 16-bit code-segment selector from the 32-bit word in
 * which the CPU saved CS (only the low 16 bits carry the selector).
 */
#define GET_CPU_CS_REGISTER_VALUE(pushed_ui32_cs_value) \
  ( (pushed_ui32_cs_value) & 0xffff )
091
092
093
094
095
/*
 * Saved context of a thread interrupted while running in kernel mode.
 * No inter-privilege transition occurs in that case, so the basic CPU
 * state is all there is to save.
 */
struct sos_cpu_kstate
{
  struct sos_cpu_state regs;
} __attribute__((packed));
100
101
102
103
104
105
106
107
108
109
/*
 * Saved context of a thread interrupted while running in user mode.
 * On a ring 3 -> ring 0 transition the CPU additionally pushes the
 * user stack pointer and stack segment, hence the two extra fields
 * appended after the basic CPU state.  (The anonymous inner struct is
 * a GCC extension / C11 feature this file already relies on.)
 */
struct sos_cpu_ustate
{
  struct sos_cpu_state regs;
  struct
  {
    sos_ui32_t cpl3_esp;  /* ESP to restore upon return to ring 3 */
    sos_ui16_t cpl3_ss;   /* SS to restore upon return to ring 3 */
  };
} __attribute__((packed));
119
120
121
122
123
124
125
126
127
128
129
130
/*
 * The x86 32-bit Task-State Segment, exactly as defined by the
 * hardware (see Intel SDM vol. 3, "Task Management").  SOS does not
 * use hardware task switching: only esp0/ss0 are kept up to date (see
 * sos_cpu_context_update_kernel_tss()), everything else stays zero.
 * The layout is fixed by the CPU -- do not modify it.
 */
struct x86_tss {

  /* Selector of the previous task's TSS (hardware task nesting) */
  sos_ui16_t back_link;

  sos_ui16_t reserved1;

  /* Ring 0 stack: loaded by the CPU upon ring 3 -> ring 0 transition */
  sos_vaddr_t esp0;
  sos_ui16_t ss0;

  sos_ui16_t reserved2;

  /* Ring 1 stack (unused by SOS) */
  sos_vaddr_t esp1;
  sos_ui16_t ss1;

  sos_ui16_t reserved3;

  /* Ring 2 stack (unused by SOS) */
  sos_vaddr_t esp2;
  sos_ui16_t ss2;

  sos_ui16_t reserved4;

  /* Register snapshot area, used only by hardware task switching
     (unused by SOS) */
  sos_vaddr_t cr3;
  sos_vaddr_t eip;
  sos_ui32_t eflags;
  sos_ui32_t eax;
  sos_ui32_t ecx;
  sos_ui32_t edx;
  sos_ui32_t ebx;
  sos_ui32_t esp;
  sos_ui32_t ebp;
  sos_ui32_t esi;
  sos_ui32_t edi;

  /* Segment selectors (each padded to 32 bits by a reserved half) */
  sos_ui16_t es;
  sos_ui16_t reserved5;

  sos_ui16_t cs;
  sos_ui16_t reserved6;

  sos_ui16_t ss;
  sos_ui16_t reserved7;

  sos_ui16_t ds;
  sos_ui16_t reserved8;

  sos_ui16_t fs;
  sos_ui16_t reserved9;

  sos_ui16_t gs;
  sos_ui16_t reserved10;

  /* Local descriptor table selector (unused by SOS) */
  sos_ui16_t ldtr;
  sos_ui16_t reserved11;

  /* Debug trap on task switch, and offset of the I/O permission
     bitmap from the TSS base */
  sos_ui16_t debug_trap_flag :1;
  sos_ui16_t reserved12 :15;
  sos_ui16_t iomap_base_addr;

} __attribute__((packed, aligned(128)));
212
213
214 static struct x86_tss kernel_tss;
215
216
217 sos_ret_t sos_cpu_context_subsystem_setup()
218 {
219
220 memset(&kernel_tss, 0x0, sizeof(kernel_tss));
221
222
223
224
225
226
227
228
229
230
231
232 kernel_tss.ss0 = SOS_BUILD_SEGMENT_REG_VALUE(0, FALSE, SOS_SEG_KDATA);
233
234
235 sos_gdt_register_kernel_tss((sos_vaddr_t) &kernel_tss);
236
237 return SOS_OK;
238 }
239
240
241
242
243
244
245
/*
 * Trampoline executed as the very first code of every new kernel
 * thread.  It is never called directly: sos_cpu_kstate_init() forges
 * a stack frame so that the freshly-created context "returns" here
 * with the four arguments below already in place on the stack.
 *
 * Runs start_func(start_arg), then exit_func(exit_arg).  The exit
 * function must never return; the final assertion enforces that.
 */
static void core_routine (sos_cpu_kstate_function_arg1_t *start_func,
			  sos_ui32_t start_arg,
			  sos_cpu_kstate_function_arg1_t *exit_func,
			  sos_ui32_t exit_arg)
     __attribute__((noreturn));

static void core_routine (sos_cpu_kstate_function_arg1_t *start_func,
			  sos_ui32_t start_arg,
			  sos_cpu_kstate_function_arg1_t *exit_func,
			  sos_ui32_t exit_arg)
{
  start_func(start_arg);
  exit_func(exit_arg);

  /* Reaching this point is a fatal kernel bug */
  SOS_ASSERT_FATAL(! "The exit function of the thread should NOT return !");
  for(;;);
}
263
264
/*
 * Build the initial saved context of a new kernel-mode thread on its
 * own stack, so that restoring *ctxt starts the thread inside
 * core_routine(start_func, start_arg, exit_func, exit_arg).
 *
 * @param ctxt         Out: address of the forged context (lies inside
 *                     the given stack; nothing is allocated here)
 * @param start_func   Thread body, receives start_arg
 * @param stack_bottom Lowest address of the thread's kernel stack
 * @param stack_size   Size of that stack in bytes
 * @param exit_func    Called when start_func returns; must not return
 * @return SOS_OK
 *
 * Statement order below is critical: the argument frame must be
 * pushed before the context area is carved out beneath it.
 */
sos_ret_t sos_cpu_kstate_init(struct sos_cpu_state **ctxt,
			      sos_cpu_kstate_function_arg1_t *start_func,
			      sos_ui32_t start_arg,
			      sos_vaddr_t stack_bottom,
			      sos_size_t stack_size,
			      sos_cpu_kstate_function_arg1_t *exit_func,
			      sos_ui32_t exit_arg)
{

  struct sos_cpu_kstate *kctxt;

  /* Start at the top of the stack: x86 stacks grow downwards */
  sos_vaddr_t tmp_vaddr = stack_bottom + stack_size;
  sos_ui32_t *stack = (sos_ui32_t*)tmp_vaddr;

  /* Optional debugging aids: poison the whole stack (uninitialized
     variable detection) or only its lowest bytes (overflow
     detection) */
#ifdef SOS_CPU_STATE_DETECT_UNINIT_KERNEL_VARS
  memset((void*)stack_bottom, SOS_CPU_STATE_STACK_POISON, stack_size);
#elif defined(SOS_CPU_STATE_DETECT_KERNEL_STACK_OVERFLOW)
  sos_cpu_state_prepare_detect_kernel_stack_overflow(stack_bottom, stack_size);
#endif

  /* Forge the frame core_routine() expects: its 4 arguments pushed in
     reverse order, then a fake (NULL) return address -- core_routine
     never returns, so it is never used */
  *(--stack) = exit_arg;
  *(--stack) = (sos_ui32_t)exit_func;
  *(--stack) = start_arg;
  *(--stack) = (sos_ui32_t)start_func;
  *(--stack) = 0;

  /* Carve the saved-context area out of the stack, just below the
     frame above; restoring it will "return" into core_routine() */
  tmp_vaddr = ((sos_vaddr_t)stack) - sizeof(struct sos_cpu_kstate);
  kctxt = (struct sos_cpu_kstate*)tmp_vaddr;

  /* All registers default to 0 */
  memset(kctxt, 0x0, sizeof(struct sos_cpu_kstate));

  /* Execution will resume at the trampoline */
  kctxt->regs.eip = (sos_ui32_t)core_routine;

  /* Kernel-mode segment selectors */
  kctxt->regs.cs
    = SOS_BUILD_SEGMENT_REG_VALUE(0, FALSE, SOS_SEG_KCODE);
  kctxt->regs.ds
    = SOS_BUILD_SEGMENT_REG_VALUE(0, FALSE, SOS_SEG_KDATA);
  kctxt->regs.es
    = SOS_BUILD_SEGMENT_REG_VALUE(0, FALSE, SOS_SEG_KDATA);
  kctxt->regs.cpl0_ss
    = SOS_BUILD_SEGMENT_REG_VALUE(0, FALSE, SOS_SEG_KDATA);

  /* eflags with only IF (bit 9) set: thread starts with interrupts
     enabled */
  kctxt->regs.eflags = (1 << 9);

  /* The context lives inside the stack itself */
  *ctxt = (struct sos_cpu_state*) kctxt;

  return SOS_OK;
}
353
354
355 sos_ret_t sos_cpu_ustate_init(struct sos_cpu_state **ctxt,
356 sos_uaddr_t user_start_PC,
357 sos_ui32_t user_start_arg1,
358 sos_ui32_t user_start_arg2,
359 sos_uaddr_t user_initial_SP,
360 sos_vaddr_t kernel_stack_bottom,
361 sos_size_t kernel_stack_size)
362 {
363
364 struct sos_cpu_ustate *uctxt;
365
366
367
368
369
370
371
372 sos_vaddr_t uctxt_vaddr = kernel_stack_bottom
373 + kernel_stack_size
374 - sizeof(struct sos_cpu_ustate);
375 uctxt = (struct sos_cpu_ustate*)uctxt_vaddr;
376
377
378 #ifdef SOS_CPU_STATE_DETECT_UNINIT_KERNEL_VARS
379 memset((void*)kernel_stack_bottom,
380 SOS_CPU_STATE_STACK_POISON,
381 kernel_stack_size);
382 #elif defined(SOS_CPU_STATE_DETECT_KERNEL_STACK_OVERFLOW)
383 sos_cpu_state_prepare_detect_kernel_stack_overflow(kernel_stack_bottom,
384 kernel_stack_size);
385 #endif
386
387
388
389
390
391
392
393 memset(uctxt, 0x0, sizeof(struct sos_cpu_ustate));
394
395
396
397 uctxt->regs.eip = (sos_ui32_t)user_start_PC;
398
399
400 uctxt->cpl3_esp = user_initial_SP;
401
402
403
404 uctxt->regs.eax = user_start_arg1;
405 uctxt->regs.ebx = user_start_arg2;
406
407
408 uctxt->regs.cs
409 = SOS_BUILD_SEGMENT_REG_VALUE(3, FALSE, SOS_SEG_UCODE);
410 uctxt->regs.ds
411 = SOS_BUILD_SEGMENT_REG_VALUE(3, FALSE, SOS_SEG_UDATA);
412 uctxt->regs.es
413 = SOS_BUILD_SEGMENT_REG_VALUE(3, FALSE, SOS_SEG_UDATA);
414 uctxt->cpl3_ss
415 = SOS_BUILD_SEGMENT_REG_VALUE(3, FALSE, SOS_SEG_UDATA);
416
417
418
419
420
421 uctxt->regs.cpl0_ss
422 = SOS_BUILD_SEGMENT_REG_VALUE(0, FALSE, SOS_SEG_KDATA);
423
424
425
426
427 uctxt->regs.eflags = (1 << 9);
428
429
430 *ctxt = (struct sos_cpu_state*) uctxt;
431
432 return SOS_OK;
433 }
434
435
436 sos_ret_t
437 sos_cpu_context_is_in_user_mode(const struct sos_cpu_state *ctxt)
438 {
439
440
441 switch (GET_CPU_CS_REGISTER_VALUE(ctxt->cs))
442 {
443 case SOS_BUILD_SEGMENT_REG_VALUE(3, FALSE, SOS_SEG_UCODE):
444 return TRUE;
445 break;
446
447 case SOS_BUILD_SEGMENT_REG_VALUE(0, FALSE, SOS_SEG_KCODE):
448 return FALSE;
449 break;
450
451 default:
452 SOS_FATAL_ERROR("Invalid saved context Code segment register: 0x%x (k=%x, u=%x) !",
453 (unsigned) GET_CPU_CS_REGISTER_VALUE(ctxt->cs),
454 SOS_BUILD_SEGMENT_REG_VALUE(0, FALSE, SOS_SEG_KCODE),
455 SOS_BUILD_SEGMENT_REG_VALUE(3, FALSE, SOS_SEG_UCODE));
456 break;
457 }
458
459
460 return -SOS_EFATAL;
461 }
462
463
464 #if defined(SOS_CPU_STATE_DETECT_KERNEL_STACK_OVERFLOW)
465 void
466 sos_cpu_state_prepare_detect_kernel_stack_overflow(const struct sos_cpu_state *ctxt,
467 sos_vaddr_t stack_bottom,
468 sos_size_t stack_size)
469 {
470 sos_size_t poison_size = SOS_CPU_STATE_DETECT_KERNEL_STACK_OVERFLOW;
471 if (poison_size > stack_size)
472 poison_size = stack_size;
473
474 memset((void*)stack_bottom, SOS_CPU_STATE_STACK_POISON, poison_size);
475 }
476
477
/*
 * Assert that the kernel stack holding the given saved context has
 * not overflowed: the context must lie entirely within the stack, and
 * the poison bytes written by the companion prepare_* function must
 * be intact.  Any violation is a fatal error.
 */
void
sos_cpu_state_detect_kernel_stack_overflow(const struct sos_cpu_state *ctxt,
					   sos_vaddr_t stack_bottom,
					   sos_size_t stack_size)
{
  unsigned char *c;
  int i;  /* NOTE(review): compared against the unsigned stack_size
             below; harmless as long as the poison-size macro is
             small, but sos_size_t would be cleaner */

  /* The saved context is expected to sit inside the stack bounds */
  SOS_ASSERT_FATAL(((sos_vaddr_t)ctxt) >= stack_bottom);
  SOS_ASSERT_FATAL(((sos_vaddr_t)ctxt) + sizeof(struct sos_cpu_kstate)
		   <= stack_bottom + stack_size);

  /* Each poison byte at the bottom of the stack must be untouched */
  for (c = (unsigned char*) stack_bottom, i = 0 ;
       (i < SOS_CPU_STATE_DETECT_KERNEL_STACK_OVERFLOW) && (i < stack_size) ;
       c++, i++)
    {
      SOS_ASSERT_FATAL(SOS_CPU_STATE_STACK_POISON == *c);
    }
}
502 #endif
503
504
505
506
507
508
509
510 sos_vaddr_t sos_cpu_context_get_PC(const struct sos_cpu_state *ctxt)
511 {
512 SOS_ASSERT_FATAL(NULL != ctxt);
513
514
515
516 return ctxt->eip;
517 }
518
519
520 sos_vaddr_t sos_cpu_context_get_SP(const struct sos_cpu_state *ctxt)
521 {
522 SOS_ASSERT_FATAL(NULL != ctxt);
523
524
525
526
527 if (TRUE == sos_cpu_context_is_in_user_mode(ctxt))
528 {
529 struct sos_cpu_ustate * uctxt = (struct sos_cpu_ustate*)ctxt;
530 return uctxt->cpl3_esp;
531 }
532
533
534
535
536 return (sos_vaddr_t)ctxt;
537 }
538
539
540 sos_ret_t
541 sos_cpu_context_set_EX_return_address(struct sos_cpu_state *ctxt,
542 sos_vaddr_t ret_vaddr)
543 {
544 ctxt->eip = ret_vaddr;
545 return SOS_OK;
546 }
547
548
549 void sos_cpu_context_dump(const struct sos_cpu_state *ctxt)
550 {
551 char buf[128];
552
553 snprintf(buf, sizeof(buf),
554 "CPU: eip=%x esp0=%x eflags=%x cs=%x ds=%x ss0=%x err=%x",
555 (unsigned)ctxt->eip, (unsigned)ctxt, (unsigned)ctxt->eflags,
556 (unsigned)GET_CPU_CS_REGISTER_VALUE(ctxt->cs), (unsigned)ctxt->ds,
557 (unsigned)ctxt->cpl0_ss,
558 (unsigned)ctxt->error_code);
559 if (TRUE == sos_cpu_context_is_in_user_mode(ctxt))
560 {
561 struct sos_cpu_ustate * uctxt = (struct sos_cpu_ustate*)ctxt;
562 snprintf(buf, sizeof(buf),
563 "%s esp3=%x ss3=%x",
564 buf, (unsigned)uctxt->cpl3_esp, (unsigned)uctxt->cpl3_ss);
565 }
566 else
567 snprintf(buf, sizeof(buf), "%s [KERNEL MODE]", buf);
568
569 sos_bochs_putstring(buf); sos_bochs_putstring("\n");
570 sos_x86_videomem_putstring(23, 0,
571 SOS_X86_VIDEO_FG_BLACK | SOS_X86_VIDEO_BG_LTGRAY,
572 buf);
573 }
574
575
576
577
578
579
580
581 sos_ui32_t sos_cpu_context_get_EX_info(const struct sos_cpu_state *ctxt)
582 {
583 SOS_ASSERT_FATAL(NULL != ctxt);
584 return ctxt->error_code;
585 }
586
587
/*
 * Return the linear address whose access triggered the last page
 * fault, as recorded by the CPU in the cr2 control register.
 *
 * NOTE(review): the ctxt parameter is deliberately unused -- cr2 is
 * read live from the CPU, so this is only meaningful when called from
 * within the page-fault handling path, before another fault can
 * overwrite cr2.
 */
sos_vaddr_t
sos_cpu_context_get_EX_faulting_vaddr(const struct sos_cpu_state *ctxt)
{
  sos_ui32_t cr2;

  /* Read the cr2 control register */
  asm volatile ("movl %%cr2, %0"
		:"=r"(cr2)
		: );

  return cr2;
}
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
/*
 * Fetch the 3 "register" system-call arguments from the saved user
 * context: by convention they are passed in ebx, ecx and edx.  Always
 * returns SOS_OK.
 *
 * NOTE(review): this is a bare C99 "inline" definition (neither
 * static nor extern); whether it also provides an external definition
 * for other translation units depends on the declaration in the
 * header -- verify against cpu_context.h before changing it.
 */
inline
sos_ret_t sos_syscall_get3args(const struct sos_cpu_state *user_ctxt,
			       unsigned int *arg1,
			       unsigned int *arg2,
			       unsigned int *arg3)
{
  *arg1 = user_ctxt->ebx;
  *arg2 = user_ctxt->ecx;
  *arg3 = user_ctxt->edx;
  return SOS_OK;
}
640
641
642 sos_ret_t sos_syscall_get1arg(const struct sos_cpu_state *user_ctxt,
643 unsigned int *arg1)
644 {
645 unsigned int unused;
646 return sos_syscall_get3args(user_ctxt, arg1, & unused, & unused);
647 }
648
649
650 sos_ret_t sos_syscall_get2args(const struct sos_cpu_state *user_ctxt,
651 unsigned int *arg1,
652 unsigned int *arg2)
653 {
654 unsigned int unused;
655 return sos_syscall_get3args(user_ctxt, arg1, arg2, & unused);
656 }
657
658
659
660
661
662
663
664 sos_ret_t sos_syscall_get4args(const struct sos_cpu_state *user_ctxt,
665 unsigned int *arg1,
666 unsigned int *arg2,
667 unsigned int *arg3,
668 unsigned int *arg4)
669 {
670 sos_uaddr_t uaddr_other_args;
671 unsigned int other_args[2];
672 sos_ret_t retval;
673
674
675
676 retval = sos_syscall_get3args(user_ctxt, arg1, arg2,
677 (unsigned int *)& uaddr_other_args);
678 if (SOS_OK != retval)
679 return retval;
680
681
682
683 retval = sos_memcpy_from_user((sos_vaddr_t)other_args,
684 (sos_uaddr_t)uaddr_other_args,
685 sizeof(other_args));
686 if (sizeof(other_args) != retval)
687 return -SOS_EFAULT;
688
689 *arg3 = other_args[0];
690 *arg4 = other_args[1];
691 return SOS_OK;
692 }
693
694
695 sos_ret_t sos_syscall_get5args(const struct sos_cpu_state *user_ctxt,
696 unsigned int *arg1,
697 unsigned int *arg2,
698 unsigned int *arg3,
699 unsigned int *arg4,
700 unsigned int *arg5)
701 {
702 sos_uaddr_t uaddr_other_args;
703 unsigned int other_args[3];
704 sos_ret_t retval;
705
706
707
708 retval = sos_syscall_get3args(user_ctxt, arg1, arg2,
709 (unsigned int *)& uaddr_other_args);
710 if (SOS_OK != retval)
711 return retval;
712
713
714
715 retval = sos_memcpy_from_user((sos_vaddr_t)other_args,
716 (sos_uaddr_t)uaddr_other_args,
717 sizeof(other_args));
718 if (sizeof(other_args) != retval)
719 return -SOS_EFAULT;
720
721 *arg3 = other_args[0];
722 *arg4 = other_args[1];
723 *arg5 = other_args[2];
724 return SOS_OK;
725 }
726
727
728 sos_ret_t sos_syscall_get6args(const struct sos_cpu_state *user_ctxt,
729 unsigned int *arg1,
730 unsigned int *arg2,
731 unsigned int *arg3,
732 unsigned int *arg4,
733 unsigned int *arg5,
734 unsigned int *arg6)
735 {
736 sos_uaddr_t uaddr_other_args;
737 unsigned int other_args[4];
738 sos_ret_t retval;
739
740
741
742 retval = sos_syscall_get3args(user_ctxt, arg1, arg2,
743 (unsigned int *)& uaddr_other_args);
744 if (SOS_OK != retval)
745 return retval;
746
747
748
749 retval = sos_memcpy_from_user((sos_vaddr_t)other_args,
750 (sos_uaddr_t)uaddr_other_args,
751 sizeof(other_args));
752 if (sizeof(other_args) != retval)
753 return -SOS_EFAULT;
754
755 *arg3 = other_args[0];
756 *arg4 = other_args[1];
757 *arg5 = other_args[2];
758 *arg6 = other_args[3];
759 return SOS_OK;
760 }
761
762
763 sos_ret_t sos_syscall_get7args(const struct sos_cpu_state *user_ctxt,
764 unsigned int *arg1,
765 unsigned int *arg2,
766 unsigned int *arg3,
767 unsigned int *arg4,
768 unsigned int *arg5,
769 unsigned int *arg6,
770 unsigned int *arg7)
771 {
772 sos_uaddr_t uaddr_other_args;
773 unsigned int other_args[5];
774 sos_ret_t retval;
775
776
777
778 retval = sos_syscall_get3args(user_ctxt, arg1, arg2,
779 (unsigned int *)& uaddr_other_args);
780 if (SOS_OK != retval)
781 return retval;
782
783
784
785 retval = sos_memcpy_from_user((sos_vaddr_t)other_args,
786 (sos_uaddr_t)uaddr_other_args,
787 sizeof(other_args));
788 if (sizeof(other_args) != retval)
789 return -SOS_EFAULT;
790
791 *arg3 = other_args[0];
792 *arg4 = other_args[1];
793 *arg5 = other_args[2];
794 *arg6 = other_args[3];
795 *arg7 = other_args[4];
796 return SOS_OK;
797 }
798
799
800 sos_ret_t sos_syscall_get8args(const struct sos_cpu_state *user_ctxt,
801 unsigned int *arg1,
802 unsigned int *arg2,
803 unsigned int *arg3,
804 unsigned int *arg4,
805 unsigned int *arg5,
806 unsigned int *arg6,
807 unsigned int *arg7,
808 unsigned int *arg8)
809 {
810 sos_uaddr_t uaddr_other_args;
811 unsigned int other_args[6];
812 sos_ret_t retval;
813
814
815
816 retval = sos_syscall_get3args(user_ctxt, arg1, arg2,
817 (unsigned int *)& uaddr_other_args);
818 if (SOS_OK != retval)
819 return retval;
820
821
822
823 retval = sos_memcpy_from_user((sos_vaddr_t)other_args,
824 (sos_uaddr_t)uaddr_other_args,
825 sizeof(other_args));
826 if (sizeof(other_args) != retval)
827 return -SOS_EFAULT;
828
829 *arg3 = other_args[0];
830 *arg4 = other_args[1];
831 *arg5 = other_args[2];
832 *arg6 = other_args[3];
833 *arg7 = other_args[4];
834 *arg8 = other_args[5];
835 return SOS_OK;
836 }
837
838
839
840
841
842
843
/*
 * Walk the kernel call stack, invoking backtracer() once per frame
 * with (PC of the callee, address just above the saved frame --
 * presumably where the first argument lives, depth, custom_arg).
 *
 * @param cpu_state  Context to start from, or NULL to walk the
 *                   current stack starting at our own caller
 * @param max_depth  Maximum number of frames to visit
 * @param stack_bottom/stack_size  Bounds used to stop the walk
 * @return number of frames actually visited (0 for user contexts,
 *         which this function refuses to walk)
 *
 * Relies on the standard x86 frame layout: [ebp] = caller's saved
 * ebp, [ebp+4] = return address.  This only holds when frame pointers
 * are not omitted by the compiler.
 */
sos_ui32_t sos_backtrace(const struct sos_cpu_state *cpu_state,
			 sos_ui32_t max_depth,
			 sos_vaddr_t stack_bottom,
			 sos_size_t stack_size,
			 sos_backtrace_callback_t * backtracer,
			 void *custom_arg)
{
  int depth;  /* NOTE(review): int compared against the unsigned
                 max_depth below; fine for sane depths */
  sos_vaddr_t callee_PC, caller_frame;

  /* User-space stacks are not walked: their frames cannot be trusted
     (and may not even be mapped) */
  if ((NULL != cpu_state)
      &&
      (TRUE == sos_cpu_context_is_in_user_mode(cpu_state)))
    {
      return 0;
    }

  if (cpu_state)
    {
      /* Start from the saved context */
      callee_PC = cpu_state->eip;
      caller_frame = cpu_state->ebp;
    }
  else
    {
      /* Start from our own caller.  NOTE(review): GCC documents
         __builtin_frame_address with a nonzero argument as
         unreliable; acceptable for a best-effort debug aid */
      callee_PC = (sos_vaddr_t)__builtin_return_address(0);
      caller_frame = (sos_vaddr_t)__builtin_frame_address(1);
    }

  for(depth=0 ; depth < max_depth ; depth ++)
    {
      /* caller_frame + 8 skips the saved ebp and return address */
      backtracer(callee_PC, caller_frame + 8, depth, custom_arg);

      /* Stop before dereferencing a frame outside the stack bounds */
      if ( (caller_frame < stack_bottom)
	   || (caller_frame + 4 >= stack_bottom + stack_size) )
	return depth;

      /* Unwind one frame: return address, then caller's saved ebp */
      callee_PC = *((sos_vaddr_t*) (caller_frame + 4));
      caller_frame = *((sos_vaddr_t*) caller_frame);
    }

  return depth;
}
920
921
922
923
924
925
926
927
928
929
930
931 void
932 sos_cpu_context_update_kernel_tss(struct sos_cpu_state *next_ctxt)
933 {
934
935 if (sos_cpu_context_is_in_user_mode(next_ctxt))
936 {
937
938
939
940
941
942
943
944
945
946
947
948 kernel_tss.esp0 = ((sos_vaddr_t)next_ctxt)
949 + sizeof(struct sos_cpu_ustate);
950
951
952
953 }
954 else
955 {
956
957
958 }
959 }