Diff markup
001 !! 001
002 !! 002
003 003
004 004
005 005
006 006
007 007
008 008
009 009
010 010
011 011
012 012
013 013
014 014
015 015
016 016
017 017
018 018
019 019
020 020
021 #include <sos/assert.h> 021 #include <sos/assert.h>
022 #include <sos/klibc.h> 022 #include <sos/klibc.h>
023 #include <drivers/bochs.h> 023 #include <drivers/bochs.h>
024 #include <drivers/x86_videomem.h> 024 #include <drivers/x86_videomem.h>
025 #include <hwcore/segment.h> 025 #include <hwcore/segment.h>
>> 026 #include <hwcore/gdt.h>
>> 027 #include <sos/uaccess.h>
026 028
027 #include "cpu_context.h" 029 #include "cpu_context.h"
028 030
029 031
030 032
031 033
032 034
033 035
034 036
035 037
036 038
037 039
038 040
039 041
040 042
041 043
042 044
043 struct sos_cpu_kstate { !! 045 struct sos_cpu_state {
044 046
045 047
046 048
047 sos_ui16_t gs; 049 sos_ui16_t gs;
048 sos_ui16_t fs; 050 sos_ui16_t fs;
049 sos_ui16_t es; 051 sos_ui16_t es;
050 sos_ui16_t ds; 052 sos_ui16_t ds;
051 sos_ui16_t ss; !! 053 sos_ui16_t cpl0_ss;
>> 054
>> 055
052 sos_ui16_t alignment_padding; 056 sos_ui16_t alignment_padding;
053 sos_ui32_t eax; 057 sos_ui32_t eax;
054 sos_ui32_t ebx; 058 sos_ui32_t ebx;
055 sos_ui32_t ecx; 059 sos_ui32_t ecx;
056 sos_ui32_t edx; 060 sos_ui32_t edx;
057 sos_ui32_t esi; 061 sos_ui32_t esi;
058 sos_ui32_t edi; 062 sos_ui32_t edi;
059 sos_ui32_t ebp; 063 sos_ui32_t ebp;
060 064
061 065
062 sos_ui32_t error_code; 066 sos_ui32_t error_code;
063 sos_vaddr_t eip; 067 sos_vaddr_t eip;
064 sos_ui32_t cs; !! 068 sos_ui32_t cs;
>> 069
065 sos_ui32_t eflags; 070 sos_ui32_t eflags;
066 071
067 072
068 } __attribute__((packed)); 073 } __attribute__((packed));
069 074
070 075
>> 076
>> 077
>> 078
>> 079
>> 080
>> 081
>> 082
>> 083
>> 084
>> 085
>> 086
>> 087
>> 088
/*
 * The CS value is saved as a 32-bit word on the stack, but only its
 * low-order 16 bits hold the actual code-segment selector: extract
 * them.
 */
#define GET_CPU_CS_REGISTER_VALUE(pushed_ui32_cs_value) \
  ( (pushed_ui32_cs_value) & 0xffff )
>> 091
>> 092
>> 093
>> 094
>> 095
>> 096 struct sos_cpu_kstate
>> 097 {
>> 098 struct sos_cpu_state regs;
>> 099 } __attribute__((packed));
>> 100
>> 101
>> 102
>> 103
>> 104
>> 105
>> 106
>> 107
>> 108
>> 109
>> 110 struct sos_cpu_ustate
>> 111 {
>> 112 struct sos_cpu_state regs;
>> 113 struct
>> 114 {
>> 115 sos_ui32_t cpl3_esp;
>> 116 sos_ui16_t cpl3_ss;
>> 117 };
>> 118 } __attribute__((packed));
>> 119
>> 120
>> 121
>> 122
>> 123
>> 124
>> 125
>> 126
>> 127
>> 128
>> 129
>> 130
>> 131 struct x86_tss {
>> 132
>> 133
>> 134
>> 135
>> 136
>> 137
>> 138
>> 139
>> 140
>> 141
>> 142 sos_ui16_t back_link;
>> 143
>> 144 sos_ui16_t reserved1;
>> 145
>> 146
>> 147 sos_vaddr_t esp0;
>> 148 sos_ui16_t ss0;
>> 149
>> 150 sos_ui16_t reserved2;
>> 151
>> 152
>> 153 sos_vaddr_t esp1;
>> 154 sos_ui16_t ss1;
>> 155
>> 156 sos_ui16_t reserved3;
>> 157
>> 158
>> 159 sos_vaddr_t esp2;
>> 160 sos_ui16_t ss2;
>> 161
>> 162 sos_ui16_t reserved4;
>> 163
>> 164
>> 165 sos_vaddr_t cr3;
>> 166 sos_vaddr_t eip;
>> 167 sos_ui32_t eflags;
>> 168 sos_ui32_t eax;
>> 169 sos_ui32_t ecx;
>> 170 sos_ui32_t edx;
>> 171 sos_ui32_t ebx;
>> 172 sos_ui32_t esp;
>> 173 sos_ui32_t ebp;
>> 174 sos_ui32_t esi;
>> 175 sos_ui32_t edi;
>> 176
>> 177
>> 178 sos_ui16_t es;
>> 179 sos_ui16_t reserved5;
>> 180
>> 181
>> 182 sos_ui16_t cs;
>> 183 sos_ui16_t reserved6;
>> 184
>> 185
>> 186 sos_ui16_t ss;
>> 187 sos_ui16_t reserved7;
>> 188
>> 189
>> 190 sos_ui16_t ds;
>> 191 sos_ui16_t reserved8;
>> 192
>> 193
>> 194 sos_ui16_t fs;
>> 195 sos_ui16_t reserved9;
>> 196
>> 197
>> 198 sos_ui16_t gs;
>> 199 sos_ui16_t reserved10;
>> 200
>> 201
>> 202 sos_ui16_t ldtr;
>> 203 sos_ui16_t reserved11;
>> 204
>> 205
>> 206 sos_ui16_t debug_trap_flag :1;
>> 207 sos_ui16_t reserved12 :15;
>> 208 sos_ui16_t iomap_base_addr;
>> 209
>> 210
>> 211 } __attribute__((packed, aligned(128)));
>> 212
>> 213
>> 214 static struct x86_tss kernel_tss;
>> 215
>> 216
>> 217 sos_ret_t sos_cpu_context_subsystem_setup()
>> 218 {
>> 219
>> 220 memset(&kernel_tss, 0x0, sizeof(kernel_tss));
>> 221
>> 222
>> 223
>> 224
>> 225
>> 226
>> 227
>> 228
>> 229
>> 230
>> 231
>> 232 kernel_tss.ss0 = SOS_BUILD_SEGMENT_REG_VALUE(0, FALSE, SOS_SEG_KDATA);
>> 233
>> 234
>> 235 sos_gdt_register_kernel_tss((sos_vaddr_t) &kernel_tss);
>> 236
>> 237 return SOS_OK;
>> 238 }
>> 239
>> 240
>> 241
>> 242
>> 243
>> 244
>> 245
071 static void core_routine (sos_cpu_kstate_funct 246 static void core_routine (sos_cpu_kstate_function_arg1_t *start_func,
072 sos_ui32_t start_arg 247 sos_ui32_t start_arg,
073 sos_cpu_kstate_funct 248 sos_cpu_kstate_function_arg1_t *exit_func,
074 sos_ui32_t exit_arg) 249 sos_ui32_t exit_arg)
075 __attribute__((noreturn)); 250 __attribute__((noreturn));
076 251
077 static void core_routine (sos_cpu_kstate_funct 252 static void core_routine (sos_cpu_kstate_function_arg1_t *start_func,
078 sos_ui32_t start_arg 253 sos_ui32_t start_arg,
079 sos_cpu_kstate_funct 254 sos_cpu_kstate_function_arg1_t *exit_func,
080 sos_ui32_t exit_arg) 255 sos_ui32_t exit_arg)
081 { 256 {
082 start_func(start_arg); 257 start_func(start_arg);
083 exit_func(exit_arg); 258 exit_func(exit_arg);
084 259
085 SOS_ASSERT_FATAL(! "The exit function of the 260 SOS_ASSERT_FATAL(! "The exit function of the thread should NOT return !");
086 for(;;); 261 for(;;);
087 } 262 }
088 263
089 264
090 sos_ret_t sos_cpu_kstate_init(struct sos_cpu_k !! 265 sos_ret_t sos_cpu_kstate_init(struct sos_cpu_state **ctxt,
091 sos_cpu_kstate_f 266 sos_cpu_kstate_function_arg1_t *start_func,
092 sos_ui32_t star 267 sos_ui32_t start_arg,
093 sos_vaddr_t stac 268 sos_vaddr_t stack_bottom,
094 sos_size_t stac 269 sos_size_t stack_size,
095 sos_cpu_kstate_f 270 sos_cpu_kstate_function_arg1_t *exit_func,
096 sos_ui32_t exit 271 sos_ui32_t exit_arg)
097 { 272 {
>> 273
>> 274 struct sos_cpu_kstate *kctxt;
>> 275
098 276
099 277
100 278
101 279
102 280
103 281
104 282
105 283
106 284
107 285
108 286
109 287
110 288
111 289
112 290
113 291
114 292
115 293
116 294
117 295
118 296
119 297
120 sos_vaddr_t tmp_vaddr = stack_bottom + stack 298 sos_vaddr_t tmp_vaddr = stack_bottom + stack_size;
121 sos_ui32_t *stack = (sos_ui32_t*)tmp_vaddr; 299 sos_ui32_t *stack = (sos_ui32_t*)tmp_vaddr;
122 300
123 301
124 #ifdef SOS_CPU_KSTATE_DETECT_UNINIT_VARS !! 302 #ifdef SOS_CPU_STATE_DETECT_UNINIT_KERNEL_VARS
125 memset((void*)stack_bottom, SOS_CPU_KSTATE_S !! 303 memset((void*)stack_bottom, SOS_CPU_STATE_STACK_POISON, stack_size);
126 #elif defined(SOS_CPU_KSTATE_DETECT_STACK_OVER !! 304 #elif defined(SOS_CPU_STATE_DETECT_KERNEL_STACK_OVERFLOW)
127 sos_cpu_kstate_prepare_detect_stack_overflow !! 305 sos_cpu_state_prepare_detect_kernel_stack_overflow(stack_bottom, stack_size);
128 #endif 306 #endif
129 307
130 308
131 309
132 *(--stack) = exit_arg; 310 *(--stack) = exit_arg;
133 *(--stack) = (sos_ui32_t)exit_func; 311 *(--stack) = (sos_ui32_t)exit_func;
134 *(--stack) = start_arg; 312 *(--stack) = start_arg;
135 *(--stack) = (sos_ui32_t)start_func; 313 *(--stack) = (sos_ui32_t)start_func;
136 *(--stack) = 0; 314 *(--stack) = 0;
137 315
138 316
139 317
140 318
141 319
142 320
143 321
144 322
145 323
146 tmp_vaddr = ((sos_vaddr_t)stack) - sizeof(s 324 tmp_vaddr = ((sos_vaddr_t)stack) - sizeof(struct sos_cpu_kstate);
147 *ctxt = (struct sos_cpu_kstate*)tmp_vaddr; !! 325 kctxt = (struct sos_cpu_kstate*)tmp_vaddr;
148 326
149 327
150 memset(*ctxt, 0x0, sizeof(struct sos_cpu_kst !! 328 memset(kctxt, 0x0, sizeof(struct sos_cpu_kstate));
151 329
152 330
153 331
154 (*ctxt)->eip = (sos_ui32_t)core_routine; !! 332 kctxt->regs.eip = (sos_ui32_t)core_routine;
>> 333
>> 334
>> 335 kctxt->regs.cs
>> 336 = SOS_BUILD_SEGMENT_REG_VALUE(0, FALSE, SOS_SEG_KCODE);
>> 337 kctxt->regs.ds
>> 338 = SOS_BUILD_SEGMENT_REG_VALUE(0, FALSE, SOS_SEG_KDATA);
>> 339 kctxt->regs.es
>> 340 = SOS_BUILD_SEGMENT_REG_VALUE(0, FALSE, SOS_SEG_KDATA);
>> 341 kctxt->regs.cpl0_ss
>> 342 = SOS_BUILD_SEGMENT_REG_VALUE(0, FALSE, SOS_SEG_KDATA);
>> 343
>> 344
>> 345
>> 346 kctxt->regs.eflags = (1 << 9);
>> 347
>> 348
>> 349 *ctxt = (struct sos_cpu_state*) kctxt;
>> 350
>> 351 return SOS_OK;
>> 352 }
>> 353
>> 354
>> 355
>> 356
>> 357
>> 358
>> 359
>> 360
>> 361 static sos_ret_t cpu_ustate_init(struct sos_cpu_state **ctxt,
>> 362 const struct sos_cpu_state *model_uctxt,
>> 363 sos_uaddr_t user_start_PC,
>> 364 sos_ui32_t user_start_arg1,
>> 365 sos_ui32_t user_start_arg2,
>> 366 sos_uaddr_t user_initial_SP,
>> 367 sos_vaddr_t kernel_stack_bottom,
>> 368 sos_size_t kernel_stack_size)
>> 369 {
>> 370
>> 371 struct sos_cpu_ustate *uctxt;
>> 372
>> 373
>> 374
>> 375
>> 376
>> 377
>> 378
>> 379 sos_vaddr_t uctxt_vaddr = kernel_stack_bottom
>> 380 + kernel_stack_size
>> 381 - sizeof(struct sos_cpu_ustate);
>> 382 uctxt = (struct sos_cpu_ustate*)uctxt_vaddr;
>> 383
>> 384 if (model_uctxt && !sos_cpu_context_is_in_user_mode(model_uctxt))
>> 385 return -SOS_EINVAL;
>> 386
>> 387
>> 388 #ifdef SOS_CPU_STATE_DETECT_UNINIT_KERNEL_VARS
>> 389 memset((void*)kernel_stack_bottom,
>> 390 SOS_CPU_STATE_STACK_POISON,
>> 391 kernel_stack_size);
>> 392 #elif defined(SOS_CPU_STATE_DETECT_KERNEL_STACK_OVERFLOW)
>> 393 sos_cpu_state_prepare_detect_kernel_stack_overflow(kernel_stack_bottom,
>> 394 kernel_stack_size);
>> 395 #endif
>> 396
>> 397
>> 398
>> 399
>> 400
>> 401
>> 402
>> 403
>> 404 if (! model_uctxt)
>> 405 {
>> 406 memset(uctxt, 0x0, sizeof(struct sos_cpu_ustate));
>> 407
>> 408
>> 409
>> 410 uctxt->regs.eip = (sos_ui32_t)user_start_PC;
>> 411
>> 412
>> 413 uctxt->cpl3_esp = user_initial_SP;
>> 414 }
>> 415 else
>> 416 memcpy(uctxt, model_uctxt, sizeof(struct sos_cpu_ustate));
>> 417
>> 418
>> 419
>> 420 uctxt->regs.eax = user_start_arg1;
>> 421
>> 422
>> 423 if (! model_uctxt)
>> 424 uctxt->regs.ebx = user_start_arg2;
155 425
156 426
157 (*ctxt)->cs = SOS_BUILD_SEGMENT_REG_VALUE(0 !! 427 uctxt->regs.cs
158 (*ctxt)->ds = SOS_BUILD_SEGMENT_REG_VALUE(0 !! 428 = SOS_BUILD_SEGMENT_REG_VALUE(3, FALSE, SOS_SEG_UCODE);
159 (*ctxt)->es = SOS_BUILD_SEGMENT_REG_VALUE(0 !! 429 uctxt->regs.ds
160 (*ctxt)->ss = SOS_BUILD_SEGMENT_REG_VALUE(0 !! 430 = SOS_BUILD_SEGMENT_REG_VALUE(3, FALSE, SOS_SEG_UDATA);
>> 431 uctxt->regs.es
>> 432 = SOS_BUILD_SEGMENT_REG_VALUE(3, FALSE, SOS_SEG_UDATA);
>> 433 uctxt->cpl3_ss
>> 434 = SOS_BUILD_SEGMENT_REG_VALUE(3, FALSE, SOS_SEG_UDATA);
>> 435
>> 436
>> 437
>> 438
>> 439
>> 440 uctxt->regs.cpl0_ss
>> 441 = SOS_BUILD_SEGMENT_REG_VALUE(0, FALSE, SOS_SEG_KDATA);
>> 442
161 443
162 444
163 445
164 (*ctxt)->eflags = (1 << 9); !! 446 uctxt->regs.eflags = (1 << 9);
>> 447
>> 448
>> 449 *ctxt = (struct sos_cpu_state*) uctxt;
165 450
166 return SOS_OK; 451 return SOS_OK;
167 } 452 }
168 453
169 454
170 #if defined(SOS_CPU_KSTATE_DETECT_STACK_OVERFL !! 455 sos_ret_t sos_cpu_ustate_init(struct sos_cpu_state **ctxt,
>> 456 sos_uaddr_t user_start_PC,
>> 457 sos_ui32_t user_start_arg1,
>> 458 sos_ui32_t user_start_arg2,
>> 459 sos_uaddr_t user_initial_SP,
>> 460 sos_vaddr_t kernel_stack_bottom,
>> 461 sos_size_t kernel_stack_size)
>> 462 {
>> 463 return cpu_ustate_init(ctxt, NULL,
>> 464 user_start_PC,
>> 465 user_start_arg1, user_start_arg2,
>> 466 user_initial_SP,
>> 467 kernel_stack_bottom, kernel_stack_size);
>> 468 }
>> 469
>> 470
>> 471 sos_ret_t sos_cpu_ustate_duplicate(struct sos_cpu_state **ctxt,
>> 472 const struct sos_cpu_state *model_uctxt,
>> 473 sos_ui32_t user_retval,
>> 474 sos_vaddr_t kernel_stack_bottom,
>> 475 sos_size_t kernel_stack_size)
>> 476 {
>> 477 return cpu_ustate_init(ctxt, model_uctxt,
>> 478 0,
>> 479 user_retval, 0,
>> 480 0,
>> 481 kernel_stack_bottom, kernel_stack_size);
>> 482 }
>> 483
>> 484
>> 485 sos_ret_t
>> 486 sos_cpu_context_is_in_user_mode(const struct sos_cpu_state *ctxt)
>> 487 {
>> 488
>> 489
>> 490 switch (GET_CPU_CS_REGISTER_VALUE(ctxt->cs))
>> 491 {
>> 492 case SOS_BUILD_SEGMENT_REG_VALUE(3, FALSE, SOS_SEG_UCODE):
>> 493 return TRUE;
>> 494 break;
>> 495
>> 496 case SOS_BUILD_SEGMENT_REG_VALUE(0, FALSE, SOS_SEG_KCODE):
>> 497 return FALSE;
>> 498 break;
>> 499
>> 500 default:
>> 501 SOS_FATAL_ERROR("Invalid saved context Code segment register: 0x%x (k=%x, u=%x) !",
>> 502 (unsigned) GET_CPU_CS_REGISTER_VALUE(ctxt->cs),
>> 503 SOS_BUILD_SEGMENT_REG_VALUE(0, FALSE, SOS_SEG_KCODE),
>> 504 SOS_BUILD_SEGMENT_REG_VALUE(3, FALSE, SOS_SEG_UCODE));
>> 505 break;
>> 506 }
>> 507
>> 508
>> 509 return -SOS_EFATAL;
>> 510 }
>> 511
>> 512
#if defined(SOS_CPU_STATE_DETECT_KERNEL_STACK_OVERFLOW)
/*
 * Poison the bottom of a kernel stack so that a later check can tell
 * whether the stack ever grew into the guard area.
 * NOTE(review): ctxt is unused here, and the call site in
 * sos_cpu_kstate_init passes only (stack_bottom, stack_size) --
 * verify this parameter list against the declaration in
 * cpu_context.h.
 */
void
sos_cpu_state_prepare_detect_kernel_stack_overflow(const struct sos_cpu_state *ctxt,
						   sos_vaddr_t stack_bottom,
						   sos_size_t stack_size)
{
  sos_size_t poison_size = SOS_CPU_STATE_DETECT_KERNEL_STACK_OVERFLOW;
  if (poison_size > stack_size)
    poison_size = stack_size;

  memset((void*)stack_bottom, SOS_CPU_STATE_STACK_POISON, poison_size);
}


/*
 * Fatal-assert that the saved context still lies inside its kernel
 * stack and that the poisoned guard area at the bottom is intact.
 */
void
sos_cpu_state_detect_kernel_stack_overflow(const struct sos_cpu_state *ctxt,
					   sos_vaddr_t stack_bottom,
					   sos_size_t stack_size)
{
  unsigned char *c;
  int i;

  /* The saved context itself must fit inside the stack area */
  SOS_ASSERT_FATAL(((sos_vaddr_t)ctxt) >= stack_bottom);
  SOS_ASSERT_FATAL(((sos_vaddr_t)ctxt) + sizeof(struct sos_cpu_kstate)
		   <= stack_bottom + stack_size);

  /* Every poison byte at the stack bottom must be untouched */
  for (c = (unsigned char*) stack_bottom, i = 0 ;
       (i < SOS_CPU_STATE_DETECT_KERNEL_STACK_OVERFLOW) && (i < stack_size) ;
       c++, i++)
    {
      SOS_ASSERT_FATAL(SOS_CPU_STATE_STACK_POISON == *c);
    }
}
#endif
203 552
204 553
205 sos_vaddr_t sos_cpu_kstate_get_PC(const struct !! 554
>> 555
>> 556
>> 557
>> 558
>> 559 sos_vaddr_t sos_cpu_context_get_PC(const struct sos_cpu_state *ctxt)
206 { 560 {
207 SOS_ASSERT_FATAL(NULL != ctxt); 561 SOS_ASSERT_FATAL(NULL != ctxt);
>> 562
>> 563
>> 564
208 return ctxt->eip; 565 return ctxt->eip;
209 } 566 }
210 567
211 568
212 sos_vaddr_t sos_cpu_kstate_get_SP(const struct !! 569 sos_vaddr_t sos_cpu_context_get_SP(const struct sos_cpu_state *ctxt)
213 { 570 {
214 SOS_ASSERT_FATAL(NULL != ctxt); 571 SOS_ASSERT_FATAL(NULL != ctxt);
>> 572
>> 573
>> 574
>> 575
>> 576 if (TRUE == sos_cpu_context_is_in_user_mode(ctxt))
>> 577 {
>> 578 struct sos_cpu_ustate * uctxt = (struct sos_cpu_ustate*)ctxt;
>> 579 return uctxt->cpl3_esp;
>> 580 }
>> 581
>> 582
>> 583
>> 584
215 return (sos_vaddr_t)ctxt; 585 return (sos_vaddr_t)ctxt;
216 } 586 }
217 587
218 588
219 void sos_cpu_kstate_dump(const struct sos_cpu_ !! 589 sos_ret_t
>> 590 sos_cpu_context_set_EX_return_address(struct sos_cpu_state *ctxt,
>> 591 sos_vaddr_t ret_vaddr)
>> 592 {
>> 593 ctxt->eip = ret_vaddr;
>> 594 return SOS_OK;
>> 595 }
>> 596
>> 597
>> 598 void sos_cpu_context_dump(const struct sos_cpu_state *ctxt)
220 { 599 {
221 char buf[128]; 600 char buf[128];
>> 601
222 snprintf(buf, sizeof(buf), 602 snprintf(buf, sizeof(buf),
223 "CPU: eip=%x esp=%x eflags=%x cs=%x !! 603 "CPU: eip=%x esp0=%x eflags=%x cs=%x ds=%x ss0=%x err=%x",
224 (unsigned)ctxt->eip, (unsigned)ctxt 604 (unsigned)ctxt->eip, (unsigned)ctxt, (unsigned)ctxt->eflags,
225 (unsigned)ctxt->cs, (unsigned)ctxt- !! 605 (unsigned)GET_CPU_CS_REGISTER_VALUE(ctxt->cs), (unsigned)ctxt->ds,
>> 606 (unsigned)ctxt->cpl0_ss,
226 (unsigned)ctxt->error_code); 607 (unsigned)ctxt->error_code);
>> 608 if (TRUE == sos_cpu_context_is_in_user_mode(ctxt))
>> 609 {
>> 610 struct sos_cpu_ustate * uctxt = (struct sos_cpu_ustate*)ctxt;
>> 611 snprintf(buf, sizeof(buf),
>> 612 "%s esp3=%x ss3=%x",
>> 613 buf, (unsigned)uctxt->cpl3_esp, (unsigned)uctxt->cpl3_ss);
>> 614 }
>> 615 else
>> 616 snprintf(buf, sizeof(buf), "%s [KERNEL MODE]", buf);
>> 617
227 sos_bochs_putstring(buf); sos_bochs_putstrin 618 sos_bochs_putstring(buf); sos_bochs_putstring("\n");
228 sos_x86_videomem_putstring(23, 0, 619 sos_x86_videomem_putstring(23, 0,
229 SOS_X86_VIDEO_FG_BLA !! 620 SOS_X86_VIDEO_FG_BLACK | SOS_X86_VIDEO_BG_LTGRAY,
230 buf); !! 621 buf);
231 } 622 }
232 623
233 624
234 sos_ui32_t sos_cpu_kstate_get_EX_info(const st !! 625
>> 626
>> 627
>> 628
>> 629
>> 630 sos_ui32_t sos_cpu_context_get_EX_info(const struct sos_cpu_state *ctxt)
235 { 631 {
236 SOS_ASSERT_FATAL(NULL != ctxt); 632 SOS_ASSERT_FATAL(NULL != ctxt);
237 return ctxt->error_code; 633 return ctxt->error_code;
238 } 634 }
239 635
240 636
241 sos_vaddr_t 637 sos_vaddr_t
242 sos_cpu_kstate_get_EX_faulting_vaddr(const str !! 638 sos_cpu_context_get_EX_faulting_vaddr(const struct sos_cpu_state *ctxt)
243 { 639 {
244 sos_ui32_t cr2; 640 sos_ui32_t cr2;
245 641
246 !! 642
247 !! 643
>> 644
>> 645
>> 646
>> 647
>> 648
>> 649
>> 650
>> 651
>> 652
>> 653
>> 654
248 asm volatile ("movl %%cr2, %0" 655 asm volatile ("movl %%cr2, %0"
249 :"=r"(cr2) 656 :"=r"(cr2)
250 : ); 657 : );
251 658
252 return cr2; 659 return cr2;
253 } 660 }
254 661
255 662
256 sos_ui32_t sos_backtrace(const struct sos_cpu_ !! 663
>> 664
>> 665
>> 666
>> 667
>> 668
>> 669
>> 670
>> 671
>> 672
>> 673
>> 674
>> 675
>> 676
>> 677
>> 678 inline
>> 679 sos_ret_t sos_syscall_get3args(const struct sos_cpu_state *user_ctxt,
>> 680 unsigned int *arg1,
>> 681 unsigned int *arg2,
>> 682 unsigned int *arg3)
>> 683 {
>> 684 *arg1 = user_ctxt->ebx;
>> 685 *arg2 = user_ctxt->ecx;
>> 686 *arg3 = user_ctxt->edx;
>> 687 return SOS_OK;
>> 688 }
>> 689
>> 690
>> 691 sos_ret_t sos_syscall_get1arg(const struct sos_cpu_state *user_ctxt,
>> 692 unsigned int *arg1)
>> 693 {
>> 694 unsigned int unused;
>> 695 return sos_syscall_get3args(user_ctxt, arg1, & unused, & unused);
>> 696 }
>> 697
>> 698
>> 699 sos_ret_t sos_syscall_get2args(const struct sos_cpu_state *user_ctxt,
>> 700 unsigned int *arg1,
>> 701 unsigned int *arg2)
>> 702 {
>> 703 unsigned int unused;
>> 704 return sos_syscall_get3args(user_ctxt, arg1, arg2, & unused);
>> 705 }
>> 706
>> 707
>> 708
>> 709
>> 710
>> 711
>> 712
>> 713 sos_ret_t sos_syscall_get4args(const struct sos_cpu_state *user_ctxt,
>> 714 unsigned int *arg1,
>> 715 unsigned int *arg2,
>> 716 unsigned int *arg3,
>> 717 unsigned int *arg4)
>> 718 {
>> 719 sos_uaddr_t uaddr_other_args;
>> 720 unsigned int other_args[2];
>> 721 sos_ret_t retval;
>> 722
>> 723
>> 724
>> 725 retval = sos_syscall_get3args(user_ctxt, arg1, arg2,
>> 726 (unsigned int *)& uaddr_other_args);
>> 727 if (SOS_OK != retval)
>> 728 return retval;
>> 729
>> 730
>> 731
>> 732 retval = sos_memcpy_from_user((sos_vaddr_t)other_args,
>> 733 (sos_uaddr_t)uaddr_other_args,
>> 734 sizeof(other_args));
>> 735 if (sizeof(other_args) != retval)
>> 736 return -SOS_EFAULT;
>> 737
>> 738 *arg3 = other_args[0];
>> 739 *arg4 = other_args[1];
>> 740 return SOS_OK;
>> 741 }
>> 742
>> 743
>> 744 sos_ret_t sos_syscall_get5args(const struct sos_cpu_state *user_ctxt,
>> 745 unsigned int *arg1,
>> 746 unsigned int *arg2,
>> 747 unsigned int *arg3,
>> 748 unsigned int *arg4,
>> 749 unsigned int *arg5)
>> 750 {
>> 751 sos_uaddr_t uaddr_other_args;
>> 752 unsigned int other_args[3];
>> 753 sos_ret_t retval;
>> 754
>> 755
>> 756
>> 757 retval = sos_syscall_get3args(user_ctxt, arg1, arg2,
>> 758 (unsigned int *)& uaddr_other_args);
>> 759 if (SOS_OK != retval)
>> 760 return retval;
>> 761
>> 762
>> 763
>> 764 retval = sos_memcpy_from_user((sos_vaddr_t)other_args,
>> 765 (sos_uaddr_t)uaddr_other_args,
>> 766 sizeof(other_args));
>> 767 if (sizeof(other_args) != retval)
>> 768 return -SOS_EFAULT;
>> 769
>> 770 *arg3 = other_args[0];
>> 771 *arg4 = other_args[1];
>> 772 *arg5 = other_args[2];
>> 773 return SOS_OK;
>> 774 }
>> 775
>> 776
>> 777 sos_ret_t sos_syscall_get6args(const struct sos_cpu_state *user_ctxt,
>> 778 unsigned int *arg1,
>> 779 unsigned int *arg2,
>> 780 unsigned int *arg3,
>> 781 unsigned int *arg4,
>> 782 unsigned int *arg5,
>> 783 unsigned int *arg6)
>> 784 {
>> 785 sos_uaddr_t uaddr_other_args;
>> 786 unsigned int other_args[4];
>> 787 sos_ret_t retval;
>> 788
>> 789
>> 790
>> 791 retval = sos_syscall_get3args(user_ctxt, arg1, arg2,
>> 792 (unsigned int *)& uaddr_other_args);
>> 793 if (SOS_OK != retval)
>> 794 return retval;
>> 795
>> 796
>> 797
>> 798 retval = sos_memcpy_from_user((sos_vaddr_t)other_args,
>> 799 (sos_uaddr_t)uaddr_other_args,
>> 800 sizeof(other_args));
>> 801 if (sizeof(other_args) != retval)
>> 802 return -SOS_EFAULT;
>> 803
>> 804 *arg3 = other_args[0];
>> 805 *arg4 = other_args[1];
>> 806 *arg5 = other_args[2];
>> 807 *arg6 = other_args[3];
>> 808 return SOS_OK;
>> 809 }
>> 810
>> 811
>> 812 sos_ret_t sos_syscall_get7args(const struct sos_cpu_state *user_ctxt,
>> 813 unsigned int *arg1,
>> 814 unsigned int *arg2,
>> 815 unsigned int *arg3,
>> 816 unsigned int *arg4,
>> 817 unsigned int *arg5,
>> 818 unsigned int *arg6,
>> 819 unsigned int *arg7)
>> 820 {
>> 821 sos_uaddr_t uaddr_other_args;
>> 822 unsigned int other_args[5];
>> 823 sos_ret_t retval;
>> 824
>> 825
>> 826
>> 827 retval = sos_syscall_get3args(user_ctxt, arg1, arg2,
>> 828 (unsigned int *)& uaddr_other_args);
>> 829 if (SOS_OK != retval)
>> 830 return retval;
>> 831
>> 832
>> 833
>> 834 retval = sos_memcpy_from_user((sos_vaddr_t)other_args,
>> 835 (sos_uaddr_t)uaddr_other_args,
>> 836 sizeof(other_args));
>> 837 if (sizeof(other_args) != retval)
>> 838 return -SOS_EFAULT;
>> 839
>> 840 *arg3 = other_args[0];
>> 841 *arg4 = other_args[1];
>> 842 *arg5 = other_args[2];
>> 843 *arg6 = other_args[3];
>> 844 *arg7 = other_args[4];
>> 845 return SOS_OK;
>> 846 }
>> 847
>> 848
>> 849 sos_ret_t sos_syscall_get8args(const struct sos_cpu_state *user_ctxt,
>> 850 unsigned int *arg1,
>> 851 unsigned int *arg2,
>> 852 unsigned int *arg3,
>> 853 unsigned int *arg4,
>> 854 unsigned int *arg5,
>> 855 unsigned int *arg6,
>> 856 unsigned int *arg7,
>> 857 unsigned int *arg8)
>> 858 {
>> 859 sos_uaddr_t uaddr_other_args;
>> 860 unsigned int other_args[6];
>> 861 sos_ret_t retval;
>> 862
>> 863
>> 864
>> 865 retval = sos_syscall_get3args(user_ctxt, arg1, arg2,
>> 866 (unsigned int *)& uaddr_other_args);
>> 867 if (SOS_OK != retval)
>> 868 return retval;
>> 869
>> 870
>> 871
>> 872 retval = sos_memcpy_from_user((sos_vaddr_t)other_args,
>> 873 (sos_uaddr_t)uaddr_other_args,
>> 874 sizeof(other_args));
>> 875 if (sizeof(other_args) != retval)
>> 876 return -SOS_EFAULT;
>> 877
>> 878 *arg3 = other_args[0];
>> 879 *arg4 = other_args[1];
>> 880 *arg5 = other_args[2];
>> 881 *arg6 = other_args[3];
>> 882 *arg7 = other_args[4];
>> 883 *arg8 = other_args[5];
>> 884 return SOS_OK;
>> 885 }
>> 886
>> 887
>> 888
>> 889
>> 890
>> 891
>> 892
>> 893 sos_ui32_t sos_backtrace(const struct sos_cpu_state *cpu_state,
257 sos_ui32_t max_depth, 894 sos_ui32_t max_depth,
258 sos_vaddr_t stack_bot 895 sos_vaddr_t stack_bottom,
259 sos_size_t stack_size 896 sos_size_t stack_size,
260 sos_backtrace_callbac 897 sos_backtrace_callback_t * backtracer,
261 void *custom_arg) 898 void *custom_arg)
262 { 899 {
263 int depth; 900 int depth;
264 sos_vaddr_t callee_PC, caller_frame; 901 sos_vaddr_t callee_PC, caller_frame;
265 902
>> 903
>> 904 if ((NULL != cpu_state)
>> 905 &&
>> 906 (TRUE == sos_cpu_context_is_in_user_mode(cpu_state)))
>> 907 {
>> 908 return 0;
>> 909 }
>> 910
266 911
267 912
268 913
269 914
270 915
271 916
272 917
273 918
274 919
275 920
276 921
277 922
278 923
279 924
280 925
281 926
282 927
283 928
284 929
285 930
286 931
287 932
288 933
289 934
290 935
291 936
292 937
293 938
294 939
295 if (cpu_kstate) !! 940 if (cpu_state)
296 { 941 {
297 callee_PC = cpu_kstate->eip; !! 942 callee_PC = cpu_state->eip;
298 caller_frame = cpu_kstate->ebp; !! 943 caller_frame = cpu_state->ebp;
299 } 944 }
300 else 945 else
301 { 946 {
302 947
303 callee_PC = (sos_vaddr_t)__builtin_re 948 callee_PC = (sos_vaddr_t)__builtin_return_address(0);
304 caller_frame = (sos_vaddr_t)__builtin_fr 949 caller_frame = (sos_vaddr_t)__builtin_frame_address(1);
305 } 950 }
306 951
307 for(depth=0 ; depth < max_depth ; depth ++) 952 for(depth=0 ; depth < max_depth ; depth ++)
308 { 953 {
309 954
310 backtracer(callee_PC, caller_frame + 8, 955 backtracer(callee_PC, caller_frame + 8, depth, custom_arg);
311 956
312 957
313 if ( (caller_frame < stack_bottom) 958 if ( (caller_frame < stack_bottom)
314 || (caller_frame + 4 >= stack_botto 959 || (caller_frame + 4 >= stack_bottom + stack_size) )
315 return depth; 960 return depth;
316 961
317 962
318 callee_PC = *((sos_vaddr_t*) (caller_ 963 callee_PC = *((sos_vaddr_t*) (caller_frame + 4));
319 caller_frame = *((sos_vaddr_t*) caller_f 964 caller_frame = *((sos_vaddr_t*) caller_frame);
320 } 965 }
321 966
322 return depth; 967 return depth;
>> 968 }
>> 969
>> 970
>> 971
>> 972
>> 973
>> 974
>> 975
>> 976
>> 977
>> 978
>> 979
>> 980 void
>> 981 sos_cpu_context_update_kernel_tss(struct sos_cpu_state *next_ctxt)
>> 982 {
>> 983
>> 984 if (sos_cpu_context_is_in_user_mode(next_ctxt))
>> 985 {
>> 986
>> 987
>> 988
>> 989
>> 990
>> 991
>> 992
>> 993
>> 994
>> 995
>> 996
>> 997 kernel_tss.esp0 = ((sos_vaddr_t)next_ctxt)
>> 998 + sizeof(struct sos_cpu_ustate);
>> 999
>> 1000
>> 1001
>> 1002 }
>> 1003 else
>> 1004 {
>> 1005
>> 1006
>> 1007 }
323 } 1008 }