Diff markup: side-by-side diff (old revision | new revision) of an SOS i386 CPU-context management source file (cpu_context.c); left column is truncated, right column carries the complete new text.
001 001
002 002
003 003
004 004
005 005
006 006
007 007
008 008
009 009
010 010
011 011
012 012
013 013
014 014
015 015
016 016
017 017
018 018
019 019
020 020
021 #include <sos/assert.h> 021 #include <sos/assert.h>
022 #include <sos/klibc.h> 022 #include <sos/klibc.h>
023 #include <drivers/bochs.h> 023 #include <drivers/bochs.h>
024 #include <drivers/x86_videomem.h> 024 #include <drivers/x86_videomem.h>
025 #include <hwcore/segment.h> 025 #include <hwcore/segment.h>
>> 026 #include <hwcore/gdt.h>
>> 027 #include <sos/uaccess.h>
026 028
027 #include "cpu_context.h" 029 #include "cpu_context.h"
028 030
029 031
030 032
031 033
032 034
033 035
034 036
035 037
036 038
037 039
038 040
039 041
040 042
041 043
042 044
043 struct sos_cpu_state { 045 struct sos_cpu_state {
044 046
045 047
046 048
047 sos_ui16_t gs; 049 sos_ui16_t gs;
048 sos_ui16_t fs; 050 sos_ui16_t fs;
049 sos_ui16_t es; 051 sos_ui16_t es;
050 sos_ui16_t ds; 052 sos_ui16_t ds;
051 sos_ui16_t cpl0_ss; 053 sos_ui16_t cpl0_ss;
052 054
053 055
054 sos_ui16_t alignment_padding; 056 sos_ui16_t alignment_padding;
055 sos_ui32_t eax; 057 sos_ui32_t eax;
056 sos_ui32_t ebx; 058 sos_ui32_t ebx;
057 sos_ui32_t ecx; 059 sos_ui32_t ecx;
058 sos_ui32_t edx; 060 sos_ui32_t edx;
059 sos_ui32_t esi; 061 sos_ui32_t esi;
060 sos_ui32_t edi; 062 sos_ui32_t edi;
061 sos_ui32_t ebp; 063 sos_ui32_t ebp;
062 064
063 065
064 sos_ui32_t error_code; 066 sos_ui32_t error_code;
065 sos_vaddr_t eip; 067 sos_vaddr_t eip;
066 sos_ui32_t cs; 068 sos_ui32_t cs;
067 069
068 sos_ui32_t eflags; 070 sos_ui32_t eflags;
069 071
070 072
071 } __attribute__((packed)); 073 } __attribute__((packed));
072 074
073 075
074 076
075 077
076 078
077 079
078 080
079 081
080 082
081 083
082 084
083 085
084 086
085 087
086 088
/*
 * Extract the 16-bit code-segment selector from the 32-bit value the
 * CPU pushed for CS (the upper 16 bits are padding).
 */
#define GET_CPU_CS_REGISTER_VALUE(pushed_ui32_cs_value) \
  ( (pushed_ui32_cs_value) & 0xffff )
089 091
090 092
091 093
092 094
093 095
094 struct sos_cpu_kstate 096 struct sos_cpu_kstate
095 { 097 {
096 struct sos_cpu_state regs; 098 struct sos_cpu_state regs;
097 } __attribute__((packed)); 099 } __attribute__((packed));
098 100
099 101
100 102
>> 103
>> 104
>> 105
>> 106
>> 107
>> 108
>> 109
>> 110 struct sos_cpu_ustate
>> 111 {
>> 112 struct sos_cpu_state regs;
>> 113 struct
>> 114 {
>> 115 sos_ui32_t cpl3_esp;
>> 116 sos_ui16_t cpl3_ss;
>> 117 };
>> 118 } __attribute__((packed));
>> 119
>> 120
>> 121
>> 122
>> 123
>> 124
>> 125
>> 126
>> 127
>> 128
>> 129
>> 130
>> 131 struct x86_tss {
>> 132
>> 133
>> 134
>> 135
>> 136
>> 137
>> 138
>> 139
>> 140
>> 141
>> 142 sos_ui16_t back_link;
>> 143
>> 144 sos_ui16_t reserved1;
>> 145
>> 146
>> 147 sos_vaddr_t esp0;
>> 148 sos_ui16_t ss0;
>> 149
>> 150 sos_ui16_t reserved2;
>> 151
>> 152
>> 153 sos_vaddr_t esp1;
>> 154 sos_ui16_t ss1;
>> 155
>> 156 sos_ui16_t reserved3;
>> 157
>> 158
>> 159 sos_vaddr_t esp2;
>> 160 sos_ui16_t ss2;
>> 161
>> 162 sos_ui16_t reserved4;
>> 163
>> 164
>> 165 sos_vaddr_t cr3;
>> 166 sos_vaddr_t eip;
>> 167 sos_ui32_t eflags;
>> 168 sos_ui32_t eax;
>> 169 sos_ui32_t ecx;
>> 170 sos_ui32_t edx;
>> 171 sos_ui32_t ebx;
>> 172 sos_ui32_t esp;
>> 173 sos_ui32_t ebp;
>> 174 sos_ui32_t esi;
>> 175 sos_ui32_t edi;
>> 176
>> 177
>> 178 sos_ui16_t es;
>> 179 sos_ui16_t reserved5;
>> 180
>> 181
>> 182 sos_ui16_t cs;
>> 183 sos_ui16_t reserved6;
>> 184
>> 185
>> 186 sos_ui16_t ss;
>> 187 sos_ui16_t reserved7;
>> 188
>> 189
>> 190 sos_ui16_t ds;
>> 191 sos_ui16_t reserved8;
>> 192
>> 193
>> 194 sos_ui16_t fs;
>> 195 sos_ui16_t reserved9;
>> 196
>> 197
>> 198 sos_ui16_t gs;
>> 199 sos_ui16_t reserved10;
>> 200
>> 201
>> 202 sos_ui16_t ldtr;
>> 203 sos_ui16_t reserved11;
>> 204
>> 205
>> 206 sos_ui16_t debug_trap_flag :1;
>> 207 sos_ui16_t reserved12 :15;
>> 208 sos_ui16_t iomap_base_addr;
>> 209
>> 210
>> 211 } __attribute__((packed, aligned(128)));
>> 212
>> 213
>> 214 static struct x86_tss kernel_tss;
>> 215
>> 216
>> 217 sos_ret_t sos_cpu_context_subsystem_setup()
>> 218 {
>> 219
>> 220 memset(&kernel_tss, 0x0, sizeof(kernel_tss));
>> 221
>> 222
>> 223
>> 224
>> 225
>> 226
>> 227
>> 228
>> 229
>> 230
>> 231
>> 232 kernel_tss.ss0 = SOS_BUILD_SEGMENT_REG_VALUE(0, FALSE, SOS_SEG_KDATA);
>> 233
>> 234
>> 235 sos_gdt_register_kernel_tss((sos_vaddr_t) &kernel_tss);
>> 236
>> 237 return SOS_OK;
>> 238 }
>> 239
>> 240
>> 241
101 242
102 243
103 244
104 245
105 static void core_routine (sos_cpu_kstate_funct 246 static void core_routine (sos_cpu_kstate_function_arg1_t *start_func,
106 sos_ui32_t start_arg 247 sos_ui32_t start_arg,
107 sos_cpu_kstate_funct 248 sos_cpu_kstate_function_arg1_t *exit_func,
108 sos_ui32_t exit_arg) 249 sos_ui32_t exit_arg)
109 __attribute__((noreturn)); 250 __attribute__((noreturn));
110 251
111 static void core_routine (sos_cpu_kstate_funct 252 static void core_routine (sos_cpu_kstate_function_arg1_t *start_func,
112 sos_ui32_t start_arg 253 sos_ui32_t start_arg,
113 sos_cpu_kstate_funct 254 sos_cpu_kstate_function_arg1_t *exit_func,
114 sos_ui32_t exit_arg) 255 sos_ui32_t exit_arg)
115 { 256 {
116 start_func(start_arg); 257 start_func(start_arg);
117 exit_func(exit_arg); 258 exit_func(exit_arg);
118 259
119 SOS_ASSERT_FATAL(! "The exit function of the 260 SOS_ASSERT_FATAL(! "The exit function of the thread should NOT return !");
120 for(;;); 261 for(;;);
121 } 262 }
122 263
123 264
124 sos_ret_t sos_cpu_kstate_init(struct sos_cpu_s 265 sos_ret_t sos_cpu_kstate_init(struct sos_cpu_state **ctxt,
125 sos_cpu_kstate_f 266 sos_cpu_kstate_function_arg1_t *start_func,
126 sos_ui32_t star 267 sos_ui32_t start_arg,
127 sos_vaddr_t stac 268 sos_vaddr_t stack_bottom,
128 sos_size_t stac 269 sos_size_t stack_size,
129 sos_cpu_kstate_f 270 sos_cpu_kstate_function_arg1_t *exit_func,
130 sos_ui32_t exit 271 sos_ui32_t exit_arg)
131 { 272 {
132 273
133 struct sos_cpu_kstate *kctxt; 274 struct sos_cpu_kstate *kctxt;
134 275
135 276
136 277
137 278
138 279
139 280
140 281
141 282
142 283
143 284
144 285
145 286
146 287
147 288
148 289
149 290
150 291
151 292
152 293
153 294
154 295
155 296
156 297
157 sos_vaddr_t tmp_vaddr = stack_bottom + stack 298 sos_vaddr_t tmp_vaddr = stack_bottom + stack_size;
158 sos_ui32_t *stack = (sos_ui32_t*)tmp_vaddr; 299 sos_ui32_t *stack = (sos_ui32_t*)tmp_vaddr;
159 300
160 301
161 #ifdef SOS_CPU_STATE_DETECT_UNINIT_KERNEL_VARS 302 #ifdef SOS_CPU_STATE_DETECT_UNINIT_KERNEL_VARS
162 memset((void*)stack_bottom, SOS_CPU_STATE_ST 303 memset((void*)stack_bottom, SOS_CPU_STATE_STACK_POISON, stack_size);
163 #elif defined(SOS_CPU_STATE_DETECT_KERNEL_STAC 304 #elif defined(SOS_CPU_STATE_DETECT_KERNEL_STACK_OVERFLOW)
164 sos_cpu_state_prepare_detect_kernel_stack_ov 305 sos_cpu_state_prepare_detect_kernel_stack_overflow(stack_bottom, stack_size);
165 #endif 306 #endif
166 307
167 308
168 309
169 *(--stack) = exit_arg; 310 *(--stack) = exit_arg;
170 *(--stack) = (sos_ui32_t)exit_func; 311 *(--stack) = (sos_ui32_t)exit_func;
171 *(--stack) = start_arg; 312 *(--stack) = start_arg;
172 *(--stack) = (sos_ui32_t)start_func; 313 *(--stack) = (sos_ui32_t)start_func;
173 *(--stack) = 0; 314 *(--stack) = 0;
174 315
175 316
176 317
177 318
178 319
179 320
180 321
181 322
182 323
183 tmp_vaddr = ((sos_vaddr_t)stack) - sizeof(s 324 tmp_vaddr = ((sos_vaddr_t)stack) - sizeof(struct sos_cpu_kstate);
184 kctxt = (struct sos_cpu_kstate*)tmp_vaddr; 325 kctxt = (struct sos_cpu_kstate*)tmp_vaddr;
185 326
186 327
187 memset(kctxt, 0x0, sizeof(struct sos_cpu_kst 328 memset(kctxt, 0x0, sizeof(struct sos_cpu_kstate));
188 329
189 330
190 331
191 kctxt->regs.eip = (sos_ui32_t)core_routine; 332 kctxt->regs.eip = (sos_ui32_t)core_routine;
192 333
193 334
194 kctxt->regs.cs 335 kctxt->regs.cs
195 = SOS_BUILD_SEGMENT_REG_VALUE(0, FALSE, SO 336 = SOS_BUILD_SEGMENT_REG_VALUE(0, FALSE, SOS_SEG_KCODE);
196 kctxt->regs.ds 337 kctxt->regs.ds
197 = SOS_BUILD_SEGMENT_REG_VALUE(0, FALSE, SO 338 = SOS_BUILD_SEGMENT_REG_VALUE(0, FALSE, SOS_SEG_KDATA);
198 kctxt->regs.es 339 kctxt->regs.es
199 = SOS_BUILD_SEGMENT_REG_VALUE(0, FALSE, SO 340 = SOS_BUILD_SEGMENT_REG_VALUE(0, FALSE, SOS_SEG_KDATA);
200 kctxt->regs.cpl0_ss 341 kctxt->regs.cpl0_ss
201 = SOS_BUILD_SEGMENT_REG_VALUE(0, FALSE, SO 342 = SOS_BUILD_SEGMENT_REG_VALUE(0, FALSE, SOS_SEG_KDATA);
202 343
203 344
204 345
205 kctxt->regs.eflags = (1 << 9); 346 kctxt->regs.eflags = (1 << 9);
206 347
207 348
208 *ctxt = (struct sos_cpu_state*) kctxt; 349 *ctxt = (struct sos_cpu_state*) kctxt;
209 350
210 return SOS_OK; 351 return SOS_OK;
211 } 352 }
212 353
213 354
>> 355
>> 356
>> 357
>> 358
>> 359
>> 360
>> 361 static sos_ret_t cpu_ustate_init(struct sos_cpu_state **ctxt,
>> 362 const struct sos_cpu_state *model_uctxt,
>> 363 sos_uaddr_t user_start_PC,
>> 364 sos_ui32_t user_start_arg1,
>> 365 sos_ui32_t user_start_arg2,
>> 366 sos_uaddr_t user_initial_SP,
>> 367 sos_vaddr_t kernel_stack_bottom,
>> 368 sos_size_t kernel_stack_size)
>> 369 {
>> 370
>> 371 struct sos_cpu_ustate *uctxt;
>> 372
>> 373
>> 374
>> 375
>> 376
>> 377
>> 378
>> 379 sos_vaddr_t uctxt_vaddr = kernel_stack_bottom
>> 380 + kernel_stack_size
>> 381 - sizeof(struct sos_cpu_ustate);
>> 382 uctxt = (struct sos_cpu_ustate*)uctxt_vaddr;
>> 383
>> 384 if (model_uctxt && !sos_cpu_context_is_in_user_mode(model_uctxt))
>> 385 return -SOS_EINVAL;
>> 386
>> 387
>> 388 #ifdef SOS_CPU_STATE_DETECT_UNINIT_KERNEL_VARS
>> 389 memset((void*)kernel_stack_bottom,
>> 390 SOS_CPU_STATE_STACK_POISON,
>> 391 kernel_stack_size);
>> 392 #elif defined(SOS_CPU_STATE_DETECT_KERNEL_STACK_OVERFLOW)
>> 393 sos_cpu_state_prepare_detect_kernel_stack_overflow(kernel_stack_bottom,
>> 394 kernel_stack_size);
>> 395 #endif
>> 396
>> 397
>> 398
>> 399
>> 400
>> 401
>> 402
>> 403
>> 404 if (! model_uctxt)
>> 405 {
>> 406 memset(uctxt, 0x0, sizeof(struct sos_cpu_ustate));
>> 407
>> 408
>> 409
>> 410 uctxt->regs.eip = (sos_ui32_t)user_start_PC;
>> 411
>> 412
>> 413 uctxt->cpl3_esp = user_initial_SP;
>> 414 }
>> 415 else
>> 416 memcpy(uctxt, model_uctxt, sizeof(struct sos_cpu_ustate));
>> 417
>> 418
>> 419
>> 420 uctxt->regs.eax = user_start_arg1;
>> 421
>> 422
>> 423 if (! model_uctxt)
>> 424 uctxt->regs.ebx = user_start_arg2;
>> 425
>> 426
>> 427 uctxt->regs.cs
>> 428 = SOS_BUILD_SEGMENT_REG_VALUE(3, FALSE, SOS_SEG_UCODE);
>> 429 uctxt->regs.ds
>> 430 = SOS_BUILD_SEGMENT_REG_VALUE(3, FALSE, SOS_SEG_UDATA);
>> 431 uctxt->regs.es
>> 432 = SOS_BUILD_SEGMENT_REG_VALUE(3, FALSE, SOS_SEG_UDATA);
>> 433 uctxt->cpl3_ss
>> 434 = SOS_BUILD_SEGMENT_REG_VALUE(3, FALSE, SOS_SEG_UDATA);
>> 435
>> 436
>> 437
>> 438
>> 439
>> 440 uctxt->regs.cpl0_ss
>> 441 = SOS_BUILD_SEGMENT_REG_VALUE(0, FALSE, SOS_SEG_KDATA);
>> 442
>> 443
>> 444
>> 445
>> 446 uctxt->regs.eflags = (1 << 9);
>> 447
>> 448
>> 449 *ctxt = (struct sos_cpu_state*) uctxt;
>> 450
>> 451 return SOS_OK;
>> 452 }
>> 453
>> 454
>> 455 sos_ret_t sos_cpu_ustate_init(struct sos_cpu_state **ctxt,
>> 456 sos_uaddr_t user_start_PC,
>> 457 sos_ui32_t user_start_arg1,
>> 458 sos_ui32_t user_start_arg2,
>> 459 sos_uaddr_t user_initial_SP,
>> 460 sos_vaddr_t kernel_stack_bottom,
>> 461 sos_size_t kernel_stack_size)
>> 462 {
>> 463 return cpu_ustate_init(ctxt, NULL,
>> 464 user_start_PC,
>> 465 user_start_arg1, user_start_arg2,
>> 466 user_initial_SP,
>> 467 kernel_stack_bottom, kernel_stack_size);
>> 468 }
>> 469
>> 470
>> 471 sos_ret_t sos_cpu_ustate_duplicate(struct sos_cpu_state **ctxt,
>> 472 const struct sos_cpu_state *model_uctxt,
>> 473 sos_ui32_t user_retval,
>> 474 sos_vaddr_t kernel_stack_bottom,
>> 475 sos_size_t kernel_stack_size)
>> 476 {
>> 477 return cpu_ustate_init(ctxt, model_uctxt,
>> 478 0,
>> 479 user_retval, 0,
>> 480 0,
>> 481 kernel_stack_bottom, kernel_stack_size);
>> 482 }
>> 483
>> 484
>> 485 sos_ret_t
>> 486 sos_cpu_context_is_in_user_mode(const struct sos_cpu_state *ctxt)
>> 487 {
>> 488
>> 489
>> 490 switch (GET_CPU_CS_REGISTER_VALUE(ctxt->cs))
>> 491 {
>> 492 case SOS_BUILD_SEGMENT_REG_VALUE(3, FALSE, SOS_SEG_UCODE):
>> 493 return TRUE;
>> 494 break;
>> 495
>> 496 case SOS_BUILD_SEGMENT_REG_VALUE(0, FALSE, SOS_SEG_KCODE):
>> 497 return FALSE;
>> 498 break;
>> 499
>> 500 default:
>> 501 SOS_FATAL_ERROR("Invalid saved context Code segment register: 0x%x (k=%x, u=%x) !",
>> 502 (unsigned) GET_CPU_CS_REGISTER_VALUE(ctxt->cs),
>> 503 SOS_BUILD_SEGMENT_REG_VALUE(0, FALSE, SOS_SEG_KCODE),
>> 504 SOS_BUILD_SEGMENT_REG_VALUE(3, FALSE, SOS_SEG_UCODE));
>> 505 break;
>> 506 }
>> 507
>> 508
>> 509 return -SOS_EFATAL;
>> 510 }
>> 511
>> 512
214 #if defined(SOS_CPU_STATE_DETECT_KERNEL_STACK_ 513 #if defined(SOS_CPU_STATE_DETECT_KERNEL_STACK_OVERFLOW)
215 void 514 void
216 sos_cpu_state_prepare_detect_kernel_stack_over 515 sos_cpu_state_prepare_detect_kernel_stack_overflow(const struct sos_cpu_state *ctxt,
217 516 sos_vaddr_t stack_bottom,
218 517 sos_size_t stack_size)
219 { 518 {
220 sos_size_t poison_size = SOS_CPU_STATE_DETEC 519 sos_size_t poison_size = SOS_CPU_STATE_DETECT_KERNEL_STACK_OVERFLOW;
221 if (poison_size > stack_size) 520 if (poison_size > stack_size)
222 poison_size = stack_size; 521 poison_size = stack_size;
223 522
224 memset((void*)stack_bottom, SOS_CPU_STATE_ST 523 memset((void*)stack_bottom, SOS_CPU_STATE_STACK_POISON, poison_size);
225 } 524 }
226 525
227 526
228 void 527 void
229 sos_cpu_state_detect_kernel_stack_overflow(con 528 sos_cpu_state_detect_kernel_stack_overflow(const struct sos_cpu_state *ctxt,
230 sos 529 sos_vaddr_t stack_bottom,
231 sos 530 sos_size_t stack_size)
232 { 531 {
233 unsigned char *c; 532 unsigned char *c;
234 int i; 533 int i;
235 534
236 535
237 536
238 537
239 538
240 SOS_ASSERT_FATAL(((sos_vaddr_t)ctxt) >= stac 539 SOS_ASSERT_FATAL(((sos_vaddr_t)ctxt) >= stack_bottom);
241 SOS_ASSERT_FATAL(((sos_vaddr_t)ctxt) + sizeo 540 SOS_ASSERT_FATAL(((sos_vaddr_t)ctxt) + sizeof(struct sos_cpu_kstate)
242 <= stack_bottom + stack_siz 541 <= stack_bottom + stack_size);
243 542
244 543
245 for (c = (unsigned char*) stack_bottom, i = 544 for (c = (unsigned char*) stack_bottom, i = 0 ;
246 (i < SOS_CPU_STATE_DETECT_KERNEL_STACK_ 545 (i < SOS_CPU_STATE_DETECT_KERNEL_STACK_OVERFLOW) && (i < stack_size) ;
247 c++, i++) 546 c++, i++)
248 { 547 {
249 SOS_ASSERT_FATAL(SOS_CPU_STATE_STACK_POI 548 SOS_ASSERT_FATAL(SOS_CPU_STATE_STACK_POISON == *c);
250 } 549 }
251 } 550 }
252 #endif 551 #endif
253 552
254 553
255 554
256 555
257 556
258 557
259 558
260 sos_vaddr_t sos_cpu_context_get_PC(const struc 559 sos_vaddr_t sos_cpu_context_get_PC(const struct sos_cpu_state *ctxt)
261 { 560 {
262 SOS_ASSERT_FATAL(NULL != ctxt); 561 SOS_ASSERT_FATAL(NULL != ctxt);
263 562
264 563
265 564
266 return ctxt->eip; 565 return ctxt->eip;
267 } 566 }
268 567
269 568
270 sos_vaddr_t sos_cpu_context_get_SP(const struc 569 sos_vaddr_t sos_cpu_context_get_SP(const struct sos_cpu_state *ctxt)
271 { 570 {
272 SOS_ASSERT_FATAL(NULL != ctxt); 571 SOS_ASSERT_FATAL(NULL != ctxt);
273 572
>> 573
>> 574
>> 575
>> 576 if (TRUE == sos_cpu_context_is_in_user_mode(ctxt))
>> 577 {
>> 578 struct sos_cpu_ustate * uctxt = (struct sos_cpu_ustate*)ctxt;
>> 579 return uctxt->cpl3_esp;
>> 580 }
>> 581
274 582
275 583
276 584
277 return (sos_vaddr_t)ctxt; 585 return (sos_vaddr_t)ctxt;
278 } 586 }
279 587
280 588
>> 589 sos_ret_t
>> 590 sos_cpu_context_set_EX_return_address(struct sos_cpu_state *ctxt,
>> 591 sos_vaddr_t ret_vaddr)
>> 592 {
>> 593 ctxt->eip = ret_vaddr;
>> 594 return SOS_OK;
>> 595 }
>> 596
>> 597
281 void sos_cpu_context_dump(const struct sos_cpu 598 void sos_cpu_context_dump(const struct sos_cpu_state *ctxt)
282 { 599 {
283 char buf[128]; 600 char buf[128];
>> 601
284 snprintf(buf, sizeof(buf), 602 snprintf(buf, sizeof(buf),
285 "CPU: eip=%x esp=%x eflags=%x cs=%x !! 603 "CPU: eip=%x esp0=%x eflags=%x cs=%x ds=%x ss0=%x err=%x",
286 (unsigned)ctxt->eip, (unsigned)ctxt 604 (unsigned)ctxt->eip, (unsigned)ctxt, (unsigned)ctxt->eflags,
287 (unsigned)GET_CPU_CS_REGISTER_VALUE 605 (unsigned)GET_CPU_CS_REGISTER_VALUE(ctxt->cs), (unsigned)ctxt->ds,
288 (unsigned)ctxt->cpl0_ss, 606 (unsigned)ctxt->cpl0_ss,
289 (unsigned)ctxt->error_code); 607 (unsigned)ctxt->error_code);
>> 608 if (TRUE == sos_cpu_context_is_in_user_mode(ctxt))
>> 609 {
>> 610 struct sos_cpu_ustate * uctxt = (struct sos_cpu_ustate*)ctxt;
>> 611 snprintf(buf, sizeof(buf),
>> 612 "%s esp3=%x ss3=%x",
>> 613 buf, (unsigned)uctxt->cpl3_esp, (unsigned)uctxt->cpl3_ss);
>> 614 }
>> 615 else
>> 616 snprintf(buf, sizeof(buf), "%s [KERNEL MODE]", buf);
>> 617
290 sos_bochs_putstring(buf); sos_bochs_putstrin 618 sos_bochs_putstring(buf); sos_bochs_putstring("\n");
291 sos_x86_videomem_putstring(23, 0, 619 sos_x86_videomem_putstring(23, 0,
292 SOS_X86_VIDEO_FG_BLA !! 620 SOS_X86_VIDEO_FG_BLACK | SOS_X86_VIDEO_BG_LTGRAY,
293 buf); !! 621 buf);
294 } 622 }
295 623
296 624
297 625
298 626
299 627
300 628
301 629
302 sos_ui32_t sos_cpu_context_get_EX_info(const s 630 sos_ui32_t sos_cpu_context_get_EX_info(const struct sos_cpu_state *ctxt)
303 { 631 {
304 SOS_ASSERT_FATAL(NULL != ctxt); 632 SOS_ASSERT_FATAL(NULL != ctxt);
305 return ctxt->error_code; 633 return ctxt->error_code;
306 } 634 }
307 635
308 636
309 sos_vaddr_t 637 sos_vaddr_t
310 sos_cpu_context_get_EX_faulting_vaddr(const st 638 sos_cpu_context_get_EX_faulting_vaddr(const struct sos_cpu_state *ctxt)
311 { 639 {
312 sos_ui32_t cr2; 640 sos_ui32_t cr2;
313 641
314 642
315 643
316 644
317 645
318 646
319 647
320 648
321 649
322 650
323 651
324 652
325 653
326 654
327 asm volatile ("movl %%cr2, %0" 655 asm volatile ("movl %%cr2, %0"
328 :"=r"(cr2) 656 :"=r"(cr2)
329 : ); 657 : );
330 658
331 return cr2; 659 return cr2;
332 } 660 }
333 661
334 662
335 663
>> 664
>> 665
>> 666
>> 667
>> 668
>> 669
>> 670
>> 671
>> 672
>> 673
>> 674
>> 675
>> 676
>> 677
>> 678 inline
>> 679 sos_ret_t sos_syscall_get3args(const struct sos_cpu_state *user_ctxt,
>> 680 unsigned int *arg1,
>> 681 unsigned int *arg2,
>> 682 unsigned int *arg3)
>> 683 {
>> 684 *arg1 = user_ctxt->ebx;
>> 685 *arg2 = user_ctxt->ecx;
>> 686 *arg3 = user_ctxt->edx;
>> 687 return SOS_OK;
>> 688 }
>> 689
>> 690
>> 691 sos_ret_t sos_syscall_get1arg(const struct sos_cpu_state *user_ctxt,
>> 692 unsigned int *arg1)
>> 693 {
>> 694 unsigned int unused;
>> 695 return sos_syscall_get3args(user_ctxt, arg1, & unused, & unused);
>> 696 }
>> 697
>> 698
>> 699 sos_ret_t sos_syscall_get2args(const struct sos_cpu_state *user_ctxt,
>> 700 unsigned int *arg1,
>> 701 unsigned int *arg2)
>> 702 {
>> 703 unsigned int unused;
>> 704 return sos_syscall_get3args(user_ctxt, arg1, arg2, & unused);
>> 705 }
>> 706
>> 707
>> 708
>> 709
>> 710
>> 711
>> 712
>> 713 sos_ret_t sos_syscall_get4args(const struct sos_cpu_state *user_ctxt,
>> 714 unsigned int *arg1,
>> 715 unsigned int *arg2,
>> 716 unsigned int *arg3,
>> 717 unsigned int *arg4)
>> 718 {
>> 719 sos_uaddr_t uaddr_other_args;
>> 720 unsigned int other_args[2];
>> 721 sos_ret_t retval;
>> 722
>> 723
>> 724
>> 725 retval = sos_syscall_get3args(user_ctxt, arg1, arg2,
>> 726 (unsigned int *)& uaddr_other_args);
>> 727 if (SOS_OK != retval)
>> 728 return retval;
>> 729
>> 730
>> 731
>> 732 retval = sos_memcpy_from_user((sos_vaddr_t)other_args,
>> 733 (sos_uaddr_t)uaddr_other_args,
>> 734 sizeof(other_args));
>> 735 if (sizeof(other_args) != retval)
>> 736 return -SOS_EFAULT;
>> 737
>> 738 *arg3 = other_args[0];
>> 739 *arg4 = other_args[1];
>> 740 return SOS_OK;
>> 741 }
>> 742
>> 743
>> 744 sos_ret_t sos_syscall_get5args(const struct sos_cpu_state *user_ctxt,
>> 745 unsigned int *arg1,
>> 746 unsigned int *arg2,
>> 747 unsigned int *arg3,
>> 748 unsigned int *arg4,
>> 749 unsigned int *arg5)
>> 750 {
>> 751 sos_uaddr_t uaddr_other_args;
>> 752 unsigned int other_args[3];
>> 753 sos_ret_t retval;
>> 754
>> 755
>> 756
>> 757 retval = sos_syscall_get3args(user_ctxt, arg1, arg2,
>> 758 (unsigned int *)& uaddr_other_args);
>> 759 if (SOS_OK != retval)
>> 760 return retval;
>> 761
>> 762
>> 763
>> 764 retval = sos_memcpy_from_user((sos_vaddr_t)other_args,
>> 765 (sos_uaddr_t)uaddr_other_args,
>> 766 sizeof(other_args));
>> 767 if (sizeof(other_args) != retval)
>> 768 return -SOS_EFAULT;
>> 769
>> 770 *arg3 = other_args[0];
>> 771 *arg4 = other_args[1];
>> 772 *arg5 = other_args[2];
>> 773 return SOS_OK;
>> 774 }
>> 775
>> 776
>> 777 sos_ret_t sos_syscall_get6args(const struct sos_cpu_state *user_ctxt,
>> 778 unsigned int *arg1,
>> 779 unsigned int *arg2,
>> 780 unsigned int *arg3,
>> 781 unsigned int *arg4,
>> 782 unsigned int *arg5,
>> 783 unsigned int *arg6)
>> 784 {
>> 785 sos_uaddr_t uaddr_other_args;
>> 786 unsigned int other_args[4];
>> 787 sos_ret_t retval;
>> 788
>> 789
>> 790
>> 791 retval = sos_syscall_get3args(user_ctxt, arg1, arg2,
>> 792 (unsigned int *)& uaddr_other_args);
>> 793 if (SOS_OK != retval)
>> 794 return retval;
>> 795
>> 796
>> 797
>> 798 retval = sos_memcpy_from_user((sos_vaddr_t)other_args,
>> 799 (sos_uaddr_t)uaddr_other_args,
>> 800 sizeof(other_args));
>> 801 if (sizeof(other_args) != retval)
>> 802 return -SOS_EFAULT;
>> 803
>> 804 *arg3 = other_args[0];
>> 805 *arg4 = other_args[1];
>> 806 *arg5 = other_args[2];
>> 807 *arg6 = other_args[3];
>> 808 return SOS_OK;
>> 809 }
>> 810
>> 811
>> 812 sos_ret_t sos_syscall_get7args(const struct sos_cpu_state *user_ctxt,
>> 813 unsigned int *arg1,
>> 814 unsigned int *arg2,
>> 815 unsigned int *arg3,
>> 816 unsigned int *arg4,
>> 817 unsigned int *arg5,
>> 818 unsigned int *arg6,
>> 819 unsigned int *arg7)
>> 820 {
>> 821 sos_uaddr_t uaddr_other_args;
>> 822 unsigned int other_args[5];
>> 823 sos_ret_t retval;
>> 824
>> 825
>> 826
>> 827 retval = sos_syscall_get3args(user_ctxt, arg1, arg2,
>> 828 (unsigned int *)& uaddr_other_args);
>> 829 if (SOS_OK != retval)
>> 830 return retval;
>> 831
>> 832
>> 833
>> 834 retval = sos_memcpy_from_user((sos_vaddr_t)other_args,
>> 835 (sos_uaddr_t)uaddr_other_args,
>> 836 sizeof(other_args));
>> 837 if (sizeof(other_args) != retval)
>> 838 return -SOS_EFAULT;
>> 839
>> 840 *arg3 = other_args[0];
>> 841 *arg4 = other_args[1];
>> 842 *arg5 = other_args[2];
>> 843 *arg6 = other_args[3];
>> 844 *arg7 = other_args[4];
>> 845 return SOS_OK;
>> 846 }
>> 847
>> 848
>> 849 sos_ret_t sos_syscall_get8args(const struct sos_cpu_state *user_ctxt,
>> 850 unsigned int *arg1,
>> 851 unsigned int *arg2,
>> 852 unsigned int *arg3,
>> 853 unsigned int *arg4,
>> 854 unsigned int *arg5,
>> 855 unsigned int *arg6,
>> 856 unsigned int *arg7,
>> 857 unsigned int *arg8)
>> 858 {
>> 859 sos_uaddr_t uaddr_other_args;
>> 860 unsigned int other_args[6];
>> 861 sos_ret_t retval;
>> 862
>> 863
>> 864
>> 865 retval = sos_syscall_get3args(user_ctxt, arg1, arg2,
>> 866 (unsigned int *)& uaddr_other_args);
>> 867 if (SOS_OK != retval)
>> 868 return retval;
>> 869
>> 870
>> 871
>> 872 retval = sos_memcpy_from_user((sos_vaddr_t)other_args,
>> 873 (sos_uaddr_t)uaddr_other_args,
>> 874 sizeof(other_args));
>> 875 if (sizeof(other_args) != retval)
>> 876 return -SOS_EFAULT;
>> 877
>> 878 *arg3 = other_args[0];
>> 879 *arg4 = other_args[1];
>> 880 *arg5 = other_args[2];
>> 881 *arg6 = other_args[3];
>> 882 *arg7 = other_args[4];
>> 883 *arg8 = other_args[5];
>> 884 return SOS_OK;
>> 885 }
>> 886
>> 887
>> 888
336 889
337 890
338 891
339 892
340 sos_ui32_t sos_backtrace(const struct sos_cpu_ 893 sos_ui32_t sos_backtrace(const struct sos_cpu_state *cpu_state,
341 sos_ui32_t max_depth, 894 sos_ui32_t max_depth,
342 sos_vaddr_t stack_bot 895 sos_vaddr_t stack_bottom,
343 sos_size_t stack_size 896 sos_size_t stack_size,
344 sos_backtrace_callbac 897 sos_backtrace_callback_t * backtracer,
345 void *custom_arg) 898 void *custom_arg)
346 { 899 {
347 int depth; 900 int depth;
348 sos_vaddr_t callee_PC, caller_frame; 901 sos_vaddr_t callee_PC, caller_frame;
349 902
>> 903
>> 904 if ((NULL != cpu_state)
>> 905 &&
>> 906 (TRUE == sos_cpu_context_is_in_user_mode(cpu_state)))
>> 907 {
>> 908 return 0;
>> 909 }
>> 910
350 911
351 912
352 913
353 914
354 915
355 916
356 917
357 918
358 919
359 920
360 921
361 922
362 923
363 924
364 925
365 926
366 927
367 928
368 929
369 930
370 931
371 932
372 933
373 934
374 935
375 936
376 937
377 938
378 939
379 if (cpu_state) 940 if (cpu_state)
380 { 941 {
381 callee_PC = cpu_state->eip; 942 callee_PC = cpu_state->eip;
382 caller_frame = cpu_state->ebp; 943 caller_frame = cpu_state->ebp;
383 } 944 }
384 else 945 else
385 { 946 {
386 947
387 callee_PC = (sos_vaddr_t)__builtin_re 948 callee_PC = (sos_vaddr_t)__builtin_return_address(0);
388 caller_frame = (sos_vaddr_t)__builtin_fr 949 caller_frame = (sos_vaddr_t)__builtin_frame_address(1);
389 } 950 }
390 951
391 for(depth=0 ; depth < max_depth ; depth ++) 952 for(depth=0 ; depth < max_depth ; depth ++)
392 { 953 {
393 954
394 backtracer(callee_PC, caller_frame + 8, 955 backtracer(callee_PC, caller_frame + 8, depth, custom_arg);
395 956
396 957
397 if ( (caller_frame < stack_bottom) 958 if ( (caller_frame < stack_bottom)
398 || (caller_frame + 4 >= stack_botto 959 || (caller_frame + 4 >= stack_bottom + stack_size) )
399 return depth; 960 return depth;
400 961
401 962
402 callee_PC = *((sos_vaddr_t*) (caller_ 963 callee_PC = *((sos_vaddr_t*) (caller_frame + 4));
403 caller_frame = *((sos_vaddr_t*) caller_f 964 caller_frame = *((sos_vaddr_t*) caller_frame);
404 } 965 }
405 966
406 return depth; 967 return depth;
>> 968 }
>> 969
>> 970
>> 971
>> 972
>> 973
>> 974
>> 975
>> 976
>> 977
>> 978
>> 979
>> 980 void
>> 981 sos_cpu_context_update_kernel_tss(struct sos_cpu_state *next_ctxt)
>> 982 {
>> 983
>> 984 if (sos_cpu_context_is_in_user_mode(next_ctxt))
>> 985 {
>> 986
>> 987
>> 988
>> 989
>> 990
>> 991
>> 992
>> 993
>> 994
>> 995
>> 996
>> 997 kernel_tss.esp0 = ((sos_vaddr_t)next_ctxt)
>> 998 + sizeof(struct sos_cpu_ustate);
>> 999
>> 1000
>> 1001
>> 1002 }
>> 1003 else
>> 1004 {
>> 1005
>> 1006
>> 1007 }
407 } 1008 }