Diff markup
001 001
002 002
003 003
004 004
005 005
006 006
007 007
008 008
009 009
010 010
011 011
012 012
013 013
014 014
015 015
016 016
017 017
018 018
019 019
020 020
021 #include <sos/assert.h> 021 #include <sos/assert.h>
022 #include <sos/klibc.h> 022 #include <sos/klibc.h>
023 #include <drivers/bochs.h> 023 #include <drivers/bochs.h>
024 #include <drivers/x86_videomem.h> 024 #include <drivers/x86_videomem.h>
025 #include <hwcore/segment.h> 025 #include <hwcore/segment.h>
>> 026 #include <hwcore/gdt.h>
>> 027 #include <sos/uaccess.h>
026 028
027 #include "cpu_context.h" 029 #include "cpu_context.h"
028 030
029 031
030 032
031 033
032 034
033 035
034 036
035 037
036 038
037 039
038 040
039 041
040 042
041 043
042 044
043 struct sos_cpu_state { 045 struct sos_cpu_state {
044 046
045 047
046 048
047 sos_ui16_t gs; 049 sos_ui16_t gs;
048 sos_ui16_t fs; 050 sos_ui16_t fs;
049 sos_ui16_t es; 051 sos_ui16_t es;
050 sos_ui16_t ds; 052 sos_ui16_t ds;
051 sos_ui16_t cpl0_ss; 053 sos_ui16_t cpl0_ss;
052 054
053 055
054 sos_ui16_t alignment_padding; 056 sos_ui16_t alignment_padding;
055 sos_ui32_t eax; 057 sos_ui32_t eax;
056 sos_ui32_t ebx; 058 sos_ui32_t ebx;
057 sos_ui32_t ecx; 059 sos_ui32_t ecx;
058 sos_ui32_t edx; 060 sos_ui32_t edx;
059 sos_ui32_t esi; 061 sos_ui32_t esi;
060 sos_ui32_t edi; 062 sos_ui32_t edi;
061 sos_ui32_t ebp; 063 sos_ui32_t ebp;
062 064
063 065
064 sos_ui32_t error_code; 066 sos_ui32_t error_code;
065 sos_vaddr_t eip; 067 sos_vaddr_t eip;
066 sos_ui32_t cs; 068 sos_ui32_t cs;
067 069
068 sos_ui32_t eflags; 070 sos_ui32_t eflags;
069 071
070 072
071 } __attribute__((packed)); 073 } __attribute__((packed));
072 074
073 075
074 076
075 077
076 078
077 079
078 080
079 081
080 082
081 083
082 084
083 085
084 086
085 087
086 088
/*
 * The CS value the CPU pushes on interrupt is 32 bits wide, but only
 * its low-order 16 bits hold the code-segment selector.
 */
#define GET_CPU_CS_REGISTER_VALUE(pushed_ui32_cs_value) \
  ( (pushed_ui32_cs_value) & 0xffff )
089 091
090 092
091 093
092 094
093 095
094 struct sos_cpu_kstate 096 struct sos_cpu_kstate
095 { 097 {
096 struct sos_cpu_state regs; 098 struct sos_cpu_state regs;
097 } __attribute__((packed)); 099 } __attribute__((packed));
098 100
099 101
100 102
>> 103
>> 104
>> 105
>> 106
>> 107
>> 108
>> 109
>> 110 struct sos_cpu_ustate
>> 111 {
>> 112 struct sos_cpu_state regs;
>> 113 struct
>> 114 {
>> 115 sos_ui32_t cpl3_esp;
>> 116 sos_ui16_t cpl3_ss;
>> 117 };
>> 118 } __attribute__((packed));
>> 119
>> 120
>> 121
>> 122
>> 123
>> 124
>> 125
>> 126
>> 127
>> 128
>> 129
>> 130
>> 131 struct x86_tss {
>> 132
>> 133
>> 134
>> 135
>> 136
>> 137
>> 138
>> 139
>> 140
>> 141
>> 142 sos_ui16_t back_link;
>> 143
>> 144 sos_ui16_t reserved1;
>> 145
>> 146
>> 147 sos_vaddr_t esp0;
>> 148 sos_ui16_t ss0;
>> 149
>> 150 sos_ui16_t reserved2;
>> 151
>> 152
>> 153 sos_vaddr_t esp1;
>> 154 sos_ui16_t ss1;
>> 155
>> 156 sos_ui16_t reserved3;
>> 157
>> 158
>> 159 sos_vaddr_t esp2;
>> 160 sos_ui16_t ss2;
>> 161
>> 162 sos_ui16_t reserved4;
>> 163
>> 164
>> 165 sos_vaddr_t cr3;
>> 166 sos_vaddr_t eip;
>> 167 sos_ui32_t eflags;
>> 168 sos_ui32_t eax;
>> 169 sos_ui32_t ecx;
>> 170 sos_ui32_t edx;
>> 171 sos_ui32_t ebx;
>> 172 sos_ui32_t esp;
>> 173 sos_ui32_t ebp;
>> 174 sos_ui32_t esi;
>> 175 sos_ui32_t edi;
>> 176
>> 177
>> 178 sos_ui16_t es;
>> 179 sos_ui16_t reserved5;
>> 180
>> 181
>> 182 sos_ui16_t cs;
>> 183 sos_ui16_t reserved6;
>> 184
>> 185
>> 186 sos_ui16_t ss;
>> 187 sos_ui16_t reserved7;
>> 188
>> 189
>> 190 sos_ui16_t ds;
>> 191 sos_ui16_t reserved8;
>> 192
>> 193
>> 194 sos_ui16_t fs;
>> 195 sos_ui16_t reserved9;
>> 196
>> 197
>> 198 sos_ui16_t gs;
>> 199 sos_ui16_t reserved10;
>> 200
>> 201
>> 202 sos_ui16_t ldtr;
>> 203 sos_ui16_t reserved11;
>> 204
>> 205
>> 206 sos_ui16_t debug_trap_flag :1;
>> 207 sos_ui16_t reserved12 :15;
>> 208 sos_ui16_t iomap_base_addr;
>> 209
>> 210
>> 211 } __attribute__((packed, aligned(128)));
>> 212
>> 213
>> 214 static struct x86_tss kernel_tss;
>> 215
>> 216
>> 217 sos_ret_t sos_cpu_context_subsystem_setup()
>> 218 {
>> 219
>> 220 memset(&kernel_tss, 0x0, sizeof(kernel_tss));
>> 221
>> 222
>> 223
>> 224
>> 225
>> 226
>> 227
>> 228
>> 229
>> 230
>> 231
>> 232 kernel_tss.ss0 = SOS_BUILD_SEGMENT_REG_VALUE(0, FALSE, SOS_SEG_KDATA);
>> 233
>> 234
>> 235 sos_gdt_register_kernel_tss((sos_vaddr_t) &kernel_tss);
>> 236
>> 237 return SOS_OK;
>> 238 }
>> 239
>> 240
>> 241
101 242
102 243
103 244
104 245
105 static void core_routine (sos_cpu_kstate_funct 246 static void core_routine (sos_cpu_kstate_function_arg1_t *start_func,
106 sos_ui32_t start_arg 247 sos_ui32_t start_arg,
107 sos_cpu_kstate_funct 248 sos_cpu_kstate_function_arg1_t *exit_func,
108 sos_ui32_t exit_arg) 249 sos_ui32_t exit_arg)
109 __attribute__((noreturn)); 250 __attribute__((noreturn));
110 251
111 static void core_routine (sos_cpu_kstate_funct 252 static void core_routine (sos_cpu_kstate_function_arg1_t *start_func,
112 sos_ui32_t start_arg 253 sos_ui32_t start_arg,
113 sos_cpu_kstate_funct 254 sos_cpu_kstate_function_arg1_t *exit_func,
114 sos_ui32_t exit_arg) 255 sos_ui32_t exit_arg)
115 { 256 {
116 start_func(start_arg); 257 start_func(start_arg);
117 exit_func(exit_arg); 258 exit_func(exit_arg);
118 259
119 SOS_ASSERT_FATAL(! "The exit function of the 260 SOS_ASSERT_FATAL(! "The exit function of the thread should NOT return !");
120 for(;;); 261 for(;;);
121 } 262 }
122 263
123 264
124 sos_ret_t sos_cpu_kstate_init(struct sos_cpu_s 265 sos_ret_t sos_cpu_kstate_init(struct sos_cpu_state **ctxt,
125 sos_cpu_kstate_f 266 sos_cpu_kstate_function_arg1_t *start_func,
126 sos_ui32_t star 267 sos_ui32_t start_arg,
127 sos_vaddr_t stac 268 sos_vaddr_t stack_bottom,
128 sos_size_t stac 269 sos_size_t stack_size,
129 sos_cpu_kstate_f 270 sos_cpu_kstate_function_arg1_t *exit_func,
130 sos_ui32_t exit 271 sos_ui32_t exit_arg)
131 { 272 {
132 273
133 struct sos_cpu_kstate *kctxt; 274 struct sos_cpu_kstate *kctxt;
134 275
135 276
136 277
137 278
138 279
139 280
140 281
141 282
142 283
143 284
144 285
145 286
146 287
147 288
148 289
149 290
150 291
151 292
152 293
153 294
154 295
155 296
156 297
157 sos_vaddr_t tmp_vaddr = stack_bottom + stack 298 sos_vaddr_t tmp_vaddr = stack_bottom + stack_size;
158 sos_ui32_t *stack = (sos_ui32_t*)tmp_vaddr; 299 sos_ui32_t *stack = (sos_ui32_t*)tmp_vaddr;
159 300
160 301
161 #ifdef SOS_CPU_STATE_DETECT_UNINIT_KERNEL_VARS 302 #ifdef SOS_CPU_STATE_DETECT_UNINIT_KERNEL_VARS
162 memset((void*)stack_bottom, SOS_CPU_STATE_ST 303 memset((void*)stack_bottom, SOS_CPU_STATE_STACK_POISON, stack_size);
163 #elif defined(SOS_CPU_STATE_DETECT_KERNEL_STAC 304 #elif defined(SOS_CPU_STATE_DETECT_KERNEL_STACK_OVERFLOW)
164 sos_cpu_state_prepare_detect_kernel_stack_ov 305 sos_cpu_state_prepare_detect_kernel_stack_overflow(stack_bottom, stack_size);
165 #endif 306 #endif
166 307
167 308
168 309
169 *(--stack) = exit_arg; 310 *(--stack) = exit_arg;
170 *(--stack) = (sos_ui32_t)exit_func; 311 *(--stack) = (sos_ui32_t)exit_func;
171 *(--stack) = start_arg; 312 *(--stack) = start_arg;
172 *(--stack) = (sos_ui32_t)start_func; 313 *(--stack) = (sos_ui32_t)start_func;
173 *(--stack) = 0; 314 *(--stack) = 0;
174 315
175 316
176 317
177 318
178 319
179 320
180 321
181 322
182 323
183 tmp_vaddr = ((sos_vaddr_t)stack) - sizeof(s 324 tmp_vaddr = ((sos_vaddr_t)stack) - sizeof(struct sos_cpu_kstate);
184 kctxt = (struct sos_cpu_kstate*)tmp_vaddr; 325 kctxt = (struct sos_cpu_kstate*)tmp_vaddr;
185 326
186 327
187 memset(kctxt, 0x0, sizeof(struct sos_cpu_kst 328 memset(kctxt, 0x0, sizeof(struct sos_cpu_kstate));
188 329
189 330
190 331
191 kctxt->regs.eip = (sos_ui32_t)core_routine; 332 kctxt->regs.eip = (sos_ui32_t)core_routine;
192 333
193 334
194 kctxt->regs.cs 335 kctxt->regs.cs
195 = SOS_BUILD_SEGMENT_REG_VALUE(0, FALSE, SO 336 = SOS_BUILD_SEGMENT_REG_VALUE(0, FALSE, SOS_SEG_KCODE);
196 kctxt->regs.ds 337 kctxt->regs.ds
197 = SOS_BUILD_SEGMENT_REG_VALUE(0, FALSE, SO 338 = SOS_BUILD_SEGMENT_REG_VALUE(0, FALSE, SOS_SEG_KDATA);
198 kctxt->regs.es 339 kctxt->regs.es
199 = SOS_BUILD_SEGMENT_REG_VALUE(0, FALSE, SO 340 = SOS_BUILD_SEGMENT_REG_VALUE(0, FALSE, SOS_SEG_KDATA);
200 kctxt->regs.cpl0_ss 341 kctxt->regs.cpl0_ss
201 = SOS_BUILD_SEGMENT_REG_VALUE(0, FALSE, SO 342 = SOS_BUILD_SEGMENT_REG_VALUE(0, FALSE, SOS_SEG_KDATA);
202 343
203 344
204 345
205 kctxt->regs.eflags = (1 << 9); 346 kctxt->regs.eflags = (1 << 9);
206 347
207 348
208 *ctxt = (struct sos_cpu_state*) kctxt; 349 *ctxt = (struct sos_cpu_state*) kctxt;
209 350
210 return SOS_OK; 351 return SOS_OK;
211 } 352 }
212 353
213 354
>> 355 sos_ret_t sos_cpu_ustate_init(struct sos_cpu_state **ctxt,
>> 356 sos_uaddr_t user_start_PC,
>> 357 sos_ui32_t user_start_arg1,
>> 358 sos_ui32_t user_start_arg2,
>> 359 sos_uaddr_t user_initial_SP,
>> 360 sos_vaddr_t kernel_stack_bottom,
>> 361 sos_size_t kernel_stack_size)
>> 362 {
>> 363
>> 364 struct sos_cpu_ustate *uctxt;
>> 365
>> 366
>> 367
>> 368
>> 369
>> 370
>> 371
>> 372 sos_vaddr_t uctxt_vaddr = kernel_stack_bottom
>> 373 + kernel_stack_size
>> 374 - sizeof(struct sos_cpu_ustate);
>> 375 uctxt = (struct sos_cpu_ustate*)uctxt_vaddr;
>> 376
>> 377
>> 378 #ifdef SOS_CPU_STATE_DETECT_UNINIT_KERNEL_VARS
>> 379 memset((void*)kernel_stack_bottom,
>> 380 SOS_CPU_STATE_STACK_POISON,
>> 381 kernel_stack_size);
>> 382 #elif defined(SOS_CPU_STATE_DETECT_KERNEL_STACK_OVERFLOW)
>> 383 sos_cpu_state_prepare_detect_kernel_stack_overflow(kernel_stack_bottom,
>> 384 kernel_stack_size);
>> 385 #endif
>> 386
>> 387
>> 388
>> 389
>> 390
>> 391
>> 392
>> 393 memset(uctxt, 0x0, sizeof(struct sos_cpu_ustate));
>> 394
>> 395
>> 396
>> 397 uctxt->regs.eip = (sos_ui32_t)user_start_PC;
>> 398
>> 399
>> 400 uctxt->cpl3_esp = user_initial_SP;
>> 401
>> 402
>> 403
>> 404 uctxt->regs.eax = user_start_arg1;
>> 405 uctxt->regs.ebx = user_start_arg2;
>> 406
>> 407
>> 408 uctxt->regs.cs
>> 409 = SOS_BUILD_SEGMENT_REG_VALUE(3, FALSE, SOS_SEG_UCODE);
>> 410 uctxt->regs.ds
>> 411 = SOS_BUILD_SEGMENT_REG_VALUE(3, FALSE, SOS_SEG_UDATA);
>> 412 uctxt->regs.es
>> 413 = SOS_BUILD_SEGMENT_REG_VALUE(3, FALSE, SOS_SEG_UDATA);
>> 414 uctxt->cpl3_ss
>> 415 = SOS_BUILD_SEGMENT_REG_VALUE(3, FALSE, SOS_SEG_UDATA);
>> 416
>> 417
>> 418
>> 419
>> 420
>> 421 uctxt->regs.cpl0_ss
>> 422 = SOS_BUILD_SEGMENT_REG_VALUE(0, FALSE, SOS_SEG_KDATA);
>> 423
>> 424
>> 425
>> 426
>> 427 uctxt->regs.eflags = (1 << 9);
>> 428
>> 429
>> 430 *ctxt = (struct sos_cpu_state*) uctxt;
>> 431
>> 432 return SOS_OK;
>> 433 }
>> 434
>> 435
>> 436 sos_ret_t
>> 437 sos_cpu_context_is_in_user_mode(const struct sos_cpu_state *ctxt)
>> 438 {
>> 439
>> 440
>> 441 switch (GET_CPU_CS_REGISTER_VALUE(ctxt->cs))
>> 442 {
>> 443 case SOS_BUILD_SEGMENT_REG_VALUE(3, FALSE, SOS_SEG_UCODE):
>> 444 return TRUE;
>> 445 break;
>> 446
>> 447 case SOS_BUILD_SEGMENT_REG_VALUE(0, FALSE, SOS_SEG_KCODE):
>> 448 return FALSE;
>> 449 break;
>> 450
>> 451 default:
>> 452 SOS_FATAL_ERROR("Invalid saved context Code segment register: 0x%x (k=%x, u=%x) !",
>> 453 (unsigned) GET_CPU_CS_REGISTER_VALUE(ctxt->cs),
>> 454 SOS_BUILD_SEGMENT_REG_VALUE(0, FALSE, SOS_SEG_KCODE),
>> 455 SOS_BUILD_SEGMENT_REG_VALUE(3, FALSE, SOS_SEG_UCODE));
>> 456 break;
>> 457 }
>> 458
>> 459
>> 460 return -SOS_EFATAL;
>> 461 }
>> 462
>> 463
#if defined(SOS_CPU_STATE_DETECT_KERNEL_STACK_OVERFLOW)
/**
 * Write the poison pattern over the bottom (lowest addresses) of the
 * stack, so that a later overflow can be detected.
 *
 * FIX(review): the definition previously took an extra, unused
 * "const struct sos_cpu_state *ctxt" first parameter, while both call
 * sites in this file pass only (stack_bottom, stack_size) — an
 * argument-count mismatch.  The unused parameter is dropped so the
 * definition agrees with its callers.
 */
void
sos_cpu_state_prepare_detect_kernel_stack_overflow(sos_vaddr_t stack_bottom,
                                                   sos_size_t stack_size)
{
  sos_size_t poison_size = SOS_CPU_STATE_DETECT_KERNEL_STACK_OVERFLOW;
  if (poison_size > stack_size)
    poison_size = stack_size;

  memset((void*)stack_bottom, SOS_CPU_STATE_STACK_POISON, poison_size);
}


/**
 * Halt the kernel if the saved context lies outside its stack, or if
 * the poison pattern at the stack bottom has been overwritten (ie the
 * stack overflowed at some point).
 */
void
sos_cpu_state_detect_kernel_stack_overflow(const struct sos_cpu_state *ctxt,
                                           sos_vaddr_t stack_bottom,
                                           sos_size_t stack_size)
{
  unsigned char *c;
  sos_size_t i; /* unsigned: compared against stack_size below */

  /* The saved context must fit inside the stack */
  SOS_ASSERT_FATAL(((sos_vaddr_t)ctxt) >= stack_bottom);
  SOS_ASSERT_FATAL(((sos_vaddr_t)ctxt) + sizeof(struct sos_cpu_kstate)
                   <= stack_bottom + stack_size);

  /* Every poison byte must be intact */
  for (c = (unsigned char*) stack_bottom, i = 0 ;
       (i < SOS_CPU_STATE_DETECT_KERNEL_STACK_OVERFLOW) && (i < stack_size) ;
       c++, i++)
    {
      SOS_ASSERT_FATAL(SOS_CPU_STATE_STACK_POISON == *c);
    }
}
#endif
253 503
254 504
255 505
256 506
257 507
258 508
259 509
260 sos_vaddr_t sos_cpu_context_get_PC(const struc 510 sos_vaddr_t sos_cpu_context_get_PC(const struct sos_cpu_state *ctxt)
261 { 511 {
262 SOS_ASSERT_FATAL(NULL != ctxt); 512 SOS_ASSERT_FATAL(NULL != ctxt);
263 513
264 514
265 515
266 return ctxt->eip; 516 return ctxt->eip;
267 } 517 }
268 518
269 519
270 sos_vaddr_t sos_cpu_context_get_SP(const struc 520 sos_vaddr_t sos_cpu_context_get_SP(const struct sos_cpu_state *ctxt)
271 { 521 {
272 SOS_ASSERT_FATAL(NULL != ctxt); 522 SOS_ASSERT_FATAL(NULL != ctxt);
273 523
>> 524
>> 525
>> 526
>> 527 if (TRUE == sos_cpu_context_is_in_user_mode(ctxt))
>> 528 {
>> 529 struct sos_cpu_ustate * uctxt = (struct sos_cpu_ustate*)ctxt;
>> 530 return uctxt->cpl3_esp;
>> 531 }
>> 532
274 533
275 534
276 535
277 return (sos_vaddr_t)ctxt; 536 return (sos_vaddr_t)ctxt;
278 } 537 }
279 538
280 539
>> 540 sos_ret_t
>> 541 sos_cpu_context_set_EX_return_address(struct sos_cpu_state *ctxt,
>> 542 sos_vaddr_t ret_vaddr)
>> 543 {
>> 544 ctxt->eip = ret_vaddr;
>> 545 return SOS_OK;
>> 546 }
>> 547
>> 548
281 void sos_cpu_context_dump(const struct sos_cpu 549 void sos_cpu_context_dump(const struct sos_cpu_state *ctxt)
282 { 550 {
283 char buf[128]; 551 char buf[128];
>> 552
284 snprintf(buf, sizeof(buf), 553 snprintf(buf, sizeof(buf),
285 "CPU: eip=%x esp=%x eflags=%x cs=%x !! 554 "CPU: eip=%x esp0=%x eflags=%x cs=%x ds=%x ss0=%x err=%x",
286 (unsigned)ctxt->eip, (unsigned)ctxt 555 (unsigned)ctxt->eip, (unsigned)ctxt, (unsigned)ctxt->eflags,
287 (unsigned)GET_CPU_CS_REGISTER_VALUE 556 (unsigned)GET_CPU_CS_REGISTER_VALUE(ctxt->cs), (unsigned)ctxt->ds,
288 (unsigned)ctxt->cpl0_ss, 557 (unsigned)ctxt->cpl0_ss,
289 (unsigned)ctxt->error_code); 558 (unsigned)ctxt->error_code);
>> 559 if (TRUE == sos_cpu_context_is_in_user_mode(ctxt))
>> 560 {
>> 561 struct sos_cpu_ustate * uctxt = (struct sos_cpu_ustate*)ctxt;
>> 562 snprintf(buf, sizeof(buf),
>> 563 "%s esp3=%x ss3=%x",
>> 564 buf, (unsigned)uctxt->cpl3_esp, (unsigned)uctxt->cpl3_ss);
>> 565 }
>> 566 else
>> 567 snprintf(buf, sizeof(buf), "%s [KERNEL MODE]", buf);
>> 568
290 sos_bochs_putstring(buf); sos_bochs_putstrin 569 sos_bochs_putstring(buf); sos_bochs_putstring("\n");
291 sos_x86_videomem_putstring(23, 0, 570 sos_x86_videomem_putstring(23, 0,
292 SOS_X86_VIDEO_FG_BLA !! 571 SOS_X86_VIDEO_FG_BLACK | SOS_X86_VIDEO_BG_LTGRAY,
293 buf); !! 572 buf);
294 } 573 }
295 574
296 575
297 576
298 577
299 578
300 579
301 580
302 sos_ui32_t sos_cpu_context_get_EX_info(const s 581 sos_ui32_t sos_cpu_context_get_EX_info(const struct sos_cpu_state *ctxt)
303 { 582 {
304 SOS_ASSERT_FATAL(NULL != ctxt); 583 SOS_ASSERT_FATAL(NULL != ctxt);
305 return ctxt->error_code; 584 return ctxt->error_code;
306 } 585 }
307 586
308 587
309 sos_vaddr_t 588 sos_vaddr_t
310 sos_cpu_context_get_EX_faulting_vaddr(const st 589 sos_cpu_context_get_EX_faulting_vaddr(const struct sos_cpu_state *ctxt)
311 { 590 {
312 sos_ui32_t cr2; 591 sos_ui32_t cr2;
313 592
314 593
315 594
316 595
317 596
318 597
319 598
320 599
321 600
322 601
323 602
324 603
325 604
326 605
327 asm volatile ("movl %%cr2, %0" 606 asm volatile ("movl %%cr2, %0"
328 :"=r"(cr2) 607 :"=r"(cr2)
329 : ); 608 : );
330 609
331 return cr2; 610 return cr2;
332 } 611 }
333 612
334 613
335 614
>> 615
>> 616
>> 617
>> 618
>> 619
>> 620
>> 621
>> 622
>> 623
>> 624
>> 625
>> 626
>> 627
>> 628
>> 629 inline
>> 630 sos_ret_t sos_syscall_get3args(const struct sos_cpu_state *user_ctxt,
>> 631 unsigned int *arg1,
>> 632 unsigned int *arg2,
>> 633 unsigned int *arg3)
>> 634 {
>> 635 *arg1 = user_ctxt->ebx;
>> 636 *arg2 = user_ctxt->ecx;
>> 637 *arg3 = user_ctxt->edx;
>> 638 return SOS_OK;
>> 639 }
>> 640
>> 641
>> 642 sos_ret_t sos_syscall_get1arg(const struct sos_cpu_state *user_ctxt,
>> 643 unsigned int *arg1)
>> 644 {
>> 645 unsigned int unused;
>> 646 return sos_syscall_get3args(user_ctxt, arg1, & unused, & unused);
>> 647 }
>> 648
>> 649
>> 650 sos_ret_t sos_syscall_get2args(const struct sos_cpu_state *user_ctxt,
>> 651 unsigned int *arg1,
>> 652 unsigned int *arg2)
>> 653 {
>> 654 unsigned int unused;
>> 655 return sos_syscall_get3args(user_ctxt, arg1, arg2, & unused);
>> 656 }
>> 657
>> 658
>> 659
>> 660
>> 661
>> 662
>> 663
>> 664 sos_ret_t sos_syscall_get4args(const struct sos_cpu_state *user_ctxt,
>> 665 unsigned int *arg1,
>> 666 unsigned int *arg2,
>> 667 unsigned int *arg3,
>> 668 unsigned int *arg4)
>> 669 {
>> 670 sos_uaddr_t uaddr_other_args;
>> 671 unsigned int other_args[2];
>> 672 sos_ret_t retval;
>> 673
>> 674
>> 675
>> 676 retval = sos_syscall_get3args(user_ctxt, arg1, arg2,
>> 677 (unsigned int *)& uaddr_other_args);
>> 678 if (SOS_OK != retval)
>> 679 return retval;
>> 680
>> 681
>> 682
>> 683 retval = sos_memcpy_from_user((sos_vaddr_t)other_args,
>> 684 (sos_uaddr_t)uaddr_other_args,
>> 685 sizeof(other_args));
>> 686 if (sizeof(other_args) != retval)
>> 687 return -SOS_EFAULT;
>> 688
>> 689 *arg3 = other_args[0];
>> 690 *arg4 = other_args[1];
>> 691 return SOS_OK;
>> 692 }
>> 693
>> 694
>> 695 sos_ret_t sos_syscall_get5args(const struct sos_cpu_state *user_ctxt,
>> 696 unsigned int *arg1,
>> 697 unsigned int *arg2,
>> 698 unsigned int *arg3,
>> 699 unsigned int *arg4,
>> 700 unsigned int *arg5)
>> 701 {
>> 702 sos_uaddr_t uaddr_other_args;
>> 703 unsigned int other_args[3];
>> 704 sos_ret_t retval;
>> 705
>> 706
>> 707
>> 708 retval = sos_syscall_get3args(user_ctxt, arg1, arg2,
>> 709 (unsigned int *)& uaddr_other_args);
>> 710 if (SOS_OK != retval)
>> 711 return retval;
>> 712
>> 713
>> 714
>> 715 retval = sos_memcpy_from_user((sos_vaddr_t)other_args,
>> 716 (sos_uaddr_t)uaddr_other_args,
>> 717 sizeof(other_args));
>> 718 if (sizeof(other_args) != retval)
>> 719 return -SOS_EFAULT;
>> 720
>> 721 *arg3 = other_args[0];
>> 722 *arg4 = other_args[1];
>> 723 *arg5 = other_args[2];
>> 724 return SOS_OK;
>> 725 }
>> 726
>> 727
>> 728 sos_ret_t sos_syscall_get6args(const struct sos_cpu_state *user_ctxt,
>> 729 unsigned int *arg1,
>> 730 unsigned int *arg2,
>> 731 unsigned int *arg3,
>> 732 unsigned int *arg4,
>> 733 unsigned int *arg5,
>> 734 unsigned int *arg6)
>> 735 {
>> 736 sos_uaddr_t uaddr_other_args;
>> 737 unsigned int other_args[4];
>> 738 sos_ret_t retval;
>> 739
>> 740
>> 741
>> 742 retval = sos_syscall_get3args(user_ctxt, arg1, arg2,
>> 743 (unsigned int *)& uaddr_other_args);
>> 744 if (SOS_OK != retval)
>> 745 return retval;
>> 746
>> 747
>> 748
>> 749 retval = sos_memcpy_from_user((sos_vaddr_t)other_args,
>> 750 (sos_uaddr_t)uaddr_other_args,
>> 751 sizeof(other_args));
>> 752 if (sizeof(other_args) != retval)
>> 753 return -SOS_EFAULT;
>> 754
>> 755 *arg3 = other_args[0];
>> 756 *arg4 = other_args[1];
>> 757 *arg5 = other_args[2];
>> 758 *arg6 = other_args[3];
>> 759 return SOS_OK;
>> 760 }
>> 761
>> 762
>> 763 sos_ret_t sos_syscall_get7args(const struct sos_cpu_state *user_ctxt,
>> 764 unsigned int *arg1,
>> 765 unsigned int *arg2,
>> 766 unsigned int *arg3,
>> 767 unsigned int *arg4,
>> 768 unsigned int *arg5,
>> 769 unsigned int *arg6,
>> 770 unsigned int *arg7)
>> 771 {
>> 772 sos_uaddr_t uaddr_other_args;
>> 773 unsigned int other_args[5];
>> 774 sos_ret_t retval;
>> 775
>> 776
>> 777
>> 778 retval = sos_syscall_get3args(user_ctxt, arg1, arg2,
>> 779 (unsigned int *)& uaddr_other_args);
>> 780 if (SOS_OK != retval)
>> 781 return retval;
>> 782
>> 783
>> 784
>> 785 retval = sos_memcpy_from_user((sos_vaddr_t)other_args,
>> 786 (sos_uaddr_t)uaddr_other_args,
>> 787 sizeof(other_args));
>> 788 if (sizeof(other_args) != retval)
>> 789 return -SOS_EFAULT;
>> 790
>> 791 *arg3 = other_args[0];
>> 792 *arg4 = other_args[1];
>> 793 *arg5 = other_args[2];
>> 794 *arg6 = other_args[3];
>> 795 *arg7 = other_args[4];
>> 796 return SOS_OK;
>> 797 }
>> 798
>> 799
>> 800 sos_ret_t sos_syscall_get8args(const struct sos_cpu_state *user_ctxt,
>> 801 unsigned int *arg1,
>> 802 unsigned int *arg2,
>> 803 unsigned int *arg3,
>> 804 unsigned int *arg4,
>> 805 unsigned int *arg5,
>> 806 unsigned int *arg6,
>> 807 unsigned int *arg7,
>> 808 unsigned int *arg8)
>> 809 {
>> 810 sos_uaddr_t uaddr_other_args;
>> 811 unsigned int other_args[6];
>> 812 sos_ret_t retval;
>> 813
>> 814
>> 815
>> 816 retval = sos_syscall_get3args(user_ctxt, arg1, arg2,
>> 817 (unsigned int *)& uaddr_other_args);
>> 818 if (SOS_OK != retval)
>> 819 return retval;
>> 820
>> 821
>> 822
>> 823 retval = sos_memcpy_from_user((sos_vaddr_t)other_args,
>> 824 (sos_uaddr_t)uaddr_other_args,
>> 825 sizeof(other_args));
>> 826 if (sizeof(other_args) != retval)
>> 827 return -SOS_EFAULT;
>> 828
>> 829 *arg3 = other_args[0];
>> 830 *arg4 = other_args[1];
>> 831 *arg5 = other_args[2];
>> 832 *arg6 = other_args[3];
>> 833 *arg7 = other_args[4];
>> 834 *arg8 = other_args[5];
>> 835 return SOS_OK;
>> 836 }
>> 837
>> 838
>> 839
336 840
337 841
338 842
339 843
340 sos_ui32_t sos_backtrace(const struct sos_cpu_ 844 sos_ui32_t sos_backtrace(const struct sos_cpu_state *cpu_state,
341 sos_ui32_t max_depth, 845 sos_ui32_t max_depth,
342 sos_vaddr_t stack_bot 846 sos_vaddr_t stack_bottom,
343 sos_size_t stack_size 847 sos_size_t stack_size,
344 sos_backtrace_callbac 848 sos_backtrace_callback_t * backtracer,
345 void *custom_arg) 849 void *custom_arg)
346 { 850 {
347 int depth; 851 int depth;
348 sos_vaddr_t callee_PC, caller_frame; 852 sos_vaddr_t callee_PC, caller_frame;
349 853
>> 854
>> 855 if ((NULL != cpu_state)
>> 856 &&
>> 857 (TRUE == sos_cpu_context_is_in_user_mode(cpu_state)))
>> 858 {
>> 859 return 0;
>> 860 }
>> 861
350 862
351 863
352 864
353 865
354 866
355 867
356 868
357 869
358 870
359 871
360 872
361 873
362 874
363 875
364 876
365 877
366 878
367 879
368 880
369 881
370 882
371 883
372 884
373 885
374 886
375 887
376 888
377 889
378 890
379 if (cpu_state) 891 if (cpu_state)
380 { 892 {
381 callee_PC = cpu_state->eip; 893 callee_PC = cpu_state->eip;
382 caller_frame = cpu_state->ebp; 894 caller_frame = cpu_state->ebp;
383 } 895 }
384 else 896 else
385 { 897 {
386 898
387 callee_PC = (sos_vaddr_t)__builtin_re 899 callee_PC = (sos_vaddr_t)__builtin_return_address(0);
388 caller_frame = (sos_vaddr_t)__builtin_fr 900 caller_frame = (sos_vaddr_t)__builtin_frame_address(1);
389 } 901 }
390 902
391 for(depth=0 ; depth < max_depth ; depth ++) 903 for(depth=0 ; depth < max_depth ; depth ++)
392 { 904 {
393 905
394 backtracer(callee_PC, caller_frame + 8, 906 backtracer(callee_PC, caller_frame + 8, depth, custom_arg);
395 907
396 908
397 if ( (caller_frame < stack_bottom) 909 if ( (caller_frame < stack_bottom)
398 || (caller_frame + 4 >= stack_botto 910 || (caller_frame + 4 >= stack_bottom + stack_size) )
399 return depth; 911 return depth;
400 912
401 913
402 callee_PC = *((sos_vaddr_t*) (caller_ 914 callee_PC = *((sos_vaddr_t*) (caller_frame + 4));
403 caller_frame = *((sos_vaddr_t*) caller_f 915 caller_frame = *((sos_vaddr_t*) caller_frame);
404 } 916 }
405 917
406 return depth; 918 return depth;
>> 919 }
>> 920
>> 921
>> 922
>> 923
>> 924
>> 925
>> 926
>> 927
>> 928
>> 929
>> 930
>> 931 void
>> 932 sos_cpu_context_update_kernel_tss(struct sos_cpu_state *next_ctxt)
>> 933 {
>> 934
>> 935 if (sos_cpu_context_is_in_user_mode(next_ctxt))
>> 936 {
>> 937
>> 938
>> 939
>> 940
>> 941
>> 942
>> 943
>> 944
>> 945
>> 946
>> 947
>> 948 kernel_tss.esp0 = ((sos_vaddr_t)next_ctxt)
>> 949 + sizeof(struct sos_cpu_ustate);
>> 950
>> 951
>> 952
>> 953 }
>> 954 else
>> 955 {
>> 956
>> 957
>> 958 }
407 } 959 }