001
002
003
004
005
006
007
008
009
010
011
012
013
014
015
016
017
018
019
020
021 #include <sos/assert.h>
022 #include <sos/klibc.h>
023 #include <drivers/bochs.h>
024 #include <drivers/x86_videomem.h>
025 #include <hwcore/segment.h>
026
027 #include "cpu_context.h"
028
029
030
031
032
033
034
035
036
037
038
039
040
041
042
/**
 * Saved CPU register state of a thread on the ia32.
 *
 * NOTE(review): the field order presumably mirrors the exact push
 * sequence of the assembly context-switch / interrupt stubs — confirm
 * against the .S counterpart before reordering anything.  The structure
 * is packed so that no compiler padding disturbs that mapping.
 */
struct sos_cpu_state {

  /* Segment registers, stored as 16 bit quantities */
  sos_ui16_t gs;
  sos_ui16_t fs;
  sos_ui16_t es;
  sos_ui16_t ds;
  sos_ui16_t cpl0_ss;   /* stack segment for privilege level 0 —
                           TODO confirm exact usage against the stubs */

  /* Keep the 32 bit fields below 4-byte aligned: the five 16 bit
     fields above total 10 bytes, +2 bytes of padding = 12 */
  sos_ui16_t alignment_padding;

  /* General purpose registers */
  sos_ui32_t eax;
  sos_ui32_t ebx;
  sos_ui32_t ecx;
  sos_ui32_t edx;
  sos_ui32_t esi;
  sos_ui32_t edi;
  sos_ui32_t ebp;

  /* Exception error code (or a dummy value), then the interrupted
     instruction pointer and its code segment */
  sos_ui32_t error_code;
  sos_vaddr_t eip;
  sos_ui32_t cs;        /* stored as 32 bits; only the low 16 bits carry
                           the selector, see GET_CPU_CS_REGISTER_VALUE() */

  /* CPU flags at the time the state was saved */
  sos_ui32_t eflags;

} __attribute__((packed));
072
073
074
075
076
077
078
079
080
081
082
083
084
085
086
/* Extract the 16 bit CS segment selector from the 32 bit `cs` field of
   struct sos_cpu_state (only the low 16 bits are meaningful). */
#define GET_CPU_CS_REGISTER_VALUE(pushed_ui32_cs_value) \
  ( (pushed_ui32_cs_value) & 0xffff )
089
090
091
092
093
/**
 * CPU context of a kernel thread: currently just the saved register
 * state itself.  Kept as a distinct type — presumably so that kernel
 * and (future) user contexts can diverge without changing the API;
 * TODO confirm against the project headers.
 */
struct sos_cpu_kstate
{
  struct sos_cpu_state regs;
} __attribute__((packed));
098
099
100
101
102
103
104
105 static void core_routine (sos_cpu_kstate_function_arg1_t *start_func,
106 sos_ui32_t start_arg,
107 sos_cpu_kstate_function_arg1_t *exit_func,
108 sos_ui32_t exit_arg)
109 __attribute__((noreturn));
110
111 static void core_routine (sos_cpu_kstate_function_arg1_t *start_func,
112 sos_ui32_t start_arg,
113 sos_cpu_kstate_function_arg1_t *exit_func,
114 sos_ui32_t exit_arg)
115 {
116 start_func(start_arg);
117 exit_func(exit_arg);
118
119 SOS_ASSERT_FATAL(! "The exit function of the thread should NOT return !");
120 for(;;);
121 }
122
123
/**
 * Build the initial CPU context of a kernel thread on its own stack.
 *
 * @param ctxt         Out: set to the address of the prepared register
 *                     snapshot, which lives at the (new) top of the stack
 * @param start_func   Thread body, run first by core_routine()
 * @param start_arg    Argument passed to start_func
 * @param stack_bottom Lowest address of the thread's kernel stack
 * @param stack_size   Size in bytes of that stack
 * @param exit_func    Run when start_func returns; must never return
 * @param exit_arg     Argument passed to exit_func
 * @return SOS_OK
 */
sos_ret_t sos_cpu_kstate_init(struct sos_cpu_state **ctxt,
			      sos_cpu_kstate_function_arg1_t *start_func,
			      sos_ui32_t start_arg,
			      sos_vaddr_t stack_bottom,
			      sos_size_t stack_size,
			      sos_cpu_kstate_function_arg1_t *exit_func,
			      sos_ui32_t exit_arg)
{
  /* Future register snapshot of the new thread, placed at the top of
     its stack */
  struct sos_cpu_kstate *kctxt;

  /* Start at the highest address of the stack: the ia32 stack grows
     downward */
  sos_vaddr_t tmp_vaddr = stack_bottom + stack_size;
  sos_ui32_t *stack = (sos_ui32_t*)tmp_vaddr;

  /* Optional debugging aids: poison the whole stack, or only its
     lowest bytes for overflow detection */
#ifdef SOS_CPU_STATE_DETECT_UNINIT_KERNEL_VARS
  memset((void*)stack_bottom, SOS_CPU_STATE_STACK_POISON, stack_size);
#elif defined(SOS_CPU_STATE_DETECT_KERNEL_STACK_OVERFLOW)
  sos_cpu_state_prepare_detect_kernel_stack_overflow(stack_bottom, stack_size);
#endif

  /* Fake activation record for core_routine(): push its 4 arguments
     (right to left — assumes the cdecl calling convention, TODO
     confirm against the build flags), then a dummy return address.
     core_routine() never returns, so 0 is safe. */
  *(--stack) = exit_arg;
  *(--stack) = (sos_ui32_t)exit_func;
  *(--stack) = start_arg;
  *(--stack) = (sos_ui32_t)start_func;
  *(--stack) = 0;

  /* Reserve room for the register snapshot just below the fake frame */
  tmp_vaddr = ((sos_vaddr_t)stack) - sizeof(struct sos_cpu_kstate);
  kctxt = (struct sos_cpu_kstate*)tmp_vaddr;

  /* All registers start out as zero */
  memset(kctxt, 0x0, sizeof(struct sos_cpu_kstate));

  /* The thread begins its execution at core_routine() */
  kctxt->regs.eip = (sos_ui32_t)core_routine;

  /* Kernel code/data segment selectors, privilege level 0 */
  kctxt->regs.cs
    = SOS_BUILD_SEGMENT_REG_VALUE(0, FALSE, SOS_SEG_KCODE);
  kctxt->regs.ds
    = SOS_BUILD_SEGMENT_REG_VALUE(0, FALSE, SOS_SEG_KDATA);
  kctxt->regs.es
    = SOS_BUILD_SEGMENT_REG_VALUE(0, FALSE, SOS_SEG_KDATA);
  kctxt->regs.cpl0_ss
    = SOS_BUILD_SEGMENT_REG_VALUE(0, FALSE, SOS_SEG_KDATA);

  /* EFLAGS bit 9 is IF: the thread starts with interrupts enabled */
  kctxt->regs.eflags = (1 << 9);

  /* Hand the address of the snapshot back to the caller */
  *ctxt = (struct sos_cpu_state*) kctxt;

  return SOS_OK;
}
212
213
214 #if defined(SOS_CPU_STATE_DETECT_KERNEL_STACK_OVERFLOW)
215 void
216 sos_cpu_state_prepare_detect_kernel_stack_overflow(const struct sos_cpu_state *ctxt,
217 sos_vaddr_t stack_bottom,
218 sos_size_t stack_size)
219 {
220 sos_size_t poison_size = SOS_CPU_STATE_DETECT_KERNEL_STACK_OVERFLOW;
221 if (poison_size > stack_size)
222 poison_size = stack_size;
223
224 memset((void*)stack_bottom, SOS_CPU_STATE_STACK_POISON, poison_size);
225 }
226
227
228 void
229 sos_cpu_state_detect_kernel_stack_overflow(const struct sos_cpu_state *ctxt,
230 sos_vaddr_t stack_bottom,
231 sos_size_t stack_size)
232 {
233 unsigned char *c;
234 int i;
235
236
237
238
239
240 SOS_ASSERT_FATAL(((sos_vaddr_t)ctxt) >= stack_bottom);
241 SOS_ASSERT_FATAL(((sos_vaddr_t)ctxt) + sizeof(struct sos_cpu_kstate)
242 <= stack_bottom + stack_size);
243
244
245 for (c = (unsigned char*) stack_bottom, i = 0 ;
246 (i < SOS_CPU_STATE_DETECT_KERNEL_STACK_OVERFLOW) && (i < stack_size) ;
247 c++, i++)
248 {
249 SOS_ASSERT_FATAL(SOS_CPU_STATE_STACK_POISON == *c);
250 }
251 }
252 #endif
253
254
255
256
257
258
259
260 sos_vaddr_t sos_cpu_context_get_PC(const struct sos_cpu_state *ctxt)
261 {
262 SOS_ASSERT_FATAL(NULL != ctxt);
263
264
265
266 return ctxt->eip;
267 }
268
269
270 sos_vaddr_t sos_cpu_context_get_SP(const struct sos_cpu_state *ctxt)
271 {
272 SOS_ASSERT_FATAL(NULL != ctxt);
273
274
275
276
277 return (sos_vaddr_t)ctxt;
278 }
279
280
281 void sos_cpu_context_dump(const struct sos_cpu_state *ctxt)
282 {
283 char buf[128];
284 snprintf(buf, sizeof(buf),
285 "CPU: eip=%x esp=%x eflags=%x cs=%x ds=%x ss=%x err=%x",
286 (unsigned)ctxt->eip, (unsigned)ctxt, (unsigned)ctxt->eflags,
287 (unsigned)GET_CPU_CS_REGISTER_VALUE(ctxt->cs), (unsigned)ctxt->ds,
288 (unsigned)ctxt->cpl0_ss,
289 (unsigned)ctxt->error_code);
290 sos_bochs_putstring(buf); sos_bochs_putstring("\n");
291 sos_x86_videomem_putstring(23, 0,
292 SOS_X86_VIDEO_FG_BLACK | SOS_X86_VIDEO_BG_LTGRAY,
293 buf);
294 }
295
296
297
298
299
300
301
302 sos_ui32_t sos_cpu_context_get_EX_info(const struct sos_cpu_state *ctxt)
303 {
304 SOS_ASSERT_FATAL(NULL != ctxt);
305 return ctxt->error_code;
306 }
307
308
309 sos_vaddr_t
310 sos_cpu_context_get_EX_faulting_vaddr(const struct sos_cpu_state *ctxt)
311 {
312 sos_ui32_t cr2;
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327 asm volatile ("movl %%cr2, %0"
328 :"=r"(cr2)
329 : );
330
331 return cr2;
332 }
333
334
335
336
337
338
339
340 sos_ui32_t sos_backtrace(const struct sos_cpu_state *cpu_state,
341 sos_ui32_t max_depth,
342 sos_vaddr_t stack_bottom,
343 sos_size_t stack_size,
344 sos_backtrace_callback_t * backtracer,
345 void *custom_arg)
346 {
347 int depth;
348 sos_vaddr_t callee_PC, caller_frame;
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379 if (cpu_state)
380 {
381 callee_PC = cpu_state->eip;
382 caller_frame = cpu_state->ebp;
383 }
384 else
385 {
386
387 callee_PC = (sos_vaddr_t)__builtin_return_address(0);
388 caller_frame = (sos_vaddr_t)__builtin_frame_address(1);
389 }
390
391 for(depth=0 ; depth < max_depth ; depth ++)
392 {
393
394 backtracer(callee_PC, caller_frame + 8, depth, custom_arg);
395
396
397 if ( (caller_frame < stack_bottom)
398 || (caller_frame + 4 >= stack_bottom + stack_size) )
399 return depth;
400
401
402 callee_PC = *((sos_vaddr_t*) (caller_frame + 4));
403 caller_frame = *((sos_vaddr_t*) caller_frame);
404 }
405
406 return depth;
407 }