001
002
003
004
005
006
007
008
009
010
011
012
013
014
015
016
017
018
019
020
021 #include <sos/assert.h>
022 #include <sos/klibc.h>
023 #include <drivers/bochs.h>
024 #include <drivers/x86_videomem.h>
025 #include <hwcore/segment.h>
026
027 #include "cpu_context.h"
028
029
030
031
032
033
034
035
036
037
038
039
040
041
042
/**
 * The saved CPU state of a kernel thread, built on the thread's stack.
 *
 * NOTE(review): the field order and the packed attribute strongly
 * suggest this mirrors the exact push/pop order of context-switch
 * assembly code that is not visible in this file — do NOT reorder,
 * resize or unpack the fields without checking that assembly.
 */
struct sos_cpu_kstate {

  /* Segment selectors, stored as 16-bit values */
  sos_ui16_t gs;
  sos_ui16_t fs;
  sos_ui16_t es;
  sos_ui16_t ds;
  sos_ui16_t ss;
  sos_ui16_t alignment_padding; /* keep the 32-bit fields below aligned */

  /* General-purpose registers */
  sos_ui32_t eax;
  sos_ui32_t ebx;
  sos_ui32_t ecx;
  sos_ui32_t edx;
  sos_ui32_t esi;
  sos_ui32_t edi;
  sos_ui32_t ebp;

  /* Exception/interrupt-style frame: error code plus the state needed
     to resume execution (see sos_cpu_kstate_init(), which fills eip,
     cs and eflags to fake a return into core_routine()) */
  sos_ui32_t error_code;
  sos_vaddr_t eip;
  sos_ui32_t cs;
  sos_ui32_t eflags;

} __attribute__((packed));
069
070
071 static void core_routine (sos_cpu_kstate_function_arg1_t *start_func,
072 sos_ui32_t start_arg,
073 sos_cpu_kstate_function_arg1_t *exit_func,
074 sos_ui32_t exit_arg)
075 __attribute__((noreturn));
076
077 static void core_routine (sos_cpu_kstate_function_arg1_t *start_func,
078 sos_ui32_t start_arg,
079 sos_cpu_kstate_function_arg1_t *exit_func,
080 sos_ui32_t exit_arg)
081 {
082 start_func(start_arg);
083 exit_func(exit_arg);
084
085 SOS_ASSERT_FATAL(! "The exit function of the thread should NOT return !");
086 for(;;);
087 }
088
089
090 sos_ret_t sos_cpu_kstate_init(struct sos_cpu_kstate **ctxt,
091 sos_cpu_kstate_function_arg1_t *start_func,
092 sos_ui32_t start_arg,
093 sos_vaddr_t stack_bottom,
094 sos_size_t stack_size,
095 sos_cpu_kstate_function_arg1_t *exit_func,
096 sos_ui32_t exit_arg)
097 {
098
099
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120 sos_vaddr_t tmp_vaddr = stack_bottom + stack_size;
121 sos_ui32_t *stack = (sos_ui32_t*)tmp_vaddr;
122
123
124 #ifdef SOS_CPU_KSTATE_DETECT_UNINIT_VARS
125 memset((void*)stack_bottom, SOS_CPU_KSTATE_STACK_POISON, stack_size);
126 #elif defined(SOS_CPU_KSTATE_DETECT_STACK_OVERFLOW)
127 sos_cpu_kstate_prepare_detect_stack_overflow(stack_bottom, stack_size);
128 #endif
129
130
131
132 *(--stack) = exit_arg;
133 *(--stack) = (sos_ui32_t)exit_func;
134 *(--stack) = start_arg;
135 *(--stack) = (sos_ui32_t)start_func;
136 *(--stack) = 0;
137
138
139
140
141
142
143
144
145
146 tmp_vaddr = ((sos_vaddr_t)stack) - sizeof(struct sos_cpu_kstate);
147 *ctxt = (struct sos_cpu_kstate*)tmp_vaddr;
148
149
150 memset(*ctxt, 0x0, sizeof(struct sos_cpu_kstate));
151
152
153
154 (*ctxt)->eip = (sos_ui32_t)core_routine;
155
156
157 (*ctxt)->cs = SOS_BUILD_SEGMENT_REG_VALUE(0, 0, SOS_SEG_KCODE);
158 (*ctxt)->ds = SOS_BUILD_SEGMENT_REG_VALUE(0, 0, SOS_SEG_KDATA);
159 (*ctxt)->es = SOS_BUILD_SEGMENT_REG_VALUE(0, 0, SOS_SEG_KDATA);
160 (*ctxt)->ss = SOS_BUILD_SEGMENT_REG_VALUE(0, 0, SOS_SEG_KDATA);
161
162
163
164 (*ctxt)->eflags = (1 << 9);
165
166 return SOS_OK;
167 }
168
169
170 #if defined(SOS_CPU_KSTATE_DETECT_STACK_OVERFLOW)
171 void
172 sos_cpu_kstate_prepare_detect_stack_overflow(const struct sos_cpu_kstate *ctxt,
173 sos_vaddr_t stack_bottom,
174 sos_size_t stack_size)
175 {
176 sos_size_t poison_size = SOS_CPU_KSTATE_DETECT_STACK_OVERFLOW;
177 if (poison_size > stack_size)
178 poison_size = stack_size;
179
180 memset((void*)stack_bottom, SOS_CPU_KSTATE_STACK_POISON, poison_size);
181 }
182
183
184 void
185 sos_cpu_kstate_detect_stack_overflow(const struct sos_cpu_kstate *ctxt,
186 sos_vaddr_t stack_bottom,
187 sos_size_t stack_size)
188 {
189 unsigned char *c;
190 int i;
191
192 SOS_ASSERT_FATAL(((sos_vaddr_t)ctxt) >= stack_bottom);
193 SOS_ASSERT_FATAL(((sos_vaddr_t)ctxt) + sizeof(struct sos_cpu_kstate)
194 <= stack_bottom + stack_size);
195 for (c = (unsigned char*) stack_bottom, i = 0 ;
196 (i < SOS_CPU_KSTATE_DETECT_STACK_OVERFLOW) && (i < stack_size) ;
197 c++, i++)
198 {
199 SOS_ASSERT_FATAL(SOS_CPU_KSTATE_STACK_POISON == *c);
200 }
201 }
202 #endif
203
204
205 sos_vaddr_t sos_cpu_kstate_get_PC(const struct sos_cpu_kstate *ctxt)
206 {
207 SOS_ASSERT_FATAL(NULL != ctxt);
208 return ctxt->eip;
209 }
210
211
212 sos_vaddr_t sos_cpu_kstate_get_SP(const struct sos_cpu_kstate *ctxt)
213 {
214 SOS_ASSERT_FATAL(NULL != ctxt);
215 return (sos_vaddr_t)ctxt;
216 }
217
218
219 void sos_cpu_kstate_dump(const struct sos_cpu_kstate *ctxt)
220 {
221 char buf[128];
222 snprintf(buf, sizeof(buf),
223 "CPU: eip=%x esp=%x eflags=%x cs=%x ds=%x ss=%x err=%x",
224 (unsigned)ctxt->eip, (unsigned)ctxt, (unsigned)ctxt->eflags,
225 (unsigned)ctxt->cs, (unsigned)ctxt->ds, (unsigned)ctxt->ss,
226 (unsigned)ctxt->error_code);
227 sos_bochs_putstring(buf); sos_bochs_putstring("\n");
228 sos_x86_videomem_putstring(23, 0,
229 SOS_X86_VIDEO_FG_BLACK | SOS_X86_VIDEO_BG_LTGRAY,
230 buf);
231 }
232
233
234 sos_ui32_t sos_cpu_kstate_get_EX_info(const struct sos_cpu_kstate *ctxt)
235 {
236 SOS_ASSERT_FATAL(NULL != ctxt);
237 return ctxt->error_code;
238 }
239
240
/**
 * Return the faulting virtual address of the most recent page fault,
 * read from the x86 CR2 register.
 *
 * @param ctxt  Unused: CR2 is a per-CPU register, not part of the saved
 *              context.  NOTE(review): the value is only meaningful
 *              while handling the page-fault exception, before another
 *              page fault overwrites CR2 — confirm all callers respect
 *              this.
 */
sos_vaddr_t
sos_cpu_kstate_get_EX_faulting_vaddr(const struct sos_cpu_kstate *ctxt)
{
  sos_ui32_t cr2;

  /* CR2 holds the linear address that caused the last page fault */
  asm volatile ("movl %%cr2, %0"
		:"=r"(cr2)
		: );

  return cr2;
}
254
255
256 sos_ui32_t sos_backtrace(const struct sos_cpu_kstate *cpu_kstate,
257 sos_ui32_t max_depth,
258 sos_vaddr_t stack_bottom,
259 sos_size_t stack_size,
260 sos_backtrace_callback_t * backtracer,
261 void *custom_arg)
262 {
263 int depth;
264 sos_vaddr_t callee_PC, caller_frame;
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295 if (cpu_kstate)
296 {
297 callee_PC = cpu_kstate->eip;
298 caller_frame = cpu_kstate->ebp;
299 }
300 else
301 {
302
303 callee_PC = (sos_vaddr_t)__builtin_return_address(0);
304 caller_frame = (sos_vaddr_t)__builtin_frame_address(1);
305 }
306
307 for(depth=0 ; depth < max_depth ; depth ++)
308 {
309
310 backtracer(callee_PC, caller_frame + 8, depth, custom_arg);
311
312
313 if ( (caller_frame < stack_bottom)
314 || (caller_frame + 4 >= stack_bottom + stack_size) )
315 return depth;
316
317
318 callee_PC = *((sos_vaddr_t*) (caller_frame + 4));
319 caller_frame = *((sos_vaddr_t*) caller_frame);
320 }
321
322 return depth;
323 }