SimpleOS

LXR

Navigation



Site hébergé par : enix

The LXR Cross Referencer for SOS

[ source navigation ]
[ diff markup ]
[ identifier search ]
[ general search ]
 
 
Article:1 ] [ 2 ] [ 3 ] [ 4 ] [ 5 ] [ 6 ] [ 6.5 ] [ 7 ] [ 7.5 ] [ 8 ] [ 9 ] [ 9.5 ]

001 /* Copyright (C) 2005  David Decotigny
002    Copyright (C) 2000-2004, The KOS team
003 
004    This program is free software; you can redistribute it and/or
005    modify it under the terms of the GNU General Public License
006    as published by the Free Software Foundation; either version 2
007    of the License, or (at your option) any later version.
008    
009    This program is distributed in the hope that it will be useful,
010    but WITHOUT ANY WARRANTY; without even the implied warranty of
011    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
012    GNU General Public License for more details.
013    
014    You should have received a copy of the GNU General Public License
015    along with this program; if not, write to the Free Software
016    Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
017    USA. 
018 */
019 
020 
021 #include <sos/assert.h>
022 #include <sos/klibc.h>
023 #include <drivers/bochs.h>
024 #include <drivers/x86_videomem.h>
025 #include <hwcore/segment.h>
026 
027 #include "cpu_context.h"
028 
029 
030 /**
031  * Here is the definition of a CPU context for IA32 processors. This
032  * is a SOS convention, not a specification given by the IA32
033  * spec. However there is a strong constraint related to the x86
034  * interrupt handling specification: the top of the stack MUST be
035  * compatible with the 'iret' instruction, ie there must be the
036  * err_code (might be 0), eip, cs and eflags of the destination
037  * context in that order (see Intel x86 specs vol 3, figure 5-4).
038  *
039  * @note IMPORTANT: This definition MUST be consistent with the way
040  * the registers are stored on the stack in
041  * irq_wrappers.S/exception_wrappers.S !!! Hence the constraint above.
042  */
043 struct sos_cpu_state {
044   /* (Lower addresses) */
045 
046   /* These are SOS convention */
047   sos_ui16_t  gs;
048   sos_ui16_t  fs;
049   sos_ui16_t  es;
050   sos_ui16_t  ds;
051   sos_ui16_t  cpl0_ss; /* This is ALWAYS the Stack Segment of the
052                           Kernel context (CPL0) of the interrupted
053                           thread, even for a user thread */
054   sos_ui16_t  alignment_padding; /* unused */
055   sos_ui32_t  eax;
056   sos_ui32_t  ebx;
057   sos_ui32_t  ecx;
058   sos_ui32_t  edx;
059   sos_ui32_t  esi;
060   sos_ui32_t  edi;
061   sos_ui32_t  ebp;
062 
063   /* MUST NEVER CHANGE (dependent on the IA32 iret instruction) */
064   sos_ui32_t  error_code;
065   sos_vaddr_t eip;
066   sos_ui32_t  cs; /* 32bits according to the specs ! However, the CS
067                      register is really 16bits long */
068   sos_ui32_t  eflags;
069 
070   /* (Higher addresses) */
071 } __attribute__((packed));
072 
073 
074 /**
075  * The CS value pushed on the stack by the CPU upon interrupt, and
076  * needed by the iret instruction, is 32bits long while the real CPU
077  * CS register is 16bits only: this macro simply retrieves the CPU
078  * "CS" register value from the CS value pushed on the stack by the
079  * CPU upon interrupt.
080  *
081  * The remaining 16bits pushed by the CPU should be considered
082  * "reserved" and architecture dependent. IMHO, the specs don't say
083  * anything about them. Considering that some architectures generate
084  * non-zero values for these 16bits (at least Cyrix), we'd better
085  * ignore them.
086  */
087 #define GET_CPU_CS_REGISTER_VALUE(pushed_ui32_cs_value) \
088   ( (pushed_ui32_cs_value) & 0xffff )
089 
090 
091 /**
092  * Structure of an interrupted Kernel thread's context
093  */
094 struct sos_cpu_kstate
095 {
096   struct sos_cpu_state regs;
097 } __attribute__((packed));
098 
099 
100 /**
101  * THE main operation of a kernel thread. This routine calls the
102  * kernel thread function start_func and calls exit_func when
103  * start_func returns.
104  */
105 static void core_routine (sos_cpu_kstate_function_arg1_t *start_func,
106                           sos_ui32_t start_arg,
107                           sos_cpu_kstate_function_arg1_t *exit_func,
108                           sos_ui32_t exit_arg)
109      __attribute__((noreturn));
110 
111 static void core_routine (sos_cpu_kstate_function_arg1_t *start_func,
112                           sos_ui32_t start_arg,
113                           sos_cpu_kstate_function_arg1_t *exit_func,
114                           sos_ui32_t exit_arg)
115 {
116   start_func(start_arg);
117   exit_func(exit_arg);
118 
119   SOS_ASSERT_FATAL(! "The exit function of the thread should NOT return !");
120   for(;;);
121 }
122 
123 
124 sos_ret_t sos_cpu_kstate_init(struct sos_cpu_state **ctxt,
125                               sos_cpu_kstate_function_arg1_t *start_func,
126                               sos_ui32_t  start_arg,
127                               sos_vaddr_t stack_bottom,
128                               sos_size_t  stack_size,
129                               sos_cpu_kstate_function_arg1_t *exit_func,
130                               sos_ui32_t  exit_arg)
131 {
132   /* We are initializing a Kernel thread's context */
133   struct sos_cpu_kstate *kctxt;
134 
135   /* This is a critical internal function, so that it is assumed that
136      the caller knows what he does: we legitimally assume that values
137      for ctxt, start_func, stack_* and exit_func are allways VALID ! */
138 
139   /* Setup the stack.
140    *
141    * On x86, the stack goes downward. Each frame is configured this
142    * way (higher addresses first):
143    *
144    *  - (optional unused space. As of gcc 3.3, this space is 24 bytes)
145    *  - arg n
146    *  - arg n-1
147    *  - ...
148    *  - arg 1
149    *  - return instruction address: The address the function returns to
150    *    once finished
151    *  - local variables
152    *
153    * The remaining of the code should be read from the end upward to
154    * understand how the processor will handle it.
155    */
156 
157   sos_vaddr_t tmp_vaddr = stack_bottom + stack_size;
158   sos_ui32_t *stack = (sos_ui32_t*)tmp_vaddr;
159 
160   /* If needed, poison the stack */
161 #ifdef SOS_CPU_STATE_DETECT_UNINIT_KERNEL_VARS
162   memset((void*)stack_bottom, SOS_CPU_STATE_STACK_POISON, stack_size);
163 #elif defined(SOS_CPU_STATE_DETECT_KERNEL_STACK_OVERFLOW)
164   sos_cpu_state_prepare_detect_kernel_stack_overflow(stack_bottom, stack_size);
165 #endif
166 
167   /* Simulate a call to the core_routine() function: prepare its
168      arguments */
169   *(--stack) = exit_arg;
170   *(--stack) = (sos_ui32_t)exit_func;
171   *(--stack) = start_arg;
172   *(--stack) = (sos_ui32_t)start_func;
173   *(--stack) = 0; /* Return address of core_routine => force page fault */
174 
175   /*
176    * Setup the initial context structure, so that the CPU will execute
177    * the function core_routine() once this new context has been
178    * restored on CPU
179    */
180 
181   /* Compute the base address of the structure, which must be located
182      below the previous elements */
183   tmp_vaddr  = ((sos_vaddr_t)stack) - sizeof(struct sos_cpu_kstate);
184   kctxt = (struct sos_cpu_kstate*)tmp_vaddr;
185 
186   /* Initialize the CPU context structure */
187   memset(kctxt, 0x0, sizeof(struct sos_cpu_kstate));
188 
189   /* Tell the CPU context structure that the first instruction to
190      execute will be that of the core_routine() function */
191   kctxt->regs.eip = (sos_ui32_t)core_routine;
192 
193   /* Setup the segment registers */
194   kctxt->regs.cs
195     = SOS_BUILD_SEGMENT_REG_VALUE(0, FALSE, SOS_SEG_KCODE); /* Code */
196   kctxt->regs.ds
197     = SOS_BUILD_SEGMENT_REG_VALUE(0, FALSE, SOS_SEG_KDATA); /* Data */
198   kctxt->regs.es
199     = SOS_BUILD_SEGMENT_REG_VALUE(0, FALSE, SOS_SEG_KDATA); /* Data */
200   kctxt->regs.cpl0_ss
201     = SOS_BUILD_SEGMENT_REG_VALUE(0, FALSE, SOS_SEG_KDATA); /* Stack */
202   /* fs and gs unused for the moment. */
203 
204   /* The newly created context is initially interruptible */
205   kctxt->regs.eflags = (1 << 9); /* set IF bit */
206 
207   /* Finally, update the generic kernel/user thread context */
208   *ctxt = (struct sos_cpu_state*) kctxt;
209 
210   return SOS_OK;
211 }
212 
213 
214 #if defined(SOS_CPU_STATE_DETECT_KERNEL_STACK_OVERFLOW)
215 void
216 sos_cpu_state_prepare_detect_kernel_stack_overflow(const struct sos_cpu_state *ctxt,
217                                                    sos_vaddr_t stack_bottom,
218                                                    sos_size_t stack_size)
219 {
220   sos_size_t poison_size = SOS_CPU_STATE_DETECT_KERNEL_STACK_OVERFLOW;
221   if (poison_size > stack_size)
222     poison_size = stack_size;
223 
224   memset((void*)stack_bottom, SOS_CPU_STATE_STACK_POISON, poison_size);
225 }
226 
227 
228 void
229 sos_cpu_state_detect_kernel_stack_overflow(const struct sos_cpu_state *ctxt,
230                                            sos_vaddr_t stack_bottom,
231                                            sos_size_t stack_size)
232 {
233   unsigned char *c;
234   int i;
235 
236   /* On SOS, "ctxt" corresponds to the address of the esp register of
237      the saved context in Kernel mode (always, even for the interrupted
238      context of a user thread). Here we make sure that this stack
239      pointer is within the allowed stack area */
240   SOS_ASSERT_FATAL(((sos_vaddr_t)ctxt) >= stack_bottom);
241   SOS_ASSERT_FATAL(((sos_vaddr_t)ctxt) + sizeof(struct sos_cpu_kstate)
242                    <= stack_bottom + stack_size);
243 
244   /* Check that the bottom of the stack has not been altered */
245   for (c = (unsigned char*) stack_bottom, i = 0 ;
246        (i < SOS_CPU_STATE_DETECT_KERNEL_STACK_OVERFLOW) && (i < stack_size) ;
247        c++, i++)
248     {
249       SOS_ASSERT_FATAL(SOS_CPU_STATE_STACK_POISON == *c);
250     }
251 }
252 #endif
253 
254 
255 /* =======================================================================
256  * Public Accessor functions
257  */
258 
259 
260 sos_vaddr_t sos_cpu_context_get_PC(const struct sos_cpu_state *ctxt)
261 {
262   SOS_ASSERT_FATAL(NULL != ctxt);
263 
264   /* This is the PC of the interrupted context (ie kernel or user
265      context). */
266   return ctxt->eip;
267 }
268 
269 
270 sos_vaddr_t sos_cpu_context_get_SP(const struct sos_cpu_state *ctxt)
271 {
272   SOS_ASSERT_FATAL(NULL != ctxt);
273 
274   /* On SOS, "ctxt" corresponds to the address of the esp register of
275      the saved context in Kernel mode (always, even for the interrupted
276      context of a user thread). */
277   return (sos_vaddr_t)ctxt;
278 }
279 
280 
281 void sos_cpu_context_dump(const struct sos_cpu_state *ctxt)
282 {
283   char buf[128];
284   snprintf(buf, sizeof(buf),
285            "CPU: eip=%x esp=%x eflags=%x cs=%x ds=%x ss=%x err=%x",
286            (unsigned)ctxt->eip, (unsigned)ctxt, (unsigned)ctxt->eflags,
287            (unsigned)GET_CPU_CS_REGISTER_VALUE(ctxt->cs), (unsigned)ctxt->ds,
288            (unsigned)ctxt->cpl0_ss,
289            (unsigned)ctxt->error_code);
290   sos_bochs_putstring(buf); sos_bochs_putstring("\n");
291   sos_x86_videomem_putstring(23, 0,
292                           SOS_X86_VIDEO_FG_BLACK | SOS_X86_VIDEO_BG_LTGRAY,
293                           buf);
294 }
295 
296 
297 /* =======================================================================
298  * Public Accessor functions TO BE USED ONLY BY Exception handlers
299  */
300 
301 
302 sos_ui32_t sos_cpu_context_get_EX_info(const struct sos_cpu_state *ctxt)
303 {
304   SOS_ASSERT_FATAL(NULL != ctxt);
305   return ctxt->error_code;
306 }
307 
308 
309 sos_vaddr_t
310 sos_cpu_context_get_EX_faulting_vaddr(const struct sos_cpu_state *ctxt)
311 {
312   sos_ui32_t cr2;
313 
314   /*
315    * See Intel Vol 3 (section 5.14): the address of the faulting
316    * virtual address of a page fault is stored in the cr2
317    * register.
318    *
319    * Actually, we do not store the cr2 register in a saved
320    * kernel thread's context. So we retrieve the cr2's value directly
321    * from the processor. The value we retrieve in an exception handler
322    * is actually the correct one because an exception is synchronous
323    * with the code causing the fault, and cannot be interrupted since
324    * the IDT entries in SOS are "interrupt gates" (ie IRQ are
325    * disabled).
326    */
327   asm volatile ("movl %%cr2, %0"
328                 :"=r"(cr2)
329                 : );
330 
331   return cr2;
332 }
333 
334 
335 /* =======================================================================
336  * Backtrace facility. To be used for DEBUGging purpose ONLY.
337  */
338 
339 
340 sos_ui32_t sos_backtrace(const struct sos_cpu_state *cpu_state,
341                          sos_ui32_t max_depth,
342                          sos_vaddr_t stack_bottom,
343                          sos_size_t stack_size,
344                          sos_backtrace_callback_t * backtracer,
345                          void *custom_arg)
346 {
347   int depth;
348   sos_vaddr_t callee_PC, caller_frame;
349 
350   /*
351    * Layout of a frame on the x86 (compiler=gcc):
352    *
353    * funcA calls funcB calls funcC
354    *
355    *         ....
356    *         funcB Argument 2
357    *         funcB Argument 1
358    *         funcA Return eip
359    * frameB: funcA ebp (ie previous stack frame)
360    *         ....
361    *         (funcB local variables)
362    *         ....
363    *         funcC Argument 2
364    *         funcC Argument 1
365    *         funcB Return eip
366    * frameC: funcB ebp (ie previous stack frame == A0) <---- a frame address
367    *         ....
368    *         (funcC local variables)
369    *         ....
370    *
371    * The presence of "ebp" on the stack depends on 2 things:
372    *   + the compiler is gcc
373    *   + the source is compiled WITHOUT the -fomit-frame-pointer option
374    * In the absence of "ebp", chances are high that the value pushed
375    * at that address is outside the stack boundaries, meaning that the
376    * function will return -SOS_ENOSUP.
377    */
378 
379   if (cpu_state)
380     {
381       callee_PC    = cpu_state->eip;
382       caller_frame = cpu_state->ebp;
383     }
384   else
385     {
386       /* Skip the sos_backtrace() frame */
387       callee_PC    = (sos_vaddr_t)__builtin_return_address(0);
388       caller_frame = (sos_vaddr_t)__builtin_frame_address(1);
389     }
390 
391   for(depth=0 ; depth < max_depth ; depth ++)
392     {
393       /* Call the callback */
394       backtracer(callee_PC, caller_frame + 8, depth, custom_arg);
395 
396       /* If the frame address is funky, don't go further */
397       if ( (caller_frame < stack_bottom)
398            || (caller_frame + 4 >= stack_bottom + stack_size) )
399         return depth;
400 
401       /* Go to caller frame */
402       callee_PC    = *((sos_vaddr_t*) (caller_frame + 4));
403       caller_frame = *((sos_vaddr_t*) caller_frame);
404     }
405   
406   return depth;
407 }

source navigation ] diff markup ] identifier search ] general search ]