Diff markup

Each line of thread.c below carries two numbers: its position in the left-hand version and its position in the right-hand version. Lines present only in the left-hand version are marked "<<"; "!!" separates the two variants of a line that differs between the versions. Lines common to both versions are printed once, prefixed with both numbers.
001 001
002 002
003 003
004 004
005 005
006 006
007 007
008 008
009 009
010 010
011 011
012 012
013 013
014 014
015 015
016 016
017 017
018 018
019 019 #include <sos/physmem.h>
020 020 #include <sos/kmem_slab.h>
021 021 #include <sos/kmalloc.h>
022 022 #include <sos/klibc.h>
023 023 #include <sos/list.h>
024 024 #include <sos/assert.h>
025 #include <hwcore/mm_context.h> <<
026 #include <sos/process.h> <<
027 <<
028 #include <drivers/bochs.h> <<
029 #include <drivers/x86_videomem.h> <<
030 025
031 026 #include <hwcore/irq.h>
032 027
033 028 #include "thread.h"
034 029
035 030
036 031
037 032
038 033
039 034 #define SOS_THREAD_KERNEL_STACK_SIZE (1*SOS_PAGE_SIZE)
040 035
041 036
042 037
043 038
044 039
045 040
046 041
047 042
048 043
049 044
050 045
051 046
052 047
053 048
054 049
055 050
056 051 static volatile struct sos_thread *current_thread = NULL;
057 052
058 053
059 054
060 055
061 056
062 057
063 058
064 059 static struct sos_thread *thread_list = NULL;
065 060
066 061
067 062
068 063
069 064
070 065 static struct sos_kslab_cache *cache_thread;
071 066
072 067
073 068 struct sos_thread *sos_thread_get_current()
074 069 {
075 070   SOS_ASSERT_FATAL(current_thread->state == SOS_THR_RUNNING);
076 071   return (struct sos_thread*)current_thread;
077 072 }
078 073
079 074
080 075 inline static sos_ret_t _set_current(struct sos_thread *thr)
081 076 {
082 077   SOS_ASSERT_FATAL(thr->state == SOS_THR_READY);
083 078   current_thread = thr;
084 079   current_thread->state = SOS_THR_RUNNING;
085 080   return SOS_OK;
086 081 }
087 082
088 083
089 084 sos_ret_t sos_thread_subsystem_setup(sos_vaddr_t init_thread_stack_base_addr,
090 085                                      sos_size_t init_thread_stack_size)
091 086 {
092 087   struct sos_thread *myself;
093 088
094 089
095 090   cache_thread = sos_kmem_cache_create("thread",
096 091                                        sizeof(struct sos_thread),
097 092                                        2,
098 093                                        0,
099 094                                        SOS_KSLAB_CREATE_MAP
100 095                                        | SOS_KSLAB_CREATE_ZERO);
101 096   if (! cache_thread)
102 097     return -SOS_ENOMEM;
103 098
104 099
105 100   myself = (struct sos_thread*) sos_kmem_cache_alloc(cache_thread,
106 101                                                      SOS_KSLAB_ALLOC_ATOMIC);
107 102   if (! myself)
108 103     return -SOS_ENOMEM;
109 104
110 105
111 106   strzcpy(myself->name, "[kinit]", SOS_THR_MAX_NAMELEN);
112 107   myself->state    = SOS_THR_CREATED;
113   myself->priority = SOS_SCHED_PRIO_LOWEST; <<
114 108   myself->kernel_stack_base_addr = init_thread_stack_base_addr;
115 109   myself->kernel_stack_size      = init_thread_stack_size;
116 110
117 111
118 112   sos_cpu_state_prepare_detect_kernel_stack_overflow(myself->cpu_state,
119 113                                                      myself->kernel_stack_base_addr,
120 114                                                      myself->kernel_stack_size);
121 115
122 116
123 117   list_singleton_named(thread_list, myself, gbl_prev, gbl_next);
124 118
125 119
126 120   myself->state = SOS_THR_READY;
127 121   _set_current(myself);
128 122
129 123   return SOS_OK;
130 124 }
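
Note: sos_thread_subsystem_setup() turns the already-running boot code into the first thread ("[kinit]"), so it must be handed the stack that the boot code is currently executing on. A minimal sketch of a boot-time caller; the symbols bootstrap_stack_bottom and bootstrap_stack_size are illustrative assumptions, not part of the listing:

    /* Hypothetical boot-time call: the primary boot stack becomes the
       kernel stack of the "[kinit]" thread created above. */
    extern sos_vaddr_t bootstrap_stack_bottom; /* assumed boot symbol */
    extern sos_size_t  bootstrap_stack_size;   /* assumed boot symbol */

    sos_thread_subsystem_setup(bootstrap_stack_bottom,
                               bootstrap_stack_size);
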
131 125
132 126
133 127 struct sos_thread *
134 128 sos_create_kernel_thread(const char *name,
135 129                          sos_kernel_thread_start_routine_t start_func,
136 void *start_arg, !! 130 void *start_arg)
137                          sos_sched_priority_t priority) <<
138 131 {
139 132   __label__ undo_creation;
140 133   sos_ui32_t flags;
141 134   struct sos_thread *new_thread;
142 135
143 136   if (! start_func)
144 137     return NULL;
145 if (! SOS_SCHED_PRIO_IS_VALID(priority)) <<
146 return NULL; <<
147 138
148 139
149 140   new_thread
150 141     = (struct sos_thread*) sos_kmem_cache_alloc(cache_thread,
151 142                                                 SOS_KSLAB_ALLOC_ATOMIC);
152 143   if (! new_thread)
153 144     return NULL;
154 145
155 146
156 147   strzcpy(new_thread->name, ((name)?name:"[NONAME]"), SOS_THR_MAX_NAMELEN);
157 148   new_thread->state = SOS_THR_CREATED;
158 new_thread->priority = priority; <<
159 149
160 150
161 151   new_thread->kernel_stack_base_addr = sos_kmalloc(SOS_THREAD_KERNEL_STACK_SIZE, 0);
162 152   new_thread->kernel_stack_size      = SOS_THREAD_KERNEL_STACK_SIZE;
163 153   if (! new_thread->kernel_stack_base_addr)
164 154     goto undo_creation;
165 155
166 156
167 157   if (SOS_OK
168 158       != sos_cpu_kstate_init(& new_thread->cpu_state,
169 159                              (sos_cpu_kstate_function_arg1_t*) start_func,
170 160                              (sos_ui32_t) start_arg,
171 161                              new_thread->kernel_stack_base_addr,
172 162                              new_thread->kernel_stack_size,
173 163                              (sos_cpu_kstate_function_arg1_t*) sos_thread_exit,
174 164                              (sos_ui32_t) NULL))
175 165     goto undo_creation;
176 166
177 167
178 168   sos_disable_IRQs(flags);
179 169   list_add_tail_named(thread_list, new_thread, gbl_prev, gbl_next);
180 170   sos_restore_IRQs(flags);
181 171
182 172
183 173   if (SOS_OK != sos_sched_set_ready(new_thread))
184 174     goto undo_creation;
185 175
186 176
187 177   return new_thread;
188 178
189 179  undo_creation:
190 180   if (new_thread->kernel_stack_base_addr)
191 181     sos_kfree((sos_vaddr_t) new_thread->kernel_stack_base_addr);
192 182   sos_kmem_cache_free((sos_vaddr_t) new_thread);
193 183   return NULL;
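
Note: a kernel thread starts in start_func(start_arg) and, because sos_cpu_kstate_init() installs sos_thread_exit as the return address, simply returning from start_func terminates the thread cleanly. A minimal usage sketch (the body and the thread name are illustrative; the left-hand variant additionally takes a sos_sched_priority_t argument):

    static void demo_body(void *arg)
    {
      /* ... do some work ... */
    }   /* returning here falls through into sos_thread_exit() */

    struct sos_thread *t = sos_create_kernel_thread("demo", demo_body, NULL);
    if (! t)
      { /* creation failed: out of memory, or the scheduler refused it */ }
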
195 185
196 186
197 struct sos_thread * <<
198 sos_create_user_thread(const char *name, <<
199                        struct sos_process *process, <<
200                        sos_uaddr_t user_initial_PC, <<
201                        sos_ui32_t  user_start_arg1, <<
202                        sos_ui32_t  user_start_arg2, <<
203                        sos_uaddr_t user_initial_SP, <<
204                        sos_sched_priority_t priority) <<
205 { <<
206   __label__ undo_creation; <<
207   sos_ui32_t flags; <<
208   struct sos_thread *new_thread; <<
209 <<
210   if (! SOS_SCHED_PRIO_IS_VALID(priority)) <<
211     return NULL; <<
212 <<
213 <<
214   if (! process) <<
215     return NULL; <<
216 <<
217 <<
218   new_thread <<
219     = (struct sos_thread*) sos_kmem_cache_alloc(cache_thread, <<
220                                                 SOS_KSLAB_ALLOC_ATOMIC); <<
221   if (! new_thread) <<
222     return NULL; <<
223 <<
224 <<
225   strzcpy(new_thread->name, ((name)?name:"[NONAME]"), SOS_THR_MAX_NAMELEN); <<
226   new_thread->state    = SOS_THR_CREATED; <<
227   new_thread->priority = priority; <<
228 <<
229 <<
230   new_thread->kernel_stack_base_addr = sos_kmalloc(SOS_THREAD_KERNEL_STACK_SIZE, 0); <<
231   new_thread->kernel_stack_size      = SOS_THREAD_KERNEL_STACK_SIZE; <<
232   if (! new_thread->kernel_stack_base_addr) <<
233     goto undo_creation; <<
234 <<
235   if (SOS_OK <<
236       != sos_cpu_ustate_init(& new_thread->cpu_state, <<
237                              user_initial_PC, <<
238                              user_start_arg1, <<
239                              user_start_arg2, <<
240                              user_initial_SP, <<
241                              new_thread->kernel_stack_base_addr, <<
242                              new_thread->kernel_stack_size)) <<
243     goto undo_creation; <<
244 <<
245 <<
246   if (SOS_OK != sos_process_register_thread(process, new_thread)) <<
247     goto undo_creation; <<
248 <<
249 <<
250   sos_disable_IRQs(flags); <<
251   list_add_tail_named(thread_list, new_thread, gbl_prev, gbl_next); <<
252   sos_restore_IRQs(flags); <<
253 <<
254 <<
255   if (SOS_OK != sos_sched_set_ready(new_thread)) <<
256     goto undo_creation; <<
257 <<
258 <<
259   return new_thread; <<
260 <<
261  undo_creation: <<
262   if (new_thread->kernel_stack_base_addr) <<
263     sos_kfree((sos_vaddr_t) new_thread->kernel_stack_base_addr); <<
264   sos_kmem_cache_free((sos_vaddr_t) new_thread); <<
265   return NULL; <<
266 } <<
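
Note: a user thread still owns a kernel stack, used whenever it enters the kernel (syscall, exception, IRQ); only the cpu_state setup differs from the kernel-thread case, since sos_cpu_ustate_init() builds a context that drops to user mode at user_initial_PC on the user stack user_initial_SP. A hedged sketch of a caller, assuming a process and user-space addresses prepared elsewhere (proc, entry and stack_top are illustrative names):

    /* `proc` set up by a (hypothetical) program loader; entry and
       stack_top are user-space addresses valid inside proc. */
    struct sos_thread *ut
      = sos_create_user_thread("init", proc,
                               entry,         /* initial user PC */
                               0, 0,          /* start arguments */
                               stack_top,     /* initial user SP */
                               SOS_SCHED_PRIO_LOWEST);
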
267 <<
268 <<
269 <<
270 <<
271 <<
272 <<
273 <<
274 <<
275 <<
276 <<
277 static void _prepare_mm_context(struct sos_thread *the_thread) <<
278 { <<
279 <<
280   if (sos_cpu_context_is_in_user_mode(the_thread->cpu_state) <<
281       == TRUE) <<
282     { <<
283 <<
284 <<
285 <<
286 <<
287       SOS_ASSERT_FATAL(the_thread->process != NULL); <<
288 <<
289 <<
290       SOS_ASSERT_FATAL(the_thread->squatted_mm_context == NULL); <<
291 <<
292 <<
293       sos_mm_context_switch_to(sos_process_get_mm_context(the_thread->process)); <<
294     } <<
295 <<
296 <<
297 <<
298   else if (the_thread->squatted_mm_context != NULL) <<
299     sos_mm_context_switch_to(the_thread->squatted_mm_context); <<
300 } <<
301 <<
302 <<
303 187
304 188
305 189 static void delete_thread(struct sos_thread *thr)
306 190 {
307 191   sos_ui32_t flags;
308 192
309 193   sos_disable_IRQs(flags);
310 194   list_delete_named(thread_list, thr, gbl_prev, gbl_next);
311 195   sos_restore_IRQs(flags);
312 196
313 197   sos_kfree((sos_vaddr_t) thr->kernel_stack_base_addr);
314 <<
315 <<
316 if (thr->squatted_mm_context) <<
317     SOS_ASSERT_FATAL(SOS_OK == sos_thread_change_current_mm_context(NULL)); <<
318 <<
319 <<
320 if (thr->process) <<
321     SOS_ASSERT_FATAL(SOS_OK == sos_process_unregister_thread(thr)); <<
322 <<
323 198   memset(thr, 0x0, sizeof(struct sos_thread));
324 199   sos_kmem_cache_free((sos_vaddr_t) thr);
325 200 }
326 201
327 202
328 203 void sos_thread_exit()
329 204 {
330 205   sos_ui32_t flags;
331 206   struct sos_thread *myself, *next_thread;
332 207
333 208
334 209   SOS_ASSERT_FATAL(! sos_servicing_irq());
335 210
336 211   myself = sos_thread_get_current();
337 212
338 213
339 214
340 215   SOS_ASSERT_FATAL(list_is_empty_named(myself->kwaitq_list,
341 216                                        prev_entry_for_thread,
342 217                                        next_entry_for_thread));
343 218
344 219
345 220   sos_disable_IRQs(flags);
346 221   myself->state = SOS_THR_ZOMBIE;
347 222   next_thread = sos_reschedule(myself, FALSE);
348 223
349 224
350 225   sos_cpu_state_detect_kernel_stack_overflow(next_thread->cpu_state,
351 226                                              next_thread->kernel_stack_base_addr,
352 227                                              next_thread->kernel_stack_size);
353 228
354 <<
355 <<
356 <<
357 _prepare_mm_context(next_thread); <<
358 <<
359 229
360 230
361 231
362 232
363 233   _set_current(next_thread);
364 234   sos_cpu_context_exit_to(next_thread->cpu_state,
365 235                           (sos_cpu_kstate_function_arg1_t*) delete_thread,
366 236                           (sos_ui32_t) myself);
367 237 }
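
Note: a thread cannot free its own kernel stack while still running on it. sos_cpu_context_exit_to() therefore switches to next_thread and arranges for delete_thread(myself) to be executed on next_thread's stack, after which the ZOMBIE thread's stack and descriptor can be safely released. A sketch of the resulting lifetime:

    static void worker(void *unused)
    {
      /* ... */
      sos_thread_exit();  /* never returns: the ZOMBIE thread is freed
                             by delete_thread() run on behalf of the
                             next scheduled thread */
    }
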
368 238
369 239
370 sos_sched_priority_t sos_thread_get_priority(struct sos_thread *thr) <<
371 { <<
372   if (! thr) <<
373     thr = (struct sos_thread*)current_thread; <<
374 <<
375   return thr->priority; <<
376 } <<
377 <<
378 <<
379 240 sos_thread_state_t sos_thread_get_state(struct sos_thread *thr)
380 241 {
381 242   if (! thr)
382 243     thr = (struct sos_thread*)current_thread;
383 244
384 245   return thr->state;
385 246 }
386 247
387 248
388 249 typedef enum { YIELD_MYSELF, BLOCK_MYSELF } switch_type_t;
389 250
390 251
391 252
392 253
393 254 static sos_ret_t _switch_to_next_thread(switch_type_t operation)
394 255 {
395 256   struct sos_thread *myself, *next_thread;
396 257
397 258   SOS_ASSERT_FATAL(current_thread->state == SOS_THR_RUNNING);
398 259
399 260
400 261   SOS_ASSERT_FATAL(! sos_servicing_irq());
401 262
402 263   myself = (struct sos_thread*)current_thread;
403 264
404 265
405 266
406 267   if (BLOCK_MYSELF == operation)
407 268     {
408 269       myself->state = SOS_THR_BLOCKED;
409 270     }
410 271
411 272
412 273   next_thread = sos_reschedule(myself, YIELD_MYSELF == operation);
413 274
414 275
415 276   if (myself != next_thread)
416 277     {
417 278
418 279       sos_cpu_state_detect_kernel_stack_overflow(next_thread->cpu_state,
419 280                                                  next_thread->kernel_stack_base_addr,
420 281                                                  next_thread->kernel_stack_size);
421 282
422 <<
423 <<
424 <<
425 _prepare_mm_context(next_thread); <<
426 283
427 284
428 285
429 286
430 287       _set_current(next_thread);
431 288       sos_cpu_context_switch(& myself->cpu_state, next_thread->cpu_state);
432 289
433 290
434 291       SOS_ASSERT_FATAL(current_thread == myself);
435 292       SOS_ASSERT_FATAL(current_thread->state == SOS_THR_RUNNING);
436 293     }
437 294   else
438 295     {
439 296
440 297       _set_current(next_thread);
441 298     }
442 299
443 300   return SOS_OK;
444 301 }
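
Note: _switch_to_next_thread() is the single switching primitive of this file: YIELD_MYSELF keeps the caller READY in the scheduler, while BLOCK_MYSELF leaves it BLOCKED until someone calls sos_sched_set_ready() on it (see sos_thread_force_unblock() below). A hedged sketch of a blocking wait built on it, mirroring the timeout-less branch of sos_thread_sleep():

    sos_ui32_t flags;

    sos_disable_IRQs(flags);
    /* ... record somewhere that this thread is waiting for an event ... */
    _switch_to_next_thread(BLOCK_MYSELF);  /* resumes only after another
                                              thread or handler makes us
                                              READY again */
    sos_restore_IRQs(flags);
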
445 302
446 303
447 <<
448 <<
449 <<
450 <<
451 static sos_ret_t _change_waitq_priorities(struct sos_thread *thr, <<
452                                           sos_sched_priority_t priority) <<
453 { <<
454   struct sos_kwaitq_entry *kwq_entry; <<
455   int nb_waitqs; <<
456 <<
457   list_foreach_forward_named(thr->kwaitq_list, kwq_entry, nb_waitqs, <<
458                              prev_entry_for_thread, next_entry_for_thread) <<
459     { <<
460       SOS_ASSERT_FATAL(SOS_OK == sos_kwaitq_change_priority(kwq_entry->kwaitq, <<
461                                                             kwq_entry, <<
462                                                             priority)); <<
463     } <<
464 <<
465   return SOS_OK; <<
466 } <<
467 <<
468 <<
469 sos_ret_t sos_thread_set_priority(struct sos_thread *thr, <<
470                                   sos_sched_priority_t priority) <<
471 { <<
472   __label__ exit_set_prio; <<
473   sos_ui32_t flags; <<
474   sos_ret_t retval; <<
475 <<
476 <<
477   if (! SOS_SCHED_PRIO_IS_VALID(priority)) <<
478     return -SOS_EINVAL; <<
479 <<
480   if (! thr) <<
481     thr = (struct sos_thread*)current_thread; <<
482 <<
483   sos_disable_IRQs(flags); <<
484 <<
485 <<
486 <<
487   retval = _change_waitq_priorities(thr, priority); <<
488   if (SOS_OK != retval) <<
489     goto exit_set_prio; <<
490 <<
491 <<
492 <<
493   if (SOS_THR_READY == thr->state) <<
494     retval = sos_sched_change_priority(thr, priority); <<
495 <<
496 <<
497   thr->priority = priority; <<
498 <<
499  exit_set_prio: <<
500   sos_restore_IRQs(flags); <<
501   return retval; <<
502 } <<
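
Note: changing a priority must be propagated everywhere the old value is cached: in every kwaitq the thread is registered on (_change_waitq_priorities) and in the ready queue when the thread is READY. A short usage sketch, assuming sched.h also defines SOS_SCHED_PRIO_HIGHEST as the counterpart of SOS_SCHED_PRIO_LOWEST (an assumption, not shown in this listing):

    /* Raise the current thread's priority; a NULL thr means "myself". */
    if (SOS_OK != sos_thread_set_priority(NULL, SOS_SCHED_PRIO_HIGHEST))
      { /* invalid priority, or a kwaitq update failed */ }
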
503 <<
504 <<
505 304 sos_ret_t sos_thread_yield()
506 305 {
507 306   sos_ui32_t flags;
508 307   sos_ret_t retval;
509 308
510 309   sos_disable_IRQs(flags);
511 310
512 311   retval = _switch_to_next_thread(YIELD_MYSELF);
513 312
514 313   sos_restore_IRQs(flags);
515 314   return retval;
516 315 }
517 316
518 317
519 318
520 319
521 320
522 321 struct sleep_timeout_params
523 322 {
524 323   struct sos_thread *thread_to_wakeup;
525 324   sos_bool_t timeout_triggered;
526 325 };
527 326
528 327
529 328
530 329
531 330
532 331 static void sleep_timeout(struct sos_timeout_action *act)
533 332 {
534 333   struct sleep_timeout_params *sleep_timeout_params
535 334     = (struct sleep_timeout_params*) act->routine_data;
536 335
537 336
538 337   sleep_timeout_params->timeout_triggered = TRUE;
539 338
540 339
541 340   SOS_ASSERT_FATAL(SOS_OK ==
542 341                    sos_thread_force_unblock(sleep_timeout_params
543 342                                             ->thread_to_wakeup));
544 343 }
545 344
546 345
547 346 sos_ret_t sos_thread_sleep(struct sos_time *timeout)
548 347 {
549 348   sos_ui32_t flags;
550 349   struct sleep_timeout_params sleep_timeout_params;
551 350   struct sos_timeout_action timeout_action;
552 351   sos_ret_t retval;
553 352
554 353
555 354   if (NULL == timeout)
556 355     {
557 356       sos_disable_IRQs(flags);
558 357       retval = _switch_to_next_thread(BLOCK_MYSELF);
559 358       sos_restore_IRQs(flags);
560 359
561 360       return retval;
562 361     }
563 362
564 363
565 364   sos_time_init_action(& timeout_action);
566 365
567 366
568 367   sleep_timeout_params.thread_to_wakeup
569 368     = (struct sos_thread*)current_thread;
570 369   sleep_timeout_params.timeout_triggered = FALSE;
571 370
572 371   sos_disable_IRQs(flags);
573 372
574 373
575 374   SOS_ASSERT_FATAL(SOS_OK ==
576 375                    sos_time_register_action_relative(& timeout_action,
577 376                                                      timeout,
578 377                                                      sleep_timeout,
579 378                                                      & sleep_timeout_params));
580 379
581 380
582 381
583 382
584 383   retval = _switch_to_next_thread(BLOCK_MYSELF);
585 384
586 385
587 386
588 387   if (sleep_timeout_params.timeout_triggered)
589 388     {
590 389
591 390       SOS_ASSERT_FATAL(sos_time_is_zero(& timeout_action.timeout));
592 391       retval = SOS_OK;
593 392     }
594 393   else
595 394     {
596 395
597 396
598 397       SOS_ASSERT_FATAL(SOS_OK == sos_time_unregister_action(& timeout_action));
599 398       retval = -SOS_EINTR;
600 399     }
601 400
602 401   sos_restore_IRQs(flags);
603 402
604 403
605 404   memcpy(timeout, & timeout_action.timeout, sizeof(struct sos_time));
606 405
607 406   return retval;
608 407 }
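
Note: on normal expiry the timeout action's remaining time is zero and SOS_OK is returned; if something else unblocked the thread first, the pending action is cancelled and -SOS_EINTR is returned, with the remaining time written back into *timeout. A usage sketch, assuming struct sos_time exposes sec/nanosec fields (field names assumed from sos/time.h, not shown here):

    struct sos_time delay;
    delay.sec     = 1;   /* sleep for one second */
    delay.nanosec = 0;

    if (-SOS_EINTR == sos_thread_sleep(& delay))
      { /* woken early: `delay` now holds the unslept remainder */ }
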
609 408
610 409
611 410 sos_ret_t sos_thread_force_unblock(struct sos_thread *thread)
612 411 {
613 412   sos_ret_t retval;
614 413   sos_ui32_t flags;
615 414
616 415   if (! thread)
617 416     return -SOS_EINVAL;
618 417
619 418   sos_disable_IRQs(flags);
620 419
621 420
622 421   retval = SOS_OK;
623 422   switch(sos_thread_get_state(thread))
624 423     {
625 424     case SOS_THR_RUNNING:
626 425     case SOS_THR_READY:
627 426
628 427       break;
629 428
630 429     case SOS_THR_ZOMBIE:
631 430       retval = -SOS_EFATAL;
632 431       break;
633 432
634 433     default:
635 434       retval = sos_sched_set_ready(thread);
636 435       break;
637 436     }
638 437
639 438   sos_restore_IRQs(flags);
640 439
641 440   return retval;
642 } <<
643 <<
644 <<
645 void sos_thread_dump_backtrace(sos_bool_t on_console, <<
646                                sos_bool_t on_bochs) <<
647 { <<
648   sos_vaddr_t stack_bottom = current_thread->kernel_stack_base_addr; <<
649   sos_size_t  stack_size   = current_thread->kernel_stack_size; <<
650 <<
651   static void backtracer(sos_vaddr_t PC, <<
652                          sos_vaddr_t params, <<
653                          sos_ui32_t depth, <<
654                          void *custom_arg) <<
655     { <<
656       sos_ui32_t invalid = 0xffffffff, *arg1, *arg2, *arg3, *arg4; <<
657 <<
658 <<
659 <<
660 <<
661 <<
662       arg1 = (sos_ui32_t*)params; <<
663       arg2 = (sos_ui32_t*)(params+4); <<
664       arg3 = (sos_ui32_t*)(params+8); <<
665       arg4 = (sos_ui32_t*)(params+12); <<
666 <<
667 <<
668 <<
669 #define INTERVAL_OK(b,v,u) ( ((b) <= (sos_vaddr_t)(v)) \ <<
670                              && ((sos_vaddr_t)(v) < (u)) ) <<
671       if (!INTERVAL_OK(stack_bottom, arg1, stack_bottom + stack_size)) <<
672         arg1 = &invalid; <<
673       if (!INTERVAL_OK(stack_bottom, arg2, stack_bottom + stack_size)) <<
674         arg2 = &invalid; <<
675       if (!INTERVAL_OK(stack_bottom, arg3, stack_bottom + stack_size)) <<
676         arg3 = &invalid; <<
677       if (!INTERVAL_OK(stack_bottom, arg4, stack_bottom + stack_size)) <<
678         arg4 = &invalid; <<
679 <<
680 <<
681       if (on_bochs) <<
682         sos_bochs_printf("[%d] PC=0x%x arg1=0x%x arg2=0x%x arg3=0x%x\n", <<
683                          (unsigned)depth, (unsigned)PC, <<
684                          (unsigned)*arg1, (unsigned)*arg2, <<
685                          (unsigned)*arg3); <<
686 <<
687       if (on_console) <<
688         sos_x86_videomem_printf(23-depth, 3, <<
689                                 SOS_X86_VIDEO_BG_BLUE <<
690                                   | SOS_X86_VIDEO_FG_LTGREEN, <<
691                                 "[%d] PC=0x%x arg1=0x%x arg2=0x%x arg3=0x%x", <<
692                                 (unsigned)depth, (unsigned)PC, <<
693                                 (unsigned)*arg1, (unsigned)*arg2, <<
694                                 (unsigned)*arg3); <<
695 <<
696     } <<
697 <<
698   sos_backtrace(NULL, 15, stack_bottom, stack_size, <<
699                 backtracer, NULL); <<
700 } <<
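
Note: backtracer() is a GCC nested function: it is declared inside sos_thread_dump_backtrace() so that it can reach the enclosing stack_bottom, stack_size, on_console and on_bochs variables through its static chain. Passing it as the callback of sos_backtrace() is safe only because it is invoked while the enclosing frame is still live. The pattern reduced to its essentials (illustrative sketch, GCC extension, not ISO C):

    void outer(void)
    {
      int captured = 42;
      void inner(void) { /* may read `captured` from outer's frame */ }
      inner();  /* valid only while outer()'s frame still exists */
    }
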
701 <<
702 <<
703 <<
704 <<
705 <<
706 <<
707 <<
708 <<
709 sos_ret_t <<
710 sos_thread_change_current_mm_context(struct sos_mm_context *mm_ctxt) <<
711 { <<
712   sos_ui32_t flags; <<
713 <<
714 <<
715   struct sos_mm_context * prev_mm_ctxt <<
716     = current_thread->squatted_mm_context; <<
717 <<
718 <<
719 <<
720   if (mm_ctxt != NULL) <<
721     SOS_ASSERT_FATAL(prev_mm_ctxt == NULL); <<
722   else <<
723     SOS_ASSERT_FATAL(prev_mm_ctxt != NULL); <<
724 <<
725   sos_disable_IRQs(flags); <<
726 <<
727 <<
728   current_thread->squatted_mm_context = mm_ctxt; <<
729 <<
730 <<
731 <<
732   if (mm_ctxt != NULL) <<
733     { <<
734       sos_mm_context_ref(mm_ctxt); <<
735 <<
736 <<
737       sos_mm_context_switch_to(mm_ctxt); <<
738     } <<
739   else <<
740     sos_mm_context_unref(prev_mm_ctxt); <<
741 <<
742 <<
743 <<
744   sos_restore_IRQs(flags); <<
745 <<
746   return SOS_OK; <<
747 } <<
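
Note: this is the "squatting" mechanism used by a thread that must temporarily work inside another address space (for instance while setting a process up): pass an mm_context to acquire it, NULL to release it; the asserts forbid nesting. A hedged sketch, assuming dest_ctxt was obtained elsewhere (for example via sos_process_get_mm_context()):

    sos_thread_change_current_mm_context(dest_ctxt);  /* ref + MMU switch */
    /* ... touch addresses that are only valid inside dest_ctxt ... */
    sos_thread_change_current_mm_context(NULL);       /* revert + unref */
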
748 <<
749 <<
750 void sos_thread_prepare_syscall_switch_back(struct sos_cpu_state *cpu_state) <<
751 { <<
752 <<
753 <<
754 <<
755 <<
756 <<
757 <<
758 <<
759 <<
760   current_thread->cpu_state = cpu_state; <<
761 <<
762 <<
763   _prepare_mm_context((struct sos_thread*) current_thread); <<
764 } <<
765 <<
766 <<
767 void sos_thread_prepare_exception_switch_back(struct sos_cpu_state *cpu_state) <<
768 { <<
769 <<
770 <<
771 <<
772 <<
773 <<
774 <<
775 <<
776 <<
777   current_thread->cpu_state = cpu_state; <<
778 <<
779 <<
780   _prepare_mm_context((struct sos_thread*) current_thread); <<
781 } <<
782 <<
783 <<
784 void <<
785 sos_thread_prepare_irq_servicing(struct sos_cpu_state *interrupted_state) <<
786 { <<
787   current_thread->cpu_state = interrupted_state; <<
788 } <<
789 <<
790 <<
791 struct sos_cpu_state * <<
792 sos_thread_prepare_irq_switch_back(void) <<
793 { <<
794   struct sos_thread *myself, *next_thread; <<
795 <<
796 <<
797 <<
798   if (! sos_cpu_context_is_in_user_mode(current_thread->cpu_state)) <<
799     return current_thread->cpu_state; <<
800 <<
801 <<
802 <<
803 <<
804 <<
805 <<
806 <<
807   SOS_ASSERT_FATAL(current_thread->process != NULL); <<
808 <<
809 <<
810   myself = (struct sos_thread*)current_thread; <<
811 <<
812 <<
813   next_thread = sos_reschedule(myself, FALSE); <<
814 <<
815 <<
816   _prepare_mm_context(next_thread); <<
817 <<
818 <<
819   _set_current(next_thread); <<
820   return next_thread->cpu_state; <<
821 441 }
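
Note: the prepare_*() hooks above are meant to be called from the low-level syscall/exception/IRQ stubs. Only the IRQ return path may preempt, and only when the interrupted thread was running in user mode: kernel-mode code is never preempted here. A hedged sketch of the order in which an IRQ stub would use the two hooks (the wrapper name and PIC handling are illustrative, not part of the listing):

    struct sos_cpu_state *irq_wrapper(struct sos_cpu_state *interrupted)
    {
      sos_thread_prepare_irq_servicing(interrupted); /* save the state */
      /* ... acknowledge the PIC, run the registered handler ... */
      return sos_thread_prepare_irq_switch_back();   /* possibly another
                                                        thread's state */
    }
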