Diff markup
001 001
002 002
003 003
004 004
005 005
006 006
007 007
008 008
009 009
010 010
011 011
012 012
013 013
014 014
015 015
016 016
017 017
018 018
019 #include <sos/physmem.h> 019 #include <sos/physmem.h>
020 #include <sos/kmem_slab.h> 020 #include <sos/kmem_slab.h>
021 #include <sos/kmalloc.h> 021 #include <sos/kmalloc.h>
022 #include <sos/klibc.h> 022 #include <sos/klibc.h>
023 #include <sos/list.h> 023 #include <sos/list.h>
024 #include <sos/assert.h> 024 #include <sos/assert.h>
025 #include <hwcore/mm_context.h> <<
026 #include <sos/process.h> <<
027 <<
028 #include <drivers/bochs.h> <<
029 #include <drivers/x86_videomem.h> <<
030 025
031 #include <hwcore/irq.h> 026 #include <hwcore/irq.h>
032 027
033 #include "thread.h" 028 #include "thread.h"
034 029
035 030
036 031
037 032
038 033
039 #define SOS_THREAD_KERNEL_STACK_SIZE (1*SOS_PA 034 #define SOS_THREAD_KERNEL_STACK_SIZE (1*SOS_PAGE_SIZE)
040 035
041 036
042 037
043 038
044 039
045 040
046 041
047 042
048 043
049 044
050 045
051 046
052 047
053 048
054 049
055 050
056 static volatile struct sos_thread *current_thr 051 static volatile struct sos_thread *current_thread = NULL;
057 052
058 053
059 054
060 055
061 056
062 057
063 058
064 static struct sos_thread *thread_list = NULL; 059 static struct sos_thread *thread_list = NULL;
065 060
066 061
067 062
068 063
069 064
070 static struct sos_kslab_cache *cache_thread; 065 static struct sos_kslab_cache *cache_thread;
071 066
072 067
073 <<
074 <<
075 <<
076 <<
077 <<
078 static sos_ret_t change_current_mm_context(struct sos_mm_context *mm_ctxt); <<
079 <<
080 <<
081 struct sos_thread *sos_thread_get_current() 068 struct sos_thread *sos_thread_get_current()
082 { 069 {
083 SOS_ASSERT_FATAL(current_thread->state == SO 070 SOS_ASSERT_FATAL(current_thread->state == SOS_THR_RUNNING);
084 return (struct sos_thread*)current_thread; 071 return (struct sos_thread*)current_thread;
085 } 072 }
086 073
087 074
088 inline static sos_ret_t _set_current(struct so 075 inline static sos_ret_t _set_current(struct sos_thread *thr)
089 { 076 {
090 SOS_ASSERT_FATAL(thr->state == SOS_THR_READY 077 SOS_ASSERT_FATAL(thr->state == SOS_THR_READY);
091 current_thread = thr; 078 current_thread = thr;
092 current_thread->state = SOS_THR_RUNNING; 079 current_thread->state = SOS_THR_RUNNING;
093 return SOS_OK; 080 return SOS_OK;
094 } 081 }
095 082
096 083
097 sos_ret_t sos_thread_subsystem_setup(sos_vaddr 084 sos_ret_t sos_thread_subsystem_setup(sos_vaddr_t init_thread_stack_base_addr,
098 sos_size_ 085 sos_size_t init_thread_stack_size)
099 { 086 {
100 struct sos_thread *myself; 087 struct sos_thread *myself;
101 088
102 089
103 cache_thread = sos_kmem_cache_create("thread 090 cache_thread = sos_kmem_cache_create("thread",
104 sizeof( 091 sizeof(struct sos_thread),
105 2, 092 2,
106 0, 093 0,
107 SOS_KSL 094 SOS_KSLAB_CREATE_MAP
108 | SOS_K 095 | SOS_KSLAB_CREATE_ZERO);
109 if (! cache_thread) 096 if (! cache_thread)
110 return -SOS_ENOMEM; 097 return -SOS_ENOMEM;
111 098
112 099
113 myself = (struct sos_thread*) sos_kmem_cache 100 myself = (struct sos_thread*) sos_kmem_cache_alloc(cache_thread,
114 101 SOS_KSLAB_ALLOC_ATOMIC);
115 if (! myself) 102 if (! myself)
116 return -SOS_ENOMEM; 103 return -SOS_ENOMEM;
117 104
118 105
119 strzcpy(myself->name, "[kinit]", SOS_THR_MAX 106 strzcpy(myself->name, "[kinit]", SOS_THR_MAX_NAMELEN);
120 myself->state = SOS_THR_CREATED; 107 myself->state = SOS_THR_CREATED;
121 myself->priority = SOS_SCHED_PRIO_LOWEST; <<
122 myself->kernel_stack_base_addr = init_thread 108 myself->kernel_stack_base_addr = init_thread_stack_base_addr;
123 myself->kernel_stack_size = init_thread 109 myself->kernel_stack_size = init_thread_stack_size;
124 110
125 111
126 sos_cpu_state_prepare_detect_kernel_stack_ov 112 sos_cpu_state_prepare_detect_kernel_stack_overflow(myself->cpu_state,
127 113 myself->kernel_stack_base_addr,
128 114 myself->kernel_stack_size);
129 115
130 116
131 list_singleton_named(thread_list, myself, gb 117 list_singleton_named(thread_list, myself, gbl_prev, gbl_next);
132 118
133 119
134 myself->state = SOS_THR_READY; 120 myself->state = SOS_THR_READY;
135 _set_current(myself); 121 _set_current(myself);
136 122
137 return SOS_OK; 123 return SOS_OK;
138 } 124 }
139 125
140 126
141 struct sos_thread * 127 struct sos_thread *
142 sos_create_kernel_thread(const char *name, 128 sos_create_kernel_thread(const char *name,
143 sos_kernel_thread_sta 129 sos_kernel_thread_start_routine_t start_func,
144 void *start_arg, !! 130 void *start_arg)
145 sos_sched_priority_t priority) <<
146 { 131 {
147 __label__ undo_creation; 132 __label__ undo_creation;
148 sos_ui32_t flags; 133 sos_ui32_t flags;
149 struct sos_thread *new_thread; 134 struct sos_thread *new_thread;
150 135
151 if (! start_func) 136 if (! start_func)
152 return NULL; 137 return NULL;
153 if (! SOS_SCHED_PRIO_IS_VALID(priority)) <<
154 return NULL; <<
155 138
156 139
157 new_thread 140 new_thread
158 = (struct sos_thread*) sos_kmem_cache_allo 141 = (struct sos_thread*) sos_kmem_cache_alloc(cache_thread,
159 142 SOS_KSLAB_ALLOC_ATOMIC);
160 if (! new_thread) 143 if (! new_thread)
161 return NULL; 144 return NULL;
162 145
163 146
164 strzcpy(new_thread->name, ((name)?name:"[NON 147 strzcpy(new_thread->name, ((name)?name:"[NONAME]"), SOS_THR_MAX_NAMELEN);
165 new_thread->state = SOS_THR_CREATED; 148 new_thread->state = SOS_THR_CREATED;
166 new_thread->priority = priority; <<
167 149
168 150
169 new_thread->kernel_stack_base_addr = sos_kma 151 new_thread->kernel_stack_base_addr = sos_kmalloc(SOS_THREAD_KERNEL_STACK_SIZE, 0);
170 new_thread->kernel_stack_size = SOS_THR 152 new_thread->kernel_stack_size = SOS_THREAD_KERNEL_STACK_SIZE;
171 if (! new_thread->kernel_stack_base_addr) 153 if (! new_thread->kernel_stack_base_addr)
172 goto undo_creation; 154 goto undo_creation;
173 155
174 156
175 if (SOS_OK 157 if (SOS_OK
176 != sos_cpu_kstate_init(& new_thread->cpu 158 != sos_cpu_kstate_init(& new_thread->cpu_state,
177 (sos_cpu_kstate_f 159 (sos_cpu_kstate_function_arg1_t*) start_func,
178 (sos_ui32_t) star 160 (sos_ui32_t) start_arg,
179 new_thread->kerne 161 new_thread->kernel_stack_base_addr,
180 new_thread->kerne 162 new_thread->kernel_stack_size,
181 (sos_cpu_kstate_f 163 (sos_cpu_kstate_function_arg1_t*) sos_thread_exit,
182 (sos_ui32_t) NULL 164 (sos_ui32_t) NULL))
183 goto undo_creation; 165 goto undo_creation;
184 166
185 167
186 sos_disable_IRQs(flags); 168 sos_disable_IRQs(flags);
187 list_add_tail_named(thread_list, new_thread, 169 list_add_tail_named(thread_list, new_thread, gbl_prev, gbl_next);
188 sos_restore_IRQs(flags); 170 sos_restore_IRQs(flags);
189 171
190 172
191 if (SOS_OK != sos_sched_set_ready(new_thread 173 if (SOS_OK != sos_sched_set_ready(new_thread))
192 goto undo_creation; 174 goto undo_creation;
193 175
194 176
195 return new_thread; 177 return new_thread;
196 178
197 undo_creation: 179 undo_creation:
198 if (new_thread->kernel_stack_base_addr) 180 if (new_thread->kernel_stack_base_addr)
199 sos_kfree((sos_vaddr_t) new_thread->kernel 181 sos_kfree((sos_vaddr_t) new_thread->kernel_stack_base_addr);
200 sos_kmem_cache_free((sos_vaddr_t) new_thread 182 sos_kmem_cache_free((sos_vaddr_t) new_thread);
201 return NULL; 183 return NULL;
202 } 184 }
203 185
204 186
205 <<
206 <<
207 <<
208 <<
209 <<
210 <<
211 static struct sos_thread * <<
212 create_user_thread(const char *name, <<
213 struct sos_process *process, <<
214 const struct sos_thread * model_thread, <<
215 const struct sos_cpu_state * model_uctxt, <<
216 sos_uaddr_t user_initial_PC, <<
217 sos_ui32_t user_start_arg1, <<
218 sos_ui32_t user_start_arg2, <<
219 sos_uaddr_t user_initial_SP, <<
220 sos_sched_priority_t priority) <<
221 { <<
222 __label__ undo_creation; <<
223 sos_ui32_t flags; <<
224 struct sos_thread *new_thread; <<
225 <<
226 if (model_thread) <<
227 { <<
228 SOS_ASSERT_FATAL(model_uctxt); <<
229 } <<
230 else <<
231 { <<
232 if (! SOS_SCHED_PRIO_IS_VALID(priority)) <<
233 return NULL; <<
234 } <<
235 <<
236 <<
237 if (! process) <<
238 return NULL; <<
239 <<
240 <<
241 new_thread <<
242 = (struct sos_thread*) sos_kmem_cache_alloc(cache_thread, <<
243 SOS_KSLAB_ALLOC_ATOMIC); <<
244 if (! new_thread) <<
245 return NULL; <<
246 <<
247 <<
248 strzcpy(new_thread->name, ((name)?name:"[NON <<
249 new_thread->state = SOS_THR_CREATED; <<
250 if (model_thread) <<
251 new_thread->priority = model_thread->priority; <<
252 else <<
253 new_thread->priority = priority; <<
254 <<
255 <<
256 new_thread->kernel_stack_base_addr = sos_kmalloc(SOS_THREAD_KERNEL_STACK_SIZE, 0); <<
257 new_thread->kernel_stack_size = SOS_THREAD_KERNEL_STACK_SIZE; <<
258 if (! new_thread->kernel_stack_base_addr) <<
259 goto undo_creation; <<
260 <<
261 <<
262 if (model_thread) <<
263 { <<
264 if (SOS_OK <<
265 != sos_cpu_ustate_duplicate(& new_thread->cpu_state, <<
266 model_uctxt, <<
267 user_start_arg1, <<
268 new_thread->kernel_stack_base_addr, <<
269 new_thread->kernel_stack_size)) <<
270 goto undo_creation; <<
271 } <<
272 else <<
273 { <<
274 if (SOS_OK <<
275 != sos_cpu_ustate_init(& new_thread->cpu_state, <<
276 user_initial_PC, <<
277 user_start_arg1, <<
278 user_start_arg2, <<
279 user_initial_SP, <<
280 new_thread->kernel_stack_base_addr, <<
281 new_thread->kernel_stack_size)) <<
282 goto undo_creation; <<
283 } <<
284 <<
285 <<
286 if (SOS_OK != sos_process_register_thread(process, new_thread)) <<
287 goto undo_creation; <<
288 <<
289 <<
290 sos_disable_IRQs(flags); <<
291 list_add_tail_named(thread_list, new_thread, gbl_prev, gbl_next); <<
292 sos_restore_IRQs(flags); <<
293 <<
294 <<
295 if (SOS_OK != sos_sched_set_ready(new_thread)) <<
296 goto undo_creation; <<
297 <<
298 <<
299 return new_thread; <<
300 <<
301 undo_creation: <<
302 if (new_thread->kernel_stack_base_addr) <<
303 sos_kfree((sos_vaddr_t) new_thread->kernel_stack_base_addr); <<
304 sos_kmem_cache_free((sos_vaddr_t) new_thread); <<
305 return NULL; <<
306 } <<
307 <<
308 <<
309 struct sos_thread * <<
310 sos_create_user_thread(const char *name, <<
311 struct sos_process *process, <<
312 sos_uaddr_t user_initial_PC, <<
313 sos_ui32_t user_start_arg1, <<
314 sos_ui32_t user_start_arg2, <<
315 sos_uaddr_t user_initial_SP, <<
316 sos_sched_priority_t priority) <<
317 { <<
318 return create_user_thread(name, process, NULL, NULL, <<
319 user_initial_PC, <<
320 user_start_arg1, <<
321 user_start_arg2, <<
322 user_initial_SP, <<
323 priority); <<
324 } <<
325 <<
326 <<
327 <<
328 <<
329 <<
330 <<
331 struct sos_thread * <<
332 sos_duplicate_user_thread(const char *name, <<
333 struct sos_process *process, <<
334 const struct sos_thread * model_thread, <<
335 const struct sos_cpu_state * model_uctxt, <<
336 sos_ui32_t retval) <<
337 { <<
338 return create_user_thread(name, process, model_thread, model_uctxt, <<
339 0, retval, 0, 0, 0); <<
340 } <<
341 <<
342 <<
343 <<
344 <<
345 <<
346 <<
347 <<
348 <<
349 <<
350 <<
351 static void _prepare_mm_context(struct sos_thread *the_thread) <<
352 { <<
353 <<
354 if (sos_cpu_context_is_in_user_mode(the_thread->cpu_state) <<
355 == TRUE) <<
356 { <<
357 <<
358 <<
359 <<
360 <<
361 SOS_ASSERT_FATAL(the_thread->process != NULL); <<
362 <<
363 <<
364 SOS_ASSERT_FATAL(the_thread->squatted_mm_context == NULL); <<
365 <<
366 <<
367 sos_mm_context_switch_to(sos_process_get_mm_context(the_thread->process)); <<
368 } <<
369 <<
370 <<
371 <<
372 else if (the_thread->squatted_mm_context != NULL) <<
373 sos_mm_context_switch_to(the_thread->squatted_mm_context); <<
374 } <<
375 <<
376 <<
377 187
378 188
379 static void delete_thread(struct sos_thread *t 189 static void delete_thread(struct sos_thread *thr)
380 { 190 {
381 sos_ui32_t flags; 191 sos_ui32_t flags;
382 192
383 sos_disable_IRQs(flags); 193 sos_disable_IRQs(flags);
384 list_delete_named(thread_list, thr, gbl_prev 194 list_delete_named(thread_list, thr, gbl_prev, gbl_next);
385 sos_restore_IRQs(flags); 195 sos_restore_IRQs(flags);
386 196
387 sos_kfree((sos_vaddr_t) thr->kernel_stack_ba 197 sos_kfree((sos_vaddr_t) thr->kernel_stack_base_addr);
388 <<
389 <<
390 if (thr->squatted_mm_context) <<
391 SOS_ASSERT_FATAL(SOS_OK == change_current_mm_context(NULL)); <<
392 <<
393 <<
394 if (thr->process) <<
395 SOS_ASSERT_FATAL(SOS_OK == sos_process_unr <<
396 <<
397 memset(thr, 0x0, sizeof(struct sos_thread)); 198 memset(thr, 0x0, sizeof(struct sos_thread));
398 sos_kmem_cache_free((sos_vaddr_t) thr); 199 sos_kmem_cache_free((sos_vaddr_t) thr);
399 } 200 }
400 201
401 202
402 void sos_thread_exit() 203 void sos_thread_exit()
403 { 204 {
404 sos_ui32_t flags; 205 sos_ui32_t flags;
405 struct sos_thread *myself, *next_thread; 206 struct sos_thread *myself, *next_thread;
406 207
407 208
408 SOS_ASSERT_FATAL(! sos_servicing_irq()); 209 SOS_ASSERT_FATAL(! sos_servicing_irq());
409 210
410 myself = sos_thread_get_current(); 211 myself = sos_thread_get_current();
411 212
412 213
413 214
414 SOS_ASSERT_FATAL(list_is_empty_named(myself- 215 SOS_ASSERT_FATAL(list_is_empty_named(myself->kwaitq_list,
415 prev_en 216 prev_entry_for_thread,
416 next_en 217 next_entry_for_thread));
417 218
418 219
419 sos_disable_IRQs(flags); 220 sos_disable_IRQs(flags);
420 myself->state = SOS_THR_ZOMBIE; 221 myself->state = SOS_THR_ZOMBIE;
421 next_thread = sos_reschedule(myself, FALSE); 222 next_thread = sos_reschedule(myself, FALSE);
422 223
423 224
424 sos_cpu_state_detect_kernel_stack_overflow(n 225 sos_cpu_state_detect_kernel_stack_overflow(next_thread->cpu_state,
425 n 226 next_thread->kernel_stack_base_addr,
426 n 227 next_thread->kernel_stack_size);
427 228
428 <<
429 <<
430 <<
431 _prepare_mm_context(next_thread); <<
432 <<
433 229
434 230
435 231
436 232
437 _set_current(next_thread); 233 _set_current(next_thread);
438 sos_cpu_context_exit_to(next_thread->cpu_sta 234 sos_cpu_context_exit_to(next_thread->cpu_state,
439 (sos_cpu_kstate_func 235 (sos_cpu_kstate_function_arg1_t*) delete_thread,
440 (sos_ui32_t) myself) 236 (sos_ui32_t) myself);
441 } 237 }
442 238
443 239
444 sos_sched_priority_t sos_thread_get_priority(struct sos_thread *thr) <<
445 { <<
446 if (! thr) <<
447 thr = (struct sos_thread*)current_thread; <<
448 <<
449 return thr->priority; <<
450 } <<
451 <<
452 <<
453 sos_thread_state_t sos_thread_get_state(struct 240 sos_thread_state_t sos_thread_get_state(struct sos_thread *thr)
454 { 241 {
455 if (! thr) 242 if (! thr)
456 thr = (struct sos_thread*)current_thread; 243 thr = (struct sos_thread*)current_thread;
457 244
458 return thr->state; 245 return thr->state;
459 } 246 }
460 247
461 248
462 typedef enum { YIELD_MYSELF, BLOCK_MYSELF } sw 249 typedef enum { YIELD_MYSELF, BLOCK_MYSELF } switch_type_t;
463 250
464 251
465 252
466 253
467 static sos_ret_t _switch_to_next_thread(switch 254 static sos_ret_t _switch_to_next_thread(switch_type_t operation)
468 { 255 {
469 struct sos_thread *myself, *next_thread; 256 struct sos_thread *myself, *next_thread;
470 257
471 SOS_ASSERT_FATAL(current_thread->state == SO 258 SOS_ASSERT_FATAL(current_thread->state == SOS_THR_RUNNING);
472 259
473 260
474 SOS_ASSERT_FATAL(! sos_servicing_irq()); 261 SOS_ASSERT_FATAL(! sos_servicing_irq());
475 262
476 myself = (struct sos_thread*)current_thread; 263 myself = (struct sos_thread*)current_thread;
477 264
478 265
479 266
480 if (BLOCK_MYSELF == operation) 267 if (BLOCK_MYSELF == operation)
481 { 268 {
482 myself->state = SOS_THR_BLOCKED; 269 myself->state = SOS_THR_BLOCKED;
483 } 270 }
484 271
485 272
486 next_thread = sos_reschedule(myself, YIELD_M 273 next_thread = sos_reschedule(myself, YIELD_MYSELF == operation);
487 274
488 275
489 if (myself != next_thread) 276 if (myself != next_thread)
490 { 277 {
491 278
492 sos_cpu_state_detect_kernel_stack_overfl 279 sos_cpu_state_detect_kernel_stack_overflow(next_thread->cpu_state,
493 280 next_thread->kernel_stack_base_addr,
494 281 next_thread->kernel_stack_size);
495 282
496 <<
497 <<
498 <<
499 _prepare_mm_context(next_thread); <<
500 283
501 284
502 285
503 286
504 _set_current(next_thread); 287 _set_current(next_thread);
505 sos_cpu_context_switch(& myself->cpu_sta 288 sos_cpu_context_switch(& myself->cpu_state, next_thread->cpu_state);
506 289
507 290
508 SOS_ASSERT_FATAL(current_thread == mysel 291 SOS_ASSERT_FATAL(current_thread == myself);
509 SOS_ASSERT_FATAL(current_thread->state = 292 SOS_ASSERT_FATAL(current_thread->state == SOS_THR_RUNNING);
510 } 293 }
511 else 294 else
512 { 295 {
513 296
514 _set_current(next_thread); 297 _set_current(next_thread);
515 } 298 }
516 299
517 return SOS_OK; 300 return SOS_OK;
518 } 301 }
519 302
520 303
521 <<
522 <<
523 <<
524 <<
525 static sos_ret_t _change_waitq_priorities(struct sos_thread *thr, <<
526 sos_sched_priority_t priority) <<
527 { <<
528 struct sos_kwaitq_entry *kwq_entry; <<
529 int nb_waitqs; <<
530 <<
531 list_foreach_forward_named(thr->kwaitq_list, kwq_entry, nb_waitqs, <<
532 prev_entry_for_thread, next_entry_for_thread) <<
533 { <<
534 SOS_ASSERT_FATAL(SOS_OK == sos_kwaitq_change_priority(kwq_entry->kwaitq, <<
535 kwq_entry, <<
536 priority)); <<
537 } <<
538 <<
539 return SOS_OK; <<
540 } <<
541 <<
542 <<
543 sos_ret_t sos_thread_set_priority(struct sos_thread *thr, <<
544 sos_sched_priority_t priority) <<
545 { <<
546 __label__ exit_set_prio; <<
547 sos_ui32_t flags; <<
548 sos_ret_t retval; <<
549 <<
550 <<
551 if (! SOS_SCHED_PRIO_IS_VALID(priority)) <<
552 return -SOS_EINVAL; <<
553 <<
554 if (! thr) <<
555 thr = (struct sos_thread*)current_thread; <<
556 <<
557 sos_disable_IRQs(flags); <<
558 <<
559 <<
560 <<
561 retval = _change_waitq_priorities(thr, priority); <<
562 if (SOS_OK != retval) <<
563 goto exit_set_prio; <<
564 <<
565 <<
566 <<
567 if (SOS_THR_READY == thr->state) <<
568 retval = sos_sched_change_priority(thr, priority); <<
569 <<
570 <<
571 thr->priority = priority; <<
572 <<
573 exit_set_prio: <<
574 sos_restore_IRQs(flags); <<
575 return retval; <<
576 } <<
577 <<
578 <<
579 sos_ret_t sos_thread_yield() 304 sos_ret_t sos_thread_yield()
580 { 305 {
581 sos_ui32_t flags; 306 sos_ui32_t flags;
582 sos_ret_t retval; 307 sos_ret_t retval;
583 308
584 sos_disable_IRQs(flags); 309 sos_disable_IRQs(flags);
585 310
586 retval = _switch_to_next_thread(YIELD_MYSELF 311 retval = _switch_to_next_thread(YIELD_MYSELF);
587 312
588 sos_restore_IRQs(flags); 313 sos_restore_IRQs(flags);
589 return retval; 314 return retval;
590 } 315 }
591 316
592 317
593 318
594 319
595 320
596 struct sleep_timeout_params 321 struct sleep_timeout_params
597 { 322 {
598 struct sos_thread *thread_to_wakeup; 323 struct sos_thread *thread_to_wakeup;
599 sos_bool_t timeout_triggered; 324 sos_bool_t timeout_triggered;
600 }; 325 };
601 326
602 327
603 328
604 329
605 330
606 static void sleep_timeout(struct sos_timeout_a 331 static void sleep_timeout(struct sos_timeout_action *act)
607 { 332 {
608 struct sleep_timeout_params *sleep_timeout_p 333 struct sleep_timeout_params *sleep_timeout_params
609 = (struct sleep_timeout_params*) act->rout 334 = (struct sleep_timeout_params*) act->routine_data;
610 335
611 336
612 sleep_timeout_params->timeout_triggered = TR 337 sleep_timeout_params->timeout_triggered = TRUE;
613 338
614 339
615 SOS_ASSERT_FATAL(SOS_OK == 340 SOS_ASSERT_FATAL(SOS_OK ==
616 sos_thread_force_unblock(sl 341 sos_thread_force_unblock(sleep_timeout_params
617 - 342 ->thread_to_wakeup));
618 } 343 }
619 344
620 345
621 sos_ret_t sos_thread_sleep(struct sos_time *ti 346 sos_ret_t sos_thread_sleep(struct sos_time *timeout)
622 { 347 {
623 sos_ui32_t flags; 348 sos_ui32_t flags;
624 struct sleep_timeout_params sleep_timeout_pa 349 struct sleep_timeout_params sleep_timeout_params;
625 struct sos_timeout_action timeout_action; 350 struct sos_timeout_action timeout_action;
626 sos_ret_t retval; 351 sos_ret_t retval;
627 352
628 353
629 if (NULL == timeout) 354 if (NULL == timeout)
630 { 355 {
631 sos_disable_IRQs(flags); 356 sos_disable_IRQs(flags);
632 retval = _switch_to_next_thread(BLOCK_MY 357 retval = _switch_to_next_thread(BLOCK_MYSELF);
633 sos_restore_IRQs(flags); 358 sos_restore_IRQs(flags);
634 359
635 return retval; 360 return retval;
636 } 361 }
637 362
638 363
639 sos_time_init_action(& timeout_action); 364 sos_time_init_action(& timeout_action);
640 365
641 366
642 sleep_timeout_params.thread_to_wakeup 367 sleep_timeout_params.thread_to_wakeup
643 = (struct sos_thread*)current_thread; 368 = (struct sos_thread*)current_thread;
644 sleep_timeout_params.timeout_triggered = FAL 369 sleep_timeout_params.timeout_triggered = FALSE;
645 370
646 sos_disable_IRQs(flags); 371 sos_disable_IRQs(flags);
647 372
648 373
649 SOS_ASSERT_FATAL(SOS_OK == 374 SOS_ASSERT_FATAL(SOS_OK ==
650 sos_time_register_action_re 375 sos_time_register_action_relative(& timeout_action,
651 376 timeout,
652 377 sleep_timeout,
653 378 & sleep_timeout_params));
654 379
655 380
656 381
657 382
658 retval = _switch_to_next_thread(BLOCK_MYSELF 383 retval = _switch_to_next_thread(BLOCK_MYSELF);
659 384
660 385
661 386
662 if (sleep_timeout_params.timeout_triggered) 387 if (sleep_timeout_params.timeout_triggered)
663 { 388 {
664 389
665 SOS_ASSERT_FATAL(sos_time_is_zero(& time 390 SOS_ASSERT_FATAL(sos_time_is_zero(& timeout_action.timeout));
666 retval = SOS_OK; 391 retval = SOS_OK;
667 } 392 }
668 else 393 else
669 { 394 {
670 395
671 396
672 SOS_ASSERT_FATAL(SOS_OK == sos_time_unre 397 SOS_ASSERT_FATAL(SOS_OK == sos_time_unregister_action(& timeout_action));
673 retval = -SOS_EINTR; 398 retval = -SOS_EINTR;
674 } 399 }
675 400
676 sos_restore_IRQs(flags); 401 sos_restore_IRQs(flags);
677 402
678 403
679 memcpy(timeout, & timeout_action.timeout, si 404 memcpy(timeout, & timeout_action.timeout, sizeof(struct sos_time));
680 405
681 return retval; 406 return retval;
682 } 407 }
683 408
684 409
685 sos_ret_t sos_thread_force_unblock(struct sos_ 410 sos_ret_t sos_thread_force_unblock(struct sos_thread *thread)
686 { 411 {
687 sos_ret_t retval; 412 sos_ret_t retval;
688 sos_ui32_t flags; 413 sos_ui32_t flags;
689 414
690 if (! thread) 415 if (! thread)
691 return -SOS_EINVAL; 416 return -SOS_EINVAL;
692 417
693 sos_disable_IRQs(flags); 418 sos_disable_IRQs(flags);
694 419
695 420
696 retval = SOS_OK; 421 retval = SOS_OK;
697 switch(sos_thread_get_state(thread)) 422 switch(sos_thread_get_state(thread))
698 { 423 {
699 case SOS_THR_RUNNING: 424 case SOS_THR_RUNNING:
700 case SOS_THR_READY: 425 case SOS_THR_READY:
701 426
702 break; 427 break;
703 428
704 case SOS_THR_ZOMBIE: 429 case SOS_THR_ZOMBIE:
705 retval = -SOS_EFATAL; 430 retval = -SOS_EFATAL;
706 break; 431 break;
707 432
708 default: 433 default:
709 retval = sos_sched_set_ready(thread); 434 retval = sos_sched_set_ready(thread);
710 break; 435 break;
711 } 436 }
712 437
713 sos_restore_IRQs(flags); 438 sos_restore_IRQs(flags);
714 439
715 return retval; 440 return retval;
716 } <<
717 <<
718 <<
719 void sos_thread_dump_backtrace(sos_bool_t on_console, <<
720 sos_bool_t on_bochs) <<
721 { <<
722 sos_vaddr_t stack_bottom = current_thread->kernel_stack_base_addr; <<
723 sos_size_t stack_size = current_thread->kernel_stack_size; <<
724 <<
725 static void backtracer(sos_vaddr_t PC, <<
726 sos_vaddr_t params, <<
727 sos_ui32_t depth, <<
728 void *custom_arg) <<
729 { <<
730 sos_ui32_t invalid = 0xffffffff, *arg1, *arg2, *arg3, *arg4; <<
731 <<
732 <<
733 <<
734 <<
735 <<
736 arg1 = (sos_ui32_t*)params; <<
737 arg2 = (sos_ui32_t*)(params+4); <<
738 arg3 = (sos_ui32_t*)(params+8); <<
739 arg4 = (sos_ui32_t*)(params+12); <<
740 <<
741 <<
742 <<
743 #define INTERVAL_OK(b,v,u) ( ((b) <= (sos_vaddr_t)(v)) \ <<
744 && ((sos_vaddr_t)(v) < (u)) ) <<
745 if (!INTERVAL_OK(stack_bottom, arg1, stack_bottom + stack_size)) <<
746 arg1 = &invalid; <<
747 if (!INTERVAL_OK(stack_bottom, arg2, stack_bottom + stack_size)) <<
748 arg2 = &invalid; <<
749 if (!INTERVAL_OK(stack_bottom, arg3, stack_bottom + stack_size)) <<
750 arg3 = &invalid; <<
751 if (!INTERVAL_OK(stack_bottom, arg4, stack_bottom + stack_size)) <<
752 arg4 = &invalid; <<
753 <<
754 <<
755 if (on_bochs) <<
756 sos_bochs_printf("[%d] PC=0x%x arg1=0x%x arg2=0x%x arg3=0x%x\n", <<
757 (unsigned)depth, (unsigned)PC, <<
758 (unsigned)*arg1, (unsigned)*arg2, <<
759 (unsigned)*arg3); <<
760 <<
761 if (on_console) <<
762 sos_x86_videomem_printf(23-depth, 3, <<
763 SOS_X86_VIDEO_ <<
764 | SOS_X86_VI <<
765 "[%d] PC=0x%x <<
766 (unsigned)dept <<
767 (unsigned)*arg <<
768 (unsigned)*arg <<
769 <<
770 } <<
771 <<
772 sos_backtrace(NULL, 15, stack_bottom, stack_size, <<
773 backtracer, NULL); <<
774 } <<
775 <<
776 <<
777 <<
778 <<
779 <<
780 <<
781 <<
782 <<
783 static sos_ret_t <<
784 change_current_mm_context(struct sos_mm_context *mm_ctxt) <<
785 { <<
786 <<
787 struct sos_mm_context * prev_mm_ctxt <<
788 = current_thread->squatted_mm_context; <<
789 <<
790 <<
791 current_thread->squatted_mm_context = mm_ctxt; <<
792 <<
793 <<
794 <<
795 if (mm_ctxt != NULL) <<
796 { <<
797 sos_mm_context_ref(mm_ctxt); <<
798 <<
799 <<
800 sos_mm_context_switch_to(mm_ctxt); <<
801 } <<
802 else <<
803 sos_mm_context_unref(prev_mm_ctxt); <<
804 <<
805 <<
806 <<
807 return SOS_OK; <<
808 } <<
809 <<
810 <<
811 sos_ret_t <<
812 sos_thread_prepare_user_space_access(struct sos_umem_vmm_as * dest_as, <<
813 sos_vaddr_t fixup_retvaddr) <<
814 { <<
815 sos_ret_t retval; <<
816 sos_ui32_t flags; <<
817 <<
818 if (! dest_as) <<
819 { <<
820 <<
821 if (! current_thread->process) <<
822 return -SOS_EINVAL; <<
823 <<
824 dest_as = sos_process_get_address_space(current_thread->process); <<
825 } <<
826 else <<
827 <<
828 <<
829 SOS_ASSERT_FATAL(! fixup_retvaddr); <<
830 <<
831 sos_disable_IRQs(flags); <<
832 SOS_ASSERT_FATAL(NULL == current_thread->squatted_mm_context); <<
833 SOS_ASSERT_FATAL(0 == current_thread->fixup_uaccess.return_vaddr); <<
834 <<
835 <<
836 retval = change_current_mm_context(sos_umem_vmm_get_mm_context(dest_as)); <<
837 if (SOS_OK == retval) <<
838 { <<
839 current_thread->fixup_uaccess.return_vaddr = fixup_retvaddr; <<
840 current_thread->fixup_uaccess.faulted_uaddr = 0; <<
841 } <<
842 <<
843 sos_restore_IRQs(flags); <<
844 return retval; <<
845 } <<
846 <<
847 <<
848 sos_ret_t <<
849 sos_thread_end_user_space_access(void) <<
850 { <<
851 sos_ret_t retval; <<
852 sos_ui32_t flags; <<
853 <<
854 sos_disable_IRQs(flags); <<
855 SOS_ASSERT_FATAL(NULL != current_thread->squatted_mm_context); <<
856 <<
857 <<
858 retval = change_current_mm_context(NULL); <<
859 current_thread->fixup_uaccess.return_vaddr = 0; <<
860 current_thread->fixup_uaccess.faulted_uaddr = 0; <<
861 <<
862 sos_restore_IRQs(flags); <<
863 return retval; <<
864 } <<
865 <<
866 <<
867 void sos_thread_prepare_syscall_switch_back(struct sos_cpu_state *cpu_state) <<
868 { <<
869 <<
870 <<
871 <<
872 <<
873 <<
874 <<
875 <<
876 <<
877 current_thread->cpu_state = cpu_state; <<
878 <<
879 <<
880 _prepare_mm_context((struct sos_thread*) current_thread); <<
881 } <<
882 <<
883 <<
884 void sos_thread_prepare_exception_switch_back(struct sos_cpu_state *cpu_state) <<
885 { <<
886 <<
887 <<
888 <<
889 <<
890 <<
891 <<
892 <<
893 <<
894 current_thread->cpu_state = cpu_state; <<
895 <<
896 <<
897 _prepare_mm_context((struct sos_thread*) current_thread); <<
898 } <<
899 <<
900 <<
901 void <<
902 sos_thread_prepare_irq_servicing(struct sos_cpu_state *interrupted_state) <<
903 { <<
904 current_thread->cpu_state = interrupted_state; <<
905 } <<
906 <<
907 <<
908 struct sos_cpu_state * <<
909 sos_thread_prepare_irq_switch_back(void) <<
910 { <<
911 struct sos_thread *myself, *next_thread; <<
912 <<
913 <<
914 <<
915 if (! sos_cpu_context_is_in_user_mode(current_thread->cpu_state)) <<
916 return current_thread->cpu_state; <<
917 <<
918 <<
919 <<
920 <<
921 <<
922 <<
923 <<
924 SOS_ASSERT_FATAL(current_thread->process != NULL); <<
925 <<
926 <<
927 myself = (struct sos_thread*)current_thread; <<
928 <<
929 <<
930 next_thread = sos_reschedule(myself, FALSE); <<
931 <<
932 <<
933 _prepare_mm_context(next_thread); <<
934 <<
935 <<
936 _set_current(next_thread); <<
937 return next_thread->cpu_state; <<
938 } 441 }
938 } 441 }