Diff markup
001 001
002 002
003 003
004 004
005 005
006 006
007 007
008 008
009 009
010 010
011 011
012 012
013 013
014 014
015 015
016 016
017 017
018 018
019 #include <sos/physmem.h> 019 #include <sos/physmem.h>
020 #include <sos/kmem_slab.h> 020 #include <sos/kmem_slab.h>
021 #include <sos/kmalloc.h> 021 #include <sos/kmalloc.h>
022 #include <sos/klibc.h> 022 #include <sos/klibc.h>
023 #include <sos/list.h> 023 #include <sos/list.h>
024 #include <sos/assert.h> 024 #include <sos/assert.h>
>> 025 #include <hwcore/mm_context.h>
>> 026 #include <sos/process.h>
>> 027
>> 028 #include <drivers/bochs.h>
>> 029 #include <drivers/x86_videomem.h>
025 030
026 #include <hwcore/irq.h> 031 #include <hwcore/irq.h>
027 032
028 #include "thread.h" 033 #include "thread.h"
029 034
030 035
031 036
032 037
033 038
/* Size of the kernel stack allocated for each new thread: one page. */
#define SOS_THREAD_KERNEL_STACK_SIZE (1*SOS_PAGE_SIZE)
035 040
036 041
037 042
038 043
039 044
040 045
041 046
042 047
043 048
044 049
045 050
046 051
047 052
048 053
049 054
050 055
/** The thread currently running on the (single) CPU.  NULL until
    sos_thread_subsystem_setup() installs the bootstrap thread. */
static volatile struct sos_thread *current_thread = NULL;

/** Global list of every thread in the system, linked through the
    gbl_prev/gbl_next fields. */
static struct sos_thread *thread_list = NULL;

/** Slab cache from which struct sos_thread descriptors are allocated
    (created in sos_thread_subsystem_setup()). */
static struct sos_kslab_cache *cache_thread;

/* Forward declaration: install (non-NULL) or release (NULL) the MM
   context "squatted" by the current thread.  Defined below. */
static sos_ret_t change_current_mm_context(struct sos_mm_context *mm_ctxt);
>> 079
>> 080
068 struct sos_thread *sos_thread_get_current() 081 struct sos_thread *sos_thread_get_current()
069 { 082 {
070 SOS_ASSERT_FATAL(current_thread->state == SO 083 SOS_ASSERT_FATAL(current_thread->state == SOS_THR_RUNNING);
071 return (struct sos_thread*)current_thread; 084 return (struct sos_thread*)current_thread;
072 } 085 }
073 086
074 087
075 inline static sos_ret_t _set_current(struct so 088 inline static sos_ret_t _set_current(struct sos_thread *thr)
076 { 089 {
077 SOS_ASSERT_FATAL(thr->state == SOS_THR_READY 090 SOS_ASSERT_FATAL(thr->state == SOS_THR_READY);
078 current_thread = thr; 091 current_thread = thr;
079 current_thread->state = SOS_THR_RUNNING; 092 current_thread->state = SOS_THR_RUNNING;
080 return SOS_OK; 093 return SOS_OK;
081 } 094 }
082 095
083 096
/**
 * Bootstrap the thread subsystem: create the thread slab cache and
 * turn the flow of control currently executing (with the given stack)
 * into the first real thread, "[kinit]".
 *
 * @param init_thread_stack_base_addr  Base of the already-in-use stack.
 * @param init_thread_stack_size       Its size in bytes.
 * @return SOS_OK, or -SOS_ENOMEM on allocation failure.
 */
sos_ret_t sos_thread_subsystem_setup(sos_vaddr_t init_thread_stack_base_addr,
                                     sos_size_t init_thread_stack_size)
{
  struct sos_thread *myself;

  /* Allocator for every future struct sos_thread (mapped + zeroed
     slabs). */
  cache_thread = sos_kmem_cache_create("thread",
                                       sizeof(struct sos_thread),
                                       2,
                                       0,
                                       SOS_KSLAB_CREATE_MAP
                                       | SOS_KSLAB_CREATE_ZERO);
  if (! cache_thread)
    return -SOS_ENOMEM;

  /* Descriptor for the already-running bootstrap thread. */
  myself = (struct sos_thread*) sos_kmem_cache_alloc(cache_thread,
                                                     SOS_KSLAB_ALLOC_ATOMIC);
  if (! myself)
    return -SOS_ENOMEM;

  /* Basic attributes of the bootstrap thread. */
  strzcpy(myself->name, "[kinit]", SOS_THR_MAX_NAMELEN);
  myself->state = SOS_THR_CREATED;
  myself->priority = SOS_SCHED_PRIO_LOWEST;
  myself->kernel_stack_base_addr = init_thread_stack_base_addr;
  myself->kernel_stack_size      = init_thread_stack_size;

  /* Plant the stack-overflow detection marker.
     NOTE(review): myself->cpu_state is still the slab's zero value
     here -- confirm the callee tolerates a NULL/zero cpu_state. */
  sos_cpu_state_prepare_detect_kernel_stack_overflow(myself->cpu_state,
                                                     myself->kernel_stack_base_addr,
                                                     myself->kernel_stack_size);

  /* The global thread list initially contains only ourself. */
  list_singleton_named(thread_list, myself, gbl_prev, gbl_next);

  /* Go READY, then RUNNING via _set_current(). */
  myself->state = SOS_THR_READY;
  _set_current(myself);

  return SOS_OK;
}
125 139
126 140
127 struct sos_thread * 141 struct sos_thread *
128 sos_create_kernel_thread(const char *name, 142 sos_create_kernel_thread(const char *name,
129 sos_kernel_thread_sta 143 sos_kernel_thread_start_routine_t start_func,
130 void *start_arg) !! 144 void *start_arg,
>> 145 sos_sched_priority_t priority)
131 { 146 {
132 __label__ undo_creation; 147 __label__ undo_creation;
133 sos_ui32_t flags; 148 sos_ui32_t flags;
134 struct sos_thread *new_thread; 149 struct sos_thread *new_thread;
135 150
136 if (! start_func) 151 if (! start_func)
137 return NULL; 152 return NULL;
>> 153 if (! SOS_SCHED_PRIO_IS_VALID(priority))
>> 154 return NULL;
138 155
139 156
140 new_thread 157 new_thread
141 = (struct sos_thread*) sos_kmem_cache_allo 158 = (struct sos_thread*) sos_kmem_cache_alloc(cache_thread,
142 159 SOS_KSLAB_ALLOC_ATOMIC);
143 if (! new_thread) 160 if (! new_thread)
144 return NULL; 161 return NULL;
145 162
146 163
147 strzcpy(new_thread->name, ((name)?name:"[NON 164 strzcpy(new_thread->name, ((name)?name:"[NONAME]"), SOS_THR_MAX_NAMELEN);
148 new_thread->state = SOS_THR_CREATED; 165 new_thread->state = SOS_THR_CREATED;
>> 166 new_thread->priority = priority;
149 167
150 168
151 new_thread->kernel_stack_base_addr = sos_kma 169 new_thread->kernel_stack_base_addr = sos_kmalloc(SOS_THREAD_KERNEL_STACK_SIZE, 0);
152 new_thread->kernel_stack_size = SOS_THR 170 new_thread->kernel_stack_size = SOS_THREAD_KERNEL_STACK_SIZE;
153 if (! new_thread->kernel_stack_base_addr) 171 if (! new_thread->kernel_stack_base_addr)
154 goto undo_creation; 172 goto undo_creation;
155 173
156 174
157 if (SOS_OK 175 if (SOS_OK
158 != sos_cpu_kstate_init(& new_thread->cpu 176 != sos_cpu_kstate_init(& new_thread->cpu_state,
159 (sos_cpu_kstate_f 177 (sos_cpu_kstate_function_arg1_t*) start_func,
160 (sos_ui32_t) star 178 (sos_ui32_t) start_arg,
161 new_thread->kerne 179 new_thread->kernel_stack_base_addr,
162 new_thread->kerne 180 new_thread->kernel_stack_size,
163 (sos_cpu_kstate_f 181 (sos_cpu_kstate_function_arg1_t*) sos_thread_exit,
164 (sos_ui32_t) NULL 182 (sos_ui32_t) NULL))
165 goto undo_creation; 183 goto undo_creation;
166 184
167 185
168 sos_disable_IRQs(flags); 186 sos_disable_IRQs(flags);
169 list_add_tail_named(thread_list, new_thread, 187 list_add_tail_named(thread_list, new_thread, gbl_prev, gbl_next);
170 sos_restore_IRQs(flags); 188 sos_restore_IRQs(flags);
171 189
172 190
173 if (SOS_OK != sos_sched_set_ready(new_thread 191 if (SOS_OK != sos_sched_set_ready(new_thread))
174 goto undo_creation; 192 goto undo_creation;
175 193
176 194
177 return new_thread; 195 return new_thread;
178 196
179 undo_creation: 197 undo_creation:
180 if (new_thread->kernel_stack_base_addr) 198 if (new_thread->kernel_stack_base_addr)
181 sos_kfree((sos_vaddr_t) new_thread->kernel 199 sos_kfree((sos_vaddr_t) new_thread->kernel_stack_base_addr);
182 sos_kmem_cache_free((sos_vaddr_t) new_thread 200 sos_kmem_cache_free((sos_vaddr_t) new_thread);
183 return NULL; 201 return NULL;
184 } 202 }
185 203
186 204
>> 205
>> 206
>> 207
>> 208
>> 209
>> 210
>> 211 static struct sos_thread *
>> 212 create_user_thread(const char *name,
>> 213 struct sos_process *process,
>> 214 const struct sos_thread * model_thread,
>> 215 const struct sos_cpu_state * model_uctxt,
>> 216 sos_uaddr_t user_initial_PC,
>> 217 sos_ui32_t user_start_arg1,
>> 218 sos_ui32_t user_start_arg2,
>> 219 sos_uaddr_t user_initial_SP,
>> 220 sos_sched_priority_t priority)
>> 221 {
>> 222 __label__ undo_creation;
>> 223 sos_ui32_t flags;
>> 224 struct sos_thread *new_thread;
>> 225
>> 226 if (model_thread)
>> 227 {
>> 228 SOS_ASSERT_FATAL(model_uctxt);
>> 229 }
>> 230 else
>> 231 {
>> 232 if (! SOS_SCHED_PRIO_IS_VALID(priority))
>> 233 return NULL;
>> 234 }
>> 235
>> 236
>> 237 if (! process)
>> 238 return NULL;
>> 239
>> 240
>> 241 new_thread
>> 242 = (struct sos_thread*) sos_kmem_cache_alloc(cache_thread,
>> 243 SOS_KSLAB_ALLOC_ATOMIC);
>> 244 if (! new_thread)
>> 245 return NULL;
>> 246
>> 247
>> 248 strzcpy(new_thread->name, ((name)?name:"[NONAME]"), SOS_THR_MAX_NAMELEN);
>> 249 new_thread->state = SOS_THR_CREATED;
>> 250 if (model_thread)
>> 251 new_thread->priority = model_thread->priority;
>> 252 else
>> 253 new_thread->priority = priority;
>> 254
>> 255
>> 256 new_thread->kernel_stack_base_addr = sos_kmalloc(SOS_THREAD_KERNEL_STACK_SIZE, 0);
>> 257 new_thread->kernel_stack_size = SOS_THREAD_KERNEL_STACK_SIZE;
>> 258 if (! new_thread->kernel_stack_base_addr)
>> 259 goto undo_creation;
>> 260
>> 261
>> 262 if (model_thread)
>> 263 {
>> 264 if (SOS_OK
>> 265 != sos_cpu_ustate_duplicate(& new_thread->cpu_state,
>> 266 model_uctxt,
>> 267 user_start_arg1,
>> 268 new_thread->kernel_stack_base_addr,
>> 269 new_thread->kernel_stack_size))
>> 270 goto undo_creation;
>> 271 }
>> 272 else
>> 273 {
>> 274 if (SOS_OK
>> 275 != sos_cpu_ustate_init(& new_thread->cpu_state,
>> 276 user_initial_PC,
>> 277 user_start_arg1,
>> 278 user_start_arg2,
>> 279 user_initial_SP,
>> 280 new_thread->kernel_stack_base_addr,
>> 281 new_thread->kernel_stack_size))
>> 282 goto undo_creation;
>> 283 }
>> 284
>> 285
>> 286 if (SOS_OK != sos_process_register_thread(process, new_thread))
>> 287 goto undo_creation;
>> 288
>> 289
>> 290 sos_disable_IRQs(flags);
>> 291 list_add_tail_named(thread_list, new_thread, gbl_prev, gbl_next);
>> 292 sos_restore_IRQs(flags);
>> 293
>> 294
>> 295 if (SOS_OK != sos_sched_set_ready(new_thread))
>> 296 goto undo_creation;
>> 297
>> 298
>> 299 return new_thread;
>> 300
>> 301 undo_creation:
>> 302 if (new_thread->kernel_stack_base_addr)
>> 303 sos_kfree((sos_vaddr_t) new_thread->kernel_stack_base_addr);
>> 304 sos_kmem_cache_free((sos_vaddr_t) new_thread);
>> 305 return NULL;
>> 306 }
>> 307
>> 308
>> 309 struct sos_thread *
>> 310 sos_create_user_thread(const char *name,
>> 311 struct sos_process *process,
>> 312 sos_uaddr_t user_initial_PC,
>> 313 sos_ui32_t user_start_arg1,
>> 314 sos_ui32_t user_start_arg2,
>> 315 sos_uaddr_t user_initial_SP,
>> 316 sos_sched_priority_t priority)
>> 317 {
>> 318 return create_user_thread(name, process, NULL, NULL,
>> 319 user_initial_PC,
>> 320 user_start_arg1,
>> 321 user_start_arg2,
>> 322 user_initial_SP,
>> 323 priority);
>> 324 }
>> 325
>> 326
>> 327
>> 328
>> 329
>> 330
/**
 * Create a thread in @process resuming with a copy of @model_uctxt
 * (the user context of @model_thread), with @retval as its apparent
 * return value -- the fork() building block.
 *
 * NOTE(review): the trailing 0 arguments (PC, arg2, SP, priority) are
 * ignored by create_user_thread() when a model thread is supplied --
 * the priority is taken from @model_thread instead.
 */
struct sos_thread *
sos_duplicate_user_thread(const char *name,
                          struct sos_process *process,
                          const struct sos_thread * model_thread,
                          const struct sos_cpu_state * model_uctxt,
                          sos_ui32_t retval)
{
  return create_user_thread(name, process, model_thread, model_uctxt,
                            0, retval, 0, 0, 0);
}
>> 341
>> 342
>> 343
>> 344
>> 345
>> 346
>> 347
>> 348
>> 349
>> 350
/**
 * Make sure the MMU configuration matches what @the_thread expects
 * when it resumes: its process's MM context when it returns to user
 * mode, or its squatted MM context (if any) otherwise.
 */
static void _prepare_mm_context(struct sos_thread *the_thread)
{
  /* Resuming in user mode: the process's address space must be
     active. */
  if (sos_cpu_context_is_in_user_mode(the_thread->cpu_state)
      == TRUE)
    {
      /* A user-mode context always belongs to a process. */
      SOS_ASSERT_FATAL(the_thread->process != NULL);

      /* A thread returning to user mode cannot be squatting another
         MM context. */
      SOS_ASSERT_FATAL(the_thread->squatted_mm_context == NULL);

      /* Load the process's MM context into the MMU. */
      sos_mm_context_switch_to(sos_process_get_mm_context(the_thread->process));
    }

  /* Resuming in kernel mode with a squatted MM context: activate it.
     Otherwise keep whatever context is currently loaded (kernel space
     is identical in every address space). */
  else if (the_thread->squatted_mm_context != NULL)
    sos_mm_context_switch_to(the_thread->squatted_mm_context);
}
>> 375
>> 376
187 377
188 378
/**
 * Reclaim every resource owned by @thr.  Called for a ZOMBIE thread,
 * and always from ANOTHER thread's stack (see sos_thread_exit()),
 * since a thread cannot free the stack it is running on.
 */
static void delete_thread(struct sos_thread *thr)
{
  sos_ui32_t flags;

  /* Unlink from the global thread list. */
  sos_disable_IRQs(flags);
  list_delete_named(thread_list, thr, gbl_prev, gbl_next);
  sos_restore_IRQs(flags);

  /* Release the kernel stack. */
  sos_kfree((sos_vaddr_t) thr->kernel_stack_base_addr);

  /* Release any MM context the dead thread was still squatting.
     NOTE(review): change_current_mm_context() operates on
     current_thread, which at this point is the successor thread, not
     @thr -- confirm this is the intended behavior. */
  if (thr->squatted_mm_context)
    SOS_ASSERT_FATAL(SOS_OK == change_current_mm_context(NULL));

  /* Detach from the owning process, if any (user threads only). */
  if (thr->process)
    SOS_ASSERT_FATAL(SOS_OK == sos_process_unregister_thread(thr));

  /* Scrub and return the descriptor to the slab cache. */
  memset(thr, 0x0, sizeof(struct sos_thread));
  sos_kmem_cache_free((sos_vaddr_t) thr);
}
201 400
202 401
/**
 * Terminate the calling thread: mark it ZOMBIE, elect a successor and
 * exit to it, arranging for delete_thread(myself) to run on the
 * successor's stack.  Never returns.
 */
void sos_thread_exit()
{
  sos_ui32_t flags;
  struct sos_thread *myself, *next_thread;

  /* Exiting from interrupt context is forbidden. */
  SOS_ASSERT_FATAL(! sos_servicing_irq());

  myself = sos_thread_get_current();

  /* A thread must not die while still queued on any kernel
     waitqueue. */
  SOS_ASSERT_FATAL(list_is_empty_named(myself->kwaitq_list,
                                       prev_entry_for_thread,
                                       next_entry_for_thread));

  /* Become a zombie and elect the successor.  IRQs stay disabled:
     we never return to restore them. */
  sos_disable_IRQs(flags);
  myself->state = SOS_THR_ZOMBIE;
  next_thread = sos_reschedule(myself, FALSE);

  /* Verify the successor's stack-overflow marker is intact. */
  sos_cpu_state_detect_kernel_stack_overflow(next_thread->cpu_state,
                                             next_thread->kernel_stack_base_addr,
                                             next_thread->kernel_stack_size);

  /* Configure the MMU for the successor. */
  _prepare_mm_context(next_thread);

  /* Switch to the successor; delete_thread(myself) is executed on its
     stack, because we cannot free our own stack while using it. */
  _set_current(next_thread);
  sos_cpu_context_exit_to(next_thread->cpu_state,
                          (sos_cpu_kstate_function_arg1_t*) delete_thread,
                          (sos_ui32_t) myself);
}
238 442
239 443
>> 444 sos_sched_priority_t sos_thread_get_priority(struct sos_thread *thr)
>> 445 {
>> 446 if (! thr)
>> 447 thr = (struct sos_thread*)current_thread;
>> 448
>> 449 return thr->priority;
>> 450 }
>> 451
>> 452
240 sos_thread_state_t sos_thread_get_state(struct 453 sos_thread_state_t sos_thread_get_state(struct sos_thread *thr)
241 { 454 {
242 if (! thr) 455 if (! thr)
243 thr = (struct sos_thread*)current_thread; 456 thr = (struct sos_thread*)current_thread;
244 457
245 return thr->state; 458 return thr->state;
246 } 459 }
247 460
248 461
/* Kind of CPU hand-over requested from _switch_to_next_thread():
   yield (stay READY) or block (leave the ready queue). */
typedef enum { YIELD_MYSELF, BLOCK_MYSELF } switch_type_t;

/**
 * Give the CPU away and switch to the thread elected by the
 * scheduler.  Expected to be called with IRQs disabled.  Returns
 * SOS_OK (after this thread has been switched back to, when a switch
 * actually occurred).
 */
static sos_ret_t _switch_to_next_thread(switch_type_t operation)
{
  struct sos_thread *myself, *next_thread;

  SOS_ASSERT_FATAL(current_thread->state == SOS_THR_RUNNING);

  /* Context switching from interrupt context is forbidden. */
  SOS_ASSERT_FATAL(! sos_servicing_irq());

  myself = (struct sos_thread*)current_thread;

  /* When blocking, leave the RUNNING state before rescheduling. */
  if (BLOCK_MYSELF == operation)
    {
      myself->state = SOS_THR_BLOCKED;
    }

  /* Elect the successor; when yielding, we are re-inserted in the
     ready queues. */
  next_thread = sos_reschedule(myself, YIELD_MYSELF == operation);

  /* A switch is only needed when the successor differs from us. */
  if (myself != next_thread)
    {
      /* Verify the successor's stack-overflow marker. */
      sos_cpu_state_detect_kernel_stack_overflow(next_thread->cpu_state,
                                                 next_thread->kernel_stack_base_addr,
                                                 next_thread->kernel_stack_size);

      /* Configure the MMU for the successor. */
      _prepare_mm_context(next_thread);

      /* Actual context switch: execution resumes after this call only
         when somebody later switches back to us. */
      _set_current(next_thread);
      sos_cpu_context_switch(& myself->cpu_state, next_thread->cpu_state);

      /* Back in business: we are the running thread again. */
      SOS_ASSERT_FATAL(current_thread == myself);
      SOS_ASSERT_FATAL(current_thread->state == SOS_THR_RUNNING);
    }
  else
    {
      /* No switch: simply return to the RUNNING state. */
      _set_current(next_thread);
    }

  return SOS_OK;
}
302 519
303 520
>> 521
>> 522
>> 523
>> 524
/**
 * Helper for sos_thread_set_priority(): propagate the new priority to
 * every kernel waitqueue @thr is currently registered on.  Always
 * returns SOS_OK (per-waitqueue failures are fatal).
 */
static sos_ret_t _change_waitq_priorities(struct sos_thread *thr,
                                          sos_sched_priority_t priority)
{
  struct sos_kwaitq_entry *kwq_entry;
  int nb_waitqs;

  list_foreach_forward_named(thr->kwaitq_list, kwq_entry, nb_waitqs,
                             prev_entry_for_thread, next_entry_for_thread)
    {
      SOS_ASSERT_FATAL(SOS_OK == sos_kwaitq_change_priority(kwq_entry->kwaitq,
                                                            kwq_entry,
                                                            priority));
    }

  return SOS_OK;
}
>> 541
>> 542
/**
 * Change the scheduling priority of @thr (the current thread when
 * @thr is NULL).
 *
 * @return SOS_OK on success, -SOS_EINVAL for an invalid priority.
 */
sos_ret_t sos_thread_set_priority(struct sos_thread *thr,
                                  sos_sched_priority_t priority)
{
  __label__ exit_set_prio;
  sos_ui32_t flags;
  sos_ret_t retval;

  if (! SOS_SCHED_PRIO_IS_VALID(priority))
    return -SOS_EINVAL;

  if (! thr)
    thr = (struct sos_thread*)current_thread;

  sos_disable_IRQs(flags);

  /* Re-position the thread in every waitqueue it sleeps on. */
  retval = _change_waitq_priorities(thr, priority);
  if (SOS_OK != retval)
    goto exit_set_prio;

  /* A READY thread must also be re-inserted at the right place in the
     scheduler's ready queues. */
  if (SOS_THR_READY == thr->state)
    retval = sos_sched_change_priority(thr, priority);

  /* Commit the new priority in the descriptor. */
  thr->priority = priority;

 exit_set_prio:
  sos_restore_IRQs(flags);
  return retval;
}
>> 577
>> 578
304 sos_ret_t sos_thread_yield() 579 sos_ret_t sos_thread_yield()
305 { 580 {
306 sos_ui32_t flags; 581 sos_ui32_t flags;
307 sos_ret_t retval; 582 sos_ret_t retval;
308 583
309 sos_disable_IRQs(flags); 584 sos_disable_IRQs(flags);
310 585
311 retval = _switch_to_next_thread(YIELD_MYSELF 586 retval = _switch_to_next_thread(YIELD_MYSELF);
312 587
313 sos_restore_IRQs(flags); 588 sos_restore_IRQs(flags);
314 return retval; 589 return retval;
315 } 590 }
316 591
317 592
318 593
319 594
320 595
/* Shared state between sos_thread_sleep() and its timer callback
   sleep_timeout(); lives on the sleeper's stack. */
struct sleep_timeout_params
{
  struct sos_thread *thread_to_wakeup;  /* the sleeping thread */
  sos_bool_t timeout_triggered;         /* set TRUE by sleep_timeout() */
};
326 601
327 602
328 603
329 604
330 605
/**
 * Timeout-action routine armed by sos_thread_sleep(): flag that the
 * timeout fired and wake the sleeping thread.
 */
static void sleep_timeout(struct sos_timeout_action *act)
{
  struct sleep_timeout_params *sleep_timeout_params
    = (struct sleep_timeout_params*) act->routine_data;

  /* Tell sos_thread_sleep() it was the timeout -- not an external
     unblock -- that woke the thread. */
  sleep_timeout_params->timeout_triggered = TRUE;

  /* Wake the sleeper. */
  SOS_ASSERT_FATAL(SOS_OK ==
                   sos_thread_force_unblock(sleep_timeout_params
                                             ->thread_to_wakeup));
}
344 619
345 620
/**
 * Block the calling thread, optionally with a timeout.
 *
 * @param timeout NULL => block until sos_thread_force_unblock();
 *   otherwise the maximum sleep duration, updated on return with the
 *   time remaining (zero when the timeout fully elapsed).
 * @return SOS_OK when woken by the timeout, -SOS_EINTR when woken
 *   early by somebody else.
 */
sos_ret_t sos_thread_sleep(struct sos_time *timeout)
{
  sos_ui32_t flags;
  struct sleep_timeout_params sleep_timeout_params;
  struct sos_timeout_action timeout_action;
  sos_ret_t retval;

  /* Untimed sleep: simply block until forcibly unblocked. */
  if (NULL == timeout)
    {
      sos_disable_IRQs(flags);
      retval = _switch_to_next_thread(BLOCK_MYSELF);
      sos_restore_IRQs(flags);

      return retval;
    }

  /* Prepare the timer action that will wake us up. */
  sos_time_init_action(& timeout_action);

  /* The callback's shared state lives on our own stack: it stays
     valid for as long as we sleep. */
  sleep_timeout_params.thread_to_wakeup
    = (struct sos_thread*)current_thread;
  sleep_timeout_params.timeout_triggered = FALSE;

  sos_disable_IRQs(flags);

  /* Program the wake-up call. */
  SOS_ASSERT_FATAL(SOS_OK ==
                   sos_time_register_action_relative(& timeout_action,
                                                     timeout,
                                                     sleep_timeout,
                                                     & sleep_timeout_params));

  /* Block until woken -- by the timeout or by a force-unblock. */
  retval = _switch_to_next_thread(BLOCK_MYSELF);

  /* Identify the wake-up cause. */
  if (sleep_timeout_params.timeout_triggered)
    {
      /* Timeout fired: its remaining time must be zero. */
      SOS_ASSERT_FATAL(sos_time_is_zero(& timeout_action.timeout));
      retval = SOS_OK;
    }
  else
    {
      /* Woken early: cancel the still-pending timer action. */
      SOS_ASSERT_FATAL(SOS_OK == sos_time_unregister_action(& timeout_action));
      retval = -SOS_EINTR;
    }

  sos_restore_IRQs(flags);

  /* Report the time remaining back to the caller. */
  memcpy(timeout, & timeout_action.timeout, sizeof(struct sos_time));

  return retval;
}
408 683
409 684
/**
 * Force @thread out of its blocked state, whatever it was waiting for.
 *
 * @return SOS_OK when the thread is (now) runnable, -SOS_EINVAL for a
 *   NULL thread, -SOS_EFATAL for a zombie.
 */
sos_ret_t sos_thread_force_unblock(struct sos_thread *thread)
{
  sos_ret_t retval;
  sos_ui32_t flags;

  if (! thread)
    return -SOS_EINVAL;

  sos_disable_IRQs(flags);

  retval = SOS_OK;
  switch(sos_thread_get_state(thread))
    {
    case SOS_THR_RUNNING:
    case SOS_THR_READY:
      /* Nothing to do: already runnable. */
      break;

    case SOS_THR_ZOMBIE:
      /* Cannot wake the dead. */
      retval = -SOS_EFATAL;
      break;

    default:
      /* Blocked: hand the thread back to the scheduler. */
      retval = sos_sched_set_ready(thread);
      break;
    }

  sos_restore_IRQs(flags);

  return retval;
}
>> 717
>> 718
/**
 * Dump a backtrace of the current thread's kernel stack on the bochs
 * debug port and/or the VGA console.  Relies on the GCC
 * nested-function extension for the per-frame callback.
 */
void sos_thread_dump_backtrace(sos_bool_t on_console,
                               sos_bool_t on_bochs)
{
  sos_vaddr_t stack_bottom = current_thread->kernel_stack_base_addr;
  sos_size_t stack_size = current_thread->kernel_stack_size;

  /* Per-frame callback (GCC nested function: captures stack_bottom,
     stack_size, on_console and on_bochs from the enclosing frame). */
  static void backtracer(sos_vaddr_t PC,
                         sos_vaddr_t params,
                         sos_ui32_t depth,
                         void *custom_arg)
  {
    sos_ui32_t invalid = 0xffffffff, *arg1, *arg2, *arg3, *arg4;

    /* Tentative addresses of the first 4 parameter words of the
       frame. */
    arg1 = (sos_ui32_t*)params;
    arg2 = (sos_ui32_t*)(params+4);
    arg3 = (sos_ui32_t*)(params+8);
    arg4 = (sos_ui32_t*)(params+12);

    /* Only dereference words that actually lie inside the stack;
       otherwise substitute the 0xffffffff marker. */
#define INTERVAL_OK(b,v,u) ( ((b) <= (sos_vaddr_t)(v)) \
                             && ((sos_vaddr_t)(v) < (u)) )
    if (!INTERVAL_OK(stack_bottom, arg1, stack_bottom + stack_size))
      arg1 = &invalid;
    if (!INTERVAL_OK(stack_bottom, arg2, stack_bottom + stack_size))
      arg2 = &invalid;
    if (!INTERVAL_OK(stack_bottom, arg3, stack_bottom + stack_size))
      arg3 = &invalid;
    if (!INTERVAL_OK(stack_bottom, arg4, stack_bottom + stack_size))
      arg4 = &invalid;

    /* One line per frame on the bochs debug port. */
    if (on_bochs)
      sos_bochs_printf("[%d] PC=0x%x arg1=0x%x arg2=0x%x arg3=0x%x\n",
                       (unsigned)depth, (unsigned)PC,
                       (unsigned)*arg1, (unsigned)*arg2,
                       (unsigned)*arg3);

    /* One line per frame on the console, stacked upward from row 23. */
    if (on_console)
      sos_x86_videomem_printf(23-depth, 3,
                              SOS_X86_VIDEO_BG_BLUE
                              | SOS_X86_VIDEO_FG_LTGREEN,
                              "[%d] PC=0x%x arg1=0x%x arg2=0x%x arg3=0x%x arg4=0x%x",
                              (unsigned)depth, PC,
                              (unsigned)*arg1, (unsigned)*arg2,
                              (unsigned)*arg3, (unsigned)*arg4);

  }

  /* Walk at most 15 frames of the current stack. */
  sos_backtrace(NULL, 15, stack_bottom, stack_size,
                backtracer, NULL);
}
>> 775
>> 776
>> 777
>> 778
>> 779
>> 780
>> 781
>> 782
/**
 * Set (@mm_ctxt != NULL) or reset (@mm_ctxt == NULL) the MM context
 * squatted by the current thread, adjusting reference counts and the
 * MMU accordingly.  Callers invoke this with IRQs disabled.
 */
static sos_ret_t
change_current_mm_context(struct sos_mm_context *mm_ctxt)
{
  /* Remember the previously squatted context, if any. */
  struct sos_mm_context * prev_mm_ctxt
    = current_thread->squatted_mm_context;

  /* Update the thread first: the switch-back paths consult this
     field. */
  current_thread->squatted_mm_context = mm_ctxt;

  if (mm_ctxt != NULL)
    {
      /* Taking a context: pin it...
         NOTE(review): prev_mm_ctxt is not unref'd on this path --
         callers assert it is NULL beforehand; confirm. */
      sos_mm_context_ref(mm_ctxt);

      /* ...and load it into the MMU. */
      sos_mm_context_switch_to(mm_ctxt);
    }
  else
    /* Releasing: drop our reference on the previous context.  The MMU
       is left unchanged here. */
    sos_mm_context_unref(prev_mm_ctxt);

  return SOS_OK;
}
>> 809
>> 810
/**
 * Squat the MM context of @dest_as (or of the current thread's own
 * process when @dest_as is NULL) so the kernel can access that user
 * address space, and arm the user-access page-fault fixup.
 *
 * @param fixup_retvaddr  Address to resume at if the coming user
 *   access faults; only allowed for the thread's own address space.
 * @return SOS_OK, or -SOS_EINVAL when no address space is available.
 */
sos_ret_t
sos_thread_prepare_user_space_access(struct sos_umem_vmm_as * dest_as,
                                     sos_vaddr_t fixup_retvaddr)
{
  sos_ret_t retval;
  sos_ui32_t flags;

  if (! dest_as)
    {
      /* Default to the current thread's own process address space. */
      if (! current_thread->process)
        return -SOS_EINVAL;

      dest_as = sos_process_get_address_space(current_thread->process);
    }
  else
    /* Fault fixup is supported only for the thread's own address
       space. */
    SOS_ASSERT_FATAL(! fixup_retvaddr);

  sos_disable_IRQs(flags);
  /* Nested user-space access is not allowed. */
  SOS_ASSERT_FATAL(NULL == current_thread->squatted_mm_context);
  SOS_ASSERT_FATAL(0 == current_thread->fixup_uaccess.return_vaddr);

  /* Load the target MM context. */
  retval = change_current_mm_context(sos_umem_vmm_get_mm_context(dest_as));
  if (SOS_OK == retval)
    {
      /* Arm the page-fault fixup for the coming user access. */
      current_thread->fixup_uaccess.return_vaddr = fixup_retvaddr;
      current_thread->fixup_uaccess.faulted_uaddr = 0;
    }

  sos_restore_IRQs(flags);
  return retval;
}
>> 846
>> 847
/**
 * Counterpart of sos_thread_prepare_user_space_access(): release the
 * squatted MM context and disarm the user-access fixup.
 */
sos_ret_t
sos_thread_end_user_space_access(void)
{
  sos_ret_t retval;
  sos_ui32_t flags;

  sos_disable_IRQs(flags);
  /* Must be paired with a prior prepare_user_space_access(). */
  SOS_ASSERT_FATAL(NULL != current_thread->squatted_mm_context);

  /* Drop the squatted context and disarm the page-fault fixup. */
  retval = change_current_mm_context(NULL);
  current_thread->fixup_uaccess.return_vaddr = 0;
  current_thread->fixup_uaccess.faulted_uaddr = 0;

  sos_restore_IRQs(flags);
  return retval;
}
>> 865
>> 866
/**
 * Called when returning from a syscall: record the saved CPU context
 * and restore the MM context expected by the resumed flow of control.
 */
void sos_thread_prepare_syscall_switch_back(struct sos_cpu_state *cpu_state)
{
  /* Remember the context saved at syscall entry as the thread's
     current CPU state. */
  current_thread->cpu_state = cpu_state;

  /* Make sure the MMU maps what the resumed context expects. */
  _prepare_mm_context((struct sos_thread*) current_thread);
}
>> 882
>> 883
/**
 * Called when returning from an exception handler: record the saved
 * CPU context and restore the MM context expected by the resumed flow
 * of control.  Same logic as the syscall switch-back.
 */
void sos_thread_prepare_exception_switch_back(struct sos_cpu_state *cpu_state)
{
  /* Remember the context saved at exception entry as the thread's
     current CPU state. */
  current_thread->cpu_state = cpu_state;

  /* Make sure the MMU maps what the resumed context expects. */
  _prepare_mm_context((struct sos_thread*) current_thread);
}
>> 899
>> 900
/** Record the context interrupted by an IRQ as the current thread's
    CPU state, so a possible switch at IRQ exit can resume it. */
void
sos_thread_prepare_irq_servicing(struct sos_cpu_state *interrupted_state)
{
  current_thread->cpu_state = interrupted_state;
}
>> 906
>> 907
/**
 * Called at the end of IRQ servicing: return the CPU context to resume.
 * An interrupted kernel context is resumed as-is (no kernel
 * preemption); an interrupted user context is a preemption point --
 * reschedule and return the elected thread's context.
 */
struct sos_cpu_state *
sos_thread_prepare_irq_switch_back(void)
{
  struct sos_thread *myself, *next_thread;

  /* Never preempt a thread running in kernel mode. */
  if (! sos_cpu_context_is_in_user_mode(current_thread->cpu_state))
    return current_thread->cpu_state;

  /* A user-mode context always belongs to a process. */
  SOS_ASSERT_FATAL(current_thread->process != NULL);

  myself = (struct sos_thread*)current_thread;

  /* Elect the next thread.
     NOTE(review): the second argument is FALSE (same as in
     sos_thread_exit), not the yield form -- confirm sos_reschedule()
     handles the still-RUNNING current thread on this path. */
  next_thread = sos_reschedule(myself, FALSE);

  /* Configure the MMU for the elected thread. */
  _prepare_mm_context(next_thread);

  /* Hand the CPU over and resume the elected thread's context. */
  _set_current(next_thread);
  return next_thread->cpu_state;
}