Diff markup
001 001
002 002
003 003
004 004
005 005
006 006
007 007
008 008
009 009
010 010
011 011
012 012
013 013
014 014
015 015
016 016
017 017
018 018
019 #include <sos/physmem.h> 019 #include <sos/physmem.h>
020 #include <sos/kmem_slab.h> 020 #include <sos/kmem_slab.h>
021 #include <sos/kmalloc.h> 021 #include <sos/kmalloc.h>
022 #include <sos/klibc.h> 022 #include <sos/klibc.h>
023 #include <sos/list.h> 023 #include <sos/list.h>
024 #include <sos/assert.h> 024 #include <sos/assert.h>
>> 025 #include <hwcore/mm_context.h>
>> 026 #include <sos/process.h>
>> 027
>> 028 #include <drivers/bochs.h>
>> 029 #include <drivers/x86_videomem.h>
025 030
026 #include <hwcore/irq.h> 031 #include <hwcore/irq.h>
027 032
028 #include "thread.h" 033 #include "thread.h"
029 034
030 035
031 036
032 037
033 038
/* Size of the kernel-mode stack allocated for each new thread: one page. */
#define SOS_THREAD_KERNEL_STACK_SIZE (1*SOS_PAGE_SIZE)
035 040
036 041
037 042
038 043
039 044
040 045
041 046
042 047
043 048
044 049
045 050
046 051
047 052
048 053
049 054
050 055
/* The thread currently holding the CPU.  volatile: updated from
   context-switch paths and read from ordinary code. */
static volatile struct sos_thread *current_thread = NULL;

/* Global list of every thread in the system, linked through the
   gbl_prev/gbl_next fields of struct sos_thread. */
static struct sos_thread *thread_list = NULL;

/* Slab cache from which struct sos_thread descriptors are allocated
   (created zeroed — see sos_thread_subsystem_setup). */
static struct sos_kslab_cache *cache_thread;
066 071
067 072
068 struct sos_thread *sos_thread_get_current() 073 struct sos_thread *sos_thread_get_current()
069 { 074 {
070 SOS_ASSERT_FATAL(current_thread->state == SO 075 SOS_ASSERT_FATAL(current_thread->state == SOS_THR_RUNNING);
071 return (struct sos_thread*)current_thread; 076 return (struct sos_thread*)current_thread;
072 } 077 }
073 078
074 079
075 inline static sos_ret_t _set_current(struct so 080 inline static sos_ret_t _set_current(struct sos_thread *thr)
076 { 081 {
077 SOS_ASSERT_FATAL(thr->state == SOS_THR_READY 082 SOS_ASSERT_FATAL(thr->state == SOS_THR_READY);
078 current_thread = thr; 083 current_thread = thr;
079 current_thread->state = SOS_THR_RUNNING; 084 current_thread->state = SOS_THR_RUNNING;
080 return SOS_OK; 085 return SOS_OK;
081 } 086 }
082 087
083 088
084 sos_ret_t sos_thread_subsystem_setup(sos_vaddr 089 sos_ret_t sos_thread_subsystem_setup(sos_vaddr_t init_thread_stack_base_addr,
085 sos_size_ 090 sos_size_t init_thread_stack_size)
086 { 091 {
087 struct sos_thread *myself; 092 struct sos_thread *myself;
088 093
089 094
090 cache_thread = sos_kmem_cache_create("thread 095 cache_thread = sos_kmem_cache_create("thread",
091 sizeof( 096 sizeof(struct sos_thread),
092 2, 097 2,
093 0, 098 0,
094 SOS_KSL 099 SOS_KSLAB_CREATE_MAP
095 | SOS_K 100 | SOS_KSLAB_CREATE_ZERO);
096 if (! cache_thread) 101 if (! cache_thread)
097 return -SOS_ENOMEM; 102 return -SOS_ENOMEM;
098 103
099 104
100 myself = (struct sos_thread*) sos_kmem_cache 105 myself = (struct sos_thread*) sos_kmem_cache_alloc(cache_thread,
101 106 SOS_KSLAB_ALLOC_ATOMIC);
102 if (! myself) 107 if (! myself)
103 return -SOS_ENOMEM; 108 return -SOS_ENOMEM;
104 109
105 110
106 strzcpy(myself->name, "[kinit]", SOS_THR_MAX 111 strzcpy(myself->name, "[kinit]", SOS_THR_MAX_NAMELEN);
107 myself->state = SOS_THR_CREATED; 112 myself->state = SOS_THR_CREATED;
>> 113 myself->priority = SOS_SCHED_PRIO_LOWEST;
108 myself->kernel_stack_base_addr = init_thread 114 myself->kernel_stack_base_addr = init_thread_stack_base_addr;
109 myself->kernel_stack_size = init_thread 115 myself->kernel_stack_size = init_thread_stack_size;
110 116
111 117
112 sos_cpu_state_prepare_detect_kernel_stack_ov 118 sos_cpu_state_prepare_detect_kernel_stack_overflow(myself->cpu_state,
113 119 myself->kernel_stack_base_addr,
114 120 myself->kernel_stack_size);
115 121
116 122
117 list_singleton_named(thread_list, myself, gb 123 list_singleton_named(thread_list, myself, gbl_prev, gbl_next);
118 124
119 125
120 myself->state = SOS_THR_READY; 126 myself->state = SOS_THR_READY;
121 _set_current(myself); 127 _set_current(myself);
122 128
123 return SOS_OK; 129 return SOS_OK;
124 } 130 }
125 131
126 132
127 struct sos_thread * 133 struct sos_thread *
128 sos_create_kernel_thread(const char *name, 134 sos_create_kernel_thread(const char *name,
129 sos_kernel_thread_sta 135 sos_kernel_thread_start_routine_t start_func,
130 void *start_arg) !! 136 void *start_arg,
>> 137 sos_sched_priority_t priority)
131 { 138 {
132 __label__ undo_creation; 139 __label__ undo_creation;
133 sos_ui32_t flags; 140 sos_ui32_t flags;
134 struct sos_thread *new_thread; 141 struct sos_thread *new_thread;
135 142
136 if (! start_func) 143 if (! start_func)
137 return NULL; 144 return NULL;
>> 145 if (! SOS_SCHED_PRIO_IS_VALID(priority))
>> 146 return NULL;
138 147
139 148
140 new_thread 149 new_thread
141 = (struct sos_thread*) sos_kmem_cache_allo 150 = (struct sos_thread*) sos_kmem_cache_alloc(cache_thread,
142 151 SOS_KSLAB_ALLOC_ATOMIC);
143 if (! new_thread) 152 if (! new_thread)
144 return NULL; 153 return NULL;
145 154
146 155
147 strzcpy(new_thread->name, ((name)?name:"[NON 156 strzcpy(new_thread->name, ((name)?name:"[NONAME]"), SOS_THR_MAX_NAMELEN);
148 new_thread->state = SOS_THR_CREATED; 157 new_thread->state = SOS_THR_CREATED;
>> 158 new_thread->priority = priority;
149 159
150 160
151 new_thread->kernel_stack_base_addr = sos_kma 161 new_thread->kernel_stack_base_addr = sos_kmalloc(SOS_THREAD_KERNEL_STACK_SIZE, 0);
152 new_thread->kernel_stack_size = SOS_THR 162 new_thread->kernel_stack_size = SOS_THREAD_KERNEL_STACK_SIZE;
153 if (! new_thread->kernel_stack_base_addr) 163 if (! new_thread->kernel_stack_base_addr)
154 goto undo_creation; 164 goto undo_creation;
155 165
156 166
157 if (SOS_OK 167 if (SOS_OK
158 != sos_cpu_kstate_init(& new_thread->cpu 168 != sos_cpu_kstate_init(& new_thread->cpu_state,
159 (sos_cpu_kstate_f 169 (sos_cpu_kstate_function_arg1_t*) start_func,
160 (sos_ui32_t) star 170 (sos_ui32_t) start_arg,
161 new_thread->kerne 171 new_thread->kernel_stack_base_addr,
162 new_thread->kerne 172 new_thread->kernel_stack_size,
163 (sos_cpu_kstate_f 173 (sos_cpu_kstate_function_arg1_t*) sos_thread_exit,
164 (sos_ui32_t) NULL 174 (sos_ui32_t) NULL))
165 goto undo_creation; 175 goto undo_creation;
166 176
167 177
168 sos_disable_IRQs(flags); 178 sos_disable_IRQs(flags);
169 list_add_tail_named(thread_list, new_thread, 179 list_add_tail_named(thread_list, new_thread, gbl_prev, gbl_next);
170 sos_restore_IRQs(flags); 180 sos_restore_IRQs(flags);
171 181
172 182
173 if (SOS_OK != sos_sched_set_ready(new_thread 183 if (SOS_OK != sos_sched_set_ready(new_thread))
174 goto undo_creation; 184 goto undo_creation;
175 185
176 186
177 return new_thread; 187 return new_thread;
178 188
179 undo_creation: 189 undo_creation:
180 if (new_thread->kernel_stack_base_addr) 190 if (new_thread->kernel_stack_base_addr)
181 sos_kfree((sos_vaddr_t) new_thread->kernel 191 sos_kfree((sos_vaddr_t) new_thread->kernel_stack_base_addr);
182 sos_kmem_cache_free((sos_vaddr_t) new_thread 192 sos_kmem_cache_free((sos_vaddr_t) new_thread);
183 return NULL; 193 return NULL;
184 } 194 }
185 195
186 196
>> 197 struct sos_thread *
>> 198 sos_create_user_thread(const char *name,
>> 199 struct sos_process *process,
>> 200 sos_uaddr_t user_initial_PC,
>> 201 sos_ui32_t user_start_arg1,
>> 202 sos_ui32_t user_start_arg2,
>> 203 sos_uaddr_t user_initial_SP,
>> 204 sos_sched_priority_t priority)
>> 205 {
>> 206 __label__ undo_creation;
>> 207 sos_ui32_t flags;
>> 208 struct sos_thread *new_thread;
>> 209
>> 210 if (! SOS_SCHED_PRIO_IS_VALID(priority))
>> 211 return NULL;
>> 212
>> 213
>> 214 if (! process)
>> 215 return NULL;
>> 216
>> 217
>> 218 new_thread
>> 219 = (struct sos_thread*) sos_kmem_cache_alloc(cache_thread,
>> 220 SOS_KSLAB_ALLOC_ATOMIC);
>> 221 if (! new_thread)
>> 222 return NULL;
>> 223
>> 224
>> 225 strzcpy(new_thread->name, ((name)?name:"[NONAME]"), SOS_THR_MAX_NAMELEN);
>> 226 new_thread->state = SOS_THR_CREATED;
>> 227 new_thread->priority = priority;
>> 228
>> 229
>> 230 new_thread->kernel_stack_base_addr = sos_kmalloc(SOS_THREAD_KERNEL_STACK_SIZE, 0);
>> 231 new_thread->kernel_stack_size = SOS_THREAD_KERNEL_STACK_SIZE;
>> 232 if (! new_thread->kernel_stack_base_addr)
>> 233 goto undo_creation;
>> 234
>> 235 if (SOS_OK
>> 236 != sos_cpu_ustate_init(& new_thread->cpu_state,
>> 237 user_initial_PC,
>> 238 user_start_arg1,
>> 239 user_start_arg2,
>> 240 user_initial_SP,
>> 241 new_thread->kernel_stack_base_addr,
>> 242 new_thread->kernel_stack_size))
>> 243 goto undo_creation;
>> 244
>> 245
>> 246 if (SOS_OK != sos_process_register_thread(process, new_thread))
>> 247 goto undo_creation;
>> 248
>> 249
>> 250 sos_disable_IRQs(flags);
>> 251 list_add_tail_named(thread_list, new_thread, gbl_prev, gbl_next);
>> 252 sos_restore_IRQs(flags);
>> 253
>> 254
>> 255 if (SOS_OK != sos_sched_set_ready(new_thread))
>> 256 goto undo_creation;
>> 257
>> 258
>> 259 return new_thread;
>> 260
>> 261 undo_creation:
>> 262 if (new_thread->kernel_stack_base_addr)
>> 263 sos_kfree((sos_vaddr_t) new_thread->kernel_stack_base_addr);
>> 264 sos_kmem_cache_free((sos_vaddr_t) new_thread);
>> 265 return NULL;
>> 266 }
>> 267
>> 268
>> 269
>> 270
>> 271
>> 272
>> 273
>> 274
>> 275
>> 276
>> 277 static void _prepare_mm_context(struct sos_thread *the_thread)
>> 278 {
>> 279
>> 280 if (sos_cpu_context_is_in_user_mode(the_thread->cpu_state)
>> 281 == TRUE)
>> 282 {
>> 283
>> 284
>> 285
>> 286
>> 287 SOS_ASSERT_FATAL(the_thread->process != NULL);
>> 288
>> 289
>> 290 SOS_ASSERT_FATAL(the_thread->squatted_mm_context == NULL);
>> 291
>> 292
>> 293 sos_mm_context_switch_to(sos_process_get_mm_context(the_thread->process));
>> 294 }
>> 295
>> 296
>> 297
>> 298 else if (the_thread->squatted_mm_context != NULL)
>> 299 sos_mm_context_switch_to(the_thread->squatted_mm_context);
>> 300 }
>> 301
>> 302
187 303
188 304
189 static void delete_thread(struct sos_thread *t 305 static void delete_thread(struct sos_thread *thr)
190 { 306 {
191 sos_ui32_t flags; 307 sos_ui32_t flags;
192 308
193 sos_disable_IRQs(flags); 309 sos_disable_IRQs(flags);
194 list_delete_named(thread_list, thr, gbl_prev 310 list_delete_named(thread_list, thr, gbl_prev, gbl_next);
195 sos_restore_IRQs(flags); 311 sos_restore_IRQs(flags);
196 312
197 sos_kfree((sos_vaddr_t) thr->kernel_stack_ba 313 sos_kfree((sos_vaddr_t) thr->kernel_stack_base_addr);
>> 314
>> 315
>> 316 if (thr->squatted_mm_context)
>> 317 SOS_ASSERT_FATAL(SOS_OK == sos_thread_change_current_mm_context(NULL));
>> 318
>> 319
>> 320 if (thr->process)
>> 321 SOS_ASSERT_FATAL(SOS_OK == sos_process_unregister_thread(thr));
>> 322
198 memset(thr, 0x0, sizeof(struct sos_thread)); 323 memset(thr, 0x0, sizeof(struct sos_thread));
199 sos_kmem_cache_free((sos_vaddr_t) thr); 324 sos_kmem_cache_free((sos_vaddr_t) thr);
200 } 325 }
201 326
202 327
203 void sos_thread_exit() 328 void sos_thread_exit()
204 { 329 {
205 sos_ui32_t flags; 330 sos_ui32_t flags;
206 struct sos_thread *myself, *next_thread; 331 struct sos_thread *myself, *next_thread;
207 332
208 333
209 SOS_ASSERT_FATAL(! sos_servicing_irq()); 334 SOS_ASSERT_FATAL(! sos_servicing_irq());
210 335
211 myself = sos_thread_get_current(); 336 myself = sos_thread_get_current();
212 337
213 338
214 339
215 SOS_ASSERT_FATAL(list_is_empty_named(myself- 340 SOS_ASSERT_FATAL(list_is_empty_named(myself->kwaitq_list,
216 prev_en 341 prev_entry_for_thread,
217 next_en 342 next_entry_for_thread));
218 343
219 344
220 sos_disable_IRQs(flags); 345 sos_disable_IRQs(flags);
221 myself->state = SOS_THR_ZOMBIE; 346 myself->state = SOS_THR_ZOMBIE;
222 next_thread = sos_reschedule(myself, FALSE); 347 next_thread = sos_reschedule(myself, FALSE);
223 348
224 349
225 sos_cpu_state_detect_kernel_stack_overflow(n 350 sos_cpu_state_detect_kernel_stack_overflow(next_thread->cpu_state,
226 n 351 next_thread->kernel_stack_base_addr,
227 n 352 next_thread->kernel_stack_size);
228 353
>> 354
>> 355
>> 356
>> 357 _prepare_mm_context(next_thread);
>> 358
229 359
230 360
231 361
232 362
233 _set_current(next_thread); 363 _set_current(next_thread);
234 sos_cpu_context_exit_to(next_thread->cpu_sta 364 sos_cpu_context_exit_to(next_thread->cpu_state,
235 (sos_cpu_kstate_func 365 (sos_cpu_kstate_function_arg1_t*) delete_thread,
236 (sos_ui32_t) myself) 366 (sos_ui32_t) myself);
237 } 367 }
238 368
239 369
>> 370 sos_sched_priority_t sos_thread_get_priority(struct sos_thread *thr)
>> 371 {
>> 372 if (! thr)
>> 373 thr = (struct sos_thread*)current_thread;
>> 374
>> 375 return thr->priority;
>> 376 }
>> 377
>> 378
240 sos_thread_state_t sos_thread_get_state(struct 379 sos_thread_state_t sos_thread_get_state(struct sos_thread *thr)
241 { 380 {
242 if (! thr) 381 if (! thr)
243 thr = (struct sos_thread*)current_thread; 382 thr = (struct sos_thread*)current_thread;
244 383
245 return thr->state; 384 return thr->state;
246 } 385 }
247 386
248 387
249 typedef enum { YIELD_MYSELF, BLOCK_MYSELF } sw 388 typedef enum { YIELD_MYSELF, BLOCK_MYSELF } switch_type_t;
250 389
251 390
252 391
253 392
254 static sos_ret_t _switch_to_next_thread(switch 393 static sos_ret_t _switch_to_next_thread(switch_type_t operation)
255 { 394 {
256 struct sos_thread *myself, *next_thread; 395 struct sos_thread *myself, *next_thread;
257 396
258 SOS_ASSERT_FATAL(current_thread->state == SO 397 SOS_ASSERT_FATAL(current_thread->state == SOS_THR_RUNNING);
259 398
260 399
261 SOS_ASSERT_FATAL(! sos_servicing_irq()); 400 SOS_ASSERT_FATAL(! sos_servicing_irq());
262 401
263 myself = (struct sos_thread*)current_thread; 402 myself = (struct sos_thread*)current_thread;
264 403
265 404
266 405
267 if (BLOCK_MYSELF == operation) 406 if (BLOCK_MYSELF == operation)
268 { 407 {
269 myself->state = SOS_THR_BLOCKED; 408 myself->state = SOS_THR_BLOCKED;
270 } 409 }
271 410
272 411
273 next_thread = sos_reschedule(myself, YIELD_M 412 next_thread = sos_reschedule(myself, YIELD_MYSELF == operation);
274 413
275 414
276 if (myself != next_thread) 415 if (myself != next_thread)
277 { 416 {
278 417
279 sos_cpu_state_detect_kernel_stack_overfl 418 sos_cpu_state_detect_kernel_stack_overflow(next_thread->cpu_state,
280 419 next_thread->kernel_stack_base_addr,
281 420 next_thread->kernel_stack_size);
282 421
>> 422
>> 423
>> 424
>> 425 _prepare_mm_context(next_thread);
283 426
284 427
285 428
286 429
287 _set_current(next_thread); 430 _set_current(next_thread);
288 sos_cpu_context_switch(& myself->cpu_sta 431 sos_cpu_context_switch(& myself->cpu_state, next_thread->cpu_state);
289 432
290 433
291 SOS_ASSERT_FATAL(current_thread == mysel 434 SOS_ASSERT_FATAL(current_thread == myself);
292 SOS_ASSERT_FATAL(current_thread->state = 435 SOS_ASSERT_FATAL(current_thread->state == SOS_THR_RUNNING);
293 } 436 }
294 else 437 else
295 { 438 {
296 439
297 _set_current(next_thread); 440 _set_current(next_thread);
298 } 441 }
299 442
300 return SOS_OK; 443 return SOS_OK;
301 } 444 }
302 445
303 446
>> 447
>> 448
>> 449
>> 450
>> 451 static sos_ret_t _change_waitq_priorities(struct sos_thread *thr,
>> 452 sos_sched_priority_t priority)
>> 453 {
>> 454 struct sos_kwaitq_entry *kwq_entry;
>> 455 int nb_waitqs;
>> 456
>> 457 list_foreach_forward_named(thr->kwaitq_list, kwq_entry, nb_waitqs,
>> 458 prev_entry_for_thread, next_entry_for_thread)
>> 459 {
>> 460 SOS_ASSERT_FATAL(SOS_OK == sos_kwaitq_change_priority(kwq_entry->kwaitq,
>> 461 kwq_entry,
>> 462 priority));
>> 463 }
>> 464
>> 465 return SOS_OK;
>> 466 }
>> 467
>> 468
>> 469 sos_ret_t sos_thread_set_priority(struct sos_thread *thr,
>> 470 sos_sched_priority_t priority)
>> 471 {
>> 472 __label__ exit_set_prio;
>> 473 sos_ui32_t flags;
>> 474 sos_ret_t retval;
>> 475
>> 476
>> 477 if (! SOS_SCHED_PRIO_IS_VALID(priority))
>> 478 return -SOS_EINVAL;
>> 479
>> 480 if (! thr)
>> 481 thr = (struct sos_thread*)current_thread;
>> 482
>> 483 sos_disable_IRQs(flags);
>> 484
>> 485
>> 486
>> 487 retval = _change_waitq_priorities(thr, priority);
>> 488 if (SOS_OK != retval)
>> 489 goto exit_set_prio;
>> 490
>> 491
>> 492
>> 493 if (SOS_THR_READY == thr->state)
>> 494 retval = sos_sched_change_priority(thr, priority);
>> 495
>> 496
>> 497 thr->priority = priority;
>> 498
>> 499 exit_set_prio:
>> 500 sos_restore_IRQs(flags);
>> 501 return retval;
>> 502 }
>> 503
>> 504
304 sos_ret_t sos_thread_yield() 505 sos_ret_t sos_thread_yield()
305 { 506 {
306 sos_ui32_t flags; 507 sos_ui32_t flags;
307 sos_ret_t retval; 508 sos_ret_t retval;
308 509
309 sos_disable_IRQs(flags); 510 sos_disable_IRQs(flags);
310 511
311 retval = _switch_to_next_thread(YIELD_MYSELF 512 retval = _switch_to_next_thread(YIELD_MYSELF);
312 513
313 sos_restore_IRQs(flags); 514 sos_restore_IRQs(flags);
314 return retval; 515 return retval;
315 } 516 }
316 517
317 518
318 519
319 520
320 521
/* Shared state between sos_thread_sleep() and its timeout callback
   sleep_timeout(); lives on the sleeper's stack. */
struct sleep_timeout_params
{
  struct sos_thread *thread_to_wakeup; /* thread to unblock on timeout */
  sos_bool_t timeout_triggered;        /* TRUE iff woken by the timeout */
};
326 527
327 528
328 529
329 530
330 531
331 static void sleep_timeout(struct sos_timeout_a 532 static void sleep_timeout(struct sos_timeout_action *act)
332 { 533 {
333 struct sleep_timeout_params *sleep_timeout_p 534 struct sleep_timeout_params *sleep_timeout_params
334 = (struct sleep_timeout_params*) act->rout 535 = (struct sleep_timeout_params*) act->routine_data;
335 536
336 537
337 sleep_timeout_params->timeout_triggered = TR 538 sleep_timeout_params->timeout_triggered = TRUE;
338 539
339 540
340 SOS_ASSERT_FATAL(SOS_OK == 541 SOS_ASSERT_FATAL(SOS_OK ==
341 sos_thread_force_unblock(sl 542 sos_thread_force_unblock(sleep_timeout_params
342 - 543 ->thread_to_wakeup));
343 } 544 }
344 545
345 546
346 sos_ret_t sos_thread_sleep(struct sos_time *ti 547 sos_ret_t sos_thread_sleep(struct sos_time *timeout)
347 { 548 {
348 sos_ui32_t flags; 549 sos_ui32_t flags;
349 struct sleep_timeout_params sleep_timeout_pa 550 struct sleep_timeout_params sleep_timeout_params;
350 struct sos_timeout_action timeout_action; 551 struct sos_timeout_action timeout_action;
351 sos_ret_t retval; 552 sos_ret_t retval;
352 553
353 554
354 if (NULL == timeout) 555 if (NULL == timeout)
355 { 556 {
356 sos_disable_IRQs(flags); 557 sos_disable_IRQs(flags);
357 retval = _switch_to_next_thread(BLOCK_MY 558 retval = _switch_to_next_thread(BLOCK_MYSELF);
358 sos_restore_IRQs(flags); 559 sos_restore_IRQs(flags);
359 560
360 return retval; 561 return retval;
361 } 562 }
362 563
363 564
364 sos_time_init_action(& timeout_action); 565 sos_time_init_action(& timeout_action);
365 566
366 567
367 sleep_timeout_params.thread_to_wakeup 568 sleep_timeout_params.thread_to_wakeup
368 = (struct sos_thread*)current_thread; 569 = (struct sos_thread*)current_thread;
369 sleep_timeout_params.timeout_triggered = FAL 570 sleep_timeout_params.timeout_triggered = FALSE;
370 571
371 sos_disable_IRQs(flags); 572 sos_disable_IRQs(flags);
372 573
373 574
374 SOS_ASSERT_FATAL(SOS_OK == 575 SOS_ASSERT_FATAL(SOS_OK ==
375 sos_time_register_action_re 576 sos_time_register_action_relative(& timeout_action,
376 577 timeout,
377 578 sleep_timeout,
378 579 & sleep_timeout_params));
379 580
380 581
381 582
382 583
383 retval = _switch_to_next_thread(BLOCK_MYSELF 584 retval = _switch_to_next_thread(BLOCK_MYSELF);
384 585
385 586
386 587
387 if (sleep_timeout_params.timeout_triggered) 588 if (sleep_timeout_params.timeout_triggered)
388 { 589 {
389 590
390 SOS_ASSERT_FATAL(sos_time_is_zero(& time 591 SOS_ASSERT_FATAL(sos_time_is_zero(& timeout_action.timeout));
391 retval = SOS_OK; 592 retval = SOS_OK;
392 } 593 }
393 else 594 else
394 { 595 {
395 596
396 597
397 SOS_ASSERT_FATAL(SOS_OK == sos_time_unre 598 SOS_ASSERT_FATAL(SOS_OK == sos_time_unregister_action(& timeout_action));
398 retval = -SOS_EINTR; 599 retval = -SOS_EINTR;
399 } 600 }
400 601
401 sos_restore_IRQs(flags); 602 sos_restore_IRQs(flags);
402 603
403 604
404 memcpy(timeout, & timeout_action.timeout, si 605 memcpy(timeout, & timeout_action.timeout, sizeof(struct sos_time));
405 606
406 return retval; 607 return retval;
407 } 608 }
408 609
409 610
410 sos_ret_t sos_thread_force_unblock(struct sos_ 611 sos_ret_t sos_thread_force_unblock(struct sos_thread *thread)
411 { 612 {
412 sos_ret_t retval; 613 sos_ret_t retval;
413 sos_ui32_t flags; 614 sos_ui32_t flags;
414 615
415 if (! thread) 616 if (! thread)
416 return -SOS_EINVAL; 617 return -SOS_EINVAL;
417 618
418 sos_disable_IRQs(flags); 619 sos_disable_IRQs(flags);
419 620
420 621
421 retval = SOS_OK; 622 retval = SOS_OK;
422 switch(sos_thread_get_state(thread)) 623 switch(sos_thread_get_state(thread))
423 { 624 {
424 case SOS_THR_RUNNING: 625 case SOS_THR_RUNNING:
425 case SOS_THR_READY: 626 case SOS_THR_READY:
426 627
427 break; 628 break;
428 629
429 case SOS_THR_ZOMBIE: 630 case SOS_THR_ZOMBIE:
430 retval = -SOS_EFATAL; 631 retval = -SOS_EFATAL;
431 break; 632 break;
432 633
433 default: 634 default:
434 retval = sos_sched_set_ready(thread); 635 retval = sos_sched_set_ready(thread);
435 break; 636 break;
436 } 637 }
437 638
438 sos_restore_IRQs(flags); 639 sos_restore_IRQs(flags);
439 640
440 return retval; 641 return retval;
>> 642 }
>> 643
>> 644
/*
 * Dump a backtrace of the current thread's kernel stack, up to 15
 * frames, to the console and/or the Bochs debug port.
 *
 * Uses the GCC nested-function extension: backtracer() closes over
 * stack_bottom/stack_size (and the on_console/on_bochs flags), so it
 * must stay defined inside this function.
 */
void sos_thread_dump_backtrace(sos_bool_t on_console,
			       sos_bool_t on_bochs)
{
  sos_vaddr_t stack_bottom = current_thread->kernel_stack_base_addr;
  sos_size_t stack_size = current_thread->kernel_stack_size;

  /* Per-frame callback: print PC and the first 4 stack words that look
     like arguments. */
  static void backtracer(sos_vaddr_t PC,
			 sos_vaddr_t params,
			 sos_ui32_t depth,
			 void *custom_arg)
  {
    sos_ui32_t invalid = 0xffffffff, *arg1, *arg2, *arg3, *arg4;

    /* Assume a cdecl-style frame: arguments follow `params` at 4-byte
       offsets — TODO confirm against the sos_backtrace() contract. */
    arg1 = (sos_ui32_t*)params;
    arg2 = (sos_ui32_t*)(params+4);
    arg3 = (sos_ui32_t*)(params+8);
    arg4 = (sos_ui32_t*)(params+12);

    /* Replace any argument pointer falling outside the kernel stack by
       a sentinel, so we never dereference a bogus address. */
#define INTERVAL_OK(b,v,u) ( ((b) <= (sos_vaddr_t)(v)) \
                             && ((sos_vaddr_t)(v) < (u)) )
    if (!INTERVAL_OK(stack_bottom, arg1, stack_bottom + stack_size))
      arg1 = &invalid;
    if (!INTERVAL_OK(stack_bottom, arg2, stack_bottom + stack_size))
      arg2 = &invalid;
    if (!INTERVAL_OK(stack_bottom, arg3, stack_bottom + stack_size))
      arg3 = &invalid;
    if (!INTERVAL_OK(stack_bottom, arg4, stack_bottom + stack_size))
      arg4 = &invalid;

    if (on_bochs)
      sos_bochs_printf("[%d] PC=0x%x arg1=0x%x arg2=0x%x arg3=0x%x\n",
		       (unsigned)depth, (unsigned)PC,
		       (unsigned)*arg1, (unsigned)*arg2,
		       (unsigned)*arg3);

    if (on_console)
      sos_x86_videomem_printf(23-depth, 3,
			      SOS_X86_VIDEO_BG_BLUE
			      | SOS_X86_VIDEO_FG_LTGREEN,
			      "[%d] PC=0x%x arg1=0x%x arg2=0x%x arg3=0x%x arg4=0x%x",
			      (unsigned)depth, PC,
			      (unsigned)*arg1, (unsigned)*arg2,
			      (unsigned)*arg3, (unsigned)*arg4);

  }

  sos_backtrace(NULL, 15, stack_bottom, stack_size,
		backtracer, NULL);
}
>> 701
>> 702
>> 703
>> 704
>> 705
>> 706
>> 707
>> 708
>> 709 sos_ret_t
>> 710 sos_thread_change_current_mm_context(struct sos_mm_context *mm_ctxt)
>> 711 {
>> 712 sos_ui32_t flags;
>> 713
>> 714
>> 715 struct sos_mm_context * prev_mm_ctxt
>> 716 = current_thread->squatted_mm_context;
>> 717
>> 718
>> 719
>> 720 if (mm_ctxt != NULL)
>> 721 SOS_ASSERT_FATAL(prev_mm_ctxt == NULL);
>> 722 else
>> 723 SOS_ASSERT_FATAL(prev_mm_ctxt != NULL);
>> 724
>> 725 sos_disable_IRQs(flags);
>> 726
>> 727
>> 728 current_thread->squatted_mm_context = mm_ctxt;
>> 729
>> 730
>> 731
>> 732 if (mm_ctxt != NULL)
>> 733 {
>> 734 sos_mm_context_ref(mm_ctxt);
>> 735
>> 736
>> 737 sos_mm_context_switch_to(mm_ctxt);
>> 738 }
>> 739 else
>> 740 sos_mm_context_unref(prev_mm_ctxt);
>> 741
>> 742
>> 743
>> 744 sos_restore_IRQs(flags);
>> 745
>> 746 return SOS_OK;
>> 747 }
>> 748
>> 749
>> 750 void sos_thread_prepare_syscall_switch_back(struct sos_cpu_state *cpu_state)
>> 751 {
>> 752
>> 753
>> 754
>> 755
>> 756
>> 757
>> 758
>> 759
>> 760 current_thread->cpu_state = cpu_state;
>> 761
>> 762
>> 763 _prepare_mm_context((struct sos_thread*) current_thread);
>> 764 }
>> 765
>> 766
>> 767 void sos_thread_prepare_exception_switch_back(struct sos_cpu_state *cpu_state)
>> 768 {
>> 769
>> 770
>> 771
>> 772
>> 773
>> 774
>> 775
>> 776
>> 777 current_thread->cpu_state = cpu_state;
>> 778
>> 779
>> 780 _prepare_mm_context((struct sos_thread*) current_thread);
>> 781 }
>> 782
>> 783
>> 784 void
>> 785 sos_thread_prepare_irq_servicing(struct sos_cpu_state *interrupted_state)
>> 786 {
>> 787 current_thread->cpu_state = interrupted_state;
>> 788 }
>> 789
>> 790
>> 791 struct sos_cpu_state *
>> 792 sos_thread_prepare_irq_switch_back(void)
>> 793 {
>> 794 struct sos_thread *myself, *next_thread;
>> 795
>> 796
>> 797
>> 798 if (! sos_cpu_context_is_in_user_mode(current_thread->cpu_state))
>> 799 return current_thread->cpu_state;
>> 800
>> 801
>> 802
>> 803
>> 804
>> 805
>> 806
>> 807 SOS_ASSERT_FATAL(current_thread->process != NULL);
>> 808
>> 809
>> 810 myself = (struct sos_thread*)current_thread;
>> 811
>> 812
>> 813 next_thread = sos_reschedule(myself, FALSE);
>> 814
>> 815
>> 816 _prepare_mm_context(next_thread);
>> 817
>> 818
>> 819 _set_current(next_thread);
>> 820 return next_thread->cpu_state;
441 } 821 }