001
002
003
004
005
006
007
008
009
010
011
012
013
014
015
016
017
018
019 #include <sos/physmem.h>
020 #include <sos/kmem_slab.h>
021 #include <sos/kmalloc.h>
022 #include <sos/klibc.h>
023 #include <sos/list.h>
024 #include <sos/assert.h>
025
026 #include <hwcore/irq.h>
027
028 #include "thread.h"
029
030
031
032
033
/* Size of the private kernel stack allocated for each newly created
   thread (one page) */
#define SOS_THREAD_KERNEL_STACK_SIZE (1*SOS_PAGE_SIZE)


/* The thread currently owning the CPU. Updated at each context
   switch by _set_current(); volatile because it may be observed
   outside the IRQ-protected switch path. */
static volatile struct sos_thread *current_thread = NULL;


/* List of ALL the threads in the system, whatever their state,
   linked through their gbl_prev/gbl_next fields. Modifications are
   done with IRQs disabled. */
static struct sos_thread *thread_list = NULL;


/* Slab cache from which every struct sos_thread is allocated
   (created in sos_thread_subsystem_setup with the ZERO flag, so
   fresh descriptors come out zero-filled). */
static struct sos_kslab_cache *cache_thread;
066
067
068 struct sos_thread *sos_thread_get_current()
069 {
070 SOS_ASSERT_FATAL(current_thread->state == SOS_THR_RUNNING);
071 return (struct sos_thread*)current_thread;
072 }
073
074
075 inline static sos_ret_t _set_current(struct sos_thread *thr)
076 {
077 SOS_ASSERT_FATAL(thr->state == SOS_THR_READY);
078 current_thread = thr;
079 current_thread->state = SOS_THR_RUNNING;
080 return SOS_OK;
081 }
082
083
084 sos_ret_t sos_thread_subsystem_setup(sos_vaddr_t init_thread_stack_base_addr,
085 sos_size_t init_thread_stack_size)
086 {
087 struct sos_thread *myself;
088
089
090 cache_thread = sos_kmem_cache_create("thread",
091 sizeof(struct sos_thread),
092 2,
093 0,
094 SOS_KSLAB_CREATE_MAP
095 | SOS_KSLAB_CREATE_ZERO);
096 if (! cache_thread)
097 return -SOS_ENOMEM;
098
099
100 myself = (struct sos_thread*) sos_kmem_cache_alloc(cache_thread,
101 SOS_KSLAB_ALLOC_ATOMIC);
102 if (! myself)
103 return -SOS_ENOMEM;
104
105
106 strzcpy(myself->name, "[kinit]", SOS_THR_MAX_NAMELEN);
107 myself->state = SOS_THR_CREATED;
108 myself->kernel_stack_base_addr = init_thread_stack_base_addr;
109 myself->kernel_stack_size = init_thread_stack_size;
110
111
112 sos_cpu_state_prepare_detect_kernel_stack_overflow(myself->cpu_state,
113 myself->kernel_stack_base_addr,
114 myself->kernel_stack_size);
115
116
117 list_singleton_named(thread_list, myself, gbl_prev, gbl_next);
118
119
120 myself->state = SOS_THR_READY;
121 _set_current(myself);
122
123 return SOS_OK;
124 }
125
126
127 struct sos_thread *
128 sos_create_kernel_thread(const char *name,
129 sos_kernel_thread_start_routine_t start_func,
130 void *start_arg)
131 {
132 __label__ undo_creation;
133 sos_ui32_t flags;
134 struct sos_thread *new_thread;
135
136 if (! start_func)
137 return NULL;
138
139
140 new_thread
141 = (struct sos_thread*) sos_kmem_cache_alloc(cache_thread,
142 SOS_KSLAB_ALLOC_ATOMIC);
143 if (! new_thread)
144 return NULL;
145
146
147 strzcpy(new_thread->name, ((name)?name:"[NONAME]"), SOS_THR_MAX_NAMELEN);
148 new_thread->state = SOS_THR_CREATED;
149
150
151 new_thread->kernel_stack_base_addr = sos_kmalloc(SOS_THREAD_KERNEL_STACK_SIZE, 0);
152 new_thread->kernel_stack_size = SOS_THREAD_KERNEL_STACK_SIZE;
153 if (! new_thread->kernel_stack_base_addr)
154 goto undo_creation;
155
156
157 if (SOS_OK
158 != sos_cpu_kstate_init(& new_thread->cpu_state,
159 (sos_cpu_kstate_function_arg1_t*) start_func,
160 (sos_ui32_t) start_arg,
161 new_thread->kernel_stack_base_addr,
162 new_thread->kernel_stack_size,
163 (sos_cpu_kstate_function_arg1_t*) sos_thread_exit,
164 (sos_ui32_t) NULL))
165 goto undo_creation;
166
167
168 sos_disable_IRQs(flags);
169 list_add_tail_named(thread_list, new_thread, gbl_prev, gbl_next);
170 sos_restore_IRQs(flags);
171
172
173 if (SOS_OK != sos_sched_set_ready(new_thread))
174 goto undo_creation;
175
176
177 return new_thread;
178
179 undo_creation:
180 if (new_thread->kernel_stack_base_addr)
181 sos_kfree((sos_vaddr_t) new_thread->kernel_stack_base_addr);
182 sos_kmem_cache_free((sos_vaddr_t) new_thread);
183 return NULL;
184 }
185
186
187
188
189 static void delete_thread(struct sos_thread *thr)
190 {
191 sos_ui32_t flags;
192
193 sos_disable_IRQs(flags);
194 list_delete_named(thread_list, thr, gbl_prev, gbl_next);
195 sos_restore_IRQs(flags);
196
197 sos_kfree((sos_vaddr_t) thr->kernel_stack_base_addr);
198 memset(thr, 0x0, sizeof(struct sos_thread));
199 sos_kmem_cache_free((sos_vaddr_t) thr);
200 }
201
202
/*
 * Terminate the calling thread. Never returns: the CPU is handed to
 * the next ready thread, and the dying thread's resources are released
 * by delete_thread() executed on that next thread's stack (we cannot
 * free our own stack while still running on it).
 *
 * Must not be called from interrupt context.
 */
void sos_thread_exit()
{
  sos_ui32_t flags;
  struct sos_thread *myself, *next_thread;

  /* A thread exits on its own behalf, never from IRQ context */
  SOS_ASSERT_FATAL(! sos_servicing_irq());

  myself = sos_thread_get_current();

  /* The thread must not still be enqueued on any kernel wait queue
     (its kwaitq entries would dangle after the descriptor is freed) */
  SOS_ASSERT_FATAL(list_is_empty_named(myself->kwaitq_list,
                                       prev_entry_for_thread,
                                       next_entry_for_thread));

  /* Mark ourselves dead and pick the next thread to run. IRQs stay
     disabled from here on: we never return, so there is no matching
     sos_restore_IRQs — the next thread's context carries its own
     flags. */
  sos_disable_IRQs(flags);
  myself->state = SOS_THR_ZOMBIE;
  next_thread = sos_reschedule(myself, FALSE);

  /* Sanity-check the stack of the thread we are about to run */
  sos_cpu_state_detect_kernel_stack_overflow(next_thread->cpu_state,
                                             next_thread->kernel_stack_base_addr,
                                             next_thread->kernel_stack_size);

  /* Switch to next_thread and, once on ITS stack, run
     delete_thread(myself) to reclaim this thread's resources.
     This call does not return. */
  _set_current(next_thread);
  sos_cpu_context_exit_to(next_thread->cpu_state,
                          (sos_cpu_kstate_function_arg1_t*) delete_thread,
                          (sos_ui32_t) myself);
}
238
239
240 sos_thread_state_t sos_thread_get_state(struct sos_thread *thr)
241 {
242 if (! thr)
243 thr = (struct sos_thread*)current_thread;
244
245 return thr->state;
246 }
247
248
/* How the current thread leaves the CPU: YIELD_MYSELF keeps it ready
   (it may be re-elected), BLOCK_MYSELF marks it blocked until someone
   wakes it up. */
typedef enum { YIELD_MYSELF, BLOCK_MYSELF } switch_type_t;


/*
 * Core context-switch routine: elect the next thread and transfer the
 * CPU to it. Called with IRQs disabled, never from interrupt context.
 * Returns SOS_OK once the calling thread is given the CPU back.
 */
static sos_ret_t _switch_to_next_thread(switch_type_t operation)
{
  struct sos_thread *myself, *next_thread;

  SOS_ASSERT_FATAL(current_thread->state == SOS_THR_RUNNING);

  /* Context switching from IRQ context would corrupt the interrupted
     thread's state */
  SOS_ASSERT_FATAL(! sos_servicing_irq());

  myself = (struct sos_thread*)current_thread;

  /* When blocking, the state change must happen BEFORE the scheduler
     runs so we are not re-elected */
  if (BLOCK_MYSELF == operation)
    {
      myself->state = SOS_THR_BLOCKED;
    }

  /* Elect the successor (the second argument asks the scheduler to
     requeue us when we merely yield) */
  next_thread = sos_reschedule(myself, YIELD_MYSELF == operation);

  /* An actual switch is needed only if somebody else was elected */
  if (myself != next_thread)
    {
      /* Sanity-check the stack of the thread we are switching to */
      sos_cpu_state_detect_kernel_stack_overflow(next_thread->cpu_state,
                                                 next_thread->kernel_stack_base_addr,
                                                 next_thread->kernel_stack_size);

      /* Update current_thread, then save our context and resume the
         successor's. Execution resumes here only when some other
         thread later switches back to us. */
      _set_current(next_thread);
      sos_cpu_context_switch(& myself->cpu_state, next_thread->cpu_state);

      /* Back on the CPU: whoever switched to us must have restored
         the invariants */
      SOS_ASSERT_FATAL(current_thread == myself);
      SOS_ASSERT_FATAL(current_thread->state == SOS_THR_RUNNING);
    }
  else
    {
      /* We were re-elected: just restore the RUNNING state
         (sos_reschedule left us READY) */
      _set_current(next_thread);
    }

  return SOS_OK;
}
302
303
304 sos_ret_t sos_thread_yield()
305 {
306 sos_ui32_t flags;
307 sos_ret_t retval;
308
309 sos_disable_IRQs(flags);
310
311 retval = _switch_to_next_thread(YIELD_MYSELF);
312
313 sos_restore_IRQs(flags);
314 return retval;
315 }
316
317
318
319
320
/*
 * Context shared between sos_thread_sleep() (which allocates it on the
 * sleeper's stack) and the sleep_timeout() callback fired from the
 * timer subsystem.
 */
struct sleep_timeout_params
{
  struct sos_thread *thread_to_wakeup; /* the sleeping thread to awaken */
  sos_bool_t timeout_triggered;        /* TRUE once the timeout fired */
};
326
327
328
329
330
331 static void sleep_timeout(struct sos_timeout_action *act)
332 {
333 struct sleep_timeout_params *sleep_timeout_params
334 = (struct sleep_timeout_params*) act->routine_data;
335
336
337 sleep_timeout_params->timeout_triggered = TRUE;
338
339
340 SOS_ASSERT_FATAL(SOS_OK ==
341 sos_thread_force_unblock(sleep_timeout_params
342 ->thread_to_wakeup));
343 }
344
345
/*
 * Block the calling thread for (at most) the given delay.
 *
 * @timeout: requested delay; on return it is updated with the time
 *   REMAINING (zero when the full delay elapsed). A NULL timeout means
 *   "block forever" (until sos_thread_force_unblock is called).
 *
 * Returns SOS_OK when the delay fully elapsed, -SOS_EINTR when the
 * thread was woken up early by a third party.
 */
sos_ret_t sos_thread_sleep(struct sos_time *timeout)
{
  sos_ui32_t flags;
  struct sleep_timeout_params sleep_timeout_params;
  struct sos_timeout_action timeout_action;
  sos_ret_t retval;

  /* No timeout: block unconditionally; only a force_unblock can
     resume us */
  if (NULL == timeout)
    {
      sos_disable_IRQs(flags);
      retval = _switch_to_next_thread(BLOCK_MYSELF);
      sos_restore_IRQs(flags);

      return retval;
    }

  sos_time_init_action(& timeout_action);

  /* Both structures live on our stack: safe because we stay blocked
     (hence the stack stays live) until either the timer fires or a
     force_unblock resumes us */
  sleep_timeout_params.thread_to_wakeup
    = (struct sos_thread*)current_thread;
  sleep_timeout_params.timeout_triggered = FALSE;

  sos_disable_IRQs(flags);

  /* Arm the timer: sleep_timeout() will run when the delay expires */
  SOS_ASSERT_FATAL(SOS_OK ==
                   sos_time_register_action_relative(& timeout_action,
                                                     timeout,
                                                     sleep_timeout,
                                                     & sleep_timeout_params));

  /* Go to sleep; execution resumes here after wakeup (IRQs are
     disabled again at that point) */
  retval = _switch_to_next_thread(BLOCK_MYSELF);

  /* Decide WHY we woke up */
  if (sleep_timeout_params.timeout_triggered)
    {
      /* Timer fired: its remaining time must be exactly zero */
      SOS_ASSERT_FATAL(sos_time_is_zero(& timeout_action.timeout));
      retval = SOS_OK;
    }
  else
    {
      /* Someone force-unblocked us early: disarm the pending timer so
         it cannot fire against a stale stack frame */
      SOS_ASSERT_FATAL(SOS_OK == sos_time_unregister_action(& timeout_action));
      retval = -SOS_EINTR;
    }

  sos_restore_IRQs(flags);

  /* Report the remaining time back to the caller */
  memcpy(timeout, & timeout_action.timeout, sizeof(struct sos_time));

  return retval;
}
408
409
410 sos_ret_t sos_thread_force_unblock(struct sos_thread *thread)
411 {
412 sos_ret_t retval;
413 sos_ui32_t flags;
414
415 if (! thread)
416 return -SOS_EINVAL;
417
418 sos_disable_IRQs(flags);
419
420
421 retval = SOS_OK;
422 switch(sos_thread_get_state(thread))
423 {
424 case SOS_THR_RUNNING:
425 case SOS_THR_READY:
426
427 break;
428
429 case SOS_THR_ZOMBIE:
430 retval = -SOS_EFATAL;
431 break;
432
433 default:
434 retval = sos_sched_set_ready(thread);
435 break;
436 }
437
438 sos_restore_IRQs(flags);
439
440 return retval;
441 }