001
002
003
004
005
006
007
008
009
010
011
012
013
014
015
016
017
018
019 #include <sos/errno.h>
020 #include <sos/klibc.h>
021 #include <sos/assert.h>
022 #include <sos/list.h>
023 #include <sos/calcload.h>
024
025 #include "sched.h"
026
027
028
029
030
031
032
033
034
035
036
/* A ready queue: one doubly-linked list of READY threads per priority
   level, plus a global count over all levels. */
struct sos_sched_queue
{
  unsigned int nr_threads;  /* Total number of threads, all priorities */
  struct sos_thread *thread_list[SOS_SCHED_NUM_PRIO];  /* Per-priority list heads */
};
042
043
044
045
046
047
048
/* The two scheduling queues: threads are elected from the active queue
   and, once their quantum is exhausted, pushed back on the expired
   queue; the two pointers are swapped when the active queue drains. */
static struct sos_sched_queue *active_queue, *expired_queue;




/* Backing storage for the two queues above. */
static struct sos_sched_queue sched_queue[2];




/* Time slice (quantum) allotted to each time-sharing priority level,
   precomputed by sos_sched_subsystem_setup().
   NOTE(review): not declared static — confirm whether another file
   relies on this symbol; otherwise it should be made static. */
struct sos_time time_slice[SOS_SCHED_NUM_PRIO];
062
063
064 sos_ret_t sos_sched_subsystem_setup()
065 {
066 sos_sched_priority_t prio;
067
068 memset(sched_queue, 0x0, sizeof(sched_queue));
069 active_queue = & sched_queue[0];
070 expired_queue = & sched_queue[1];
071
072
073 for (prio = SOS_SCHED_PRIO_TS_HIGHEST ;
074 prio <= SOS_SCHED_PRIO_TS_LOWEST ;
075 prio ++)
076 {
077 unsigned int ms;
078 ms = SOS_TIME_SLICE_MIN
079 + (SOS_TIME_SLICE_MAX - SOS_TIME_SLICE_MIN)
080 * (prio - SOS_SCHED_PRIO_TS_HIGHEST)
081 / (SOS_SCHED_PRIO_TS_LOWEST - SOS_SCHED_PRIO_TS_HIGHEST);
082 time_slice[prio].sec = ms / 1000;
083 time_slice[prio].nanosec = 1000000UL * (ms % 1000);
084 }
085
086 return SOS_OK;
087 }
088
089
090
091
092
093
094
095
096
097 static sos_ret_t add_in_ready_queue(struct sos_sched_queue *q,
098 struct sos_thread *thr,
099 sos_bool_t insert_at_tail)
100 {
101 sos_sched_priority_t prio;
102
103 SOS_ASSERT_FATAL( (SOS_THR_CREATED == thr->state)
104 || (SOS_THR_RUNNING == thr->state)
105 || (SOS_THR_BLOCKED == thr->state) );
106
107
108 prio = sos_thread_get_priority(thr);
109 if (insert_at_tail)
110 list_add_tail_named(q->thread_list[prio], thr,
111 ready.rdy_prev, ready.rdy_next);
112 else
113 list_add_head_named(q->thread_list[prio], thr,
114 ready.rdy_prev, ready.rdy_next);
115 thr->ready.rdy_queue = q;
116 q->nr_threads ++;
117
118
119 thr->state = SOS_THR_READY;
120
121 return SOS_OK;
122 }
123
124
125 sos_ret_t sos_sched_set_ready(struct sos_thread *thr)
126 {
127 sos_ret_t retval;
128
129
130 if (SOS_THR_READY == thr->state)
131 return SOS_OK;
132
133
134 memset(& thr->user_time_spent_in_slice, 0x0, sizeof(struct sos_time));
135
136 if (SOS_SCHED_PRIO_IS_RT(sos_thread_get_priority(thr)))
137 {
138
139 retval = add_in_ready_queue(active_queue, thr, TRUE);
140 }
141 else
142 {
143
144 retval = add_in_ready_queue(expired_queue, thr, TRUE);
145 }
146
147 return retval;
148 }
149
150
151 sos_ret_t sos_sched_change_priority(struct sos_thread *thr,
152 sos_sched_priority_t priority)
153 {
154 struct sos_thread *thread_list;
155 SOS_ASSERT_FATAL(SOS_THR_READY == thr->state);
156
157
158 thread_list
159 = thr->ready.rdy_queue->thread_list[sos_thread_get_priority(thr)];
160
161 list_delete_named(thread_list, thr, ready.rdy_prev, ready.rdy_next);
162
163
164 thread_list = thr->ready.rdy_queue->thread_list[priority];
165 list_add_tail_named(thread_list, thr, ready.rdy_prev, ready.rdy_next);
166 thr->ready.rdy_queue->thread_list[priority] = thread_list;
167
168 return SOS_OK;
169 }
170
171
172
173
174
175
176 static sos_bool_t
177 thread_expired_its_quantuum(struct sos_thread *thr)
178 {
179 sos_sched_priority_t prio = sos_thread_get_priority(thr);
180
181
182 if (SOS_SCHED_PRIO_IS_RT(prio))
183 return FALSE;
184
185
186
187
188 if (sos_time_cmp(& thr->user_time_spent_in_slice,
189 & time_slice[prio]) >= 0)
190 return TRUE;
191
192 return FALSE;
193 }
194
195
/*
 * Elect the next thread to run and return it.  current_thread is the
 * thread being descheduled; do_yield forces it to the back of the
 * queues even when its quantum is not yet exhausted.
 *
 * Never returns NULL in practice: when no thread at all is ready this
 * is a fatal kernel error (presumably an idle thread is expected to
 * always be ready — TODO confirm against thread subsystem).
 */
struct sos_thread * sos_reschedule(struct sos_thread *current_thread,
				   sos_bool_t do_yield)
{
  sos_sched_priority_t prio;

  /* A time-shared thread that consumed its whole slice is forced to
     yield and starts a fresh quantum */
  if (thread_expired_its_quantuum(current_thread))
    {
      memset(& current_thread->user_time_spent_in_slice,
	     0x0, sizeof(struct sos_time));

      do_yield = TRUE;
    }

  if (SOS_THR_ZOMBIE == current_thread->state)
    {
      /* Dead thread: do not re-insert it anywhere */
    }
  else if (SOS_THR_BLOCKED != current_thread->state)
    {
      /* Thread is still runnable: put it back in a ready queue */
      if (do_yield)
	{
	  /* Yield: go to the back.  RT threads stay in the active
	     queue; time-shared threads wait in the expired queue */
	  if (SOS_SCHED_PRIO_IS_RT(sos_thread_get_priority(current_thread)))
	    add_in_ready_queue(active_queue, current_thread, TRUE);
	  else
	    add_in_ready_queue(expired_queue, current_thread, TRUE);
	}
      else
	{
	  /* No yield: keep it at the front of the active queue so it
	     is re-elected at once if still of highest priority */
	  add_in_ready_queue(active_queue, current_thread, FALSE);
	}
    }

  /* Active queue drained: swap the active and expired queues */
  if (active_queue->nr_threads <= 0)
    {
      struct sos_sched_queue *q;
      q = active_queue;
      active_queue = expired_queue;
      expired_queue = q;
    }

  /* Elect the first thread of the highest non-empty priority level in
     the active queue */
  for (prio = SOS_SCHED_PRIO_HIGHEST ; prio <= SOS_SCHED_PRIO_LOWEST ; prio ++)
    {
      struct sos_thread *next_thr;

      if (list_is_empty_named(active_queue->thread_list[prio],
			      ready.rdy_prev, ready.rdy_next))
	continue;

      /* Pop the head of the list and account for its departure */
      next_thr = list_pop_head_named(active_queue->thread_list[prio],
				     ready.rdy_prev, ready.rdy_next);
      active_queue->nr_threads --;

      return next_thr;
    }

  /* Both queues empty: nothing left to run — fatal */
  SOS_FATAL_ERROR("No kernel thread ready ?!");
  return NULL;
}
270
271
272 sos_ret_t sos_sched_do_timer_tick()
273 {
274 struct sos_thread *interrupted_thread = sos_thread_get_current();
275 struct sos_time tick_duration;
276 sos_bool_t cur_is_user;
277 sos_ui32_t nb_user_ready = 0;
278 sos_ui32_t nb_kernel_ready = 0;
279 int prio;
280
281 sos_time_get_tick_resolution(& tick_duration);
282
283
284 if (sos_cpu_context_is_in_user_mode(interrupted_thread->cpu_state))
285 {
286 cur_is_user = TRUE;
287
288
289 sos_time_inc(& interrupted_thread->rusage.ru_utime,
290 & tick_duration);
291
292
293 sos_time_inc(& interrupted_thread->user_time_spent_in_slice,
294 & tick_duration);
295 }
296 else
297 {
298 cur_is_user = FALSE;
299
300
301 sos_time_inc(& interrupted_thread->rusage.ru_stime,
302 & tick_duration);
303 }
304
305
306
307 for (prio = SOS_SCHED_PRIO_HIGHEST ; prio <= SOS_SCHED_PRIO_LOWEST ; prio ++)
308 {
309 struct sos_thread *thr;
310 int nb_thrs;
311
312 list_foreach_forward_named(active_queue->thread_list[prio],
313 thr, nb_thrs,
314 ready.rdy_prev, ready.rdy_next)
315 {
316 if (sos_cpu_context_is_in_user_mode(thr->cpu_state))
317 nb_user_ready ++;
318 else
319 nb_kernel_ready ++;
320 }
321
322 list_foreach_forward_named(expired_queue->thread_list[prio],
323 thr, nb_thrs,
324 ready.rdy_prev, ready.rdy_next)
325 {
326 if (sos_cpu_context_is_in_user_mode(thr->cpu_state))
327 nb_user_ready ++;
328 else
329 nb_kernel_ready ++;
330 }
331 }
332
333 sos_load_do_timer_tick(cur_is_user,
334 nb_user_ready,
335 nb_kernel_ready);
336
337 return SOS_OK;
338 }