Diff markup
001 001
002 002
003 003
004 004
005 005
006 006
007 007
008 008
009 009
010 010
011 011
012 012
013 013
014 014
015 015
016 016
017 017
018 018
019 #include <sos/errno.h> 019 #include <sos/errno.h>
020 #include <sos/klibc.h> 020 #include <sos/klibc.h>
021 #include <sos/assert.h> 021 #include <sos/assert.h>
022 #include <sos/list.h> 022 #include <sos/list.h>
023 #include <sos/calcload.h> 023 #include <sos/calcload.h>
024 024
025 #include "sched.h" 025 #include "sched.h"
026 026
027 027
028 028
029 029
030 030
031 031
032 032
033 033
034 034
035 035
036 036
/*
 * One scheduler run queue: the set of READY threads, kept as one
 * doubly-linked list head per priority level.
 */
struct sos_sched_queue
{
  unsigned int nr_threads;  /* Total number of threads linked in this queue */
  struct sos_thread *thread_list[SOS_SCHED_NUM_PRIO];  /* One list per priority */
};


/*
 * The two run queues: the active queue is the one threads are picked
 * from; time-shared threads that used up their quantum are parked on
 * the expired queue until the two queues are swapped (see
 * sos_reschedule, which swaps them when the active queue drains).
 */
static struct sos_sched_queue *active_queue, *expired_queue;


/* Backing storage for the two queues pointed to above. */
static struct sos_sched_queue sched_queue[2];


/*
 * Per-priority time-slice quantum, precomputed by
 * sos_sched_subsystem_setup.
 * NOTE(review): not declared static — presumably referenced from
 * another compilation unit; confirm before narrowing its linkage.
 */
struct sos_time time_slice[SOS_SCHED_NUM_PRIO];
062 062
063 063
064 sos_ret_t sos_sched_subsystem_setup() 064 sos_ret_t sos_sched_subsystem_setup()
065 { 065 {
066 sos_sched_priority_t prio; 066 sos_sched_priority_t prio;
067 067
068 memset(sched_queue, 0x0, sizeof(sched_queue) 068 memset(sched_queue, 0x0, sizeof(sched_queue));
069 active_queue = & sched_queue[0]; 069 active_queue = & sched_queue[0];
070 expired_queue = & sched_queue[1]; 070 expired_queue = & sched_queue[1];
071 071
072 072
073 for (prio = SOS_SCHED_PRIO_TS_HIGHEST ; 073 for (prio = SOS_SCHED_PRIO_TS_HIGHEST ;
074 prio <= SOS_SCHED_PRIO_TS_LOWEST ; 074 prio <= SOS_SCHED_PRIO_TS_LOWEST ;
075 prio ++) 075 prio ++)
076 { 076 {
077 unsigned int ms; 077 unsigned int ms;
078 ms = SOS_TIME_SLICE_MIN 078 ms = SOS_TIME_SLICE_MIN
079 + (SOS_TIME_SLICE_MAX - SOS_TIME_SL 079 + (SOS_TIME_SLICE_MAX - SOS_TIME_SLICE_MIN)
080 * (prio - SOS_SCHED_PRIO_TS_HIGHE 080 * (prio - SOS_SCHED_PRIO_TS_HIGHEST)
081 / (SOS_SCHED_PRIO_TS_LOWEST - SOS 081 / (SOS_SCHED_PRIO_TS_LOWEST - SOS_SCHED_PRIO_TS_HIGHEST);
082 time_slice[prio].sec = ms / 1000; 082 time_slice[prio].sec = ms / 1000;
083 time_slice[prio].nanosec = 1000000UL * ( 083 time_slice[prio].nanosec = 1000000UL * (ms % 1000);
084 } 084 }
085 085
086 return SOS_OK; 086 return SOS_OK;
087 } 087 }
088 088
089 089
090 090
091 091
092 092
093 093
094 094
095 095
096 096
097 static sos_ret_t add_in_ready_queue(struct sos 097 static sos_ret_t add_in_ready_queue(struct sos_sched_queue *q,
098 struct sos 098 struct sos_thread *thr,
099 sos_bool_t 099 sos_bool_t insert_at_tail)
100 { 100 {
101 sos_sched_priority_t prio; 101 sos_sched_priority_t prio;
102 102
103 SOS_ASSERT_FATAL( (SOS_THR_CREATED == thr->s 103 SOS_ASSERT_FATAL( (SOS_THR_CREATED == thr->state)
104 || (SOS_THR_RUNNING == thr 104 || (SOS_THR_RUNNING == thr->state)
105 || (SOS_THR_BLOCKED == thr 105 || (SOS_THR_BLOCKED == thr->state) );
106 106
107 107
108 prio = sos_thread_get_priority(thr); 108 prio = sos_thread_get_priority(thr);
109 if (insert_at_tail) 109 if (insert_at_tail)
110 list_add_tail_named(q->thread_list[prio], 110 list_add_tail_named(q->thread_list[prio], thr,
111 ready.rdy_prev, ready. 111 ready.rdy_prev, ready.rdy_next);
112 else 112 else
113 list_add_head_named(q->thread_list[prio], 113 list_add_head_named(q->thread_list[prio], thr,
114 ready.rdy_prev, ready. 114 ready.rdy_prev, ready.rdy_next);
115 thr->ready.rdy_queue = q; 115 thr->ready.rdy_queue = q;
116 q->nr_threads ++; 116 q->nr_threads ++;
117 117
118 118
119 thr->state = SOS_THR_READY; 119 thr->state = SOS_THR_READY;
120 120
121 return SOS_OK; 121 return SOS_OK;
122 } 122 }
123 123
124 124
125 sos_ret_t sos_sched_set_ready(struct sos_threa 125 sos_ret_t sos_sched_set_ready(struct sos_thread *thr)
126 { 126 {
127 sos_ret_t retval; 127 sos_ret_t retval;
128 128
129 129
130 if (SOS_THR_READY == thr->state) 130 if (SOS_THR_READY == thr->state)
131 return SOS_OK; 131 return SOS_OK;
132 132
133 133
134 memset(& thr->user_time_spent_in_slice, 0x0, 134 memset(& thr->user_time_spent_in_slice, 0x0, sizeof(struct sos_time));
135 135
136 if (SOS_SCHED_PRIO_IS_RT(sos_thread_get_prio 136 if (SOS_SCHED_PRIO_IS_RT(sos_thread_get_priority(thr)))
137 { 137 {
138 138
139 retval = add_in_ready_queue(active_queue 139 retval = add_in_ready_queue(active_queue, thr, TRUE);
140 } 140 }
141 else 141 else
142 { 142 {
143 143
144 retval = add_in_ready_queue(expired_queu 144 retval = add_in_ready_queue(expired_queue, thr, TRUE);
145 } 145 }
146 146
147 return retval; 147 return retval;
148 } 148 }
149 149
150 150
151 sos_ret_t sos_sched_change_priority(struct sos 151 sos_ret_t sos_sched_change_priority(struct sos_thread *thr,
152 sos_sched_ 152 sos_sched_priority_t priority)
153 { 153 {
154 struct sos_thread *thread_list; 154 struct sos_thread *thread_list;
155 SOS_ASSERT_FATAL(SOS_THR_READY == thr->state 155 SOS_ASSERT_FATAL(SOS_THR_READY == thr->state);
156 156
157 157
158 thread_list 158 thread_list
159 = thr->ready.rdy_queue->thread_list[sos_th 159 = thr->ready.rdy_queue->thread_list[sos_thread_get_priority(thr)];
160 160
161 list_delete_named(thread_list, thr, ready.rd 161 list_delete_named(thread_list, thr, ready.rdy_prev, ready.rdy_next);
162 162
163 163
164 thread_list = thr->ready.rdy_queue->thread_l 164 thread_list = thr->ready.rdy_queue->thread_list[priority];
165 list_add_tail_named(thread_list, thr, ready. 165 list_add_tail_named(thread_list, thr, ready.rdy_prev, ready.rdy_next);
166 thr->ready.rdy_queue->thread_list[priority] 166 thr->ready.rdy_queue->thread_list[priority] = thread_list;
167 167
168 return SOS_OK; 168 return SOS_OK;
169 } 169 }
170 170
171 171
172 172
173 173
174 174
175 175
176 static sos_bool_t 176 static sos_bool_t
177 thread_expired_its_quantuum(struct sos_thread 177 thread_expired_its_quantuum(struct sos_thread *thr)
178 { 178 {
179 sos_sched_priority_t prio = sos_thread_get_p 179 sos_sched_priority_t prio = sos_thread_get_priority(thr);
180 180
181 181
182 if (SOS_SCHED_PRIO_IS_RT(prio)) 182 if (SOS_SCHED_PRIO_IS_RT(prio))
183 return FALSE; 183 return FALSE;
184 184
185 185
186 186
187 187
188 if (sos_time_cmp(& thr->user_time_spent_in_s 188 if (sos_time_cmp(& thr->user_time_spent_in_slice,
189 & time_slice[prio]) >= 0) 189 & time_slice[prio]) >= 0)
190 return TRUE; 190 return TRUE;
191 191
192 return FALSE; 192 return FALSE;
193 } 193 }
194 194
195 195
/*
 * Elect the next thread to run, given the thread that was running.
 *
 * Steps, in order (the ordering matters):
 *   1. If the current thread used up its quantum, reset its slice
 *      accounting and force a yield.
 *   2. Requeue the current thread unless it is ZOMBIE (being
 *      destroyed) or BLOCKED (parked on a wait queue elsewhere):
 *      yielding TS threads go to the expired queue, yielding RT
 *      threads back to the active queue, non-yielding threads to the
 *      head of the active queue so they are re-elected at once.
 *   3. If the active queue drained, swap active and expired queues.
 *   4. Pop the highest-priority thread from the active queue.
 *
 * @param current_thread  The thread that was running
 * @param do_yield        TRUE to force the current thread to give up
 *                        the CPU even if its quantum is not exhausted
 * @return the next thread to run (never NULL: at least the idle
 *         thread is expected to be READY, otherwise FATAL)
 */
struct sos_thread * sos_reschedule(struct sos_thread *current_thread,
                                   sos_bool_t do_yield)
{
  sos_sched_priority_t prio;

  /* Step 1: quantum exhausted => restart slice accounting and yield */
  if (thread_expired_its_quantuum(current_thread))
    {
      memset(& current_thread->user_time_spent_in_slice,
             0x0, sizeof(struct sos_time));

      do_yield = TRUE;
    }

  if (SOS_THR_ZOMBIE == current_thread->state)
    {
      /* Thread is being destroyed: do not requeue it */

    }
  else if (SOS_THR_BLOCKED != current_thread->state)
    {
      /* Step 2: thread is still runnable — put it back in a queue */

      if (do_yield)
        {
          /* Yielding: RT threads stay eligible in the active queue;
             TS threads wait in the expired queue for the next swap */
          if (SOS_SCHED_PRIO_IS_RT(sos_thread_get_priority(current_thread)))
            add_in_ready_queue(active_queue, current_thread, TRUE);
          else
            add_in_ready_queue(expired_queue, current_thread, TRUE);
        }
      else
        {
          /* Not yielding: head of the active queue, so it is the
             first candidate at its priority below */
          add_in_ready_queue(active_queue, current_thread, FALSE);
        }
    }

  /* Step 3: active queue empty => swap roles of the two queues */

  if (active_queue->nr_threads <= 0)
    {
      /* Simple pointer swap; the expired threads become runnable */
      struct sos_sched_queue *q;
      q = active_queue;
      active_queue = expired_queue;
      expired_queue = q;
    }

  /* Step 4: pick the first thread of the highest non-empty
     priority list in the active queue */

  for (prio = SOS_SCHED_PRIO_HIGHEST ; prio <= SOS_SCHED_PRIO_LOWEST ; prio ++)
    {
      struct sos_thread *next_thr;

      if (list_is_empty_named(active_queue->thread_list[prio],
                              ready.rdy_prev, ready.rdy_next))
        continue;

      /* Unlink the elected thread from the queue */
      next_thr = list_pop_head_named(active_queue->thread_list[prio],
                                     ready.rdy_prev, ready.rdy_next);
      active_queue->nr_threads --;

      return next_thr;
    }

  /* Unreachable as long as an idle thread is always READY */
  SOS_FATAL_ERROR("No kernel thread ready ?!");
  return NULL;
}
270 270
271 271
272 sos_ret_t sos_sched_do_timer_tick() 272 sos_ret_t sos_sched_do_timer_tick()
273 { 273 {
274 struct sos_thread *interrupted_thread = sos_ 274 struct sos_thread *interrupted_thread = sos_thread_get_current();
275 struct sos_time tick_duration; 275 struct sos_time tick_duration;
276 sos_bool_t cur_is_user; 276 sos_bool_t cur_is_user;
277 sos_ui32_t nb_user_ready = 0; 277 sos_ui32_t nb_user_ready = 0;
278 sos_ui32_t nb_kernel_ready = 0; 278 sos_ui32_t nb_kernel_ready = 0;
279 int prio; 279 int prio;
280 280
281 sos_time_get_tick_resolution(& tick_duration 281 sos_time_get_tick_resolution(& tick_duration);
282 282
283 283
284 if (sos_cpu_context_is_in_user_mode(interrup 284 if (sos_cpu_context_is_in_user_mode(interrupted_thread->cpu_state))
285 { 285 {
286 cur_is_user = TRUE; 286 cur_is_user = TRUE;
287 287
288 288
289 sos_time_inc(& interrupted_thread->rusag 289 sos_time_inc(& interrupted_thread->rusage.ru_utime,
290 & tick_duration); 290 & tick_duration);
291 291
292 292
293 sos_time_inc(& interrupted_thread->user_ 293 sos_time_inc(& interrupted_thread->user_time_spent_in_slice,
294 & tick_duration); 294 & tick_duration);
295 } 295 }
296 else 296 else
297 { 297 {
298 cur_is_user = FALSE; 298 cur_is_user = FALSE;
299 299
300 300
301 sos_time_inc(& interrupted_thread->rusag 301 sos_time_inc(& interrupted_thread->rusage.ru_stime,
302 & tick_duration); 302 & tick_duration);
303 } 303 }
304 304
305 305
306 306
307 for (prio = SOS_SCHED_PRIO_HIGHEST ; prio <= 307 for (prio = SOS_SCHED_PRIO_HIGHEST ; prio <= SOS_SCHED_PRIO_LOWEST ; prio ++)
308 { 308 {
309 struct sos_thread *thr; 309 struct sos_thread *thr;
310 int nb_thrs; 310 int nb_thrs;
311 311
312 list_foreach_forward_named(active_queue- 312 list_foreach_forward_named(active_queue->thread_list[prio],
313 thr, nb_thrs, 313 thr, nb_thrs,
314 ready.rdy_pre 314 ready.rdy_prev, ready.rdy_next)
315 { 315 {
316 if (sos_cpu_context_is_in_user_mode( 316 if (sos_cpu_context_is_in_user_mode(thr->cpu_state))
317 nb_user_ready ++; 317 nb_user_ready ++;
318 else 318 else
319 nb_kernel_ready ++; 319 nb_kernel_ready ++;
320 } 320 }
321 321
322 list_foreach_forward_named(expired_queue 322 list_foreach_forward_named(expired_queue->thread_list[prio],
323 thr, nb_thrs, 323 thr, nb_thrs,
324 ready.rdy_pre 324 ready.rdy_prev, ready.rdy_next)
325 { 325 {
326 if (sos_cpu_context_is_in_user_mode( 326 if (sos_cpu_context_is_in_user_mode(thr->cpu_state))
327 nb_user_ready ++; 327 nb_user_ready ++;
328 else 328 else
329 nb_kernel_ready ++; 329 nb_kernel_ready ++;
330 } 330 }
331 } 331 }
332 332
333 sos_load_do_timer_tick(cur_is_user, 333 sos_load_do_timer_tick(cur_is_user,
334 nb_user_ready, 334 nb_user_ready,
335 nb_kernel_ready); 335 nb_kernel_ready);
336 336
337 return SOS_OK; 337 return SOS_OK;
338 } 338 }