Diff markup
001 001
002 002
003 003
004 004
005 005
006 006
007 007
008 008
009 009
010 010
011 011
012 012
013 013
014 014
015 015
016 016
017 017
018 018
019 #include <sos/errno.h> 019 #include <sos/errno.h>
020 #include <sos/klibc.h> 020 #include <sos/klibc.h>
021 #include <sos/assert.h> 021 #include <sos/assert.h>
022 #include <sos/list.h> 022 #include <sos/list.h>
>> 023 #include <sos/calcload.h>
023 024
024 #include "sched.h" 025 #include "sched.h"
025 026
026 027
027 028
028 029
029 !! 030
030 !! 031
031 !! 032
032 !! 033
033 !! 034
>> 035
034 036
035 static struct !! 037 struct sos_sched_queue
036 { 038 {
037 unsigned int nr_threads; 039 unsigned int nr_threads;
038 struct sos_thread *thread_list; !! 040 struct sos_thread *thread_list[SOS_SCHED_NUM_PRIO];
039 } ready_queue; !! 041 };
>> 042
>> 043
>> 044
>> 045
>> 046
>> 047
>> 048
>> 049 static struct sos_sched_queue *active_queue, *expired_queue;
>> 050
>> 051
>> 052
>> 053
>> 054
>> 055 static struct sos_sched_queue sched_queue[2];
>> 056
>> 057
>> 058
>> 059
>> 060
>> 061 struct sos_time time_slice[SOS_SCHED_NUM_PRIO];
040 062
041 063
042 sos_ret_t sos_sched_subsystem_setup() 064 sos_ret_t sos_sched_subsystem_setup()
043 { 065 {
044 memset(& ready_queue, 0x0, sizeof(ready_queue)); !! 066 sos_sched_priority_t prio;
>> 067
>> 068 memset(sched_queue, 0x0, sizeof(sched_queue));
>> 069 active_queue = & sched_queue[0];
>> 070 expired_queue = & sched_queue[1];
>> 071
>> 072
>> 073 for (prio = SOS_SCHED_PRIO_TS_HIGHEST ;
>> 074 prio <= SOS_SCHED_PRIO_TS_LOWEST ;
>> 075 prio ++)
>> 076 {
>> 077 unsigned int ms;
>> 078 ms = SOS_TIME_SLICE_MIN
>> 079 + (SOS_TIME_SLICE_MAX - SOS_TIME_SLICE_MIN)
>> 080 * (prio - SOS_SCHED_PRIO_TS_HIGHEST)
>> 081 / (SOS_SCHED_PRIO_TS_LOWEST - SOS_SCHED_PRIO_TS_HIGHEST);
>> 082 time_slice[prio].sec = ms / 1000;
>> 083 time_slice[prio].nanosec = 1000000UL * (ms % 1000);
>> 084 }
045 085
046 return SOS_OK; 086 return SOS_OK;
047 } 087 }
048 088
049 089
050 090
051 091
052 092
053 093
054 094
055 095
056 096
057 static sos_ret_t add_in_ready_queue(struct sos_thread *thr, !! 097 static sos_ret_t add_in_ready_queue(struct sos_sched_queue *q,
>> 098 struct sos_thread *thr,
058 sos_bool_t insert_at_tail) 099 sos_bool_t insert_at_tail)
059 { 100 {
>> 101 sos_sched_priority_t prio;
060 102
061 SOS_ASSERT_FATAL( (SOS_THR_CREATED == thr->state) 103 SOS_ASSERT_FATAL( (SOS_THR_CREATED == thr->state)
062 || (SOS_THR_RUNNING == thr->state) 104 || (SOS_THR_RUNNING == thr->state)
063 || (SOS_THR_BLOCKED == thr->state) ); 105 || (SOS_THR_BLOCKED == thr->state) );
064 106
065 107
>> 108 prio = sos_thread_get_priority(thr);
066 if (insert_at_tail) 109 if (insert_at_tail)
067 list_add_tail_named(ready_queue.thread_list, thr, !! 110 list_add_tail_named(q->thread_list[prio], thr,
068 ready.rdy_prev, ready.rdy_next); 111 ready.rdy_prev, ready.rdy_next);
069 else 112 else
070 list_add_head_named(ready_queue.thread_list, thr, !! 113 list_add_head_named(q->thread_list[prio], thr,
071 ready.rdy_prev, ready.rdy_next); 114 ready.rdy_prev, ready.rdy_next);
072 ready_queue.nr_threads ++; !! 115 thr->ready.rdy_queue = q;
>> 116 q->nr_threads ++;
073 117
074 118
075 thr->state = SOS_THR_READY; 119 thr->state = SOS_THR_READY;
076 120
077 return SOS_OK; 121 return SOS_OK;
078 } 122 }
079 123
080 124
081 sos_ret_t sos_sched_set_ready(struct sos_thread *thr) 125 sos_ret_t sos_sched_set_ready(struct sos_thread *thr)
082 { 126 {
083 sos_ret_t retval; 127 sos_ret_t retval;
084 128
085 129
086 if (SOS_THR_READY == thr->state) 130 if (SOS_THR_READY == thr->state)
087 return SOS_OK; 131 return SOS_OK;
088 132
089 !! 133
090 retval = add_in_ready_queue(thr, TRUE); !! 134 memset(& thr->running.user_time_spent_in_slice, 0x0, sizeof(struct sos_time));
>> 135
>> 136 if (SOS_SCHED_PRIO_IS_RT(sos_thread_get_priority(thr)))
>> 137 {
>> 138
>> 139 retval = add_in_ready_queue(active_queue, thr, TRUE);
>> 140 }
>> 141 else
>> 142 {
>> 143
>> 144 retval = add_in_ready_queue(expired_queue, thr, TRUE);
>> 145 }
091 146
092 return retval; 147 return retval;
093 } 148 }
094 149
095 150
>> 151 sos_ret_t sos_sched_change_priority(struct sos_thread *thr,
>> 152 sos_sched_priority_t priority)
>> 153 {
>> 154 struct sos_thread *thread_list;
>> 155 SOS_ASSERT_FATAL(SOS_THR_READY == thr->state);
>> 156
>> 157
>> 158 thread_list
>> 159 = thr->ready.rdy_queue->thread_list[sos_thread_get_priority(thr)];
>> 160
>> 161 list_delete_named(thread_list, thr, ready.rdy_prev, ready.rdy_next);
>> 162
>> 163
>> 164 thread_list = thr->ready.rdy_queue->thread_list[priority];
>> 165 list_add_tail_named(thread_list, thr, ready.rdy_prev, ready.rdy_next);
>> 166 thr->ready.rdy_queue->thread_list[priority] = thread_list;
>> 167
>> 168 return SOS_OK;
>> 169 }
>> 170
>> 171
>> 172
>> 173
>> 174
>> 175
>> 176 static sos_bool_t
>> 177 thread_expired_its_quantuum(struct sos_thread *thr)
>> 178 {
>> 179 sos_sched_priority_t prio = sos_thread_get_priority(thr);
>> 180
>> 181
>> 182 if (SOS_SCHED_PRIO_IS_RT(prio))
>> 183 return FALSE;
>> 184
>> 185
>> 186
>> 187
>> 188 if (sos_time_cmp(& thr->running.user_time_spent_in_slice,
>> 189 & time_slice[prio]) >= 0)
>> 190 return TRUE;
>> 191
>> 192 return FALSE;
>> 193 }
>> 194
>> 195
096 struct sos_thread * sos_reschedule(struct sos_thread *current_thread, 196 struct sos_thread * sos_reschedule(struct sos_thread *current_thread,
097 sos_bool_t do_yield) 197 sos_bool_t do_yield)
098 { 198 {
>> 199 sos_sched_priority_t prio;
>> 200
>> 201
>> 202
>> 203 if (thread_expired_its_quantuum(current_thread))
>> 204 {
>> 205
>> 206 memset(& current_thread->running.user_time_spent_in_slice,
>> 207 0x0, sizeof(struct sos_time));
>> 208
>> 209 do_yield = TRUE;
>> 210 }
099 211
100 if (SOS_THR_ZOMBIE == current_thread->state) 212 if (SOS_THR_ZOMBIE == current_thread->state)
101 { 213 {
102 214
103 215
104 216
105 } 217 }
106 else if (SOS_THR_BLOCKED != current_thread->state) 218 else if (SOS_THR_BLOCKED != current_thread->state)
107 { 219 {
108 220
109 221
110 if (do_yield) 222 if (do_yield)
111 !! 223 {
112 add_in_ready_queue(current_thread, TRUE); !! 224
>> 225 if (SOS_SCHED_PRIO_IS_RT(sos_thread_get_priority(current_thread)))
>> 226 add_in_ready_queue(active_queue, current_thread, TRUE);
>> 227 else
>> 228 add_in_ready_queue(expired_queue, current_thread, TRUE);
>> 229 }
113 else 230 else
114 !! 231 {
115 add_in_ready_queue(current_thread, FALSE); !! 232
>> 233 add_in_ready_queue(active_queue, current_thread, FALSE);
>> 234 }
116 } 235 }
117 236
118 !! 237
119 if (ready_queue.nr_threads > 0) !! 238
>> 239 if (active_queue->nr_threads <= 0)
>> 240 {
>> 241
>> 242 struct sos_sched_queue *q;
>> 243 q = active_queue;
>> 244 active_queue = expired_queue;
>> 245 expired_queue = q;
>> 246 }
>> 247
>> 248
>> 249
>> 250 for (prio = SOS_SCHED_PRIO_HIGHEST ; prio <= SOS_SCHED_PRIO_LOWEST ; prio ++)
120 { 251 {
121 struct sos_thread *next_thr; 252 struct sos_thread *next_thr;
122 253
>> 254 if (list_is_empty_named(active_queue->thread_list[prio],
>> 255 ready.rdy_prev, ready.rdy_next))
>> 256 continue;
>> 257
123 258
124 next_thr = list_pop_head_named(ready_queue.thread_list, !! 259 next_thr = list_pop_head_named(active_queue->thread_list[prio],
125 ready.rdy_prev, ready.rdy_next); 260 ready.rdy_prev, ready.rdy_next);
126 ready_queue.nr_threads --; !! 261 active_queue->nr_threads --;
127 262
128 return next_thr; 263 return next_thr;
129 } 264 }
130 265
>> 266
131 SOS_FATAL_ERROR("No kernel thread ready ?!"); 267 SOS_FATAL_ERROR("No kernel thread ready ?!");
132 return NULL; 268 return NULL;
>> 269 }
>> 270
>> 271
>> 272 sos_ret_t sos_sched_do_timer_tick()
>> 273 {
>> 274 struct sos_thread *interrupted_thread = sos_thread_get_current();
>> 275 struct sos_time tick_duration;
>> 276 sos_bool_t cur_is_user;
>> 277 sos_ui32_t nb_user_ready = 0;
>> 278 sos_ui32_t nb_kernel_ready = 0;
>> 279 int prio;
>> 280
>> 281 sos_time_get_tick_resolution(& tick_duration);
>> 282
>> 283
>> 284 if (sos_cpu_context_is_in_user_mode(interrupted_thread->cpu_state))
>> 285 {
>> 286 cur_is_user = TRUE;
>> 287
>> 288
>> 289 sos_time_inc(& interrupted_thread->rusage.ru_utime,
>> 290 & tick_duration);
>> 291
>> 292
>> 293 sos_time_inc(& interrupted_thread->running.user_time_spent_in_slice,
>> 294 & tick_duration);
>> 295 }
>> 296 else
>> 297 {
>> 298 cur_is_user = FALSE;
>> 299
>> 300
>> 301 sos_time_inc(& interrupted_thread->rusage.ru_stime,
>> 302 & tick_duration);
>> 303 }
>> 304
>> 305
>> 306
>> 307 for (prio = SOS_SCHED_PRIO_HIGHEST ; prio <= SOS_SCHED_PRIO_LOWEST ; prio ++)
>> 308 {
>> 309 struct sos_thread *thr;
>> 310 int nb_thrs;
>> 311
>> 312 list_foreach_forward_named(active_queue->thread_list[prio],
>> 313 thr, nb_thrs,
>> 314 ready.rdy_prev, ready.rdy_next)
>> 315 {
>> 316 if (sos_cpu_context_is_in_user_mode(thr->cpu_state))
>> 317 nb_user_ready ++;
>> 318 else
>> 319 nb_kernel_ready ++;
>> 320 }
>> 321
>> 322 list_foreach_forward_named(expired_queue->thread_list[prio],
>> 323 thr, nb_thrs,
>> 324 ready.rdy_prev, ready.rdy_next)
>> 325 {
>> 326 if (sos_cpu_context_is_in_user_mode(thr->cpu_state))
>> 327 nb_user_ready ++;
>> 328 else
>> 329 nb_kernel_ready ++;
>> 330 }
>> 331 }
>> 332
>> 333 sos_load_do_timer_tick(cur_is_user,
>> 334 nb_user_ready,
>> 335 nb_kernel_ready);
>> 336
>> 337 return SOS_OK;
133 } 338 }