SimpleOS

LXR

Navigation



Site hébergé par : enix

The LXR Cross Referencer for SOS

source navigation ]
diff markup ]
identifier search ]
general search ]
 
 
Article:1 ] [ 2 ] [ 3 ] [ 4 ] [ 5 ] [ 6 ] [ 6.5 ] [ 7 ] [ 7.5 ] [ 8 ] [ 9 ] [ 9.5 ]

001 /* Copyright (C) 2004 David Decotigny
002 
003    This program is free software; you can redistribute it and/or
004    modify it under the terms of the GNU General Public License
005    as published by the Free Software Foundation; either version 2
006    of the License, or (at your option) any later version.
007    
008    This program is distributed in the hope that it will be useful,
009    but WITHOUT ANY WARRANTY; without even the implied warranty of
010    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
011    GNU General Public License for more details.
012    
013    You should have received a copy of the GNU General Public License
014    along with this program; if not, write to the Free Software
015    Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
016    USA. 
017 */
018 
019 #include <sos/errno.h>
020 #include <sos/klibc.h>
021 #include <sos/assert.h>
022 #include <sos/list.h>
023 #include <sos/calcload.h>
024 
025 #include "sched.h"
026 
027 
/**
 * The definition of the scheduler queue. We could have used a normal
 * kwaitq here, it would have had the same properties (regarding
 * priority ordering mainly). But we don't bother with size
 * considerations here (in kwaitq, we had better make the kwaitq
 * structure as small as possible because there are a lot of kwaitq in
 * the system: at least 1 per opened file), so that we can implement a
 * much faster way of handling the prioritized jobs.
 */
struct sos_sched_queue
{
  /* Total number of threads currently linked in this queue, summed
     over all the priority lists below */
  unsigned int nr_threads;
  /* One ready-list head per priority level, linked through the
     threads' ready.rdy_prev / ready.rdy_next fields */
  struct sos_thread *thread_list[SOS_SCHED_NUM_PRIO];
};
042 
043 
/**
 * We manage 2 queues: a queue being scanned for ready threads
 * (active_queue) and a queue to store the threads having expired
 * their time quantum.
 */
static struct sos_sched_queue *active_queue, *expired_queue;


/**
 * The instances for the active/expired queues. active_queue and
 * expired_queue above always point to one element each of this array,
 * and get swapped when the active queue drains (see sos_reschedule).
 */
static struct sos_sched_queue sched_queue[2];


/**
 * The array giving the timeslice corresponding to each priority
 * level. Filled once by sos_sched_subsystem_setup().
 */
struct sos_time time_slice[SOS_SCHED_NUM_PRIO];
062 
063 
064 sos_ret_t sos_sched_subsystem_setup()
065 {
066   sos_sched_priority_t prio;
067 
068   memset(sched_queue, 0x0, sizeof(sched_queue));
069   active_queue  = & sched_queue[0];
070   expired_queue = & sched_queue[1];
071 
072   /* pre-compute time slices */
073   for (prio = SOS_SCHED_PRIO_TS_HIGHEST ;
074        prio <= SOS_SCHED_PRIO_TS_LOWEST ;
075        prio ++)
076     {
077       unsigned int ms;
078       ms = SOS_TIME_SLICE_MIN
079            + (SOS_TIME_SLICE_MAX - SOS_TIME_SLICE_MIN)
080              * (prio - SOS_SCHED_PRIO_TS_HIGHEST)
081              / (SOS_SCHED_PRIO_TS_LOWEST - SOS_SCHED_PRIO_TS_HIGHEST);
082       time_slice[prio].sec     = ms / 1000;
083       time_slice[prio].nanosec = 1000000UL * (ms % 1000);
084     }
085 
086   return SOS_OK;
087 }
088 
089 
090 /**
091  * Helper function to add a thread in a ready queue AND to change the
092  * state of the given thread to "READY".
093  *
094  * @param insert_at_tail TRUE to tell to add the thread at the end of
095  * the ready list. Otherwise it is added at the head of it.
096  */
097 static sos_ret_t add_in_ready_queue(struct sos_sched_queue *q,
098                                     struct sos_thread *thr,
099                                     sos_bool_t insert_at_tail)
100 {
101   sos_sched_priority_t prio;
102 
103   SOS_ASSERT_FATAL( (SOS_THR_CREATED == thr->state)
104                     || (SOS_THR_RUNNING == thr->state) /* Yield */
105                     || (SOS_THR_BLOCKED == thr->state) );
106 
107   /* Add the thread to the CPU queue */
108   prio = sos_thread_get_priority(thr);
109   if (insert_at_tail)
110     list_add_tail_named(q->thread_list[prio], thr,
111                         ready.rdy_prev, ready.rdy_next);
112   else
113     list_add_head_named(q->thread_list[prio], thr,
114                         ready.rdy_prev, ready.rdy_next);
115   thr->ready.rdy_queue = q;
116   q->nr_threads ++;
117 
118   /* Ok, thread is now really ready to be (re)started */
119   thr->state = SOS_THR_READY;
120 
121   return SOS_OK;
122 }
123 
124 
125 sos_ret_t sos_sched_set_ready(struct sos_thread *thr)
126 {
127   sos_ret_t retval;
128 
129   /* Don't do anything for already ready threads */
130   if (SOS_THR_READY == thr->state)
131     return SOS_OK;
132 
133   /* Reset the CPU time used in the quantuum */
134   memset(& thr->user_time_spent_in_slice, 0x0, sizeof(struct sos_time));
135 
136   if (SOS_SCHED_PRIO_IS_RT(sos_thread_get_priority(thr)))
137     {
138       /* Real-time thread: schedule it for the present turn */
139       retval = add_in_ready_queue(active_queue, thr, TRUE);
140     }
141   else
142     {
143       /* Non real-time thread: schedule it for next turn */
144       retval = add_in_ready_queue(expired_queue, thr, TRUE);
145     }
146 
147   return retval;
148 }
149 
150 
151 sos_ret_t sos_sched_change_priority(struct sos_thread *thr,
152                                     sos_sched_priority_t priority)
153 {
154   struct sos_thread *thread_list;
155   SOS_ASSERT_FATAL(SOS_THR_READY == thr->state);
156 
157   /* Temp variable */
158   thread_list
159     = thr->ready.rdy_queue->thread_list[sos_thread_get_priority(thr)];
160 
161   list_delete_named(thread_list, thr, ready.rdy_prev, ready.rdy_next);
162 
163   /* Update lists */
164   thread_list = thr->ready.rdy_queue->thread_list[priority];
165   list_add_tail_named(thread_list, thr, ready.rdy_prev, ready.rdy_next);
166   thr->ready.rdy_queue->thread_list[priority] = thread_list;
167 
168   return SOS_OK;
169 }
170 
171 
172 /**
173  * Helper function to determine whether the current thread expired its
174  * time quantuum
175  */
176 static sos_bool_t
177 thread_expired_its_quantuum(struct sos_thread *thr)
178 {
179   sos_sched_priority_t prio = sos_thread_get_priority(thr);
180 
181   /* No timesharing/round-robin for "real-time" threads */
182   if (SOS_SCHED_PRIO_IS_RT(prio))
183     return FALSE;
184 
185   /* Current (user) thread expired its time quantuum ?  A kernel
186      thread never expires because sos_sched_do_timer_tick() below
187      won't update its user_time_spent_in_slice */
188   if (sos_time_cmp(& thr->user_time_spent_in_slice,
189                    & time_slice[prio]) >= 0)
190       return TRUE;
191 
192   return FALSE;
193 }
194 
195 
/**
 * Elect the next thread to run. Re-queues @current_thread when it is
 * still runnable, swaps the active/expired queues when the active one
 * drains, then returns the head of the highest-priority non-empty
 * ready list.
 *
 * @param do_yield TRUE when the current thread releases the CPU for
 * this scheduling turn (it then gets queued for a LATER turn instead
 * of keeping the head of the active queue).
 *
 * @return the thread to switch to; never returns NULL (a fatal error
 * is raised when no thread at all is ready).
 */
struct sos_thread * sos_reschedule(struct sos_thread *current_thread,
                                   sos_bool_t do_yield)
{
  sos_sched_priority_t prio;

  /* Force the current thread to release the CPU if it expired its
     quantum */
  if (thread_expired_its_quantuum(current_thread))
    {
      /* Reset the CPU time used in the quantum */
      memset(& current_thread->user_time_spent_in_slice,
             0x0, sizeof(struct sos_time));

      do_yield = TRUE;
    }

  if (SOS_THR_ZOMBIE == current_thread->state)
    {
      /* Don't think of returning to this thread since it is
         terminated */
      /* Nop */
    }
  else if (SOS_THR_BLOCKED != current_thread->state)
    {
      /* Take into account the current executing thread unless it is
         marked blocked */
      if (do_yield)
        {
          /* Ok, reserve it for next turn: real-time threads stay in
             the active queue, time-sharing threads go to the expired
             queue */
          if (SOS_SCHED_PRIO_IS_RT(sos_thread_get_priority(current_thread)))
            add_in_ready_queue(active_queue, current_thread, TRUE);
          else
            add_in_ready_queue(expired_queue, current_thread, TRUE);
        }
      else
        {
          /* Put it at the head of the active list: it will be
             re-elected immediately unless a higher-priority thread is
             ready */
          add_in_ready_queue(active_queue, current_thread, FALSE);
        }
    }


  /* Active queue is empty ? */
  if (active_queue->nr_threads <= 0)
    {
      /* Yes: Exchange it with the expired queue, which starts a new
         scheduling turn */
      struct sos_sched_queue *q;
      q = active_queue;
      active_queue = expired_queue;
      expired_queue = q;
    }

  /* Now loop over the priorities in the active queue, from highest to
     lowest, looking for a non-empty list */
  for (prio = SOS_SCHED_PRIO_HIGHEST ; prio <= SOS_SCHED_PRIO_LOWEST ; prio ++)
    {
      struct sos_thread *next_thr;

      if (list_is_empty_named(active_queue->thread_list[prio],
                              ready.rdy_prev, ready.rdy_next))
        continue;
      
      /* Queue is not empty: take the thread at its head */
      next_thr = list_pop_head_named(active_queue->thread_list[prio],
                                     ready.rdy_prev, ready.rdy_next);
      active_queue->nr_threads --;

      return next_thr;
    }


  /* No ready thread found at all (not even an idle thread) => the
     system cannot continue */
  SOS_FATAL_ERROR("No kernel thread ready ?!");
  return NULL;
}
270 
271 
/**
 * Clock-tick hook: charge the elapsed tick to the interrupted thread
 * (user vs. system time) and feed the load-average subsystem with the
 * current counts of ready user/kernel threads.
 *
 * @return SOS_OK (always)
 */
sos_ret_t sos_sched_do_timer_tick()
{
  struct sos_thread *interrupted_thread = sos_thread_get_current();
  struct sos_time tick_duration;
  sos_bool_t cur_is_user;
  sos_ui32_t nb_user_ready = 0;
  sos_ui32_t nb_kernel_ready = 0;
  int prio;

  sos_time_get_tick_resolution(& tick_duration);

  /* Update the timing statistics of the interrupted thread */
  if (sos_cpu_context_is_in_user_mode(interrupted_thread->cpu_state))
    {
      cur_is_user = TRUE;

      /* User time */
      sos_time_inc(& interrupted_thread->rusage.ru_utime,
                   & tick_duration);

      /* Update time spent in current timeslice ONLY for a user
         thread: this is what makes kernel threads immune to quantum
         expiration (see thread_expired_its_quantuum) */
      sos_time_inc(& interrupted_thread->user_time_spent_in_slice,
                   & tick_duration);
    }
  else
    {
      cur_is_user = FALSE;

      /* System time */
      sos_time_inc(& interrupted_thread->rusage.ru_stime,
                   & tick_duration);
    }


  /* Update load stats: count the ready user/kernel threads over both
     the active and the expired queues, at every priority level */
  for (prio = SOS_SCHED_PRIO_HIGHEST ; prio <= SOS_SCHED_PRIO_LOWEST ; prio ++)
    {
      struct sos_thread *thr;
      int nb_thrs;

      list_foreach_forward_named(active_queue->thread_list[prio],
                                 thr, nb_thrs,
                                 ready.rdy_prev, ready.rdy_next)
        {
          if (sos_cpu_context_is_in_user_mode(thr->cpu_state))
            nb_user_ready ++;
          else
            nb_kernel_ready ++;
        }

      list_foreach_forward_named(expired_queue->thread_list[prio],
                                 thr, nb_thrs,
                                 ready.rdy_prev, ready.rdy_next)
        {
          if (sos_cpu_context_is_in_user_mode(thr->cpu_state))
            nb_user_ready ++;
          else
            nb_kernel_ready ++;
        }
    }

  sos_load_do_timer_tick(cur_is_user,
                         nb_user_ready,
                         nb_kernel_ready);

  return SOS_OK;
}

source navigation ] diff markup ] identifier search ] general search ]