/*
 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
 * policies)
 */

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static void update_curr_rt(struct rq *rq)
{
        struct task_struct *curr = rq->curr;
        u64 delta_exec;

        if (!task_has_rt_policy(curr))
                return;

        delta_exec = rq->clock - curr->se.exec_start;
        if (unlikely((s64)delta_exec < 0))
                delta_exec = 0;

        schedstat_set(curr->se.exec_max, max(curr->se.exec_max, delta_exec));

        curr->se.sum_exec_runtime += delta_exec;
        curr->se.exec_start = rq->clock;
        cpuacct_charge(curr, delta_exec);
}

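/*
 * Note on the (s64) cast above: delta_exec is unsigned, so if the
 * runqueue clock ever reads earlier than the recorded exec_start the
 * subtraction would wrap to a huge value; clamping a negative signed
 * delta to zero avoids charging the task that bogus runtime.
 */
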
static inline void inc_rt_tasks(struct task_struct *p, struct rq *rq)
{
        WARN_ON(!rt_task(p));
        rq->rt.rt_nr_running++;
}

static inline void dec_rt_tasks(struct task_struct *p, struct rq *rq)
{
        WARN_ON(!rt_task(p));
        WARN_ON(!rq->rt.rt_nr_running);
        rq->rt.rt_nr_running--;
}

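/*
 * rt_nr_running is a plain count of runnable RT tasks on this
 * runqueue; the WARN_ONs catch accounting imbalances, such as
 * counting a non-RT task or decrementing past zero.
 */
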
static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
{
        struct rt_prio_array *array = &rq->rt.active;

        list_add_tail(&p->run_list, array->queue + p->prio);
        __set_bit(p->prio, array->bitmap);
        inc_cpu_load(rq, p->se.load.weight);

        inc_rt_tasks(p, rq);
}

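/*
 * The rt_prio_array keeps one FIFO list per priority level plus a
 * bitmap with one bit per level; a set bit means the corresponding
 * list is non-empty. This pairing is what keeps enqueue, dequeue and
 * task selection O(1).
 */
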
/*
 * Adding/removing a task to/from a priority array:
 */
static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
{
        struct rt_prio_array *array = &rq->rt.active;

        update_curr_rt(rq);

        list_del(&p->run_list);
        if (list_empty(array->queue + p->prio))
                __clear_bit(p->prio, array->bitmap);
        dec_cpu_load(rq, p->se.load.weight);

        dec_rt_tasks(p, rq);
}

/*
 * Put task to the end of the run list without the overhead of dequeue
 * followed by enqueue.
 */
static void requeue_task_rt(struct rq *rq, struct task_struct *p)
{
        struct rt_prio_array *array = &rq->rt.active;

        list_move_tail(&p->run_list, array->queue + p->prio);
}

static void
yield_task_rt(struct rq *rq)
{
        requeue_task_rt(rq, rq->curr);
}

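/*
 * For an RT task, yielding simply means rotating to the tail of its
 * own priority list: the task stays runnable and never drops below
 * tasks of lower priority, it merely lets equal-priority peers run
 * first.
 */
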
/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p)
{
        if (p->prio < rq->curr->prio)
                resched_task(rq->curr);
}

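/*
 * Note: p->prio uses the kernel's inverted scale, where a numerically
 * lower value means a higher priority, so the "<" above asks whether
 * the woken task outranks the currently running one.
 */
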
static struct task_struct *pick_next_task_rt(struct rq *rq)
{
        struct rt_prio_array *array = &rq->rt.active;
        struct task_struct *next;
        struct list_head *queue;
        int idx;

        idx = sched_find_first_bit(array->bitmap);
        if (idx >= MAX_RT_PRIO)
                return NULL;

        queue = array->queue + idx;
        next = list_entry(queue->next, struct task_struct, run_list);

        next->se.exec_start = rq->clock;

        return next;
}

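/*
 * Example: with runnable tasks queued at priorities 3 and 10,
 * sched_find_first_bit() returns 3 and the head of queue[3] runs
 * next; a result of MAX_RT_PRIO or above means no bit is set, i.e.
 * no RT task is runnable and the next scheduling class gets a turn.
 */
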
static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
{
        update_curr_rt(rq);
        p->se.exec_start = 0;
}

#ifdef CONFIG_SMP
/*
 * Load-balancing iterator. Note: while the runqueue stays locked
 * during the whole iteration, the current task might be
 * dequeued so the iterator has to be dequeue-safe. Here we
 * achieve that by always pre-iterating before returning
 * the current task:
 */
static struct task_struct *load_balance_start_rt(void *arg)
{
        struct rq *rq = arg;
        struct rt_prio_array *array = &rq->rt.active;
        struct list_head *head, *curr;
        struct task_struct *p;
        int idx;

        idx = sched_find_first_bit(array->bitmap);
        if (idx >= MAX_RT_PRIO)
                return NULL;

        head = array->queue + idx;
        curr = head->prev;

        p = list_entry(curr, struct task_struct, run_list);

        curr = curr->prev;

        rq->rt.rt_load_balance_idx = idx;
        rq->rt.rt_load_balance_head = head;
        rq->rt.rt_load_balance_curr = curr;

        return p;
}

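/*
 * The iterator starts at head->prev, i.e. the tail of the list, and
 * walks towards the head via the ->prev links. Because the cursor in
 * rt_load_balance_curr is advanced *before* p is handed back, the
 * caller may dequeue (migrate) p without invalidating the iteration.
 */
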
static struct task_struct *load_balance_next_rt(void *arg)
{
        struct rq *rq = arg;
        struct rt_prio_array *array = &rq->rt.active;
        struct list_head *head, *curr;
        struct task_struct *p;
        int idx;

        idx = rq->rt.rt_load_balance_idx;
        head = rq->rt.rt_load_balance_head;
        curr = rq->rt.rt_load_balance_curr;

        /*
         * If we arrived back to the head again then
         * iterate to the next queue (if any):
         */
        if (unlikely(head == curr)) {
                int next_idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);

                if (next_idx >= MAX_RT_PRIO)
                        return NULL;

                idx = next_idx;
                head = array->queue + idx;
                curr = head->prev;

                rq->rt.rt_load_balance_idx = idx;
                rq->rt.rt_load_balance_head = head;
        }

        p = list_entry(curr, struct task_struct, run_list);

        curr = curr->prev;

        rq->rt.rt_load_balance_curr = curr;

        return p;
}

static unsigned long
load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
                unsigned long max_load_move,
                struct sched_domain *sd, enum cpu_idle_type idle,
                int *all_pinned, int *this_best_prio)
{
        struct rq_iterator rt_rq_iterator;

        rt_rq_iterator.start = load_balance_start_rt;
        rt_rq_iterator.next = load_balance_next_rt;
        /*
         * Pass the 'busiest' rq argument into the
         * load_balance_[start|next]_rt iterators:
         */
        rt_rq_iterator.arg = busiest;

        return balance_tasks(this_rq, this_cpu, busiest, max_load_move, sd,
                             idle, all_pinned, this_best_prio,
                             &rt_rq_iterator);
}

static int
move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
                 struct sched_domain *sd, enum cpu_idle_type idle)
{
        struct rq_iterator rt_rq_iterator;

        rt_rq_iterator.start = load_balance_start_rt;
        rt_rq_iterator.next = load_balance_next_rt;
        rt_rq_iterator.arg = busiest;

        return iter_move_one_task(this_rq, this_cpu, busiest, sd, idle,
                                  &rt_rq_iterator);
}
#endif

static void task_tick_rt(struct rq *rq, struct task_struct *p)
{
        update_curr_rt(rq);

        /*
         * RR tasks need a special form of timeslice management.
         * FIFO tasks have no timeslices.
         */
        if (p->policy != SCHED_RR)
                return;

        if (--p->time_slice)
                return;

        p->time_slice = DEF_TIMESLICE;

        /*
         * Requeue to the end of queue if we are not the only element
         * on the queue:
         */
        if (p->run_list.prev != p->run_list.next) {
                requeue_task_rt(rq, p);
                set_tsk_need_resched(p);
        }
}

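/*
 * The prev != next test above is a cheap "more than one task at this
 * priority" check: in a circular list holding a single element, both
 * pointers reference the list head. A lone RR task therefore keeps
 * running with a fresh timeslice instead of being pointlessly
 * requeued and rescheduled.
 */
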
static void set_curr_task_rt(struct rq *rq)
{
        struct task_struct *p = rq->curr;

        p->se.exec_start = rq->clock;
}

const struct sched_class rt_sched_class = {
        .next                   = &fair_sched_class,
        .enqueue_task           = enqueue_task_rt,
        .dequeue_task           = dequeue_task_rt,
        .yield_task             = yield_task_rt,

        .check_preempt_curr     = check_preempt_curr_rt,

        .pick_next_task         = pick_next_task_rt,
        .put_prev_task          = put_prev_task_rt,

#ifdef CONFIG_SMP
        .load_balance           = load_balance_rt,
        .move_one_task          = move_one_task_rt,
#endif

        .set_curr_task          = set_curr_task_rt,
        .task_tick              = task_tick_rt,
};
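
/*
 * The .next link chains this class ahead of the fair (CFS) class; in
 * this kernel the core scheduler walks the class list in that order,
 * so runnable SCHED_FIFO/SCHED_RR tasks are always offered the CPU
 * before any SCHED_NORMAL task is considered.
 */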