/*
 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
 * policies)
 */

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static inline void update_curr_rt(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	u64 delta_exec;

	if (!task_has_rt_policy(curr))
		return;

	delta_exec = rq->clock - curr->se.exec_start;
	if (unlikely((s64)delta_exec < 0))
		delta_exec = 0;

	schedstat_set(curr->se.exec_max, max(curr->se.exec_max, delta_exec));

	curr->se.sum_exec_runtime += delta_exec;
	curr->se.exec_start = rq->clock;
}

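/*
 * The RT run-queue is an rt_prio_array: one FIFO list per priority level
 * plus a bitmap of non-empty levels. Enqueueing appends the task to the
 * list for its priority and marks that level as occupied in the bitmap.
 */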
static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
{
	struct rt_prio_array *array = &rq->rt.active;

	list_add_tail(&p->run_list, array->queue + p->prio);
	__set_bit(p->prio, array->bitmap);
}

/*
 * Adding/removing a task to/from a priority array:
 */
static void
dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep, u64 now)
{
	struct rt_prio_array *array = &rq->rt.active;

	update_curr_rt(rq);

	list_del(&p->run_list);
	if (list_empty(array->queue + p->prio))
		__clear_bit(p->prio, array->bitmap);
}

/*
 * Put task to the end of the run list without the overhead of dequeue
 * followed by enqueue.
 */
static void requeue_task_rt(struct rq *rq, struct task_struct *p)
{
	struct rt_prio_array *array = &rq->rt.active;

	list_move_tail(&p->run_list, array->queue + p->prio);
}

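/*
 * sched_yield() for an RT task: rotate it to the tail of its priority
 * list so other tasks of the same priority get a turn.
 */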
static void
yield_task_rt(struct rq *rq, struct task_struct *p)
{
	requeue_task_rt(rq, p);
}

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p)
{
	if (p->prio < rq->curr->prio)
		resched_task(rq->curr);
}

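/*
 * Pick the highest-priority runnable RT task: find the first set bit in
 * the priority bitmap and take the first task queued at that level.
 * exec_start is reset so update_curr_rt() charges it from this point on.
 */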
static struct task_struct *pick_next_task_rt(struct rq *rq, u64 now)
{
	struct rt_prio_array *array = &rq->rt.active;
	struct task_struct *next;
	struct list_head *queue;
	int idx;

	idx = sched_find_first_bit(array->bitmap);
	if (idx >= MAX_RT_PRIO)
		return NULL;

	queue = array->queue + idx;
	next = list_entry(queue->next, struct task_struct, run_list);

	next->se.exec_start = rq->clock;

	return next;
}

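/*
 * A task is being switched out: fold its runtime into the statistics
 * and clear exec_start, since it is no longer executing.
 */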
static void put_prev_task_rt(struct rq *rq, struct task_struct *p, u64 now)
{
	update_curr_rt(rq);
	p->se.exec_start = 0;
}

/*
 * Load-balancing iterator. Note: while the runqueue stays locked
 * during the whole iteration, the current task might be
 * dequeued so the iterator has to be dequeue-safe. Here we
 * achieve that by always pre-iterating before returning
 * the current task:
 */
static struct task_struct *load_balance_start_rt(void *arg)
{
	struct rq *rq = arg;
	struct rt_prio_array *array = &rq->rt.active;
	struct list_head *head, *curr;
	struct task_struct *p;
	int idx;

	idx = sched_find_first_bit(array->bitmap);
	if (idx >= MAX_RT_PRIO)
		return NULL;

	head = array->queue + idx;
	curr = head->prev;

	p = list_entry(curr, struct task_struct, run_list);

	curr = curr->prev;

	rq->rt.rt_load_balance_idx = idx;
	rq->rt.rt_load_balance_head = head;
	rq->rt.rt_load_balance_curr = curr;

	return p;
}

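/*
 * Advance the iterator: return the task at the pre-iterated position and
 * step backwards through the list, falling through to the next occupied
 * priority level once the current list has been exhausted.
 */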
static struct task_struct *load_balance_next_rt(void *arg)
{
	struct rq *rq = arg;
	struct rt_prio_array *array = &rq->rt.active;
	struct list_head *head, *curr;
	struct task_struct *p;
	int idx;

	idx = rq->rt.rt_load_balance_idx;
	head = rq->rt.rt_load_balance_head;
	curr = rq->rt.rt_load_balance_curr;

	/*
	 * If we arrived back to the head again then
	 * iterate to the next queue (if any):
	 */
	if (unlikely(head == curr)) {
		int next_idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);

		if (next_idx >= MAX_RT_PRIO)
			return NULL;

		idx = next_idx;
		head = array->queue + idx;
		curr = head->prev;

		rq->rt.rt_load_balance_idx = idx;
		rq->rt.rt_load_balance_head = head;
	}

	p = list_entry(curr, struct task_struct, run_list);

	curr = curr->prev;

	rq->rt.rt_load_balance_curr = curr;

	return p;
}

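/*
 * Class hook for load balancing: wire the RT run-list iterators into the
 * generic balance_tasks() helper, which pulls tasks from the busiest
 * runqueue, and report how much load was actually moved.
 */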
static unsigned long
load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
		unsigned long max_nr_move, unsigned long max_load_move,
		struct sched_domain *sd, enum cpu_idle_type idle,
		int *all_pinned, int *this_best_prio)
{
	int nr_moved;
	struct rq_iterator rt_rq_iterator;
	unsigned long load_moved;

	rt_rq_iterator.start = load_balance_start_rt;
	rt_rq_iterator.next = load_balance_next_rt;
	/* pass 'busiest' rq argument into
	 * load_balance_[start|next]_rt iterators
	 */
	rt_rq_iterator.arg = busiest;

	nr_moved = balance_tasks(this_rq, this_cpu, busiest, max_nr_move,
			max_load_move, sd, idle, all_pinned, &load_moved,
			this_best_prio, &rt_rq_iterator);

	return load_moved;
}

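/*
 * Scheduler-tick handling for RT tasks: SCHED_RR tasks burn a timeslice
 * and are rotated to the tail of their priority list when it runs out;
 * SCHED_FIFO tasks are left alone.
 */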
static void task_tick_rt(struct rq *rq, struct task_struct *p)
{
	/*
	 * RR tasks need a special form of timeslice management.
	 * FIFO tasks have no timeslices.
	 */
	if (p->policy != SCHED_RR)
		return;

	if (--p->time_slice)
		return;

	p->time_slice = static_prio_timeslice(p->static_prio);
	set_tsk_need_resched(p);

	/* put it at the end of the queue: */
	requeue_task_rt(rq, p);
}

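/*
 * The method table for the real-time scheduling class; the core
 * scheduler invokes RT policy behaviour through these callbacks.
 */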
static struct sched_class rt_sched_class __read_mostly = {
	.enqueue_task		= enqueue_task_rt,
	.dequeue_task		= dequeue_task_rt,
	.yield_task		= yield_task_rt,

	.check_preempt_curr	= check_preempt_curr_rt,

	.pick_next_task		= pick_next_task_rt,
	.put_prev_task		= put_prev_task_rt,

	.load_balance		= load_balance_rt,

	.task_tick		= task_tick_rt,
};