]> bbs.cooldavid.org Git - net-next-2.6.git/blame_incremental - fs/ioprio.c
xps: Transmit Packet Steering
[net-next-2.6.git] / fs / ioprio.c
... / ...
CommitLineData
/*
 * fs/ioprio.c
 *
 * Copyright (C) 2004 Jens Axboe <axboe@kernel.dk>
 *
 * Helper functions for setting/querying io priorities of processes. The
 * system calls closely mimic getpriority/setpriority, see the man page for
 * those. The prio argument is a composite of prio class and prio data, where
 * the data argument has meaning within that class. The standard scheduling
 * classes have 8 distinct prio levels, with 0 being the highest prio and 7
 * being the lowest.
 *
 * IOW, setting BE scheduling class with prio 2 is done ala:
 *
 * unsigned int prio = (IOPRIO_CLASS_BE << IOPRIO_CLASS_SHIFT) | 2;
 *
 * ioprio_set(PRIO_PROCESS, pid, prio);
 *
 * See also Documentation/block/ioprio.txt
 *
 */
22#include <linux/gfp.h>
23#include <linux/kernel.h>
24#include <linux/ioprio.h>
25#include <linux/blkdev.h>
26#include <linux/capability.h>
27#include <linux/syscalls.h>
28#include <linux/security.h>
29#include <linux/pid_namespace.h>
30
/*
 * Set @task's io priority to @ioprio, allocating a fresh io_context
 * for the task if it does not have one yet.
 *
 * Returns 0 on success, -EPERM if the caller may not change @task's
 * priority, -ENOMEM if io_context allocation fails, or a security
 * module error code.
 */
int set_task_ioprio(struct task_struct *task, int ioprio)
{
	int err;
	struct io_context *ioc;
	const struct cred *cred = current_cred(), *tcred;

	/*
	 * Only allow changing another task's io priority if the caller's
	 * real or effective uid owns the task, or has CAP_SYS_NICE.
	 * Credentials are RCU-protected, hence the read-side lock.
	 */
	rcu_read_lock();
	tcred = __task_cred(task);
	if (tcred->uid != cred->euid &&
	    tcred->uid != cred->uid && !capable(CAP_SYS_NICE)) {
		rcu_read_unlock();
		return -EPERM;
	}
	rcu_read_unlock();

	err = security_task_setioprio(task, ioprio);
	if (err)
		return err;

	task_lock(task);
	do {
		ioc = task->io_context;
		/* see wmb() in current_io_context() */
		smp_read_barrier_depends();
		if (ioc)
			break;

		/* GFP_ATOMIC: task_lock() is held, must not sleep here */
		ioc = alloc_io_context(GFP_ATOMIC, -1);
		if (!ioc) {
			err = -ENOMEM;
			break;
		}
		task->io_context = ioc;
	} while (1);

	if (!err) {
		ioc->ioprio = ioprio;
		/* flag so the io scheduler picks up the new value */
		ioc->ioprio_changed = 1;
	}

	task_unlock(task);
	return err;
}
EXPORT_SYMBOL_GPL(set_task_ioprio);
75
76SYSCALL_DEFINE3(ioprio_set, int, which, int, who, int, ioprio)
77{
78 int class = IOPRIO_PRIO_CLASS(ioprio);
79 int data = IOPRIO_PRIO_DATA(ioprio);
80 struct task_struct *p, *g;
81 struct user_struct *user;
82 struct pid *pgrp;
83 int ret;
84
85 switch (class) {
86 case IOPRIO_CLASS_RT:
87 if (!capable(CAP_SYS_ADMIN))
88 return -EPERM;
89 /* fall through, rt has prio field too */
90 case IOPRIO_CLASS_BE:
91 if (data >= IOPRIO_BE_NR || data < 0)
92 return -EINVAL;
93
94 break;
95 case IOPRIO_CLASS_IDLE:
96 break;
97 case IOPRIO_CLASS_NONE:
98 if (data)
99 return -EINVAL;
100 break;
101 default:
102 return -EINVAL;
103 }
104
105 ret = -ESRCH;
106 /*
107 * We want IOPRIO_WHO_PGRP/IOPRIO_WHO_USER to be "atomic",
108 * so we can't use rcu_read_lock(). See re-copy of ->ioprio
109 * in copy_process().
110 */
111 read_lock(&tasklist_lock);
112 switch (which) {
113 case IOPRIO_WHO_PROCESS:
114 rcu_read_lock();
115 if (!who)
116 p = current;
117 else
118 p = find_task_by_vpid(who);
119 if (p)
120 ret = set_task_ioprio(p, ioprio);
121 rcu_read_unlock();
122 break;
123 case IOPRIO_WHO_PGRP:
124 if (!who)
125 pgrp = task_pgrp(current);
126 else
127 pgrp = find_vpid(who);
128 do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
129 ret = set_task_ioprio(p, ioprio);
130 if (ret)
131 break;
132 } while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
133 break;
134 case IOPRIO_WHO_USER:
135 if (!who)
136 user = current_user();
137 else
138 user = find_user(who);
139
140 if (!user)
141 break;
142
143 do_each_thread(g, p) {
144 int match;
145
146 rcu_read_lock();
147 match = __task_cred(p)->uid == who;
148 rcu_read_unlock();
149 if (!match)
150 continue;
151 ret = set_task_ioprio(p, ioprio);
152 if (ret)
153 goto free_uid;
154 } while_each_thread(g, p);
155free_uid:
156 if (who)
157 free_uid(user);
158 break;
159 default:
160 ret = -EINVAL;
161 }
162
163 read_unlock(&tasklist_lock);
164 return ret;
165}
166
167static int get_task_ioprio(struct task_struct *p)
168{
169 int ret;
170
171 ret = security_task_getioprio(p);
172 if (ret)
173 goto out;
174 ret = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, IOPRIO_NORM);
175 if (p->io_context)
176 ret = p->io_context->ioprio;
177out:
178 return ret;
179}
180
181int ioprio_best(unsigned short aprio, unsigned short bprio)
182{
183 unsigned short aclass = IOPRIO_PRIO_CLASS(aprio);
184 unsigned short bclass = IOPRIO_PRIO_CLASS(bprio);
185
186 if (aclass == IOPRIO_CLASS_NONE)
187 aclass = IOPRIO_CLASS_BE;
188 if (bclass == IOPRIO_CLASS_NONE)
189 bclass = IOPRIO_CLASS_BE;
190
191 if (aclass == bclass)
192 return min(aprio, bprio);
193 if (aclass > bclass)
194 return bprio;
195 else
196 return aprio;
197}
198
/*
 * ioprio_get(2): return the io priority of a single process, or the
 * best (highest) priority among a process group or among all tasks
 * owned by a user.
 *
 * @which: IOPRIO_WHO_PROCESS, IOPRIO_WHO_PGRP or IOPRIO_WHO_USER
 * @who:   pid, pgid or uid respectively; 0 means "the current one"
 *
 * Returns the io priority value, -ESRCH when no matching task was
 * found, or -EINVAL for an unknown @which.
 */
SYSCALL_DEFINE2(ioprio_get, int, which, int, who)
{
	struct task_struct *g, *p;
	struct user_struct *user;
	struct pid *pgrp;
	int ret = -ESRCH;
	int tmpio;

	read_lock(&tasklist_lock);
	switch (which) {
	case IOPRIO_WHO_PROCESS:
		rcu_read_lock();
		if (!who)
			p = current;
		else
			p = find_task_by_vpid(who);
		if (p)
			ret = get_task_ioprio(p);
		rcu_read_unlock();
		break;
	case IOPRIO_WHO_PGRP:
		if (!who)
			pgrp = task_pgrp(current);
		else
			pgrp = find_vpid(who);
		do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
			tmpio = get_task_ioprio(p);
			if (tmpio < 0)
				continue;
			/* fold each thread's prio into the best one seen */
			if (ret == -ESRCH)
				ret = tmpio;
			else
				ret = ioprio_best(ret, tmpio);
		} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
		break;
	case IOPRIO_WHO_USER:
		if (!who)
			user = current_user();
		else
			user = find_user(who);	/* takes a uid refcount */

		if (!user)
			break;

		do_each_thread(g, p) {
			int match;

			/* credentials are RCU-protected */
			rcu_read_lock();
			match = __task_cred(p)->uid == user->uid;
			rcu_read_unlock();
			if (!match)
				continue;
			tmpio = get_task_ioprio(p);
			if (tmpio < 0)
				continue;
			if (ret == -ESRCH)
				ret = tmpio;
			else
				ret = ioprio_best(ret, tmpio);
		} while_each_thread(g, p);

		/* drop the reference taken by find_user() */
		if (who)
			free_uid(user);
		break;
	default:
		ret = -EINVAL;
	}

	read_unlock(&tasklist_lock);
	return ret;
}