/*
 * The "user cache".
 *
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files etc the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/key.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/user_namespace.h>
#include "cred-internals.h"

struct user_namespace init_user_ns = {
	.kref = {
		.refcount	= ATOMIC_INIT(2),
	},
	.creator = &root_user,
};
EXPORT_SYMBOL_GPL(init_user_ns);

/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user ID's (ie setuid() and friends).
 */

#define UIDHASH_MASK		(UIDHASH_SZ - 1)
#define __uidhashfn(uid)	(((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
#define uidhashentry(ns, uid)	((ns)->uidhash_table + __uidhashfn((uid)))

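/*
 * A worked example of the hash (a sketch; UIDHASH_BITS is config-dependent,
 * here assumed to be 7, giving UIDHASH_SZ == 128 and UIDHASH_MASK == 127):
 *
 *	__uidhashfn(1000) = ((1000 >> 7) + 1000) & 127
 *	                  = (7 + 1000) & 127
 *	                  = 111
 *
 * Folding the high bits into the low bits spreads consecutive and
 * power-of-two-spaced UIDs across the hash table.
 */
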
static struct kmem_cache *uid_cachep;

/*
 * The uidhash_lock is mostly taken from process context, but it is
 * occasionally also taken from softirq/tasklet context, when
 * task-structs get RCU-freed. Hence all locking must be softirq-safe.
 * But free_uid() is also called with local interrupts disabled, and running
 * local_bh_enable() with local interrupts disabled is an error - we'll run
 * softirq callbacks, and they can unconditionally enable interrupts, and
 * the caller of free_uid() didn't expect that..
 */
static DEFINE_SPINLOCK(uidhash_lock);
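
/*
 * Concretely, the pattern this imposes on writers (see free_uid() below)
 * is roughly:
 *
 *	local_irq_save(flags);
 *	if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
 *		free_user(up, flags);	(free_user drops the lock)
 *	else
 *		local_irq_restore(flags);
 *
 * i.e. an explicit local_irq_save() around a plain spin_lock(), rather
 * than spin_lock_bh(), for the reasons described above.
 */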

/* root_user.__count is 2, 1 for init task cred, 1 for init_user_ns->creator */
struct user_struct root_user = {
	.__count	= ATOMIC_INIT(2),
	.processes	= ATOMIC_INIT(1),
	.files		= ATOMIC_INIT(0),
	.sigpending	= ATOMIC_INIT(0),
	.locked_shm	= 0,
	.user_ns	= &init_user_ns,
#ifdef CONFIG_USER_SCHED
	.tg		= &init_task_group,
#endif
};

/*
 * These routines must be called with the uidhash spinlock held!
 */
static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
{
	hlist_add_head(&up->uidhash_node, hashent);
}

static void uid_hash_remove(struct user_struct *up)
{
	hlist_del_init(&up->uidhash_node);
	put_user_ns(up->user_ns);
}

#ifdef CONFIG_USER_SCHED

static void sched_destroy_user(struct user_struct *up)
{
	sched_destroy_group(up->tg);
}

static int sched_create_user(struct user_struct *up)
{
	int rc = 0;

	up->tg = sched_create_group(&root_task_group);
	if (IS_ERR(up->tg))
		rc = -ENOMEM;

	set_tg_uid(up);

	return rc;
}

#else	/* CONFIG_USER_SCHED */

static void sched_destroy_user(struct user_struct *up) { }
static int sched_create_user(struct user_struct *up) { return 0; }

#endif	/* CONFIG_USER_SCHED */

#if defined(CONFIG_USER_SCHED) && defined(CONFIG_SYSFS)

static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
{
	struct user_struct *user;
	struct hlist_node *h;

	hlist_for_each_entry(user, h, hashent, uidhash_node) {
		if (user->uid == uid) {
			/* possibly resurrect an "almost deleted" object */
			if (atomic_inc_return(&user->__count) == 1)
				cancel_delayed_work(&user->work);
			return user;
		}
	}

	return NULL;
}

static struct kset *uids_kset; /* represents the /sys/kernel/uids/ directory */
static DEFINE_MUTEX(uids_mutex);

static inline void uids_mutex_lock(void)
{
	mutex_lock(&uids_mutex);
}

static inline void uids_mutex_unlock(void)
{
	mutex_unlock(&uids_mutex);
}

/* uid directory attributes */
#ifdef CONFIG_FAIR_GROUP_SCHED
static ssize_t cpu_shares_show(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       char *buf)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);

	return sprintf(buf, "%lu\n", sched_group_shares(up->tg));
}

static ssize_t cpu_shares_store(struct kobject *kobj,
				struct kobj_attribute *attr,
				const char *buf, size_t size)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);
	unsigned long shares;
	int rc;

	sscanf(buf, "%lu", &shares);

	rc = sched_group_set_shares(up->tg, shares);

	return (rc ? rc : size);
}

static struct kobj_attribute cpu_share_attr =
	__ATTR(cpu_share, 0644, cpu_shares_show, cpu_shares_store);
#endif

#ifdef CONFIG_RT_GROUP_SCHED
static ssize_t cpu_rt_runtime_show(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   char *buf)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);

	return sprintf(buf, "%ld\n", sched_group_rt_runtime(up->tg));
}

static ssize_t cpu_rt_runtime_store(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    const char *buf, size_t size)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);
	unsigned long rt_runtime;
	int rc;

	sscanf(buf, "%ld", &rt_runtime);

	rc = sched_group_set_rt_runtime(up->tg, rt_runtime);

	return (rc ? rc : size);
}

static struct kobj_attribute cpu_rt_runtime_attr =
	__ATTR(cpu_rt_runtime, 0644, cpu_rt_runtime_show, cpu_rt_runtime_store);

static ssize_t cpu_rt_period_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);

	return sprintf(buf, "%lu\n", sched_group_rt_period(up->tg));
}

static ssize_t cpu_rt_period_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t size)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);
	unsigned long rt_period;
	int rc;

	sscanf(buf, "%lu", &rt_period);

	rc = sched_group_set_rt_period(up->tg, rt_period);

	return (rc ? rc : size);
}

static struct kobj_attribute cpu_rt_period_attr =
	__ATTR(cpu_rt_period, 0644, cpu_rt_period_show, cpu_rt_period_store);
#endif

/* default attributes per uid directory */
static struct attribute *uids_attributes[] = {
#ifdef CONFIG_FAIR_GROUP_SCHED
	&cpu_share_attr.attr,
#endif
#ifdef CONFIG_RT_GROUP_SCHED
	&cpu_rt_runtime_attr.attr,
	&cpu_rt_period_attr.attr,
#endif
	NULL
};

/* the lifetime of user_struct is not managed by the core (now) */
static void uids_release(struct kobject *kobj)
{
	return;
}

static struct kobj_type uids_ktype = {
	.sysfs_ops = &kobj_sysfs_ops,
	.default_attrs = uids_attributes,
	.release = uids_release,
};

/*
 * Create /sys/kernel/uids/<uid>/cpu_share file for this user
 * We do not create this file for users in a user namespace (until
 * sysfs tagging is implemented).
 *
 * See Documentation/scheduler/sched-design-CFS.txt for ramifications.
 */
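/*
 * From userspace this looks roughly like (illustrative values; the share
 * reported depends on the scheduler configuration):
 *
 *	$ cat /sys/kernel/uids/1000/cpu_share
 *	1024
 *	# echo 2048 > /sys/kernel/uids/1000/cpu_share
 *
 * cpu_shares_store() above parses the written value with sscanf("%lu")
 * and hands it to sched_group_set_shares().
 */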
static int uids_user_create(struct user_struct *up)
{
	struct kobject *kobj = &up->kobj;
	int error;

	memset(kobj, 0, sizeof(struct kobject));
	if (up->user_ns != &init_user_ns)
		return 0;
	kobj->kset = uids_kset;
	error = kobject_init_and_add(kobj, &uids_ktype, NULL, "%d", up->uid);
	if (error) {
		kobject_put(kobj);
		goto done;
	}

	kobject_uevent(kobj, KOBJ_ADD);
done:
	return error;
}

/* create these entries in sysfs:
 *	"/sys/kernel/uids" directory
 *	"/sys/kernel/uids/0" directory (for root user)
 *	"/sys/kernel/uids/0/cpu_share" file (for root user)
 */
int __init uids_sysfs_init(void)
{
	uids_kset = kset_create_and_add("uids", NULL, kernel_kobj);
	if (!uids_kset)
		return -ENOMEM;

	return uids_user_create(&root_user);
}

/* delayed work function to remove sysfs directory for a user and free up
 * corresponding structures.
 */
static void cleanup_user_struct(struct work_struct *w)
{
	struct user_struct *up = container_of(w, struct user_struct, work.work);
	unsigned long flags;
	int remove_user = 0;

	/* Make uid_hash_remove() + sysfs_remove_file() + kobject_del()
	 * atomic.
	 */
	uids_mutex_lock();

	spin_lock_irqsave(&uidhash_lock, flags);
	if (atomic_read(&up->__count) == 0) {
		uid_hash_remove(up);
		remove_user = 1;
	}
	spin_unlock_irqrestore(&uidhash_lock, flags);

	if (!remove_user)
		goto done;

	if (up->user_ns == &init_user_ns) {
		kobject_uevent(&up->kobj, KOBJ_REMOVE);
		kobject_del(&up->kobj);
		kobject_put(&up->kobj);
	}

	sched_destroy_user(up);
	key_put(up->uid_keyring);
	key_put(up->session_keyring);
	kmem_cache_free(uid_cachep, up);

done:
	uids_mutex_unlock();
}

/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static void free_user(struct user_struct *up, unsigned long flags)
{
	INIT_DELAYED_WORK(&up->work, cleanup_user_struct);
	schedule_delayed_work(&up->work, msecs_to_jiffies(1000));
	spin_unlock_irqrestore(&uidhash_lock, flags);
}
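
/*
 * Note the interplay with uid_hash_find() above: the last put only
 * schedules the teardown, delayed by a second. If a lookup arrives in
 * that window, atomic_inc_return() moving __count from 0 to 1 is taken
 * as "resurrection" and the pending work is cancelled;
 * cleanup_user_struct() also re-checks __count under uidhash_lock
 * before really freeing anything.
 */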

#else	/* CONFIG_USER_SCHED && CONFIG_SYSFS */

static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
{
	struct user_struct *user;
	struct hlist_node *h;

	hlist_for_each_entry(user, h, hashent, uidhash_node) {
		if (user->uid == uid) {
			atomic_inc(&user->__count);
			return user;
		}
	}

	return NULL;
}

int uids_sysfs_init(void) { return 0; }
static inline int uids_user_create(struct user_struct *up) { return 0; }
static inline void uids_mutex_lock(void) { }
static inline void uids_mutex_unlock(void) { }

/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static void free_user(struct user_struct *up, unsigned long flags)
{
	uid_hash_remove(up);
	spin_unlock_irqrestore(&uidhash_lock, flags);
	sched_destroy_user(up);
	key_put(up->uid_keyring);
	key_put(up->session_keyring);
	kmem_cache_free(uid_cachep, up);
}

#endif

#if defined(CONFIG_RT_GROUP_SCHED) && defined(CONFIG_USER_SCHED)
/*
 * We need to check if a setuid can take place. This function should be called
 * before successfully completing the setuid.
 */
int task_can_switch_user(struct user_struct *up, struct task_struct *tsk)
{
	return sched_rt_can_attach(up->tg, tsk);
}
#else
int task_can_switch_user(struct user_struct *up, struct task_struct *tsk)
{
	return 1;
}
#endif

/*
 * Locate the user_struct for the passed UID. If found, take a ref on it. The
 * caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
 */
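/*
 * Typical caller pattern (a sketch, not taken from any one call site):
 *
 *	struct user_struct *user = find_user(uid);
 *	if (user) {
 *		... use user ...
 *		free_uid(user);
 *	}
 */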
struct user_struct *find_user(uid_t uid)
{
	struct user_struct *ret;
	unsigned long flags;
	struct user_namespace *ns = current_user_ns();

	spin_lock_irqsave(&uidhash_lock, flags);
	ret = uid_hash_find(uid, uidhashentry(ns, uid));
	spin_unlock_irqrestore(&uidhash_lock, flags);
	return ret;
}

void free_uid(struct user_struct *up)
{
	unsigned long flags;

	if (!up)
		return;

	local_irq_save(flags);
	if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
		free_user(up, flags);
	else
		local_irq_restore(flags);
}

struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
{
	struct hlist_head *hashent = uidhashentry(ns, uid);
	struct user_struct *up, *new;

	/* Make uid_hash_find() + uids_user_create() + uid_hash_insert()
	 * atomic.
	 */
	uids_mutex_lock();

	spin_lock_irq(&uidhash_lock);
	up = uid_hash_find(uid, hashent);
	spin_unlock_irq(&uidhash_lock);

	if (!up) {
		new = kmem_cache_zalloc(uid_cachep, GFP_KERNEL);
		if (!new)
			goto out_unlock;

		new->uid = uid;
		atomic_set(&new->__count, 1);

		if (sched_create_user(new) < 0)
			goto out_free_user;

		new->user_ns = get_user_ns(ns);

		if (uids_user_create(new))
			goto out_destroy_sched;

		/*
		 * Before adding this, check whether we raced
		 * on adding the same user already..
		 */
		spin_lock_irq(&uidhash_lock);
		up = uid_hash_find(uid, hashent);
		if (up) {
			/* This case is not possible when CONFIG_USER_SCHED
			 * is defined, since we serialize alloc_uid() using
			 * uids_mutex. Hence no need to call
			 * sched_destroy_user() or remove_user_sysfs_dir().
			 */
			key_put(new->uid_keyring);
			key_put(new->session_keyring);
			kmem_cache_free(uid_cachep, new);
		} else {
			uid_hash_insert(new, hashent);
			up = new;
		}
		spin_unlock_irq(&uidhash_lock);
	}

	uids_mutex_unlock();

	return up;

out_destroy_sched:
	sched_destroy_user(new);
	put_user_ns(new->user_ns);
out_free_user:
	kmem_cache_free(uid_cachep, new);
out_unlock:
	uids_mutex_unlock();
	return NULL;
}
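
/*
 * Caller's-eye sketch (illustrative, not from a specific call site):
 * setuid()-style paths do roughly
 *
 *	struct user_struct *new_user = alloc_uid(current_user_ns(), uid);
 *	if (!new_user)
 *		return -EAGAIN;
 *
 * and later drop the reference with free_uid().
 */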

static int __init uid_cache_init(void)
{
	int n;

	uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	for (n = 0; n < UIDHASH_SZ; ++n)
		INIT_HLIST_HEAD(init_user_ns.uidhash_table + n);

	/* Insert the root user immediately (init already runs as root) */
	spin_lock_irq(&uidhash_lock);
	uid_hash_insert(&root_user, uidhashentry(&init_user_ns, 0));
	spin_unlock_irq(&uidhash_lock);

	return 0;
}

module_init(uid_cache_init);