/*
 * linux/ipc/util.c
 * Copyright (C) 1992 Krishna Balasubramanian
 *
 * Sep 1997 - Call suser() last after "normal" permission checks so we
 *            get BSD style process accounting right.
 *            Occurs in several places in the IPC code.
 *            Chris Evans, <chris@ferret.lmh.ox.ac.uk>
 * Nov 1999 - ipc helper functions, unified SMP locking
 *            Manfred Spraul <manfred@colorfullife.com>
 * Oct 2002 - One lock per IPC id. RCU ipc_free for lock-free grow_ary().
 *            Mingming Cao <cmm@us.ibm.com>
 * Mar 2006 - support for audit of ipc object properties
 *            Dustin Kirkland <dustin.kirkland@us.ibm.com>
 */

#include <linux/mm.h>
#include <linux/shm.h>
#include <linux/init.h>
#include <linux/msg.h>
#include <linux/smp_lock.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/highuid.h>
#include <linux/security.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/audit.h>

#include <asm/unistd.h>

#include "util.h"

struct ipc_proc_iface {
	const char *path;
	const char *header;
	struct ipc_ids *ids;
	int (*show)(struct seq_file *, void *);
};

/**
 * ipc_init - initialise IPC subsystem
 *
 * The various System V IPC resources (semaphores, messages and shared
 * memory) are initialised.
 */

static int __init ipc_init(void)
{
	sem_init();
	msg_init();
	shm_init();
	return 0;
}
__initcall(ipc_init);

/**
 * ipc_init_ids - initialise IPC identifiers
 * @ids: Identifier set
 * @size: Number of identifiers
 *
 * Given a size for the ipc identifier range (limited below IPCMNI)
 * set up the sequence range to use then allocate and initialise the
 * array itself.
 */

void __init ipc_init_ids(struct ipc_ids* ids, int size)
{
	int i;

	mutex_init(&ids->mutex);

	if(size > IPCMNI)
		size = IPCMNI;
	ids->in_use = 0;
	ids->max_id = -1;
	ids->seq = 0;
	{
		int seq_limit = INT_MAX/SEQ_MULTIPLIER;
		if(seq_limit > USHRT_MAX)
			ids->seq_max = USHRT_MAX;
		else
			ids->seq_max = seq_limit;
	}

	ids->entries = ipc_rcu_alloc(sizeof(struct kern_ipc_perm *)*size +
				     sizeof(struct ipc_id_ary));

	if(ids->entries == NULL) {
		printk(KERN_ERR "ipc_init_ids() failed, ipc service disabled.\n");
		size = 0;
		ids->entries = &ids->nullentry;
	}
	ids->entries->size = size;
	for(i=0;i<size;i++)
		ids->entries->p[i] = NULL;
}

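/*
 * Worked example (editorial sketch, assuming the usual SEQ_MULTIPLIER
 * of IPCMNI == 32768): seq_limit = INT_MAX/32768 = 65535, which equals
 * USHRT_MAX, so seq_max becomes 65535 and ids->seq wraps back to 0
 * once it exceeds seq_max (see ipc_addid() below).
 */
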
#ifdef CONFIG_PROC_FS
static struct file_operations sysvipc_proc_fops;
/**
 * ipc_init_proc_interface - Create a proc interface for sysvipc types
 * using a seq_file interface.
 * @path: Path in procfs
 * @header: Banner to be printed at the beginning of the file.
 * @ids: ipc id table to iterate.
 * @show: show routine.
 */
void __init ipc_init_proc_interface(const char *path, const char *header,
				    struct ipc_ids *ids,
				    int (*show)(struct seq_file *, void *))
{
	struct proc_dir_entry *pde;
	struct ipc_proc_iface *iface;

	iface = kmalloc(sizeof(*iface), GFP_KERNEL);
	if (!iface)
		return;
	iface->path = path;
	iface->header = header;
	iface->ids = ids;
	iface->show = show;

	pde = create_proc_entry(path,
				S_IRUGO,	/* world readable */
				NULL		/* parent dir */);
	if (pde) {
		pde->data = iface;
		pde->proc_fops = &sysvipc_proc_fops;
	} else {
		kfree(iface);
	}
}
#endif

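/*
 * Usage sketch (editorial; header string abbreviated): each IPC
 * subsystem registers its /proc file roughly like
 *
 *	ipc_init_proc_interface("sysvipc/shm",
 *				"       key      shmid ...\n",
 *				&shm_ids, sysvipc_shm_proc_show);
 *
 * after which reads of /proc/sysvipc/shm iterate shm_ids through the
 * given show callback.
 */
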
/**
 * ipc_findkey - find a key in an ipc identifier set
 * @ids: Identifier set
 * @key: The key to find
 *
 * Requires ipc_ids.mutex locked.
 * Returns the identifier if found or -1 if not.
 */

int ipc_findkey(struct ipc_ids* ids, key_t key)
{
	int id;
	struct kern_ipc_perm* p;
	int max_id = ids->max_id;

	/*
	 * rcu_dereference() is not needed here
	 * since ipc_ids.mutex is held
	 */
	for (id = 0; id <= max_id; id++) {
		p = ids->entries->p[id];
		if(p==NULL)
			continue;
		if (key == p->key)
			return id;
	}
	return -1;
}

/*
 * Requires ipc_ids.mutex locked
 */
static int grow_ary(struct ipc_ids* ids, int newsize)
{
	struct ipc_id_ary* new;
	struct ipc_id_ary* old;
	int i;
	int size = ids->entries->size;

	if(newsize > IPCMNI)
		newsize = IPCMNI;
	if(newsize <= size)
		return newsize;

	new = ipc_rcu_alloc(sizeof(struct kern_ipc_perm *)*newsize +
			    sizeof(struct ipc_id_ary));
	if(new == NULL)
		return size;
	new->size = newsize;
	memcpy(new->p, ids->entries->p, sizeof(struct kern_ipc_perm *)*size);
	for(i=size;i<newsize;i++) {
		new->p[i] = NULL;
	}
	old = ids->entries;

	/*
	 * Use rcu_assign_pointer() to make sure the memcpyed contents
	 * of the new array are visible before the new array becomes visible.
	 */
	rcu_assign_pointer(ids->entries, new);

	ipc_rcu_putref(old);
	return newsize;
}

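/*
 * Editorial note: the rcu_assign_pointer() in grow_ary() pairs with the
 * rcu_dereference() in ipc_lock() below; a lock-free reader sees either
 * the old array or the fully initialised new one, never a partial copy.
 */
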
/**
 * ipc_addid - add an IPC identifier
 * @ids: IPC identifier set
 * @new: new IPC permission set
 * @size: new size limit for the id array
 *
 * Add an entry 'new' to the IPC arrays. The permissions object is
 * initialised and the first free entry is set up and the id assigned
 * is returned. The list is returned in a locked state on success.
 * On failure the list is not locked and -1 is returned.
 *
 * Called with ipc_ids.mutex held.
 */

int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size)
{
	int id;

	size = grow_ary(ids,size);

	/*
	 * rcu_dereference() is not needed here since
	 * ipc_ids.mutex is held
	 */
	for (id = 0; id < size; id++) {
		if(ids->entries->p[id] == NULL)
			goto found;
	}
	return -1;
found:
	ids->in_use++;
	if (id > ids->max_id)
		ids->max_id = id;

	new->cuid = new->uid = current->euid;
	new->gid = new->cgid = current->egid;

	new->seq = ids->seq++;
	if(ids->seq > ids->seq_max)
		ids->seq = 0;

	spin_lock_init(&new->lock);
	new->deleted = 0;
	rcu_read_lock();
	spin_lock(&new->lock);
	ids->entries->p[id] = new;
	return id;
}

/**
 * ipc_rmid - remove an IPC identifier
 * @ids: identifier set
 * @id: Identifier to remove
 *
 * The identifier must be valid, and in use. The kernel will panic if
 * fed an invalid identifier. The entry is removed and internal
 * variables recomputed. The object associated with the identifier
 * is returned.
 * ipc_ids.mutex and the spinlock for this ID are held before this
 * function is called, and remain locked on exit.
 */

struct kern_ipc_perm* ipc_rmid(struct ipc_ids* ids, int id)
{
	struct kern_ipc_perm* p;
	int lid = id % SEQ_MULTIPLIER;
	BUG_ON(lid >= ids->entries->size);

	/*
	 * do not need a rcu_dereference() here to force ordering
	 * on Alpha, since the ipc_ids.mutex is held.
	 */
	p = ids->entries->p[lid];
	ids->entries->p[lid] = NULL;
	BUG_ON(p==NULL);
	ids->in_use--;

	if (lid == ids->max_id) {
		do {
			lid--;
			if(lid == -1)
				break;
		} while (ids->entries->p[lid] == NULL);
		ids->max_id = lid;
	}
	p->deleted = 1;
	return p;
}

/**
 * ipc_alloc - allocate ipc space
 * @size: size desired
 *
 * Allocate memory from the appropriate pools and return a pointer to it.
 * NULL is returned if the allocation fails.
 */

void* ipc_alloc(int size)
{
	void* out;
	if(size > PAGE_SIZE)
		out = vmalloc(size);
	else
		out = kmalloc(size, GFP_KERNEL);
	return out;
}

/**
 * ipc_free - free ipc space
 * @ptr: pointer returned by ipc_alloc
 * @size: size of block
 *
 * Free a block created with ipc_alloc(). The caller must know the size
 * used in the allocation call.
 */

void ipc_free(void* ptr, int size)
{
	if(size > PAGE_SIZE)
		vfree(ptr);
	else
		kfree(ptr);
}

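/*
 * Editorial sketch: ipc_alloc() and ipc_free() must see the same size
 * so that the matching deallocator (vfree vs kfree) is chosen:
 *
 *	void *buf = ipc_alloc(len);
 *	if (buf) {
 *		... use buf ...
 *		ipc_free(buf, len);
 *	}
 */
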
/*
 * rcu allocations:
 * There are three headers that are prepended to the actual allocation:
 * - during use: ipc_rcu_hdr.
 * - during the rcu grace period: ipc_rcu_grace.
 * - [only if vmalloc]: ipc_rcu_sched.
 * Their lifetime doesn't overlap, thus the headers share the same memory.
 * Unlike a normal union, they are right-aligned, thus some container_of
 * forward/backward casting is necessary:
 */
struct ipc_rcu_hdr
{
	int refcount;
	int is_vmalloc;
	void *data[0];
};


struct ipc_rcu_grace
{
	struct rcu_head rcu;
	/* "void *" makes sure alignment of following data is sane. */
	void *data[0];
};

struct ipc_rcu_sched
{
	struct work_struct work;
	/* "void *" makes sure alignment of following data is sane. */
	void *data[0];
};

#define HDRLEN_KMALLOC	(sizeof(struct ipc_rcu_grace) > sizeof(struct ipc_rcu_hdr) ? \
				sizeof(struct ipc_rcu_grace) : sizeof(struct ipc_rcu_hdr))
#define HDRLEN_VMALLOC	(sizeof(struct ipc_rcu_sched) > HDRLEN_KMALLOC ? \
				sizeof(struct ipc_rcu_sched) : HDRLEN_KMALLOC)

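/*
 * Layout sketch (editorial): callers only ever see the "data" pointer;
 * the header of the current lifetime phase occupies the bytes directly
 * before it, right-aligned within the reserved slot:
 *
 *	[ HDRLEN_KMALLOC or HDRLEN_VMALLOC bytes ][ object ... ]
 *	                      headers end here -> ^ caller's pointer
 *
 * HDRLEN_KMALLOC/HDRLEN_VMALLOC reserve enough room for the largest
 * header that will ever occupy the slot.
 */
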
static inline int rcu_use_vmalloc(int size)
{
	/* Too big for a single page? */
	if (HDRLEN_KMALLOC + size > PAGE_SIZE)
		return 1;
	return 0;
}

/**
 * ipc_rcu_alloc - allocate ipc and rcu space
 * @size: size desired
 *
 * Allocate memory for the rcu header structure + the object.
 * Returns the pointer to the object.
 * NULL is returned if the allocation fails.
 */

void* ipc_rcu_alloc(int size)
{
	void* out;
	/*
	 * We prepend the allocation with the rcu struct, and
	 * workqueue if necessary (for vmalloc).
	 */
	if (rcu_use_vmalloc(size)) {
		out = vmalloc(HDRLEN_VMALLOC + size);
		if (out) {
			out += HDRLEN_VMALLOC;
			container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 1;
			container_of(out, struct ipc_rcu_hdr, data)->refcount = 1;
		}
	} else {
		out = kmalloc(HDRLEN_KMALLOC + size, GFP_KERNEL);
		if (out) {
			out += HDRLEN_KMALLOC;
			container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 0;
			container_of(out, struct ipc_rcu_hdr, data)->refcount = 1;
		}
	}

	return out;
}

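/*
 * Lifecycle sketch (editorial; my_obj is a hypothetical payload type):
 *
 *	struct my_obj *p = ipc_rcu_alloc(sizeof(*p));	<- refcount is 1
 *	...
 *	ipc_rcu_putref(p);	<- refcount hits 0, RCU-deferred free
 */
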
void ipc_rcu_getref(void *ptr)
{
	container_of(ptr, struct ipc_rcu_hdr, data)->refcount++;
}

/**
 * ipc_schedule_free - free ipc + rcu space
 * @head: RCU callback structure for queued work
 *
 * Since the RCU callback is run from bh context, we need to
 * defer the vfree() to schedule_work().
 */
static void ipc_schedule_free(struct rcu_head *head)
{
	struct ipc_rcu_grace *grace =
		container_of(head, struct ipc_rcu_grace, rcu);
	struct ipc_rcu_sched *sched =
		container_of(&(grace->data[0]), struct ipc_rcu_sched, data[0]);

	INIT_WORK(&sched->work, vfree, sched);
	schedule_work(&sched->work);
}

/**
 * ipc_immediate_free - free ipc + rcu space
 * @head: RCU callback structure that contains pointer to be freed
 *
 * Free from the RCU callback context.
 */
static void ipc_immediate_free(struct rcu_head *head)
{
	struct ipc_rcu_grace *free =
		container_of(head, struct ipc_rcu_grace, rcu);
	kfree(free);
}

void ipc_rcu_putref(void *ptr)
{
	if (--container_of(ptr, struct ipc_rcu_hdr, data)->refcount > 0)
		return;

	if (container_of(ptr, struct ipc_rcu_hdr, data)->is_vmalloc) {
		call_rcu(&container_of(ptr, struct ipc_rcu_grace, data)->rcu,
				ipc_schedule_free);
	} else {
		call_rcu(&container_of(ptr, struct ipc_rcu_grace, data)->rcu,
				ipc_immediate_free);
	}
}

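/*
 * Editorial note: ipc_rcu_getref()/ipc_rcu_putref() let a caller keep an
 * object alive while temporarily dropping its spinlock (the semaphore
 * code does this around blocking allocations), re-validating the entry
 * after relocking.
 */
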
/**
 * ipcperms - check IPC permissions
 * @ipcp: IPC permission set
 * @flag: desired permission set.
 *
 * Check user, group, other permissions for access
 * to ipc resources. Returns 0 if allowed.
 */

int ipcperms (struct kern_ipc_perm *ipcp, short flag)
{	/* flag will most probably be 0 or S_...UGO from <linux/stat.h> */
	int requested_mode, granted_mode, err;

	if (unlikely((err = audit_ipc_obj(ipcp))))
		return err;
	requested_mode = (flag >> 6) | (flag >> 3) | flag;
	granted_mode = ipcp->mode;
	if (current->euid == ipcp->cuid || current->euid == ipcp->uid)
		granted_mode >>= 6;
	else if (in_group_p(ipcp->cgid) || in_group_p(ipcp->gid))
		granted_mode >>= 3;
	/* is there some bit set in requested_mode but not in granted_mode? */
	if ((requested_mode & ~granted_mode & 0007) &&
	    !capable(CAP_IPC_OWNER))
		return -1;

	return security_ipc_permission(ipcp, flag);
}

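/*
 * Worked example (editorial): for a read request a caller typically
 * passes flag == S_IRUGO (0444), giving requested_mode == 0444.  If the
 * requester owns an object with mode 0640, granted_mode >>= 6 leaves 06,
 * and (0444 & ~06 & 0007) == 0, so access is granted without needing
 * CAP_IPC_OWNER.
 */
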
/*
 * Functions to convert between the kern_ipc_perm structure and the
 * old/new ipc_perm structures
 */

/**
 * kernel_to_ipc64_perm - convert kernel ipc permissions to user
 * @in: kernel permissions
 * @out: new style IPC permissions
 *
 * Turn the kernel object 'in' into a set of permissions descriptions
 * for returning to userspace (out).
 */

void kernel_to_ipc64_perm (struct kern_ipc_perm *in, struct ipc64_perm *out)
{
	out->key = in->key;
	out->uid = in->uid;
	out->gid = in->gid;
	out->cuid = in->cuid;
	out->cgid = in->cgid;
	out->mode = in->mode;
	out->seq = in->seq;
}

/**
 * ipc64_perm_to_ipc_perm - convert new ipc permissions to old
 * @in: new style IPC permissions
 * @out: old style IPC permissions
 *
 * Turn the new style permissions object 'in' into a compatibility
 * object and store it into the 'out' pointer.
 */

void ipc64_perm_to_ipc_perm (struct ipc64_perm *in, struct ipc_perm *out)
{
	out->key = in->key;
	SET_UID(out->uid, in->uid);
	SET_GID(out->gid, in->gid);
	SET_UID(out->cuid, in->cuid);
	SET_GID(out->cgid, in->cgid);
	out->mode = in->mode;
	out->seq = in->seq;
}

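/*
 * Editorial note: the old-style struct ipc_perm uses 16-bit uid/gid
 * fields on most architectures, so SET_UID()/SET_GID() (linux/highuid.h)
 * narrow the kernel's 32-bit values, substituting the overflow ids when
 * a value does not fit.
 */
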
/*
 * So far only shm_get_stat() calls ipc_get() via shm_get(), so ipc_get()
 * is called with shm_ids.mutex locked. Since grow_ary() is also called with
 * shm_ids.mutex down (for shared memory), there is no need to add read
 * barriers here to guarantee the writes in grow_ary() are seen in order
 * here (for Alpha).
 *
 * However ipc_get() itself does not necessarily require ipc_ids.mutex down. So
 * if in the future ipc_get() is used by other places without ipc_ids.mutex
 * down, then ipc_get() needs read memory barriers as ipc_lock() does.
 */
struct kern_ipc_perm* ipc_get(struct ipc_ids* ids, int id)
{
	struct kern_ipc_perm* out;
	int lid = id % SEQ_MULTIPLIER;
	if(lid >= ids->entries->size)
		return NULL;
	out = ids->entries->p[lid];
	return out;
}

struct kern_ipc_perm* ipc_lock(struct ipc_ids* ids, int id)
{
	struct kern_ipc_perm* out;
	int lid = id % SEQ_MULTIPLIER;
	struct ipc_id_ary* entries;

	rcu_read_lock();
	entries = rcu_dereference(ids->entries);
	if(lid >= entries->size) {
		rcu_read_unlock();
		return NULL;
	}
	out = entries->p[lid];
	if(out == NULL) {
		rcu_read_unlock();
		return NULL;
	}
	spin_lock(&out->lock);

	/* ipc_rmid() may have already freed the ID while ipc_lock
	 * was spinning: here verify that the structure is still valid
	 */
	if (out->deleted) {
		spin_unlock(&out->lock);
		rcu_read_unlock();
		return NULL;
	}
	return out;
}

void ipc_lock_by_ptr(struct kern_ipc_perm *perm)
{
	rcu_read_lock();
	spin_lock(&perm->lock);
}

void ipc_unlock(struct kern_ipc_perm* perm)
{
	spin_unlock(&perm->lock);
	rcu_read_unlock();
}

int ipc_buildid(struct ipc_ids* ids, int id, int seq)
{
	return SEQ_MULTIPLIER*seq + id;
}

int ipc_checkid(struct ipc_ids* ids, struct kern_ipc_perm* ipcp, int uid)
{
	if(uid/SEQ_MULTIPLIER != ipcp->seq)
		return 1;
	return 0;
}

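/*
 * Encoding sketch (editorial, assuming SEQ_MULTIPLIER == IPCMNI ==
 * 32768): ipc_buildid() packs slot index and sequence number as
 *
 *	id = seq * 32768 + slot	(slot 5, seq 3 -> id 98309)
 *
 * so ipc_checkid() can reject a stale id whose slot has since been
 * reused under a newer sequence number.
 */
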
#ifdef __ARCH_WANT_IPC_PARSE_VERSION

/**
 * ipc_parse_version - IPC call version
 * @cmd: pointer to command
 *
 * Return IPC_64 for new style IPC and IPC_OLD for old style IPC.
 * The cmd value is turned from an encoded command and version into
 * just the command code.
 */

int ipc_parse_version (int *cmd)
{
	if (*cmd & IPC_64) {
		*cmd ^= IPC_64;
		return IPC_64;
	} else {
		return IPC_OLD;
	}
}

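/*
 * Editorial example: a new-style caller arrives with
 * *cmd == (IPC_STAT | IPC_64); the IPC_64 bit is stripped, *cmd becomes
 * IPC_STAT, and IPC_64 is returned so the syscall picks the 64-bit
 * structure layout.
 */
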
#endif /* __ARCH_WANT_IPC_PARSE_VERSION */

#ifdef CONFIG_PROC_FS
static void *sysvipc_proc_next(struct seq_file *s, void *it, loff_t *pos)
{
	struct ipc_proc_iface *iface = s->private;
	struct kern_ipc_perm *ipc = it;
	loff_t p;

	/* If we had an ipc id locked before, unlock it */
	if (ipc && ipc != SEQ_START_TOKEN)
		ipc_unlock(ipc);

	/*
	 * p = *pos - 1 (because id 0 starts at position 1)
	 *   + 1 (because we increment the position by one),
	 * i.e. simply *pos.
	 */
	for (p = *pos; p <= iface->ids->max_id; p++) {
		if ((ipc = ipc_lock(iface->ids, p)) != NULL) {
			*pos = p + 1;
			return ipc;
		}
	}

	/* Out of range - return NULL to terminate iteration */
	return NULL;
}

/*
 * File positions: pos 0 -> header, pos n -> ipc id + 1.
 * SeqFile iterator: iterator value is a locked ipc pointer or SEQ_START_TOKEN.
 */
static void *sysvipc_proc_start(struct seq_file *s, loff_t *pos)
{
	struct ipc_proc_iface *iface = s->private;
	struct kern_ipc_perm *ipc;
	loff_t p;

	/*
	 * Take the lock - this will be released by the corresponding
	 * call to stop().
	 */
	mutex_lock(&iface->ids->mutex);

	/* pos < 0 is invalid */
	if (*pos < 0)
		return NULL;

	/* pos == 0 means header */
	if (*pos == 0)
		return SEQ_START_TOKEN;

	/* Find the (pos-1)th ipc */
	for (p = *pos - 1; p <= iface->ids->max_id; p++) {
		if ((ipc = ipc_lock(iface->ids, p)) != NULL) {
			*pos = p + 1;
			return ipc;
		}
	}
	return NULL;
}

static void sysvipc_proc_stop(struct seq_file *s, void *it)
{
	struct kern_ipc_perm *ipc = it;
	struct ipc_proc_iface *iface = s->private;

	/* If we had a locked segment, release it */
	if (ipc && ipc != SEQ_START_TOKEN)
		ipc_unlock(ipc);

	/* Release the lock we took in start() */
	mutex_unlock(&iface->ids->mutex);
}

static int sysvipc_proc_show(struct seq_file *s, void *it)
{
	struct ipc_proc_iface *iface = s->private;

	if (it == SEQ_START_TOKEN)
		return seq_puts(s, iface->header);

	return iface->show(s, it);
}

static struct seq_operations sysvipc_proc_seqops = {
	.start = sysvipc_proc_start,
	.stop  = sysvipc_proc_stop,
	.next  = sysvipc_proc_next,
	.show  = sysvipc_proc_show,
};

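/*
 * Editorial note on the seq_file protocol: each read calls start()
 * (which takes the ipc mutex), then show()/next() alternate over the
 * individually locked entries, and stop() releases whatever is still
 * held - hence the ipc_unlock() calls in both next() and stop().
 */
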
static int sysvipc_proc_open(struct inode *inode, struct file *file)
{
	int ret;
	struct seq_file *seq;

	ret = seq_open(file, &sysvipc_proc_seqops);
	if (!ret) {
		seq = file->private_data;
		seq->private = PDE(inode)->data;
	}
	return ret;
}

static struct file_operations sysvipc_proc_fops = {
	.open    = sysvipc_proc_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};
#endif /* CONFIG_PROC_FS */