/*
 * Pid namespaces
 *
 * Authors:
 *    (C) 2007 Pavel Emelyanov <xemul@openvz.org>, OpenVZ, SWsoft Inc.
 *    (C) 2007 Sukadev Bhattiprolu <sukadev@us.ibm.com>, IBM
 *     Many thanks to Oleg Nesterov for comments and help
 *
 */

#include <linux/pid.h>
#include <linux/pid_namespace.h>
#include <linux/syscalls.h>
#include <linux/err.h>
#include <linux/acct.h>
#include <linux/slab.h>

#define BITS_PER_PAGE		(PAGE_SIZE*8)

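/*
 * A struct pid embeds one struct upid per namespace level, so each
 * namespace depth needs its own slab cache. pid_cache records the caches
 * already created (keyed by nr_ids) so namespaces at the same depth can
 * share one cache.
 */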
struct pid_cache {
        int nr_ids;
        char name[16];
        struct kmem_cache *cachep;
        struct list_head list;
};

static LIST_HEAD(pid_caches_lh);
static DEFINE_MUTEX(pid_caches_mutex);
static struct kmem_cache *pid_ns_cachep;

/*
 * creates the kmem cache to allocate pids from.
 * @nr_ids: the number of numerical ids this pid will have to carry
 */

static struct kmem_cache *create_pid_cachep(int nr_ids)
{
        struct pid_cache *pcache;
        struct kmem_cache *cachep;

        mutex_lock(&pid_caches_mutex);
        list_for_each_entry(pcache, &pid_caches_lh, list)
                if (pcache->nr_ids == nr_ids)
                        goto out;

        pcache = kmalloc(sizeof(struct pid_cache), GFP_KERNEL);
        if (pcache == NULL)
                goto err_alloc;

        snprintf(pcache->name, sizeof(pcache->name), "pid_%d", nr_ids);
        cachep = kmem_cache_create(pcache->name,
                        sizeof(struct pid) + (nr_ids - 1) * sizeof(struct upid),
                        0, SLAB_HWCACHE_ALIGN, NULL);
        if (cachep == NULL)
                goto err_cachep;

        pcache->nr_ids = nr_ids;
        pcache->cachep = cachep;
        list_add(&pcache->list, &pid_caches_lh);
out:
        mutex_unlock(&pid_caches_mutex);
        return pcache->cachep;

err_cachep:
        kfree(pcache);
err_alloc:
        mutex_unlock(&pid_caches_mutex);
        return NULL;
}

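/*
 * Allocates and initializes a pid namespace one level below
 * @parent_pid_ns. Bit 0 of the first pidmap page is set so that pid 0 is
 * never handed out, and the pid cache is sized for level + 1 numerical
 * ids per struct pid. Returns ERR_PTR(-ENOMEM) on allocation failure.
 */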
static struct pid_namespace *create_pid_namespace(struct pid_namespace *parent_pid_ns)
{
        struct pid_namespace *ns;
        unsigned int level = parent_pid_ns->level + 1;
        int i;

        ns = kmem_cache_zalloc(pid_ns_cachep, GFP_KERNEL);
        if (ns == NULL)
                goto out;

        ns->pidmap[0].page = kzalloc(PAGE_SIZE, GFP_KERNEL);
        if (!ns->pidmap[0].page)
                goto out_free;

        ns->pid_cachep = create_pid_cachep(level + 1);
        if (ns->pid_cachep == NULL)
                goto out_free_map;

        kref_init(&ns->kref);
        ns->level = level;
        ns->parent = get_pid_ns(parent_pid_ns);

        set_bit(0, ns->pidmap[0].page);
        atomic_set(&ns->pidmap[0].nr_free, BITS_PER_PAGE - 1);

        for (i = 1; i < PIDMAP_ENTRIES; i++)
                atomic_set(&ns->pidmap[i].nr_free, BITS_PER_PAGE);

        return ns;

out_free_map:
        kfree(ns->pidmap[0].page);
out_free:
        kmem_cache_free(pid_ns_cachep, ns);
out:
        return ERR_PTR(-ENOMEM);
}

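/*
 * Releases every pidmap page and returns the namespace object to its
 * slab cache.
 */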
static void destroy_pid_namespace(struct pid_namespace *ns)
{
        int i;

        for (i = 0; i < PIDMAP_ENTRIES; i++)
                kfree(ns->pidmap[i].page);
        kmem_cache_free(pid_ns_cachep, ns);
}

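/*
 * Decides whether a clone with @flags gets a fresh pid namespace or keeps
 * using @old_ns: without CLONE_NEWPID the old namespace is reused and its
 * reference count bumped; CLONE_NEWPID combined with CLONE_THREAD or
 * CLONE_PARENT is rejected with -EINVAL.
 *
 * A rough user-space sketch of the trigger (child_fn and stack_top are
 * placeholders, not part of this file):
 *
 *	pid = clone(child_fn, stack_top, CLONE_NEWPID | SIGCHLD, NULL);
 */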
struct pid_namespace *copy_pid_ns(unsigned long flags, struct pid_namespace *old_ns)
{
        if (!(flags & CLONE_NEWPID))
                return get_pid_ns(old_ns);
        if (flags & (CLONE_THREAD|CLONE_PARENT))
                return ERR_PTR(-EINVAL);
        return create_pid_namespace(old_ns);
}

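/*
 * kref release callback: tears down the namespace and then drops the
 * reference it held on its parent.
 */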
void free_pid_ns(struct kref *kref)
{
        struct pid_namespace *ns, *parent;

        ns = container_of(kref, struct pid_namespace, kref);

        parent = ns->parent;
        destroy_pid_namespace(ns);

        if (parent != NULL)
                put_pid_ns(parent);
}

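/*
 * Called when the namespace's init task is exiting: SIGKILLs every
 * remaining task in the namespace and reaps the children before the
 * namespace itself is torn down.
 */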
void zap_pid_ns_processes(struct pid_namespace *pid_ns)
{
        int nr;
        int rc;
        struct task_struct *task;

        /*
         * The last thread in the cgroup-init thread group is terminating.
         * Find the remaining pids in the namespace, signal them and wait
         * for them to exit.
         *
         * Note: This signals each thread in the namespace - even those that
         *       belong to the same thread group. To avoid this, we would have
         *       to walk the entire tasklist looking for processes in this
         *       namespace, but that could be unnecessarily expensive if the
         *       pid namespace has just a few processes. Or we need to
         *       maintain a tasklist for each pid namespace.
         */
        read_lock(&tasklist_lock);
        nr = next_pidmap(pid_ns, 1);
        while (nr > 0) {
                rcu_read_lock();

                /*
                 * Any nested-container's init processes won't ignore the
                 * SEND_SIG_NOINFO signal, see send_signal()->si_fromuser().
                 */
                task = pid_task(find_vpid(nr), PIDTYPE_PID);
                if (task)
                        send_sig_info(SIGKILL, SEND_SIG_NOINFO, task);

                rcu_read_unlock();

                nr = next_pidmap(pid_ns, nr);
        }
        read_unlock(&tasklist_lock);

        do {
                clear_thread_flag(TIF_SIGPENDING);
                rc = sys_wait4(-1, NULL, __WALL, NULL);
        } while (rc != -ECHILD);

        acct_exit_ns(pid_ns);
        return;
}

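/*
 * Boot-time setup: creates the slab cache backing struct pid_namespace
 * allocations. SLAB_PANIC makes boot fail loudly if the cache cannot be
 * created.
 */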
static __init int pid_namespaces_init(void)
{
        pid_ns_cachep = KMEM_CACHE(pid_namespace, SLAB_PANIC);
        return 0;
}

__initcall(pid_namespaces_init);