/*
 * 2002-10-18  written by Jim Houston jim.houston@ccur.com
 *	Copyright (C) 2002 by Concurrent Computer Corporation
 *	Distributed under the GNU GPL license version 2.
 *
 * Modified by George Anzinger to reuse immediately and to use
 * find bit instructions.  Also removed _irq on spinlocks.
 *
 * Small id to pointer translation service.
 *
 * It uses a radix tree like structure as a sparse array indexed
 * by the id to obtain the pointer.  The bitmap makes allocating
 * a new id quick.
 *
 * You call it to allocate an id (an int) and associate a pointer (or
 * whatever, we treat it as a (void *)) with that id.  You can pass this
 * id to a user for him to pass back at a later time.  You then pass
 * that id to this code and it returns your pointer.
 *
 * You can release ids at any time.  When all ids are released, most of
 * the memory is returned (we keep up to IDR_FREE_MAX layers in a local
 * pool), so we don't need to go to the memory "store" during an id
 * allocate and you don't need to be too concerned about locking and
 * conflicts with the slab allocator.
 */
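
/*
 * A minimal round trip looks like the sketch below ("my_idr" and
 * "my_obj" are hypothetical names, error handling is elided):
 *
 *	static DEFINE_IDR(my_idr);
 *	int id;
 *
 *	if (idr_pre_get(&my_idr, GFP_KERNEL) == 0)
 *		return -ENOMEM;
 *	if (idr_get_new(&my_idr, my_obj, &id) == 0) {
 *		// hand "id" out; later idr_find(&my_idr, id)
 *		// translates it back into my_obj
 *	}
 */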

#ifndef TEST			// to test in user space...
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
#endif
#include <linux/err.h>
#include <linux/string.h>
#include <linux/idr.h>

static struct kmem_cache *idr_layer_cache;

static struct idr_layer *alloc_layer(struct idr *idp)
{
        struct idr_layer *p;
        unsigned long flags;

        spin_lock_irqsave(&idp->lock, flags);
        if ((p = idp->id_free)) {
                idp->id_free = p->ary[0];
                idp->id_free_cnt--;
                p->ary[0] = NULL;
        }
        spin_unlock_irqrestore(&idp->lock, flags);
        return(p);
}

/* only called when idp->lock is held */
static void __free_layer(struct idr *idp, struct idr_layer *p)
{
        p->ary[0] = idp->id_free;
        idp->id_free = p;
        idp->id_free_cnt++;
}

static void free_layer(struct idr *idp, struct idr_layer *p)
{
        unsigned long flags;

        /*
         * Depends on the return element being zeroed.
         */
        spin_lock_irqsave(&idp->lock, flags);
        __free_layer(idp, p);
        spin_unlock_irqrestore(&idp->lock, flags);
}

/**
 * idr_pre_get - reserve resources for idr allocation
 * @idp: idr handle
 * @gfp_mask: memory allocation flags
 *
 * This function should be called prior to locking and calling
 * idr_get_new() or idr_get_new_above().  It preallocates enough
 * memory to satisfy the worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns 0,
 * otherwise 1.
 */
int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
{
        while (idp->id_free_cnt < IDR_FREE_MAX) {
                struct idr_layer *new;
                new = kmem_cache_alloc(idr_layer_cache, gfp_mask);
                if (new == NULL)
                        return (0);
                free_layer(idp, new);
        }
        return 1;
}
EXPORT_SYMBOL(idr_pre_get);
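
/*
 * The canonical calling pattern is the retry loop sketched below (not
 * mandated code; "my_lock" is a hypothetical lock guarding this idr).
 * Another allocator may consume the preallocated layers between the
 * unlocked idr_pre_get() and the locked idr_get_new(), which is what
 * the -EAGAIN branch handles:
 *
 *	again:
 *		if (idr_pre_get(&my_idr, GFP_KERNEL) == 0)
 *			return -ENOMEM;
 *		spin_lock(&my_lock);
 *		err = idr_get_new(&my_idr, my_obj, &id);
 *		spin_unlock(&my_lock);
 *		if (err == -EAGAIN)
 *			goto again;	// preallocation was consumed; retry
 *		else if (err)
 *			return err;	// -ENOSPC: id space exhausted
 */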

static int sub_alloc(struct idr *idp, void *ptr, int *starting_id)
{
        int n, m, sh;
        struct idr_layer *p, *new;
        struct idr_layer *pa[MAX_LEVEL];
        int l, id, oid;
        long bm;

        id = *starting_id;
 restart:
        p = idp->top;
        l = idp->layers;
        pa[l--] = NULL;
        while (1) {
                /*
                 * We run around this while loop until we reach the leaf node...
                 */
                n = (id >> (IDR_BITS*l)) & IDR_MASK;
                bm = ~p->bitmap;
                m = find_next_bit(&bm, IDR_SIZE, n);
                if (m == IDR_SIZE) {
                        /* no space available, go back to previous layer. */
                        l++;
                        oid = id;
                        id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;

                        /* if already at the top layer, we need to grow */
                        if (!(p = pa[l])) {
                                *starting_id = id;
                                return -2;
                        }

                        /* If we need to go up one layer, continue the
                         * loop; otherwise, restart from the top.
                         */
                        sh = IDR_BITS * (l + 1);
                        if (oid >> sh == id >> sh)
                                continue;
                        else
                                goto restart;
                }
                if (m != n) {
                        sh = IDR_BITS*l;
                        id = ((id >> sh) ^ n ^ m) << sh;
                }
                if ((id >= MAX_ID_BIT) || (id < 0))
                        return -3;
                if (l == 0)
                        break;
                /*
                 * Create the layer below if it is missing.
                 */
                if (!p->ary[m]) {
                        if (!(new = alloc_layer(idp)))
                                return -1;
                        p->ary[m] = new;
                        p->count++;
                }
                pa[l--] = p;
                p = p->ary[m];
        }
        /*
         * We have reached the leaf node, plant the
         * user's pointer and return the raw id.
         */
        p->ary[m] = (struct idr_layer *)ptr;
        __set_bit(m, &p->bitmap);
        p->count++;
        /*
         * If this layer is full mark the bit in the layer above
         * to show that this part of the radix tree is full.
         * This may complete the layer above and require walking
         * up the radix tree.
         */
        n = id;
        while (p->bitmap == IDR_FULL) {
                if (!(p = pa[++l]))
                        break;
                n = n >> IDR_BITS;
                __set_bit((n & IDR_MASK), &p->bitmap);
        }
        return(id);
}

static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
{
        struct idr_layer *p, *new;
        int layers, v, id;
        unsigned long flags;

        id = starting_id;
build_up:
        p = idp->top;
        layers = idp->layers;
        if (unlikely(!p)) {
                if (!(p = alloc_layer(idp)))
                        return -1;
                layers = 1;
        }
        /*
         * Add a new layer to the top of the tree if the requested
         * id is larger than the currently allocated space.
         */
        while ((layers < (MAX_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) {
                layers++;
                if (!p->count)
                        continue;
                if (!(new = alloc_layer(idp))) {
                        /*
                         * The allocation failed.  If we built part of
                         * the structure tear it down.
                         */
                        spin_lock_irqsave(&idp->lock, flags);
                        for (new = p; p && p != idp->top; new = p) {
                                p = p->ary[0];
                                new->ary[0] = NULL;
                                new->bitmap = new->count = 0;
                                __free_layer(idp, new);
                        }
                        spin_unlock_irqrestore(&idp->lock, flags);
                        return -1;
                }
                new->ary[0] = p;
                new->count = 1;
                if (p->bitmap == IDR_FULL)
                        __set_bit(0, &new->bitmap);
                p = new;
        }
        idp->top = p;
        idp->layers = layers;
        v = sub_alloc(idp, ptr, &id);
        if (v == -2)
                goto build_up;
        return(v);
}

/**
 * idr_get_new_above - allocate new idr entry above or equal to a start id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @starting_id: id to start search at
 * @id: pointer to the allocated handle
 *
 * This is the allocate id function.  It should be called with any
 * required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the idr_pre_get() call.  If the idr is full, it will
 * return -ENOSPC.
 *
 * @id returns a value in the range @starting_id ... 0x7fffffff
 */
int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
{
        int rv;

        rv = idr_get_new_above_int(idp, ptr, starting_id);
        /*
         * This is a cheap hack until the IDR code can be fixed to
         * return proper error values.
         */
        if (rv < 0) {
                if (rv == -1)
                        return -EAGAIN;
                else /* Will be -3 */
                        return -ENOSPC;
        }
        *id = rv;
        return 0;
}
EXPORT_SYMBOL(idr_get_new_above);
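
/*
 * Example: a caller can keep ids below 16 reserved for fixed purposes
 * by asking the allocator to start searching at 16 (illustrative
 * sketch only; "my_idr" and "my_obj" are hypothetical):
 *
 *	int id, err;
 *
 *	err = idr_get_new_above(&my_idr, my_obj, 16, &id);
 *	if (!err)
 *		BUG_ON(id < 16);	// never returns an id below the start
 */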

/**
 * idr_get_new - allocate new idr entry
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: pointer to the allocated handle
 *
 * This is the allocate id function.  It should be called with any
 * required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the idr_pre_get() call.  If the idr is full, it will
 * return -ENOSPC.
 *
 * @id returns a value in the range 0 ... 0x7fffffff
 */
int idr_get_new(struct idr *idp, void *ptr, int *id)
{
        int rv;

        rv = idr_get_new_above_int(idp, ptr, 0);
        /*
         * This is a cheap hack until the IDR code can be fixed to
         * return proper error values.
         */
        if (rv < 0) {
                if (rv == -1)
                        return -EAGAIN;
                else /* Will be -3 */
                        return -ENOSPC;
        }
        *id = rv;
        return 0;
}
EXPORT_SYMBOL(idr_get_new);

static void idr_remove_warning(int id)
{
        printk(KERN_WARNING
                "idr_remove called for id=%d which is not allocated.\n", id);
        dump_stack();
}

static void sub_remove(struct idr *idp, int shift, int id)
{
        struct idr_layer *p = idp->top;
        struct idr_layer **pa[MAX_LEVEL];
        struct idr_layer ***paa = &pa[0];
        int n;

        *paa = NULL;
        *++paa = &idp->top;

        while ((shift > 0) && p) {
                n = (id >> shift) & IDR_MASK;
                __clear_bit(n, &p->bitmap);
                *++paa = &p->ary[n];
                p = p->ary[n];
                shift -= IDR_BITS;
        }
        n = id & IDR_MASK;
        if (likely(p != NULL && test_bit(n, &p->bitmap))) {
                __clear_bit(n, &p->bitmap);
                p->ary[n] = NULL;
                while (*paa && !--((**paa)->count)) {
                        free_layer(idp, **paa);
                        **paa-- = NULL;
                }
                if (!*paa)
                        idp->layers = 0;
        } else
                idr_remove_warning(id);
}

/**
 * idr_remove - remove the given id and free its slot
 * @idp: idr handle
 * @id: unique key
 */
void idr_remove(struct idr *idp, int id)
{
        struct idr_layer *p;

        /* Mask off upper bits we don't use for the search. */
        id &= MAX_ID_MASK;

        sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
        if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
            idp->top->ary[0]) {
                /*
                 * The top layer has a single child at slot 0:
                 * we can drop a layer.
                 */
                p = idp->top->ary[0];
                idp->top->bitmap = idp->top->count = 0;
                free_layer(idp, idp->top);
                idp->top = p;
                --idp->layers;
        }
        /* If the free list has grown past IDR_FREE_MAX, return one
         * cached layer to the slab allocator.
         */
        while (idp->id_free_cnt >= IDR_FREE_MAX) {
                p = alloc_layer(idp);
                kmem_cache_free(idr_layer_cache, p);
                return;
        }
}
EXPORT_SYMBOL(idr_remove);

/**
 * idr_destroy - release all cached layers within an idr tree
 * @idp: idr handle
 */
void idr_destroy(struct idr *idp)
{
        while (idp->id_free_cnt) {
                struct idr_layer *p = alloc_layer(idp);
                kmem_cache_free(idr_layer_cache, p);
        }
}
EXPORT_SYMBOL(idr_destroy);
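
/*
 * Note that idr_destroy() only drains the per-idr free list; it does
 * not walk the tree.  A teardown therefore removes every live id first,
 * as in this sketch ("my_objects" bookkeeping is hypothetical):
 *
 *	list_for_each_entry(obj, &my_objects, list)	// your own list
 *		idr_remove(&my_idr, obj->id);
 *	idr_destroy(&my_idr);	// return the cached layers to the slab
 */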

/**
 * idr_find - return pointer for given id
 * @idp: idr handle
 * @id: lookup key
 *
 * Return the pointer given the id it has been registered with.  A %NULL
 * return indicates that @id is not valid or you passed %NULL in
 * idr_get_new().
 *
 * The caller must serialize idr_find() vs idr_get_new() and idr_remove().
 */
void *idr_find(struct idr *idp, int id)
{
        int n;
        struct idr_layer *p;

        n = idp->layers * IDR_BITS;
        p = idp->top;

        /* Mask off upper bits we don't use for the search. */
        id &= MAX_ID_MASK;

        if (id >= (1 << n))
                return NULL;

        while (n > 0 && p) {
                n -= IDR_BITS;
                p = p->ary[(id >> n) & IDR_MASK];
        }
        return((void *)p);
}
EXPORT_SYMBOL(idr_find);
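
/*
 * Because idr_find() must be serialized against idr_get_new() and
 * idr_remove(), a lookup typically runs under the same lock that guards
 * allocation (sketch; "my_lock" is a hypothetical lock):
 *
 *	spin_lock(&my_lock);
 *	obj = idr_find(&my_idr, id);
 *	spin_unlock(&my_lock);
 *	if (!obj)
 *		return -ENOENT;	// id was never allocated or was removed
 */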

/**
 * idr_replace - replace pointer for given id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: lookup key
 *
 * Replace the pointer registered with an id and return the old value.
 * A -ENOENT return indicates that @id was not found.
 * A -EINVAL return indicates that @id was not within valid constraints.
 *
 * The caller must serialize vs idr_find(), idr_get_new(), and idr_remove().
 */
void *idr_replace(struct idr *idp, void *ptr, int id)
{
        int n;
        struct idr_layer *p, *old_p;

        n = idp->layers * IDR_BITS;
        p = idp->top;

        id &= MAX_ID_MASK;

        if (id >= (1 << n))
                return ERR_PTR(-EINVAL);

        n -= IDR_BITS;
        while ((n > 0) && p) {
                p = p->ary[(id >> n) & IDR_MASK];
                n -= IDR_BITS;
        }

        n = id & IDR_MASK;
        if (unlikely(p == NULL || !test_bit(n, &p->bitmap)))
                return ERR_PTR(-ENOENT);

        old_p = p->ary[n];
        p->ary[n] = ptr;

        return old_p;
}
EXPORT_SYMBOL(idr_replace);
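
/*
 * Unlike idr_find(), idr_replace() reports failure through ERR_PTR()
 * rather than %NULL, so check the result with IS_ERR() (sketch,
 * hypothetical names):
 *
 *	old = idr_replace(&my_idr, new_obj, id);
 *	if (IS_ERR(old))
 *		return PTR_ERR(old);	// -EINVAL or -ENOENT
 *	kfree(old);	// caller now owns the previous pointer
 */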

static void idr_cache_ctor(void *idr_layer, struct kmem_cache *idr_layer_cache,
                unsigned long flags)
{
        memset(idr_layer, 0, sizeof(struct idr_layer));
}

static int init_id_cache(void)
{
        if (!idr_layer_cache)
                idr_layer_cache = kmem_cache_create("idr_layer_cache",
                        sizeof(struct idr_layer), 0, 0, idr_cache_ctor, NULL);
        return 0;
}

/**
 * idr_init - initialize idr handle
 * @idp: idr handle
 *
 * This function is used to set up the handle (@idp) that you will pass
 * to the rest of the functions.
 */
void idr_init(struct idr *idp)
{
        init_id_cache();
        memset(idp, 0, sizeof(struct idr));
        spin_lock_init(&idp->lock);
}
EXPORT_SYMBOL(idr_init);