/*
 * 2002-10-18  written by Jim Houston jim.houston@ccur.com
 *      Copyright (C) 2002 by Concurrent Computer Corporation
 *      Distributed under the GNU GPL license version 2.
 *
 * Modified by George Anzinger to reuse immediately and to use
 * find bit instructions.  Also removed _irq on spinlocks.
 *
 * Modified by Nadia Derbey to make it RCU safe.
 *
 * Small id to pointer translation service.
 *
 * It uses a radix tree like structure as a sparse array indexed
 * by the id to obtain the pointer.  The bitmap makes allocating
 * a new id quick.
 *
 * You call it to allocate an id (an int) and associate a pointer (or
 * whatever, we treat it as a (void *)) with that id.  You can pass this
 * id to a user to pass back at a later time.  You then pass that id to
 * this code and it returns your pointer.
 *
 * You can release ids at any time.  When all ids are released, most of
 * the memory is returned (we keep IDR_FREE_MAX) in a local pool so we
 * don't need to go to the memory "store" during an id allocate, and so
 * you don't need to be too concerned about locking and conflicts with
 * the slab allocator.
 */

#ifndef TEST                    // to test in user space...
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
#endif
#include <linux/err.h>
#include <linux/string.h>
#include <linux/idr.h>

static struct kmem_cache *idr_layer_cache;

static struct idr_layer *get_from_free_list(struct idr *idp)
{
        struct idr_layer *p;
        unsigned long flags;

        spin_lock_irqsave(&idp->lock, flags);
        if ((p = idp->id_free)) {
                idp->id_free = p->ary[0];
                idp->id_free_cnt--;
                p->ary[0] = NULL;
        }
        spin_unlock_irqrestore(&idp->lock, flags);
        return p;
}

static void idr_layer_rcu_free(struct rcu_head *head)
{
        struct idr_layer *layer;

        layer = container_of(head, struct idr_layer, rcu_head);
        kmem_cache_free(idr_layer_cache, layer);
}

static inline void free_layer(struct idr_layer *p)
{
        call_rcu(&p->rcu_head, idr_layer_rcu_free);
}

/* only called when idp->lock is held */
static void __move_to_free_list(struct idr *idp, struct idr_layer *p)
{
        p->ary[0] = idp->id_free;
        idp->id_free = p;
        idp->id_free_cnt++;
}

static void move_to_free_list(struct idr *idp, struct idr_layer *p)
{
        unsigned long flags;

        /*
         * Depends on the return element being zeroed.
         */
        spin_lock_irqsave(&idp->lock, flags);
        __move_to_free_list(idp, p);
        spin_unlock_irqrestore(&idp->lock, flags);
}

static void idr_mark_full(struct idr_layer **pa, int id)
{
        struct idr_layer *p = pa[0];
        int l = 0;

        __set_bit(id & IDR_MASK, &p->bitmap);
        /*
         * If this layer is full mark the bit in the layer above to
         * show that this part of the radix tree is full.  This may
         * complete the layer above and require walking up the radix
         * tree.
         */
        while (p->bitmap == IDR_FULL) {
                if (!(p = pa[++l]))
                        break;
                id = id >> IDR_BITS;
                __set_bit((id & IDR_MASK), &p->bitmap);
        }
}

/**
 * idr_pre_get - reserve resources for idr allocation
 * @idp: idr handle
 * @gfp_mask: memory allocation flags
 *
 * This function should be called prior to locking and calling the
 * idr_get_new* functions.  It preallocates enough memory to satisfy
 * the worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns 0,
 * otherwise 1.
 */
int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
{
        while (idp->id_free_cnt < IDR_FREE_MAX) {
                struct idr_layer *new;
                new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
                if (new == NULL)
                        return 0;
                move_to_free_list(idp, new);
        }
        return 1;
}
EXPORT_SYMBOL(idr_pre_get);

static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
{
        int n, m, sh;
        struct idr_layer *p, *new;
        int l, id, oid;
        unsigned long bm;

        id = *starting_id;
restart:
        p = idp->top;
        l = idp->layers;
        pa[l--] = NULL;
        while (1) {
                /*
                 * We run around this while until we reach the leaf node...
                 */
                n = (id >> (IDR_BITS*l)) & IDR_MASK;
                bm = ~p->bitmap;
                m = find_next_bit(&bm, IDR_SIZE, n);
                if (m == IDR_SIZE) {
                        /* no space available go back to previous layer. */
                        l++;
                        oid = id;
                        id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;

                        /* if already at the top layer, we need to grow */
                        if (!(p = pa[l])) {
                                *starting_id = id;
                                return IDR_NEED_TO_GROW;
                        }

                        /* If we need to go up one layer, continue the
                         * loop; otherwise, restart from the top.
                         */
                        sh = IDR_BITS * (l + 1);
                        if (oid >> sh == id >> sh)
                                continue;
                        else
                                goto restart;
                }
                if (m != n) {
                        sh = IDR_BITS*l;
                        id = ((id >> sh) ^ n ^ m) << sh;
                }
                if ((id >= MAX_ID_BIT) || (id < 0))
                        return IDR_NOMORE_SPACE;
                if (l == 0)
                        break;
                /*
                 * Create the layer below if it is missing.
                 */
                if (!p->ary[m]) {
                        new = get_from_free_list(idp);
                        if (!new)
                                return -1;
                        new->layer = l-1;
                        rcu_assign_pointer(p->ary[m], new);
                        p->count++;
                }
                pa[l--] = p;
                p = p->ary[m];
        }

        pa[l] = p;
        return id;
}

static int idr_get_empty_slot(struct idr *idp, int starting_id,
                              struct idr_layer **pa)
{
        struct idr_layer *p, *new;
        int layers, v, id;
        unsigned long flags;

        id = starting_id;
build_up:
        p = idp->top;
        layers = idp->layers;
        if (unlikely(!p)) {
                if (!(p = get_from_free_list(idp)))
                        return -1;
                p->layer = 0;
                layers = 1;
        }
        /*
         * Add a new layer to the top of the tree if the requested
         * id is larger than the currently allocated space.
         */
        while ((layers < (MAX_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) {
                layers++;
                if (!p->count) {
                        /* special case: if the tree is currently empty,
                         * then we grow the tree by moving the top node
                         * upwards.
                         */
                        p->layer++;
                        continue;
                }
                if (!(new = get_from_free_list(idp))) {
                        /*
                         * The allocation failed.  If we built part of
                         * the structure tear it down.
                         */
                        spin_lock_irqsave(&idp->lock, flags);
                        for (new = p; p && p != idp->top; new = p) {
                                p = p->ary[0];
                                new->ary[0] = NULL;
                                new->bitmap = new->count = 0;
                                __move_to_free_list(idp, new);
                        }
                        spin_unlock_irqrestore(&idp->lock, flags);
                        return -1;
                }
                new->ary[0] = p;
                new->count = 1;
                new->layer = layers-1;
                if (p->bitmap == IDR_FULL)
                        __set_bit(0, &new->bitmap);
                p = new;
        }
        rcu_assign_pointer(idp->top, p);
        idp->layers = layers;
        v = sub_alloc(idp, &id, pa);
        if (v == IDR_NEED_TO_GROW)
                goto build_up;
        return v;
}

static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
{
        struct idr_layer *pa[MAX_LEVEL];
        int id;

        id = idr_get_empty_slot(idp, starting_id, pa);
        if (id >= 0) {
                /*
                 * Successfully found an empty slot.  Install the user
                 * pointer and mark the slot full.
                 */
                rcu_assign_pointer(pa[0]->ary[id & IDR_MASK],
                                (struct idr_layer *)ptr);
                pa[0]->count++;
                idr_mark_full(pa, id);
        }

        return id;
}

/**
 * idr_get_new_above - allocate new idr entry above or equal to a start id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @starting_id: id to start search at
 * @id: pointer to the allocated handle
 *
 * This is the allocate id function.  It should be called with any
 * required locks.
 *
 * If memory is required, it will return -EAGAIN; you should unlock
 * and go back to the idr_pre_get() call.  If the idr is full, it will
 * return -ENOSPC.
 *
 * @id returns a value in the range @starting_id ... 0x7fffffff
 */
int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
{
        int rv;

        rv = idr_get_new_above_int(idp, ptr, starting_id);
        /*
         * This is a cheap hack until the IDR code can be fixed to
         * return proper error values.
         */
        if (rv < 0)
                return _idr_rc_to_errno(rv);
        *id = rv;
        return 0;
}
EXPORT_SYMBOL(idr_get_new_above);

/**
 * idr_get_new - allocate new idr entry
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: pointer to the allocated handle
 *
 * This is the allocate id function.  It should be called with any
 * required locks.
 *
 * If memory is required, it will return -EAGAIN; you should unlock
 * and go back to the idr_pre_get() call.  If the idr is full, it will
 * return -ENOSPC.
 *
 * @id returns a value in the range 0 ... 0x7fffffff
 */
int idr_get_new(struct idr *idp, void *ptr, int *id)
{
        int rv;

        rv = idr_get_new_above_int(idp, ptr, 0);
        /*
         * This is a cheap hack until the IDR code can be fixed to
         * return proper error values.
         */
        if (rv < 0)
                return _idr_rc_to_errno(rv);
        *id = rv;
        return 0;
}
EXPORT_SYMBOL(idr_get_new);
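
/*
 * Illustrative sketch, not part of the original file: the canonical
 * idr_pre_get()/idr_get_new() retry loop described above.  Memory is
 * preallocated outside the caller's lock, the id is allocated under
 * the lock, and -EAGAIN restarts the cycle.  idr_get_new_above() is
 * used the same way when ids below some minimum must be avoided.  The
 * names example_lock, example_idr and example_alloc_id are
 * hypothetical.
 */
#ifdef IDR_USAGE_EXAMPLE
static DEFINE_SPINLOCK(example_lock);
static DEFINE_IDR(example_idr);

static int example_alloc_id(void *ptr)
{
        int id, ret;

        do {
                if (!idr_pre_get(&example_idr, GFP_KERNEL))
                        return -ENOMEM;         /* really out of memory */
                spin_lock(&example_lock);
                ret = idr_get_new(&example_idr, ptr, &id);
                spin_unlock(&example_lock);
        } while (ret == -EAGAIN);

        return ret ? ret : id;  /* 0 <= id <= 0x7fffffff on success */
}
#endif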

static void idr_remove_warning(int id)
{
        printk(KERN_WARNING
                "idr_remove called for id=%d which is not allocated.\n", id);
        dump_stack();
}

static void sub_remove(struct idr *idp, int shift, int id)
{
        struct idr_layer *p = idp->top;
        struct idr_layer **pa[MAX_LEVEL];
        struct idr_layer ***paa = &pa[0];
        struct idr_layer *to_free;
        int n;

        *paa = NULL;
        *++paa = &idp->top;

        while ((shift > 0) && p) {
                n = (id >> shift) & IDR_MASK;
                __clear_bit(n, &p->bitmap);
                *++paa = &p->ary[n];
                p = p->ary[n];
                shift -= IDR_BITS;
        }
        n = id & IDR_MASK;
        if (likely(p != NULL && test_bit(n, &p->bitmap))) {
                __clear_bit(n, &p->bitmap);
                rcu_assign_pointer(p->ary[n], NULL);
                to_free = NULL;
                while (*paa && !--((**paa)->count)) {
                        if (to_free)
                                free_layer(to_free);
                        to_free = **paa;
                        **paa-- = NULL;
                }
                if (!*paa)
                        idp->layers = 0;
                if (to_free)
                        free_layer(to_free);
        } else
                idr_remove_warning(id);
}

/**
 * idr_remove - remove the given id and free its slot
 * @idp: idr handle
 * @id: unique key
 */
void idr_remove(struct idr *idp, int id)
{
        struct idr_layer *p;
        struct idr_layer *to_free;

        /* Mask off upper bits we don't use for the search. */
        id &= MAX_ID_MASK;

        sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
        if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
            idp->top->ary[0]) {
                /*
                 * Single child at leftmost slot: we can shrink the tree.
                 * This level is not needed anymore since when layers are
                 * inserted, they are inserted at the top of the existing
                 * tree.
                 */
                to_free = idp->top;
                p = idp->top->ary[0];
                rcu_assign_pointer(idp->top, p);
                --idp->layers;
                to_free->bitmap = to_free->count = 0;
                free_layer(to_free);
        }
        while (idp->id_free_cnt >= IDR_FREE_MAX) {
                p = get_from_free_list(idp);
                /*
                 * Note: we don't call the rcu callback here, since the only
                 * layers that fall into the freelist are those that have been
                 * preallocated.
                 */
                kmem_cache_free(idr_layer_cache, p);
        }
}
EXPORT_SYMBOL(idr_remove);

/**
 * idr_remove_all - remove all ids from the given idr tree
 * @idp: idr handle
 *
 * idr_destroy() only frees up unused, cached idr_layers, but this
 * function will remove all id mappings and leave all idr_layers
 * unused.
 *
 * A typical clean-up sequence for objects stored in an idr tree will
 * use idr_for_each() to free all objects, if necessary, then
 * idr_remove_all() to remove all ids, and idr_destroy() to free
 * up the cached idr_layers.
 */
void idr_remove_all(struct idr *idp)
{
        int n, id, max;
        struct idr_layer *p;
        struct idr_layer *pa[MAX_LEVEL];
        struct idr_layer **paa = &pa[0];

        n = idp->layers * IDR_BITS;
        p = idp->top;
        rcu_assign_pointer(idp->top, NULL);
        max = 1 << n;

        id = 0;
        while (id < max) {
                while (n > IDR_BITS && p) {
                        n -= IDR_BITS;
                        *paa++ = p;
                        p = p->ary[(id >> n) & IDR_MASK];
                }

                id += 1 << n;
                while (n < fls(id)) {
                        if (p)
                                free_layer(p);
                        n += IDR_BITS;
                        p = *--paa;
                }
        }
        idp->layers = 0;
}
EXPORT_SYMBOL(idr_remove_all);

/**
 * idr_destroy - release all cached layers within an idr tree
 * @idp: idr handle
 */
void idr_destroy(struct idr *idp)
{
        while (idp->id_free_cnt) {
                struct idr_layer *p = get_from_free_list(idp);
                kmem_cache_free(idr_layer_cache, p);
        }
}
EXPORT_SYMBOL(idr_destroy);
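
/*
 * Illustrative sketch, not part of the original file, of the clean-up
 * sequence described in the idr_remove_all() comment: free the stored
 * objects with idr_for_each(), drop the id mappings, then release the
 * cached layers.  example_free_cb and example_cleanup are hypothetical
 * names, and the callback assumes the stored pointers came from
 * kmalloc().
 */
#ifdef IDR_USAGE_EXAMPLE
static int example_free_cb(int id, void *p, void *data)
{
        kfree(p);
        return 0;
}

static void example_cleanup(struct idr *idp)
{
        idr_for_each(idp, example_free_cb, NULL);
        idr_remove_all(idp);
        idr_destroy(idp);
}
#endif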

/**
 * idr_find - return pointer for given id
 * @idp: idr handle
 * @id: lookup key
 *
 * Return the pointer given the id it has been registered with.  A %NULL
 * return indicates that @id is not valid or you passed %NULL in
 * idr_get_new().
 *
 * This function can be called under rcu_read_lock(), provided that the
 * leaf pointers' lifetimes are correctly managed.
 */
void *idr_find(struct idr *idp, int id)
{
        int n;
        struct idr_layer *p;

        p = rcu_dereference(idp->top);
        if (!p)
                return NULL;
        n = (p->layer+1) * IDR_BITS;

        /* Mask off upper bits we don't use for the search. */
        id &= MAX_ID_MASK;

        if (id >= (1 << n))
                return NULL;
        BUG_ON(n == 0);

        while (n > 0 && p) {
                n -= IDR_BITS;
                BUG_ON(n != p->layer*IDR_BITS);
                p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
        }
        return (void *)p;
}
EXPORT_SYMBOL(idr_find);
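
/*
 * Illustrative sketch, not part of the original file: an RCU-side
 * lookup as described above.  The object must not be handed out of the
 * read-side critical section unless a reference is taken first; a
 * plain atomic_t refcount is assumed here.  struct example_obj and
 * example_lookup are hypothetical names.
 */
#ifdef IDR_USAGE_EXAMPLE
struct example_obj {
        atomic_t refcount;      /* freed via call_rcu() on removal */
};

static struct example_obj *example_lookup(struct idr *idp, int id)
{
        struct example_obj *obj;

        rcu_read_lock();
        obj = idr_find(idp, id);
        if (obj && !atomic_inc_not_zero(&obj->refcount))
                obj = NULL;     /* raced with the final put */
        rcu_read_unlock();

        return obj;
}
#endif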

/**
 * idr_for_each - iterate through all stored pointers
 * @idp: idr handle
 * @fn: function to be called for each pointer
 * @data: data passed back to callback function
 *
 * Iterate over the pointers registered with the given idr.  The
 * callback function will be called for each pointer currently
 * registered, passing the id, the pointer and the data pointer passed
 * to this function.  It is not safe to modify the idr tree while in
 * the callback, so functions such as idr_get_new and idr_remove are
 * not allowed.
 *
 * We check the return of @fn each time.  If it returns anything other
 * than 0, we break out and return that value.
 *
 * The caller must serialize idr_for_each() vs idr_get_new() and idr_remove().
 */
int idr_for_each(struct idr *idp,
                 int (*fn)(int id, void *p, void *data), void *data)
{
        int n, id, max, error = 0;
        struct idr_layer *p;
        struct idr_layer *pa[MAX_LEVEL];
        struct idr_layer **paa = &pa[0];

        n = idp->layers * IDR_BITS;
        p = rcu_dereference(idp->top);
        max = 1 << n;

        id = 0;
        while (id < max) {
                while (n > 0 && p) {
                        n -= IDR_BITS;
                        *paa++ = p;
                        p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
                }

                if (p) {
                        error = fn(id, (void *)p, data);
                        if (error)
                                break;
                }

                id += 1 << n;
                while (n < fls(id)) {
                        n += IDR_BITS;
                        p = *--paa;
                }
        }

        return error;
}
EXPORT_SYMBOL(idr_for_each);

/**
 * idr_get_next - lookup next object starting from the given id
 * @idp: idr handle
 * @nextidp: pointer to lookup key
 *
 * Returns a pointer to the registered object with the smallest id
 * greater than or equal to *@nextidp.  On success, *@nextidp is
 * updated to that id.
 */
void *idr_get_next(struct idr *idp, int *nextidp)
{
        struct idr_layer *p, *pa[MAX_LEVEL];
        struct idr_layer **paa = &pa[0];
        int id = *nextidp;
        int n, max;

        /* find first ent */
        n = idp->layers * IDR_BITS;
        max = 1 << n;
        p = rcu_dereference(idp->top);
        if (!p)
                return NULL;

        while (id < max) {
                while (n > 0 && p) {
                        n -= IDR_BITS;
                        *paa++ = p;
                        p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
                }

                if (p) {
                        *nextidp = id;
                        return p;
                }

                id += 1 << n;
                while (n < fls(id)) {
                        n += IDR_BITS;
                        p = *--paa;
                }
        }
        return NULL;
}

/**
 * idr_replace - replace pointer for given id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: lookup key
 *
 * Replace the pointer registered with an id and return the old value.
 * A -ENOENT return indicates that @id was not found.
 * A -EINVAL return indicates that @id was not within valid constraints.
 *
 * The caller must serialize with writers.
 */
void *idr_replace(struct idr *idp, void *ptr, int id)
{
        int n;
        struct idr_layer *p, *old_p;

        p = idp->top;
        if (!p)
                return ERR_PTR(-EINVAL);

        n = (p->layer+1) * IDR_BITS;

        id &= MAX_ID_MASK;

        if (id >= (1 << n))
                return ERR_PTR(-EINVAL);

        n -= IDR_BITS;
        while ((n > 0) && p) {
                p = p->ary[(id >> n) & IDR_MASK];
                n -= IDR_BITS;
        }

        n = id & IDR_MASK;
        if (unlikely(p == NULL || !test_bit(n, &p->bitmap)))
                return ERR_PTR(-ENOENT);

        old_p = p->ary[n];
        rcu_assign_pointer(p->ary[n], ptr);

        return old_p;
}
EXPORT_SYMBOL(idr_replace);
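
/*
 * Illustrative sketch, not part of the original file: swapping in a new
 * pointer for an existing id and decoding the ERR_PTR()-encoded return
 * value.  example_swap is a hypothetical name.
 */
#ifdef IDR_USAGE_EXAMPLE
static void *example_swap(struct idr *idp, int id, void *new_ptr)
{
        void *old;

        old = idr_replace(idp, new_ptr, id);
        if (IS_ERR(old))
                return NULL;    /* -EINVAL (bad id) or -ENOENT (not found) */

        return old;             /* caller now owns the old pointer */
}
#endif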

void __init idr_init_cache(void)
{
        idr_layer_cache = kmem_cache_create("idr_layer_cache",
                                sizeof(struct idr_layer), 0, SLAB_PANIC, NULL);
}

/**
 * idr_init - initialize idr handle
 * @idp: idr handle
 *
 * This function is used to set up the handle (@idp) that you will pass
 * to the rest of the functions.
 */
void idr_init(struct idr *idp)
{
        memset(idp, 0, sizeof(struct idr));
        spin_lock_init(&idp->lock);
}
EXPORT_SYMBOL(idr_init);


/*
 * IDA - IDR based ID allocator
 *
 * This is an id allocator without id -> pointer translation.  Memory
 * usage is much lower than full blown idr because each id only
 * occupies a bit.  ida uses a custom leaf node which contains
 * IDA_BITMAP_BITS slots.
 *
 * 2007-04-25  written by Tejun Heo <htejun@gmail.com>
 */

static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap)
{
        unsigned long flags;

        if (!ida->free_bitmap) {
                spin_lock_irqsave(&ida->idr.lock, flags);
                if (!ida->free_bitmap) {
                        ida->free_bitmap = bitmap;
                        bitmap = NULL;
                }
                spin_unlock_irqrestore(&ida->idr.lock, flags);
        }

        kfree(bitmap);
}

/**
 * ida_pre_get - reserve resources for ida allocation
 * @ida: ida handle
 * @gfp_mask: memory allocation flags
 *
 * This function should be called prior to locking and calling the
 * following function.  It preallocates enough memory to satisfy the
 * worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns 0,
 * otherwise 1.
 */
int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
{
        /* allocate idr_layers */
        if (!idr_pre_get(&ida->idr, gfp_mask))
                return 0;

        /* allocate free_bitmap */
        if (!ida->free_bitmap) {
                struct ida_bitmap *bitmap;

                bitmap = kmalloc(sizeof(struct ida_bitmap), gfp_mask);
                if (!bitmap)
                        return 0;

                free_bitmap(ida, bitmap);
        }

        return 1;
}
EXPORT_SYMBOL(ida_pre_get);

/**
 * ida_get_new_above - allocate new ID above or equal to a start id
 * @ida: ida handle
 * @starting_id: id to start search at
 * @p_id: pointer to the allocated handle
 *
 * Allocate new ID above or equal to @starting_id.  It should be
 * called with any required locks.
 *
 * If memory is required, it will return -EAGAIN; you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
 * return -ENOSPC.
 *
 * @p_id returns a value in the range @starting_id ... 0x7fffffff.
 */
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
{
        struct idr_layer *pa[MAX_LEVEL];
        struct ida_bitmap *bitmap;
        unsigned long flags;
        int idr_id = starting_id / IDA_BITMAP_BITS;
        int offset = starting_id % IDA_BITMAP_BITS;
        int t, id;

restart:
        /* get vacant slot */
        t = idr_get_empty_slot(&ida->idr, idr_id, pa);
        if (t < 0)
                return _idr_rc_to_errno(t);

        if (t * IDA_BITMAP_BITS >= MAX_ID_BIT)
                return -ENOSPC;

        if (t != idr_id)
                offset = 0;
        idr_id = t;

        /* if bitmap isn't there, create a new one */
        bitmap = (void *)pa[0]->ary[idr_id & IDR_MASK];
        if (!bitmap) {
                spin_lock_irqsave(&ida->idr.lock, flags);
                bitmap = ida->free_bitmap;
                ida->free_bitmap = NULL;
                spin_unlock_irqrestore(&ida->idr.lock, flags);

                if (!bitmap)
                        return -EAGAIN;

                memset(bitmap, 0, sizeof(struct ida_bitmap));
                rcu_assign_pointer(pa[0]->ary[idr_id & IDR_MASK],
                                (void *)bitmap);
                pa[0]->count++;
        }

        /* look up an empty slot */
        t = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, offset);
        if (t == IDA_BITMAP_BITS) {
                /* no empty slot after offset, continue to the next chunk */
                idr_id++;
                offset = 0;
                goto restart;
        }

        id = idr_id * IDA_BITMAP_BITS + t;
        if (id >= MAX_ID_BIT)
                return -ENOSPC;

        __set_bit(t, bitmap->bitmap);
        if (++bitmap->nr_busy == IDA_BITMAP_BITS)
                idr_mark_full(pa, idr_id);

        *p_id = id;

        /* Each leaf node can handle nearly a thousand slots and the
         * whole idea of ida is to have a small memory footprint.
         * Throw away extra resources one by one after each successful
         * allocation.
         */
        if (ida->idr.id_free_cnt || ida->free_bitmap) {
                struct idr_layer *p = get_from_free_list(&ida->idr);
                if (p)
                        kmem_cache_free(idr_layer_cache, p);
        }

        return 0;
}
EXPORT_SYMBOL(ida_get_new_above);

/**
 * ida_get_new - allocate new ID
 * @ida: ida handle
 * @p_id: pointer to the allocated handle
 *
 * Allocate new ID.  It should be called with any required locks.
 *
 * If memory is required, it will return -EAGAIN; you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
 * return -ENOSPC.
 *
 * @p_id returns a value in the range 0 ... 0x7fffffff.
 */
int ida_get_new(struct ida *ida, int *p_id)
{
        return ida_get_new_above(ida, 0, p_id);
}
EXPORT_SYMBOL(ida_get_new);
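
/*
 * Illustrative sketch, not part of the original file: the ida
 * counterpart of the idr retry loop, allocating a bare id with no
 * associated pointer.  example_ida_lock, example_ida and
 * example_get_id are hypothetical names.
 */
#ifdef IDA_USAGE_EXAMPLE
static DEFINE_SPINLOCK(example_ida_lock);
static DEFINE_IDA(example_ida);

static int example_get_id(void)
{
        int id, ret;

        do {
                if (!ida_pre_get(&example_ida, GFP_KERNEL))
                        return -ENOMEM;
                spin_lock(&example_ida_lock);
                ret = ida_get_new(&example_ida, &id);
                spin_unlock(&example_ida_lock);
        } while (ret == -EAGAIN);

        return ret ? ret : id;
}
#endif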

/**
 * ida_remove - remove the given ID
 * @ida: ida handle
 * @id: ID to free
 */
void ida_remove(struct ida *ida, int id)
{
        struct idr_layer *p = ida->idr.top;
        int shift = (ida->idr.layers - 1) * IDR_BITS;
        int idr_id = id / IDA_BITMAP_BITS;
        int offset = id % IDA_BITMAP_BITS;
        int n;
        struct ida_bitmap *bitmap;

        /* clear full bits while looking up the leaf idr_layer */
        while ((shift > 0) && p) {
                n = (idr_id >> shift) & IDR_MASK;
                __clear_bit(n, &p->bitmap);
                p = p->ary[n];
                shift -= IDR_BITS;
        }

        if (p == NULL)
                goto err;

        n = idr_id & IDR_MASK;
        __clear_bit(n, &p->bitmap);

        bitmap = (void *)p->ary[n];
        if (!test_bit(offset, bitmap->bitmap))
                goto err;

        /* update bitmap and remove it if empty */
        __clear_bit(offset, bitmap->bitmap);
        if (--bitmap->nr_busy == 0) {
                __set_bit(n, &p->bitmap);       /* to please idr_remove() */
                idr_remove(&ida->idr, idr_id);
                free_bitmap(ida, bitmap);
        }

        return;

err:
        printk(KERN_WARNING
               "ida_remove called for id=%d which is not allocated.\n", id);
}
EXPORT_SYMBOL(ida_remove);

/**
 * ida_destroy - release all cached layers within an ida tree
 * @ida: ida handle
 */
void ida_destroy(struct ida *ida)
{
        idr_destroy(&ida->idr);
        kfree(ida->free_bitmap);
}
EXPORT_SYMBOL(ida_destroy);

/**
 * ida_init - initialize ida handle
 * @ida: ida handle
 *
 * This function is used to set up the handle (@ida) that you will pass
 * to the rest of the functions.
 */
void ida_init(struct ida *ida)
{
        memset(ida, 0, sizeof(struct ida));
        idr_init(&ida->idr);
}
EXPORT_SYMBOL(ida_init);