/*
 * 2002-10-18  written by Jim Houston jim.houston@ccur.com
 *	Copyright (C) 2002 by Concurrent Computer Corporation
 *	Distributed under the GNU GPL license version 2.
 *
 * Modified by George Anzinger to reuse immediately and to use
 * find bit instructions.  Also removed _irq on spinlocks.
 *
 * Small id to pointer translation service.
 *
 * It uses a radix-tree-like structure as a sparse array indexed
 * by the id to obtain the pointer.  The bitmap makes allocating
 * a new id quick.
 *
 * You call it to allocate an id (an int) and associate a pointer
 * (treated as a (void *)) with that id.  You can pass this id to a
 * user to pass back at a later time.  You then pass that id to this
 * code and it returns your pointer.
 *
 * You can release ids at any time.  When all ids are released, most of
 * the memory is returned (we keep IDR_FREE_MAX layers) in a local pool
 * so we don't need to go to the memory "store" during an id allocation;
 * this also means you don't need to be too concerned about locking and
 * conflicts with the slab allocator.
 */
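
/*
 * A minimal usage sketch (not part of the original file; my_lock, my_ptr
 * and the calling context are hypothetical).  It shows the preallocate /
 * lock / allocate pattern described above, plus lookup and removal:
 *
 *	struct idr my_idr;
 *	int id, err;
 *
 *	idr_init(&my_idr);
 * again:
 *	if (!idr_pre_get(&my_idr, GFP_KERNEL))
 *		return -ENOMEM;
 *	spin_lock(&my_lock);
 *	err = idr_get_new(&my_idr, my_ptr, &id);
 *	spin_unlock(&my_lock);
 *	if (err == -EAGAIN)
 *		goto again;
 *	else if (err)
 *		return err;
 *
 *	... later, with the same serialization ...
 *	my_ptr = idr_find(&my_idr, id);
 *	idr_remove(&my_idr, id);
 */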

#ifndef TEST			// to test in user space...
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
#endif
#include <linux/string.h>
#include <linux/idr.h>

static kmem_cache_t *idr_layer_cache;

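/*
 * Pop one preallocated layer off this idr's free list (filled by
 * idr_pre_get()), or return NULL if the list is empty.
 */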
static struct idr_layer *alloc_layer(struct idr *idp)
{
	struct idr_layer *p;

	spin_lock(&idp->lock);
	if ((p = idp->id_free)) {
		idp->id_free = p->ary[0];
		idp->id_free_cnt--;
		p->ary[0] = NULL;
	}
	spin_unlock(&idp->lock);
	return p;
}

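/*
 * Return a layer to this idr's free list for later reuse by alloc_layer().
 */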
static void free_layer(struct idr *idp, struct idr_layer *p)
{
	/*
	 * The layer being returned must already be zeroed.
	 */
	spin_lock(&idp->lock);
	p->ary[0] = idp->id_free;
	idp->id_free = p;
	idp->id_free_cnt++;
	spin_unlock(&idp->lock);
}

/**
 * idr_pre_get - reserve resources for idr allocation
 * @idp: idr handle
 * @gfp_mask: memory allocation flags
 *
 * This function should be called prior to locking and calling the
 * idr_get_new* functions.  It preallocates enough memory to satisfy
 * the worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns 0,
 * otherwise 1.
 */
int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
{
	while (idp->id_free_cnt < IDR_FREE_MAX) {
		struct idr_layer *new;
		new = kmem_cache_alloc(idr_layer_cache, gfp_mask);
		if (new == NULL)
			return 0;
		free_layer(idp, new);
	}
	return 1;
}
EXPORT_SYMBOL(idr_pre_get);

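/*
 * Walk down from the top layer to a leaf, looking for a free slot at or
 * above *starting_id and creating missing intermediate layers from the
 * free list on the way.  Returns the allocated id on success, -1 if a
 * layer could not be allocated, -2 if the caller needs to grow the tree
 * and retry (with *starting_id updated), or -3 if the id space is
 * exhausted.
 */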
static int sub_alloc(struct idr *idp, void *ptr, int *starting_id)
{
	int n, m, sh;
	struct idr_layer *p, *new;
	struct idr_layer *pa[MAX_LEVEL];
	int l, id;
	long bm;

	id = *starting_id;
	p = idp->top;
	l = idp->layers;
	pa[l--] = NULL;
	while (1) {
		/*
		 * Loop until we reach the leaf node...
		 */
		n = (id >> (IDR_BITS*l)) & IDR_MASK;
		bm = ~p->bitmap;
		m = find_next_bit(&bm, IDR_SIZE, n);
		if (m == IDR_SIZE) {
			/* No space available; go back to the previous layer. */
			l++;
			id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
			if (!(p = pa[l])) {
				*starting_id = id;
				return -2;
			}
			continue;
		}
		if (m != n) {
			sh = IDR_BITS*l;
			id = ((id >> sh) ^ n ^ m) << sh;
		}
		if ((id >= MAX_ID_BIT) || (id < 0))
			return -3;
		if (l == 0)
			break;
		/*
		 * Create the layer below if it is missing.
		 */
		if (!p->ary[m]) {
			if (!(new = alloc_layer(idp)))
				return -1;
			p->ary[m] = new;
			p->count++;
		}
		pa[l--] = p;
		p = p->ary[m];
	}
	/*
	 * We have reached the leaf node, plant the
	 * user's pointer and return the raw id.
	 */
	p->ary[m] = (struct idr_layer *)ptr;
	__set_bit(m, &p->bitmap);
	p->count++;
	/*
	 * If this layer is full mark the bit in the layer above
	 * to show that this part of the radix tree is full.
	 * This may complete the layer above and require walking
	 * up the radix tree.
	 */
	n = id;
	while (p->bitmap == IDR_FULL) {
		if (!(p = pa[++l]))
			break;
		n = n >> IDR_BITS;
		__set_bit((n & IDR_MASK), &p->bitmap);
	}
	return id;
}

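/*
 * Grow the tree as needed so that it can hold @starting_id, then try to
 * allocate via sub_alloc(), rebuilding and retrying if sub_alloc() asks
 * us to (return value -2).
 */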
static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
{
	struct idr_layer *p, *new;
	int layers, v, id;

	id = starting_id;
build_up:
	p = idp->top;
	layers = idp->layers;
	if (unlikely(!p)) {
		if (!(p = alloc_layer(idp)))
			return -1;
		layers = 1;
	}
	/*
	 * Add a new layer to the top of the tree if the requested
	 * id is larger than the currently allocated space.
	 */
	while ((layers < (MAX_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) {
		layers++;
		if (!p->count)
			continue;
		if (!(new = alloc_layer(idp))) {
			/*
			 * The allocation failed.  If we built part of
			 * the structure tear it down.
			 */
			for (new = p; p && p != idp->top; new = p) {
				p = p->ary[0];
				new->ary[0] = NULL;
				new->bitmap = new->count = 0;
				free_layer(idp, new);
			}
			return -1;
		}
		new->ary[0] = p;
		new->count = 1;
		if (p->bitmap == IDR_FULL)
			__set_bit(0, &new->bitmap);
		p = new;
	}
	idp->top = p;
	idp->layers = layers;
	v = sub_alloc(idp, ptr, &id);
	if (v == -2)
		goto build_up;
	return v;
}

/**
 * idr_get_new_above - allocate new idr entry above or equal to a start id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @starting_id: id to start search at
 * @id: pointer to the allocated handle
 *
 * This is the allocate id function.  It should be called with any
 * required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the idr_pre_get() call.  If the idr is full, it will
 * return -ENOSPC.
 *
 * @id returns a value in the range @starting_id ... 0x7fffffff
 */
int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
{
	int rv;

	rv = idr_get_new_above_int(idp, ptr, starting_id);
	/*
	 * This is a cheap hack until the IDR code can be fixed to
	 * return proper error values.
	 */
	if (rv < 0) {
		if (rv == -1)
			return -EAGAIN;
		else /* Will be -3 */
			return -ENOSPC;
	}
	*id = rv;
	return 0;
}
EXPORT_SYMBOL(idr_get_new_above);

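/*
 * A minimal sketch of the idr_get_new_above() retry loop (my_idr, my_lock
 * and my_ptr are hypothetical), allocating an id of at least 100:
 *
 * again:
 *	if (!idr_pre_get(&my_idr, GFP_KERNEL))
 *		return -ENOMEM;
 *	spin_lock(&my_lock);
 *	err = idr_get_new_above(&my_idr, my_ptr, 100, &id);
 *	spin_unlock(&my_lock);
 *	if (err == -EAGAIN)
 *		goto again;
 */
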
/**
 * idr_get_new - allocate new idr entry
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: pointer to the allocated handle
 *
 * This is the allocate id function.  It should be called with any
 * required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the idr_pre_get() call.  If the idr is full, it will
 * return -ENOSPC.
 *
 * @id returns a value in the range 0 ... 0x7fffffff
 */
int idr_get_new(struct idr *idp, void *ptr, int *id)
{
	int rv;

	rv = idr_get_new_above_int(idp, ptr, 0);
	/*
	 * This is a cheap hack until the IDR code can be fixed to
	 * return proper error values.
	 */
	if (rv < 0) {
		if (rv == -1)
			return -EAGAIN;
		else /* Will be -3 */
			return -ENOSPC;
	}
	*id = rv;
	return 0;
}
EXPORT_SYMBOL(idr_get_new);

static void idr_remove_warning(int id)
{
	printk("idr_remove called for id=%d which is not allocated.\n", id);
	dump_stack();
}

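/*
 * Clear the leaf slot for @id: walk down from the top, clearing the
 * "full" bits on the way, then free any layers whose count drops to zero.
 */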
static void sub_remove(struct idr *idp, int shift, int id)
{
	struct idr_layer *p = idp->top;
	struct idr_layer **pa[MAX_LEVEL];
	struct idr_layer ***paa = &pa[0];
	int n;

	*paa = NULL;
	*++paa = &idp->top;

	while ((shift > 0) && p) {
		n = (id >> shift) & IDR_MASK;
		__clear_bit(n, &p->bitmap);
		*++paa = &p->ary[n];
		p = p->ary[n];
		shift -= IDR_BITS;
	}
	n = id & IDR_MASK;
	if (likely(p != NULL && test_bit(n, &p->bitmap))) {
		__clear_bit(n, &p->bitmap);
		p->ary[n] = NULL;
		while (*paa && !--((**paa)->count)) {
			free_layer(idp, **paa);
			**paa-- = NULL;
		}
		if (!*paa)
			idp->layers = 0;
	} else
		idr_remove_warning(id);
}

/**
 * idr_remove - remove the given id and free its slot
 * @idp: idr handle
 * @id: unique key
 */
void idr_remove(struct idr *idp, int id)
{
	struct idr_layer *p;

	/* Mask off upper bits we don't use for the search. */
	id &= MAX_ID_MASK;

	sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
	if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
	    idp->top->ary[0]) {	/* We can drop a layer */

		p = idp->top->ary[0];
		idp->top->bitmap = idp->top->count = 0;
		free_layer(idp, idp->top);
		idp->top = p;
		--idp->layers;
	}
	while (idp->id_free_cnt >= IDR_FREE_MAX) {
		p = alloc_layer(idp);
		kmem_cache_free(idr_layer_cache, p);
		return;
	}
}
EXPORT_SYMBOL(idr_remove);

/**
 * idr_destroy - release all cached layers within an idr tree
 * @idp: idr handle
 */
void idr_destroy(struct idr *idp)
{
	while (idp->id_free_cnt) {
		struct idr_layer *p = alloc_layer(idp);
		kmem_cache_free(idr_layer_cache, p);
	}
}
EXPORT_SYMBOL(idr_destroy);

/**
 * idr_find - return pointer for given id
 * @idp: idr handle
 * @id: lookup key
 *
 * Return the pointer given the id it has been registered with.  A %NULL
 * return indicates that @id is not valid or you passed %NULL in
 * idr_get_new().
 *
 * The caller must serialize idr_find() vs idr_get_new() and idr_remove().
 */
void *idr_find(struct idr *idp, int id)
{
	int n;
	struct idr_layer *p;

	n = idp->layers * IDR_BITS;
	p = idp->top;

	/* Mask off upper bits we don't use for the search. */
	id &= MAX_ID_MASK;

	if (id >= (1 << n))
		return NULL;

	while (n > 0 && p) {
		n -= IDR_BITS;
		p = p->ary[(id >> n) & IDR_MASK];
	}
	return (void *)p;
}
EXPORT_SYMBOL(idr_find);

static void idr_cache_ctor(void *idr_layer, kmem_cache_t *idr_layer_cache,
		unsigned long flags)
{
	memset(idr_layer, 0, sizeof(struct idr_layer));
}

static int init_id_cache(void)
{
	if (!idr_layer_cache)
		idr_layer_cache = kmem_cache_create("idr_layer_cache",
			sizeof(struct idr_layer), 0, 0, idr_cache_ctor, NULL);
	return 0;
}

/**
 * idr_init - initialize idr handle
 * @idp: idr handle
 *
 * This function is used to set up the handle (@idp) that you will pass
 * to the rest of the functions.
 */
void idr_init(struct idr *idp)
{
	init_id_cache();
	memset(idp, 0, sizeof(struct idr));
	spin_lock_init(&idp->lock);
}
EXPORT_SYMBOL(idr_init);