/*
 * 2002-10-18  written by Jim Houston jim.houston@ccur.com
 *	Copyright (C) 2002 by Concurrent Computer Corporation
 *	Distributed under the GNU GPL license version 2.
 *
 * Modified by George Anzinger to reuse immediately and to use
 * find bit instructions.  Also removed _irq on spinlocks.
 *
 * Modified by Nadia Derbey to make it RCU safe.
 *
 * Small id to pointer translation service.
 *
 * It uses a radix tree like structure as a sparse array indexed
 * by the id to obtain the pointer.  The bitmap makes allocating
 * a new id quick.
 *
 * You call it to allocate an id (an int) and associate with that id a
 * pointer or whatever; we treat it as a (void *).  You can pass this
 * id to a user for him to pass back at a later time.  You then pass
 * that id to this code and it returns your pointer.
 *
 * You can release ids at any time.  When all ids are released, most of
 * the memory is returned (we keep IDR_FREE_MAX layers in a local pool)
 * so we don't need to go to the memory "store" during an id allocate,
 * and so you don't need to be too concerned about locking and conflicts
 * with the slab allocator.
 */
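
/*
 * Example (an illustrative sketch, not part of the original file): a
 * typical user declares an idr handle and then calls the allocation
 * functions below.  "my_idr" is a hypothetical name.
 *
 *	#include <linux/idr.h>
 *
 *	static DEFINE_IDR(my_idr);	// statically initialized handle
 *
 * Alternatively, embed a struct idr in your own structure and call
 * idr_init() on it before first use.
 */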
28 | ||
29 | #ifndef TEST // to test in user space... | |
30 | #include <linux/slab.h> | |
31 | #include <linux/init.h> | |
32 | #include <linux/module.h> | |
33 | #endif | |
34 | #include <linux/err.h> | |
35 | #include <linux/string.h> | |
36 | #include <linux/idr.h> | |
37 | ||
38 | static struct kmem_cache *idr_layer_cache; | |
39 | ||
40 | static struct idr_layer *get_from_free_list(struct idr *idp) | |
41 | { | |
42 | struct idr_layer *p; | |
43 | unsigned long flags; | |
44 | ||
45 | spin_lock_irqsave(&idp->lock, flags); | |
46 | if ((p = idp->id_free)) { | |
47 | idp->id_free = p->ary[0]; | |
48 | idp->id_free_cnt--; | |
49 | p->ary[0] = NULL; | |
50 | } | |
51 | spin_unlock_irqrestore(&idp->lock, flags); | |
52 | return(p); | |
53 | } | |
54 | ||
55 | static void idr_layer_rcu_free(struct rcu_head *head) | |
56 | { | |
57 | struct idr_layer *layer; | |
58 | ||
59 | layer = container_of(head, struct idr_layer, rcu_head); | |
60 | kmem_cache_free(idr_layer_cache, layer); | |
61 | } | |
62 | ||
63 | static inline void free_layer(struct idr_layer *p) | |
64 | { | |
65 | call_rcu(&p->rcu_head, idr_layer_rcu_free); | |
66 | } | |
67 | ||
68 | /* only called when idp->lock is held */ | |
69 | static void __move_to_free_list(struct idr *idp, struct idr_layer *p) | |
70 | { | |
71 | p->ary[0] = idp->id_free; | |
72 | idp->id_free = p; | |
73 | idp->id_free_cnt++; | |
74 | } | |
75 | ||
76 | static void move_to_free_list(struct idr *idp, struct idr_layer *p) | |
77 | { | |
78 | unsigned long flags; | |
79 | ||
80 | /* | |
81 | * Depends on the return element being zeroed. | |
82 | */ | |
83 | spin_lock_irqsave(&idp->lock, flags); | |
84 | __move_to_free_list(idp, p); | |
85 | spin_unlock_irqrestore(&idp->lock, flags); | |
86 | } | |
87 | ||
88 | static void idr_mark_full(struct idr_layer **pa, int id) | |
89 | { | |
90 | struct idr_layer *p = pa[0]; | |
91 | int l = 0; | |
92 | ||
93 | __set_bit(id & IDR_MASK, &p->bitmap); | |
94 | /* | |
95 | * If this layer is full mark the bit in the layer above to | |
96 | * show that this part of the radix tree is full. This may | |
97 | * complete the layer above and require walking up the radix | |
98 | * tree. | |
99 | */ | |
100 | while (p->bitmap == IDR_FULL) { | |
101 | if (!(p = pa[++l])) | |
102 | break; | |
103 | id = id >> IDR_BITS; | |
104 | __set_bit((id & IDR_MASK), &p->bitmap); | |
105 | } | |
106 | } | |
107 | ||
/**
 * idr_pre_get - reserve resources for idr allocation
 * @idp: idr handle
 * @gfp_mask: memory allocation flags
 *
 * This function should be called prior to calling the idr_get_new* functions.
 * It preallocates enough memory to satisfy the worst possible allocation. The
 * caller should pass in GFP_KERNEL if possible.  This of course requires that
 * no spinning locks be held.
 *
 * If the system is REALLY out of memory this function returns %0,
 * otherwise %1.
 */
int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
{
	while (idp->id_free_cnt < IDR_FREE_MAX) {
		struct idr_layer *new;
		new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
		if (new == NULL)
			return (0);
		move_to_free_list(idp, new);
	}
	return 1;
}
EXPORT_SYMBOL(idr_pre_get);
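
/*
 * Example (a minimal sketch of the canonical caller pattern; "my_idr",
 * "my_lock", "my_ptr" and "id" are hypothetical names).  Note that
 * idr_pre_get() runs without the caller's lock so GFP_KERNEL can be
 * used, while idr_get_new() runs under it:
 *
 *	int id, err;
 *
 *	do {
 *		if (!idr_pre_get(&my_idr, GFP_KERNEL))
 *			return -ENOMEM;
 *		spin_lock(&my_lock);
 *		err = idr_get_new(&my_idr, my_ptr, &id);
 *		spin_unlock(&my_lock);
 *	} while (err == -EAGAIN);
 *	if (err)
 *		return err;	// -ENOSPC: no ids left
 */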
133 | ||
134 | static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa) | |
135 | { | |
136 | int n, m, sh; | |
137 | struct idr_layer *p, *new; | |
138 | int l, id, oid; | |
139 | unsigned long bm; | |
140 | ||
141 | id = *starting_id; | |
142 | restart: | |
143 | p = idp->top; | |
144 | l = idp->layers; | |
145 | pa[l--] = NULL; | |
146 | while (1) { | |
147 | /* | |
148 | * We run around this while until we reach the leaf node... | |
149 | */ | |
150 | n = (id >> (IDR_BITS*l)) & IDR_MASK; | |
151 | bm = ~p->bitmap; | |
152 | m = find_next_bit(&bm, IDR_SIZE, n); | |
153 | if (m == IDR_SIZE) { | |
154 | /* no space available go back to previous layer. */ | |
155 | l++; | |
156 | oid = id; | |
157 | id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1; | |
158 | ||
159 | /* if already at the top layer, we need to grow */ | |
160 | if (id >= 1 << (idp->layers * IDR_BITS)) { | |
161 | *starting_id = id; | |
162 | return IDR_NEED_TO_GROW; | |
163 | } | |
164 | p = pa[l]; | |
165 | BUG_ON(!p); | |
166 | ||
167 | /* If we need to go up one layer, continue the | |
168 | * loop; otherwise, restart from the top. | |
169 | */ | |
170 | sh = IDR_BITS * (l + 1); | |
171 | if (oid >> sh == id >> sh) | |
172 | continue; | |
173 | else | |
174 | goto restart; | |
175 | } | |
176 | if (m != n) { | |
177 | sh = IDR_BITS*l; | |
178 | id = ((id >> sh) ^ n ^ m) << sh; | |
179 | } | |
180 | if ((id >= MAX_ID_BIT) || (id < 0)) | |
181 | return IDR_NOMORE_SPACE; | |
182 | if (l == 0) | |
183 | break; | |
184 | /* | |
185 | * Create the layer below if it is missing. | |
186 | */ | |
187 | if (!p->ary[m]) { | |
188 | new = get_from_free_list(idp); | |
189 | if (!new) | |
190 | return -1; | |
191 | new->layer = l-1; | |
192 | rcu_assign_pointer(p->ary[m], new); | |
193 | p->count++; | |
194 | } | |
195 | pa[l--] = p; | |
196 | p = p->ary[m]; | |
197 | } | |
198 | ||
199 | pa[l] = p; | |
200 | return id; | |
201 | } | |
202 | ||
203 | static int idr_get_empty_slot(struct idr *idp, int starting_id, | |
204 | struct idr_layer **pa) | |
205 | { | |
206 | struct idr_layer *p, *new; | |
207 | int layers, v, id; | |
208 | unsigned long flags; | |
209 | ||
210 | id = starting_id; | |
211 | build_up: | |
212 | p = idp->top; | |
213 | layers = idp->layers; | |
214 | if (unlikely(!p)) { | |
215 | if (!(p = get_from_free_list(idp))) | |
216 | return -1; | |
217 | p->layer = 0; | |
218 | layers = 1; | |
219 | } | |
220 | /* | |
221 | * Add a new layer to the top of the tree if the requested | |
222 | * id is larger than the currently allocated space. | |
223 | */ | |
224 | while ((layers < (MAX_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) { | |
225 | layers++; | |
226 | if (!p->count) { | |
227 | /* special case: if the tree is currently empty, | |
228 | * then we grow the tree by moving the top node | |
229 | * upwards. | |
230 | */ | |
231 | p->layer++; | |
232 | continue; | |
233 | } | |
234 | if (!(new = get_from_free_list(idp))) { | |
235 | /* | |
236 | * The allocation failed. If we built part of | |
237 | * the structure tear it down. | |
238 | */ | |
239 | spin_lock_irqsave(&idp->lock, flags); | |
240 | for (new = p; p && p != idp->top; new = p) { | |
241 | p = p->ary[0]; | |
242 | new->ary[0] = NULL; | |
243 | new->bitmap = new->count = 0; | |
244 | __move_to_free_list(idp, new); | |
245 | } | |
246 | spin_unlock_irqrestore(&idp->lock, flags); | |
247 | return -1; | |
248 | } | |
249 | new->ary[0] = p; | |
250 | new->count = 1; | |
251 | new->layer = layers-1; | |
252 | if (p->bitmap == IDR_FULL) | |
253 | __set_bit(0, &new->bitmap); | |
254 | p = new; | |
255 | } | |
256 | rcu_assign_pointer(idp->top, p); | |
257 | idp->layers = layers; | |
258 | v = sub_alloc(idp, &id, pa); | |
259 | if (v == IDR_NEED_TO_GROW) | |
260 | goto build_up; | |
261 | return(v); | |
262 | } | |
263 | ||
264 | static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id) | |
265 | { | |
266 | struct idr_layer *pa[MAX_LEVEL]; | |
267 | int id; | |
268 | ||
269 | id = idr_get_empty_slot(idp, starting_id, pa); | |
270 | if (id >= 0) { | |
271 | /* | |
272 | * Successfully found an empty slot. Install the user | |
273 | * pointer and mark the slot full. | |
274 | */ | |
275 | rcu_assign_pointer(pa[0]->ary[id & IDR_MASK], | |
276 | (struct idr_layer *)ptr); | |
277 | pa[0]->count++; | |
278 | idr_mark_full(pa, id); | |
279 | } | |
280 | ||
281 | return id; | |
282 | } | |
283 | ||
/**
 * idr_get_new_above - allocate new idr entry above or equal to a start id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @starting_id: id to start search at
 * @id: pointer to the allocated handle
 *
 * This is the allocate id function.  It should be called with any
 * required locks.
 *
 * If allocation from IDR's private freelist fails, idr_get_new_above() will
 * return %-EAGAIN.  The caller should retry the idr_pre_get() call to refill
 * IDR's preallocation and then retry the idr_get_new_above() call.
 *
 * If the idr is full idr_get_new_above() will return %-ENOSPC.
 *
 * @id returns a value in the range @starting_id ... %0x7fffffff
 */
int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
{
	int rv;

	rv = idr_get_new_above_int(idp, ptr, starting_id);
	/*
	 * This is a cheap hack until the IDR code can be fixed to
	 * return proper error values.
	 */
	if (rv < 0)
		return _idr_rc_to_errno(rv);
	*id = rv;
	return 0;
}
EXPORT_SYMBOL(idr_get_new_above);
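
/*
 * Example (sketch): reserving id 0 for special use by allocating ids
 * starting at 1.  Names are hypothetical; locking and the -EAGAIN retry
 * are as in the idr_pre_get() example above.
 *
 *	err = idr_get_new_above(&my_idr, my_ptr, 1, &id);
 *	// on success, id >= 1
 */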
317 | ||
318 | /** | |
319 | * idr_get_new - allocate new idr entry | |
320 | * @idp: idr handle | |
321 | * @ptr: pointer you want associated with the id | |
322 | * @id: pointer to the allocated handle | |
323 | * | |
324 | * If allocation from IDR's private freelist fails, idr_get_new_above() will | |
325 | * return %-EAGAIN. The caller should retry the idr_pre_get() call to refill | |
326 | * IDR's preallocation and then retry the idr_get_new_above() call. | |
327 | * | |
328 | * If the idr is full idr_get_new_above() will return %-ENOSPC. | |
329 | * | |
330 | * @id returns a value in the range %0 ... %0x7fffffff | |
331 | */ | |
int idr_get_new(struct idr *idp, void *ptr, int *id)
{
	int rv;

	rv = idr_get_new_above_int(idp, ptr, 0);
	/*
	 * This is a cheap hack until the IDR code can be fixed to
	 * return proper error values.
	 */
	if (rv < 0)
		return _idr_rc_to_errno(rv);
	*id = rv;
	return 0;
}
EXPORT_SYMBOL(idr_get_new);

static void idr_remove_warning(int id)
{
	printk(KERN_WARNING
		"idr_remove called for id=%d which is not allocated.\n", id);
	dump_stack();
}

static void sub_remove(struct idr *idp, int shift, int id)
{
	struct idr_layer *p = idp->top;
	struct idr_layer **pa[MAX_LEVEL];
	struct idr_layer ***paa = &pa[0];
	struct idr_layer *to_free;
	int n;

	*paa = NULL;
	*++paa = &idp->top;

	while ((shift > 0) && p) {
		n = (id >> shift) & IDR_MASK;
		__clear_bit(n, &p->bitmap);
		*++paa = &p->ary[n];
		p = p->ary[n];
		shift -= IDR_BITS;
	}
	n = id & IDR_MASK;
	if (likely(p != NULL && test_bit(n, &p->bitmap))) {
		__clear_bit(n, &p->bitmap);
		rcu_assign_pointer(p->ary[n], NULL);
		to_free = NULL;
		while (*paa && !--((**paa)->count)) {
			if (to_free)
				free_layer(to_free);
			to_free = **paa;
			**paa-- = NULL;
		}
		if (!*paa)
			idp->layers = 0;
		if (to_free)
			free_layer(to_free);
	} else
		idr_remove_warning(id);
}

/**
 * idr_remove - remove the given id and free its slot
 * @idp: idr handle
 * @id: unique key
 */
void idr_remove(struct idr *idp, int id)
{
	struct idr_layer *p;
	struct idr_layer *to_free;

	/* Mask off upper bits we don't use for the search. */
	id &= MAX_ID_MASK;

	sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
	if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
	    idp->top->ary[0]) {
		/*
		 * Single child at leftmost slot: we can shrink the tree.
		 * This level is not needed anymore since when layers are
		 * inserted, they are inserted at the top of the existing
		 * tree.
		 */
		to_free = idp->top;
		p = idp->top->ary[0];
		rcu_assign_pointer(idp->top, p);
		--idp->layers;
		to_free->bitmap = to_free->count = 0;
		free_layer(to_free);
	}
	while (idp->id_free_cnt >= IDR_FREE_MAX) {
		p = get_from_free_list(idp);
		/*
		 * Note: we don't call the rcu callback here, since the only
		 * layers that fall into the freelist are those that have been
		 * preallocated.
		 */
		kmem_cache_free(idr_layer_cache, p);
	}
	return;
}
EXPORT_SYMBOL(idr_remove);

/**
 * idr_remove_all - remove all ids from the given idr tree
 * @idp: idr handle
 *
 * idr_destroy() only frees up unused, cached idr_layers, but this
 * function will remove all id mappings and leave all idr_layers
 * unused.
 *
 * A typical clean-up sequence for objects stored in an idr tree will
 * use idr_for_each() to free all objects, if necessary, then
 * idr_remove_all() to remove all ids, and idr_destroy() to free
 * up the cached idr_layers.
 */
void idr_remove_all(struct idr *idp)
{
	int n, id, max;
	int bt_mask;
	struct idr_layer *p;
	struct idr_layer *pa[MAX_LEVEL];
	struct idr_layer **paa = &pa[0];

	n = idp->layers * IDR_BITS;
	p = idp->top;
	rcu_assign_pointer(idp->top, NULL);
	max = 1 << n;

	id = 0;
	while (id < max) {
		while (n > IDR_BITS && p) {
			n -= IDR_BITS;
			*paa++ = p;
			p = p->ary[(id >> n) & IDR_MASK];
		}

		bt_mask = id;
		id += 1 << n;
		/* Get the highest bit that the above add changed from 0->1. */
		while (n < fls(id ^ bt_mask)) {
			if (p)
				free_layer(p);
			n += IDR_BITS;
			p = *--paa;
		}
	}
	idp->layers = 0;
}
EXPORT_SYMBOL(idr_remove_all);
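
/*
 * Example (a sketch of the clean-up sequence described above; "free_cb"
 * and "my_idr" are hypothetical):
 *
 *	static int free_cb(int id, void *p, void *data)
 *	{
 *		kfree(p);
 *		return 0;
 *	}
 *
 *	idr_for_each(&my_idr, free_cb, NULL);	// free the objects
 *	idr_remove_all(&my_idr);		// drop all id mappings
 *	idr_destroy(&my_idr);			// free cached idr_layers
 */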
481 | ||
482 | /** | |
483 | * idr_destroy - release all cached layers within an idr tree | |
484 | * @idp: idr handle | |
485 | */ | |
486 | void idr_destroy(struct idr *idp) | |
487 | { | |
488 | while (idp->id_free_cnt) { | |
489 | struct idr_layer *p = get_from_free_list(idp); | |
490 | kmem_cache_free(idr_layer_cache, p); | |
491 | } | |
492 | } | |
493 | EXPORT_SYMBOL(idr_destroy); | |
494 | ||
/**
 * idr_find - return pointer for given id
 * @idp: idr handle
 * @id: lookup key
 *
 * Return the pointer given the id it has been registered with.  A %NULL
 * return indicates that @id is not valid or you passed %NULL in
 * idr_get_new().
 *
 * This function can be called under rcu_read_lock(), provided that the
 * leaf pointers' lifetimes are correctly managed.
 */
void *idr_find(struct idr *idp, int id)
{
	int n;
	struct idr_layer *p;

	p = rcu_dereference_raw(idp->top);
	if (!p)
		return NULL;
	n = (p->layer+1) * IDR_BITS;

	/* Mask off upper bits we don't use for the search. */
	id &= MAX_ID_MASK;

	if (id >= (1 << n))
		return NULL;
	BUG_ON(n == 0);

	while (n > 0 && p) {
		n -= IDR_BITS;
		BUG_ON(n != p->layer*IDR_BITS);
		p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
	}
	return((void *)p);
}
EXPORT_SYMBOL(idr_find);
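
/*
 * Example (sketch): an RCU-side lookup; names are hypothetical.  This is
 * only safe if the stored objects are themselves freed via RCU (or
 * otherwise outlive the read-side critical section).
 *
 *	struct my_obj *obj;
 *
 *	rcu_read_lock();
 *	obj = idr_find(&my_idr, id);
 *	if (obj)
 *		use(obj);	// must not block inside rcu_read_lock()
 *	rcu_read_unlock();
 */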
532 | ||
533 | /** | |
534 | * idr_for_each - iterate through all stored pointers | |
535 | * @idp: idr handle | |
536 | * @fn: function to be called for each pointer | |
537 | * @data: data passed back to callback function | |
538 | * | |
539 | * Iterate over the pointers registered with the given idr. The | |
540 | * callback function will be called for each pointer currently | |
541 | * registered, passing the id, the pointer and the data pointer passed | |
542 | * to this function. It is not safe to modify the idr tree while in | |
543 | * the callback, so functions such as idr_get_new and idr_remove are | |
544 | * not allowed. | |
545 | * | |
546 | * We check the return of @fn each time. If it returns anything other | |
547 | * than %0, we break out and return that value. | |
548 | * | |
549 | * The caller must serialize idr_for_each() vs idr_get_new() and idr_remove(). | |
550 | */ | |
551 | int idr_for_each(struct idr *idp, | |
552 | int (*fn)(int id, void *p, void *data), void *data) | |
553 | { | |
554 | int n, id, max, error = 0; | |
555 | struct idr_layer *p; | |
556 | struct idr_layer *pa[MAX_LEVEL]; | |
557 | struct idr_layer **paa = &pa[0]; | |
558 | ||
559 | n = idp->layers * IDR_BITS; | |
560 | p = rcu_dereference_raw(idp->top); | |
561 | max = 1 << n; | |
562 | ||
563 | id = 0; | |
564 | while (id < max) { | |
565 | while (n > 0 && p) { | |
566 | n -= IDR_BITS; | |
567 | *paa++ = p; | |
568 | p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]); | |
569 | } | |
570 | ||
571 | if (p) { | |
572 | error = fn(id, (void *)p, data); | |
573 | if (error) | |
574 | break; | |
575 | } | |
576 | ||
577 | id += 1 << n; | |
578 | while (n < fls(id)) { | |
579 | n += IDR_BITS; | |
580 | p = *--paa; | |
581 | } | |
582 | } | |
583 | ||
584 | return error; | |
585 | } | |
586 | EXPORT_SYMBOL(idr_for_each); | |
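
/*
 * Example (sketch): counting the registered pointers with a callback;
 * all names are hypothetical.
 *
 *	static int count_cb(int id, void *p, void *data)
 *	{
 *		int *count = data;
 *
 *		(*count)++;
 *		return 0;	// non-zero would stop the iteration
 *	}
 *
 *	int count = 0;
 *	idr_for_each(&my_idr, count_cb, &count);
 */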
587 | ||
588 | /** | |
589 | * idr_get_next - lookup next object of id to given id. | |
590 | * @idp: idr handle | |
591 | * @nextidp: pointer to lookup key | |
592 | * | |
593 | * Returns pointer to registered object with id, which is next number to | |
594 | * given id. After being looked up, *@nextidp will be updated for the next | |
595 | * iteration. | |
596 | */ | |
597 | ||
598 | void *idr_get_next(struct idr *idp, int *nextidp) | |
599 | { | |
600 | struct idr_layer *p, *pa[MAX_LEVEL]; | |
601 | struct idr_layer **paa = &pa[0]; | |
602 | int id = *nextidp; | |
603 | int n, max; | |
604 | ||
605 | /* find first ent */ | |
606 | n = idp->layers * IDR_BITS; | |
607 | max = 1 << n; | |
608 | p = rcu_dereference_raw(idp->top); | |
609 | if (!p) | |
610 | return NULL; | |
611 | ||
612 | while (id < max) { | |
613 | while (n > 0 && p) { | |
614 | n -= IDR_BITS; | |
615 | *paa++ = p; | |
616 | p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]); | |
617 | } | |
618 | ||
619 | if (p) { | |
620 | *nextidp = id; | |
621 | return p; | |
622 | } | |
623 | ||
624 | id += 1 << n; | |
625 | while (n < fls(id)) { | |
626 | n += IDR_BITS; | |
627 | p = *--paa; | |
628 | } | |
629 | } | |
630 | return NULL; | |
631 | } | |
632 | EXPORT_SYMBOL(idr_get_next); | |
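
/*
 * Example (sketch): open-coded iteration with idr_get_next(); names are
 * hypothetical.  Note the manual increment so the next call searches
 * past the id just returned.
 *
 *	struct my_obj *obj;
 *	int id = 0;
 *
 *	while ((obj = idr_get_next(&my_idr, &id)) != NULL) {
 *		use(obj);	// per-object work
 *		id++;		// advance past the id we just got
 *	}
 */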
633 | ||
634 | ||
635 | /** | |
636 | * idr_replace - replace pointer for given id | |
637 | * @idp: idr handle | |
638 | * @ptr: pointer you want associated with the id | |
639 | * @id: lookup key | |
640 | * | |
641 | * Replace the pointer registered with an id and return the old value. | |
642 | * A %-ENOENT return indicates that @id was not found. | |
643 | * A %-EINVAL return indicates that @id was not within valid constraints. | |
644 | * | |
645 | * The caller must serialize with writers. | |
646 | */ | |
647 | void *idr_replace(struct idr *idp, void *ptr, int id) | |
648 | { | |
649 | int n; | |
650 | struct idr_layer *p, *old_p; | |
651 | ||
652 | p = idp->top; | |
653 | if (!p) | |
654 | return ERR_PTR(-EINVAL); | |
655 | ||
656 | n = (p->layer+1) * IDR_BITS; | |
657 | ||
658 | id &= MAX_ID_MASK; | |
659 | ||
660 | if (id >= (1 << n)) | |
661 | return ERR_PTR(-EINVAL); | |
662 | ||
663 | n -= IDR_BITS; | |
664 | while ((n > 0) && p) { | |
665 | p = p->ary[(id >> n) & IDR_MASK]; | |
666 | n -= IDR_BITS; | |
667 | } | |
668 | ||
669 | n = id & IDR_MASK; | |
670 | if (unlikely(p == NULL || !test_bit(n, &p->bitmap))) | |
671 | return ERR_PTR(-ENOENT); | |
672 | ||
673 | old_p = p->ary[n]; | |
674 | rcu_assign_pointer(p->ary[n], ptr); | |
675 | ||
676 | return old_p; | |
677 | } | |
678 | EXPORT_SYMBOL(idr_replace); | |
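
/*
 * Example (sketch): swapping in a new pointer and checking the result
 * with the IS_ERR() helpers from <linux/err.h>; names are hypothetical.
 *
 *	void *old;
 *
 *	old = idr_replace(&my_idr, new_ptr, id);
 *	if (IS_ERR(old))
 *		return PTR_ERR(old);	// -ENOENT or -EINVAL
 *	// "old" is the previously registered pointer
 */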
679 | ||
680 | void __init idr_init_cache(void) | |
681 | { | |
682 | idr_layer_cache = kmem_cache_create("idr_layer_cache", | |
683 | sizeof(struct idr_layer), 0, SLAB_PANIC, NULL); | |
684 | } | |
685 | ||
/**
 * idr_init - initialize idr handle
 * @idp: idr handle
 *
 * This function is used to set up the handle (@idp) that you will pass
 * to the rest of the functions.
 */
void idr_init(struct idr *idp)
{
	memset(idp, 0, sizeof(struct idr));
	spin_lock_init(&idp->lock);
}
EXPORT_SYMBOL(idr_init);

/**
 * DOC: IDA description
 * IDA - IDR based ID allocator
 *
 * This is an id allocator without id -> pointer translation.  Memory
 * usage is much lower than with a full blown idr because each id only
 * occupies a bit.  ida uses a custom leaf node which contains
 * IDA_BITMAP_BITS slots.
 *
 * 2007-04-25  written by Tejun Heo <htejun@gmail.com>
 */
712 | ||
713 | static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap) | |
714 | { | |
715 | unsigned long flags; | |
716 | ||
717 | if (!ida->free_bitmap) { | |
718 | spin_lock_irqsave(&ida->idr.lock, flags); | |
719 | if (!ida->free_bitmap) { | |
720 | ida->free_bitmap = bitmap; | |
721 | bitmap = NULL; | |
722 | } | |
723 | spin_unlock_irqrestore(&ida->idr.lock, flags); | |
724 | } | |
725 | ||
726 | kfree(bitmap); | |
727 | } | |
728 | ||
/**
 * ida_pre_get - reserve resources for ida allocation
 * @ida: ida handle
 * @gfp_mask: memory allocation flag
 *
 * This function should be called prior to locking and calling the
 * ida allocation functions below.  It preallocates enough memory to
 * satisfy the worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns %0,
 * otherwise %1.
 */
int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
{
	/* allocate idr_layers */
	if (!idr_pre_get(&ida->idr, gfp_mask))
		return 0;

	/* allocate free_bitmap */
	if (!ida->free_bitmap) {
		struct ida_bitmap *bitmap;

		bitmap = kmalloc(sizeof(struct ida_bitmap), gfp_mask);
		if (!bitmap)
			return 0;

		free_bitmap(ida, bitmap);
	}

	return 1;
}
EXPORT_SYMBOL(ida_pre_get);

/**
 * ida_get_new_above - allocate new ID above or equal to a start id
 * @ida: ida handle
 * @starting_id: id to start search at
 * @p_id: pointer to the allocated handle
 *
 * Allocate a new ID above or equal to @starting_id.  It should be
 * called with any required locks.
 *
 * If memory is required, it will return %-EAGAIN, you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
 * return %-ENOSPC.
 *
 * @p_id returns a value in the range @starting_id ... %0x7fffffff.
 */
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
{
	struct idr_layer *pa[MAX_LEVEL];
	struct ida_bitmap *bitmap;
	unsigned long flags;
	int idr_id = starting_id / IDA_BITMAP_BITS;
	int offset = starting_id % IDA_BITMAP_BITS;
	int t, id;

 restart:
	/* get vacant slot */
	t = idr_get_empty_slot(&ida->idr, idr_id, pa);
	if (t < 0)
		return _idr_rc_to_errno(t);

	if (t * IDA_BITMAP_BITS >= MAX_ID_BIT)
		return -ENOSPC;

	if (t != idr_id)
		offset = 0;
	idr_id = t;

	/* if bitmap isn't there, create a new one */
	bitmap = (void *)pa[0]->ary[idr_id & IDR_MASK];
	if (!bitmap) {
		spin_lock_irqsave(&ida->idr.lock, flags);
		bitmap = ida->free_bitmap;
		ida->free_bitmap = NULL;
		spin_unlock_irqrestore(&ida->idr.lock, flags);

		if (!bitmap)
			return -EAGAIN;

		memset(bitmap, 0, sizeof(struct ida_bitmap));
		rcu_assign_pointer(pa[0]->ary[idr_id & IDR_MASK],
				(void *)bitmap);
		pa[0]->count++;
	}

	/* lookup for empty slot */
	t = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, offset);
	if (t == IDA_BITMAP_BITS) {
		/* no empty slot after offset, continue to the next chunk */
		idr_id++;
		offset = 0;
		goto restart;
	}

	id = idr_id * IDA_BITMAP_BITS + t;
	if (id >= MAX_ID_BIT)
		return -ENOSPC;

	__set_bit(t, bitmap->bitmap);
	if (++bitmap->nr_busy == IDA_BITMAP_BITS)
		idr_mark_full(pa, idr_id);

	*p_id = id;

	/* Each leaf node can handle nearly a thousand slots and the
	 * whole idea of ida is to have a small memory footprint.
	 * Throw away extra resources one by one after each successful
	 * allocation.
	 */
	if (ida->idr.id_free_cnt || ida->free_bitmap) {
		struct idr_layer *p = get_from_free_list(&ida->idr);
		if (p)
			kmem_cache_free(idr_layer_cache, p);
	}

	return 0;
}
EXPORT_SYMBOL(ida_get_new_above);
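
/*
 * Example (a minimal sketch of the canonical ida caller pattern;
 * "my_ida" and "my_lock" are hypothetical names):
 *
 *	int id, err;
 *
 *	do {
 *		if (!ida_pre_get(&my_ida, GFP_KERNEL))
 *			return -ENOMEM;
 *		spin_lock(&my_lock);
 *		err = ida_get_new(&my_ida, &id);
 *		spin_unlock(&my_lock);
 *	} while (err == -EAGAIN);
 *	if (err)
 *		return err;		// -ENOSPC
 *	...
 *	ida_remove(&my_ida, id);	// release the id when done
 */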
849 | ||
850 | /** | |
851 | * ida_get_new - allocate new ID | |
852 | * @ida: idr handle | |
853 | * @p_id: pointer to the allocated handle | |
854 | * | |
855 | * Allocate new ID. It should be called with any required locks. | |
856 | * | |
857 | * If memory is required, it will return %-EAGAIN, you should unlock | |
858 | * and go back to the idr_pre_get() call. If the idr is full, it will | |
859 | * return %-ENOSPC. | |
860 | * | |
861 | * @id returns a value in the range %0 ... %0x7fffffff. | |
862 | */ | |
int ida_get_new(struct ida *ida, int *p_id)
{
	return ida_get_new_above(ida, 0, p_id);
}
EXPORT_SYMBOL(ida_get_new);

/**
 * ida_remove - remove the given ID
 * @ida: ida handle
 * @id: ID to free
 */
void ida_remove(struct ida *ida, int id)
{
	struct idr_layer *p = ida->idr.top;
	int shift = (ida->idr.layers - 1) * IDR_BITS;
	int idr_id = id / IDA_BITMAP_BITS;
	int offset = id % IDA_BITMAP_BITS;
	int n;
	struct ida_bitmap *bitmap;

	/* clear full bits while looking up the leaf idr_layer */
	while ((shift > 0) && p) {
		n = (idr_id >> shift) & IDR_MASK;
		__clear_bit(n, &p->bitmap);
		p = p->ary[n];
		shift -= IDR_BITS;
	}

	if (p == NULL)
		goto err;

	n = idr_id & IDR_MASK;
	__clear_bit(n, &p->bitmap);

	bitmap = (void *)p->ary[n];
	if (!test_bit(offset, bitmap->bitmap))
		goto err;

	/* update bitmap and remove it if empty */
	__clear_bit(offset, bitmap->bitmap);
	if (--bitmap->nr_busy == 0) {
		__set_bit(n, &p->bitmap);	/* to please idr_remove() */
		idr_remove(&ida->idr, idr_id);
		free_bitmap(ida, bitmap);
	}

	return;

 err:
	printk(KERN_WARNING
	       "ida_remove called for id=%d which is not allocated.\n", id);
}
EXPORT_SYMBOL(ida_remove);
916 | ||
917 | /** | |
918 | * ida_destroy - release all cached layers within an ida tree | |
919 | * @ida: ida handle | |
920 | */ | |
921 | void ida_destroy(struct ida *ida) | |
922 | { | |
923 | idr_destroy(&ida->idr); | |
924 | kfree(ida->free_bitmap); | |
925 | } | |
926 | EXPORT_SYMBOL(ida_destroy); | |
927 | ||
/**
 * ida_init - initialize ida handle
 * @ida: ida handle
 *
 * This function is used to set up the handle (@ida) that you will pass
 * to the rest of the functions.
 */
void ida_init(struct ida *ida)
{
	memset(ida, 0, sizeof(struct ida));
	idr_init(&ida->idr);
}
EXPORT_SYMBOL(ida_init);