#ifndef _LINUX_LIST_H
#define _LINUX_LIST_H

#ifdef __KERNEL__

#include <linux/stddef.h>
#include <linux/prefetch.h>
#include <asm/system.h>

/*
 * These are non-NULL pointers that will result in page faults
 * under normal circumstances, used to verify that nobody uses
 * non-initialized list entries.
 */
#define LIST_POISON1 ((void *) 0x00100100)
#define LIST_POISON2 ((void *) 0x00200200)

/*
 * Simple doubly linked list implementation.
 *
 * Some of the internal functions ("__xxx") are useful when
 * manipulating whole lists rather than single entries, as
 * sometimes we already know the next/prev entries and we can
 * generate better code by using them directly rather than
 * using the generic single-entry routines.
 */

struct list_head {
	struct list_head *next, *prev;
};

#define LIST_HEAD_INIT(name) { &(name), &(name) }

#define LIST_HEAD(name) \
	struct list_head name = LIST_HEAD_INIT(name)

static inline void INIT_LIST_HEAD(struct list_head *list)
{
	list->next = list;
	list->prev = list;
}

/*
 * Insert a new entry between two known consecutive entries.
 *
 * This is only for internal list manipulation where we know
 * the prev/next entries already!
 */
static inline void __list_add(struct list_head *new,
			      struct list_head *prev,
			      struct list_head *next)
{
	next->prev = new;
	new->next = next;
	new->prev = prev;
	prev->next = new;
}

/**
 * list_add - add a new entry
 * @new: new entry to be added
 * @head: list head to add it after
 *
 * Insert a new entry after the specified head.
 * This is good for implementing stacks.
 */
static inline void list_add(struct list_head *new, struct list_head *head)
{
	__list_add(new, head, head->next);
}

/**
 * list_add_tail - add a new entry
 * @new: new entry to be added
 * @head: list head to add it before
 *
 * Insert a new entry before the specified head.
 * This is useful for implementing queues.
 */
static inline void list_add_tail(struct list_head *new, struct list_head *head)
{
	__list_add(new, head->prev, head);
}
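
/*
 * Usage sketch (illustrative only; "struct foo", foo_list and the helper
 * functions are hypothetical, not part of this header):
 *
 *	struct foo {
 *		int val;
 *		struct list_head node;		// link embedded in the object
 *	};
 *
 *	static LIST_HEAD(foo_list);		// statically initialized, empty
 *
 *	static void foo_push(struct foo *f)
 *	{
 *		list_add(&f->node, &foo_list);		// LIFO: newest first
 *	}
 *
 *	static void foo_enqueue(struct foo *f)
 *	{
 *		list_add_tail(&f->node, &foo_list);	// FIFO: newest last
 *	}
 */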

/*
 * Insert a new entry between two known consecutive entries.
 *
 * This is only for internal list manipulation where we know
 * the prev/next entries already!
 */
static inline void __list_add_rcu(struct list_head *new,
		struct list_head *prev, struct list_head *next)
{
	new->next = next;
	new->prev = prev;
	smp_wmb();
	next->prev = new;
	prev->next = new;
}

/**
 * list_add_rcu - add a new entry to rcu-protected list
 * @new: new entry to be added
 * @head: list head to add it after
 *
 * Insert a new entry after the specified head.
 * This is good for implementing stacks.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_add_rcu()
 * or list_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 */
static inline void list_add_rcu(struct list_head *new, struct list_head *head)
{
	__list_add_rcu(new, head, head->next);
}

/**
 * list_add_tail_rcu - add a new entry to rcu-protected list
 * @new: new entry to be added
 * @head: list head to add it before
 *
 * Insert a new entry before the specified head.
 * This is useful for implementing queues.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_add_tail_rcu()
 * or list_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 */
static inline void list_add_tail_rcu(struct list_head *new,
					struct list_head *head)
{
	__list_add_rcu(new, head->prev, head);
}

/*
 * Delete a list entry by making the prev/next entries
 * point to each other.
 *
 * This is only for internal list manipulation where we know
 * the prev/next entries already!
 */
static inline void __list_del(struct list_head *prev, struct list_head *next)
{
	next->prev = prev;
	prev->next = next;
}

/**
 * list_del - deletes entry from list.
 * @entry: the element to delete from the list.
 * Note: list_empty on entry does not return true after this, the entry is
 * in an undefined state.
 */
static inline void list_del(struct list_head *entry)
{
	__list_del(entry->prev, entry->next);
	entry->next = LIST_POISON1;
	entry->prev = LIST_POISON2;
}

/**
 * list_del_rcu - deletes entry from list without re-initialization
 * @entry: the element to delete from the list.
 *
 * Note: list_empty on entry does not return true after this,
 * the entry is in an undefined state. It is useful for RCU based
 * lockfree traversal.
 *
 * In particular, it means that we can not poison the forward
 * pointers that may still be used for walking the list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_del_rcu()
 * or list_add_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 *
 * Note that the caller is not permitted to immediately free
 * the newly deleted entry. Instead, either synchronize_rcu()
 * or call_rcu() must be used to defer freeing until an RCU
 * grace period has elapsed.
 */
static inline void list_del_rcu(struct list_head *entry)
{
	__list_del(entry->prev, entry->next);
	entry->prev = LIST_POISON2;
}
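
/*
 * Deletion sketch for RCU lists (illustrative; "struct foo", foo_list,
 * foo_lock and foo_free_rcu() are hypothetical, and struct foo is assumed
 * to carry a struct rcu_head member named "rcu"):
 *
 *	static void foo_free_rcu(struct rcu_head *head)
 *	{
 *		kfree(container_of(head, struct foo, rcu));
 *	}
 *
 *	static void foo_remove(struct foo *f)
 *	{
 *		spin_lock(&foo_lock);		// serialize against other updaters
 *		list_del_rcu(&f->node);
 *		spin_unlock(&foo_lock);
 *		call_rcu(&f->rcu, foo_free_rcu);	// defer the free past a grace period
 *	}
 *
 * Blocking callers may instead call synchronize_rcu() and then kfree().
 */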

/*
 * list_replace_rcu - replace old entry by new one
 * @old : the element to be replaced
 * @new : the new element to insert
 *
 * The old entry will be replaced with the new entry atomically.
 */
static inline void list_replace_rcu(struct list_head *old,
				struct list_head *new)
{
	new->next = old->next;
	new->prev = old->prev;
	smp_wmb();
	new->next->prev = new;
	new->prev->next = new;
	old->prev = LIST_POISON2;
}

/**
 * list_del_init - deletes entry from list and reinitialize it.
 * @entry: the element to delete from the list.
 */
static inline void list_del_init(struct list_head *entry)
{
	__list_del(entry->prev, entry->next);
	INIT_LIST_HEAD(entry);
}

/**
 * list_move - delete from one list and add as another's head
 * @list: the entry to move
 * @head: the head that will precede our entry
 */
static inline void list_move(struct list_head *list, struct list_head *head)
{
	__list_del(list->prev, list->next);
	list_add(list, head);
}

/**
 * list_move_tail - delete from one list and add as another's tail
 * @list: the entry to move
 * @head: the head that will follow our entry
 */
static inline void list_move_tail(struct list_head *list,
				  struct list_head *head)
{
	__list_del(list->prev, list->next);
	list_add_tail(list, head);
}
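
/*
 * A common use of list_move() is bumping an entry to the front of an
 * LRU-style list when it is referenced (sketch; struct foo and foo_lru
 * are hypothetical):
 *
 *	static void foo_touch(struct foo *f)
 *	{
 *		list_move(&f->node, &foo_lru);	// most recently used goes first
 *	}
 */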

/**
 * list_empty - tests whether a list is empty
 * @head: the list to test.
 */
static inline int list_empty(const struct list_head *head)
{
	return head->next == head;
}

/**
 * list_empty_careful - tests whether a list is
 * empty _and_ checks that no other CPU might be
 * in the process of still modifying either member
 *
 * NOTE: using list_empty_careful() without synchronization
 * can only be safe if the only activity that can happen
 * to the list entry is list_del_init(). Eg. it cannot be used
 * if another CPU could re-list_add() it.
 *
 * @head: the list to test.
 */
static inline int list_empty_careful(const struct list_head *head)
{
	struct list_head *next = head->next;
	return (next == head) && (next == head->prev);
}

static inline void __list_splice(struct list_head *list,
				 struct list_head *head)
{
	struct list_head *first = list->next;
	struct list_head *last = list->prev;
	struct list_head *at = head->next;

	first->prev = head;
	head->next = first;

	last->next = at;
	at->prev = last;
}

/**
 * list_splice - join two lists
 * @list: the new list to add.
 * @head: the place to add it in the first list.
 */
static inline void list_splice(struct list_head *list, struct list_head *head)
{
	if (!list_empty(list))
		__list_splice(list, head);
}

/**
 * list_splice_init - join two lists and reinitialise the emptied list.
 * @list: the new list to add.
 * @head: the place to add it in the first list.
 *
 * The list at @list is reinitialised
 */
static inline void list_splice_init(struct list_head *list,
				    struct list_head *head)
{
	if (!list_empty(list)) {
		__list_splice(list, head);
		INIT_LIST_HEAD(list);
	}
}
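
/*
 * list_splice_init() is handy for draining a shared list into a private
 * one before processing it (sketch; pending_list, pending_lock, struct foo
 * and process() are hypothetical):
 *
 *	struct foo *f;
 *	LIST_HEAD(todo);
 *
 *	spin_lock(&pending_lock);
 *	list_splice_init(&pending_list, &todo);	// pending_list is now empty
 *	spin_unlock(&pending_lock);
 *
 *	list_for_each_entry(f, &todo, node)	// defined below
 *		process(f);
 */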

/**
 * list_entry - get the struct for this entry
 * @ptr: the &struct list_head pointer.
 * @type: the type of the struct this is embedded in.
 * @member: the name of the list_struct within the struct.
 */
#define list_entry(ptr, type, member) \
	container_of(ptr, type, member)
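
/*
 * Example (illustrative): given a struct list_head embedded in a struct,
 * list_entry() recovers a pointer to the containing object:
 *
 *	struct foo *f = list_entry(foo_list.next, struct foo, node);
 *
 * where "node" is the name of the struct list_head member inside the
 * hypothetical struct foo.
 */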

/**
 * list_for_each - iterate over a list
 * @pos: the &struct list_head to use as a loop counter.
 * @head: the head for your list.
 */
#define list_for_each(pos, head) \
	for (pos = (head)->next; prefetch(pos->next), pos != (head); \
		pos = pos->next)

/**
 * __list_for_each - iterate over a list
 * @pos: the &struct list_head to use as a loop counter.
 * @head: the head for your list.
 *
 * This variant differs from list_for_each() in that it's the
 * simplest possible list iteration code, no prefetching is done.
 * Use this for code that knows the list to be very short (empty
 * or 1 entry) most of the time.
 */
#define __list_for_each(pos, head) \
	for (pos = (head)->next; pos != (head); pos = pos->next)

/**
 * list_for_each_prev - iterate over a list backwards
 * @pos: the &struct list_head to use as a loop counter.
 * @head: the head for your list.
 */
#define list_for_each_prev(pos, head) \
	for (pos = (head)->prev; prefetch(pos->prev), pos != (head); \
		pos = pos->prev)

/**
 * list_for_each_safe - iterate over a list safe against removal of list entry
 * @pos: the &struct list_head to use as a loop counter.
 * @n: another &struct list_head to use as temporary storage
 * @head: the head for your list.
 */
#define list_for_each_safe(pos, n, head) \
	for (pos = (head)->next, n = pos->next; pos != (head); \
		pos = n, n = pos->next)

/**
 * list_for_each_entry - iterate over list of given type
 * @pos: the type * to use as a loop counter.
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 */
#define list_for_each_entry(pos, head, member) \
	for (pos = list_entry((head)->next, typeof(*pos), member); \
	     prefetch(pos->member.next), &pos->member != (head); \
	     pos = list_entry(pos->member.next, typeof(*pos), member))
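
/*
 * Typical traversal (sketch; struct foo and foo_list are hypothetical):
 *
 *	struct foo *f;
 *
 *	list_for_each_entry(f, &foo_list, node)
 *		printk("%d\n", f->val);
 *
 * The loop variable points at the containing structures, not at the
 * embedded list_head, so no explicit list_entry() call is needed.
 */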

/**
 * list_for_each_entry_reverse - iterate backwards over list of given type.
 * @pos: the type * to use as a loop counter.
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 */
#define list_for_each_entry_reverse(pos, head, member) \
	for (pos = list_entry((head)->prev, typeof(*pos), member); \
	     prefetch(pos->member.prev), &pos->member != (head); \
	     pos = list_entry(pos->member.prev, typeof(*pos), member))

/**
 * list_prepare_entry - prepare a pos entry for use as a start point in
 * list_for_each_entry_continue
 * @pos: the type * to use as a start point
 * @head: the head of the list
 * @member: the name of the list_struct within the struct.
 */
#define list_prepare_entry(pos, head, member) \
	((pos) ? : list_entry(head, typeof(*pos), member))

/**
 * list_for_each_entry_continue - iterate over list of given type
 * continuing after existing point
 * @pos: the type * to use as a loop counter.
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 */
#define list_for_each_entry_continue(pos, head, member) \
	for (pos = list_entry(pos->member.next, typeof(*pos), member); \
	     prefetch(pos->member.next), &pos->member != (head); \
	     pos = list_entry(pos->member.next, typeof(*pos), member))

/**
 * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
 * @pos: the type * to use as a loop counter.
 * @n: another type * to use as temporary storage
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 */
#define list_for_each_entry_safe(pos, n, head, member) \
	for (pos = list_entry((head)->next, typeof(*pos), member), \
		n = list_entry(pos->member.next, typeof(*pos), member); \
	     &pos->member != (head); \
	     pos = n, n = list_entry(n->member.next, typeof(*n), member))
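
/*
 * The _safe variants cache the next entry so the current one may be
 * removed inside the loop (sketch; struct foo and foo_list are
 * hypothetical):
 *
 *	struct foo *f, *tmp;
 *
 *	list_for_each_entry_safe(f, tmp, &foo_list, node) {
 *		list_del(&f->node);
 *		kfree(f);
 *	}
 */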

/**
 * list_for_each_entry_safe_continue - iterate over list of given type
 * continuing after existing point safe against removal of list entry
 * @pos: the type * to use as a loop counter.
 * @n: another type * to use as temporary storage
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 */
#define list_for_each_entry_safe_continue(pos, n, head, member) \
	for (pos = list_entry(pos->member.next, typeof(*pos), member), \
		n = list_entry(pos->member.next, typeof(*pos), member); \
	     &pos->member != (head); \
	     pos = n, n = list_entry(n->member.next, typeof(*n), member))

/**
 * list_for_each_entry_safe_from - iterate over list of given type
 * from existing point safe against removal of list entry
 * @pos: the type * to use as a loop counter.
 * @n: another type * to use as temporary storage
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 */
#define list_for_each_entry_safe_from(pos, n, head, member) \
	for (n = list_entry(pos->member.next, typeof(*pos), member); \
	     &pos->member != (head); \
	     pos = n, n = list_entry(n->member.next, typeof(*n), member))

/**
 * list_for_each_entry_safe_reverse - iterate backwards over list of given type safe against
 * removal of list entry
 * @pos: the type * to use as a loop counter.
 * @n: another type * to use as temporary storage
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 */
#define list_for_each_entry_safe_reverse(pos, n, head, member) \
	for (pos = list_entry((head)->prev, typeof(*pos), member), \
		n = list_entry(pos->member.prev, typeof(*pos), member); \
	     &pos->member != (head); \
	     pos = n, n = list_entry(n->member.prev, typeof(*n), member))

/**
 * list_for_each_rcu - iterate over an rcu-protected list
 * @pos: the &struct list_head to use as a loop counter.
 * @head: the head for your list.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_rcu(pos, head) \
	for (pos = (head)->next; \
		prefetch(rcu_dereference(pos)->next), pos != (head); \
		pos = pos->next)

#define __list_for_each_rcu(pos, head) \
	for (pos = (head)->next; \
		rcu_dereference(pos) != (head); \
		pos = pos->next)

/**
 * list_for_each_safe_rcu - iterate over an rcu-protected list safe
 * against removal of list entry
 * @pos: the &struct list_head to use as a loop counter.
 * @n: another &struct list_head to use as temporary storage
 * @head: the head for your list.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_safe_rcu(pos, n, head) \
	for (pos = (head)->next; \
		n = rcu_dereference(pos)->next, pos != (head); \
		pos = n)

/**
 * list_for_each_entry_rcu - iterate over rcu list of given type
 * @pos: the type * to use as a loop counter.
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_entry_rcu(pos, head, member) \
	for (pos = list_entry((head)->next, typeof(*pos), member); \
		prefetch(rcu_dereference(pos)->member.next), \
		&pos->member != (head); \
		pos = list_entry(pos->member.next, typeof(*pos), member))
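
/*
 * Read-side sketch (illustrative; struct foo, foo_list and match() are
 * hypothetical). Traversal must sit inside an RCU read-side critical
 * section; updaters use list_add_rcu()/list_del_rcu() under a lock:
 *
 *	struct foo *f;
 *
 *	rcu_read_lock();
 *	list_for_each_entry_rcu(f, &foo_list, node) {
 *		if (match(f))
 *			break;
 *	}
 *	rcu_read_unlock();
 */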


/**
 * list_for_each_continue_rcu - iterate over an rcu-protected list
 * continuing after existing point.
 * @pos: the &struct list_head to use as a loop counter.
 * @head: the head for your list.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_continue_rcu(pos, head) \
	for ((pos) = (pos)->next; \
		prefetch(rcu_dereference((pos))->next), (pos) != (head); \
		(pos) = (pos)->next)

/*
 * Double linked lists with a single pointer list head.
 * Mostly useful for hash tables where the two pointer list head is
 * too wasteful.
 * You lose the ability to access the tail in O(1).
 */

struct hlist_head {
	struct hlist_node *first;
};

struct hlist_node {
	struct hlist_node *next, **pprev;
};

#define HLIST_HEAD_INIT { .first = NULL }
#define HLIST_HEAD(name) struct hlist_head name = { .first = NULL }
#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
static inline void INIT_HLIST_NODE(struct hlist_node *h)
{
	h->next = NULL;
	h->pprev = NULL;
}

static inline int hlist_unhashed(const struct hlist_node *h)
{
	return !h->pprev;
}

static inline int hlist_empty(const struct hlist_head *h)
{
	return !h->first;
}

static inline void __hlist_del(struct hlist_node *n)
{
	struct hlist_node *next = n->next;
	struct hlist_node **pprev = n->pprev;
	*pprev = next;
	if (next)
		next->pprev = pprev;
}

static inline void hlist_del(struct hlist_node *n)
{
	__hlist_del(n);
	n->next = LIST_POISON1;
	n->pprev = LIST_POISON2;
}

/**
 * hlist_del_rcu - deletes entry from hash list without re-initialization
 * @n: the element to delete from the hash list.
 *
 * Note: hlist_unhashed() on entry does not return true after this,
 * the entry is in an undefined state. It is useful for RCU based
 * lockfree traversal.
 *
 * In particular, it means that we can not poison the forward
 * pointers that may still be used for walking the hash list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu().
 */
static inline void hlist_del_rcu(struct hlist_node *n)
{
	__hlist_del(n);
	n->pprev = LIST_POISON2;
}

static inline void hlist_del_init(struct hlist_node *n)
{
	if (n->pprev) {
		__hlist_del(n);
		INIT_HLIST_NODE(n);
	}
}

/*
 * hlist_replace_rcu - replace old entry by new one
 * @old : the element to be replaced
 * @new : the new element to insert
 *
 * The old entry will be replaced with the new entry atomically.
 */
static inline void hlist_replace_rcu(struct hlist_node *old,
					struct hlist_node *new)
{
	struct hlist_node *next = old->next;

	new->next = next;
	new->pprev = old->pprev;
	smp_wmb();
	if (next)
		new->next->pprev = &new->next;
	*new->pprev = new;
	old->pprev = LIST_POISON2;
}

static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
{
	struct hlist_node *first = h->first;
	n->next = first;
	if (first)
		first->pprev = &n->next;
	h->first = n;
	n->pprev = &h->first;
}
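
/*
 * Hash-table sketch (illustrative; the table size, struct foo and
 * hash_key() are hypothetical):
 *
 *	#define FOO_HASH_BITS	6
 *	static struct hlist_head foo_hash[1 << FOO_HASH_BITS];
 *
 *	struct foo {
 *		unsigned long key;
 *		struct hlist_node hnode;
 *	};
 *
 *	static void foo_hash_insert(struct foo *f)
 *	{
 *		hlist_add_head(&f->hnode, &foo_hash[hash_key(f->key)]);
 *	}
 *
 * An hlist_head is a single pointer, so the bucket array costs half the
 * memory of an array of struct list_head.
 */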


/**
 * hlist_add_head_rcu - adds the specified element to the specified hlist,
 * while permitting racing traversals.
 * @n: the element to add to the hash list.
 * @h: the list to add to.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs. Regardless of the type of CPU, the
 * list-traversal primitive must be guarded by rcu_read_lock().
 */
static inline void hlist_add_head_rcu(struct hlist_node *n,
					struct hlist_head *h)
{
	struct hlist_node *first = h->first;
	n->next = first;
	n->pprev = &h->first;
	smp_wmb();
	if (first)
		first->pprev = &n->next;
	h->first = n;
}

/* next must be != NULL */
static inline void hlist_add_before(struct hlist_node *n,
					struct hlist_node *next)
{
	n->pprev = next->pprev;
	n->next = next;
	next->pprev = &n->next;
	*(n->pprev) = n;
}

static inline void hlist_add_after(struct hlist_node *n,
					struct hlist_node *next)
{
	next->next = n->next;
	n->next = next;
	next->pprev = &n->next;

	if (next->next)
		next->next->pprev = &next->next;
}

/**
 * hlist_add_before_rcu - adds the specified element to the specified hlist
 * before the specified node while permitting racing traversals.
 * @n: the new element to add to the hash list.
 * @next: the existing element to add the new element before.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.
 */
static inline void hlist_add_before_rcu(struct hlist_node *n,
					struct hlist_node *next)
{
	n->pprev = next->pprev;
	n->next = next;
	smp_wmb();
	next->pprev = &n->next;
	*(n->pprev) = n;
}

/**
 * hlist_add_after_rcu - adds the specified element to the specified hlist
 * after the specified node while permitting racing traversals.
 * @prev: the existing element to add the new element after.
 * @n: the new element to add to the hash list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.
 */
static inline void hlist_add_after_rcu(struct hlist_node *prev,
					struct hlist_node *n)
{
	n->next = prev->next;
	n->pprev = &prev->next;
	smp_wmb();
	prev->next = n;
	if (n->next)
		n->next->pprev = &n->next;
}

#define hlist_entry(ptr, type, member) container_of(ptr,type,member)

#define hlist_for_each(pos, head) \
	for (pos = (head)->first; pos && ({ prefetch(pos->next); 1; }); \
	     pos = pos->next)

#define hlist_for_each_safe(pos, n, head) \
	for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \
	     pos = n)

/**
 * hlist_for_each_entry - iterate over list of given type
 * @tpos: the type * to use as a loop counter.
 * @pos: the &struct hlist_node to use as a loop counter.
 * @head: the head for your list.
 * @member: the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry(tpos, pos, head, member) \
	for (pos = (head)->first; \
	     pos && ({ prefetch(pos->next); 1;}) && \
		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
	     pos = pos->next)
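
/*
 * Bucket lookup sketch (illustrative; struct foo, foo_hash, hash_key()
 * and the key variable are hypothetical):
 *
 *	struct foo *f;
 *	struct hlist_node *p;
 *
 *	hlist_for_each_entry(f, p, &foo_hash[hash_key(key)], hnode) {
 *		if (f->key == key)
 *			return f;
 *	}
 *	return NULL;
 */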

/**
 * hlist_for_each_entry_continue - iterate over a hlist continuing after existing point
 * @tpos: the type * to use as a loop counter.
 * @pos: the &struct hlist_node to use as a loop counter.
 * @member: the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_continue(tpos, pos, member) \
	for (pos = (pos)->next; \
	     pos && ({ prefetch(pos->next); 1;}) && \
		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
	     pos = pos->next)

/**
 * hlist_for_each_entry_from - iterate over a hlist continuing from existing point
 * @tpos: the type * to use as a loop counter.
 * @pos: the &struct hlist_node to use as a loop counter.
 * @member: the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_from(tpos, pos, member) \
	for (; pos && ({ prefetch(pos->next); 1;}) && \
		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
	     pos = pos->next)

/**
 * hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry
 * @tpos: the type * to use as a loop counter.
 * @pos: the &struct hlist_node to use as a loop counter.
 * @n: another &struct hlist_node to use as temporary storage
 * @head: the head for your list.
 * @member: the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_safe(tpos, pos, n, head, member) \
	for (pos = (head)->first; \
	     pos && ({ n = pos->next; 1; }) && \
		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
	     pos = n)

/**
 * hlist_for_each_entry_rcu - iterate over rcu list of given type
 * @tpos: the type * to use as a loop counter.
 * @pos: the &struct hlist_node to use as a loop counter.
 * @head: the head for your list.
 * @member: the name of the hlist_node within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define hlist_for_each_entry_rcu(tpos, pos, head, member) \
	for (pos = (head)->first; \
	     rcu_dereference(pos) && ({ prefetch(pos->next); 1;}) && \
		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
	     pos = pos->next)
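
/*
 * RCU hash-table sketch (illustrative; foo_hash, foo_lock, struct foo,
 * hash_key() and the key variable are hypothetical). Insertion is
 * serialized by a lock; lookups run locklessly under rcu_read_lock():
 *
 *	struct foo *f;
 *	struct hlist_node *p;
 *
 *	spin_lock(&foo_lock);
 *	hlist_add_head_rcu(&f->hnode, &foo_hash[hash_key(f->key)]);
 *	spin_unlock(&foo_lock);
 *
 *	rcu_read_lock();
 *	hlist_for_each_entry_rcu(f, p, &foo_hash[hash_key(key)], hnode) {
 *		if (f->key == key)
 *			break;
 *	}
 *	rcu_read_unlock();
 */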

#else
#warning "don't include kernel headers in userspace"
#endif /* __KERNEL__ */
#endif