1 /**
2 * This file was imported from the iptables sources.
3 * Copyright (C) 1999-2008 Netfilter Core Team
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the
7 * Free Software Foundation; only version 2 of the License is applicable.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License along
15 * with this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
19 #ifndef _LINUX_LIST_H
20 #define _LINUX_LIST_H
/* Byte offset of MEMBER within TYPE.  Redefined (after #undef) so the macro
 * is available regardless of which system headers the includer pulled in;
 * the null-pointer-cast form is the traditional pre-__builtin_offsetof idiom. */
#undef offsetof
#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
/**
 * container_of - cast a member of a structure out to the containing structure
 *
 * @ptr: the pointer to the member.
 * @type: the type of the container struct this is embedded in.
 * @member: the name of the member within the struct.
 *
 * Relies on GNU C extensions (statement expressions and typeof).  The
 * temporary __mptr exists purely so the compiler type-checks @ptr against
 * the declared type of @member.
 */
#define container_of(ptr, type, member) ({			\
	const typeof( ((type *)0)->member ) *__mptr = (ptr);	\
	(type *)( (char *)__mptr - offsetof(type,member) );})
/*
 * Check at compile time that something is of a particular type.
 * Always evaluates to 1 so you may use it easily in comparisons.
 *
 * Comparing the addresses of the two dummies makes the compiler diagnose
 * incompatible pointer types; nothing is evaluated at run time.
 */
#define typecheck(type,x) \
({	type __dummy; \
	typeof(x) __dummy2; \
	(void)(&__dummy == &__dummy2); \
	1; \
})
48 #define prefetch(x) 1
/* Memory barriers are no-ops in this single-threaded userspace import -HW.
 * smp_read_barrier_depends() must be defined as well: the *_rcu list
 * macros below expand to it, and without a definition any use of those
 * macros fails to compile. */
#define smp_wmb()
#define smp_read_barrier_depends()
/*
 * These are non-NULL pointers that will result in page faults
 * under normal circumstances, used to verify that nobody uses
 * non-initialized list entries.
 *
 * (In userspace these addresses are merely very unlikely to be mapped;
 * the intent is the same: make stale pointer use crash early and loudly.)
 */
#define LIST_POISON1 ((void *) 0x00100100)
#define LIST_POISON2 ((void *) 0x00200200)
/*
 * Simple doubly linked list implementation.
 *
 * Some of the internal functions ("__xxx") are useful when
 * manipulating whole lists rather than single entries, as
 * sometimes we already know the next/prev entries and we can
 * generate better code by using them directly rather than
 * using the generic single-entry routines.
 */

/* Circular doubly linked list node; embedded inside user structures and
 * recovered with list_entry().  An empty list's head points at itself. */
struct list_head {
	struct list_head *next, *prev;
};
/* Static initializer for an empty list: both links point back at the head. */
#define LIST_HEAD_INIT(name) { &(name), &(name) }

/* Define and initialize a named, empty list head in one step. */
#define LIST_HEAD(name) \
	struct list_head name = LIST_HEAD_INIT(name)
/* Run-time initialization of a list head (e.g. for dynamically allocated
 * or reused nodes); makes it an empty, self-referencing list. */
#define INIT_LIST_HEAD(ptr) do { \
	(ptr)->next = (ptr); (ptr)->prev = (ptr); \
} while (0)
84 /*
85 * Insert a new entry between two known consecutive entries.
86 *
87 * This is only for internal list manipulation where we know
88 * the prev/next entries already!
89 */
90 static inline void __list_add(struct list_head *new,
91 struct list_head *prev,
92 struct list_head *next)
93 {
94 next->prev = new;
95 new->next = next;
96 new->prev = prev;
97 prev->next = new;
98 }
100 /**
101 * list_add - add a new entry
102 * @new: new entry to be added
103 * @head: list head to add it after
104 *
105 * Insert a new entry after the specified head.
106 * This is good for implementing stacks.
107 */
108 static inline void list_add(struct list_head *new, struct list_head *head)
109 {
110 __list_add(new, head, head->next);
111 }
113 /**
114 * list_add_tail - add a new entry
115 * @new: new entry to be added
116 * @head: list head to add it before
117 *
118 * Insert a new entry before the specified head.
119 * This is useful for implementing queues.
120 */
121 static inline void list_add_tail(struct list_head *new, struct list_head *head)
122 {
123 __list_add(new, head->prev, head);
124 }
/*
 * Insert a new entry between two known consecutive entries.
 *
 * This is only for internal list manipulation where we know
 * the prev/next entries already!
 *
 * The new node's own links are written *before* smp_wmb(), and only then
 * is it made reachable from the list, so a concurrent RCU reader never
 * observes a half-initialised node.  The statement order is therefore
 * significant — do not reorder.
 */
static inline void __list_add_rcu(struct list_head * new,
	struct list_head * prev, struct list_head * next)
{
	new->next = next;
	new->prev = prev;
	smp_wmb();
	next->prev = new;
	prev->next = new;
}
142 /**
143 * list_add_rcu - add a new entry to rcu-protected list
144 * @new: new entry to be added
145 * @head: list head to add it after
146 *
147 * Insert a new entry after the specified head.
148 * This is good for implementing stacks.
149 *
150 * The caller must take whatever precautions are necessary
151 * (such as holding appropriate locks) to avoid racing
152 * with another list-mutation primitive, such as list_add_rcu()
153 * or list_del_rcu(), running on this same list.
154 * However, it is perfectly legal to run concurrently with
155 * the _rcu list-traversal primitives, such as
156 * list_for_each_entry_rcu().
157 */
158 static inline void list_add_rcu(struct list_head *new, struct list_head *head)
159 {
160 __list_add_rcu(new, head, head->next);
161 }
163 /**
164 * list_add_tail_rcu - add a new entry to rcu-protected list
165 * @new: new entry to be added
166 * @head: list head to add it before
167 *
168 * Insert a new entry before the specified head.
169 * This is useful for implementing queues.
170 *
171 * The caller must take whatever precautions are necessary
172 * (such as holding appropriate locks) to avoid racing
173 * with another list-mutation primitive, such as list_add_tail_rcu()
174 * or list_del_rcu(), running on this same list.
175 * However, it is perfectly legal to run concurrently with
176 * the _rcu list-traversal primitives, such as
177 * list_for_each_entry_rcu().
178 */
179 static inline void list_add_tail_rcu(struct list_head *new,
180 struct list_head *head)
181 {
182 __list_add_rcu(new, head->prev, head);
183 }
/*
 * Delete a list entry by making the prev/next entries
 * point to each other.
 *
 * This is only for internal list manipulation where we know
 * the prev/next entries already!
 *
 * NOTE(review): the RCU deletion helpers below rely on this exact update
 * order being harmless to concurrent readers — keep the order as-is.
 */
static inline void __list_del(struct list_head * prev, struct list_head * next)
{
	next->prev = prev;
	prev->next = next;
}
198 /**
199 * list_del - deletes entry from list.
200 * @entry: the element to delete from the list.
201 * Note: list_empty on entry does not return true after this, the entry is
202 * in an undefined state.
203 */
204 static inline void list_del(struct list_head *entry)
205 {
206 __list_del(entry->prev, entry->next);
207 entry->next = LIST_POISON1;
208 entry->prev = LIST_POISON2;
209 }
/**
 * list_del_rcu - deletes entry from list without re-initialization
 * @entry: the element to delete from the list.
 *
 * Note: list_empty on entry does not return true after this,
 * the entry is in an undefined state. It is useful for RCU based
 * lockfree traversal.
 *
 * In particular, it means that we can not poison the forward
 * pointers that may still be used for walking the list — hence only
 * ->prev is poisoned below, while ->next is left intact for readers
 * still traversing through this node.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_del_rcu()
 * or list_add_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 *
 * Note that the caller is not permitted to immediately free
 * the newly deleted entry. Instead, either synchronize_kernel()
 * or call_rcu() must be used to defer freeing until an RCU
 * grace period has elapsed.
 */
static inline void list_del_rcu(struct list_head *entry)
{
	__list_del(entry->prev, entry->next);
	entry->prev = LIST_POISON2;
}
/**
 * list_del_init - deletes entry from list and reinitialize it.
 * @entry: the element to delete from the list.
 *
 * Unlike list_del(), the entry is left as a valid empty list rather than
 * poisoned, so it can be re-added (and list_empty() on it returns true).
 */
static inline void list_del_init(struct list_head *entry)
{
	__list_del(entry->prev, entry->next);
	INIT_LIST_HEAD(entry);
}
/**
 * list_move - delete from one list and add as another's head
 * @list: the entry to move
 * @head: the head that will precede our entry
 *
 * Unlinks @list from wherever it currently is (without poisoning) and
 * re-inserts it right after @head.
 */
static inline void list_move(struct list_head *list, struct list_head *head)
{
	__list_del(list->prev, list->next);
	list_add(list, head);
}
/**
 * list_move_tail - delete from one list and add as another's tail
 * @list: the entry to move
 * @head: the head that will follow our entry
 *
 * Unlinks @list from wherever it currently is (without poisoning) and
 * re-inserts it right before @head.
 */
static inline void list_move_tail(struct list_head *list,
				  struct list_head *head)
{
	__list_del(list->prev, list->next);
	list_add_tail(list, head);
}
274 /**
275 * list_empty - tests whether a list is empty
276 * @head: the list to test.
277 */
278 static inline int list_empty(const struct list_head *head)
279 {
280 return head->next == head;
281 }
283 /**
284 * list_empty_careful - tests whether a list is
285 * empty _and_ checks that no other CPU might be
286 * in the process of still modifying either member
287 *
288 * NOTE: using list_empty_careful() without synchronization
289 * can only be safe if the only activity that can happen
290 * to the list entry is list_del_init(). Eg. it cannot be used
291 * if another CPU could re-list_add() it.
292 *
293 * @head: the list to test.
294 */
295 static inline int list_empty_careful(const struct list_head *head)
296 {
297 struct list_head *next = head->next;
298 return (next == head) && (next == head->prev);
299 }
301 static inline void __list_splice(struct list_head *list,
302 struct list_head *head)
303 {
304 struct list_head *first = list->next;
305 struct list_head *last = list->prev;
306 struct list_head *at = head->next;
308 first->prev = head;
309 head->next = first;
311 last->next = at;
312 at->prev = last;
313 }
/**
 * list_splice - join two lists
 * @list: the new list to add.
 * @head: the place to add it in the first list.
 *
 * @list itself is not reinitialised; use list_splice_init() if it will
 * be reused afterwards.
 */
static inline void list_splice(struct list_head *list, struct list_head *head)
{
	if (list_empty(list))
		return;
	__list_splice(list, head);
}
/**
 * list_splice_init - join two lists and reinitialise the emptied list.
 * @list: the new list to add.
 * @head: the place to add it in the first list.
 *
 * The list at @list is reinitialised to a valid empty list.
 */
static inline void list_splice_init(struct list_head *list,
				    struct list_head *head)
{
	if (list_empty(list))
		return;
	__list_splice(list, head);
	INIT_LIST_HEAD(list);
}
/**
 * list_entry - get the struct for this entry
 * @ptr: the &struct list_head pointer.
 * @type: the type of the struct this is embedded in.
 * @member: the name of the list_struct within the struct.
 *
 * Thin alias for container_of().
 */
#define list_entry(ptr, type, member) \
	container_of(ptr, type, member)
/**
 * list_for_each - iterate over a list
 * @pos: the &struct list_head to use as a loop counter.
 * @head: the head for your list.
 *
 * @pos must not be removed inside the loop body; use
 * list_for_each_safe() for that.  prefetch() is a no-op here.
 */
#define list_for_each(pos, head) \
	for (pos = (head)->next, prefetch(pos->next); pos != (head); \
		pos = pos->next, prefetch(pos->next))
/**
 * __list_for_each - iterate over a list
 * @pos: the &struct list_head to use as a loop counter.
 * @head: the head for your list.
 *
 * This variant differs from list_for_each() in that it's the
 * simplest possible list iteration code, no prefetching is done.
 * Use this for code that knows the list to be very short (empty
 * or 1 entry) most of the time.
 */
#define __list_for_each(pos, head) \
	for (pos = (head)->next; pos != (head); pos = pos->next)
/**
 * list_for_each_prev - iterate over a list backwards
 * @pos: the &struct list_head to use as a loop counter.
 * @head: the head for your list.
 *
 * Walks via ->prev, i.e. from the tail towards the head.
 */
#define list_for_each_prev(pos, head) \
	for (pos = (head)->prev, prefetch(pos->prev); pos != (head); \
		pos = pos->prev, prefetch(pos->prev))
/**
 * list_for_each_safe - iterate over a list safe against removal of list entry
 * @pos: the &struct list_head to use as a loop counter.
 * @n: another &struct list_head to use as temporary storage
 * @head: the head for your list.
 *
 * @n caches the successor before the body runs, so the body may free or
 * unlink @pos (but not @n).
 */
#define list_for_each_safe(pos, n, head) \
	for (pos = (head)->next, n = pos->next; pos != (head); \
		pos = n, n = pos->next)
/**
 * list_for_each_entry - iterate over list of given type
 * @pos: the type * to use as a loop counter.
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 *
 * Like list_for_each(), but @pos is the containing structure rather than
 * the raw &struct list_head.  Not safe against removal of @pos; see
 * list_for_each_entry_safe().
 */
#define list_for_each_entry(pos, head, member) \
	for (pos = list_entry((head)->next, typeof(*pos), member), \
		     prefetch(pos->member.next); \
	     &pos->member != (head); \
	     pos = list_entry(pos->member.next, typeof(*pos), member), \
		     prefetch(pos->member.next))
/**
 * list_for_each_entry_reverse - iterate backwards over list of given type.
 * @pos: the type * to use as a loop counter.
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 *
 * Same as list_for_each_entry() but walks via ->prev (tail to head).
 */
#define list_for_each_entry_reverse(pos, head, member) \
	for (pos = list_entry((head)->prev, typeof(*pos), member), \
		     prefetch(pos->member.prev); \
	     &pos->member != (head); \
	     pos = list_entry(pos->member.prev, typeof(*pos), member), \
		     prefetch(pos->member.prev))
/**
 * list_prepare_entry - prepare a pos entry for use as a start point in
 * list_for_each_entry_continue
 * @pos: the type * to use as a start point
 * @head: the head of the list
 * @member: the name of the list_struct within the struct.
 *
 * If @pos is NULL the list head itself is used as the (dummy) starting
 * entry.  Uses the GNU "?:"-with-omitted-middle-operand extension.
 */
#define list_prepare_entry(pos, head, member) \
	((pos) ? : list_entry(head, typeof(*pos), member))
/**
 * list_for_each_entry_continue - iterate over list of given type
 * continuing after existing point
 * @pos: the type * to use as a loop counter.
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 *
 * Iteration starts at the entry *after* the one @pos holds on entry
 * (see list_prepare_entry() for producing a valid start point).
 */
#define list_for_each_entry_continue(pos, head, member) \
	for (pos = list_entry(pos->member.next, typeof(*pos), member), \
		     prefetch(pos->member.next); \
	     &pos->member != (head); \
	     pos = list_entry(pos->member.next, typeof(*pos), member), \
		     prefetch(pos->member.next))
/**
 * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
 * @pos: the type * to use as a loop counter.
 * @n: another type * to use as temporary storage
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 *
 * @n caches the next entry before the body runs, so the body may remove
 * or free @pos.
 */
#define list_for_each_entry_safe(pos, n, head, member) \
	for (pos = list_entry((head)->next, typeof(*pos), member), \
		n = list_entry(pos->member.next, typeof(*pos), member); \
	     &pos->member != (head); \
	     pos = n, n = list_entry(n->member.next, typeof(*n), member))
/**
 * list_for_each_rcu - iterate over an rcu-protected list
 * @pos: the &struct list_head to use as a loop counter.
 * @head: the head for your list.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 *
 * NOTE(review): expands to smp_read_barrier_depends(), which this header
 * does not define alongside smp_wmb() — confirm a definition exists
 * before using any of the _rcu traversal macros.
 */
#define list_for_each_rcu(pos, head) \
	for (pos = (head)->next, prefetch(pos->next); pos != (head); \
		pos = pos->next, ({ smp_read_barrier_depends(); 0;}), prefetch(pos->next))
/* Prefetch-free variant of list_for_each_rcu(); same caveats apply. */
#define __list_for_each_rcu(pos, head) \
	for (pos = (head)->next; pos != (head); \
		pos = pos->next, ({ smp_read_barrier_depends(); 0;}))
/**
 * list_for_each_safe_rcu - iterate over an rcu-protected list safe
 * against removal of list entry
 * @pos: the &struct list_head to use as a loop counter.
 * @n: another &struct list_head to use as temporary storage
 * @head: the head for your list.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_safe_rcu(pos, n, head) \
	for (pos = (head)->next, n = pos->next; pos != (head); \
		pos = n, ({ smp_read_barrier_depends(); 0;}), n = pos->next)
/**
 * list_for_each_entry_rcu - iterate over rcu list of given type
 * @pos: the type * to use as a loop counter.
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_entry_rcu(pos, head, member) \
	for (pos = list_entry((head)->next, typeof(*pos), member), \
		     prefetch(pos->member.next); \
	     &pos->member != (head); \
	     pos = list_entry(pos->member.next, typeof(*pos), member), \
		     ({ smp_read_barrier_depends(); 0;}), \
		     prefetch(pos->member.next))
/**
 * list_for_each_continue_rcu - iterate over an rcu-protected list
 * continuing after existing point.
 * @pos: the &struct list_head to use as a loop counter.
 * @head: the head for your list.
 *
 * Iteration starts at the node *after* the one @pos holds on entry.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_continue_rcu(pos, head) \
	for ((pos) = (pos)->next, prefetch((pos)->next); (pos) != (head); \
		(pos) = (pos)->next, ({ smp_read_barrier_depends(); 0;}), prefetch((pos)->next))
/*
 * Double linked lists with a single pointer list head.
 * Mostly useful for hash tables where the two pointer list head is
 * too wasteful.
 * You lose the ability to access the tail in O(1).
 */

struct hlist_head {
	struct hlist_node *first;
};

struct hlist_node {
	struct hlist_node *next;	/* next node, or NULL at end of chain */
	struct hlist_node **pprev;	/* address of the pointer that points
					 * at this node (head's ->first or the
					 * previous node's ->next); lets
					 * deletion work without a head */
};
/* Static / run-time initializers; an empty hlist has a NULL ->first, and
 * an unhashed node has NULL ->next and ->pprev. */
#define HLIST_HEAD_INIT { .first = NULL }
#define HLIST_HEAD(name) struct hlist_head name = { .first = NULL }
#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
#define INIT_HLIST_NODE(ptr) ((ptr)->next = NULL, (ptr)->pprev = NULL)
540 static inline int hlist_unhashed(const struct hlist_node *h)
541 {
542 return !h->pprev;
543 }
545 static inline int hlist_empty(const struct hlist_head *h)
546 {
547 return !h->first;
548 }
550 static inline void __hlist_del(struct hlist_node *n)
551 {
552 struct hlist_node *next = n->next;
553 struct hlist_node **pprev = n->pprev;
554 *pprev = next;
555 if (next)
556 next->pprev = pprev;
557 }
559 static inline void hlist_del(struct hlist_node *n)
560 {
561 __hlist_del(n);
562 n->next = LIST_POISON1;
563 n->pprev = LIST_POISON2;
564 }
/**
 * hlist_del_rcu - deletes entry from hash list without re-initialization
 * @n: the element to delete from the hash list.
 *
 * Note: list_unhashed() on entry does not return true after this,
 * the entry is in an undefined state. It is useful for RCU based
 * lockfree traversal.
 *
 * In particular, it means that we can not poison the forward
 * pointers that may still be used for walking the hash list —
 * hence only ->pprev is poisoned below.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry().
 */
static inline void hlist_del_rcu(struct hlist_node *n)
{
	__hlist_del(n);
	n->pprev = LIST_POISON2;
}
591 static inline void hlist_del_init(struct hlist_node *n)
592 {
593 if (n->pprev) {
594 __hlist_del(n);
595 INIT_HLIST_NODE(n);
596 }
597 }
599 #define hlist_del_rcu_init hlist_del_init
601 static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
602 {
603 struct hlist_node *first = h->first;
604 n->next = first;
605 if (first)
606 first->pprev = &n->next;
607 h->first = n;
608 n->pprev = &h->first;
609 }
/**
 * hlist_add_head_rcu - adds the specified element to the specified hlist,
 * while permitting racing traversals.
 * @n: the element to add to the hash list.
 * @h: the list to add to.
 *
 * @n's own pointers are fully set up *before* smp_wmb(), and only then is
 * it published via h->first, so a concurrent reader never sees a
 * half-initialised node.  Do not reorder these statements.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry(), but only if smp_read_barrier_depends()
 * is used to prevent memory-consistency problems on Alpha CPUs.
 * Regardless of the type of CPU, the list-traversal primitive
 * must be guarded by rcu_read_lock().
 *
 * OK, so why don't we have an hlist_for_each_entry_rcu()???
 */
static inline void hlist_add_head_rcu(struct hlist_node *n,
					struct hlist_head *h)
{
	struct hlist_node *first = h->first;
	n->next = first;
	n->pprev = &h->first;
	smp_wmb();
	if (first)
		first->pprev = &n->next;
	h->first = n;
}
/* Insert @n immediately before @next on @next's chain.
 * @next must be != NULL (i.e. already on a chain).
 * Statement order matters: n->pprev must capture next's old back-pointer
 * before next->pprev is redirected, and *(n->pprev) is only valid once
 * n->pprev has been assigned. */
static inline void hlist_add_before(struct hlist_node *n,
					struct hlist_node *next)
{
	n->pprev = next->pprev;
	n->next = next;
	next->pprev = &n->next;
	*(n->pprev) = n;
}
/* Insert @next immediately after @n on @n's chain.
 * NOTE(review): despite the parameter names, it is the *second* argument
 * that gets inserted (after the first) — the reverse of what the name
 * might suggest; double-check call sites. */
static inline void hlist_add_after(struct hlist_node *n,
					struct hlist_node *next)
{
	next->next = n->next;
	n->next = next;
	next->pprev = &n->next;

	if(next->next)
		next->next->pprev  = &next->next;
}
664 #define hlist_entry(ptr, type, member) container_of(ptr,type,member)
/* Iterate @pos over the raw nodes of the chain headed by @head.
 * Not safe against removal of @pos; see hlist_for_each_safe(). */
#define hlist_for_each(pos, head) \
	for (pos = (head)->first; pos && ({ prefetch(pos->next); 1; }); \
	     pos = pos->next)
/* Like hlist_for_each(), but @n caches the successor before the body
 * runs, so the body may remove or free @pos. */
#define hlist_for_each_safe(pos, n, head) \
	for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \
	     pos = n)
/**
 * hlist_for_each_entry - iterate over list of given type
 * @tpos: the type * to use as a loop counter.
 * @pos: the &struct hlist_node to use as a loop counter.
 * @head: the head for your list.
 * @member: the name of the hlist_node within the struct.
 *
 * @tpos is derived from @pos each iteration via hlist_entry().
 */
#define hlist_for_each_entry(tpos, pos, head, member) \
	for (pos = (head)->first; \
	     pos && ({ prefetch(pos->next); 1;}) && \
		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
	     pos = pos->next)
/**
 * hlist_for_each_entry_continue - iterate over a hlist continuing after existing point
 * @tpos: the type * to use as a loop counter.
 * @pos: the &struct hlist_node to use as a loop counter.
 * @member: the name of the hlist_node within the struct.
 *
 * Iteration starts at the node *after* the one @pos holds on entry.
 */
#define hlist_for_each_entry_continue(tpos, pos, member) \
	for (pos = (pos)->next; \
	     pos && ({ prefetch(pos->next); 1;}) && \
		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
	     pos = pos->next)
/**
 * hlist_for_each_entry_from - iterate over a hlist continuing from existing point
 * @tpos: the type * to use as a loop counter.
 * @pos: the &struct hlist_node to use as a loop counter.
 * @member: the name of the hlist_node within the struct.
 *
 * Unlike _continue, iteration *includes* the node @pos holds on entry.
 */
#define hlist_for_each_entry_from(tpos, pos, member) \
	for (; pos && ({ prefetch(pos->next); 1;}) && \
		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
	     pos = pos->next)
/**
 * hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry
 * @tpos: the type * to use as a loop counter.
 * @pos: the &struct hlist_node to use as a loop counter.
 * @n: another &struct hlist_node to use as temporary storage
 * @head: the head for your list.
 * @member: the name of the hlist_node within the struct.
 *
 * @n caches the successor before the body runs, so the body may remove
 * or free @tpos/@pos.
 */
#define hlist_for_each_entry_safe(tpos, pos, n, head, member) \
	for (pos = (head)->first; \
	     pos && ({ n = pos->next; 1; }) && \
		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
	     pos = n)
/**
 * hlist_for_each_entry_rcu - iterate over rcu list of given type
 * @tpos: the type * to use as a loop counter.
 * @pos: the &struct hlist_node to use as a loop counter.
 * @head: the head for your list.
 * @member: the name of the hlist_node within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define hlist_for_each_entry_rcu(tpos, pos, head, member) \
	for (pos = (head)->first; \
	     pos && ({ prefetch(pos->next); 1;}) && \
		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
	     pos = pos->next, ({ smp_read_barrier_depends(); 0; }) )
741 #endif