#ifndef _LINUX_RCULIST_H
#define _LINUX_RCULIST_H

#ifdef __KERNEL__

/*
 * RCU-protected list version
 */
#include <linux/list.h>
#include <linux/rcupdate.h>

/*
 * Insert a new entry between two known consecutive entries.
 *
 * This is only for internal list manipulation where we know
 * the prev/next entries already!
 */
static inline void __list_add_rcu(struct list_head *new,
		struct list_head *prev, struct list_head *next)
{
	new->next = next;
	new->prev = prev;
	rcu_assign_pointer(prev->next, new);
	next->prev = new;
}

/**
 * list_add_rcu - add a new entry to rcu-protected list
 * @new: new entry to be added
 * @head: list head to add it after
 *
 * Insert a new entry after the specified head.
 * This is good for implementing stacks.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_add_rcu()
 * or list_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 */
static inline void list_add_rcu(struct list_head *new, struct list_head *head)
{
	__list_add_rcu(new, head, head->next);
}

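/*
 * Illustrative sketch (not part of this header): a minimal writer adding
 * entries to an RCU-protected list.  The struct foo, foo_head, foo_lock,
 * and add_foo() names are hypothetical; list_add_tail_rcu() is used the
 * same way when adding at the tail.
 *
 *	struct foo {
 *		int key;
 *		struct list_head list;
 *	};
 *
 *	static LIST_HEAD(foo_head);
 *	static DEFINE_SPINLOCK(foo_lock);
 *
 *	void add_foo(struct foo *p)
 *	{
 *		spin_lock(&foo_lock);
 *		list_add_rcu(&p->list, &foo_head);
 *		spin_unlock(&foo_lock);
 *	}
 *
 * The spinlock excludes other updaters; concurrent RCU readers need only
 * rcu_read_lock() around their traversal.
 */
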
/**
 * list_add_tail_rcu - add a new entry to rcu-protected list
 * @new: new entry to be added
 * @head: list head to add it before
 *
 * Insert a new entry before the specified head.
 * This is useful for implementing queues.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_add_tail_rcu()
 * or list_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 */
static inline void list_add_tail_rcu(struct list_head *new,
					struct list_head *head)
{
	__list_add_rcu(new, head->prev, head);
}

/**
 * list_del_rcu - deletes entry from list without re-initialization
 * @entry: the element to delete from the list.
 *
 * Note: list_empty() on entry does not return true after this;
 * the entry is in an undefined state.  This is useful for RCU-based
 * lockfree traversal.
 *
 * In particular, it means that we can not poison the forward
 * pointers that may still be used for walking the list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_del_rcu()
 * or list_add_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 *
 * Note that the caller is not permitted to immediately free
 * the newly deleted entry.  Instead, either synchronize_rcu()
 * or call_rcu() must be used to defer freeing until an RCU
 * grace period has elapsed.
 */
static inline void list_del_rcu(struct list_head *entry)
{
	__list_del(entry->prev, entry->next);
	entry->prev = LIST_POISON2;
}

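/*
 * Illustrative sketch (not part of this header): removing an entry and
 * deferring its freeing past a grace period.  The struct foo, foo_lock,
 * del_foo(), and free_foo_rcu() names are hypothetical; struct foo is
 * assumed to embed both a list_head and an rcu_head.
 *
 *	struct foo {
 *		int key;
 *		struct list_head list;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void free_foo_rcu(struct rcu_head *head)
 *	{
 *		kfree(container_of(head, struct foo, rcu));
 *	}
 *
 *	void del_foo(struct foo *p)
 *	{
 *		spin_lock(&foo_lock);
 *		list_del_rcu(&p->list);
 *		spin_unlock(&foo_lock);
 *		call_rcu(&p->rcu, free_foo_rcu);
 *	}
 *
 * Alternatively, the updater may call synchronize_rcu() after dropping
 * foo_lock and then free the entry directly.
 */
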
/**
 * hlist_del_init_rcu - deletes entry from hash list with re-initialization
 * @n: the element to delete from the hash list.
 *
 * Note: hlist_unhashed() on the node returns true after this.  It is
 * useful for RCU-based lockfree traversal when the writer side must know
 * whether the list entry is still hashed or already unhashed.
 *
 * In particular, it means that we can not poison the forward pointers
 * that may still be used for walking the hash list and we can only
 * zero the pprev pointer so hlist_unhashed() will return true after
 * this.
 *
 * The caller must take whatever precautions are necessary (such as
 * holding appropriate locks) to avoid racing with another
 * list-mutation primitive, such as hlist_add_head_rcu() or
 * hlist_del_rcu(), running on this same list.  However, it is
 * perfectly legal to run concurrently with the _rcu list-traversal
 * primitives, such as hlist_for_each_entry_rcu().
 */
static inline void hlist_del_init_rcu(struct hlist_node *n)
{
	if (!hlist_unhashed(n)) {
		__hlist_del(n);
		n->pprev = NULL;
	}
}

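/*
 * Illustrative sketch (not part of this header): an updater that unhashes
 * an entry and can later test whether it is still hashed.  The struct bar,
 * bar_lock, and hnode names are hypothetical (struct bar is assumed to
 * embed a struct hlist_node called hnode).
 *
 *	void unhash_bar(struct bar *p)
 *	{
 *		spin_lock(&bar_lock);
 *		hlist_del_init_rcu(&p->hnode);
 *		spin_unlock(&bar_lock);
 *	}
 *
 *	int bar_is_hashed(struct bar *p)
 *	{
 *		return !hlist_unhashed(&p->hnode);
 *	}
 *
 * Because only the pprev pointer is zeroed, RCU readers already walking
 * past the entry still see valid forward pointers.
 */
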
/**
 * list_replace_rcu - replace old entry by new one
 * @old : the element to be replaced
 * @new : the new element to insert
 *
 * The @old entry will be replaced with the @new entry atomically.
 * Note: @old should not be empty.
 */
static inline void list_replace_rcu(struct list_head *old,
				struct list_head *new)
{
	new->next = old->next;
	new->prev = old->prev;
	rcu_assign_pointer(new->prev->next, new);
	new->next->prev = new;
	old->prev = LIST_POISON2;
}

/**
 * list_splice_init_rcu - splice an RCU-protected list into an existing list.
 * @list: the RCU-protected list to splice
 * @head: the place in the list to splice the first list into
 * @sync: function to sync: synchronize_rcu(), synchronize_sched(), ...
 *
 * @head can be RCU-read traversed concurrently with this function.
 *
 * Note that this function blocks.
 *
 * Important note: the caller must take whatever action is necessary to
 * prevent any other updates to @head.  In principle, it is possible
 * to modify the list as soon as sync() begins execution.
 * If this sort of thing becomes necessary, an alternative version
 * based on call_rcu() could be created.  But only if -really-
 * needed -- there is no shortage of RCU API members.
 */
static inline void list_splice_init_rcu(struct list_head *list,
					struct list_head *head,
					void (*sync)(void))
{
	struct list_head *first = list->next;
	struct list_head *last = list->prev;
	struct list_head *at = head->next;

	if (list_empty(list))
		return;

	/* "first" and "last" still track the source list, so reinitialize it. */

	INIT_LIST_HEAD(list);

	/*
	 * At this point, the list body still points to the source list.
	 * Wait for any readers to finish using the list before splicing
	 * the list body into the new list.  Any new readers will see
	 * an empty list.
	 */

	sync();

	/*
	 * Readers are finished with the source list, so perform splice.
	 * The order is important if the new list is global and accessible
	 * to concurrent RCU readers.  Note that RCU readers are not
	 * permitted to traverse the prev pointers without excluding
	 * this function.
	 */

	last->next = at;
	rcu_assign_pointer(head->next, first);
	first->prev = head;
	at->prev = last;
}

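/*
 * Illustrative sketch (not part of this header): publishing a privately
 * built batch onto a globally visible RCU-protected list.  The global_head,
 * global_mutex, and publish_batch() names are hypothetical.
 *
 *	static LIST_HEAD(global_head);
 *	static DEFINE_MUTEX(global_mutex);
 *
 *	void publish_batch(struct list_head *pending)
 *	{
 *		mutex_lock(&global_mutex);
 *		list_splice_init_rcu(pending, &global_head, synchronize_rcu);
 *		mutex_unlock(&global_mutex);
 *	}
 *
 * A mutex rather than a spinlock is used here because the sync() callback
 * (synchronize_rcu() in this sketch) blocks.
 */
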
/**
 * list_entry_rcu - get the struct for this entry
 * @ptr: the &struct list_head pointer.
 * @type: the type of the struct this is embedded in.
 * @member: the name of the list_head within the struct.
 *
 * This primitive may safely run concurrently with the _rcu list-mutation
 * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
 */
#define list_entry_rcu(ptr, type, member) \
	container_of(rcu_dereference(ptr), type, member)

/**
 * list_first_entry_rcu - get the first element from a list
 * @ptr: the list head to take the element from.
 * @type: the type of the struct this is embedded in.
 * @member: the name of the list_head within the struct.
 *
 * Note that the list is expected to be non-empty.
 *
 * This primitive may safely run concurrently with the _rcu list-mutation
 * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
 */
#define list_first_entry_rcu(ptr, type, member) \
	list_entry_rcu((ptr)->next, type, member)

#define __list_for_each_rcu(pos, head) \
	for (pos = rcu_dereference((head)->next); \
		pos != (head); \
		pos = rcu_dereference(pos->next))

/**
 * list_for_each_entry_rcu - iterate over rcu list of given type
 * @pos: the type * to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the list_head within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_entry_rcu(pos, head, member) \
	for (pos = list_entry_rcu((head)->next, typeof(*pos), member); \
		prefetch(pos->member.next), &pos->member != (head); \
		pos = list_entry_rcu(pos->member.next, typeof(*pos), member))

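/*
 * Illustrative sketch (not part of this header): an RCU reader scanning
 * the hypothetical foo_head list from the earlier writer example.
 *
 *	int foo_key_present(int key)
 *	{
 *		struct foo *p;
 *		int found = 0;
 *
 *		rcu_read_lock();
 *		list_for_each_entry_rcu(p, &foo_head, list) {
 *			if (p->key == key) {
 *				found = 1;
 *				break;
 *			}
 *		}
 *		rcu_read_unlock();
 *		return found;
 *	}
 *
 * The entry pointed to by p must not be used after rcu_read_unlock()
 * unless the reader has taken its own reference on it.
 */
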
/**
 * list_for_each_continue_rcu
 * @pos: the &struct list_head to use as a loop cursor.
 * @head: the head for your list.
 *
 * Iterate over an rcu-protected list, continuing after current point.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_continue_rcu(pos, head) \
	for ((pos) = rcu_dereference((pos)->next); \
		prefetch((pos)->next), (pos) != (head); \
		(pos) = rcu_dereference((pos)->next))

/**
 * list_for_each_entry_continue_rcu - continue iteration over list of given type
 * @pos: the type * to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the list_head within the struct.
 *
 * Continue to iterate over list of given type, continuing after
 * the current position.
 */
#define list_for_each_entry_continue_rcu(pos, head, member) \
	for (pos = list_entry_rcu(pos->member.next, typeof(*pos), member); \
		prefetch(pos->member.next), &pos->member != (head); \
		pos = list_entry_rcu(pos->member.next, typeof(*pos), member))

/**
 * hlist_del_rcu - deletes entry from hash list without re-initialization
 * @n: the element to delete from the hash list.
 *
 * Note: hlist_unhashed() on entry does not return true after this;
 * the entry is in an undefined state.  It is useful for RCU-based
 * lockfree traversal.
 *
 * In particular, it means that we can not poison the forward
 * pointers that may still be used for walking the hash list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu().
 */
static inline void hlist_del_rcu(struct hlist_node *n)
{
	__hlist_del(n);
	n->pprev = LIST_POISON2;
}

/**
 * hlist_replace_rcu - replace old entry by new one
 * @old : the element to be replaced
 * @new : the new element to insert
 *
 * The @old entry will be replaced with the @new entry atomically.
 */
static inline void hlist_replace_rcu(struct hlist_node *old,
					struct hlist_node *new)
{
	struct hlist_node *next = old->next;

	new->next = next;
	new->pprev = old->pprev;
	rcu_assign_pointer(*new->pprev, new);
	if (next)
		new->next->pprev = &new->next;
	old->pprev = LIST_POISON2;
}

/**
 * hlist_add_head_rcu
 * @n: the element to add to the hash list.
 * @h: the list to add to.
 *
 * Description:
 * Adds the specified element to the specified hlist,
 * while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.  Regardless of the type of CPU, the
 * list-traversal primitive must be guarded by rcu_read_lock().
 */
static inline void hlist_add_head_rcu(struct hlist_node *n,
					struct hlist_head *h)
{
	struct hlist_node *first = h->first;

	n->next = first;
	n->pprev = &h->first;
	rcu_assign_pointer(h->first, n);
	if (first)
		first->pprev = &n->next;
}

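/*
 * Illustrative sketch (not part of this header): inserting into one bucket
 * of an RCU-protected hash table.  BAR_TABLE_SIZE, struct bar, bar_table,
 * bar_lock, and hash_bar() are hypothetical.
 *
 *	#define BAR_TABLE_SIZE 16
 *
 *	struct bar {
 *		unsigned int key;
 *		struct hlist_node hnode;
 *	};
 *
 *	static struct hlist_head bar_table[BAR_TABLE_SIZE];
 *	static DEFINE_SPINLOCK(bar_lock);
 *
 *	void hash_bar(struct bar *p)
 *	{
 *		spin_lock(&bar_lock);
 *		hlist_add_head_rcu(&p->hnode,
 *				   &bar_table[p->key % BAR_TABLE_SIZE]);
 *		spin_unlock(&bar_lock);
 *	}
 */
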
/**
 * hlist_add_before_rcu
 * @n: the new element to add to the hash list.
 * @next: the existing element to add the new element before.
 *
 * Description:
 * Adds the specified element to the specified hlist
 * before the specified node while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.
 */
static inline void hlist_add_before_rcu(struct hlist_node *n,
					struct hlist_node *next)
{
	n->pprev = next->pprev;
	n->next = next;
	rcu_assign_pointer(*(n->pprev), n);
	next->pprev = &n->next;
}

/**
 * hlist_add_after_rcu
 * @prev: the existing element to add the new element after.
 * @n: the new element to add to the hash list.
 *
 * Description:
 * Adds the specified element to the specified hlist
 * after the specified node while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.
 */
static inline void hlist_add_after_rcu(struct hlist_node *prev,
					struct hlist_node *n)
{
	n->next = prev->next;
	n->pprev = &prev->next;
	rcu_assign_pointer(prev->next, n);
	if (n->next)
		n->next->pprev = &n->next;
}

/**
 * hlist_for_each_entry_rcu - iterate over rcu list of given type
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct hlist_node to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the hlist_node within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define hlist_for_each_entry_rcu(tpos, pos, head, member) \
	for (pos = rcu_dereference((head)->first); \
		pos && ({ prefetch(pos->next); 1; }) && \
		({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \
		pos = rcu_dereference(pos->next))

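/*
 * Illustrative sketch (not part of this header): an RCU reader looking up
 * an entry in the hypothetical bar_table from the earlier insertion
 * example.  Note the extra hlist_node cursor required by this form of
 * hlist_for_each_entry_rcu().
 *
 *	struct bar *find_bar(unsigned int key)
 *	{
 *		struct hlist_node *node;
 *		struct bar *p;
 *
 *		rcu_read_lock();
 *		hlist_for_each_entry_rcu(p, node,
 *				&bar_table[key % BAR_TABLE_SIZE], hnode) {
 *			if (p->key == key) {
 *				rcu_read_unlock();
 *				return p;
 *			}
 *		}
 *		rcu_read_unlock();
 *		return NULL;
 *	}
 *
 * Returning p after rcu_read_unlock() is only safe if the caller holds its
 * own reference or the entry is otherwise guaranteed to remain live;
 * otherwise the lookup and the use of the entry must share one RCU
 * read-side critical section.
 */
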
#endif /* __KERNEL__ */
#endif