#ifndef _LINUX_WAIT_H
#define _LINUX_WAIT_H

#define WNOHANG		0x00000001
#define WUNTRACED	0x00000002
#define WSTOPPED	WUNTRACED
#define WEXITED		0x00000004
#define WCONTINUED	0x00000008
#define WNOWAIT		0x01000000	/* Don't reap, just poll status. */

#define __WNOTHREAD	0x20000000	/* Don't wait on children of other threads in this group */
#define __WALL		0x40000000	/* Wait on all children, regardless of type */
#define __WCLONE	0x80000000	/* Wait only on non-SIGCHLD children */

/* First argument to waitid: */
#define P_ALL		0
#define P_PID		1
#define P_PGID		2

#ifdef __KERNEL__

#include <linux/list.h>
#include <linux/stddef.h>
#include <linux/spinlock.h>
#include <asm/system.h>
#include <asm/current.h>

typedef struct __wait_queue wait_queue_t;
typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key);
int default_wake_function(wait_queue_t *wait, unsigned mode, int flags, void *key);

struct __wait_queue {
	unsigned int flags;
#define WQ_FLAG_EXCLUSIVE	0x01
	void *private;
	wait_queue_func_t func;
	struct list_head task_list;
};

struct wait_bit_key {
	void *flags;
	int bit_nr;
};

struct wait_bit_queue {
	struct wait_bit_key key;
	wait_queue_t wait;
};

struct __wait_queue_head {
	spinlock_t lock;
	struct list_head task_list;
};
typedef struct __wait_queue_head wait_queue_head_t;

struct task_struct;

/*
 * Macros for declaration and initialisation of the datatypes
 */

#define __WAITQUEUE_INITIALIZER(name, tsk) {				\
	.private	= tsk,						\
	.func		= default_wake_function,			\
	.task_list	= { NULL, NULL } }

#define DECLARE_WAITQUEUE(name, tsk)					\
	wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk)

#define __WAIT_QUEUE_HEAD_INITIALIZER(name) {				\
	.lock		= __SPIN_LOCK_UNLOCKED(name.lock),		\
	.task_list	= { &(name).task_list, &(name).task_list } }

#define DECLARE_WAIT_QUEUE_HEAD(name)					\
	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INITIALIZER(name)

#define __WAIT_BIT_KEY_INITIALIZER(word, bit)				\
	{ .flags = word, .bit_nr = bit, }

extern void __init_waitqueue_head(wait_queue_head_t *q, struct lock_class_key *);

#define init_waitqueue_head(q)				\
	do {						\
		static struct lock_class_key __key;	\
							\
		__init_waitqueue_head((q), &__key);	\
	} while (0)

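/*
 * Illustrative example (not part of the original header): a minimal sketch of
 * the two initialisation styles above. The names my_static_wq, struct
 * my_device and my_device_init() are hypothetical.
 */
static DECLARE_WAIT_QUEUE_HEAD(my_static_wq);	/* file-scope head, statically initialised */

struct my_device {
	wait_queue_head_t read_wq;		/* head embedded in a driver structure */
};

static inline void my_device_init(struct my_device *dev)
{
	/* embedded or dynamically allocated heads must be initialised at run time */
	init_waitqueue_head(&dev->read_wq);
}
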
#ifdef CONFIG_LOCKDEP
# define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
	({ init_waitqueue_head(&name); name; })
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
#else
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
#endif

static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
{
	q->flags = 0;
	q->private = p;
	q->func = default_wake_function;
}

static inline void init_waitqueue_func_entry(wait_queue_t *q,
					wait_queue_func_t func)
{
	q->flags = 0;
	q->private = NULL;
	q->func = func;
}

static inline int waitqueue_active(wait_queue_head_t *q)
{
	return !list_empty(&q->task_list);
}

extern void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait);
extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);

static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
{
	list_add(&new->task_list, &head->task_list);
}

/*
 * Used for wake-one threads:
 */
static inline void __add_wait_queue_tail(wait_queue_head_t *head,
					wait_queue_t *new)
{
	list_add_tail(&new->task_list, &head->task_list);
}

static inline void __remove_wait_queue(wait_queue_head_t *head,
					wait_queue_t *old)
{
	list_del(&old->task_list);
}

void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr,
			void *key);
void __wake_up_locked(wait_queue_head_t *q, unsigned int mode);
void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
void __wake_up_bit(wait_queue_head_t *, void *, int);
int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
void wake_up_bit(void *, int);
int out_of_line_wait_on_bit(void *, int, int (*)(void *), unsigned);
int out_of_line_wait_on_bit_lock(void *, int, int (*)(void *), unsigned);
wait_queue_head_t *bit_waitqueue(void *, int);

#define wake_up(x)			__wake_up(x, TASK_NORMAL, 1, NULL)
#define wake_up_nr(x, nr)		__wake_up(x, TASK_NORMAL, nr, NULL)
#define wake_up_all(x)			__wake_up(x, TASK_NORMAL, 0, NULL)
#define wake_up_locked(x)		__wake_up_locked((x), TASK_NORMAL)

#define wake_up_interruptible(x)	__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_interruptible_nr(x, nr)	__wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
#define wake_up_interruptible_all(x)	__wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
#define wake_up_interruptible_sync(x)	__wake_up_sync((x), TASK_INTERRUPTIBLE, 1)

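/*
 * Illustrative example (not part of the original header): waitqueue_active()
 * lets a fast path skip wake_up() entirely when nobody is sleeping on the
 * queue. The condition update must be visible before the emptiness check,
 * hence the explicit barrier. my_signal_readers(), wq and flag are
 * hypothetical names; the snippet assumes <linux/sched.h> has been included
 * by the translation unit.
 */
static inline void my_signal_readers(wait_queue_head_t *wq, int *flag)
{
	*flag = 1;
	smp_mb();	/* pairs with the barrier in prepare_to_wait()/set_current_state() */
	if (waitqueue_active(wq))
		wake_up(wq);
}
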
/*
 * Wakeup macros to be used to report events to the targets.
 */
#define wake_up_poll(x, m)					\
	__wake_up(x, TASK_NORMAL, 1, (void *) (m))
#define wake_up_locked_poll(x, m)				\
	__wake_up_locked_key((x), TASK_NORMAL, (void *) (m))
#define wake_up_interruptible_poll(x, m)			\
	__wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m))
#define wake_up_interruptible_sync_poll(x, m)			\
	__wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m))

#define __wait_event(wq, condition)					\
do {									\
	DEFINE_WAIT(__wait);						\
									\
	for (;;) {							\
		prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE);	\
		if (condition)						\
			break;						\
		schedule();						\
	}								\
	finish_wait(&wq, &__wait);					\
} while (0)

/**
 * wait_event - sleep until a condition becomes true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event(wq, condition)					\
do {									\
	if (condition)							\
		break;							\
	__wait_event(wq, condition);					\
} while (0)

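/*
 * Illustrative example (not part of the original header): the canonical
 * wait_event()/wake_up() pairing. my_wq, my_cond, my_consumer() and
 * my_producer() are hypothetical names; the snippet assumes <linux/sched.h>
 * has been included by the translation unit.
 */
static DECLARE_WAIT_QUEUE_HEAD(my_wq);
static int my_cond;

static inline void my_consumer(void)
{
	/* sleeps in TASK_UNINTERRUPTIBLE until my_cond is non-zero */
	wait_event(my_wq, my_cond != 0);
}

static inline void my_producer(void)
{
	my_cond = 1;
	wake_up(&my_wq);	/* must follow the update of my_cond */
}
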
#define __wait_event_timeout(wq, condition, ret)			\
do {									\
	DEFINE_WAIT(__wait);						\
									\
	for (;;) {							\
		prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE);	\
		if (condition)						\
			break;						\
		ret = schedule_timeout(ret);				\
		if (!ret)						\
			break;						\
	}								\
	finish_wait(&wq, &__wait);					\
} while (0)

/**
 * wait_event_timeout - sleep until a condition becomes true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if the @timeout elapsed, and the remaining
 * jiffies if the @condition evaluated to true before the timeout elapsed.
 */
#define wait_event_timeout(wq, condition, timeout)			\
({									\
	long __ret = timeout;						\
	if (!(condition))						\
		__wait_event_timeout(wq, condition, __ret);		\
	__ret;								\
})

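/*
 * Illustrative example (not part of the original header): bounding the sleep.
 * my_tmo_wq, my_tmo_cond and my_wait_with_timeout() are hypothetical names;
 * the snippet assumes <linux/sched.h> has been included by the translation
 * unit.
 */
static DECLARE_WAIT_QUEUE_HEAD(my_tmo_wq);
static int my_tmo_cond;

static inline int my_wait_with_timeout(void)
{
	/* give up after one second (HZ jiffies) */
	long left = wait_event_timeout(my_tmo_wq, my_tmo_cond != 0, HZ);

	if (!left)
		return -ETIMEDOUT;	/* timeout elapsed, condition still false */
	return 0;			/* condition became true, 'left' jiffies remained */
}
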
#define __wait_event_interruptible(wq, condition, ret)			\
do {									\
	DEFINE_WAIT(__wait);						\
									\
	for (;;) {							\
		prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE);	\
		if (condition)						\
			break;						\
		if (!signal_pending(current)) {				\
			schedule();					\
			continue;					\
		}							\
		ret = -ERESTARTSYS;					\
		break;							\
	}								\
	finish_wait(&wq, &__wait);					\
} while (0)

/**
 * wait_event_interruptible - sleep until a condition becomes true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible(wq, condition)				\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__wait_event_interruptible(wq, condition, __ret);	\
	__ret;								\
})

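/*
 * Illustrative example (not part of the original header): an interruptible
 * sleep, typically used on a syscall path so that signals can abort the wait.
 * my_intr_wq, my_intr_cond and my_read_wait() are hypothetical names; the
 * snippet assumes <linux/sched.h> has been included by the translation unit.
 */
static DECLARE_WAIT_QUEUE_HEAD(my_intr_wq);
static int my_intr_cond;

static inline int my_read_wait(void)
{
	int ret = wait_event_interruptible(my_intr_wq, my_intr_cond != 0);

	if (ret)
		return ret;	/* -ERESTARTSYS: let the signal be handled first */
	/* condition is true here; proceed with the read */
	return 0;
}
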
#define __wait_event_interruptible_timeout(wq, condition, ret)		\
do {									\
	DEFINE_WAIT(__wait);						\
									\
	for (;;) {							\
		prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE);	\
		if (condition)						\
			break;						\
		if (!signal_pending(current)) {				\
			ret = schedule_timeout(ret);			\
			if (!ret)					\
				break;					\
			continue;					\
		}							\
		ret = -ERESTARTSYS;					\
		break;							\
	}								\
	finish_wait(&wq, &__wait);					\
} while (0)

/**
 * wait_event_interruptible_timeout - sleep until a condition becomes true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
 * was interrupted by a signal, and the remaining jiffies if the
 * @condition evaluated to true before the timeout elapsed.
 */
#define wait_event_interruptible_timeout(wq, condition, timeout)	\
({									\
	long __ret = timeout;						\
	if (!(condition))						\
		__wait_event_interruptible_timeout(wq, condition, __ret); \
	__ret;								\
})

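/*
 * Illustrative example (not part of the original header): combining the two
 * previous variants, so the sleep ends on the condition, a signal or a
 * timeout. my_itmo_wq, my_itmo_cond and my_poll_once() are hypothetical
 * names; the snippet assumes <linux/sched.h> has been included by the
 * translation unit.
 */
static DECLARE_WAIT_QUEUE_HEAD(my_itmo_wq);
static int my_itmo_cond;

static inline int my_poll_once(void)
{
	long ret = wait_event_interruptible_timeout(my_itmo_wq,
						    my_itmo_cond != 0, 5 * HZ);

	if (ret == 0)
		return -ETIMEDOUT;	/* five seconds elapsed */
	if (ret < 0)
		return ret;		/* -ERESTARTSYS: interrupted by a signal */
	return 0;			/* condition became true with 'ret' jiffies to spare */
}
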
#define __wait_event_interruptible_exclusive(wq, condition, ret)	\
do {									\
	DEFINE_WAIT(__wait);						\
									\
	for (;;) {							\
		prepare_to_wait_exclusive(&wq, &__wait,			\
					TASK_INTERRUPTIBLE);		\
		if (condition) {					\
			finish_wait(&wq, &__wait);			\
			break;						\
		}							\
		if (!signal_pending(current)) {				\
			schedule();					\
			continue;					\
		}							\
		ret = -ERESTARTSYS;					\
		abort_exclusive_wait(&wq, &__wait,			\
				TASK_INTERRUPTIBLE, NULL);		\
		break;							\
	}								\
} while (0)

#define wait_event_interruptible_exclusive(wq, condition)		\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__wait_event_interruptible_exclusive(wq, condition, __ret); \
	__ret;								\
})

#define __wait_event_killable(wq, condition, ret)			\
do {									\
	DEFINE_WAIT(__wait);						\
									\
	for (;;) {							\
		prepare_to_wait(&wq, &__wait, TASK_KILLABLE);		\
		if (condition)						\
			break;						\
		if (!fatal_signal_pending(current)) {			\
			schedule();					\
			continue;					\
		}							\
		ret = -ERESTARTSYS;					\
		break;							\
	}								\
	finish_wait(&wq, &__wait);					\
} while (0)

/**
 * wait_event_killable - sleep until a condition becomes true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_KILLABLE) until the
 * @condition evaluates to true or a fatal signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * fatal signal and 0 if @condition evaluated to true.
 */
#define wait_event_killable(wq, condition)				\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__wait_event_killable(wq, condition, __ret);		\
	__ret;								\
})

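/*
 * Illustrative example (not part of the original header): a killable sleep is
 * the usual compromise for filesystem and IO paths: the task does not react
 * to ordinary signals, but a fatal signal (such as SIGKILL) still ends the
 * wait. my_kill_wq, my_io_done and my_wait_for_io() are hypothetical names;
 * the snippet assumes <linux/sched.h> has been included by the translation
 * unit.
 */
static DECLARE_WAIT_QUEUE_HEAD(my_kill_wq);
static int my_io_done;

static inline int my_wait_for_io(void)
{
	int ret = wait_event_killable(my_kill_wq, my_io_done != 0);

	if (ret)
		return ret;	/* -ERESTARTSYS: a fatal signal is pending */
	return 0;
}
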
/*
 * Must be called with the spinlock in the wait_queue_head_t held.
 */
static inline void add_wait_queue_exclusive_locked(wait_queue_head_t *q,
						   wait_queue_t * wait)
{
	wait->flags |= WQ_FLAG_EXCLUSIVE;
	__add_wait_queue_tail(q, wait);
}

/*
 * Must be called with the spinlock in the wait_queue_head_t held.
 */
static inline void remove_wait_queue_locked(wait_queue_head_t *q,
					    wait_queue_t * wait)
{
	__remove_wait_queue(q, wait);
}

/*
 * These are the old interfaces to sleep waiting for an event.
 * They are racy. DO NOT use them, use the wait_event* interfaces above.
 * We plan to remove these interfaces.
 */
extern void sleep_on(wait_queue_head_t *q);
extern long sleep_on_timeout(wait_queue_head_t *q,
			     signed long timeout);
extern void interruptible_sleep_on(wait_queue_head_t *q);
extern long interruptible_sleep_on_timeout(wait_queue_head_t *q,
					   signed long timeout);

/*
 * Waitqueues which are removed from the waitqueue_head at wakeup time
 */
void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state);
void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state);
void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait,
			unsigned int mode, void *key);
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);

#define DEFINE_WAIT_FUNC(name, function)				\
	wait_queue_t name = {						\
		.private	= current,				\
		.func		= function,				\
		.task_list	= LIST_HEAD_INIT((name).task_list),	\
	}

#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)

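/*
 * Illustrative example (not part of the original header): the open-coded wait
 * loop that the wait_event*() macros wrap. Useful when the sleep needs to
 * drop a lock or do other work between the condition check and schedule().
 * my_wait_for(), wq and cond are hypothetical names; the snippet assumes
 * <linux/sched.h> has been included by the translation unit.
 */
static inline int my_wait_for(wait_queue_head_t *wq, int *cond)
{
	DEFINE_WAIT(wait);
	int ret = 0;

	for (;;) {
		/* queue ourselves and mark the task interruptible */
		prepare_to_wait(wq, &wait, TASK_INTERRUPTIBLE);
		if (*cond)
			break;
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		schedule();
	}
	/* restore TASK_RUNNING and unlink from the queue */
	finish_wait(wq, &wait);
	return ret;
}
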
#define DEFINE_WAIT_BIT(name, word, bit)				\
	struct wait_bit_queue name = {					\
		.key = __WAIT_BIT_KEY_INITIALIZER(word, bit),		\
		.wait	= {						\
			.private	= current,			\
			.func		= wake_bit_function,		\
			.task_list	=				\
				LIST_HEAD_INIT((name).wait.task_list),	\
		},							\
	}

#define init_wait(wait)							\
	do {								\
		(wait)->private = current;				\
		(wait)->func = autoremove_wake_function;		\
		INIT_LIST_HEAD(&(wait)->task_list);			\
	} while (0)

/**
 * wait_on_bit - wait for a bit to be cleared
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * There is a standard hashed waitqueue table for generic use. This
 * is the part of the hashtable's accessor API that waits on a bit.
 * One uses wait_on_bit() in threads that wait for the bit to clear
 * but have no intention of setting it.
 */
static inline int wait_on_bit(void *word, int bit,
				int (*action)(void *), unsigned mode)
{
	if (!test_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit(word, bit, action, mode);
}

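/*
 * Illustrative example (not part of the original header): waiting for a
 * "busy" bit to clear. The caller supplies @action, the function that
 * actually sleeps; a plain schedule() is the common choice. my_bit_wait(),
 * my_wait_idle(), flags and MY_BUSY_BIT are hypothetical names; the snippet
 * assumes <linux/sched.h> and <linux/bitops.h> are available in the
 * translation unit.
 */
#define MY_BUSY_BIT	0

static inline int my_bit_wait(void *word)
{
	schedule();		/* sleep until woken by wake_up_bit() */
	return 0;		/* a non-zero return would abort the wait */
}

static inline int my_wait_idle(unsigned long *flags)
{
	/* sleeps uninterruptibly until MY_BUSY_BIT is clear in *flags */
	return wait_on_bit(flags, MY_BUSY_BIT, my_bit_wait, TASK_UNINTERRUPTIBLE);
}
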
/**
 * wait_on_bit_lock - wait for a bit to be cleared, when wanting to set it
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * There is a standard hashed waitqueue table for generic use. This
 * is the part of the hashtable's accessor API that waits on a bit
 * when one intends to set it, for instance when trying to lock bitflags.
 * One uses wait_on_bit_lock() in threads that wait for the bit to
 * clear with the intention of setting it, and clearing it again when done.
 */
static inline int wait_on_bit_lock(void *word, int bit,
				int (*action)(void *), unsigned mode)
{
	if (!test_and_set_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit_lock(word, bit, action, mode);
}

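/*
 * Illustrative example (not part of the original header): using the bit as a
 * simple lock. wait_on_bit_lock() returns with the bit set; the owner clears
 * it and wakes the next waiter via wake_up_bit(). This reuses the
 * hypothetical my_bit_wait() helper from the wait_on_bit() sketch above;
 * my_lock_bit(), my_unlock_bit(), flags and MY_LOCK_BIT are also hypothetical,
 * the barrier after clear_bit() follows the usual wake_up_bit() usage, and
 * the snippet assumes <linux/sched.h> and <linux/bitops.h> are available.
 */
#define MY_LOCK_BIT	1

static inline int my_lock_bit(unsigned long *flags)
{
	/* returns 0 with MY_LOCK_BIT set, i.e. the "lock" held */
	return wait_on_bit_lock(flags, MY_LOCK_BIT, my_bit_wait, TASK_UNINTERRUPTIBLE);
}

static inline void my_unlock_bit(unsigned long *flags)
{
	clear_bit(MY_LOCK_BIT, flags);
	smp_mb__after_clear_bit();	/* make the clear visible before waking waiters */
	wake_up_bit(flags, MY_LOCK_BIT);
}
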
#endif /* __KERNEL__ */

#endif