#ifndef __LINUX_SEQLOCK_H
#define __LINUX_SEQLOCK_H
/*
 * Reader/writer consistent mechanism without starving writers. This type of
 * lock is for data where the reader wants a consistent set of information
 * and is willing to retry if the information changes. Readers never
 * block but they may have to retry if a writer is in
 * progress. Writers do not wait for readers.
 *
 * This is not as cache friendly as brlock. Also, this will not work
 * for data that contains pointers, because any writer could
 * invalidate a pointer that a reader was following.
 *
 * Expected reader usage:
 *	do {
 *		seq = read_seqbegin(&foo);
 *		...
 *	} while (read_seqretry(&foo, seq));
 *
 * On non-SMP the spin locks disappear but the writer still needs
 * to increment the sequence variables because an interrupt routine could
 * change the state of the data.
 *
 * Based on x86_64 vsyscall gettimeofday
 * by Keith Owens and Andrea Arcangeli
 */

#include <linux/spinlock.h>
#include <linux/preempt.h>

typedef struct {
	unsigned sequence;
	spinlock_t lock;
} seqlock_t;

/*
 * These macros triggered gcc-3.x compile-time problems. We think these are
 * OK now. Be cautious.
 */
#define __SEQLOCK_UNLOCKED(lockname) \
	{ 0, __SPIN_LOCK_UNLOCKED(lockname) }

#define SEQLOCK_UNLOCKED \
	__SEQLOCK_UNLOCKED(old_style_seqlock_init)

#define seqlock_init(x)					\
	do {						\
		(x)->sequence = 0;			\
		spin_lock_init(&(x)->lock);		\
	} while (0)

#define DEFINE_SEQLOCK(x) \
	seqlock_t x = __SEQLOCK_UNLOCKED(x)

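/*
 * A minimal initialization sketch (illustrative only, not part of this
 * header; the names 'foo_lock', 'bar_lock' and 'struct baz' are
 * hypothetical):
 *
 *	static DEFINE_SEQLOCK(foo_lock);
 *
 *	seqlock_t bar_lock = __SEQLOCK_UNLOCKED(bar_lock);
 *
 *	struct baz {
 *		seqlock_t lock;
 *	};
 *	...
 *	seqlock_init(&b->lock);		where 'b' is a struct baz pointer
 */
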
/* Lock out other writers and update the count.
 * Acts like a normal spin_lock/unlock.
 * Don't need preempt_disable() because that is in the spin_lock already.
 */
static inline void write_seqlock(seqlock_t *sl)
{
	spin_lock(&sl->lock);
	++sl->sequence;
	smp_wmb();
}

static inline void write_sequnlock(seqlock_t *sl)
{
	smp_wmb();
	sl->sequence++;
	spin_unlock(&sl->lock);
}

static inline int write_tryseqlock(seqlock_t *sl)
{
	int ret = spin_trylock(&sl->lock);

	if (ret) {
		++sl->sequence;
		smp_wmb();
	}
	return ret;
}

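/*
 * A minimal writer-side sketch (illustrative only; 'foo_lock' and the
 * fields of 'foo_data' are hypothetical):
 *
 *	write_seqlock(&foo_lock);
 *	foo_data.a = new_a;
 *	foo_data.b = new_b;
 *	write_sequnlock(&foo_lock);
 *
 * Writers serialize against each other through the embedded spinlock;
 * readers are never blocked and simply retry if they raced with a writer.
 */
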
/* Start of read calculation -- fetch last complete writer token */
static __always_inline unsigned read_seqbegin(const seqlock_t *sl)
{
	unsigned ret;

repeat:
	ret = ACCESS_ONCE(sl->sequence);
	if (unlikely(ret & 1)) {
		cpu_relax();
		goto repeat;
	}
	smp_rmb();

	return ret;
}

/*
 * Test if the reader processed invalid data.
 *
 * If the sequence value changed, a writer altered the data while the
 * reader was inside the read section.
 */
static __always_inline int read_seqretry(const seqlock_t *sl, unsigned start)
{
	smp_rmb();

	return (sl->sequence != start);
}

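/*
 * A reader-side sketch that copies out a consistent snapshot and retries
 * if a writer raced with it (illustrative only; 'foo_lock' and 'foo_data'
 * are hypothetical):
 *
 *	unsigned seq;
 *	int a, b;
 *
 *	do {
 *		seq = read_seqbegin(&foo_lock);
 *		a = foo_data.a;
 *		b = foo_data.b;
 *	} while (read_seqretry(&foo_lock, seq));
 *
 * The snapshot must be copied to locals; following pointers read inside
 * the section is unsafe, as noted at the top of this file.
 */
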

/*
 * Version using a sequence counter only.
 * This can be used when the code has its own mutex protecting the
 * update, taken before the write_seqcount_begin() and released
 * after the write_seqcount_end().
 */

typedef struct seqcount {
	unsigned sequence;
} seqcount_t;

#define SEQCNT_ZERO { 0 }
#define seqcount_init(x)	do { *(x) = (seqcount_t) SEQCNT_ZERO; } while (0)

/* Start of read using pointer to a sequence counter only. */
static inline unsigned read_seqcount_begin(const seqcount_t *s)
{
	unsigned ret;

repeat:
	ret = s->sequence;
	smp_rmb();
	if (unlikely(ret & 1)) {
		cpu_relax();
		goto repeat;
	}
	return ret;
}

/*
 * Test if the reader processed invalid data because the sequence
 * number has changed.
 */
static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
{
	smp_rmb();

	return s->sequence != start;
}


/*
 * The sequence counter only version assumes that callers provide
 * their own mutual exclusion between writers.
 */
static inline void write_seqcount_begin(seqcount_t *s)
{
	s->sequence++;
	smp_wmb();
}

static inline void write_seqcount_end(seqcount_t *s)
{
	smp_wmb();
	s->sequence++;
}

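/*
 * A seqcount_t sketch where the caller supplies its own writer
 * serialization (illustrative only; 'foo_mutex', 'foo_seq' and
 * 'foo_data' are hypothetical):
 *
 *	Writer, already holding foo_mutex:
 *
 *		write_seqcount_begin(&foo_seq);
 *		foo_data.a = new_a;
 *		write_seqcount_end(&foo_seq);
 *
 *	Lockless reader:
 *
 *		do {
 *			seq = read_seqcount_begin(&foo_seq);
 *			a = foo_data.a;
 *		} while (read_seqcount_retry(&foo_seq, seq));
 */
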
/*
 * Possible sw/hw IRQ protected versions of the interfaces.
 */
#define write_seqlock_irqsave(lock, flags)				\
	do { local_irq_save(flags); write_seqlock(lock); } while (0)
#define write_seqlock_irq(lock)						\
	do { local_irq_disable(); write_seqlock(lock); } while (0)
#define write_seqlock_bh(lock)						\
	do { local_bh_disable(); write_seqlock(lock); } while (0)

#define write_sequnlock_irqrestore(lock, flags)				\
	do { write_sequnlock(lock); local_irq_restore(flags); } while (0)
#define write_sequnlock_irq(lock)					\
	do { write_sequnlock(lock); local_irq_enable(); } while (0)
#define write_sequnlock_bh(lock)					\
	do { write_sequnlock(lock); local_bh_enable(); } while (0)

#define read_seqbegin_irqsave(lock, flags)				\
	({ local_irq_save(flags); read_seqbegin(lock); })

#define read_seqretry_irqrestore(lock, iv, flags)			\
	({								\
		int ret = read_seqretry(lock, iv);			\
		local_irq_restore(flags);				\
		ret;							\
	})

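/*
 * A sketch of the irqsave variants, for a writer that can also be
 * reached from interrupt context (illustrative only; 'foo_lock' and
 * 'foo_data' are hypothetical):
 *
 *	unsigned long flags;
 *
 *	write_seqlock_irqsave(&foo_lock, flags);
 *	foo_data.a = new_a;
 *	write_sequnlock_irqrestore(&foo_lock, flags);
 */
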
#endif /* __LINUX_SEQLOCK_H */