/*
 * NET3:	Garbage Collector For AF_UNIX sockets
 *
 * Garbage Collector:
 *	Copyright (C) Barak A. Pearlmutter.
 *	Released under the GPL version 2 or later.
 *
 * Chopped about by Alan Cox 22/3/96 to make it fit the AF_UNIX socket problem.
 * If it doesn't work blame me, it worked when Barak sent it.
 *
 * Assumptions:
 *
 *  - object with a mark bit
 *  - free list
 *
 * Current optimizations:
 *
 *  - explicit stack instead of recursion
 *  - tail recursion on the first-born instead of an immediate push/pop
 *  - the nodes that should not be killed are gathered into a tree;
 *    the stack is just the path from the root to the current pointer.
 *
 * Future optimizations:
 *
 *  - don't just push the entire root set; process it in place
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Fixes:
 *	Alan Cox	07 Sept 1997	Vmalloc internal stack as needed.
 *					Cope with changing max_files.
 *	Al Viro		11 Oct 1998
 *		The graph may have cycles.  That is, we can send the
 *		descriptor of foo to bar and vice versa.  The old code
 *		choked on that.  Fix: move the SCM_RIGHTS skbs onto a
 *		separate list and then skb_free() them all instead of
 *		doing explicit fput's.
 *		Another problem: since fput() may block, somebody may
 *		create a new unix_socket while we are in the middle of
 *		the sweep phase.  Fix: invert the logic w.r.t. MARKED --
 *		mark everything at the beginning and unmark the non-junk
 *		ones.
 *
 *		[12 Oct 1998] AAARGH!  The new code purges all SCM_RIGHTS
 *		sent to connect()'ed but still not accept()'ed sockets.
 *		Fixed.  The old code had a slightly different problem
 *		here: an extra fput() when we passed a descriptor via
 *		such a socket and then closed it (the descriptor).  That
 *		would happen on each unix_gc() until the accept().  Since
 *		the struct file in question would go to the free list and
 *		might be reused... that might be the reason for the
 *		random oopses on filp_close() in unrelated processes.
 *
 *	AV		28 Feb 1999
 *		Kill the explicit allocation of the stack.  Now we keep
 *		the tree with its root in a dummy node, plus a pointer
 *		(gc_current) to one of the nodes.  The stack is
 *		represented as the path from gc_current to the dummy.
 *		Unmark now means "add to the tree".  Push == "make it a
 *		child of gc_current".  Pop == "move gc_current to its
 *		parent".  We keep only pointers to parents (->gc_tree).
 *	AV		1 Mar 1999
 *		Damn.  Added the missing check for ->dead in the scan of
 *		the listen queues.
 *
 *	Miklos Szeredi	25 Jun 2007
 *		Reimplemented with a cycle-collecting algorithm.  This
 *		should solve several problems with the previous code,
 *		such as being racy w.r.t. receive and holding up
 *		unrelated socket operations.
 */
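
/*
 * Illustrative example of the problem being solved (based on the
 * 11 Oct 1998 note above): create sockets foo and bar, send foo's
 * descriptor to bar via SCM_RIGHTS and bar's to foo, then close()
 * both descriptors.  Each socket is now kept alive only by the fd
 * queued on the other one -- a reference cycle that plain reference
 * counting can never free, which is why a cycle collector is needed.
 */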

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>
#include <linux/wait.h>

#include <net/sock.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <net/tcp_states.h>

/* Internal data structures and random procedures: */

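/*
 * gc_inflight_list holds every unix socket whose in-flight count is
 * non-zero; gc_candidates holds the subset currently being examined
 * by a gc run.  Both lists, and unix_tot_inflight, are protected by
 * unix_gc_lock.
 */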
static LIST_HEAD(gc_inflight_list);
static LIST_HEAD(gc_candidates);
static DEFINE_SPINLOCK(unix_gc_lock);
static DECLARE_WAIT_QUEUE_HEAD(unix_gc_wait);

unsigned int unix_tot_inflight;

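/*
 * Return the AF_UNIX sock backing @filp, or NULL if @filp does not
 * refer to a PF_UNIX socket.  This is how the collector maps the
 * struct file pointers found in SCM_RIGHTS messages back to sockets.
 */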
static struct sock *unix_get_socket(struct file *filp)
{
	struct sock *u_sock = NULL;
	struct inode *inode = filp->f_path.dentry->d_inode;

	/* Is it a socket inode? */
	if (S_ISSOCK(inode->i_mode)) {
		struct socket *sock = SOCKET_I(inode);
		struct sock *s = sock->sk;

		/* Is it a PF_UNIX socket? */
		if (s && sock->ops && sock->ops->family == PF_UNIX)
			u_sock = s;
	}
	return u_sock;
}

/*
 * Keep an in-flight count for the file descriptor if it refers to an
 * AF_UNIX socket.
 */

void unix_inflight(struct file *fp)
{
	struct sock *s = unix_get_socket(fp);
	if (s) {
		struct unix_sock *u = unix_sk(s);
		spin_lock(&unix_gc_lock);
		if (atomic_long_inc_return(&u->inflight) == 1) {
			BUG_ON(!list_empty(&u->link));
			list_add_tail(&u->link, &gc_inflight_list);
		} else {
			BUG_ON(list_empty(&u->link));
		}
		unix_tot_inflight++;
		spin_unlock(&unix_gc_lock);
	}
}

void unix_notinflight(struct file *fp)
{
	struct sock *s = unix_get_socket(fp);
	if (s) {
		struct unix_sock *u = unix_sk(s);
		spin_lock(&unix_gc_lock);
		BUG_ON(list_empty(&u->link));
		if (atomic_long_dec_and_test(&u->inflight))
			list_del_init(&u->link);
		unix_tot_inflight--;
		spin_unlock(&unix_gc_lock);
	}
}
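
/*
 * Pairing sketch (hedged -- the exact call sites live elsewhere in
 * af_unix): when SCM_RIGHTS descriptors are attached to an skb, the
 * attach path calls unix_inflight() once per passed file, and the
 * matching detach path calls unix_notinflight() once per file.  Thus
 * u->inflight counts how many queued SCM_RIGHTS references currently
 * point at the socket.
 */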

static inline struct sk_buff *sock_queue_head(struct sock *sk)
{
	return (struct sk_buff *)&sk->sk_receive_queue;
}

#define receive_queue_for_each_skb(sk, next, skb) \
	for (skb = sock_queue_head(sk)->next, next = skb->next; \
	     skb != sock_queue_head(sk); skb = next, next = skb->next)
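
/*
 * sock_queue_head() returns the sk_receive_queue head itself, cast to
 * an skb, giving the walker above its end-of-list sentinel.  Because
 * @next is sampled before each iteration's body runs, the body may
 * unlink @skb (as scan_inflight() does when moving an skb to the
 * hitlist) without breaking the traversal.
 */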

static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
			  struct sk_buff_head *hitlist)
{
	struct sk_buff *skb;
	struct sk_buff *next;

	spin_lock(&x->sk_receive_queue.lock);
	receive_queue_for_each_skb(x, next, skb) {
		/* Do we have file descriptors? */
		if (UNIXCB(skb).fp) {
			bool hit = false;
			/* Process the descriptors of this socket */
			int nfd = UNIXCB(skb).fp->count;
			struct file **fp = UNIXCB(skb).fp->fp;
			while (nfd--) {
				/*
				 * Get the socket the fd matches,
				 * if it indeed does so.
				 */
				struct sock *sk = unix_get_socket(*fp++);
				if (sk) {
					struct unix_sock *u = unix_sk(sk);

					/*
					 * Ignore non-candidates; they
					 * could have been added to the
					 * queues after the garbage
					 * collection started.
					 */
					if (u->gc_candidate) {
						hit = true;
						func(u);
					}
				}
			}
			if (hit && hitlist != NULL) {
				__skb_unlink(skb, &x->sk_receive_queue);
				__skb_queue_tail(hitlist, skb);
			}
		}
	}
	spin_unlock(&x->sk_receive_queue.lock);
}

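/*
 * Scan everything reachable from @x in one step.  For a non-listening
 * socket that is just its own receive queue.  A listening socket
 * instead holds "embryos" -- sockets created by connect() but not yet
 * accept()'ed -- in its receive queue, and those embryos may already
 * have SCM_RIGHTS skbs queued on them (see the 12 Oct 1998 note in
 * the header), so each of them is scanned as well.
 */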
static void scan_children(struct sock *x, void (*func)(struct unix_sock *),
			  struct sk_buff_head *hitlist)
{
	if (x->sk_state != TCP_LISTEN)
		scan_inflight(x, func, hitlist);
	else {
		struct sk_buff *skb;
		struct sk_buff *next;
		struct unix_sock *u;
		LIST_HEAD(embryos);

		/*
		 * For a listening socket collect the queued embryos
		 * and perform a scan on them as well.
		 */
		spin_lock(&x->sk_receive_queue.lock);
		receive_queue_for_each_skb(x, next, skb) {
			u = unix_sk(skb->sk);

			/*
			 * An embryo cannot be in-flight, so it's safe
			 * to use the list link.
			 */
			BUG_ON(!list_empty(&u->link));
			list_add_tail(&u->link, &embryos);
		}
		spin_unlock(&x->sk_receive_queue.lock);

		while (!list_empty(&embryos)) {
			u = list_entry(embryos.next, struct unix_sock, link);
			scan_inflight(&u->sk, func, hitlist);
			list_del_init(&u->link);
		}
	}
}

static void dec_inflight(struct unix_sock *usk)
{
	atomic_long_dec(&usk->inflight);
}

static void inc_inflight(struct unix_sock *usk)
{
	atomic_long_inc(&usk->inflight);
}

static void inc_inflight_move_tail(struct unix_sock *u)
{
	atomic_long_inc(&u->inflight);
	/*
	 * If this still might be part of a cycle, move it to the end
	 * of the list, so that it's checked even if it was already
	 * passed over.
	 */
	if (u->gc_maybe_cycle)
		list_move_tail(&u->link, &gc_candidates);
}
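
/*
 * Worked example of why the move-to-tail above matters: with
 * candidates [A, B] where the externally reachable A holds B's
 * descriptor, the cursor in unix_gc() may already have passed B
 * (inflight 0) before reaching A.  Restoring A's children then
 * pushes B's count back above zero; moving B to the tail guarantees
 * the cursor passes it again, so B is rescued as well.
 */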

static bool gc_in_progress = false;

void wait_for_unix_gc(void)
{
	wait_event(unix_gc_wait, gc_in_progress == false);
}
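
/*
 * Hedged note: the callers of wait_for_unix_gc() live outside this
 * file, but the intent is that senders of SCM_RIGHTS messages wait
 * here for any in-progress collection to finish before queueing yet
 * more in-flight descriptors.
 */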

/* The external entry point: unix_gc() */
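/*
 * In outline, the collector works in three phases, all under
 * unix_gc_lock:
 *
 *   1. Candidate selection: pick the in-flight sockets whose every
 *      file reference is itself an in-flight reference.
 *   2. Reachability: subtract the candidates' internal references,
 *      then recursively restore the counts of everything that is
 *      still externally reachable.
 *   3. Purge: whatever remains is cyclic garbage; restore its
 *      counters and free the skbs that carry the cycle.
 */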
void unix_gc(void)
{
	struct unix_sock *u;
	struct unix_sock *next;
	struct sk_buff_head hitlist;
	struct list_head cursor;
	LIST_HEAD(not_cycle_list);

	spin_lock(&unix_gc_lock);

	/* Avoid a recursive GC. */
	if (gc_in_progress)
		goto out;

	gc_in_progress = true;
	/*
	 * First, select candidates for garbage collection.  Only
	 * in-flight sockets are considered, and from those only the
	 * ones which don't have any external reference.
	 *
	 * Holding unix_gc_lock will protect these candidates from
	 * being detached, and hence from gaining an external
	 * reference.  Since there are no possible receivers, all
	 * buffers currently on the candidates' queues stay there
	 * during the garbage collection.
	 *
	 * We also know that no new candidate can be added onto the
	 * receive queues.  Other, non-candidate sockets _can_ be
	 * added to a queue, so we must make sure only to touch
	 * candidates.
	 */
	list_for_each_entry_safe(u, next, &gc_inflight_list, link) {
		long total_refs;
		long inflight_refs;

		total_refs = file_count(u->sk.sk_socket->file);
		inflight_refs = atomic_long_read(&u->inflight);

		BUG_ON(inflight_refs < 1);
		BUG_ON(total_refs < inflight_refs);
		if (total_refs == inflight_refs) {
			list_move_tail(&u->link, &gc_candidates);
			u->gc_candidate = 1;
			u->gc_maybe_cycle = 1;
		}
	}
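
	/*
	 * Concrete example of the test above: a socket whose file has
	 * f_count == 2 -- one reference from a user's open fd and one
	 * from an in-flight SCM_RIGHTS copy -- has total_refs == 2 but
	 * inflight_refs == 1, so it is externally reachable and is not
	 * a candidate.  Only after the user close()s the fd do the two
	 * counts meet, making the socket a candidate.
	 */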

	/*
	 * Now remove all internal in-flight references to children of
	 * the candidates.
	 */
	list_for_each_entry(u, &gc_candidates, link)
		scan_children(&u->sk, dec_inflight, NULL);

	/*
	 * Restore the references for children of all candidates
	 * which have remaining references.  Do this recursively, so
	 * that only those forming cyclic references remain.
	 *
	 * Use a "cursor" link to make the list traversal safe, even
	 * though elements might be moved about.
	 */
	list_add(&cursor, &gc_candidates);
	while (cursor.next != &gc_candidates) {
		u = list_entry(cursor.next, struct unix_sock, link);

		/* Move the cursor to after the current position. */
		list_move(&cursor, &u->link);

		if (atomic_long_read(&u->inflight) > 0) {
			list_move_tail(&u->link, &not_cycle_list);
			u->gc_maybe_cycle = 0;
			scan_children(&u->sk, inc_inflight_move_tail, NULL);
		}
	}
	list_del(&cursor);

	/*
	 * not_cycle_list contains those sockets which do not make up
	 * a cycle.  Restore these to the inflight list.
	 */
	while (!list_empty(&not_cycle_list)) {
		u = list_entry(not_cycle_list.next, struct unix_sock, link);
		u->gc_candidate = 0;
		list_move_tail(&u->link, &gc_inflight_list);
	}

	/*
	 * Now gc_candidates contains only garbage.  Restore the
	 * original inflight counters for these as well, and remove
	 * the skbuffs which are creating the cycle(s).
	 */
	skb_queue_head_init(&hitlist);
	list_for_each_entry(u, &gc_candidates, link)
		scan_children(&u->sk, inc_inflight, &hitlist);

	spin_unlock(&unix_gc_lock);

	/* Here we are. Hitlist is filled. Die. */
	__skb_queue_purge(&hitlist);

	spin_lock(&unix_gc_lock);

	/* All candidates should have been detached by now. */
	BUG_ON(!list_empty(&gc_candidates));
	gc_in_progress = false;
	wake_up(&unix_gc_wait);

 out:
	spin_unlock(&unix_gc_lock);
}
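
/*
 * Hedged note on triggering: the caller is elsewhere in af_unix, but
 * unix_gc() is expected to be invoked from the socket release path
 * whenever descriptors are still in flight, since that is when a
 * cycle can first become unreachable.
 */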