#ifndef _LINUX_CGROUP_H
#define _LINUX_CGROUP_H
/*
 * cgroup interface
 *
 * Copyright (C) 2003 BULL SA
 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/rcupdate.h>
#include <linux/cgroupstats.h>
#include <linux/prio_heap.h>
#include <linux/rwsem.h>
#include <linux/idr.h>

#ifdef CONFIG_CGROUPS

struct cgroupfs_root;
struct cgroup_subsys;
struct inode;
struct cgroup;
struct css_id;

extern int cgroup_init_early(void);
extern int cgroup_init(void);
extern void cgroup_lock(void);
extern bool cgroup_lock_live_group(struct cgroup *cgrp);
extern void cgroup_unlock(void);
extern void cgroup_fork(struct task_struct *p);
extern void cgroup_fork_callbacks(struct task_struct *p);
extern void cgroup_post_fork(struct task_struct *p);
extern void cgroup_exit(struct task_struct *p, int run_callbacks);
extern int cgroupstats_build(struct cgroupstats *stats,
				struct dentry *dentry);

extern const struct file_operations proc_cgroup_operations;

/* Define the enumeration of all cgroup subsystems */
#define SUBSYS(_x) _x ## _subsys_id,
enum cgroup_subsys_id {
#include <linux/cgroup_subsys.h>
	CGROUP_SUBSYS_COUNT
};
#undef SUBSYS
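
/*
 * For example, assuming CONFIG_CPUSETS is enabled so that
 * <linux/cgroup_subsys.h> contains a SUBSYS(cpuset) line, the enum above
 * gains a cpuset_subsys_id entry, and the second SUBSYS expansion near the
 * end of this header declares "extern struct cgroup_subsys cpuset_subsys;".
 */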

/* Per-subsystem/per-cgroup state maintained by the system. */
struct cgroup_subsys_state {
	/*
	 * The cgroup that this subsystem is attached to. Useful
	 * for subsystems that want to know about the cgroup
	 * hierarchy structure.
	 */
	struct cgroup *cgroup;

	/*
	 * State maintained by the cgroup system to allow subsystems
	 * to be "busy". Should be accessed via css_get(),
	 * css_tryget() and css_put().
	 */

	atomic_t refcnt;

	unsigned long flags;
	/* ID for this css, if possible */
	struct css_id *id;
};

/* bits in struct cgroup_subsys_state flags field */
enum {
	CSS_ROOT, /* This CSS is the root of the subsystem */
	CSS_REMOVED, /* This CSS is dead */
};

/*
 * Call css_get() to hold a reference on the css; it can be used
 * for a reference obtained via:
 * - an existing ref-counted reference to the css
 * - task->cgroups for a locked task
 */

static inline void css_get(struct cgroup_subsys_state *css)
{
	/* We don't need to reference count the root state */
	if (!test_bit(CSS_ROOT, &css->flags))
		atomic_inc(&css->refcnt);
}

static inline bool css_is_removed(struct cgroup_subsys_state *css)
{
	return test_bit(CSS_REMOVED, &css->flags);
}

/*
 * Call css_tryget() to take a reference on a css if your existing
 * (known-valid) reference isn't already ref-counted. Returns false if
 * the css has been destroyed.
 */

static inline bool css_tryget(struct cgroup_subsys_state *css)
{
	if (test_bit(CSS_ROOT, &css->flags))
		return true;
	while (!atomic_inc_not_zero(&css->refcnt)) {
		if (test_bit(CSS_REMOVED, &css->flags))
			return false;
		cpu_relax();
	}
	return true;
}

/*
 * css_put() should be called to release a reference taken by
 * css_get() or css_tryget()
 */

extern void __css_put(struct cgroup_subsys_state *css);
static inline void css_put(struct cgroup_subsys_state *css)
{
	if (!test_bit(CSS_ROOT, &css->flags))
		__css_put(css);
}
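
/*
 * Usage sketch (illustrative only): pinning the css of a task's cgroup
 * for a hypothetical "foo" subsystem. foo_subsys_id is a made-up ID for
 * the example; task_subsys_state() is defined later in this header. The
 * pattern is css_tryget() under rcu_read_lock(), css_put() when done:
 *
 *	struct cgroup_subsys_state *css;
 *
 *	rcu_read_lock();
 *	css = task_subsys_state(tsk, foo_subsys_id);
 *	if (!css_tryget(css))
 *		css = NULL;
 *	rcu_read_unlock();
 *	if (css) {
 *		...use the pinned subsystem state...
 *		css_put(css);
 *	}
 */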

/* bits in struct cgroup flags field */
enum {
	/* Control Group is dead */
	CGRP_REMOVED,
	/*
	 * Control Group has previously had a child cgroup or a task,
	 * but no longer (only if CGRP_NOTIFY_ON_RELEASE is set)
	 */
	CGRP_RELEASABLE,
	/* Control Group requires release notifications to userspace */
	CGRP_NOTIFY_ON_RELEASE,
	/*
	 * A thread in rmdir() is waiting for this cgroup.
	 */
	CGRP_WAIT_ON_RMDIR,
};

/* which pidlist file are we talking about? */
enum cgroup_filetype {
	CGROUP_FILE_PROCS,
	CGROUP_FILE_TASKS,
};

/*
 * A pidlist is a list of pids that virtually represents the contents of one
 * of the cgroup files ("procs" or "tasks"). We keep a list of such pidlists,
 * a pair (one each for procs, tasks) for each pid namespace that's relevant
 * to the cgroup.
 */
struct cgroup_pidlist {
	/*
	 * used to find which pidlist is wanted. doesn't change as long as
	 * this particular list stays in the list.
	 */
	struct { enum cgroup_filetype type; struct pid_namespace *ns; } key;
	/* array of xids */
	pid_t *list;
	/* how many elements the above list has */
	int length;
	/* how many files are using the current array */
	int use_count;
	/* each of these stored in a list by its cgroup */
	struct list_head links;
	/* pointer to the cgroup we belong to, for list removal purposes */
	struct cgroup *owner;
	/* protects the other fields */
	struct rw_semaphore mutex;
};

struct cgroup {
	unsigned long flags;		/* "unsigned long" so bitops work */

	/*
	 * count users of this cgroup. >0 means busy, but doesn't
	 * necessarily indicate the number of tasks in the cgroup
	 */
	atomic_t count;

	/*
	 * We link our 'sibling' struct into our parent's 'children'.
	 * Our children link their 'sibling' into our 'children'.
	 */
	struct list_head sibling;	/* my parent's children */
	struct list_head children;	/* my children */

	struct cgroup *parent;		/* my parent */
	struct dentry *dentry;		/* cgroup fs entry, RCU protected */

	/* Private pointers for each registered subsystem */
	struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT];

	struct cgroupfs_root *root;
	struct cgroup *top_cgroup;

	/*
	 * List of cg_cgroup_links pointing at css_sets with
	 * tasks in this cgroup. Protected by css_set_lock
	 */
	struct list_head css_sets;

	/*
	 * Linked list running through all cgroups that can
	 * potentially be reaped by the release agent. Protected by
	 * release_list_lock
	 */
	struct list_head release_list;

	/*
	 * list of pidlists, up to two for each namespace (one for procs, one
	 * for tasks); created on demand.
	 */
	struct list_head pidlists;
	struct mutex pidlist_mutex;

	/* For RCU-protected deletion */
	struct rcu_head rcu_head;
};

/*
 * A css_set is a structure holding pointers to a set of
 * cgroup_subsys_state objects. This saves space in the task struct
 * object and speeds up fork()/exit(), since a single inc/dec and a
 * list_add()/del() can bump the reference count on the entire cgroup
 * set for a task.
 */

struct css_set {

	/* Reference count */
	atomic_t refcount;

	/*
	 * List running through all css_sets in the same hash
	 * slot. Protected by css_set_lock
	 */
	struct hlist_node hlist;

	/*
	 * List running through all tasks using this css_set.
	 * Protected by css_set_lock
	 */
	struct list_head tasks;

	/*
	 * List of cg_cgroup_link objects on link chains from
	 * cgroups referenced from this css_set. Protected by
	 * css_set_lock
	 */
	struct list_head cg_links;

	/*
	 * Set of subsystem states, one for each subsystem. This array
	 * is immutable after creation apart from the init_css_set
	 * during subsystem registration (at boot time).
	 */
	struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT];

	/* For RCU-protected deletion */
	struct rcu_head rcu_head;
};

/*
 * cgroup_map_cb is an abstract callback API for reporting map-valued
 * control files
 */

struct cgroup_map_cb {
	int (*fill)(struct cgroup_map_cb *cb, const char *key, u64 value);
	void *state;
};

/*
 * struct cftype: handler definitions for cgroup control files
 *
 * When reading/writing to a file:
 * - the cgroup to use is file->f_dentry->d_parent->d_fsdata
 * - the 'cftype' of the file is file->f_dentry->d_fsdata
 */

#define MAX_CFTYPE_NAME 64
struct cftype {
	/*
	 * By convention, the name should begin with the name of the
	 * subsystem, followed by a period
	 */
	char name[MAX_CFTYPE_NAME];
	int private;
	/*
	 * If not 0, file mode is set to this value, otherwise it will
	 * be figured out automatically
	 */
	mode_t mode;

	/*
	 * If non-zero, defines the maximum length of string that can
	 * be passed to write_string; defaults to 64
	 */
	size_t max_write_len;

	int (*open)(struct inode *inode, struct file *file);
	ssize_t (*read)(struct cgroup *cgrp, struct cftype *cft,
			struct file *file,
			char __user *buf, size_t nbytes, loff_t *ppos);
	/*
	 * read_u64() is a shortcut for the common case of returning a
	 * single integer. Use it in place of read()
	 */
	u64 (*read_u64)(struct cgroup *cgrp, struct cftype *cft);
	/*
	 * read_s64() is a signed version of read_u64()
	 */
	s64 (*read_s64)(struct cgroup *cgrp, struct cftype *cft);
	/*
	 * read_map() is used for defining a map of key/value
	 * pairs. It should call cb->fill(cb, key, value) for each
	 * entry. The key/value pairs (and their ordering) should not
	 * change between reboots.
	 */
	int (*read_map)(struct cgroup *cont, struct cftype *cft,
			struct cgroup_map_cb *cb);
	/*
	 * read_seq_string() is used for outputting a simple sequence
	 * using seqfile.
	 */
	int (*read_seq_string)(struct cgroup *cont, struct cftype *cft,
			       struct seq_file *m);

	ssize_t (*write)(struct cgroup *cgrp, struct cftype *cft,
			 struct file *file,
			 const char __user *buf, size_t nbytes, loff_t *ppos);

	/*
	 * write_u64() is a shortcut for the common case of accepting
	 * a single integer (as parsed by simple_strtoull) from
	 * userspace. Use in place of write(); return 0 or error.
	 */
	int (*write_u64)(struct cgroup *cgrp, struct cftype *cft, u64 val);
	/*
	 * write_s64() is a signed version of write_u64()
	 */
	int (*write_s64)(struct cgroup *cgrp, struct cftype *cft, s64 val);

	/*
	 * write_string() is passed a nul-terminated kernelspace
	 * buffer of maximum length determined by max_write_len.
	 * Returns 0 or -ve error code.
	 */
	int (*write_string)(struct cgroup *cgrp, struct cftype *cft,
			    const char *buffer);
	/*
	 * trigger() can be used to receive a simple kick from
	 * userspace when the actual string written does not matter.
	 * The private field can be used to determine the kick type
	 * for multiplexing.
	 */
	int (*trigger)(struct cgroup *cgrp, unsigned int event);

	int (*release)(struct inode *inode, struct file *file);
};

struct cgroup_scanner {
	struct cgroup *cg;
	int (*test_task)(struct task_struct *p, struct cgroup_scanner *scan);
	void (*process_task)(struct task_struct *p,
			struct cgroup_scanner *scan);
	struct ptr_heap *heap;
	void *data;
};
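
/*
 * Illustrative sketch of using a cgroup_scanner with cgroup_scan_tasks()
 * (declared further down in this header). The foo_* callbacks, "cgrp" and
 * "my_state" are made up for the example; only cgroup_scan_tasks() and the
 * fields above are part of the real interface. test_task() is called with
 * css_set_lock held to filter tasks; process_task() is called for each
 * task that passed the filter, without that lock:
 *
 *	static int foo_test_task(struct task_struct *p,
 *				 struct cgroup_scanner *scan)
 *	{
 *		return !(p->flags & PF_EXITING);
 *	}
 *
 *	static void foo_process_task(struct task_struct *p,
 *				     struct cgroup_scanner *scan)
 *	{
 *		...per-task work, using scan->data for private state...
 *	}
 *
 *	struct cgroup_scanner scan = {
 *		.cg		= cgrp,
 *		.test_task	= foo_test_task,
 *		.process_task	= foo_process_task,
 *		.heap		= NULL,		// let the scanner allocate
 *		.data		= my_state,
 *	};
 *	cgroup_scan_tasks(&scan);
 */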

/*
 * Add a new file to the given cgroup directory. Should only be
 * called by subsystems from within a populate() method
 */
int cgroup_add_file(struct cgroup *cgrp, struct cgroup_subsys *subsys,
		    const struct cftype *cft);

/*
 * Add a set of new files to the given cgroup directory. Should
 * only be called by subsystems from within a populate() method
 */
int cgroup_add_files(struct cgroup *cgrp,
		     struct cgroup_subsys *subsys,
		     const struct cftype cft[],
		     int count);
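
/*
 * Sketch of a control file table and a populate() method for a
 * hypothetical "foo" subsystem (the foo_* names are illustrative, not part
 * of this header). The read_u64/write_u64 shortcuts avoid open-coding a
 * full read()/write() pair, and ARRAY_SIZE() supplies the count expected
 * by cgroup_add_files():
 *
 *	static u64 foo_limit_read(struct cgroup *cgrp, struct cftype *cft)
 *	{
 *		...return the current limit for cgrp...
 *	}
 *
 *	static int foo_limit_write(struct cgroup *cgrp, struct cftype *cft,
 *				   u64 val)
 *	{
 *		...store val as the new limit; return 0 or -errno...
 *	}
 *
 *	static struct cftype foo_files[] = {
 *		{
 *			.name		= "foo.limit",
 *			.read_u64	= foo_limit_read,
 *			.write_u64	= foo_limit_write,
 *		},
 *	};
 *
 *	static int foo_populate(struct cgroup_subsys *ss, struct cgroup *cgrp)
 *	{
 *		return cgroup_add_files(cgrp, ss, foo_files,
 *					ARRAY_SIZE(foo_files));
 *	}
 */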

int cgroup_is_removed(const struct cgroup *cgrp);

int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen);

int cgroup_task_count(const struct cgroup *cgrp);

/* Return true if cgrp is a descendant of the task's cgroup */
int cgroup_is_descendant(const struct cgroup *cgrp, struct task_struct *task);

/*
 * When a subsystem accesses a css and may take a permanent reference on
 * it, it should take care of races with rmdir(). The following pair of
 * functions stops and restarts rmdir() if necessary. Because they call
 * css_get()/css_put(), "css" must be a live css.
 *
 *	cgroup_exclude_rmdir(css);
 *	...do work that may access an arbitrary, possibly empty cgroup...
 *	cgroup_release_and_wakeup_rmdir(css);
 *
 * If someone removes the cgroup while cgroup_exclude_rmdir() holds it,
 * the remover sleeps and cgroup_release_and_wakeup_rmdir() wakes it up.
 */

void cgroup_exclude_rmdir(struct cgroup_subsys_state *css);
void cgroup_release_and_wakeup_rmdir(struct cgroup_subsys_state *css);
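
/*
 * A concrete sketch of that pattern for the hypothetical "foo" subsystem
 * (foo-specific work is elided; "css" is assumed to be a live css of the
 * cgroup being operated on):
 *
 *	cgroup_exclude_rmdir(css);
 *	css_get(css);
 *	...record the css in foo's own data structures (a permanent ref)...
 *	cgroup_release_and_wakeup_rmdir(css);
 */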

/*
 * Control Group subsystem type.
 * See Documentation/cgroups/cgroups.txt for details
 */

struct cgroup_subsys {
	struct cgroup_subsys_state *(*create)(struct cgroup_subsys *ss,
					      struct cgroup *cgrp);
	int (*pre_destroy)(struct cgroup_subsys *ss, struct cgroup *cgrp);
	void (*destroy)(struct cgroup_subsys *ss, struct cgroup *cgrp);
	int (*can_attach)(struct cgroup_subsys *ss, struct cgroup *cgrp,
			  struct task_struct *tsk, bool threadgroup);
	void (*attach)(struct cgroup_subsys *ss, struct cgroup *cgrp,
		       struct cgroup *old_cgrp, struct task_struct *tsk,
		       bool threadgroup);
	void (*fork)(struct cgroup_subsys *ss, struct task_struct *task);
	void (*exit)(struct cgroup_subsys *ss, struct task_struct *task);
	int (*populate)(struct cgroup_subsys *ss,
			struct cgroup *cgrp);
	void (*post_clone)(struct cgroup_subsys *ss, struct cgroup *cgrp);
	void (*bind)(struct cgroup_subsys *ss, struct cgroup *root);

	int subsys_id;
	int active;
	int disabled;
	int early_init;
	/*
	 * True if this subsys uses IDs. IDs are not available before
	 * cgroup_init() (i.e. not during early_init).
	 */
	bool use_id;
#define MAX_CGROUP_TYPE_NAMELEN 32
	const char *name;

	/*
	 * Protects sibling/children links of cgroups in this
	 * hierarchy, plus protects which hierarchy (or none) the
	 * subsystem is a part of (i.e. root/sibling). To avoid
	 * potential deadlocks, the following operations should not be
	 * undertaken while holding any hierarchy_mutex:
	 *
	 * - allocating memory
	 * - initiating hotplug events
	 */
	struct mutex hierarchy_mutex;
	struct lock_class_key subsys_key;

	/*
	 * Link to parent, and list entry in parent's children.
	 * Protected by this->hierarchy_mutex and cgroup_lock()
	 */
	struct cgroupfs_root *root;
	struct list_head sibling;
	/* used when use_id == true */
	struct idr idr;
	spinlock_t id_lock;
};
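
/*
 * A minimal sketch of a subsystem definition for the hypothetical "foo"
 * controller (assumes a SUBSYS(foo) entry in cgroup_subsys.h providing
 * foo_subsys_id; everything named foo_* is illustrative). create() hands
 * back the per-cgroup state embedding a cgroup_subsys_state, destroy()
 * frees it, and populate() (see the earlier sketch) adds control files:
 *
 *	struct foo_cgroup {
 *		struct cgroup_subsys_state css;
 *		u64 limit;
 *	};
 *
 *	static struct cgroup_subsys_state *foo_create(struct cgroup_subsys *ss,
 *						      struct cgroup *cgrp)
 *	{
 *		struct foo_cgroup *foo = kzalloc(sizeof(*foo), GFP_KERNEL);
 *
 *		if (!foo)
 *			return ERR_PTR(-ENOMEM);
 *		return &foo->css;
 *	}
 *
 *	static void foo_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
 *	{
 *		kfree(container_of(cgrp->subsys[foo_subsys_id],
 *				   struct foo_cgroup, css));
 *	}
 *
 *	struct cgroup_subsys foo_subsys = {
 *		.name		= "foo",
 *		.create		= foo_create,
 *		.destroy	= foo_destroy,
 *		.populate	= foo_populate,
 *		.subsys_id	= foo_subsys_id,
 *	};
 */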

#define SUBSYS(_x) extern struct cgroup_subsys _x ## _subsys;
#include <linux/cgroup_subsys.h>
#undef SUBSYS

static inline struct cgroup_subsys_state *cgroup_subsys_state(
	struct cgroup *cgrp, int subsys_id)
{
	return cgrp->subsys[subsys_id];
}

static inline struct cgroup_subsys_state *task_subsys_state(
	struct task_struct *task, int subsys_id)
{
	return rcu_dereference(task->cgroups->subsys[subsys_id]);
}

static inline struct cgroup* task_cgroup(struct task_struct *task,
					 int subsys_id)
{
	return task_subsys_state(task, subsys_id)->cgroup;
}
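
/*
 * Subsystems typically embed their cgroup_subsys_state in a larger
 * per-cgroup structure and convert back with container_of(). A sketch
 * for the hypothetical "foo" controller used in the examples above
 * (task_foo() must be called under rcu_read_lock() or with the task's
 * cgroup pinned, as with any task_subsys_state() user):
 *
 *	static inline struct foo_cgroup *cgroup_foo(struct cgroup *cgrp)
 *	{
 *		return container_of(cgroup_subsys_state(cgrp, foo_subsys_id),
 *				    struct foo_cgroup, css);
 *	}
 *
 *	static inline struct foo_cgroup *task_foo(struct task_struct *task)
 *	{
 *		return container_of(task_subsys_state(task, foo_subsys_id),
 *				    struct foo_cgroup, css);
 *	}
 */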

int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *ss,
		 char *nodename);

/* A cgroup_iter should be treated as an opaque object */
struct cgroup_iter {
	struct list_head *cg_link;
	struct list_head *task;
};

/*
 * To iterate across the tasks in a cgroup:
 *
 * 1) call cgroup_iter_start to initialize an iterator
 *
 * 2) call cgroup_iter_next() to retrieve member tasks until it
 *    returns NULL or until you want to end the iteration
 *
 * 3) call cgroup_iter_end() to destroy the iterator.
 *
 * Or, call cgroup_scan_tasks() to iterate through every task in a
 * cgroup - cgroup_scan_tasks() holds the css_set_lock when calling
 * the test_task() callback, but not while calling the process_task()
 * callback.
 */
void cgroup_iter_start(struct cgroup *cgrp, struct cgroup_iter *it);
struct task_struct *cgroup_iter_next(struct cgroup *cgrp,
				     struct cgroup_iter *it);
void cgroup_iter_end(struct cgroup *cgrp, struct cgroup_iter *it);
int cgroup_scan_tasks(struct cgroup_scanner *scan);
int cgroup_attach_task(struct cgroup *, struct task_struct *);
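
/*
 * Iterator usage sketch (illustrative; what is done with "tsk" is up to
 * the caller). cgroup_iter_start()/cgroup_iter_end() take and release the
 * internal css_set_lock, so the loop body must not sleep:
 *
 *	struct cgroup_iter it;
 *	struct task_struct *tsk;
 *
 *	cgroup_iter_start(cgrp, &it);
 *	while ((tsk = cgroup_iter_next(cgrp, &it))) {
 *		...inspect tsk; do not block here...
 *	}
 *	cgroup_iter_end(cgrp, &it);
 */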

/*
 * CSS ID is an ID for cgroup_subsys_state structs under a subsystem. It
 * only works if cgroup_subsys.use_id == true, and can be used for looking
 * up and scanning. A CSS ID is assigned automatically when the cgroup is
 * created and removed when the subsystem calls free_css_id(), because the
 * lifetime of a cgroup_subsys_state is the subsystem's business.
 *
 * The lookup and scanning functions should be called under rcu_read_lock().
 * Taking cgroup_mutex/hierarchy_mutex is not necessary for these calls,
 * but the css they return may be "not populated yet" or "being destroyed".
 * The caller should check the status of the css and its cgroup.
 */

/*
 * Typically called at ->destroy(), or wherever the subsystem frees the
 * cgroup_subsys_state.
 */
void free_css_id(struct cgroup_subsys *ss, struct cgroup_subsys_state *css);

/* Find a cgroup_subsys_state which has the given ID */

struct cgroup_subsys_state *css_lookup(struct cgroup_subsys *ss, int id);

/*
 * Get the css whose ID is greater than or equal to id, within the subtree
 * of root. Returns a cgroup_subsys_state or NULL.
 */
struct cgroup_subsys_state *css_get_next(struct cgroup_subsys *ss, int id,
		struct cgroup_subsys_state *root, int *foundid);

/* Returns true if root is an ancestor of cg */
bool css_is_ancestor(struct cgroup_subsys_state *cg,
		     const struct cgroup_subsys_state *root);

/* Get id and depth of css */
unsigned short css_id(struct cgroup_subsys_state *css);
unsigned short css_depth(struct cgroup_subsys_state *css);
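
/*
 * Sketch of scanning every live css under a subtree with css_get_next()
 * (illustrative; "ss" and "root_css" are assumed to be a valid subsystem
 * and one of its csses). The scan runs under rcu_read_lock() and restarts
 * from the last found ID + 1, as described above:
 *
 *	struct cgroup_subsys_state *css;
 *	int nextid = 1, found;
 *
 *	rcu_read_lock();
 *	while ((css = css_get_next(ss, nextid, root_css, &found))) {
 *		if (css_tryget(css)) {
 *			...use css, remembering it may not be fully set up...
 *			css_put(css);
 *		}
 *		nextid = found + 1;
 *	}
 *	rcu_read_unlock();
 */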

#else /* !CONFIG_CGROUPS */

static inline int cgroup_init_early(void) { return 0; }
static inline int cgroup_init(void) { return 0; }
static inline void cgroup_fork(struct task_struct *p) {}
static inline void cgroup_fork_callbacks(struct task_struct *p) {}
static inline void cgroup_post_fork(struct task_struct *p) {}
static inline void cgroup_exit(struct task_struct *p, int callbacks) {}

static inline void cgroup_lock(void) {}
static inline void cgroup_unlock(void) {}
static inline int cgroupstats_build(struct cgroupstats *stats,
				    struct dentry *dentry)
{
	return -EINVAL;
}

#endif /* !CONFIG_CGROUPS */

#endif /* _LINUX_CGROUP_H */