| /* |
| * Generic process-grouping system. |
| * |
| * Based originally on the cpuset system, extracted by Paul Menage |
| * Copyright (C) 2006 Google, Inc |
| * |
| * Notifications support |
| * Copyright (C) 2009 Nokia Corporation |
| * Author: Kirill A. Shutemov |
| * |
| * Copyright notices from the original cpuset code: |
| * -------------------------------------------------- |
| * Copyright (C) 2003 BULL SA. |
| * Copyright (C) 2004-2006 Silicon Graphics, Inc. |
| * |
| * Portions derived from Patrick Mochel's sysfs code. |
| * sysfs is Copyright (c) 2001-3 Patrick Mochel |
| * |
| * 2003-10-10 Written by Simon Derr. |
| * 2003-10-22 Updates by Stephen Hemminger. |
| * 2004 May-July Rework by Paul Jackson. |
| * --------------------------------------------------- |
| * |
| * This file is subject to the terms and conditions of the GNU General Public |
| * License. See the file COPYING in the main directory of the Linux |
| * distribution for more details. |
| */ |
| |
| #include <linux/cgroup.h> |
| #include <linux/cred.h> |
| #include <linux/ctype.h> |
| #include <linux/errno.h> |
| #include <linux/init_task.h> |
| #include <linux/kernel.h> |
| #include <linux/list.h> |
| #include <linux/mm.h> |
| #include <linux/mutex.h> |
| #include <linux/mount.h> |
| #include <linux/pagemap.h> |
| #include <linux/proc_fs.h> |
| #include <linux/rcupdate.h> |
| #include <linux/sched.h> |
| #include <linux/backing-dev.h> |
| #include <linux/slab.h> |
| #include <linux/magic.h> |
| #include <linux/spinlock.h> |
| #include <linux/string.h> |
| #include <linux/sort.h> |
| #include <linux/kmod.h> |
| #include <linux/module.h> |
| #include <linux/delayacct.h> |
| #include <linux/cgroupstats.h> |
| #include <linux/hashtable.h> |
| #include <linux/namei.h> |
| #include <linux/pid_namespace.h> |
| #include <linux/idr.h> |
| #include <linux/vmalloc.h> /* TODO: replace with more sophisticated array */ |
| #include <linux/flex_array.h> /* used in cgroup_attach_task */ |
| #include <linux/kthread.h> |
| |
| #include <linux/atomic.h> |
| |
| /* |
| * pidlists linger for the following amount of time before being |
| * destroyed. The goal is to avoid frequent destruction in the middle of |
| * consecutive read calls. Expiring in the middle is a performance |
| * problem, not a correctness one. 1 sec should be enough. |
| */ |
| #define CGROUP_PIDLIST_DESTROY_DELAY HZ |
| |
| /* |
| * cgroup_mutex is the master lock. Any modification to cgroup or its |
| * hierarchy must be performed while holding it. |
| * |
| * cgroup_root_mutex nests inside cgroup_mutex and should be held to modify |
| * cgroupfs_root of any cgroup hierarchy - subsys list, flags, |
| * release_agent_path and so on. Modifying requires both cgroup_mutex and |
| * cgroup_root_mutex. Readers can acquire either of the two. This is to |
| * break the following locking order cycle. |
| * |
| * A. cgroup_mutex -> cred_guard_mutex -> s_type->i_mutex_key -> namespace_sem |
| * B. namespace_sem -> cgroup_mutex |
| * |
| * B happens only through cgroup_show_options() and using cgroup_root_mutex |
| * breaks it. |
| */ |
| #ifdef CONFIG_PROVE_RCU |
| DEFINE_MUTEX(cgroup_mutex); |
| EXPORT_SYMBOL_GPL(cgroup_mutex); /* only for lockdep */ |
| #else |
| static DEFINE_MUTEX(cgroup_mutex); |
| #endif |
| |
| static DEFINE_MUTEX(cgroup_root_mutex); |
| |
| #define cgroup_assert_mutex_or_rcu_locked() \ |
| rcu_lockdep_assert(rcu_read_lock_held() || \ |
| lockdep_is_held(&cgroup_mutex), \ |
| "cgroup_mutex or RCU read lock required"); |
| |
| #ifdef CONFIG_LOCKDEP |
| #define cgroup_assert_mutex_or_root_locked() \ |
| WARN_ON_ONCE(debug_locks && (!lockdep_is_held(&cgroup_mutex) && \ |
| !lockdep_is_held(&cgroup_root_mutex))) |
| #else |
| #define cgroup_assert_mutex_or_root_locked() do { } while (0) |
| #endif |
| |
| /* |
| * cgroup destruction makes heavy use of work items and there can be a lot |
| * of concurrent destructions. Use a separate workqueue so that cgroup |
| * destruction work items don't end up filling up max_active of system_wq |
| * which may lead to deadlock. |
| */ |
| static struct workqueue_struct *cgroup_destroy_wq; |
| |
| /* |
| * pidlist destructions need to be flushed on cgroup destruction. Use a |
| * separate workqueue as flush domain. |
| */ |
| static struct workqueue_struct *cgroup_pidlist_destroy_wq; |
| |
| /* |
| * Generate an array of cgroup subsystem pointers. At boot time, this is |
| * populated with the built-in subsystems, and modular subsystems are |
| * registered after that. The mutable section of this array is protected by |
| * cgroup_mutex. |
| */ |
| #define SUBSYS(_x) [_x ## _subsys_id] = &_x ## _subsys, |
| #define IS_SUBSYS_ENABLED(option) IS_BUILTIN(option) |
| static struct cgroup_subsys *cgroup_subsys[CGROUP_SUBSYS_COUNT] = { |
| #include <linux/cgroup_subsys.h> |
| }; |
| |
| /* |
| * The dummy hierarchy, reserved for the subsystems that are otherwise |
| * unattached - it never has more than a single cgroup, and all tasks are |
| * part of that cgroup. |
| */ |
| static struct cgroupfs_root cgroup_dummy_root; |
| |
| /* dummy_top is a shorthand for the dummy hierarchy's top cgroup */ |
| static struct cgroup * const cgroup_dummy_top = &cgroup_dummy_root.top_cgroup; |
| |
| /* The list of hierarchy roots */ |
| |
| static LIST_HEAD(cgroup_roots); |
| static int cgroup_root_count; |
| |
| /* |
| * Hierarchy ID allocation and mapping. It follows the same exclusion |
| * rules as other root ops - both cgroup_mutex and cgroup_root_mutex for |
| * writes, either for reads. |
| */ |
| static DEFINE_IDR(cgroup_hierarchy_idr); |
| |
| static struct cgroup_name root_cgroup_name = { .name = "/" }; |
| |
| /* |
| * Assign a monotonically increasing serial number to cgroups. It |
| * guarantees cgroups with bigger numbers are newer than those with smaller |
| * numbers. Also, as cgroups are always appended to the parent's |
| * ->children list, it guarantees that sibling cgroups are always sorted in |
| * the ascending serial number order on the list. Protected by |
| * cgroup_mutex. |
| */ |
| static u64 cgroup_serial_nr_next = 1; |
| |
| /* This flag indicates whether tasks in the fork and exit paths should |
| * check for fork/exit handlers to call. This avoids us having to do |
| * extra work in the fork/exit path if none of the subsystems need to |
| * be called. |
| */ |
| static int need_forkexit_callback __read_mostly; |
| |
| static struct cftype cgroup_base_files[]; |
| |
| static void cgroup_destroy_css_killed(struct cgroup *cgrp); |
| static int cgroup_destroy_locked(struct cgroup *cgrp); |
| static int cgroup_addrm_files(struct cgroup *cgrp, struct cftype cfts[], |
| bool is_add); |
| static int cgroup_file_release(struct inode *inode, struct file *file); |
| static void cgroup_pidlist_destroy_all(struct cgroup *cgrp); |
| |
| /** |
| * cgroup_css - obtain a cgroup's css for the specified subsystem |
| * @cgrp: the cgroup of interest |
| * @ss: the subsystem of interest (%NULL returns the dummy_css) |
| * |
| * Return @cgrp's css (cgroup_subsys_state) associated with @ss. This |
| * function must be called either under cgroup_mutex or rcu_read_lock() and |
| * the caller is responsible for pinning the returned css if it wants to |
| * keep accessing it outside the said locks. This function may return |
| * %NULL if @cgrp doesn't have @ss enabled. |
| */ |
| static struct cgroup_subsys_state *cgroup_css(struct cgroup *cgrp, |
| struct cgroup_subsys *ss) |
| { |
| if (ss) |
| return rcu_dereference_check(cgrp->subsys[ss->subsys_id], |
| lockdep_is_held(&cgroup_mutex)); |
| else |
| return &cgrp->dummy_css; |
| } |
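| |
| /* |
| * A minimal usage sketch (hypothetical caller; do_something() is an |
| * assumed helper, not part of this file). The returned css is only |
| * guaranteed to stay valid inside the locked section unless it is pinned: |
| * |
| *     rcu_read_lock(); |
| *     css = cgroup_css(cgrp, ss); |
| *     if (css) |
| *             do_something(css); |
| *     rcu_read_unlock(); |
| */ |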
| |
| /* convenient tests for these bits */ |
| static inline bool cgroup_is_dead(const struct cgroup *cgrp) |
| { |
| return test_bit(CGRP_DEAD, &cgrp->flags); |
| } |
| |
| /** |
| * cgroup_is_descendant - test ancestry |
| * @cgrp: the cgroup to be tested |
| * @ancestor: possible ancestor of @cgrp |
| * |
| * Test whether @cgrp is a descendant of @ancestor. It also returns %true |
| * if @cgrp == @ancestor. This function is safe to call as long as @cgrp |
| * and @ancestor are accessible. |
| */ |
| bool cgroup_is_descendant(struct cgroup *cgrp, struct cgroup *ancestor) |
| { |
| while (cgrp) { |
| if (cgrp == ancestor) |
| return true; |
| cgrp = cgrp->parent; |
| } |
| return false; |
| } |
| EXPORT_SYMBOL_GPL(cgroup_is_descendant); |
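| |
| /* |
| * Usage sketch (hypothetical caller; @target_cgrp and @subtree_root are |
| * assumed names): reject an operation unless the target sits somewhere |
| * under a known subtree. |
| * |
| *     if (!cgroup_is_descendant(target_cgrp, subtree_root)) |
| *             return -EACCES; |
| */ |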
| |
| static int cgroup_is_releasable(const struct cgroup *cgrp) |
| { |
| const int bits = |
| (1 << CGRP_RELEASABLE) | |
| (1 << CGRP_NOTIFY_ON_RELEASE); |
| return (cgrp->flags & bits) == bits; |
| } |
| |
| static int notify_on_release(const struct cgroup *cgrp) |
| { |
| return test_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags); |
| } |
| |
| /** |
| * for_each_css - iterate all css's of a cgroup |
| * @css: the iteration cursor |
| * @ssid: the index of the subsystem, CGROUP_SUBSYS_COUNT after reaching the end |
| * @cgrp: the target cgroup to iterate css's of |
| * |
| * Should be called under cgroup_mutex. |
| */ |
| #define for_each_css(css, ssid, cgrp) \ |
| for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT; (ssid)++) \ |
| if (!((css) = rcu_dereference_check( \ |
| (cgrp)->subsys[(ssid)], \ |
| lockdep_is_held(&cgroup_mutex)))) { } \ |
| else |
| |
| /** |
| * for_each_subsys - iterate all loaded cgroup subsystems |
| * @ss: the iteration cursor |
| * @ssid: the index of @ss, CGROUP_SUBSYS_COUNT after reaching the end |
| * |
| * Iterates through all loaded subsystems. Should be called under |
| * cgroup_mutex or cgroup_root_mutex. |
| */ |
| #define for_each_subsys(ss, ssid) \ |
| for (({ cgroup_assert_mutex_or_root_locked(); (ssid) = 0; }); \ |
| (ssid) < CGROUP_SUBSYS_COUNT; (ssid)++) \ |
| if (!((ss) = cgroup_subsys[(ssid)])) { } \ |
| else |
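| |
| /* |
| * Iteration sketch (hypothetical caller, which must hold cgroup_mutex): |
| * walk every enabled css of @cgrp with for_each_css() above. |
| * |
| *     struct cgroup_subsys_state *css; |
| *     int ssid; |
| * |
| *     for_each_css(css, ssid, cgrp) |
| *             pr_info("subsys %d enabled\n", ssid); |
| */ |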
| |
| /** |
| * for_each_builtin_subsys - iterate all built-in cgroup subsystems |
| * @ss: the iteration cursor |
| * @i: the index of @ss, CGROUP_BUILTIN_SUBSYS_COUNT after reaching the end |
| * |
| * Built-in subsystems are always present and iteration itself doesn't |
| * require any synchronization. |
| */ |
| #define for_each_builtin_subsys(ss, i) \ |
| for ((i) = 0; (i) < CGROUP_BUILTIN_SUBSYS_COUNT && \ |
| (((ss) = cgroup_subsys[i]) || true); (i)++) |
| |
| /* iterate across the active hierarchies */ |
| #define for_each_active_root(root) \ |
| list_for_each_entry((root), &cgroup_roots, root_list) |
| |
| static inline struct cgroup *__d_cgrp(struct dentry *dentry) |
| { |
| return dentry->d_fsdata; |
| } |
| |
| static inline struct cfent *__d_cfe(struct dentry *dentry) |
| { |
| return dentry->d_fsdata; |
| } |
| |
| static inline struct cftype *__d_cft(struct dentry *dentry) |
| { |
| return __d_cfe(dentry)->type; |
| } |
| |
| /** |
| * cgroup_lock_live_group - take cgroup_mutex and check that cgrp is alive. |
| * @cgrp: the cgroup to be checked for liveness |
| * |
| * On success, returns true; the mutex should be later unlocked. On |
| * failure returns false with no lock held. |
| */ |
| static bool cgroup_lock_live_group(struct cgroup *cgrp) |
| { |
| mutex_lock(&cgroup_mutex); |
| if (cgroup_is_dead(cgrp)) { |
| mutex_unlock(&cgroup_mutex); |
| return false; |
| } |
| return true; |
| } |
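| |
| /* |
| * Typical calling pattern (sketch, hypothetical caller): the mutex is |
| * held only on success, so the unlock happens on that path alone. |
| * |
| *     if (!cgroup_lock_live_group(cgrp)) |
| *             return -ENODEV; |
| *     ... operate on @cgrp under cgroup_mutex ... |
| *     mutex_unlock(&cgroup_mutex); |
| */ |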
| |
| /* the list of cgroups eligible for automatic release. Protected by |
| * release_list_lock */ |
| static LIST_HEAD(release_list); |
| static DEFINE_RAW_SPINLOCK(release_list_lock); |
| static void cgroup_release_agent(struct work_struct *work); |
| static DECLARE_WORK(release_agent_work, cgroup_release_agent); |
| static void check_for_release(struct cgroup *cgrp); |
| |
| /* |
| * A cgroup can be associated with multiple css_sets as different tasks may |
| * belong to different cgroups on different hierarchies. In the other |
| * direction, a css_set is naturally associated with multiple cgroups. |
| * This M:N relationship is represented by the following link structure |
| * which exists for each association and allows traversing the associations |
| * from both sides. |
| */ |
| struct cgrp_cset_link { |
| /* the cgroup and css_set this link associates */ |
| struct cgroup *cgrp; |
| struct css_set *cset; |
| |
| /* list of cgrp_cset_links anchored at cgrp->cset_links */ |
| struct list_head cset_link; |
| |
| /* list of cgrp_cset_links anchored at css_set->cgrp_links */ |
| struct list_head cgrp_link; |
| }; |
| |
| /* The default css_set - used by init and its children prior to any |
| * hierarchies being mounted. It contains a pointer to the root state |
| * for each subsystem. Also used to anchor the list of css_sets. Not |
| * reference-counted, to improve performance when child cgroups |
| * haven't been created. |
| */ |
| |
| static struct css_set init_css_set; |
| static struct cgrp_cset_link init_cgrp_cset_link; |
| |
| /* |
| * css_set_lock protects the list of css_set objects, and the chain of |
| * tasks off each css_set. Nests outside task->alloc_lock due to |
| * css_task_iter_start(). |
| */ |
| static DEFINE_RWLOCK(css_set_lock); |
| static int css_set_count; |
| |
| /* |
| * hash table for css_set objects. This makes it faster to find an |
| * existing css_set. This hash doesn't (currently) take into |
| * account cgroups in empty hierarchies. |
| */ |
| #define CSS_SET_HASH_BITS 7 |
| static DEFINE_HASHTABLE(css_set_table, CSS_SET_HASH_BITS); |
| |
| static unsigned long css_set_hash(struct cgroup_subsys_state *css[]) |
| { |
| unsigned long key = 0UL; |
| struct cgroup_subsys *ss; |
| int i; |
| |
| for_each_subsys(ss, i) |
| key += (unsigned long)css[i]; |
| key = (key >> 16) ^ key; |
| |
| return key; |
| } |
| |
| /* |
| * We don't maintain the lists running through each css_set to its tasks |
| * until after the first call to css_task_iter_start(). This reduces the |
| * fork()/exit() overhead for people who have cgroups compiled into their |
| * kernel but not actually in use. |
| */ |
| static int use_task_css_set_links __read_mostly; |
| |
| static void __put_css_set(struct css_set *cset, int taskexit) |
| { |
| struct cgrp_cset_link *link, *tmp_link; |
| |
| /* |
| * Ensure that the refcount doesn't hit zero while any readers |
| * can see it. Similar to atomic_dec_and_lock(), but for an |
| * rwlock. |
| */ |
| if (atomic_add_unless(&cset->refcount, -1, 1)) |
| return; |
| write_lock(&css_set_lock); |
| if (!atomic_dec_and_test(&cset->refcount)) { |
| write_unlock(&css_set_lock); |
| return; |
| } |
| |
| /* This css_set is dead. unlink it and release cgroup refcounts */ |
| hash_del(&cset->hlist); |
| css_set_count--; |
| |
| list_for_each_entry_safe(link, tmp_link, &cset->cgrp_links, cgrp_link) { |
| struct cgroup *cgrp = link->cgrp; |
| |
| list_del(&link->cset_link); |
| list_del(&link->cgrp_link); |
| |
| /* @cgrp can't go away while we're holding css_set_lock */ |
| if (list_empty(&cgrp->cset_links) && notify_on_release(cgrp)) { |
| if (taskexit) |
| set_bit(CGRP_RELEASABLE, &cgrp->flags); |
| check_for_release(cgrp); |
| } |
| |
| kfree(link); |
| } |
| |
| write_unlock(&css_set_lock); |
| kfree_rcu(cset, rcu_head); |
| } |
| |
| /* |
| * refcounted get/put for css_set objects |
| */ |
| static inline void get_css_set(struct css_set *cset) |
| { |
| atomic_inc(&cset->refcount); |
| } |
| |
| static inline void put_css_set(struct css_set *cset) |
| { |
| __put_css_set(cset, 0); |
| } |
| |
| static inline void put_css_set_taskexit(struct css_set *cset) |
| { |
| __put_css_set(cset, 1); |
| } |
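| |
| /* |
| * Pinning sketch (modeled on find_css_set() below): a css_set looked up |
| * under css_set_lock must be pinned before the lock is dropped and |
| * released with put_css_set() once the caller is done with it. |
| * |
| *     read_lock(&css_set_lock); |
| *     cset = find_existing_css_set(old_cset, cgrp, template); |
| *     if (cset) |
| *             get_css_set(cset); |
| *     read_unlock(&css_set_lock); |
| *     ... |
| *     put_css_set(cset); |
| */ |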
| |
| /** |
| * compare_css_sets - helper function for find_existing_css_set(). |
| * @cset: candidate css_set being tested |
| * @old_cset: existing css_set for a task |
| * @new_cgrp: cgroup that's being entered by the task |
| * @template: desired set of css pointers in css_set (pre-calculated) |
| * |
| * Returns true if @cset matches @old_cset except for the hierarchy |
| * which @new_cgrp belongs to, for which it should match @new_cgrp. |
| */ |
| static bool compare_css_sets(struct css_set *cset, |
| struct css_set *old_cset, |
| struct cgroup *new_cgrp, |
| struct cgroup_subsys_state *template[]) |
| { |
| struct list_head *l1, *l2; |
| |
| if (memcmp(template, cset->subsys, sizeof(cset->subsys))) { |
| /* Not all subsystems matched */ |
| return false; |
| } |
| |
| /* |
| * Compare cgroup pointers in order to distinguish between |
| * different cgroups in hierarchies with no subsystems. We |
| * could get by with just this check alone (and skip the |
| * memcmp above) but on most setups the memcmp check will |
| * avoid the need for this more expensive check on almost all |
| * candidates. |
| */ |
| |
| l1 = &cset->cgrp_links; |
| l2 = &old_cset->cgrp_links; |
| while (1) { |
| struct cgrp_cset_link *link1, *link2; |
| struct cgroup *cgrp1, *cgrp2; |
| |
| l1 = l1->next; |
| l2 = l2->next; |
| /* See if we reached the end - both lists are equal length. */ |
| if (l1 == &cset->cgrp_links) { |
| BUG_ON(l2 != &old_cset->cgrp_links); |
| break; |
| } else { |
| BUG_ON(l2 == &old_cset->cgrp_links); |
| } |
| /* Locate the cgroups associated with these links. */ |
| link1 = list_entry(l1, struct cgrp_cset_link, cgrp_link); |
| link2 = list_entry(l2, struct cgrp_cset_link, cgrp_link); |
| cgrp1 = link1->cgrp; |
| cgrp2 = link2->cgrp; |
| /* Hierarchies should be linked in the same order. */ |
| BUG_ON(cgrp1->root != cgrp2->root); |
| |
| /* |
| * If this hierarchy is the hierarchy of the cgroup |
| * that's changing, then we need to check that this |
| * css_set points to the new cgroup; if it's any other |
| * hierarchy, then this css_set should point to the |
| * same cgroup as the old css_set. |
| */ |
| if (cgrp1->root == new_cgrp->root) { |
| if (cgrp1 != new_cgrp) |
| return false; |
| } else { |
| if (cgrp1 != cgrp2) |
| return false; |
| } |
| } |
| return true; |
| } |
| |
| /** |
| * find_existing_css_set - init css array and find the matching css_set |
| * @old_cset: the css_set that we're using before the cgroup transition |
| * @cgrp: the cgroup that we're moving into |
| * @template: out param for the new set of csses, should be clear on entry |
| */ |
| static struct css_set *find_existing_css_set(struct css_set *old_cset, |
| struct cgroup *cgrp, |
| struct cgroup_subsys_state *template[]) |
| { |
| struct cgroupfs_root *root = cgrp->root; |
| struct cgroup_subsys *ss; |
| struct css_set *cset; |
| unsigned long key; |
| int i; |
| |
| /* |
| * Build the set of subsystem state objects that we want to see in the |
| * new css_set. While subsystems can change globally, the entries here |
| * won't change, so no need for locking. |
| */ |
| for_each_subsys(ss, i) { |
| if (root->subsys_mask & (1UL << i)) { |
| /* Subsystem is in this hierarchy. So we want |
| * the subsystem state from the new |
| * cgroup */ |
| template[i] = cgroup_css(cgrp, ss); |
| } else { |
| /* Subsystem is not in this hierarchy, so we |
| * don't want to change the subsystem state */ |
| template[i] = old_cset->subsys[i]; |
| } |
| } |
| |
| key = css_set_hash(template); |
| hash_for_each_possible(css_set_table, cset, hlist, key) { |
| if (!compare_css_sets(cset, old_cset, cgrp, template)) |
| continue; |
| |
| /* This css_set matches what we need */ |
| return cset; |
| } |
| |
| /* No existing css_set matched */ |
| return NULL; |
| } |
| |
| static void free_cgrp_cset_links(struct list_head *links_to_free) |
| { |
| struct cgrp_cset_link *link, *tmp_link; |
| |
| list_for_each_entry_safe(link, tmp_link, links_to_free, cset_link) { |
| list_del(&link->cset_link); |
| kfree(link); |
| } |
| } |
| |
| /** |
| * allocate_cgrp_cset_links - allocate cgrp_cset_links |
| * @count: the number of links to allocate |
| * @tmp_links: list_head the allocated links are put on |
| * |
| * Allocate @count cgrp_cset_link structures and chain them on @tmp_links |
| * through ->cset_link. Returns 0 on success or -errno. |
| */ |
| static int allocate_cgrp_cset_links(int count, struct list_head *tmp_links) |
| { |
| struct cgrp_cset_link *link; |
| int i; |
| |
| INIT_LIST_HEAD(tmp_links); |
| |
| for (i = 0; i < count; i++) { |
| link = kzalloc(sizeof(*link), GFP_KERNEL); |
| if (!link) { |
| free_cgrp_cset_links(tmp_links); |
| return -ENOMEM; |
| } |
| list_add(&link->cset_link, tmp_links); |
| } |
| return 0; |
| } |
| |
| /** |
| * link_css_set - a helper function to link a css_set to a cgroup |
| * @tmp_links: cgrp_cset_link objects allocated by allocate_cgrp_cset_links() |
| * @cset: the css_set to be linked |
| * @cgrp: the destination cgroup |
| */ |
| static void link_css_set(struct list_head *tmp_links, struct css_set *cset, |
| struct cgroup *cgrp) |
| { |
| struct cgrp_cset_link *link; |
| |
| BUG_ON(list_empty(tmp_links)); |
| link = list_first_entry(tmp_links, struct cgrp_cset_link, cset_link); |
| link->cset = cset; |
| link->cgrp = cgrp; |
| list_move(&link->cset_link, &cgrp->cset_links); |
| /* |
| * Always add links to the tail of the list so that the list |
| * is sorted by order of hierarchy creation |
| */ |
| list_add_tail(&link->cgrp_link, &cset->cgrp_links); |
| } |
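| |
| /* |
| * The helpers above implement a simple pre-allocation protocol (sketch, |
| * modeled on find_css_set() and cgroup_mount() below): allocate the links |
| * outside the locked region, consume them under css_set_lock, then free |
| * whatever is left over. |
| * |
| *     struct list_head tmp_links; |
| * |
| *     if (allocate_cgrp_cset_links(count, &tmp_links) < 0) |
| *             return -ENOMEM; |
| *     write_lock(&css_set_lock); |
| *     link_css_set(&tmp_links, cset, cgrp); |
| *     write_unlock(&css_set_lock); |
| *     free_cgrp_cset_links(&tmp_links); |
| */ |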
| |
| /** |
| * find_css_set - return a new css_set with one cgroup updated |
| * @old_cset: the baseline css_set |
| * @cgrp: the cgroup to be updated |
| * |
| * Return a new css_set that's equivalent to @old_cset, but with @cgrp |
| * substituted into the appropriate hierarchy. |
| */ |
| static struct css_set *find_css_set(struct css_set *old_cset, |
| struct cgroup *cgrp) |
| { |
| struct cgroup_subsys_state *template[CGROUP_SUBSYS_COUNT] = { }; |
| struct css_set *cset; |
| struct list_head tmp_links; |
| struct cgrp_cset_link *link; |
| unsigned long key; |
| |
| lockdep_assert_held(&cgroup_mutex); |
| |
| /* First see if we already have a css_set that matches |
| * the desired set */ |
| read_lock(&css_set_lock); |
| cset = find_existing_css_set(old_cset, cgrp, template); |
| if (cset) |
| get_css_set(cset); |
| read_unlock(&css_set_lock); |
| |
| if (cset) |
| return cset; |
| |
| cset = kzalloc(sizeof(*cset), GFP_KERNEL); |
| if (!cset) |
| return NULL; |
| |
| /* Allocate all the cgrp_cset_link objects that we'll need */ |
| if (allocate_cgrp_cset_links(cgroup_root_count, &tmp_links) < 0) { |
| kfree(cset); |
| return NULL; |
| } |
| |
| atomic_set(&cset->refcount, 1); |
| INIT_LIST_HEAD(&cset->cgrp_links); |
| INIT_LIST_HEAD(&cset->tasks); |
| INIT_HLIST_NODE(&cset->hlist); |
| |
| /* Copy the set of subsystem state objects generated in |
| * find_existing_css_set() */ |
| memcpy(cset->subsys, template, sizeof(cset->subsys)); |
| |
| write_lock(&css_set_lock); |
| /* Add reference counts and links from the new css_set. */ |
| list_for_each_entry(link, &old_cset->cgrp_links, cgrp_link) { |
| struct cgroup *c = link->cgrp; |
| |
| if (c->root == cgrp->root) |
| c = cgrp; |
| link_css_set(&tmp_links, cset, c); |
| } |
| |
| BUG_ON(!list_empty(&tmp_links)); |
| |
| css_set_count++; |
| |
| /* Add this css_set to the hash table */ |
| key = css_set_hash(cset->subsys); |
| hash_add(css_set_table, &cset->hlist, key); |
| |
| write_unlock(&css_set_lock); |
| |
| return cset; |
| } |
| |
| /* |
| * Return the cgroup for "task" from the given hierarchy. Must be |
| * called with cgroup_mutex held. |
| */ |
| static struct cgroup *task_cgroup_from_root(struct task_struct *task, |
| struct cgroupfs_root *root) |
| { |
| struct css_set *cset; |
| struct cgroup *res = NULL; |
| |
| BUG_ON(!mutex_is_locked(&cgroup_mutex)); |
| read_lock(&css_set_lock); |
| /* |
| * No need to lock the task - since we hold cgroup_mutex the |
| * task can't change groups, so the only thing that can happen |
| * is that it exits and its css is set back to init_css_set. |
| */ |
| cset = task_css_set(task); |
| if (cset == &init_css_set) { |
| res = &root->top_cgroup; |
| } else { |
| struct cgrp_cset_link *link; |
| |
| list_for_each_entry(link, &cset->cgrp_links, cgrp_link) { |
| struct cgroup *c = link->cgrp; |
| |
| if (c->root == root) { |
| res = c; |
| break; |
| } |
| } |
| } |
| read_unlock(&css_set_lock); |
| BUG_ON(!res); |
| return res; |
| } |
| |
| /* |
| * There is one global cgroup mutex. We also require taking |
| * task_lock() when dereferencing a task's cgroup subsys pointers. |
| * See "The task_lock() exception", at the end of this comment. |
| * |
| * A task must hold cgroup_mutex to modify cgroups. |
| * |
| * Any task can increment and decrement the count field without lock. |
| * So in general, code holding cgroup_mutex can't rely on the count |
| * field not changing. However, if the count goes to zero, then only |
| * cgroup_attach_task() can increment it again. Because a count of zero |
| * means that no tasks are currently attached, therefore there is no |
| * way a task attached to that cgroup can fork (the other way to |
| * increment the count). So code holding cgroup_mutex can safely |
| * assume that if the count is zero, it will stay zero. Similarly, if |
| * a task holds cgroup_mutex on a cgroup with zero count, it |
| * knows that the cgroup won't be removed, as cgroup_rmdir() |
| * needs that mutex. |
| * |
| * The fork and exit callbacks, cgroup_fork() and cgroup_exit(), don't |
| * (usually) take cgroup_mutex. These are the two most performance |
| * critical pieces of code here. The exception occurs on cgroup_exit(), |
| * when a task in a notify_on_release cgroup exits. Then cgroup_mutex |
| * is taken, and if the cgroup count is zero, a usermode call is made |
| * to the release agent with the name of the cgroup (path relative to |
| * the root of the cgroup file system) as the argument. |
| * |
| * A cgroup can only be deleted if both its 'count' of using tasks |
| * is zero, and its list of 'children' cgroups is empty. Since all |
| * tasks in the system use _some_ cgroup, and since there is always at |
| * least one task in the system (init, pid == 1), therefore, top_cgroup |
| * always has children cgroups and/or using tasks. So we don't |
| * need a special hack to ensure that top_cgroup cannot be deleted. |
| * |
| * The task_lock() exception |
| * |
| * The need for this exception arises from the action of |
| * cgroup_attach_task(), which overwrites one task's cgroup pointer with |
| * another. It does so using cgroup_mutex; however, there are |
| * several performance critical places that need to reference |
| * task->cgroup without the expense of grabbing a system global |
| * mutex. Therefore except as noted below, when dereferencing or, as |
| * in cgroup_attach_task(), modifying a task's cgroup pointer we use |
| * task_lock(), which acts on a spinlock (task->alloc_lock) already in |
| * the task_struct routinely used for such matters. |
| * |
| * P.S. One more locking exception. RCU is used to guard the |
| * update of a task's cgroup pointer by cgroup_attach_task(). |
| */ |
| |
| /* |
| * A couple of forward declarations required, due to cyclic reference loop: |
| * cgroup_mkdir -> cgroup_create -> cgroup_populate_dir -> |
| * cgroup_add_file -> cgroup_create_file -> cgroup_dir_inode_operations |
| * -> cgroup_mkdir. |
| */ |
| |
| static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode); |
| static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry); |
| static int cgroup_populate_dir(struct cgroup *cgrp, unsigned long subsys_mask); |
| static const struct inode_operations cgroup_dir_inode_operations; |
| static const struct file_operations proc_cgroupstats_operations; |
| |
| static struct backing_dev_info cgroup_backing_dev_info = { |
| .name = "cgroup", |
| .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK, |
| }; |
| |
| static struct inode *cgroup_new_inode(umode_t mode, struct super_block *sb) |
| { |
| struct inode *inode = new_inode(sb); |
| |
| if (inode) { |
| inode->i_ino = get_next_ino(); |
| inode->i_mode = mode; |
| inode->i_uid = current_fsuid(); |
| inode->i_gid = current_fsgid(); |
| inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; |
| inode->i_mapping->backing_dev_info = &cgroup_backing_dev_info; |
| } |
| return inode; |
| } |
| |
| static struct cgroup_name *cgroup_alloc_name(struct dentry *dentry) |
| { |
| struct cgroup_name *name; |
| |
| name = kmalloc(sizeof(*name) + dentry->d_name.len + 1, GFP_KERNEL); |
| if (!name) |
| return NULL; |
| strcpy(name->name, dentry->d_name.name); |
| return name; |
| } |
| |
| static void cgroup_free_fn(struct work_struct *work) |
| { |
| struct cgroup *cgrp = container_of(work, struct cgroup, destroy_work); |
| |
| mutex_lock(&cgroup_mutex); |
| cgrp->root->number_of_cgroups--; |
| mutex_unlock(&cgroup_mutex); |
| |
| /* |
| * We get a ref to the parent's dentry, and put the ref when |
| * this cgroup is being freed, so it's guaranteed that the |
| * parent won't be destroyed before its children. |
| */ |
| dput(cgrp->parent->dentry); |
| |
| /* |
| * Drop the active superblock reference that we took when we |
| * created the cgroup. This will free cgrp->root, if we are |
| * holding the last reference to @sb. |
| */ |
| deactivate_super(cgrp->root->sb); |
| |
| cgroup_pidlist_destroy_all(cgrp); |
| |
| simple_xattrs_free(&cgrp->xattrs); |
| |
| kfree(rcu_dereference_raw(cgrp->name)); |
| kfree(cgrp); |
| } |
| |
| static void cgroup_free_rcu(struct rcu_head *head) |
| { |
| struct cgroup *cgrp = container_of(head, struct cgroup, rcu_head); |
| |
| INIT_WORK(&cgrp->destroy_work, cgroup_free_fn); |
| queue_work(cgroup_destroy_wq, &cgrp->destroy_work); |
| } |
| |
| static void cgroup_diput(struct dentry *dentry, struct inode *inode) |
| { |
| /* is dentry a directory? If so, free the associated cgroup */ |
| if (S_ISDIR(inode->i_mode)) { |
| struct cgroup *cgrp = dentry->d_fsdata; |
| |
| BUG_ON(!(cgroup_is_dead(cgrp))); |
| |
| /* |
| * XXX: cgrp->id is only used to look up css's. As cgroup |
| * and css's lifetimes will be decoupled, it should be made |
| * per-subsystem and moved to css->id so that lookups are |
| * successful until the target css is released. |
| */ |
| mutex_lock(&cgroup_mutex); |
| idr_remove(&cgrp->root->cgroup_idr, cgrp->id); |
| mutex_unlock(&cgroup_mutex); |
| cgrp->id = -1; |
| |
| call_rcu(&cgrp->rcu_head, cgroup_free_rcu); |
| } else { |
| struct cfent *cfe = __d_cfe(dentry); |
| struct cgroup *cgrp = dentry->d_parent->d_fsdata; |
| |
| WARN_ONCE(!list_empty(&cfe->node) && |
| cgrp != &cgrp->root->top_cgroup, |
| "cfe still linked for %s\n", cfe->type->name); |
| simple_xattrs_free(&cfe->xattrs); |
| kfree(cfe); |
| } |
| iput(inode); |
| } |
| |
| static void remove_dir(struct dentry *d) |
| { |
| struct dentry *parent = dget(d->d_parent); |
| |
| d_delete(d); |
| simple_rmdir(parent->d_inode, d); |
| dput(parent); |
| } |
| |
| static void cgroup_rm_file(struct cgroup *cgrp, const struct cftype *cft) |
| { |
| struct cfent *cfe; |
| |
| lockdep_assert_held(&cgrp->dentry->d_inode->i_mutex); |
| lockdep_assert_held(&cgroup_mutex); |
| |
| /* |
| * If we're doing cleanup due to failure of cgroup_create(), |
| * the corresponding @cfe may not exist. |
| */ |
| list_for_each_entry(cfe, &cgrp->files, node) { |
| struct dentry *d = cfe->dentry; |
| |
| if (cft && cfe->type != cft) |
| continue; |
| |
| dget(d); |
| d_delete(d); |
| simple_unlink(cgrp->dentry->d_inode, d); |
| list_del_init(&cfe->node); |
| dput(d); |
| |
| break; |
| } |
| } |
| |
| /** |
| * cgroup_clear_dir - remove subsys files in a cgroup directory |
| * @cgrp: target cgroup |
| * @subsys_mask: mask of the subsystem ids whose files should be removed |
| */ |
| static void cgroup_clear_dir(struct cgroup *cgrp, unsigned long subsys_mask) |
| { |
| struct cgroup_subsys *ss; |
| int i; |
| |
| for_each_subsys(ss, i) { |
| struct cftype_set *set; |
| |
| if (!test_bit(i, &subsys_mask)) |
| continue; |
| list_for_each_entry(set, &ss->cftsets, node) |
| cgroup_addrm_files(cgrp, set->cfts, false); |
| } |
| } |
| |
| /* |
| * NOTE: the dentry must have been dget()'ed |
| */ |
| static void cgroup_d_remove_dir(struct dentry *dentry) |
| { |
| struct dentry *parent; |
| |
| parent = dentry->d_parent; |
| spin_lock(&parent->d_lock); |
| spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); |
| list_del_init(&dentry->d_u.d_child); |
| spin_unlock(&dentry->d_lock); |
| spin_unlock(&parent->d_lock); |
| remove_dir(dentry); |
| } |
| |
| /* |
| * Call with cgroup_mutex held. Drops reference counts on modules, including |
| * any duplicate ones that parse_cgroupfs_options took. If this function |
| * returns an error, no reference counts are touched. |
| */ |
| static int rebind_subsystems(struct cgroupfs_root *root, |
| unsigned long added_mask, unsigned long removed_mask) |
| { |
| struct cgroup *cgrp = &root->top_cgroup; |
| struct cgroup_subsys *ss; |
| unsigned long pinned = 0; |
| int i, ret; |
| |
| BUG_ON(!mutex_is_locked(&cgroup_mutex)); |
| BUG_ON(!mutex_is_locked(&cgroup_root_mutex)); |
| |
| /* Check that any added subsystems are currently free */ |
| for_each_subsys(ss, i) { |
| if (!(added_mask & (1 << i))) |
| continue; |
| |
| /* is the subsystem mounted elsewhere? */ |
| if (ss->root != &cgroup_dummy_root) { |
| ret = -EBUSY; |
| goto out_put; |
| } |
| |
| /* pin the module */ |
| if (!try_module_get(ss->module)) { |
| ret = -ENOENT; |
| goto out_put; |
| } |
| pinned |= 1 << i; |
| } |
| |
| /* subsys could be missing if unloaded between parsing and here */ |
| if (added_mask != pinned) { |
| ret = -ENOENT; |
| goto out_put; |
| } |
| |
| ret = cgroup_populate_dir(cgrp, added_mask); |
| if (ret) |
| goto out_put; |
| |
| /* |
| * Nothing can fail from this point on. Remove files for the |
| * removed subsystems and rebind each subsystem. |
| */ |
| cgroup_clear_dir(cgrp, removed_mask); |
| |
| for_each_subsys(ss, i) { |
| unsigned long bit = 1UL << i; |
| |
| if (bit & added_mask) { |
| /* We're binding this subsystem to this hierarchy */ |
| BUG_ON(cgroup_css(cgrp, ss)); |
| BUG_ON(!cgroup_css(cgroup_dummy_top, ss)); |
| BUG_ON(cgroup_css(cgroup_dummy_top, ss)->cgroup != cgroup_dummy_top); |
| |
| rcu_assign_pointer(cgrp->subsys[i], |
| cgroup_css(cgroup_dummy_top, ss)); |
| cgroup_css(cgrp, ss)->cgroup = cgrp; |
| |
| ss->root = root; |
| if (ss->bind) |
| ss->bind(cgroup_css(cgrp, ss)); |
| |
| /* refcount was already taken, and we're keeping it */ |
| root->subsys_mask |= bit; |
| } else if (bit & removed_mask) { |
| /* We're removing this subsystem */ |
| BUG_ON(cgroup_css(cgrp, ss) != cgroup_css(cgroup_dummy_top, ss)); |
| BUG_ON(cgroup_css(cgrp, ss)->cgroup != cgrp); |
| |
| if (ss->bind) |
| ss->bind(cgroup_css(cgroup_dummy_top, ss)); |
| |
| cgroup_css(cgroup_dummy_top, ss)->cgroup = cgroup_dummy_top; |
| RCU_INIT_POINTER(cgrp->subsys[i], NULL); |
| |
| cgroup_subsys[i]->root = &cgroup_dummy_root; |
| |
| /* subsystem is now free - drop reference on module */ |
| module_put(ss->module); |
| root->subsys_mask &= ~bit; |
| } |
| } |
| |
| /* |
| * Mark that @root has finished binding subsystems. @root->subsys_mask |
| * now matches the bound subsystems. |
| */ |
| root->flags |= CGRP_ROOT_SUBSYS_BOUND; |
| |
| return 0; |
| |
| out_put: |
| for_each_subsys(ss, i) |
| if (pinned & (1 << i)) |
| module_put(ss->module); |
| return ret; |
| } |
| |
| static int cgroup_show_options(struct seq_file *seq, struct dentry *dentry) |
| { |
| struct cgroupfs_root *root = dentry->d_sb->s_fs_info; |
| struct cgroup_subsys *ss; |
| int ssid; |
| |
| mutex_lock(&cgroup_root_mutex); |
| for_each_subsys(ss, ssid) |
| if (root->subsys_mask & (1 << ssid)) |
| seq_printf(seq, ",%s", ss->name); |
| if (root->flags & CGRP_ROOT_SANE_BEHAVIOR) |
| seq_puts(seq, ",sane_behavior"); |
| if (root->flags & CGRP_ROOT_NOPREFIX) |
| seq_puts(seq, ",noprefix"); |
| if (root->flags & CGRP_ROOT_XATTR) |
| seq_puts(seq, ",xattr"); |
| if (strlen(root->release_agent_path)) |
| seq_printf(seq, ",release_agent=%s", root->release_agent_path); |
| if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->top_cgroup.flags)) |
| seq_puts(seq, ",clone_children"); |
| if (strlen(root->name)) |
| seq_printf(seq, ",name=%s", root->name); |
| mutex_unlock(&cgroup_root_mutex); |
| return 0; |
| } |
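| |
| /* |
| * For reference, the options emitted above end up in a /proc/mounts line |
| * such as the following (illustrative values only): |
| * |
| *     cgroup /sys/fs/cgroup/cpuset cgroup rw,cpuset,clone_children 0 0 |
| */ |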
| |
| struct cgroup_sb_opts { |
| unsigned long subsys_mask; |
| unsigned long flags; |
| char *release_agent; |
| bool cpuset_clone_children; |
| char *name; |
| /* User explicitly requested empty subsystem */ |
| bool none; |
| |
| struct cgroupfs_root *new_root; |
| |
| }; |
| |
| /* |
| * Convert a hierarchy specifier into a bitmask of subsystems and |
| * flags. Call with cgroup_mutex held to protect the cgroup_subsys[] |
| * array. This function takes refcounts on subsystems to be used, unless it |
| * returns error, in which case no refcounts are taken. |
| */ |
| static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts) |
| { |
| char *token, *o = data; |
| bool all_ss = false, one_ss = false; |
| unsigned long mask = (unsigned long)-1; |
| struct cgroup_subsys *ss; |
| int i; |
| |
| BUG_ON(!mutex_is_locked(&cgroup_mutex)); |
| |
| #ifdef CONFIG_CPUSETS |
| mask = ~(1UL << cpuset_subsys_id); |
| #endif |
| |
| memset(opts, 0, sizeof(*opts)); |
| |
| while ((token = strsep(&o, ",")) != NULL) { |
| if (!*token) |
| return -EINVAL; |
| if (!strcmp(token, "none")) { |
| /* Explicitly have no subsystems */ |
| opts->none = true; |
| continue; |
| } |
| if (!strcmp(token, "all")) { |
| /* Mutually exclusive option 'all' + subsystem name */ |
| if (one_ss) |
| return -EINVAL; |
| all_ss = true; |
| continue; |
| } |
| if (!strcmp(token, "__DEVEL__sane_behavior")) { |
| opts->flags |= CGRP_ROOT_SANE_BEHAVIOR; |
| continue; |
| } |
| if (!strcmp(token, "noprefix")) { |
| opts->flags |= CGRP_ROOT_NOPREFIX; |
| continue; |
| } |
| if (!strcmp(token, "clone_children")) { |
| opts->cpuset_clone_children = true; |
| continue; |
| } |
| if (!strcmp(token, "xattr")) { |
| opts->flags |= CGRP_ROOT_XATTR; |
| continue; |
| } |
| if (!strncmp(token, "release_agent=", 14)) { |
| /* Specifying two release agents is forbidden */ |
| if (opts->release_agent) |
| return -EINVAL; |
| opts->release_agent = |
| kstrndup(token + 14, PATH_MAX - 1, GFP_KERNEL); |
| if (!opts->release_agent) |
| return -ENOMEM; |
| continue; |
| } |
| if (!strncmp(token, "name=", 5)) { |
| const char *name = token + 5; |
| /* Can't specify an empty name */ |
| if (!strlen(name)) |
| return -EINVAL; |
| /* Must match [\w.-]+ */ |
| for (i = 0; i < strlen(name); i++) { |
| char c = name[i]; |
| if (isalnum(c)) |
| continue; |
| if ((c == '.') || (c == '-') || (c == '_')) |
| continue; |
| return -EINVAL; |
| } |
| /* Specifying two names is forbidden */ |
| if (opts->name) |
| return -EINVAL; |
| opts->name = kstrndup(name, |
| MAX_CGROUP_ROOT_NAMELEN - 1, |
| GFP_KERNEL); |
| if (!opts->name) |
| return -ENOMEM; |
| |
| continue; |
| } |
| |
| for_each_subsys(ss, i) { |
| if (strcmp(token, ss->name)) |
| continue; |
| if (ss->disabled) |
| continue; |
| |
| /* Mutually exclusive option 'all' + subsystem name */ |
| if (all_ss) |
| return -EINVAL; |
| set_bit(i, &opts->subsys_mask); |
| one_ss = true; |
| |
| break; |
| } |
| if (i == CGROUP_SUBSYS_COUNT) |
| return -ENOENT; |
| } |
| |
| /* |
| * If the 'all' option was specified select all the subsystems, |
| * otherwise, if the 'none', 'name=' and subsystem name options |
| * were not specified, default to 'all'. |
| */ |
| if (all_ss || (!one_ss && !opts->none && !opts->name)) |
| for_each_subsys(ss, i) |
| if (!ss->disabled) |
| set_bit(i, &opts->subsys_mask); |
| |
| /* Consistency checks */ |
| |
| if (opts->flags & CGRP_ROOT_SANE_BEHAVIOR) { |
| pr_warning("cgroup: sane_behavior: this is still under development and its behaviors will change, proceed at your own risk\n"); |
| |
| if (opts->flags & CGRP_ROOT_NOPREFIX) { |
| pr_err("cgroup: sane_behavior: noprefix is not allowed\n"); |
| return -EINVAL; |
| } |
| |
| if (opts->cpuset_clone_children) { |
| pr_err("cgroup: sane_behavior: clone_children is not allowed\n"); |
| return -EINVAL; |
| } |
| } |
| |
| /* |
| * Option noprefix was introduced just for backward compatibility |
| * with the old cpuset, so we allow noprefix only if mounting just |
| * the cpuset subsystem. |
| */ |
| if ((opts->flags & CGRP_ROOT_NOPREFIX) && (opts->subsys_mask & mask)) |
| return -EINVAL; |
| |
| |
| /* Can't specify "none" and some subsystems */ |
| if (opts->subsys_mask && opts->none) |
| return -EINVAL; |
| |
| /* |
| * We either have to specify by name or by subsystems. (So all |
| * empty hierarchies must have a name). |
| */ |
| if (!opts->subsys_mask && !opts->name) |
| return -EINVAL; |
| |
| return 0; |
| } |
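| |
| /* |
| * Illustrative option strings accepted by the parser above (the names |
| * are examples only): |
| * |
| *     "cpu,cpuacct"       - mount two specific subsystems |
| *     "all"               - mount every enabled subsystem |
| *     "none,name=mygrp"   - named hierarchy with no controllers |
| *     "cpuset,noprefix"   - cpuset alone, with legacy file names |
| */ |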
| |
| static int cgroup_remount(struct super_block *sb, int *flags, char *data) |
| { |
| int ret = 0; |
| struct cgroupfs_root *root = sb->s_fs_info; |
| struct cgroup *cgrp = &root->top_cgroup; |
| struct cgroup_sb_opts opts; |
| unsigned long added_mask, removed_mask; |
| |
| if (root->flags & CGRP_ROOT_SANE_BEHAVIOR) { |
| pr_err("cgroup: sane_behavior: remount is not allowed\n"); |
| return -EINVAL; |
| } |
| |
| mutex_lock(&cgrp->dentry->d_inode->i_mutex); |
| mutex_lock(&cgroup_mutex); |
| mutex_lock(&cgroup_root_mutex); |
| |
| /* See what subsystems are wanted */ |
| ret = parse_cgroupfs_options(data, &opts); |
| if (ret) |
| goto out_unlock; |
| |
| if (opts.subsys_mask != root->subsys_mask || opts.release_agent) |
| pr_warning("cgroup: option changes via remount are deprecated (pid=%d comm=%s)\n", |
| task_tgid_nr(current), current->comm); |
| |
| added_mask = opts.subsys_mask & ~root->subsys_mask; |
| removed_mask = root->subsys_mask & ~opts.subsys_mask; |
| |
| /* Don't allow flags or name to change at remount */ |
| if (((opts.flags ^ root->flags) & CGRP_ROOT_OPTION_MASK) || |
| (opts.name && strcmp(opts.name, root->name))) { |
| pr_err("cgroup: option or name mismatch, new: 0x%lx \"%s\", old: 0x%lx \"%s\"\n", |
| opts.flags & CGRP_ROOT_OPTION_MASK, opts.name ?: "", |
| root->flags & CGRP_ROOT_OPTION_MASK, root->name); |
| ret = -EINVAL; |
| goto out_unlock; |
| } |
| |
| /* remounting is not allowed for populated hierarchies */ |
| if (root->number_of_cgroups > 1) { |
| ret = -EBUSY; |
| goto out_unlock; |
| } |
| |
| ret = rebind_subsystems(root, added_mask, removed_mask); |
| if (ret) |
| goto out_unlock; |
| |
| if (opts.release_agent) |
| strcpy(root->release_agent_path, opts.release_agent); |
| out_unlock: |
| kfree(opts.release_agent); |
| kfree(opts.name); |
| mutex_unlock(&cgroup_root_mutex); |
| mutex_unlock(&cgroup_mutex); |
| mutex_unlock(&cgrp->dentry->d_inode->i_mutex); |
| return ret; |
| } |
| |
| static const struct super_operations cgroup_ops = { |
| .statfs = simple_statfs, |
| .drop_inode = generic_delete_inode, |
| .show_options = cgroup_show_options, |
| .remount_fs = cgroup_remount, |
| }; |
| |
| static void init_cgroup_housekeeping(struct cgroup *cgrp) |
| { |
| INIT_LIST_HEAD(&cgrp->sibling); |
| INIT_LIST_HEAD(&cgrp->children); |
| INIT_LIST_HEAD(&cgrp->files); |
| INIT_LIST_HEAD(&cgrp->cset_links); |
| INIT_LIST_HEAD(&cgrp->release_list); |
| INIT_LIST_HEAD(&cgrp->pidlists); |
| mutex_init(&cgrp->pidlist_mutex); |
| cgrp->dummy_css.cgroup = cgrp; |
| simple_xattrs_init(&cgrp->xattrs); |
| } |
| |
| static void init_cgroup_root(struct cgroupfs_root *root) |
| { |
| struct cgroup *cgrp = &root->top_cgroup; |
| |
| INIT_LIST_HEAD(&root->root_list); |
| root->number_of_cgroups = 1; |
| cgrp->root = root; |
| RCU_INIT_POINTER(cgrp->name, &root_cgroup_name); |
| init_cgroup_housekeeping(cgrp); |
| idr_init(&root->cgroup_idr); |
| } |
| |
| static int cgroup_init_root_id(struct cgroupfs_root *root, int start, int end) |
| { |
| int id; |
| |
| lockdep_assert_held(&cgroup_mutex); |
| lockdep_assert_held(&cgroup_root_mutex); |
| |
| id = idr_alloc_cyclic(&cgroup_hierarchy_idr, root, start, end, |
| GFP_KERNEL); |
| if (id < 0) |
| return id; |
| |
| root->hierarchy_id = id; |
| return 0; |
| } |
| |
| static void cgroup_exit_root_id(struct cgroupfs_root *root) |
| { |
| lockdep_assert_held(&cgroup_mutex); |
| lockdep_assert_held(&cgroup_root_mutex); |
| |
| if (root->hierarchy_id) { |
| idr_remove(&cgroup_hierarchy_idr, root->hierarchy_id); |
| root->hierarchy_id = 0; |
| } |
| } |
| |
| static int cgroup_test_super(struct super_block *sb, void *data) |
| { |
| struct cgroup_sb_opts *opts = data; |
| struct cgroupfs_root *root = sb->s_fs_info; |
| |
| /* If we asked for a name then it must match */ |
| if (opts->name && strcmp(opts->name, root->name)) |
| return 0; |
| |
| /* |
| * If we asked for subsystems (or explicitly for no |
| * subsystems) then they must match |
| */ |
| if ((opts->subsys_mask || opts->none) |
| && (opts->subsys_mask != root->subsys_mask)) |
| return 0; |
| |
| return 1; |
| } |
| |
| static struct cgroupfs_root *cgroup_root_from_opts(struct cgroup_sb_opts *opts) |
| { |
| struct cgroupfs_root *root; |
| |
| if (!opts->subsys_mask && !opts->none) |
| return NULL; |
| |
| root = kzalloc(sizeof(*root), GFP_KERNEL); |
| if (!root) |
| return ERR_PTR(-ENOMEM); |
| |
| init_cgroup_root(root); |
| |
| /* |
| * We need to set @root->subsys_mask now so that @root can be |
| * matched by cgroup_test_super() before it finishes |
| * initialization; otherwise, competing mounts with the same |
| * options may try to bind the same subsystems instead of waiting |
| * for the first one leading to unexpected mount errors. |
| * SUBSYS_BOUND will be set once actual binding is complete. |
| */ |
| root->subsys_mask = opts->subsys_mask; |
| root->flags = opts->flags; |
| if (opts->release_agent) |
| strcpy(root->release_agent_path, opts->release_agent); |
| if (opts->name) |
| strcpy(root->name, opts->name); |
| if (opts->cpuset_clone_children) |
| set_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->top_cgroup.flags); |
| return root; |
| } |
| |
| static void cgroup_free_root(struct cgroupfs_root *root) |
| { |
| if (root) { |
| /* hierarchy ID should already have been released */ |
| WARN_ON_ONCE(root->hierarchy_id); |
| |
| idr_destroy(&root->cgroup_idr); |
| kfree(root); |
| } |
| } |
| |
| static int cgroup_set_super(struct super_block *sb, void *data) |
| { |
| int ret; |
| struct cgroup_sb_opts *opts = data; |
| |
| /* If we don't have a new root, we can't set up a new sb */ |
| if (!opts->new_root) |
| return -EINVAL; |
| |
| BUG_ON(!opts->subsys_mask && !opts->none); |
| |
| ret = set_anon_super(sb, NULL); |
| if (ret) |
| return ret; |
| |
| sb->s_fs_info = opts->new_root; |
| opts->new_root->sb = sb; |
| |
| sb->s_blocksize = PAGE_CACHE_SIZE; |
| sb->s_blocksize_bits = PAGE_CACHE_SHIFT; |
| sb->s_magic = CGROUP_SUPER_MAGIC; |
| sb->s_op = &cgroup_ops; |
| |
| return 0; |
| } |
| |
| static int cgroup_get_rootdir(struct super_block *sb) |
| { |
| static const struct dentry_operations cgroup_dops = { |
| .d_iput = cgroup_diput, |
| .d_delete = always_delete_dentry, |
| }; |
| |
| struct inode *inode = |
| cgroup_new_inode(S_IFDIR | S_IRUGO | S_IXUGO | S_IWUSR, sb); |
| |
| if (!inode) |
| return -ENOMEM; |
| |
| inode->i_fop = &simple_dir_operations; |
| inode->i_op = &cgroup_dir_inode_operations; |
| /* directories start off with i_nlink == 2 (for "." entry) */ |
| inc_nlink(inode); |
| sb->s_root = d_make_root(inode); |
| if (!sb->s_root) |
| return -ENOMEM; |
| /* for everything else we want ->d_op set */ |
| sb->s_d_op = &cgroup_dops; |
| return 0; |
| } |
| |
| static struct dentry *cgroup_mount(struct file_system_type *fs_type, |
| int flags, const char *unused_dev_name, |
| void *data) |
| { |
| struct cgroup_sb_opts opts; |
| struct cgroupfs_root *root; |
| int ret = 0; |
| struct super_block *sb; |
| struct cgroupfs_root *new_root; |
| struct list_head tmp_links; |
| struct inode *inode; |
| const struct cred *cred; |
| |
| /* First find the desired set of subsystems */ |
| mutex_lock(&cgroup_mutex); |
| ret = parse_cgroupfs_options(data, &opts); |
| mutex_unlock(&cgroup_mutex); |
| if (ret) |
| goto out_err; |
| |
| /* |
| * Allocate a new cgroup root. We may not need it if we're |
| * reusing an existing hierarchy. |
| */ |
| new_root = cgroup_root_from_opts(&opts); |
| if (IS_ERR(new_root)) { |
| ret = PTR_ERR(new_root); |
| goto out_err; |
| } |
| opts.new_root = new_root; |
| |
| /* Locate an existing or new sb for this hierarchy */ |
| sb = sget(fs_type, cgroup_test_super, cgroup_set_super, 0, &opts); |
| if (IS_ERR(sb)) { |
| ret = PTR_ERR(sb); |
| cgroup_free_root(opts.new_root); |
| goto out_err; |
| } |
| |
| root = sb->s_fs_info; |
| BUG_ON(!root); |
| if (root == opts.new_root) { |
| /* We used the new root structure, so this is a new hierarchy */ |
| struct cgroup *root_cgrp = &root->top_cgroup; |
| struct cgroupfs_root *existing_root; |
| int i; |
| struct css_set *cset; |
| |
| BUG_ON(sb->s_root != NULL); |
| |
| ret = cgroup_get_rootdir(sb); |
| if (ret) |
| goto drop_new_super; |
| inode = sb->s_root->d_inode; |
| |
| mutex_lock(&inode->i_mutex); |
| mutex_lock(&cgroup_mutex); |
| mutex_lock(&cgroup_root_mutex); |
| |
| ret = idr_alloc(&root->cgroup_idr, root_cgrp, 0, 1, GFP_KERNEL); |
| if (ret < 0) |
| goto unlock_drop; |
| root_cgrp->id = ret; |
| |
| /* Check for name clashes with existing mounts */ |
| ret = -EBUSY; |
| if (strlen(root->name)) |
| for_each_active_root(existing_root) |
| if (!strcmp(existing_root->name, root->name)) |
| goto unlock_drop; |
| |
| /* |
| * We're accessing css_set_count without locking |
| * css_set_lock here, but that's OK - it can only be |
| * increased by someone holding cgroup_lock, and |
| * that's us. The worst that can happen is that we |
| * have some link structures left over. |
| */ |
| ret = allocate_cgrp_cset_links(css_set_count, &tmp_links); |
| if (ret) |
| goto unlock_drop; |
| |
| /* ID 0 is reserved for dummy root, 1 for unified hierarchy */ |
| ret = cgroup_init_root_id(root, 2, 0); |
| if (ret) |
| goto unlock_drop; |
| |
| sb->s_root->d_fsdata = root_cgrp; |
| root_cgrp->dentry = sb->s_root; |
| |
| /* |
| * We're inside get_sb() and will call lookup_one_len() to |
| * create the root files, which doesn't work if SELinux is |
| * in use. The following cred dancing somehow works around |
| * it. See 2ce9738ba ("cgroupfs: use init_cred when |
| * populating new cgroupfs mount") for more details. |
| */ |
| cred = override_creds(&init_cred); |
| |
| ret = cgroup_addrm_files(root_cgrp, cgroup_base_files, true); |
| if (ret) |
| goto rm_base_files; |
| |
| ret = rebind_subsystems(root, root->subsys_mask, 0); |
| if (ret) |
| goto rm_base_files; |
| |
| revert_creds(cred); |
| |
| /* |
| * There must be no failure case after here, since rebinding |
| * takes care of subsystems' refcounts, which are explicitly |
| * dropped in the failure exit path. |
| */ |
| |
| list_add(&root->root_list, &cgroup_roots); |
| cgroup_root_count++; |
| |
| /* Link the top cgroup in this hierarchy into all |
| * the css_set objects */ |
| write_lock(&css_set_lock); |
| hash_for_each(css_set_table, i, cset, hlist) |
| link_css_set(&tmp_links, cset, root_cgrp); |
| write_unlock(&css_set_lock); |
| |
| free_cgrp_cset_links(&tmp_links); |
| |
| BUG_ON(!list_empty(&root_cgrp->children)); |
| BUG_ON(root->number_of_cgroups != 1); |
| |
| mutex_unlock(&cgroup_root_mutex); |
| mutex_unlock(&cgroup_mutex); |
| mutex_unlock(&inode->i_mutex); |
| } else { |
| /* |
| * We re-used an existing hierarchy - the new root (if |
| * any) is not needed |
| */ |
| cgroup_free_root(opts.new_root); |
| |
| if ((root->flags ^ opts.flags) & CGRP_ROOT_OPTION_MASK) { |
| if ((root->flags | opts.flags) & CGRP_ROOT_SANE_BEHAVIOR) { |
| pr_err("cgroup: sane_behavior: new mount options should match the existing superblock\n"); |
| ret = -EINVAL; |
| goto drop_new_super; |
| } else { |
| pr_warning("cgroup: new mount options do not match the existing superblock, will be ignored\n"); |
| } |
| } |
| } |
| |
| kfree(opts.release_agent); |
| kfree(opts.name); |
| return dget(sb->s_root); |
| |
| rm_base_files: |
| free_cgrp_cset_links(&tmp_links); |
| cgroup_addrm_files(&root->top_cgroup, cgroup_base_files, false); |
| revert_creds(cred); |
| unlock_drop: |
| cgroup_exit_root_id(root); |
| mutex_unlock(&cgroup_root_mutex); |
| mutex_unlock(&cgroup_mutex); |
| mutex_unlock(&inode->i_mutex); |
| drop_new_super: |
| deactivate_locked_super(sb); |
| out_err: |
| kfree(opts.release_agent); |
| kfree(opts.name); |
| return ERR_PTR(ret); |
| } |
| |
| static void cgroup_kill_sb(struct super_block *sb) |
| { |
| struct cgroupfs_root *root = sb->s_fs_info; |
| struct cgroup *cgrp = &root->top_cgroup; |
| struct cgrp_cset_link *link, *tmp_link; |
| int ret; |
| |
| BUG_ON(!root); |
| |
| BUG_ON(root->number_of_cgroups != 1); |
| BUG_ON(!list_empty(&cgrp->children)); |
| |
| mutex_lock(&cgrp->dentry->d_inode->i_mutex); |
| mutex_lock(&cgroup_mutex); |
| mutex_lock(&cgroup_root_mutex); |
| |
| /* Rebind all subsystems back to the default hierarchy */ |
| if (root->flags & CGRP_ROOT_SUBSYS_BOUND) { |
| ret = rebind_subsystems(root, 0, root->subsys_mask); |
| /* Shouldn't be able to fail ... */ |
| BUG_ON(ret); |
| } |
| |
| /* |
| * Release all the links from cset_links to this hierarchy's |
| * root cgroup |
| */ |
| write_lock(&css_set_lock); |
| |
| list_for_each_entry_safe(link, tmp_link, &cgrp->cset_links, cset_link) { |
| list_del(&link->cset_link); |
| list_del(&link->cgrp_link); |
| kfree(link); |
| } |
| write_unlock(&css_set_lock); |
| |
| if (!list_empty(&root->root_list)) { |
| list_del(&root->root_list); |
| cgroup_root_count--; |
| } |
| |
| cgroup_exit_root_id(root); |
| |
| mutex_unlock(&cgroup_root_mutex); |
| mutex_unlock(&cgroup_mutex); |
| mutex_unlock(&cgrp->dentry->d_inode->i_mutex); |
| |
| simple_xattrs_free(&cgrp->xattrs); |
| |
| kill_litter_super(sb); |
| cgroup_free_root(root); |
| } |
| |
| static struct file_system_type cgroup_fs_type = { |
| .name = "cgroup", |
| .mount = cgroup_mount, |
| .kill_sb = cgroup_kill_sb, |
| }; |
| |
| static struct kobject *cgroup_kobj; |
| |
| /** |
| * cgroup_path - generate the path of a cgroup |
| * @cgrp: the cgroup in question |
| * @buf: the buffer to write the path into |
| * @buflen: the length of the buffer |
| * |
| * Writes path of cgroup into buf. Returns 0 on success, -errno on error. |
| * |
| * We can't generate cgroup path using dentry->d_name, as accessing |
| * dentry->d_name must be protected by irq-unsafe dentry->d_lock or parent |
| * inode's i_mutex, while on the other hand cgroup_path() can be called |
| * with some irq-safe spinlocks held. |
| */ |
| int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen) |
| { |
| int ret = -ENAMETOOLONG; |
| char *start; |
| |
| if (!cgrp->parent) { |
| if (strlcpy(buf, "/", buflen) >= buflen) |
| return -ENAMETOOLONG; |
| return 0; |
| } |
| |
| start = buf + buflen - 1; |
| *start = '\0'; |
| |
| rcu_read_lock(); |
| do { |
| const char *name = cgroup_name(cgrp); |
| int len; |
| |
| len = strlen(name); |
| if ((start -= len) < buf) |
| goto out; |
| memcpy(start, name, len); |
| |
| if (--start < buf) |
| goto out; |
| *start = '/'; |
| |
| cgrp = cgrp->parent; |
| } while (cgrp->parent); |
| ret = 0; |
| memmove(buf, start, buf + buflen - start); |
| out: |
| rcu_read_unlock(); |
| return ret; |
| } |
| EXPORT_SYMBOL_GPL(cgroup_path); |
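| |
| /* |
| * Example (illustrative): for a cgroup "b" created under "a", |
| * cgroup_path() writes "/a/b" into @buf and returns 0; if @buflen can't |
| * hold the full path, -ENAMETOOLONG is returned instead. |
| */ |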
| |
| /** |
| * task_cgroup_path - cgroup path of a task in the first cgroup hierarchy |
| * @task: target task |
| * @buf: the buffer to write the path into |
| * @buflen: the length of the buffer |
| * |
| * Determine @task's cgroup on the first (the one with the lowest non-zero |
| * hierarchy_id) cgroup hierarchy and copy its path into @buf. This |
| * function grabs cgroup_mutex and shouldn't be used inside locks used by |
| * cgroup controller callbacks. |
| * |
| * Returns 0 on success, fails with -%ENAMETOOLONG if @buflen is too short. |
| */ |
| int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen) |
| { |
| struct cgroupfs_root *root; |
| struct cgroup *cgrp; |
| int hierarchy_id = 1, ret = 0; |
| |
| if (buflen < 2) |
| return -ENAMETOOLONG; |
| |
| mutex_lock(&cgroup_mutex); |
| |
| root = idr_get_next(&cgroup_hierarchy_idr, &hierarchy_id); |
| |
| if (root) { |
| cgrp = task_cgroup_from_root(task, root); |
| ret = cgroup_path(cgrp, buf, buflen); |
| } else { |
| /* if no hierarchy exists, everyone is in "/" */ |
| memcpy(buf, "/", 2); |
| } |
| |
| mutex_unlock(&cgroup_mutex); |
| return ret; |
| } |
| EXPORT_SYMBOL_GPL(task_cgroup_path); |
| |
| /* |
| * Control Group taskset |
| */ |
| struct task_and_cgroup { |
| struct task_struct *task; |
| struct cgroup *cgrp; |
| struct css_set *cset; |
| }; |
| |
| struct cgroup_taskset { |
| struct task_and_cgroup single; |
| struct flex_array *tc_array; |
| int tc_array_len; |
| int idx; |
| struct cgroup *cur_cgrp; |
| }; |
| |
| /** |
| * cgroup_taskset_first - reset taskset and return the first task |
| * @tset: taskset of interest |
| * |
| * @tset iteration is initialized and the first task is returned. |
| */ |
| struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset) |
| { |
| if (tset->tc_array) { |
| tset->idx = 0; |
| return cgroup_taskset_next(tset); |
| } else { |
| tset->cur_cgrp = tset->single.cgrp; |
| return tset->single.task; |
| } |
| } |
| EXPORT_SYMBOL_GPL(cgroup_taskset_first); |
| |
| /** |
| * cgroup_taskset_next - iterate to the next task in taskset |
| * @tset: taskset of interest |
| * |
| * Return the next task in @tset. Iteration must have been initialized |
| * with cgroup_taskset_first(). |
| */ |
| struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset) |
| { |
| struct task_and_cgroup *tc; |
| |
| if (!tset->tc_array || tset->idx >= tset->tc_array_len) |
| return NULL; |
| |
| tc = flex_array_get(tset->tc_array, tset->idx++); |
| tset->cur_cgrp = tc->cgrp; |
| return tc->task; |
| } |
| EXPORT_SYMBOL_GPL(cgroup_taskset_next); |
| |
| /** |
| * cgroup_taskset_cur_css - return the matching css for the current task |
| * @tset: taskset of interest |
| * @subsys_id: the ID of the target subsystem |
| * |
| * Return the css for the current (last returned) task of @tset for |
| * subsystem specified by @subsys_id. This function must be preceded by |
| * either cgroup_taskset_first() or cgroup_taskset_next(). |
| */ |
| struct cgroup_subsys_state *cgroup_taskset_cur_css(struct cgroup_taskset *tset, |
| int subsys_id) |
| { |
| return cgroup_css(tset->cur_cgrp, cgroup_subsys[subsys_id]); |
| } |
| EXPORT_SYMBOL_GPL(cgroup_taskset_cur_css); |
| |
| /** |
| * cgroup_taskset_size - return the number of tasks in taskset |
| * @tset: taskset of interest |
| */ |
| int cgroup_taskset_size(struct cgroup_taskset *tset) |
| { |
| return tset->tc_array ? tset->tc_array_len : 1; |
| } |
| EXPORT_SYMBOL_GPL(cgroup_taskset_size); |
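| |
| /* |
| * Illustrative sketch (editorial addition): how a controller's |
| * ->can_attach() callback might walk a taskset with the iterators |
| * above. The rejection policy shown (refusing kernel threads) is |
| * purely hypothetical. |
| */ |
| static int example_can_attach(struct cgroup_subsys_state *css, |
| struct cgroup_taskset *tset) |
| { |
| struct task_struct *task; |
| |
| for (task = cgroup_taskset_first(tset); task; |
| task = cgroup_taskset_next(tset)) { |
| if (task->flags & PF_KTHREAD) |
| return -EINVAL; |
| } |
| return 0; |
| } |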
| |
| |
| /* |
| * cgroup_task_migrate - move a task from one cgroup to another. |
| * |
| * Must be called with cgroup_mutex and threadgroup locked. |
| */ |
| static void cgroup_task_migrate(struct cgroup *old_cgrp, |
| struct task_struct *tsk, |
| struct css_set *new_cset) |
| { |
| struct css_set *old_cset; |
| |
| /* |
| * We are synchronized through threadgroup_lock() against PF_EXITING |
| * setting such that we can't race against cgroup_exit() changing the |
| * css_set to init_css_set and dropping the old one. |
| */ |
| WARN_ON_ONCE(tsk->flags & PF_EXITING); |
| old_cset = task_css_set(tsk); |
| |
| task_lock(tsk); |
| rcu_assign_pointer(tsk->cgroups, new_cset); |
| task_unlock(tsk); |
| |
| /* Update the css_set linked lists if we're using them */ |
| write_lock(&css_set_lock); |
| if (!list_empty(&tsk->cg_list)) |
| list_move(&tsk->cg_list, &new_cset->tasks); |
| write_unlock(&css_set_lock); |
| |
| /* |
| * We just gained a reference on old_cset by taking it from the |
| * task. As trading it for new_cset is protected by cgroup_mutex, |
| * we're safe to drop it here; it will be freed under RCU. |
| */ |
| set_bit(CGRP_RELEASABLE, &old_cgrp->flags); |
| put_css_set(old_cset); |
| } |
| |
| /** |
| * cgroup_attach_task - attach a task or a whole threadgroup to a cgroup |
| * @cgrp: the cgroup to attach to |
| * @tsk: the task or the leader of the threadgroup to be attached |
| * @threadgroup: attach the whole threadgroup? |
| * |
| * Call holding cgroup_mutex and the group_rwsem of the leader. Will take |
| * task_lock of @tsk or each thread in the threadgroup individually in turn. |
| */ |
| static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk, |
| bool threadgroup) |
| { |
| int retval, i, group_size; |
| struct cgroupfs_root *root = cgrp->root; |
| struct cgroup_subsys_state *css, *failed_css = NULL; |
| /* threadgroup list cursor and array */ |
| struct task_struct *leader = tsk; |
| struct task_and_cgroup *tc; |
| struct flex_array *group; |
| struct cgroup_taskset tset = { }; |
| |
| /* |
| * step 0: we need to do expensive, possibly blocking operations for |
| * every thread, so we cannot iterate the thread group list directly; |
| * that requires RCU or tasklist_lock. Instead, build an array of all |
| * threads in the group - group_rwsem prevents new threads from |
| * appearing, and if threads exit, this will just be an over-estimate. |
| */ |
| if (threadgroup) |
| group_size = get_nr_threads(tsk); |
| else |
| group_size = 1; |
| /* flex_array supports very large thread-groups better than kmalloc. */ |
| group = flex_array_alloc(sizeof(*tc), group_size, GFP_KERNEL); |
| if (!group) |
| return -ENOMEM; |
| /* pre-allocate to guarantee space while iterating in rcu read-side. */ |
| retval = flex_array_prealloc(group, 0, group_size, GFP_KERNEL); |
| if (retval) |
| goto out_free_group_list; |
| |
| i = 0; |
| /* |
| * Prevent freeing of tasks while we take a snapshot. Tasks that are |
| * already PF_EXITING could be freed from underneath us unless we |
| * take an rcu_read_lock. |
| */ |
| rcu_read_lock(); |
| do { |
| struct task_and_cgroup ent; |
| |
| /* @tsk either already exited or can't exit until the end */ |
| if (tsk->flags & PF_EXITING) |
| goto next; |
| |
| /* as per above, nr_threads may decrease, but not increase. */ |
| BUG_ON(i >= group_size); |
| ent.task = tsk; |
| ent.cgrp = task_cgroup_from_root(tsk, root); |
| /* nothing to do if this task is already in the cgroup */ |
| if (ent.cgrp == cgrp) |
| goto next; |
| /* |
| * saying GFP_ATOMIC has no effect here because we did prealloc |
| * earlier, but it's good form to communicate our expectations. |
| */ |
| retval = flex_array_put(group, i, &ent, GFP_ATOMIC); |
| BUG_ON(retval != 0); |
| i++; |
| next: |
| if (!threadgroup) |
| break; |
| } while_each_thread(leader, tsk); |
| rcu_read_unlock(); |
| /* remember the number of threads in the array for later. */ |
| group_size = i; |
| tset.tc_array = group; |
| tset.tc_array_len = group_size; |
| |
| /* methods shouldn't be called if no task is actually migrating */ |
| retval = 0; |
| if (!group_size) |
| goto out_free_group_list; |
| |
| /* |
| * step 1: check that we can legitimately attach to the cgroup. |
| */ |
| for_each_css(css, i, cgrp) { |
| if (css->ss->can_attach) { |
| retval = css->ss->can_attach(css, &tset); |
| if (retval) { |
| failed_css = css; |
| goto out_cancel_attach; |
| } |
| } |
| } |
| |
| /* |
| * step 2: make sure css_sets exist for all threads to be migrated. |
| * we use find_css_set, which allocates a new one if necessary. |
| */ |
| for (i = 0; i < group_size; i++) { |
| struct css_set *old_cset; |
| |
| tc = flex_array_get(group, i); |
| old_cset = task_css_set(tc->task); |
| tc->cset = find_css_set(old_cset, cgrp); |
| if (!tc->cset) { |
| retval = -ENOMEM; |
| goto out_put_css_set_refs; |
| } |
| } |
| |
| /* |
| * step 3: now that we're guaranteed success wrt the css_sets, |
| * proceed to move all tasks to the new cgroup. There are no |
| * failure cases after here, so this is the commit point. |
| */ |
| for (i = 0; i < group_size; i++) { |
| tc = flex_array_get(group, i); |
| cgroup_task_migrate(tc->cgrp, tc->task, tc->cset); |
| } |
| /* nothing is sensitive to fork() after this point. */ |
| |
| /* |
| * step 4: do subsystem attach callbacks. |
| */ |
| for_each_css(css, i, cgrp) |
| if (css->ss->attach) |
| css->ss->attach(css, &tset); |
| |
| /* |
| * step 5: success! and cleanup |
| */ |
| retval = 0; |
| out_put_css_set_refs: |
| if (retval) { |
| for (i = 0; i < group_size; i++) { |
| tc = flex_array_get(group, i); |
| if (!tc->cset) |
| break; |
| put_css_set(tc->cset); |
| } |
| } |
| out_cancel_attach: |
| if (retval) { |
| for_each_css(css, i, cgrp) { |
| if (css == failed_css) |
| break; |
| if (css->ss->cancel_attach) |
| css->ss->cancel_attach(css, &tset); |
| } |
| } |
| out_free_group_list: |
| flex_array_free(group); |
| return retval; |
| } |
| |
| static int cgroup_allow_attach(struct cgroup *cgrp, struct cgroup_taskset *tset) |
| { |
| struct cgroup_subsys_state *css; |
| int i; |
| int ret; |
| |
| for_each_css(css, i, cgrp) { |
| if (css->ss->allow_attach) { |
| ret = css->ss->allow_attach(css, tset); |
| if (ret) |
| return ret; |
| } else { |
| return -EACCES; |
| } |
| } |
| |
| return 0; |
| } |
| |
| /* |
| * Find the task_struct of the task to attach by vpid and pass it along to |
| * cgroup_attach_task(), which attaches either the task itself or all tasks |
| * in its threadgroup. Will lock cgroup_mutex and threadgroup; may take |
| * task_lock of task. |
| */ |
| static int attach_task_by_pid(struct cgroup *cgrp, u64 pid, bool threadgroup) |
| { |
| struct task_struct *tsk; |
| const struct cred *cred = current_cred(), *tcred; |
| int ret; |
| |
| if (!cgroup_lock_live_group(cgrp)) |
| return -ENODEV; |
| |
| retry_find_task: |
| rcu_read_lock(); |
| if (pid) { |
| tsk = find_task_by_vpid(pid); |
| if (!tsk) { |
| rcu_read_unlock(); |
| ret = -ESRCH; |
| goto out_unlock_cgroup; |
| } |
| /* |
| * even if we're attaching all tasks in the thread group, we |
| * only need to check permissions on one of them. |
| */ |
| tcred = __task_cred(tsk); |
| if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) && |
| !uid_eq(cred->euid, tcred->uid) && |
| !uid_eq(cred->euid, tcred->suid)) { |
| /* |
| * if the default permission check fails, give each |
| * cgroup a chance to extend the permission check |
| */ |
| struct cgroup_taskset tset = { }; |
| tset.single.task = tsk; |
| tset.single.cgrp = cgrp; |
| ret = cgroup_allow_attach(cgrp, &tset); |
| if (ret) { |
| rcu_read_unlock(); |
| goto out_unlock_cgroup; |
| } |
| } |
| } else |
| tsk = current; |
| |
| if (threadgroup) |
| tsk = tsk->group_leader; |
| |
| /* |
| * Workqueue threads may acquire PF_NO_SETAFFINITY and become |
| * trapped in a cpuset, or an RT worker may be born in a cgroup |
| * with no rt_runtime allocated. Just say no. |
| */ |
| if (tsk == kthreadd_task || (tsk->flags & PF_NO_SETAFFINITY)) { |
| ret = -EINVAL; |
| rcu_read_unlock(); |
| goto out_unlock_cgroup; |
| } |
| |
| get_task_struct(tsk); |
| rcu_read_unlock(); |
| |
| threadgroup_lock(tsk); |
| if (threadgroup) { |
| if (!thread_group_leader(tsk)) { |
| /* |
| * A race with de_thread() from another thread's exec() |
| * may strip us of our leadership. If this happens, |
| * there is no choice but to throw this task away and |
| * try again; this is |
| * "double-double-toil-and-trouble-check locking". |
| */ |
| threadgroup_unlock(tsk); |
| put_task_struct(tsk); |
| goto retry_find_task; |
| } |
| } |
| |
| ret = cgroup_attach_task(cgrp, tsk, threadgroup); |
| |
| threadgroup_unlock(tsk); |
| |
| put_task_struct(tsk); |
| out_unlock_cgroup: |
| mutex_unlock(&cgroup_mutex); |
| return ret; |
| } |
| |
| /** |
| * cgroup_attach_task_all - attach task 'tsk' to all cgroups of task 'from' |
| * @from: attach to all cgroups of a given task |
| * @tsk: the task to be attached |
| */ |
| int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk) |
| { |
| struct cgroupfs_root *root; |
| int retval = 0; |
| |
| mutex_lock(&cgroup_mutex); |
| for_each_active_root(root) { |
| struct cgroup *from_cgrp = task_cgroup_from_root(from, root); |
| |
| retval = cgroup_attach_task(from_cgrp, tsk, false); |
| if (retval) |
| break; |
| } |
| mutex_unlock(&cgroup_mutex); |
| |
| return retval; |
| } |
| EXPORT_SYMBOL_GPL(cgroup_attach_task_all); |
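| |
| /* |
| * Illustrative sketch (editorial addition): cgroup_attach_task_all() |
| * is typically used when a kernel helper should inherit the cgroup |
| * membership of the task it acts on behalf of. example_adopt_cgroups() |
| * is hypothetical. |
| */ |
| static int example_adopt_cgroups(struct task_struct *owner) |
| { |
| /* move current into @owner's cgroup on every active hierarchy */ |
| return cgroup_attach_task_all(owner, current); |
| } |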
| |
| static int cgroup_tasks_write(struct cgroup_subsys_state *css, |
| struct cftype *cft, u64 pid) |
| { |
| return attach_task_by_pid(css->cgroup, pid, false); |
| } |
| |
| static int cgroup_procs_write(struct cgroup_subsys_state *css, |
| struct cftype *cft, u64 tgid) |
| { |
| return attach_task_by_pid(css->cgroup, tgid, true); |
| } |
| |
| static int cgroup_release_agent_write(struct cgroup_subsys_state *css, |
| struct cftype *cft, const char *buffer) |
| { |
| BUILD_BUG_ON(sizeof(css->cgroup->root->release_agent_path) < PATH_MAX); |
| if (strlen(buffer) >= PATH_MAX) |
| return -EINVAL; |
| if (!cgroup_lock_live_group(css->cgroup)) |
| return -ENODEV; |
| mutex_lock(&cgroup_root_mutex); |
| strcpy(css->cgroup->root->release_agent_path, buffer); |
| mutex_unlock(&cgroup_root_mutex); |
| mutex_unlock(&cgroup_mutex); |
| return 0; |
| } |
| |
| static int cgroup_release_agent_show(struct seq_file *seq, void *v) |
| { |
| struct cgroup *cgrp = seq_css(seq)->cgroup; |
| |
| if (!cgroup_lock_live_group(cgrp)) |
| return -ENODEV; |
| seq_puts(seq, cgrp->root->release_agent_path); |
| seq_putc(seq, '\n'); |
| mutex_unlock(&cgroup_mutex); |
| return 0; |
| } |
| |
| static int cgroup_sane_behavior_show(struct seq_file *seq, void *v) |
| { |
| struct cgroup *cgrp = seq_css(seq)->cgroup; |
| |
| seq_printf(seq, "%d\n", cgroup_sane_behavior(cgrp)); |
| return 0; |
| } |
| |
| /* A buffer size big enough for numbers or short strings */ |
| #define CGROUP_LOCAL_BUFFER_SIZE 64 |
| |
| static ssize_t cgroup_file_write(struct file *file, const char __user *userbuf, |
| size_t nbytes, loff_t *ppos) |
| { |
| struct cfent *cfe = __d_cfe(file->f_dentry); |
| struct cftype *cft = __d_cft(file->f_dentry); |
| struct cgroup_subsys_state *css = cfe->css; |
| size_t max_bytes = cft->max_write_len ?: CGROUP_LOCAL_BUFFER_SIZE - 1; |
| char *buf; |
| int ret; |
| |
| if (nbytes >= max_bytes) |
| return -E2BIG; |
| |
| buf = kmalloc(nbytes + 1, GFP_KERNEL); |
| if (!buf) |
| return -ENOMEM; |
| |
| if (copy_from_user(buf, userbuf, nbytes)) { |
| ret = -EFAULT; |
| goto out_free; |
| } |
| |
| buf[nbytes] = '\0'; |
| |
| if (cft->write_string) { |
| ret = cft->write_string(css, cft, strstrip(buf)); |
| } else if (cft->write_u64) { |
| unsigned long long v; |
| ret = kstrtoull(buf, 0, &v); |
| if (!ret) |
| ret = cft->write_u64(css, cft, v); |
| } else if (cft->write_s64) { |
| long long v; |
| ret = kstrtoll(buf, 0, &v); |
| if (!ret) |
| ret = cft->write_s64(css, cft, v); |
| } else if (cft->trigger) { |
| ret = cft->trigger(css, (unsigned int)cft->private); |
| } else { |
| ret = -EINVAL; |
| } |
| out_free: |
| kfree(buf); |
| return ret ?: nbytes; |
| } |
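| |
| /* |
| * Illustrative sketch (editorial addition): a ->read_u64/->write_u64 |
| * handler pair of the kind dispatched by cgroup_file_write() above |
| * and by cgroup_seqfile_show() below. struct example_css, its |
| * embedded css and the "limit" field are hypothetical. |
| */ |
| struct example_css { |
| struct cgroup_subsys_state css; |
| u64 limit; |
| }; |
| |
| static inline struct example_css *css_to_example(struct cgroup_subsys_state *css) |
| { |
| return container_of(css, struct example_css, css); |
| } |
| |
| static u64 example_read_limit(struct cgroup_subsys_state *css, |
| struct cftype *cft) |
| { |
| return css_to_example(css)->limit; |
| } |
| |
| static int example_write_limit(struct cgroup_subsys_state *css, |
| struct cftype *cft, u64 val) |
| { |
| css_to_example(css)->limit = val; |
| return 0; |
| } |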
| |
| /* |
| * seqfile ops/methods for returning structured data. Currently just |
| * supports string->u64 maps, but can be extended in future. |
| */ |
| |
| static void *cgroup_seqfile_start(struct seq_file *seq, loff_t *ppos) |
| { |
| struct cftype *cft = seq_cft(seq); |
| |
| if (cft->seq_start) { |
| return cft->seq_start(seq, ppos); |
| } else { |
| /* |
| * The same behavior and code as single_open(). Returns |
| * !NULL if pos is at the beginning; otherwise, NULL. |
| */ |
| return NULL + !*ppos; |
| } |
| } |
| |
| static void *cgroup_seqfile_next(struct seq_file *seq, void *v, loff_t *ppos) |
| { |
| struct cftype *cft = seq_cft(seq); |
| |
| if (cft->seq_next) { |
| return cft->seq_next(seq, v, ppos); |
| } else { |
| /* |
| * The same behavior and code as single_open(), always |
| * terminate after the initial read. |
| */ |
| ++*ppos; |
| return NULL; |
| } |
| } |
| |
| static void cgroup_seqfile_stop(struct seq_file *seq, void *v) |
| { |
| struct cftype *cft = seq_cft(seq); |
| |
| if (cft->seq_stop) |
| cft->seq_stop(seq, v); |
| } |
| |
| static int cgroup_seqfile_show(struct seq_file *m, void *arg) |
| { |
| struct cftype *cft = seq_cft(m); |
| struct cgroup_subsys_state *css = seq_css(m); |
| |
| if (cft->seq_show) |
| return cft->seq_show(m, arg); |
| |
| if (cft->read_u64) |
| seq_printf(m, "%llu\n", cft->read_u64(css, cft)); |
| else if (cft->read_s64) |
| seq_printf(m, "%lld\n", cft->read_s64(css, cft)); |
| else |
| return -EINVAL; |
| return 0; |
| } |
| |
| static const struct seq_operations cgroup_seq_operations = { |
| .start = cgroup_seqfile_start, |
| .next = cgroup_seqfile_next, |
| .stop = cgroup_seqfile_stop, |
| .show = cgroup_seqfile_show, |
| }; |
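| |
| /* |
| * Illustrative sketch (editorial addition): a minimal ->seq_show |
| * handler served through the seq_operations above. seq_css() resolves |
| * the css the file belongs to; printing css->flags is only an example. |
| */ |
| static int example_seq_show(struct seq_file *m, void *v) |
| { |
| struct cgroup_subsys_state *css = seq_css(m); |
| |
| seq_printf(m, "flags: %#lx\n", css->flags); |
| return 0; |
| } |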
| |
| static int cgroup_file_open(struct inode *inode, struct file *file) |
| { |
| struct cfent *cfe = __d_cfe(file->f_dentry); |
| struct cftype *cft = __d_cft(file->f_dentry); |
| struct cgroup *cgrp = __d_cgrp(cfe->dentry->d_parent); |
| struct cgroup_subsys_state *css; |
| struct cgroup_open_file *of; |
| int err; |
| |
| err = generic_file_open(inode, file); |
| if (err) |
| return err; |
| |
| /* |
| * If the file belongs to a subsystem, pin the css. Will be |
| * unpinned either on open failure or release. This ensures that |
| * @css stays alive for all file operations. |
| */ |
| rcu_read_lock(); |
| css = cgroup_css(cgrp, cft->ss); |
| if (cft->ss && !css_tryget(css)) |
| css = NULL; |
| rcu_read_unlock(); |
| |
| if (!css) |
| return -ENODEV; |
| |
| /* |
| * @cfe->css is used by read/write/close to determine the |
| * associated css. @file->private_data would be a better place but |
| * that's already used by seqfile. Multiple accessors may use it |
| * simultaneously which is okay as the association never changes. |
| */ |
| WARN_ON_ONCE(cfe->css && cfe->css != css); |
| cfe->css = css; |
| |
| of = __seq_open_private(file, &cgroup_seq_operations, |
| sizeof(struct cgroup_open_file)); |
| if (of) { |
| of->cfe = cfe; |
| return 0; |
| } |
| |
| if (css->ss) |
| css_put(css); |
| return -ENOMEM; |
| } |
| |
| static int cgroup_file_release(struct inode *inode, struct file *file) |
| { |
| struct cfent *cfe = __d_cfe(file->f_dentry); |
| struct cgroup_subsys_state *css = cfe->css; |
| |
| if (css->ss) |
| css_put(css); |
| return seq_release_private(inode, file); |
| } |
| |
| /* |
| * cgroup_rename - Only allow simple rename of directories in place. |
| */ |
| static int cgroup_rename(struct inode *old_dir, struct dentry *old_dentry, |
| struct inode *new_dir, struct dentry *new_dentry) |
| { |
| int ret; |
| struct cgroup_name *name, *old_name; |
| struct cgroup *cgrp; |
| |
| /* |
| * It's convenient to use the parent dir's i_mutex to protect |
| * cgrp->name. |
| */ |
| lockdep_assert_held(&old_dir->i_mutex); |
| |
| if (!S_ISDIR(old_dentry->d_inode->i_mode)) |
| return -ENOTDIR; |
| if (new_dentry->d_inode) |
| return -EEXIST; |
| if (old_dir != new_dir) |
| return -EIO; |
| |
| cgrp = __d_cgrp(old_dentry); |
| |
| /* |
| * This isn't a proper migration and its usefulness is very |
| * limited. Disallow if sane_behavior. |
| */ |
| if (cgroup_sane_behavior(cgrp)) |
| return -EPERM; |
| |
| name = cgroup_alloc_name(new_dentry); |
| if (!name) |
| return -ENOMEM; |
| |
| ret = simple_rename(old_dir, old_dentry, new_dir, new_dentry); |
| if (ret) { |
| kfree(name); |
| return ret; |
| } |
| |
| old_name = rcu_dereference_protected(cgrp->name, true); |
| rcu_assign_pointer(cgrp->name, name); |
| |
| kfree_rcu(old_name, rcu_head); |
| return 0; |
| } |
| |
| static struct simple_xattrs *__d_xattrs(struct dentry *dentry) |
| { |
| if (S_ISDIR(dentry->d_inode->i_mode)) |
| return &__d_cgrp(dentry)->xattrs; |
| else |
| return &__d_cfe(dentry)->xattrs; |
| } |
| |
| static inline int xattr_enabled(struct dentry *dentry) |
| { |
| struct cgroupfs_root *root = dentry->d_sb->s_fs_info; |
| return root->flags & CGRP_ROOT_XATTR; |
| } |
| |
| static bool is_valid_xattr(const char *name) |
| { |
| if (!strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) || |
| !strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN)) |
| return true; |
| return false; |
| } |
| |
| static int cgroup_setxattr(struct dentry *dentry, const char *name, |
| const void *val, size_t size, int flags) |
| { |
| if (!xattr_enabled(dentry)) |
| return -EOPNOTSUPP; |
| if (!is_valid_xattr(name)) |
| return -EINVAL; |
| return simple_xattr_set(__d_xattrs(dentry), name, val, size, flags); |
| } |
| |
| static int cgroup_removexattr(struct dentry *dentry, const char *name) |
| { |
| if (!xattr_enabled(dentry)) |
| return -EOPNOTSUPP; |
| if (!is_valid_xattr(name)) |
| return -EINVAL; |
| return simple_xattr_remove(__d_xattrs(dentry), name); |
| } |
| |
| static ssize_t cgroup_getxattr(struct dentry *dentry, const char *name, |
| void *buf, size_t size) |
| { |
| if (!xattr_enabled(dentry)) |
| return -EOPNOTSUPP; |
| if (!is_valid_xattr(name)) |
| return -EINVAL; |
| return simple_xattr_get(__d_xattrs(dentry), name, buf, size); |
| } |
| |
| static ssize_t cgroup_listxattr(struct dentry *dentry, char *buf, size_t size) |
| { |
| if (!xattr_enabled(dentry)) |
| return -EOPNOTSUPP; |
| return simple_xattr_list(__d_xattrs(dentry), buf, size); |
| } |
| |
| static const struct file_operations cgroup_file_operations = { |
| .read = seq_read, |
| .write = cgroup_file_write, |
| .llseek = generic_file_llseek, |
| .open = cgroup_file_open, |
| .release = cgroup_file_release, |
| }; |
| |
| static const struct inode_operations cgroup_file_inode_operations = { |
| .setxattr = cgroup_setxattr, |
| .getxattr = cgroup_getxattr, |
| .listxattr = cgroup_listxattr, |
| .removexattr = cgroup_removexattr, |
| }; |
| |
| static const struct inode_operations cgroup_dir_inode_operations = { |
| .lookup = simple_lookup, |
| .mkdir = cgroup_mkdir, |
| .rmdir = cgroup_rmdir, |
| .rename = cgroup_rename, |
| .setxattr = cgroup_setxattr, |
| .getxattr = cgroup_getxattr, |
| .listxattr = cgroup_listxattr, |
| .removexattr = cgroup_removexattr, |
| }; |
| |
| static int cgroup_create_file(struct dentry *dentry, umode_t mode, |
| struct super_block *sb) |
| { |
| struct inode *inode; |
| |
| if (!dentry) |
| return -ENOENT; |
| if (dentry->d_inode) |
| return -EEXIST; |
| |
| inode = cgroup_new_inode(mode, sb); |
| if (!inode) |
| return -ENOMEM; |
| |
| if (S_ISDIR(mode)) { |
| inode->i_op = &cgroup_dir_inode_operations; |
| inode->i_fop = &simple_dir_operations; |
| |
| /* start off with i_nlink == 2 (for "." entry) */ |
| inc_nlink(inode); |
| inc_nlink(dentry->d_parent->d_inode); |
| |
| /* |
| * Control reaches here with cgroup_mutex held. |
| * @inode->i_mutex should nest outside cgroup_mutex but we |
| * want to populate it immediately without releasing |
| * cgroup_mutex. As @inode isn't visible to anyone else |
| * yet, trylock will always succeed without affecting |
| * lockdep checks. |
| */ |
| WARN_ON_ONCE(!mutex_trylock(&inode->i_mutex)); |
| } else if (S_ISREG(mode)) { |
| inode->i_size = 0; |
| inode->i_fop = &cgroup_file_operations; |
| inode->i_op = &cgroup_file_inode_operations; |
| } |
| d_instantiate(dentry, inode); |
| dget(dentry); /* Extra count - pin the dentry in core */ |
| return 0; |
| } |
| |
| /** |
| * cgroup_file_mode - deduce file mode of a control file |
| * @cft: the control file in question |
| * |
| * returns cft->mode if ->mode is not 0 |
| * returns S_IRUGO|S_IWUSR if it has both a read and a write handler |
| * returns S_IRUGO if it has only a read handler |
| * returns S_IWUSR if it has only a write handler |
| */ |
| static umode_t cgroup_file_mode(const struct cftype *cft) |
| { |
| umode_t mode = 0; |
| |
| if (cft->mode) |
| return cft->mode; |
| |
| if (cft->read_u64 || cft->read_s64 || cft->seq_show) |
| mode |= S_IRUGO; |
| |
| if (cft->write_u64 || cft->write_s64 || cft->write_string || |
| cft->trigger) |
| mode |= S_IWUSR; |
| |
| return mode; |
| } |
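| |
| /* |
| * Illustrative sketch (editorial addition): with ->mode left at 0, |
| * cgroup_file_mode() deduces S_IWUSR for the write-only control file |
| * below because it only has a ->trigger handler. All names are |
| * hypothetical. |
| */ |
| static int example_reset_trigger(struct cgroup_subsys_state *css, |
| unsigned int event) |
| { |
| /* reset some hypothetical per-css state here */ |
| return 0; |
| } |
| |
| static struct cftype example_reset_cft = { |
| .name = "example.reset", |
| .trigger = example_reset_trigger, /* deduced mode: S_IWUSR */ |
| }; |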
| |
| static int cgroup_add_file(struct cgroup *cgrp, struct cftype *cft) |
| { |
| struct dentry *dir = cgrp->dentry; |
| struct cgroup *parent = __d_cgrp(dir); |
| struct dentry *dentry; |
| struct cfent *cfe; |
| int error; |
| umode_t mode; |
| char name[MAX_CGROUP_TYPE_NAMELEN + MAX_CFTYPE_NAME + 2] = { 0 }; |
| |
| if (cft->ss && !(cft->flags & CFTYPE_NO_PREFIX) && |
| !(cgrp->root->flags & CGRP_ROOT_NOPREFIX)) { |
| strcpy(name, cft->ss->name); |
| strcat(name, "."); |
| } |
| strcat(name, cft->name); |
| |
| BUG_ON(!mutex_is_locked(&dir->d_inode->i_mutex)); |
| |
| cfe = kzalloc(sizeof(*cfe), GFP_KERNEL); |
| if (!cfe) |
| return -ENOMEM; |
| |
| dentry = lookup_one_len(name, dir, strlen(name)); |
| if (IS_ERR(dentry)) { |
| error = PTR_ERR(dentry); |
| goto out; |
| } |
| |
| cfe->type = (void *)cft; |
| cfe->dentry = dentry; |
| dentry->d_fsdata = cfe; |
| simple_xattrs_init(&cfe->xattrs); |
| |
| mode = cgroup_file_mode(cft); |
| error = cgroup_create_file(dentry, mode | S_IFREG, cgrp->root->sb); |
| if (!error) { |
| list_add_tail(&cfe->node, &parent->files); |
| cfe = NULL; |
| } |
| dput(dentry); |
| out: |
| kfree(cfe); |
| return error; |
| } |
| |
| /** |
| * cgroup_addrm_files - add or remove files to a cgroup directory |
| * @cgrp: the target cgroup |
| * @cfts: array of cftypes to be added |
| * @is_add: whether to add or remove |
| * |
| * Depending on @is_add, add or remove files defined by @cfts on @cgrp. |
| * For removals, this function never fails. If addition fails, this |
| * function doesn't remove files already added. The caller is responsible |
| * for cleaning up. |
| */ |
| static int cgroup_addrm_files(struct cgroup *cgrp, struct cftype cfts[], |
| bool is_add) |
| { |
| struct cftype *cft; |
| int ret; |
| |
| lockdep_assert_held(&cgrp->dentry->d_inode->i_mutex); |
| lockdep_assert_held(&cgroup_mutex); |
| |
| for (cft = cfts; cft->name[0] != '\0'; cft++) { |
| /* does cft->flags tell us to skip this file on @cgrp? */ |
| if ((cft->flags & CFTYPE_INSANE) && cgroup_sane_behavior(cgrp)) |
| continue; |
| if ((cft->flags & CFTYPE_NOT_ON_ROOT) && !cgrp->parent) |
| continue; |
| if ((cft->flags & CFTYPE_ONLY_ON_ROOT) && cgrp->parent) |
| continue; |
| |
| if (is_add) { |
| ret = cgroup_add_file(cgrp, cft); |
| if (ret) { |
| pr_warn("cgroup_addrm_files: failed to add %s, err=%d\n", |
| cft->name, ret); |
| return ret; |
| } |
| } else { |
| cgroup_rm_file(cgrp, cft); |
| } |
| } |
| return 0; |
| } |
| |
| static void cgroup_cfts_prepare(void) |
| __acquires(&cgroup_mutex) |
| { |
| /* |
| * Thanks to the entanglement with vfs inode locking, we can't walk |
| * the existing cgroups under cgroup_mutex and create files. |
| * Instead, we use css_for_each_descendant_pre() and drop RCU read |
| * lock before calling cgroup_addrm_files(). |
| */ |
| mutex_lock(&cgroup_mutex); |
| } |
| |
| static int cgroup_cfts_commit(struct cftype *cfts, bool is_add) |
| __releases(&cgroup_mutex) |
| { |
| LIST_HEAD(pending); |
| struct cgroup_subsys *ss = cfts[0].ss; |
| struct cgroup *root = &ss->root->top_cgroup; |
| struct super_block *sb = ss->root->sb; |
| struct dentry *prev = NULL; |
| struct inode *inode; |
| struct cgroup_subsys_state *css; |
| u64 update_before; |
| int ret = 0; |
| |
| /* %NULL @cfts indicates abort and don't bother if @ss isn't attached */ |
| if (!cfts || ss->root == &cgroup_dummy_root || |
| !atomic_inc_not_zero(&sb->s_active)) { |
| mutex_unlock(&cgroup_mutex); |
| return 0; |
| } |
| |
| /* |
| * All cgroups which are created after we drop cgroup_mutex will |
| * have the updated set of files, so we only need to update the |
| * cgroups created before the current @cgroup_serial_nr_next. |
| */ |
| update_before = cgroup_serial_nr_next; |
| |
| /* add/rm files for all cgroups created before */ |
| css_for_each_descendant_pre(css, cgroup_css(root, ss)) { |
| struct cgroup *cgrp = css->cgroup; |
| |
| if (cgroup_is_dead(cgrp)) |
| continue; |
| |
| inode = cgrp->dentry->d_inode; |
| dget(cgrp->dentry); |
| dput(prev); |
| prev = cgrp->dentry; |
| |
| mutex_unlock(&cgroup_mutex); |
| mutex_lock(&inode->i_mutex); |
| mutex_lock(&cgroup_mutex); |
| if (cgrp->serial_nr < update_before && !cgroup_is_dead(cgrp)) |
| ret = cgroup_addrm_files(cgrp, cfts, is_add); |
| mutex_unlock(&inode->i_mutex); |
| if (ret) |
| break; |
| } |
| mutex_unlock(&cgroup_mutex); |
| dput(prev); |
| deactivate_super(sb); |
| return ret; |
| } |
| |
| /** |
| * cgroup_add_cftypes - add an array of cftypes to a subsystem |
| * @ss: target cgroup subsystem |
| * @cfts: zero-length name terminated array of cftypes |
| * |
| * Register @cfts to @ss. Files described by @cfts are created for all |
| * existing cgroups to which @ss is attached and all future cgroups will |
| * have them too. This function can be called anytime whether @ss is |
| * attached or not. |
| * |
| * Returns 0 on successful registration, -errno on failure. If file |
| * creation fails on an existing cgroup, the partial registration is |
| * reverted via cgroup_rm_cftypes() and the error is returned. |
| */ |
| int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts) |
| { |
| struct cftype_set *set; |
| struct cftype *cft; |
| int ret; |
| |
| set = kzalloc(sizeof(*set), GFP_KERNEL); |
| if (!set) |
| return -ENOMEM; |
| |
| for (cft = cfts; cft->name[0] != '\0'; cft++) |
| cft->ss = ss; |
| |
| cgroup_cfts_prepare(); |
| set->cfts = cfts; |
| list_add_tail(&set->node, &ss->cftsets); |
| ret = cgroup_cfts_commit(cfts, true); |
| if (ret) |
| cgroup_rm_cftypes(cfts); |
| return ret; |
| } |
| EXPORT_SYMBOL_GPL(cgroup_add_cftypes); |
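| |
| /* |
| * Illustrative sketch (editorial addition): registering a cftype |
| * array. example_subsys is a hypothetical controller and the handlers |
| * are those sketched after cgroup_file_write(); note the zero-length |
| * name terminator required by cgroup_add_cftypes(). |
| */ |
| extern struct cgroup_subsys example_subsys; /* hypothetical */ |
| |
| static struct cftype example_cfts[] = { |
| { |
| .name = "example.limit", |
| .read_u64 = example_read_limit, |
| .write_u64 = example_write_limit, |
| }, |
| { } /* terminator */ |
| }; |
| |
| static int __init example_register_cfts(void) |
| { |
| return cgroup_add_cftypes(&example_subsys, example_cfts); |
| } |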
| |
| /** |
| * cgroup_rm_cftypes - remove an array of cftypes from a subsystem |
| * @cfts: zero-length name terminated array of cftypes |
| * |
| * Unregister @cfts. Files described by @cfts are removed from all |
| * existing cgroups and all future cgroups won't have them either. This |
| * function can be called anytime whether @cfts' subsys is attached or not. |
| * |
| * Returns 0 on successful unregistration, -ENOENT if @cfts is not |
| * registered. |
| */ |
| int cgroup_rm_cftypes(struct cftype *cfts) |
| { |
| struct cftype_set *set; |
| |
| if (!cfts || !cfts[0].ss) |
| return -ENOENT; |
| |
| cgroup_cfts_prepare(); |
| |
| list_for_each_entry(set, &cfts[0].ss->cftsets, node) { |
| if (set->cfts == cfts) { |
| list_del(&set->node); |
| kfree(set); |
| cgroup_cfts_commit(cfts, false); |
| return 0; |
| } |
| } |
| |
| cgroup_cfts_commit(NULL, false); |
| return -ENOENT; |
| } |
| |
| /** |
| * cgroup_task_count - count the number of tasks in a cgroup. |
| * @cgrp: the cgroup in question |
| * |
| * Return the number of tasks in the cgroup. |
| */ |
| int cgroup_task_count(const struct cgroup *cgrp) |
| { |
| int count = 0; |
| struct cgrp_cset_link *link; |
| |
| read_lock(&css_set_lock); |
| list_for_each_entry(link, &cgrp->cset_links, cset_link) |
| count += atomic_read(&link->cset->refcount); |
| read_unlock(&css_set_lock); |
| return count; |
| } |
| |
| /* |
| * To reduce the fork() overhead for systems that are not actually using |
| * their cgroups capability, we don't maintain the lists running through |
| * each css_set to its tasks until we see the list actually used - in other |
| * words after the first call to css_task_iter_start(). |
| */ |
| static void cgroup_enable_task_cg_lists(void) |
| { |
| struct task_struct *p, *g; |
| write_lock(&css_set_lock); |
| use_task_css_set_links = 1; |
| /* |
| * We need tasklist_lock because RCU is not safe against |
| * while_each_thread(). Besides, a forking task that has passed |
| * cgroup_post_fork() without seeing use_task_css_set_links = 1 |
| * is not guaranteed to have its child immediately visible in the |
| * tasklist if we walk through it with RCU. |
| */ |
| read_lock(&tasklist_lock); |
| do_each_thread(g, p) { |
| task_lock(p); |
| /* |
| * We must check whether the process is exiting; otherwise |
| * we would race with cgroup_exit() and the list entry |
| * could remain linked even though the process has exited. |
| * Holding siglock closes that window. |
| */ |
| spin_lock_irq(&p->sighand->siglock); |
| if (!(p->flags & PF_EXITING) && list_empty(&p->cg_list)) |
| list_add(&p->cg_list, &task_css_set(p)->tasks); |
| spin_unlock_irq(&p->sighand->siglock); |
| |
| task_unlock(p); |
| } while_each_thread(g, p); |
| read_unlock(&tasklist_lock); |
| write_unlock(&css_set_lock); |
| } |
| |
| /** |
| * css_next_child - find the next child of a given css |
| * @pos_css: the current position (%NULL to initiate traversal) |
| * @parent_css: css whose children to walk |
| * |
| * This function returns the next child of @parent_css and should be called |
| * under either cgroup_mutex or RCU read lock. The only requirement is |
| * that @parent_css and @pos_css are accessible. The next sibling is |
| * guaranteed to be returned regardless of their states. |
| */ |
| struct cgroup_subsys_state * |
| css_next_child(struct cgroup_subsys_state *pos_css, |
| struct cgroup_subsys_state *parent_css) |
| { |
| struct cgroup *pos = pos_css ? pos_css->cgroup : NULL; |
| struct cgroup *cgrp = parent_css->cgroup; |
| struct cgroup *next; |
| |
| cgroup_assert_mutex_or_rcu_locked(); |
| |
| /* |
| * @pos could already have been removed. Once a cgroup is removed, |
| * its ->sibling.next is no longer updated when its next sibling |
| * changes. As CGRP_DEAD assertion is serialized and happens |
| * before the cgroup is taken off the ->sibling list, if we see it |
| * unasserted, it's guaranteed that the next sibling hasn't |
| * finished its grace period even if it's already removed, and thus |
| * safe to dereference from this RCU critical section. If |
| * ->sibling.next is inaccessible, cgroup_is_dead() is guaranteed |
| * to be visible as %true here. |
| * |
| * If @pos is dead, its next pointer can't be dereferenced; |
| * however, as each cgroup is given a monotonically increasing |
| * unique serial number and always appended to the sibling list, |
| * the next one can be found by walking the parent's children until |
| * we see a cgroup with higher serial number than @pos's. While |
| * this path can be slower, it's taken only when either the current |
| * cgroup is removed or iteration and removal race. |
| */ |
| if (!pos) { |
| next = list_entry_rcu(cgrp->children.next, struct cgroup, sibling); |
| } else if (likely(!cgroup_is_dead(pos))) { |
| next = list_entry_rcu(pos->sibling.next, struct cgroup, sibling); |
| } else { |
| list_for_each_entry_rcu(next, &cgrp->children, sibling) |
| if (next->serial_nr > pos->serial_nr) |
| break; |
| } |
| |
| if (&next->sibling == &cgrp->children) |
| return NULL; |
| |
| return cgroup_css(next, parent_css->ss); |
| } |
| EXPORT_SYMBOL_GPL(css_next_child); |
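| |
| /* |
| * Illustrative sketch (editorial addition): counting the children of |
| * a css with the css_for_each_child() wrapper around css_next_child(). |
| * example_count_children() is hypothetical. |
| */ |
| static int example_count_children(struct cgroup_subsys_state *parent_css) |
| { |
| struct cgroup_subsys_state *child; |
| int n = 0; |
| |
| rcu_read_lock(); |
| css_for_each_child(child, parent_css) |
| n++; |
| rcu_read_unlock(); |
| return n; |
| } |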
| |
| /** |
| * css_next_descendant_pre - find the next descendant for pre-order walk |
| * @pos: the current position (%NULL to initiate traversal) |
| * @root: css whose descendants to walk |
| * |
| * To be used by css_for_each_descendant_pre(). Find the next descendant |
| * to visit for pre-order traversal of @root's descendants. @root is |
| * included in the iteration and the first node to be visited. |
| * |
| * While this function requires cgroup_mutex or RCU read locking, it |
| * doesn't require the whole traversal to be contained in a single critical |
| * section. This function will return the correct next descendant as long |
| * as both @pos and @root are accessible and @pos is a descendant of @root. |
| */ |
| struct cgroup_subsys_state * |
| css_next_descendant_pre(struct cgroup_subsys_state *pos, |
| struct cgroup_subsys_state *root) |
| { |
| struct cgroup_subsys_state *next; |
| |
| cgroup_assert_mutex_or_rcu_locked(); |
| |
| /* if first iteration, visit @root */ |
| if (!pos) |
| return root; |
| |
| /* visit the first child if exists */ |
| next = css_next_child(NULL, pos); |
| if (next) |
| return next; |
| |
| /* no child, visit my or the closest ancestor's next sibling */ |
| while (pos != root) { |
| next = css_next_child(pos, css_parent(pos)); |
| if (next) |
| return next; |
| pos = css_parent(pos); |
| } |
| |
| return NULL; |
| } |
| EXPORT_SYMBOL_GPL(css_next_descendant_pre); |
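| |
| /* |
| * Illustrative sketch (editorial addition): a pre-order subtree walk |
| * under the RCU read lock. Parents are always visited before their |
| * children; the pr_info() body is only a placeholder. |
| */ |
| static void example_walk_subtree_pre(struct cgroup_subsys_state *root_css) |
| { |
| struct cgroup_subsys_state *pos; |
| |
| rcu_read_lock(); |
| css_for_each_descendant_pre(pos, root_css) |
| pr_info("visiting %s\n", cgroup_name(pos->cgroup)); |
| rcu_read_unlock(); |
| } |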
| |
| /** |
| * css_rightmost_descendant - return the rightmost descendant of a css |
| * @pos: css of interest |
| * |
| * Return the rightmost descendant of @pos. If there's no descendant, @pos |
| * is returned. This can be used during pre-order traversal to skip |
| * subtree of @pos. |
| * |
| * While this function requires cgroup_mutex or RCU read locking, it |
| * doesn't require the whole traversal to be contained in a single critical |
| * section. This function will return the correct rightmost descendant as |
| * long as @pos is accessible. |
| */ |
| struct cgroup_subsys_state * |
| css_rightmost_descendant(struct cgroup_subsys_state *pos) |
| { |
| struct cgroup_subsys_state *last, *tmp; |
| |
| cgroup_assert_mutex_or_rcu_locked(); |
| |
| do { |
| last = pos; |
| /* ->prev isn't RCU safe, walk ->next till the end */ |
| pos = NULL; |
| css_for_each_child(tmp, last) |
| pos = tmp; |
| } while (pos); |
| |
| return last; |
| } |
| EXPORT_SYMBOL_GPL(css_rightmost_descendant); |
| |
| static struct cgroup_subsys_state * |
| css_leftmost_descendant(struct cgroup_subsys_state *pos) |
| { |
| struct cgroup_subsys_state *last; |
| |
| do { |
| last = pos; |
| pos = css_next_child(NULL, pos); |
| } while (pos); |
| |
| return last; |
| } |
| |
| /** |
| * css_next_descendant_post - find the next descendant for post-order walk |
| * @pos: the current position (%NULL to initiate traversal) |
| * @root: css whose descendants to walk |
| * |
| * To be used by css_for_each_descendant_post(). Find the next descendant |
| * to visit for post-order traversal of @root's descendants. @root is |
| * included in the iteration and the last node to be visited. |
| * |
| * While this function requires cgroup_mutex or RCU read locking, it |
| * doesn't require the whole traversal to be contained in a single critical |
| * section. This function will return the correct next descendant as long |
| * as both @pos and @root are accessible and @pos is a descendant of |
| * @root. |
| */ |
| struct cgroup_subsys_state * |
| css_next_descendant_post(struct cgroup_subsys_state *pos, |
| struct cgroup_subsys_state *root) |
| { |
| struct cgroup_subsys_state *next; |
| |
| cgroup_assert_mutex_or_rcu_locked(); |
| |
| /* if first iteration, visit leftmost descendant which may be @root */ |
| if (!pos) |
| return css_leftmost_descendant(root); |
| |
| /* if we visited @root, we're done */ |
| if (pos == root) |
| return NULL; |
| |
| /* if there's an unvisited sibling, visit its leftmost descendant */ |
| next = css_next_child(pos, css_parent(pos)); |
| if (next) |
| return css_leftmost_descendant(next); |
| |
| /* no sibling left, visit parent */ |
| return css_parent(pos); |
| } |
| EXPORT_SYMBOL_GPL(css_next_descendant_post); |
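| |
| /* |
| * Illustrative sketch (editorial addition): the post-order variant, |
| * which visits children before their parents and therefore suits |
| * teardown-style processing. The pr_info() body is a placeholder. |
| */ |
| static void example_walk_subtree_post(struct cgroup_subsys_state *root_css) |
| { |
| struct cgroup_subsys_state *pos; |
| |
| rcu_read_lock(); |
| css_for_each_descendant_post(pos, root_css) |
| pr_info("visited %s\n", cgroup_name(pos->cgroup)); |
| rcu_read_unlock(); |
| } |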
| |
| /** |
| * css_advance_task_iter - advance a task iterator to the next css_set |
| * @it: the iterator to advance |
| * |
| * Advance @it to the next css_set to walk. |
| */ |
| static void css_advance_task_iter(struct css_task_iter *it) |
| { |
| struct list_head *l = it->cset_link; |
| struct cgrp_cset_link *link; |
| struct css_set *cset; |
| |
| /* Advance to the next non-empty css_set */ |
| do { |
| l = l->next; |
| if (l == &it->origin_css->cgroup->cset_links) { |
| it->cset_link = NULL; |
| return; |
| } |
| link = list_entry(l, struct cgrp_cset_link, cset_link); |
| cset = link->cset; |
| } while (list_empty(&cset->tasks)); |
| it->cset_link = l; |
| it->task = cset->tasks.next; |
| } |
| |
| /** |
| * css_task_iter_start - initiate task iteration |
| * @css: the css to walk tasks of |
| * @it: the task iterator to use |
| * |
| * Initiate iteration through the tasks of @css. The caller can call |
| * css_task_iter_next() to walk through the tasks until the function |
| * returns NULL. On completion of iteration, css_task_iter_end() must be |
| * called. |
| * |
| * Note that this function acquires a lock which is released when the |
| * iteration finishes. The caller can't sleep while iteration is in |
| * progress. |
| */ |
| void css_task_iter_start(struct cgroup_subsys_state *css, |
| struct css_task_iter *it) |
| __acquires(css_set_lock) |
| { |
| /* |
| * The first time anyone tries to iterate across a css, we need to |
| * enable the list linking each css_set to its tasks, and fix up |
| * all existing tasks. |
| */ |
| if (!use_task_css_set_links) |
| cgroup_enable_task_cg_lists(); |
| |
| read_lock(&css_set_lock); |
| |
| it->origin_css = css; |
| it->cset_link = &css->cgroup->cset_links; |
| |
| css_advance_task_iter(it); |
| } |
| |
| /** |
| * css_task_iter_next - return the next task for the iterator |
| * @it: the task iterator being iterated |
| * |
| * The "next" function for task iteration. @it should have been |
| * initialized via css_task_iter_start(). Returns NULL when the iteration |
| * reaches the end. |
| */ |
| struct task_struct *css_task_iter_next(struct css_task_iter *it) |
| { |
| struct task_struct *res; |
| struct list_head *l = it->task; |
| struct cgrp_cset_link *link; |
| |
| /* If the iterator cg is NULL, we have no tasks */ |
| if (!it->cset_link) |
| return NULL; |
| res = list_entry(l, struct task_struct, cg_list); |
| /* Advance iterator to find next entry */ |
| l = l->next; |
| link = list_entry(it->cset_link, struct cgrp_cset_link, cset_link); |
| if (l == &link->cset->tasks) { |
| /* |
| * We reached the end of this task list - move on to the |
| * next cgrp_cset_link. |
| */ |
| css_advance_task_iter(it); |
| } else { |
| it->task = l; |
| } |
| return res; |
| } |
| |
| /** |
| * css_task_iter_end - finish task iteration |
| * @it: the task iterator to finish |
| * |
| * Finish task iteration started by css_task_iter_start(). |
| */ |
| void css_task_iter_end(struct css_task_iter *it) |
| __releases(css_set_lock) |
| { |
| read_unlock(&css_set_lock); |
| } |
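| |
| /* |
| * Illustrative sketch (editorial addition): counting the tasks of a |
| * css with the iterator triple above. Note that css_set_lock is |
| * read-held for the whole walk, so the loop body must not sleep. |
| */ |
| static int example_count_css_tasks(struct cgroup_subsys_state *css) |
| { |
| struct css_task_iter it; |
| struct task_struct *task; |
| int count = 0; |
| |
| css_task_iter_start(css, &it); |
| while ((task = css_task_iter_next(&it))) |
| count++; |
| css_task_iter_end(&it); |
| return count; |
| } |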
| |
| static inline int started_after_time(struct task_struct *t1, |
| struct timespec *time, |
| struct task_struct *t2) |
| { |
| int start_diff = timespec_compare(&t1->start_time, time); |
| if (start_diff > 0) { |
| return 1; |
| } |