| /* |
| * Generic process-grouping system. |
| * |
| * Based originally on the cpuset system, extracted by Paul Menage |
| * Copyright (C) 2006 Google, Inc |
| * |
| * Notifications support |
| * Copyright (C) 2009 Nokia Corporation |
| * Author: Kirill A. Shutemov |
| * |
| * Copyright notices from the original cpuset code: |
| * -------------------------------------------------- |
| * Copyright (C) 2003 BULL SA. |
| * Copyright (C) 2004-2006 Silicon Graphics, Inc. |
| * |
| * Portions derived from Patrick Mochel's sysfs code. |
| * sysfs is Copyright (c) 2001-3 Patrick Mochel |
| * |
| * 2003-10-10 Written by Simon Derr. |
| * 2003-10-22 Updates by Stephen Hemminger. |
| * 2004 May-July Rework by Paul Jackson. |
| * --------------------------------------------------- |
| * |
| * This file is subject to the terms and conditions of the GNU General Public |
| * License. See the file COPYING in the main directory of the Linux |
| * distribution for more details. |
| */ |
| |
| #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
| |
| #include <linux/cgroup.h> |
| #include <linux/cred.h> |
| #include <linux/ctype.h> |
| #include <linux/errno.h> |
| #include <linux/init_task.h> |
| #include <linux/kernel.h> |
| #include <linux/list.h> |
| #include <linux/magic.h> |
| #include <linux/mm.h> |
| #include <linux/mutex.h> |
| #include <linux/mount.h> |
| #include <linux/pagemap.h> |
| #include <linux/proc_fs.h> |
| #include <linux/rcupdate.h> |
| #include <linux/sched.h> |
| #include <linux/slab.h> |
| #include <linux/spinlock.h> |
| #include <linux/rwsem.h> |
| #include <linux/string.h> |
| #include <linux/sort.h> |
| #include <linux/kmod.h> |
| #include <linux/delayacct.h> |
| #include <linux/cgroupstats.h> |
| #include <linux/hashtable.h> |
| #include <linux/pid_namespace.h> |
| #include <linux/idr.h> |
| #include <linux/vmalloc.h> /* TODO: replace with more sophisticated array */ |
| #include <linux/kthread.h> |
| #include <linux/delay.h> |
| |
| #include <linux/atomic.h> |
| |
| /* |
| * pidlists linger the following amount before being destroyed. The goal |
| * is avoiding frequent destruction in the middle of consecutive read calls. |
| * Expiring in the middle is a performance problem not a correctness one. |
| * 1 sec should be enough. |
| */ |
| #define CGROUP_PIDLIST_DESTROY_DELAY HZ |
| |
| #define CGROUP_FILE_NAME_MAX (MAX_CGROUP_TYPE_NAMELEN + \ |
| MAX_CFTYPE_NAME + 2) |
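| /* |
| * The two extra bytes in CGROUP_FILE_NAME_MAX account for the '.' |
| * separator and the terminating NUL of "<subsys name>.<cftype name>". |
| */ |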
| |
| /* |
| * cgroup_mutex is the master lock. Any modification to cgroup or its |
| * hierarchy must be performed while holding it. |
| * |
| * css_set_rwsem protects task->cgroups pointer, the list of css_set |
| * objects, and the chain of tasks off each css_set. |
| * |
| * These locks are exported if CONFIG_PROVE_RCU so that accessors in |
| * cgroup.h can use them for lockdep annotations. |
| */ |
| #ifdef CONFIG_PROVE_RCU |
| DEFINE_MUTEX(cgroup_mutex); |
| DECLARE_RWSEM(css_set_rwsem); |
| EXPORT_SYMBOL_GPL(cgroup_mutex); |
| EXPORT_SYMBOL_GPL(css_set_rwsem); |
| #else |
| static DEFINE_MUTEX(cgroup_mutex); |
| static DECLARE_RWSEM(css_set_rwsem); |
| #endif |
| |
| /* |
| * Protects cgroup_idr and css_idr so that IDs can be released without |
| * grabbing cgroup_mutex. |
| */ |
| static DEFINE_SPINLOCK(cgroup_idr_lock); |
| |
| /* |
| * Protects cgroup_root->release_agent_path. Modifying it also requires |
| * cgroup_mutex. Reading requires either cgroup_mutex or this spinlock. |
| */ |
| static DEFINE_SPINLOCK(release_agent_path_lock); |
| |
| #define cgroup_assert_mutex_or_rcu_locked() \ |
| RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \ |
| !lockdep_is_held(&cgroup_mutex), \ |
| "cgroup_mutex or RCU read lock required"); |
| |
| /* |
| * cgroup destruction makes heavy use of work items and there can be a lot |
| * of concurrent destructions. Use a separate workqueue so that cgroup |
| * destruction work items don't end up filling up max_active of system_wq |
| * which may lead to deadlock. |
| */ |
| static struct workqueue_struct *cgroup_destroy_wq; |
| |
| /* |
| * pidlist destructions need to be flushed on cgroup destruction. Use a |
| * separate workqueue as flush domain. |
| */ |
| static struct workqueue_struct *cgroup_pidlist_destroy_wq; |
| |
| /* generate an array of cgroup subsystem pointers */ |
| #define SUBSYS(_x) [_x ## _cgrp_id] = &_x ## _cgrp_subsys, |
| static struct cgroup_subsys *cgroup_subsys[] = { |
| #include <linux/cgroup_subsys.h> |
| }; |
| #undef SUBSYS |
| |
| /* array of cgroup subsystem names */ |
| #define SUBSYS(_x) [_x ## _cgrp_id] = #_x, |
| static const char *cgroup_subsys_name[] = { |
| #include <linux/cgroup_subsys.h> |
| }; |
| #undef SUBSYS |
| |
| /* |
| * The default hierarchy, reserved for the subsystems that are otherwise |
| * unattached - it never has more than a single cgroup, and all tasks are |
| * part of that cgroup. |
| */ |
| struct cgroup_root cgrp_dfl_root; |
| EXPORT_SYMBOL_GPL(cgrp_dfl_root); |
| |
| /* |
| * The default hierarchy always exists but is hidden until mounted for the |
| * first time. This is for backward compatibility. |
| */ |
| static bool cgrp_dfl_root_visible; |
| |
| /* |
| * Set by the boot param of the same name and makes subsystems with NULL |
| * ->dfl_files use ->legacy_files on the default hierarchy. |
| */ |
| static bool cgroup_legacy_files_on_dfl; |
| |
| /* some controllers are not supported in the default hierarchy */ |
| static unsigned long cgrp_dfl_root_inhibit_ss_mask; |
| |
| /* The list of hierarchy roots */ |
| |
| static LIST_HEAD(cgroup_roots); |
| static int cgroup_root_count; |
| |
| /* hierarchy ID allocation and mapping, protected by cgroup_mutex */ |
| static DEFINE_IDR(cgroup_hierarchy_idr); |
| |
| /* |
| * Assign a monotonically increasing serial number to csses. It guarantees |
| * cgroups with bigger numbers are newer than those with smaller numbers. |
| * Also, as csses are always appended to the parent's ->children list, it |
| * guarantees that sibling csses are always sorted in the ascending serial |
| * number order on the list. Protected by cgroup_mutex. |
| */ |
| static u64 css_serial_nr_next = 1; |
| |
| /* |
| * These bitmask flags indicate whether tasks in the fork and exit paths have |
| * fork/exit handlers to call. This avoids us having to do extra work in the |
| * fork/exit path to check which subsystems have fork/exit callbacks. |
| */ |
| static unsigned long have_fork_callback __read_mostly; |
| static unsigned long have_exit_callback __read_mostly; |
| |
| /* Ditto for the can_fork callback. */ |
| static unsigned long have_canfork_callback __read_mostly; |
| |
| static struct cftype cgroup_dfl_base_files[]; |
| static struct cftype cgroup_legacy_base_files[]; |
| |
| static int rebind_subsystems(struct cgroup_root *dst_root, |
| unsigned long ss_mask); |
| static int cgroup_destroy_locked(struct cgroup *cgrp); |
| static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss, |
| bool visible); |
| static void css_release(struct percpu_ref *ref); |
| static void kill_css(struct cgroup_subsys_state *css); |
| static int cgroup_addrm_files(struct cgroup *cgrp, struct cftype cfts[], |
| bool is_add); |
| |
| /* IDR wrappers which synchronize using cgroup_idr_lock */ |
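| /* |
| * Note: cgroup_idr_alloc() charges @gfp_mask up front via idr_preload(), |
| * so the idr_alloc() call itself runs with __GFP_WAIT cleared and cannot |
| * sleep while cgroup_idr_lock is held with bottom halves disabled. |
| */ |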
| static int cgroup_idr_alloc(struct idr *idr, void *ptr, int start, int end, |
| gfp_t gfp_mask) |
| { |
| int ret; |
| |
| idr_preload(gfp_mask); |
| spin_lock_bh(&cgroup_idr_lock); |
| ret = idr_alloc(idr, ptr, start, end, gfp_mask & ~__GFP_WAIT); |
| spin_unlock_bh(&cgroup_idr_lock); |
| idr_preload_end(); |
| return ret; |
| } |
| |
| static void *cgroup_idr_replace(struct idr *idr, void *ptr, int id) |
| { |
| void *ret; |
| |
| spin_lock_bh(&cgroup_idr_lock); |
| ret = idr_replace(idr, ptr, id); |
| spin_unlock_bh(&cgroup_idr_lock); |
| return ret; |
| } |
| |
| static void cgroup_idr_remove(struct idr *idr, int id) |
| { |
| spin_lock_bh(&cgroup_idr_lock); |
| idr_remove(idr, id); |
| spin_unlock_bh(&cgroup_idr_lock); |
| } |
| |
| static struct cgroup *cgroup_parent(struct cgroup *cgrp) |
| { |
| struct cgroup_subsys_state *parent_css = cgrp->self.parent; |
| |
| if (parent_css) |
| return container_of(parent_css, struct cgroup, self); |
| return NULL; |
| } |
| |
| /** |
| * cgroup_css - obtain a cgroup's css for the specified subsystem |
| * @cgrp: the cgroup of interest |
| * @ss: the subsystem of interest (%NULL returns @cgrp->self) |
| * |
| * Return @cgrp's css (cgroup_subsys_state) associated with @ss. This |
| * function must be called either under cgroup_mutex or rcu_read_lock() and |
| * the caller is responsible for pinning the returned css if it wants to |
| * keep accessing it outside the said locks. This function may return |
| * %NULL if @cgrp doesn't have @ss enabled. |
| */ |
| static struct cgroup_subsys_state *cgroup_css(struct cgroup *cgrp, |
| struct cgroup_subsys *ss) |
| { |
| if (ss) |
| return rcu_dereference_check(cgrp->subsys[ss->id], |
| lockdep_is_held(&cgroup_mutex)); |
| else |
| return &cgrp->self; |
| } |
| |
| /** |
| * cgroup_e_css - obtain a cgroup's effective css for the specified subsystem |
| * @cgrp: the cgroup of interest |
| * @ss: the subsystem of interest (%NULL returns @cgrp->self) |
| * |
| * Similar to cgroup_css() but returns the effective css, which is defined |
| * as the matching css of the nearest ancestor including self which has @ss |
| * enabled. If @ss is associated with the hierarchy @cgrp is on, this |
| * function is guaranteed to return non-NULL css. |
| */ |
| static struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgrp, |
| struct cgroup_subsys *ss) |
| { |
| lockdep_assert_held(&cgroup_mutex); |
| |
| if (!ss) |
| return &cgrp->self; |
| |
| if (!(cgrp->root->subsys_mask & (1 << ss->id))) |
| return NULL; |
| |
| /* |
| * This function is used while updating css associations and thus |
| * can't test the csses directly. Use ->child_subsys_mask. |
| */ |
| while (cgroup_parent(cgrp) && |
| !(cgroup_parent(cgrp)->child_subsys_mask & (1 << ss->id))) |
| cgrp = cgroup_parent(cgrp); |
| |
| return cgroup_css(cgrp, ss); |
| } |
| |
| /** |
| * cgroup_get_e_css - get a cgroup's effective css for the specified subsystem |
| * @cgrp: the cgroup of interest |
| * @ss: the subsystem of interest |
| * |
| * Find and get the effective css of @cgrp for @ss. The effective css is |
| * defined as the matching css of the nearest ancestor including self which |
| * has @ss enabled. If @ss is not mounted on the hierarchy @cgrp is on, |
| * the root css is returned, so this function always returns a valid css. |
| * The returned css must be put using css_put(). |
| */ |
| struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgrp, |
| struct cgroup_subsys *ss) |
| { |
| struct cgroup_subsys_state *css; |
| |
| rcu_read_lock(); |
| |
| do { |
| css = cgroup_css(cgrp, ss); |
| |
| if (css && css_tryget_online(css)) |
| goto out_unlock; |
| cgrp = cgroup_parent(cgrp); |
| } while (cgrp); |
| |
| css = init_css_set.subsys[ss->id]; |
| css_get(css); |
| out_unlock: |
| rcu_read_unlock(); |
| return css; |
| } |
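| |
| /* |
| * Usage sketch (illustrative only, not taken from this file): a caller |
| * that wants the effective css of, say, the memory controller pairs the |
| * lookup with css_put(): |
| * |
| *	struct cgroup_subsys_state *css; |
| * |
| *	css = cgroup_get_e_css(cgrp, &memory_cgrp_subsys); |
| *	... use css ... |
| *	css_put(css); |
| */ |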
| |
| /* convenient tests for these bits */ |
| static inline bool cgroup_is_dead(const struct cgroup *cgrp) |
| { |
| return !(cgrp->self.flags & CSS_ONLINE); |
| } |
| |
| struct cgroup_subsys_state *of_css(struct kernfs_open_file *of) |
| { |
| struct cgroup *cgrp = of->kn->parent->priv; |
| struct cftype *cft = of_cft(of); |
| |
| /* |
| * This is an open and unprotected implementation of cgroup_css(). |
| * of_css() is only called from a kernfs file operation which has |
| * an active reference on the file. Because all the subsystem |
| * files are drained before a css is disassociated from a cgroup, |
| * the matching css from the cgroup's subsys table is guaranteed to |
| * be and stay valid until the enclosing operation is complete. |
| */ |
| if (cft->ss) |
| return rcu_dereference_raw(cgrp->subsys[cft->ss->id]); |
| else |
| return &cgrp->self; |
| } |
| EXPORT_SYMBOL_GPL(of_css); |
| |
| /** |
| * cgroup_is_descendant - test ancestry |
| * @cgrp: the cgroup to be tested |
| * @ancestor: possible ancestor of @cgrp |
| * |
| * Test whether @cgrp is a descendant of @ancestor. It also returns %true |
| * if @cgrp == @ancestor. This function is safe to call as long as @cgrp |
| * and @ancestor are accessible. |
| */ |
| bool cgroup_is_descendant(struct cgroup *cgrp, struct cgroup *ancestor) |
| { |
| while (cgrp) { |
| if (cgrp == ancestor) |
| return true; |
| cgrp = cgroup_parent(cgrp); |
| } |
| return false; |
| } |
| |
| static int notify_on_release(const struct cgroup *cgrp) |
| { |
| return test_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags); |
| } |
| |
| /** |
| * for_each_css - iterate all css's of a cgroup |
| * @css: the iteration cursor |
| * @ssid: the index of the subsystem, CGROUP_SUBSYS_COUNT after reaching the end |
| * @cgrp: the target cgroup to iterate css's of |
| * |
| * Should be called under cgroup_[tree_]mutex. |
| */ |
| #define for_each_css(css, ssid, cgrp) \ |
| for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT; (ssid)++) \ |
| if (!((css) = rcu_dereference_check( \ |
| (cgrp)->subsys[(ssid)], \ |
| lockdep_is_held(&cgroup_mutex)))) { } \ |
| else |
| |
| /** |
| * for_each_e_css - iterate all effective css's of a cgroup |
| * @css: the iteration cursor |
| * @ssid: the index of the subsystem, CGROUP_SUBSYS_COUNT after reaching the end |
| * @cgrp: the target cgroup to iterate css's of |
| * |
| * Should be called under cgroup_[tree_]mutex. |
| */ |
| #define for_each_e_css(css, ssid, cgrp) \ |
| for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT; (ssid)++) \ |
| if (!((css) = cgroup_e_css(cgrp, cgroup_subsys[(ssid)]))) \ |
| ; \ |
| else |
| |
| /** |
| * for_each_subsys - iterate all enabled cgroup subsystems |
| * @ss: the iteration cursor |
| * @ssid: the index of @ss, CGROUP_SUBSYS_COUNT after reaching the end |
| */ |
| #define for_each_subsys(ss, ssid) \ |
| for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT && \ |
| (((ss) = cgroup_subsys[ssid]) || true); (ssid)++) |
| |
| /** |
| * for_each_subsys_which - filter for_each_subsys with a bitmask |
| * @ss: the iteration cursor |
| * @ssid: the index of @ss, CGROUP_SUBSYS_COUNT after reaching the end |
| * @ss_maskp: a pointer to the bitmask |
| * |
| * The block will only run for cases where the ssid-th bit (1 << ssid) of |
| * the mask pointed to by @ss_maskp is set. |
| */ |
| #define for_each_subsys_which(ss, ssid, ss_maskp) \ |
| if (!CGROUP_SUBSYS_COUNT) /* to avoid spurious gcc warning */ \ |
| (ssid) = 0; \ |
| else \ |
| for_each_set_bit(ssid, ss_maskp, CGROUP_SUBSYS_COUNT) \ |
| if (((ss) = cgroup_subsys[ssid]) && false) \ |
| break; \ |
| else |
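| |
| /* |
| * Illustrative use (a sketch): walk only the subsystems whose bit is set |
| * in a mask, e.g. the ones providing a fork callback: |
| * |
| *	struct cgroup_subsys *ss; |
| *	unsigned long ss_mask = have_fork_callback; |
| *	int ssid; |
| * |
| *	for_each_subsys_which(ss, ssid, &ss_mask) |
| *		pr_debug("%s has a fork callback\n", ss->name); |
| */ |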
| |
| /* iterate across the hierarchies */ |
| #define for_each_root(root) \ |
| list_for_each_entry((root), &cgroup_roots, root_list) |
| |
| /* iterate over child cgrps, lock should be held throughout iteration */ |
| #define cgroup_for_each_live_child(child, cgrp) \ |
| list_for_each_entry((child), &(cgrp)->self.children, self.sibling) \ |
| if (({ lockdep_assert_held(&cgroup_mutex); \ |
| cgroup_is_dead(child); })) \ |
| ; \ |
| else |
| |
| static void cgroup_release_agent(struct work_struct *work); |
| static void check_for_release(struct cgroup *cgrp); |
| |
| /* |
| * A cgroup can be associated with multiple css_sets as different tasks may |
| * belong to different cgroups on different hierarchies. In the other |
| * direction, a css_set is naturally associated with multiple cgroups. |
| * This M:N relationship is represented by the following link structure |
| * which exists for each association and allows traversing the associations |
| * from both sides. |
| */ |
| struct cgrp_cset_link { |
| /* the cgroup and css_set this link associates */ |
| struct cgroup *cgrp; |
| struct css_set *cset; |
| |
| /* list of cgrp_cset_links anchored at cgrp->cset_links */ |
| struct list_head cset_link; |
| |
| /* list of cgrp_cset_links anchored at css_set->cgrp_links */ |
| struct list_head cgrp_link; |
| }; |
| |
| /* |
| * The default css_set - used by init and its children prior to any |
| * hierarchies being mounted. It contains a pointer to the root state |
| * for each subsystem. Also used to anchor the list of css_sets. Not |
| * reference-counted, to improve performance when child cgroups |
| * haven't been created. |
| */ |
| struct css_set init_css_set = { |
| .refcount = ATOMIC_INIT(1), |
| .cgrp_links = LIST_HEAD_INIT(init_css_set.cgrp_links), |
| .tasks = LIST_HEAD_INIT(init_css_set.tasks), |
| .mg_tasks = LIST_HEAD_INIT(init_css_set.mg_tasks), |
| .mg_preload_node = LIST_HEAD_INIT(init_css_set.mg_preload_node), |
| .mg_node = LIST_HEAD_INIT(init_css_set.mg_node), |
| }; |
| |
| static int css_set_count = 1; /* 1 for init_css_set */ |
| |
| /** |
| * cgroup_update_populated - update populated count of a cgroup |
| * @cgrp: the target cgroup |
| * @populated: inc or dec populated count |
| * |
| * @cgrp is either getting the first task (css_set) or losing the last. |
| * Update @cgrp->populated_cnt accordingly. The count is propagated |
| * towards root so that a given cgroup's populated_cnt is zero iff the |
| * cgroup and all its descendants are empty. |
| * |
| * @cgrp's interface file "cgroup.populated" is zero if |
| * @cgrp->populated_cnt is zero and 1 otherwise. When @cgrp->populated_cnt |
| * changes from or to zero, userland is notified that the content of the |
| * interface file has changed. This can be used to detect when @cgrp and |
| * its descendants become populated or empty. |
| */ |
| static void cgroup_update_populated(struct cgroup *cgrp, bool populated) |
| { |
| lockdep_assert_held(&css_set_rwsem); |
| |
| do { |
| bool trigger; |
| |
| if (populated) |
| trigger = !cgrp->populated_cnt++; |
| else |
| trigger = !--cgrp->populated_cnt; |
| |
| if (!trigger) |
| break; |
| |
| if (cgrp->populated_kn) |
| kernfs_notify(cgrp->populated_kn); |
| cgrp = cgroup_parent(cgrp); |
| } while (cgrp); |
| } |
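| |
| /* |
| * For example, when the first task enters /A/B, B's populated_cnt goes |
| * 0 -> 1 and, because that transition triggers, A's count is bumped too; |
| * removing the last task walks the counts back down the same way. Each |
| * from/to-zero transition notifies the matching "cgroup.populated" file. |
| */ |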
| |
| /* |
| * hash table for css_sets. This improves the performance of finding |
| * an existing css_set. This hash doesn't (currently) take into |
| * account cgroups in empty hierarchies. |
| */ |
| #define CSS_SET_HASH_BITS 7 |
| static DEFINE_HASHTABLE(css_set_table, CSS_SET_HASH_BITS); |
| |
| static unsigned long css_set_hash(struct cgroup_subsys_state *css[]) |
| { |
| unsigned long key = 0UL; |
| struct cgroup_subsys *ss; |
| int i; |
| |
| for_each_subsys(ss, i) |
| key += (unsigned long)css[i]; |
| key = (key >> 16) ^ key; |
| |
| return key; |
| } |
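| |
| /* |
| * The fold above xors the top bits of the summed css pointers into the |
| * low bits so some high-address entropy survives before the key is |
| * hashed into css_set_table. |
| */ |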
| |
| static void put_css_set_locked(struct css_set *cset) |
| { |
| struct cgrp_cset_link *link, *tmp_link; |
| struct cgroup_subsys *ss; |
| int ssid; |
| |
| lockdep_assert_held(&css_set_rwsem); |
| |
| if (!atomic_dec_and_test(&cset->refcount)) |
| return; |
| |
| /* This css_set is dead. unlink it and release cgroup refcounts */ |
| for_each_subsys(ss, ssid) |
| list_del(&cset->e_cset_node[ssid]); |
| hash_del(&cset->hlist); |
| css_set_count--; |
| |
| list_for_each_entry_safe(link, tmp_link, &cset->cgrp_links, cgrp_link) { |
| struct cgroup *cgrp = link->cgrp; |
| |
| list_del(&link->cset_link); |
| list_del(&link->cgrp_link); |
| |
| /* @cgrp can't go away while we're holding css_set_rwsem */ |
| if (list_empty(&cgrp->cset_links)) { |
| cgroup_update_populated(cgrp, false); |
| check_for_release(cgrp); |
| } |
| |
| kfree(link); |
| } |
| |
| kfree_rcu(cset, rcu_head); |
| } |
| |
| static void put_css_set(struct css_set *cset) |
| { |
| /* |
| * Ensure that the refcount doesn't hit zero while any readers |
| * can see it. Similar to atomic_dec_and_lock(), but for a |
| * rwsem. |
| */ |
| if (atomic_add_unless(&cset->refcount, -1, 1)) |
| return; |
| |
| down_write(&css_set_rwsem); |
| put_css_set_locked(cset); |
| up_write(&css_set_rwsem); |
| } |
| |
| /* |
| * refcounted get/put for css_set objects |
| */ |
| static inline void get_css_set(struct css_set *cset) |
| { |
| atomic_inc(&cset->refcount); |
| } |
| |
| /** |
| * compare_css_sets - helper function for find_existing_css_set(). |
| * @cset: candidate css_set being tested |
| * @old_cset: existing css_set for a task |
| * @new_cgrp: cgroup that's being entered by the task |
| * @template: desired set of css pointers in css_set (pre-calculated) |
| * |
| * Returns true if "cset" matches "old_cset" except for the hierarchy |
| * which "new_cgrp" belongs to, for which it should match "new_cgrp". |
| */ |
| static bool compare_css_sets(struct css_set *cset, |
| struct css_set *old_cset, |
| struct cgroup *new_cgrp, |
| struct cgroup_subsys_state *template[]) |
| { |
| struct list_head *l1, *l2; |
| |
| /* |
| * On the default hierarchy, there can be csets which are |
| * associated with the same set of cgroups but different csses. |
| * Let's first ensure that csses match. |
| */ |
| if (memcmp(template, cset->subsys, sizeof(cset->subsys))) |
| return false; |
| |
| /* |
| * Compare cgroup pointers in order to distinguish between |
| * different cgroups in hierarchies. As different cgroups may |
| * share the same effective css, this comparison is always |
| * necessary. |
| */ |
| l1 = &cset->cgrp_links; |
| l2 = &old_cset->cgrp_links; |
| while (1) { |
| struct cgrp_cset_link *link1, *link2; |
| struct cgroup *cgrp1, *cgrp2; |
| |
| l1 = l1->next; |
| l2 = l2->next; |
| /* See if we reached the end - both lists are equal length. */ |
| if (l1 == &cset->cgrp_links) { |
| BUG_ON(l2 != &old_cset->cgrp_links); |
| break; |
| } else { |
| BUG_ON(l2 == &old_cset->cgrp_links); |
| } |
| /* Locate the cgroups associated with these links. */ |
| link1 = list_entry(l1, struct cgrp_cset_link, cgrp_link); |
| link2 = list_entry(l2, struct cgrp_cset_link, cgrp_link); |
| cgrp1 = link1->cgrp; |
| cgrp2 = link2->cgrp; |
| /* Hierarchies should be linked in the same order. */ |
| BUG_ON(cgrp1->root != cgrp2->root); |
| |
| /* |
| * If this hierarchy is the hierarchy of the cgroup |
| * that's changing, then we need to check that this |
| * css_set points to the new cgroup; if it's any other |
| * hierarchy, then this css_set should point to the |
| * same cgroup as the old css_set. |
| */ |
| if (cgrp1->root == new_cgrp->root) { |
| if (cgrp1 != new_cgrp) |
| return false; |
| } else { |
| if (cgrp1 != cgrp2) |
| return false; |
| } |
| } |
| return true; |
| } |
| |
| /** |
| * find_existing_css_set - init css array and find the matching css_set |
| * @old_cset: the css_set that we're using before the cgroup transition |
| * @cgrp: the cgroup that we're moving into |
| * @template: out param for the new set of csses, should be clear on entry |
| */ |
| static struct css_set *find_existing_css_set(struct css_set *old_cset, |
| struct cgroup *cgrp, |
| struct cgroup_subsys_state *template[]) |
| { |
| struct cgroup_root *root = cgrp->root; |
| struct cgroup_subsys *ss; |
| struct css_set *cset; |
| unsigned long key; |
| int i; |
| |
| /* |
| * Build the set of subsystem state objects that we want to see in the |
| * new css_set. While subsystems can change globally, the entries here |
| * won't change, so no need for locking. |
| */ |
| for_each_subsys(ss, i) { |
| if (root->subsys_mask & (1UL << i)) { |
| /* |
| * @ss is in this hierarchy, so we want the |
| * effective css from @cgrp. |
| */ |
| template[i] = cgroup_e_css(cgrp, ss); |
| } else { |
| /* |
| * @ss is not in this hierarchy, so we don't want |
| * to change the css. |
| */ |
| template[i] = old_cset->subsys[i]; |
| } |
| } |
| |
| key = css_set_hash(template); |
| hash_for_each_possible(css_set_table, cset, hlist, key) { |
| if (!compare_css_sets(cset, old_cset, cgrp, template)) |
| continue; |
| |
| /* This css_set matches what we need */ |
| return cset; |
| } |
| |
| /* No existing css_set matched */ |
| return NULL; |
| } |
| |
| static void free_cgrp_cset_links(struct list_head *links_to_free) |
| { |
| struct cgrp_cset_link *link, *tmp_link; |
| |
| list_for_each_entry_safe(link, tmp_link, links_to_free, cset_link) { |
| list_del(&link->cset_link); |
| kfree(link); |
| } |
| } |
| |
| /** |
| * allocate_cgrp_cset_links - allocate cgrp_cset_links |
| * @count: the number of links to allocate |
| * @tmp_links: list_head the allocated links are put on |
| * |
| * Allocate @count cgrp_cset_link structures and chain them on @tmp_links |
| * through ->cset_link. Returns 0 on success or -errno. |
| */ |
| static int allocate_cgrp_cset_links(int count, struct list_head *tmp_links) |
| { |
| struct cgrp_cset_link *link; |
| int i; |
| |
| INIT_LIST_HEAD(tmp_links); |
| |
| for (i = 0; i < count; i++) { |
| link = kzalloc(sizeof(*link), GFP_KERNEL); |
| if (!link) { |
| free_cgrp_cset_links(tmp_links); |
| return -ENOMEM; |
| } |
| list_add(&link->cset_link, tmp_links); |
| } |
| return 0; |
| } |
| |
| /** |
| * link_css_set - a helper function to link a css_set to a cgroup |
| * @tmp_links: cgrp_cset_link objects allocated by allocate_cgrp_cset_links() |
| * @cset: the css_set to be linked |
| * @cgrp: the destination cgroup |
| */ |
| static void link_css_set(struct list_head *tmp_links, struct css_set *cset, |
| struct cgroup *cgrp) |
| { |
| struct cgrp_cset_link *link; |
| |
| BUG_ON(list_empty(tmp_links)); |
| |
| if (cgroup_on_dfl(cgrp)) |
| cset->dfl_cgrp = cgrp; |
| |
| link = list_first_entry(tmp_links, struct cgrp_cset_link, cset_link); |
| link->cset = cset; |
| link->cgrp = cgrp; |
| |
| if (list_empty(&cgrp->cset_links)) |
| cgroup_update_populated(cgrp, true); |
| list_move(&link->cset_link, &cgrp->cset_links); |
| |
| /* |
| * Always add links to the tail of the list so that the list |
| * is sorted by order of hierarchy creation |
| */ |
| list_add_tail(&link->cgrp_link, &cset->cgrp_links); |
| } |
| |
| /** |
| * find_css_set - return a new css_set with one cgroup updated |
| * @old_cset: the baseline css_set |
| * @cgrp: the cgroup to be updated |
| * |
| * Return a new css_set that's equivalent to @old_cset, but with @cgrp |
| * substituted into the appropriate hierarchy. |
| */ |
| static struct css_set *find_css_set(struct css_set *old_cset, |
| struct cgroup *cgrp) |
| { |
| struct cgroup_subsys_state *template[CGROUP_SUBSYS_COUNT] = { }; |
| struct css_set *cset; |
| struct list_head tmp_links; |
| struct cgrp_cset_link *link; |
| struct cgroup_subsys *ss; |
| unsigned long key; |
| int ssid; |
| |
| lockdep_assert_held(&cgroup_mutex); |
| |
| /* First see if we already have a css_set that matches |
| * the desired set */ |
| down_read(&css_set_rwsem); |
| cset = find_existing_css_set(old_cset, cgrp, template); |
| if (cset) |
| get_css_set(cset); |
| up_read(&css_set_rwsem); |
| |
| if (cset) |
| return cset; |
| |
| cset = kzalloc(sizeof(*cset), GFP_KERNEL); |
| if (!cset) |
| return NULL; |
| |
| /* Allocate all the cgrp_cset_link objects that we'll need */ |
| if (allocate_cgrp_cset_links(cgroup_root_count, &tmp_links) < 0) { |
| kfree(cset); |
| return NULL; |
| } |
| |
| atomic_set(&cset->refcount, 1); |
| INIT_LIST_HEAD(&cset->cgrp_links); |
| INIT_LIST_HEAD(&cset->tasks); |
| INIT_LIST_HEAD(&cset->mg_tasks); |
| INIT_LIST_HEAD(&cset->mg_preload_node); |
| INIT_LIST_HEAD(&cset->mg_node); |
| INIT_HLIST_NODE(&cset->hlist); |
| |
| /* Copy the set of subsystem state objects generated in |
| * find_existing_css_set() */ |
| memcpy(cset->subsys, template, sizeof(cset->subsys)); |
| |
| down_write(&css_set_rwsem); |
| /* Add reference counts and links from the new css_set. */ |
| list_for_each_entry(link, &old_cset->cgrp_links, cgrp_link) { |
| struct cgroup *c = link->cgrp; |
| |
| if (c->root == cgrp->root) |
| c = cgrp; |
| link_css_set(&tmp_links, cset, c); |
| } |
| |
| BUG_ON(!list_empty(&tmp_links)); |
| |
| css_set_count++; |
| |
| /* Add @cset to the hash table */ |
| key = css_set_hash(cset->subsys); |
| hash_add(css_set_table, &cset->hlist, key); |
| |
| for_each_subsys(ss, ssid) |
| list_add_tail(&cset->e_cset_node[ssid], |
| &cset->subsys[ssid]->cgroup->e_csets[ssid]); |
| |
| up_write(&css_set_rwsem); |
| |
| return cset; |
| } |
| |
| void cgroup_threadgroup_change_begin(struct task_struct *tsk) |
| { |
| down_read(&tsk->signal->group_rwsem); |
| } |
| |
| void cgroup_threadgroup_change_end(struct task_struct *tsk) |
| { |
| up_read(&tsk->signal->group_rwsem); |
| } |
| |
| /** |
| * threadgroup_lock - lock threadgroup |
| * @tsk: member task of the threadgroup to lock |
| * |
| * Lock the threadgroup @tsk belongs to. No new task is allowed to enter |
| * and member tasks aren't allowed to exit (as indicated by PF_EXITING) or |
| * change ->group_leader/pid. This is useful for cases where the threadgroup |
| * needs to stay stable across blockable operations. |
| * |
| * fork and exit explicitly call threadgroup_change_{begin|end}() for |
| * synchronization. While held, no new task will be added to threadgroup |
| * and no existing live task will have its PF_EXITING set. |
| * |
| * de_thread() does threadgroup_change_{begin|end}() when a non-leader |
| * sub-thread becomes a new leader. |
| */ |
| static void threadgroup_lock(struct task_struct *tsk) |
| { |
| down_write(&tsk->signal->group_rwsem); |
| } |
| |
| /** |
| * threadgroup_unlock - unlock threadgroup |
| * @tsk: member task of the threadgroup to unlock |
| * |
| * Reverse threadgroup_lock(). |
| */ |
| static inline void threadgroup_unlock(struct task_struct *tsk) |
| { |
| up_write(&tsk->signal->group_rwsem); |
| } |
| |
| static struct cgroup_root *cgroup_root_from_kf(struct kernfs_root *kf_root) |
| { |
| struct cgroup *root_cgrp = kf_root->kn->priv; |
| |
| return root_cgrp->root; |
| } |
| |
| static int cgroup_init_root_id(struct cgroup_root *root) |
| { |
| int id; |
| |
| lockdep_assert_held(&cgroup_mutex); |
| |
| id = idr_alloc_cyclic(&cgroup_hierarchy_idr, root, 0, 0, GFP_KERNEL); |
| if (id < 0) |
| return id; |
| |
| root->hierarchy_id = id; |
| return 0; |
| } |
| |
| static void cgroup_exit_root_id(struct cgroup_root *root) |
| { |
| lockdep_assert_held(&cgroup_mutex); |
| |
| if (root->hierarchy_id) { |
| idr_remove(&cgroup_hierarchy_idr, root->hierarchy_id); |
| root->hierarchy_id = 0; |
| } |
| } |
| |
| static void cgroup_free_root(struct cgroup_root *root) |
| { |
| if (root) { |
| /* hierarchy ID should already have been released */ |
| WARN_ON_ONCE(root->hierarchy_id); |
| |
| idr_destroy(&root->cgroup_idr); |
| kfree(root); |
| } |
| } |
| |
| static void cgroup_destroy_root(struct cgroup_root *root) |
| { |
| struct cgroup *cgrp = &root->cgrp; |
| struct cgrp_cset_link *link, *tmp_link; |
| |
| mutex_lock(&cgroup_mutex); |
| |
| BUG_ON(atomic_read(&root->nr_cgrps)); |
| BUG_ON(!list_empty(&cgrp->self.children)); |
| |
| /* Rebind all subsystems back to the default hierarchy */ |
| rebind_subsystems(&cgrp_dfl_root, root->subsys_mask); |
| |
| /* |
| * Release all the links from cset_links to this hierarchy's |
| * root cgroup |
| */ |
| down_write(&css_set_rwsem); |
| |
| list_for_each_entry_safe(link, tmp_link, &cgrp->cset_links, cset_link) { |
| list_del(&link->cset_link); |
| list_del(&link->cgrp_link); |
| kfree(link); |
| } |
| up_write(&css_set_rwsem); |
| |
| if (!list_empty(&root->root_list)) { |
| list_del(&root->root_list); |
| cgroup_root_count--; |
| } |
| |
| cgroup_exit_root_id(root); |
| |
| mutex_unlock(&cgroup_mutex); |
| |
| kernfs_destroy_root(root->kf_root); |
| cgroup_free_root(root); |
| } |
| |
| /* look up cgroup associated with given css_set on the specified hierarchy */ |
| static struct cgroup *cset_cgroup_from_root(struct css_set *cset, |
| struct cgroup_root *root) |
| { |
| struct cgroup *res = NULL; |
| |
| lockdep_assert_held(&cgroup_mutex); |
| lockdep_assert_held(&css_set_rwsem); |
| |
| if (cset == &init_css_set) { |
| res = &root->cgrp; |
| } else { |
| struct cgrp_cset_link *link; |
| |
| list_for_each_entry(link, &cset->cgrp_links, cgrp_link) { |
| struct cgroup *c = link->cgrp; |
| |
| if (c->root == root) { |
| res = c; |
| break; |
| } |
| } |
| } |
| |
| BUG_ON(!res); |
| return res; |
| } |
| |
| /* |
| * Return the cgroup for "task" from the given hierarchy. Must be |
| * called with cgroup_mutex and css_set_rwsem held. |
| */ |
| static struct cgroup *task_cgroup_from_root(struct task_struct *task, |
| struct cgroup_root *root) |
| { |
| /* |
| * No need to lock the task - since we hold cgroup_mutex the |
| * task can't change groups, so the only thing that can happen |
| * is that it exits and its css is set back to init_css_set. |
| */ |
| return cset_cgroup_from_root(task_css_set(task), root); |
| } |
| |
| /* |
| * A task must hold cgroup_mutex to modify cgroups. |
| * |
| * Any task can increment and decrement the count field without lock. |
| * So in general, code holding cgroup_mutex can't rely on the count |
| * field not changing. However, if the count goes to zero, then only |
| * cgroup_attach_task() can increment it again. A count of zero |
| * means that no tasks are currently attached, so there is no |
| * way a task attached to that cgroup can fork (the other way to |
| * increment the count). So code holding cgroup_mutex can safely |
| * assume that if the count is zero, it will stay zero. Similarly, if |
| * a task holds cgroup_mutex on a cgroup with zero count, it |
| * knows that the cgroup won't be removed, as cgroup_rmdir() |
| * needs that mutex. |
| * |
| * A cgroup can only be deleted if both its 'count' of using tasks |
| * is zero, and its list of 'children' cgroups is empty. Since all |
| * tasks in the system use _some_ cgroup, and since there is always at |
| * least one task in the system (init, pid == 1), therefore, root cgroup |
| * always has either child cgroups and/or using tasks. So we don't |
| * need a special hack to ensure that root cgroup cannot be deleted. |
| * |
| * P.S. One more locking exception. RCU is used to guard the |
| * update of a task's cgroup pointer by cgroup_attach_task() |
| */ |
| |
| static int cgroup_populate_dir(struct cgroup *cgrp, unsigned long subsys_mask); |
| static struct kernfs_syscall_ops cgroup_kf_syscall_ops; |
| static const struct file_operations proc_cgroupstats_operations; |
| |
| static char *cgroup_file_name(struct cgroup *cgrp, const struct cftype *cft, |
| char *buf) |
| { |
| struct cgroup_subsys *ss = cft->ss; |
| |
| if (cft->ss && !(cft->flags & CFTYPE_NO_PREFIX) && |
| !(cgrp->root->flags & CGRP_ROOT_NOPREFIX)) |
| snprintf(buf, CGROUP_FILE_NAME_MAX, "%s.%s", |
| cgroup_on_dfl(cgrp) ? ss->name : ss->legacy_name, |
| cft->name); |
| else |
| strncpy(buf, cft->name, CGROUP_FILE_NAME_MAX); |
| return buf; |
| } |
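| |
| /* |
| * For example, a cftype named "limit_in_bytes" belonging to the memory |
| * controller normally comes out as "memory.limit_in_bytes"; with |
| * CFTYPE_NO_PREFIX or a CGRP_ROOT_NOPREFIX mount it is just |
| * "limit_in_bytes". |
| */ |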
| |
| /** |
| * cgroup_file_mode - deduce file mode of a control file |
| * @cft: the control file in question |
| * |
| * returns cft->mode if ->mode is not 0 |
| * returns S_IRUGO|S_IWUSR if it has both a read and a write handler |
| * returns S_IRUGO if it has only a read handler |
| * returns S_IWUSR if it has only a write handler |
| */ |
| static umode_t cgroup_file_mode(const struct cftype *cft) |
| { |
| umode_t mode = 0; |
| |
| if (cft->mode) |
| return cft->mode; |
| |
| if (cft->read_u64 || cft->read_s64 || cft->seq_show) |
| mode |= S_IRUGO; |
| |
| if (cft->write_u64 || cft->write_s64 || cft->write) |
| mode |= S_IWUSR; |
| |
| return mode; |
| } |
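| |
| /* |
| * For instance, a cftype with only a seq_show handler ends up 0444 |
| * (S_IRUGO), one with only a write handler 0200 (S_IWUSR), and one with |
| * both 0644. |
| */ |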
| |
| static void cgroup_get(struct cgroup *cgrp) |
| { |
| WARN_ON_ONCE(cgroup_is_dead(cgrp)); |
| css_get(&cgrp->self); |
| } |
| |
| static bool cgroup_tryget(struct cgroup *cgrp) |
| { |
| return css_tryget(&cgrp->self); |
| } |
| |
| static void cgroup_put(struct cgroup *cgrp) |
| { |
| css_put(&cgrp->self); |
| } |
| |
| /** |
| * cgroup_calc_child_subsys_mask - calculate child_subsys_mask |
| * @cgrp: the target cgroup |
| * @subtree_control: the new subtree_control mask to consider |
| * |
| * On the default hierarchy, a subsystem may request other subsystems to be |
| * enabled together through its ->depends_on mask. In such cases, more |
| * subsystems than specified in "cgroup.subtree_control" may be enabled. |
| * |
| * This function calculates which subsystems need to be enabled if |
| * @subtree_control is to be applied to @cgrp. The returned mask is always |
| * a superset of @subtree_control and follows the usual hierarchy rules. |
| */ |
| static unsigned long cgroup_calc_child_subsys_mask(struct cgroup *cgrp, |
| unsigned long subtree_control) |
| { |
| struct cgroup *parent = cgroup_parent(cgrp); |
| unsigned long cur_ss_mask = subtree_control; |
| struct cgroup_subsys *ss; |
| int ssid; |
| |
| lockdep_assert_held(&cgroup_mutex); |
| |
| if (!cgroup_on_dfl(cgrp)) |
| return cur_ss_mask; |
| |
| while (true) { |
| unsigned long new_ss_mask = cur_ss_mask; |
| |
| for_each_subsys_which(ss, ssid, &cur_ss_mask) |
| new_ss_mask |= ss->depends_on; |
| |
| /* |
| * Mask out subsystems which aren't available. This can |
| * happen only if some depended-upon subsystems were bound |
| * to non-default hierarchies. |
| */ |
| if (parent) |
| new_ss_mask &= parent->child_subsys_mask; |
| else |
| new_ss_mask &= cgrp->root->subsys_mask; |
| |
| if (new_ss_mask == cur_ss_mask) |
| break; |
| cur_ss_mask = new_ss_mask; |
| } |
| |
| return cur_ss_mask; |
| } |
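| |
| /* |
| * For example, on the default hierarchy, if a controller enabled in |
| * @subtree_control has ->depends_on bits set, the loop above keeps |
| * folding those dependencies in until the mask stops growing and then |
| * clamps the result to what the parent (or the root) can actually |
| * provide. |
| */ |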
| |
| /** |
| * cgroup_refresh_child_subsys_mask - update child_subsys_mask |
| * @cgrp: the target cgroup |
| * |
| * Update @cgrp->child_subsys_mask according to the current |
| * @cgrp->subtree_control using cgroup_calc_child_subsys_mask(). |
| */ |
| static void cgroup_refresh_child_subsys_mask(struct cgroup *cgrp) |
| { |
| cgrp->child_subsys_mask = |
| cgroup_calc_child_subsys_mask(cgrp, cgrp->subtree_control); |
| } |
| |
| /** |
| * cgroup_kn_unlock - unlocking helper for cgroup kernfs methods |
| * @kn: the kernfs_node being serviced |
| * |
| * This helper undoes cgroup_kn_lock_live() and should be invoked before |
| * the method finishes if locking succeeded. Note that once this function |
| * returns, the cgroup returned by cgroup_kn_lock_live() may become |
| * inaccessible at any time. If the caller intends to continue to access the |
| * cgroup, it should pin it before invoking this function. |
| */ |
| static void cgroup_kn_unlock(struct kernfs_node *kn) |
| { |
| struct cgroup *cgrp; |
| |
| if (kernfs_type(kn) == KERNFS_DIR) |
| cgrp = kn->priv; |
| else |
| cgrp = kn->parent->priv; |
| |
| mutex_unlock(&cgroup_mutex); |
| |
| kernfs_unbreak_active_protection(kn); |
| cgroup_put(cgrp); |
| } |
| |
| /** |
| * cgroup_kn_lock_live - locking helper for cgroup kernfs methods |
| * @kn: the kernfs_node being serviced |
| * |
| * This helper is to be used by a cgroup kernfs method currently servicing |
| * @kn. It breaks the active protection, performs cgroup locking and |
| * verifies that the associated cgroup is alive. Returns the cgroup if |
| * alive; otherwise, %NULL. A successful return should be undone by a |
| * matching cgroup_kn_unlock() invocation. |
| * |
| * Any cgroup kernfs method implementation which requires locking the |
| * associated cgroup should use this helper. It avoids nesting cgroup |
| * locking under kernfs active protection and allows all kernfs operations |
| * including self-removal. |
| */ |
| static struct cgroup *cgroup_kn_lock_live(struct kernfs_node *kn) |
| { |
| struct cgroup *cgrp; |
| |
| if (kernfs_type(kn) == KERNFS_DIR) |
| cgrp = kn->priv; |
| else |
| cgrp = kn->parent->priv; |
| |
| /* |
| * We're going to grab cgroup_mutex, which nests outside kernfs |
| * active_ref. cgroup liveliness check alone provides enough |
| * protection against removal. Ensure @cgrp stays accessible and |
| * break the active_ref protection. |
| */ |
| if (!cgroup_tryget(cgrp)) |
| return NULL; |
| kernfs_break_active_protection(kn); |
| |
| mutex_lock(&cgroup_mutex); |
| |
| if (!cgroup_is_dead(cgrp)) |
| return cgrp; |
| |
| cgroup_kn_unlock(kn); |
| return NULL; |
| } |
| |
| static void cgroup_rm_file(struct cgroup *cgrp, const struct cftype *cft) |
| { |
| char name[CGROUP_FILE_NAME_MAX]; |
| |
| lockdep_assert_held(&cgroup_mutex); |
| kernfs_remove_by_name(cgrp->kn, cgroup_file_name(cgrp, cft, name)); |
| } |
| |
| /** |
| * cgroup_clear_dir - remove subsys files in a cgroup directory |
| * @cgrp: target cgroup |
| * @subsys_mask: mask of the subsystem ids whose files should be removed |
| */ |
| static void cgroup_clear_dir(struct cgroup *cgrp, unsigned long subsys_mask) |
| { |
| struct cgroup_subsys *ss; |
| int i; |
| |
| for_each_subsys(ss, i) { |
| struct cftype *cfts; |
| |
| if (!(subsys_mask & (1 << i))) |
| continue; |
| list_for_each_entry(cfts, &ss->cfts, node) |
| cgroup_addrm_files(cgrp, cfts, false); |
| } |
| } |
| |
| static int rebind_subsystems(struct cgroup_root *dst_root, |
| unsigned long ss_mask) |
| { |
| struct cgroup_subsys *ss; |
| unsigned long tmp_ss_mask; |
| int ssid, i, ret; |
| |
| lockdep_assert_held(&cgroup_mutex); |
| |
| for_each_subsys_which(ss, ssid, &ss_mask) { |
| /* if @ss has non-root csses attached to it, can't move */ |
| if (css_next_child(NULL, cgroup_css(&ss->root->cgrp, ss))) |
| return -EBUSY; |
| |
| /* can't move between two non-dummy roots either */ |
| if (ss->root != &cgrp_dfl_root && dst_root != &cgrp_dfl_root) |
| return -EBUSY; |
| } |
| |
| /* skip creating root files on dfl_root for inhibited subsystems */ |
| tmp_ss_mask = ss_mask; |
| if (dst_root == &cgrp_dfl_root) |
| tmp_ss_mask &= ~cgrp_dfl_root_inhibit_ss_mask; |
| |
| ret = cgroup_populate_dir(&dst_root->cgrp, tmp_ss_mask); |
| if (ret) { |
| if (dst_root != &cgrp_dfl_root) |
| return ret; |
| |
| /* |
| * Rebinding back to the default root is not allowed to |
| * fail. Using both default and non-default roots should |
| * be rare. Moving subsystems back and forth even more so. |
| * Just warn about it and continue. |
| */ |
| if (cgrp_dfl_root_visible) { |
| pr_warn("failed to create files (%d) while rebinding 0x%lx to default root\n", |
| ret, ss_mask); |
| pr_warn("you may retry by moving them to a different hierarchy and unbinding\n"); |
| } |
| } |
| |
| /* |
| * Nothing can fail from this point on. Remove files for the |
| * removed subsystems and rebind each subsystem. |
| */ |
| for_each_subsys_which(ss, ssid, &ss_mask) |
| cgroup_clear_dir(&ss->root->cgrp, 1 << ssid); |
| |
| for_each_subsys_which(ss, ssid, &ss_mask) { |
| struct cgroup_root *src_root; |
| struct cgroup_subsys_state *css; |
| struct css_set *cset; |
| |
| src_root = ss->root; |
| css = cgroup_css(&src_root->cgrp, ss); |
| |
| WARN_ON(!css || cgroup_css(&dst_root->cgrp, ss)); |
| |
| RCU_INIT_POINTER(src_root->cgrp.subsys[ssid], NULL); |
| rcu_assign_pointer(dst_root->cgrp.subsys[ssid], css); |
| ss->root = dst_root; |
| css->cgroup = &dst_root->cgrp; |
| |
| down_write(&css_set_rwsem); |
| hash_for_each(css_set_table, i, cset, hlist) |
| list_move_tail(&cset->e_cset_node[ss->id], |
| &dst_root->cgrp.e_csets[ss->id]); |
| up_write(&css_set_rwsem); |
| |
| src_root->subsys_mask &= ~(1 << ssid); |
| src_root->cgrp.subtree_control &= ~(1 << ssid); |
| cgroup_refresh_child_subsys_mask(&src_root->cgrp); |
| |
| /* default hierarchy doesn't enable controllers by default */ |
| dst_root->subsys_mask |= 1 << ssid; |
| if (dst_root != &cgrp_dfl_root) { |
| dst_root->cgrp.subtree_control |= 1 << ssid; |
| cgroup_refresh_child_subsys_mask(&dst_root->cgrp); |
| } |
| |
| if (ss->bind) |
| ss->bind(css); |
| } |
| |
| kernfs_activate(dst_root->cgrp.kn); |
| return 0; |
| } |
| |
| static int cgroup_show_options(struct seq_file *seq, |
| struct kernfs_root *kf_root) |
| { |
| struct cgroup_root *root = cgroup_root_from_kf(kf_root); |
| struct cgroup_subsys *ss; |
| int ssid; |
| |
| if (root != &cgrp_dfl_root) |
| for_each_subsys(ss, ssid) |
| if (root->subsys_mask & (1 << ssid)) |
| seq_show_option(seq, ss->legacy_name, NULL); |
| if (root->flags & CGRP_ROOT_NOPREFIX) |
| seq_puts(seq, ",noprefix"); |
| if (root->flags & CGRP_ROOT_XATTR) |
| seq_puts(seq, ",xattr"); |
| |
| spin_lock(&release_agent_path_lock); |
| if (strlen(root->release_agent_path)) |
| seq_show_option(seq, "release_agent", |
| root->release_agent_path); |
| spin_unlock(&release_agent_path_lock); |
| |
| if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags)) |
| seq_puts(seq, ",clone_children"); |
| if (strlen(root->name)) |
| seq_show_option(seq, "name", root->name); |
| return 0; |
| } |
| |
| struct cgroup_sb_opts { |
| unsigned long subsys_mask; |
| unsigned int flags; |
| char *release_agent; |
| bool cpuset_clone_children; |
| char *name; |
| /* User explicitly requested empty subsystem */ |
| bool none; |
| }; |
| |
| static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts) |
| { |
| char *token, *o = data; |
| bool all_ss = false, one_ss = false; |
| unsigned long mask = -1UL; |
| struct cgroup_subsys *ss; |
| int nr_opts = 0; |
| int i; |
| |
| #ifdef CONFIG_CPUSETS |
| mask = ~(1U << cpuset_cgrp_id); |
| #endif |
| |
| memset(opts, 0, sizeof(*opts)); |
| |
| while ((token = strsep(&o, ",")) != NULL) { |
| nr_opts++; |
| |
| if (!*token) |
| return -EINVAL; |
| if (!strcmp(token, "none")) { |
| /* Explicitly have no subsystems */ |
| opts->none = true; |
| continue; |
| } |
| if (!strcmp(token, "all")) { |
| /* Mutually exclusive option 'all' + subsystem name */ |
| if (one_ss) |
| return -EINVAL; |
| all_ss = true; |
| continue; |
| } |
| if (!strcmp(token, "__DEVEL__sane_behavior")) { |
| opts->flags |= CGRP_ROOT_SANE_BEHAVIOR; |
| continue; |
| } |
| if (!strcmp(token, "noprefix")) { |
| opts->flags |= CGRP_ROOT_NOPREFIX; |
| continue; |
| } |
| if (!strcmp(token, "clone_children")) { |
| opts->cpuset_clone_children = true; |
| continue; |
| } |
| if (!strcmp(token, "xattr")) { |
| opts->flags |= CGRP_ROOT_XATTR; |
| continue; |
| } |
| if (!strncmp(token, "release_agent=", 14)) { |
| /* Specifying two release agents is forbidden */ |
| if (opts->release_agent) |
| return -EINVAL; |
| opts->release_agent = |
| kstrndup(token + 14, PATH_MAX - 1, GFP_KERNEL); |
| if (!opts->release_agent) |
| return -ENOMEM; |
| continue; |
| } |
| if (!strncmp(token, "name=", 5)) { |
| const char *name = token + 5; |
| /* Can't specify an empty name */ |
| if (!strlen(name)) |
| return -EINVAL; |
| /* Must match [\w.-]+ */ |
| for (i = 0; i < strlen(name); i++) { |
| char c = name[i]; |
| if (isalnum(c)) |
| continue; |
| if ((c == '.') || (c == '-') || (c == '_')) |
| continue; |
| return -EINVAL; |
| } |
| /* Specifying two names is forbidden */ |
| if (opts->name) |
| return -EINVAL; |
| opts->name = kstrndup(name, |
| MAX_CGROUP_ROOT_NAMELEN - 1, |
| GFP_KERNEL); |
| if (!opts->name) |
| return -ENOMEM; |
| |
| continue; |
| } |
| |
| for_each_subsys(ss, i) { |
| if (strcmp(token, ss->legacy_name)) |
| continue; |
| if (ss->disabled) |
| continue; |
| |
| /* Mutually exclusive option 'all' + subsystem name */ |
| if (all_ss) |
| return -EINVAL; |
| opts->subsys_mask |= (1 << i); |
| one_ss = true; |
| |
| break; |
| } |
| if (i == CGROUP_SUBSYS_COUNT) |
| return -ENOENT; |
| } |
| |
| if (opts->flags & CGRP_ROOT_SANE_BEHAVIOR) { |
| pr_warn("sane_behavior: this is still under development and its behaviors will change, proceed at your own risk\n"); |
| if (nr_opts != 1) { |
| pr_err("sane_behavior: no other mount options allowed\n"); |
| return -EINVAL; |
| } |
| return 0; |
| } |
| |
| /* |
| * If the 'all' option was specified, select all the subsystems. |
| * Otherwise, if the 'none', 'name=' and subsystem name options were all |
| * absent, default to 'all'. |
| */ |
| if (all_ss || (!one_ss && !opts->none && !opts->name)) |
| for_each_subsys(ss, i) |
| if (!ss->disabled) |
| opts->subsys_mask |= (1 << i); |
| |
| /* |
| * We either have to specify by name or by subsystems. (So all |
| * empty hierarchies must have a name). |
| */ |
| if (!opts->subsys_mask && !opts->name) |
| return -EINVAL; |
| |
| /* |
| * Option noprefix was introduced just for backward compatibility |
| * with the old cpuset, so we allow noprefix only if mounting just |
| * the cpuset subsystem. |
| */ |
| if ((opts->flags & CGRP_ROOT_NOPREFIX) && (opts->subsys_mask & mask)) |
| return -EINVAL; |
| |
| /* Can't specify "none" and some subsystems */ |
| if (opts->subsys_mask && opts->none) |
| return -EINVAL; |
| |
| return 0; |
| } |
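| |
| /* |
| * As an illustration (not taken from real usage), a legacy mount such as |
| * "mount -t cgroup -o cpuset,noprefix,name=boot none /mnt" leaves this |
| * function with the cpuset bit in ->subsys_mask, CGRP_ROOT_NOPREFIX in |
| * ->flags and ->name set to "boot". |
| */ |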
| |
| static int cgroup_remount(struct kernfs_root *kf_root, int *flags, char *data) |
| { |
| int ret = 0; |
| struct cgroup_root *root = cgroup_root_from_kf(kf_root); |
| struct cgroup_sb_opts opts; |
| unsigned long added_mask, removed_mask; |
| |
| if (root == &cgrp_dfl_root) { |
| pr_err("remount is not allowed\n"); |
| return -EINVAL; |
| } |
| |
| mutex_lock(&cgroup_mutex); |
| |
| /* See what subsystems are wanted */ |
| ret = parse_cgroupfs_options(data, &opts); |
| if (ret) |
| goto out_unlock; |
| |
| if (opts.subsys_mask != root->subsys_mask || opts.release_agent) |
| pr_warn("option changes via remount are deprecated (pid=%d comm=%s)\n", |
| task_tgid_nr(current), current->comm); |
| |
| added_mask = opts.subsys_mask & ~root->subsys_mask; |
| removed_mask = root->subsys_mask & ~opts.subsys_mask; |
| |
| /* Don't allow flags or name to change at remount */ |
| if ((opts.flags ^ root->flags) || |
| (opts.name && strcmp(opts.name, root->name))) { |
| pr_err("option or name mismatch, new: 0x%x \"%s\", old: 0x%x \"%s\"\n", |
| opts.flags, opts.name ?: "", root->flags, root->name); |
| ret = -EINVAL; |
| goto out_unlock; |
| } |
| |
| /* remounting is not allowed for populated hierarchies */ |
| if (!list_empty(&root->cgrp.self.children)) { |
| ret = -EBUSY; |
| goto out_unlock; |
| } |
| |
| ret = rebind_subsystems(root, added_mask); |
| if (ret) |
| goto out_unlock; |
| |
| rebind_subsystems(&cgrp_dfl_root, removed_mask); |
| |
| if (opts.release_agent) { |
| spin_lock(&release_agent_path_lock); |
| strcpy(root->release_agent_path, opts.release_agent); |
| spin_unlock(&release_agent_path_lock); |
| } |
| out_unlock: |
| kfree(opts.release_agent); |
| kfree(opts.name); |
| mutex_unlock(&cgroup_mutex); |
| return ret; |
| } |
| |
| /* |
| * To reduce the fork() overhead for systems that are not actually using |
| * their cgroups capability, we don't maintain the lists running through |
| * each css_set to its tasks until we see the list actually used - in other |
| * words after the first mount. |
| */ |
| static bool use_task_css_set_links __read_mostly; |
| |
| static void cgroup_enable_task_cg_lists(void) |
| { |
| struct task_struct *p, *g; |
| |
| down_write(&css_set_rwsem); |
| |
| if (use_task_css_set_links) |
| goto out_unlock; |
| |
| use_task_css_set_links = true; |
| |
| /* |
| * We need tasklist_lock because RCU is not safe against |
| * while_each_thread(). Besides, a forking task that has passed |
| * cgroup_post_fork() without seeing use_task_css_set_links = 1 |
| * is not guaranteed to have its child immediately visible in the |
| * tasklist if we walk through it with RCU. |
| */ |
| read_lock(&tasklist_lock); |
| do_each_thread(g, p) { |
| WARN_ON_ONCE(!list_empty(&p->cg_list) || |
| task_css_set(p) != &init_css_set); |
| |
| /* |
| * We should check if the process is exiting, otherwise |
| * it will race with cgroup_exit() in that the list |
| * entry won't be deleted even though the process has exited. |
| * Do it while holding siglock so that we don't end up |
| * racing against cgroup_exit(). |
| */ |
| spin_lock_irq(&p->sighand->siglock); |
| if (!(p->flags & PF_EXITING)) { |
| struct css_set *cset = task_css_set(p); |
| |
| list_add(&p->cg_list, &cset->tasks); |
| get_css_set(cset); |
| } |
| spin_unlock_irq(&p->sighand->siglock); |
| } while_each_thread(g, p); |
| read_unlock(&tasklist_lock); |
| out_unlock: |
| up_write(&css_set_rwsem); |
| } |
| |
| static void init_cgroup_housekeeping(struct cgroup *cgrp) |
| { |
| struct cgroup_subsys *ss; |
| int ssid; |
| |
| INIT_LIST_HEAD(&cgrp->self.sibling); |
| INIT_LIST_HEAD(&cgrp->self.children); |
| INIT_LIST_HEAD(&cgrp->cset_links); |
| INIT_LIST_HEAD(&cgrp->pidlists); |
| mutex_init(&cgrp->pidlist_mutex); |
| cgrp->self.cgroup = cgrp; |
| cgrp->self.flags |= CSS_ONLINE; |
| |
| for_each_subsys(ss, ssid) |
| INIT_LIST_HEAD(&cgrp->e_csets[ssid]); |
| |
| init_waitqueue_head(&cgrp->offline_waitq); |
| INIT_WORK(&cgrp->release_agent_work, cgroup_release_agent); |
| } |
| |
| static void init_cgroup_root(struct cgroup_root *root, |
| struct cgroup_sb_opts *opts) |
| { |
| struct cgroup *cgrp = &root->cgrp; |
| |
| INIT_LIST_HEAD(&root->root_list); |
| atomic_set(&root->nr_cgrps, 1); |
| cgrp->root = root; |
| init_cgroup_housekeeping(cgrp); |
| idr_init(&root->cgroup_idr); |
| |
| root->flags = opts->flags; |
| if (opts->release_agent) |
| strcpy(root->release_agent_path, opts->release_agent); |
| if (opts->name) |
| strcpy(root->name, opts->name); |
| if (opts->cpuset_clone_children) |
| set_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags); |
| } |
| |
| static int cgroup_setup_root(struct cgroup_root *root, unsigned long ss_mask) |
| { |
| LIST_HEAD(tmp_links); |
| struct cgroup *root_cgrp = &root->cgrp; |
| struct cftype *base_files; |
| struct css_set *cset; |
| int i, ret; |
| |
| lockdep_assert_held(&cgroup_mutex); |
| |
| ret = cgroup_idr_alloc(&root->cgroup_idr, root_cgrp, 1, 2, GFP_KERNEL); |
| if (ret < 0) |
| goto out; |
| root_cgrp->id = ret; |
| |
| ret = percpu_ref_init(&root_cgrp->self.refcnt, css_release, 0, |
| GFP_KERNEL); |
| if (ret) |
| goto out; |
| |
| /* |
| * We're accessing css_set_count without locking css_set_rwsem here, |
| * but that's OK - it can only be increased by someone holding |
| * cgroup_mutex, and that's us. The worst that can happen is that we |
| * have some link structures left over. |
| */ |
| ret = allocate_cgrp_cset_links(css_set_count, &tmp_links); |
| if (ret) |
| goto cancel_ref; |
| |
| ret = cgroup_init_root_id(root); |
| if (ret) |
| goto cancel_ref; |
| |
| root->kf_root = kernfs_create_root(&cgroup_kf_syscall_ops, |
| KERNFS_ROOT_CREATE_DEACTIVATED, |
| root_cgrp); |
| if (IS_ERR(root->kf_root)) { |
| ret = PTR_ERR(root->kf_root); |
| goto exit_root_id; |
| } |
| root_cgrp->kn = root->kf_root->kn; |
| |
| if (root == &cgrp_dfl_root) |
| base_files = cgroup_dfl_base_files; |
| else |
| base_files = cgroup_legacy_base_files; |
| |
| ret = cgroup_addrm_files(root_cgrp, base_files, true); |
| if (ret) |
| goto destroy_root; |
| |
| ret = rebind_subsystems(root, ss_mask); |
| if (ret) |
| goto destroy_root; |
| |
| /* |
| * There must be no failure case after here, since rebinding takes |
| * care of subsystems' refcounts, which are explicitly dropped in |
| * the failure exit path. |
| */ |
| list_add(&root->root_list, &cgroup_roots); |
| cgroup_root_count++; |
| |
| /* |
| * Link the root cgroup in this hierarchy into all the css_set |
| * objects. |
| */ |
| down_write(&css_set_rwsem); |
| hash_for_each(css_set_table, i, cset, hlist) |
| link_css_set(&tmp_links, cset, root_cgrp); |
| up_write(&css_set_rwsem); |
| |
| BUG_ON(!list_empty(&root_cgrp->self.children)); |
| BUG_ON(atomic_read(&root->nr_cgrps) != 1); |
| |
| kernfs_activate(root_cgrp->kn); |
| ret = 0; |
| goto out; |
| |
| destroy_root: |
| kernfs_destroy_root(root->kf_root); |
| root->kf_root = NULL; |
| exit_root_id: |
| cgroup_exit_root_id(root); |
| cancel_ref: |
| percpu_ref_exit(&root_cgrp->self.refcnt); |
| out: |
| free_cgrp_cset_links(&tmp_links); |
| return ret; |
| } |
| |
| static struct dentry *cgroup_mount(struct file_system_type *fs_type, |
| int flags, const char *unused_dev_name, |
| void *data) |
| { |
| struct super_block *pinned_sb = NULL; |
| struct cgroup_subsys *ss; |
| struct cgroup_root *root; |
| struct cgroup_sb_opts opts; |
| struct dentry *dentry; |
| int ret; |
| int i; |
| bool new_sb; |
| |
| /* |
| * The first time anyone tries to mount a cgroup, enable the list |
| * linking each css_set to its tasks and fix up all existing tasks. |
| */ |
| if (!use_task_css_set_links) |
| cgroup_enable_task_cg_lists(); |
| |
| mutex_lock(&cgroup_mutex); |
| |
| /* First find the desired set of subsystems */ |
| ret = parse_cgroupfs_options(data, &opts); |
| if (ret) |
| goto out_unlock; |
| |
| /* look for a matching existing root */ |
| if (opts.flags & CGRP_ROOT_SANE_BEHAVIOR) { |
| cgrp_dfl_root_visible = true; |
| root = &cgrp_dfl_root; |
| cgroup_get(&root->cgrp); |
| ret = 0; |
| goto out_unlock; |
| } |
| |
| /* |
| * Destruction of cgroup root is asynchronous, so subsystems may |
| * still be dying after the previous unmount. Let's drain the |
| * dying subsystems. We just need to ensure that the ones |
| * unmounted previously finish dying and don't care about new ones |
| * starting. Testing ref liveliness is good enough. |
| */ |
| for_each_subsys(ss, i) { |
| if (!(opts.subsys_mask & (1 << i)) || |
| ss->root == &cgrp_dfl_root) |
| continue; |
| |
| if (!percpu_ref_tryget_live(&ss->root->cgrp.self.refcnt)) { |
| mutex_unlock(&cgroup_mutex); |
| msleep(10); |
| ret = restart_syscall(); |
| goto out_free; |
| } |
| cgroup_put(&ss->root->cgrp); |
| } |
| |
| for_each_root(root) { |
| bool name_match = false; |
| |
| if (root == &cgrp_dfl_root) |
| continue; |
| |
| /* |
| * If we asked for a name then it must match. Also, if |
| * name matches but subsys_mask doesn't, we should fail. |
| * Remember whether name matched. |
| */ |
| if (opts.name) { |
| if (strcmp(opts.name, root->name)) |
| continue; |
| name_match = true; |
| } |
| |
| /* |
| * If we asked for subsystems (or explicitly for no |
| * subsystems) then they must match. |
| */ |
| if ((opts.subsys_mask || opts.none) && |
| (opts.subsys_mask != root->subsys_mask)) { |
| if (!name_match) |
| continue; |
| ret = -EBUSY; |
| goto out_unlock; |
| } |
| |
| if (root->flags ^ opts.flags) |
| pr_warn("new mount options do not match the existing superblock, will be ignored\n"); |
| |
| /* |
| * We want to reuse @root whose lifetime is governed by its |
| * ->cgrp. Let's check whether @root is alive and keep it |
| * that way. As cgroup_kill_sb() can happen anytime, we |
| * want to block it by pinning the sb so that @root doesn't |
| * get killed before mount is complete. |
| * |
| * With the sb pinned, tryget_live can reliably indicate |
| * whether @root can be reused. If it's being killed, |
| * drain it. We could use a wait queue for the wait, but this |
| * path is super cold. Let's just sleep a bit and retry. |
| */ |
| pinned_sb = kernfs_pin_sb(root->kf_root, NULL); |
| if (IS_ERR(pinned_sb) || |
| !percpu_ref_tryget_live(&root->cgrp.self.refcnt)) { |
| mutex_unlock(&cgroup_mutex); |
| if (!IS_ERR_OR_NULL(pinned_sb)) |
| deactivate_super(pinned_sb); |
| msleep(10); |
| ret = restart_syscall(); |
| goto out_free; |
| } |
| |
| ret = 0; |
| goto out_unlock; |
| } |
| |
| /* |
| * No such thing, create a new one. name= matching without subsys |
| * specification is allowed for already existing hierarchies but we |
| * can't create a new one without a subsys specification. |
| */ |
| if (!opts.subsys_mask && !opts.none) { |
| ret = -EINVAL; |
| goto out_unlock; |
| } |
| |
| root = kzalloc(sizeof(*root), GFP_KERNEL); |
| if (!root) { |
| ret = -ENOMEM; |
| goto out_unlock; |
| } |
| |
| init_cgroup_root(root, &opts); |
| |
| ret = cgroup_setup_root(root, opts.subsys_mask); |
| if (ret) |
| cgroup_free_root(root); |
| |
| out_unlock: |
| mutex_unlock(&cgroup_mutex); |
| out_free: |
| kfree(opts.release_agent); |
| kfree(opts.name); |
| |
| if (ret) |
| return ERR_PTR(ret); |
| |
| dentry = kernfs_mount(fs_type, flags, root->kf_root, |
| CGROUP_SUPER_MAGIC, &new_sb); |
| if (IS_ERR(dentry) || !new_sb) |
| cgroup_put(&root->cgrp); |
| |
| /* |
| * If @pinned_sb, we're reusing an existing root and holding an |
| * extra ref on its sb. Mount is complete. Put the extra ref. |
| */ |
| if (pinned_sb) { |
| WARN_ON(new_sb); |
| deactivate_super(pinned_sb); |
| } |
| |
| return dentry; |
| } |
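| |
| /* |
| * For reference, a legacy hierarchy is typically set up from userspace |
| * with something like the following (illustrative only; the available |
| * controller names depend on the kernel configuration): |
| * |
| * mount -t cgroup -o cpu,cpuacct none /sys/fs/cgroup/cpu |
| * mount -t cgroup -o none,name=mygroup none /mnt/mygroup |
| * |
| * Both end up in cgroup_mount() above with the option string as @data |
| * for parse_cgroupfs_options(). |
| */ |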
| |
| static void cgroup_kill_sb(struct super_block *sb) |
| { |
| struct kernfs_root *kf_root = kernfs_root_from_sb(sb); |
| struct cgroup_root *root = cgroup_root_from_kf(kf_root); |
| |
| /* |
| * If @root doesn't have any mounts or children, start killing it. |
| * This prevents new mounts by disabling percpu_ref_tryget_live(). |
| * cgroup_mount() may wait for @root's release. |
| * |
| * And don't kill the default root. |
| */ |
| if (!list_empty(&root->cgrp.self.children) || |
| root == &cgrp_dfl_root) |
| cgroup_put(&root->cgrp); |
| else |
| percpu_ref_kill(&root->cgrp.self.refcnt); |
| |
| kernfs_kill_sb(sb); |
| } |
| |
| static struct file_system_type cgroup_fs_type = { |
| .name = "cgroup", |
| .mount = cgroup_mount, |
| .kill_sb = cgroup_kill_sb, |
| }; |
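| |
| /* |
| * cgroup_fs_type is assumed to be handed to the VFS during cgroup |
| * initialization, roughly as sketched below (the actual call site is |
| * later in this file): |
| * |
| * register_filesystem(&cgroup_fs_type); |
| */ |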
| |
| /** |
| * task_cgroup_path - cgroup path of a task in the first cgroup hierarchy |
| * @task: target task |
| * @buf: the buffer to write the path into |
| * @buflen: the length of the buffer |
| * |
| * Determine @task's cgroup on the first (the one with the lowest non-zero |
| * hierarchy_id) cgroup hierarchy and copy its path into @buf. This |
| * function grabs cgroup_mutex and shouldn't be used inside locks used by |
| * cgroup controller callbacks. |
| * |
| * Return value is the same as kernfs_path(). |
| */ |
| char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen) |
| { |
| struct cgroup_root *root; |
| struct cgroup *cgrp; |
| int hierarchy_id = 1; |
| char *path = NULL; |
| |
| mutex_lock(&cgroup_mutex); |
| down_read(&css_set_rwsem); |
| |
| root = idr_get_next(&cgroup_hierarchy_idr, &hierarchy_id); |
| |
| if (root) { |
| cgrp = task_cgroup_from_root(task, root); |
| path = cgroup_path(cgrp, buf, buflen); |
| } else { |
| /* if no hierarchy exists, everyone is in "/" */ |
| if (strlcpy(buf, "/", buflen) < buflen) |
| path = buf; |
| } |
| |
| up_read(&css_set_rwsem); |
| mutex_unlock(&cgroup_mutex); |
| return path; |
| } |
| EXPORT_SYMBOL_GPL(task_cgroup_path); |
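| |
| /* |
| * A hypothetical caller would allocate a buffer and use the returned |
| * pointer directly, e.g.: |
| * |
| * char *buf = kmalloc(PATH_MAX, GFP_KERNEL); |
| * |
| * if (buf && task_cgroup_path(task, buf, PATH_MAX)) |
| * pr_info("task %d: %s\n", task_pid_nr(task), buf); |
| * kfree(buf); |
| */ |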
| |
| /* used to track tasks and other necessary states during migration */ |
| struct cgroup_taskset { |
| /* the src and dst cset list running through cset->mg_node */ |
| struct list_head src_csets; |
| struct list_head dst_csets; |
| |
| /* |
| * Fields for cgroup_taskset_*() iteration. |
| * |
| * Before migration is committed, the target migration tasks are on |
| * ->mg_tasks of the csets on ->src_csets. After, on ->mg_tasks of |
| * the csets on ->dst_csets. ->csets points to either ->src_csets |
| * or ->dst_csets depending on whether migration is committed. |
| * |
| * ->cur_cset and ->cur_task point to the current task position |
| * during iteration. |
| */ |
| struct list_head *csets; |
| struct css_set *cur_cset; |
| struct task_struct *cur_task; |
| }; |
| |
| /** |
| * cgroup_taskset_first - reset taskset and return the first task |
| * @tset: taskset of interest |
| * |
| * @tset iteration is initialized and the first task is returned. |
| */ |
| struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset) |
| { |
| tset->cur_cset = list_first_entry(tset->csets, struct css_set, mg_node); |
| tset->cur_task = NULL; |
| |
| return cgroup_taskset_next(tset); |
| } |
| |
| /** |
| * cgroup_taskset_next - iterate to the next task in taskset |
| * @tset: taskset of interest |
| * |
| * Return the next task in @tset. Iteration must have been initialized |
| * with cgroup_taskset_first(). |
| */ |
| struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset) |
| { |
| struct css_set *cset = tset->cur_cset; |
| struct task_struct *task = tset->cur_task; |
| |
| while (&cset->mg_node != tset->csets) { |
| if (!task) |
| task = list_first_entry(&cset->mg_tasks, |
| struct task_struct, cg_list); |
| else |
| task = list_next_entry(task, cg_list); |
| |
| if (&task->cg_list != &cset->mg_tasks) { |
| tset->cur_cset = cset; |
| tset->cur_task = task; |
| return task; |
| } |
| |
| cset = list_next_entry(cset, mg_node); |
| task = NULL; |
| } |
| |
| return NULL; |
| } |
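| |
| /* |
| * Controllers typically consume a taskset from their ->can_attach() or |
| * ->attach() callbacks with a loop like the following sketch |
| * (do_something_with() is a made-up placeholder): |
| * |
| * struct task_struct *task; |
| * |
| * for (task = cgroup_taskset_first(tset); task; |
| * task = cgroup_taskset_next(tset)) |
| * do_something_with(task); |
| */ |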
| |
| /** |
| * cgroup_task_migrate - move a task from one cgroup to another. |
| * @old_cgrp: the cgroup @tsk is being migrated from |
| * @tsk: the task being migrated |
| * @new_cset: the new css_set @tsk is being attached to |
| * |
| * Must be called with cgroup_mutex, threadgroup and css_set_rwsem locked. |
| */ |
| static void cgroup_task_migrate(struct cgroup *old_cgrp, |
| struct task_struct *tsk, |
| struct css_set *new_cset) |
| { |
| struct css_set *old_cset; |
| |
| lockdep_assert_held(&cgroup_mutex); |
| lockdep_assert_held(&css_set_rwsem); |
| |
| /* |
| * We are synchronized through threadgroup_lock() against PF_EXITING |
| * setting such that we can't race against cgroup_exit() changing the |
| * css_set to init_css_set and dropping the old one. |
| */ |
| WARN_ON_ONCE(tsk->flags & PF_EXITING); |
| old_cset = task_css_set(tsk); |
| |
| get_css_set(new_cset); |
| rcu_assign_pointer(tsk->cgroups, new_cset); |
| |
| /* |
| * Use move_tail so that cgroup_taskset_first() still returns the |
| * leader after migration. This works because cgroup_migrate() |
| * ensures that the dst_cset of the leader is the first on the |
| * tset's dst_csets list. |
| */ |
| list_move_tail(&tsk->cg_list, &new_cset->mg_tasks); |
| |
| /* |
| * We just gained a reference on old_cset by taking it from the |
| * task. As trading it for new_cset is protected by cgroup_mutex, |
| * we're safe to drop it here; it will be freed under RCU. |
| */ |
| put_css_set_locked(old_cset); |
| } |
| |
| /** |
| * cgroup_migrate_finish - cleanup after attach |
| * @preloaded_csets: list of preloaded css_sets |
| * |
| * Undo cgroup_migrate_add_src() and cgroup_migrate_prepare_dst(). See |
| * those functions for details. |
| */ |
| static void cgroup_migrate_finish(struct list_head *preloaded_csets) |
| { |
| struct css_set *cset, *tmp_cset; |
| |
| lockdep_assert_held(&cgroup_mutex); |
| |
| down_write(&css_set_rwsem); |
| list_for_each_entry_safe(cset, tmp_cset, preloaded_csets, mg_preload_node) { |
| cset->mg_src_cgrp = NULL; |
| cset->mg_dst_cset = NULL; |
| list_del_init(&cset->mg_preload_node); |
| put_css_set_locked(cset); |
| } |
| up_write(&css_set_rwsem); |
| } |
| |
| /** |
| * cgroup_migrate_add_src - add a migration source css_set |
| * @src_cset: the source css_set to add |
| * @dst_cgrp: the destination cgroup |
| * @preloaded_csets: list of preloaded css_sets |
| * |
| * Tasks belonging to @src_cset are about to be migrated to @dst_cgrp. Pin |
| * @src_cset and add it to @preloaded_csets, which should later be cleaned |
| * up by cgroup_migrate_finish(). |
| * |
| * This function may be called without holding threadgroup_lock even if the |
| * target is a process. Threads may be created and destroyed but as long |
| * as cgroup_mutex is not dropped, no new css_set can be put into play and |
| * the preloaded css_sets are guaranteed to cover all migrations. |
| */ |
| static void cgroup_migrate_add_src(struct css_set *src_cset, |
| struct cgroup *dst_cgrp, |
| struct list_head *preloaded_csets) |
| { |
| struct cgroup *src_cgrp; |
| |
| lockdep_assert_held(&cgroup_mutex); |
| lockdep_assert_held(&css_set_rwsem); |
| |
| src_cgrp = cset_cgroup_from_root(src_cset, dst_cgrp->root); |
| |
| if (!list_empty(&src_cset->mg_preload_node)) |
| return; |
| |
| WARN_ON(src_cset->mg_src_cgrp); |
| WARN_ON(!list_empty(&src_cset->mg_tasks)); |
| WARN_ON(!list_empty(&src_cset->mg_node)); |
| |
| src_cset->mg_src_cgrp = src_cgrp; |
| get_css_set(src_cset); |
| list_add(&src_cset->mg_preload_node, preloaded_csets); |
| } |
| |
| /** |
| * cgroup_migrate_prepare_dst - prepare destination css_sets for migration |
| * @dst_cgrp: the destination cgroup (may be %NULL) |
| * @preloaded_csets: list of preloaded source css_sets |
| * |
| * Tasks are about to be moved to @dst_cgrp and all the source css_sets |
| * have been preloaded to @preloaded_csets. This function looks up and |
| * pins all destination css_sets, links each to its source, and appends them |
| * to @preloaded_csets. If @dst_cgrp is %NULL, the destination of each |
| * source css_set is assumed to be its cgroup on the default hierarchy. |
| * |
| * This function must be called after cgroup_migrate_add_src() has been |
| * called on each migration source css_set. After migration is performed |
| * using cgroup_migrate(), cgroup_migrate_finish() must be called on |
| * @preloaded_csets. |
| */ |
| static int cgroup_migrate_prepare_dst(struct cgroup *dst_cgrp, |
| struct list_head *preloaded_csets) |
| { |
| LIST_HEAD(csets); |
| struct css_set *src_cset, *tmp_cset; |
| |
| lockdep_assert_held(&cgroup_mutex); |
| |
| /* |
| * Except for the root, child_subsys_mask must be zero for a cgroup |
| * with tasks so that child cgroups don't compete against tasks. |
| */ |
| if (dst_cgrp && cgroup_on_dfl(dst_cgrp) && cgroup_parent(dst_cgrp) && |
| dst_cgrp->child_subsys_mask) |
| return -EBUSY; |
| |
| /* look up the dst cset for each src cset and link it to src */ |
| list_for_each_entry_safe(src_cset, tmp_cset, preloaded_csets, mg_preload_node) { |
| struct css_set *dst_cset; |
| |
| dst_cset = find_css_set(src_cset, |
| dst_cgrp ?: src_cset->dfl_cgrp); |
| if (!dst_cset) |
| goto err; |
| |
| WARN_ON_ONCE(src_cset->mg_dst_cset || dst_cset->mg_dst_cset); |
| |
| /* |
| * If the src cset equals the dst, it's a noop. Drop the src. |
| * cgroup_migrate() will skip the cset too. Note that we |
| * can't handle src == dst as some nodes are used by both. |
| */ |
| if (src_cset == dst_cset) { |
| src_cset->mg_src_cgrp = NULL; |
| list_del_init(&src_cset->mg_preload_node); |
| put_css_set(src_cset); |
| put_css_set(dst_cset); |
| continue; |
| } |
| |
| src_cset->mg_dst_cset = dst_cset; |
| |
| if (list_empty(&dst_cset->mg_preload_node)) |
| list_add(&dst_cset->mg_preload_node, &csets); |
| else |
| put_css_set(dst_cset); |
| } |
| |
| list_splice_tail(&csets, preloaded_csets); |
| return 0; |
| err: |
| cgroup_migrate_finish(&csets); |
| return -ENOMEM; |
| } |
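| |
| /* |
| * Taken together, a migration is driven by the following sequence; see |
| * cgroup_attach_task() below for the canonical caller: |
| * |
| * down_read(&css_set_rwsem); |
| * cgroup_migrate_add_src(task_css_set(task), dst_cgrp, &preloaded_csets); |
| * up_read(&css_set_rwsem); |
| * |
| * ret = cgroup_migrate_prepare_dst(dst_cgrp, &preloaded_csets); |
| * if (!ret) |
| * ret = cgroup_migrate(dst_cgrp, task, threadgroup); |
| * cgroup_migrate_finish(&preloaded_csets); |
| */ |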
| |
| /** |
| * cgroup_migrate - migrate a process or task to a cgroup |
| * @cgrp: the destination cgroup |
| * @leader: the leader of the process or the task to migrate |
| * @threadgroup: whether @leader points to the whole process or a single task |
| * |
| * Migrate a process or task denoted by @leader to @cgrp. If migrating a |
| * process, the caller must be holding threadgroup_lock of @leader. The |
| * caller is also responsible for invoking cgroup_migrate_add_src() and |
| * cgroup_migrate_prepare_dst() on the targets before invoking this |
| * function and following up with cgroup_migrate_finish(). |
| * |
| * As long as a controller's ->can_attach() doesn't fail, this function is |
| * guaranteed to succeed. This means that, excluding ->can_attach() |
| * failure, when migrating multiple targets, the success or failure can be |
| * decided for all targets by invoking cgroup_migrate_prepare_dst() before |
| * actually starting the migration. |
| */ |
| static int cgroup_migrate(struct cgroup *cgrp, struct task_struct *leader, |
| bool threadgroup) |
| { |
| struct cgroup_taskset tset = { |
| .src_csets = LIST_HEAD_INIT(tset.src_csets), |
| .dst_csets = LIST_HEAD_INIT(tset.dst_csets), |
| .csets = &tset.src_csets, |
| }; |
| struct cgroup_subsys_state *css, *failed_css = NULL; |
| struct css_set *cset, *tmp_cset; |
| struct task_struct *task, *tmp_task; |
| int i, ret; |
| |
| /* |
| * Prevent freeing of tasks while we take a snapshot. Tasks that are |
| * already PF_EXITING could be freed from underneath us unless we |
| * take an rcu_read_lock. |
| */ |
| down_write(&css_set_rwsem); |
| rcu_read_lock(); |
| task = leader; |
| do { |
| /* @task either already exited or can't exit until the end */ |
| if (task->flags & PF_EXITING) |
| goto next; |
| |
| /* leave @task alone if post_fork() hasn't linked it yet */ |
| if (list_empty(&task->cg_list)) |
| goto next; |
| |
| cset = task_css_set(task); |
| if (!cset->mg_src_cgrp) |
| goto next; |
| |
| /* |
| * cgroup_taskset_first() must always return the leader. |
| * Take care to avoid disturbing the ordering. |
| */ |
| list_move_tail(&task->cg_list, &cset->mg_tasks); |
| if (list_empty(&cset->mg_node)) |
| list_add_tail(&cset->mg_node, &tset.src_csets); |
| if (list_empty(&cset->mg_dst_cset->mg_node)) |
| list_move_tail(&cset->mg_dst_cset->mg_node, |
| &tset.dst_csets); |
| next: |
| if (!threadgroup) |
| break; |
| } while_each_thread(leader, task); |
| rcu_read_unlock(); |
| up_write(&css_set_rwsem); |
| |
| /* methods shouldn't be called if no task is actually migrating */ |
| if (list_empty(&tset.src_csets)) |
| return 0; |
| |
| /* check that we can legitimately attach to the cgroup */ |
| for_each_e_css(css, i, cgrp) { |
| if (css->ss->can_attach) { |
| ret = css->ss->can_attach(css, &tset); |
| if (ret) { |
| failed_css = css; |
| goto out_cancel_attach; |
| } |
| } |
| } |
| |
| /* |
| * Now that we're guaranteed success, proceed to move all tasks to |
| * the new cgroup. There are no failure cases after here, so this |
| * is the commit point. |
| */ |
| down_write(&css_set_rwsem); |
| list_for_each_entry(cset, &tset.src_csets, mg_node) { |
| list_for_each_entry_safe(task, tmp_task, &cset->mg_tasks, cg_list) |
| cgroup_task_migrate(cset->mg_src_cgrp, task, |
| cset->mg_dst_cset); |
| } |
| up_write(&css_set_rwsem); |
| |
| /* |
| * Migration is committed, all target tasks are now on dst_csets. |
| * Nothing is sensitive to fork() after this point. Notify |
| * controllers that migration is complete. |
| */ |
| tset.csets = &tset.dst_csets; |
| |
| for_each_e_css(css, i, cgrp) |
| if (css->ss->attach) |
| css->ss->attach(css, &tset); |
| |
| ret = 0; |
| goto out_release_tset; |
| |
| out_cancel_attach: |
| for_each_e_css(css, i, cgrp) { |
| if (css == failed_css) |
| break; |
| if (css->ss->cancel_attach) |
| css->ss->cancel_attach(css, &tset); |
| } |
| out_release_tset: |
| down_write(&css_set_rwsem); |
| list_splice_init(&tset.dst_csets, &tset.src_csets); |
| list_for_each_entry_safe(cset, tmp_cset, &tset.src_csets, mg_node) { |
| list_splice_tail_init(&cset->mg_tasks, &cset->tasks); |
| list_del_init(&cset->mg_node); |
| } |
| up_write(&css_set_rwsem); |
| return ret; |
| } |
| |
| /** |
| * cgroup_attach_task - attach a task or a whole threadgroup to a cgroup |
| * @dst_cgrp: the cgroup to attach to |
| * @leader: the task or the leader of the threadgroup to be attached |
| * @threadgroup: attach the whole threadgroup? |
| * |
| * Call holding cgroup_mutex and threadgroup_lock of @leader. |
| */ |
| static int cgroup_attach_task(struct cgroup *dst_cgrp, |
| struct task_struct *leader, bool threadgroup) |
| { |
| LIST_HEAD(preloaded_csets); |
| struct task_struct *task; |
| int ret; |
| |
| /* look up all src csets */ |
| down_read(&css_set_rwsem); |
| rcu_read_lock(); |
| task = leader; |
| do { |
| cgroup_migrate_add_src(task_css_set(task), dst_cgrp, |
| &preloaded_csets); |
| if (!threadgroup) |
| break; |
| } while_each_thread(leader, task); |
| rcu_read_unlock(); |
| up_read(&css_set_rwsem); |
| |
| /* prepare dst csets and commit */ |
| ret = cgroup_migrate_prepare_dst(dst_cgrp, &preloaded_csets); |
| if (!ret) |
| ret = cgroup_migrate(dst_cgrp, leader, threadgroup); |
| |
| cgroup_migrate_finish(&preloaded_csets); |
| return ret; |
| } |
| |
| static int cgroup_procs_write_permission(struct task_struct *task, |
| struct cgroup *dst_cgrp, |
| struct kernfs_open_file *of) |
| { |
| const struct cred *cred = current_cred(); |
| const struct cred *tcred = get_task_cred(task); |
| int ret = 0; |
| |
| /* |
| * even if we're attaching all tasks in the thread group, we only |
| * need to check permissions on one of them. |
| */ |
| if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) && |
| !uid_eq(cred->euid, tcred->uid) && |
| !uid_eq(cred->euid, tcred->suid)) |
| ret = -EACCES; |
| |
| if (!ret && cgroup_on_dfl(dst_cgrp)) { |
| struct super_block *sb = of->file->f_path.dentry->d_sb; |
| struct cgroup *cgrp; |
| struct inode *inode; |
| |
| down_read(&css_set_rwsem); |
| cgrp = task_cgroup_from_root(task, &cgrp_dfl_root); |
| up_read(&css_set_rwsem); |
| |
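| /* walk up to the common ancestor of @dst_cgrp and the task's cgroup */ |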
| while (!cgroup_is_descendant(dst_cgrp, cgrp)) |
| cgrp = cgroup_parent(cgrp); |
| |
| ret = -ENOMEM; |
| inode = kernfs_get_inode(sb, cgrp->procs_kn); |
| if (inode) { |
| ret = inode_permission(inode, MAY_WRITE); |
| iput(inode); |
| } |
| } |
| |
| put_cred(tcred); |
| return ret; |
| } |
| |
| /* |
| * Find the task_struct of the task to attach by vpid and pass it along to the |
| * function to attach either it or all tasks in its threadgroup. Will lock |
| * cgroup_mutex and threadgroup. |
| */ |
| static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf, |
| size_t nbytes, loff_t off, bool threadgroup) |
| { |
| struct task_struct *tsk; |
| struct cgroup *cgrp; |
| pid_t pid; |
| int ret; |
| |
| if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0) |
| return -EINVAL; |
| |
| cgrp = cgroup_kn_lock_live(of->kn); |
| if (!cgrp) |
| return -ENODEV; |
| |
| retry_find_task: |
| rcu_read_lock(); |
| if (pid) { |
| tsk = find_task_by_vpid(pid); |
| if (!tsk) { |
| rcu_read_unlock(); |
| ret = -ESRCH; |
| goto out_unlock_cgroup; |
| } |
| } else { |
| tsk = current; |
| } |
| |
| if (threadgroup) |
| tsk = tsk->group_leader; |
| |
| /* |
| * Workqueue threads may acquire PF_NO_SETAFFINITY and become |
| * trapped in a cpuset, or an RT worker may be born in a cgroup |
| * with no rt_runtime allocated. Just say no. |
| */ |
| if (tsk == kthreadd_task || (tsk->flags & PF_NO_SETAFFINITY)) { |
| ret = -EINVAL; |
| rcu_read_unlock(); |
| goto out_unlock_cgroup; |
| } |
| |
| get_task_struct(tsk); |
| rcu_read_unlock(); |
| |
| threadgroup_lock(tsk); |
| if (threadgroup) { |
| if (!thread_group_leader(tsk)) { |
| /* |
| * A race with de_thread() from another thread's exec() |
| * may strip us of our leadership. If this happens, |
| * there is no choice but to throw this task away and |
| * try again; this is |
| * "double-double-toil-and-trouble-check locking". |
| */ |
| threadgroup_unlock(tsk); |
| put_task_struct(tsk); |
| goto retry_find_task; |
| } |
| } |
| |
| ret = cgroup_procs_write_permission(tsk, cgrp, of); |
| if (!ret) |
| ret = cgroup_attach_task(cgrp, tsk, threadgroup); |
| |
| threadgroup_unlock(tsk); |
| |
| put_task_struct(tsk); |
| out_unlock_cgroup: |
| cgroup_kn_unlock(of->kn); |
| return ret ?: nbytes; |
| } |
| |
| /** |
| * cgroup_attach_task_all - attach task 'tsk' to all cgroups of task 'from' |
| * @from: attach to all cgroups of a given task |
| * @tsk: the task to be attached |
| */ |
| int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk) |
| { |
| struct cgroup_root *root; |
| int retval = 0; |
| |
| mutex_lock(&cgroup_mutex); |
| for_each_root(root) { |
| struct cgroup *from_cgrp; |
| |
| if (root == &cgrp_dfl_root) |
| continue; |
| |
| down_read(&css_set_rwsem); |
| from_cgrp = task_cgroup_from_root(from, root); |
| up_read(&css_set_rwsem); |
| |
| retval = cgroup_attach_task(from_cgrp, tsk, false); |
| if (retval) |
| break; |
| } |
| mutex_unlock(&cgroup_mutex); |
| |
| return retval; |
| } |
| EXPORT_SYMBOL_GPL(cgroup_attach_task_all); |
| |
| static ssize_t cgroup_tasks_write(struct kernfs_open_file *of, |
| char *buf, size_t nbytes, loff_t off) |
| { |
| return __cgroup_procs_write(of, buf, nbytes, off, false); |
| } |
| |
| static ssize_t cgroup_procs_write(struct kernfs_open_file *of, |
| char *buf, size_t nbytes, loff_t off) |
| { |
| return __cgroup_procs_write(of, buf, nbytes, off, true); |
| } |
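| |
| /* |
| * Illustrative shell usage of the two files above (paths abbreviated): |
| * |
| * echo $$ > /sys/fs/cgroup/.../tasks # move only the writing task |
| * echo $$ > /sys/fs/cgroup/.../cgroup.procs # move its whole threadgroup |
| * |
| * A PID of 0 means the writing task itself, cf. __cgroup_procs_write(). |
| */ |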
| |
| static ssize_t cgroup_release_agent_write(struct kernfs_open_file *of, |
| char *buf, size_t nbytes, loff_t off) |
| { |
| struct cgroup *cgrp; |
| |
| BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX); |
| |
| cgrp = cgroup_kn_lock_live(of->kn); |
| if (!cgrp) |
| return -ENODEV; |
| spin_lock(&release_agent_path_lock); |
| strlcpy(cgrp->root->release_agent_path, strstrip(buf), |
| sizeof(cgrp->root->release_agent_path)); |
| spin_unlock(&release_agent_path_lock); |
| cgroup_kn_unlock(of->kn); |
| return nbytes; |
| } |
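| |
| /* |
| * Illustrative usage (the agent path is made up): |
| * |
| * echo /sbin/my_release_agent > /sys/fs/cgroup/.../release_agent |
| * |
| * stores the helper that is spawned when a notify-on-release cgroup |
| * becomes empty. |
| */ |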
| |
| static int cgroup_release_agent_show(struct seq_file *seq, void *v) |
| { |
| struct cgroup *cgrp = seq_css(seq)->cgroup; |
| |
| spin_lock(&release_agent_path_lock); |
| seq_puts(seq, cgrp->root->release_agent_path); |
| spin_unlock(&release_agent_path_lock); |
| seq_putc(seq, '\n'); |
| return 0; |
| } |
| |
| static int cgroup_sane_behavior_show(struct seq_file *seq, void *v) |
| { |
| seq_puts(seq, "0\n"); |
| return 0; |
| } |
| |
| static void cgroup_print_ss_mask(struct seq_file *seq, unsigned long ss_mask) |
| { |
| struct cgroup_subsys *ss; |
| bool printed = false; |
| int ssid; |
| |
| for_each_subsys_which(ss, ssid, &ss_mask) { |
| if (printed) |
| seq_putc(seq, ' '); |
| seq_printf(seq, "%s", ss->name); |
| printed = true; |
| } |
| if (printed) |
| seq_putc(seq, '\n'); |
| } |
| |
| /* show controllers which are currently attached to the default hierarchy */ |
| static int cgroup_root_controllers_show(struct seq_file *seq, void *v) |
| { |
| struct cgroup *cgrp = seq_css(seq)->cgroup; |
| |
| cgroup_print_ss_mask(seq, cgrp->root->subsys_mask & |
| ~cgrp_dfl_root_inhibit_ss_mask); |
| return 0; |
| } |
| |
| /* show controllers which are enabled from the parent */ |
| static int cgroup_controllers_show(struct seq_file *seq, void *v) |
| { |
| struct cgroup *cgrp = seq_css(seq)->cgroup; |
| |
| cgroup_print_ss_mask(seq, cgroup_parent(cgrp)->subtree_control); |
| return 0; |
| } |
| |
| /* show controllers which are enabled for a given cgroup's children */ |
| static int cgroup_subtree_control_show(struct seq_file *seq, void *v) |
| { |
| struct cgroup *cgrp = seq_css(seq)->cgroup; |
| |
| cgroup_print_ss_mask(seq, cgrp->subtree_control); |
| return 0; |
| } |
| |
| /** |
| * cgroup_update_dfl_csses - update css assoc of a subtree in default hierarchy |
| * @cgrp: root of the subtree to update csses for |
| * |
| * @cgrp's child_subsys_mask has changed and its subtree's (self excluded) |
| * css associations need to be updated accordingly. This function looks up |
| * all css_sets which are attached to the subtree, creates the matching |
| * updated css_sets and migrates the tasks to the new ones. |
| */ |
| static int cgroup_update_dfl_csses(struct cgroup *cgrp) |
| { |
| LIST_HEAD(preloaded_csets); |
| struct cgroup_subsys_state *css; |
| struct css_set *src_cset; |
| int ret; |
| |
| lockdep_assert_held(&cgroup_mutex); |
| |
| /* look up all csses currently attached to @cgrp's subtree */ |
| down_read(&css_set_rwsem); |
| css_for_each_descendant_pre(css, cgroup_css(cgrp, NULL)) { |
| struct cgrp_cset_link *link; |
| |
| /* self is not affected by child_subsys_mask change */ |
| if (css->cgroup == cgrp) |
| continue; |
| |
| list_for_each_entry(link, &css->cgroup->cset_links, cset_link) |
| cgroup_migrate_add_src(link->cset, cgrp, |
| &preloaded_csets); |
| } |
| up_read(&css_set_rwsem); |
| |
| /* NULL dst indicates self on default hierarchy */ |
| ret = cgroup_migrate_prepare_dst(NULL, &preloaded_csets); |
| if (ret) |
| goto out_finish; |
| |
| list_for_each_entry(src_cset, &preloaded_csets, mg_preload_node) { |
| struct task_struct *last_task = NULL, *task; |
| |
| /* src_csets precede dst_csets, break on the first dst_cset */ |
| if (!src_cset->mg_src_cgrp) |
| break; |
| |
| /* |
| * All tasks in src_cset need to be migrated to the |
| * matching dst_cset. Empty it process by process. We |
| * walk tasks but migrate processes. The leader might even |
| * belong to a different cset but such a src_cset would also |
| * be among the target src_csets because the default |
| * hierarchy enforces per-process membership. |
| */ |
| while (true) { |
| down_read(&css_set_rwsem); |
| task = list_first_entry_or_null(&src_cset->tasks, |
| struct task_struct, cg_list); |
| if (task) { |
| task = task->group_leader; |
| WARN_ON_ONCE(!task_css_set(task)->mg_src_cgrp); |
| get_task_struct(task); |
| } |
| up_read(&css_set_rwsem); |
| |
| if (!task) |
| break; |
| |
| /* guard against possible infinite loop */ |
| if (WARN(last_task == task, |
| "cgroup: update_dfl_csses failed to make progress, aborting in inconsistent state\n")) |
| goto out_finish; |
| last_task = task; |
| |
| threadgroup_lock(task); |
| /* raced against de_thread() from another thread? */ |
| if (!thread_group_leader(task)) { |
| threadgroup_unlock(task); |
| put_task_struct(task); |
| continue; |
| } |
| |
| ret = cgroup_migrate(src_cset->dfl_cgrp, task, true); |
| |
| threadgroup_unlock(task); |
| put_task_struct(task); |
| |
| if (WARN(ret, "cgroup: failed to update controllers for the default hierarchy (%d), further operations may crash or hang\n", ret)) |
| goto out_finish; |
| } |
| } |
| |
| out_finish: |
| cgroup_migrate_finish(&preloaded_csets); |
| return ret; |
| } |
| |
| /* change the enabled child controllers for a cgroup in the default hierarchy */ |
| static ssize_t cgroup_subtree_control_write(struct kernfs_open_file *of, |
| char *buf, size_t nbytes, |
| loff_t off) |
| { |
| unsigned long enable = 0, disable = 0; |
| unsigned long css_enable, css_disable, old_sc, new_sc, old_ss, new_ss; |
| struct cgroup *cgrp, *child; |
| struct cgroup_subsys *ss; |
| char *tok; |
| int ssid, ret; |
| |
| /* |
| * Parse input - space separated list of subsystem names prefixed |
| * with either + or -. |
| */ |
| buf = strstrip(buf); |
| while ((tok = strsep(&buf, " "))) { |
| unsigned long tmp_ss_mask = ~cgrp_dfl_root_inhibit_ss_mask; |
| |
| if (tok[0] == '\0') |
| continue; |
| for_each_subsys_which(ss, ssid, &tmp_ss_mask) { |
| if (ss->disabled || strcmp(tok + 1, ss->name)) |
| continue; |
| |
| if (*tok == '+') { |
| enable |= 1 << ssid; |
| disable &= ~(1 << ssid); |
| } else if (*tok == '-') { |
| disable |= 1 << ssid; |
| enable &= ~(1 << ssid); |
| } else { |
| return -EINVAL; |
| } |
| break; |
| } |
| if (ssid == CGROUP_SUBSYS_COUNT) |
| return -EINVAL; |
| } |
| |
| cgrp = cgroup_kn_lock_live(of->kn); |
| if (!cgrp) |
| return -ENODEV; |
| |
| for_each_subsys(ss, ssid) { |
| if (enable & (1 << ssid)) { |
| if (cgrp->subtree_control & (1 << ssid)) { |
| enable &= ~(1 << ssid); |
| continue; |
| } |
| |
| /* unavailable or not enabled on the parent? */ |
| if (!(cgrp_dfl_root.subsys_mask & (1 << ssid)) || |
| (cgroup_parent(cgrp) && |
| !(cgroup_parent(cgrp)->subtree_control & (1 << ssid)))) { |
| ret = -ENOENT; |
| goto out_unlock; |
| } |
| } else if (disable & (1 << ssid)) { |
| if (!(cgrp->subtree_control & (1 << ssid))) { |
| disable &= ~(1 << ssid); |
| continue; |
| } |
| |
| /* a child has it enabled? */ |
| cgroup_for_each_live_child(child, cgrp) { |
| if (child->subtree_control & (1 << ssid)) { |
| ret = -EBUSY; |
| goto out_unlock; |
| } |
| } |
| } |
| } |
| |
| if (!enable && !disable) { |
| ret = 0; |
| goto out_unlock; |
| } |
| |
| /* |
| * Except for the root, subtree_control must be zero for a cgroup |
| * with tasks so that child cgroups don't compete against tasks. |
| */ |
| if (enable && cgroup_parent(cgrp) && !list_empty(&cgrp->cset_links)) { |
| ret = -EBUSY; |
| goto out_unlock; |
| } |
| |
| /* |
| * Update subsys masks and calculate what needs to be done. More |
| * subsystems than specified may need to be enabled or disabled |
| * depending on subsystem dependencies. |
| */ |
| old_sc = cgrp->subtree_control; |
| old_ss = cgrp->child_subsys_mask; |
| new_sc = (old_sc | enable) & ~disable; |
| new_ss = cgroup_calc_child_subsys_mask(cgrp, new_sc); |
| |
| css_enable = ~old_ss & new_ss; |
| css_disable = old_ss & ~new_ss; |
| enable |= css_enable; |
| disable |= css_disable; |
| |
| /* |
| * Because css offlining is asynchronous, userland might try to |
| * re-enable the same controller while the previous instance is |
| * still around. In such cases, wait till it's gone using |
| * offline_waitq. |
| */ |
| for_each_subsys_which(ss, ssid, &css_enable) { |
| cgroup_for_each_live_child(child, cgrp) { |
| DEFINE_WAIT(wait); |
| |
| if (!cgroup_css(child, ss)) |
| continue; |
| |
| cgroup_get(child); |
| prepare_to_wait(&child->offline_waitq, &wait, |
| TASK_UNINTERRUPTIBLE); |
| cgroup_kn_unlock(of->kn); |
| schedule(); |
| finish_wait(&child->offline_waitq, &wait); |
| cgroup_put(child); |
| |
| return restart_syscall(); |
| } |
| } |
| |
| cgrp->subtree_control = new_sc; |
| cgrp->child_subsys_mask = new_ss; |
| |
| /* |
| * Create new csses or make the existing ones visible. A css is |
| * created invisible if it's being implicitly enabled through |
| * dependency. An invisible css is made visible when the userland |
| * explicitly enables it. |
| */ |
| for_each_subsys(ss, ssid) { |
| if (!(enable & (1 << ssid))) |
| continue; |
| |
| cgroup_for_each_live_child(child, cgrp) { |
| if (css_enable & (1 << ssid)) |
| ret = create_css(child, ss, |
| cgrp->subtree_control & (1 << ssid)); |
| else |
| ret = cgroup_populate_dir(child, 1 << ssid); |
| if (ret) |
| goto err_undo_css; |
| } |
| } |
| |
| /* |
| * At this point, cgroup_e_css() results reflect the new csses |
| * making the following cgroup_update_dfl_csses() properly update |
| * css associations of all tasks in the subtree. |
| */ |
| ret = cgroup_update_dfl_csses(cgrp); |
| if (ret) |
| goto err_undo_css; |
| |
| /* |
| * All tasks are migrated out of disabled csses. Kill or hide |
| * them. A css is hidden when the userland requests it to be |
| * disabled while other subsystems still depend on it. A hidden |
| * css must not actively control resources and must be in its |
| * vanilla state if it's made visible again later. Controllers which may |
| * be depended upon should provide ->css_reset() for this purpose. |
| */ |
| for_each_subsys(ss, ssid) { |
| if (!(disable & (1 << ssid))) |
| continue; |
| |
| cgroup_for_each_live_child(child, cgrp) { |
| struct cgroup_subsys_state *css = cgroup_css(child, ss); |
| |
| if (css_disable & (1 << ssid)) { |
| kill_css(css); |
| } else { |
| cgroup_clear_dir(child, 1 << ssid); |
| if (ss->css_reset) |
| ss->css_reset(css); |
| } |
| } |
| } |
| |
| /* |
| * The effective csses of all the descendants (excluding @cgrp) may |
| * have changed. Subsystems can optionally subscribe to this event |
| * by implementing ->css_e_css_changed() which is invoked if any of |
| * the effective csses seen from the css's cgroup may have changed. |
| */ |
| for_each_subsys(ss, ssid) { |
| struct cgroup_subsys_state *this_css = cgroup_css(cgrp, ss); |
| struct cgroup_subsys_state *css; |
| |
| if (!ss->css_e_css_changed || !this_css) |
| continue; |
| |
| css_for_each_descendant_pre(css, this_css) |
| if (css != this_css) |
| ss->css_e_css_changed(css); |
| } |
| |
| kernfs_activate(cgrp->kn); |
| ret = 0; |
| out_unlock: |
| cgroup_kn_unlock(of->kn); |
| return ret ?: nbytes; |
| |
| err_undo_css: |
| cgrp->subtree_control = old_sc; |
| cgrp->child_subsys_mask = old_ss; |
| |
| for_each_subsys(ss, ssid) { |
| if (!(enable & (1 << ssid))) |
| continue; |
| |
| cgroup_for_each_live_child(child, cgrp) { |
| struct cgroup_subsys_state *css = cgroup_css(child, ss); |
| |
| if (!css) |
| continue; |
| |
| if (css_enable & (1 << ssid)) |
| kill_css(css); |
| else |
| cgroup_clear_dir(child, 1 << ssid); |
| } |
| } |
| goto out_unlock; |
| } |
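| |
| /* |
| * Illustrative shell usage of the file handled above (controller names |
| * depend on what is built in and enabled in the parent): |
| * |
| * echo "+memory -cpu" > /sys/fs/cgroup/.../cgroup.subtree_control |
| * |
| * enables the memory controller and disables the cpu controller for the |
| * children of that cgroup. |
| */ |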
| |
| static int cgroup_populated_show(struct seq_file *seq, void *v) |
| { |
| seq_printf(seq, "%d\n", (bool)seq_css(seq)->cgroup->populated_cnt); |
| return 0; |
| } |
| |
| static ssize_t cgroup_file_write(struct kernfs_open_file *of, char *buf, |
| size_t nbytes, loff_t off) |
| { |
| struct cgroup *cgrp = of->kn->parent->priv; |
| struct cftype *cft = of->kn->priv; |
| struct cgroup_subsys_state *css; |
| int ret; |
| |
| if (cft->write) |
| return cft->write(of, buf, nbytes, off); |
| |
| /* |
| * kernfs guarantees that a file isn't deleted with operations in |
| * flight, which means that the matching css is and stays alive and |
| * doesn't need to be pinned. The RCU locking is not necessary |
| * either. It's just for the convenience of using cgroup_css(). |
| */ |
| rcu_read_lock(); |
| css = cgroup_css(cgrp, cft->ss); |
| rcu_read_unlock(); |
| |
| if (cft->write_u64) { |
| unsigned long long v; |
| ret = kstrtoull(buf, 0, &v); |
| if (!ret) |
| ret = cft->write_u64(css, cft, v); |
| } else if (cft->write_s64) { |
| long long v; |
| ret = kstrtoll(buf, 0, &v); |
| if (!ret) |
| ret = cft->write_s64(css, cft, v); |
| } else { |
| ret = -EINVAL; |
| } |
| |
| return ret ?: nbytes; |
| } |
| |
| static void *cgroup_seqfile_start(struct seq_file *seq, loff_t *ppos) |
| { |
| return seq_cft(seq)->seq_start(seq, ppos); |
| } |
| |
| static void *cgroup_seqfile_next(struct seq_file *seq, void *v, loff_t *ppos) |
| { |
| return seq_cft(seq)->seq_next(seq, v, ppos); |
| } |
| |
| static void cgroup_seqfile_stop(struct seq_file *seq, void *v) |
| { |
| seq_cft(seq)->seq_stop(seq, v); |
| } |
| |
| static int cgroup_seqfile_show(struct seq_file *m, void *arg) |
| { |
| struct cftype *cft = seq_cft(m); |
| struct cgroup_subsys_state *css = seq_css(m); |
| |
| if (cft->seq_show) |
| return cft->seq_show(m, arg); |
| |
| if (cft->read_u64) |
| seq_printf(m, "%llu\n", cft->read_u64(css, cft)); |
| else if (cft->read_s64) |
| seq_printf(m, "%lld\n", cft->read_s64(css, cft)); |
| else |
| return -EINVAL; |
| return 0; |
| } |
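| |
| /* |
| * The read_u64/write_u64 (and s64) shortcuts above let a controller expose |
| * a single numeric knob without its own seq_file boilerplate. A minimal |
| * sketch (my_read, my_write, my_state and the file name are made up): |
| * |
| * static u64 my_read(struct cgroup_subsys_state *css, struct cftype *cft) |
| * { |
| * return my_state(css)->value; |
| * } |
| * |
| * static struct cftype my_files[] = { |
| * { .name = "value", .read_u64 = my_read, .write_u64 = my_write }, |
| * { } |
| * }; |
| */ |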
| |
| static struct kernfs_ops cgroup_kf_single_ops = { |
| .atomic_write_len = PAGE_SIZE, |
| .write = cgroup_file_write, |
| .seq_show = cgroup_seqfile_show, |
| }; |
| |
| static struct kernfs_ops cgroup_kf_ops = { |
| .atomic_write_len = PAGE_SIZE, |
| .write = cgroup_file_write, |
| .seq_start = cgroup_seqfile_start, |
| .seq_next = cgroup_seqfile_next, |
| .seq_stop = cgroup_seqfile_stop, |
| .seq_show = cgroup_seqfile_show, |
| }; |
| |
| /* |
| * cgroup_rename - Only allow simple rename of directories in place. |
| */ |
| static int cgroup_rename(struct kernfs_node *kn, struct kernfs_node *new_parent, |
| const char *new_name_str) |
| { |
| struct cgroup *cgrp = kn->priv; |
| int ret; |
| |
| if (kernfs_type(kn) != KERNFS_DIR) |
| return -ENOTDIR; |
| if (kn->parent != new_parent) |
| return -EIO; |
| |
| /* |
| * This isn't a proper migration and its usefulness is very |
| * limited. Disallow on the default hierarchy. |
| */ |
| if (cgroup_on_dfl(cgrp)) |
| return -EPERM; |
| |
| /* |
| * We're gonna grab cgroup_mutex which nests outside kernfs |
| * active_ref. kernfs_rename() doesn't require active_ref |
| * protection. Break them before grabbing cgroup_mutex. |
| */ |
| kernfs_break_active_protection(new_parent); |
| kernfs_break_active_protection(kn); |
| |
| mutex_lock(&cgroup_mutex); |
| |
| ret = kernfs_rename(kn, new_parent, new_name_str); |
| |
| mutex_unlock(&cgroup_mutex); |
| |
| kernfs_unbreak_active_protection(kn); |
| kernfs_unbreak_active_protection(new_parent); |
| return ret; |
| } |
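| |
| /* |
| * In practice this only permits something like "mv foo bar" within the |
| * same parent directory of a legacy hierarchy; moving a cgroup to a |
| * different parent fails with -EIO and any rename on the default |
| * hierarchy fails with -EPERM, as checked above. |
| */ |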
| |
| /* set uid and gid of cgroup dirs and files to that of the creator */ |
| static int cgroup_kn_set_ugid(struct kernfs_node *kn) |
| { |
| struct iattr iattr = { .ia_valid = ATTR_UID | ATTR_GID, |
| .ia_uid = current_fsuid(), |
| .ia_gid = current_fsgid(), }; |
| |
| if (uid_eq(iattr.ia_uid, GLOBAL_ROOT_UID) && |
| gid_eq(iattr.ia_gid, GLOBAL_ROOT_GID)) |
| return 0; |
| |
| return kernfs_setattr(kn, &iattr); |
| } |
| |
| static int cgroup_add_file(struct cgroup *cgrp, struct cftype *cft) |
| { |
| char name[CGROUP_FILE_NAME_MAX]; |
| struct kernfs_node *kn; |
| struct lock_class_key *key = NULL; |
| int ret; |
| |
| #ifdef CONFIG_DEBUG_LOCK_ALLOC |
| key = &cft->lockdep_key; |
| #endif |
| kn = __kernfs_create_file(cgrp->kn, cgroup_file_name(cgrp, cft, name), |
| cgroup_file_mode(cft), 0, cft->kf_ops, cft, |
| NULL, key); |
| if (IS_ERR(kn)) |
| return PTR_ERR(kn); |
| |
| ret = cgroup_kn_set_ugid(kn); |
| if (ret) { |
| kernfs_remove(kn); |
| return ret; |
| } |
| |
| if (cft->write == cgroup_procs_write) |
| cgrp->procs_kn = kn; |
| else if (cft->seq_show == cgroup_populated_show) |
| cgrp->populated_kn = kn; |
| return 0; |
| } |
| |
| /** |
| * cgroup_addrm_files - add or remove files to a cgroup directory |
| * @cgrp: the target cgroup |
| * @cfts: array of cftypes to be added |
| * @is_add: whether to add or remove |
| * |
| * Depending on @is_add, add or remove files defined by @cfts on @cgrp. |
| * For removals, this function never fails. If addition fails, this |
| * function doesn't remove files already added. The caller is responsible |
| * for cleaning up. |
| */ |
| static int cgroup_addrm_files(struct cgroup *cgrp, struct cftype cfts[], |
| bool is_add) |
| { |
| struct cftype *cft; |
| int ret; |
| |
| lockdep_assert_held(&cgroup_mutex); |
| |
| for (cft = cfts; cft->name[0] != '\0'; cft++) { |
| /* does cft->flags tell us to skip this file on @cgrp? */ |
| if ((cft->flags & __CFTYPE_ONLY_ON_DFL) && !cgroup_on_dfl(cgrp)) |
| continue; |
| if ((cft->flags & __CFTYPE_NOT_ON_DFL) && cgroup_on_dfl(cgrp)) |
| continue; |
| if ((cft->flags & CFTYPE_NOT_ON_ROOT) && !cgroup_parent(cgrp)) |
| continue; |
| if ((cft->flags & CFTYPE_ONLY_ON_ROOT) && cgroup_parent(cgrp)) |
| continue; |
| |
| if (is_add) { |
| ret = cgroup_add_file(cgrp, cft); |
| if (ret) { |
| pr_warn("%s: failed to add %s, err=%d\n", |
| __func__, cft->name, ret); |
| return ret; |
| } |
| } else { |
| cgroup_rm_file(cgrp, cft); |
| } |
| } |
| return 0; |
| } |
| |
| static int cgroup_apply_cftypes(struct cftype *cfts, bool is_add) |
| { |
| LIST_HEAD(pending); |
| struct cgroup_subsys *ss = cfts[0].ss; |
| struct cgroup *root = &ss->root->cgrp; |
| struct cgroup_subsys_state *css; |
| int ret = 0; |
| |
| lockdep_assert_held(&cgroup_mutex); |
| |
| /* add/rm files for all cgroups created before */ |
| css_for_each_descendant_pre(css, cgroup_css(root, ss)) { |
| struct cgroup *cgrp = css->cgroup; |
| |
| if (cgroup_is_dead(cgrp)) |
| continue; |
| |
| ret = cgroup_addrm_files(cgrp, cfts, is_add); |
| if (ret) |
| break; |
| } |
| |
| if (is_add && !ret) |
| kernfs_activate(root->kn); |
| return ret; |
| } |
| |
| static void cgroup_exit_cftypes(struct cftype *cfts) |
| { |
| struct cftype *cft; |
| |
| for (cft = cfts; cft->name[0] != '\0'; cft++) { |
| /* free copy for custom atomic_write_len, see init_cftypes() */ |
| if (cft->max_write_len && cft->max_write_len != PAGE_SIZE) |
| kfree(cft->kf_ops); |
| cft->kf_ops = NULL; |
| cft->ss = NULL; |
| |
| /* revert flags set by cgroup core while adding @cfts */ |
| cft->flags &= |