/*
 * cgroup_freezer.c -  control group freezer subsystem
 *
 * Copyright IBM Corporation, 2007
 *
 * Author : Cedric Le Goater <clg@fr.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2.1 of the GNU Lesser General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 */
17 #include <linux/module.h>
18 #include <linux/slab.h>
19 #include <linux/cgroup.h>
21 #include <linux/uaccess.h>
22 #include <linux/freezer.h>
23 #include <linux/seq_file.h>
32 struct cgroup_subsys_state css;
33 enum freezer_state state;
34 spinlock_t lock; /* protects _writes_ to state */
37 static inline struct freezer *cgroup_freezer(
38 struct cgroup *cgroup)
41 cgroup_subsys_state(cgroup, freezer_subsys_id),
45 static inline struct freezer *task_freezer(struct task_struct *task)
47 return container_of(task_subsys_state(task, freezer_subsys_id),
51 int cgroup_freezing_or_frozen(struct task_struct *task)
53 struct freezer *freezer;
54 enum freezer_state state;
57 freezer = task_freezer(task);
58 if (!freezer->css.cgroup->parent)
59 state = CGROUP_THAWED; /* root cgroup can't be frozen */
61 state = freezer->state;
64 return (state == CGROUP_FREEZING) || (state == CGROUP_FROZEN);
/*
 * cgroups_write_string() limits the size of freezer state strings to
 * CGROUP_LOCAL_BUFFER_SIZE.  Indexed by enum freezer_state; these are
 * also the exact tokens accepted by freezer_write().
 */
static const char *freezer_state_strs[] = {
	"THAWED",
	"FREEZING",
	"FROZEN",
};
/*
 * State diagram
 * Transitions are caused by userspace writes to the freezer.state file.
 * The values in parenthesis are state labels. The rest are edge labels.
 *
 * (THAWED) --FROZEN--> (FREEZING) --FROZEN--> (FROZEN)
 *    ^ ^                    |                     |
 *    | \_______THAWED_______/                     |
 *    \__________________________THAWED____________/
 */
88 struct cgroup_subsys freezer_subsys;
/* Locks taken and their ordering
 * ------------------------------
 * cgroup_mutex (AKA cgroup_lock)
 * freezer->lock
 * css_set_lock
 * task->alloc_lock (AKA task_lock)
 * task->sighand->siglock
 *
 * cgroup code forces css_set_lock to be taken before task->alloc_lock
 *
 * freezer_create(), freezer_destroy():
 * cgroup_mutex [ by cgroup core ]
 *
 * freezer_can_attach():
 * cgroup_mutex (held by caller of can_attach)
 *
 * cgroup_freezing_or_frozen():
 * task->alloc_lock (to get task's cgroup)
 *
 * freezer_fork() (preserving fork() performance means can't take cgroup_mutex):
 * freezer->lock
 *  sighand->siglock (if the cgroup is freezing)
 *
 * freezer_read():
 * cgroup_mutex
 *  freezer->lock
 *   write_lock css_set_lock (cgroup iterator start)
 *    task->alloc_lock
 *   read_lock css_set_lock (cgroup iterator start)
 *
 * freezer_write() (freeze):
 * cgroup_mutex
 *  freezer->lock
 *   write_lock css_set_lock (cgroup iterator start)
 *    task->alloc_lock
 *   read_lock css_set_lock (cgroup iterator start)
 *    sighand->siglock (fake signal delivery inside freeze_task())
 *
 * freezer_write() (unfreeze):
 * cgroup_mutex
 *  freezer->lock
 *   write_lock css_set_lock (cgroup iterator start)
 *    task->alloc_lock
 *   read_lock css_set_lock (cgroup iterator start)
 *    task->alloc_lock (inside thaw_process(), prevents race with refrigerator())
 *   sighand->siglock
 */
137 static struct cgroup_subsys_state *freezer_create(struct cgroup_subsys *ss,
138 struct cgroup *cgroup)
140 struct freezer *freezer;
142 freezer = kzalloc(sizeof(struct freezer), GFP_KERNEL);
144 return ERR_PTR(-ENOMEM);
146 spin_lock_init(&freezer->lock);
147 freezer->state = CGROUP_THAWED;
148 return &freezer->css;
/*
 * Free the per-cgroup freezer state allocated in freezer_create().
 * Called by the cgroup core with cgroup_mutex held.
 */
static void freezer_destroy(struct cgroup_subsys *ss,
			    struct cgroup *cgroup)
{
	kfree(cgroup_freezer(cgroup));
}
157 /* Task is frozen or will freeze immediately when next it gets woken */
158 static bool is_task_frozen_enough(struct task_struct *task)
160 return frozen(task) ||
161 (task_is_stopped_or_traced(task) && freezing(task));
165 * The call to cgroup_lock() in the freezer.state write method prevents
166 * a write to that file racing against an attach, and hence the
167 * can_attach() result will remain valid until the attach completes.
169 static int freezer_can_attach(struct cgroup_subsys *ss,
170 struct cgroup *new_cgroup,
171 struct task_struct *task, bool threadgroup)
173 struct freezer *freezer;
175 if ((current != task) && (!capable(CAP_SYS_ADMIN))) {
176 const struct cred *cred = current_cred(), *tcred;
178 tcred = __task_cred(task);
179 if (cred->euid != tcred->uid && cred->euid != tcred->suid)
184 * Anything frozen can't move or be moved to/from.
186 * Since orig_freezer->state == FROZEN means that @task has been
187 * frozen, so it's sufficient to check the latter condition.
190 if (is_task_frozen_enough(task))
193 freezer = cgroup_freezer(new_cgroup);
194 if (freezer->state == CGROUP_FROZEN)
198 struct task_struct *c;
201 list_for_each_entry_rcu(c, &task->thread_group, thread_group) {
202 if (is_task_frozen_enough(c)) {
213 static void freezer_fork(struct cgroup_subsys *ss, struct task_struct *task)
215 struct freezer *freezer;
218 * No lock is needed, since the task isn't on tasklist yet,
219 * so it can't be moved to another cgroup, which means the
220 * freezer won't be removed and will be valid during this
221 * function call. Nevertheless, apply RCU read-side critical
222 * section to suppress RCU lockdep false positives.
225 freezer = task_freezer(task);
229 * The root cgroup is non-freezable, so we can skip the
232 if (!freezer->css.cgroup->parent)
235 spin_lock_irq(&freezer->lock);
236 BUG_ON(freezer->state == CGROUP_FROZEN);
238 /* Locking avoids race with FREEZING -> THAWED transitions. */
239 if (freezer->state == CGROUP_FREEZING)
240 freeze_task(task, true);
241 spin_unlock_irq(&freezer->lock);
245 * caller must hold freezer->lock
247 static void update_freezer_state(struct cgroup *cgroup,
248 struct freezer *freezer)
250 struct cgroup_iter it;
251 struct task_struct *task;
252 unsigned int nfrozen = 0, ntotal = 0;
254 cgroup_iter_start(cgroup, &it);
255 while ((task = cgroup_iter_next(cgroup, &it))) {
257 if (is_task_frozen_enough(task))
262 * Transition to FROZEN when no new tasks can be added ensures
263 * that we never exist in the FROZEN state while there are unfrozen
266 if (nfrozen == ntotal)
267 freezer->state = CGROUP_FROZEN;
268 else if (nfrozen > 0)
269 freezer->state = CGROUP_FREEZING;
271 freezer->state = CGROUP_THAWED;
272 cgroup_iter_end(cgroup, &it);
275 static int freezer_read(struct cgroup *cgroup, struct cftype *cft,
278 struct freezer *freezer;
279 enum freezer_state state;
281 if (!cgroup_lock_live_group(cgroup))
284 freezer = cgroup_freezer(cgroup);
285 spin_lock_irq(&freezer->lock);
286 state = freezer->state;
287 if (state == CGROUP_FREEZING) {
288 /* We change from FREEZING to FROZEN lazily if the cgroup was
289 * only partially frozen when we exitted write. */
290 update_freezer_state(cgroup, freezer);
291 state = freezer->state;
293 spin_unlock_irq(&freezer->lock);
296 seq_puts(m, freezer_state_strs[state]);
301 static int try_to_freeze_cgroup(struct cgroup *cgroup, struct freezer *freezer)
303 struct cgroup_iter it;
304 struct task_struct *task;
305 unsigned int num_cant_freeze_now = 0;
307 freezer->state = CGROUP_FREEZING;
308 cgroup_iter_start(cgroup, &it);
309 while ((task = cgroup_iter_next(cgroup, &it))) {
310 if (!freeze_task(task, true))
312 if (is_task_frozen_enough(task))
314 if (!freezing(task) && !freezer_should_skip(task))
315 num_cant_freeze_now++;
317 cgroup_iter_end(cgroup, &it);
319 return num_cant_freeze_now ? -EBUSY : 0;
322 static void unfreeze_cgroup(struct cgroup *cgroup, struct freezer *freezer)
324 struct cgroup_iter it;
325 struct task_struct *task;
327 cgroup_iter_start(cgroup, &it);
328 while ((task = cgroup_iter_next(cgroup, &it))) {
331 cgroup_iter_end(cgroup, &it);
333 freezer->state = CGROUP_THAWED;
336 static int freezer_change_state(struct cgroup *cgroup,
337 enum freezer_state goal_state)
339 struct freezer *freezer;
342 freezer = cgroup_freezer(cgroup);
344 spin_lock_irq(&freezer->lock);
346 update_freezer_state(cgroup, freezer);
347 if (goal_state == freezer->state)
350 switch (goal_state) {
352 unfreeze_cgroup(cgroup, freezer);
355 retval = try_to_freeze_cgroup(cgroup, freezer);
361 spin_unlock_irq(&freezer->lock);
366 static int freezer_write(struct cgroup *cgroup,
371 enum freezer_state goal_state;
373 if (strcmp(buffer, freezer_state_strs[CGROUP_THAWED]) == 0)
374 goal_state = CGROUP_THAWED;
375 else if (strcmp(buffer, freezer_state_strs[CGROUP_FROZEN]) == 0)
376 goal_state = CGROUP_FROZEN;
380 if (!cgroup_lock_live_group(cgroup))
382 retval = freezer_change_state(cgroup, goal_state);
387 static struct cftype files[] = {
390 .read_seq_string = freezer_read,
391 .write_string = freezer_write,
395 static int freezer_populate(struct cgroup_subsys *ss, struct cgroup *cgroup)
399 return cgroup_add_files(cgroup, ss, files, ARRAY_SIZE(files));
402 struct cgroup_subsys freezer_subsys = {
404 .create = freezer_create,
405 .destroy = freezer_destroy,
406 .populate = freezer_populate,
407 .subsys_id = freezer_subsys_id,
408 .can_attach = freezer_can_attach,
410 .fork = freezer_fork,