Merge branch 'linux-tegra-2.6.36' into android-tegra-2.6.36
[firefly-linux-kernel-4.4.55.git] / kernel / cgroup_freezer.c
1 /*
2  * cgroup_freezer.c -  control group freezer subsystem
3  *
4  * Copyright IBM Corporation, 2007
5  *
6  * Author : Cedric Le Goater <clg@fr.ibm.com>
7  *
8  * This program is free software; you can redistribute it and/or modify it
9  * under the terms of version 2.1 of the GNU Lesser General Public License
10  * as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it would be useful, but
13  * WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
15  */
16
17 #include <linux/module.h>
18 #include <linux/slab.h>
19 #include <linux/cgroup.h>
20 #include <linux/fs.h>
21 #include <linux/uaccess.h>
22 #include <linux/freezer.h>
23 #include <linux/seq_file.h>
24
/*
 * Per-cgroup freezer states; see the state diagram further below for the
 * allowed transitions.
 */
enum freezer_state {
	CGROUP_THAWED = 0,	/* tasks are runnable */
	CGROUP_FREEZING,	/* freeze requested; not all tasks frozen yet */
	CGROUP_FROZEN,		/* every task in the cgroup is frozen */
};
30
/*
 * State attached to each freezer cgroup; the embedded css ties it to the
 * cgroup core (see cgroup_freezer()/task_freezer() below).
 */
struct freezer {
	struct cgroup_subsys_state css;
	enum freezer_state state;
	spinlock_t lock; /* protects _writes_ to state */
};
36
37 static inline struct freezer *cgroup_freezer(
38                 struct cgroup *cgroup)
39 {
40         return container_of(
41                 cgroup_subsys_state(cgroup, freezer_subsys_id),
42                 struct freezer, css);
43 }
44
45 static inline struct freezer *task_freezer(struct task_struct *task)
46 {
47         return container_of(task_subsys_state(task, freezer_subsys_id),
48                             struct freezer, css);
49 }
50
51 int cgroup_freezing_or_frozen(struct task_struct *task)
52 {
53         struct freezer *freezer;
54         enum freezer_state state;
55
56         task_lock(task);
57         freezer = task_freezer(task);
58         if (!freezer->css.cgroup->parent)
59                 state = CGROUP_THAWED; /* root cgroup can't be frozen */
60         else
61                 state = freezer->state;
62         task_unlock(task);
63
64         return (state == CGROUP_FREEZING) || (state == CGROUP_FROZEN);
65 }
66
/*
 * Names shown in / accepted by the freezer.state file, indexed by
 * enum freezer_state.  cgroups_write_string() limits the size of freezer
 * state strings to CGROUP_LOCAL_BUFFER_SIZE.
 */
static const char *freezer_state_strs[] = {
	"THAWED",
	"FREEZING",
	"FROZEN",
};
76
77 /*
78  * State diagram
79  * Transitions are caused by userspace writes to the freezer.state file.
 * The values in parentheses are state labels. The rest are edge labels.
81  *
82  * (THAWED) --FROZEN--> (FREEZING) --FROZEN--> (FROZEN)
83  *    ^ ^                    |                     |
84  *    | \_______THAWED_______/                     |
85  *    \__________________________THAWED____________/
86  */
87
88 struct cgroup_subsys freezer_subsys;
89
90 /* Locks taken and their ordering
91  * ------------------------------
92  * cgroup_mutex (AKA cgroup_lock)
93  * freezer->lock
94  * css_set_lock
95  * task->alloc_lock (AKA task_lock)
96  * task->sighand->siglock
97  *
98  * cgroup code forces css_set_lock to be taken before task->alloc_lock
99  *
100  * freezer_create(), freezer_destroy():
101  * cgroup_mutex [ by cgroup core ]
102  *
103  * freezer_can_attach():
104  * cgroup_mutex (held by caller of can_attach)
105  *
106  * cgroup_freezing_or_frozen():
107  * task->alloc_lock (to get task's cgroup)
108  *
109  * freezer_fork() (preserving fork() performance means can't take cgroup_mutex):
110  * freezer->lock
111  *  sighand->siglock (if the cgroup is freezing)
112  *
113  * freezer_read():
114  * cgroup_mutex
115  *  freezer->lock
116  *   write_lock css_set_lock (cgroup iterator start)
117  *    task->alloc_lock
118  *   read_lock css_set_lock (cgroup iterator start)
119  *
120  * freezer_write() (freeze):
121  * cgroup_mutex
122  *  freezer->lock
123  *   write_lock css_set_lock (cgroup iterator start)
124  *    task->alloc_lock
125  *   read_lock css_set_lock (cgroup iterator start)
126  *    sighand->siglock (fake signal delivery inside freeze_task())
127  *
128  * freezer_write() (unfreeze):
129  * cgroup_mutex
130  *  freezer->lock
131  *   write_lock css_set_lock (cgroup iterator start)
132  *    task->alloc_lock
133  *   read_lock css_set_lock (cgroup iterator start)
134  *    task->alloc_lock (inside thaw_process(), prevents race with refrigerator())
135  *     sighand->siglock
136  */
137 static struct cgroup_subsys_state *freezer_create(struct cgroup_subsys *ss,
138                                                   struct cgroup *cgroup)
139 {
140         struct freezer *freezer;
141
142         freezer = kzalloc(sizeof(struct freezer), GFP_KERNEL);
143         if (!freezer)
144                 return ERR_PTR(-ENOMEM);
145
146         spin_lock_init(&freezer->lock);
147         freezer->state = CGROUP_THAWED;
148         return &freezer->css;
149 }
150
/*
 * Release the freezer state allocated in freezer_create().
 * Called by the cgroup core with cgroup_mutex held.
 */
static void freezer_destroy(struct cgroup_subsys *ss,
                            struct cgroup *cgroup)
{
	struct freezer *freezer = cgroup_freezer(cgroup);

	kfree(freezer);
}
156
157 /* Task is frozen or will freeze immediately when next it gets woken */
158 static bool is_task_frozen_enough(struct task_struct *task)
159 {
160         return frozen(task) ||
161                 (task_is_stopped_or_traced(task) && freezing(task));
162 }
163
/*
 * The call to cgroup_lock() in the freezer.state write method prevents
 * a write to that file racing against an attach, and hence the
 * can_attach() result will remain valid until the attach completes.
 *
 * Returns 0 if @task may be moved into @new_cgroup, -EPERM if the caller
 * lacks permission, or -EBUSY if either side of the move is frozen.
 */
static int freezer_can_attach(struct cgroup_subsys *ss,
                              struct cgroup *new_cgroup,
                              struct task_struct *task, bool threadgroup)
{
	struct freezer *freezer;

	/*
	 * Moving another task requires CAP_SYS_ADMIN, or the mover's euid
	 * must match the target's uid or suid.
	 * NOTE(review): this credential check is not in the mainline
	 * freezer; presumably an Android-specific addition — verify
	 * against the vendor patch series.
	 */
	if ((current != task) && (!capable(CAP_SYS_ADMIN))) {
		const struct cred *cred = current_cred(), *tcred;

		tcred = __task_cred(task);
		if (cred->euid != tcred->uid && cred->euid != tcred->suid)
			return -EPERM;
	}

	/*
	 * Anything frozen can't move or be moved to/from.
	 *
	 * Since orig_freezer->state == FROZEN means that @task has been
	 * frozen, so it's sufficient to check the latter condition.
	 */

	if (is_task_frozen_enough(task))
		return -EBUSY;

	/* destination cgroup must not be frozen either */
	freezer = cgroup_freezer(new_cgroup);
	if (freezer->state == CGROUP_FROZEN)
		return -EBUSY;

	/* whole-threadgroup moves: every sibling thread must be unfrozen */
	if (threadgroup) {
		struct task_struct *c;

		rcu_read_lock();
		list_for_each_entry_rcu(c, &task->thread_group, thread_group) {
			if (is_task_frozen_enough(c)) {
				rcu_read_unlock();
				return -EBUSY;
			}
		}
		rcu_read_unlock();
	}

	return 0;
}
212
/*
 * fork() callback: a child is born into its parent's cgroup, so if that
 * cgroup is FREEZING the child must be made to freeze as well.
 */
static void freezer_fork(struct cgroup_subsys *ss, struct task_struct *task)
{
	struct freezer *freezer;

	/*
	 * No lock is needed, since the task isn't on tasklist yet,
	 * so it can't be moved to another cgroup, which means the
	 * freezer won't be removed and will be valid during this
	 * function call.  Nevertheless, apply RCU read-side critical
	 * section to suppress RCU lockdep false positives.
	 */
	rcu_read_lock();
	freezer = task_freezer(task);
	rcu_read_unlock();

	/*
	 * The root cgroup is non-freezable, so we can skip the
	 * following check.
	 */
	if (!freezer->css.cgroup->parent)
		return;

	spin_lock_irq(&freezer->lock);
	/* a FROZEN cgroup should have no runnable task left to fork —
	 * NOTE(review): confirm this invariant holds under ptrace/stop */
	BUG_ON(freezer->state == CGROUP_FROZEN);

	/* Locking avoids race with FREEZING -> THAWED transitions. */
	if (freezer->state == CGROUP_FREEZING)
		freeze_task(task, true);
	spin_unlock_irq(&freezer->lock);
}
243
244 /*
245  * caller must hold freezer->lock
246  */
247 static void update_freezer_state(struct cgroup *cgroup,
248                                  struct freezer *freezer)
249 {
250         struct cgroup_iter it;
251         struct task_struct *task;
252         unsigned int nfrozen = 0, ntotal = 0;
253
254         cgroup_iter_start(cgroup, &it);
255         while ((task = cgroup_iter_next(cgroup, &it))) {
256                 ntotal++;
257                 if (is_task_frozen_enough(task))
258                         nfrozen++;
259         }
260
261         /*
262          * Transition to FROZEN when no new tasks can be added ensures
263          * that we never exist in the FROZEN state while there are unfrozen
264          * tasks.
265          */
266         if (nfrozen == ntotal)
267                 freezer->state = CGROUP_FROZEN;
268         else if (nfrozen > 0)
269                 freezer->state = CGROUP_FREEZING;
270         else
271                 freezer->state = CGROUP_THAWED;
272         cgroup_iter_end(cgroup, &it);
273 }
274
/*
 * Read handler for the freezer.state control file: print the current
 * state name.  A FREEZING group is re-evaluated first, since it may
 * have finished freezing since the last write.
 * Returns 0, or -ENODEV if the cgroup is being removed.
 */
static int freezer_read(struct cgroup *cgroup, struct cftype *cft,
                        struct seq_file *m)
{
	struct freezer *freezer;
	enum freezer_state state;

	if (!cgroup_lock_live_group(cgroup))
		return -ENODEV;

	freezer = cgroup_freezer(cgroup);
	spin_lock_irq(&freezer->lock);
	state = freezer->state;
	if (state == CGROUP_FREEZING) {
		/* We change from FREEZING to FROZEN lazily if the cgroup was
		 * only partially frozen when we exited write. */
		update_freezer_state(cgroup, freezer);
		state = freezer->state;
	}
	spin_unlock_irq(&freezer->lock);
	cgroup_unlock();

	seq_puts(m, freezer_state_strs[state]);
	seq_putc(m, '\n');
	return 0;
}
300
/*
 * Ask every task in @cgroup to freeze.  Caller holds freezer->lock
 * (see freezer_change_state()).
 *
 * A task counts as a failure only if freeze_task() acted on it, it is
 * not yet frozen-enough, and it is neither marked freezing nor exempt
 * via freezer_should_skip().  Returns 0 if all tasks are frozen or on
 * their way, -EBUSY otherwise (userspace may retry the write).
 */
static int try_to_freeze_cgroup(struct cgroup *cgroup, struct freezer *freezer)
{
	struct cgroup_iter it;
	struct task_struct *task;
	unsigned int num_cant_freeze_now = 0;

	freezer->state = CGROUP_FREEZING;
	cgroup_iter_start(cgroup, &it);
	while ((task = cgroup_iter_next(cgroup, &it))) {
		if (!freeze_task(task, true))
			continue;
		if (is_task_frozen_enough(task))
			continue;
		if (!freezing(task) && !freezer_should_skip(task))
			num_cant_freeze_now++;
	}
	cgroup_iter_end(cgroup, &it);

	return num_cant_freeze_now ? -EBUSY : 0;
}
321
322 static void unfreeze_cgroup(struct cgroup *cgroup, struct freezer *freezer)
323 {
324         struct cgroup_iter it;
325         struct task_struct *task;
326
327         cgroup_iter_start(cgroup, &it);
328         while ((task = cgroup_iter_next(cgroup, &it))) {
329                 thaw_process(task);
330         }
331         cgroup_iter_end(cgroup, &it);
332
333         freezer->state = CGROUP_THAWED;
334 }
335
336 static int freezer_change_state(struct cgroup *cgroup,
337                                 enum freezer_state goal_state)
338 {
339         struct freezer *freezer;
340         int retval = 0;
341
342         freezer = cgroup_freezer(cgroup);
343
344         spin_lock_irq(&freezer->lock);
345
346         update_freezer_state(cgroup, freezer);
347         if (goal_state == freezer->state)
348                 goto out;
349
350         switch (goal_state) {
351         case CGROUP_THAWED:
352                 unfreeze_cgroup(cgroup, freezer);
353                 break;
354         case CGROUP_FROZEN:
355                 retval = try_to_freeze_cgroup(cgroup, freezer);
356                 break;
357         default:
358                 BUG();
359         }
360 out:
361         spin_unlock_irq(&freezer->lock);
362
363         return retval;
364 }
365
366 static int freezer_write(struct cgroup *cgroup,
367                          struct cftype *cft,
368                          const char *buffer)
369 {
370         int retval;
371         enum freezer_state goal_state;
372
373         if (strcmp(buffer, freezer_state_strs[CGROUP_THAWED]) == 0)
374                 goal_state = CGROUP_THAWED;
375         else if (strcmp(buffer, freezer_state_strs[CGROUP_FROZEN]) == 0)
376                 goal_state = CGROUP_FROZEN;
377         else
378                 return -EINVAL;
379
380         if (!cgroup_lock_live_group(cgroup))
381                 return -ENODEV;
382         retval = freezer_change_state(cgroup, goal_state);
383         cgroup_unlock();
384         return retval;
385 }
386
/* Control files exposed by this subsystem: just "freezer.state". */
static struct cftype files[] = {
	{
		.name = "state",
		.read_seq_string = freezer_read,
		.write_string = freezer_write,
	},
};
394
395 static int freezer_populate(struct cgroup_subsys *ss, struct cgroup *cgroup)
396 {
397         if (!cgroup->parent)
398                 return 0;
399         return cgroup_add_files(cgroup, ss, files, ARRAY_SIZE(files));
400 }
401
/*
 * Subsystem registration: wires the freezer callbacks into the cgroup
 * core.  attach/exit are intentionally unused.
 */
struct cgroup_subsys freezer_subsys = {
	.name           = "freezer",
	.create         = freezer_create,
	.destroy        = freezer_destroy,
	.populate       = freezer_populate,
	.subsys_id      = freezer_subsys_id,
	.can_attach     = freezer_can_attach,
	.attach         = NULL,
	.fork           = freezer_fork,
	.exit           = NULL,
};