hugetlb/cgroup: add support for cgroup removal

diff --git a/mm/hugetlb_cgroup.c b/mm/hugetlb_cgroup.c
index 63e04cfa437dc1e754f324a363e2bdcd98c41142..bc518bedea98e185cf6082c154357e546fd2d4be 100644
--- a/mm/hugetlb_cgroup.c
+++ b/mm/hugetlb_cgroup.c
@@ -105,10 +105,78 @@ static void hugetlb_cgroup_destroy(struct cgroup *cgroup)
        kfree(h_cgroup);
 }
 
+
+/*
+ * Should be called with hugetlb_lock held. Since we hold
+ * hugetlb_lock, pages cannot be moved off the active list or
+ * uncharged from the cgroup, so there is no need to take a page
+ * reference or test whether the page is active here. This
+ * function cannot fail.
+ */
+static void hugetlb_cgroup_move_parent(int idx, struct cgroup *cgroup,
+                                      struct page *page)
+{
+       int csize;
+       struct res_counter *counter;
+       struct res_counter *fail_res;
+       struct hugetlb_cgroup *page_hcg;
+       struct hugetlb_cgroup *h_cg   = hugetlb_cgroup_from_cgroup(cgroup);
+       struct hugetlb_cgroup *parent = parent_hugetlb_cgroup(cgroup);
+
+       page_hcg = hugetlb_cgroup_from_page(page);
+       /*
+        * Pages on the active list may have no cgroup attached, i.e.
+        * hugepages of fewer than 3 pages, which cannot store a
+        * cgroup pointer. We can safely ignore those pages.
+        */
+       if (!page_hcg || page_hcg != h_cg)
+               goto out;
+
+       csize = PAGE_SIZE << compound_order(page);
+       if (!parent) {
+               parent = root_h_cgroup;
+               /* root has no limit */
+               res_counter_charge_nofail(&parent->hugepage[idx],
+                                         csize, &fail_res);
+       }
+       counter = &h_cg->hugepage[idx];
+       res_counter_uncharge_until(counter, counter->parent, csize);
+
+       set_hugetlb_cgroup(page, parent);
+out:
+       return;
+}
+
+/*
+ * Force the hugetlb cgroup to empty its hugetlb usage by moving the
+ * charges to the parent cgroup.
+ */
 static int hugetlb_cgroup_pre_destroy(struct cgroup *cgroup)
 {
-       /* We will add the cgroup removal support in later patches */
-          return -EBUSY;
+       struct hstate *h;
+       struct page *page;
+       int ret = 0, idx;
+
+       do {
+               if (cgroup_task_count(cgroup) ||
+                   !list_empty(&cgroup->children)) {
+                       ret = -EBUSY;
+                       goto out;
+               }
+               /* hugepage[] is indexed by hstate, so restart idx each pass */
+               idx = 0;
+               for_each_hstate(h) {
+                       spin_lock(&hugetlb_lock);
+                       list_for_each_entry(page, &h->hugepage_activelist, lru)
+                               hugetlb_cgroup_move_parent(idx, cgroup, page);
+
+                       spin_unlock(&hugetlb_lock);
+                       idx++;
+               }
+               cond_resched();
+       } while (hugetlb_cgroup_have_usage(cgroup));
+out:
+       return ret;
 }
 
 int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
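
For reference, the user-visible effect of this change can be exercised from
userspace: rmdir on a hugetlb cgroup that still carries hugetlb charges now
succeeds (once it has no tasks and no child groups), with the charges moved to
the parent, instead of failing with EBUSY. Below is a minimal sketch of such a
check; the mount point /sys/fs/cgroup/hugetlb and the "demo" group name are
assumptions about the test system, and actually charging pages into the group
is only indicated by a comment.

/*
 * Sketch: remove a hugetlb cgroup from userspace. Assumes the hugetlb
 * controller is mounted at /sys/fs/cgroup/hugetlb (adjust as needed)
 * and that we run with enough privilege to create/remove cgroups.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
	const char *cg = "/sys/fs/cgroup/hugetlb/demo";	/* assumed path */

	if (mkdir(cg, 0755) && errno != EEXIST) {
		perror("mkdir");
		return 1;
	}

	/* ... move a task into the group and fault in some hugetlb pages ... */

	/*
	 * With this patch, rmdir() succeeds once the group has no tasks and
	 * no children, even if hugetlb charges remain: the kernel's
	 * pre_destroy handler moves them to the parent. Previously the
	 * handler returned -EBUSY unconditionally, so rmdir() always failed.
	 */
	if (rmdir(cg))
		fprintf(stderr, "rmdir: %s\n", strerror(errno));
	else
		printf("hugetlb cgroup removed; remaining charges reparented\n");

	return 0;
}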