shm: allow exit_shm in parallel if only marking orphans
[firefly-linux-kernel-4.4.55.git] / ipc / shm.c
index 1fc3a61b443b237b28bab792ab1aefcf398dbf70..7fc9f9f3a26b874c7fa558d22618273e9eb357d3 100644 (file)
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -268,32 +268,6 @@ static void shm_close(struct vm_area_struct *vma)
        up_write(&shm_ids(ns).rwsem);
 }
 
-/* Called with ns->shm_ids(ns).rwsem locked */
-static void shm_mark_orphan(struct shmid_kernel *shp, struct ipc_namespace *ns)
-{
-       if (WARN_ON(shp->shm_creator != current)) /* Remove me when it works */
-               return;
-
-       /*
-        * Mark it as orphaned to destroy the segment when
-        * kernel.shm_rmid_forced is changed.
-        * It is noop if the following shm_may_destroy() returns true.
-        */
-       shp->shm_creator = NULL;
-
-       /*
-        * Don't even try to destroy it.  If shm_rmid_forced=0 and IPC_RMID
-        * is not set, it shouldn't be deleted here.
-        */
-       if (!ns->shm_rmid_forced)
-               return;
-
-       if (shm_may_destroy(ns, shp)) {
-               shm_lock_by_ptr(shp);
-               shm_destroy(ns, shp);
-       }
-}
-
 /* Called with ns->shm_ids(ns).rwsem locked */
 static int shm_try_destroy_orphaned(int id, void *p, void *data)
 {
@@ -325,20 +299,49 @@ void shm_destroy_orphaned(struct ipc_namespace *ns)
        up_write(&shm_ids(ns).rwsem);
 }
 
-
+/* Locking assumes this will only be called with task == current */
 void exit_shm(struct task_struct *task)
 {
        struct ipc_namespace *ns = task->nsproxy->ipc_ns;
        struct shmid_kernel *shp, *n;
 
-       if (shm_ids(ns).in_use == 0)
+       if (list_empty(&task->sysvshm.shm_clist))
+               return;
+
+       /*
+        * If kernel.shm_rmid_forced is not set then only keep track of
+        * which shmids are orphaned, so that a later set of the sysctl
+        * can clean them up.
+        */
+       if (!ns->shm_rmid_forced) {
+               down_read(&shm_ids(ns).rwsem);
+               list_for_each_entry(shp, &task->sysvshm.shm_clist, shm_clist)
+                       shp->shm_creator = NULL;
+               /*
+        * Only the read lock is held, but since we are only ever
+        * called with task == current, no list entry can be shared.
+                */
+               list_del(&task->sysvshm.shm_clist);
+               up_read(&shm_ids(ns).rwsem);
                return;
+       }
 
-       /* Destroy all already created segments, but not mapped yet */
+       /*
+        * Destroy all already created segments, that were not yet mapped,
+        * and mark any mapped as orphan to cover the sysctl toggling.
+        * Destroy is skipped if shm_may_destroy() returns false.
+        */
        down_write(&shm_ids(ns).rwsem);
-       list_for_each_entry_safe(shp, n, &task->sysvshm.shm_clist, shm_clist)
-               shm_mark_orphan(shp, ns);
-       /* remove the list head from any segments still attached */
+       list_for_each_entry_safe(shp, n, &task->sysvshm.shm_clist, shm_clist) {
+               shp->shm_creator = NULL;
+
+               if (shm_may_destroy(ns, shp)) {
+                       shm_lock_by_ptr(shp);
+                       shm_destroy(ns, shp);
+               }
+       }
+
+       /* Remove the list head from any segments still attached. */
        list_del(&task->sysvshm.shm_clist);
        up_write(&shm_ids(ns).rwsem);
 }