target: Prevent cmd->se_queue_node double add
authorRoland Dreier <roland@purestorage.com>
Thu, 29 Sep 2011 05:12:07 +0000 (22:12 -0700)
committerNicholas Bellinger <nab@linux-iscsi.org>
Mon, 24 Oct 2011 03:17:51 +0000 (03:17 +0000)
This patch addresses a bug with the lio-core-2.6.git conversion of
transport_add_cmd_to_queue() to use a single embedded list_head, instead
of individual struct se_queue_req allocations allowing a single se_cmd to
be added to the queue multiple times.  This was changed in the following:

commit 2a9e4d5ca5d99f4c600578d6285d45142e7e5208
Author: Andy Grover <agrover@redhat.com>
Date:   Tue Apr 26 17:45:51 2011 -0700

    target: Embed qr in struct se_cmd

The problem is that some target code still assumes performing multiple
adds is allowed via transport_add_cmd_to_queue(), which ends up causing
list corruption in qobj->qobj_list code.  This patch addresses this
by removing an existing struct se_cmd from the list before the add, and
removes an unnecessary list walk in transport_remove_cmd_from_queue().

It also changes cmd->t_transport_queue_active to use explicit sets instead
of increment/decrement to prevent confusion during exception path handling.

Signed-off-by: Roland Dreier <roland@purestorage.com>
Cc: Andy Grover <agrover@redhat.com>
Cc: stable@kernel.org
Signed-off-by: Nicholas Bellinger <nab@risingtidesystems.com>
drivers/target/target_core_tmr.c
drivers/target/target_core_transport.c

index 662473c5404361a1c4c300ab528a1a2fba567695..98b12a8923f0abc20b2c4a57b8efd3f6e9b92c7a 100644 (file)
@@ -339,7 +339,7 @@ int core_tmr_lun_reset(
 
                atomic_dec(&cmd->t_transport_queue_active);
                atomic_dec(&qobj->queue_cnt);
-               list_del(&cmd->se_queue_node);
+               list_del_init(&cmd->se_queue_node);
                spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
 
                pr_debug("LUN_RESET: %s from Device Queue: cmd: %p t_state:"
index 4b2d6bf87a12bbd8cf3582cd0ed09fc996519611..046da7f38230c4b0cac5ac825ad0dbd9c7a77c2e 100644 (file)
@@ -620,8 +620,6 @@ static void transport_add_cmd_to_queue(
        struct se_queue_obj *qobj = &dev->dev_queue_obj;
        unsigned long flags;
 
-       INIT_LIST_HEAD(&cmd->se_queue_node);
-
        if (t_state) {
                spin_lock_irqsave(&cmd->t_state_lock, flags);
                cmd->t_state = t_state;
@@ -630,15 +628,21 @@ static void transport_add_cmd_to_queue(
        }
 
        spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
+
+       /* If the cmd is already on the list, remove it before we add it */
+       if (!list_empty(&cmd->se_queue_node))
+               list_del(&cmd->se_queue_node);
+       else
+               atomic_inc(&qobj->queue_cnt);
+
        if (cmd->se_cmd_flags & SCF_EMULATE_QUEUE_FULL) {
                cmd->se_cmd_flags &= ~SCF_EMULATE_QUEUE_FULL;
                list_add(&cmd->se_queue_node, &qobj->qobj_list);
        } else
                list_add_tail(&cmd->se_queue_node, &qobj->qobj_list);
-       atomic_inc(&cmd->t_transport_queue_active);
+       atomic_set(&cmd->t_transport_queue_active, 1);
        spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
 
-       atomic_inc(&qobj->queue_cnt);
        wake_up_interruptible(&qobj->thread_wq);
 }
 
@@ -655,9 +659,9 @@ transport_get_cmd_from_queue(struct se_queue_obj *qobj)
        }
        cmd = list_first_entry(&qobj->qobj_list, struct se_cmd, se_queue_node);
 
-       atomic_dec(&cmd->t_transport_queue_active);
+       atomic_set(&cmd->t_transport_queue_active, 0);
 
-       list_del(&cmd->se_queue_node);
+       list_del_init(&cmd->se_queue_node);
        atomic_dec(&qobj->queue_cnt);
        spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
 
@@ -667,7 +671,6 @@ transport_get_cmd_from_queue(struct se_queue_obj *qobj)
 static void transport_remove_cmd_from_queue(struct se_cmd *cmd,
                struct se_queue_obj *qobj)
 {
-       struct se_cmd *t;
        unsigned long flags;
 
        spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
@@ -675,14 +678,9 @@ static void transport_remove_cmd_from_queue(struct se_cmd *cmd,
                spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
                return;
        }
-
-       list_for_each_entry(t, &qobj->qobj_list, se_queue_node)
-               if (t == cmd) {
-                       atomic_dec(&cmd->t_transport_queue_active);
-                       atomic_dec(&qobj->queue_cnt);
-                       list_del(&cmd->se_queue_node);
-                       break;
-               }
+       atomic_set(&cmd->t_transport_queue_active, 0);
+       atomic_dec(&qobj->queue_cnt);
+       list_del_init(&cmd->se_queue_node);
        spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
 
        if (atomic_read(&cmd->t_transport_queue_active)) {
@@ -1066,7 +1064,7 @@ static void transport_release_all_cmds(struct se_device *dev)
        list_for_each_entry_safe(cmd, tcmd, &dev->dev_queue_obj.qobj_list,
                                se_queue_node) {
                t_state = cmd->t_state;
-               list_del(&cmd->se_queue_node);
+               list_del_init(&cmd->se_queue_node);
                spin_unlock_irqrestore(&dev->dev_queue_obj.cmd_queue_lock,
                                flags);
 
@@ -1597,6 +1595,7 @@ void transport_init_se_cmd(
        INIT_LIST_HEAD(&cmd->se_delayed_node);
        INIT_LIST_HEAD(&cmd->se_ordered_node);
        INIT_LIST_HEAD(&cmd->se_qf_node);
+       INIT_LIST_HEAD(&cmd->se_queue_node);
 
        INIT_LIST_HEAD(&cmd->t_task_list);
        init_completion(&cmd->transport_lun_fe_stop_comp);