target: Drop lun_sep_lock for se_lun->lun_se_dev RCU usage
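
This change drops the per-port lun_sep_lock and instead treats se_lun->lun_se_dev as an RCU-managed pointer: the store side uses rcu_assign_pointer(), and every load uses rcu_dereference_raw() because the caller already holds something that pins the LUN (the percpu se_lun->lun_ref on the I/O path, or the se_lun->lun_group -> se_device->dev_group configfs symlink on the configuration paths). As a minimal sketch only, the assumed shape of the relevant fields is shown below; the actual __rcu annotation lives outside this file (presumably in include/target/target_core_base.h) and the struct here is illustrative, not the real se_lun layout.

#include <linux/percpu-refcount.h>
#include <linux/rcupdate.h>

struct se_device;

/* Illustrative subset of se_lun as assumed by this patch (not the real layout). */
struct example_se_lun {
	struct se_device __rcu	*lun_se_dev;	/* written with rcu_assign_pointer() */
	struct percpu_ref	lun_ref;	/* pins the LUN for in-flight commands */
	struct rcu_head		rcu_head;	/* for the final kfree_rcu() on release */
};
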
[firefly-linux-kernel-4.4.55.git] drivers/target/target_core_device.c
index 8485e9a789fc5253b44da876db6e384fef994a98..3baa6cd7fded2223d1fe76d8fbae8d681474f528 100644
@@ -61,7 +61,6 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
        struct se_lun *se_lun = NULL;
        struct se_session *se_sess = se_cmd->se_sess;
        struct se_node_acl *nacl = se_sess->se_node_acl;
-       struct se_device *dev;
        struct se_dev_entry *deve;
 
        if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG)
@@ -128,16 +127,21 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
                percpu_ref_get(&se_lun->lun_ref);
                se_cmd->lun_ref_active = true;
        }
+       /*
+        * RCU reference protected by percpu se_lun->lun_ref taken above that
+        * must drop to zero (including initial reference) before this se_lun
+        * pointer can be kfree_rcu() by the final se_lun->lun_group put via
+        * target_core_fabric_configfs.c:target_fabric_port_release
+        */
+       se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
+       atomic_long_inc(&se_cmd->se_dev->num_cmds);
 
-       /* Directly associate cmd with se_dev */
-       se_cmd->se_dev = se_lun->lun_se_dev;
-
-       dev = se_lun->lun_se_dev;
-       atomic_long_inc(&dev->num_cmds);
        if (se_cmd->data_direction == DMA_TO_DEVICE)
-               atomic_long_add(se_cmd->data_length, &dev->write_bytes);
+               atomic_long_add(se_cmd->data_length,
+                               &se_cmd->se_dev->write_bytes);
        else if (se_cmd->data_direction == DMA_FROM_DEVICE)
-               atomic_long_add(se_cmd->data_length, &dev->read_bytes);
+               atomic_long_add(se_cmd->data_length,
+                               &se_cmd->se_dev->read_bytes);
 
        return 0;
 }
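
The hunk above is the fast-path reader. A minimal sketch of the get/put pairing it relies on follows, using only the kernel primitives visible in the diff; the example_* function names are made up for illustration (the real code is open-coded in transport_lookup_cmd_lun() and the command release path).

#include <linux/percpu-refcount.h>
#include <linux/rcupdate.h>
#include <target/target_core_base.h>

/* Illustrative: pin the LUN, then load the RCU-managed backend device. */
static struct se_device *example_get_lun_dev(struct se_lun *se_lun)
{
	/*
	 * lun_ref must drop to zero before the final se_lun->lun_group put
	 * can kfree_rcu() the se_lun, so holding it (rather than
	 * rcu_read_lock()) is what makes the raw dereference safe.
	 */
	percpu_ref_get(&se_lun->lun_ref);
	return rcu_dereference_raw(se_lun->lun_se_dev);
}

/* Illustrative: paired with example_get_lun_dev() once the command completes. */
static void example_put_lun_dev(struct se_lun *se_lun)
{
	percpu_ref_put(&se_lun->lun_ref);
}
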
@@ -173,10 +177,11 @@ int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
                        unpacked_lun);
                return -ENODEV;
        }
-
-       /* Directly associate cmd with se_dev */
-       se_cmd->se_dev = se_lun->lun_se_dev;
-       se_tmr->tmr_dev = se_lun->lun_se_dev;
+       /*
+        * XXX: Add percpu se_lun->lun_ref reference count for TMR
+        */
+       se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
+       se_tmr->tmr_dev = rcu_dereference_raw(se_lun->lun_se_dev);
 
        spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags);
        list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
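
The XXX above defers taking a lun_ref on the TMR path. Purely as a hypothetical sketch of that follow-up (not part of this patch), it could mirror the I/O path with a tryget so a LUN already being shut down is rejected; the function name and error handling are assumptions.

/* Hypothetical TMR-side pin, mirroring the I/O path; not in this patch. */
static int example_tmr_pin_lun(struct se_cmd *se_cmd, struct se_lun *se_lun)
{
	if (!percpu_ref_tryget_live(&se_lun->lun_ref))
		return -ENODEV;			/* LUN already shutting down */

	se_cmd->lun_ref_active = true;
	se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
	return 0;
}
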
@@ -389,6 +394,11 @@ void core_disable_device_list_for_node(
        struct se_node_acl *nacl,
        struct se_portal_group *tpg)
 {
+       /*
+        * rcu_dereference_raw protected by se_lun->lun_group symlink
+        * reference to se_device->dev_group.
+        */
+       struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
        /*
         * If the MappedLUN entry is being disabled, the entry in
         * lun->lun_deve_list must be removed now before clearing the
@@ -426,7 +436,7 @@ void core_disable_device_list_for_node(
 
        kfree_rcu(orig, rcu_head);
 
-       core_scsi3_free_pr_reg_from_nacl(lun->lun_se_dev, nacl);
+       core_scsi3_free_pr_reg_from_nacl(dev, nacl);
 }
 
 /*      core_clear_lun_from_tpg():
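
This hunk (and the matching one in core_dev_add_initiator_node_lun_acl() below) dereferences lun_se_dev on a configuration path, where the lifetime guarantee comes from the configfs symlink rather than from lun_ref or rcu_read_lock(). As an illustrative alternative only, the same assumption could be spelled out to lockdep-RCU with rcu_dereference_check(); the helper below is a made-up stand-in for "the symlink is still in place".

/* Made-up stand-in for "the se_lun -> se_device configfs symlink still exists". */
static bool example_lun_symlinked(struct se_lun *lun)
{
	return true;	/* placeholder; a real check would come from configfs state */
}

/* Illustrative alternative to rcu_dereference_raw() on the configfs paths. */
static struct se_device *example_lun_dev_config_path(struct se_lun *lun)
{
	return rcu_dereference_check(lun->lun_se_dev,
				     example_lun_symlinked(lun));
}
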
@@ -629,6 +639,11 @@ int core_dev_add_initiator_node_lun_acl(
        u32 lun_access)
 {
        struct se_node_acl *nacl = lacl->se_lun_nacl;
+       /*
+        * rcu_dereference_raw protected by se_lun->lun_group symlink
+        * reference to se_device->dev_group.
+        */
+       struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
 
        if (!nacl)
                return -EINVAL;
@@ -652,7 +667,7 @@ int core_dev_add_initiator_node_lun_acl(
         * Check to see if there are any existing persistent reservation APTPL
         * pre-registrations that need to be enabled for this LUN ACL..
         */
-       core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, nacl,
+       core_scsi3_check_aptpl_registration(dev, tpg, lun, nacl,
                                            lacl->mapped_lun);
        return 0;
 }
@@ -746,6 +761,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
        dev->se_hba = hba;
        dev->transport = hba->backend->ops;
        dev->prot_length = sizeof(struct se_dif_v1_tuple);
+       dev->hba_index = hba->hba_index;
 
        INIT_LIST_HEAD(&dev->dev_list);
        INIT_LIST_HEAD(&dev->dev_sep_list);
@@ -802,8 +818,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
        dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;
 
        xcopy_lun = &dev->xcopy_lun;
-       xcopy_lun->lun_se_dev = dev;
-       spin_lock_init(&xcopy_lun->lun_sep_lock);
+       rcu_assign_pointer(xcopy_lun->lun_se_dev, dev);
        init_completion(&xcopy_lun->lun_ref_comp);
        INIT_LIST_HEAD(&xcopy_lun->lun_deve_list);
        INIT_LIST_HEAD(&xcopy_lun->lun_dev_link);
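
The xcopy_lun hunk above is the publish side of the new scheme. A sketch of the publish/retire pairing it implies follows; the retire half is illustrative, since the real clearing and kfree_rcu() of the se_lun happen in the port/configfs release path (target_fabric_port_release) rather than in this file, and the rcu_head member is an assumption.

/* Publish: every later rcu_dereference_raw() of lun_se_dev pairs with this. */
static void example_link_lun_to_dev(struct se_lun *lun, struct se_device *dev)
{
	rcu_assign_pointer(lun->lun_se_dev, dev);
}

/*
 * Retire (illustrative): only after lun_ref has dropped to zero, including
 * its initial reference, may the final configfs put clear the pointer and
 * free the se_lun, so readers that loaded lun_se_dev under lun_ref never
 * see it disappear underneath them. Assumes se_lun carries an rcu_head.
 */
static void example_unlink_lun(struct se_lun *lun)
{
	RCU_INIT_POINTER(lun->lun_se_dev, NULL);
	kfree_rcu(lun, rcu_head);
}
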