IB/srp: Fix race conditions related to task management
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 3db9a659719b0f6283af610bf01b8f65d27292a9..e397f1b0af09afaff0d3795571c36505ec905ef1 100644
@@ -1519,7 +1519,7 @@ static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
 
        if (dev->use_fast_reg) {
                state.sg = idb_sg;
-               sg_set_buf(idb_sg, req->indirect_desc, idb_len);
+               sg_init_one(idb_sg, req->indirect_desc, idb_len);
                idb_sg->dma_address = req->indirect_dma_addr; /* hack! */
 #ifdef CONFIG_NEED_SG_DMA_LENGTH
                idb_sg->dma_length = idb_sg->length;          /* hack^2 */
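For context, sg_init_one() is essentially sg_init_table() followed by sg_set_buf(), and the extra initialization matters here because idb_sg lives on the stack. Roughly what the helper does (cf. lib/scatterlist.c):

	/*
	 * sg_init_table() zeroes the entry, marks it as the end of a
	 * one-element table and, with CONFIG_DEBUG_SG, sets sg_magic;
	 * sg_set_buf() alone would leave the on-stack entry uninitialized.
	 */
	void sg_init_one(struct scatterlist *sg, const void *buf,
			 unsigned int buflen)
	{
		sg_init_table(sg, 1);
		sg_set_buf(sg, buf, buflen);
	}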
@@ -1787,17 +1787,24 @@ static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
        if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
                spin_lock_irqsave(&ch->lock, flags);
                ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
+               if (rsp->tag == ch->tsk_mgmt_tag) {
+                       ch->tsk_mgmt_status = -1;
+                       if (be32_to_cpu(rsp->resp_data_len) >= 4)
+                               ch->tsk_mgmt_status = rsp->data[3];
+                       complete(&ch->tsk_mgmt_done);
+               } else {
+                       shost_printk(KERN_ERR, target->scsi_host,
+                                    "Received tsk mgmt response too late for tag %#llx\n",
+                                    rsp->tag);
+               }
                spin_unlock_irqrestore(&ch->lock, flags);
-
-               ch->tsk_mgmt_status = -1;
-               if (be32_to_cpu(rsp->resp_data_len) >= 4)
-                       ch->tsk_mgmt_status = rsp->data[3];
-               complete(&ch->tsk_mgmt_done);
        } else {
                scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
-               if (scmnd) {
+               if (scmnd && scmnd->host_scribble) {
                        req = (void *)scmnd->host_scribble;
                        scmnd = srp_claim_req(ch, req, NULL, scmnd);
+               } else {
+                       scmnd = NULL;
                }
                if (!scmnd) {
                        shost_printk(KERN_ERR, target->scsi_host,
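Before this hunk, the task-management branch updated ch->tsk_mgmt_status and called complete() after dropping ch->lock and without looking at the tag, so a response arriving after the matching srp_send_tsk_mgmt() had already timed out could complete the wait of a later TMF and hand it a stale status. Now the completion fires only when the tag matches the outstanding ch->tsk_mgmt_tag, entirely under ch->lock. That field is added to struct srp_rdma_ch by the ib_srp.h half of this patch (not shown in this file's diff); a sketch, with the exact placement of the field approximate:

	/* drivers/infiniband/ulp/srp/ib_srp.h (same patch, sketched) */
	struct srp_rdma_ch {
		...
		u64			tsk_mgmt_tag;	/* tag of the in-flight TMF, written under ch->lock */
		struct completion	tsk_mgmt_done;
		u8			tsk_mgmt_status;
		...
	};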
@@ -2469,19 +2476,18 @@ srp_change_queue_depth(struct scsi_device *sdev, int qdepth)
 }
 
 static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun,
-                            u8 func)
+                            u8 func, u8 *status)
 {
        struct srp_target_port *target = ch->target;
        struct srp_rport *rport = target->rport;
        struct ib_device *dev = target->srp_host->srp_dev->dev;
        struct srp_iu *iu;
        struct srp_tsk_mgmt *tsk_mgmt;
+       int res;
 
        if (!ch->connected || target->qp_in_error)
                return -1;
 
-       init_completion(&ch->tsk_mgmt_done);
-
        /*
         * Lock the rport mutex to avoid that srp_create_ch_ib() is
         * invoked while a task management function is being sent.
@@ -2504,10 +2510,16 @@ static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun,
 
        tsk_mgmt->opcode        = SRP_TSK_MGMT;
        int_to_scsilun(lun, &tsk_mgmt->lun);
-       tsk_mgmt->tag           = req_tag | SRP_TAG_TSK_MGMT;
        tsk_mgmt->tsk_mgmt_func = func;
        tsk_mgmt->task_tag      = req_tag;
 
+       spin_lock_irq(&ch->lock);
+       ch->tsk_mgmt_tag = (ch->tsk_mgmt_tag + 1) | SRP_TAG_TSK_MGMT;
+       tsk_mgmt->tag = ch->tsk_mgmt_tag;
+       spin_unlock_irq(&ch->lock);
+
+       init_completion(&ch->tsk_mgmt_done);
+
        ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
                                      DMA_TO_DEVICE);
        if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
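The send side now pairs with that check: the fresh tag is generated and published under ch->lock, so srp_process_rsp() only ever sees a fully formed value, SRP_TAG_TSK_MGMT keeps it distinguishable from SCSI command tags, and the completion is re-initialized before the IU is posted so that a quick response cannot be wiped out by a later init_completion(). A condensed, comment-annotated view of the sequence (illustration only):

	spin_lock_irq(&ch->lock);
	ch->tsk_mgmt_tag = (ch->tsk_mgmt_tag + 1) | SRP_TAG_TSK_MGMT;	/* 1. fresh tag, visible to srp_process_rsp() */
	tsk_mgmt->tag = ch->tsk_mgmt_tag;
	spin_unlock_irq(&ch->lock);

	init_completion(&ch->tsk_mgmt_done);	/* 2. before posting, so a racing complete() is not lost */

	/* 3. srp_post_send(), then wait_for_completion_timeout() */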
@@ -2516,13 +2528,15 @@ static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun,
 
                return -1;
        }
+       res = wait_for_completion_timeout(&ch->tsk_mgmt_done,
+                                       msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS));
+       if (res > 0 && status)
+               *status = ch->tsk_mgmt_status;
        mutex_unlock(&rport->mutex);
 
-       if (!wait_for_completion_timeout(&ch->tsk_mgmt_done,
-                                        msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
-               return -1;
+       WARN_ON_ONCE(res < 0);
 
-       return 0;
+       return res > 0 ? 0 : -1;
 }
 
 static int srp_abort(struct scsi_cmnd *scmnd)
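wait_for_completion_timeout() returns 0 on timeout and the number of jiffies left (at least 1) otherwise, so *status is copied back only for a timely response and while the rport mutex is still held; the WARN_ON_ONCE(res < 0) is merely a safety net, since the non-interruptible wait cannot return a negative value. A hypothetical caller of the new interface (variable names outside the driver's own are illustrative):

	u8 tmf_status;

	/*
	 * Return 0: the target answered within SRP_ABORT_TIMEOUT_MS and
	 * tmf_status holds the SRP task-management status byte.
	 * Return -1: no (timely) response; tmf_status is not written.
	 */
	if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, lun,
			      SRP_TSK_LUN_RESET, &tmf_status) == 0 &&
	    tmf_status == 0)
		return SUCCESS;
	return FAILED;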
@@ -2548,7 +2562,7 @@ static int srp_abort(struct scsi_cmnd *scmnd)
        shost_printk(KERN_ERR, target->scsi_host,
                     "Sending SRP abort for tag %#x\n", tag);
        if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
-                             SRP_TSK_ABORT_TASK) == 0)
+                             SRP_TSK_ABORT_TASK, NULL) == 0)
                ret = SUCCESS;
        else if (target->rport->state == SRP_RPORT_LOST)
                ret = FAST_IO_FAIL;
@@ -2566,14 +2580,15 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
        struct srp_target_port *target = host_to_target(scmnd->device->host);
        struct srp_rdma_ch *ch;
        int i;
+       u8 status;
 
        shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
 
        ch = &target->ch[0];
        if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun,
-                             SRP_TSK_LUN_RESET))
+                             SRP_TSK_LUN_RESET, &status))
                return FAILED;
-       if (ch->tsk_mgmt_status)
+       if (status)
                return FAILED;
 
        for (i = 0; i < target->ch_count; i++) {