drivers/scsi/qla2xxx/qla_bsg.c
1 /*
2  * QLogic Fibre Channel HBA Driver
3  * Copyright (c)  2003-2011 QLogic Corporation
4  *
5  * See LICENSE.qla2xxx for copyright and licensing details.
6  */
7 #include "qla_def.h"
8
9 #include <linux/kthread.h>
10 #include <linux/vmalloc.h>
11 #include <linux/delay.h>
12
13 /* BSG support for ELS/CT pass through */
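/*
 * SRB completion callback for BSG pass-through commands: record the
 * result in the bsg_job reply, notify the midlayer that the job is
 * done and release the SRB.
 */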
14 void
15 qla2x00_bsg_job_done(void *data, void *ptr, int res)
16 {
17         srb_t *sp = (srb_t *)ptr;
18         struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
19         struct fc_bsg_job *bsg_job = sp->u.bsg_job;
20
21         bsg_job->reply->result = res;
22         bsg_job->job_done(bsg_job);
23         sp->free(vha, sp);
24 }
25
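/*
 * Release the resources held by a BSG SRB: unmap the request/reply
 * scatter-gather lists, free the dummy fcport allocated for host-based
 * CT/ELS commands and return the SRB to the mempool.
 */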
26 void
27 qla2x00_bsg_sp_free(void *data, void *ptr)
28 {
29         srb_t *sp = (srb_t *)ptr;
30         struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
31         struct fc_bsg_job *bsg_job = sp->u.bsg_job;
32         struct qla_hw_data *ha = vha->hw;
33
34         dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
35             bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
36
37         dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
38             bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
39
40         if (sp->type == SRB_CT_CMD ||
41             sp->type == SRB_ELS_CMD_HST)
42                 kfree(sp->fcport);
43         mempool_free(sp, vha->hw->srb_mempool);
44 }
45
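/*
 * Sanity-check FCP priority configuration data: verify the "HQOS"
 * signature and, when 'flag' is set, make sure at least one entry
 * carries a valid tag.  Returns 1 if the configuration is usable,
 * 0 otherwise.
 */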
46 int
47 qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *vha,
48         struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
49 {
50         int i, ret, num_valid;
51         uint8_t *bcode;
52         struct qla_fcp_prio_entry *pri_entry;
53         uint32_t *bcode_val_ptr, bcode_val;
54
55         ret = 1;
56         num_valid = 0;
57         bcode = (uint8_t *)pri_cfg;
58         bcode_val_ptr = (uint32_t *)pri_cfg;
59         bcode_val = (uint32_t)(*bcode_val_ptr);
60
61         if (bcode_val == 0xFFFFFFFF) {
62                 /* No FCP Priority config data in flash */
63                 ql_dbg(ql_dbg_user, vha, 0x7051,
64                     "No FCP Priority config data.\n");
65                 return 0;
66         }
67
68         if (bcode[0] != 'H' || bcode[1] != 'Q' || bcode[2] != 'O' ||
69                         bcode[3] != 'S') {
70                 /* Invalid FCP priority data header*/
71                 ql_dbg(ql_dbg_user, vha, 0x7052,
72                     "Invalid FCP Priority data header. bcode=0x%x.\n",
73                     bcode_val);
74                 return 0;
75         }
76         if (flag != 1)
77                 return ret;
78
79         pri_entry = &pri_cfg->entry[0];
80         for (i = 0; i < pri_cfg->num_entries; i++) {
81                 if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
82                         num_valid++;
83                 pri_entry++;
84         }
85
86         if (num_valid == 0) {
87                 /* No valid FCP priority data entries */
88                 ql_dbg(ql_dbg_user, vha, 0x7053,
89                     "No valid FCP Priority data entries.\n");
90                 ret = 0;
91         } else {
92                 /* FCP priority data is valid */
93                 ql_dbg(ql_dbg_user, vha, 0x7054,
94                     "Valid FCP priority data. num entries = %d.\n",
95                     num_valid);
96         }
97
98         return ret;
99 }
100
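/*
 * Vendor-specific BSG command handler for FCP priority configuration:
 * enable or disable FCP priority and get or set the priority table,
 * validating any table supplied by the application.
 */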
101 static int
102 qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
103 {
104         struct Scsi_Host *host = bsg_job->shost;
105         scsi_qla_host_t *vha = shost_priv(host);
106         struct qla_hw_data *ha = vha->hw;
107         int ret = 0;
108         uint32_t len;
109         uint32_t oper;
110
111         if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_QLA82XX(ha))) {
112                 ret = -EINVAL;
113                 goto exit_fcp_prio_cfg;
114         }
115
116         /* Get the sub command */
117         oper = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
118
119         /* Only set config is allowed if config memory is not allocated */
120         if (!ha->fcp_prio_cfg && (oper != QLFC_FCP_PRIO_SET_CONFIG)) {
121                 ret = -EINVAL;
122                 goto exit_fcp_prio_cfg;
123         }
124         switch (oper) {
125         case QLFC_FCP_PRIO_DISABLE:
126                 if (ha->flags.fcp_prio_enabled) {
127                         ha->flags.fcp_prio_enabled = 0;
128                         ha->fcp_prio_cfg->attributes &=
129                                 ~FCP_PRIO_ATTR_ENABLE;
130                         qla24xx_update_all_fcp_prio(vha);
131                         bsg_job->reply->result = DID_OK;
132                 } else {
133                         ret = -EINVAL;
134                         bsg_job->reply->result = (DID_ERROR << 16);
135                         goto exit_fcp_prio_cfg;
136                 }
137                 break;
138
139         case QLFC_FCP_PRIO_ENABLE:
140                 if (!ha->flags.fcp_prio_enabled) {
141                         if (ha->fcp_prio_cfg) {
142                                 ha->flags.fcp_prio_enabled = 1;
143                                 ha->fcp_prio_cfg->attributes |=
144                                     FCP_PRIO_ATTR_ENABLE;
145                                 qla24xx_update_all_fcp_prio(vha);
146                                 bsg_job->reply->result = DID_OK;
147                         } else {
148                                 ret = -EINVAL;
149                                 bsg_job->reply->result = (DID_ERROR << 16);
150                                 goto exit_fcp_prio_cfg;
151                         }
152                 }
153                 break;
154
155         case QLFC_FCP_PRIO_GET_CONFIG:
156                 len = bsg_job->reply_payload.payload_len;
157                 if (!len || len > FCP_PRIO_CFG_SIZE) {
158                         ret = -EINVAL;
159                         bsg_job->reply->result = (DID_ERROR << 16);
160                         goto exit_fcp_prio_cfg;
161                 }
162
163                 bsg_job->reply->result = DID_OK;
164                 bsg_job->reply->reply_payload_rcv_len =
165                         sg_copy_from_buffer(
166                         bsg_job->reply_payload.sg_list,
167                         bsg_job->reply_payload.sg_cnt, ha->fcp_prio_cfg,
168                         len);
169
170                 break;
171
172         case QLFC_FCP_PRIO_SET_CONFIG:
173                 len = bsg_job->request_payload.payload_len;
174                 if (!len || len > FCP_PRIO_CFG_SIZE) {
175                         bsg_job->reply->result = (DID_ERROR << 16);
176                         ret = -EINVAL;
177                         goto exit_fcp_prio_cfg;
178                 }
179
180                 if (!ha->fcp_prio_cfg) {
181                         ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
182                         if (!ha->fcp_prio_cfg) {
183                                 ql_log(ql_log_warn, vha, 0x7050,
184                                     "Unable to allocate memory for fcp prio "
185                                     "config data (%x).\n", FCP_PRIO_CFG_SIZE);
186                                 bsg_job->reply->result = (DID_ERROR << 16);
187                                 ret = -ENOMEM;
188                                 goto exit_fcp_prio_cfg;
189                         }
190                 }
191
192                 memset(ha->fcp_prio_cfg, 0, FCP_PRIO_CFG_SIZE);
193                 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
194                 bsg_job->request_payload.sg_cnt, ha->fcp_prio_cfg,
195                         FCP_PRIO_CFG_SIZE);
196
197                 /* validate fcp priority data */
198
199                 if (!qla24xx_fcp_prio_cfg_valid(vha,
200                     (struct qla_fcp_prio_cfg *) ha->fcp_prio_cfg, 1)) {
201                         bsg_job->reply->result = (DID_ERROR << 16);
202                         ret = -EINVAL;
203                         /* If the buffer was invalid then
204                          * fcp_prio_cfg is of no use
205                          */
206                         vfree(ha->fcp_prio_cfg);
207                         ha->fcp_prio_cfg = NULL;
208                         goto exit_fcp_prio_cfg;
209                 }
210
211                 ha->flags.fcp_prio_enabled = 0;
212                 if (ha->fcp_prio_cfg->attributes & FCP_PRIO_ATTR_ENABLE)
213                         ha->flags.fcp_prio_enabled = 1;
214                 qla24xx_update_all_fcp_prio(vha);
215                 bsg_job->reply->result = DID_OK;
216                 break;
217         default:
218                 ret = -EINVAL;
219                 break;
220         }
221 exit_fcp_prio_cfg:
222         bsg_job->job_done(bsg_job);
223         return ret;
224 }
225
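/*
 * Handle an ELS pass-through BSG request.  For rport-based requests the
 * existing fcport is used (performing a fabric login if needed); for
 * host-based ELS a temporary fcport is allocated.  The payload
 * scatter-gather lists are DMA-mapped and an SRB is queued to the
 * firmware via qla2x00_start_sp().
 */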
226 static int
227 qla2x00_process_els(struct fc_bsg_job *bsg_job)
228 {
229         struct fc_rport *rport;
230         fc_port_t *fcport = NULL;
231         struct Scsi_Host *host;
232         scsi_qla_host_t *vha;
233         struct qla_hw_data *ha;
234         srb_t *sp;
235         const char *type;
236         int req_sg_cnt, rsp_sg_cnt;
237         int rval =  (DRIVER_ERROR << 16);
238         uint16_t nextlid = 0;
239
240         if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
241                 rport = bsg_job->rport;
242                 fcport = *(fc_port_t **) rport->dd_data;
243                 host = rport_to_shost(rport);
244                 vha = shost_priv(host);
245                 ha = vha->hw;
246                 type = "FC_BSG_RPT_ELS";
247         } else {
248                 host = bsg_job->shost;
249                 vha = shost_priv(host);
250                 ha = vha->hw;
251                 type = "FC_BSG_HST_ELS_NOLOGIN";
252         }
253
254         /* pass through is supported only for ISP 4Gb or higher */
255         if (!IS_FWI2_CAPABLE(ha)) {
256                 ql_dbg(ql_dbg_user, vha, 0x7001,
257                     "ELS passthru not supported for ISP23xx based adapters.\n");
258                 rval = -EPERM;
259                 goto done;
260         }
261
262         /*  Multiple SG's are not supported for ELS requests */
263         if (bsg_job->request_payload.sg_cnt > 1 ||
264                 bsg_job->reply_payload.sg_cnt > 1) {
265                 ql_dbg(ql_dbg_user, vha, 0x7002,
266                     "Multiple SG's are not supported for ELS requests, "
267                     "request_sg_cnt=%x reply_sg_cnt=%x.\n",
268                     bsg_job->request_payload.sg_cnt,
269                     bsg_job->reply_payload.sg_cnt);
270                 rval = -EPERM;
271                 goto done;
272         }
273
274         /* ELS request for rport */
275         if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
276                 /* make sure the rport is logged in,
277                  * if not perform fabric login
278                  */
279                 if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
280                         ql_dbg(ql_dbg_user, vha, 0x7003,
281                             "Failed to login port %06X for ELS passthru.\n",
282                             fcport->d_id.b24);
283                         rval = -EIO;
284                         goto done;
285                 }
286         } else {
287                 /* Allocate a dummy fcport structure, since the functions
288                  * preparing the IOCB and mailbox command retrieve port
289                  * specific information from the fcport structure. For host
290                  * based ELS commands no fcport structure is allocated.
291                  */
292                 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
293                 if (!fcport) {
294                         rval = -ENOMEM;
295                         goto done;
296                 }
297
298                 /* Initialize all required fields of fcport */
299                 fcport->vha = vha;
300                 fcport->d_id.b.al_pa =
301                         bsg_job->request->rqst_data.h_els.port_id[0];
302                 fcport->d_id.b.area =
303                         bsg_job->request->rqst_data.h_els.port_id[1];
304                 fcport->d_id.b.domain =
305                         bsg_job->request->rqst_data.h_els.port_id[2];
306                 fcport->loop_id =
307                         (fcport->d_id.b.al_pa == 0xFD) ?
308                         NPH_FABRIC_CONTROLLER : NPH_F_PORT;
309         }
310
311         if (!vha->flags.online) {
312                 ql_log(ql_log_warn, vha, 0x7005, "Host not online.\n");
313                 rval = -EIO;
314                 goto done;
315         }
316
317         req_sg_cnt =
318                 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
319                 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
320         if (!req_sg_cnt) {
321                 rval = -ENOMEM;
322                 goto done_free_fcport;
323         }
324
325         rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
326                 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
327         if (!rsp_sg_cnt) {
328                 rval = -ENOMEM;
329                 goto done_free_fcport;
330         }
331
332         if ((req_sg_cnt !=  bsg_job->request_payload.sg_cnt) ||
333                 (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
334                 ql_log(ql_log_warn, vha, 0x7008,
335                     "dma mapping resulted in different sg counts, "
336                     "request_sg_cnt: %x dma_request_sg_cnt:%x reply_sg_cnt:%x "
337                     "dma_reply_sg_cnt:%x.\n", bsg_job->request_payload.sg_cnt,
338                     req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
339                 rval = -EAGAIN;
340                 goto done_unmap_sg;
341         }
342
343         /* Alloc SRB structure */
344         sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
345         if (!sp) {
346                 rval = -ENOMEM;
347                 goto done_unmap_sg;
348         }
349
350         sp->type =
351                 (bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
352                 SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
353         sp->name =
354                 (bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
355                 "bsg_els_rpt" : "bsg_els_hst");
356         sp->u.bsg_job = bsg_job;
357         sp->free = qla2x00_bsg_sp_free;
358         sp->done = qla2x00_bsg_job_done;
359
360         ql_dbg(ql_dbg_user, vha, 0x700a,
361             "bsg rqst type: %s els type: %x - loop-id=%x "
362             "portid=%-2x%02x%02x.\n", type,
363             bsg_job->request->rqst_data.h_els.command_code, fcport->loop_id,
364             fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);
365
366         rval = qla2x00_start_sp(sp);
367         if (rval != QLA_SUCCESS) {
368                 ql_log(ql_log_warn, vha, 0x700e,
369                     "qla2x00_start_sp failed = %d\n", rval);
370                 mempool_free(sp, ha->srb_mempool);
371                 rval = -EIO;
372                 goto done_unmap_sg;
373         }
374         return rval;
375
376 done_unmap_sg:
377         dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
378                 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
379         dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
380                 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
381         goto done_free_fcport;
382
383 done_free_fcport:
384         if (bsg_job->request->msgcode == FC_BSG_HST_ELS_NOLOGIN)
385                 kfree(fcport);
386 done:
387         return rval;
388 }
389
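/*
 * Compute the number of IOCBs needed for a CT pass-through command:
 * the first IOCB holds two data segments, each continuation IOCB holds
 * five more.
 */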
390 inline uint16_t
391 qla24xx_calc_ct_iocbs(uint16_t dsds)
392 {
393         uint16_t iocbs;
394
395         iocbs = 1;
396         if (dsds > 2) {
397                 iocbs += (dsds - 2) / 5;
398                 if ((dsds - 2) % 5)
399                         iocbs++;
400         }
401         return iocbs;
402 }
403
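/*
 * Handle a CT pass-through BSG request: map the payload scatter-gather
 * lists, derive the destination loop ID from the CT preamble (SNS or
 * management server), allocate a dummy fcport and an SRB, and hand the
 * command to the firmware.
 */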
404 static int
405 qla2x00_process_ct(struct fc_bsg_job *bsg_job)
406 {
407         srb_t *sp;
408         struct Scsi_Host *host = bsg_job->shost;
409         scsi_qla_host_t *vha = shost_priv(host);
410         struct qla_hw_data *ha = vha->hw;
411         int rval = (DRIVER_ERROR << 16);
412         int req_sg_cnt, rsp_sg_cnt;
413         uint16_t loop_id;
414         struct fc_port *fcport;
415         char  *type = "FC_BSG_HST_CT";
416
417         req_sg_cnt =
418                 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
419                         bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
420         if (!req_sg_cnt) {
421                 ql_log(ql_log_warn, vha, 0x700f,
422                     "dma_map_sg returned %d for request.\n", req_sg_cnt);
423                 rval = -ENOMEM;
424                 goto done;
425         }
426
427         rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
428                 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
429         if (!rsp_sg_cnt) {
430                 ql_log(ql_log_warn, vha, 0x7010,
431                     "dma_map_sg returned %d for reply.\n", rsp_sg_cnt);
432                 rval = -ENOMEM;
433                 goto done;
434         }
435
436         if ((req_sg_cnt !=  bsg_job->request_payload.sg_cnt) ||
437             (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
438                 ql_log(ql_log_warn, vha, 0x7011,
439                     "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
440                     "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
441                     req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
442                 rval = -EAGAIN;
443                 goto done_unmap_sg;
444         }
445
446         if (!vha->flags.online) {
447                 ql_log(ql_log_warn, vha, 0x7012,
448                     "Host is not online.\n");
449                 rval = -EIO;
450                 goto done_unmap_sg;
451         }
452
453         loop_id =
454                 (bsg_job->request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
455                         >> 24;
456         switch (loop_id) {
457         case 0xFC:
458                 loop_id = cpu_to_le16(NPH_SNS);
459                 break;
460         case 0xFA:
461                 loop_id = vha->mgmt_svr_loop_id;
462                 break;
463         default:
464                 ql_dbg(ql_dbg_user, vha, 0x7013,
465                     "Unknown loop id: %x.\n", loop_id);
466                 rval = -EINVAL;
467                 goto done_unmap_sg;
468         }
469
470         /* Allocate a dummy fcport structure, since the functions preparing
471          * the IOCB and mailbox command retrieve port specific information
472          * from the fcport structure. For host based CT commands no fcport
473          * structure is allocated.
474          */
475         fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
476         if (!fcport) {
477                 ql_log(ql_log_warn, vha, 0x7014,
478                     "Failed to allocate fcport.\n");
479                 rval = -ENOMEM;
480                 goto done_unmap_sg;
481         }
482
483         /* Initialize all required fields of fcport */
484         fcport->vha = vha;
485         fcport->d_id.b.al_pa = bsg_job->request->rqst_data.h_ct.port_id[0];
486         fcport->d_id.b.area = bsg_job->request->rqst_data.h_ct.port_id[1];
487         fcport->d_id.b.domain = bsg_job->request->rqst_data.h_ct.port_id[2];
488         fcport->loop_id = loop_id;
489
490         /* Alloc SRB structure */
491         sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
492         if (!sp) {
493                 ql_log(ql_log_warn, vha, 0x7015,
494                     "qla2x00_get_sp failed.\n");
495                 rval = -ENOMEM;
496                 goto done_free_fcport;
497         }
498
499         sp->type = SRB_CT_CMD;
500         sp->name = "bsg_ct";
501         sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
502         sp->u.bsg_job = bsg_job;
503         sp->free = qla2x00_bsg_sp_free;
504         sp->done = qla2x00_bsg_job_done;
505
506         ql_dbg(ql_dbg_user, vha, 0x7016,
507             "bsg rqst type: %s ct type: %x - "
508             "loop-id=%x portid=%02x%02x%02x.\n", type,
509             (bsg_job->request->rqst_data.h_ct.preamble_word2 >> 16),
510             fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
511             fcport->d_id.b.al_pa);
512
513         rval = qla2x00_start_sp(sp);
514         if (rval != QLA_SUCCESS) {
515                 ql_log(ql_log_warn, vha, 0x7017,
516                     "qla2x00_start_sp failed=%d.\n", rval);
517                 mempool_free(sp, ha->srb_mempool);
518                 rval = -EIO;
519                 goto done_free_fcport;
520         }
521         return rval;
522
523 done_free_fcport:
524         kfree(fcport);
525 done_unmap_sg:
526         dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
527                 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
528         dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
529                 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
530 done:
531         return rval;
532 }
533
534 /* Set the port configuration to enable the
535  * internal loopback on ISP81XX
536  */
537 static inline int
538 qla81xx_set_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
539     uint16_t *new_config)
540 {
541         int ret = 0;
542         int rval = 0;
543         struct qla_hw_data *ha = vha->hw;
544
545         if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
546                 goto done_set_internal;
547
548         new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1);
549         memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);
550
551         ha->notify_dcbx_comp = 1;
552         ret = qla81xx_set_port_config(vha, new_config);
553         if (ret != QLA_SUCCESS) {
554                 ql_log(ql_log_warn, vha, 0x7021,
555                     "set port config failed.\n");
556                 ha->notify_dcbx_comp = 0;
557                 rval = -EINVAL;
558                 goto done_set_internal;
559         }
560
561         /* Wait for DCBX complete event */
562         if (!wait_for_completion_timeout(&ha->dcbx_comp, (20 * HZ))) {
563                 ql_dbg(ql_dbg_user, vha, 0x7022,
564                     "State change notification not received.\n");
565         } else
566                 ql_dbg(ql_dbg_user, vha, 0x7023,
567                     "State change received.\n");
568
569         ha->notify_dcbx_comp = 0;
570
571 done_set_internal:
572         return rval;
573 }
574
575 /* Set the port configuration to disable the
576  * internal loopback on ISP81XX
577  */
578 static inline int
579 qla81xx_reset_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
580     int wait)
581 {
582         int ret = 0;
583         int rval = 0;
584         uint16_t new_config[4];
585         struct qla_hw_data *ha = vha->hw;
586
587         if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
588                 goto done_reset_internal;
589
590         memset(new_config, 0, sizeof(new_config));
591         if ((config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
592                         ENABLE_INTERNAL_LOOPBACK) {
593                 new_config[0] = config[0] & ~INTERNAL_LOOPBACK_MASK;
594                 memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);
595
596                 ha->notify_dcbx_comp = wait;
597                 ret = qla81xx_set_port_config(vha, new_config);
598                 if (ret != QLA_SUCCESS) {
599                         ql_log(ql_log_warn, vha, 0x7025,
600                             "Set port config failed.\n");
601                         ha->notify_dcbx_comp = 0;
602                         rval = -EINVAL;
603                         goto done_reset_internal;
604                 }
605
606                 /* Wait for DCBX complete event */
607                 if (wait && !wait_for_completion_timeout(&ha->dcbx_comp,
608                         (20 * HZ))) {
609                         ql_dbg(ql_dbg_user, vha, 0x7026,
610                             "State change notification not received.\n");
611                         ha->notify_dcbx_comp = 0;
612                         rval = -EINVAL;
613                         goto done_reset_internal;
614                 } else
615                         ql_dbg(ql_dbg_user, vha, 0x7027,
616                             "State change received.\n");
617
618                 ha->notify_dcbx_comp = 0;
619         }
620 done_reset_internal:
621         return rval;
622 }
623
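/*
 * Handle a vendor loopback/echo diagnostic BSG request.  The request
 * payload is copied into a coherent DMA buffer and either an ECHO or a
 * loopback mailbox command is issued; on ISP81xx/8031 the port
 * configuration is adjusted for the requested loopback mode and
 * restored afterwards.  The mailbox status registers and the command
 * code issued are appended to the BSG reply.
 */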
624 static int
625 qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
626 {
627         struct Scsi_Host *host = bsg_job->shost;
628         scsi_qla_host_t *vha = shost_priv(host);
629         struct qla_hw_data *ha = vha->hw;
630         int rval;
631         uint8_t command_sent;
632         char *type;
633         struct msg_echo_lb elreq;
634         uint16_t response[MAILBOX_REGISTER_COUNT];
635         uint16_t config[4], new_config[4];
636         uint8_t *fw_sts_ptr;
637         uint8_t *req_data = NULL;
638         dma_addr_t req_data_dma;
639         uint32_t req_data_len;
640         uint8_t *rsp_data = NULL;
641         dma_addr_t rsp_data_dma;
642         uint32_t rsp_data_len;
643
644         if (!vha->flags.online) {
645                 ql_log(ql_log_warn, vha, 0x7019, "Host is not online.\n");
646                 return -EIO;
647         }
648
649         elreq.req_sg_cnt = dma_map_sg(&ha->pdev->dev,
650                 bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt,
651                 DMA_TO_DEVICE);
652
653         if (!elreq.req_sg_cnt) {
654                 ql_log(ql_log_warn, vha, 0x701a,
655                     "dma_map_sg returned %d for request.\n", elreq.req_sg_cnt);
656                 return -ENOMEM;
657         }
658
659         elreq.rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
660                 bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
661                 DMA_FROM_DEVICE);
662
663         if (!elreq.rsp_sg_cnt) {
664                 ql_log(ql_log_warn, vha, 0x701b,
665                     "dma_map_sg returned %d for reply.\n", elreq.rsp_sg_cnt);
666                 rval = -ENOMEM;
667                 goto done_unmap_req_sg;
668         }
669
670         if ((elreq.req_sg_cnt !=  bsg_job->request_payload.sg_cnt) ||
671                 (elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
672                 ql_log(ql_log_warn, vha, 0x701c,
673                     "dma mapping resulted in different sg counts, "
674                     "request_sg_cnt: %x dma_request_sg_cnt: %x "
675                     "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
676                     bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
677                     bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt);
678                 rval = -EAGAIN;
679                 goto done_unmap_sg;
680         }
681         req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
682         req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
683                 &req_data_dma, GFP_KERNEL);
684         if (!req_data) {
685                 ql_log(ql_log_warn, vha, 0x701d,
686                     "dma alloc failed for req_data.\n");
687                 rval = -ENOMEM;
688                 goto done_unmap_sg;
689         }
690
691         rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
692                 &rsp_data_dma, GFP_KERNEL);
693         if (!rsp_data) {
694                 ql_log(ql_log_warn, vha, 0x7004,
695                     "dma alloc failed for rsp_data.\n");
696                 rval = -ENOMEM;
697                 goto done_free_dma_req;
698         }
699
700         /* Copy the request buffer in req_data now */
701         sg_copy_to_buffer(bsg_job->request_payload.sg_list,
702                 bsg_job->request_payload.sg_cnt, req_data, req_data_len);
703
704         elreq.send_dma = req_data_dma;
705         elreq.rcv_dma = rsp_data_dma;
706         elreq.transfer_size = req_data_len;
707
708         elreq.options = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
709
710         if ((ha->current_topology == ISP_CFG_F ||
711             ((IS_QLA81XX(ha) || IS_QLA8031(ha)) &&
712             le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE
713             && req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
714                 elreq.options == EXTERNAL_LOOPBACK) {
715                 type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
716                 ql_dbg(ql_dbg_user, vha, 0x701e,
717                     "BSG request type: %s.\n", type);
718                 command_sent = INT_DEF_LB_ECHO_CMD;
719                 rval = qla2x00_echo_test(vha, &elreq, response);
720         } else {
721                 if (IS_QLA81XX(ha) || IS_QLA8031(ha)) {
722                         memset(config, 0, sizeof(config));
723                         memset(new_config, 0, sizeof(new_config));
724                         if (qla81xx_get_port_config(vha, config)) {
725                                 ql_log(ql_log_warn, vha, 0x701f,
726                                     "Get port config failed.\n");
727                                 bsg_job->reply->result = (DID_ERROR << 16);
728                                 rval = -EPERM;
729                                 goto done_free_dma_req;
730                         }
731
732                         if (elreq.options != EXTERNAL_LOOPBACK) {
733                                 ql_dbg(ql_dbg_user, vha, 0x7020,
734                                     "Internal: current port config = %x\n",
735                                     config[0]);
736                                 if (qla81xx_set_internal_loopback(vha, config,
737                                         new_config)) {
738                                         ql_log(ql_log_warn, vha, 0x7024,
739                                             "Internal loopback failed.\n");
740                                         bsg_job->reply->result =
741                                                 (DID_ERROR << 16);
742                                         rval = -EPERM;
743                                         goto done_free_dma_req;
744                                 }
745                         } else {
746                                 /* For external loopback to work
747                                  * ensure internal loopback is disabled
748                                  */
749                                 if (qla81xx_reset_internal_loopback(vha,
750                                         config, 1)) {
751                                         bsg_job->reply->result =
752                                                 (DID_ERROR << 16);
753                                         rval = -EPERM;
754                                         goto done_free_dma_req;
755                                 }
756                         }
757
758                         type = "FC_BSG_HST_VENDOR_LOOPBACK";
759                         ql_dbg(ql_dbg_user, vha, 0x7028,
760                             "BSG request type: %s.\n", type);
761
762                         command_sent = INT_DEF_LB_LOOPBACK_CMD;
763                         rval = qla2x00_loopback_test(vha, &elreq, response);
764
765                         if (new_config[0]) {
766                                 /* Revert back to original port config
767                                  * Also clear internal loopback
768                                  */
769                                 qla81xx_reset_internal_loopback(vha,
770                                     new_config, 0);
771                         }
772
773                         if (response[0] == MBS_COMMAND_ERROR &&
774                                         response[1] == MBS_LB_RESET) {
775                                 ql_log(ql_log_warn, vha, 0x7029,
776                                     "MBX command error, Aborting ISP.\n");
777                                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
778                                 qla2xxx_wake_dpc(vha);
779                                 qla2x00_wait_for_chip_reset(vha);
780                                 /* Also reset the MPI */
781                                 if (qla81xx_restart_mpi_firmware(vha) !=
782                                     QLA_SUCCESS) {
783                                         ql_log(ql_log_warn, vha, 0x702a,
784                                             "MPI reset failed.\n");
785                                 }
786
787                                 bsg_job->reply->result = (DID_ERROR << 16);
788                                 rval = -EIO;
789                                 goto done_free_dma_req;
790                         }
791                 } else {
792                         type = "FC_BSG_HST_VENDOR_LOOPBACK";
793                         ql_dbg(ql_dbg_user, vha, 0x702b,
794                             "BSG request type: %s.\n", type);
795                         command_sent = INT_DEF_LB_LOOPBACK_CMD;
796                         rval = qla2x00_loopback_test(vha, &elreq, response);
797                 }
798         }
799
800         if (rval) {
801                 ql_log(ql_log_warn, vha, 0x702c,
802                     "Vendor request %s failed.\n", type);
803
804                 fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
805                     sizeof(struct fc_bsg_reply);
806
807                 memcpy(fw_sts_ptr, response, sizeof(response));
808                 fw_sts_ptr += sizeof(response);
809                 *fw_sts_ptr = command_sent;
810                 rval = 0;
811                 bsg_job->reply->result = (DID_ERROR << 16);
812         } else {
813                 ql_dbg(ql_dbg_user, vha, 0x702d,
814                     "Vendor request %s completed.\n", type);
815
816                 bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
817                         sizeof(response) + sizeof(uint8_t);
818                 bsg_job->reply->reply_payload_rcv_len =
819                         bsg_job->reply_payload.payload_len;
820                 fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
821                         sizeof(struct fc_bsg_reply);
822                 memcpy(fw_sts_ptr, response, sizeof(response));
823                 fw_sts_ptr += sizeof(response);
824                 *fw_sts_ptr = command_sent;
825                 bsg_job->reply->result = DID_OK;
826                 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
827                         bsg_job->reply_payload.sg_cnt, rsp_data,
828                         rsp_data_len);
829         }
830         bsg_job->job_done(bsg_job);
831
832         dma_free_coherent(&ha->pdev->dev, rsp_data_len,
833                 rsp_data, rsp_data_dma);
834 done_free_dma_req:
835         dma_free_coherent(&ha->pdev->dev, req_data_len,
836                 req_data, req_data_dma);
837 done_unmap_sg:
838         dma_unmap_sg(&ha->pdev->dev,
839             bsg_job->reply_payload.sg_list,
840             bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
841 done_unmap_req_sg:
842         dma_unmap_sg(&ha->pdev->dev,
843             bsg_job->request_payload.sg_list,
844             bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
845         return rval;
846 }
847
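/*
 * Vendor BSG command: reset the ISP84xx chip, optionally into the
 * diagnostic firmware, via qla84xx_reset_chip().
 */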
848 static int
849 qla84xx_reset(struct fc_bsg_job *bsg_job)
850 {
851         struct Scsi_Host *host = bsg_job->shost;
852         scsi_qla_host_t *vha = shost_priv(host);
853         struct qla_hw_data *ha = vha->hw;
854         int rval = 0;
855         uint32_t flag;
856
857         if (!IS_QLA84XX(ha)) {
858                 ql_dbg(ql_dbg_user, vha, 0x702f, "Not 84xx, exiting.\n");
859                 return -EINVAL;
860         }
861
862         flag = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
863
864         rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW);
865
866         if (rval) {
867                 ql_log(ql_log_warn, vha, 0x7030,
868                     "Vendor request 84xx reset failed.\n");
869                 rval = 0;
870                 bsg_job->reply->result = (DID_ERROR << 16);
871
872         } else {
873                 ql_dbg(ql_dbg_user, vha, 0x7031,
874                     "Vendor request 84xx reset completed.\n");
875                 bsg_job->reply->result = DID_OK;
876         }
877
878         bsg_job->job_done(bsg_job);
879         return rval;
880 }
881
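/*
 * Vendor BSG command: download a new ISP84xx firmware image.  The image
 * is copied from the request payload into a coherent DMA buffer and a
 * VERIFY CHIP IOCB is issued to flash it, optionally as diagnostic
 * firmware.
 */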
882 static int
883 qla84xx_updatefw(struct fc_bsg_job *bsg_job)
884 {
885         struct Scsi_Host *host = bsg_job->shost;
886         scsi_qla_host_t *vha = shost_priv(host);
887         struct qla_hw_data *ha = vha->hw;
888         struct verify_chip_entry_84xx *mn = NULL;
889         dma_addr_t mn_dma, fw_dma;
890         void *fw_buf = NULL;
891         int rval = 0;
892         uint32_t sg_cnt;
893         uint32_t data_len;
894         uint16_t options;
895         uint32_t flag;
896         uint32_t fw_ver;
897
898         if (!IS_QLA84XX(ha)) {
899                 ql_dbg(ql_dbg_user, vha, 0x7032,
900                     "Not 84xx, exiting.\n");
901                 return -EINVAL;
902         }
903
904         sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
905                 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
906         if (!sg_cnt) {
907                 ql_log(ql_log_warn, vha, 0x7033,
908                     "dma_map_sg returned %d for request.\n", sg_cnt);
909                 return -ENOMEM;
910         }
911
912         if (sg_cnt != bsg_job->request_payload.sg_cnt) {
913                 ql_log(ql_log_warn, vha, 0x7034,
914                     "DMA mapping resulted in different sg counts, "
915                     "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
916                     bsg_job->request_payload.sg_cnt, sg_cnt);
917                 rval = -EAGAIN;
918                 goto done_unmap_sg;
919         }
920
921         data_len = bsg_job->request_payload.payload_len;
922         fw_buf = dma_alloc_coherent(&ha->pdev->dev, data_len,
923                 &fw_dma, GFP_KERNEL);
924         if (!fw_buf) {
925                 ql_log(ql_log_warn, vha, 0x7035,
926                     "DMA alloc failed for fw_buf.\n");
927                 rval = -ENOMEM;
928                 goto done_unmap_sg;
929         }
930
931         sg_copy_to_buffer(bsg_job->request_payload.sg_list,
932                 bsg_job->request_payload.sg_cnt, fw_buf, data_len);
933
934         mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
935         if (!mn) {
936                 ql_log(ql_log_warn, vha, 0x7036,
937                     "DMA alloc failed for fw buffer.\n");
938                 rval = -ENOMEM;
939                 goto done_free_fw_buf;
940         }
941
942         flag = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
943         fw_ver = le32_to_cpu(*((uint32_t *)((uint32_t *)fw_buf + 2)));
944
945         memset(mn, 0, sizeof(struct verify_chip_entry_84xx));
946         mn->entry_type = VERIFY_CHIP_IOCB_TYPE;
947         mn->entry_count = 1;
948
949         options = VCO_FORCE_UPDATE | VCO_END_OF_DATA;
950         if (flag == A84_ISSUE_UPDATE_DIAGFW_CMD)
951                 options |= VCO_DIAG_FW;
952
953         mn->options = cpu_to_le16(options);
954         mn->fw_ver =  cpu_to_le32(fw_ver);
955         mn->fw_size =  cpu_to_le32(data_len);
956         mn->fw_seq_size =  cpu_to_le32(data_len);
957         mn->dseg_address[0] = cpu_to_le32(LSD(fw_dma));
958         mn->dseg_address[1] = cpu_to_le32(MSD(fw_dma));
959         mn->dseg_length = cpu_to_le32(data_len);
960         mn->data_seg_cnt = cpu_to_le16(1);
961
962         rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
963
964         if (rval) {
965                 ql_log(ql_log_warn, vha, 0x7037,
966                     "Vendor request 84xx updatefw failed.\n");
967
968                 rval = 0;
969                 bsg_job->reply->result = (DID_ERROR << 16);
970         } else {
971                 ql_dbg(ql_dbg_user, vha, 0x7038,
972                     "Vendor request 84xx updatefw completed.\n");
973
974                 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
975                 bsg_job->reply->result = DID_OK;
976         }
977
978         bsg_job->job_done(bsg_job);
979         dma_pool_free(ha->s_dma_pool, mn, mn_dma);
980
981 done_free_fw_buf:
982         dma_free_coherent(&ha->pdev->dev, data_len, fw_buf, fw_dma);
983
984 done_unmap_sg:
985         dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
986                 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
987
988         return rval;
989 }
990
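/*
 * Vendor BSG command: ISP84xx management access.  Builds an ACCESS CHIP
 * IOCB to read or write chip memory, retrieve information or change a
 * configuration parameter, using a coherent DMA buffer for the data
 * phase where one is needed.
 */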
991 static int
992 qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
993 {
994         struct Scsi_Host *host = bsg_job->shost;
995         scsi_qla_host_t *vha = shost_priv(host);
996         struct qla_hw_data *ha = vha->hw;
997         struct access_chip_84xx *mn = NULL;
998         dma_addr_t mn_dma, mgmt_dma;
999         void *mgmt_b = NULL;
1000         int rval = 0;
1001         struct qla_bsg_a84_mgmt *ql84_mgmt;
1002         uint32_t sg_cnt;
1003         uint32_t data_len = 0;
1004         uint32_t dma_direction = DMA_NONE;
1005
1006         if (!IS_QLA84XX(ha)) {
1007                 ql_log(ql_log_warn, vha, 0x703a,
1008                     "Not 84xx, exiting.\n");
1009                 return -EINVAL;
1010         }
1011
1012         ql84_mgmt = (struct qla_bsg_a84_mgmt *)((char *)bsg_job->request +
1013                 sizeof(struct fc_bsg_request));
1014         if (!ql84_mgmt) {
1015                 ql_log(ql_log_warn, vha, 0x703b,
1016                     "MGMT header not provided, exiting.\n");
1017                 return -EINVAL;
1018         }
1019
1020         mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
1021         if (!mn) {
1022                 ql_log(ql_log_warn, vha, 0x703c,
1023                     "DMA alloc failed for fw buffer.\n");
1024                 return -ENOMEM;
1025         }
1026
1027         memset(mn, 0, sizeof(struct access_chip_84xx));
1028         mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
1029         mn->entry_count = 1;
1030
1031         switch (ql84_mgmt->mgmt.cmd) {
1032         case QLA84_MGMT_READ_MEM:
1033         case QLA84_MGMT_GET_INFO:
1034                 sg_cnt = dma_map_sg(&ha->pdev->dev,
1035                         bsg_job->reply_payload.sg_list,
1036                         bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1037                 if (!sg_cnt) {
1038                         ql_log(ql_log_warn, vha, 0x703d,
1039                             "dma_map_sg returned %d for reply.\n", sg_cnt);
1040                         rval = -ENOMEM;
1041                         goto exit_mgmt;
1042                 }
1043
1044                 dma_direction = DMA_FROM_DEVICE;
1045
1046                 if (sg_cnt != bsg_job->reply_payload.sg_cnt) {
1047                         ql_log(ql_log_warn, vha, 0x703e,
1048                             "DMA mapping resulted in different sg counts, "
1049                             "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
1050                             bsg_job->reply_payload.sg_cnt, sg_cnt);
1051                         rval = -EAGAIN;
1052                         goto done_unmap_sg;
1053                 }
1054
1055                 data_len = bsg_job->reply_payload.payload_len;
1056
1057                 mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
1058                     &mgmt_dma, GFP_KERNEL);
1059                 if (!mgmt_b) {
1060                         ql_log(ql_log_warn, vha, 0x703f,
1061                             "DMA alloc failed for mgmt_b.\n");
1062                         rval = -ENOMEM;
1063                         goto done_unmap_sg;
1064                 }
1065
1066                 if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) {
1067                         mn->options = cpu_to_le16(ACO_DUMP_MEMORY);
1068                         mn->parameter1 =
1069                                 cpu_to_le32(
1070                                 ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
1071
1072                 } else if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO) {
1073                         mn->options = cpu_to_le16(ACO_REQUEST_INFO);
1074                         mn->parameter1 =
1075                                 cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.info.type);
1076
1077                         mn->parameter2 =
1078                                 cpu_to_le32(
1079                                 ql84_mgmt->mgmt.mgmtp.u.info.context);
1080                 }
1081                 break;
1082
1083         case QLA84_MGMT_WRITE_MEM:
1084                 sg_cnt = dma_map_sg(&ha->pdev->dev,
1085                         bsg_job->request_payload.sg_list,
1086                         bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1087
1088                 if (!sg_cnt) {
1089                         ql_log(ql_log_warn, vha, 0x7040,
1090                             "dma_map_sg returned %d.\n", sg_cnt);
1091                         rval = -ENOMEM;
1092                         goto exit_mgmt;
1093                 }
1094
1095                 dma_direction = DMA_TO_DEVICE;
1096
1097                 if (sg_cnt != bsg_job->request_payload.sg_cnt) {
1098                         ql_log(ql_log_warn, vha, 0x7041,
1099                             "DMA mapping resulted in different sg counts, "
1100                             "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
1101                             bsg_job->request_payload.sg_cnt, sg_cnt);
1102                         rval = -EAGAIN;
1103                         goto done_unmap_sg;
1104                 }
1105
1106                 data_len = bsg_job->request_payload.payload_len;
1107                 mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
1108                         &mgmt_dma, GFP_KERNEL);
1109                 if (!mgmt_b) {
1110                         ql_log(ql_log_warn, vha, 0x7042,
1111                             "DMA alloc failed for mgmt_b.\n");
1112                         rval = -ENOMEM;
1113                         goto done_unmap_sg;
1114                 }
1115
1116                 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1117                         bsg_job->request_payload.sg_cnt, mgmt_b, data_len);
1118
1119                 mn->options = cpu_to_le16(ACO_LOAD_MEMORY);
1120                 mn->parameter1 =
1121                         cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
1122                 break;
1123
1124         case QLA84_MGMT_CHNG_CONFIG:
1125                 mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM);
1126                 mn->parameter1 =
1127                         cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.id);
1128
1129                 mn->parameter2 =
1130                         cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param0);
1131
1132                 mn->parameter3 =
1133                         cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param1);
1134                 break;
1135
1136         default:
1137                 rval = -EIO;
1138                 goto exit_mgmt;
1139         }
1140
1141         if (ql84_mgmt->mgmt.cmd != QLA84_MGMT_CHNG_CONFIG) {
1142                 mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->mgmt.len);
1143                 mn->dseg_count = cpu_to_le16(1);
1144                 mn->dseg_address[0] = cpu_to_le32(LSD(mgmt_dma));
1145                 mn->dseg_address[1] = cpu_to_le32(MSD(mgmt_dma));
1146                 mn->dseg_length = cpu_to_le32(ql84_mgmt->mgmt.len);
1147         }
1148
1149         rval = qla2x00_issue_iocb(vha, mn, mn_dma, 0);
1150
1151         if (rval) {
1152                 ql_log(ql_log_warn, vha, 0x7043,
1153                     "Vendor request 84xx mgmt failed.\n");
1154
1155                 rval = 0;
1156                 bsg_job->reply->result = (DID_ERROR << 16);
1157
1158         } else {
1159                 ql_dbg(ql_dbg_user, vha, 0x7044,
1160                     "Vendor request 84xx mgmt completed.\n");
1161
1162                 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1163                 bsg_job->reply->result = DID_OK;
1164
1165                 if ((ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) ||
1166                         (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO)) {
1167                         bsg_job->reply->reply_payload_rcv_len =
1168                                 bsg_job->reply_payload.payload_len;
1169
1170                         sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1171                                 bsg_job->reply_payload.sg_cnt, mgmt_b,
1172                                 data_len);
1173                 }
1174         }
1175
1176         bsg_job->job_done(bsg_job);
1177
1178 done_unmap_sg:
1179         if (mgmt_b)
1180                 dma_free_coherent(&ha->pdev->dev, data_len, mgmt_b, mgmt_dma);
1181
1182         if (dma_direction == DMA_TO_DEVICE)
1183                 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
1184                         bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1185         else if (dma_direction == DMA_FROM_DEVICE)
1186                 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
1187                         bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1188
1189 exit_mgmt:
1190         dma_pool_free(ha->s_dma_pool, mn, mn_dma);
1191
1192         return rval;
1193 }
1194
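/*
 * Vendor BSG command: get or set the iIDMA (port speed) setting for the
 * target port identified by the WWPN in the request.
 */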
1195 static int
1196 qla24xx_iidma(struct fc_bsg_job *bsg_job)
1197 {
1198         struct Scsi_Host *host = bsg_job->shost;
1199         scsi_qla_host_t *vha = shost_priv(host);
1200         int rval = 0;
1201         struct qla_port_param *port_param = NULL;
1202         fc_port_t *fcport = NULL;
1203         uint16_t mb[MAILBOX_REGISTER_COUNT];
1204         uint8_t *rsp_ptr = NULL;
1205
1206         if (!IS_IIDMA_CAPABLE(vha->hw)) {
1207                 ql_log(ql_log_info, vha, 0x7046, "iiDMA not supported.\n");
1208                 return -EINVAL;
1209         }
1210
1211         port_param = (struct qla_port_param *)((char *)bsg_job->request +
1212                 sizeof(struct fc_bsg_request));
1213         if (!port_param) {
1214                 ql_log(ql_log_warn, vha, 0x7047,
1215                     "port_param header not provided.\n");
1216                 return -EINVAL;
1217         }
1218
1219         if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) {
1220                 ql_log(ql_log_warn, vha, 0x7048,
1221                     "Invalid destination type.\n");
1222                 return -EINVAL;
1223         }
1224
1225         list_for_each_entry(fcport, &vha->vp_fcports, list) {
1226                 if (fcport->port_type != FCT_TARGET)
1227                         continue;
1228
1229                 if (memcmp(port_param->fc_scsi_addr.dest_addr.wwpn,
1230                         fcport->port_name, sizeof(fcport->port_name)))
1231                         continue;
1232                 break;
1233         }
1234
1235         if (!fcport) {
1236                 ql_log(ql_log_warn, vha, 0x7049,
1237                     "Failed to find port.\n");
1238                 return -EINVAL;
1239         }
1240
1241         if (atomic_read(&fcport->state) != FCS_ONLINE) {
1242                 ql_log(ql_log_warn, vha, 0x704a,
1243                     "Port is not online.\n");
1244                 return -EINVAL;
1245         }
1246
1247         if (fcport->flags & FCF_LOGIN_NEEDED) {
1248                 ql_log(ql_log_warn, vha, 0x704b,
1249                     "Remote port not logged in flags = 0x%x.\n", fcport->flags);
1250                 return -EINVAL;
1251         }
1252
1253         if (port_param->mode)
1254                 rval = qla2x00_set_idma_speed(vha, fcport->loop_id,
1255                         port_param->speed, mb);
1256         else
1257                 rval = qla2x00_get_idma_speed(vha, fcport->loop_id,
1258                         &port_param->speed, mb);
1259
1260         if (rval) {
1261                 ql_log(ql_log_warn, vha, 0x704c,
1262                     "iIDMA cmd failed for %02x%02x%02x%02x%02x%02x%02x%02x -- "
1263                     "%04x %x %04x %04x.\n", fcport->port_name[0],
1264                     fcport->port_name[1], fcport->port_name[2],
1265                     fcport->port_name[3], fcport->port_name[4],
1266                     fcport->port_name[5], fcport->port_name[6],
1267                     fcport->port_name[7], rval, fcport->fp_speed, mb[0], mb[1]);
1268                 rval = 0;
1269                 bsg_job->reply->result = (DID_ERROR << 16);
1270
1271         } else {
1272                 if (!port_param->mode) {
1273                         bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
1274                                 sizeof(struct qla_port_param);
1275
1276                         rsp_ptr = ((uint8_t *)bsg_job->reply) +
1277                                 sizeof(struct fc_bsg_reply);
1278
1279                         memcpy(rsp_ptr, port_param,
1280                                 sizeof(struct qla_port_param));
1281                 }
1282
1283                 bsg_job->reply->result = DID_OK;
1284         }
1285
1286         bsg_job->job_done(bsg_job);
1287         return rval;
1288 }
1289
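/*
 * Common setup for option ROM read/update BSG requests: validate the
 * requested start offset, compute the region size, set the optrom
 * state and allocate the staging buffer.
 */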
1290 static int
1291 qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, scsi_qla_host_t *vha,
1292         uint8_t is_update)
1293 {
1294         uint32_t start = 0;
1295         int valid = 0;
1296         struct qla_hw_data *ha = vha->hw;
1297
1298         if (unlikely(pci_channel_offline(ha->pdev)))
1299                 return -EINVAL;
1300
1301         start = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
1302         if (start > ha->optrom_size) {
1303                 ql_log(ql_log_warn, vha, 0x7055,
1304                     "start %d > optrom_size %d.\n", start, ha->optrom_size);
1305                 return -EINVAL;
1306         }
1307
1308         if (ha->optrom_state != QLA_SWAITING) {
1309                 ql_log(ql_log_info, vha, 0x7056,
1310                     "optrom_state %d.\n", ha->optrom_state);
1311                 return -EBUSY;
1312         }
1313
1314         ha->optrom_region_start = start;
1315         ql_dbg(ql_dbg_user, vha, 0x7057, "is_update=%d.\n", is_update);
1316         if (is_update) {
1317                 if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
1318                         valid = 1;
1319                 else if (start == (ha->flt_region_boot * 4) ||
1320                     start == (ha->flt_region_fw * 4))
1321                         valid = 1;
1322                 else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
1323                     IS_CNA_CAPABLE(ha) || IS_QLA2031(ha))
1324                         valid = 1;
1325                 if (!valid) {
1326                         ql_log(ql_log_warn, vha, 0x7058,
1327                             "Invalid start region 0x%x/0x%x.\n", start,
1328                             bsg_job->request_payload.payload_len);
1329                         return -EINVAL;
1330                 }
1331
1332                 ha->optrom_region_size = start +
1333                     bsg_job->request_payload.payload_len > ha->optrom_size ?
1334                     ha->optrom_size - start :
1335                     bsg_job->request_payload.payload_len;
1336                 ha->optrom_state = QLA_SWRITING;
1337         } else {
1338                 ha->optrom_region_size = start +
1339                     bsg_job->reply_payload.payload_len > ha->optrom_size ?
1340                     ha->optrom_size - start :
1341                     bsg_job->reply_payload.payload_len;
1342                 ha->optrom_state = QLA_SREADING;
1343         }
1344
1345         ha->optrom_buffer = vmalloc(ha->optrom_region_size);
1346         if (!ha->optrom_buffer) {
1347                 ql_log(ql_log_warn, vha, 0x7059,
1348                     "Unable to allocate memory for optrom retrieval "
1349                     "(%x)\n", ha->optrom_region_size);
1350
1351                 ha->optrom_state = QLA_SWAITING;
1352                 return -ENOMEM;
1353         }
1354
1355         memset(ha->optrom_buffer, 0, ha->optrom_region_size);
1356         return 0;
1357 }
1358
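/*
 * Vendor BSG command: read a region of the option ROM (flash) into the
 * reply payload.
 */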
1359 static int
1360 qla2x00_read_optrom(struct fc_bsg_job *bsg_job)
1361 {
1362         struct Scsi_Host *host = bsg_job->shost;
1363         scsi_qla_host_t *vha = shost_priv(host);
1364         struct qla_hw_data *ha = vha->hw;
1365         int rval = 0;
1366
1367         if (ha->flags.isp82xx_reset_hdlr_active)
1368                 return -EBUSY;
1369
1370         rval = qla2x00_optrom_setup(bsg_job, vha, 0);
1371         if (rval)
1372                 return rval;
1373
1374         ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
1375             ha->optrom_region_start, ha->optrom_region_size);
1376
1377         sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1378             bsg_job->reply_payload.sg_cnt, ha->optrom_buffer,
1379             ha->optrom_region_size);
1380
1381         bsg_job->reply->reply_payload_rcv_len = ha->optrom_region_size;
1382         bsg_job->reply->result = DID_OK;
1383         vfree(ha->optrom_buffer);
1384         ha->optrom_buffer = NULL;
1385         ha->optrom_state = QLA_SWAITING;
1386         bsg_job->job_done(bsg_job);
1387         return rval;
1388 }
1389
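/*
 * QL_VND_UPDATE_FLASH handler: copy the new image from the request
 * scatter/gather list into the staging buffer and burn it with the
 * ISP-specific write_optrom method.
 */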
1390 static int
1391 qla2x00_update_optrom(struct fc_bsg_job *bsg_job)
1392 {
1393         struct Scsi_Host *host = bsg_job->shost;
1394         scsi_qla_host_t *vha = shost_priv(host);
1395         struct qla_hw_data *ha = vha->hw;
1396         int rval = 0;
1397
1398         rval = qla2x00_optrom_setup(bsg_job, vha, 1);
1399         if (rval)
1400                 return rval;
1401
1402         /* Set isp82xx_no_md_cap so that no minidump is captured during the flash update */
1403         ha->flags.isp82xx_no_md_cap = 1;
1404
1405         sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1406             bsg_job->request_payload.sg_cnt, ha->optrom_buffer,
1407             ha->optrom_region_size);
1408
1409         ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
1410             ha->optrom_region_start, ha->optrom_region_size);
1411
1412         bsg_job->reply->result = DID_OK;
1413         vfree(ha->optrom_buffer);
1414         ha->optrom_buffer = NULL;
1415         ha->optrom_state = QLA_SWAITING;
1416         bsg_job->job_done(bsg_job);
1417         return rval;
1418 }
1419
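/*
 * QL_VND_SET_FRU_VERSION handler: walk the qla_image_version list supplied
 * in the request payload and write each entry's field_info to the address it
 * describes, via qla2x00_write_sfp() and a DMA-pool bounce buffer.  The
 * EXT_STATUS_* result is returned in vendor_rsp[0].
 */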
1420 static int
1421 qla2x00_update_fru_versions(struct fc_bsg_job *bsg_job)
1422 {
1423         struct Scsi_Host *host = bsg_job->shost;
1424         scsi_qla_host_t *vha = shost_priv(host);
1425         struct qla_hw_data *ha = vha->hw;
1426         int rval = 0;
1427         uint8_t bsg[DMA_POOL_SIZE];
1428         struct qla_image_version_list *list = (void *)bsg;
1429         struct qla_image_version *image;
1430         uint32_t count;
1431         dma_addr_t sfp_dma;
1432         void *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1433         if (!sfp) {
1434                 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1435                     EXT_STATUS_NO_MEMORY;
1436                 goto done;
1437         }
1438
1439         sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1440             bsg_job->request_payload.sg_cnt, list, sizeof(bsg));
1441
1442         image = list->version;
1443         count = list->count;
1444         while (count--) {
1445                 memcpy(sfp, &image->field_info, sizeof(image->field_info));
1446                 rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
1447                     image->field_address.device, image->field_address.offset,
1448                     sizeof(image->field_info), image->field_address.option);
1449                 if (rval) {
1450                         bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1451                             EXT_STATUS_MAILBOX;
1452                         goto dealloc;
1453                 }
1454                 image++;
1455         }
1456
1457         bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1458
1459 dealloc:
1460         dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1461
1462 done:
1463         bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1464         bsg_job->reply->result = DID_OK << 16;
1465         bsg_job->job_done(bsg_job);
1466
1467         return 0;
1468 }
1469
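/*
 * QL_VND_READ_FRU_STATUS handler: read the status register described by the
 * qla_status_reg in the request payload via qla2x00_read_sfp() and return it
 * in the reply payload; vendor_rsp[0] carries the EXT_STATUS_* result.
 */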
1470 static int
1471 qla2x00_read_fru_status(struct fc_bsg_job *bsg_job)
1472 {
1473         struct Scsi_Host *host = bsg_job->shost;
1474         scsi_qla_host_t *vha = shost_priv(host);
1475         struct qla_hw_data *ha = vha->hw;
1476         int rval = 0;
1477         uint8_t bsg[DMA_POOL_SIZE];
1478         struct qla_status_reg *sr = (void *)bsg;
1479         dma_addr_t sfp_dma;
1480         uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1481         if (!sfp) {
1482                 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1483                     EXT_STATUS_NO_MEMORY;
1484                 goto done;
1485         }
1486
1487         sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1488             bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));
1489
1490         rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
1491             sr->field_address.device, sr->field_address.offset,
1492             sizeof(sr->status_reg), sr->field_address.option);
1493         sr->status_reg = *sfp;
1494
1495         if (rval) {
1496                 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1497                     EXT_STATUS_MAILBOX;
1498                 goto dealloc;
1499         }
1500
1501         sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1502             bsg_job->reply_payload.sg_cnt, sr, sizeof(*sr));
1503
1504         bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1505
1506 dealloc:
1507         dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1508
1509 done:
1510         bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1511         bsg_job->reply->reply_payload_rcv_len = sizeof(*sr);
1512         bsg_job->reply->result = DID_OK << 16;
1513         bsg_job->job_done(bsg_job);
1514
1515         return 0;
1516 }
1517
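/*
 * QL_VND_WRITE_FRU_STATUS handler: write the status register value from the
 * request payload to the address described by its qla_status_reg via
 * qla2x00_write_sfp(); vendor_rsp[0] carries the EXT_STATUS_* result.
 */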
1518 static int
1519 qla2x00_write_fru_status(struct fc_bsg_job *bsg_job)
1520 {
1521         struct Scsi_Host *host = bsg_job->shost;
1522         scsi_qla_host_t *vha = shost_priv(host);
1523         struct qla_hw_data *ha = vha->hw;
1524         int rval = 0;
1525         uint8_t bsg[DMA_POOL_SIZE];
1526         struct qla_status_reg *sr = (void *)bsg;
1527         dma_addr_t sfp_dma;
1528         uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1529         if (!sfp) {
1530                 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1531                     EXT_STATUS_NO_MEMORY;
1532                 goto done;
1533         }
1534
1535         sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1536             bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));
1537
1538         *sfp = sr->status_reg;
1539         rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
1540             sr->field_address.device, sr->field_address.offset,
1541             sizeof(sr->status_reg), sr->field_address.option);
1542
1543         if (rval) {
1544                 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1545                     EXT_STATUS_MAILBOX;
1546                 goto dealloc;
1547         }
1548
1549         bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1550
1551 dealloc:
1552         dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1553
1554 done:
1555         bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1556         bsg_job->reply->result = DID_OK << 16;
1557         bsg_job->job_done(bsg_job);
1558
1559         return 0;
1560 }
1561
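/*
 * Dispatch an FC_BSG_HST_VENDOR request on vendor_cmd[0].  Each handler
 * completes the bsg_job itself; unknown commands are failed with DID_ERROR
 * and -ENOSYS.
 */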
1562 static int
1563 qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
1564 {
1565         switch (bsg_job->request->rqst_data.h_vendor.vendor_cmd[0]) {
1566         case QL_VND_LOOPBACK:
1567                 return qla2x00_process_loopback(bsg_job);
1568
1569         case QL_VND_A84_RESET:
1570                 return qla84xx_reset(bsg_job);
1571
1572         case QL_VND_A84_UPDATE_FW:
1573                 return qla84xx_updatefw(bsg_job);
1574
1575         case QL_VND_A84_MGMT_CMD:
1576                 return qla84xx_mgmt_cmd(bsg_job);
1577
1578         case QL_VND_IIDMA:
1579                 return qla24xx_iidma(bsg_job);
1580
1581         case QL_VND_FCP_PRIO_CFG_CMD:
1582                 return qla24xx_proc_fcp_prio_cfg_cmd(bsg_job);
1583
1584         case QL_VND_READ_FLASH:
1585                 return qla2x00_read_optrom(bsg_job);
1586
1587         case QL_VND_UPDATE_FLASH:
1588                 return qla2x00_update_optrom(bsg_job);
1589
1590         case QL_VND_SET_FRU_VERSION:
1591                 return qla2x00_update_fru_versions(bsg_job);
1592
1593         case QL_VND_READ_FRU_STATUS:
1594                 return qla2x00_read_fru_status(bsg_job);
1595
1596         case QL_VND_WRITE_FRU_STATUS:
1597                 return qla2x00_write_fru_status(bsg_job);
1598
1599         default:
1600                 bsg_job->reply->result = (DID_ERROR << 16);
1601                 bsg_job->job_done(bsg_job);
1602                 return -ENOSYS;
1603         }
1604 }
1605
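/*
 * Main BSG entry point.  Resolve the owning scsi_qla_host (via the rport for
 * FC_BSG_RPT_ELS requests, otherwise via the shost), reject the request if
 * an ISP reset is active or needed, and dispatch on the BSG message code.
 */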
1606 int
1607 qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
1608 {
1609         int ret = -EINVAL;
1610         struct fc_rport *rport;
1611         fc_port_t *fcport = NULL;
1612         struct Scsi_Host *host;
1613         scsi_qla_host_t *vha;
1614
1615         /* In case no data is transferred. */
1616         bsg_job->reply->reply_payload_rcv_len = 0;
1617
1618         if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
1619                 rport = bsg_job->rport;
1620                 fcport = *(fc_port_t **) rport->dd_data;
1621                 host = rport_to_shost(rport);
1622                 vha = shost_priv(host);
1623         } else {
1624                 host = bsg_job->shost;
1625                 vha = shost_priv(host);
1626         }
1627
1628         if (qla2x00_reset_active(vha)) {
1629                 ql_dbg(ql_dbg_user, vha, 0x709f,
1630                     "BSG: ISP abort active/needed -- cmd=%d.\n",
1631                     bsg_job->request->msgcode);
1632                 bsg_job->reply->result = (DID_ERROR << 16);
1633                 bsg_job->job_done(bsg_job);
1634                 return -EBUSY;
1635         }
1636
1637         ql_dbg(ql_dbg_user, vha, 0x7000,
1638             "Entered %s msgcode=0x%x.\n", __func__, bsg_job->request->msgcode);
1639
1640         switch (bsg_job->request->msgcode) {
1641         case FC_BSG_RPT_ELS:
1642         case FC_BSG_HST_ELS_NOLOGIN:
1643                 ret = qla2x00_process_els(bsg_job);
1644                 break;
1645         case FC_BSG_HST_CT:
1646                 ret = qla2x00_process_ct(bsg_job);
1647                 break;
1648         case FC_BSG_HST_VENDOR:
1649                 ret = qla2x00_process_vendor_specific(bsg_job);
1650                 break;
1651         case FC_BSG_HST_ADD_RPORT:
1652         case FC_BSG_HST_DEL_RPORT:
1653         case FC_BSG_RPT_CT:
1654         default:
1655                 ql_log(ql_log_warn, vha, 0x705a, "Unsupported BSG request.\n");
1656                 bsg_job->reply->result = ret;
1657                 break;
1658         }
1659         return ret;
1660 }
1661
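/*
 * BSG timeout handler: scan every request queue's outstanding commands for
 * the srb owning this bsg_job and abort it.  The hardware_lock is dropped
 * around the abort_command() mailbox call; once the srb is found, its fcport
 * is freed for FC_BSG_HST_CT jobs and the srb is returned to the mempool.
 */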
1662 int
1663 qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
1664 {
1665         scsi_qla_host_t *vha = shost_priv(bsg_job->shost);
1666         struct qla_hw_data *ha = vha->hw;
1667         srb_t *sp;
1668         int cnt, que;
1669         unsigned long flags;
1670         struct req_que *req;
1671
1672         /* find the bsg job from the active list of commands */
1673         spin_lock_irqsave(&ha->hardware_lock, flags);
1674         for (que = 0; que < ha->max_req_queues; que++) {
1675                 req = ha->req_q_map[que];
1676                 if (!req)
1677                         continue;
1678
1679                 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
1680                         sp = req->outstanding_cmds[cnt];
1681                         if (sp) {
1682                                 if (((sp->type == SRB_CT_CMD) ||
1683                                         (sp->type == SRB_ELS_CMD_HST))
1684                                         && (sp->u.bsg_job == bsg_job)) {
1685                                         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1686                                         if (ha->isp_ops->abort_command(sp)) {
1687                                                 ql_log(ql_log_warn, vha, 0x7089,
1688                                                     "mbx abort_command "
1689                                                     "failed.\n");
1690                                                 bsg_job->req->errors =
1691                                                 bsg_job->reply->result = -EIO;
1692                                         } else {
1693                                                 ql_dbg(ql_dbg_user, vha, 0x708a,
1694                                                     "mbx abort_command "
1695                                                     "success.\n");
1696                                                 bsg_job->req->errors =
1697                                                 bsg_job->reply->result = 0;
1698                                         }
1699                                         spin_lock_irqsave(&ha->hardware_lock, flags);
1700                                         goto done;
1701                                 }
1702                         }
1703                 }
1704         }
1705         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1706         ql_log(ql_log_info, vha, 0x708b, "SRB not found to abort.\n");
1707         bsg_job->req->errors = bsg_job->reply->result = -ENXIO;
1708         return 0;
1709
1710 done:
1711         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1712         if (bsg_job->request->msgcode == FC_BSG_HST_CT)
1713                 kfree(sp->fcport);
1714         mempool_free(sp, ha->srb_mempool);
1715         return 0;
1716 }