1 /* bnx2fc_hwi.c: Broadcom NetXtreme II Linux FCoE offload driver.
2  * This file contains the code for the low-level functions that interact
3  * with the 57712 FCoE firmware.
4  *
5  * Copyright (c) 2008 - 2010 Broadcom Corporation
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License as published by
9  * the Free Software Foundation.
10  *
11  * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
12  */
13
14 #include "bnx2fc.h"
15
16 DECLARE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);
17
18 static void bnx2fc_fastpath_notification(struct bnx2fc_hba *hba,
19                                         struct fcoe_kcqe *new_cqe_kcqe);
20 static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
21                                         struct fcoe_kcqe *ofld_kcqe);
22 static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
23                                                 struct fcoe_kcqe *ofld_kcqe);
24 static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code);
25 static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba,
26                                         struct fcoe_kcqe *conn_destroy);
27
28 int bnx2fc_send_stat_req(struct bnx2fc_hba *hba)
29 {
30         struct fcoe_kwqe_stat stat_req;
31         struct kwqe *kwqe_arr[2];
32         int num_kwqes = 1;
33         int rc = 0;
34
35         memset(&stat_req, 0x00, sizeof(struct fcoe_kwqe_stat));
36         stat_req.hdr.op_code = FCOE_KWQE_OPCODE_STAT;
37         stat_req.hdr.flags =
38                 (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
39
40         stat_req.stat_params_addr_lo = (u32) hba->stats_buf_dma;
41         stat_req.stat_params_addr_hi = (u32) ((u64)hba->stats_buf_dma >> 32);
42
43         kwqe_arr[0] = (struct kwqe *) &stat_req;
44
45         if (hba->cnic && hba->cnic->submit_kwqes)
46                 rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
47
48         return rc;
49 }
50
51 /**
52  * bnx2fc_send_fw_fcoe_init_msg - initiates initial handshake with FCoE f/w
53  *
54  * @hba:        adapter structure pointer
55  *
56  * Send down the FCoE firmware init KWQEs which initiate the initial handshake
57  *      with the f/w.
58  *
59  */
60 int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba)
61 {
62         struct fcoe_kwqe_init1 fcoe_init1;
63         struct fcoe_kwqe_init2 fcoe_init2;
64         struct fcoe_kwqe_init3 fcoe_init3;
65         struct kwqe *kwqe_arr[3];
66         int num_kwqes = 3;
67         int rc = 0;
68
69         if (!hba->cnic) {
70                 printk(KERN_ALERT PFX "hba->cnic NULL during fcoe fw init\n");
71                 return -ENODEV;
72         }
73
74         /* fill init1 KWQE */
75         memset(&fcoe_init1, 0x00, sizeof(struct fcoe_kwqe_init1));
76         fcoe_init1.hdr.op_code = FCOE_KWQE_OPCODE_INIT1;
77         fcoe_init1.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
78                                         FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
79
80         fcoe_init1.num_tasks = BNX2FC_MAX_TASKS;
81         fcoe_init1.sq_num_wqes = BNX2FC_SQ_WQES_MAX;
82         fcoe_init1.rq_num_wqes = BNX2FC_RQ_WQES_MAX;
83         fcoe_init1.rq_buffer_log_size = BNX2FC_RQ_BUF_LOG_SZ;
84         fcoe_init1.cq_num_wqes = BNX2FC_CQ_WQES_MAX;
85         fcoe_init1.dummy_buffer_addr_lo = (u32) hba->dummy_buf_dma;
86         fcoe_init1.dummy_buffer_addr_hi = (u32) ((u64)hba->dummy_buf_dma >> 32);
87         fcoe_init1.task_list_pbl_addr_lo = (u32) hba->task_ctx_bd_dma;
88         fcoe_init1.task_list_pbl_addr_hi =
89                                 (u32) ((u64) hba->task_ctx_bd_dma >> 32);
90         fcoe_init1.mtu = BNX2FC_MINI_JUMBO_MTU;
91
92         fcoe_init1.flags = (PAGE_SHIFT <<
93                                 FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT);
94
95         fcoe_init1.num_sessions_log = BNX2FC_NUM_MAX_SESS_LOG;
96
97         /* fill init2 KWQE */
98         memset(&fcoe_init2, 0x00, sizeof(struct fcoe_kwqe_init2));
99         fcoe_init2.hdr.op_code = FCOE_KWQE_OPCODE_INIT2;
100         fcoe_init2.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
101                                         FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
102
103         fcoe_init2.hsi_major_version = FCOE_HSI_MAJOR_VERSION;
104         fcoe_init2.hsi_minor_version = FCOE_HSI_MINOR_VERSION;
105
106         fcoe_init2.hash_tbl_pbl_addr_lo = (u32) hba->hash_tbl_pbl_dma;
107         fcoe_init2.hash_tbl_pbl_addr_hi = (u32)
108                                            ((u64) hba->hash_tbl_pbl_dma >> 32);
109
110         fcoe_init2.t2_hash_tbl_addr_lo = (u32) hba->t2_hash_tbl_dma;
111         fcoe_init2.t2_hash_tbl_addr_hi = (u32)
112                                           ((u64) hba->t2_hash_tbl_dma >> 32);
113
114         fcoe_init2.t2_ptr_hash_tbl_addr_lo = (u32) hba->t2_hash_tbl_ptr_dma;
115         fcoe_init2.t2_ptr_hash_tbl_addr_hi = (u32)
116                                         ((u64) hba->t2_hash_tbl_ptr_dma >> 32);
117
118         fcoe_init2.free_list_count = BNX2FC_NUM_MAX_SESS;
119
120         /* fill init3 KWQE */
121         memset(&fcoe_init3, 0x00, sizeof(struct fcoe_kwqe_init3));
122         fcoe_init3.hdr.op_code = FCOE_KWQE_OPCODE_INIT3;
123         fcoe_init3.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
124                                         FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
125         fcoe_init3.error_bit_map_lo = 0xffffffff;
126         fcoe_init3.error_bit_map_hi = 0xffffffff;
127
128         fcoe_init3.perf_config = 1;
129
130         kwqe_arr[0] = (struct kwqe *) &fcoe_init1;
131         kwqe_arr[1] = (struct kwqe *) &fcoe_init2;
132         kwqe_arr[2] = (struct kwqe *) &fcoe_init3;
133
134         if (hba->cnic && hba->cnic->submit_kwqes)
135                 rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
136
137         return rc;
138 }
139 int bnx2fc_send_fw_fcoe_destroy_msg(struct bnx2fc_hba *hba)
140 {
141         struct fcoe_kwqe_destroy fcoe_destroy;
142         struct kwqe *kwqe_arr[2];
143         int num_kwqes = 1;
144         int rc = -1;
145
146         /* fill destroy KWQE */
147         memset(&fcoe_destroy, 0x00, sizeof(struct fcoe_kwqe_destroy));
148         fcoe_destroy.hdr.op_code = FCOE_KWQE_OPCODE_DESTROY;
149         fcoe_destroy.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
150                                         FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
151         kwqe_arr[0] = (struct kwqe *) &fcoe_destroy;
152
153         if (hba->cnic && hba->cnic->submit_kwqes)
154                 rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
155         return rc;
156 }
157
158 /**
159  * bnx2fc_send_session_ofld_req - initiates FCoE Session offload process
160  *
161  * @port:               port structure pointer
162  * @tgt:                bnx2fc_rport structure pointer
163  */
164 int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
165                                         struct bnx2fc_rport *tgt)
166 {
167         struct fc_lport *lport = port->lport;
168         struct bnx2fc_hba *hba = port->priv;
169         struct kwqe *kwqe_arr[4];
170         struct fcoe_kwqe_conn_offload1 ofld_req1;
171         struct fcoe_kwqe_conn_offload2 ofld_req2;
172         struct fcoe_kwqe_conn_offload3 ofld_req3;
173         struct fcoe_kwqe_conn_offload4 ofld_req4;
174         struct fc_rport_priv *rdata = tgt->rdata;
175         struct fc_rport *rport = tgt->rport;
176         int num_kwqes = 4;
177         u32 port_id;
178         int rc = 0;
179         u16 conn_id;
180
181         /* Initialize offload request 1 structure */
182         memset(&ofld_req1, 0x00, sizeof(struct fcoe_kwqe_conn_offload1));
183
184         ofld_req1.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN1;
185         ofld_req1.hdr.flags =
186                 (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
187
188
189         conn_id = (u16)tgt->fcoe_conn_id;
190         ofld_req1.fcoe_conn_id = conn_id;
191
192
193         ofld_req1.sq_addr_lo = (u32) tgt->sq_dma;
194         ofld_req1.sq_addr_hi = (u32)((u64) tgt->sq_dma >> 32);
195
196         ofld_req1.rq_pbl_addr_lo = (u32) tgt->rq_pbl_dma;
197         ofld_req1.rq_pbl_addr_hi = (u32)((u64) tgt->rq_pbl_dma >> 32);
198
199         ofld_req1.rq_first_pbe_addr_lo = (u32) tgt->rq_dma;
200         ofld_req1.rq_first_pbe_addr_hi =
201                                 (u32)((u64) tgt->rq_dma >> 32);
202
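        /*
         * Note: the initial RQ producer value is 0x8000, not 0.  The low
         * 15 bits of rq_prod are the ring offset and bit 15 appears to act
         * as a wrap marker that toggles on every full pass of the RQ (see
         * bnx2fc_return_rqe(), which wraps the index modulo 0x8000).
         */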
203         ofld_req1.rq_prod = 0x8000;
204
205         /* Initialize offload request 2 structure */
206         memset(&ofld_req2, 0x00, sizeof(struct fcoe_kwqe_conn_offload2));
207
208         ofld_req2.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN2;
209         ofld_req2.hdr.flags =
210                 (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
211
212         ofld_req2.tx_max_fc_pay_len = rdata->maxframe_size;
213
214         ofld_req2.cq_addr_lo = (u32) tgt->cq_dma;
215         ofld_req2.cq_addr_hi = (u32)((u64)tgt->cq_dma >> 32);
216
217         ofld_req2.xferq_addr_lo = (u32) tgt->xferq_dma;
218         ofld_req2.xferq_addr_hi = (u32)((u64)tgt->xferq_dma >> 32);
219
220         ofld_req2.conn_db_addr_lo = (u32)tgt->conn_db_dma;
221         ofld_req2.conn_db_addr_hi = (u32)((u64)tgt->conn_db_dma >> 32);
222
223         /* Initialize offload request 3 structure */
224         memset(&ofld_req3, 0x00, sizeof(struct fcoe_kwqe_conn_offload3));
225
226         ofld_req3.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN3;
227         ofld_req3.hdr.flags =
228                 (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
229
230         ofld_req3.vlan_tag = hba->vlan_id <<
231                                 FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT;
232         ofld_req3.vlan_tag |= 3 << FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT;
233
234         port_id = fc_host_port_id(lport->host);
235         if (port_id == 0) {
236                 BNX2FC_HBA_DBG(lport, "ofld_req: port_id = 0, link down?\n");
237                 return -EINVAL;
238         }
239
240         /*
241          * Store s_id of the initiator for further reference. This will
242          * be used during disable/destroy during linkdown processing as
243          * when the lport is reset, the port_id also is reset to 0
244          */
245         tgt->sid = port_id;
246         ofld_req3.s_id[0] = (port_id & 0x000000FF);
247         ofld_req3.s_id[1] = (port_id & 0x0000FF00) >> 8;
248         ofld_req3.s_id[2] = (port_id & 0x00FF0000) >> 16;
249
250         port_id = rport->port_id;
251         ofld_req3.d_id[0] = (port_id & 0x000000FF);
252         ofld_req3.d_id[1] = (port_id & 0x0000FF00) >> 8;
253         ofld_req3.d_id[2] = (port_id & 0x00FF0000) >> 16;
254
255         ofld_req3.tx_total_conc_seqs = rdata->max_seq;
256
257         ofld_req3.tx_max_conc_seqs_c3 = rdata->max_seq;
258         ofld_req3.rx_max_fc_pay_len  = lport->mfs;
259
260         ofld_req3.rx_total_conc_seqs = BNX2FC_MAX_SEQS;
261         ofld_req3.rx_max_conc_seqs_c3 = BNX2FC_MAX_SEQS;
262         ofld_req3.rx_open_seqs_exch_c3 = 1;
263
264         ofld_req3.confq_first_pbe_addr_lo = tgt->confq_dma;
265         ofld_req3.confq_first_pbe_addr_hi = (u32)((u64) tgt->confq_dma >> 32);
266
267         /* set mul_n_port_ids supported flag to 0, until it is supported */
268         ofld_req3.flags = 0;
269         /*
270         ofld_req3.flags |= (((lport->send_sp_features & FC_SP_FT_MNA) ? 1:0) <<
271                             FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS_SHIFT);
272         */
273         /* Info from PLOGI response */
274         ofld_req3.flags |= (((rdata->sp_features & FC_SP_FT_EDTR) ? 1 : 0) <<
275                              FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES_SHIFT);
276
277         ofld_req3.flags |= (((rdata->sp_features & FC_SP_FT_SEQC) ? 1 : 0) <<
278                              FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT_SHIFT);
279
280         /* vlan flag */
281         ofld_req3.flags |= (hba->vlan_enabled <<
282                             FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT);
283
284         /* C2_VALID and ACK flags are not set as they are not supported */
285
286
287         /* Initialize offload request 4 structure */
288         memset(&ofld_req4, 0x00, sizeof(struct fcoe_kwqe_conn_offload4));
289         ofld_req4.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN4;
290         ofld_req4.hdr.flags =
291                 (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
292
293         ofld_req4.e_d_tov_timer_val = lport->e_d_tov / 20;
294
295
296         ofld_req4.src_mac_addr_lo[0] =  port->data_src_addr[5];
297                                                         /* local mac */
298         ofld_req4.src_mac_addr_lo[1] =  port->data_src_addr[4];
299         ofld_req4.src_mac_addr_mid[0] =  port->data_src_addr[3];
300         ofld_req4.src_mac_addr_mid[1] =  port->data_src_addr[2];
301         ofld_req4.src_mac_addr_hi[0] =  port->data_src_addr[1];
302         ofld_req4.src_mac_addr_hi[1] =  port->data_src_addr[0];
303         ofld_req4.dst_mac_addr_lo[0] =  hba->ctlr.dest_addr[5];/* fcf mac */
304         ofld_req4.dst_mac_addr_lo[1] =  hba->ctlr.dest_addr[4];
305         ofld_req4.dst_mac_addr_mid[0] =  hba->ctlr.dest_addr[3];
306         ofld_req4.dst_mac_addr_mid[1] =  hba->ctlr.dest_addr[2];
307         ofld_req4.dst_mac_addr_hi[0] =  hba->ctlr.dest_addr[1];
308         ofld_req4.dst_mac_addr_hi[1] =  hba->ctlr.dest_addr[0];
309
310         ofld_req4.lcq_addr_lo = (u32) tgt->lcq_dma;
311         ofld_req4.lcq_addr_hi = (u32)((u64) tgt->lcq_dma >> 32);
312
313         ofld_req4.confq_pbl_base_addr_lo = (u32) tgt->confq_pbl_dma;
314         ofld_req4.confq_pbl_base_addr_hi =
315                                         (u32)((u64) tgt->confq_pbl_dma >> 32);
316
317         kwqe_arr[0] = (struct kwqe *) &ofld_req1;
318         kwqe_arr[1] = (struct kwqe *) &ofld_req2;
319         kwqe_arr[2] = (struct kwqe *) &ofld_req3;
320         kwqe_arr[3] = (struct kwqe *) &ofld_req4;
321
322         if (hba->cnic && hba->cnic->submit_kwqes)
323                 rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
324
325         return rc;
326 }
327
328 /**
329  * bnx2fc_send_session_enable_req - initiates FCoE Session enablement
330  *
331  * @port:               port structure pointer
332  * @tgt:                bnx2fc_rport structure pointer
333  */
334 static int bnx2fc_send_session_enable_req(struct fcoe_port *port,
335                                         struct bnx2fc_rport *tgt)
336 {
337         struct kwqe *kwqe_arr[2];
338         struct bnx2fc_hba *hba = port->priv;
339         struct fcoe_kwqe_conn_enable_disable enbl_req;
340         struct fc_lport *lport = port->lport;
341         struct fc_rport *rport = tgt->rport;
342         int num_kwqes = 1;
343         int rc = 0;
344         u32 port_id;
345
346         memset(&enbl_req, 0x00,
347                sizeof(struct fcoe_kwqe_conn_enable_disable));
348         enbl_req.hdr.op_code = FCOE_KWQE_OPCODE_ENABLE_CONN;
349         enbl_req.hdr.flags =
350                 (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
351
352         enbl_req.src_mac_addr_lo[0] =  port->data_src_addr[5];
353                                                         /* local mac */
354         enbl_req.src_mac_addr_lo[1] =  port->data_src_addr[4];
355         enbl_req.src_mac_addr_mid[0] =  port->data_src_addr[3];
356         enbl_req.src_mac_addr_mid[1] =  port->data_src_addr[2];
357         enbl_req.src_mac_addr_hi[0] =  port->data_src_addr[1];
358         enbl_req.src_mac_addr_hi[1] =  port->data_src_addr[0];
359         memcpy(tgt->src_addr, port->data_src_addr, ETH_ALEN);
360
361         enbl_req.dst_mac_addr_lo[0] =  hba->ctlr.dest_addr[5];/* fcf mac */
362         enbl_req.dst_mac_addr_lo[1] =  hba->ctlr.dest_addr[4];
363         enbl_req.dst_mac_addr_mid[0] =  hba->ctlr.dest_addr[3];
364         enbl_req.dst_mac_addr_mid[1] =  hba->ctlr.dest_addr[2];
365         enbl_req.dst_mac_addr_hi[0] =  hba->ctlr.dest_addr[1];
366         enbl_req.dst_mac_addr_hi[1] =  hba->ctlr.dest_addr[0];
367
368         port_id = fc_host_port_id(lport->host);
369         if (port_id != tgt->sid) {
370                 printk(KERN_ERR PFX "WARN: enable_req port_id = 0x%x,"
371                                 "sid = 0x%x\n", port_id, tgt->sid);
372                 port_id = tgt->sid;
373         }
374         enbl_req.s_id[0] = (port_id & 0x000000FF);
375         enbl_req.s_id[1] = (port_id & 0x0000FF00) >> 8;
376         enbl_req.s_id[2] = (port_id & 0x00FF0000) >> 16;
377
378         port_id = rport->port_id;
379         enbl_req.d_id[0] = (port_id & 0x000000FF);
380         enbl_req.d_id[1] = (port_id & 0x0000FF00) >> 8;
381         enbl_req.d_id[2] = (port_id & 0x00FF0000) >> 16;
382         enbl_req.vlan_tag = hba->vlan_id <<
383                                 FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT;
384         enbl_req.vlan_tag |= 3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT;
385         enbl_req.vlan_flag = hba->vlan_enabled;
386         enbl_req.context_id = tgt->context_id;
387         enbl_req.conn_id = tgt->fcoe_conn_id;
388
389         kwqe_arr[0] = (struct kwqe *) &enbl_req;
390
391         if (hba->cnic && hba->cnic->submit_kwqes)
392                 rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
393         return rc;
394 }
395
396 /**
397  * bnx2fc_send_session_disable_req - initiates FCoE Session disable
398  *
399  * @port:               port structure pointer
400  * @tgt:                bnx2fc_rport structure pointer
401  */
402 int bnx2fc_send_session_disable_req(struct fcoe_port *port,
403                                     struct bnx2fc_rport *tgt)
404 {
405         struct bnx2fc_hba *hba = port->priv;
406         struct fcoe_kwqe_conn_enable_disable disable_req;
407         struct kwqe *kwqe_arr[2];
408         struct fc_rport *rport = tgt->rport;
409         int num_kwqes = 1;
410         int rc = 0;
411         u32 port_id;
412
413         memset(&disable_req, 0x00,
414                sizeof(struct fcoe_kwqe_conn_enable_disable));
415         disable_req.hdr.op_code = FCOE_KWQE_OPCODE_DISABLE_CONN;
416         disable_req.hdr.flags =
417                 (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
418
419         disable_req.src_mac_addr_lo[0] =  tgt->src_addr[5];
420         disable_req.src_mac_addr_lo[1] =  tgt->src_addr[4];
421         disable_req.src_mac_addr_mid[0] =  tgt->src_addr[3];
422         disable_req.src_mac_addr_mid[1] =  tgt->src_addr[2];
423         disable_req.src_mac_addr_hi[0] =  tgt->src_addr[1];
424         disable_req.src_mac_addr_hi[1] =  tgt->src_addr[0];
425
426         disable_req.dst_mac_addr_lo[0] =  hba->ctlr.dest_addr[5];/* fcf mac */
427         disable_req.dst_mac_addr_lo[1] =  hba->ctlr.dest_addr[4];
428         disable_req.dst_mac_addr_mid[0] =  hba->ctlr.dest_addr[3];
429         disable_req.dst_mac_addr_mid[1] =  hba->ctlr.dest_addr[2];
430         disable_req.dst_mac_addr_hi[0] =  hba->ctlr.dest_addr[1];
431         disable_req.dst_mac_addr_hi[1] =  hba->ctlr.dest_addr[0];
432
433         port_id = tgt->sid;
434         disable_req.s_id[0] = (port_id & 0x000000FF);
435         disable_req.s_id[1] = (port_id & 0x0000FF00) >> 8;
436         disable_req.s_id[2] = (port_id & 0x00FF0000) >> 16;
437
438
439         port_id = rport->port_id;
440         disable_req.d_id[0] = (port_id & 0x000000FF);
441         disable_req.d_id[1] = (port_id & 0x0000FF00) >> 8;
442         disable_req.d_id[2] = (port_id & 0x00FF0000) >> 16;
443         disable_req.context_id = tgt->context_id;
444         disable_req.conn_id = tgt->fcoe_conn_id;
445         disable_req.vlan_tag = hba->vlan_id <<
446                                 FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT;
447         disable_req.vlan_tag |=
448                         3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT;
449         disable_req.vlan_flag = hba->vlan_enabled;
450
451         kwqe_arr[0] = (struct kwqe *) &disable_req;
452
453         if (hba->cnic && hba->cnic->submit_kwqes)
454                 rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
455
456         return rc;
457 }
458
459 /**
460  * bnx2fc_send_session_destroy_req - initiates FCoE Session destroy
461  *
462  * @hba:                adapter structure pointer
463  * @tgt:                bnx2fc_rport structure pointer
464  */
465 int bnx2fc_send_session_destroy_req(struct bnx2fc_hba *hba,
466                                         struct bnx2fc_rport *tgt)
467 {
468         struct fcoe_kwqe_conn_destroy destroy_req;
469         struct kwqe *kwqe_arr[2];
470         int num_kwqes = 1;
471         int rc = 0;
472
473         memset(&destroy_req, 0x00, sizeof(struct fcoe_kwqe_conn_destroy));
474         destroy_req.hdr.op_code = FCOE_KWQE_OPCODE_DESTROY_CONN;
475         destroy_req.hdr.flags =
476                 (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
477
478         destroy_req.context_id = tgt->context_id;
479         destroy_req.conn_id = tgt->fcoe_conn_id;
480
481         kwqe_arr[0] = (struct kwqe *) &destroy_req;
482
483         if (hba->cnic && hba->cnic->submit_kwqes)
484                 rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
485
486         return rc;
487 }
488
489 static bool is_valid_lport(struct bnx2fc_hba *hba, struct fc_lport *lport)
490 {
491         struct bnx2fc_lport *blport;
492
493         spin_lock_bh(&hba->hba_lock);
494         list_for_each_entry(blport, &hba->vports, list) {
495                 if (blport->lport == lport) {
496                         spin_unlock_bh(&hba->hba_lock);
497                         return true;
498                 }
499         }
500         spin_unlock_bh(&hba->hba_lock);
501         return false;
502
503 }
504
505
506 static void bnx2fc_unsol_els_work(struct work_struct *work)
507 {
508         struct bnx2fc_unsol_els *unsol_els;
509         struct fc_lport *lport;
510         struct bnx2fc_hba *hba;
511         struct fc_frame *fp;
512
513         unsol_els = container_of(work, struct bnx2fc_unsol_els, unsol_els_work);
514         lport = unsol_els->lport;
515         fp = unsol_els->fp;
516         hba = unsol_els->hba;
517         if (is_valid_lport(hba, lport))
518                 fc_exch_recv(lport, fp);
519         kfree(unsol_els);
520 }
521
522 void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt,
523                                    unsigned char *buf,
524                                    u32 frame_len, u16 l2_oxid)
525 {
526         struct fcoe_port *port = tgt->port;
527         struct fc_lport *lport = port->lport;
528         struct bnx2fc_hba *hba = port->priv;
529         struct bnx2fc_unsol_els *unsol_els;
530         struct fc_frame_header *fh;
531         struct fc_frame *fp;
532         struct sk_buff *skb;
533         u32 payload_len;
534         u32 crc;
535         u8 op;
536
537
538         unsol_els = kzalloc(sizeof(*unsol_els), GFP_ATOMIC);
539         if (!unsol_els) {
540                 BNX2FC_TGT_DBG(tgt, "Unable to allocate unsol_work\n");
541                 return;
542         }
543
544         BNX2FC_TGT_DBG(tgt, "l2_frame_compl l2_oxid = 0x%x, frame_len = %d\n",
545                 l2_oxid, frame_len);
546
547         payload_len = frame_len - sizeof(struct fc_frame_header);
548
549         fp = fc_frame_alloc(lport, payload_len);
550         if (!fp) {
551                 printk(KERN_ERR PFX "fc_frame_alloc failure\n");
552                 kfree(unsol_els);
553                 return;
554         }
555
556         fh = (struct fc_frame_header *) fc_frame_header_get(fp);
557         /* Copy FC Frame header and payload into the frame */
558         memcpy(fh, buf, frame_len);
559
560         if (l2_oxid != FC_XID_UNKNOWN)
561                 fh->fh_ox_id = htons(l2_oxid);
562
563         skb = fp_skb(fp);
564
565         if ((fh->fh_r_ctl == FC_RCTL_ELS_REQ) ||
566             (fh->fh_r_ctl == FC_RCTL_ELS_REP)) {
567
568                 if (fh->fh_type == FC_TYPE_ELS) {
569                         op = fc_frame_payload_op(fp);
570                         if ((op == ELS_TEST) || (op == ELS_ESTC) ||
571                             (op == ELS_FAN) || (op == ELS_CSU)) {
572                                 /*
573                                  * No need to reply for these
574                                  * ELS requests
575                                  */
576                                 printk(KERN_ERR PFX "dropping ELS 0x%x\n", op);
577                                 kfree_skb(skb);
578                                 kfree(unsol_els);
579                                 return;
580                         }
581                 }
582                 crc = fcoe_fc_crc(fp);
583                 fc_frame_init(fp);
584                 fr_dev(fp) = lport;
585                 fr_sof(fp) = FC_SOF_I3;
586                 fr_eof(fp) = FC_EOF_T;
587                 fr_crc(fp) = cpu_to_le32(~crc);
588                 unsol_els->lport = lport;
589                 unsol_els->hba = hba;
590                 unsol_els->fp = fp;
591                 INIT_WORK(&unsol_els->unsol_els_work, bnx2fc_unsol_els_work);
592                 queue_work(bnx2fc_wq, &unsol_els->unsol_els_work);
593         } else {
594                 BNX2FC_HBA_DBG(lport, "fh_r_ctl = 0x%x\n", fh->fh_r_ctl);
595                 kfree_skb(skb);
596                 kfree(unsol_els);
597         }
598 }
599
600 static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
601 {
602         u8 num_rq;
603         struct fcoe_err_report_entry *err_entry;
604         unsigned char *rq_data;
605         unsigned char *buf = NULL, *buf1;
606         int i;
607         u16 xid;
608         u32 frame_len, len;
609         struct bnx2fc_cmd *io_req = NULL;
610         struct fcoe_task_ctx_entry *task, *task_page;
611         struct bnx2fc_hba *hba = tgt->port->priv;
612         int task_idx, index;
613         int rc = 0;
614
615
616         BNX2FC_TGT_DBG(tgt, "Entered UNSOL COMPLETION wqe = 0x%x\n", wqe);
617         switch (wqe & FCOE_UNSOLICITED_CQE_SUBTYPE) {
618         case FCOE_UNSOLICITED_FRAME_CQE_TYPE:
619                 frame_len = (wqe & FCOE_UNSOLICITED_CQE_PKT_LEN) >>
620                              FCOE_UNSOLICITED_CQE_PKT_LEN_SHIFT;
621
622                 num_rq = (frame_len + BNX2FC_RQ_BUF_SZ - 1) / BNX2FC_RQ_BUF_SZ;
623
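                /*
                 * The frame spans num_rq RQ buffers.  If those buffers are
                 * contiguous in the ring, use them in place; otherwise
                 * bnx2fc_get_next_rqe() returns NULL (the request would wrap
                 * the ring) and each buffer is copied into a temporary
                 * linear buffer below.
                 */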
624                 spin_lock_bh(&tgt->tgt_lock);
625                 rq_data = (unsigned char *)bnx2fc_get_next_rqe(tgt, num_rq);
626                 spin_unlock_bh(&tgt->tgt_lock);
627
628                 if (rq_data) {
629                         buf = rq_data;
630                 } else {
631                         buf1 = buf = kmalloc((num_rq * BNX2FC_RQ_BUF_SZ),
632                                               GFP_ATOMIC);
633
634                         if (!buf1) {
635                                 BNX2FC_TGT_DBG(tgt, "Memory alloc failure\n");
636                                 break;
637                         }
638
639                         for (i = 0; i < num_rq; i++) {
640                                 spin_lock_bh(&tgt->tgt_lock);
641                                 rq_data = (unsigned char *)
642                                            bnx2fc_get_next_rqe(tgt, 1);
643                                 spin_unlock_bh(&tgt->tgt_lock);
644                                 len = BNX2FC_RQ_BUF_SZ;
645                                 memcpy(buf1, rq_data, len);
646                                 buf1 += len;
647                         }
648                 }
649                 bnx2fc_process_l2_frame_compl(tgt, buf, frame_len,
650                                               FC_XID_UNKNOWN);
651
652                 if (buf != rq_data)
653                         kfree(buf);
654                 spin_lock_bh(&tgt->tgt_lock);
655                 bnx2fc_return_rqe(tgt, num_rq);
656                 spin_unlock_bh(&tgt->tgt_lock);
657                 break;
658
659         case FCOE_ERROR_DETECTION_CQE_TYPE:
660                 /*
661                  * In case of an error reporting CQE, a single RQ entry
662                  * is consumed.
663                  */
664                 spin_lock_bh(&tgt->tgt_lock);
665                 num_rq = 1;
666                 err_entry = (struct fcoe_err_report_entry *)
667                              bnx2fc_get_next_rqe(tgt, 1);
668                 xid = err_entry->fc_hdr.ox_id;
669                 BNX2FC_TGT_DBG(tgt, "Unsol Error Frame OX_ID = 0x%x\n", xid);
670                 BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x\n",
671                         err_entry->data.err_warn_bitmap_hi,
672                         err_entry->data.err_warn_bitmap_lo);
673                 BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x\n",
674                         err_entry->data.tx_buf_off, err_entry->data.rx_buf_off);
675
676                 bnx2fc_return_rqe(tgt, 1);
677
678                 if (xid > BNX2FC_MAX_XID) {
679                         BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n",
680                                    xid);
681                         spin_unlock_bh(&tgt->tgt_lock);
682                         break;
683                 }
684
685                 task_idx = xid / BNX2FC_TASKS_PER_PAGE;
686                 index = xid % BNX2FC_TASKS_PER_PAGE;
687                 task_page = (struct fcoe_task_ctx_entry *)
688                                                 hba->task_ctx[task_idx];
689                 task = &(task_page[index]);
690
691                 io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
692                 if (!io_req) {
693                         spin_unlock_bh(&tgt->tgt_lock);
694                         break;
695                 }
696
697                 if (io_req->cmd_type != BNX2FC_SCSI_CMD) {
698                         printk(KERN_ERR PFX "err_warn: Not a SCSI cmd\n");
699                         spin_unlock_bh(&tgt->tgt_lock);
700                         break;
701                 }
702
703                 if (test_and_clear_bit(BNX2FC_FLAG_IO_CLEANUP,
704                                        &io_req->req_flags)) {
705                         BNX2FC_IO_DBG(io_req, "unsol_err: cleanup in "
706                                             "progress.. ignore unsol err\n");
707                         spin_unlock_bh(&tgt->tgt_lock);
708                         break;
709                 }
710
711                 /*
712                  * If ABTS is already in progress, and FW error is
713                  * received after that, do not cancel the timeout_work
714                  * and let the error recovery continue by explicitly
715                  * logging out the target, when the ABTS eventually
716                  * times out.
717                  */
718                 if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
719                                       &io_req->req_flags)) {
720                         /*
721                          * Cancel the timeout_work, as we received IO
722                          * completion with FW error.
723                          */
724                         if (cancel_delayed_work(&io_req->timeout_work))
725                                 kref_put(&io_req->refcount,
726                                          bnx2fc_cmd_release); /* timer hold */
727
728                         rc = bnx2fc_initiate_abts(io_req);
729                         if (rc != SUCCESS) {
730                                 BNX2FC_IO_DBG(io_req, "err_warn: initiate_abts "
731                                         "failed. issue cleanup\n");
732                                 rc = bnx2fc_initiate_cleanup(io_req);
733                                 BUG_ON(rc);
734                         }
735                 } else
736                         printk(KERN_ERR PFX "err_warn: io_req (0x%x) already "
737                                             "in ABTS processing\n", xid);
738                 spin_unlock_bh(&tgt->tgt_lock);
739                 break;
740
741         case FCOE_WARNING_DETECTION_CQE_TYPE:
742                 /*
743                  * In case of a warning reporting CQE, a single RQ entry
744                  * is consumed.
745                  */
746                 spin_lock_bh(&tgt->tgt_lock);
747                 num_rq = 1;
748                 err_entry = (struct fcoe_err_report_entry *)
749                              bnx2fc_get_next_rqe(tgt, 1);
750                 xid = cpu_to_be16(err_entry->fc_hdr.ox_id);
751                 BNX2FC_TGT_DBG(tgt, "Unsol Warning Frame OX_ID = 0x%x\n", xid);
752                 BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x",
753                         err_entry->data.err_warn_bitmap_hi,
754                         err_entry->data.err_warn_bitmap_lo);
755                 BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x",
756                         err_entry->data.tx_buf_off, err_entry->data.rx_buf_off);
757
758                 bnx2fc_return_rqe(tgt, 1);
759                 spin_unlock_bh(&tgt->tgt_lock);
760                 break;
761
762         default:
763                 printk(KERN_ERR PFX "Unsol Compl: Invalid CQE Subtype\n");
764                 break;
765         }
766 }
767
768 void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe)
769 {
770         struct fcoe_task_ctx_entry *task;
771         struct fcoe_task_ctx_entry *task_page;
772         struct fcoe_port *port = tgt->port;
773         struct bnx2fc_hba *hba = port->priv;
774         struct bnx2fc_cmd *io_req;
775         int task_idx, index;
776         u16 xid;
777         u8  cmd_type;
778         u8 rx_state = 0;
779         u8 num_rq;
780
781         spin_lock_bh(&tgt->tgt_lock);
782         xid = wqe & FCOE_PEND_WQ_CQE_TASK_ID;
783         if (xid >= BNX2FC_MAX_TASKS) {
784                 printk(KERN_ALERT PFX "ERROR:xid out of range\n");
785                 spin_unlock_bh(&tgt->tgt_lock);
786                 return;
787         }
788         task_idx = xid / BNX2FC_TASKS_PER_PAGE;
789         index = xid % BNX2FC_TASKS_PER_PAGE;
790         task_page = (struct fcoe_task_ctx_entry *)hba->task_ctx[task_idx];
791         task = &(task_page[index]);
792
793         num_rq = ((task->rxwr_txrd.var_ctx.rx_flags &
794                    FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE) >>
795                    FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE_SHIFT);
796
797         io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
798
799         if (io_req == NULL) {
800                 printk(KERN_ERR PFX "ERROR? cq_compl - io_req is NULL\n");
801                 spin_unlock_bh(&tgt->tgt_lock);
802                 return;
803         }
804
805         /* Timestamp IO completion time */
806         cmd_type = io_req->cmd_type;
807
808         rx_state = ((task->rxwr_txrd.var_ctx.rx_flags &
809                     FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE) >>
810                     FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE_SHIFT);
811
812         /* Process other IO completion types */
813         switch (cmd_type) {
814         case BNX2FC_SCSI_CMD:
815                 if (rx_state == FCOE_TASK_RX_STATE_COMPLETED) {
816                         bnx2fc_process_scsi_cmd_compl(io_req, task, num_rq);
817                         spin_unlock_bh(&tgt->tgt_lock);
818                         return;
819                 }
820
821                 if (rx_state == FCOE_TASK_RX_STATE_ABTS_COMPLETED)
822                         bnx2fc_process_abts_compl(io_req, task, num_rq);
823                 else if (rx_state ==
824                          FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED)
825                         bnx2fc_process_cleanup_compl(io_req, task, num_rq);
826                 else
827                         printk(KERN_ERR PFX "Invalid rx state - %d\n",
828                                 rx_state);
829                 break;
830
831         case BNX2FC_TASK_MGMT_CMD:
832                 BNX2FC_IO_DBG(io_req, "Processing TM complete\n");
833                 bnx2fc_process_tm_compl(io_req, task, num_rq);
834                 break;
835
836         case BNX2FC_ABTS:
837                 /*
838                  * ABTS request received by firmware. ABTS response
839                  * will be delivered to the task belonging to the IO
840                  * that was aborted
841                  */
842                 BNX2FC_IO_DBG(io_req, "cq_compl- ABTS sent out by fw\n");
843                 kref_put(&io_req->refcount, bnx2fc_cmd_release);
844                 break;
845
846         case BNX2FC_ELS:
847                 if (rx_state == FCOE_TASK_RX_STATE_COMPLETED)
848                         bnx2fc_process_els_compl(io_req, task, num_rq);
849                 else if (rx_state == FCOE_TASK_RX_STATE_ABTS_COMPLETED)
850                         bnx2fc_process_abts_compl(io_req, task, num_rq);
851                 else if (rx_state ==
852                          FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED)
853                         bnx2fc_process_cleanup_compl(io_req, task, num_rq);
854                 else
855                         printk(KERN_ERR PFX "Invalid rx state =  %d\n",
856                                 rx_state);
857                 break;
858
859         case BNX2FC_CLEANUP:
860                 BNX2FC_IO_DBG(io_req, "cq_compl- cleanup resp rcvd\n");
861                 kref_put(&io_req->refcount, bnx2fc_cmd_release);
862                 break;
863
864         default:
865                 printk(KERN_ERR PFX "Invalid cmd_type %d\n", cmd_type);
866                 break;
867         }
868         spin_unlock_bh(&tgt->tgt_lock);
869 }
870
871 void bnx2fc_arm_cq(struct bnx2fc_rport *tgt)
872 {
873         struct b577xx_fcoe_rx_doorbell *rx_db = &tgt->rx_db;
874         u32 msg;
875
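        /*
         * Build the RX doorbell image in host memory, make sure all prior
         * CQ consumer updates are visible (wmb), then post the 32-bit
         * doorbell to the per-connection context area to re-arm the CQ.
         */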
876         wmb();
877         rx_db->doorbell_cq_cons = tgt->cq_cons_idx | (tgt->cq_curr_toggle_bit <<
878                         FCOE_CQE_TOGGLE_BIT_SHIFT);
879         msg = *((u32 *)rx_db);
880         writel(cpu_to_le32(msg), tgt->ctx_base);
881         mmiowb();
882
883 }
884
885 struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe)
886 {
887         struct bnx2fc_work *work;
888         work = kzalloc(sizeof(struct bnx2fc_work), GFP_ATOMIC);
889         if (!work)
890                 return NULL;
891
892         INIT_LIST_HEAD(&work->list);
893         work->tgt = tgt;
894         work->wqe = wqe;
895         return work;
896 }
897
898 int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt)
899 {
900         struct fcoe_cqe *cq;
901         u32 cq_cons;
902         struct fcoe_cqe *cqe;
903         u32 num_free_sqes = 0;
904         u16 wqe;
905
906         /*
907          * cq_lock is a low contention lock used to protect
908          * the CQ data structure from being freed up during
909          * the upload operation
910          */
911         spin_lock_bh(&tgt->cq_lock);
912
913         if (!tgt->cq) {
914                 printk(KERN_ERR PFX "process_new_cqes: cq is NULL\n");
915                 spin_unlock_bh(&tgt->cq_lock);
916                 return 0;
917         }
918         cq = tgt->cq;
919         cq_cons = tgt->cq_cons_idx;
920         cqe = &cq[cq_cons];
921
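        /*
         * A CQE is valid for the current pass over the ring when its toggle
         * bit matches tgt->cq_curr_toggle_bit; the expected toggle bit is
         * flipped each time the consumer index wraps around below.
         */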
922         while (((wqe = cqe->wqe) & FCOE_CQE_TOGGLE_BIT) ==
923                (tgt->cq_curr_toggle_bit <<
924                FCOE_CQE_TOGGLE_BIT_SHIFT)) {
925
926                 /* new entry on the cq */
927                 if (wqe & FCOE_CQE_CQE_TYPE) {
928                         /* Unsolicited event notification */
929                         bnx2fc_process_unsol_compl(tgt, wqe);
930                 } else {
931                         /* Pending work request completion */
932                         struct bnx2fc_work *work = NULL;
933                         struct bnx2fc_percpu_s *fps = NULL;
934                         unsigned int cpu = wqe % num_possible_cpus();
935
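                        /*
                         * Hand the completion to one of the per-CPU I/O
                         * threads (selected from the low bits of the wqe);
                         * if that thread is not running, fall back to
                         * processing the completion inline below.
                         */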
936                         fps = &per_cpu(bnx2fc_percpu, cpu);
937                         spin_lock_bh(&fps->fp_work_lock);
938                         if (unlikely(!fps->iothread))
939                                 goto unlock;
940
941                         work = bnx2fc_alloc_work(tgt, wqe);
942                         if (work)
943                                 list_add_tail(&work->list,
944                                               &fps->work_list);
945 unlock:
946                         spin_unlock_bh(&fps->fp_work_lock);
947
948                         /* Pending work request completion */
949                         if (fps->iothread && work)
950                                 wake_up_process(fps->iothread);
951                         else
952                                 bnx2fc_process_cq_compl(tgt, wqe);
953                 }
954                 cqe++;
955                 tgt->cq_cons_idx++;
956                 num_free_sqes++;
957
958                 if (tgt->cq_cons_idx == BNX2FC_CQ_WQES_MAX) {
959                         tgt->cq_cons_idx = 0;
960                         cqe = cq;
961                         tgt->cq_curr_toggle_bit =
962                                 1 - tgt->cq_curr_toggle_bit;
963                 }
964         }
965         bnx2fc_arm_cq(tgt);
966         atomic_add(num_free_sqes, &tgt->free_sqes);
967         spin_unlock_bh(&tgt->cq_lock);
968         return 0;
969 }
970
971 /**
972  * bnx2fc_fastpath_notification - process global event queue (KCQ)
973  *
974  * @hba:                adapter structure pointer
975  * @new_cqe_kcqe:       pointer to newly DMA'd KCQ entry
976  *
977  * Fast path event notification handler
978  */
979 static void bnx2fc_fastpath_notification(struct bnx2fc_hba *hba,
980                                         struct fcoe_kcqe *new_cqe_kcqe)
981 {
982         u32 conn_id = new_cqe_kcqe->fcoe_conn_id;
983         struct bnx2fc_rport *tgt = hba->tgt_ofld_list[conn_id];
984
985         if (!tgt) {
986                 printk(KERN_ALERT PFX "conn_id 0x%x not valid\n", conn_id);
987                 return;
988         }
989
990         bnx2fc_process_new_cqes(tgt);
991 }
992
993 /**
994  * bnx2fc_process_ofld_cmpl - process FCoE session offload completion
995  *
996  * @hba:        adapter structure pointer
997  * @ofld_kcqe:  connection offload kcqe pointer
998  *
999  * handle session offload completion, enable the session if offload is
1000  * successful.
1001  */
1002 static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
1003                                         struct fcoe_kcqe *ofld_kcqe)
1004 {
1005         struct bnx2fc_rport             *tgt;
1006         struct fcoe_port                *port;
1007         u32                             conn_id;
1008         u32                             context_id;
1009         int                             rc;
1010
1011         conn_id = ofld_kcqe->fcoe_conn_id;
1012         context_id = ofld_kcqe->fcoe_conn_context_id;
1013         tgt = hba->tgt_ofld_list[conn_id];
1014         if (!tgt) {
1015                 printk(KERN_ALERT PFX "ERROR:ofld_cmpl: No pending ofld req\n");
1016                 return;
1017         }
1018         BNX2FC_TGT_DBG(tgt, "Entered ofld compl - context_id = 0x%x\n",
1019                 ofld_kcqe->fcoe_conn_context_id);
1020         port = tgt->port;
1021         if (hba != tgt->port->priv) {
1022                 printk(KERN_ALERT PFX "ERROR:ofld_cmpl: HBA mis-match\n");
1023                 goto ofld_cmpl_err;
1024         }
1025         /*
1026          * cnic has allocated a context_id for this session; use this
1027          * while enabling the session.
1028          */
1029         tgt->context_id = context_id;
1030         if (ofld_kcqe->completion_status) {
1031                 if (ofld_kcqe->completion_status ==
1032                                 FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE) {
1033                         printk(KERN_ERR PFX "unable to allocate FCoE context "
1034                                 "resources\n");
1035                         set_bit(BNX2FC_FLAG_CTX_ALLOC_FAILURE, &tgt->flags);
1036                 }
1037                 goto ofld_cmpl_err;
1038         } else {
1039
1040                 /* now enable the session */
1041                 rc = bnx2fc_send_session_enable_req(port, tgt);
1042                 if (rc) {
1043                         printk(KERN_ALERT PFX "enable session failed\n");
1044                         goto ofld_cmpl_err;
1045                 }
1046         }
1047         return;
1048 ofld_cmpl_err:
1049         set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
1050         wake_up_interruptible(&tgt->ofld_wait);
1051 }
1052
1053 /**
1054  * bnx2fc_process_enable_conn_cmpl - process FCoE session enable completion
1055  *
1056  * @hba:        adapter structure pointer
1057  * @ofld_kcqe:  connection offload kcqe pointer
1058  *
1059  * handle session enable completion, mark the rport as ready
1060  */
1061
1062 static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
1063                                                 struct fcoe_kcqe *ofld_kcqe)
1064 {
1065         struct bnx2fc_rport             *tgt;
1066         u32                             conn_id;
1067         u32                             context_id;
1068
1069         context_id = ofld_kcqe->fcoe_conn_context_id;
1070         conn_id = ofld_kcqe->fcoe_conn_id;
1071         tgt = hba->tgt_ofld_list[conn_id];
1072         if (!tgt) {
1073                 printk(KERN_ALERT PFX "ERROR:enbl_cmpl: No pending ofld req\n");
1074                 return;
1075         }
1076
1077         BNX2FC_TGT_DBG(tgt, "Enable compl - context_id = 0x%x\n",
1078                 ofld_kcqe->fcoe_conn_context_id);
1079
1080         /*
1081          * context_id should be the same for this target during offload
1082          * and enable
1083          */
1084         if (tgt->context_id != context_id) {
1085                 printk(KERN_ALERT PFX "context id mis-match\n");
1086                 return;
1087         }
1088         if (hba != tgt->port->priv) {
1089                 printk(KERN_ALERT PFX "bnx2fc-enbl_cmpl: HBA mis-match\n");
1090                 goto enbl_cmpl_err;
1091         }
1092         if (ofld_kcqe->completion_status) {
1093                 goto enbl_cmpl_err;
1094         } else {
1095                 /* enable successful - rport ready for issuing IOs */
1096                 set_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
1097                 set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
1098                 wake_up_interruptible(&tgt->ofld_wait);
1099         }
1100         return;
1101
1102 enbl_cmpl_err:
1103         set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
1104         wake_up_interruptible(&tgt->ofld_wait);
1105 }
1106
1107 static void bnx2fc_process_conn_disable_cmpl(struct bnx2fc_hba *hba,
1108                                         struct fcoe_kcqe *disable_kcqe)
1109 {
1110
1111         struct bnx2fc_rport             *tgt;
1112         u32                             conn_id;
1113
1114         conn_id = disable_kcqe->fcoe_conn_id;
1115         tgt = hba->tgt_ofld_list[conn_id];
1116         if (!tgt) {
1117                 printk(KERN_ALERT PFX "ERROR: disable_cmpl: No disable req\n");
1118                 return;
1119         }
1120
1121         BNX2FC_TGT_DBG(tgt, "disable_cmpl: conn_id %d\n", conn_id);
1122
1123         if (disable_kcqe->completion_status) {
1124                 printk(KERN_ALERT PFX "ERROR: Disable failed with cmpl status %d\n",
1125                         disable_kcqe->completion_status);
1126                 return;
1127         } else {
1128                 /* disable successful */
1129                 BNX2FC_TGT_DBG(tgt, "disable successful\n");
1130                 clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
1131                 set_bit(BNX2FC_FLAG_DISABLED, &tgt->flags);
1132                 set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
1133                 wake_up_interruptible(&tgt->upld_wait);
1134         }
1135 }
1136
1137 static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba,
1138                                         struct fcoe_kcqe *destroy_kcqe)
1139 {
1140         struct bnx2fc_rport             *tgt;
1141         u32                             conn_id;
1142
1143         conn_id = destroy_kcqe->fcoe_conn_id;
1144         tgt = hba->tgt_ofld_list[conn_id];
1145         if (!tgt) {
1146                 printk(KERN_ALERT PFX "destroy_cmpl: No destroy req\n");
1147                 return;
1148         }
1149
1150         BNX2FC_TGT_DBG(tgt, "destroy_cmpl: conn_id %d\n", conn_id);
1151
1152         if (destroy_kcqe->completion_status) {
1153                 printk(KERN_ALERT PFX "Destroy conn failed, cmpl status %d\n",
1154                         destroy_kcqe->completion_status);
1155                 return;
1156         } else {
1157                 /* destroy successful */
1158                 BNX2FC_TGT_DBG(tgt, "upload successful\n");
1159                 clear_bit(BNX2FC_FLAG_DISABLED, &tgt->flags);
1160                 set_bit(BNX2FC_FLAG_DESTROYED, &tgt->flags);
1161                 set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
1162                 wake_up_interruptible(&tgt->upld_wait);
1163         }
1164 }
1165
1166 static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code)
1167 {
1168         switch (err_code) {
1169         case FCOE_KCQE_COMPLETION_STATUS_INVALID_OPCODE:
1170                 printk(KERN_ERR PFX "init_failure due to invalid opcode\n");
1171                 break;
1172
1173         case FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE:
1174                 printk(KERN_ERR PFX "init failed due to ctx alloc failure\n");
1175                 break;
1176
1177         case FCOE_KCQE_COMPLETION_STATUS_NIC_ERROR:
1178                 printk(KERN_ERR PFX "init_failure due to NIC error\n");
1179                 break;
1180         case FCOE_KCQE_COMPLETION_STATUS_ERROR:
1181                 printk(KERN_ERR PFX "init failure due to compl status err\n");
1182                 break;
1183         case FCOE_KCQE_COMPLETION_STATUS_WRONG_HSI_VERSION:
1184                 printk(KERN_ERR PFX "init failure due to HSI mismatch\n");
                break;
1185         default:
1186                 printk(KERN_ERR PFX "Unknown Error code %d\n", err_code);
1187         }
1188 }
1189
1190 /**
1191  * bnx2fc_indicate_kcqe - process KCQE
1192  *
1193  * @context:    adapter structure pointer
1194  * @kcq:        array of kcqe pointers
1195  * @num_cqe:    Number of completion queue elements
1196  *
1197  * Generic KCQ event handler
1198  */
1199 void bnx2fc_indicate_kcqe(void *context, struct kcqe *kcq[],
1200                                         u32 num_cqe)
1201 {
1202         struct bnx2fc_hba *hba = (struct bnx2fc_hba *)context;
1203         int i = 0;
1204         struct fcoe_kcqe *kcqe = NULL;
1205
1206         while (i < num_cqe) {
1207                 kcqe = (struct fcoe_kcqe *) kcq[i++];
1208
1209                 switch (kcqe->op_code) {
1210                 case FCOE_KCQE_OPCODE_CQ_EVENT_NOTIFICATION:
1211                         bnx2fc_fastpath_notification(hba, kcqe);
1212                         break;
1213
1214                 case FCOE_KCQE_OPCODE_OFFLOAD_CONN:
1215                         bnx2fc_process_ofld_cmpl(hba, kcqe);
1216                         break;
1217
1218                 case FCOE_KCQE_OPCODE_ENABLE_CONN:
1219                         bnx2fc_process_enable_conn_cmpl(hba, kcqe);
1220                         break;
1221
1222                 case FCOE_KCQE_OPCODE_INIT_FUNC:
1223                         if (kcqe->completion_status !=
1224                                         FCOE_KCQE_COMPLETION_STATUS_SUCCESS) {
1225                                 bnx2fc_init_failure(hba,
1226                                                 kcqe->completion_status);
1227                         } else {
1228                                 set_bit(ADAPTER_STATE_UP, &hba->adapter_state);
1229                                 bnx2fc_get_link_state(hba);
1230                                 printk(KERN_INFO PFX "[%.2x]: FCOE_INIT passed\n",
1231                                         (u8)hba->pcidev->bus->number);
1232                         }
1233                         break;
1234
1235                 case FCOE_KCQE_OPCODE_DESTROY_FUNC:
1236                         if (kcqe->completion_status !=
1237                                         FCOE_KCQE_COMPLETION_STATUS_SUCCESS) {
1238
1239                                 printk(KERN_ERR PFX "DESTROY failed\n");
1240                         } else {
1241                                 printk(KERN_ERR PFX "DESTROY success\n");
1242                         }
1243                         hba->flags |= BNX2FC_FLAG_DESTROY_CMPL;
1244                         wake_up_interruptible(&hba->destroy_wait);
1245                         break;
1246
1247                 case FCOE_KCQE_OPCODE_DISABLE_CONN:
1248                         bnx2fc_process_conn_disable_cmpl(hba, kcqe);
1249                         break;
1250
1251                 case FCOE_KCQE_OPCODE_DESTROY_CONN:
1252                         bnx2fc_process_conn_destroy_cmpl(hba, kcqe);
1253                         break;
1254
1255                 case FCOE_KCQE_OPCODE_STAT_FUNC:
1256                         if (kcqe->completion_status !=
1257                             FCOE_KCQE_COMPLETION_STATUS_SUCCESS)
1258                                 printk(KERN_ERR PFX "STAT failed\n");
1259                         complete(&hba->stat_req_done);
1260                         break;
1261
1262                 case FCOE_KCQE_OPCODE_FCOE_ERROR:
1263                         /* fall thru */
1264                 default:
1265                         printk(KERN_ALERT PFX "unknown opcode 0x%x\n",
1266                                                                 kcqe->op_code);
1267                 }
1268         }
1269 }
1270
1271 void bnx2fc_add_2_sq(struct bnx2fc_rport *tgt, u16 xid)
1272 {
1273         struct fcoe_sqe *sqe;
1274
1275         sqe = &tgt->sq[tgt->sq_prod_idx];
1276
1277         /* Fill SQ WQE */
1278         sqe->wqe = xid << FCOE_SQE_TASK_ID_SHIFT;
1279         sqe->wqe |= tgt->sq_curr_toggle_bit << FCOE_SQE_TOGGLE_BIT_SHIFT;
1280
1281         /* Advance SQ Prod Idx */
1282         if (++tgt->sq_prod_idx == BNX2FC_SQ_WQES_MAX) {
1283                 tgt->sq_prod_idx = 0;
1284                 tgt->sq_curr_toggle_bit = 1 - tgt->sq_curr_toggle_bit;
1285         }
1286 }
1287
1288 void bnx2fc_ring_doorbell(struct bnx2fc_rport *tgt)
1289 {
1290         struct b577xx_doorbell_set_prod *sq_db = &tgt->sq_db;
1291         u32 msg;
1292
1293         wmb();
1294         sq_db->prod = tgt->sq_prod_idx |
1295                                 (tgt->sq_curr_toggle_bit << 15);
1296         msg = *((u32 *)sq_db);
1297         writel(cpu_to_le32(msg), tgt->ctx_base);
1298         mmiowb();
1299
1300 }
1301
1302 int bnx2fc_map_doorbell(struct bnx2fc_rport *tgt)
1303 {
1304         u32 context_id = tgt->context_id;
1305         struct fcoe_port *port = tgt->port;
1306         u32 reg_off;
1307         resource_size_t reg_base;
1308         struct bnx2fc_hba *hba = port->priv;
1309
1310         reg_base = pci_resource_start(hba->pcidev,
1311                                         BNX2X_DOORBELL_PCI_BAR);
1312         reg_off = BNX2FC_5771X_DB_PAGE_SIZE *
1313                         (context_id & 0x1FFFF) + DPM_TRIGER_TYPE;
1314         tgt->ctx_base = ioremap_nocache(reg_base + reg_off, 4);
1315         if (!tgt->ctx_base)
1316                 return -ENOMEM;
1317         return 0;
1318 }
1319
1320 char *bnx2fc_get_next_rqe(struct bnx2fc_rport *tgt, u8 num_items)
1321 {
1322         char *buf = (char *)tgt->rq + (tgt->rq_cons_idx * BNX2FC_RQ_BUF_SZ);
1323
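        /*
         * Only hand out RQ entries that are contiguous in the ring; a
         * request that would run past the end returns NULL, and the caller
         * falls back to fetching the entries one at a time.
         */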
1324         if (tgt->rq_cons_idx + num_items > BNX2FC_RQ_WQES_MAX)
1325                 return NULL;
1326
1327         tgt->rq_cons_idx += num_items;
1328
1329         if (tgt->rq_cons_idx >= BNX2FC_RQ_WQES_MAX)
1330                 tgt->rq_cons_idx -= BNX2FC_RQ_WQES_MAX;
1331
1332         return buf;
1333 }
1334
1335 void bnx2fc_return_rqe(struct bnx2fc_rport *tgt, u8 num_items)
1336 {
1337         /* return the rq buffer */
1338         u32 next_prod_idx = tgt->rq_prod_idx + num_items;
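        /*
         * The low 15 bits of the producer index are the ring offset; when
         * they reach BNX2FC_RQ_WQES_MAX, jump past the remainder of the
         * 15-bit space so the offset wraps to 0 and bit 15 toggles.
         */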
1339         if ((next_prod_idx & 0x7fff) == BNX2FC_RQ_WQES_MAX) {
1340                 /* Wrap around RQ */
1341                 next_prod_idx += 0x8000 - BNX2FC_RQ_WQES_MAX;
1342         }
1343         tgt->rq_prod_idx = next_prod_idx;
1344         tgt->conn_db->rq_prod = tgt->rq_prod_idx;
1345 }
1346
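     /**
      * bnx2fc_init_cleanup_task - init task context for an exchange cleanup
      *
      * @io_req:     cleanup command
      * @task:       firmware task context entry to initialize
      * @orig_xid:   xid of the exchange being cleaned up
      *
      * Zeroes the task context and fills in the init, tx and rx flags for an
      *      EXCHANGE_CLEANUP request against @orig_xid.
      */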
1347 void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req,
1348                               struct fcoe_task_ctx_entry *task,
1349                               u16 orig_xid)
1350 {
1351         u8 task_type = FCOE_TASK_TYPE_EXCHANGE_CLEANUP;
1352         struct bnx2fc_rport *tgt = io_req->tgt;
1353         u32 context_id = tgt->context_id;
1354
1355         memset(task, 0, sizeof(struct fcoe_task_ctx_entry));
1356
1357         /* Tx Write Rx Read */
1358         /* init flags */
1359         task->txwr_rxrd.const_ctx.init_flags = task_type <<
1360                                 FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
1361         task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
1362                                 FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
1363         task->txwr_rxrd.const_ctx.init_flags |=
1364                                 FCOE_TASK_DEV_TYPE_DISK <<
1365                                 FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
1366         task->txwr_rxrd.union_ctx.cleanup.ctx.cleaned_task_id = orig_xid;
1367
1368         /* Tx flags */
1369         task->txwr_rxrd.const_ctx.tx_flags =
1370                                 FCOE_TASK_TX_STATE_EXCHANGE_CLEANUP <<
1371                                 FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;
1372
1373         /* Rx Read Tx Write */
1374         task->rxwr_txrd.const_ctx.init_flags = context_id <<
1375                                 FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;
1376         task->rxwr_txrd.var_ctx.rx_flags |= 1 <<
1377                                 FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT;
1378 }
1379
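     /**
      * bnx2fc_init_mp_task - init task context for a middle path command
      *
      * @io_req:     ELS, task management or ABTS command
      * @task:       firmware task context entry to initialize
      *
      * Programs the tx/rx SGLs from the middle path request and response
      *      buffers, fills in the init, tx and rx flags, and copies the FC
      *      header (byte-swapped into big endian 64-bit words) into the
      *      task context.
      */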
1380 void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req,
1381                                 struct fcoe_task_ctx_entry *task)
1382 {
1383         struct bnx2fc_mp_req *mp_req = &(io_req->mp_req);
1384         struct bnx2fc_rport *tgt = io_req->tgt;
1385         struct fc_frame_header *fc_hdr;
1386         struct fcoe_ext_mul_sges_ctx *sgl;
1387         u8 task_type = 0;
1388         u64 *hdr;
1389         u64 temp_hdr[3];
1390         u32 context_id;
1391
1392
1393         /* Obtain task_type */
1394         if ((io_req->cmd_type == BNX2FC_TASK_MGMT_CMD) ||
1395             (io_req->cmd_type == BNX2FC_ELS)) {
1396                 task_type = FCOE_TASK_TYPE_MIDPATH;
1397         } else if (io_req->cmd_type == BNX2FC_ABTS) {
1398                 task_type = FCOE_TASK_TYPE_ABTS;
1399         }
1400
1401         memset(task, 0, sizeof(struct fcoe_task_ctx_entry));
1402
1403         /* Setup the task from io_req for easy reference */
1404         io_req->task = task;
1405
1406         BNX2FC_IO_DBG(io_req, "Init MP task for cmd_type = %d task_type = %d\n",
1407                 io_req->cmd_type, task_type);
1408
1409         /* Tx only */
1410         if ((task_type == FCOE_TASK_TYPE_MIDPATH) ||
1411             (task_type == FCOE_TASK_TYPE_UNSOLICITED)) {
1412                 task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo =
1413                                 (u32)mp_req->mp_req_bd_dma;
1414                 task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi =
1415                                 (u32)((u64)mp_req->mp_req_bd_dma >> 32);
1416                 task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size = 1;
1417         }
1418
1419         /* Tx Write Rx Read */
1420         /* init flags */
1421         task->txwr_rxrd.const_ctx.init_flags = task_type <<
1422                                 FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
1423         task->txwr_rxrd.const_ctx.init_flags |=
1424                                 FCOE_TASK_DEV_TYPE_DISK <<
1425                                 FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
1426         task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
1427                                 FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
1428
1429         /* tx flags */
1430         task->txwr_rxrd.const_ctx.tx_flags = FCOE_TASK_TX_STATE_INIT <<
1431                                 FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;
1432
1433         /* Rx Write Tx Read */
1434         task->rxwr_txrd.const_ctx.data_2_trns = io_req->data_xfer_len;
1435
1436         /* rx flags */
1437         task->rxwr_txrd.var_ctx.rx_flags |= 1 <<
1438                                 FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT;
1439
1440         context_id = tgt->context_id;
1441         task->rxwr_txrd.const_ctx.init_flags = context_id <<
1442                                 FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;
1443
1444         fc_hdr = &(mp_req->req_fc_hdr);
1445         if (task_type == FCOE_TASK_TYPE_MIDPATH) {
1446                 fc_hdr->fh_ox_id = cpu_to_be16(io_req->xid);
1447                 fc_hdr->fh_rx_id = htons(0xffff);
1448                 task->rxwr_txrd.var_ctx.rx_id = 0xffff;
1449         } else if (task_type == FCOE_TASK_TYPE_UNSOLICITED) {
1450                 fc_hdr->fh_rx_id = cpu_to_be16(io_req->xid);
1451         }
1452
1453         /* Fill FC Header into middle path buffer */
1454         hdr = (u64 *) &task->txwr_rxrd.union_ctx.tx_frame.fc_hdr;
1455         memcpy(temp_hdr, fc_hdr, sizeof(temp_hdr));
1456         hdr[0] = cpu_to_be64(temp_hdr[0]);
1457         hdr[1] = cpu_to_be64(temp_hdr[1]);
1458         hdr[2] = cpu_to_be64(temp_hdr[2]);
1459
1460         /* Rx Only */
1461         if (task_type == FCOE_TASK_TYPE_MIDPATH) {
1462                 sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl;
1463
1464                 sgl->mul_sgl.cur_sge_addr.lo = (u32)mp_req->mp_resp_bd_dma;
1465                 sgl->mul_sgl.cur_sge_addr.hi =
1466                                 (u32)((u64)mp_req->mp_resp_bd_dma >> 32);
1467                 sgl->mul_sgl.sgl_size = 1;
1468         }
1469 }
1470
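     /**
      * bnx2fc_init_task - init task context for a SCSI read/write command
      *
      * @io_req:     SCSI command
      * @task:       firmware task context entry to initialize
      *
      * Programs the tx SGL for writes, builds and byte-swaps the FCP_CMND IU
      *      into the task context, and sets up the rx side of reads either
      *      with a cached SGE (one or two buffer descriptors) or with the
      *      full SGL.
      */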
1471 void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
1472                              struct fcoe_task_ctx_entry *task)
1473 {
1474         u8 task_type;
1475         struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
1476         struct io_bdt *bd_tbl = io_req->bd_tbl;
1477         struct bnx2fc_rport *tgt = io_req->tgt;
1478         struct fcoe_cached_sge_ctx *cached_sge;
1479         struct fcoe_ext_mul_sges_ctx *sgl;
1480         u64 *fcp_cmnd;
1481         u64 tmp_fcp_cmnd[4];
1482         u32 context_id;
1483         int cnt, i;
1484         int bd_count;
1485
1486         memset(task, 0, sizeof(struct fcoe_task_ctx_entry));
1487
1488         /* Setup the task from io_req for easy reference */
1489         io_req->task = task;
1490
1491         if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
1492                 task_type = FCOE_TASK_TYPE_WRITE;
1493         else
1494                 task_type = FCOE_TASK_TYPE_READ;
1495
1496         /* Tx only */
1497         if (task_type == FCOE_TASK_TYPE_WRITE) {
1498                 task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo =
1499                                 (u32)bd_tbl->bd_tbl_dma;
1500                 task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi =
1501                                 (u32)((u64)bd_tbl->bd_tbl_dma >> 32);
1502                 task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size =
1503                                 bd_tbl->bd_valid;
1504         }
1505
1506         /*Tx Write Rx Read */
1507         /* Init state to NORMAL */
1508         task->txwr_rxrd.const_ctx.init_flags = task_type <<
1509                                 FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
1510         task->txwr_rxrd.const_ctx.init_flags |=
1511                                 FCOE_TASK_DEV_TYPE_DISK <<
1512                                 FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
1513         task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
1514                                 FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
1515         /* tx flags */
1516         task->txwr_rxrd.const_ctx.tx_flags = FCOE_TASK_TX_STATE_NORMAL <<
1517                                 FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;
1518
1519         /* Set initial seq counter */
1520         task->txwr_rxrd.union_ctx.tx_seq.ctx.seq_cnt = 1;
1521
1522         /* Fill FCP_CMND IU */
1523         fcp_cmnd = (u64 *)
1524                     task->txwr_rxrd.union_ctx.fcp_cmd.opaque;
1525         bnx2fc_build_fcp_cmnd(io_req, (struct fcp_cmnd *)&tmp_fcp_cmnd);
1526
1527         /* swap fcp_cmnd */
1528         cnt = sizeof(struct fcp_cmnd) / sizeof(u64);
1529
1530         for (i = 0; i < cnt; i++) {
1531                 *fcp_cmnd = cpu_to_be64(tmp_fcp_cmnd[i]);
1532                 fcp_cmnd++;
1533         }
1534
1535         /* Rx Write Tx Read */
1536         task->rxwr_txrd.const_ctx.data_2_trns = io_req->data_xfer_len;
1537
1538         context_id = tgt->context_id;
1539         task->rxwr_txrd.const_ctx.init_flags = context_id <<
1540                                 FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;
1541
1542         /* rx flags */
1543         /* Set state to "waiting for the first packet" */
1544         task->rxwr_txrd.var_ctx.rx_flags |= 1 <<
1545                                 FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT;
1546
1547         task->rxwr_txrd.var_ctx.rx_id = 0xffff;
1548
1549         /* Rx Only */
1550         cached_sge = &task->rxwr_only.union_ctx.read_info.sgl_ctx.cached_sge;
1551         sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl;
1552         bd_count = bd_tbl->bd_valid;
1553         if (task_type == FCOE_TASK_TYPE_READ) {
1554                 if (bd_count == 1) {
1555
1556                         struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl;
1557
1558                         cached_sge->cur_buf_addr.lo = fcoe_bd_tbl->buf_addr_lo;
1559                         cached_sge->cur_buf_addr.hi = fcoe_bd_tbl->buf_addr_hi;
1560                         cached_sge->cur_buf_rem = fcoe_bd_tbl->buf_len;
1561                         task->txwr_rxrd.const_ctx.init_flags |= 1 <<
1562                                 FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT;
1563                 } else if (bd_count == 2) {
1564                         struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl;
1565
1566                         cached_sge->cur_buf_addr.lo = fcoe_bd_tbl->buf_addr_lo;
1567                         cached_sge->cur_buf_addr.hi = fcoe_bd_tbl->buf_addr_hi;
1568                         cached_sge->cur_buf_rem = fcoe_bd_tbl->buf_len;
1569
1570                         fcoe_bd_tbl++;
1571                         cached_sge->second_buf_addr.lo =
1572                                                  fcoe_bd_tbl->buf_addr_lo;
1573                         cached_sge->second_buf_addr.hi =
1574                                                 fcoe_bd_tbl->buf_addr_hi;
1575                         cached_sge->second_buf_rem = fcoe_bd_tbl->buf_len;
1576                         task->txwr_rxrd.const_ctx.init_flags |= 1 <<
1577                                 FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT;
1578                 } else {
1579
1580                         sgl->mul_sgl.cur_sge_addr.lo = (u32)bd_tbl->bd_tbl_dma;
1581                         sgl->mul_sgl.cur_sge_addr.hi =
1582                                         (u32)((u64)bd_tbl->bd_tbl_dma >> 32);
1583                         sgl->mul_sgl.sgl_size = bd_count;
1584                 }
1585         }
1586 }
1587
1588 /**
1589  * bnx2fc_setup_task_ctx - allocate and map task context
1590  *
1591  * @hba:        pointer to adapter structure
1592  *
1593  * Allocate memory for the task context pages and the associated BD
1594  * table that the firmware uses to locate them.
1595  *
1596  */
1597 int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba)
1598 {
1599         int rc = 0;
1600         struct regpair *task_ctx_bdt;
1601         dma_addr_t addr;
1602         int i;
1603
1604         /*
1605          * Allocate the task context BD table. A single page of BD
1606          * entries can map 256 task context pages; each of those pages
1607          * holds 32 task context entries, so one BD table page covers
1608          * up to 8192 task context entries.
1609          */
1610         hba->task_ctx_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
1611                                                   PAGE_SIZE,
1612                                                   &hba->task_ctx_bd_dma,
1613                                                   GFP_KERNEL);
1614         if (!hba->task_ctx_bd_tbl) {
1615                 printk(KERN_ERR PFX "unable to allocate task context BDT\n");
1616                 rc = -ENOMEM;
1617                 goto out;
1618         }
1619         memset(hba->task_ctx_bd_tbl, 0, PAGE_SIZE);
1620
1621         /*
1622          * Allocate task_ctx, an array of pointers where each entry
1623          * points to a page holding 32 task contexts.
1624          */
1625         hba->task_ctx = kzalloc((BNX2FC_TASK_CTX_ARR_SZ * sizeof(void *)),
1626                                  GFP_KERNEL);
1627         if (!hba->task_ctx) {
1628                 printk(KERN_ERR PFX "unable to allocate task context array\n");
1629                 rc = -ENOMEM;
1630                 goto out1;
1631         }
1632
1633         /*
1634          * Allocate task_ctx_dma which is an array of dma addresses
1635          */
1636         hba->task_ctx_dma = kmalloc((BNX2FC_TASK_CTX_ARR_SZ *
1637                                         sizeof(dma_addr_t)), GFP_KERNEL);
1638         if (!hba->task_ctx_dma) {
1639                 printk(KERN_ERR PFX "unable to alloc context mapping array\n");
1640                 rc = -ENOMEM;
1641                 goto out2;
1642         }
1643
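     /*
      * Allocate each task context page and record its DMA address in the
      * BD table as a lo/hi regpair.
      */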
1644         task_ctx_bdt = (struct regpair *)hba->task_ctx_bd_tbl;
1645         for (i = 0; i < BNX2FC_TASK_CTX_ARR_SZ; i++) {
1646
1647                 hba->task_ctx[i] = dma_alloc_coherent(&hba->pcidev->dev,
1648                                                       PAGE_SIZE,
1649                                                       &hba->task_ctx_dma[i],
1650                                                       GFP_KERNEL);
1651                 if (!hba->task_ctx[i]) {
1652                         printk(KERN_ERR PFX "unable to alloc task context\n");
1653                         rc = -ENOMEM;
1654                         goto out3;
1655                 }
1656                 memset(hba->task_ctx[i], 0, PAGE_SIZE);
1657                 addr = (u64)hba->task_ctx_dma[i];
1658                 task_ctx_bdt->hi = cpu_to_le32((u64)addr >> 32);
1659                 task_ctx_bdt->lo = cpu_to_le32((u32)addr);
1660                 task_ctx_bdt++;
1661         }
1662         return 0;
1663
1664 out3:
1665         for (i = 0; i < BNX2FC_TASK_CTX_ARR_SZ; i++) {
1666                 if (hba->task_ctx[i]) {
1667
1668                         dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
1669                                 hba->task_ctx[i], hba->task_ctx_dma[i]);
1670                         hba->task_ctx[i] = NULL;
1671                 }
1672         }
1673
1674         kfree(hba->task_ctx_dma);
1675         hba->task_ctx_dma = NULL;
1676 out2:
1677         kfree(hba->task_ctx);
1678         hba->task_ctx = NULL;
1679 out1:
1680         dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
1681                         hba->task_ctx_bd_tbl, hba->task_ctx_bd_dma);
1682         hba->task_ctx_bd_tbl = NULL;
1683 out:
1684         return rc;
1685 }
1686
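     /**
      * bnx2fc_free_task_ctx - free task context resources
      *
      * @hba:        pointer to adapter structure
      *
      * Releases the task context pages, the BD table and the DMA address
      *      array allocated by bnx2fc_setup_task_ctx().
      */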
1687 void bnx2fc_free_task_ctx(struct bnx2fc_hba *hba)
1688 {
1689         int i;
1690
1691         if (hba->task_ctx_bd_tbl) {
1692                 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
1693                                     hba->task_ctx_bd_tbl,
1694                                     hba->task_ctx_bd_dma);
1695                 hba->task_ctx_bd_tbl = NULL;
1696         }
1697
1698         if (hba->task_ctx) {
1699                 for (i = 0; i < BNX2FC_TASK_CTX_ARR_SZ; i++) {
1700                         if (hba->task_ctx[i]) {
1701                                 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
1702                                                     hba->task_ctx[i],
1703                                                     hba->task_ctx_dma[i]);
1704                                 hba->task_ctx[i] = NULL;
1705                         }
1706                 }
1707                 kfree(hba->task_ctx);
1708                 hba->task_ctx = NULL;
1709         }
1710
1711         kfree(hba->task_ctx_dma);
1712         hba->task_ctx_dma = NULL;
1713 }
1714
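     /**
      * bnx2fc_free_hash_table - free the session hash table segments and PBL
      *
      * @hba:        pointer to adapter structure
      *
      * Walks the page base list (PBL) to recover each segment's DMA address,
      *      frees the hash table chunks, and then frees the PBL page itself.
      */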
1715 static void bnx2fc_free_hash_table(struct bnx2fc_hba *hba)
1716 {
1717         int i;
1718         int segment_count;
1720         u32 *pbl;
1721
1722         segment_count = hba->hash_tbl_segment_count;
1725
1726         pbl = hba->hash_tbl_pbl;
1727         for (i = 0; i < segment_count; ++i) {
1728                 dma_addr_t dma_address;
1729
1730                 dma_address = le32_to_cpu(*pbl);
1731                 ++pbl;
1732                 dma_address += ((u64)le32_to_cpu(*pbl)) << 32;
1733                 ++pbl;
1734                 dma_free_coherent(&hba->pcidev->dev,
1735                                   BNX2FC_HASH_TBL_CHUNK_SIZE,
1736                                   hba->hash_tbl_segments[i],
1737                                   dma_address);
1738
1739         }
             kfree(hba->hash_tbl_segments);
             hba->hash_tbl_segments = NULL;
1740
1741         if (hba->hash_tbl_pbl) {
1742                 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
1743                                     hba->hash_tbl_pbl,
1744                                     hba->hash_tbl_pbl_dma);
1745                 hba->hash_tbl_pbl = NULL;
1746         }
1747 }
1748
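     /**
      * bnx2fc_allocate_hash_table - allocate the session hash table
      *
      * @hba:        pointer to adapter structure
      *
      * Allocates the hash table in BNX2FC_HASH_TBL_CHUNK_SIZE segments and
      *      records each segment's DMA address as a lo/hi pair in a page
      *      base list (PBL) page for use by the firmware.
      */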
1749 static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba)
1750 {
1751         int i;
1752         int hash_table_size;
1753         int segment_count;
1754         int segment_array_size;
1755         int dma_segment_array_size;
1756         dma_addr_t *dma_segment_array;
1757         u32 *pbl;
1758
1759         hash_table_size = BNX2FC_NUM_MAX_SESS * BNX2FC_MAX_ROWS_IN_HASH_TBL *
1760                 sizeof(struct fcoe_hash_table_entry);
1761
1762         segment_count = hash_table_size + BNX2FC_HASH_TBL_CHUNK_SIZE - 1;
1763         segment_count /= BNX2FC_HASH_TBL_CHUNK_SIZE;
1764         hba->hash_tbl_segment_count = segment_count;
1765
1766         segment_array_size = segment_count * sizeof(*hba->hash_tbl_segments);
1767         hba->hash_tbl_segments = kzalloc(segment_array_size, GFP_KERNEL);
1768         if (!hba->hash_tbl_segments) {
1769                 printk(KERN_ERR PFX "hash table pointers alloc failed\n");
1770                 return -ENOMEM;
1771         }
1772         dma_segment_array_size = segment_count * sizeof(*dma_segment_array);
1773         dma_segment_array = kzalloc(dma_segment_array_size, GFP_KERNEL);
1774         if (!dma_segment_array) {
1775                 printk(KERN_ERR PFX "hash table pointers (dma) alloc failed\n");
                     kfree(hba->hash_tbl_segments);
                     hba->hash_tbl_segments = NULL;
1776                 return -ENOMEM;
1777         }
1778
1779         for (i = 0; i < segment_count; ++i) {
1780                 hba->hash_tbl_segments[i] =
1781                         dma_alloc_coherent(&hba->pcidev->dev,
1782                                            BNX2FC_HASH_TBL_CHUNK_SIZE,
1783                                            &dma_segment_array[i],
1784                                            GFP_KERNEL);
1785                 if (!hba->hash_tbl_segments[i]) {
1786                         printk(KERN_ERR PFX "hash segment alloc failed\n");
1787                         while (--i >= 0) {
1788                                 dma_free_coherent(&hba->pcidev->dev,
1789                                                     BNX2FC_HASH_TBL_CHUNK_SIZE,
1790                                                     hba->hash_tbl_segments[i],
1791                                                     dma_segment_array[i]);
1792                                 hba->hash_tbl_segments[i] = NULL;
1793                         }
1794                         kfree(dma_segment_array);
1795                         return -ENOMEM;
1796                 }
1797                 memset(hba->hash_tbl_segments[i], 0,
1798                        BNX2FC_HASH_TBL_CHUNK_SIZE);
1799         }
1800
1801         hba->hash_tbl_pbl = dma_alloc_coherent(&hba->pcidev->dev,
1802                                                PAGE_SIZE,
1803                                                &hba->hash_tbl_pbl_dma,
1804                                                GFP_KERNEL);
1805         if (!hba->hash_tbl_pbl) {
1806                 printk(KERN_ERR PFX "hash table pbl alloc failed\n");
                     while (--i >= 0)
                             dma_free_coherent(&hba->pcidev->dev,
                                                 BNX2FC_HASH_TBL_CHUNK_SIZE,
                                                 hba->hash_tbl_segments[i],
                                                 dma_segment_array[i]);
                     kfree(hba->hash_tbl_segments);
                     hba->hash_tbl_segments = NULL;
1807                 kfree(dma_segment_array);
1808                 return -ENOMEM;
1809         }
1810         memset(hba->hash_tbl_pbl, 0, PAGE_SIZE);
1811
1812         pbl = hba->hash_tbl_pbl;
1813         for (i = 0; i < segment_count; ++i) {
1814                 u64 paddr = dma_segment_array[i];
1815                 *pbl = cpu_to_le32((u32) paddr);
1816                 ++pbl;
1817                 *pbl = cpu_to_le32((u32) (paddr >> 32));
1818                 ++pbl;
1819         }
1831         kfree(dma_segment_array);
1832         return 0;
1833 }
1834
1835 /**
1836  * bnx2fc_setup_fw_resc - Allocate and map the hash tables, dummy and stats buffers
1837  *
1838  * @hba:        Pointer to adapter structure
1839  *
1840  */
1841 int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba)
1842 {
1843         u64 addr;
1844         u32 mem_size;
1845         int i;
1846
1847         if (bnx2fc_allocate_hash_table(hba))
1848                 return -ENOMEM;
1849
1850         mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair);
1851         hba->t2_hash_tbl_ptr = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
1852                                                   &hba->t2_hash_tbl_ptr_dma,
1853                                                   GFP_KERNEL);
1854         if (!hba->t2_hash_tbl_ptr) {
1855                 printk(KERN_ERR PFX "unable to allocate t2 hash table ptr\n");
1856                 bnx2fc_free_fw_resc(hba);
1857                 return -ENOMEM;
1858         }
1859         memset(hba->t2_hash_tbl_ptr, 0x00, mem_size);
1860
1861         mem_size = BNX2FC_NUM_MAX_SESS *
1862                                 sizeof(struct fcoe_t2_hash_table_entry);
1863         hba->t2_hash_tbl = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
1864                                               &hba->t2_hash_tbl_dma,
1865                                               GFP_KERNEL);
1866         if (!hba->t2_hash_tbl) {
1867                 printk(KERN_ERR PFX "unable to allocate t2 hash table\n");
1868                 bnx2fc_free_fw_resc(hba);
1869                 return -ENOMEM;
1870         }
1871         memset(hba->t2_hash_tbl, 0x00, mem_size);
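     /*
      * Chain the t2 hash table entries: each entry's "next" pointer holds
      * the DMA address of the following entry.
      */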
1872         for (i = 0; i < BNX2FC_NUM_MAX_SESS; i++) {
1873                 addr = (unsigned long) hba->t2_hash_tbl_dma +
1874                          ((i+1) * sizeof(struct fcoe_t2_hash_table_entry));
1875                 hba->t2_hash_tbl[i].next.lo = addr & 0xffffffff;
1876                 hba->t2_hash_tbl[i].next.hi = addr >> 32;
1877         }
1878
1879         hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev,
1880                                                PAGE_SIZE, &hba->dummy_buf_dma,
1881                                                GFP_KERNEL);
1882         if (!hba->dummy_buffer) {
1883                 printk(KERN_ERR PFX "unable to alloc MP Dummy Buffer\n");
1884                 bnx2fc_free_fw_resc(hba);
1885                 return -ENOMEM;
1886         }
1887
1888         hba->stats_buffer = dma_alloc_coherent(&hba->pcidev->dev,
1889                                                PAGE_SIZE,
1890                                                &hba->stats_buf_dma,
1891                                                GFP_KERNEL);
1892         if (!hba->stats_buffer) {
1893                 printk(KERN_ERR PFX "unable to alloc Stats Buffer\n");
1894                 bnx2fc_free_fw_resc(hba);
1895                 return -ENOMEM;
1896         }
1897         memset(hba->stats_buffer, 0x00, PAGE_SIZE);
1898
1899         return 0;
1900 }
1901
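     /**
      * bnx2fc_free_fw_resc - free firmware resources
      *
      * @hba:        pointer to adapter structure
      *
      * Releases the stats buffer, dummy buffer, t2 hash table and the
      *      session hash table allocated by bnx2fc_setup_fw_resc().
      */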
1902 void bnx2fc_free_fw_resc(struct bnx2fc_hba *hba)
1903 {
1904         u32 mem_size;
1905
1906         if (hba->stats_buffer) {
1907                 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
1908                                   hba->stats_buffer, hba->stats_buf_dma);
1909                 hba->stats_buffer = NULL;
1910         }
1911
1912         if (hba->dummy_buffer) {
1913                 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
1914                                   hba->dummy_buffer, hba->dummy_buf_dma);
1915                 hba->dummy_buffer = NULL;
1916         }
1917
1918         if (hba->t2_hash_tbl_ptr) {
1919                 mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair);
1920                 dma_free_coherent(&hba->pcidev->dev, mem_size,
1921                                     hba->t2_hash_tbl_ptr,
1922                                     hba->t2_hash_tbl_ptr_dma);
1923                 hba->t2_hash_tbl_ptr = NULL;
1924         }
1925
1926         if (hba->t2_hash_tbl) {
1927                 mem_size = BNX2FC_NUM_MAX_SESS *
1928                             sizeof(struct fcoe_t2_hash_table_entry);
1929                 dma_free_coherent(&hba->pcidev->dev, mem_size,
1930                                     hba->t2_hash_tbl, hba->t2_hash_tbl_dma);
1931                 hba->t2_hash_tbl = NULL;
1932         }
1933         bnx2fc_free_hash_table(hba);
1934 }