Merge remote-tracking branch 'lsk/v3.10/topic/gator' into linux-linaro-lsk
[firefly-linux-kernel-4.4.55.git] / drivers / infiniband / ulp / isert / ib_isert.c
1 /*******************************************************************************
2  * This file contains iSCSI extentions for RDMA (iSER) Verbs
3  *
4  * (c) Copyright 2013 RisingTide Systems LLC.
5  *
6  * Nicholas A. Bellinger <nab@linux-iscsi.org>
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  *
13  * This program is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  * GNU General Public License for more details.
17  ****************************************************************************/
18
19 #include <linux/string.h>
20 #include <linux/module.h>
21 #include <linux/scatterlist.h>
22 #include <linux/socket.h>
23 #include <linux/in.h>
24 #include <linux/in6.h>
25 #include <rdma/ib_verbs.h>
26 #include <rdma/rdma_cm.h>
27 #include <target/target_core_base.h>
28 #include <target/target_core_fabric.h>
29 #include <target/iscsi/iscsi_transport.h>
30 #include <linux/semaphore.h>
31
32 #include "isert_proto.h"
33 #include "ib_isert.h"
34
35 #define ISERT_MAX_CONN          8
36 #define ISER_MAX_RX_CQ_LEN      (ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
37 #define ISER_MAX_TX_CQ_LEN      (ISERT_QP_MAX_REQ_DTOS  * ISERT_MAX_CONN)
38
39 static DEFINE_MUTEX(device_list_mutex);
40 static LIST_HEAD(device_list);
41 static struct workqueue_struct *isert_rx_wq;
42 static struct workqueue_struct *isert_comp_wq;
43 static struct kmem_cache *isert_cmd_cache;
44
45 static void
46 isert_qp_event_callback(struct ib_event *e, void *context)
47 {
48         struct isert_conn *isert_conn = (struct isert_conn *)context;
49
50         pr_err("isert_qp_event_callback event: %d\n", e->event);
51         switch (e->event) {
52         case IB_EVENT_COMM_EST:
53                 rdma_notify(isert_conn->conn_cm_id, IB_EVENT_COMM_EST);
54                 break;
55         case IB_EVENT_QP_LAST_WQE_REACHED:
56                 pr_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED:\n");
57                 break;
58         default:
59                 break;
60         }
61 }
62
63 static int
64 isert_query_device(struct ib_device *ib_dev, struct ib_device_attr *devattr)
65 {
66         int ret;
67
68         ret = ib_query_device(ib_dev, devattr);
69         if (ret) {
70                 pr_err("ib_query_device() failed: %d\n", ret);
71                 return ret;
72         }
73         pr_debug("devattr->max_sge: %d\n", devattr->max_sge);
74         pr_debug("devattr->max_sge_rd: %d\n", devattr->max_sge_rd);
75
76         return 0;
77 }
78
79 static int
80 isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
81 {
82         struct isert_device *device = isert_conn->conn_device;
83         struct ib_qp_init_attr attr;
84         struct ib_device_attr devattr;
85         int ret, index, min_index = 0;
86
87         memset(&devattr, 0, sizeof(struct ib_device_attr));
88         ret = isert_query_device(cma_id->device, &devattr);
89         if (ret)
90                 return ret;
91
92         mutex_lock(&device_list_mutex);
93         for (index = 0; index < device->cqs_used; index++)
94                 if (device->cq_active_qps[index] <
95                     device->cq_active_qps[min_index])
96                         min_index = index;
97         device->cq_active_qps[min_index]++;
98         pr_debug("isert_conn_setup_qp: Using min_index: %d\n", min_index);
99         mutex_unlock(&device_list_mutex);
100
101         memset(&attr, 0, sizeof(struct ib_qp_init_attr));
102         attr.event_handler = isert_qp_event_callback;
103         attr.qp_context = isert_conn;
104         attr.send_cq = device->dev_tx_cq[min_index];
105         attr.recv_cq = device->dev_rx_cq[min_index];
106         attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS;
107         attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS;
108         /*
109          * FIXME: Use devattr.max_sge - 2 for max_send_sge as
110          * work-around for RDMA_READ..
111          */
112         attr.cap.max_send_sge = devattr.max_sge - 2;
113         isert_conn->max_sge = attr.cap.max_send_sge;
114
115         attr.cap.max_recv_sge = 1;
116         attr.sq_sig_type = IB_SIGNAL_REQ_WR;
117         attr.qp_type = IB_QPT_RC;
118
119         pr_debug("isert_conn_setup_qp cma_id->device: %p\n",
120                  cma_id->device);
121         pr_debug("isert_conn_setup_qp conn_pd->device: %p\n",
122                  isert_conn->conn_pd->device);
123
124         ret = rdma_create_qp(cma_id, isert_conn->conn_pd, &attr);
125         if (ret) {
126                 pr_err("rdma_create_qp failed for cma_id %d\n", ret);
127                 return ret;
128         }
129         isert_conn->conn_qp = cma_id->qp;
130         pr_debug("rdma_create_qp() returned success >>>>>>>>>>>>>>>>>>>>>>>>>.\n");
131
132         return 0;
133 }
134
/* CQ asynchronous event handler: events are informational only; just log. */
static void
isert_cq_event_callback(struct ib_event *e, void *context)
{
	pr_debug("isert_cq_event_callback event: %d\n", e->event);
}
140
141 static int
142 isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
143 {
144         struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
145         struct iser_rx_desc *rx_desc;
146         struct ib_sge *rx_sg;
147         u64 dma_addr;
148         int i, j;
149
150         isert_conn->conn_rx_descs = kzalloc(ISERT_QP_MAX_RECV_DTOS *
151                                 sizeof(struct iser_rx_desc), GFP_KERNEL);
152         if (!isert_conn->conn_rx_descs)
153                 goto fail;
154
155         rx_desc = isert_conn->conn_rx_descs;
156
157         for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++)  {
158                 dma_addr = ib_dma_map_single(ib_dev, (void *)rx_desc,
159                                         ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
160                 if (ib_dma_mapping_error(ib_dev, dma_addr))
161                         goto dma_map_fail;
162
163                 rx_desc->dma_addr = dma_addr;
164
165                 rx_sg = &rx_desc->rx_sg;
166                 rx_sg->addr = rx_desc->dma_addr;
167                 rx_sg->length = ISER_RX_PAYLOAD_SIZE;
168                 rx_sg->lkey = isert_conn->conn_mr->lkey;
169         }
170
171         isert_conn->conn_rx_desc_head = 0;
172         return 0;
173
174 dma_map_fail:
175         rx_desc = isert_conn->conn_rx_descs;
176         for (j = 0; j < i; j++, rx_desc++) {
177                 ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
178                                     ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
179         }
180         kfree(isert_conn->conn_rx_descs);
181         isert_conn->conn_rx_descs = NULL;
182 fail:
183         return -ENOMEM;
184 }
185
186 static void
187 isert_free_rx_descriptors(struct isert_conn *isert_conn)
188 {
189         struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
190         struct iser_rx_desc *rx_desc;
191         int i;
192
193         if (!isert_conn->conn_rx_descs)
194                 return;
195
196         rx_desc = isert_conn->conn_rx_descs;
197         for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++)  {
198                 ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
199                                     ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
200         }
201
202         kfree(isert_conn->conn_rx_descs);
203         isert_conn->conn_rx_descs = NULL;
204 }
205
206 static void isert_cq_tx_callback(struct ib_cq *, void *);
207 static void isert_cq_rx_callback(struct ib_cq *, void *);
208
/*
 * Allocate the per-IB-device resources shared by all isert connections on
 * that device: a PD, a local-write DMA MR, and one RX/TX CQ pair per
 * completion vector (bounded by online CPUs, the device's vector count,
 * and ISERT_MAX_CQ).
 *
 * Returns 0 on success or a negative errno; on failure everything
 * allocated so far is released via the out_cq / out_cq_desc labels.
 */
static int
isert_create_device_ib_res(struct isert_device *device)
{
	struct ib_device *ib_dev = device->ib_device;
	struct isert_cq_desc *cq_desc;
	int ret = 0, i, j;

	/* At most one CQ pair per online CPU / completion vector. */
	device->cqs_used = min_t(int, num_online_cpus(),
				 device->ib_device->num_comp_vectors);
	device->cqs_used = min(ISERT_MAX_CQ, device->cqs_used);
	pr_debug("Using %d CQs, device %s supports %d vectors\n",
		 device->cqs_used, device->ib_device->name,
		 device->ib_device->num_comp_vectors);
	device->cq_desc = kzalloc(sizeof(struct isert_cq_desc) *
				device->cqs_used, GFP_KERNEL);
	if (!device->cq_desc) {
		pr_err("Unable to allocate device->cq_desc\n");
		return -ENOMEM;
	}
	cq_desc = device->cq_desc;

	device->dev_pd = ib_alloc_pd(ib_dev);
	if (IS_ERR(device->dev_pd)) {
		ret = PTR_ERR(device->dev_pd);
		pr_err("ib_alloc_pd failed for dev_pd: %d\n", ret);
		goto out_cq_desc;
	}

	for (i = 0; i < device->cqs_used; i++) {
		cq_desc[i].device = device;
		cq_desc[i].cq_index = i;

		device->dev_rx_cq[i] = ib_create_cq(device->ib_device,
						isert_cq_rx_callback,
						isert_cq_event_callback,
						(void *)&cq_desc[i],
						ISER_MAX_RX_CQ_LEN, i);
		if (IS_ERR(device->dev_rx_cq[i])) {
			ret = PTR_ERR(device->dev_rx_cq[i]);
			/* NULL so the unwind loop's guard skips this slot. */
			device->dev_rx_cq[i] = NULL;
			goto out_cq;
		}

		device->dev_tx_cq[i] = ib_create_cq(device->ib_device,
						isert_cq_tx_callback,
						isert_cq_event_callback,
						(void *)&cq_desc[i],
						ISER_MAX_TX_CQ_LEN, i);
		if (IS_ERR(device->dev_tx_cq[i])) {
			ret = PTR_ERR(device->dev_tx_cq[i]);
			device->dev_tx_cq[i] = NULL;
			goto out_cq;
		}

		/* Arm both CQs for their first completion notification. */
		ret = ib_req_notify_cq(device->dev_rx_cq[i], IB_CQ_NEXT_COMP);
		if (ret)
			goto out_cq;

		ret = ib_req_notify_cq(device->dev_tx_cq[i], IB_CQ_NEXT_COMP);
		if (ret)
			goto out_cq;
	}

	device->dev_mr = ib_get_dma_mr(device->dev_pd, IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(device->dev_mr)) {
		ret = PTR_ERR(device->dev_mr);
		pr_err("ib_get_dma_mr failed for dev_mr: %d\n", ret);
		goto out_cq;
	}

	return 0;

out_cq:
	/*
	 * NOTE(review): only iterations [0, i) are unwound here; CQs created
	 * in the failing iteration i (e.g. dev_rx_cq[i] when the TX CQ or a
	 * notify call fails) are not destroyed — looks like a leak, confirm.
	 */
	for (j = 0; j < i; j++) {
		cq_desc = &device->cq_desc[j];

		if (device->dev_rx_cq[j]) {
			cancel_work_sync(&cq_desc->cq_rx_work);
			ib_destroy_cq(device->dev_rx_cq[j]);
		}
		if (device->dev_tx_cq[j]) {
			cancel_work_sync(&cq_desc->cq_tx_work);
			ib_destroy_cq(device->dev_tx_cq[j]);
		}
	}
	ib_dealloc_pd(device->dev_pd);

out_cq_desc:
	kfree(device->cq_desc);

	return ret;
}
301
302 static void
303 isert_free_device_ib_res(struct isert_device *device)
304 {
305         struct isert_cq_desc *cq_desc;
306         int i;
307
308         for (i = 0; i < device->cqs_used; i++) {
309                 cq_desc = &device->cq_desc[i];
310
311                 cancel_work_sync(&cq_desc->cq_rx_work);
312                 cancel_work_sync(&cq_desc->cq_tx_work);
313                 ib_destroy_cq(device->dev_rx_cq[i]);
314                 ib_destroy_cq(device->dev_tx_cq[i]);
315                 device->dev_rx_cq[i] = NULL;
316                 device->dev_tx_cq[i] = NULL;
317         }
318
319         ib_dereg_mr(device->dev_mr);
320         ib_dealloc_pd(device->dev_pd);
321         kfree(device->cq_desc);
322 }
323
324 static void
325 isert_device_try_release(struct isert_device *device)
326 {
327         mutex_lock(&device_list_mutex);
328         device->refcount--;
329         if (!device->refcount) {
330                 isert_free_device_ib_res(device);
331                 list_del(&device->dev_node);
332                 kfree(device);
333         }
334         mutex_unlock(&device_list_mutex);
335 }
336
337 static struct isert_device *
338 isert_device_find_by_ib_dev(struct rdma_cm_id *cma_id)
339 {
340         struct isert_device *device;
341         int ret;
342
343         mutex_lock(&device_list_mutex);
344         list_for_each_entry(device, &device_list, dev_node) {
345                 if (device->ib_device->node_guid == cma_id->device->node_guid) {
346                         device->refcount++;
347                         mutex_unlock(&device_list_mutex);
348                         return device;
349                 }
350         }
351
352         device = kzalloc(sizeof(struct isert_device), GFP_KERNEL);
353         if (!device) {
354                 mutex_unlock(&device_list_mutex);
355                 return ERR_PTR(-ENOMEM);
356         }
357
358         INIT_LIST_HEAD(&device->dev_node);
359
360         device->ib_device = cma_id->device;
361         ret = isert_create_device_ib_res(device);
362         if (ret) {
363                 kfree(device);
364                 mutex_unlock(&device_list_mutex);
365                 return ERR_PTR(ret);
366         }
367
368         device->refcount++;
369         list_add_tail(&device->dev_node, &device_list);
370         mutex_unlock(&device_list_mutex);
371
372         return device;
373 }
374
/*
 * Handle RDMA_CM_EVENT_CONNECT_REQUEST: allocate and initialize a new
 * isert_conn, DMA-map its login request/response buffers, bind it to the
 * shared per-device PD/MR/CQ resources, create its QP, and queue it on the
 * portal's accept list.
 *
 * Returns 0 on success, a negative errno on failure (each error label
 * unwinds exactly what was set up before it), or rdma_reject()'s result
 * when the portal is disabled.
 */
static int
isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
	struct iscsi_np *np = cma_id->context;
	struct isert_np *isert_np = np->np_context;
	struct isert_conn *isert_conn;
	struct isert_device *device;
	struct ib_device *ib_dev = cma_id->device;
	int ret = 0;

	/* Reject immediately if the portal is not accepting logins. */
	spin_lock_bh(&np->np_thread_lock);
	if (!np->enabled) {
		spin_unlock_bh(&np->np_thread_lock);
		pr_debug("iscsi_np is not enabled, reject connect request\n");
		return rdma_reject(cma_id, NULL, 0);
	}
	spin_unlock_bh(&np->np_thread_lock);

	pr_debug("Entering isert_connect_request cma_id: %p, context: %p\n",
		 cma_id, cma_id->context);

	isert_conn = kzalloc(sizeof(struct isert_conn), GFP_KERNEL);
	if (!isert_conn) {
		pr_err("Unable to allocate isert_conn\n");
		return -ENOMEM;
	}
	isert_conn->state = ISER_CONN_INIT;
	INIT_LIST_HEAD(&isert_conn->conn_accept_node);
	init_completion(&isert_conn->conn_login_comp);
	init_completion(&isert_conn->conn_wait);
	init_completion(&isert_conn->conn_wait_comp_err);
	/* Start with two references: kref_init() plus an explicit get. */
	kref_init(&isert_conn->conn_kref);
	kref_get(&isert_conn->conn_kref);
	mutex_init(&isert_conn->conn_mutex);

	cma_id->context = isert_conn;
	isert_conn->conn_cm_id = cma_id;
	isert_conn->responder_resources = event->param.conn.responder_resources;
	isert_conn->initiator_depth = event->param.conn.initiator_depth;
	pr_debug("Using responder_resources: %u initiator_depth: %u\n",
		 isert_conn->responder_resources, isert_conn->initiator_depth);

	/* One buffer: request region first, response at a fixed offset. */
	isert_conn->login_buf = kzalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
					ISER_RX_LOGIN_SIZE, GFP_KERNEL);
	if (!isert_conn->login_buf) {
		pr_err("Unable to allocate isert_conn->login_buf\n");
		ret = -ENOMEM;
		goto out;
	}

	isert_conn->login_req_buf = isert_conn->login_buf;
	isert_conn->login_rsp_buf = isert_conn->login_buf +
				    ISCSI_DEF_MAX_RECV_SEG_LEN;
	pr_debug("Set login_buf: %p login_req_buf: %p login_rsp_buf: %p\n",
		 isert_conn->login_buf, isert_conn->login_req_buf,
		 isert_conn->login_rsp_buf);

	/* Request buffer is written by the device (FROM_DEVICE)... */
	isert_conn->login_req_dma = ib_dma_map_single(ib_dev,
				(void *)isert_conn->login_req_buf,
				ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);

	ret = ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma);
	if (ret) {
		pr_err("ib_dma_mapping_error failed for login_req_dma: %d\n",
		       ret);
		isert_conn->login_req_dma = 0;
		goto out_login_buf;
	}

	/* ...response buffer is read by the device (TO_DEVICE). */
	isert_conn->login_rsp_dma = ib_dma_map_single(ib_dev,
					(void *)isert_conn->login_rsp_buf,
					ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);

	ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma);
	if (ret) {
		pr_err("ib_dma_mapping_error failed for login_rsp_dma: %d\n",
		       ret);
		isert_conn->login_rsp_dma = 0;
		goto out_req_dma_map;
	}

	/* Find (or create) the shared per-IB-device PD/MR/CQ resources. */
	device = isert_device_find_by_ib_dev(cma_id);
	if (IS_ERR(device)) {
		ret = PTR_ERR(device);
		goto out_rsp_dma_map;
	}

	isert_conn->conn_device = device;
	isert_conn->conn_pd = device->dev_pd;
	isert_conn->conn_mr = device->dev_mr;

	ret = isert_conn_setup_qp(isert_conn, cma_id);
	if (ret)
		goto out_conn_dev;

	mutex_lock(&isert_np->np_accept_mutex);
	list_add_tail(&isert_conn->conn_accept_node, &isert_np->np_accept_list);
	mutex_unlock(&isert_np->np_accept_mutex);

	/* Wake whichever thread is waiting on np_sem for new connections. */
	pr_debug("isert_connect_request() up np_sem np: %p\n", np);
	up(&isert_np->np_sem);
	return 0;

out_conn_dev:
	isert_device_try_release(device);
out_rsp_dma_map:
	ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
			    ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
out_req_dma_map:
	ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
			    ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);
out_login_buf:
	kfree(isert_conn->login_buf);
out:
	kfree(isert_conn);
	return ret;
}
492
/*
 * Final teardown of an isert_conn (reached from the last kref put):
 * release the QP and its CQ-load accounting, free the RX descriptor ring,
 * destroy the cm_id, unmap and free the login buffers, free the
 * connection, and finally drop the per-device resource reference.
 */
static void
isert_connect_release(struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct isert_device *device = isert_conn->conn_device;
	int cq_index;

	pr_debug("Entering isert_connect_release(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");

	if (isert_conn->conn_qp) {
		/*
		 * Undo the cq_active_qps count taken in isert_conn_setup_qp().
		 * NOTE(review): the increment there is under device_list_mutex
		 * but this decrement is not — confirm no concurrent access.
		 */
		cq_index = ((struct isert_cq_desc *)
			isert_conn->conn_qp->recv_cq->cq_context)->cq_index;
		pr_debug("isert_connect_release: cq_index: %d\n", cq_index);
		isert_conn->conn_device->cq_active_qps[cq_index]--;

		rdma_destroy_qp(isert_conn->conn_cm_id);
	}

	isert_free_rx_descriptors(isert_conn);
	rdma_destroy_id(isert_conn->conn_cm_id);

	if (isert_conn->login_buf) {
		ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
				    ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
		ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
				    ISCSI_DEF_MAX_RECV_SEG_LEN,
				    DMA_FROM_DEVICE);
		kfree(isert_conn->login_buf);
	}
	kfree(isert_conn);

	if (device)
		isert_device_try_release(device);

	pr_debug("Leaving isert_connect_release >>>>>>>>>>>>\n");
}
529
/*
 * RDMA_CM_EVENT_ESTABLISHED hook — intentionally empty; connection state
 * transitions happen on the login path, not at the CM established event.
 */
static void
isert_connected_handler(struct rdma_cm_id *cma_id)
{
}
535
536 static void
537 isert_release_conn_kref(struct kref *kref)
538 {
539         struct isert_conn *isert_conn = container_of(kref,
540                                 struct isert_conn, conn_kref);
541
542         pr_debug("Calling isert_connect_release for final kref %s/%d\n",
543                  current->comm, current->pid);
544
545         isert_connect_release(isert_conn);
546 }
547
/* Drop one connection reference; the final put frees the connection. */
static void
isert_put_conn(struct isert_conn *isert_conn)
{
	kref_put(&isert_conn->conn_kref, isert_release_conn_kref);
}
553
/*
 * Deferred disconnect handling, scheduled from isert_disconnected_handler().
 * Under conn_mutex: move an UP connection to TERMINATING, and if nothing
 * is in flight complete conn_wait right away; otherwise optionally send a
 * DREQ via rdma_disconnect().  Every path drops the reference taken when
 * the work was scheduled.
 */
static void
isert_disconnect_work(struct work_struct *work)
{
	struct isert_conn *isert_conn = container_of(work,
				struct isert_conn, conn_logout_work);

	pr_debug("isert_disconnect_work(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
	mutex_lock(&isert_conn->conn_mutex);
	if (isert_conn->state == ISER_CONN_UP)
		isert_conn->state = ISER_CONN_TERMINATING;

	/* Nothing posted on the QP: wake the waiter and drop our ref. */
	if (isert_conn->post_recv_buf_count == 0 &&
	    atomic_read(&isert_conn->post_send_buf_count) == 0) {
		mutex_unlock(&isert_conn->conn_mutex);
		goto wake_up;
	}
	/* No cm_id left to disconnect: just drop the reference. */
	if (!isert_conn->conn_cm_id) {
		mutex_unlock(&isert_conn->conn_mutex);
		isert_put_conn(isert_conn);
		return;
	}

	if (isert_conn->disconnect) {
		/* Send DREQ/DREP towards our initiator */
		rdma_disconnect(isert_conn->conn_cm_id);
	}

	mutex_unlock(&isert_conn->conn_mutex);

wake_up:
	complete(&isert_conn->conn_wait);
	isert_put_conn(isert_conn);
}
587
588 static void
589 isert_disconnected_handler(struct rdma_cm_id *cma_id, bool disconnect)
590 {
591         struct isert_conn *isert_conn = (struct isert_conn *)cma_id->context;
592
593         isert_conn->disconnect = disconnect;
594         INIT_WORK(&isert_conn->conn_logout_work, isert_disconnect_work);
595         schedule_work(&isert_conn->conn_logout_work);
596 }
597
/*
 * RDMA CM event dispatcher.  CONNECT_REQUEST builds a new connection;
 * ADDR_CHANGE / DISCONNECTED / DEVICE_REMOVAL set disconnect = true and
 * then deliberately fall through into the TIMEWAIT_EXIT case so all four
 * events share the same teardown path.
 */
static int
isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
	int ret = 0;
	bool disconnect = false;

	pr_debug("isert_cma_handler: event %d status %d conn %p id %p\n",
		 event->event, event->status, cma_id->context, cma_id);

	switch (event->event) {
	case RDMA_CM_EVENT_CONNECT_REQUEST:
		ret = isert_connect_request(cma_id, event);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		isert_connected_handler(cma_id);
		break;
	case RDMA_CM_EVENT_ADDR_CHANGE:    /* FALLTHRU */
	case RDMA_CM_EVENT_DISCONNECTED:   /* FALLTHRU */
	case RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */
		disconnect = true;
		/* intentional fall-through into the common disconnect path */
	case RDMA_CM_EVENT_TIMEWAIT_EXIT:  /* FALLTHRU */
		isert_disconnected_handler(cma_id, disconnect);
		break;
	case RDMA_CM_EVENT_CONNECT_ERROR:
	default:
		pr_err("Unhandled RDMA CMA event: %d\n", event->event);
		break;
	}

	if (ret != 0) {
		pr_err("isert_cma_handler failed RDMA_CM_EVENT: 0x%08x %d\n",
		       event->event, ret);
		dump_stack();
	}

	return ret;
}
635
636 static int
637 isert_post_recv(struct isert_conn *isert_conn, u32 count)
638 {
639         struct ib_recv_wr *rx_wr, *rx_wr_failed;
640         int i, ret;
641         unsigned int rx_head = isert_conn->conn_rx_desc_head;
642         struct iser_rx_desc *rx_desc;
643
644         for (rx_wr = isert_conn->conn_rx_wr, i = 0; i < count; i++, rx_wr++) {
645                 rx_desc         = &isert_conn->conn_rx_descs[rx_head];
646                 rx_wr->wr_id    = (unsigned long)rx_desc;
647                 rx_wr->sg_list  = &rx_desc->rx_sg;
648                 rx_wr->num_sge  = 1;
649                 rx_wr->next     = rx_wr + 1;
650                 rx_head = (rx_head + 1) & (ISERT_QP_MAX_RECV_DTOS - 1);
651         }
652
653         rx_wr--;
654         rx_wr->next = NULL; /* mark end of work requests list */
655
656         isert_conn->post_recv_buf_count += count;
657         ret = ib_post_recv(isert_conn->conn_qp, isert_conn->conn_rx_wr,
658                                 &rx_wr_failed);
659         if (ret) {
660                 pr_err("ib_post_recv() failed with ret: %d\n", ret);
661                 isert_conn->post_recv_buf_count -= count;
662         } else {
663                 pr_debug("isert_post_recv(): Posted %d RX buffers\n", count);
664                 isert_conn->conn_rx_desc_head = rx_head;
665         }
666         return ret;
667 }
668
669 static int
670 isert_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc)
671 {
672         struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
673         struct ib_send_wr send_wr, *send_wr_failed;
674         int ret;
675
676         ib_dma_sync_single_for_device(ib_dev, tx_desc->dma_addr,
677                                       ISER_HEADERS_LEN, DMA_TO_DEVICE);
678
679         send_wr.next    = NULL;
680         send_wr.wr_id   = (unsigned long)tx_desc;
681         send_wr.sg_list = tx_desc->tx_sg;
682         send_wr.num_sge = tx_desc->num_sge;
683         send_wr.opcode  = IB_WR_SEND;
684         send_wr.send_flags = IB_SEND_SIGNALED;
685
686         atomic_inc(&isert_conn->post_send_buf_count);
687
688         ret = ib_post_send(isert_conn->conn_qp, &send_wr, &send_wr_failed);
689         if (ret) {
690                 pr_err("ib_post_send() failed, ret: %d\n", ret);
691                 atomic_dec(&isert_conn->post_send_buf_count);
692         }
693
694         return ret;
695 }
696
697 static void
698 isert_create_send_desc(struct isert_conn *isert_conn,
699                        struct isert_cmd *isert_cmd,
700                        struct iser_tx_desc *tx_desc)
701 {
702         struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
703
704         ib_dma_sync_single_for_cpu(ib_dev, tx_desc->dma_addr,
705                                    ISER_HEADERS_LEN, DMA_TO_DEVICE);
706
707         memset(&tx_desc->iser_header, 0, sizeof(struct iser_hdr));
708         tx_desc->iser_header.flags = ISER_VER;
709
710         tx_desc->num_sge = 1;
711         tx_desc->isert_cmd = isert_cmd;
712
713         if (tx_desc->tx_sg[0].lkey != isert_conn->conn_mr->lkey) {
714                 tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey;
715                 pr_debug("tx_desc %p lkey mismatch, fixing\n", tx_desc);
716         }
717 }
718
719 static int
720 isert_init_tx_hdrs(struct isert_conn *isert_conn,
721                    struct iser_tx_desc *tx_desc)
722 {
723         struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
724         u64 dma_addr;
725
726         dma_addr = ib_dma_map_single(ib_dev, (void *)tx_desc,
727                         ISER_HEADERS_LEN, DMA_TO_DEVICE);
728         if (ib_dma_mapping_error(ib_dev, dma_addr)) {
729                 pr_err("ib_dma_mapping_error() failed\n");
730                 return -ENOMEM;
731         }
732
733         tx_desc->dma_addr = dma_addr;
734         tx_desc->tx_sg[0].addr  = tx_desc->dma_addr;
735         tx_desc->tx_sg[0].length = ISER_HEADERS_LEN;
736         tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey;
737
738         pr_debug("isert_init_tx_hdrs: Setup tx_sg[0].addr: 0x%llx length: %u"
739                  " lkey: 0x%08x\n", tx_desc->tx_sg[0].addr,
740                  tx_desc->tx_sg[0].length, tx_desc->tx_sg[0].lkey);
741
742         return 0;
743 }
744
745 static void
746 isert_init_send_wr(struct isert_cmd *isert_cmd, struct ib_send_wr *send_wr)
747 {
748         isert_cmd->rdma_wr.iser_ib_op = ISER_IB_SEND;
749         send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
750         send_wr->opcode = IB_WR_SEND;
751         send_wr->send_flags = IB_SEND_SIGNALED;
752         send_wr->sg_list = &isert_cmd->tx_desc.tx_sg[0];
753         send_wr->num_sge = isert_cmd->tx_desc.num_sge;
754 }
755
756 static int
757 isert_rdma_post_recvl(struct isert_conn *isert_conn)
758 {
759         struct ib_recv_wr rx_wr, *rx_wr_fail;
760         struct ib_sge sge;
761         int ret;
762
763         memset(&sge, 0, sizeof(struct ib_sge));
764         sge.addr = isert_conn->login_req_dma;
765         sge.length = ISER_RX_LOGIN_SIZE;
766         sge.lkey = isert_conn->conn_mr->lkey;
767
768         pr_debug("Setup sge: addr: %llx length: %d 0x%08x\n",
769                 sge.addr, sge.length, sge.lkey);
770
771         memset(&rx_wr, 0, sizeof(struct ib_recv_wr));
772         rx_wr.wr_id = (unsigned long)isert_conn->login_req_buf;
773         rx_wr.sg_list = &sge;
774         rx_wr.num_sge = 1;
775
776         isert_conn->post_recv_buf_count++;
777         ret = ib_post_recv(isert_conn->conn_qp, &rx_wr, &rx_wr_fail);
778         if (ret) {
779                 pr_err("ib_post_recv() failed: %d\n", ret);
780                 isert_conn->post_recv_buf_count--;
781         }
782
783         pr_debug("ib_post_recv(): returned success >>>>>>>>>>>>>>>>>>>>>>>>\n");
784         return ret;
785 }
786
/*
 * isert_put_login_tx() - send an iSCSI Login Response PDU on the iSER
 * connection.
 *
 * @conn:   iSCSI connection owning the login exchange
 * @login:  login state holding the staged response header and payload
 * @length: byte count of the response payload in login->rsp_buf
 *
 * Copies the response header/payload into the pre-mapped login TX
 * descriptor and posts it.  For the final (login_complete) response the
 * RX descriptor ring is allocated and posted *before* the reply goes
 * out, so full-feature-phase PDUs arriving right after the reply land
 * in posted buffers.  Returns 0 on success or a negative errno from
 * the allocation/post helpers.
 */
static int
isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
                   u32 length)
{
        struct isert_conn *isert_conn = conn->context;
        struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
        struct iser_tx_desc *tx_desc = &isert_conn->conn_login_tx_desc;
        int ret;

        isert_create_send_desc(isert_conn, NULL, tx_desc);

        /* Response header was built by the core into login->rsp[]. */
        memcpy(&tx_desc->iscsi_header, &login->rsp[0],
               sizeof(struct iscsi_hdr));

        isert_init_tx_hdrs(isert_conn, tx_desc);

        if (length > 0) {
                struct ib_sge *tx_dsg = &tx_desc->tx_sg[1];

                /*
                 * login_rsp_buf/login_rsp_dma are a long-lived mapping;
                 * sync around the CPU copy so the HCA observes the
                 * fresh payload bytes.
                 */
                ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_rsp_dma,
                                           length, DMA_TO_DEVICE);

                memcpy(isert_conn->login_rsp_buf, login->rsp_buf, length);

                ib_dma_sync_single_for_device(ib_dev, isert_conn->login_rsp_dma,
                                              length, DMA_TO_DEVICE);

                /* Payload rides as a second SGE after the headers. */
                tx_dsg->addr    = isert_conn->login_rsp_dma;
                tx_dsg->length  = length;
                tx_dsg->lkey    = isert_conn->conn_mr->lkey;
                tx_desc->num_sge = 2;
        }
        if (!login->login_failed) {
                if (login->login_complete) {
                        /*
                         * Last login response: stand up the RX ring and
                         * mark the connection up before posting the send.
                         */
                        ret = isert_alloc_rx_descriptors(isert_conn);
                        if (ret)
                                return ret;

                        ret = isert_post_recv(isert_conn, ISERT_MIN_POSTED_RX);
                        if (ret)
                                return ret;

                        isert_conn->state = ISER_CONN_UP;
                        goto post_send;
                }

                /*
                 * More login PDUs expected: re-post the dedicated login
                 * receive buffer for the next request.
                 */
                ret = isert_rdma_post_recvl(isert_conn);
                if (ret)
                        return ret;
        }
        /* On login_failed we still send the (failure) response below. */
post_send:
        ret = isert_post_send(isert_conn, tx_desc);
        if (ret)
                return ret;

        return 0;
}
844
845 static void
846 isert_rx_login_req(struct iser_rx_desc *rx_desc, int rx_buflen,
847                    struct isert_conn *isert_conn)
848 {
849         struct iscsi_conn *conn = isert_conn->conn;
850         struct iscsi_login *login = conn->conn_login;
851         int size;
852
853         if (!login) {
854                 pr_err("conn->conn_login is NULL\n");
855                 dump_stack();
856                 return;
857         }
858
859         if (login->first_request) {
860                 struct iscsi_login_req *login_req =
861                         (struct iscsi_login_req *)&rx_desc->iscsi_header;
862                 /*
863                  * Setup the initial iscsi_login values from the leading
864                  * login request PDU.
865                  */
866                 login->leading_connection = (!login_req->tsih) ? 1 : 0;
867                 login->current_stage =
868                         (login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK)
869                          >> 2;
870                 login->version_min      = login_req->min_version;
871                 login->version_max      = login_req->max_version;
872                 memcpy(login->isid, login_req->isid, 6);
873                 login->cmd_sn           = be32_to_cpu(login_req->cmdsn);
874                 login->init_task_tag    = login_req->itt;
875                 login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn);
876                 login->cid              = be16_to_cpu(login_req->cid);
877                 login->tsih             = be16_to_cpu(login_req->tsih);
878         }
879
880         memcpy(&login->req[0], (void *)&rx_desc->iscsi_header, ISCSI_HDR_LEN);
881
882         size = min(rx_buflen, MAX_KEY_VALUE_PAIRS);
883         pr_debug("Using login payload size: %d, rx_buflen: %d MAX_KEY_VALUE_PAIRS: %d\n",
884                  size, rx_buflen, MAX_KEY_VALUE_PAIRS);
885         memcpy(login->req_buf, &rx_desc->data[0], size);
886
887         complete(&isert_conn->conn_login_comp);
888 }
889
890 static void
891 isert_release_cmd(struct iscsi_cmd *cmd)
892 {
893         struct isert_cmd *isert_cmd = container_of(cmd, struct isert_cmd,
894                                                    iscsi_cmd);
895
896         pr_debug("Entering isert_release_cmd %p >>>>>>>>>>>>>>>.\n", isert_cmd);
897
898         kfree(cmd->buf_ptr);
899         kfree(cmd->tmr_req);
900
901         kmem_cache_free(isert_cmd_cache, isert_cmd);
902 }
903
904 static struct iscsi_cmd
905 *isert_alloc_cmd(struct iscsi_conn *conn, gfp_t gfp)
906 {
907         struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
908         struct isert_cmd *isert_cmd;
909
910         isert_cmd = kmem_cache_zalloc(isert_cmd_cache, gfp);
911         if (!isert_cmd) {
912                 pr_err("Unable to allocate isert_cmd\n");
913                 return NULL;
914         }
915         isert_cmd->conn = isert_conn;
916         isert_cmd->iscsi_cmd.release_cmd = &isert_release_cmd;
917
918         return &isert_cmd->iscsi_cmd;
919 }
920
/*
 * isert_handle_scsi_cmd() - handle a received iSCSI SCSI Command PDU.
 *
 * @isert_conn: receiving iSER connection
 * @isert_cmd:  command wrapper allocated for this PDU
 * @rx_desc:    RX descriptor whose data[] may carry immediate data
 * @buf:        raw PDU header (struct iscsi_scsi_req)
 *
 * Sets up the command via the iSCSI core, copies any immediate data
 * arriving inline in the RX descriptor into the se_cmd scatterlist,
 * then queues the command for CmdSN-ordered execution.  Returns a
 * negative value only when iscsit_setup_scsi_cmd() fails; all later
 * failures return 0.
 */
static int
isert_handle_scsi_cmd(struct isert_conn *isert_conn,
                      struct isert_cmd *isert_cmd, struct iser_rx_desc *rx_desc,
                      unsigned char *buf)
{
        struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;
        struct iscsi_conn *conn = isert_conn->conn;
        struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf;
        struct scatterlist *sg;
        int imm_data, imm_data_len, unsol_data, sg_nents, rc;
        bool dump_payload = false;

        rc = iscsit_setup_scsi_cmd(conn, cmd, buf);
        if (rc < 0)
                return rc;

        /* Snapshot before iscsit_process_scsi_cmd() can mutate cmd. */
        imm_data = cmd->immediate_data;
        imm_data_len = cmd->first_burst_len;
        unsol_data = cmd->unsolicited_data;

        rc = iscsit_process_scsi_cmd(conn, cmd, hdr);
        if (rc < 0) {
                /* NOTE(review): the error is deliberately swallowed here —
                 * presumably already handled inside the core; confirm. */
                return 0;
        } else if (rc > 0) {
                /* Positive rc: still sequence the command, drop the payload. */
                dump_payload = true;
                goto sequence_cmd;
        }

        if (!imm_data)
                return 0;

        /*
         * Immediate data arrived inline in the RX descriptor; copy it
         * into the head of the se_cmd data scatterlist.
         */
        sg = &cmd->se_cmd.t_data_sg[0];
        sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE));

        pr_debug("Copying Immediate SG: %p sg_nents: %u from %p imm_data_len: %d\n",
                 sg, sg_nents, &rx_desc->data[0], imm_data_len);

        sg_copy_from_buffer(sg, sg_nents, &rx_desc->data[0], imm_data_len);

        cmd->write_data_done += imm_data_len;

        if (cmd->write_data_done == cmd->se_cmd.data_length) {
                /* Immediate data covered the entire WRITE payload. */
                spin_lock_bh(&cmd->istate_lock);
                cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
                cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
                spin_unlock_bh(&cmd->istate_lock);
        }

sequence_cmd:
        rc = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);

        if (!rc && dump_payload == false && unsol_data)
                iscsit_set_unsoliticed_dataout(cmd);
        else if (dump_payload && imm_data)
                /* Dumped immediate-data command: drop its se_cmd reference. */
                target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);

        return 0;
}
979
/*
 * isert_handle_iscsi_dataout() - handle a received DataOut PDU whose
 * unsolicited write payload arrived inline in the RX descriptor.
 *
 * Returns 0 on success (or when the core consumed the PDU with no
 * matching command), a negative value from the core checks, or -1 for
 * the unexpected solicited / non-page-aligned cases flagged below.
 */
static int
isert_handle_iscsi_dataout(struct isert_conn *isert_conn,
                           struct iser_rx_desc *rx_desc, unsigned char *buf)
{
        struct scatterlist *sg_start;
        struct iscsi_conn *conn = isert_conn->conn;
        struct iscsi_cmd *cmd = NULL;
        struct iscsi_data *hdr = (struct iscsi_data *)buf;
        u32 unsol_data_len = ntoh24(hdr->dlength);
        int rc, sg_nents, sg_off, page_off;

        rc = iscsit_check_dataout_hdr(conn, buf, &cmd);
        if (rc < 0)
                return rc;
        else if (!cmd)
                /* Core accepted the PDU without matching a command. */
                return 0;
        /*
         * FIXME: Unexpected unsolicited_data out
         */
        if (!cmd->unsolicited_data) {
                pr_err("Received unexpected solicited data payload\n");
                dump_stack();
                return -1;
        }

        pr_debug("Unsolicited DataOut unsol_data_len: %u, write_data_done: %u, data_length: %u\n",
                 unsol_data_len, cmd->write_data_done, cmd->se_cmd.data_length);

        /* Resume copying at the current write offset into the SG list. */
        sg_off = cmd->write_data_done / PAGE_SIZE;
        sg_start = &cmd->se_cmd.t_data_sg[sg_off];
        sg_nents = max(1UL, DIV_ROUND_UP(unsol_data_len, PAGE_SIZE));
        page_off = cmd->write_data_done % PAGE_SIZE;
        /*
         * FIXME: Non page-aligned unsolicited_data out
         */
        if (page_off) {
                pr_err("Received unexpected non-page aligned data payload\n");
                dump_stack();
                return -1;
        }
        pr_debug("Copying DataOut: sg_start: %p, sg_off: %u sg_nents: %u from %p %u\n",
                 sg_start, sg_off, sg_nents, &rx_desc->data[0], unsol_data_len);

        sg_copy_from_buffer(sg_start, sg_nents, &rx_desc->data[0],
                            unsol_data_len);

        /* Let the core account the payload and advance DataOut state. */
        rc = iscsit_check_dataout_payload(cmd, hdr, false);
        if (rc < 0)
                return rc;

        return 0;
}
1032
/*
 * isert_rx_opcode() - dispatch a received iSCSI PDU by opcode.
 *
 * The read/write stag (rkey) and VA pairs parsed from the iSER header
 * are stashed on SCSI commands so later RDMA transfers can target the
 * initiator's advertised buffers.  Returns the handler's result, or
 * -EINVAL when command allocation fails or the opcode is unknown.
 */
static int
isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
                uint32_t read_stag, uint64_t read_va,
                uint32_t write_stag, uint64_t write_va)
{
        struct iscsi_hdr *hdr = &rx_desc->iscsi_header;
        struct iscsi_conn *conn = isert_conn->conn;
        struct iscsi_cmd *cmd;
        struct isert_cmd *isert_cmd;
        int ret = -EINVAL;
        u8 opcode = (hdr->opcode & ISCSI_OPCODE_MASK);

        switch (opcode) {
        case ISCSI_OP_SCSI_CMD:
                cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
                if (!cmd)
                        break;

                /* Remember the initiator's RDMA hand-off points. */
                isert_cmd = container_of(cmd, struct isert_cmd, iscsi_cmd);
                isert_cmd->read_stag = read_stag;
                isert_cmd->read_va = read_va;
                isert_cmd->write_stag = write_stag;
                isert_cmd->write_va = write_va;

                ret = isert_handle_scsi_cmd(isert_conn, isert_cmd,
                                        rx_desc, (unsigned char *)hdr);
                break;
        case ISCSI_OP_NOOP_OUT:
                cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
                if (!cmd)
                        break;

                ret = iscsit_handle_nop_out(conn, cmd, (unsigned char *)hdr);
                break;
        case ISCSI_OP_SCSI_DATA_OUT:
                /* DataOut targets an existing command; no allocation. */
                ret = isert_handle_iscsi_dataout(isert_conn, rx_desc,
                                                (unsigned char *)hdr);
                break;
        case ISCSI_OP_SCSI_TMFUNC:
                cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
                if (!cmd)
                        break;

                ret = iscsit_handle_task_mgt_cmd(conn, cmd,
                                                (unsigned char *)hdr);
                break;
        case ISCSI_OP_LOGOUT:
                cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
                if (!cmd)
                        break;

                ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr);
                /* Block (bounded) until the logout response is sent. */
                if (ret > 0)
                        wait_for_completion_timeout(&conn->conn_logout_comp,
                                                    SECONDS_FOR_LOGOUT_COMP *
                                                    HZ);
                break;
        default:
                pr_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode);
                dump_stack();
                break;
        }

        return ret;
}
1098
1099 static void
1100 isert_rx_do_work(struct iser_rx_desc *rx_desc, struct isert_conn *isert_conn)
1101 {
1102         struct iser_hdr *iser_hdr = &rx_desc->iser_header;
1103         uint64_t read_va = 0, write_va = 0;
1104         uint32_t read_stag = 0, write_stag = 0;
1105         int rc;
1106
1107         switch (iser_hdr->flags & 0xF0) {
1108         case ISCSI_CTRL:
1109                 if (iser_hdr->flags & ISER_RSV) {
1110                         read_stag = be32_to_cpu(iser_hdr->read_stag);
1111                         read_va = be64_to_cpu(iser_hdr->read_va);
1112                         pr_debug("ISER_RSV: read_stag: 0x%08x read_va: 0x%16llx\n",
1113                                  read_stag, (unsigned long long)read_va);
1114                 }
1115                 if (iser_hdr->flags & ISER_WSV) {
1116                         write_stag = be32_to_cpu(iser_hdr->write_stag);
1117                         write_va = be64_to_cpu(iser_hdr->write_va);
1118                         pr_debug("ISER_WSV: write__stag: 0x%08x write_va: 0x%16llx\n",
1119                                  write_stag, (unsigned long long)write_va);
1120                 }
1121
1122                 pr_debug("ISER ISCSI_CTRL PDU\n");
1123                 break;
1124         case ISER_HELLO:
1125                 pr_err("iSER Hello message\n");
1126                 break;
1127         default:
1128                 pr_warn("Unknown iSER hdr flags: 0x%02x\n", iser_hdr->flags);
1129                 break;
1130         }
1131
1132         rc = isert_rx_opcode(isert_conn, rx_desc,
1133                              read_stag, read_va, write_stag, write_va);
1134 }
1135
/*
 * isert_rx_completion() - process one successful RX work completion.
 *
 * @desc:       completed RX descriptor (may be the dedicated login buffer)
 * @isert_conn: owning connection
 * @xfer_len:   total bytes received (iSER + iSCSI headers + payload)
 *
 * Distinguishes the single pre-FFP login buffer from ring descriptors,
 * syncs the DMA mapping around CPU access, dispatches the PDU, and —
 * for ring descriptors only — replenishes the RX ring once enough
 * buffers have been consumed.
 */
static void
isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn,
                    unsigned long xfer_len)
{
        struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
        struct iscsi_hdr *hdr;
        u64 rx_dma;
        int rx_buflen, outstanding;

        if ((char *)desc == isert_conn->login_req_buf) {
                /* Login exchange uses its own buffer/mapping. */
                rx_dma = isert_conn->login_req_dma;
                rx_buflen = ISER_RX_LOGIN_SIZE;
                pr_debug("ISER login_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
                         rx_dma, rx_buflen);
        } else {
                rx_dma = desc->dma_addr;
                rx_buflen = ISER_RX_PAYLOAD_SIZE;
                pr_debug("ISER req_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
                         rx_dma, rx_buflen);
        }

        /* Make the device-written PDU visible to the CPU. */
        ib_dma_sync_single_for_cpu(ib_dev, rx_dma, rx_buflen, DMA_FROM_DEVICE);

        hdr = &desc->iscsi_header;
        pr_debug("iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n",
                 hdr->opcode, hdr->itt, hdr->flags,
                 (int)(xfer_len - ISER_HEADERS_LEN));

        if ((char *)desc == isert_conn->login_req_buf)
                isert_rx_login_req(desc, xfer_len - ISER_HEADERS_LEN,
                                   isert_conn);
        else
                isert_rx_do_work(desc, isert_conn);

        /* Hand the buffer back to the device for re-posting. */
        ib_dma_sync_single_for_device(ib_dev, rx_dma, rx_buflen,
                                      DMA_FROM_DEVICE);

        isert_conn->post_recv_buf_count--;
        pr_debug("iSERT: Decremented post_recv_buf_count: %d\n",
                 isert_conn->post_recv_buf_count);

        /* The login buffer is re-posted separately, not via the ring. */
        if ((char *)desc == isert_conn->login_req_buf)
                return;

        /* Replenish the RX ring in batches of ISERT_MIN_POSTED_RX. */
        outstanding = isert_conn->post_recv_buf_count;
        if (outstanding + ISERT_MIN_POSTED_RX <= ISERT_QP_MAX_RECV_DTOS) {
                int err, count = min(ISERT_QP_MAX_RECV_DTOS - outstanding,
                                ISERT_MIN_POSTED_RX);
                err = isert_post_recv(isert_conn, count);
                if (err) {
                        pr_err("isert_post_recv() count: %d failed, %d\n",
                               count, err);
                }
        }
}
1191
1192 static void
1193 isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
1194 {
1195         struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
1196         struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1197
1198         pr_debug("isert_unmap_cmd >>>>>>>>>>>>>>>>>>>>>>>\n");
1199
1200         if (wr->sge) {
1201                 ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge, DMA_TO_DEVICE);
1202                 wr->sge = NULL;
1203         }
1204
1205         kfree(wr->send_wr);
1206         wr->send_wr = NULL;
1207
1208         kfree(isert_cmd->ib_sge);
1209         isert_cmd->ib_sge = NULL;
1210 }
1211
/*
 * isert_put_cmd() - final per-opcode teardown of a command.
 *
 * @isert_cmd: command being released
 * @comp_err:  true when called from the TX error-completion path
 *
 * Unlinks the command from the connection list and releases it through
 * the appropriate path for its opcode; anything unrecognized falls
 * through to a plain isert_release_cmd().
 */
static void
isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
{
        struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;
        struct isert_conn *isert_conn = isert_cmd->conn;
        struct iscsi_conn *conn = isert_conn->conn;

        pr_debug("Entering isert_put_cmd: %p\n", isert_cmd);

        switch (cmd->iscsi_opcode) {
        case ISCSI_OP_SCSI_CMD:
                spin_lock_bh(&conn->cmd_lock);
                if (!list_empty(&cmd->i_conn_node))
                        list_del_init(&cmd->i_conn_node);
                spin_unlock_bh(&conn->cmd_lock);

                if (cmd->data_direction == DMA_TO_DEVICE) {
                        iscsit_stop_dataout_timer(cmd);
                        /*
                         * Check for special case during comp_err where
                         * WRITE_PENDING has been handed off from core,
                         * but requires an extra target_put_sess_cmd()
                         * before transport_generic_free_cmd() below.
                         */
                        if (comp_err &&
                            cmd->se_cmd.t_state == TRANSPORT_WRITE_PENDING) {
                                struct se_cmd *se_cmd = &cmd->se_cmd;

                                target_put_sess_cmd(se_cmd->se_sess, se_cmd);
                        }
                }

                /* Unmap RDMA resources before handing back to the core. */
                isert_unmap_cmd(isert_cmd, isert_conn);
                transport_generic_free_cmd(&cmd->se_cmd, 0);
                break;
        case ISCSI_OP_SCSI_TMFUNC:
                spin_lock_bh(&conn->cmd_lock);
                if (!list_empty(&cmd->i_conn_node))
                        list_del_init(&cmd->i_conn_node);
                spin_unlock_bh(&conn->cmd_lock);

                transport_generic_free_cmd(&cmd->se_cmd, 0);
                break;
        case ISCSI_OP_REJECT:
        case ISCSI_OP_NOOP_OUT:
                spin_lock_bh(&conn->cmd_lock);
                if (!list_empty(&cmd->i_conn_node))
                        list_del_init(&cmd->i_conn_node);
                spin_unlock_bh(&conn->cmd_lock);

                /*
                 * Handle special case for REJECT when iscsi_add_reject*() has
                 * overwritten the original iscsi_opcode assignment, and the
                 * associated cmd->se_cmd needs to be released.
                 */
                if (cmd->se_cmd.se_tfo != NULL) {
                        pr_debug("Calling transport_generic_free_cmd from"
                                 " isert_put_cmd for 0x%02x\n",
                                 cmd->iscsi_opcode);
                        transport_generic_free_cmd(&cmd->se_cmd, 0);
                        break;
                }
                /*
                 * Fall-through
                 */
        default:
                isert_release_cmd(cmd);
                break;
        }
}
1282
1283 static void
1284 isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev)
1285 {
1286         if (tx_desc->dma_addr != 0) {
1287                 pr_debug("Calling ib_dma_unmap_single for tx_desc->dma_addr\n");
1288                 ib_dma_unmap_single(ib_dev, tx_desc->dma_addr,
1289                                     ISER_HEADERS_LEN, DMA_TO_DEVICE);
1290                 tx_desc->dma_addr = 0;
1291         }
1292 }
1293
1294 static void
1295 isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd,
1296                      struct ib_device *ib_dev, bool comp_err)
1297 {
1298         if (isert_cmd->sense_buf_dma != 0) {
1299                 pr_debug("Calling ib_dma_unmap_single for isert_cmd->sense_buf_dma\n");
1300                 ib_dma_unmap_single(ib_dev, isert_cmd->sense_buf_dma,
1301                                     isert_cmd->sense_buf_len, DMA_TO_DEVICE);
1302                 isert_cmd->sense_buf_dma = 0;
1303         }
1304
1305         isert_unmap_tx_desc(tx_desc, ib_dev);
1306         isert_put_cmd(isert_cmd, comp_err);
1307 }
1308
/*
 * isert_completion_rdma_read() - an RDMA READ pulling WRITE data from
 * the initiator has completed: tear down the transfer resources, mark
 * all data received, and kick the command into execution.
 */
static void
isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
                           struct isert_cmd *isert_cmd)
{
        struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
        struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;
        struct se_cmd *se_cmd = &cmd->se_cmd;
        struct ib_device *ib_dev = isert_cmd->conn->conn_cm_id->device;

        /* All write data arrived via RDMA; no DataOut timer needed. */
        iscsit_stop_dataout_timer(cmd);

        if (wr->sge) {
                pr_debug("isert_do_rdma_read_comp: Unmapping wr->sge from t_data_sg\n");
                ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge, DMA_TO_DEVICE);
                wr->sge = NULL;
        }

        if (isert_cmd->ib_sge) {
                pr_debug("isert_do_rdma_read_comp: Freeing isert_cmd->ib_sge\n");
                kfree(isert_cmd->ib_sge);
                isert_cmd->ib_sge = NULL;
        }

        /* The RDMA READ delivered the full payload in one shot. */
        cmd->write_data_done = se_cmd->data_length;
        wr->send_wr_num = 0;

        pr_debug("isert_do_rdma_read_comp, calling target_execute_cmd\n");
        spin_lock_bh(&cmd->istate_lock);
        cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
        cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
        spin_unlock_bh(&cmd->istate_lock);

        target_execute_cmd(se_cmd);
}
1343
/*
 * isert_do_control_comp() - process-context completion handling for
 * control responses (TMR, reject, logout), queued from the TX
 * completion path via isert_comp_wq so the post handlers may sleep.
 */
static void
isert_do_control_comp(struct work_struct *work)
{
        struct isert_cmd *isert_cmd = container_of(work,
                        struct isert_cmd, comp_work);
        struct isert_conn *isert_conn = isert_cmd->conn;
        struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
        struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;

        switch (cmd->i_state) {
        case ISTATE_SEND_TASKMGTRSP:
                pr_debug("Calling iscsit_tmr_post_handler >>>>>>>>>>>>>>>>>\n");

                atomic_dec(&isert_conn->post_send_buf_count);
                iscsit_tmr_post_handler(cmd, cmd->conn);

                cmd->i_state = ISTATE_SENT_STATUS;
                isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false);
                break;
        case ISTATE_SEND_REJECT:
                pr_debug("Got isert_do_control_comp ISTATE_SEND_REJECT: >>>\n");
                atomic_dec(&isert_conn->post_send_buf_count);

                cmd->i_state = ISTATE_SENT_STATUS;
                isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false);
                break;
        case ISTATE_SEND_LOGOUTRSP:
                pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n");

                /*
                 * NOTE(review): unlike the cases above, no
                 * isert_completion_put() here — presumably the logout
                 * post handler tears down the connection; confirm.
                 */
                atomic_dec(&isert_conn->post_send_buf_count);
                iscsit_logout_post_handler(cmd, cmd->conn);
                break;
        default:
                pr_err("Unknown do_control_comp i_state %d\n", cmd->i_state);
                dump_stack();
                break;
        }
}
1382
1383 static void
1384 isert_response_completion(struct iser_tx_desc *tx_desc,
1385                           struct isert_cmd *isert_cmd,
1386                           struct isert_conn *isert_conn,
1387                           struct ib_device *ib_dev)
1388 {
1389         struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;
1390         struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
1391
1392         if (cmd->i_state == ISTATE_SEND_TASKMGTRSP ||
1393             cmd->i_state == ISTATE_SEND_LOGOUTRSP ||
1394             cmd->i_state == ISTATE_SEND_REJECT) {
1395                 isert_unmap_tx_desc(tx_desc, ib_dev);
1396
1397                 INIT_WORK(&isert_cmd->comp_work, isert_do_control_comp);
1398                 queue_work(isert_comp_wq, &isert_cmd->comp_work);
1399                 return;
1400         }
1401         atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
1402
1403         cmd->i_state = ISTATE_SENT_STATUS;
1404         isert_completion_put(tx_desc, isert_cmd, ib_dev, false);
1405 }
1406
/*
 * isert_send_completion() - dispatch a successful TX work completion
 * according to the operation that produced it.  Descriptors without an
 * attached command (e.g. the login response) are simply unmapped.
 */
static void
isert_send_completion(struct iser_tx_desc *tx_desc,
                      struct isert_conn *isert_conn)
{
        struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
        struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
        struct isert_rdma_wr *wr;

        if (!isert_cmd) {
                /* Bare descriptor (no command context): just unmap. */
                atomic_dec(&isert_conn->post_send_buf_count);
                isert_unmap_tx_desc(tx_desc, ib_dev);
                return;
        }
        wr = &isert_cmd->rdma_wr;

        switch (wr->iser_ib_op) {
        case ISER_IB_RECV:
                /* A receive op must never complete on the TX CQ. */
                pr_err("isert_send_completion: Got ISER_IB_RECV\n");
                dump_stack();
                break;
        case ISER_IB_SEND:
                pr_debug("isert_send_completion: Got ISER_IB_SEND\n");
                isert_response_completion(tx_desc, isert_cmd,
                                          isert_conn, ib_dev);
                break;
        case ISER_IB_RDMA_WRITE:
                /* NOTE(review): RDMA WRITE completions are treated as
                 * unexpected here — confirm writes are posted unsignaled. */
                pr_err("isert_send_completion: Got ISER_IB_RDMA_WRITE\n");
                dump_stack();
                break;
        case ISER_IB_RDMA_READ:
                pr_debug("isert_send_completion: Got ISER_IB_RDMA_READ:\n");

                atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
                isert_completion_rdma_read(tx_desc, isert_cmd);
                break;
        default:
                pr_err("Unknown wr->iser_ib_op: 0x%02x\n", wr->iser_ib_op);
                dump_stack();
                break;
        }
}
1448
1449 static void
1450 isert_cq_tx_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn)
1451 {
1452         struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
1453         struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
1454
1455         if (!isert_cmd)
1456                 isert_unmap_tx_desc(tx_desc, ib_dev);
1457         else
1458                 isert_completion_put(tx_desc, isert_cmd, ib_dev, true);
1459 }
1460
/*
 * isert_cq_rx_comp_err() - RX error/flush handling.  Once the last
 * posted receive has drained (post_recv_buf_count reaches zero), wait
 * out in-flight session commands and sends, mark the connection down,
 * and trigger iSCSI connection reinstatement.
 */
static void
isert_cq_rx_comp_err(struct isert_conn *isert_conn)
{
        struct iscsi_conn *conn = isert_conn->conn;

        /* Not the last flushed receive yet; teardown runs once. */
        if (isert_conn->post_recv_buf_count)
                return;

        if (conn->sess) {
                target_sess_cmd_list_set_waiting(conn->sess->se_sess);
                target_wait_for_sess_cmds(conn->sess->se_sess);
        }

        /*
         * Coarse poll for outstanding sends to complete.
         * NOTE(review): 3s sleep granularity — presumably acceptable in
         * this teardown path; confirm.
         */
        while (atomic_read(&isert_conn->post_send_buf_count))
                msleep(3000);

        mutex_lock(&isert_conn->conn_mutex);
        isert_conn->state = ISER_CONN_DOWN;
        mutex_unlock(&isert_conn->conn_mutex);

        iscsit_cause_connection_reinstatement(isert_conn->conn, 0);

        complete(&isert_conn->conn_wait_comp_err);
}
1485
1486 static void
1487 isert_cq_tx_work(struct work_struct *work)
1488 {
1489         struct isert_cq_desc *cq_desc = container_of(work,
1490                                 struct isert_cq_desc, cq_tx_work);
1491         struct isert_device *device = cq_desc->device;
1492         int cq_index = cq_desc->cq_index;
1493         struct ib_cq *tx_cq = device->dev_tx_cq[cq_index];
1494         struct isert_conn *isert_conn;
1495         struct iser_tx_desc *tx_desc;
1496         struct ib_wc wc;
1497
1498         while (ib_poll_cq(tx_cq, 1, &wc) == 1) {
1499                 tx_desc = (struct iser_tx_desc *)(unsigned long)wc.wr_id;
1500                 isert_conn = wc.qp->qp_context;
1501
1502                 if (wc.status == IB_WC_SUCCESS) {
1503                         isert_send_completion(tx_desc, isert_conn);
1504                 } else {
1505                         pr_debug("TX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n");
1506                         pr_debug("TX wc.status: 0x%08x\n", wc.status);
1507                         atomic_dec(&isert_conn->post_send_buf_count);
1508                         isert_cq_tx_comp_err(tx_desc, isert_conn);
1509                 }
1510         }
1511
1512         ib_req_notify_cq(tx_cq, IB_CQ_NEXT_COMP);
1513 }
1514
1515 static void
1516 isert_cq_tx_callback(struct ib_cq *cq, void *context)
1517 {
1518         struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;
1519
1520         INIT_WORK(&cq_desc->cq_tx_work, isert_cq_tx_work);
1521         queue_work(isert_comp_wq, &cq_desc->cq_tx_work);
1522 }
1523
1524 static void
1525 isert_cq_rx_work(struct work_struct *work)
1526 {
1527         struct isert_cq_desc *cq_desc = container_of(work,
1528                         struct isert_cq_desc, cq_rx_work);
1529         struct isert_device *device = cq_desc->device;
1530         int cq_index = cq_desc->cq_index;
1531         struct ib_cq *rx_cq = device->dev_rx_cq[cq_index];
1532         struct isert_conn *isert_conn;
1533         struct iser_rx_desc *rx_desc;
1534         struct ib_wc wc;
1535         unsigned long xfer_len;
1536
1537         while (ib_poll_cq(rx_cq, 1, &wc) == 1) {
1538                 rx_desc = (struct iser_rx_desc *)(unsigned long)wc.wr_id;
1539                 isert_conn = wc.qp->qp_context;
1540
1541                 if (wc.status == IB_WC_SUCCESS) {
1542                         xfer_len = (unsigned long)wc.byte_len;
1543                         isert_rx_completion(rx_desc, isert_conn, xfer_len);
1544                 } else {
1545                         pr_debug("RX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n");
1546                         if (wc.status != IB_WC_WR_FLUSH_ERR)
1547                                 pr_debug("RX wc.status: 0x%08x\n", wc.status);
1548
1549                         isert_conn->post_recv_buf_count--;
1550                         isert_cq_rx_comp_err(isert_conn);
1551                 }
1552         }
1553
1554         ib_req_notify_cq(rx_cq, IB_CQ_NEXT_COMP);
1555 }
1556
1557 static void
1558 isert_cq_rx_callback(struct ib_cq *cq, void *context)
1559 {
1560         struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;
1561
1562         INIT_WORK(&cq_desc->cq_rx_work, isert_cq_rx_work);
1563         queue_work(isert_rx_wq, &cq_desc->cq_rx_work);
1564 }
1565
1566 static int
1567 isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd)
1568 {
1569         struct ib_send_wr *wr_failed;
1570         int ret;
1571
1572         atomic_inc(&isert_conn->post_send_buf_count);
1573
1574         ret = ib_post_send(isert_conn->conn_qp, &isert_cmd->tx_desc.send_wr,
1575                            &wr_failed);
1576         if (ret) {
1577                 pr_err("ib_post_send failed with %d\n", ret);
1578                 atomic_dec(&isert_conn->post_send_buf_count);
1579                 return ret;
1580         }
1581         return ret;
1582 }
1583
/*
 * iscsit_transport ->iscsit_queue_status callback: build a SCSI
 * Response PDU in the per-command tx_desc and post it, attaching any
 * sense data as a second SGE.
 */
static int
isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
	struct isert_cmd *isert_cmd = container_of(cmd,
					struct isert_cmd, iscsi_cmd);
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
	struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *)
				&isert_cmd->tx_desc.iscsi_header;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_rsp_pdu(cmd, conn, true, hdr);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	/*
	 * Attach SENSE DATA payload to iSCSI Response PDU
	 */
	if (cmd->se_cmd.sense_buffer &&
	    ((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
	    (cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
		struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
		struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
		u32 padding, sense_len;

		/* Prepend the two-byte SenseLength field to the buffer */
		put_unaligned_be16(cmd->se_cmd.scsi_sense_length,
				   cmd->sense_buffer);
		cmd->se_cmd.scsi_sense_length += sizeof(__be16);

		/* Round the payload length up to a 4-byte boundary */
		padding = -(cmd->se_cmd.scsi_sense_length) & 3;
		hton24(hdr->dlength, (u32)cmd->se_cmd.scsi_sense_length);
		sense_len = cmd->se_cmd.scsi_sense_length + padding;

		/*
		 * NOTE(review): the mapping result is not checked with
		 * ib_dma_mapping_error() here - confirm whether a mapping
		 * failure can hand a bogus address to the HCA.
		 */
		isert_cmd->sense_buf_dma = ib_dma_map_single(ib_dev,
				(void *)cmd->sense_buffer, sense_len,
				DMA_TO_DEVICE);

		isert_cmd->sense_buf_len = sense_len;
		tx_dsg->addr	= isert_cmd->sense_buf_dma;
		tx_dsg->length	= sense_len;
		tx_dsg->lkey	= isert_conn->conn_mr->lkey;
		isert_cmd->tx_desc.num_sge = 2;
	}

	isert_init_send_wr(isert_cmd, send_wr);

	pr_debug("Posting SCSI Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");

	return isert_post_response(isert_conn, isert_cmd);
}
1632
1633 static int
1634 isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
1635                 bool nopout_response)
1636 {
1637         struct isert_cmd *isert_cmd = container_of(cmd,
1638                                 struct isert_cmd, iscsi_cmd);
1639         struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
1640         struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
1641
1642         isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
1643         iscsit_build_nopin_rsp(cmd, conn, (struct iscsi_nopin *)
1644                                &isert_cmd->tx_desc.iscsi_header,
1645                                nopout_response);
1646         isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
1647         isert_init_send_wr(isert_cmd, send_wr);
1648
1649         pr_debug("Posting NOPIN Reponse IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
1650
1651         return isert_post_response(isert_conn, isert_cmd);
1652 }
1653
/*
 * Build and post a Logout Response PDU on the RDMA connection.
 */
static int
isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = container_of(cmd,
				struct isert_cmd, iscsi_cmd);
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_logout_rsp(cmd, conn, (struct iscsi_logout_rsp *)
				&isert_cmd->tx_desc.iscsi_header);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	isert_init_send_wr(isert_cmd, send_wr);

	pr_debug("Posting Logout Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");

	return isert_post_response(isert_conn, isert_cmd);
}
1672
/*
 * Build and post a Task Management Response PDU on the RDMA connection.
 */
static int
isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = container_of(cmd,
				struct isert_cmd, iscsi_cmd);
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_task_mgt_rsp(cmd, conn, (struct iscsi_tm_rsp *)
				  &isert_cmd->tx_desc.iscsi_header);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	isert_init_send_wr(isert_cmd, send_wr);

	pr_debug("Posting Task Management Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");

	return isert_post_response(isert_conn, isert_cmd);
}
1691
/*
 * Build and post a Reject PDU.  The rejected PDU's header (cmd->buf_ptr)
 * is DMA-mapped and carried as the data segment in a second SGE; the
 * sense_buf_dma/sense_buf_len fields are reused to track that mapping.
 */
static int
isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = container_of(cmd,
				struct isert_cmd, iscsi_cmd);
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
	struct iscsi_reject *hdr =
		(struct iscsi_reject *)&isert_cmd->tx_desc.iscsi_header;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_reject(cmd, conn, hdr);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);

	/* Data segment is the rejected PDU header: exactly ISCSI_HDR_LEN */
	hton24(hdr->dlength, ISCSI_HDR_LEN);
	/*
	 * NOTE(review): mapping result is not checked with
	 * ib_dma_mapping_error() - confirm a failure here cannot reach
	 * the HCA with an invalid address.
	 */
	isert_cmd->sense_buf_dma = ib_dma_map_single(ib_dev,
			(void *)cmd->buf_ptr, ISCSI_HDR_LEN,
			DMA_TO_DEVICE);
	isert_cmd->sense_buf_len = ISCSI_HDR_LEN;
	tx_dsg->addr	= isert_cmd->sense_buf_dma;
	tx_dsg->length	= ISCSI_HDR_LEN;
	tx_dsg->lkey	= isert_conn->conn_mr->lkey;
	isert_cmd->tx_desc.num_sge = 2;

	isert_init_send_wr(isert_cmd, send_wr);

	pr_debug("Posting Reject IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");

	return isert_post_response(isert_conn, isert_cmd);
}
1724
/*
 * Populate one RDMA send_wr with up to max_sge SGEs taken from the
 * command's TCM scatterlist.
 *
 * @data_left: maximum number of bytes this work-request may cover
 * @offset:    starting byte offset into the command's data buffer;
 *             sg_off/page_off below assume each SG entry spans one page
 *
 * Returns the number of SGEs consumed, so the caller can advance its
 * ib_sge cursor for the next work-request.
 */
static int
isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		    struct ib_sge *ib_sge, struct ib_send_wr *send_wr,
		    u32 data_left, u32 offset)
{
	struct iscsi_cmd *cmd = &isert_cmd->iscsi_cmd;
	struct scatterlist *sg_start, *tmp_sg;
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	u32 sg_off, page_off;
	int i = 0, sg_nents;

	/* Translate the byte offset into an SG index + intra-page offset */
	sg_off = offset / PAGE_SIZE;
	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
	sg_nents = min(cmd->se_cmd.t_data_nents - sg_off, isert_conn->max_sge);
	page_off = offset % PAGE_SIZE;

	send_wr->sg_list = ib_sge;
	send_wr->num_sge = sg_nents;
	send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
	/*
	 * Perform mapping of TCM scatterlist memory ib_sge dma_addr.
	 */
	for_each_sg(sg_start, tmp_sg, sg_nents, i) {
		pr_debug("ISER RDMA from SGL dma_addr: 0x%16llx dma_len: %u, page_off: %u\n",
			 (unsigned long long)tmp_sg->dma_address,
			 tmp_sg->length, page_off);

		ib_sge->addr = ib_sg_dma_address(ib_dev, tmp_sg) + page_off;
		/* Clamp each SGE to the remaining byte budget */
		ib_sge->length = min_t(u32, data_left,
				ib_sg_dma_len(ib_dev, tmp_sg) - page_off);
		ib_sge->lkey = isert_conn->conn_mr->lkey;

		pr_debug("RDMA ib_sge: addr: 0x%16llx  length: %u\n",
			 ib_sge->addr, ib_sge->length);
		/* Only the first SGE may start mid-page */
		page_off = 0;
		data_left -= ib_sge->length;
		ib_sge++;
		pr_debug("Incrementing ib_sge pointer to %p\n", ib_sge);
	}

	pr_debug("Set outgoing sg_list: %p num_sg: %u from TCM SGLs\n",
		 send_wr->sg_list, send_wr->num_sge);

	return sg_nents;
}
1770
/*
 * iscsit_transport ->iscsit_queue_data_in callback: DMA-map the
 * command's scatterlist, build a chain of RDMA_WRITE work-requests that
 * push the Data-IN payload to the initiator's read buffer, link the
 * final WR to the SCSI Response send WR, and post the whole chain.
 *
 * Returns 1 on success (response posted inline), negative errno on
 * setup failure.
 */
static int
isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_cmd *isert_cmd = container_of(cmd,
					struct isert_cmd, iscsi_cmd);
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct ib_send_wr *wr_failed, *send_wr;
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct ib_sge *ib_sge;
	struct scatterlist *sg;
	u32 offset = 0, data_len, data_left, rdma_write_max;
	int rc, ret = 0, count, sg_nents, i, ib_sge_cnt;

	pr_debug("RDMA_WRITE: data_length: %u\n", se_cmd->data_length);

	sg = &se_cmd->t_data_sg[0];
	sg_nents = se_cmd->t_data_nents;

	count = ib_dma_map_sg(ib_dev, sg, sg_nents, DMA_TO_DEVICE);
	if (unlikely(!count)) {
		pr_err("Unable to map put_datain SGs\n");
		return -EINVAL;
	}
	/* Remember the mapped SGL so the completion path can unmap it */
	wr->sge = sg;
	wr->num_sge = sg_nents;
	pr_debug("Mapped IB count: %u sg: %p sg_nents: %u for RDMA_WRITE\n",
		 count, sg, sg_nents);

	ib_sge = kzalloc(sizeof(struct ib_sge) * sg_nents, GFP_KERNEL);
	if (!ib_sge) {
		pr_warn("Unable to allocate datain ib_sge\n");
		ret = -ENOMEM;
		goto unmap_sg;
	}
	/*
	 * Ownership of ib_sge transfers to isert_cmd here; presumably it
	 * is freed by the completion/teardown path.  NOTE(review): on the
	 * send_wr allocation failure below, ib_sge is not freed in this
	 * function - confirm the teardown path covers it.
	 */
	isert_cmd->ib_sge = ib_sge;

	pr_debug("Allocated ib_sge: %p from t_data_ents: %d for RDMA_WRITE\n",
		 ib_sge, se_cmd->t_data_nents);

	/* One WR per max_sge-sized slice of the scatterlist */
	wr->send_wr_num = DIV_ROUND_UP(sg_nents, isert_conn->max_sge);
	wr->send_wr = kzalloc(sizeof(struct ib_send_wr) * wr->send_wr_num,
				GFP_KERNEL);
	if (!wr->send_wr) {
		pr_err("Unable to allocate wr->send_wr\n");
		ret = -ENOMEM;
		goto unmap_sg;
	}
	pr_debug("Allocated wr->send_wr: %p wr->send_wr_num: %u\n",
		 wr->send_wr, wr->send_wr_num);

	iscsit_increment_maxcmdsn(cmd, conn->sess);
	cmd->stat_sn = conn->stat_sn++;

	wr->isert_cmd = isert_cmd;
	rdma_write_max = isert_conn->max_sge * PAGE_SIZE;
	data_left = se_cmd->data_length;

	for (i = 0; i < wr->send_wr_num; i++) {
		send_wr = &isert_cmd->rdma_wr.send_wr[i];
		data_len = min(data_left, rdma_write_max);

		send_wr->opcode = IB_WR_RDMA_WRITE;
		send_wr->send_flags = 0;
		/* Target of the write is the initiator's advertised buffer */
		send_wr->wr.rdma.remote_addr = isert_cmd->read_va + offset;
		send_wr->wr.rdma.rkey = isert_cmd->read_stag;

		ib_sge_cnt = isert_build_rdma_wr(isert_conn, isert_cmd, ib_sge,
					send_wr, data_len, offset);
		ib_sge += ib_sge_cnt;

		/* Chain the last RDMA_WRITE into the SCSI Response send */
		if (i + 1 == wr->send_wr_num)
			send_wr->next = &isert_cmd->tx_desc.send_wr;
		else
			send_wr->next = &wr->send_wr[i + 1];

		offset += data_len;
		data_left -= data_len;
	}
	/*
	 * Build isert_conn->tx_desc for iSCSI response PDU and attach
	 */
	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_rsp_pdu(cmd, conn, false, (struct iscsi_scsi_rsp *)
			     &isert_cmd->tx_desc.iscsi_header);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	isert_init_send_wr(isert_cmd, &isert_cmd->tx_desc.send_wr);

	/* +1 accounts for the trailing response send WR */
	atomic_add(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);

	rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
	if (rc) {
		pr_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");
		atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
	}
	pr_debug("Posted RDMA_WRITE + Response for iSER Data READ\n");
	return 1;

unmap_sg:
	ib_dma_unmap_sg(ib_dev, sg, sg_nents, DMA_TO_DEVICE);
	return ret;
}
1874
/*
 * iscsit_transport ->iscsit_get_dataout callback: build and post a
 * chain of RDMA_READ work-requests pulling the remaining Data-OUT
 * payload from the initiator's write buffer, resuming at
 * cmd->write_data_done (e.g. after recovery).
 *
 * Returns 0 on success, negative errno on setup failure.
 */
static int
isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_cmd *isert_cmd = container_of(cmd,
					struct isert_cmd, iscsi_cmd);
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
	struct ib_send_wr *wr_failed, *send_wr;
	struct ib_sge *ib_sge;
	struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
	struct scatterlist *sg_start;
	u32 sg_off, sg_nents, page_off, va_offset = 0;
	u32 offset = 0, data_len, data_left, rdma_write_max;
	int rc, ret = 0, count, i, ib_sge_cnt;

	pr_debug("RDMA_READ: data_length: %u write_data_done: %u\n",
		 se_cmd->data_length, cmd->write_data_done);

	/* Skip data already received; assumes one page per SG entry */
	sg_off = cmd->write_data_done / PAGE_SIZE;
	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
	page_off = cmd->write_data_done % PAGE_SIZE;

	pr_debug("RDMA_READ: sg_off: %d, sg_start: %p page_off: %d\n",
		 sg_off, sg_start, page_off);

	data_left = se_cmd->data_length - cmd->write_data_done;
	sg_nents = se_cmd->t_data_nents - sg_off;

	pr_debug("RDMA_READ: data_left: %d, sg_nents: %d\n",
		 data_left, sg_nents);

	count = ib_dma_map_sg(ib_dev, sg_start, sg_nents, DMA_FROM_DEVICE);
	if (unlikely(!count)) {
		pr_err("Unable to map get_dataout SGs\n");
		return -EINVAL;
	}
	/* Remember the mapped SGL so the completion path can unmap it */
	wr->sge = sg_start;
	wr->num_sge = sg_nents;
	pr_debug("Mapped IB count: %u sg_start: %p sg_nents: %u for RDMA_READ\n",
		 count, sg_start, sg_nents);

	ib_sge = kzalloc(sizeof(struct ib_sge) * sg_nents, GFP_KERNEL);
	if (!ib_sge) {
		pr_warn("Unable to allocate dataout ib_sge\n");
		ret = -ENOMEM;
		goto unmap_sg;
	}
	/* Ownership transfers to isert_cmd; freed by the teardown path */
	isert_cmd->ib_sge = ib_sge;

	pr_debug("Using ib_sge: %p from sg_ents: %d for RDMA_READ\n",
		 ib_sge, sg_nents);

	/* One WR per max_sge-sized slice of the remaining scatterlist */
	wr->send_wr_num = DIV_ROUND_UP(sg_nents, isert_conn->max_sge);
	wr->send_wr = kzalloc(sizeof(struct ib_send_wr) * wr->send_wr_num,
				GFP_KERNEL);
	if (!wr->send_wr) {
		pr_debug("Unable to allocate wr->send_wr\n");
		ret = -ENOMEM;
		goto unmap_sg;
	}
	pr_debug("Allocated wr->send_wr: %p wr->send_wr_num: %u\n",
		 wr->send_wr, wr->send_wr_num);

	isert_cmd->tx_desc.isert_cmd = isert_cmd;

	wr->iser_ib_op = ISER_IB_RDMA_READ;
	wr->isert_cmd = isert_cmd;
	rdma_write_max = isert_conn->max_sge * PAGE_SIZE;
	offset = cmd->write_data_done;

	for (i = 0; i < wr->send_wr_num; i++) {
		send_wr = &isert_cmd->rdma_wr.send_wr[i];
		data_len = min(data_left, rdma_write_max);

		send_wr->opcode = IB_WR_RDMA_READ;
		/* Source is the initiator's advertised write buffer */
		send_wr->wr.rdma.remote_addr = isert_cmd->write_va + va_offset;
		send_wr->wr.rdma.rkey = isert_cmd->write_stag;

		ib_sge_cnt = isert_build_rdma_wr(isert_conn, isert_cmd, ib_sge,
					send_wr, data_len, offset);
		ib_sge += ib_sge_cnt;

		/* Only the final READ signals completion */
		if (i + 1 == wr->send_wr_num)
			send_wr->send_flags = IB_SEND_SIGNALED;
		else
			send_wr->next = &wr->send_wr[i + 1];

		offset += data_len;
		va_offset += data_len;
		data_left -= data_len;
	}

	atomic_add(wr->send_wr_num, &isert_conn->post_send_buf_count);

	rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
	if (rc) {
		pr_warn("ib_post_send() failed for IB_WR_RDMA_READ\n");
		atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
	}
	pr_debug("Posted RDMA_READ memory for ISER Data WRITE\n");
	return 0;

unmap_sg:
	ib_dma_unmap_sg(ib_dev, sg_start, sg_nents, DMA_FROM_DEVICE);
	return ret;
}
1982
1983 static int
1984 isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
1985 {
1986         int ret;
1987
1988         switch (state) {
1989         case ISTATE_SEND_NOPIN_WANT_RESPONSE:
1990                 ret = isert_put_nopin(cmd, conn, false);
1991                 break;
1992         default:
1993                 pr_err("Unknown immediate state: 0x%02x\n", state);
1994                 ret = -EINVAL;
1995                 break;
1996         }
1997
1998         return ret;
1999 }
2000
/*
 * iscsit_transport ->iscsit_response_queue callback: dispatch a queued
 * response state to the matching PDU builder.  A successfully posted
 * logout is converted to -EAGAIN so the core iSCSI code defers the
 * connection teardown until the send completes.
 */
static int
isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
{
	int ret;

	switch (state) {
	case ISTATE_SEND_LOGOUTRSP:
		ret = isert_put_logout_rsp(cmd, conn);
		if (!ret) {
			pr_debug("Returning iSER Logout -EAGAIN\n");
			ret = -EAGAIN;
		}
		break;
	case ISTATE_SEND_NOPIN:
		ret = isert_put_nopin(cmd, conn, true);
		break;
	case ISTATE_SEND_TASKMGTRSP:
		ret = isert_put_tm_rsp(cmd, conn);
		break;
	case ISTATE_SEND_REJECT:
		ret = isert_put_reject(cmd, conn);
		break;
	case ISTATE_SEND_STATUS:
		/*
		 * Special case for sending non GOOD SCSI status from TX thread
		 * context during pre se_cmd execution failure.
		 */
		ret = isert_put_response(conn, cmd);
		break;
	default:
		pr_err("Unknown response state: 0x%02x\n", state);
		ret = -EINVAL;
		break;
	}

	return ret;
}
2038
/*
 * iscsit_transport ->iscsit_setup_np callback: allocate the isert_np
 * listener state, create an RDMA CM listen id, bind it to the portal
 * address and start listening.  On success np->np_context owns the
 * isert_np; on failure everything allocated here is released.
 */
static int
isert_setup_np(struct iscsi_np *np,
	       struct __kernel_sockaddr_storage *ksockaddr)
{
	struct isert_np *isert_np;
	struct rdma_cm_id *isert_lid;
	struct sockaddr *sa;
	int ret;

	isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL);
	if (!isert_np) {
		pr_err("Unable to allocate struct isert_np\n");
		return -ENOMEM;
	}
	/* Semaphore starts at 0: accept blocks until the CM handler ups it */
	sema_init(&isert_np->np_sem, 0);
	mutex_init(&isert_np->np_accept_mutex);
	INIT_LIST_HEAD(&isert_np->np_accept_list);
	init_completion(&isert_np->np_login_comp);

	sa = (struct sockaddr *)ksockaddr;
	pr_debug("ksockaddr: %p, sa: %p\n", ksockaddr, sa);
	/*
	 * Setup the np->np_sockaddr from the passed sockaddr setup
	 * in iscsi_target_configfs.c code..
	 */
	memcpy(&np->np_sockaddr, ksockaddr,
	       sizeof(struct __kernel_sockaddr_storage));

	isert_lid = rdma_create_id(isert_cma_handler, np, RDMA_PS_TCP,
				IB_QPT_RC);
	if (IS_ERR(isert_lid)) {
		pr_err("rdma_create_id() for isert_listen_handler failed: %ld\n",
		       PTR_ERR(isert_lid));
		ret = PTR_ERR(isert_lid);
		goto out;
	}

	ret = rdma_bind_addr(isert_lid, sa);
	if (ret) {
		pr_err("rdma_bind_addr() for isert_lid failed: %d\n", ret);
		goto out_lid;
	}

	ret = rdma_listen(isert_lid, ISERT_RDMA_LISTEN_BACKLOG);
	if (ret) {
		pr_err("rdma_listen() for isert_lid failed: %d\n", ret);
		goto out_lid;
	}

	isert_np->np_cm_id = isert_lid;
	np->np_context = isert_np;
	pr_debug("Setup isert_lid->context: %p\n", isert_lid->context);

	return 0;

out_lid:
	rdma_destroy_id(isert_lid);
out:
	kfree(isert_np);
	return ret;
}
2100
2101 static int
2102 isert_rdma_accept(struct isert_conn *isert_conn)
2103 {
2104         struct rdma_cm_id *cm_id = isert_conn->conn_cm_id;
2105         struct rdma_conn_param cp;
2106         int ret;
2107
2108         memset(&cp, 0, sizeof(struct rdma_conn_param));
2109         cp.responder_resources = isert_conn->responder_resources;
2110         cp.initiator_depth = isert_conn->initiator_depth;
2111         cp.retry_count = 7;
2112         cp.rnr_retry_count = 7;
2113
2114         pr_debug("Before rdma_accept >>>>>>>>>>>>>>>>>>>>.\n");
2115
2116         ret = rdma_accept(cm_id, &cp);
2117         if (ret) {
2118                 pr_err("rdma_accept() failed with: %d\n", ret);
2119                 return ret;
2120         }
2121
2122         pr_debug("After rdma_accept >>>>>>>>>>>>>>>>>>>>>.\n");
2123
2124         return 0;
2125 }
2126
2127 static int
2128 isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
2129 {
2130         struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
2131         int ret;
2132
2133         pr_debug("isert_get_login_rx before conn_login_comp conn: %p\n", conn);
2134
2135         ret = wait_for_completion_interruptible(&isert_conn->conn_login_comp);
2136         if (ret)
2137                 return ret;
2138
2139         pr_debug("isert_get_login_rx processing login->req: %p\n", login->req);
2140         return 0;
2141 }
2142
2143 static void
2144 isert_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn,
2145                     struct isert_conn *isert_conn)
2146 {
2147         struct rdma_cm_id *cm_id = isert_conn->conn_cm_id;
2148         struct rdma_route *cm_route = &cm_id->route;
2149         struct sockaddr_in *sock_in;
2150         struct sockaddr_in6 *sock_in6;
2151
2152         conn->login_family = np->np_sockaddr.ss_family;
2153
2154         if (np->np_sockaddr.ss_family == AF_INET6) {
2155                 sock_in6 = (struct sockaddr_in6 *)&cm_route->addr.dst_addr;
2156                 snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI6c",
2157                          &sock_in6->sin6_addr.in6_u);
2158                 conn->login_port = ntohs(sock_in6->sin6_port);
2159
2160                 sock_in6 = (struct sockaddr_in6 *)&cm_route->addr.src_addr;
2161                 snprintf(conn->local_ip, sizeof(conn->local_ip), "%pI6c",
2162                          &sock_in6->sin6_addr.in6_u);
2163                 conn->local_port = ntohs(sock_in6->sin6_port);
2164         } else {
2165                 sock_in = (struct sockaddr_in *)&cm_route->addr.dst_addr;
2166                 sprintf(conn->login_ip, "%pI4",
2167                         &sock_in->sin_addr.s_addr);
2168                 conn->login_port = ntohs(sock_in->sin_port);
2169
2170                 sock_in = (struct sockaddr_in *)&cm_route->addr.src_addr;
2171                 sprintf(conn->local_ip, "%pI4",
2172                         &sock_in->sin_addr.s_addr);
2173                 conn->local_port = ntohs(sock_in->sin_port);
2174         }
2175 }
2176
2177 static int
2178 isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
2179 {
2180         struct isert_np *isert_np = (struct isert_np *)np->np_context;
2181         struct isert_conn *isert_conn;
2182         int max_accept = 0, ret;
2183
2184 accept_wait:
2185         ret = down_interruptible(&isert_np->np_sem);
2186         if (max_accept > 5)
2187                 return -ENODEV;
2188
2189         spin_lock_bh(&np->np_thread_lock);
2190         if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) {
2191                 spin_unlock_bh(&np->np_thread_lock);
2192                 pr_debug("np_thread_state %d for isert_accept_np\n",
2193                          np->np_thread_state);
2194                 /**
2195                  * No point in stalling here when np_thread
2196                  * is in state RESET/SHUTDOWN/EXIT - bail
2197                  **/
2198                 return -ENODEV;
2199         }
2200         spin_unlock_bh(&np->np_thread_lock);
2201
2202         mutex_lock(&isert_np->np_accept_mutex);
2203         if (list_empty(&isert_np->np_accept_list)) {
2204                 mutex_unlock(&isert_np->np_accept_mutex);
2205                 max_accept++;
2206                 goto accept_wait;
2207         }
2208         isert_conn = list_first_entry(&isert_np->np_accept_list,
2209                         struct isert_conn, conn_accept_node);
2210         list_del_init(&isert_conn->conn_accept_node);
2211         mutex_unlock(&isert_np->np_accept_mutex);
2212
2213         conn->context = isert_conn;
2214         isert_conn->conn = conn;
2215         max_accept = 0;
2216
2217         ret = isert_rdma_post_recvl(isert_conn);
2218         if (ret)
2219                 return ret;
2220
2221         ret = isert_rdma_accept(isert_conn);
2222         if (ret)
2223                 return ret;
2224
2225         isert_set_conn_info(np, conn, isert_conn);
2226
2227         pr_debug("Processing isert_accept_np: isert_conn: %p\n", isert_conn);
2228         return 0;
2229 }
2230
/*
 * iscsit_transport ->iscsit_free_np callback: tear down the RDMA CM
 * listener and release the isert_np allocated in isert_setup_np().
 */
static void
isert_free_np(struct iscsi_np *np)
{
	struct isert_np *isert_np = (struct isert_np *)np->np_context;

	rdma_destroy_id(isert_np->np_cm_id);

	np->np_context = NULL;
	kfree(isert_np);
}
2241
/*
 * iscsit_transport ->iscsit_wait_conn callback: initiate an RDMA
 * disconnect and, if the connection ever reached full feature phase,
 * block until the completion-error and final-teardown completions fire.
 * State transitions are serialized under conn_mutex.
 */
static void isert_wait_conn(struct iscsi_conn *conn)
{
	struct isert_conn *isert_conn = conn->context;

	pr_debug("isert_wait_conn: Starting \n");

	mutex_lock(&isert_conn->conn_mutex);
	if (isert_conn->conn_cm_id) {
		pr_debug("Calling rdma_disconnect from isert_wait_conn\n");
		rdma_disconnect(isert_conn->conn_cm_id);
	}
	/*
	 * Only wait for conn_wait_comp_err if the isert_conn made it
	 * into full feature phase..
	 */
	if (isert_conn->state == ISER_CONN_INIT) {
		mutex_unlock(&isert_conn->conn_mutex);
		return;
	}
	if (isert_conn->state == ISER_CONN_UP)
		isert_conn->state = ISER_CONN_TERMINATING;
	mutex_unlock(&isert_conn->conn_mutex);

	wait_for_completion(&isert_conn->conn_wait_comp_err);

	wait_for_completion(&isert_conn->conn_wait);
}
2269
/*
 * iscsit_transport ->iscsit_free_conn callback: drop the final
 * reference on the isert_conn.
 */
static void isert_free_conn(struct iscsi_conn *conn)
{
	struct isert_conn *isert_conn = conn->context;

	isert_put_conn(isert_conn);
}
2276
/*
 * iscsit_transport ops table registered with the iSCSI target core,
 * wiring the generic target callbacks to the iSER/RDMA implementations
 * above.
 */
static struct iscsit_transport iser_target_transport = {
	.name			= "IB/iSER",
	.transport_type		= ISCSI_INFINIBAND,
	.owner			= THIS_MODULE,
	.iscsit_setup_np	= isert_setup_np,
	.iscsit_accept_np	= isert_accept_np,
	.iscsit_free_np		= isert_free_np,
	.iscsit_wait_conn	= isert_wait_conn,
	.iscsit_free_conn	= isert_free_conn,
	.iscsit_alloc_cmd	= isert_alloc_cmd,
	.iscsit_get_login_rx	= isert_get_login_rx,
	.iscsit_put_login_tx	= isert_put_login_tx,
	.iscsit_immediate_queue	= isert_immediate_queue,
	.iscsit_response_queue	= isert_response_queue,
	.iscsit_get_dataout	= isert_get_dataout,
	.iscsit_queue_data_in	= isert_put_datain,
	.iscsit_queue_status	= isert_put_response,
};
2295
2296 static int __init isert_init(void)
2297 {
2298         int ret;
2299
2300         isert_rx_wq = alloc_workqueue("isert_rx_wq", 0, 0);
2301         if (!isert_rx_wq) {
2302                 pr_err("Unable to allocate isert_rx_wq\n");
2303                 return -ENOMEM;
2304         }
2305
2306         isert_comp_wq = alloc_workqueue("isert_comp_wq", 0, 0);
2307         if (!isert_comp_wq) {
2308                 pr_err("Unable to allocate isert_comp_wq\n");
2309                 ret = -ENOMEM;
2310                 goto destroy_rx_wq;
2311         }
2312
2313         isert_cmd_cache = kmem_cache_create("isert_cmd_cache",
2314                         sizeof(struct isert_cmd), __alignof__(struct isert_cmd),
2315                         0, NULL);
2316         if (!isert_cmd_cache) {
2317                 pr_err("Unable to create isert_cmd_cache\n");
2318                 ret = -ENOMEM;
2319                 goto destroy_tx_cq;
2320         }
2321
2322         iscsit_register_transport(&iser_target_transport);
2323         pr_debug("iSER_TARGET[0] - Loaded iser_target_transport\n");
2324         return 0;
2325
2326 destroy_tx_cq:
2327         destroy_workqueue(isert_comp_wq);
2328 destroy_rx_wq:
2329         destroy_workqueue(isert_rx_wq);
2330         return ret;
2331 }
2332
/*
 * Module exit: flush outstanding work, release the cmd cache and
 * workqueues, then unregister the transport.
 *
 * NOTE(review): the transport is unregistered only after the caches and
 * workqueues it uses are destroyed - confirm no transport callback can
 * still run at this point (later kernels reordered this teardown).
 */
static void __exit isert_exit(void)
{
	flush_scheduled_work();
	kmem_cache_destroy(isert_cmd_cache);
	destroy_workqueue(isert_comp_wq);
	destroy_workqueue(isert_rx_wq);
	iscsit_unregister_transport(&iser_target_transport);
	pr_debug("iSER_TARGET[0] - Released iser_target_transport\n");
}
2342
/* Module metadata and entry points */
MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure");
MODULE_VERSION("0.1");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(isert_init);
module_exit(isert_exit);