/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lnet/klnds/o2iblnd/o2iblnd.c
 *
 * Author: Eric Barton <eric@bartonsoftware.com>
 */

#include "o2iblnd.h"
#include <asm/div64.h>

static lnd_t the_o2iblnd = {
        .lnd_type       = O2IBLND,
        .lnd_startup    = kiblnd_startup,
        .lnd_shutdown   = kiblnd_shutdown,
        .lnd_ctl        = kiblnd_ctl,
        .lnd_query      = kiblnd_query,
        .lnd_send       = kiblnd_send,
        .lnd_recv       = kiblnd_recv,
};

kib_data_t kiblnd_data;

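/* Simple rotating checksum over the raw message bytes: rotate the 32-bit
 * accumulator left one bit, then add each byte.  Zero is reserved to mean
 * "no checksum", so a computed zero is returned as 1. */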
static __u32 kiblnd_cksum(void *ptr, int nob)
{
        char  *c  = ptr;
        __u32  sum = 0;

        while (nob-- > 0)
                sum = ((sum << 1) | (sum >> 31)) + *c++;

        /* ensure I don't return 0 (== no checksum) */
        return (sum == 0) ? 1 : sum;
}

static char *kiblnd_msgtype2str(int type)
{
        switch (type) {
        case IBLND_MSG_CONNREQ:
                return "CONNREQ";

        case IBLND_MSG_CONNACK:
                return "CONNACK";

        case IBLND_MSG_NOOP:
                return "NOOP";

        case IBLND_MSG_IMMEDIATE:
                return "IMMEDIATE";

        case IBLND_MSG_PUT_REQ:
                return "PUT_REQ";

        case IBLND_MSG_PUT_NAK:
                return "PUT_NAK";

        case IBLND_MSG_PUT_ACK:
                return "PUT_ACK";

        case IBLND_MSG_PUT_DONE:
                return "PUT_DONE";

        case IBLND_MSG_GET_REQ:
                return "GET_REQ";

        case IBLND_MSG_GET_DONE:
                return "GET_DONE";

        default:
                return "???";
        }
}

static int kiblnd_msgtype2size(int type)
{
        const int hdr_size = offsetof(kib_msg_t, ibm_u);

        switch (type) {
        case IBLND_MSG_CONNREQ:
        case IBLND_MSG_CONNACK:
                return hdr_size + sizeof(kib_connparams_t);

        case IBLND_MSG_NOOP:
                return hdr_size;

        case IBLND_MSG_IMMEDIATE:
                return offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[0]);

        case IBLND_MSG_PUT_REQ:
                return hdr_size + sizeof(kib_putreq_msg_t);

        case IBLND_MSG_PUT_ACK:
                return hdr_size + sizeof(kib_putack_msg_t);

        case IBLND_MSG_GET_REQ:
                return hdr_size + sizeof(kib_get_msg_t);

        case IBLND_MSG_PUT_NAK:
        case IBLND_MSG_PUT_DONE:
        case IBLND_MSG_GET_DONE:
                return hdr_size + sizeof(kib_completion_msg_t);

        default:
                return -1;
        }
}

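/* Validate the RDMA descriptor embedded in a GET_REQ or PUT_ACK message,
 * byte-swapping it first if the sender's endianness differs.  Returns
 * non-zero if the fragment count is out of range or the message is too
 * short to hold the descriptor it claims. */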
static int kiblnd_unpack_rd(kib_msg_t *msg, int flip)
{
        kib_rdma_desc_t *rd;
        int              nob;
        int              n;
        int              i;

        LASSERT(msg->ibm_type == IBLND_MSG_GET_REQ ||
                msg->ibm_type == IBLND_MSG_PUT_ACK);

        rd = msg->ibm_type == IBLND_MSG_GET_REQ ?
                              &msg->ibm_u.get.ibgm_rd :
                              &msg->ibm_u.putack.ibpam_rd;

        if (flip) {
                __swab32s(&rd->rd_key);
                __swab32s(&rd->rd_nfrags);
        }

        n = rd->rd_nfrags;

        if (n <= 0 || n > IBLND_MAX_RDMA_FRAGS) {
                CERROR("Bad nfrags: %d, should be 0 < n <= %d\n",
                       n, IBLND_MAX_RDMA_FRAGS);
                return 1;
        }

        nob = offsetof(kib_msg_t, ibm_u) +
              kiblnd_rd_msg_size(rd, msg->ibm_type, n);

        if (msg->ibm_nob < nob) {
                CERROR("Short %s: %d(%d)\n",
                       kiblnd_msgtype2str(msg->ibm_type), msg->ibm_nob, nob);
                return 1;
        }

        if (!flip)
                return 0;

        for (i = 0; i < n; i++) {
                __swab32s(&rd->rd_frags[i].rf_nob);
                __swab64s(&rd->rd_frags[i].rf_addr);
        }

        return 0;
}

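/* Finalise the common header fields of an outgoing message; the checksum
 * (if enabled by the checksum tunable) is computed last, over the whole
 * message with ibm_cksum still zero. */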
void kiblnd_pack_msg(lnet_ni_t *ni, kib_msg_t *msg, int version,
                     int credits, lnet_nid_t dstnid, __u64 dststamp)
{
        kib_net_t *net = ni->ni_data;

        /* CAVEAT EMPTOR! all message fields not set here should have been
         * initialised previously. */
        msg->ibm_magic    = IBLND_MSG_MAGIC;
        msg->ibm_version  = version;
        /*   ibm_type */
        msg->ibm_credits  = credits;
        /*   ibm_nob */
        msg->ibm_cksum    = 0;
        msg->ibm_srcnid   = ni->ni_nid;
        msg->ibm_srcstamp = net->ibn_incarnation;
        msg->ibm_dstnid   = dstnid;
        msg->ibm_dststamp = dststamp;

        if (*kiblnd_tunables.kib_cksum) {
                /* NB ibm_cksum zero while computing cksum */
                msg->ibm_cksum = kiblnd_cksum(msg, msg->ibm_nob);
        }
}

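/* Validate an incoming message in place.  The magic stays unflipped as a
 * clue to the peer's endianness; all other fields are byte-swapped to host
 * order as needed.  Returns -EPROTO for anything malformed or truncated. */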
int kiblnd_unpack_msg(kib_msg_t *msg, int nob)
{
        const int hdr_size = offsetof(kib_msg_t, ibm_u);
        __u32     msg_cksum;
        __u16     version;
        int       msg_nob;
        int       flip;

        /* 6 bytes are enough to have received magic + version */
        if (nob < 6) {
                CERROR("Short message: %d\n", nob);
                return -EPROTO;
        }

        if (msg->ibm_magic == IBLND_MSG_MAGIC) {
                flip = 0;
        } else if (msg->ibm_magic == __swab32(IBLND_MSG_MAGIC)) {
                flip = 1;
        } else {
                CERROR("Bad magic: %08x\n", msg->ibm_magic);
                return -EPROTO;
        }

        version = flip ? __swab16(msg->ibm_version) : msg->ibm_version;
        if (version != IBLND_MSG_VERSION &&
            version != IBLND_MSG_VERSION_1) {
                CERROR("Bad version: %x\n", version);
                return -EPROTO;
        }

        if (nob < hdr_size) {
                CERROR("Short message: %d\n", nob);
                return -EPROTO;
        }

        msg_nob = flip ? __swab32(msg->ibm_nob) : msg->ibm_nob;
        if (msg_nob > nob) {
                CERROR("Short message: got %d, wanted %d\n", nob, msg_nob);
                return -EPROTO;
        }

        /* checksum must be computed with ibm_cksum zero and BEFORE anything
         * gets flipped */
        msg_cksum = flip ? __swab32(msg->ibm_cksum) : msg->ibm_cksum;
        msg->ibm_cksum = 0;
        if (msg_cksum != 0 &&
            msg_cksum != kiblnd_cksum(msg, msg_nob)) {
                CERROR("Bad checksum\n");
                return -EPROTO;
        }

        msg->ibm_cksum = msg_cksum;

        if (flip) {
                /* leave magic unflipped as a clue to peer endianness */
                msg->ibm_version = version;
                CLASSERT(sizeof(msg->ibm_type) == 1);
                CLASSERT(sizeof(msg->ibm_credits) == 1);
                msg->ibm_nob     = msg_nob;
                __swab64s(&msg->ibm_srcnid);
                __swab64s(&msg->ibm_srcstamp);
                __swab64s(&msg->ibm_dstnid);
                __swab64s(&msg->ibm_dststamp);
        }

        if (msg->ibm_srcnid == LNET_NID_ANY) {
                CERROR("Bad src nid: %s\n", libcfs_nid2str(msg->ibm_srcnid));
                return -EPROTO;
        }

        if (msg_nob < kiblnd_msgtype2size(msg->ibm_type)) {
                CERROR("Short %s: %d(%d)\n", kiblnd_msgtype2str(msg->ibm_type),
                       msg_nob, kiblnd_msgtype2size(msg->ibm_type));
                return -EPROTO;
        }

        switch (msg->ibm_type) {
        default:
                CERROR("Unknown message type %x\n", msg->ibm_type);
                return -EPROTO;

        case IBLND_MSG_NOOP:
        case IBLND_MSG_IMMEDIATE:
        case IBLND_MSG_PUT_REQ:
                break;

        case IBLND_MSG_PUT_ACK:
        case IBLND_MSG_GET_REQ:
                if (kiblnd_unpack_rd(msg, flip))
                        return -EPROTO;
                break;

        case IBLND_MSG_PUT_NAK:
        case IBLND_MSG_PUT_DONE:
        case IBLND_MSG_GET_DONE:
                if (flip)
                        __swab32s(&msg->ibm_u.completion.ibcm_status);
                break;

        case IBLND_MSG_CONNREQ:
        case IBLND_MSG_CONNACK:
                if (flip) {
                        __swab16s(&msg->ibm_u.connparams.ibcp_queue_depth);
                        __swab16s(&msg->ibm_u.connparams.ibcp_max_frags);
                        __swab32s(&msg->ibm_u.connparams.ibcp_max_msg_size);
                }
                break;
        }
        return 0;
}

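/* Allocate and initialise a peer on the CPT its NID hashes to.  The single
 * initial reference belongs to the caller; the peer is not yet in the
 * peer table. */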
int kiblnd_create_peer(lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid)
{
        kib_peer_t    *peer;
        kib_net_t     *net = ni->ni_data;
        int            cpt = lnet_cpt_of_nid(nid);
        unsigned long  flags;

        LASSERT(net != NULL);
        LASSERT(nid != LNET_NID_ANY);

        LIBCFS_CPT_ALLOC(peer, lnet_cpt_table(), cpt, sizeof(*peer));
        if (peer == NULL) {
                CERROR("Cannot allocate peer\n");
                return -ENOMEM;
        }

        memset(peer, 0, sizeof(*peer));  /* zero flags etc */

        peer->ibp_ni = ni;
        peer->ibp_nid = nid;
        peer->ibp_error = 0;
        peer->ibp_last_alive = 0;
        atomic_set(&peer->ibp_refcount, 1);  /* 1 ref for caller */

        INIT_LIST_HEAD(&peer->ibp_list);     /* not in the peer table yet */
        INIT_LIST_HEAD(&peer->ibp_conns);
        INIT_LIST_HEAD(&peer->ibp_tx_queue);

        write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);

        /* always called with a ref on ni, which prevents ni being shutdown */
        LASSERT(net->ibn_shutdown == 0);

        /* npeers only grows with the global lock held */
        atomic_inc(&net->ibn_npeers);

        write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);

        *peerp = peer;
        return 0;
}

void kiblnd_destroy_peer(kib_peer_t *peer)
{
        kib_net_t *net = peer->ibp_ni->ni_data;

        LASSERT(net != NULL);
        LASSERT(atomic_read(&peer->ibp_refcount) == 0);
        LASSERT(!kiblnd_peer_active(peer));
        LASSERT(peer->ibp_connecting == 0);
        LASSERT(peer->ibp_accepting == 0);
        LASSERT(list_empty(&peer->ibp_conns));
        LASSERT(list_empty(&peer->ibp_tx_queue));

        LIBCFS_FREE(peer, sizeof(*peer));

        /* NB a peer's connections keep a reference on their peer until
         * they are destroyed, so we can be assured that _all_ state to do
         * with this peer has been cleaned up when its refcount drops to
         * zero. */
        atomic_dec(&net->ibn_npeers);
}

kib_peer_t *kiblnd_find_peer_locked(lnet_nid_t nid)
{
        /* the caller is responsible for accounting the additional reference
         * that this creates */
        struct list_head *peer_list = kiblnd_nid2peerlist(nid);
        struct list_head *tmp;
        kib_peer_t       *peer;

        list_for_each(tmp, peer_list) {

                peer = list_entry(tmp, kib_peer_t, ibp_list);

                LASSERT(peer->ibp_connecting > 0 || /* creating conns */
                        peer->ibp_accepting > 0 ||
                        !list_empty(&peer->ibp_conns));  /* active conn */

                if (peer->ibp_nid != nid)
                        continue;

                CDEBUG(D_NET, "got peer [%p] -> %s (%d) version: %x\n",
                       peer, libcfs_nid2str(nid),
                       atomic_read(&peer->ibp_refcount),
                       peer->ibp_version);
                return peer;
        }
        return NULL;
}

void kiblnd_unlink_peer_locked(kib_peer_t *peer)
{
        LASSERT(list_empty(&peer->ibp_conns));

        LASSERT(kiblnd_peer_active(peer));
        list_del_init(&peer->ibp_list);
        /* lose peerlist's ref */
        kiblnd_peer_decref(peer);
}

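/* ioctl helper: report the NID and reference count of the index'th peer
 * on this NI. */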
static int kiblnd_get_peer_info(lnet_ni_t *ni, int index,
                                lnet_nid_t *nidp, int *count)
{
        kib_peer_t       *peer;
        struct list_head *ptmp;
        int               i;
        unsigned long     flags;

        read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);

        for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) {

                list_for_each(ptmp, &kiblnd_data.kib_peers[i]) {

                        peer = list_entry(ptmp, kib_peer_t, ibp_list);
                        LASSERT(peer->ibp_connecting > 0 ||
                                peer->ibp_accepting > 0 ||
                                !list_empty(&peer->ibp_conns));

                        if (peer->ibp_ni != ni)
                                continue;

                        if (index-- > 0)
                                continue;

                        *nidp = peer->ibp_nid;
                        *count = atomic_read(&peer->ibp_refcount);

                        read_unlock_irqrestore(&kiblnd_data.kib_global_lock,
                                               flags);
                        return 0;
                }
        }

        read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
        return -ENOENT;
}

static void kiblnd_del_peer_locked(kib_peer_t *peer)
{
        struct list_head *ctmp;
        struct list_head *cnxt;
        kib_conn_t       *conn;

        if (list_empty(&peer->ibp_conns)) {
                kiblnd_unlink_peer_locked(peer);
        } else {
                list_for_each_safe(ctmp, cnxt, &peer->ibp_conns) {
                        conn = list_entry(ctmp, kib_conn_t, ibc_list);

                        kiblnd_close_conn_locked(conn, 0);
                }
                /* NB closing peer's last conn unlinked it. */
        }
        /* NB peer now unlinked; might even be freed if the peer table had the
         * last ref on it. */
}

static int kiblnd_del_peer(lnet_ni_t *ni, lnet_nid_t nid)
{
        LIST_HEAD(zombies);
        struct list_head *ptmp;
        struct list_head *pnxt;
        kib_peer_t       *peer;
        int               lo;
        int               hi;
        int               i;
        unsigned long     flags;
        int               rc = -ENOENT;

        write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);

        if (nid != LNET_NID_ANY) {
                lo = hi = kiblnd_nid2peerlist(nid) - kiblnd_data.kib_peers;
        } else {
                lo = 0;
                hi = kiblnd_data.kib_peer_hash_size - 1;
        }

        for (i = lo; i <= hi; i++) {
                list_for_each_safe(ptmp, pnxt, &kiblnd_data.kib_peers[i]) {
                        peer = list_entry(ptmp, kib_peer_t, ibp_list);
                        LASSERT(peer->ibp_connecting > 0 ||
                                peer->ibp_accepting > 0 ||
                                !list_empty(&peer->ibp_conns));

                        if (peer->ibp_ni != ni)
                                continue;

                        if (!(nid == LNET_NID_ANY || peer->ibp_nid == nid))
                                continue;

                        if (!list_empty(&peer->ibp_tx_queue)) {
                                LASSERT(list_empty(&peer->ibp_conns));

                                list_splice_init(&peer->ibp_tx_queue,
                                                 &zombies);
                        }

                        kiblnd_del_peer_locked(peer);
                        rc = 0;  /* matched something */
                }
        }

        write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);

        kiblnd_txlist_done(ni, &zombies, -EIO);

        return rc;
}

static kib_conn_t *kiblnd_get_conn_by_idx(lnet_ni_t *ni, int index)
{
        kib_peer_t       *peer;
        struct list_head *ptmp;
        kib_conn_t       *conn;
        struct list_head *ctmp;
        int               i;
        unsigned long     flags;

        read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);

        for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) {
                list_for_each(ptmp, &kiblnd_data.kib_peers[i]) {

                        peer = list_entry(ptmp, kib_peer_t, ibp_list);
                        LASSERT(peer->ibp_connecting > 0 ||
                                peer->ibp_accepting > 0 ||
                                !list_empty(&peer->ibp_conns));

                        if (peer->ibp_ni != ni)
                                continue;

                        list_for_each(ctmp, &peer->ibp_conns) {
                                if (index-- > 0)
                                        continue;

                                conn = list_entry(ctmp, kib_conn_t, ibc_list);
                                kiblnd_conn_addref(conn);
                                read_unlock_irqrestore(
                                        &kiblnd_data.kib_global_lock,
                                        flags);
                                return conn;
                        }
                }
        }

        read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
        return NULL;
}

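/* Translate an MTU in bytes to the IB_MTU_* enum; 0 means "leave the path
 * MTU alone" and -1 flags an unsupported value. */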
int kiblnd_translate_mtu(int value)
{
        switch (value) {
        default:
                return -1;
        case 0:
                return 0;
        case 256:
                return IB_MTU_256;
        case 512:
                return IB_MTU_512;
        case 1024:
                return IB_MTU_1024;
        case 2048:
                return IB_MTU_2048;
        case 4096:
                return IB_MTU_4096;
        }
}

static void kiblnd_setup_mtu_locked(struct rdma_cm_id *cmid)
{
        int mtu;

        /* XXX There is no path record for iWARP, set by netdev->change_mtu? */
        if (cmid->route.path_rec == NULL)
                return;

        mtu = kiblnd_translate_mtu(*kiblnd_tunables.kib_ib_mtu);
        LASSERT(mtu >= 0);
        if (mtu != 0)
                cmid->route.path_rec->mtu = mtu;
}

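/* Spread CQs over the device's completion vectors by hashing the peer's
 * NID to a CPU within this CPT's cpumask. */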
static int kiblnd_get_completion_vector(kib_conn_t *conn, int cpt)
{
        cpumask_t  *mask;
        int         vectors;
        int         off;
        int         i;
        lnet_nid_t  nid = conn->ibc_peer->ibp_nid;

        vectors = conn->ibc_cmid->device->num_comp_vectors;
        if (vectors <= 1)
                return 0;

        mask = cfs_cpt_cpumask(lnet_cpt_table(), cpt);
        if (mask == NULL)
                return 0;

        /* hash NID to CPU id in this partition... */
        off = do_div(nid, cpumask_weight(mask));
        for_each_cpu(i, mask) {
                if (off-- == 0)
                        return i % vectors;
        }

        LBUG();
        return 1;
}

kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
                               int state, int version)
{
        /* CAVEAT EMPTOR:
         * If the new conn is created successfully it takes over the caller's
         * ref on 'peer'.  It also "owns" 'cmid' and destroys it when it itself
         * is destroyed.  On failure, the caller's ref on 'peer' remains and
         * she must dispose of 'cmid'.  (Actually I'd block forever if I tried
         * to destroy 'cmid' here since I'm called from the CM which still has
         * its ref on 'cmid'). */
        rwlock_t               *glock = &kiblnd_data.kib_global_lock;
        kib_net_t              *net = peer->ibp_ni->ni_data;
        kib_dev_t              *dev;
        struct ib_qp_init_attr *init_qp_attr;
        struct kib_sched_info  *sched;
        struct ib_cq_init_attr  cq_attr = {};
        kib_conn_t             *conn;
        struct ib_cq           *cq;
        unsigned long           flags;
        int                     cpt;
        int                     rc;
        int                     i;

        LASSERT(net != NULL);
        LASSERT(!in_interrupt());

        dev = net->ibn_dev;

        cpt = lnet_cpt_of_nid(peer->ibp_nid);
        sched = kiblnd_data.kib_scheds[cpt];

        LASSERT(sched->ibs_nthreads > 0);

        LIBCFS_CPT_ALLOC(init_qp_attr, lnet_cpt_table(), cpt,
                         sizeof(*init_qp_attr));
        if (init_qp_attr == NULL) {
                CERROR("Can't allocate qp_attr for %s\n",
                       libcfs_nid2str(peer->ibp_nid));
                goto failed_0;
        }

        LIBCFS_CPT_ALLOC(conn, lnet_cpt_table(), cpt, sizeof(*conn));
        if (conn == NULL) {
                CERROR("Can't allocate connection for %s\n",
                       libcfs_nid2str(peer->ibp_nid));
                goto failed_1;
        }

        conn->ibc_state = IBLND_CONN_INIT;
        conn->ibc_version = version;
        conn->ibc_peer = peer;    /* I take the caller's ref */
        cmid->context = conn;     /* for future CM callbacks */
        conn->ibc_cmid = cmid;

        INIT_LIST_HEAD(&conn->ibc_early_rxs);
        INIT_LIST_HEAD(&conn->ibc_tx_noops);
        INIT_LIST_HEAD(&conn->ibc_tx_queue);
        INIT_LIST_HEAD(&conn->ibc_tx_queue_rsrvd);
        INIT_LIST_HEAD(&conn->ibc_tx_queue_nocred);
        INIT_LIST_HEAD(&conn->ibc_active_txs);
        spin_lock_init(&conn->ibc_lock);

        LIBCFS_CPT_ALLOC(conn->ibc_connvars, lnet_cpt_table(), cpt,
                         sizeof(*conn->ibc_connvars));
        if (conn->ibc_connvars == NULL) {
                CERROR("Can't allocate in-progress connection state\n");
                goto failed_2;
        }

        write_lock_irqsave(glock, flags);
        if (dev->ibd_failover) {
                write_unlock_irqrestore(glock, flags);
                CERROR("%s: failover in progress\n", dev->ibd_ifname);
                goto failed_2;
        }

        if (dev->ibd_hdev->ibh_ibdev != cmid->device) {
                /* wakeup failover thread and teardown connection */
                if (kiblnd_dev_can_failover(dev)) {
                        list_add_tail(&dev->ibd_fail_list,
                                      &kiblnd_data.kib_failed_devs);
                        wake_up(&kiblnd_data.kib_failover_waitq);
                }

                write_unlock_irqrestore(glock, flags);
                CERROR("cmid HCA(%s), kib_dev(%s) need failover\n",
                       cmid->device->name, dev->ibd_ifname);
                goto failed_2;
        }

        kiblnd_hdev_addref_locked(dev->ibd_hdev);
        conn->ibc_hdev = dev->ibd_hdev;

        kiblnd_setup_mtu_locked(cmid);

        write_unlock_irqrestore(glock, flags);

        LIBCFS_CPT_ALLOC(conn->ibc_rxs, lnet_cpt_table(), cpt,
                         IBLND_RX_MSGS(version) * sizeof(kib_rx_t));
        if (conn->ibc_rxs == NULL) {
                CERROR("Cannot allocate RX buffers\n");
                goto failed_2;
        }

        rc = kiblnd_alloc_pages(&conn->ibc_rx_pages, cpt,
                                IBLND_RX_MSG_PAGES(version));
        if (rc != 0)
                goto failed_2;

        kiblnd_map_rx_descs(conn);

        cq_attr.cqe = IBLND_CQ_ENTRIES(version);
        cq_attr.comp_vector = kiblnd_get_completion_vector(conn, cpt);
        cq = ib_create_cq(cmid->device,
                          kiblnd_cq_completion, kiblnd_cq_event, conn,
                          &cq_attr);
        if (IS_ERR(cq)) {
                CERROR("Can't create CQ: %ld, cqe: %d\n",
                       PTR_ERR(cq), IBLND_CQ_ENTRIES(version));
                goto failed_2;
        }

        conn->ibc_cq = cq;

        rc = ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
        if (rc != 0) {
                CERROR("Can't request completion notification: %d\n", rc);
                goto failed_2;
        }

        init_qp_attr->event_handler = kiblnd_qp_event;
        init_qp_attr->qp_context = conn;
        init_qp_attr->cap.max_send_wr = IBLND_SEND_WRS(version);
        init_qp_attr->cap.max_recv_wr = IBLND_RECV_WRS(version);
        init_qp_attr->cap.max_send_sge = 1;
        init_qp_attr->cap.max_recv_sge = 1;
        init_qp_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
        init_qp_attr->qp_type = IB_QPT_RC;
        init_qp_attr->send_cq = cq;
        init_qp_attr->recv_cq = cq;

        conn->ibc_sched = sched;

        rc = rdma_create_qp(cmid, conn->ibc_hdev->ibh_pd, init_qp_attr);
        if (rc != 0) {
                CERROR("Can't create QP: %d, send_wr: %d, recv_wr: %d\n",
                       rc, init_qp_attr->cap.max_send_wr,
                       init_qp_attr->cap.max_recv_wr);
                goto failed_2;
        }

        LIBCFS_FREE(init_qp_attr, sizeof(*init_qp_attr));

        /* 1 ref for caller and each rxmsg */
        atomic_set(&conn->ibc_refcount, 1 + IBLND_RX_MSGS(version));
        conn->ibc_nrx = IBLND_RX_MSGS(version);

        /* post receives */
        for (i = 0; i < IBLND_RX_MSGS(version); i++) {
                rc = kiblnd_post_rx(&conn->ibc_rxs[i],
                                    IBLND_POSTRX_NO_CREDIT);
                if (rc != 0) {
                        CERROR("Can't post rxmsg: %d\n", rc);

                        /* Make posted receives complete */
                        kiblnd_abort_receives(conn);

                        /* correct # of posted buffers
                         * NB locking needed now I'm racing with completion */
                        spin_lock_irqsave(&sched->ibs_lock, flags);
                        conn->ibc_nrx -= IBLND_RX_MSGS(version) - i;
                        spin_unlock_irqrestore(&sched->ibs_lock, flags);

                        /* cmid will be destroyed by CM(ofed) after cm_callback
                         * returned, so we can't refer it anymore
                         * (by kiblnd_connd()->kiblnd_destroy_conn) */
                        rdma_destroy_qp(conn->ibc_cmid);
                        conn->ibc_cmid = NULL;

                        /* Drop my own and unused rxbuffer refcounts */
                        while (i++ <= IBLND_RX_MSGS(version))
                                kiblnd_conn_decref(conn);

                        return NULL;
                }
        }

        /* Init successful! */
        LASSERT(state == IBLND_CONN_ACTIVE_CONNECT ||
                state == IBLND_CONN_PASSIVE_WAIT);
        conn->ibc_state = state;

        /* 1 more conn */
        atomic_inc(&net->ibn_nconns);
        return conn;

 failed_2:
        kiblnd_destroy_conn(conn);
 failed_1:
        LIBCFS_FREE(init_qp_attr, sizeof(*init_qp_attr));
 failed_0:
        return NULL;
}

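/* Final teardown, called when the last reference is dropped: destroy the
 * QP and CQ, unmap and free the RX buffers, and release the peer and
 * device references taken at creation. */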
void kiblnd_destroy_conn(kib_conn_t *conn)
{
        struct rdma_cm_id *cmid = conn->ibc_cmid;
        kib_peer_t        *peer = conn->ibc_peer;
        int                rc;

        LASSERT(!in_interrupt());
        LASSERT(atomic_read(&conn->ibc_refcount) == 0);
        LASSERT(list_empty(&conn->ibc_early_rxs));
        LASSERT(list_empty(&conn->ibc_tx_noops));
        LASSERT(list_empty(&conn->ibc_tx_queue));
        LASSERT(list_empty(&conn->ibc_tx_queue_rsrvd));
        LASSERT(list_empty(&conn->ibc_tx_queue_nocred));
        LASSERT(list_empty(&conn->ibc_active_txs));
        LASSERT(conn->ibc_noops_posted == 0);
        LASSERT(conn->ibc_nsends_posted == 0);

        switch (conn->ibc_state) {
        default:
                /* conn must be completely disengaged from the network */
                LBUG();

        case IBLND_CONN_DISCONNECTED:
                /* connvars should have been freed already */
                LASSERT(conn->ibc_connvars == NULL);
                break;

        case IBLND_CONN_INIT:
                break;
        }

        /* conn->ibc_cmid might be destroyed by CM already */
        if (cmid != NULL && cmid->qp != NULL)
                rdma_destroy_qp(cmid);

        if (conn->ibc_cq != NULL) {
                rc = ib_destroy_cq(conn->ibc_cq);
                if (rc != 0)
                        CWARN("Error destroying CQ: %d\n", rc);
        }

        if (conn->ibc_rx_pages != NULL)
                kiblnd_unmap_rx_descs(conn);

        if (conn->ibc_rxs != NULL) {
                LIBCFS_FREE(conn->ibc_rxs,
                            IBLND_RX_MSGS(conn->ibc_version)
                              * sizeof(kib_rx_t));
        }

        if (conn->ibc_connvars != NULL)
                LIBCFS_FREE(conn->ibc_connvars, sizeof(*conn->ibc_connvars));

        if (conn->ibc_hdev != NULL)
                kiblnd_hdev_decref(conn->ibc_hdev);

        /* See CAVEAT EMPTOR above in kiblnd_create_conn */
        if (conn->ibc_state != IBLND_CONN_INIT) {
                kib_net_t *net = peer->ibp_ni->ni_data;

                kiblnd_peer_decref(peer);
                rdma_destroy_id(cmid);
                atomic_dec(&net->ibn_nconns);
        }

        LIBCFS_FREE(conn, sizeof(*conn));
}

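/* Close every connection of this peer for the given reason; returns the
 * number of connections closed. */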
int kiblnd_close_peer_conns_locked(kib_peer_t *peer, int why)
{
        kib_conn_t       *conn;
        struct list_head *ctmp;
        struct list_head *cnxt;
        int               count = 0;

        list_for_each_safe(ctmp, cnxt, &peer->ibp_conns) {
                conn = list_entry(ctmp, kib_conn_t, ibc_list);

                CDEBUG(D_NET, "Closing conn -> %s, version: %x, reason: %d\n",
                       libcfs_nid2str(peer->ibp_nid),
                       conn->ibc_version, why);

                kiblnd_close_conn_locked(conn, why);
                count++;
        }

        return count;
}

int kiblnd_close_stale_conns_locked(kib_peer_t *peer,
                                    int version, __u64 incarnation)
{
        kib_conn_t       *conn;
        struct list_head *ctmp;
        struct list_head *cnxt;
        int               count = 0;

        list_for_each_safe(ctmp, cnxt, &peer->ibp_conns) {
                conn = list_entry(ctmp, kib_conn_t, ibc_list);

                if (conn->ibc_version     == version &&
                    conn->ibc_incarnation == incarnation)
                        continue;

                CDEBUG(D_NET,
                       "Closing stale conn -> %s version: %x, incarnation:%#llx(%x, %#llx)\n",
                       libcfs_nid2str(peer->ibp_nid),
                       conn->ibc_version, conn->ibc_incarnation,
                       version, incarnation);

                kiblnd_close_conn_locked(conn, -ESTALE);
                count++;
        }

        return count;
}

static int kiblnd_close_matching_conns(lnet_ni_t *ni, lnet_nid_t nid)
{
        kib_peer_t       *peer;
        struct list_head *ptmp;
        struct list_head *pnxt;
        int               lo;
        int               hi;
        int               i;
        unsigned long     flags;
        int               count = 0;

        write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);

        if (nid != LNET_NID_ANY) {
                lo = hi = kiblnd_nid2peerlist(nid) - kiblnd_data.kib_peers;
        } else {
                lo = 0;
                hi = kiblnd_data.kib_peer_hash_size - 1;
        }

        for (i = lo; i <= hi; i++) {
                list_for_each_safe(ptmp, pnxt, &kiblnd_data.kib_peers[i]) {

                        peer = list_entry(ptmp, kib_peer_t, ibp_list);
                        LASSERT(peer->ibp_connecting > 0 ||
                                peer->ibp_accepting > 0 ||
                                !list_empty(&peer->ibp_conns));

                        if (peer->ibp_ni != ni)
                                continue;

                        if (!(nid == LNET_NID_ANY || nid == peer->ibp_nid))
                                continue;

                        count += kiblnd_close_peer_conns_locked(peer, 0);
                }
        }

        write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);

        /* wildcards always succeed */
        if (nid == LNET_NID_ANY)
                return 0;

        return (count == 0) ? -ENOENT : 0;
}

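/* LND ioctl handler: peer and connection inspection plus forced
 * disconnection. */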
int kiblnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
{
        struct libcfs_ioctl_data *data = arg;
        int                       rc = -EINVAL;

        switch (cmd) {
        case IOC_LIBCFS_GET_PEER: {
                lnet_nid_t nid = 0;
                int        count = 0;

                rc = kiblnd_get_peer_info(ni, data->ioc_count,
                                          &nid, &count);
                data->ioc_nid   = nid;
                data->ioc_count = count;
                break;
        }

        case IOC_LIBCFS_DEL_PEER: {
                rc = kiblnd_del_peer(ni, data->ioc_nid);
                break;
        }

        case IOC_LIBCFS_GET_CONN: {
                kib_conn_t *conn;

                rc = 0;
                conn = kiblnd_get_conn_by_idx(ni, data->ioc_count);
                if (conn == NULL) {
                        rc = -ENOENT;
                        break;
                }

                LASSERT(conn->ibc_cmid != NULL);
                data->ioc_nid = conn->ibc_peer->ibp_nid;
                if (conn->ibc_cmid->route.path_rec == NULL)
                        data->ioc_u32[0] = 0; /* iWarp has no path MTU */
                else
                        data->ioc_u32[0] =
                        ib_mtu_enum_to_int(conn->ibc_cmid->route.path_rec->mtu);
                kiblnd_conn_decref(conn);
                break;
        }

        case IOC_LIBCFS_CLOSE_CONNECTION: {
                rc = kiblnd_close_matching_conns(ni, data->ioc_nid);
                break;
        }

        default:
                break;
        }

        return rc;
}

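/* LND query handler: report when the peer was last known alive.  If the
 * peer isn't in the table yet, launch a NULL tx to trigger peer creation
 * and connection establishment. */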
void kiblnd_query(lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when)
{
        unsigned long  last_alive = 0;
        unsigned long  now = cfs_time_current();
        rwlock_t      *glock = &kiblnd_data.kib_global_lock;
        kib_peer_t    *peer;
        unsigned long  flags;

        read_lock_irqsave(glock, flags);

        peer = kiblnd_find_peer_locked(nid);
        if (peer != NULL) {
                LASSERT(peer->ibp_connecting > 0 || /* creating conns */
                        peer->ibp_accepting > 0 ||
                        !list_empty(&peer->ibp_conns));  /* active conn */
                last_alive = peer->ibp_last_alive;
        }

        read_unlock_irqrestore(glock, flags);

        if (last_alive != 0)
                *when = last_alive;

        /* peer is not persistent in hash, trigger peer creation
         * and connection establishment with a NULL tx */
        if (peer == NULL)
                kiblnd_launch_tx(ni, NULL, nid);

        CDEBUG(D_NET, "Peer %s %p, alive %ld secs ago\n",
               libcfs_nid2str(nid), peer,
               last_alive ? cfs_duration_sec(now - last_alive) : -1);
}

void kiblnd_free_pages(kib_pages_t *p)
{
        int npages = p->ibp_npages;
        int i;

        for (i = 0; i < npages; i++) {
                if (p->ibp_pages[i] != NULL)
                        __free_page(p->ibp_pages[i]);
        }

        LIBCFS_FREE(p, offsetof(kib_pages_t, ibp_pages[npages]));
}

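/* Allocate an array of npages pages spread onto the CPT's NUMA node;
 * everything is freed again if any single allocation fails. */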
int kiblnd_alloc_pages(kib_pages_t **pp, int cpt, int npages)
{
        kib_pages_t *p;
        int          i;

        LIBCFS_CPT_ALLOC(p, lnet_cpt_table(), cpt,
                         offsetof(kib_pages_t, ibp_pages[npages]));
        if (p == NULL) {
                CERROR("Can't allocate descriptor for %d pages\n", npages);
                return -ENOMEM;
        }

        memset(p, 0, offsetof(kib_pages_t, ibp_pages[npages]));
        p->ibp_npages = npages;

        for (i = 0; i < npages; i++) {
                p->ibp_pages[i] = alloc_pages_node(
                                    cfs_cpt_spread_node(lnet_cpt_table(), cpt),
                                    GFP_NOFS, 0);
                if (p->ibp_pages[i] == NULL) {
                        CERROR("Can't allocate page %d of %d\n", i, npages);
                        kiblnd_free_pages(p);
                        return -ENOMEM;
                }
        }

        *pp = p;
        return 0;
}

void kiblnd_unmap_rx_descs(kib_conn_t *conn)
{
        kib_rx_t *rx;
        int       i;

        LASSERT(conn->ibc_rxs != NULL);
        LASSERT(conn->ibc_hdev != NULL);

        for (i = 0; i < IBLND_RX_MSGS(conn->ibc_version); i++) {
                rx = &conn->ibc_rxs[i];

                LASSERT(rx->rx_nob >= 0); /* not posted */

                kiblnd_dma_unmap_single(conn->ibc_hdev->ibh_ibdev,
                                        KIBLND_UNMAP_ADDR(rx, rx_msgunmap,
                                                          rx->rx_msgaddr),
                                        IBLND_MSG_SIZE, DMA_FROM_DEVICE);
        }

        kiblnd_free_pages(conn->ibc_rx_pages);

        conn->ibc_rx_pages = NULL;
}

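/* Carve the RX pages into IBLND_MSG_SIZE buffers and DMA-map each one for
 * the device.  IBLND_MSG_SIZE divides PAGE_SIZE, so a buffer never
 * straddles a page boundary. */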
void kiblnd_map_rx_descs(kib_conn_t *conn)
{
        kib_rx_t    *rx;
        struct page *pg;
        int          pg_off;
        int          ipg;
        int          i;

        for (pg_off = ipg = i = 0;
             i < IBLND_RX_MSGS(conn->ibc_version); i++) {
                pg = conn->ibc_rx_pages->ibp_pages[ipg];
                rx = &conn->ibc_rxs[i];

                rx->rx_conn = conn;
                rx->rx_msg = (kib_msg_t *)(((char *)page_address(pg)) + pg_off);

                rx->rx_msgaddr = kiblnd_dma_map_single(conn->ibc_hdev->ibh_ibdev,
                                                       rx->rx_msg,
                                                       IBLND_MSG_SIZE,
                                                       DMA_FROM_DEVICE);
                LASSERT(!kiblnd_dma_mapping_error(conn->ibc_hdev->ibh_ibdev,
                                                  rx->rx_msgaddr));
                KIBLND_UNMAP_ADDR_SET(rx, rx_msgunmap, rx->rx_msgaddr);

                CDEBUG(D_NET, "rx %d: %p %#llx(%#llx)\n",
                       i, rx->rx_msg, rx->rx_msgaddr,
                       lnet_page2phys(pg) + pg_off);

                pg_off += IBLND_MSG_SIZE;
                LASSERT(pg_off <= PAGE_SIZE);

                if (pg_off == PAGE_SIZE) {
                        pg_off = 0;
                        ipg++;
                        LASSERT(ipg <= IBLND_RX_MSG_PAGES(conn->ibc_version));
                }
        }
}

static void kiblnd_unmap_tx_pool(kib_tx_pool_t *tpo)
{
        kib_hca_dev_t *hdev = tpo->tpo_hdev;
        kib_tx_t      *tx;
        int            i;

        LASSERT(tpo->tpo_pool.po_allocated == 0);

        if (hdev == NULL)
                return;

        for (i = 0; i < tpo->tpo_pool.po_size; i++) {
                tx = &tpo->tpo_tx_descs[i];
                kiblnd_dma_unmap_single(hdev->ibh_ibdev,
                                        KIBLND_UNMAP_ADDR(tx, tx_msgunmap,
                                                          tx->tx_msgaddr),
                                        IBLND_MSG_SIZE, DMA_TO_DEVICE);
        }

        kiblnd_hdev_decref(hdev);
        tpo->tpo_hdev = NULL;
}

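/* Take a reference on the device's current HCA descriptor, waiting
 * politely while a failover is in progress. */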
static kib_hca_dev_t *kiblnd_current_hdev(kib_dev_t *dev)
{
        kib_hca_dev_t *hdev;
        unsigned long  flags;
        int            i = 0;

        read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
        while (dev->ibd_failover) {
                read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
                if (i++ % 50 == 0)
                        CDEBUG(D_NET, "%s: Wait for failover\n",
                               dev->ibd_ifname);
                schedule_timeout(cfs_time_seconds(1) / 100);

                read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
        }

        kiblnd_hdev_addref_locked(dev->ibd_hdev);
        hdev = dev->ibd_hdev;

        read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);

        return hdev;
}

static void kiblnd_map_tx_pool(kib_tx_pool_t *tpo)
{
        kib_pages_t *txpgs = tpo->tpo_tx_pages;
        kib_pool_t  *pool  = &tpo->tpo_pool;
        kib_net_t   *net   = pool->po_owner->ps_net;
        kib_dev_t   *dev;
        struct page *page;
        kib_tx_t    *tx;
        int          page_offset;
        int          ipage;
        int          i;

        LASSERT(net != NULL);

        dev = net->ibn_dev;

        /* pre-mapped messages are not bigger than 1 page */
        CLASSERT(IBLND_MSG_SIZE <= PAGE_SIZE);

        /* No fancy arithmetic when we do the buffer calculations */
        CLASSERT(PAGE_SIZE % IBLND_MSG_SIZE == 0);

        tpo->tpo_hdev = kiblnd_current_hdev(dev);

        for (ipage = page_offset = i = 0; i < pool->po_size; i++) {
                page = txpgs->ibp_pages[ipage];
                tx = &tpo->tpo_tx_descs[i];

                tx->tx_msg = (kib_msg_t *)(((char *)page_address(page)) +
                                           page_offset);

                tx->tx_msgaddr = kiblnd_dma_map_single(
                        tpo->tpo_hdev->ibh_ibdev, tx->tx_msg,
                        IBLND_MSG_SIZE, DMA_TO_DEVICE);
                LASSERT(!kiblnd_dma_mapping_error(tpo->tpo_hdev->ibh_ibdev,
                                                  tx->tx_msgaddr));
                KIBLND_UNMAP_ADDR_SET(tx, tx_msgunmap, tx->tx_msgaddr);

                list_add(&tx->tx_list, &pool->po_free_list);

                page_offset += IBLND_MSG_SIZE;
                LASSERT(page_offset <= PAGE_SIZE);

                if (page_offset == PAGE_SIZE) {
                        page_offset = 0;
                        ipage++;
                        LASSERT(ipage <= txpgs->ibp_npages);
                }
        }
}

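/* Look up the pre-registered MR covering [addr, addr + size); returns NULL
 * if no single MR covers the whole range. */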
struct ib_mr *kiblnd_find_dma_mr(kib_hca_dev_t *hdev, __u64 addr, __u64 size)
{
        __u64 index;

        LASSERT(hdev->ibh_mrs[0] != NULL);

        if (hdev->ibh_nmrs == 1)
                return hdev->ibh_mrs[0];

        index = addr >> hdev->ibh_mr_shift;

        if (index <  hdev->ibh_nmrs &&
            index == ((addr + size - 1) >> hdev->ibh_mr_shift))
                return hdev->ibh_mrs[index];

        return NULL;
}

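/* Return the one MR covering every fragment of 'rd', or NULL if the
 * fragments span several MRs or map-on-demand is configured to handle a
 * descriptor of this size. */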
struct ib_mr *kiblnd_find_rd_dma_mr(kib_hca_dev_t *hdev, kib_rdma_desc_t *rd)
{
        struct ib_mr *prev_mr;
        struct ib_mr *mr;
        int           i;

        LASSERT(hdev->ibh_mrs[0] != NULL);

        if (*kiblnd_tunables.kib_map_on_demand > 0 &&
            *kiblnd_tunables.kib_map_on_demand <= rd->rd_nfrags)
                return NULL;

        if (hdev->ibh_nmrs == 1)
                return hdev->ibh_mrs[0];

        for (i = 0, mr = prev_mr = NULL;
             i < rd->rd_nfrags; i++) {
                mr = kiblnd_find_dma_mr(hdev,
                                        rd->rd_frags[i].rf_addr,
                                        rd->rd_frags[i].rf_nob);
                if (prev_mr == NULL)
                        prev_mr = mr;

                if (mr == NULL || prev_mr != mr) {
                        /* Can't be covered by one single MR */
                        mr = NULL;
                        break;
                }
        }

        return mr;
}

static void kiblnd_destroy_fmr_pool(kib_fmr_pool_t *pool)
{
        LASSERT(pool->fpo_map_count == 0);

        if (pool->fpo_fmr_pool != NULL)
                ib_destroy_fmr_pool(pool->fpo_fmr_pool);

        if (pool->fpo_hdev != NULL)
                kiblnd_hdev_decref(pool->fpo_hdev);

        LIBCFS_FREE(pool, sizeof(kib_fmr_pool_t));
}

static void kiblnd_destroy_fmr_pool_list(struct list_head *head)
{
        kib_fmr_pool_t *pool;

        while (!list_empty(head)) {
                pool = list_entry(head->next, kib_fmr_pool_t, fpo_list);
                list_del(&pool->fpo_list);
                kiblnd_destroy_fmr_pool(pool);
        }
}

static int kiblnd_fmr_pool_size(int ncpts)
{
        int size = *kiblnd_tunables.kib_fmr_pool_size / ncpts;

        return max(IBLND_FMR_POOL, size);
}

static int kiblnd_fmr_flush_trigger(int ncpts)
{
        int size = *kiblnd_tunables.kib_fmr_flush_trigger / ncpts;

        return max(IBLND_FMR_POOL_FLUSH, size);
}

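/* Create one FMR pool on the poolset's CPT, sized and flushed according to
 * the poolset's pool_size and flush_trigger parameters. */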
static int kiblnd_create_fmr_pool(kib_fmr_poolset_t *fps,
                                  kib_fmr_pool_t **pp_fpo)
{
        /* FMR pool for RDMA */
        kib_dev_t      *dev = fps->fps_net->ibn_dev;
        kib_fmr_pool_t *fpo;
        struct ib_fmr_pool_param param = {
                .max_pages_per_fmr = LNET_MAX_PAYLOAD / PAGE_SIZE,
                .page_shift        = PAGE_SHIFT,
                .access            = (IB_ACCESS_LOCAL_WRITE |
                                      IB_ACCESS_REMOTE_WRITE),
                .pool_size         = fps->fps_pool_size,
                .dirty_watermark   = fps->fps_flush_trigger,
                .flush_function    = NULL,
                .flush_arg         = NULL,
                .cache             = !!*kiblnd_tunables.kib_fmr_cache};
        int rc;

        LIBCFS_CPT_ALLOC(fpo, lnet_cpt_table(), fps->fps_cpt, sizeof(*fpo));
        if (fpo == NULL)
                return -ENOMEM;

        fpo->fpo_hdev = kiblnd_current_hdev(dev);

        fpo->fpo_fmr_pool = ib_create_fmr_pool(fpo->fpo_hdev->ibh_pd, &param);
        if (IS_ERR(fpo->fpo_fmr_pool)) {
                rc = PTR_ERR(fpo->fpo_fmr_pool);
                CERROR("Failed to create FMR pool: %d\n", rc);

                kiblnd_hdev_decref(fpo->fpo_hdev);
                LIBCFS_FREE(fpo, sizeof(kib_fmr_pool_t));
                return rc;
        }

        fpo->fpo_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
        fpo->fpo_owner    = fps;
        *pp_fpo = fpo;

        return 0;
}

static void kiblnd_fail_fmr_poolset(kib_fmr_poolset_t *fps,
                                    struct list_head *zombies)
{
        if (fps->fps_net == NULL) /* initialized? */
                return;

        spin_lock(&fps->fps_lock);

        while (!list_empty(&fps->fps_pool_list)) {
                kib_fmr_pool_t *fpo = list_entry(fps->fps_pool_list.next,
                                                 kib_fmr_pool_t, fpo_list);
                fpo->fpo_failed = 1;
                list_del(&fpo->fpo_list);
                if (fpo->fpo_map_count == 0)
                        list_add(&fpo->fpo_list, zombies);
                else
                        list_add(&fpo->fpo_list, &fps->fps_failed_pool_list);
        }

        spin_unlock(&fps->fps_lock);
}

static void kiblnd_fini_fmr_poolset(kib_fmr_poolset_t *fps)
{
        if (fps->fps_net != NULL) { /* initialized? */
                kiblnd_destroy_fmr_pool_list(&fps->fps_failed_pool_list);
                kiblnd_destroy_fmr_pool_list(&fps->fps_pool_list);
        }
}

static int kiblnd_init_fmr_poolset(kib_fmr_poolset_t *fps, int cpt,
                                   kib_net_t *net, int pool_size,
                                   int flush_trigger)
{
        kib_fmr_pool_t *fpo;
        int             rc;

        memset(fps, 0, sizeof(kib_fmr_poolset_t));

        fps->fps_net = net;
        fps->fps_cpt = cpt;
        fps->fps_pool_size = pool_size;
        fps->fps_flush_trigger = flush_trigger;
        spin_lock_init(&fps->fps_lock);
        INIT_LIST_HEAD(&fps->fps_pool_list);
        INIT_LIST_HEAD(&fps->fps_failed_pool_list);

        rc = kiblnd_create_fmr_pool(fps, &fpo);
        if (rc == 0)
                list_add_tail(&fpo->fpo_list, &fps->fps_pool_list);

        return rc;
}

static int kiblnd_fmr_pool_is_idle(kib_fmr_pool_t *fpo, unsigned long now)
{
        if (fpo->fpo_map_count != 0) /* still in use */
                return 0;
        if (fpo->fpo_failed)
                return 1;
        return cfs_time_aftereq(now, fpo->fpo_deadline);
}

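/* Release an FMR mapping back to its pool and retire any pools that have
 * been idle past their deadline; the first pool in the set is persistent. */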
void kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status)
{
        LIST_HEAD(zombies);
        kib_fmr_pool_t    *fpo = fmr->fmr_pool;
        kib_fmr_poolset_t *fps = fpo->fpo_owner;
        unsigned long      now = cfs_time_current();
        kib_fmr_pool_t    *tmp;
        int                rc;

        rc = ib_fmr_pool_unmap(fmr->fmr_pfmr);
        LASSERT(rc == 0);

        if (status != 0) {
                rc = ib_flush_fmr_pool(fpo->fpo_fmr_pool);
                LASSERT(rc == 0);
        }

        fmr->fmr_pool = NULL;
        fmr->fmr_pfmr = NULL;

        spin_lock(&fps->fps_lock);
        fpo->fpo_map_count--;  /* decref the pool */

        list_for_each_entry_safe(fpo, tmp, &fps->fps_pool_list, fpo_list) {
                /* the first pool is persistent */
                if (fps->fps_pool_list.next == &fpo->fpo_list)
                        continue;

                if (kiblnd_fmr_pool_is_idle(fpo, now)) {
                        list_move(&fpo->fpo_list, &zombies);
                        fps->fps_version++;
                }
        }
        spin_unlock(&fps->fps_lock);

        if (!list_empty(&zombies))
                kiblnd_destroy_fmr_pool_list(&zombies);
}

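/* Map a physical page list through one of the set's pools.  On -EAGAIN
 * (every FMR in a pool busy) move on to the next pool, and grow the set
 * with a new pool once all existing ones are exhausted. */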
1526 int kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, __u64 *pages, int npages,
1527                         __u64 iov, kib_fmr_t *fmr)
1528 {
1529         struct ib_pool_fmr *pfmr;
1530         kib_fmr_pool_t     *fpo;
1531         __u64          version;
1532         int              rc;
1533
1534  again:
1535         spin_lock(&fps->fps_lock);
1536         version = fps->fps_version;
1537         list_for_each_entry(fpo, &fps->fps_pool_list, fpo_list) {
1538                 fpo->fpo_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
1539                 fpo->fpo_map_count++;
1540                 spin_unlock(&fps->fps_lock);
1541
1542                 pfmr = ib_fmr_pool_map_phys(fpo->fpo_fmr_pool,
1543                                             pages, npages, iov);
1544                 if (likely(!IS_ERR(pfmr))) {
1545                         fmr->fmr_pool = fpo;
1546                         fmr->fmr_pfmr = pfmr;
1547                         return 0;
1548                 }
1549
1550                 spin_lock(&fps->fps_lock);
1551                 fpo->fpo_map_count--;
1552                 if (PTR_ERR(pfmr) != -EAGAIN) {
1553                         spin_unlock(&fps->fps_lock);
1554                         return PTR_ERR(pfmr);
1555                 }
1556
1557                 /* -EAGAIN: pool exhausted; rescan if the pool set changed */
1558                 if (version != fps->fps_version) {
1559                         spin_unlock(&fps->fps_lock);
1560                         goto again;
1561                 }
1562         }
1563
1564         if (fps->fps_increasing) {
1565                 spin_unlock(&fps->fps_lock);
1566                 CDEBUG(D_NET,
1567                         "Another thread is allocating a new FMR pool, waiting for it to complete\n");
1568                 schedule();
1569                 goto again;
1570
1571         }
1572
1573         if (time_before(cfs_time_current(), fps->fps_next_retry)) {
1574                 /* someone failed recently */
1575                 spin_unlock(&fps->fps_lock);
1576                 return -EAGAIN;
1577         }
1578
1579         fps->fps_increasing = 1;
1580         spin_unlock(&fps->fps_lock);
1581
1582         CDEBUG(D_NET, "Allocate new FMR pool\n");
1583         rc = kiblnd_create_fmr_pool(fps, &fpo);
1584         spin_lock(&fps->fps_lock);
1585         fps->fps_increasing = 0;
1586         if (rc == 0) {
1587                 fps->fps_version++;
1588                 list_add_tail(&fpo->fpo_list, &fps->fps_pool_list);
1589         } else {
1590                 fps->fps_next_retry = cfs_time_shift(IBLND_POOL_RETRY);
1591         }
1592         spin_unlock(&fps->fps_lock);
1593
1594         goto again;
1595 }
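/*
 * Illustrative usage sketch for the map/unmap pair above; not part of
 * the driver, and kiblnd_example_fmr_usage is a hypothetical name.  It
 * shows the contract: a successful map pins one pool reference, and the
 * matching unmap releases it (flushing the ib_fmr_pool on error).
 */
#if 0   /* example only */
static int kiblnd_example_fmr_usage(kib_fmr_poolset_t *fps, __u64 *pages,
                                    int npages, __u64 iov)
{
        kib_fmr_t fmr;
        int       rc;

        /* may loop through "again" while another thread grows the set */
        rc = kiblnd_fmr_pool_map(fps, pages, npages, iov, &fmr);
        if (rc != 0)
                return rc;      /* e.g. -EAGAIN after a failed grow */

        /* ... post RDMA using the region described by fmr.fmr_pfmr ... */

        /* a non-zero status would force ib_flush_fmr_pool() first */
        kiblnd_fmr_pool_unmap(&fmr, 0);
        return 0;
}
#endif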
1596
1597 static void kiblnd_fini_pool(kib_pool_t *pool)
1598 {
1599         LASSERT(list_empty(&pool->po_free_list));
1600         LASSERT(pool->po_allocated == 0);
1601
1602         CDEBUG(D_NET, "Finalize %s pool\n", pool->po_owner->ps_name);
1603 }
1604
1605 static void kiblnd_init_pool(kib_poolset_t *ps, kib_pool_t *pool, int size)
1606 {
1607         CDEBUG(D_NET, "Initialize %s pool\n", ps->ps_name);
1608
1609         memset(pool, 0, sizeof(kib_pool_t));
1610         INIT_LIST_HEAD(&pool->po_free_list);
1611         pool->po_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
1612         pool->po_owner    = ps;
1613         pool->po_size     = size;
1614 }
1615
1616 static void kiblnd_destroy_pool_list(struct list_head *head)
1617 {
1618         kib_pool_t *pool;
1619
1620         while (!list_empty(head)) {
1621                 pool = list_entry(head->next, kib_pool_t, po_list);
1622                 list_del(&pool->po_list);
1623
1624                 LASSERT(pool->po_owner != NULL);
1625                 pool->po_owner->ps_pool_destroy(pool);
1626         }
1627 }
1628
1629 static void kiblnd_fail_poolset(kib_poolset_t *ps, struct list_head *zombies)
1630 {
1631         if (ps->ps_net == NULL) /* initialized? */
1632                 return;
1633
1634         spin_lock(&ps->ps_lock);
1635         while (!list_empty(&ps->ps_pool_list)) {
1636                 kib_pool_t *po = list_entry(ps->ps_pool_list.next,
1637                                             kib_pool_t, po_list);
1638                 po->po_failed = 1;
1639                 list_del(&po->po_list);
1640                 if (po->po_allocated == 0)
1641                         list_add(&po->po_list, zombies);
1642                 else
1643                         list_add(&po->po_list, &ps->ps_failed_pool_list);
1644         }
1645         spin_unlock(&ps->ps_lock);
1646 }
1647
1648 static void kiblnd_fini_poolset(kib_poolset_t *ps)
1649 {
1650         if (ps->ps_net != NULL) { /* initialized? */
1651                 kiblnd_destroy_pool_list(&ps->ps_failed_pool_list);
1652                 kiblnd_destroy_pool_list(&ps->ps_pool_list);
1653         }
1654 }
1655
1656 static int kiblnd_init_poolset(kib_poolset_t *ps, int cpt,
1657                                kib_net_t *net, char *name, int size,
1658                                kib_ps_pool_create_t po_create,
1659                                kib_ps_pool_destroy_t po_destroy,
1660                                kib_ps_node_init_t nd_init,
1661                                kib_ps_node_fini_t nd_fini)
1662 {
1663         kib_pool_t      *pool;
1664         int             rc;
1665
1666         memset(ps, 0, sizeof(kib_poolset_t));
1667
1668         ps->ps_cpt          = cpt;
1669         ps->ps_net        = net;
1670         ps->ps_pool_create  = po_create;
1671         ps->ps_pool_destroy = po_destroy;
1672         ps->ps_node_init    = nd_init;
1673         ps->ps_node_fini    = nd_fini;
1674         ps->ps_pool_size    = size;
1675         if (strlcpy(ps->ps_name, name, sizeof(ps->ps_name))
1676             >= sizeof(ps->ps_name))
1677                 return -E2BIG;
1678         spin_lock_init(&ps->ps_lock);
1679         INIT_LIST_HEAD(&ps->ps_pool_list);
1680         INIT_LIST_HEAD(&ps->ps_failed_pool_list);
1681
1682         rc = ps->ps_pool_create(ps, size, &pool);
1683         if (rc == 0)
1684                 list_add(&pool->po_list, &ps->ps_pool_list);
1685         else
1686                 CERROR("Failed to create the first pool for %s\n", ps->ps_name);
1687
1688         return rc;
1689 }
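/*
 * Sketch of how a poolset is parameterized; this mirrors the real call
 * made for the TX poolset in kiblnd_net_init_pools() below and only
 * annotates the callback roles.  The poolset supplies the shared
 * grow/retry/reap policy; the callbacks supply type-specific behaviour.
 */
#if 0   /* example only */
        rc = kiblnd_init_poolset(&tps->tps_poolset, cpt, net, "TX",
                                 kiblnd_tx_pool_size(ncpts),
                                 kiblnd_create_tx_pool,  /* ps_pool_create */
                                 kiblnd_destroy_tx_pool, /* ps_pool_destroy */
                                 kiblnd_tx_init,         /* ps_node_init */
                                 NULL);                  /* ps_node_fini */
#endif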
1690
1691 static int kiblnd_pool_is_idle(kib_pool_t *pool, unsigned long now)
1692 {
1693         if (pool->po_allocated != 0) /* still in use */
1694                 return 0;
1695         if (pool->po_failed)
1696                 return 1;
1697         return cfs_time_aftereq(now, pool->po_deadline);
1698 }
1699
1700 void kiblnd_pool_free_node(kib_pool_t *pool, struct list_head *node)
1701 {
1702         LIST_HEAD(zombies);
1703         kib_poolset_t  *ps = pool->po_owner;
1704         kib_pool_t     *tmp;
1705         unsigned long      now = cfs_time_current();
1706
1707         spin_lock(&ps->ps_lock);
1708
1709         if (ps->ps_node_fini != NULL)
1710                 ps->ps_node_fini(pool, node);
1711
1712         LASSERT(pool->po_allocated > 0);
1713         list_add(node, &pool->po_free_list);
1714         pool->po_allocated--;
1715
1716         list_for_each_entry_safe(pool, tmp, &ps->ps_pool_list, po_list) {
1717                 /* the first pool is persistent */
1718                 if (ps->ps_pool_list.next == &pool->po_list)
1719                         continue;
1720
1721                 if (kiblnd_pool_is_idle(pool, now))
1722                         list_move(&pool->po_list, &zombies);
1723         }
1724         spin_unlock(&ps->ps_lock);
1725
1726         if (!list_empty(&zombies))
1727                 kiblnd_destroy_pool_list(&zombies);
1728 }
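/*
 * Worked example of the reaping condition above, assuming
 * IBLND_POOL_DEADLINE is 300 seconds as in o2iblnd.h (an assumption of
 * this note): every allocation sets
 *   po_deadline = cfs_time_shift(300), i.e. now + 300s in jiffies,
 * so kiblnd_pool_is_idle() lets a non-persistent pool be destroyed only
 * once po_allocated == 0 and it has also gone untouched for 300 seconds
 * (or the pool is marked failed).
 */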
1729
1730 struct list_head *kiblnd_pool_alloc_node(kib_poolset_t *ps)
1731 {
1732         struct list_head            *node;
1733         kib_pool_t          *pool;
1734         int                 rc;
1735
1736  again:
1737         spin_lock(&ps->ps_lock);
1738         list_for_each_entry(pool, &ps->ps_pool_list, po_list) {
1739                 if (list_empty(&pool->po_free_list))
1740                         continue;
1741
1742                 pool->po_allocated++;
1743                 pool->po_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
1744                 node = pool->po_free_list.next;
1745                 list_del(node);
1746
1747                 if (ps->ps_node_init != NULL) {
1748                         /* still hold the lock */
1749                         ps->ps_node_init(pool, node);
1750                 }
1751                 spin_unlock(&ps->ps_lock);
1752                 return node;
1753         }
1754
1755         /* no free node in any pool and ... */
1756         if (ps->ps_increasing) {
1757                 /* another thread is allocating a new pool */
1758                 spin_unlock(&ps->ps_lock);
1759                 CDEBUG(D_NET, "Another thread is allocating a new %s pool, waiting for it to complete\n",
1760                        ps->ps_name);
1761                 schedule();
1762                 goto again;
1763         }
1764
1765         if (time_before(cfs_time_current(), ps->ps_next_retry)) {
1766                 /* someone failed recently */
1767                 spin_unlock(&ps->ps_lock);
1768                 return NULL;
1769         }
1770
1771         ps->ps_increasing = 1;
1772         spin_unlock(&ps->ps_lock);
1773
1774         CDEBUG(D_NET, "%s pool exhausted, allocate new pool\n", ps->ps_name);
1775
1776         rc = ps->ps_pool_create(ps, ps->ps_pool_size, &pool);
1777
1778         spin_lock(&ps->ps_lock);
1779         ps->ps_increasing = 0;
1780         if (rc == 0) {
1781                 list_add_tail(&pool->po_list, &ps->ps_pool_list);
1782         } else {
1783                 ps->ps_next_retry = cfs_time_shift(IBLND_POOL_RETRY);
1784                 CERROR("Can't allocate new %s pool: out of memory\n",
1785                        ps->ps_name);
1786         }
1787         spin_unlock(&ps->ps_lock);
1788
1789         goto again;
1790 }
1791
1792 void kiblnd_pmr_pool_unmap(kib_phys_mr_t *pmr)
1793 {
1794         kib_pmr_pool_t      *ppo = pmr->pmr_pool;
1795         struct ib_mr    *mr  = pmr->pmr_mr;
1796
1797         pmr->pmr_mr = NULL;
1798         kiblnd_pool_free_node(&ppo->ppo_pool, &pmr->pmr_list);
1799         if (mr != NULL)
1800                 ib_dereg_mr(mr);
1801 }
1802
1803 int kiblnd_pmr_pool_map(kib_pmr_poolset_t *pps, kib_hca_dev_t *hdev,
1804                         kib_rdma_desc_t *rd, __u64 *iova, kib_phys_mr_t **pp_pmr)
1805 {
1806         kib_phys_mr_t *pmr;
1807         struct list_head    *node;
1808         int         rc;
1809         int         i;
1810
1811         node = kiblnd_pool_alloc_node(&pps->pps_poolset);
1812         if (node == NULL) {
1813                 CERROR("Failed to allocate PMR descriptor\n");
1814                 return -ENOMEM;
1815         }
1816
1817         pmr = container_of(node, kib_phys_mr_t, pmr_list);
1818         if (pmr->pmr_pool->ppo_hdev != hdev) {
1819                 kiblnd_pool_free_node(&pmr->pmr_pool->ppo_pool, node);
1820                 return -EAGAIN;
1821         }
1822
1823         for (i = 0; i < rd->rd_nfrags; i++) {
1824                 pmr->pmr_ipb[i].addr = rd->rd_frags[i].rf_addr;
1825                 pmr->pmr_ipb[i].size = rd->rd_frags[i].rf_nob;
1826         }
1827
1828         pmr->pmr_mr = ib_reg_phys_mr(hdev->ibh_pd,
1829                                      pmr->pmr_ipb, rd->rd_nfrags,
1830                                      IB_ACCESS_LOCAL_WRITE |
1831                                      IB_ACCESS_REMOTE_WRITE,
1832                                      iova);
1833         if (!IS_ERR(pmr->pmr_mr)) {
1834                 pmr->pmr_iova = *iova;
1835                 *pp_pmr = pmr;
1836                 return 0;
1837         }
1838
1839         rc = PTR_ERR(pmr->pmr_mr);
1840         CERROR("Failed ib_reg_phys_mr: %d\n", rc);
1841
1842         pmr->pmr_mr = NULL;
1843         kiblnd_pool_free_node(&pmr->pmr_pool->ppo_pool, node);
1844
1845         return rc;
1846 }
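/*
 * Hedged caller sketch (hypothetical; the real mapping path lives in
 * o2iblnd_cb.c): the -EAGAIN above means the pooled descriptor belongs
 * to a previous HCA instance after failover, so one plausible strategy
 * is simply to retry and draw a descriptor from a fresh pool.
 */
#if 0   /* example only */
        rc = kiblnd_pmr_pool_map(pps, hdev, rd, &iova, &pmr);
        if (rc == -EAGAIN)      /* stale pool after failover */
                rc = kiblnd_pmr_pool_map(pps, hdev, rd, &iova, &pmr);
#endif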
1847
1848 static void kiblnd_destroy_pmr_pool(kib_pool_t *pool)
1849 {
1850         kib_pmr_pool_t *ppo = container_of(pool, kib_pmr_pool_t, ppo_pool);
1851         kib_phys_mr_t  *pmr;
1852         kib_phys_mr_t *tmp;
1853
1854         LASSERT(pool->po_allocated == 0);
1855
1856         list_for_each_entry_safe(pmr, tmp, &pool->po_free_list, pmr_list) {
1857                 LASSERT(pmr->pmr_mr == NULL);
1858                 list_del(&pmr->pmr_list);
1859
1860                 if (pmr->pmr_ipb != NULL) {
1861                         LIBCFS_FREE(pmr->pmr_ipb,
1862                                     IBLND_MAX_RDMA_FRAGS *
1863                                     sizeof(struct ib_phys_buf));
1864                 }
1865
1866                 LIBCFS_FREE(pmr, sizeof(kib_phys_mr_t));
1867         }
1868
1869         kiblnd_fini_pool(pool);
1870         if (ppo->ppo_hdev != NULL)
1871                 kiblnd_hdev_decref(ppo->ppo_hdev);
1872
1873         LIBCFS_FREE(ppo, sizeof(kib_pmr_pool_t));
1874 }
1875
1876 static inline int kiblnd_pmr_pool_size(int ncpts)
1877 {
1878         int size = *kiblnd_tunables.kib_pmr_pool_size / ncpts;
1879
1880         return max(IBLND_PMR_POOL, size);
1881 }
1882
1883 static int kiblnd_create_pmr_pool(kib_poolset_t *ps, int size,
1884                                   kib_pool_t **pp_po)
1885 {
1886         struct kib_pmr_pool     *ppo;
1887         struct kib_pool         *pool;
1888         kib_phys_mr_t           *pmr;
1889         int                     i;
1890
1891         LIBCFS_CPT_ALLOC(ppo, lnet_cpt_table(),
1892                          ps->ps_cpt, sizeof(kib_pmr_pool_t));
1893         if (ppo == NULL) {
1894                 CERROR("Failed to allocate PMR pool\n");
1895                 return -ENOMEM;
1896         }
1897
1898         pool = &ppo->ppo_pool;
1899         kiblnd_init_pool(ps, pool, size);
1900
1901         for (i = 0; i < size; i++) {
1902                 LIBCFS_CPT_ALLOC(pmr, lnet_cpt_table(),
1903                                  ps->ps_cpt, sizeof(kib_phys_mr_t));
1904                 if (pmr == NULL)
1905                         break;
1906
1907                 pmr->pmr_pool = ppo;
1908                 LIBCFS_CPT_ALLOC(pmr->pmr_ipb, lnet_cpt_table(), ps->ps_cpt,
1909                                  IBLND_MAX_RDMA_FRAGS * sizeof(*pmr->pmr_ipb));
1910                 if (pmr->pmr_ipb == NULL)
1911                         break;
1912
1913                 list_add(&pmr->pmr_list, &pool->po_free_list);
1914         }
1915
1916         if (i < size) {
1917                 ps->ps_pool_destroy(pool);
1918                 return -ENOMEM;
1919         }
1920
1921         ppo->ppo_hdev = kiblnd_current_hdev(ps->ps_net->ibn_dev);
1922         *pp_po = pool;
1923         return 0;
1924 }
1925
1926 static void kiblnd_destroy_tx_pool(kib_pool_t *pool)
1927 {
1928         kib_tx_pool_t  *tpo = container_of(pool, kib_tx_pool_t, tpo_pool);
1929         int          i;
1930
1931         LASSERT(pool->po_allocated == 0);
1932
1933         if (tpo->tpo_tx_pages != NULL) {
1934                 kiblnd_unmap_tx_pool(tpo);
1935                 kiblnd_free_pages(tpo->tpo_tx_pages);
1936         }
1937
1938         if (tpo->tpo_tx_descs == NULL)
1939                 goto out;
1940
1941         for (i = 0; i < pool->po_size; i++) {
1942                 kib_tx_t *tx = &tpo->tpo_tx_descs[i];
1943
1944                 list_del(&tx->tx_list);
1945                 if (tx->tx_pages != NULL)
1946                         LIBCFS_FREE(tx->tx_pages,
1947                                     LNET_MAX_IOV *
1948                                     sizeof(*tx->tx_pages));
1949                 if (tx->tx_frags != NULL)
1950                         LIBCFS_FREE(tx->tx_frags,
1951                                     IBLND_MAX_RDMA_FRAGS *
1952                                             sizeof(*tx->tx_frags));
1953                 if (tx->tx_wrq != NULL)
1954                         LIBCFS_FREE(tx->tx_wrq,
1955                                     (1 + IBLND_MAX_RDMA_FRAGS) *
1956                                     sizeof(*tx->tx_wrq));
1957                 if (tx->tx_sge != NULL)
1958                         LIBCFS_FREE(tx->tx_sge,
1959                                     (1 + IBLND_MAX_RDMA_FRAGS) *
1960                                     sizeof(*tx->tx_sge));
1961                 if (tx->tx_rd != NULL)
1962                         LIBCFS_FREE(tx->tx_rd,
1963                                     offsetof(kib_rdma_desc_t,
1964                                              rd_frags[IBLND_MAX_RDMA_FRAGS]));
1965         }
1966
1967         LIBCFS_FREE(tpo->tpo_tx_descs,
1968                     pool->po_size * sizeof(kib_tx_t));
1969 out:
1970         kiblnd_fini_pool(pool);
1971         LIBCFS_FREE(tpo, sizeof(kib_tx_pool_t));
1972 }
1973
1974 static int kiblnd_tx_pool_size(int ncpts)
1975 {
1976         int ntx = *kiblnd_tunables.kib_ntx / ncpts;
1977
1978         return max(IBLND_TX_POOL, ntx);
1979 }
1980
1981 static int kiblnd_create_tx_pool(kib_poolset_t *ps, int size,
1982                                  kib_pool_t **pp_po)
1983 {
1984         int         i;
1985         int         npg;
1986         kib_pool_t    *pool;
1987         kib_tx_pool_t *tpo;
1988
1989         LIBCFS_CPT_ALLOC(tpo, lnet_cpt_table(), ps->ps_cpt, sizeof(*tpo));
1990         if (tpo == NULL) {
1991                 CERROR("Failed to allocate TX pool\n");
1992                 return -ENOMEM;
1993         }
1994
1995         pool = &tpo->tpo_pool;
1996         kiblnd_init_pool(ps, pool, size);
1997         tpo->tpo_tx_descs = NULL;
1998         tpo->tpo_tx_pages = NULL;
1999
2000         npg = (size * IBLND_MSG_SIZE + PAGE_SIZE - 1) / PAGE_SIZE;
2001         if (kiblnd_alloc_pages(&tpo->tpo_tx_pages, ps->ps_cpt, npg) != 0) {
2002                 CERROR("Can't allocate tx pages: %d\n", npg);
2003                 LIBCFS_FREE(tpo, sizeof(kib_tx_pool_t));
2004                 return -ENOMEM;
2005         }
2006
2007         LIBCFS_CPT_ALLOC(tpo->tpo_tx_descs, lnet_cpt_table(), ps->ps_cpt,
2008                          size * sizeof(kib_tx_t));
2009         if (tpo->tpo_tx_descs == NULL) {
2010                 CERROR("Can't allocate %d tx descriptors\n", size);
2011                 ps->ps_pool_destroy(pool);
2012                 return -ENOMEM;
2013         }
2014
2015         memset(tpo->tpo_tx_descs, 0, size * sizeof(kib_tx_t));
2016
2017         for (i = 0; i < size; i++) {
2018                 kib_tx_t *tx = &tpo->tpo_tx_descs[i];
2019
2020                 tx->tx_pool = tpo;
2021                 if (ps->ps_net->ibn_fmr_ps != NULL) {
2022                         LIBCFS_CPT_ALLOC(tx->tx_pages,
2023                                          lnet_cpt_table(), ps->ps_cpt,
2024                                          LNET_MAX_IOV * sizeof(*tx->tx_pages));
2025                         if (tx->tx_pages == NULL)
2026                                 break;
2027                 }
2028
2029                 LIBCFS_CPT_ALLOC(tx->tx_frags, lnet_cpt_table(), ps->ps_cpt,
2030                                  IBLND_MAX_RDMA_FRAGS * sizeof(*tx->tx_frags));
2031                 if (tx->tx_frags == NULL)
2032                         break;
2033
2034                 sg_init_table(tx->tx_frags, IBLND_MAX_RDMA_FRAGS);
2035
2036                 LIBCFS_CPT_ALLOC(tx->tx_wrq, lnet_cpt_table(), ps->ps_cpt,
2037                                  (1 + IBLND_MAX_RDMA_FRAGS) *
2038                                  sizeof(*tx->tx_wrq));
2039                 if (tx->tx_wrq == NULL)
2040                         break;
2041
2042                 LIBCFS_CPT_ALLOC(tx->tx_sge, lnet_cpt_table(), ps->ps_cpt,
2043                                  (1 + IBLND_MAX_RDMA_FRAGS) *
2044                                  sizeof(*tx->tx_sge));
2045                 if (tx->tx_sge == NULL)
2046                         break;
2047
2048                 LIBCFS_CPT_ALLOC(tx->tx_rd, lnet_cpt_table(), ps->ps_cpt,
2049                                  offsetof(kib_rdma_desc_t,
2050                                           rd_frags[IBLND_MAX_RDMA_FRAGS]));
2051                 if (tx->tx_rd == NULL)
2052                         break;
2053         }
2054
2055         if (i == size) {
2056                 kiblnd_map_tx_pool(tpo);
2057                 *pp_po = pool;
2058                 return 0;
2059         }
2060
2061         ps->ps_pool_destroy(pool);
2062         return -ENOMEM;
2063 }
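/*
 * Worked example for the page reservation above, assuming the usual
 * IBLND_MSG_SIZE == 4096 and PAGE_SIZE == 4096 (assumptions of this
 * note): a pool of size == 256 descriptors needs
 *   npg = (256 * 4096 + 4095) / 4096 = 256
 * pre-mapped pages, i.e. exactly one message buffer page per tx.
 */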
2064
2065 static void kiblnd_tx_init(kib_pool_t *pool, struct list_head *node)
2066 {
2067         kib_tx_poolset_t *tps = container_of(pool->po_owner, kib_tx_poolset_t,
2068                                              tps_poolset);
2069         kib_tx_t         *tx  = list_entry(node, kib_tx_t, tx_list);
2070
2071         tx->tx_cookie = tps->tps_next_tx_cookie++;
2072 }
2073
2074 static void kiblnd_net_fini_pools(kib_net_t *net)
2075 {
2076         int     i;
2077
2078         cfs_cpt_for_each(i, lnet_cpt_table()) {
2079                 kib_tx_poolset_t        *tps;
2080                 kib_fmr_poolset_t       *fps;
2081                 kib_pmr_poolset_t       *pps;
2082
2083                 if (net->ibn_tx_ps != NULL) {
2084                         tps = net->ibn_tx_ps[i];
2085                         kiblnd_fini_poolset(&tps->tps_poolset);
2086                 }
2087
2088                 if (net->ibn_fmr_ps != NULL) {
2089                         fps = net->ibn_fmr_ps[i];
2090                         kiblnd_fini_fmr_poolset(fps);
2091                 }
2092
2093                 if (net->ibn_pmr_ps != NULL) {
2094                         pps = net->ibn_pmr_ps[i];
2095                         kiblnd_fini_poolset(&pps->pps_poolset);
2096                 }
2097         }
2098
2099         if (net->ibn_tx_ps != NULL) {
2100                 cfs_percpt_free(net->ibn_tx_ps);
2101                 net->ibn_tx_ps = NULL;
2102         }
2103
2104         if (net->ibn_fmr_ps != NULL) {
2105                 cfs_percpt_free(net->ibn_fmr_ps);
2106                 net->ibn_fmr_ps = NULL;
2107         }
2108
2109         if (net->ibn_pmr_ps != NULL) {
2110                 cfs_percpt_free(net->ibn_pmr_ps);
2111                 net->ibn_pmr_ps = NULL;
2112         }
2113 }
2114
2115 static int kiblnd_net_init_pools(kib_net_t *net, __u32 *cpts, int ncpts)
2116 {
2117         unsigned long   flags;
2118         int             cpt;
2119         int             rc;
2120         int             i;
2121
2122         read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
2123         if (*kiblnd_tunables.kib_map_on_demand == 0 &&
2124             net->ibn_dev->ibd_hdev->ibh_nmrs == 1) {
2125                 read_unlock_irqrestore(&kiblnd_data.kib_global_lock,
2126                                        flags);
2127                 goto create_tx_pool;
2128         }
2129
2130         read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
2131
2132         if (*kiblnd_tunables.kib_fmr_pool_size <
2133             *kiblnd_tunables.kib_ntx / 4) {
2134                 CERROR("Can't set fmr pool size (%d) < ntx / 4 (%d)\n",
2135                        *kiblnd_tunables.kib_fmr_pool_size,
2136                        *kiblnd_tunables.kib_ntx / 4);
2137                 rc = -EINVAL;
2138                 goto failed;
2139         }
2140
2141         /* TX pool must be created later than FMR/PMR, see LU-2268
2142          * for details */
2143         LASSERT(net->ibn_tx_ps == NULL);
2144
2145         /* premapping can fail if ibd_nmr > 1, so we always create an
2146          * FMR/PMR pool and fall back to map-on-demand if premapping fails */
2147
2148         net->ibn_fmr_ps = cfs_percpt_alloc(lnet_cpt_table(),
2149                                            sizeof(kib_fmr_poolset_t));
2150         if (net->ibn_fmr_ps == NULL) {
2151                 CERROR("Failed to allocate FMR pool array\n");
2152                 rc = -ENOMEM;
2153                 goto failed;
2154         }
2155
2156         for (i = 0; i < ncpts; i++) {
2157                 cpt = (cpts == NULL) ? i : cpts[i];
2158                 rc = kiblnd_init_fmr_poolset(net->ibn_fmr_ps[cpt], cpt, net,
2159                                              kiblnd_fmr_pool_size(ncpts),
2160                                              kiblnd_fmr_flush_trigger(ncpts));
2161                 if (rc == -ENOSYS && i == 0) /* no FMR */
2162                         break; /* create PMR pool */
2163
2164                 if (rc != 0) { /* a real error */
2165                         CERROR("Can't initialize FMR pool for CPT %d: %d\n",
2166                                cpt, rc);
2167                         goto failed;
2168                 }
2169         }
2170
2171         if (i > 0) {
2172                 LASSERT(i == ncpts);
2173                 goto create_tx_pool;
2174         }
2175
2176         cfs_percpt_free(net->ibn_fmr_ps);
2177         net->ibn_fmr_ps = NULL;
2178
2179         CWARN("Device does not support FMR, falling back to PMR\n");
2180
2181         if (*kiblnd_tunables.kib_pmr_pool_size <
2182             *kiblnd_tunables.kib_ntx / 4) {
2183                 CERROR("Can't set pmr pool size (%d) < ntx / 4 (%d)\n",
2184                        *kiblnd_tunables.kib_pmr_pool_size,
2185                        *kiblnd_tunables.kib_ntx / 4);
2186                 rc = -EINVAL;
2187                 goto failed;
2188         }
2189
2190         net->ibn_pmr_ps = cfs_percpt_alloc(lnet_cpt_table(),
2191                                            sizeof(kib_pmr_poolset_t));
2192         if (net->ibn_pmr_ps == NULL) {
2193                 CERROR("Failed to allocate PMR pool array\n");
2194                 rc = -ENOMEM;
2195                 goto failed;
2196         }
2197
2198         for (i = 0; i < ncpts; i++) {
2199                 cpt = (cpts == NULL) ? i : cpts[i];
2200                 rc = kiblnd_init_poolset(&net->ibn_pmr_ps[cpt]->pps_poolset,
2201                                          cpt, net, "PMR",
2202                                          kiblnd_pmr_pool_size(ncpts),
2203                                          kiblnd_create_pmr_pool,
2204                                          kiblnd_destroy_pmr_pool, NULL, NULL);
2205                 if (rc != 0) {
2206                         CERROR("Can't initialize PMR pool for CPT %d: %d\n",
2207                                cpt, rc);
2208                         goto failed;
2209                 }
2210         }
2211
2212  create_tx_pool:
2213         net->ibn_tx_ps = cfs_percpt_alloc(lnet_cpt_table(),
2214                                           sizeof(kib_tx_poolset_t));
2215         if (net->ibn_tx_ps == NULL) {
2216                 CERROR("Failed to allocate tx pool array\n");
2217                 rc = -ENOMEM;
2218                 goto failed;
2219         }
2220
2221         for (i = 0; i < ncpts; i++) {
2222                 cpt = (cpts == NULL) ? i : cpts[i];
2223                 rc = kiblnd_init_poolset(&net->ibn_tx_ps[cpt]->tps_poolset,
2224                                          cpt, net, "TX",
2225                                          kiblnd_tx_pool_size(ncpts),
2226                                          kiblnd_create_tx_pool,
2227                                          kiblnd_destroy_tx_pool,
2228                                          kiblnd_tx_init, NULL);
2229                 if (rc != 0) {
2230                         CERROR("Can't initialize TX pool for CPT %d: %d\n",
2231                                cpt, rc);
2232                         goto failed;
2233                 }
2234         }
2235
2236         return 0;
2237  failed:
2238         kiblnd_net_fini_pools(net);
2239         LASSERT(rc != 0);
2240         return rc;
2241 }
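/*
 * Worked example for the sanity checks above, assuming the module
 * defaults ntx == 512 and fmr_pool_size == 512 (assumptions of this
 * note): the requirement is fmr_pool_size >= ntx / 4, and
 *   512 >= 512 / 4 = 128
 * holds, so pool creation proceeds; shrinking fmr_pool_size below 128
 * would instead fail startup with -EINVAL before any pool is created.
 */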
2242
2243 static int kiblnd_hdev_get_attr(kib_hca_dev_t *hdev)
2244 {
2245         struct ib_device_attr *attr;
2246         int                 rc;
2247
2248         /* It's safe to assume an HCA can handle a page size
2249          * matching that of the native system */
2250         hdev->ibh_page_shift = PAGE_SHIFT;
2251         hdev->ibh_page_size  = 1 << PAGE_SHIFT;
2252         hdev->ibh_page_mask  = ~((__u64)hdev->ibh_page_size - 1);
2253
2254         LIBCFS_ALLOC(attr, sizeof(*attr));
2255         if (attr == NULL) {
2256                 CERROR("Out of memory\n");
2257                 return -ENOMEM;
2258         }
2259
2260         rc = ib_query_device(hdev->ibh_ibdev, attr);
2261         if (rc == 0)
2262                 hdev->ibh_mr_size = attr->max_mr_size;
2263
2264         LIBCFS_FREE(attr, sizeof(*attr));
2265
2266         if (rc != 0) {
2267                 CERROR("Failed to query IB device: %d\n", rc);
2268                 return rc;
2269         }
2270
2271         if (hdev->ibh_mr_size == ~0ULL) {
2272                 hdev->ibh_mr_shift = 64;
2273                 return 0;
2274         }
2275
2276         for (hdev->ibh_mr_shift = 0;
2277              hdev->ibh_mr_shift < 64; hdev->ibh_mr_shift++) {
2278                 if (hdev->ibh_mr_size == (1ULL << hdev->ibh_mr_shift) ||
2279                     hdev->ibh_mr_size == (1ULL << hdev->ibh_mr_shift) - 1)
2280                         return 0;
2281         }
2282
2283         CERROR("Invalid mr size: %#llx\n", hdev->ibh_mr_size);
2284         return -EINVAL;
2285 }
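/*
 * Worked example for the shift search above: an HCA reporting
 * max_mr_size == 0xFFFFFFFF matches the second test at
 * ibh_mr_shift == 32, because 0xFFFFFFFF == (1ULL << 32) - 1; a device
 * that reports ~0ULL can cover everything and is short-circuited to
 * ibh_mr_shift == 64 before the loop.
 */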
2286
2287 static void kiblnd_hdev_cleanup_mrs(kib_hca_dev_t *hdev)
2288 {
2289         int     i;
2290
2291         if (hdev->ibh_nmrs == 0 || hdev->ibh_mrs == NULL)
2292                 return;
2293
2294         for (i = 0; i < hdev->ibh_nmrs; i++) {
2295                 if (hdev->ibh_mrs[i] == NULL)
2296                         break;
2297
2298                 ib_dereg_mr(hdev->ibh_mrs[i]);
2299         }
2300
2301         LIBCFS_FREE(hdev->ibh_mrs, sizeof(*hdev->ibh_mrs) * hdev->ibh_nmrs);
2302         hdev->ibh_mrs  = NULL;
2303         hdev->ibh_nmrs = 0;
2304 }
2305
2306 void kiblnd_hdev_destroy(kib_hca_dev_t *hdev)
2307 {
2308         kiblnd_hdev_cleanup_mrs(hdev);
2309
2310         if (hdev->ibh_pd != NULL)
2311                 ib_dealloc_pd(hdev->ibh_pd);
2312
2313         if (hdev->ibh_cmid != NULL)
2314                 rdma_destroy_id(hdev->ibh_cmid);
2315
2316         LIBCFS_FREE(hdev, sizeof(*hdev));
2317 }
2318
2319 static int kiblnd_hdev_setup_mrs(kib_hca_dev_t *hdev)
2320 {
2321         struct ib_mr *mr;
2322         int        i;
2323         int        rc;
2324         __u64    mm_size;
2325         __u64    mr_size;
2326         int        acflags = IB_ACCESS_LOCAL_WRITE |
2327                                 IB_ACCESS_REMOTE_WRITE;
2328
2329         rc = kiblnd_hdev_get_attr(hdev);
2330         if (rc != 0)
2331                 return rc;
2332
2333         if (hdev->ibh_mr_shift == 64) {
2334                 LIBCFS_ALLOC(hdev->ibh_mrs, 1 * sizeof(*hdev->ibh_mrs));
2335                 if (hdev->ibh_mrs == NULL) {
2336                         CERROR("Failed to allocate MRs table\n");
2337                         return -ENOMEM;
2338                 }
2339
2340                 hdev->ibh_mrs[0] = NULL;
2341                 hdev->ibh_nmrs   = 1;
2342
2343                 mr = ib_get_dma_mr(hdev->ibh_pd, acflags);
2344                 if (IS_ERR(mr)) {
2345                         CERROR("Failed ib_get_dma_mr: %ld\n", PTR_ERR(mr));
2346                         kiblnd_hdev_cleanup_mrs(hdev);
2347                         return PTR_ERR(mr);
2348                 }
2349
2350                 hdev->ibh_mrs[0] = mr;
2351
2352                 goto out;
2353         }
2354
2355         mr_size = 1ULL << hdev->ibh_mr_shift;
2356         mm_size = (unsigned long)high_memory - PAGE_OFFSET;
2357
2358         hdev->ibh_nmrs = (int)((mm_size + mr_size - 1) >> hdev->ibh_mr_shift);
2359
2360         if (hdev->ibh_mr_shift < 32 || hdev->ibh_nmrs > 1024) {
2361                 /* over 4T of memory, or MRs under 4G: assume we will re-code by then */
2362                 CERROR("Can't support memory size %#llx with MR size %#llx\n",
2363                        mm_size, mr_size);
2364                 return -EINVAL;
2365         }
2366
2367         /* create an array of MRs to cover all memory */
2368         LIBCFS_ALLOC(hdev->ibh_mrs, sizeof(*hdev->ibh_mrs) * hdev->ibh_nmrs);
2369         if (hdev->ibh_mrs == NULL) {
2370                 CERROR("Failed to allocate MRs' table\n");
2371                 return -ENOMEM;
2372         }
2373
2374         for (i = 0; i < hdev->ibh_nmrs; i++) {
2375                 struct ib_phys_buf ipb;
2376                 __u64         iova;
2377
2378                 ipb.size = hdev->ibh_mr_size;
2379                 ipb.addr = i * mr_size;
2380                 iova     = ipb.addr;
2381
2382                 mr = ib_reg_phys_mr(hdev->ibh_pd, &ipb, 1, acflags, &iova);
2383                 if (IS_ERR(mr)) {
2384                         CERROR("Failed ib_reg_phys_mr addr %#llx size %#llx: %ld\n",
2385                                ipb.addr, ipb.size, PTR_ERR(mr));
2386                         kiblnd_hdev_cleanup_mrs(hdev);
2387                         return PTR_ERR(mr);
2388                 }
2389
2390                 LASSERT(iova == ipb.addr);
2391
2392                 hdev->ibh_mrs[i] = mr;
2393         }
2394
2395 out:
2396         if (hdev->ibh_mr_size != ~0ULL || hdev->ibh_nmrs != 1)
2397                 LCONSOLE_INFO("Register global MR array, MR size: %#llx, array size: %d\n",
2398                               hdev->ibh_mr_size, hdev->ibh_nmrs);
2399         return 0;
2400 }
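/*
 * Worked example for the MR array sizing above (numbers illustrative):
 * with ibh_mr_shift == 36, mr_size = 1ULL << 36 = 64G, and a machine
 * where mm_size == 200G needs
 *   nmrs = (200G + 64G - 1) >> 36 = 4
 * physical MRs, registered at addresses 0, 64G, 128G and 192G so that
 * together they cover all of memory.
 */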
2401
2402 /* DUMMY */
2403 static int kiblnd_dummy_callback(struct rdma_cm_id *cmid,
2404                                  struct rdma_cm_event *event)
2405 {
2406         return 0;
2407 }
2408
2409 static int kiblnd_dev_need_failover(kib_dev_t *dev)
2410 {
2411         struct rdma_cm_id  *cmid;
2412         struct sockaddr_in  srcaddr;
2413         struct sockaddr_in  dstaddr;
2414         int              rc;
2415
2416         if (dev->ibd_hdev == NULL || /* initializing */
2417             dev->ibd_hdev->ibh_cmid == NULL || /* listener is dead */
2418             *kiblnd_tunables.kib_dev_failover > 1) /* debugging */
2419                 return 1;
2420
2421         /* XXX: it's ugly, but I don't have a better way to detect
2422          * ib-bonding HCA failover because:
2423          *
2424          * a. no reliable CM event for HCA failover...
2425          * b. no OFED API to get ib_device for current net_device...
2426          *
2427          * We have only two choices at this point:
2428          *
2429          * a. rdma_bind_addr(), it will conflict with listener cmid
2430          * b. rdma_resolve_addr() to zero addr */
2431         cmid = kiblnd_rdma_create_id(kiblnd_dummy_callback, dev, RDMA_PS_TCP,
2432                                      IB_QPT_RC);
2433         if (IS_ERR(cmid)) {
2434                 rc = PTR_ERR(cmid);
2435                 CERROR("Failed to create cmid for failover: %d\n", rc);
2436                 return rc;
2437         }
2438
2439         memset(&srcaddr, 0, sizeof(srcaddr));
2440         srcaddr.sin_family      = AF_INET;
2441         srcaddr.sin_addr.s_addr = (__force u32)htonl(dev->ibd_ifip);
2442
2443         memset(&dstaddr, 0, sizeof(dstaddr));
2444         dstaddr.sin_family = AF_INET;
2445         rc = rdma_resolve_addr(cmid, (struct sockaddr *)&srcaddr,
2446                                (struct sockaddr *)&dstaddr, 1);
2447         if (rc != 0 || cmid->device == NULL) {
2448                 CERROR("Failed to bind %s:%pI4h to device(%p): %d\n",
2449                        dev->ibd_ifname, &dev->ibd_ifip,
2450                        cmid->device, rc);
2451                 rdma_destroy_id(cmid);
2452                 return rc;
2453         }
2454
2455         if (dev->ibd_hdev->ibh_ibdev == cmid->device) {
2456                 /* don't need device failover */
2457                 rdma_destroy_id(cmid);
2458                 return 0;
2459         }
2460
2461         return 1;
2462 }
2463
2464 int kiblnd_dev_failover(kib_dev_t *dev)
2465 {
2466         LIST_HEAD(zombie_tpo);
2467         LIST_HEAD(zombie_ppo);
2468         LIST_HEAD(zombie_fpo);
2469         struct rdma_cm_id  *cmid  = NULL;
2470         kib_hca_dev_t      *hdev  = NULL;
2471         kib_hca_dev_t      *old;
2472         struct ib_pd       *pd;
2473         kib_net_t         *net;
2474         struct sockaddr_in  addr;
2475         unsigned long       flags;
2476         int              rc = 0;
2477         int                 i;
2478
2479         LASSERT(*kiblnd_tunables.kib_dev_failover > 1 ||
2480                  dev->ibd_can_failover ||
2481                  dev->ibd_hdev == NULL);
2482
2483         rc = kiblnd_dev_need_failover(dev);
2484         if (rc <= 0)
2485                 goto out;
2486
2487         if (dev->ibd_hdev != NULL &&
2488             dev->ibd_hdev->ibh_cmid != NULL) {
2489                 /* XXX it's not good to close the old listener here,
2490                  * because we may then fail to create a new one.
2491                  * But we have to close it now, otherwise rdma_bind_addr
2492                  * below would return EADDRINUSE */
2493                 write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
2494
2495                 cmid = dev->ibd_hdev->ibh_cmid;
2496                 /* make the next call to kiblnd_dev_need_failover()
2497                  * return 1 */
2498                 dev->ibd_hdev->ibh_cmid  = NULL;
2499                 write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
2500
2501                 rdma_destroy_id(cmid);
2502         }
2503
2504         cmid = kiblnd_rdma_create_id(kiblnd_cm_callback, dev, RDMA_PS_TCP,
2505                                      IB_QPT_RC);
2506         if (IS_ERR(cmid)) {
2507                 rc = PTR_ERR(cmid);
2508                 CERROR("Failed to create cmid for failover: %d\n", rc);
2509                 goto out;
2510         }
2511
2512         memset(&addr, 0, sizeof(addr));
2513         addr.sin_family      = AF_INET;
2514         addr.sin_addr.s_addr = (__force u32)htonl(dev->ibd_ifip);
2515         addr.sin_port   = htons(*kiblnd_tunables.kib_service);
2516
2517         /* Bind to failover device or port */
2518         rc = rdma_bind_addr(cmid, (struct sockaddr *)&addr);
2519         if (rc != 0 || cmid->device == NULL) {
2520                 CERROR("Failed to bind %s:%pI4h to device(%p): %d\n",
2521                        dev->ibd_ifname, &dev->ibd_ifip,
2522                        cmid->device, rc);
2523                 rdma_destroy_id(cmid);
2524                 goto out;
2525         }
2526
2527         LIBCFS_ALLOC(hdev, sizeof(*hdev));
2528         if (hdev == NULL) {
2529                 CERROR("Failed to allocate kib_hca_dev\n");
2530                 rdma_destroy_id(cmid);
2531                 rc = -ENOMEM;
2532                 goto out;
2533         }
2534
2535         atomic_set(&hdev->ibh_ref, 1);
2536         hdev->ibh_dev   = dev;
2537         hdev->ibh_cmid  = cmid;
2538         hdev->ibh_ibdev = cmid->device;
2539
2540         pd = ib_alloc_pd(cmid->device);
2541         if (IS_ERR(pd)) {
2542                 rc = PTR_ERR(pd);
2543                 CERROR("Can't allocate PD: %d\n", rc);
2544                 goto out;
2545         }
2546
2547         hdev->ibh_pd = pd;
2548
2549         rc = rdma_listen(cmid, 0);
2550         if (rc != 0) {
2551                 CERROR("Can't start new listener: %d\n", rc);
2552                 goto out;
2553         }
2554
2555         rc = kiblnd_hdev_setup_mrs(hdev);
2556         if (rc != 0) {
2557                 CERROR("Can't setup device: %d\n", rc);
2558                 goto out;
2559         }
2560
2561         write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
2562
2563         old = dev->ibd_hdev;
2564         dev->ibd_hdev = hdev; /* take over the refcount */
2565         hdev = old;
2566
2567         list_for_each_entry(net, &dev->ibd_nets, ibn_list) {
2568                 cfs_cpt_for_each(i, lnet_cpt_table()) {
2569                         kiblnd_fail_poolset(&net->ibn_tx_ps[i]->tps_poolset,
2570                                             &zombie_tpo);
2571
2572                         if (net->ibn_fmr_ps != NULL) {
2573                                 kiblnd_fail_fmr_poolset(net->ibn_fmr_ps[i],
2574                                                         &zombie_fpo);
2575
2576                         } else if (net->ibn_pmr_ps != NULL) {
2577                                 kiblnd_fail_poolset(&net->ibn_pmr_ps[i]->
2578                                                     pps_poolset, &zombie_ppo);
2579                         }
2580                 }
2581         }
2582
2583         write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
2584  out:
2585         if (!list_empty(&zombie_tpo))
2586                 kiblnd_destroy_pool_list(&zombie_tpo);
2587         if (!list_empty(&zombie_ppo))
2588                 kiblnd_destroy_pool_list(&zombie_ppo);
2589         if (!list_empty(&zombie_fpo))
2590                 kiblnd_destroy_fmr_pool_list(&zombie_fpo);
2591         if (hdev != NULL)
2592                 kiblnd_hdev_decref(hdev);
2593
2594         if (rc != 0)
2595                 dev->ibd_failed_failover++;
2596         else
2597                 dev->ibd_failed_failover = 0;
2598
2599         return rc;
2600 }
2601
2602 void kiblnd_destroy_dev(kib_dev_t *dev)
2603 {
2604         LASSERT(dev->ibd_nnets == 0);
2605         LASSERT(list_empty(&dev->ibd_nets));
2606
2607         list_del(&dev->ibd_fail_list);
2608         list_del(&dev->ibd_list);
2609
2610         if (dev->ibd_hdev != NULL)
2611                 kiblnd_hdev_decref(dev->ibd_hdev);
2612
2613         LIBCFS_FREE(dev, sizeof(*dev));
2614 }
2615
2616 static kib_dev_t *kiblnd_create_dev(char *ifname)
2617 {
2618         struct net_device *netdev;
2619         kib_dev_t        *dev;
2620         __u32         netmask;
2621         __u32         ip;
2622         int             up;
2623         int             rc;
2624
2625         rc = libcfs_ipif_query(ifname, &up, &ip, &netmask);
2626         if (rc != 0) {
2627                 CERROR("Can't query IPoIB interface %s: %d\n",
2628                        ifname, rc);
2629                 return NULL;
2630         }
2631
2632         if (!up) {
2633                 CERROR("Can't query IPoIB interface %s: it's down\n", ifname);
2634                 return NULL;
2635         }
2636
2637         LIBCFS_ALLOC(dev, sizeof(*dev));
2638         if (dev == NULL)
2639                 return NULL;
2640
2641         netdev = dev_get_by_name(&init_net, ifname);
2642         if (netdev == NULL) {
2643                 dev->ibd_can_failover = 0;
2644         } else {
2645                 dev->ibd_can_failover = !!(netdev->flags & IFF_MASTER);
2646                 dev_put(netdev);
2647         }
2648
2649         INIT_LIST_HEAD(&dev->ibd_nets);
2650         INIT_LIST_HEAD(&dev->ibd_list); /* not yet in kib_devs */
2651         INIT_LIST_HEAD(&dev->ibd_fail_list);
2652         dev->ibd_ifip = ip;
2653         strcpy(&dev->ibd_ifname[0], ifname);
2654
2655         /* initialize the device */
2656         rc = kiblnd_dev_failover(dev);
2657         if (rc != 0) {
2658                 CERROR("Can't initialize device: %d\n", rc);
2659                 LIBCFS_FREE(dev, sizeof(*dev));
2660                 return NULL;
2661         }
2662
2663         list_add_tail(&dev->ibd_list,
2664                       &kiblnd_data.kib_devs);
2665         return dev;
2666 }
2667
2668 static void kiblnd_base_shutdown(void)
2669 {
2670         struct kib_sched_info   *sched;
2671         int                     i;
2672
2673         LASSERT(list_empty(&kiblnd_data.kib_devs));
2674
2675         CDEBUG(D_MALLOC, "before LND base cleanup: kmem %d\n",
2676                atomic_read(&libcfs_kmemory));
2677
2678         switch (kiblnd_data.kib_init) {
2679         default:
2680                 LBUG();
2681
2682         case IBLND_INIT_ALL:
2683         case IBLND_INIT_DATA:
2684                 LASSERT(kiblnd_data.kib_peers != NULL);
2685                 for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++)
2686                         LASSERT(list_empty(&kiblnd_data.kib_peers[i]));
2687                 LASSERT(list_empty(&kiblnd_data.kib_connd_zombies));
2688                 LASSERT(list_empty(&kiblnd_data.kib_connd_conns));
2689
2690                 /* flag threads to terminate; wake and wait for them to die */
2691                 kiblnd_data.kib_shutdown = 1;
2692
2693                 /* NB: we really want to stop scheduler threads net by net
2694                  * instead of for the whole module; this should be improved
2695                  * once LNet supports dynamic configuration */
2696                 cfs_percpt_for_each(sched, i, kiblnd_data.kib_scheds)
2697                         wake_up_all(&sched->ibs_waitq);
2698
2699                 wake_up_all(&kiblnd_data.kib_connd_waitq);
2700                 wake_up_all(&kiblnd_data.kib_failover_waitq);
2701
2702                 i = 2;
2703                 while (atomic_read(&kiblnd_data.kib_nthreads) != 0) {
2704                         i++;
2705                         /* power of 2 ? */
2706                         CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
2707                                "Waiting for %d threads to terminate\n",
2708                                atomic_read(&kiblnd_data.kib_nthreads));
2709                         set_current_state(TASK_UNINTERRUPTIBLE);
2710                         schedule_timeout(cfs_time_seconds(1));
2711                 }
2712
2713                 /* fall through */
2714
2715         case IBLND_INIT_NOTHING:
2716                 break;
2717         }
2718
2719         if (kiblnd_data.kib_peers != NULL) {
2720                 LIBCFS_FREE(kiblnd_data.kib_peers,
2721                             sizeof(struct list_head) *
2722                             kiblnd_data.kib_peer_hash_size);
2723         }
2724
2725         if (kiblnd_data.kib_scheds != NULL)
2726                 cfs_percpt_free(kiblnd_data.kib_scheds);
2727
2728         CDEBUG(D_MALLOC, "after LND base cleanup: kmem %d\n",
2729                atomic_read(&libcfs_kmemory));
2730
2731         kiblnd_data.kib_init = IBLND_INIT_NOTHING;
2732         module_put(THIS_MODULE);
2733 }
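/*
 * Worked example of the (i & (-i)) == i throttle used above: i counts
 * one-second waits, and the test holds exactly when i is a power of two
 * (i = 4, 8, 16, ...), so the D_WARNING escalation becomes exponentially
 * rarer while the remaining iterations log only at D_NET level.
 */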
2734
2735 void kiblnd_shutdown(lnet_ni_t *ni)
2736 {
2737         kib_net_t       *net = ni->ni_data;
2738         rwlock_t     *g_lock = &kiblnd_data.kib_global_lock;
2739         int            i;
2740         unsigned long     flags;
2741
2742         LASSERT(kiblnd_data.kib_init == IBLND_INIT_ALL);
2743
2744         if (net == NULL)
2745                 goto out;
2746
2747         CDEBUG(D_MALLOC, "before LND net cleanup: kmem %d\n",
2748                atomic_read(&libcfs_kmemory));
2749
2750         write_lock_irqsave(g_lock, flags);
2751         net->ibn_shutdown = 1;
2752         write_unlock_irqrestore(g_lock, flags);
2753
2754         switch (net->ibn_init) {
2755         default:
2756                 LBUG();
2757
2758         case IBLND_INIT_ALL:
2759                 /* nuke all existing peers within this net */
2760                 kiblnd_del_peer(ni, LNET_NID_ANY);
2761
2762                 /* Wait for all peer state to clean up */
2763                 i = 2;
2764                 while (atomic_read(&net->ibn_npeers) != 0) {
2765                         i++;
2766                         CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* 2**n? */
2767                                "%s: waiting for %d peers to disconnect\n",
2768                                libcfs_nid2str(ni->ni_nid),
2769                                atomic_read(&net->ibn_npeers));
2770                         set_current_state(TASK_UNINTERRUPTIBLE);
2771                         schedule_timeout(cfs_time_seconds(1));
2772                 }
2773
2774                 kiblnd_net_fini_pools(net);
2775
2776                 write_lock_irqsave(g_lock, flags);
2777                 LASSERT(net->ibn_dev->ibd_nnets > 0);
2778                 net->ibn_dev->ibd_nnets--;
2779                 list_del(&net->ibn_list);
2780                 write_unlock_irqrestore(g_lock, flags);
2781
2782                 /* fall through */
2783
2784         case IBLND_INIT_NOTHING:
2785                 LASSERT(atomic_read(&net->ibn_nconns) == 0);
2786
2787                 if (net->ibn_dev != NULL &&
2788                     net->ibn_dev->ibd_nnets == 0)
2789                         kiblnd_destroy_dev(net->ibn_dev);
2790
2791                 break;
2792         }
2793
2794         CDEBUG(D_MALLOC, "after LND net cleanup: kmem %d\n",
2795                atomic_read(&libcfs_kmemory));
2796
2797         net->ibn_init = IBLND_INIT_NOTHING;
2798         ni->ni_data = NULL;
2799
2800         LIBCFS_FREE(net, sizeof(*net));
2801
2802 out:
2803         if (list_empty(&kiblnd_data.kib_devs))
2804                 kiblnd_base_shutdown();
2805 }
2806
2807 static int kiblnd_base_startup(void)
2808 {
2809         struct kib_sched_info   *sched;
2810         int                     rc;
2811         int                     i;
2812
2813         LASSERT(kiblnd_data.kib_init == IBLND_INIT_NOTHING);
2814
2815         try_module_get(THIS_MODULE);
2816         /* zero pointers, flags etc */
2817         memset(&kiblnd_data, 0, sizeof(kiblnd_data));
2818
2819         rwlock_init(&kiblnd_data.kib_global_lock);
2820
2821         INIT_LIST_HEAD(&kiblnd_data.kib_devs);
2822         INIT_LIST_HEAD(&kiblnd_data.kib_failed_devs);
2823
2824         kiblnd_data.kib_peer_hash_size = IBLND_PEER_HASH_SIZE;
2825         LIBCFS_ALLOC(kiblnd_data.kib_peers,
2826                      sizeof(struct list_head) *
2827                             kiblnd_data.kib_peer_hash_size);
2828         if (kiblnd_data.kib_peers == NULL)
2829                 goto failed;
2830         for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++)
2831                 INIT_LIST_HEAD(&kiblnd_data.kib_peers[i]);
2832
2833         spin_lock_init(&kiblnd_data.kib_connd_lock);
2834         INIT_LIST_HEAD(&kiblnd_data.kib_connd_conns);
2835         INIT_LIST_HEAD(&kiblnd_data.kib_connd_zombies);
2836         init_waitqueue_head(&kiblnd_data.kib_connd_waitq);
2837         init_waitqueue_head(&kiblnd_data.kib_failover_waitq);
2838
2839         kiblnd_data.kib_scheds = cfs_percpt_alloc(lnet_cpt_table(),
2840                                                   sizeof(*sched));
2841         if (kiblnd_data.kib_scheds == NULL)
2842                 goto failed;
2843
2844         cfs_percpt_for_each(sched, i, kiblnd_data.kib_scheds) {
2845                 int     nthrs;
2846
2847                 spin_lock_init(&sched->ibs_lock);
2848                 INIT_LIST_HEAD(&sched->ibs_conns);
2849                 init_waitqueue_head(&sched->ibs_waitq);
2850
2851                 nthrs = cfs_cpt_weight(lnet_cpt_table(), i);
2852                 if (*kiblnd_tunables.kib_nscheds > 0) {
2853                         nthrs = min(nthrs, *kiblnd_tunables.kib_nscheds);
2854                 } else {
2855                         /* max to half of CPUs, another half is reserved for
2856                          * upper layer modules */
2857                         nthrs = min(max(IBLND_N_SCHED, nthrs >> 1), nthrs);
2858                 }
2859
2860                 sched->ibs_nthreads_max = nthrs;
2861                 sched->ibs_cpt = i;
2862         }
2863
2864         kiblnd_data.kib_error_qpa.qp_state = IB_QPS_ERR;
2865
2866         /* lists/ptrs/locks initialised */
2867         kiblnd_data.kib_init = IBLND_INIT_DATA;
2868         /*****************************************************/
2869
2870         rc = kiblnd_thread_start(kiblnd_connd, NULL, "kiblnd_connd");
2871         if (rc != 0) {
2872                 CERROR("Can't spawn o2iblnd connd: %d\n", rc);
2873                 goto failed;
2874         }
2875
2876         if (*kiblnd_tunables.kib_dev_failover != 0)
2877                 rc = kiblnd_thread_start(kiblnd_failover_thread, NULL,
2878                                          "kiblnd_failover");
2879
2880         if (rc != 0) {
2881                 CERROR("Can't spawn o2iblnd failover thread: %d\n", rc);
2882                 goto failed;
2883         }
2884
2885         /* flag everything initialised */
2886         kiblnd_data.kib_init = IBLND_INIT_ALL;
2887         /*****************************************************/
2888
2889         return 0;
2890
2891  failed:
2892         kiblnd_base_shutdown();
2893         return -ENETDOWN;
2894 }
2895
2896 static int kiblnd_start_schedulers(struct kib_sched_info *sched)
2897 {
2898         int     rc = 0;
2899         int     nthrs;
2900         int     i;
2901
2902         if (sched->ibs_nthreads == 0) {
2903                 if (*kiblnd_tunables.kib_nscheds > 0) {
2904                         nthrs = sched->ibs_nthreads_max;
2905                 } else {
2906                         nthrs = cfs_cpt_weight(lnet_cpt_table(),
2907                                                sched->ibs_cpt);
2908                         nthrs = min(max(IBLND_N_SCHED, nthrs >> 1), nthrs);
2909                         nthrs = min(IBLND_N_SCHED_HIGH, nthrs);
2910                 }
2911         } else {
2912                 LASSERT(sched->ibs_nthreads <= sched->ibs_nthreads_max);
2913                 /* add one thread if there is a new interface */
2914                 nthrs = sched->ibs_nthreads < sched->ibs_nthreads_max;
2915         }
2916
2917         for (i = 0; i < nthrs; i++) {
2918                 long    id;
2919                 char    name[20];
2920
2921                 id = KIB_THREAD_ID(sched->ibs_cpt, sched->ibs_nthreads + i);
2922                 snprintf(name, sizeof(name), "kiblnd_sd_%02ld_%02ld",
2923                          KIB_THREAD_CPT(id), KIB_THREAD_TID(id));
2924                 rc = kiblnd_thread_start(kiblnd_scheduler, (void *)id, name);
2925                 if (rc == 0)
2926                         continue;
2927
2928                 CERROR("Can't spawn thread %d for scheduler[%d]: %d\n",
2929                        sched->ibs_cpt, sched->ibs_nthreads + i, rc);
2930                 break;
2931         }
2932
2933         sched->ibs_nthreads += i;
2934         return rc;
2935 }
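/*
 * Worked example for the sizing above, assuming IBLND_N_SCHED == 2 and
 * IBLND_N_SCHED_HIGH == 4 as in o2iblnd.h (assumptions of this note):
 * a CPT with 8 online CPUs and kib_nscheds == 0 gets
 *   nthrs = min(max(2, 8 >> 1), 8) = 4, then min(4, 4) = 4
 * scheduler threads, i.e. half the CPUs capped at the high-water mark.
 */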
2936
2937 static int kiblnd_dev_start_threads(kib_dev_t *dev, int newdev, __u32 *cpts,
2938                                     int ncpts)
2939 {
2940         int     cpt;
2941         int     rc;
2942         int     i;
2943
2944         for (i = 0; i < ncpts; i++) {
2945                 struct kib_sched_info *sched;
2946
2947                 cpt = (cpts == NULL) ? i : cpts[i];
2948                 sched = kiblnd_data.kib_scheds[cpt];
2949
2950                 if (!newdev && sched->ibs_nthreads > 0)
2951                         continue;
2952
2953                 rc = kiblnd_start_schedulers(kiblnd_data.kib_scheds[cpt]);
2954                 if (rc != 0) {
2955                         CERROR("Failed to start scheduler threads for %s\n",
2956                                dev->ibd_ifname);
2957                         return rc;
2958                 }
2959         }
2960         return 0;
2961 }
2962
2963 static kib_dev_t *kiblnd_dev_search(char *ifname)
2964 {
2965         kib_dev_t       *alias = NULL;
2966         kib_dev_t       *dev;
2967         char            *colon;
2968         char            *colon2;
2969
2970         colon = strchr(ifname, ':');
2971         list_for_each_entry(dev, &kiblnd_data.kib_devs, ibd_list) {
2972                 if (strcmp(&dev->ibd_ifname[0], ifname) == 0)
2973                         return dev;
2974
2975                 if (alias != NULL)
2976                         continue;
2977
2978                 colon2 = strchr(dev->ibd_ifname, ':');
2979                 if (colon != NULL)
2980                         *colon = 0;
2981                 if (colon2 != NULL)
2982                         *colon2 = 0;
2983
2984                 if (strcmp(&dev->ibd_ifname[0], ifname) == 0)
2985                         alias = dev;
2986
2987                 if (colon != NULL)
2988                         *colon = ':';
2989                 if (colon2 != NULL)
2990                         *colon2 = ':';
2991         }
2992         return alias;
2993 }
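/*
 * Worked example for the alias matching above: searching for "ib0:1"
 * while the device list holds "ib0:2" finds no exact match, but with
 * both colons temporarily nulled the base names compare equal ("ib0"),
 * so the existing device is returned as an alias of the requested one.
 */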
2994
2995 int kiblnd_startup(lnet_ni_t *ni)
2996 {
2997         char                 *ifname;
2998         kib_dev_t               *ibdev = NULL;
2999         kib_net_t               *net;
3000         struct timeval      tv;
3001         unsigned long        flags;
3002         int                    rc;
3003         int                       newdev;
3004
3005         LASSERT(ni->ni_lnd == &the_o2iblnd);
3006
3007         if (kiblnd_data.kib_init == IBLND_INIT_NOTHING) {
3008                 rc = kiblnd_base_startup();
3009                 if (rc != 0)
3010                         return rc;
3011         }
3012
3013         LIBCFS_ALLOC(net, sizeof(*net));
3014         ni->ni_data = net;
3015         if (net == NULL)
3016                 goto net_failed;
3017
3018         do_gettimeofday(&tv);
3019         net->ibn_incarnation = (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;
3020
3021         ni->ni_peertimeout    = *kiblnd_tunables.kib_peertimeout;
3022         ni->ni_maxtxcredits   = *kiblnd_tunables.kib_credits;
3023         ni->ni_peertxcredits  = *kiblnd_tunables.kib_peertxcredits;
3024         ni->ni_peerrtrcredits = *kiblnd_tunables.kib_peerrtrcredits;
3025
3026         if (ni->ni_interfaces[0] != NULL) {
3027                 /* Use the IPoIB interface specified in 'networks=' */
3028
3029                 CLASSERT(LNET_MAX_INTERFACES > 1);
3030                 if (ni->ni_interfaces[1] != NULL) {
3031                         CERROR("Multiple interfaces not supported\n");
3032                         goto failed;
3033                 }
3034
3035                 ifname = ni->ni_interfaces[0];
3036         } else {
3037                 ifname = *kiblnd_tunables.kib_default_ipif;
3038         }
3039
3040         if (strlen(ifname) >= sizeof(ibdev->ibd_ifname)) {
3041                 CERROR("IPoIB interface name too long: %s\n", ifname);
3042                 goto failed;
3043         }
3044
3045         ibdev = kiblnd_dev_search(ifname);
3046
3047         newdev = ibdev == NULL;
3048         /* create a kib_dev even for an alias */
3049         if (ibdev == NULL || strcmp(&ibdev->ibd_ifname[0], ifname) != 0)
3050                 ibdev = kiblnd_create_dev(ifname);
3051
3052         if (ibdev == NULL)
3053                 goto failed;
3054
3055         net->ibn_dev = ibdev;
3056         ni->ni_nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), ibdev->ibd_ifip);
3057
3058         rc = kiblnd_dev_start_threads(ibdev, newdev,
3059                                       ni->ni_cpts, ni->ni_ncpts);
3060         if (rc != 0)
3061                 goto failed;
3062
3063         rc = kiblnd_net_init_pools(net, ni->ni_cpts, ni->ni_ncpts);
3064         if (rc != 0) {
3065                 CERROR("Failed to initialize NI pools: %d\n", rc);
3066                 goto failed;
3067         }
3068
3069         write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
3070         ibdev->ibd_nnets++;
3071         list_add_tail(&net->ibn_list, &ibdev->ibd_nets);
3072         write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
3073
3074         net->ibn_init = IBLND_INIT_ALL;
3075
3076         return 0;
3077
3078 failed:
3079         if (net->ibn_dev == NULL && ibdev != NULL)
3080                 kiblnd_destroy_dev(ibdev);
3081
3082 net_failed:
3083         kiblnd_shutdown(ni);
3084
3085         CDEBUG(D_NET, "kiblnd_startup failed\n");
3086         return -ENETDOWN;
3087 }
3088
3089 static void __exit kiblnd_module_fini(void)
3090 {
3091         lnet_unregister_lnd(&the_o2iblnd);
3092 }
3093
3094 static int __init kiblnd_module_init(void)
3095 {
3096         int    rc;
3097
3098         CLASSERT(sizeof(kib_msg_t) <= IBLND_MSG_SIZE);
3099         CLASSERT(offsetof(kib_msg_t,
3100                 ibm_u.get.ibgm_rd.rd_frags[IBLND_MAX_RDMA_FRAGS])
3101                 <= IBLND_MSG_SIZE);
3102         CLASSERT(offsetof(kib_msg_t,
3103                 ibm_u.putack.ibpam_rd.rd_frags[IBLND_MAX_RDMA_FRAGS])
3104                 <= IBLND_MSG_SIZE);
3105
3106         rc = kiblnd_tunables_init();
3107         if (rc != 0)
3108                 return rc;
3109
3110         lnet_register_lnd(&the_o2iblnd);
3111
3112         return 0;
3113 }
3114
3115 MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
3116 MODULE_DESCRIPTION("Kernel OpenIB gen2 LND v2.00");
3117 MODULE_LICENSE("GPL");
3118
3119 module_init(kiblnd_module_init);
3120 module_exit(kiblnd_module_fini);