/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 *
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lnet/klnds/o2iblnd/o2iblnd.h
 *
 * Author: Eric Barton <eric@bartonsoftware.com>
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/uio.h>
#include <linux/uaccess.h>

#include <linux/file.h>
#include <linux/list.h>
#include <linux/kmod.h>
#include <linux/sysctl.h>
#include <linux/pci.h>
#define DEBUG_SUBSYSTEM S_LND

#include "../../../include/linux/libcfs/libcfs.h"
#include "../../../include/linux/lnet/lnet.h"
#include "../../../include/linux/lnet/lib-lnet.h"

#include <rdma/rdma_cm.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_fmr_pool.h>
#define IBLND_PEER_HASH_SIZE 101 /* # peer lists */
/* # scheduler loops before reschedule */
#define IBLND_RESCHED 100

#define IBLND_N_SCHED 2
#define IBLND_N_SCHED_HIGH 4
typedef struct {
        int *kib_dev_failover;           /* HCA failover */
        unsigned int *kib_service;       /* IB service number */
        int *kib_min_reconnect_interval; /* first failed connection retry... */
        int *kib_max_reconnect_interval; /* ...exponentially increasing to this */
        int *kib_cksum;                  /* checksum kib_msg_t? */
        int *kib_timeout;                /* comms timeout (seconds) */
        int *kib_keepalive;              /* keepalive timeout (seconds) */
        int *kib_ntx;                    /* # tx descs */
        int *kib_credits;                /* # concurrent sends */
        int *kib_peertxcredits;          /* # concurrent sends to 1 peer */
        int *kib_peerrtrcredits;         /* # per-peer router buffer credits */
        int *kib_peercredits_hiw;        /* # when eagerly to return credits */
        int *kib_peertimeout;            /* seconds to consider peer dead */
        char **kib_default_ipif;         /* default IPoIB interface */
        int *kib_retry_count;
        int *kib_rnr_retry_count;
        int *kib_concurrent_sends;       /* send work queue sizing */
        int *kib_ib_mtu;                 /* IB MTU */
        int *kib_map_on_demand;          /* map-on-demand if RD has more
                                          * fragments than this value, 0
                                          * disable map-on-demand */
        int *kib_fmr_pool_size;          /* # FMRs in pool */
        int *kib_fmr_flush_trigger;      /* When to trigger FMR flush */
        int *kib_fmr_cache;              /* enable FMR pool cache? */
        int *kib_require_priv_port;      /* accept only privileged ports */
        int *kib_use_priv_port;          /* use privileged port for active
                                          * connection */
        int *kib_nscheds;                /* # threads on each CPT */
} kib_tunables_t;

extern kib_tunables_t kiblnd_tunables;
#define IBLND_MSG_QUEUE_SIZE_V1 8 /* V1 only : # messages/RDMAs in-flight */
#define IBLND_CREDIT_HIGHWATER_V1 7 /* V1 only : when eagerly to return credits */

#define IBLND_CREDITS_DEFAULT 8 /* default # of peer credits */
#define IBLND_CREDITS_MAX ((typeof(((kib_msg_t *) 0)->ibm_credits)) - 1) /* Max # of peer credits */
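/*
 * Note: "(typeof(...)) - 1" above is a cast of -1 to the unsigned type of
 * ibm_credits, i.e. the all-ones value of that field (the largest credit
 * count the wire format can carry), not a subtraction.
 */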
#define IBLND_MSG_QUEUE_SIZE(v) ((v) == IBLND_MSG_VERSION_1 ? \
                                 IBLND_MSG_QUEUE_SIZE_V1 : \
                                 *kiblnd_tunables.kib_peertxcredits) /* # messages/RDMAs in-flight */
#define IBLND_CREDITS_HIGHWATER(v) ((v) == IBLND_MSG_VERSION_1 ? \
                                    IBLND_CREDIT_HIGHWATER_V1 : \
                                    *kiblnd_tunables.kib_peercredits_hiw) /* when eagerly to return credits */
#define kiblnd_rdma_create_id(cb, dev, ps, qpt) rdma_create_id(cb, dev, ps, qpt)
static inline int
kiblnd_concurrent_sends_v1(void)
{
        if (*kiblnd_tunables.kib_concurrent_sends > IBLND_MSG_QUEUE_SIZE_V1 * 2)
                return IBLND_MSG_QUEUE_SIZE_V1 * 2;

        if (*kiblnd_tunables.kib_concurrent_sends < IBLND_MSG_QUEUE_SIZE_V1 / 2)
                return IBLND_MSG_QUEUE_SIZE_V1 / 2;

        return *kiblnd_tunables.kib_concurrent_sends;
}
#define IBLND_CONCURRENT_SENDS(v) ((v) == IBLND_MSG_VERSION_1 ? \
                                   kiblnd_concurrent_sends_v1() : \
                                   *kiblnd_tunables.kib_concurrent_sends)
/* 2 OOB shall suffice for 1 keepalive and 1 returning credits */
#define IBLND_OOB_CAPABLE(v) ((v) != IBLND_MSG_VERSION_1)
#define IBLND_OOB_MSGS(v) (IBLND_OOB_CAPABLE(v) ? 2 : 0)

#define IBLND_MSG_SIZE (4<<10) /* max size of queued messages (inc hdr) */
#define IBLND_MAX_RDMA_FRAGS LNET_MAX_IOV /* max # of fragments supported */
#define IBLND_CFG_RDMA_FRAGS (*kiblnd_tunables.kib_map_on_demand != 0 ? \
                              *kiblnd_tunables.kib_map_on_demand : \
                              IBLND_MAX_RDMA_FRAGS) /* max # of fragments configured by user */
#define IBLND_RDMA_FRAGS(v) ((v) == IBLND_MSG_VERSION_1 ? \
                             IBLND_MAX_RDMA_FRAGS : IBLND_CFG_RDMA_FRAGS)
/************************/
/* derived constants... */
/* Pools (shared by connections on each CPT) */
/* These pools can grow at runtime, so there is no need to start them large */
#define IBLND_TX_POOL 256
#define IBLND_FMR_POOL 256
#define IBLND_FMR_POOL_FLUSH 192

/* TX messages (shared by all connections) */
#define IBLND_TX_MSGS() (*kiblnd_tunables.kib_ntx)

/* RX messages (per connection) */
#define IBLND_RX_MSGS(v) (IBLND_MSG_QUEUE_SIZE(v) * 2 + IBLND_OOB_MSGS(v))
#define IBLND_RX_MSG_BYTES(v) (IBLND_RX_MSGS(v) * IBLND_MSG_SIZE)
#define IBLND_RX_MSG_PAGES(v) ((IBLND_RX_MSG_BYTES(v) + PAGE_SIZE - 1) / PAGE_SIZE)
/* WRs and CQEs (per connection) */
#define IBLND_RECV_WRS(v) IBLND_RX_MSGS(v)
#define IBLND_SEND_WRS(v) ((IBLND_RDMA_FRAGS(v) + 1) * IBLND_CONCURRENT_SENDS(v))
#define IBLND_CQ_ENTRIES(v) (IBLND_RECV_WRS(v) + IBLND_SEND_WRS(v))
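/*
 * Worked example (a sketch assuming default tunables, not any particular
 * site configuration): with IBLND_MSG_VERSION_2, peer_credits = 8,
 * concurrent_sends = 8 and map_on_demand = 0 (so IBLND_RDMA_FRAGS(v) ==
 * IBLND_MAX_RDMA_FRAGS == LNET_MAX_IOV == 256):
 *
 *   IBLND_RX_MSGS(v)    = 8 * 2 + 2     = 18 receive buffers
 *   IBLND_RECV_WRS(v)   = 18
 *   IBLND_SEND_WRS(v)   = (256 + 1) * 8 = 2056
 *   IBLND_CQ_ENTRIES(v) = 18 + 2056     = 2074 CQEs per connection
 */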
/* o2iblnd can run over aliased interface */
#ifdef IFALIASZ
#define KIB_IFNAME_SIZE IFALIASZ
#else
#define KIB_IFNAME_SIZE 256
#endif
typedef struct {
        struct list_head ibd_list;     /* chain on kib_devs */
        struct list_head ibd_fail_list; /* chain on kib_failed_devs */
        __u32 ibd_ifip;                /* IPoIB interface IP */

        /* IPoIB interface name */
        char ibd_ifname[KIB_IFNAME_SIZE];
        int ibd_nnets;                 /* # nets extant */

        unsigned long ibd_next_failover;
        int ibd_failed_failover;       /* # failover failures */
        unsigned int ibd_failover;     /* failover in progress */
        unsigned int ibd_can_failover; /* IPoIB interface is a bonding master */
        struct list_head ibd_nets;
        struct kib_hca_dev *ibd_hdev;
} kib_dev_t;
typedef struct kib_hca_dev {
        struct rdma_cm_id *ibh_cmid; /* listener cmid */
        struct ib_device *ibh_ibdev; /* IB device */
        int ibh_page_shift;          /* page shift of current HCA */
        int ibh_page_size;           /* page size of current HCA */
        __u64 ibh_page_mask;         /* page mask of current HCA */
        int ibh_mr_shift;            /* bits shift of max MR size */
        __u64 ibh_mr_size;           /* size of MR */
        int ibh_nmrs;                /* # of global MRs */
        struct ib_mr **ibh_mrs;      /* global MR */
        struct ib_pd *ibh_pd;        /* PD */
        kib_dev_t *ibh_dev;          /* owner */
        atomic_t ibh_ref;            /* refcount */
} kib_hca_dev_t;
/** # of seconds to keep pool alive */
#define IBLND_POOL_DEADLINE 300
/** # of seconds to retry if allocation failed */
#define IBLND_POOL_RETRY 1

typedef struct {
        int ibp_npages;            /* # pages */
        struct page *ibp_pages[0]; /* page array */
} kib_pages_t;
typedef int (*kib_ps_pool_create_t)(struct kib_poolset *ps,
                                    int inc, struct kib_pool **pp_po);
typedef void (*kib_ps_pool_destroy_t)(struct kib_pool *po);
typedef void (*kib_ps_node_init_t)(struct kib_pool *po, struct list_head *node);
typedef void (*kib_ps_node_fini_t)(struct kib_pool *po, struct list_head *node);
#define IBLND_POOL_NAME_LEN 32

typedef struct kib_poolset {
        spinlock_t ps_lock;                   /* serialize */
        struct kib_net *ps_net;               /* network it belongs to */
        char ps_name[IBLND_POOL_NAME_LEN];    /* pool set name */
        struct list_head ps_pool_list;        /* list of pools */
        struct list_head ps_failed_pool_list; /* failed pool list */
        unsigned long ps_next_retry;          /* time stamp for retry if
                                               * failed to allocate */
        int ps_increasing;                    /* is allocating new pool */
        int ps_pool_size;                     /* new pool size */
        int ps_cpt;                           /* CPT id */

        kib_ps_pool_create_t ps_pool_create;  /* create a new pool */
        kib_ps_pool_destroy_t ps_pool_destroy; /* destroy a pool */
        kib_ps_node_init_t ps_node_init;      /* initialize new allocated node */
        kib_ps_node_fini_t ps_node_fini;      /* finalize node */
} kib_poolset_t;
typedef struct kib_pool {
        struct list_head po_list;      /* chain on pool list */
        struct list_head po_free_list; /* pre-allocated node */
        kib_poolset_t *po_owner;       /* pool_set of this pool */
        unsigned long po_deadline;     /* deadline of this pool */
        int po_allocated;              /* # of elements in use */
        int po_failed;                 /* pool is created on failed HCA */
        int po_size;                   /* # of pre-allocated elements */
} kib_pool_t;
typedef struct {
        kib_poolset_t tps_poolset; /* pool-set */
        __u64 tps_next_tx_cookie;  /* cookie of TX */
} kib_tx_poolset_t;

typedef struct {
        kib_pool_t tpo_pool;          /* pool */
        struct kib_hca_dev *tpo_hdev; /* device for this pool */
        struct kib_tx *tpo_tx_descs;  /* all the tx descriptors */
        kib_pages_t *tpo_tx_pages;    /* premapped tx msg pages */
} kib_tx_pool_t;
typedef struct {
        spinlock_t fps_lock;                   /* serialize */
        struct kib_net *fps_net;               /* IB network */
        struct list_head fps_pool_list;        /* FMR pool list */
        struct list_head fps_failed_pool_list; /* FMR pool list */
        __u64 fps_version;                     /* validity stamp */
        int fps_cpt;                           /* CPT id */
        int fps_pool_size;
        int fps_flush_trigger;
        int fps_increasing;                    /* is allocating new pool */
        unsigned long fps_next_retry;          /* time stamp for retry if
                                                * failed to allocate */
} kib_fmr_poolset_t;
typedef struct {
        struct list_head fpo_list;        /* chain on pool list */
        struct kib_hca_dev *fpo_hdev;     /* device for this pool */
        kib_fmr_poolset_t *fpo_owner;     /* owner of this pool */
        struct ib_fmr_pool *fpo_fmr_pool; /* IB FMR pool */
        unsigned long fpo_deadline;       /* deadline of this pool */
        int fpo_failed;                   /* fmr pool is failed */
        int fpo_map_count;                /* # of mapped FMR */
} kib_fmr_pool_t;

typedef struct {
        struct ib_pool_fmr *fmr_pfmr; /* IB pool fmr */
        kib_fmr_pool_t *fmr_pool;     /* pool of FMR */
} kib_fmr_t;
typedef struct kib_net {
        struct list_head ibn_list; /* chain on kib_dev_t::ibd_nets */
        __u64 ibn_incarnation;     /* my epoch */
        int ibn_init;              /* initialisation state */
        int ibn_shutdown;          /* shutting down? */

        atomic_t ibn_npeers;       /* # peers extant */
        atomic_t ibn_nconns;       /* # connections extant */

        kib_tx_poolset_t **ibn_tx_ps;   /* tx pool-set */
        kib_fmr_poolset_t **ibn_fmr_ps; /* fmr pool-set */

        kib_dev_t *ibn_dev;        /* underlying IB device */
} kib_net_t;
#define KIB_THREAD_SHIFT 16
#define KIB_THREAD_ID(cpt, tid) ((cpt) << KIB_THREAD_SHIFT | (tid))
#define KIB_THREAD_CPT(id) ((id) >> KIB_THREAD_SHIFT)
#define KIB_THREAD_TID(id) ((id) & ((1UL << KIB_THREAD_SHIFT) - 1))
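/*
 * Example (illustrative, not from the original source): scheduler thread 3
 * on CPT 2 gets id = KIB_THREAD_ID(2, 3) == (2 << 16) | 3 == 0x20003;
 * decoding gives KIB_THREAD_CPT(0x20003) == 2 and
 * KIB_THREAD_TID(0x20003) == 3.
 */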
struct kib_sched_info {
        spinlock_t ibs_lock;         /* serialise */
        wait_queue_head_t ibs_waitq; /* schedulers sleep here */
        struct list_head ibs_conns;  /* conns to check for rx completions */
        int ibs_nthreads;            /* number of scheduler threads */
        int ibs_nthreads_max;        /* max allowed scheduler threads */
        int ibs_cpt;                 /* CPT id */
};
typedef struct {
        int kib_init;                         /* initialisation state */
        int kib_shutdown;                     /* shut down? */
        struct list_head kib_devs;            /* IB devices extant */
        struct list_head kib_failed_devs;     /* list head of failed devices */
        wait_queue_head_t kib_failover_waitq; /* schedulers sleep here */
        atomic_t kib_nthreads;                /* # live threads */
        rwlock_t kib_global_lock;             /* stabilize net/dev/peer/conn ops */
        struct list_head *kib_peers;          /* hash table of all my known peers */
        int kib_peer_hash_size;               /* size of kib_peers */
        void *kib_connd;                      /* the connd task
                                               * (serialisation assertions) */
        struct list_head kib_connd_conns;     /* connections to setup/teardown */
        struct list_head kib_connd_zombies;   /* connections with zero refcount */
        wait_queue_head_t kib_connd_waitq;    /* connection daemon sleeps here */
        spinlock_t kib_connd_lock;            /* serialise */
        struct ib_qp_attr kib_error_qpa;      /* QP->ERROR */
        struct kib_sched_info **kib_scheds;   /* percpt data for schedulers */
} kib_data_t;
#define IBLND_INIT_NOTHING 0
#define IBLND_INIT_DATA 1
#define IBLND_INIT_ALL 2
/************************************************************************
 * IB Wire message format.
 * These are sent in sender's byte order (i.e. receiver flips).
 */
typedef struct kib_connparams {
        __u16 ibcp_queue_depth;
        __u16 ibcp_max_frags;
        __u32 ibcp_max_msg_size;
} WIRE_ATTR kib_connparams_t;
typedef struct {
        lnet_hdr_t ibim_hdr;  /* portals header */
        char ibim_payload[0]; /* piggy-backed payload */
} WIRE_ATTR kib_immediate_msg_t;

typedef struct {
        __u32 rf_nob;  /* # bytes this frag */
        __u64 rf_addr; /* CAVEAT EMPTOR: misaligned!! */
} WIRE_ATTR kib_rdma_frag_t;

typedef struct {
        __u32 rd_key;                /* local/remote key */
        __u32 rd_nfrags;             /* # fragments */
        kib_rdma_frag_t rd_frags[0]; /* buffer frags */
} WIRE_ATTR kib_rdma_desc_t;

typedef struct {
        lnet_hdr_t ibprm_hdr; /* portals header */
        __u64 ibprm_cookie;   /* opaque completion cookie */
} WIRE_ATTR kib_putreq_msg_t;

typedef struct {
        __u64 ibpam_src_cookie;   /* reflected completion cookie */
        __u64 ibpam_dst_cookie;   /* opaque completion cookie */
        kib_rdma_desc_t ibpam_rd; /* sender's sink buffer */
} WIRE_ATTR kib_putack_msg_t;

typedef struct {
        lnet_hdr_t ibgm_hdr;     /* portals header */
        __u64 ibgm_cookie;       /* opaque completion cookie */
        kib_rdma_desc_t ibgm_rd; /* rdma descriptor */
} WIRE_ATTR kib_get_msg_t;

typedef struct {
        __u64 ibcm_cookie; /* opaque completion cookie */
        __s32 ibcm_status; /* < 0 failure: >= 0 length */
} WIRE_ATTR kib_completion_msg_t;
typedef struct {
        /* First 2 fields fixed FOR ALL TIME */
        __u32 ibm_magic;    /* I'm an ibnal message */
        __u16 ibm_version;  /* this is my version number */

        __u8 ibm_type;      /* msg type */
        __u8 ibm_credits;   /* returned credits */
        __u32 ibm_nob;      /* # bytes in whole message */
        __u32 ibm_cksum;    /* checksum (0 == no checksum) */
        __u64 ibm_srcnid;   /* sender's NID */
        __u64 ibm_srcstamp; /* sender's incarnation */
        __u64 ibm_dstnid;   /* destination's NID */
        __u64 ibm_dststamp; /* destination's incarnation */

        union {
                kib_connparams_t connparams;
                kib_immediate_msg_t immediate;
                kib_putreq_msg_t putreq;
                kib_putack_msg_t putack;
                kib_get_msg_t get;
                kib_completion_msg_t completion;
        } WIRE_ATTR ibm_u;
} WIRE_ATTR kib_msg_t;
#define IBLND_MSG_MAGIC LNET_PROTO_IB_MAGIC /* unique magic */

#define IBLND_MSG_VERSION_1 0x11
#define IBLND_MSG_VERSION_2 0x12
#define IBLND_MSG_VERSION   IBLND_MSG_VERSION_2

#define IBLND_MSG_CONNREQ   0xc0 /* connection request */
#define IBLND_MSG_CONNACK   0xc1 /* connection acknowledge */
#define IBLND_MSG_NOOP      0xd0 /* nothing (just credits) */
#define IBLND_MSG_IMMEDIATE 0xd1 /* immediate */
#define IBLND_MSG_PUT_REQ   0xd2 /* putreq (src->sink) */
#define IBLND_MSG_PUT_NAK   0xd3 /* completion (sink->src) */
#define IBLND_MSG_PUT_ACK   0xd4 /* putack (sink->src) */
#define IBLND_MSG_PUT_DONE  0xd5 /* completion (src->sink) */
#define IBLND_MSG_GET_REQ   0xd6 /* getreq (sink->src) */
#define IBLND_MSG_GET_DONE  0xd7 /* completion (src->sink: all OK) */
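/*
 * Message flow (a summary inferred from the message names above, not part
 * of the original header): a PUT is PUT_REQ (src->sink), answered by
 * PUT_ACK carrying the sink's RDMA descriptor (or PUT_NAK on refusal);
 * the source then RDMA-writes the payload and sends PUT_DONE. A GET is
 * GET_REQ carrying the requester's sink descriptor, answered by an RDMA
 * write plus GET_DONE. Payloads that fit in IBLND_MSG_SIZE travel as
 * IBLND_MSG_IMMEDIATE with no RDMA at all.
 */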
typedef struct {
        __u32 ibr_magic;         /* sender's magic */
        __u16 ibr_version;       /* sender's version */
        __u8 ibr_why;            /* reject reason */
        __u8 ibr_padding;        /* padding */
        __u64 ibr_incarnation;   /* incarnation of peer */
        kib_connparams_t ibr_cp; /* connection parameters */
} WIRE_ATTR kib_rej_t;
/* connection rejection reasons */
#define IBLND_REJECT_CONN_RACE      1 /* You lost connection race */
#define IBLND_REJECT_NO_RESOURCES   2 /* Out of memory/conns etc */
#define IBLND_REJECT_FATAL          3 /* Anything else */
#define IBLND_REJECT_CONN_UNCOMPAT  4 /* incompatible version peer */
#define IBLND_REJECT_CONN_STALE     5 /* stale peer */
#define IBLND_REJECT_RDMA_FRAGS     6 /* Fatal: peer's rdma frags can't match mine */
#define IBLND_REJECT_MSG_QUEUE_SIZE 7 /* Fatal: peer's msg queue size can't match mine */
/***********************************************************************/
typedef struct kib_rx /* receive message */
{
        struct list_head rx_list;    /* queue for attention */
        struct kib_conn *rx_conn;    /* owning conn */
        int rx_nob;                  /* # bytes received (-1 while posted) */
        enum ib_wc_status rx_status; /* completion status */
        kib_msg_t *rx_msg;           /* message buffer (host vaddr) */
        __u64 rx_msgaddr;            /* message buffer (I/O addr) */
        DECLARE_PCI_UNMAP_ADDR(rx_msgunmap); /* for dma_unmap_single() */
        struct ib_recv_wr rx_wrq;    /* receive work item... */
        struct ib_sge rx_sge;        /* ...and its memory */
} kib_rx_t;
#define IBLND_POSTRX_DONT_POST    0 /* don't post */
#define IBLND_POSTRX_NO_CREDIT    1 /* post: no credits */
#define IBLND_POSTRX_PEER_CREDIT  2 /* post: give peer back 1 credit */
#define IBLND_POSTRX_RSRVD_CREDIT 3 /* post: give myself back 1 reserved credit */
typedef struct kib_tx /* transmit message */
{
        struct list_head tx_list;  /* queue on idle_txs ibc_tx_queue etc. */
        kib_tx_pool_t *tx_pool;    /* pool I'm from */
        struct kib_conn *tx_conn;  /* owning conn */
        short tx_sending;          /* # tx callbacks outstanding */
        short tx_queued;           /* queued for sending */
        short tx_waiting;          /* waiting for peer */
        int tx_status;             /* LNET completion status */
        unsigned long tx_deadline; /* completion deadline */
        __u64 tx_cookie;           /* completion cookie */
        lnet_msg_t *tx_lntmsg[2];  /* lnet msgs to finalize on completion */
        kib_msg_t *tx_msg;         /* message buffer (host vaddr) */
        __u64 tx_msgaddr;          /* message buffer (I/O addr) */
        DECLARE_PCI_UNMAP_ADDR(tx_msgunmap); /* for dma_unmap_single() */
        int tx_nwrq;               /* # send work items */
        struct ib_send_wr *tx_wrq; /* send work items... */
        struct ib_sge *tx_sge;     /* ...and their memory */
        kib_rdma_desc_t *tx_rd;    /* rdma descriptor */
        int tx_nfrags;             /* # entries in... */
        struct scatterlist *tx_frags; /* dma_map_sg descriptor */
        __u64 *tx_pages;           /* rdma phys page addrs */
        kib_fmr_t fmr;             /* FMR */
        int tx_dmadir;             /* dma direction */
} kib_tx_t;
typedef struct kib_connvars {
        kib_msg_t cv_msg; /* connection-in-progress variables */
} kib_connvars_t;
typedef struct kib_conn {
        struct kib_sched_info *ibc_sched; /* scheduler information */
        struct kib_peer *ibc_peer;        /* owning peer */
        kib_hca_dev_t *ibc_hdev;          /* HCA bound on */
        struct list_head ibc_list;        /* stash on peer's conn list */
        struct list_head ibc_sched_list;  /* schedule for attention */
        __u16 ibc_version;                /* version of connection */
        __u64 ibc_incarnation;            /* which instance of the peer */
        atomic_t ibc_refcount;            /* # users */
        int ibc_state;                    /* what's happening */
        int ibc_nsends_posted;            /* # uncompleted sends */
        int ibc_noops_posted;             /* # uncompleted NOOPs */
        int ibc_credits;                  /* # credits I have */
        int ibc_outstanding_credits;      /* # credits to return */
        int ibc_reserved_credits;         /* # ACK/DONE msg credits */
        int ibc_comms_error;              /* set on comms error */
        unsigned int ibc_nrx:16;          /* receive buffers owned */
        unsigned int ibc_scheduled:1;     /* scheduled for attention? */
        unsigned int ibc_ready:1;         /* CQ callback fired */
        unsigned long ibc_last_send;      /* time of last send */
        struct list_head ibc_connd_list;  /* link chain for
                                           * kiblnd_check_conns only */
        struct list_head ibc_early_rxs;   /* rxs completed before
                                           * ESTABLISHED */
        struct list_head ibc_tx_noops;    /* IBLND_MSG_NOOPs for
                                           * IBLND_MSG_VERSION_1 */
        struct list_head ibc_tx_queue;    /* sends that need a credit */
        struct list_head ibc_tx_queue_nocred; /* sends that don't need a
                                               * credit */
        struct list_head ibc_tx_queue_rsrvd; /* sends that need to
                                              * reserve an ACK/DONE msg */
        struct list_head ibc_active_txs;  /* active tx awaiting completion */
        spinlock_t ibc_lock;              /* serialise */
        kib_rx_t *ibc_rxs;                /* the rx descs */
        kib_pages_t *ibc_rx_pages;        /* premapped rx msg pages */

        struct rdma_cm_id *ibc_cmid;      /* CM id */
        struct ib_cq *ibc_cq;             /* completion queue */

        kib_connvars_t *ibc_connvars;     /* in-progress connection state */
} kib_conn_t;
#define IBLND_CONN_INIT           0 /* being initialised */
#define IBLND_CONN_ACTIVE_CONNECT 1 /* active sending req */
#define IBLND_CONN_PASSIVE_WAIT   2 /* passive waiting for rtu */
#define IBLND_CONN_ESTABLISHED    3 /* connection established */
#define IBLND_CONN_CLOSING        4 /* being closed */
#define IBLND_CONN_DISCONNECTED   5 /* disconnected */
typedef struct kib_peer {
        struct list_head ibp_list;     /* stash on global peer list */
        lnet_nid_t ibp_nid;            /* who's on the other end(s) */
        lnet_ni_t *ibp_ni;             /* LNet interface */
        atomic_t ibp_refcount;         /* # users */
        struct list_head ibp_conns;    /* all active connections */
        struct list_head ibp_tx_queue; /* msgs waiting for a conn */
        __u16 ibp_version;             /* version of peer */
        __u64 ibp_incarnation;         /* incarnation of peer */
        int ibp_connecting;            /* current active connection attempts */
        int ibp_accepting;             /* current passive connection attempts */
        int ibp_error;                 /* errno on closing this peer */
        unsigned long ibp_last_alive;  /* when (in jiffies) I was last alive */
} kib_peer_t;
extern kib_data_t kiblnd_data;

void kiblnd_hdev_destroy(kib_hca_dev_t *hdev);
static inline void
kiblnd_hdev_addref_locked(kib_hca_dev_t *hdev)
{
        LASSERT(atomic_read(&hdev->ibh_ref) > 0);
        atomic_inc(&hdev->ibh_ref);
}

static inline void
kiblnd_hdev_decref(kib_hca_dev_t *hdev)
{
        LASSERT(atomic_read(&hdev->ibh_ref) > 0);
        if (atomic_dec_and_test(&hdev->ibh_ref))
                kiblnd_hdev_destroy(hdev);
}

static inline int
kiblnd_dev_can_failover(kib_dev_t *dev)
{
        if (!list_empty(&dev->ibd_fail_list)) /* already scheduled */
                return 0;

        if (*kiblnd_tunables.kib_dev_failover == 0) /* disabled */
                return 0;

        if (*kiblnd_tunables.kib_dev_failover > 1) /* force failover */
                return 1;

        return dev->ibd_can_failover;
}
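/*
 * Summary (restating the checks above): kib_dev_failover acts as a
 * tri-state -- 0 disables failover, 1 allows it only when the IPoIB
 * interface itself can fail over (e.g. a bonding master), and any value
 * greater than 1 forces it.
 */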
#define kiblnd_conn_addref(conn) \
do { \
        CDEBUG(D_NET, "conn[%p] (%d)++\n", \
               (conn), atomic_read(&(conn)->ibc_refcount)); \
        atomic_inc(&(conn)->ibc_refcount); \
} while (0)

#define kiblnd_conn_decref(conn) \
do { \
        unsigned long flags; \
 \
        CDEBUG(D_NET, "conn[%p] (%d)--\n", \
               (conn), atomic_read(&(conn)->ibc_refcount)); \
        LASSERT_ATOMIC_POS(&(conn)->ibc_refcount); \
        if (atomic_dec_and_test(&(conn)->ibc_refcount)) { \
                spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags); \
                list_add_tail(&(conn)->ibc_list, \
                              &kiblnd_data.kib_connd_zombies); \
                wake_up(&kiblnd_data.kib_connd_waitq); \
                spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);\
        } \
} while (0)

#define kiblnd_peer_addref(peer) \
do { \
        CDEBUG(D_NET, "peer[%p] -> %s (%d)++\n", \
               (peer), libcfs_nid2str((peer)->ibp_nid), \
               atomic_read(&(peer)->ibp_refcount)); \
        atomic_inc(&(peer)->ibp_refcount); \
} while (0)

#define kiblnd_peer_decref(peer) \
do { \
        CDEBUG(D_NET, "peer[%p] -> %s (%d)--\n", \
               (peer), libcfs_nid2str((peer)->ibp_nid), \
               atomic_read(&(peer)->ibp_refcount)); \
        LASSERT_ATOMIC_POS(&(peer)->ibp_refcount); \
        if (atomic_dec_and_test(&(peer)->ibp_refcount)) \
                kiblnd_destroy_peer(peer); \
} while (0)
static inline struct list_head *
kiblnd_nid2peerlist(lnet_nid_t nid)
{
        unsigned int hash =
                ((unsigned int)nid) % kiblnd_data.kib_peer_hash_size;

        return &kiblnd_data.kib_peers[hash];
}

static inline int
kiblnd_peer_active(kib_peer_t *peer)
{
        /* Am I in the peer hash table? */
        return !list_empty(&peer->ibp_list);
}

static inline kib_conn_t *
kiblnd_get_conn_locked(kib_peer_t *peer)
{
        LASSERT(!list_empty(&peer->ibp_conns));

        /* just return the first connection */
        return list_entry(peer->ibp_conns.next, kib_conn_t, ibc_list);
}

static inline int
kiblnd_send_keepalive(kib_conn_t *conn)
{
        return (*kiblnd_tunables.kib_keepalive > 0) &&
                cfs_time_after(jiffies, conn->ibc_last_send +
                               *kiblnd_tunables.kib_keepalive * HZ);
}
static inline int
kiblnd_need_noop(kib_conn_t *conn)
{
        LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED);

        if (conn->ibc_outstanding_credits <
            IBLND_CREDITS_HIGHWATER(conn->ibc_version) &&
            !kiblnd_send_keepalive(conn))
                return 0; /* No need to send NOOP */

        if (IBLND_OOB_CAPABLE(conn->ibc_version)) {
                if (!list_empty(&conn->ibc_tx_queue_nocred))
                        return 0; /* NOOP can be piggybacked */

                /* No tx to piggyback NOOP onto or no credit to send a tx */
                return (list_empty(&conn->ibc_tx_queue) ||
                        conn->ibc_credits == 0);
        }

        if (!list_empty(&conn->ibc_tx_noops) || /* NOOP already queued */
            !list_empty(&conn->ibc_tx_queue_nocred) || /* piggyback NOOP */
            conn->ibc_credits == 0) /* no credit */
                return 0;

        if (conn->ibc_credits == 1 && /* last credit reserved for */
            conn->ibc_outstanding_credits == 0) /* giving back credits */
                return 0;

        /* No tx to piggyback NOOP onto or no credit to send a tx */
        return (list_empty(&conn->ibc_tx_queue) || conn->ibc_credits == 1);
}

static inline void
kiblnd_abort_receives(kib_conn_t *conn)
{
        ib_modify_qp(conn->ibc_cmid->qp,
                     &kiblnd_data.kib_error_qpa, IB_QP_STATE);
}
static inline const char *
kiblnd_queue2str(kib_conn_t *conn, struct list_head *q)
{
        if (q == &conn->ibc_tx_queue)
                return "tx_queue";

        if (q == &conn->ibc_tx_queue_rsrvd)
                return "tx_queue_rsrvd";

        if (q == &conn->ibc_tx_queue_nocred)
                return "tx_queue_nocred";

        if (q == &conn->ibc_active_txs)
                return "active_txs";

        LBUG();
        return NULL;
}

/* CAVEAT EMPTOR: We rely on descriptor alignment to allow us to use the
 * lowest bits of the work request id to stash the work item type. */

#define IBLND_WID_TX   0
#define IBLND_WID_RDMA 1
#define IBLND_WID_RX   2
#define IBLND_WID_MASK 3UL

static inline __u64
kiblnd_ptr2wreqid(void *ptr, int type)
{
        unsigned long lptr = (unsigned long)ptr;

        LASSERT((lptr & IBLND_WID_MASK) == 0);
        LASSERT((type & ~IBLND_WID_MASK) == 0);
        return (__u64)(lptr | type);
}

static inline void *
kiblnd_wreqid2ptr(__u64 wreqid)
{
        return (void *)(((unsigned long)wreqid) & ~IBLND_WID_MASK);
}

static inline int
kiblnd_wreqid2type(__u64 wreqid)
{
        return wreqid & IBLND_WID_MASK;
}
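/*
 * Usage sketch (illustrative, not from the original source): a tx whose
 * descriptor is at least 4-byte aligned posts its send WR with
 * wr_id = kiblnd_ptr2wreqid(tx, IBLND_WID_TX); on completion the scheduler
 * recovers the descriptor with kiblnd_wreqid2ptr(wc.wr_id) and dispatches
 * on kiblnd_wreqid2type(wc.wr_id).
 */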
static inline void
kiblnd_set_conn_state(kib_conn_t *conn, int state)
{
        conn->ibc_state = state;
        mb();
}

static inline void
kiblnd_init_msg(kib_msg_t *msg, int type, int body_nob)
{
        msg->ibm_type = type;
        msg->ibm_nob = offsetof(kib_msg_t, ibm_u) + body_nob;
}
static inline int
kiblnd_rd_size(kib_rdma_desc_t *rd)
{
        int i;
        int size;

        for (i = size = 0; i < rd->rd_nfrags; i++)
                size += rd->rd_frags[i].rf_nob;

        return size;
}

static inline __u64
kiblnd_rd_frag_addr(kib_rdma_desc_t *rd, int index)
{
        return rd->rd_frags[index].rf_addr;
}

static inline __u32
kiblnd_rd_frag_size(kib_rdma_desc_t *rd, int index)
{
        return rd->rd_frags[index].rf_nob;
}

static inline __u32
kiblnd_rd_frag_key(kib_rdma_desc_t *rd, int index)
{
        return rd->rd_key;
}

static inline int
kiblnd_rd_consume_frag(kib_rdma_desc_t *rd, int index, __u32 nob)
{
        if (nob < rd->rd_frags[index].rf_nob) {
                rd->rd_frags[index].rf_addr += nob;
                rd->rd_frags[index].rf_nob -= nob;
        } else {
                index++;
        }

        return index;
}
static inline int
kiblnd_rd_msg_size(kib_rdma_desc_t *rd, int msgtype, int n)
{
        LASSERT(msgtype == IBLND_MSG_GET_REQ ||
                msgtype == IBLND_MSG_PUT_ACK);

        return msgtype == IBLND_MSG_GET_REQ ?
               offsetof(kib_get_msg_t, ibgm_rd.rd_frags[n]) :
               offsetof(kib_putack_msg_t, ibpam_rd.rd_frags[n]);
}
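/*
 * Worked sizing note (illustrative): since rd_frags[] is a zero-length
 * trailing array, offsetof(kib_get_msg_t, ibgm_rd.rd_frags[n]) is the
 * fixed GET_REQ header plus n * sizeof(kib_rdma_frag_t) -- i.e. the wire
 * size of a message whose descriptor carries exactly n fragments.
 */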
static inline int
kiblnd_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
{
        return ib_dma_mapping_error(dev, dma_addr);
}

static inline __u64 kiblnd_dma_map_single(struct ib_device *dev,
                                          void *msg, size_t size,
                                          enum dma_data_direction direction)
{
        return ib_dma_map_single(dev, msg, size, direction);
}

static inline void kiblnd_dma_unmap_single(struct ib_device *dev,
                                           __u64 addr, size_t size,
                                           enum dma_data_direction direction)
{
        ib_dma_unmap_single(dev, addr, size, direction);
}

#define KIBLND_UNMAP_ADDR_SET(p, m, a) do {} while (0)
#define KIBLND_UNMAP_ADDR(p, m, a) (a)

static inline int kiblnd_dma_map_sg(struct ib_device *dev,
                                    struct scatterlist *sg, int nents,
                                    enum dma_data_direction direction)
{
        return ib_dma_map_sg(dev, sg, nents, direction);
}

static inline void kiblnd_dma_unmap_sg(struct ib_device *dev,
                                       struct scatterlist *sg, int nents,
                                       enum dma_data_direction direction)
{
        ib_dma_unmap_sg(dev, sg, nents, direction);
}

static inline __u64 kiblnd_sg_dma_address(struct ib_device *dev,
                                          struct scatterlist *sg)
{
        return ib_sg_dma_address(dev, sg);
}

static inline unsigned int kiblnd_sg_dma_len(struct ib_device *dev,
                                             struct scatterlist *sg)
{
        return ib_sg_dma_len(dev, sg);
}
/* XXX We use KIBLND_CONN_PARAM(e) as a writable buffer. This is not strictly
 * right because OFED 1.2 defines it as const; to use it we have to add a
 * (void *) cast to overcome the "const". */

#define KIBLND_CONN_PARAM(e) ((e)->param.conn.private_data)
#define KIBLND_CONN_PARAM_LEN(e) ((e)->param.conn.private_data_len)
struct ib_mr *kiblnd_find_rd_dma_mr(kib_hca_dev_t *hdev,
                                    kib_rdma_desc_t *rd);
struct ib_mr *kiblnd_find_dma_mr(kib_hca_dev_t *hdev,
                                 __u64 addr, __u64 size);
void kiblnd_map_rx_descs(kib_conn_t *conn);
void kiblnd_unmap_rx_descs(kib_conn_t *conn);
int kiblnd_map_tx(lnet_ni_t *ni, kib_tx_t *tx,
                  kib_rdma_desc_t *rd, int nfrags);
void kiblnd_unmap_tx(lnet_ni_t *ni, kib_tx_t *tx);
void kiblnd_pool_free_node(kib_pool_t *pool, struct list_head *node);
struct list_head *kiblnd_pool_alloc_node(kib_poolset_t *ps);

int kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, __u64 *pages,
                        int npages, __u64 iov, kib_fmr_t *fmr);
void kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status);

int kiblnd_startup(lnet_ni_t *ni);
void kiblnd_shutdown(lnet_ni_t *ni);
int kiblnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg);
void kiblnd_query(struct lnet_ni *ni, lnet_nid_t nid, unsigned long *when);

int kiblnd_tunables_init(void);
void kiblnd_tunables_fini(void);

int kiblnd_connd(void *arg);
int kiblnd_scheduler(void *arg);
int kiblnd_thread_start(int (*fn)(void *arg), void *arg, char *name);
int kiblnd_failover_thread(void *arg);

int kiblnd_alloc_pages(kib_pages_t **pp, int cpt, int npages);
void kiblnd_free_pages(kib_pages_t *p);

int kiblnd_cm_callback(struct rdma_cm_id *cmid,
                       struct rdma_cm_event *event);
int kiblnd_translate_mtu(int value);

int kiblnd_dev_failover(kib_dev_t *dev);
int kiblnd_create_peer(lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid);
void kiblnd_destroy_peer(kib_peer_t *peer);
void kiblnd_destroy_dev(kib_dev_t *dev);
void kiblnd_unlink_peer_locked(kib_peer_t *peer);
void kiblnd_peer_alive(kib_peer_t *peer);
kib_peer_t *kiblnd_find_peer_locked(lnet_nid_t nid);
void kiblnd_peer_connect_failed(kib_peer_t *peer, int active, int error);
int kiblnd_close_stale_conns_locked(kib_peer_t *peer,
                                    int version, __u64 incarnation);
int kiblnd_close_peer_conns_locked(kib_peer_t *peer, int why);

void kiblnd_connreq_done(kib_conn_t *conn, int status);
kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
                               int state, int version);
void kiblnd_destroy_conn(kib_conn_t *conn);
void kiblnd_close_conn(kib_conn_t *conn, int error);
void kiblnd_close_conn_locked(kib_conn_t *conn, int error);

int kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type,
                     int nob, kib_rdma_desc_t *dstrd, __u64 dstcookie);

void kiblnd_launch_tx(lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid);
void kiblnd_queue_tx_locked(kib_tx_t *tx, kib_conn_t *conn);
void kiblnd_queue_tx(kib_tx_t *tx, kib_conn_t *conn);
void kiblnd_init_tx_msg(lnet_ni_t *ni, kib_tx_t *tx, int type, int body_nob);
void kiblnd_txlist_done(lnet_ni_t *ni, struct list_head *txlist,
                        int status);
void kiblnd_check_sends(kib_conn_t *conn);
void kiblnd_qp_event(struct ib_event *event, void *arg);
void kiblnd_cq_event(struct ib_event *event, void *arg);
void kiblnd_cq_completion(struct ib_cq *cq, void *arg);

void kiblnd_pack_msg(lnet_ni_t *ni, kib_msg_t *msg, int version,
                     int credits, lnet_nid_t dstnid, __u64 dststamp);
int kiblnd_unpack_msg(kib_msg_t *msg, int nob);
int kiblnd_post_rx(kib_rx_t *rx, int credit);

int kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg);
int kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
                unsigned int niov, struct kvec *iov, lnet_kiov_t *kiov,
                unsigned int offset, unsigned int mlen, unsigned int rlen);