/*
 * Copyright (c) 2004-2007 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/random.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/workqueue.h>
#include <linux/kdev_t.h>
#include <linux/etherdevice.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include "cm_msgs.h"
MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("InfiniBand CM");
MODULE_LICENSE("Dual BSD/GPL");
static void cm_add_one(struct ib_device *device);
static void cm_remove_one(struct ib_device *device, void *client_data);

static struct ib_client cm_client = {
	.name   = "cm",
	.add    = cm_add_one,
	.remove = cm_remove_one
};
static struct ib_cm {
	spinlock_t lock;
	struct list_head device_list;
	rwlock_t device_lock;
	struct rb_root listen_service_table;
	u64 listen_service_id;
	/* struct rb_root peer_service_table; todo: fix peer to peer */
	struct rb_root remote_qp_table;
	struct rb_root remote_id_table;
	struct rb_root remote_sidr_table;
	struct idr local_id_table;
	__be32 random_id_operand;
	struct list_head timewait_list;
	struct workqueue_struct *wq;
	/* Sync on cm change port state */
	spinlock_t state_lock;
} cm;
/* Counter indexes ordered by attribute ID */
enum {
	CM_REQ_COUNTER,
	CM_MRA_COUNTER,
	CM_REJ_COUNTER,
	CM_REP_COUNTER,
	CM_RTU_COUNTER,
	CM_DREQ_COUNTER,
	CM_DREP_COUNTER,
	CM_SIDR_REQ_COUNTER,
	CM_SIDR_REP_COUNTER,
	CM_LAP_COUNTER,
	CM_APR_COUNTER,
	CM_ATTR_COUNT,
	CM_ATTR_ID_OFFSET = 0x0010,
};

enum {
	CM_XMIT,
	CM_XMIT_RETRIES,
	CM_RECV,
	CM_RECV_DUPLICATES,
	CM_COUNTER_GROUPS
};
static char const counter_group_names[CM_COUNTER_GROUPS]
				     [sizeof("cm_rx_duplicates")] = {
	"cm_tx_msgs", "cm_tx_retries",
	"cm_rx_msgs", "cm_rx_duplicates"
};
struct cm_counter_group {
	struct kobject obj;
	atomic_long_t counter[CM_ATTR_COUNT];
};

struct cm_counter_attribute {
	struct attribute attr;
	int index;
};

#define CM_COUNTER_ATTR(_name, _index) \
struct cm_counter_attribute cm_##_name##_counter_attr = { \
	.attr = { .name = __stringify(_name), .mode = 0444 }, \
	.index = _index \
}
static CM_COUNTER_ATTR(req, CM_REQ_COUNTER);
static CM_COUNTER_ATTR(mra, CM_MRA_COUNTER);
static CM_COUNTER_ATTR(rej, CM_REJ_COUNTER);
static CM_COUNTER_ATTR(rep, CM_REP_COUNTER);
static CM_COUNTER_ATTR(rtu, CM_RTU_COUNTER);
static CM_COUNTER_ATTR(dreq, CM_DREQ_COUNTER);
static CM_COUNTER_ATTR(drep, CM_DREP_COUNTER);
static CM_COUNTER_ATTR(sidr_req, CM_SIDR_REQ_COUNTER);
static CM_COUNTER_ATTR(sidr_rep, CM_SIDR_REP_COUNTER);
static CM_COUNTER_ATTR(lap, CM_LAP_COUNTER);
static CM_COUNTER_ATTR(apr, CM_APR_COUNTER);
static struct attribute *cm_counter_default_attrs[] = {
	&cm_req_counter_attr.attr,
	&cm_mra_counter_attr.attr,
	&cm_rej_counter_attr.attr,
	&cm_rep_counter_attr.attr,
	&cm_rtu_counter_attr.attr,
	&cm_dreq_counter_attr.attr,
	&cm_drep_counter_attr.attr,
	&cm_sidr_req_counter_attr.attr,
	&cm_sidr_rep_counter_attr.attr,
	&cm_lap_counter_attr.attr,
	&cm_apr_counter_attr.attr,
	NULL
};
struct cm_port {
	struct cm_device *cm_dev;
	struct ib_mad_agent *mad_agent;
	struct kobject port_obj;
	u8 port_num;
	struct list_head cm_priv_prim_list;
	struct list_head cm_priv_altr_list;
	struct cm_counter_group counter_group[CM_COUNTER_GROUPS];
};

struct cm_device {
	struct list_head list;
	struct ib_device *ib_device;
	struct device *device;
	u8 ack_delay;
	int going_down;
	struct cm_port *port[0];
};

struct cm_av {
	struct cm_port *port;
	union ib_gid dgid;
	struct ib_ah_attr ah_attr;
	u16 pkey_index;
	u8 timeout;
};

struct cm_work {
	struct delayed_work work;
	struct list_head list;
	struct cm_port *port;
	struct ib_mad_recv_wc *mad_recv_wc;	/* Received MADs */
	__be32 local_id;			/* Established / timewait */
	__be32 remote_id;
	struct ib_cm_event cm_event;
	struct ib_sa_path_rec path[0];
};
struct cm_timewait_info {
	struct cm_work work;			/* Must be first. */
	struct list_head list;
	struct rb_node remote_qp_node;
	struct rb_node remote_id_node;
	__be64 remote_ca_guid;
	__be32 remote_qpn;
	u8 inserted_remote_qp;
	u8 inserted_remote_id;
};
struct cm_id_private {
	struct ib_cm_id	id;

	struct rb_node service_node;
	struct rb_node sidr_id_node;
	spinlock_t lock;	/* Do not acquire inside cm.lock */
	struct completion comp;
	atomic_t refcount;
	/* Number of clients sharing this ib_cm_id. Only valid for listeners.
	 * Protected by the cm.lock spinlock. */
	int listen_sharecount;

	struct ib_mad_send_buf *msg;
	struct cm_timewait_info *timewait_info;
	/* todo: use alternate port on send failure */
	struct cm_av av;
	struct cm_av alt_av;

	void *private_data;
	__be64 tid;
	__be32 local_qpn;
	__be32 remote_qpn;
	enum ib_qp_type qp_type;
	__be32 sq_psn;
	__be32 rq_psn;
	int timeout_ms;
	enum ib_mtu path_mtu;
	__be16 pkey;
	u8 private_data_len;
	u8 max_cm_retries;
	u8 peer_to_peer;
	u8 responder_resources;
	u8 initiator_depth;
	u8 retry_count;
	u8 rnr_retry_count;
	u8 service_timeout;
	u8 target_ack_delay;

	struct list_head prim_list;
	struct list_head altr_list;
	/* Indicates that the send port mad is registered and av is set */
	int prim_send_port_not_ready;
	int altr_send_port_not_ready;

	struct list_head work_list;
	atomic_t work_count;
};
static void cm_work_handler(struct work_struct *work);

static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
{
	if (atomic_dec_and_test(&cm_id_priv->refcount))
		complete(&cm_id_priv->comp);
}
static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
			struct ib_mad_send_buf **msg)
{
	struct ib_mad_agent *mad_agent;
	struct ib_mad_send_buf *m;
	struct ib_ah *ah;
	struct cm_av *av;
	unsigned long flags, flags2;
	int ret = 0;

	/* don't let the port be released till the agent is down */
	spin_lock_irqsave(&cm.state_lock, flags2);
	spin_lock_irqsave(&cm.lock, flags);
	if (!cm_id_priv->prim_send_port_not_ready)
		av = &cm_id_priv->av;
	else if (!cm_id_priv->altr_send_port_not_ready &&
		 (cm_id_priv->alt_av.port))
		av = &cm_id_priv->alt_av;
	else {
		pr_info("%s: not valid CM id\n", __func__);
		ret = -ENODEV;
		spin_unlock_irqrestore(&cm.lock, flags);
		goto out;
	}
	spin_unlock_irqrestore(&cm.lock, flags);
	/* Make sure the port hasn't released the mad yet */
	mad_agent = cm_id_priv->av.port->mad_agent;
	if (!mad_agent) {
		pr_info("%s: not a valid MAD agent\n", __func__);
		ret = -ENODEV;
		goto out;
	}
	ah = ib_create_ah(mad_agent->qp->pd, &av->ah_attr);
	if (IS_ERR(ah)) {
		ret = PTR_ERR(ah);
		goto out;
	}

	m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
			       av->pkey_index,
			       0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
			       GFP_ATOMIC,
			       IB_MGMT_BASE_VERSION);
	if (IS_ERR(m)) {
		ib_destroy_ah(ah);
		ret = PTR_ERR(m);
		goto out;
	}

	/* Timeout set by caller if response is expected. */
	m->ah = ah;
	m->retries = cm_id_priv->max_cm_retries;

	atomic_inc(&cm_id_priv->refcount);
	m->context[0] = cm_id_priv;
	*msg = m;

out:
	spin_unlock_irqrestore(&cm.state_lock, flags2);
	return ret;
}
static int cm_alloc_response_msg(struct cm_port *port,
				 struct ib_mad_recv_wc *mad_recv_wc,
				 struct ib_mad_send_buf **msg)
{
	struct ib_mad_send_buf *m;
	struct ib_ah *ah;

	ah = ib_create_ah_from_wc(port->mad_agent->qp->pd, mad_recv_wc->wc,
				  mad_recv_wc->recv_buf.grh, port->port_num);
	if (IS_ERR(ah))
		return PTR_ERR(ah);

	m = ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index,
			       0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
			       GFP_ATOMIC,
			       IB_MGMT_BASE_VERSION);
	if (IS_ERR(m)) {
		ib_destroy_ah(ah);
		return PTR_ERR(m);
	}
	m->ah = ah;
	*msg = m;
	return 0;
}

static void cm_free_msg(struct ib_mad_send_buf *msg)
{
	ib_destroy_ah(msg->ah);
	if (msg->context[0])
		cm_deref_id(msg->context[0]);
	ib_free_send_mad(msg);
}
static void * cm_copy_private_data(const void *private_data,
				   u8 private_data_len)
{
	void *data;

	if (!private_data || !private_data_len)
		return NULL;

	data = kmemdup(private_data, private_data_len, GFP_KERNEL);
	if (!data)
		return ERR_PTR(-ENOMEM);

	return data;
}

static void cm_set_private_data(struct cm_id_private *cm_id_priv,
				void *private_data, u8 private_data_len)
{
	if (cm_id_priv->private_data && cm_id_priv->private_data_len)
		kfree(cm_id_priv->private_data);

	cm_id_priv->private_data = private_data;
	cm_id_priv->private_data_len = private_data_len;
}

static void cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
				    struct ib_grh *grh, struct cm_av *av)
{
	av->port = port;
	av->pkey_index = wc->pkey_index;
	ib_init_ah_from_wc(port->cm_dev->ib_device, port->port_num, wc,
			   grh, &av->ah_attr);
}
static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av,
			      struct cm_id_private *cm_id_priv)
{
	struct cm_device *cm_dev;
	struct cm_port *port = NULL;
	unsigned long flags;
	int ret;
	u8 p;
	struct net_device *ndev = ib_get_ndev_from_path(path);

	read_lock_irqsave(&cm.device_lock, flags);
	list_for_each_entry(cm_dev, &cm.device_list, list) {
		if (!ib_find_cached_gid(cm_dev->ib_device, &path->sgid,
					path->gid_type, ndev, &p, NULL)) {
			port = cm_dev->port[p-1];
			break;
		}
	}
	read_unlock_irqrestore(&cm.device_lock, flags);

	if (ndev)
		dev_put(ndev);

	if (!port)
		return -EINVAL;

	ret = ib_find_cached_pkey(cm_dev->ib_device, port->port_num,
				  be16_to_cpu(path->pkey), &av->pkey_index);
	if (ret)
		return ret;

	av->port = port;
	ib_init_ah_from_path(cm_dev->ib_device, port->port_num, path,
			     &av->ah_attr);
	av->timeout = path->packet_life_time + 1;

	spin_lock_irqsave(&cm.lock, flags);
	if (&cm_id_priv->av == av)
		list_add_tail(&cm_id_priv->prim_list, &port->cm_priv_prim_list);
	else if (&cm_id_priv->alt_av == av)
		list_add_tail(&cm_id_priv->altr_list, &port->cm_priv_altr_list);
	else
		ret = -EINVAL;

	spin_unlock_irqrestore(&cm.lock, flags);

	return ret;
}
static int cm_alloc_id(struct cm_id_private *cm_id_priv)
{
	unsigned long flags;
	int id;

	idr_preload(GFP_KERNEL);
	spin_lock_irqsave(&cm.lock, flags);

	id = idr_alloc_cyclic(&cm.local_id_table, cm_id_priv, 0, 0, GFP_NOWAIT);

	spin_unlock_irqrestore(&cm.lock, flags);
	idr_preload_end();

	cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand;
	return id < 0 ? id : 0;
}
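
/*
 * Note on the XOR above (illustrative, values arbitrary): the idr key is
 * the raw cyclic id, while the externally visible local_id is that key
 * XORed with cm.random_id_operand, which is filled with random bytes when
 * the module initializes (not shown in this excerpt).  This makes local
 * communication IDs hard for a remote peer to guess.  For example, if the
 * idr hands out key 0x00000007 and the operand is 0x5a5a5a5a, the wire-
 * visible ID is 0x5a5a5a5d; cm_free_id() and cm_get_id() below undo the
 * XOR before touching the idr.
 */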
static void cm_free_id(__be32 local_id)
{
	spin_lock_irq(&cm.lock);
	idr_remove(&cm.local_id_table,
		   (__force int) (local_id ^ cm.random_id_operand));
	spin_unlock_irq(&cm.lock);
}

static struct cm_id_private * cm_get_id(__be32 local_id, __be32 remote_id)
{
	struct cm_id_private *cm_id_priv;

	cm_id_priv = idr_find(&cm.local_id_table,
			      (__force int) (local_id ^ cm.random_id_operand));
	if (cm_id_priv) {
		if (cm_id_priv->id.remote_id == remote_id)
			atomic_inc(&cm_id_priv->refcount);
		else
			cm_id_priv = NULL;
	}

	return cm_id_priv;
}

static struct cm_id_private * cm_acquire_id(__be32 local_id, __be32 remote_id)
{
	struct cm_id_private *cm_id_priv;

	spin_lock_irq(&cm.lock);
	cm_id_priv = cm_get_id(local_id, remote_id);
	spin_unlock_irq(&cm.lock);

	return cm_id_priv;
}
/*
 * Trivial helpers to strip endian annotation and compare; the
 * endianness doesn't actually matter since we just need a stable
 * order for the RB tree.
 */
static int be32_lt(__be32 a, __be32 b)
{
	return (__force u32) a < (__force u32) b;
}

static int be32_gt(__be32 a, __be32 b)
{
	return (__force u32) a > (__force u32) b;
}

static int be64_lt(__be64 a, __be64 b)
{
	return (__force u64) a < (__force u64) b;
}

static int be64_gt(__be64 a, __be64 b)
{
	return (__force u64) a > (__force u64) b;
}
static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
{
	struct rb_node **link = &cm.listen_service_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_id_private *cur_cm_id_priv;
	__be64 service_id = cm_id_priv->id.service_id;
	__be64 service_mask = cm_id_priv->id.service_mask;

	while (*link) {
		parent = *link;
		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
					  service_node);
		if ((cur_cm_id_priv->id.service_mask & service_id) ==
		    (service_mask & cur_cm_id_priv->id.service_id) &&
		    (cm_id_priv->id.device == cur_cm_id_priv->id.device))
			return cur_cm_id_priv;

		if (cm_id_priv->id.device < cur_cm_id_priv->id.device)
			link = &(*link)->rb_left;
		else if (cm_id_priv->id.device > cur_cm_id_priv->id.device)
			link = &(*link)->rb_right;
		else if (be64_lt(service_id, cur_cm_id_priv->id.service_id))
			link = &(*link)->rb_left;
		else if (be64_gt(service_id, cur_cm_id_priv->id.service_id))
			link = &(*link)->rb_right;
		else
			link = &(*link)->rb_right;
	}
	rb_link_node(&cm_id_priv->service_node, parent, link);
	rb_insert_color(&cm_id_priv->service_node, &cm.listen_service_table);
	return NULL;
}
static struct cm_id_private * cm_find_listen(struct ib_device *device,
					     __be64 service_id)
{
	struct rb_node *node = cm.listen_service_table.rb_node;
	struct cm_id_private *cm_id_priv;

	while (node) {
		cm_id_priv = rb_entry(node, struct cm_id_private, service_node);
		if ((cm_id_priv->id.service_mask & service_id) ==
		     cm_id_priv->id.service_id &&
		    (cm_id_priv->id.device == device))
			return cm_id_priv;

		if (device < cm_id_priv->id.device)
			node = node->rb_left;
		else if (device > cm_id_priv->id.device)
			node = node->rb_right;
		else if (be64_lt(service_id, cm_id_priv->id.service_id))
			node = node->rb_left;
		else if (be64_gt(service_id, cm_id_priv->id.service_id))
			node = node->rb_right;
		else
			node = node->rb_right;
	}
	return NULL;
}
static struct cm_timewait_info * cm_insert_remote_id(struct cm_timewait_info
						     *timewait_info)
{
	struct rb_node **link = &cm.remote_id_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_timewait_info *cur_timewait_info;
	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
	__be32 remote_id = timewait_info->work.remote_id;

	while (*link) {
		parent = *link;
		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
					     remote_id_node);
		if (be32_lt(remote_id, cur_timewait_info->work.remote_id))
			link = &(*link)->rb_left;
		else if (be32_gt(remote_id, cur_timewait_info->work.remote_id))
			link = &(*link)->rb_right;
		else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
			link = &(*link)->rb_left;
		else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
			link = &(*link)->rb_right;
		else
			return cur_timewait_info;
	}
	timewait_info->inserted_remote_id = 1;
	rb_link_node(&timewait_info->remote_id_node, parent, link);
	rb_insert_color(&timewait_info->remote_id_node, &cm.remote_id_table);
	return NULL;
}
static struct cm_timewait_info * cm_find_remote_id(__be64 remote_ca_guid,
						   __be32 remote_id)
{
	struct rb_node *node = cm.remote_id_table.rb_node;
	struct cm_timewait_info *timewait_info;

	while (node) {
		timewait_info = rb_entry(node, struct cm_timewait_info,
					 remote_id_node);
		if (be32_lt(remote_id, timewait_info->work.remote_id))
			node = node->rb_left;
		else if (be32_gt(remote_id, timewait_info->work.remote_id))
			node = node->rb_right;
		else if (be64_lt(remote_ca_guid, timewait_info->remote_ca_guid))
			node = node->rb_left;
		else if (be64_gt(remote_ca_guid, timewait_info->remote_ca_guid))
			node = node->rb_right;
		else
			return timewait_info;
	}
	return NULL;
}
static struct cm_timewait_info * cm_insert_remote_qpn(struct cm_timewait_info
						      *timewait_info)
{
	struct rb_node **link = &cm.remote_qp_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_timewait_info *cur_timewait_info;
	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
	__be32 remote_qpn = timewait_info->remote_qpn;

	while (*link) {
		parent = *link;
		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
					     remote_qp_node);
		if (be32_lt(remote_qpn, cur_timewait_info->remote_qpn))
			link = &(*link)->rb_left;
		else if (be32_gt(remote_qpn, cur_timewait_info->remote_qpn))
			link = &(*link)->rb_right;
		else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
			link = &(*link)->rb_left;
		else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
			link = &(*link)->rb_right;
		else
			return cur_timewait_info;
	}
	timewait_info->inserted_remote_qp = 1;
	rb_link_node(&timewait_info->remote_qp_node, parent, link);
	rb_insert_color(&timewait_info->remote_qp_node, &cm.remote_qp_table);
	return NULL;
}
static struct cm_id_private * cm_insert_remote_sidr(struct cm_id_private
						    *cm_id_priv)
{
	struct rb_node **link = &cm.remote_sidr_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_id_private *cur_cm_id_priv;
	union ib_gid *port_gid = &cm_id_priv->av.dgid;
	__be32 remote_id = cm_id_priv->id.remote_id;

	while (*link) {
		parent = *link;
		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
					  sidr_id_node);
		if (be32_lt(remote_id, cur_cm_id_priv->id.remote_id))
			link = &(*link)->rb_left;
		else if (be32_gt(remote_id, cur_cm_id_priv->id.remote_id))
			link = &(*link)->rb_right;
		else {
			int cmp;
			cmp = memcmp(port_gid, &cur_cm_id_priv->av.dgid,
				     sizeof *port_gid);
			if (cmp < 0)
				link = &(*link)->rb_left;
			else if (cmp > 0)
				link = &(*link)->rb_right;
			else
				return cur_cm_id_priv;
		}
	}
	rb_link_node(&cm_id_priv->sidr_id_node, parent, link);
	rb_insert_color(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
	return NULL;
}
static void cm_reject_sidr_req(struct cm_id_private *cm_id_priv,
			       enum ib_cm_sidr_status status)
{
	struct ib_cm_sidr_rep_param param;

	memset(&param, 0, sizeof param);
	param.status = status;
	ib_send_cm_sidr_rep(&cm_id_priv->id, &param);
}
struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
				 ib_cm_handler cm_handler,
				 void *context)
{
	struct cm_id_private *cm_id_priv;
	int ret;

	cm_id_priv = kzalloc(sizeof *cm_id_priv, GFP_KERNEL);
	if (!cm_id_priv)
		return ERR_PTR(-ENOMEM);

	cm_id_priv->id.state = IB_CM_IDLE;
	cm_id_priv->id.device = device;
	cm_id_priv->id.cm_handler = cm_handler;
	cm_id_priv->id.context = context;
	cm_id_priv->id.remote_cm_qpn = 1;
	ret = cm_alloc_id(cm_id_priv);
	if (ret)
		goto error;

	spin_lock_init(&cm_id_priv->lock);
	init_completion(&cm_id_priv->comp);
	INIT_LIST_HEAD(&cm_id_priv->work_list);
	INIT_LIST_HEAD(&cm_id_priv->prim_list);
	INIT_LIST_HEAD(&cm_id_priv->altr_list);
	atomic_set(&cm_id_priv->work_count, -1);
	atomic_set(&cm_id_priv->refcount, 1);
	return &cm_id_priv->id;

error:
	kfree(cm_id_priv);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(ib_create_cm_id);
static struct cm_work * cm_dequeue_work(struct cm_id_private *cm_id_priv)
{
	struct cm_work *work;

	if (list_empty(&cm_id_priv->work_list))
		return NULL;

	work = list_entry(cm_id_priv->work_list.next, struct cm_work, list);
	list_del(&work->list);
	return work;
}

static void cm_free_work(struct cm_work *work)
{
	if (work->mad_recv_wc)
		ib_free_recv_mad(work->mad_recv_wc);
	kfree(work);
}
static inline int cm_convert_to_ms(int iba_time)
{
	/* approximate conversion to ms from 4.096us x 2^iba_time */
	return 1 << max(iba_time - 8, 0);
}
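
/*
 * Worked example for the approximation above (values arbitrary):
 * iba_time = 14 encodes 4.096us * 2^14 ~= 67ms; the shift returns
 * 1 << (14 - 8) = 64ms.  Any iba_time of 8 or less rounds up to 1ms,
 * since 4.096us * 2^8 ~= 1.05ms.
 */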
/*
 * calculate: 4.096x2^ack_timeout = 4.096x2^ack_delay + 2x4.096x2^life_time
 * Because of how ack_timeout is stored, adding one doubles the timeout.
 * To avoid large timeouts, select the max(ack_delay, life_time + 1), and
 * increment it (round up) only if the other is within 50%.
 */
static u8 cm_ack_timeout(u8 ca_ack_delay, u8 packet_life_time)
{
	int ack_timeout = packet_life_time + 1;

	if (ack_timeout >= ca_ack_delay)
		ack_timeout += (ca_ack_delay >= (ack_timeout - 1));
	else
		ack_timeout = ca_ack_delay +
			      (ack_timeout >= (ca_ack_delay - 1));

	return min(31, ack_timeout);
}
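
/*
 * Worked example (values arbitrary): with ca_ack_delay = 15 and
 * packet_life_time = 14, ack_timeout starts at 15; since ca_ack_delay
 * (15) >= ack_timeout - 1 (14), the result is bumped to 16, i.e. the
 * encoded timeout doubles to cover both terms.  With packet_life_time
 * = 4 instead, the CA's ack delay dominates and 15 is returned
 * unchanged.  The result is clamped to 31, the largest encodable value.
 */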
static void cm_cleanup_timewait(struct cm_timewait_info *timewait_info)
{
	if (timewait_info->inserted_remote_id) {
		rb_erase(&timewait_info->remote_id_node, &cm.remote_id_table);
		timewait_info->inserted_remote_id = 0;
	}

	if (timewait_info->inserted_remote_qp) {
		rb_erase(&timewait_info->remote_qp_node, &cm.remote_qp_table);
		timewait_info->inserted_remote_qp = 0;
	}
}

static struct cm_timewait_info * cm_create_timewait_info(__be32 local_id)
{
	struct cm_timewait_info *timewait_info;

	timewait_info = kzalloc(sizeof *timewait_info, GFP_KERNEL);
	if (!timewait_info)
		return ERR_PTR(-ENOMEM);

	timewait_info->work.local_id = local_id;
	INIT_DELAYED_WORK(&timewait_info->work.work, cm_work_handler);
	timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT;
	return timewait_info;
}
static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
{
	int wait_time;
	unsigned long flags;
	struct cm_device *cm_dev;

	cm_dev = ib_get_client_data(cm_id_priv->id.device, &cm_client);
	if (!cm_dev)
		return;

	spin_lock_irqsave(&cm.lock, flags);
	cm_cleanup_timewait(cm_id_priv->timewait_info);
	list_add_tail(&cm_id_priv->timewait_info->list, &cm.timewait_list);
	spin_unlock_irqrestore(&cm.lock, flags);

	/*
	 * The cm_id could be destroyed by the user before we exit timewait.
	 * To protect against this, we search for the cm_id after exiting
	 * timewait before notifying the user that we've exited timewait.
	 */
	cm_id_priv->id.state = IB_CM_TIMEWAIT;
	wait_time = cm_convert_to_ms(cm_id_priv->av.timeout);

	/* Check if the device started its remove_one */
	spin_lock_irqsave(&cm.lock, flags);
	if (!cm_dev->going_down)
		queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
				   msecs_to_jiffies(wait_time));
	spin_unlock_irqrestore(&cm.lock, flags);

	cm_id_priv->timewait_info = NULL;
}
static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
{
	unsigned long flags;

	cm_id_priv->id.state = IB_CM_IDLE;
	if (cm_id_priv->timewait_info) {
		spin_lock_irqsave(&cm.lock, flags);
		cm_cleanup_timewait(cm_id_priv->timewait_info);
		spin_unlock_irqrestore(&cm.lock, flags);
		kfree(cm_id_priv->timewait_info);
		cm_id_priv->timewait_info = NULL;
	}
}
static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
{
	struct cm_id_private *cm_id_priv;
	struct cm_work *work;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
retest:
	spin_lock_irq(&cm_id_priv->lock);
	switch (cm_id->state) {
	case IB_CM_LISTEN:
		spin_unlock_irq(&cm_id_priv->lock);

		spin_lock_irq(&cm.lock);
		if (--cm_id_priv->listen_sharecount > 0) {
			/* The id is still shared. */
			cm_deref_id(cm_id_priv);
			spin_unlock_irq(&cm.lock);
			return;
		}
		rb_erase(&cm_id_priv->service_node, &cm.listen_service_table);
		spin_unlock_irq(&cm.lock);
		break;
	case IB_CM_SIDR_REQ_SENT:
		cm_id->state = IB_CM_IDLE;
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		spin_unlock_irq(&cm_id_priv->lock);
		break;
	case IB_CM_SIDR_REQ_RCVD:
		spin_unlock_irq(&cm_id_priv->lock);
		cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT);
		spin_lock_irq(&cm.lock);
		if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node))
			rb_erase(&cm_id_priv->sidr_id_node,
				 &cm.remote_sidr_table);
		spin_unlock_irq(&cm.lock);
		break;
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		spin_unlock_irq(&cm_id_priv->lock);
		ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT,
			       &cm_id_priv->id.device->node_guid,
			       sizeof cm_id_priv->id.device->node_guid,
			       NULL, 0);
		break;
	case IB_CM_REQ_RCVD:
		if (err == -ENOMEM) {
			/* Do not reject to allow future retries. */
			cm_reset_to_idle(cm_id_priv);
			spin_unlock_irq(&cm_id_priv->lock);
		} else {
			spin_unlock_irq(&cm_id_priv->lock);
			ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
				       NULL, 0, NULL, 0);
		}
		break;
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		/* Fall through */
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		spin_unlock_irq(&cm_id_priv->lock);
		ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
			       NULL, 0, NULL, 0);
		break;
	case IB_CM_ESTABLISHED:
		spin_unlock_irq(&cm_id_priv->lock);
		if (cm_id_priv->qp_type == IB_QPT_XRC_TGT)
			break;
		ib_send_cm_dreq(cm_id, NULL, 0);
		goto retest;
	case IB_CM_DREQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		cm_enter_timewait(cm_id_priv);
		spin_unlock_irq(&cm_id_priv->lock);
		break;
	case IB_CM_DREQ_RCVD:
		spin_unlock_irq(&cm_id_priv->lock);
		ib_send_cm_drep(cm_id, NULL, 0);
		break;
	default:
		spin_unlock_irq(&cm_id_priv->lock);
		break;
	}

	spin_lock_irq(&cm.lock);
	if (!list_empty(&cm_id_priv->altr_list) &&
	    (!cm_id_priv->altr_send_port_not_ready))
		list_del(&cm_id_priv->altr_list);
	if (!list_empty(&cm_id_priv->prim_list) &&
	    (!cm_id_priv->prim_send_port_not_ready))
		list_del(&cm_id_priv->prim_list);
	spin_unlock_irq(&cm.lock);

	cm_free_id(cm_id->local_id);
	cm_deref_id(cm_id_priv);
	wait_for_completion(&cm_id_priv->comp);
	while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
		cm_free_work(work);
	kfree(cm_id_priv->private_data);
	kfree(cm_id_priv);
}
void ib_destroy_cm_id(struct ib_cm_id *cm_id)
{
	cm_destroy_id(cm_id, 0);
}
EXPORT_SYMBOL(ib_destroy_cm_id);
/**
 * __ib_cm_listen - Initiates listening on the specified service ID for
 *   connection and service ID resolution requests.
 * @cm_id: Connection identifier associated with the listen request.
 * @service_id: Service identifier matched against incoming connection
 *   and service ID resolution requests.  The service ID should be specified
 *   in network-byte order.  If set to IB_CM_ASSIGN_SERVICE_ID, the CM will
 *   assign a service ID to the caller.
 * @service_mask: Mask applied to service ID used to listen across a
 *   range of service IDs.  If set to 0, the service ID is matched
 *   exactly.  This parameter is ignored if %service_id is set to
 *   IB_CM_ASSIGN_SERVICE_ID.
 */
static int __ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id,
			  __be64 service_mask)
{
	struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
	int ret = 0;

	service_mask = service_mask ? service_mask : ~cpu_to_be64(0);
	service_id &= service_mask;
	if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
	    (service_id != IB_CM_ASSIGN_SERVICE_ID))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	if (cm_id->state != IB_CM_IDLE)
		return -EINVAL;

	cm_id->state = IB_CM_LISTEN;
	++cm_id_priv->listen_sharecount;

	if (service_id == IB_CM_ASSIGN_SERVICE_ID) {
		cm_id->service_id = cpu_to_be64(cm.listen_service_id++);
		cm_id->service_mask = ~cpu_to_be64(0);
	} else {
		cm_id->service_id = service_id;
		cm_id->service_mask = service_mask;
	}
	cur_cm_id_priv = cm_insert_listen(cm_id_priv);

	if (cur_cm_id_priv) {
		cm_id->state = IB_CM_IDLE;
		--cm_id_priv->listen_sharecount;
		ret = -EBUSY;
	}
	return ret;
}
int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm.lock, flags);
	ret = __ib_cm_listen(cm_id, service_id, service_mask);
	spin_unlock_irqrestore(&cm.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_cm_listen);
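
/*
 * Usage sketch for the exported listen API (illustrative only; the
 * handler name and service ID below are hypothetical, not from this
 * file).  A ULP creates an id, then listens for an exact service ID
 * match by passing a zero mask:
 *
 *	static int my_cm_handler(struct ib_cm_id *id,
 *				 struct ib_cm_event *event)
 *	{
 *		return 0;	// returning nonzero destroys the id
 *	}
 *
 *	id = ib_create_cm_id(device, my_cm_handler, NULL);
 *	if (!IS_ERR(id))
 *		ret = ib_cm_listen(id, cpu_to_be64(0x1234ULL), 0);
 */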
/**
 * Create a new listening ib_cm_id and listen on the given service ID.
 *
 * If there's an existing ID listening on that same device and service ID,
 * return it.
 *
 * @device: Device associated with the cm_id.  All related communication will
 *   be associated with the specified device.
 * @cm_handler: Callback invoked to notify the user of CM events.
 * @service_id: Service identifier matched against incoming connection
 *   and service ID resolution requests.  The service ID should be specified
 *   in network-byte order.  If set to IB_CM_ASSIGN_SERVICE_ID, the CM will
 *   assign a service ID to the caller.
 *
 * Callers should call ib_destroy_cm_id when done with the listener ID.
 */
struct ib_cm_id *ib_cm_insert_listen(struct ib_device *device,
				     ib_cm_handler cm_handler,
				     __be64 service_id)
{
	struct cm_id_private *cm_id_priv;
	struct ib_cm_id *cm_id;
	unsigned long flags;
	int err = 0;

	/* Create an ID in advance, since the creation may sleep */
	cm_id = ib_create_cm_id(device, cm_handler, NULL);
	if (IS_ERR(cm_id))
		return cm_id;

	spin_lock_irqsave(&cm.lock, flags);

	if (service_id == IB_CM_ASSIGN_SERVICE_ID)
		goto new_id;

	/* Find an existing ID */
	cm_id_priv = cm_find_listen(device, service_id);
	if (cm_id_priv) {
		if (cm_id->cm_handler != cm_handler || cm_id->context) {
			/* Sharing an ib_cm_id with different handlers is not
			 * supported */
			spin_unlock_irqrestore(&cm.lock, flags);
			return ERR_PTR(-EINVAL);
		}
		atomic_inc(&cm_id_priv->refcount);
		++cm_id_priv->listen_sharecount;
		spin_unlock_irqrestore(&cm.lock, flags);

		ib_destroy_cm_id(cm_id);
		cm_id = &cm_id_priv->id;
		return cm_id;
	}

new_id:
	/* Use newly created ID */
	err = __ib_cm_listen(cm_id, service_id, 0);

	spin_unlock_irqrestore(&cm.lock, flags);

	if (err) {
		ib_destroy_cm_id(cm_id);
		return ERR_PTR(err);
	}
	return cm_id;
}
EXPORT_SYMBOL(ib_cm_insert_listen);
static __be64 cm_form_tid(struct cm_id_private *cm_id_priv,
			  enum cm_msg_sequence msg_seq)
{
	u64 hi_tid, low_tid;

	hi_tid   = ((u64) cm_id_priv->av.port->mad_agent->hi_tid) << 32;
	low_tid  = (u64) ((__force u32)cm_id_priv->id.local_id |
			  (msg_seq << 30));
	return cpu_to_be64(hi_tid | low_tid);
}
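
/*
 * Descriptive note on the TID layout produced above: the MAD agent's
 * hi_tid fills bits 63..32, and the low 32 bits are the local
 * communication ID ORed with the message sequence (REQ/DREQ/LAP)
 * shifted into bits 31..30, so that, for example, a REQ and a DREQ
 * outstanding on the same cm_id carry distinct transaction IDs.
 */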
static void cm_format_mad_hdr(struct ib_mad_hdr *hdr,
			      __be16 attr_id, __be64 tid)
{
	hdr->base_version  = IB_MGMT_BASE_VERSION;
	hdr->mgmt_class	   = IB_MGMT_CLASS_CM;
	hdr->class_version = IB_CM_CLASS_VERSION;
	hdr->method	   = IB_MGMT_METHOD_SEND;
	hdr->attr_id	   = attr_id;
	hdr->tid	   = tid;
}
static void cm_format_req(struct cm_req_msg *req_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_cm_req_param *param)
{
	struct ib_sa_path_rec *pri_path = param->primary_path;
	struct ib_sa_path_rec *alt_path = param->alternate_path;

	cm_format_mad_hdr(&req_msg->hdr, CM_REQ_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_REQ));

	req_msg->local_comm_id = cm_id_priv->id.local_id;
	req_msg->service_id = param->service_id;
	req_msg->local_ca_guid = cm_id_priv->id.device->node_guid;
	cm_req_set_local_qpn(req_msg, cpu_to_be32(param->qp_num));
	cm_req_set_init_depth(req_msg, param->initiator_depth);
	cm_req_set_remote_resp_timeout(req_msg,
				       param->remote_cm_response_timeout);
	cm_req_set_qp_type(req_msg, param->qp_type);
	cm_req_set_flow_ctrl(req_msg, param->flow_control);
	cm_req_set_starting_psn(req_msg, cpu_to_be32(param->starting_psn));
	cm_req_set_local_resp_timeout(req_msg,
				      param->local_cm_response_timeout);
	req_msg->pkey = param->primary_path->pkey;
	cm_req_set_path_mtu(req_msg, param->primary_path->mtu);
	cm_req_set_max_cm_retries(req_msg, param->max_cm_retries);

	if (param->qp_type != IB_QPT_XRC_INI) {
		cm_req_set_resp_res(req_msg, param->responder_resources);
		cm_req_set_retry_count(req_msg, param->retry_count);
		cm_req_set_rnr_retry_count(req_msg, param->rnr_retry_count);
		cm_req_set_srq(req_msg, param->srq);
	}

	if (pri_path->hop_limit <= 1) {
		req_msg->primary_local_lid = pri_path->slid;
		req_msg->primary_remote_lid = pri_path->dlid;
	} else {
		/* Work-around until there's a way to obtain remote LID info */
		req_msg->primary_local_lid = IB_LID_PERMISSIVE;
		req_msg->primary_remote_lid = IB_LID_PERMISSIVE;
	}
	req_msg->primary_local_gid = pri_path->sgid;
	req_msg->primary_remote_gid = pri_path->dgid;
	cm_req_set_primary_flow_label(req_msg, pri_path->flow_label);
	cm_req_set_primary_packet_rate(req_msg, pri_path->rate);
	req_msg->primary_traffic_class = pri_path->traffic_class;
	req_msg->primary_hop_limit = pri_path->hop_limit;
	cm_req_set_primary_sl(req_msg, pri_path->sl);
	cm_req_set_primary_subnet_local(req_msg, (pri_path->hop_limit <= 1));
	cm_req_set_primary_local_ack_timeout(req_msg,
		cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
			       pri_path->packet_life_time));

	if (alt_path) {
		if (alt_path->hop_limit <= 1) {
			req_msg->alt_local_lid = alt_path->slid;
			req_msg->alt_remote_lid = alt_path->dlid;
		} else {
			req_msg->alt_local_lid = IB_LID_PERMISSIVE;
			req_msg->alt_remote_lid = IB_LID_PERMISSIVE;
		}
		req_msg->alt_local_gid = alt_path->sgid;
		req_msg->alt_remote_gid = alt_path->dgid;
		cm_req_set_alt_flow_label(req_msg,
					  alt_path->flow_label);
		cm_req_set_alt_packet_rate(req_msg, alt_path->rate);
		req_msg->alt_traffic_class = alt_path->traffic_class;
		req_msg->alt_hop_limit = alt_path->hop_limit;
		cm_req_set_alt_sl(req_msg, alt_path->sl);
		cm_req_set_alt_subnet_local(req_msg, (alt_path->hop_limit <= 1));
		cm_req_set_alt_local_ack_timeout(req_msg,
			cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
				       alt_path->packet_life_time));
	}

	if (param->private_data && param->private_data_len)
		memcpy(req_msg->private_data, param->private_data,
		       param->private_data_len);
}
static int cm_validate_req_param(struct ib_cm_req_param *param)
{
	/* peer-to-peer not supported */
	if (param->peer_to_peer)
		return -EINVAL;

	if (!param->primary_path)
		return -EINVAL;

	if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC &&
	    param->qp_type != IB_QPT_XRC_INI)
		return -EINVAL;

	if (param->private_data &&
	    param->private_data_len > IB_CM_REQ_PRIVATE_DATA_SIZE)
		return -EINVAL;

	if (param->alternate_path &&
	    (param->alternate_path->pkey != param->primary_path->pkey ||
	     param->alternate_path->mtu != param->primary_path->mtu))
		return -EINVAL;

	return 0;
}
int ib_send_cm_req(struct ib_cm_id *cm_id,
		   struct ib_cm_req_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct cm_req_msg *req_msg;
	unsigned long flags;
	int ret;

	ret = cm_validate_req_param(param);
	if (ret)
		return ret;

	/* Verify that we're not in timewait. */
	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_IDLE) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = -EINVAL;
		goto out;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
							    id.local_id);
	if (IS_ERR(cm_id_priv->timewait_info)) {
		ret = PTR_ERR(cm_id_priv->timewait_info);
		goto out;
	}

	ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av,
				 cm_id_priv);
	if (ret)
		goto error1;
	if (param->alternate_path) {
		ret = cm_init_av_by_path(param->alternate_path,
					 &cm_id_priv->alt_av, cm_id_priv);
		if (ret)
			goto error1;
	}
	cm_id->service_id = param->service_id;
	cm_id->service_mask = ~cpu_to_be64(0);
	cm_id_priv->timeout_ms = cm_convert_to_ms(
				    param->primary_path->packet_life_time) * 2 +
				 cm_convert_to_ms(
				    param->remote_cm_response_timeout);
	cm_id_priv->max_cm_retries = param->max_cm_retries;
	cm_id_priv->initiator_depth = param->initiator_depth;
	cm_id_priv->responder_resources = param->responder_resources;
	cm_id_priv->retry_count = param->retry_count;
	cm_id_priv->path_mtu = param->primary_path->mtu;
	cm_id_priv->pkey = param->primary_path->pkey;
	cm_id_priv->qp_type = param->qp_type;

	ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg);
	if (ret)
		goto error1;

	req_msg = (struct cm_req_msg *) cm_id_priv->msg->mad;
	cm_format_req(req_msg, cm_id_priv, param);
	cm_id_priv->tid = req_msg->hdr.tid;
	cm_id_priv->msg->timeout_ms = cm_id_priv->timeout_ms;
	cm_id_priv->msg->context[1] = (void *) (unsigned long) IB_CM_REQ_SENT;

	cm_id_priv->local_qpn = cm_req_get_local_qpn(req_msg);
	cm_id_priv->rq_psn = cm_req_get_starting_psn(req_msg);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	ret = ib_post_send_mad(cm_id_priv->msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto error2;
	}
	BUG_ON(cm_id->state != IB_CM_IDLE);
	cm_id->state = IB_CM_REQ_SENT;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error2:	cm_free_msg(cm_id_priv->msg);
error1:	kfree(cm_id_priv->timewait_info);
out:	return ret;
}
EXPORT_SYMBOL(ib_send_cm_req);
static int cm_issue_rej(struct cm_port *port,
			struct ib_mad_recv_wc *mad_recv_wc,
			enum ib_cm_rej_reason reason,
			enum cm_msg_response msg_rejected,
			void *ari, u8 ari_length)
{
	struct ib_mad_send_buf *msg = NULL;
	struct cm_rej_msg *rej_msg, *rcv_msg;
	int ret;

	ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
	if (ret)
		return ret;

	/* We just need common CM header information.  Cast to any message. */
	rcv_msg = (struct cm_rej_msg *) mad_recv_wc->recv_buf.mad;
	rej_msg = (struct cm_rej_msg *) msg->mad;

	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, rcv_msg->hdr.tid);
	rej_msg->remote_comm_id = rcv_msg->local_comm_id;
	rej_msg->local_comm_id = rcv_msg->remote_comm_id;
	cm_rej_set_msg_rejected(rej_msg, msg_rejected);
	rej_msg->reason = cpu_to_be16(reason);

	if (ari && ari_length) {
		cm_rej_set_reject_info_len(rej_msg, ari_length);
		memcpy(rej_msg->ari, ari, ari_length);
	}

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		cm_free_msg(msg);

	return ret;
}
static inline int cm_is_active_peer(__be64 local_ca_guid, __be64 remote_ca_guid,
				    __be32 local_qpn, __be32 remote_qpn)
{
	return (be64_to_cpu(local_ca_guid) > be64_to_cpu(remote_ca_guid) ||
		((local_ca_guid == remote_ca_guid) &&
		 (be32_to_cpu(local_qpn) > be32_to_cpu(remote_qpn))));
}
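
/*
 * Example of the tie-break above (GUID/QPN values arbitrary): the side
 * with the numerically larger CA GUID is the active peer; when both
 * GUIDs are equal (a loopback connection on one CA), the side with the
 * larger QPN, e.g. local_qpn 0xff vs. remote_qpn 0xfe, takes the
 * active role.
 */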
static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
				     struct ib_sa_path_rec *primary_path,
				     struct ib_sa_path_rec *alt_path)
{
	memset(primary_path, 0, sizeof *primary_path);
	primary_path->dgid = req_msg->primary_local_gid;
	primary_path->sgid = req_msg->primary_remote_gid;
	primary_path->dlid = req_msg->primary_local_lid;
	primary_path->slid = req_msg->primary_remote_lid;
	primary_path->flow_label = cm_req_get_primary_flow_label(req_msg);
	primary_path->hop_limit = req_msg->primary_hop_limit;
	primary_path->traffic_class = req_msg->primary_traffic_class;
	primary_path->reversible = 1;
	primary_path->pkey = req_msg->pkey;
	primary_path->sl = cm_req_get_primary_sl(req_msg);
	primary_path->mtu_selector = IB_SA_EQ;
	primary_path->mtu = cm_req_get_path_mtu(req_msg);
	primary_path->rate_selector = IB_SA_EQ;
	primary_path->rate = cm_req_get_primary_packet_rate(req_msg);
	primary_path->packet_life_time_selector = IB_SA_EQ;
	primary_path->packet_life_time =
		cm_req_get_primary_local_ack_timeout(req_msg);
	primary_path->packet_life_time -= (primary_path->packet_life_time > 0);
	primary_path->service_id = req_msg->service_id;

	if (req_msg->alt_local_lid) {
		memset(alt_path, 0, sizeof *alt_path);
		alt_path->dgid = req_msg->alt_local_gid;
		alt_path->sgid = req_msg->alt_remote_gid;
		alt_path->dlid = req_msg->alt_local_lid;
		alt_path->slid = req_msg->alt_remote_lid;
		alt_path->flow_label = cm_req_get_alt_flow_label(req_msg);
		alt_path->hop_limit = req_msg->alt_hop_limit;
		alt_path->traffic_class = req_msg->alt_traffic_class;
		alt_path->reversible = 1;
		alt_path->pkey = req_msg->pkey;
		alt_path->sl = cm_req_get_alt_sl(req_msg);
		alt_path->mtu_selector = IB_SA_EQ;
		alt_path->mtu = cm_req_get_path_mtu(req_msg);
		alt_path->rate_selector = IB_SA_EQ;
		alt_path->rate = cm_req_get_alt_packet_rate(req_msg);
		alt_path->packet_life_time_selector = IB_SA_EQ;
		alt_path->packet_life_time =
			cm_req_get_alt_local_ack_timeout(req_msg);
		alt_path->packet_life_time -= (alt_path->packet_life_time > 0);
		alt_path->service_id = req_msg->service_id;
	}
}
static u16 cm_get_bth_pkey(struct cm_work *work)
{
	struct ib_device *ib_dev = work->port->cm_dev->ib_device;
	u8 port_num = work->port->port_num;
	u16 pkey_index = work->mad_recv_wc->wc->pkey_index;
	u16 pkey;
	int ret;

	ret = ib_get_cached_pkey(ib_dev, port_num, pkey_index, &pkey);
	if (ret) {
		dev_warn_ratelimited(&ib_dev->dev, "ib_cm: Couldn't retrieve pkey for incoming request (port %d, pkey index %d). %d\n",
				     port_num, pkey_index, ret);
		return 0;
	}

	return pkey;
}
static void cm_format_req_event(struct cm_work *work,
				struct cm_id_private *cm_id_priv,
				struct ib_cm_id *listen_id)
{
	struct cm_req_msg *req_msg;
	struct ib_cm_req_event_param *param;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.req_rcvd;
	param->listen_id = listen_id;
	param->bth_pkey = cm_get_bth_pkey(work);
	param->port = cm_id_priv->av.port->port_num;
	param->primary_path = &work->path[0];
	if (req_msg->alt_local_lid)
		param->alternate_path = &work->path[1];
	else
		param->alternate_path = NULL;
	param->remote_ca_guid = req_msg->local_ca_guid;
	param->remote_qkey = be32_to_cpu(req_msg->local_qkey);
	param->remote_qpn = be32_to_cpu(cm_req_get_local_qpn(req_msg));
	param->qp_type = cm_req_get_qp_type(req_msg);
	param->starting_psn = be32_to_cpu(cm_req_get_starting_psn(req_msg));
	param->responder_resources = cm_req_get_init_depth(req_msg);
	param->initiator_depth = cm_req_get_resp_res(req_msg);
	param->local_cm_response_timeout =
					cm_req_get_remote_resp_timeout(req_msg);
	param->flow_control = cm_req_get_flow_ctrl(req_msg);
	param->remote_cm_response_timeout =
					cm_req_get_local_resp_timeout(req_msg);
	param->retry_count = cm_req_get_retry_count(req_msg);
	param->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
	param->srq = cm_req_get_srq(req_msg);
	work->cm_event.private_data = &req_msg->private_data;
}
static void cm_process_work(struct cm_id_private *cm_id_priv,
			    struct cm_work *work)
{
	int ret;

	/* We will typically only have the current event to report. */
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);
	cm_free_work(work);

	while (!ret && !atomic_add_negative(-1, &cm_id_priv->work_count)) {
		spin_lock_irq(&cm_id_priv->lock);
		work = cm_dequeue_work(cm_id_priv);
		spin_unlock_irq(&cm_id_priv->lock);
		BUG_ON(!work);
		ret = cm_id_priv->id.cm_handler(&cm_id_priv->id,
						&work->cm_event);
		cm_free_work(work);
	}
	cm_deref_id(cm_id_priv);
	if (ret)
		cm_destroy_id(&cm_id_priv->id, ret);
}
static void cm_format_mra(struct cm_mra_msg *mra_msg,
			  struct cm_id_private *cm_id_priv,
			  enum cm_msg_response msg_mraed, u8 service_timeout,
			  const void *private_data, u8 private_data_len)
{
	cm_format_mad_hdr(&mra_msg->hdr, CM_MRA_ATTR_ID, cm_id_priv->tid);
	cm_mra_set_msg_mraed(mra_msg, msg_mraed);
	mra_msg->local_comm_id = cm_id_priv->id.local_id;
	mra_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_mra_set_service_timeout(mra_msg, service_timeout);

	if (private_data && private_data_len)
		memcpy(mra_msg->private_data, private_data, private_data_len);
}
static void cm_format_rej(struct cm_rej_msg *rej_msg,
			  struct cm_id_private *cm_id_priv,
			  enum ib_cm_rej_reason reason,
			  void *ari,
			  u8 ari_length,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, cm_id_priv->tid);
	rej_msg->remote_comm_id = cm_id_priv->id.remote_id;

	switch(cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
		rej_msg->local_comm_id = 0;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
		break;
	case IB_CM_MRA_REQ_SENT:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
		break;
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REP);
		break;
	default:
		rej_msg->local_comm_id = cm_id_priv->id.local_id;
		cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_OTHER);
		break;
	}

	rej_msg->reason = cpu_to_be16(reason);
	if (ari && ari_length) {
		cm_rej_set_reject_info_len(rej_msg, ari_length);
		memcpy(rej_msg->ari, ari, ari_length);
	}

	if (private_data && private_data_len)
		memcpy(rej_msg->private_data, private_data, private_data_len);
}
static void cm_dup_req_handler(struct cm_work *work,
			       struct cm_id_private *cm_id_priv)
{
	struct ib_mad_send_buf *msg = NULL;
	int ret;

	atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
			counter[CM_REQ_COUNTER]);

	/* Quick state check to discard duplicate REQs. */
	if (cm_id_priv->id.state == IB_CM_REQ_RCVD)
		return;

	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
	if (ret)
		return;

	spin_lock_irq(&cm_id_priv->lock);
	switch (cm_id_priv->id.state) {
	case IB_CM_MRA_REQ_SENT:
		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REQ, cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
		break;
	case IB_CM_TIMEWAIT:
		cm_format_rej((struct cm_rej_msg *) msg->mad, cm_id_priv,
			      IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0);
		break;
	default:
		goto unlock;
	}
	spin_unlock_irq(&cm_id_priv->lock);

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		goto free;
	return;

unlock:	spin_unlock_irq(&cm_id_priv->lock);
free:	cm_free_msg(msg);
}
static struct cm_id_private * cm_match_req(struct cm_work *work,
					   struct cm_id_private *cm_id_priv)
{
	struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv;
	struct cm_timewait_info *timewait_info;
	struct cm_req_msg *req_msg;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;

	/* Check for possible duplicate REQ. */
	spin_lock_irq(&cm.lock);
	timewait_info = cm_insert_remote_id(cm_id_priv->timewait_info);
	if (timewait_info) {
		cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
					   timewait_info->work.remote_id);
		spin_unlock_irq(&cm.lock);
		if (cur_cm_id_priv) {
			cm_dup_req_handler(work, cur_cm_id_priv);
			cm_deref_id(cur_cm_id_priv);
		}
		return NULL;
	}

	/* Check for stale connections. */
	timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
	if (timewait_info) {
		cm_cleanup_timewait(cm_id_priv->timewait_info);
		spin_unlock_irq(&cm.lock);
		cm_issue_rej(work->port, work->mad_recv_wc,
			     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ,
			     NULL, 0);
		return NULL;
	}

	/* Find matching listen request. */
	listen_cm_id_priv = cm_find_listen(cm_id_priv->id.device,
					   req_msg->service_id);
	if (!listen_cm_id_priv) {
		cm_cleanup_timewait(cm_id_priv->timewait_info);
		spin_unlock_irq(&cm.lock);
		cm_issue_rej(work->port, work->mad_recv_wc,
			     IB_CM_REJ_INVALID_SERVICE_ID, CM_MSG_RESPONSE_REQ,
			     NULL, 0);
		goto out;
	}
	atomic_inc(&listen_cm_id_priv->refcount);
	atomic_inc(&cm_id_priv->refcount);
	cm_id_priv->id.state = IB_CM_REQ_RCVD;
	atomic_inc(&cm_id_priv->work_count);
	spin_unlock_irq(&cm.lock);
out:
	return listen_cm_id_priv;
}
/*
 * Work-around for inter-subnet connections.  If the LIDs are permissive,
 * we need to override the LID/SL data in the REQ with the LID information
 * in the work completion.
 */
static void cm_process_routed_req(struct cm_req_msg *req_msg, struct ib_wc *wc)
{
	if (!cm_req_get_primary_subnet_local(req_msg)) {
		if (req_msg->primary_local_lid == IB_LID_PERMISSIVE) {
			req_msg->primary_local_lid = cpu_to_be16(wc->slid);
			cm_req_set_primary_sl(req_msg, wc->sl);
		}

		if (req_msg->primary_remote_lid == IB_LID_PERMISSIVE)
			req_msg->primary_remote_lid = cpu_to_be16(wc->dlid_path_bits);
	}

	if (!cm_req_get_alt_subnet_local(req_msg)) {
		if (req_msg->alt_local_lid == IB_LID_PERMISSIVE) {
			req_msg->alt_local_lid = cpu_to_be16(wc->slid);
			cm_req_set_alt_sl(req_msg, wc->sl);
		}

		if (req_msg->alt_remote_lid == IB_LID_PERMISSIVE)
			req_msg->alt_remote_lid = cpu_to_be16(wc->dlid_path_bits);
	}
}
static int cm_req_handler(struct cm_work *work)
{
	struct ib_cm_id *cm_id;
	struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
	struct cm_req_msg *req_msg;
	int ret;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;

	cm_id = ib_create_cm_id(work->port->cm_dev->ib_device, NULL, NULL);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	cm_id_priv->id.remote_id = req_msg->local_comm_id;
	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
				work->mad_recv_wc->recv_buf.grh,
				&cm_id_priv->av);
	cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
							    id.local_id);
	if (IS_ERR(cm_id_priv->timewait_info)) {
		ret = PTR_ERR(cm_id_priv->timewait_info);
		goto destroy;
	}
	cm_id_priv->timewait_info->work.remote_id = req_msg->local_comm_id;
	cm_id_priv->timewait_info->remote_ca_guid = req_msg->local_ca_guid;
	cm_id_priv->timewait_info->remote_qpn = cm_req_get_local_qpn(req_msg);

	listen_cm_id_priv = cm_match_req(work, cm_id_priv);
	if (!listen_cm_id_priv) {
		ret = -EINVAL;
		kfree(cm_id_priv->timewait_info);
		goto destroy;
	}

	cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
	cm_id_priv->id.context = listen_cm_id_priv->id.context;
	cm_id_priv->id.service_id = req_msg->service_id;
	cm_id_priv->id.service_mask = ~cpu_to_be64(0);

	cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
	cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);

	memcpy(work->path[0].dmac, cm_id_priv->av.ah_attr.dmac, ETH_ALEN);
	ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av,
				 cm_id_priv);
	if (ret) {
		ib_get_cached_gid(work->port->cm_dev->ib_device,
				  work->port->port_num, 0, &work->path[0].sgid,
				  NULL);
		ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_GID,
			       &work->path[0].sgid, sizeof work->path[0].sgid,
			       NULL, 0);
		goto rejected;
	}
	if (req_msg->alt_local_lid) {
		ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av,
					 cm_id_priv);
		if (ret) {
			ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_ALT_GID,
				       &work->path[0].sgid,
				       sizeof work->path[0].sgid, NULL, 0);
			goto rejected;
		}
	}
	cm_id_priv->tid = req_msg->hdr.tid;
	cm_id_priv->timeout_ms = cm_convert_to_ms(
					cm_req_get_local_resp_timeout(req_msg));
	cm_id_priv->max_cm_retries = cm_req_get_max_cm_retries(req_msg);
	cm_id_priv->remote_qpn = cm_req_get_local_qpn(req_msg);
	cm_id_priv->initiator_depth = cm_req_get_resp_res(req_msg);
	cm_id_priv->responder_resources = cm_req_get_init_depth(req_msg);
	cm_id_priv->path_mtu = cm_req_get_path_mtu(req_msg);
	cm_id_priv->pkey = req_msg->pkey;
	cm_id_priv->sq_psn = cm_req_get_starting_psn(req_msg);
	cm_id_priv->retry_count = cm_req_get_retry_count(req_msg);
	cm_id_priv->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
	cm_id_priv->qp_type = cm_req_get_qp_type(req_msg);

	cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
	cm_process_work(cm_id_priv, work);
	cm_deref_id(listen_cm_id_priv);
	return 0;

rejected:
	atomic_dec(&cm_id_priv->refcount);
	cm_deref_id(listen_cm_id_priv);
destroy:
	ib_destroy_cm_id(cm_id);
	return ret;
}
static void cm_format_rep(struct cm_rep_msg *rep_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_cm_rep_param *param)
{
	cm_format_mad_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid);
	rep_msg->local_comm_id = cm_id_priv->id.local_id;
	rep_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_rep_set_starting_psn(rep_msg, cpu_to_be32(param->starting_psn));
	rep_msg->resp_resources = param->responder_resources;
	cm_rep_set_target_ack_delay(rep_msg,
				    cm_id_priv->av.port->cm_dev->ack_delay);
	cm_rep_set_failover(rep_msg, param->failover_accepted);
	cm_rep_set_rnr_retry_count(rep_msg, param->rnr_retry_count);
	rep_msg->local_ca_guid = cm_id_priv->id.device->node_guid;

	if (cm_id_priv->qp_type != IB_QPT_XRC_TGT) {
		rep_msg->initiator_depth = param->initiator_depth;
		cm_rep_set_flow_ctrl(rep_msg, param->flow_control);
		cm_rep_set_srq(rep_msg, param->srq);
		cm_rep_set_local_qpn(rep_msg, cpu_to_be32(param->qp_num));
	} else {
		cm_rep_set_srq(rep_msg, 1);
		cm_rep_set_local_eecn(rep_msg, cpu_to_be32(param->qp_num));
	}

	if (param->private_data && param->private_data_len)
		memcpy(rep_msg->private_data, param->private_data,
		       param->private_data_len);
}
int ib_send_cm_rep(struct ib_cm_id *cm_id,
		   struct ib_cm_rep_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	struct cm_rep_msg *rep_msg;
	unsigned long flags;
	int ret;

	if (param->private_data &&
	    param->private_data_len > IB_CM_REP_PRIVATE_DATA_SIZE)
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_REQ_RCVD &&
	    cm_id->state != IB_CM_MRA_REQ_SENT) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	rep_msg = (struct cm_rep_msg *) msg->mad;
	cm_format_rep(rep_msg, cm_id_priv, param);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_REP_SENT;

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->state = IB_CM_REP_SENT;
	cm_id_priv->msg = msg;
	cm_id_priv->initiator_depth = param->initiator_depth;
	cm_id_priv->responder_resources = param->responder_resources;
	cm_id_priv->rq_psn = cm_rep_get_starting_psn(rep_msg);
	cm_id_priv->local_qpn = cpu_to_be32(param->qp_num & 0xFFFFFF);

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_rep);
static void cm_format_rtu(struct cm_rtu_msg *rtu_msg,
			  struct cm_id_private *cm_id_priv,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&rtu_msg->hdr, CM_RTU_ATTR_ID, cm_id_priv->tid);
	rtu_msg->local_comm_id = cm_id_priv->id.local_id;
	rtu_msg->remote_comm_id = cm_id_priv->id.remote_id;

	if (private_data && private_data_len)
		memcpy(rtu_msg->private_data, private_data, private_data_len);
}
int ib_send_cm_rtu(struct ib_cm_id *cm_id,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	void *data;
	int ret;

	if (private_data && private_data_len > IB_CM_RTU_PRIVATE_DATA_SIZE)
		return -EINVAL;

	data = cm_copy_private_data(private_data, private_data_len);
	if (IS_ERR(data))
		return PTR_ERR(data);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_REP_RCVD &&
	    cm_id->state != IB_CM_MRA_REP_SENT) {
		ret = -EINVAL;
		goto error;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto error;

	cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
		      private_data, private_data_len);

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		kfree(data);
		return ret;
	}

	cm_id->state = IB_CM_ESTABLISHED;
	cm_set_private_data(cm_id_priv, data, private_data_len);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	kfree(data);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_rtu);
static void cm_format_rep_event(struct cm_work *work, enum ib_qp_type qp_type)
{
	struct cm_rep_msg *rep_msg;
	struct ib_cm_rep_event_param *param;

	rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.rep_rcvd;
	param->remote_ca_guid = rep_msg->local_ca_guid;
	param->remote_qkey = be32_to_cpu(rep_msg->local_qkey);
	param->remote_qpn = be32_to_cpu(cm_rep_get_qpn(rep_msg, qp_type));
	param->starting_psn = be32_to_cpu(cm_rep_get_starting_psn(rep_msg));
	param->responder_resources = rep_msg->initiator_depth;
	param->initiator_depth = rep_msg->resp_resources;
	param->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
	param->failover_accepted = cm_rep_get_failover(rep_msg);
	param->flow_control = cm_rep_get_flow_ctrl(rep_msg);
	param->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
	param->srq = cm_rep_get_srq(rep_msg);
	work->cm_event.private_data = &rep_msg->private_data;
}
static void cm_dup_rep_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rep_msg *rep_msg;
	struct ib_mad_send_buf *msg = NULL;
	int ret;

	rep_msg = (struct cm_rep_msg *) work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id,
				   rep_msg->local_comm_id);
	if (!cm_id_priv)
		return;

	atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
			counter[CM_REP_COUNTER]);
	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
	if (ret)
		goto deref;

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state == IB_CM_ESTABLISHED)
		cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
	else if (cm_id_priv->id.state == IB_CM_MRA_REP_SENT)
		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REP, cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
	else
		goto unlock;
	spin_unlock_irq(&cm_id_priv->lock);

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		goto free;
	goto deref;

unlock:	spin_unlock_irq(&cm_id_priv->lock);
free:	cm_free_msg(msg);
deref:	cm_deref_id(cm_id_priv);
}
1960 static int cm_rep_handler(struct cm_work *work)
1962 struct cm_id_private *cm_id_priv;
1963 struct cm_rep_msg *rep_msg;
1966 rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
1967 cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id, 0);
1969 cm_dup_rep_handler(work);
1973 cm_format_rep_event(work, cm_id_priv->qp_type);
1975 spin_lock_irq(&cm_id_priv->lock);
1976 switch (cm_id_priv->id.state) {
1977 case IB_CM_REQ_SENT:
1978 case IB_CM_MRA_REQ_RCVD:
1981 spin_unlock_irq(&cm_id_priv->lock);
1986 cm_id_priv->timewait_info->work.remote_id = rep_msg->local_comm_id;
1987 cm_id_priv->timewait_info->remote_ca_guid = rep_msg->local_ca_guid;
1988 cm_id_priv->timewait_info->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type);
1990 spin_lock(&cm.lock);
1991 /* Check for duplicate REP. */
1992 if (cm_insert_remote_id(cm_id_priv->timewait_info)) {
1993 spin_unlock(&cm.lock);
1994 spin_unlock_irq(&cm_id_priv->lock);
1998 /* Check for a stale connection. */
1999 if (cm_insert_remote_qpn(cm_id_priv->timewait_info)) {
2000 rb_erase(&cm_id_priv->timewait_info->remote_id_node,
2001 &cm.remote_id_table);
2002 cm_id_priv->timewait_info->inserted_remote_id = 0;
2003 spin_unlock(&cm.lock);
2004 spin_unlock_irq(&cm_id_priv->lock);
2005 cm_issue_rej(work->port, work->mad_recv_wc,
2006 IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP,
2011 spin_unlock(&cm.lock);
2013 cm_id_priv->id.state = IB_CM_REP_RCVD;
2014 cm_id_priv->id.remote_id = rep_msg->local_comm_id;
2015 cm_id_priv->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type);
2016 cm_id_priv->initiator_depth = rep_msg->resp_resources;
2017 cm_id_priv->responder_resources = rep_msg->initiator_depth;
2018 cm_id_priv->sq_psn = cm_rep_get_starting_psn(rep_msg);
2019 cm_id_priv->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
2020 cm_id_priv->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
2021 cm_id_priv->av.timeout =
2022 cm_ack_timeout(cm_id_priv->target_ack_delay,
2023 cm_id_priv->av.timeout - 1);
2024 cm_id_priv->alt_av.timeout =
2025 cm_ack_timeout(cm_id_priv->target_ack_delay,
2026 cm_id_priv->alt_av.timeout - 1);
2028 /* todo: handle peer_to_peer */
2030 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2031 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2032 if (!ret)
2033 list_add_tail(&work->list, &cm_id_priv->work_list);
2034 spin_unlock_irq(&cm_id_priv->lock);
2036 if (ret)
2037 cm_process_work(cm_id_priv, work);
2038 else
2039 cm_deref_id(cm_id_priv);
2040 return 0;
2042 error:
2043 cm_deref_id(cm_id_priv);
2044 return ret;
2045 }
2047 static int cm_establish_handler(struct cm_work *work)
2048 {
2049 struct cm_id_private *cm_id_priv;
2050 int ret;
2052 /* See comment in cm_establish about lookup. */
2053 cm_id_priv = cm_acquire_id(work->local_id, work->remote_id);
2054 if (!cm_id_priv)
2055 return -EINVAL;
2057 spin_lock_irq(&cm_id_priv->lock);
2058 if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
2059 spin_unlock_irq(&cm_id_priv->lock);
2060 goto out;
2061 }
2063 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2064 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2065 if (!ret)
2066 list_add_tail(&work->list, &cm_id_priv->work_list);
2067 spin_unlock_irq(&cm_id_priv->lock);
2069 if (ret)
2070 cm_process_work(cm_id_priv, work);
2071 else
2072 cm_deref_id(cm_id_priv);
2073 return 0;
2074 out:
2075 cm_deref_id(cm_id_priv);
2076 return -EINVAL;
2077 }
2079 static int cm_rtu_handler(struct cm_work *work)
2080 {
2081 struct cm_id_private *cm_id_priv;
2082 struct cm_rtu_msg *rtu_msg;
2083 int ret;
2085 rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad;
2086 cm_id_priv = cm_acquire_id(rtu_msg->remote_comm_id,
2087 rtu_msg->local_comm_id);
2088 if (!cm_id_priv)
2089 return -EINVAL;
2091 work->cm_event.private_data = &rtu_msg->private_data;
2093 spin_lock_irq(&cm_id_priv->lock);
2094 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
2095 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
2096 spin_unlock_irq(&cm_id_priv->lock);
2097 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2098 counter[CM_RTU_COUNTER]);
2099 goto out;
2100 }
2101 cm_id_priv->id.state = IB_CM_ESTABLISHED;
2103 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2104 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2105 if (!ret)
2106 list_add_tail(&work->list, &cm_id_priv->work_list);
2107 spin_unlock_irq(&cm_id_priv->lock);
2109 if (ret)
2110 cm_process_work(cm_id_priv, work);
2111 else
2112 cm_deref_id(cm_id_priv);
2113 return 0;
2114 out:
2115 cm_deref_id(cm_id_priv);
2116 return -EINVAL;
2117 }
2119 static void cm_format_dreq(struct cm_dreq_msg *dreq_msg,
2120 struct cm_id_private *cm_id_priv,
2121 const void *private_data,
2122 u8 private_data_len)
2123 {
2124 cm_format_mad_hdr(&dreq_msg->hdr, CM_DREQ_ATTR_ID,
2125 cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_DREQ));
2126 dreq_msg->local_comm_id = cm_id_priv->id.local_id;
2127 dreq_msg->remote_comm_id = cm_id_priv->id.remote_id;
2128 cm_dreq_set_remote_qpn(dreq_msg, cm_id_priv->remote_qpn);
2130 if (private_data && private_data_len)
2131 memcpy(dreq_msg->private_data, private_data, private_data_len);
2132 }
2134 int ib_send_cm_dreq(struct ib_cm_id *cm_id,
2135 const void *private_data,
2136 u8 private_data_len)
2137 {
2138 struct cm_id_private *cm_id_priv;
2139 struct ib_mad_send_buf *msg;
2140 unsigned long flags;
2141 int ret;
2143 if (private_data && private_data_len > IB_CM_DREQ_PRIVATE_DATA_SIZE)
2144 return -EINVAL;
2146 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2147 spin_lock_irqsave(&cm_id_priv->lock, flags);
2148 if (cm_id->state != IB_CM_ESTABLISHED) {
2149 ret = -EINVAL;
2150 goto out;
2151 }
2153 if (cm_id->lap_state == IB_CM_LAP_SENT ||
2154 cm_id->lap_state == IB_CM_MRA_LAP_RCVD)
2155 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2157 ret = cm_alloc_msg(cm_id_priv, &msg);
2158 if (ret) {
2159 cm_enter_timewait(cm_id_priv);
2160 goto out;
2161 }
2163 cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv,
2164 private_data, private_data_len);
2165 msg->timeout_ms = cm_id_priv->timeout_ms;
2166 msg->context[1] = (void *) (unsigned long) IB_CM_DREQ_SENT;
2168 ret = ib_post_send_mad(msg, NULL);
2169 if (ret) {
2170 cm_enter_timewait(cm_id_priv);
2171 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2172 cm_free_msg(msg);
2173 return ret;
2174 }
2176 cm_id->state = IB_CM_DREQ_SENT;
2177 cm_id_priv->msg = msg;
2178 out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2179 return ret;
2180 }
2181 EXPORT_SYMBOL(ib_send_cm_dreq);
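/*
 * Editor's illustrative sketch (not part of the original driver): a ULP
 * that owns a connected ib_cm_id would typically start the disconnect
 * handshake like this, then wait for IB_CM_DREP_RECEIVED (or a timeout
 * that moves the id into timewait). 'id' is a hypothetical caller-owned
 * ib_cm_id in the IB_CM_ESTABLISHED state.
 *
 *	if (ib_send_cm_dreq(id, NULL, 0))
 *		pr_debug("DREQ not sent; id is not established\n");
 */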
2183 static void cm_format_drep(struct cm_drep_msg *drep_msg,
2184 struct cm_id_private *cm_id_priv,
2185 const void *private_data,
2186 u8 private_data_len)
2187 {
2188 cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, cm_id_priv->tid);
2189 drep_msg->local_comm_id = cm_id_priv->id.local_id;
2190 drep_msg->remote_comm_id = cm_id_priv->id.remote_id;
2192 if (private_data && private_data_len)
2193 memcpy(drep_msg->private_data, private_data, private_data_len);
2194 }
2196 int ib_send_cm_drep(struct ib_cm_id *cm_id,
2197 const void *private_data,
2198 u8 private_data_len)
2199 {
2200 struct cm_id_private *cm_id_priv;
2201 struct ib_mad_send_buf *msg;
2202 unsigned long flags;
2203 void *data;
2204 int ret;
2206 if (private_data && private_data_len > IB_CM_DREP_PRIVATE_DATA_SIZE)
2207 return -EINVAL;
2209 data = cm_copy_private_data(private_data, private_data_len);
2210 if (IS_ERR(data))
2211 return PTR_ERR(data);
2213 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2214 spin_lock_irqsave(&cm_id_priv->lock, flags);
2215 if (cm_id->state != IB_CM_DREQ_RCVD) {
2216 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2217 kfree(data);
2218 return -EINVAL;
2219 }
2221 cm_set_private_data(cm_id_priv, data, private_data_len);
2222 cm_enter_timewait(cm_id_priv);
2224 ret = cm_alloc_msg(cm_id_priv, &msg);
2225 if (ret)
2226 goto out;
2228 cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
2229 private_data, private_data_len);
2231 ret = ib_post_send_mad(msg, NULL);
2232 if (ret) {
2233 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2234 cm_free_msg(msg);
2235 return ret;
2236 }
2238 out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2239 return ret;
2240 }
2241 EXPORT_SYMBOL(ib_send_cm_drep);
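/*
 * Editor's illustrative sketch (assumption, not driver code): the passive
 * side of a disconnect answers the DREQ from its cm_handler callback;
 * returning non-zero from the handler would make the CM destroy the id.
 *
 *	static int my_cm_handler(struct ib_cm_id *id, struct ib_cm_event *ev)
 *	{
 *		if (ev->event == IB_CM_DREQ_RECEIVED)
 *			ib_send_cm_drep(id, NULL, 0);
 *		return 0;
 *	}
 */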
2243 static int cm_issue_drep(struct cm_port *port,
2244 struct ib_mad_recv_wc *mad_recv_wc)
2245 {
2246 struct ib_mad_send_buf *msg = NULL;
2247 struct cm_dreq_msg *dreq_msg;
2248 struct cm_drep_msg *drep_msg;
2249 int ret;
2251 ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
2252 if (ret)
2253 return ret;
2255 dreq_msg = (struct cm_dreq_msg *) mad_recv_wc->recv_buf.mad;
2256 drep_msg = (struct cm_drep_msg *) msg->mad;
2258 cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, dreq_msg->hdr.tid);
2259 drep_msg->remote_comm_id = dreq_msg->local_comm_id;
2260 drep_msg->local_comm_id = dreq_msg->remote_comm_id;
2262 ret = ib_post_send_mad(msg, NULL);
2263 if (ret)
2264 cm_free_msg(msg);
2266 return ret;
2267 }
2269 static int cm_dreq_handler(struct cm_work *work)
2270 {
2271 struct cm_id_private *cm_id_priv;
2272 struct cm_dreq_msg *dreq_msg;
2273 struct ib_mad_send_buf *msg = NULL;
2274 int ret;
2276 dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad;
2277 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
2278 dreq_msg->local_comm_id);
2279 if (!cm_id_priv) {
2280 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2281 counter[CM_DREQ_COUNTER]);
2282 cm_issue_drep(work->port, work->mad_recv_wc);
2283 return -EINVAL;
2284 }
2286 work->cm_event.private_data = &dreq_msg->private_data;
2288 spin_lock_irq(&cm_id_priv->lock);
2289 if (cm_id_priv->local_qpn != cm_dreq_get_remote_qpn(dreq_msg))
2290 goto unlock;
2292 switch (cm_id_priv->id.state) {
2293 case IB_CM_REP_SENT:
2294 case IB_CM_DREQ_SENT:
2295 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2296 break;
2297 case IB_CM_ESTABLISHED:
2298 if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT ||
2299 cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
2300 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2301 break;
2302 case IB_CM_MRA_REP_RCVD:
2303 break;
2304 case IB_CM_TIMEWAIT:
2305 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2306 counter[CM_DREQ_COUNTER]);
2307 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
2308 goto unlock;
2310 cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
2311 cm_id_priv->private_data,
2312 cm_id_priv->private_data_len);
2313 spin_unlock_irq(&cm_id_priv->lock);
2315 if (ib_post_send_mad(msg, NULL))
2316 cm_free_msg(msg);
2317 goto deref;
2318 case IB_CM_DREQ_RCVD:
2319 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2320 counter[CM_DREQ_COUNTER]);
2321 goto unlock;
2322 default:
2323 goto unlock;
2324 }
2325 cm_id_priv->id.state = IB_CM_DREQ_RCVD;
2326 cm_id_priv->tid = dreq_msg->hdr.tid;
2327 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2328 if (!ret)
2329 list_add_tail(&work->list, &cm_id_priv->work_list);
2330 spin_unlock_irq(&cm_id_priv->lock);
2332 if (ret)
2333 cm_process_work(cm_id_priv, work);
2334 else
2335 cm_deref_id(cm_id_priv);
2336 return 0;
2338 unlock: spin_unlock_irq(&cm_id_priv->lock);
2339 deref: cm_deref_id(cm_id_priv);
2340 return -EINVAL;
2341 }
2343 static int cm_drep_handler(struct cm_work *work)
2344 {
2345 struct cm_id_private *cm_id_priv;
2346 struct cm_drep_msg *drep_msg;
2347 int ret;
2349 drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad;
2350 cm_id_priv = cm_acquire_id(drep_msg->remote_comm_id,
2351 drep_msg->local_comm_id);
2352 if (!cm_id_priv)
2353 return -EINVAL;
2355 work->cm_event.private_data = &drep_msg->private_data;
2357 spin_lock_irq(&cm_id_priv->lock);
2358 if (cm_id_priv->id.state != IB_CM_DREQ_SENT &&
2359 cm_id_priv->id.state != IB_CM_DREQ_RCVD) {
2360 spin_unlock_irq(&cm_id_priv->lock);
2361 goto out;
2362 }
2363 cm_enter_timewait(cm_id_priv);
2365 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2366 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2367 if (!ret)
2368 list_add_tail(&work->list, &cm_id_priv->work_list);
2369 spin_unlock_irq(&cm_id_priv->lock);
2371 if (ret)
2372 cm_process_work(cm_id_priv, work);
2373 else
2374 cm_deref_id(cm_id_priv);
2375 return 0;
2376 out:
2377 cm_deref_id(cm_id_priv);
2378 return -EINVAL;
2379 }
2381 int ib_send_cm_rej(struct ib_cm_id *cm_id,
2382 enum ib_cm_rej_reason reason,
2383 void *ari,
2384 u8 ari_length,
2385 const void *private_data,
2386 u8 private_data_len)
2387 {
2388 struct cm_id_private *cm_id_priv;
2389 struct ib_mad_send_buf *msg;
2390 unsigned long flags;
2391 int ret;
2393 if ((private_data && private_data_len > IB_CM_REJ_PRIVATE_DATA_SIZE) ||
2394 (ari && ari_length > IB_CM_REJ_ARI_LENGTH))
2395 return -EINVAL;
2397 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2399 spin_lock_irqsave(&cm_id_priv->lock, flags);
2400 switch (cm_id->state) {
2401 case IB_CM_REQ_SENT:
2402 case IB_CM_MRA_REQ_RCVD:
2403 case IB_CM_REQ_RCVD:
2404 case IB_CM_MRA_REQ_SENT:
2405 case IB_CM_REP_RCVD:
2406 case IB_CM_MRA_REP_SENT:
2407 ret = cm_alloc_msg(cm_id_priv, &msg);
2408 if (!ret)
2409 cm_format_rej((struct cm_rej_msg *) msg->mad,
2410 cm_id_priv, reason, ari, ari_length,
2411 private_data, private_data_len);
2413 cm_reset_to_idle(cm_id_priv);
2414 break;
2415 case IB_CM_REP_SENT:
2416 case IB_CM_MRA_REP_RCVD:
2417 ret = cm_alloc_msg(cm_id_priv, &msg);
2418 if (!ret)
2419 cm_format_rej((struct cm_rej_msg *) msg->mad,
2420 cm_id_priv, reason, ari, ari_length,
2421 private_data, private_data_len);
2423 cm_enter_timewait(cm_id_priv);
2424 break;
2425 default:
2426 ret = -EINVAL;
2427 goto out;
2428 }
2430 if (ret)
2431 goto out;
2433 ret = ib_post_send_mad(msg, NULL);
2434 if (ret)
2435 cm_free_msg(msg);
2437 out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2438 return ret;
2439 }
2440 EXPORT_SYMBOL(ib_send_cm_rej);
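/*
 * Editor's illustrative sketch (assumption): a listener that cannot accept
 * an incoming REQ rejects it from its cm_handler; consumer-defined reasons
 * use IB_CM_REJ_CONSUMER_DEFINED with optional ARI bytes. have_resources()
 * is a hypothetical ULP predicate.
 *
 *	if (ev->event == IB_CM_REQ_RECEIVED && !have_resources())
 *		ib_send_cm_rej(id, IB_CM_REJ_NO_RESOURCES, NULL, 0, NULL, 0);
 */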
2442 static void cm_format_rej_event(struct cm_work *work)
2443 {
2444 struct cm_rej_msg *rej_msg;
2445 struct ib_cm_rej_event_param *param;
2447 rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
2448 param = &work->cm_event.param.rej_rcvd;
2449 param->ari = rej_msg->ari;
2450 param->ari_length = cm_rej_get_reject_info_len(rej_msg);
2451 param->reason = __be16_to_cpu(rej_msg->reason);
2452 work->cm_event.private_data = &rej_msg->private_data;
2453 }
2455 static struct cm_id_private * cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
2456 {
2457 struct cm_timewait_info *timewait_info;
2458 struct cm_id_private *cm_id_priv;
2459 __be32 remote_id;
2461 remote_id = rej_msg->local_comm_id;
2463 if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_TIMEOUT) {
2464 spin_lock_irq(&cm.lock);
2465 timewait_info = cm_find_remote_id( *((__be64 *) rej_msg->ari),
2466 remote_id);
2467 if (!timewait_info) {
2468 spin_unlock_irq(&cm.lock);
2469 return NULL;
2470 }
2471 cm_id_priv = idr_find(&cm.local_id_table, (__force int)
2472 (timewait_info->work.local_id ^
2473 cm.random_id_operand));
2474 if (cm_id_priv) {
2475 if (cm_id_priv->id.remote_id == remote_id)
2476 atomic_inc(&cm_id_priv->refcount);
2477 else
2478 cm_id_priv = NULL;
2479 }
2480 spin_unlock_irq(&cm.lock);
2481 } else if (cm_rej_get_msg_rejected(rej_msg) == CM_MSG_RESPONSE_REQ)
2482 cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, 0);
2483 else
2484 cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, remote_id);
2486 return cm_id_priv;
2487 }
2489 static int cm_rej_handler(struct cm_work *work)
2490 {
2491 struct cm_id_private *cm_id_priv;
2492 struct cm_rej_msg *rej_msg;
2493 int ret;
2495 rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
2496 cm_id_priv = cm_acquire_rejected_id(rej_msg);
2497 if (!cm_id_priv)
2498 return -EINVAL;
2500 cm_format_rej_event(work);
2502 spin_lock_irq(&cm_id_priv->lock);
2503 switch (cm_id_priv->id.state) {
2504 case IB_CM_REQ_SENT:
2505 case IB_CM_MRA_REQ_RCVD:
2506 case IB_CM_REP_SENT:
2507 case IB_CM_MRA_REP_RCVD:
2508 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2509 /* fall through */
2510 case IB_CM_REQ_RCVD:
2511 case IB_CM_MRA_REQ_SENT:
2512 if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_STALE_CONN)
2513 cm_enter_timewait(cm_id_priv);
2514 else
2515 cm_reset_to_idle(cm_id_priv);
2516 break;
2517 case IB_CM_DREQ_SENT:
2518 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2519 /* fall through */
2520 case IB_CM_REP_RCVD:
2521 case IB_CM_MRA_REP_SENT:
2522 cm_enter_timewait(cm_id_priv);
2523 break;
2524 case IB_CM_ESTABLISHED:
2525 if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT ||
2526 cm_id_priv->id.lap_state == IB_CM_LAP_SENT) {
2527 if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT)
2528 ib_cancel_mad(cm_id_priv->av.port->mad_agent,
2529 cm_id_priv->msg);
2530 cm_enter_timewait(cm_id_priv);
2531 break;
2532 }
2533 /* fall through */
2534 default:
2535 spin_unlock_irq(&cm_id_priv->lock);
2536 ret = -EINVAL;
2537 goto out;
2538 }
2540 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2541 if (!ret)
2542 list_add_tail(&work->list, &cm_id_priv->work_list);
2543 spin_unlock_irq(&cm_id_priv->lock);
2545 if (ret)
2546 cm_process_work(cm_id_priv, work);
2547 else
2548 cm_deref_id(cm_id_priv);
2549 return 0;
2550 out:
2551 cm_deref_id(cm_id_priv);
2552 return ret;
2553 }
2555 int ib_send_cm_mra(struct ib_cm_id *cm_id,
2556 u8 service_timeout,
2557 const void *private_data,
2558 u8 private_data_len)
2559 {
2560 struct cm_id_private *cm_id_priv;
2561 struct ib_mad_send_buf *msg;
2562 enum ib_cm_state cm_state;
2563 enum ib_cm_lap_state lap_state;
2564 enum cm_msg_response msg_response;
2565 void *data;
2566 unsigned long flags;
2567 int ret;
2569 if (private_data && private_data_len > IB_CM_MRA_PRIVATE_DATA_SIZE)
2570 return -EINVAL;
2572 data = cm_copy_private_data(private_data, private_data_len);
2573 if (IS_ERR(data))
2574 return PTR_ERR(data);
2576 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2578 spin_lock_irqsave(&cm_id_priv->lock, flags);
2579 switch(cm_id_priv->id.state) {
2580 case IB_CM_REQ_RCVD:
2581 cm_state = IB_CM_MRA_REQ_SENT;
2582 lap_state = cm_id->lap_state;
2583 msg_response = CM_MSG_RESPONSE_REQ;
2584 break;
2585 case IB_CM_REP_RCVD:
2586 cm_state = IB_CM_MRA_REP_SENT;
2587 lap_state = cm_id->lap_state;
2588 msg_response = CM_MSG_RESPONSE_REP;
2589 break;
2590 case IB_CM_ESTABLISHED:
2591 if (cm_id->lap_state == IB_CM_LAP_RCVD) {
2592 cm_state = cm_id->state;
2593 lap_state = IB_CM_MRA_LAP_SENT;
2594 msg_response = CM_MSG_RESPONSE_OTHER;
2595 break;
2596 }
2597 /* fall through */
2598 default:
2599 ret = -EINVAL;
2600 goto error1;
2601 }
2602 if (!(service_timeout & IB_CM_MRA_FLAG_DELAY)) {
2603 ret = cm_alloc_msg(cm_id_priv, &msg);
2604 if (ret)
2605 goto error1;
2607 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
2608 msg_response, service_timeout,
2609 private_data, private_data_len);
2610 ret = ib_post_send_mad(msg, NULL);
2611 if (ret)
2612 goto error2;
2613 }
2615 cm_id->state = cm_state;
2616 cm_id->lap_state = lap_state;
2617 cm_id_priv->service_timeout = service_timeout;
2618 cm_set_private_data(cm_id_priv, data, private_data_len);
2619 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2620 return 0;
2622 error1: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2623 kfree(data);
2624 return ret;
2626 error2: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2627 kfree(data);
2628 cm_free_msg(msg);
2629 return ret;
2630 }
2631 EXPORT_SYMBOL(ib_send_cm_mra);
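/*
 * Editor's illustrative sketch (assumption): a ULP that needs more time to
 * answer a REQ sends an MRA from its handler. Per the code above, OR-ing
 * IB_CM_MRA_FLAG_DELAY into service_timeout records the timeout without
 * posting a MAD now; the MRA is then only sent if a duplicate arrives.
 *
 *	if (ev->event == IB_CM_REQ_RECEIVED)
 *		ib_send_cm_mra(id, service_timeout | IB_CM_MRA_FLAG_DELAY,
 *			       NULL, 0);
 */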
2633 static struct cm_id_private * cm_acquire_mraed_id(struct cm_mra_msg *mra_msg)
2634 {
2635 switch (cm_mra_get_msg_mraed(mra_msg)) {
2636 case CM_MSG_RESPONSE_REQ:
2637 return cm_acquire_id(mra_msg->remote_comm_id, 0);
2638 case CM_MSG_RESPONSE_REP:
2639 case CM_MSG_RESPONSE_OTHER:
2640 return cm_acquire_id(mra_msg->remote_comm_id,
2641 mra_msg->local_comm_id);
2642 default:
2643 return NULL;
2644 }
2645 }
2647 static int cm_mra_handler(struct cm_work *work)
2648 {
2649 struct cm_id_private *cm_id_priv;
2650 struct cm_mra_msg *mra_msg;
2651 int timeout, ret;
2653 mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad;
2654 cm_id_priv = cm_acquire_mraed_id(mra_msg);
2655 if (!cm_id_priv)
2656 return -EINVAL;
2658 work->cm_event.private_data = &mra_msg->private_data;
2659 work->cm_event.param.mra_rcvd.service_timeout =
2660 cm_mra_get_service_timeout(mra_msg);
2661 timeout = cm_convert_to_ms(cm_mra_get_service_timeout(mra_msg)) +
2662 cm_convert_to_ms(cm_id_priv->av.timeout);
2664 spin_lock_irq(&cm_id_priv->lock);
2665 switch (cm_id_priv->id.state) {
2666 case IB_CM_REQ_SENT:
2667 if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REQ ||
2668 ib_modify_mad(cm_id_priv->av.port->mad_agent,
2669 cm_id_priv->msg, timeout))
2670 goto out;
2671 cm_id_priv->id.state = IB_CM_MRA_REQ_RCVD;
2672 break;
2673 case IB_CM_REP_SENT:
2674 if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REP ||
2675 ib_modify_mad(cm_id_priv->av.port->mad_agent,
2676 cm_id_priv->msg, timeout))
2677 goto out;
2678 cm_id_priv->id.state = IB_CM_MRA_REP_RCVD;
2679 break;
2680 case IB_CM_ESTABLISHED:
2681 if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_OTHER ||
2682 cm_id_priv->id.lap_state != IB_CM_LAP_SENT ||
2683 ib_modify_mad(cm_id_priv->av.port->mad_agent,
2684 cm_id_priv->msg, timeout)) {
2685 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
2686 atomic_long_inc(&work->port->
2687 counter_group[CM_RECV_DUPLICATES].
2688 counter[CM_MRA_COUNTER]);
2689 goto out;
2690 }
2691 cm_id_priv->id.lap_state = IB_CM_MRA_LAP_RCVD;
2692 break;
2693 case IB_CM_MRA_REQ_RCVD:
2694 case IB_CM_MRA_REP_RCVD:
2695 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2696 counter[CM_MRA_COUNTER]);
2697 /* fall through */
2698 default:
2699 goto out;
2700 }
2702 cm_id_priv->msg->context[1] = (void *) (unsigned long)
2703 cm_id_priv->id.state;
2704 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2705 if (!ret)
2706 list_add_tail(&work->list, &cm_id_priv->work_list);
2707 spin_unlock_irq(&cm_id_priv->lock);
2709 if (ret)
2710 cm_process_work(cm_id_priv, work);
2711 else
2712 cm_deref_id(cm_id_priv);
2713 return 0;
2714 out:
2715 spin_unlock_irq(&cm_id_priv->lock);
2716 cm_deref_id(cm_id_priv);
2717 return -EINVAL;
2718 }
2720 static void cm_format_lap(struct cm_lap_msg *lap_msg,
2721 struct cm_id_private *cm_id_priv,
2722 struct ib_sa_path_rec *alternate_path,
2723 const void *private_data,
2724 u8 private_data_len)
2725 {
2726 cm_format_mad_hdr(&lap_msg->hdr, CM_LAP_ATTR_ID,
2727 cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_LAP));
2728 lap_msg->local_comm_id = cm_id_priv->id.local_id;
2729 lap_msg->remote_comm_id = cm_id_priv->id.remote_id;
2730 cm_lap_set_remote_qpn(lap_msg, cm_id_priv->remote_qpn);
2731 /* todo: need remote CM response timeout */
2732 cm_lap_set_remote_resp_timeout(lap_msg, 0x1F);
2733 lap_msg->alt_local_lid = alternate_path->slid;
2734 lap_msg->alt_remote_lid = alternate_path->dlid;
2735 lap_msg->alt_local_gid = alternate_path->sgid;
2736 lap_msg->alt_remote_gid = alternate_path->dgid;
2737 cm_lap_set_flow_label(lap_msg, alternate_path->flow_label);
2738 cm_lap_set_traffic_class(lap_msg, alternate_path->traffic_class);
2739 lap_msg->alt_hop_limit = alternate_path->hop_limit;
2740 cm_lap_set_packet_rate(lap_msg, alternate_path->rate);
2741 cm_lap_set_sl(lap_msg, alternate_path->sl);
2742 cm_lap_set_subnet_local(lap_msg, 1); /* local only... */
2743 cm_lap_set_local_ack_timeout(lap_msg,
2744 cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
2745 alternate_path->packet_life_time));
2747 if (private_data && private_data_len)
2748 memcpy(lap_msg->private_data, private_data, private_data_len);
2749 }
2751 int ib_send_cm_lap(struct ib_cm_id *cm_id,
2752 struct ib_sa_path_rec *alternate_path,
2753 const void *private_data,
2754 u8 private_data_len)
2755 {
2756 struct cm_id_private *cm_id_priv;
2757 struct ib_mad_send_buf *msg;
2758 unsigned long flags;
2759 int ret;
2761 if (private_data && private_data_len > IB_CM_LAP_PRIVATE_DATA_SIZE)
2762 return -EINVAL;
2764 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2765 spin_lock_irqsave(&cm_id_priv->lock, flags);
2766 if (cm_id->state != IB_CM_ESTABLISHED ||
2767 (cm_id->lap_state != IB_CM_LAP_UNINIT &&
2768 cm_id->lap_state != IB_CM_LAP_IDLE)) {
2769 ret = -EINVAL;
2770 goto out;
2771 }
2773 ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av,
2774 cm_id_priv);
2775 if (ret)
2776 goto out;
2777 cm_id_priv->alt_av.timeout =
2778 cm_ack_timeout(cm_id_priv->target_ack_delay,
2779 cm_id_priv->alt_av.timeout - 1);
2781 ret = cm_alloc_msg(cm_id_priv, &msg);
2782 if (ret)
2783 goto out;
2785 cm_format_lap((struct cm_lap_msg *) msg->mad, cm_id_priv,
2786 alternate_path, private_data, private_data_len);
2787 msg->timeout_ms = cm_id_priv->timeout_ms;
2788 msg->context[1] = (void *) (unsigned long) IB_CM_ESTABLISHED;
2790 ret = ib_post_send_mad(msg, NULL);
2791 if (ret) {
2792 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2793 cm_free_msg(msg);
2794 return ret;
2795 }
2797 cm_id->lap_state = IB_CM_LAP_SENT;
2798 cm_id_priv->msg = msg;
2800 out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2801 return ret;
2802 }
2803 EXPORT_SYMBOL(ib_send_cm_lap);
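/*
 * Editor's illustrative sketch (assumption): after resolving an alternate
 * path record (e.g. via an SA query), the active side arms failover by
 * sending a LAP; the passive side answers with an APR, and both ends can
 * then rearm migration via ib_cm_init_qp_attr()/ib_modify_qp().
 * 'resolved_alt_path' is a hypothetical caller-owned path record.
 *
 *	struct ib_sa_path_rec alt = *resolved_alt_path;
 *	ib_send_cm_lap(id, &alt, NULL, 0);
 */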
2805 static void cm_format_path_from_lap(struct cm_id_private *cm_id_priv,
2806 struct ib_sa_path_rec *path,
2807 struct cm_lap_msg *lap_msg)
2808 {
2809 memset(path, 0, sizeof *path);
2810 path->dgid = lap_msg->alt_local_gid;
2811 path->sgid = lap_msg->alt_remote_gid;
2812 path->dlid = lap_msg->alt_local_lid;
2813 path->slid = lap_msg->alt_remote_lid;
2814 path->flow_label = cm_lap_get_flow_label(lap_msg);
2815 path->hop_limit = lap_msg->alt_hop_limit;
2816 path->traffic_class = cm_lap_get_traffic_class(lap_msg);
2817 path->reversible = 1;
2818 path->pkey = cm_id_priv->pkey;
2819 path->sl = cm_lap_get_sl(lap_msg);
2820 path->mtu_selector = IB_SA_EQ;
2821 path->mtu = cm_id_priv->path_mtu;
2822 path->rate_selector = IB_SA_EQ;
2823 path->rate = cm_lap_get_packet_rate(lap_msg);
2824 path->packet_life_time_selector = IB_SA_EQ;
2825 path->packet_life_time = cm_lap_get_local_ack_timeout(lap_msg);
2826 path->packet_life_time -= (path->packet_life_time > 0);
2827 }
2829 static int cm_lap_handler(struct cm_work *work)
2830 {
2831 struct cm_id_private *cm_id_priv;
2832 struct cm_lap_msg *lap_msg;
2833 struct ib_cm_lap_event_param *param;
2834 struct ib_mad_send_buf *msg = NULL;
2835 int ret;
2837 /* todo: verify LAP request and send reject APR if invalid. */
2838 lap_msg = (struct cm_lap_msg *)work->mad_recv_wc->recv_buf.mad;
2839 cm_id_priv = cm_acquire_id(lap_msg->remote_comm_id,
2840 lap_msg->local_comm_id);
2841 if (!cm_id_priv)
2842 return -EINVAL;
2844 param = &work->cm_event.param.lap_rcvd;
2845 param->alternate_path = &work->path[0];
2846 cm_format_path_from_lap(cm_id_priv, param->alternate_path, lap_msg);
2847 work->cm_event.private_data = &lap_msg->private_data;
2849 spin_lock_irq(&cm_id_priv->lock);
2850 if (cm_id_priv->id.state != IB_CM_ESTABLISHED)
2851 goto unlock;
2853 switch (cm_id_priv->id.lap_state) {
2854 case IB_CM_LAP_UNINIT:
2855 case IB_CM_LAP_IDLE:
2856 break;
2857 case IB_CM_MRA_LAP_SENT:
2858 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2859 counter[CM_LAP_COUNTER]);
2860 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
2861 goto unlock;
2863 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
2864 CM_MSG_RESPONSE_OTHER,
2865 cm_id_priv->service_timeout,
2866 cm_id_priv->private_data,
2867 cm_id_priv->private_data_len);
2868 spin_unlock_irq(&cm_id_priv->lock);
2870 if (ib_post_send_mad(msg, NULL))
2871 cm_free_msg(msg);
2872 goto deref;
2873 case IB_CM_LAP_RCVD:
2874 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2875 counter[CM_LAP_COUNTER]);
2876 goto unlock;
2877 default:
2878 goto unlock;
2879 }
2881 cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
2882 cm_id_priv->tid = lap_msg->hdr.tid;
2883 cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
2884 work->mad_recv_wc->recv_buf.grh,
2885 &cm_id_priv->av);
2886 cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av,
2887 cm_id_priv);
2888 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2889 if (!ret)
2890 list_add_tail(&work->list, &cm_id_priv->work_list);
2891 spin_unlock_irq(&cm_id_priv->lock);
2893 if (ret)
2894 cm_process_work(cm_id_priv, work);
2895 else
2896 cm_deref_id(cm_id_priv);
2897 return 0;
2899 unlock: spin_unlock_irq(&cm_id_priv->lock);
2900 deref: cm_deref_id(cm_id_priv);
2901 return -EINVAL;
2902 }
2904 static void cm_format_apr(struct cm_apr_msg *apr_msg,
2905 struct cm_id_private *cm_id_priv,
2906 enum ib_cm_apr_status status,
2907 void *info,
2908 u8 info_length,
2909 const void *private_data,
2910 u8 private_data_len)
2911 {
2912 cm_format_mad_hdr(&apr_msg->hdr, CM_APR_ATTR_ID, cm_id_priv->tid);
2913 apr_msg->local_comm_id = cm_id_priv->id.local_id;
2914 apr_msg->remote_comm_id = cm_id_priv->id.remote_id;
2915 apr_msg->ap_status = (u8) status;
2917 if (info && info_length) {
2918 apr_msg->info_length = info_length;
2919 memcpy(apr_msg->info, info, info_length);
2922 if (private_data && private_data_len)
2923 memcpy(apr_msg->private_data, private_data, private_data_len);
2924 }
2926 int ib_send_cm_apr(struct ib_cm_id *cm_id,
2927 enum ib_cm_apr_status status,
2928 void *info,
2929 u8 info_length,
2930 const void *private_data,
2931 u8 private_data_len)
2932 {
2933 struct cm_id_private *cm_id_priv;
2934 struct ib_mad_send_buf *msg;
2935 unsigned long flags;
2936 int ret;
2938 if ((private_data && private_data_len > IB_CM_APR_PRIVATE_DATA_SIZE) ||
2939 (info && info_length > IB_CM_APR_INFO_LENGTH))
2940 return -EINVAL;
2942 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2943 spin_lock_irqsave(&cm_id_priv->lock, flags);
2944 if (cm_id->state != IB_CM_ESTABLISHED ||
2945 (cm_id->lap_state != IB_CM_LAP_RCVD &&
2946 cm_id->lap_state != IB_CM_MRA_LAP_SENT)) {
2947 ret = -EINVAL;
2948 goto out;
2949 }
2951 ret = cm_alloc_msg(cm_id_priv, &msg);
2952 if (ret)
2953 goto out;
2955 cm_format_apr((struct cm_apr_msg *) msg->mad, cm_id_priv, status,
2956 info, info_length, private_data, private_data_len);
2957 ret = ib_post_send_mad(msg, NULL);
2958 if (ret) {
2959 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2960 cm_free_msg(msg);
2961 return ret;
2962 }
2964 cm_id->lap_state = IB_CM_LAP_IDLE;
2965 out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2966 return ret;
2967 }
2968 EXPORT_SYMBOL(ib_send_cm_apr);
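/*
 * Editor's illustrative sketch (assumption): the passive side accepts or
 * refuses the proposed alternate path from its LAP event handler.
 *
 *	if (ev->event == IB_CM_LAP_RECEIVED)
 *		ib_send_cm_apr(id, IB_CM_APR_SUCCESS, NULL, 0, NULL, 0);
 */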
2970 static int cm_apr_handler(struct cm_work *work)
2971 {
2972 struct cm_id_private *cm_id_priv;
2973 struct cm_apr_msg *apr_msg;
2974 int ret;
2976 apr_msg = (struct cm_apr_msg *)work->mad_recv_wc->recv_buf.mad;
2977 cm_id_priv = cm_acquire_id(apr_msg->remote_comm_id,
2978 apr_msg->local_comm_id);
2979 if (!cm_id_priv)
2980 return -EINVAL; /* Unmatched reply. */
2982 work->cm_event.param.apr_rcvd.ap_status = apr_msg->ap_status;
2983 work->cm_event.param.apr_rcvd.apr_info = &apr_msg->info;
2984 work->cm_event.param.apr_rcvd.info_len = apr_msg->info_length;
2985 work->cm_event.private_data = &apr_msg->private_data;
2987 spin_lock_irq(&cm_id_priv->lock);
2988 if (cm_id_priv->id.state != IB_CM_ESTABLISHED ||
2989 (cm_id_priv->id.lap_state != IB_CM_LAP_SENT &&
2990 cm_id_priv->id.lap_state != IB_CM_MRA_LAP_RCVD)) {
2991 spin_unlock_irq(&cm_id_priv->lock);
2992 goto out;
2993 }
2994 cm_id_priv->id.lap_state = IB_CM_LAP_IDLE;
2995 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2996 cm_id_priv->msg = NULL;
2998 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2999 if (!ret)
3000 list_add_tail(&work->list, &cm_id_priv->work_list);
3001 spin_unlock_irq(&cm_id_priv->lock);
3003 if (ret)
3004 cm_process_work(cm_id_priv, work);
3005 else
3006 cm_deref_id(cm_id_priv);
3007 return 0;
3008 out:
3009 cm_deref_id(cm_id_priv);
3010 return -EINVAL;
3011 }
3013 static int cm_timewait_handler(struct cm_work *work)
3014 {
3015 struct cm_timewait_info *timewait_info;
3016 struct cm_id_private *cm_id_priv;
3017 int ret;
3019 timewait_info = (struct cm_timewait_info *)work;
3020 spin_lock_irq(&cm.lock);
3021 list_del(&timewait_info->list);
3022 spin_unlock_irq(&cm.lock);
3024 cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
3025 timewait_info->work.remote_id);
3026 if (!cm_id_priv)
3027 return -EINVAL;
3029 spin_lock_irq(&cm_id_priv->lock);
3030 if (cm_id_priv->id.state != IB_CM_TIMEWAIT ||
3031 cm_id_priv->remote_qpn != timewait_info->remote_qpn) {
3032 spin_unlock_irq(&cm_id_priv->lock);
3033 goto out;
3034 }
3035 cm_id_priv->id.state = IB_CM_IDLE;
3036 ret = atomic_inc_and_test(&cm_id_priv->work_count);
3037 if (!ret)
3038 list_add_tail(&work->list, &cm_id_priv->work_list);
3039 spin_unlock_irq(&cm_id_priv->lock);
3041 if (ret)
3042 cm_process_work(cm_id_priv, work);
3043 else
3044 cm_deref_id(cm_id_priv);
3045 return 0;
3046 out:
3047 cm_deref_id(cm_id_priv);
3048 return -EINVAL;
3049 }
3051 static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg,
3052 struct cm_id_private *cm_id_priv,
3053 struct ib_cm_sidr_req_param *param)
3054 {
3055 cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID,
3056 cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_SIDR));
3057 sidr_req_msg->request_id = cm_id_priv->id.local_id;
3058 sidr_req_msg->pkey = param->path->pkey;
3059 sidr_req_msg->service_id = param->service_id;
3061 if (param->private_data && param->private_data_len)
3062 memcpy(sidr_req_msg->private_data, param->private_data,
3063 param->private_data_len);
3064 }
3066 int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
3067 struct ib_cm_sidr_req_param *param)
3068 {
3069 struct cm_id_private *cm_id_priv;
3070 struct ib_mad_send_buf *msg;
3071 unsigned long flags;
3072 int ret;
3074 if (!param->path || (param->private_data &&
3075 param->private_data_len > IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE))
3076 return -EINVAL;
3078 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3079 ret = cm_init_av_by_path(param->path, &cm_id_priv->av, cm_id_priv);
3080 if (ret)
3081 goto out;
3083 cm_id->service_id = param->service_id;
3084 cm_id->service_mask = ~cpu_to_be64(0);
3085 cm_id_priv->timeout_ms = param->timeout_ms;
3086 cm_id_priv->max_cm_retries = param->max_cm_retries;
3087 ret = cm_alloc_msg(cm_id_priv, &msg);
3088 if (ret)
3089 goto out;
3091 cm_format_sidr_req((struct cm_sidr_req_msg *) msg->mad, cm_id_priv,
3092 param);
3093 msg->timeout_ms = cm_id_priv->timeout_ms;
3094 msg->context[1] = (void *) (unsigned long) IB_CM_SIDR_REQ_SENT;
3096 spin_lock_irqsave(&cm_id_priv->lock, flags);
3097 if (cm_id->state == IB_CM_IDLE)
3098 ret = ib_post_send_mad(msg, NULL);
3099 else
3100 ret = -EINVAL;
3102 if (ret) {
3103 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3104 cm_free_msg(msg);
3105 goto out;
3106 }
3107 cm_id->state = IB_CM_SIDR_REQ_SENT;
3108 cm_id_priv->msg = msg;
3109 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3110 out:
3111 return ret;
3112 }
3113 EXPORT_SYMBOL(ib_send_cm_sidr_req);
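/*
 * Editor's illustrative sketch (assumption): SIDR resolves a service ID to
 * a remote UD QPN/Q_Key without creating a connection. 'path' would come
 * from an SA path query; MY_SERVICE_ID is a hypothetical constant.
 *
 *	struct ib_cm_sidr_req_param param = {
 *		.path		= path,
 *		.service_id	= cpu_to_be64(MY_SERVICE_ID),
 *		.timeout_ms	= 2000,
 *		.max_cm_retries	= 3,
 *	};
 *	ib_send_cm_sidr_req(id, &param);
 */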
3115 static void cm_format_sidr_req_event(struct cm_work *work,
3116 struct ib_cm_id *listen_id)
3117 {
3118 struct cm_sidr_req_msg *sidr_req_msg;
3119 struct ib_cm_sidr_req_event_param *param;
3121 sidr_req_msg = (struct cm_sidr_req_msg *)
3122 work->mad_recv_wc->recv_buf.mad;
3123 param = &work->cm_event.param.sidr_req_rcvd;
3124 param->pkey = __be16_to_cpu(sidr_req_msg->pkey);
3125 param->listen_id = listen_id;
3126 param->service_id = sidr_req_msg->service_id;
3127 param->bth_pkey = cm_get_bth_pkey(work);
3128 param->port = work->port->port_num;
3129 work->cm_event.private_data = &sidr_req_msg->private_data;
3130 }
3132 static int cm_sidr_req_handler(struct cm_work *work)
3133 {
3134 struct ib_cm_id *cm_id;
3135 struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
3136 struct cm_sidr_req_msg *sidr_req_msg;
3137 struct ib_wc *wc;
3139 cm_id = ib_create_cm_id(work->port->cm_dev->ib_device, NULL, NULL);
3140 if (IS_ERR(cm_id))
3141 return PTR_ERR(cm_id);
3142 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3144 /* Record SGID/SLID and request ID for lookup. */
3145 sidr_req_msg = (struct cm_sidr_req_msg *)
3146 work->mad_recv_wc->recv_buf.mad;
3147 wc = work->mad_recv_wc->wc;
3148 cm_id_priv->av.dgid.global.subnet_prefix = cpu_to_be64(wc->slid);
3149 cm_id_priv->av.dgid.global.interface_id = 0;
3150 cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
3151 work->mad_recv_wc->recv_buf.grh,
3152 &cm_id_priv->av);
3153 cm_id_priv->id.remote_id = sidr_req_msg->request_id;
3154 cm_id_priv->tid = sidr_req_msg->hdr.tid;
3155 atomic_inc(&cm_id_priv->work_count);
3157 spin_lock_irq(&cm.lock);
3158 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
3159 if (cur_cm_id_priv) {
3160 spin_unlock_irq(&cm.lock);
3161 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
3162 counter[CM_SIDR_REQ_COUNTER]);
3163 goto out; /* Duplicate message. */
3165 cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD;
3166 cur_cm_id_priv = cm_find_listen(cm_id->device,
3167 sidr_req_msg->service_id);
3168 if (!cur_cm_id_priv) {
3169 spin_unlock_irq(&cm.lock);
3170 cm_reject_sidr_req(cm_id_priv, IB_SIDR_UNSUPPORTED);
3171 goto out; /* No match. */
3173 atomic_inc(&cur_cm_id_priv->refcount);
3174 atomic_inc(&cm_id_priv->refcount);
3175 spin_unlock_irq(&cm.lock);
3177 cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler;
3178 cm_id_priv->id.context = cur_cm_id_priv->id.context;
3179 cm_id_priv->id.service_id = sidr_req_msg->service_id;
3180 cm_id_priv->id.service_mask = ~cpu_to_be64(0);
3182 cm_format_sidr_req_event(work, &cur_cm_id_priv->id);
3183 cm_process_work(cm_id_priv, work);
3184 cm_deref_id(cur_cm_id_priv);
3185 return 0;
3186 out:
3187 ib_destroy_cm_id(&cm_id_priv->id);
3188 return -EINVAL;
3189 }
3191 static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg,
3192 struct cm_id_private *cm_id_priv,
3193 struct ib_cm_sidr_rep_param *param)
3194 {
3195 cm_format_mad_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID,
3196 cm_id_priv->tid);
3197 sidr_rep_msg->request_id = cm_id_priv->id.remote_id;
3198 sidr_rep_msg->status = param->status;
3199 cm_sidr_rep_set_qpn(sidr_rep_msg, cpu_to_be32(param->qp_num));
3200 sidr_rep_msg->service_id = cm_id_priv->id.service_id;
3201 sidr_rep_msg->qkey = cpu_to_be32(param->qkey);
3203 if (param->info && param->info_length)
3204 memcpy(sidr_rep_msg->info, param->info, param->info_length);
3206 if (param->private_data && param->private_data_len)
3207 memcpy(sidr_rep_msg->private_data, param->private_data,
3208 param->private_data_len);
3209 }
3211 int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
3212 struct ib_cm_sidr_rep_param *param)
3213 {
3214 struct cm_id_private *cm_id_priv;
3215 struct ib_mad_send_buf *msg;
3216 unsigned long flags;
3217 int ret;
3219 if ((param->info && param->info_length > IB_CM_SIDR_REP_INFO_LENGTH) ||
3220 (param->private_data &&
3221 param->private_data_len > IB_CM_SIDR_REP_PRIVATE_DATA_SIZE))
3222 return -EINVAL;
3224 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3225 spin_lock_irqsave(&cm_id_priv->lock, flags);
3226 if (cm_id->state != IB_CM_SIDR_REQ_RCVD) {
3227 ret = -EINVAL;
3228 goto error;
3229 }
3231 ret = cm_alloc_msg(cm_id_priv, &msg);
3232 if (ret)
3233 goto error;
3235 cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv,
3236 param);
3237 ret = ib_post_send_mad(msg, NULL);
3238 if (ret) {
3239 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3240 cm_free_msg(msg);
3241 return ret;
3242 }
3243 cm_id->state = IB_CM_IDLE;
3244 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3246 spin_lock_irqsave(&cm.lock, flags);
3247 if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node)) {
3248 rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
3249 RB_CLEAR_NODE(&cm_id_priv->sidr_id_node);
3250 }
3251 spin_unlock_irqrestore(&cm.lock, flags);
3252 return 0;
3254 error: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3255 return ret;
3256 }
3257 EXPORT_SYMBOL(ib_send_cm_sidr_rep);
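/*
 * Editor's illustrative sketch (assumption): the service side answers a
 * SIDR REQ from its handler with the UD QP the client should address.
 * 'my_ud_qp' and MY_QKEY are hypothetical caller state.
 *
 *	struct ib_cm_sidr_rep_param rep = {
 *		.qp_num	= my_ud_qp->qp_num,
 *		.qkey	= MY_QKEY,
 *		.status	= IB_SIDR_SUCCESS,
 *	};
 *	ib_send_cm_sidr_rep(id, &rep);
 */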
3259 static void cm_format_sidr_rep_event(struct cm_work *work)
3260 {
3261 struct cm_sidr_rep_msg *sidr_rep_msg;
3262 struct ib_cm_sidr_rep_event_param *param;
3264 sidr_rep_msg = (struct cm_sidr_rep_msg *)
3265 work->mad_recv_wc->recv_buf.mad;
3266 param = &work->cm_event.param.sidr_rep_rcvd;
3267 param->status = sidr_rep_msg->status;
3268 param->qkey = be32_to_cpu(sidr_rep_msg->qkey);
3269 param->qpn = be32_to_cpu(cm_sidr_rep_get_qpn(sidr_rep_msg));
3270 param->info = &sidr_rep_msg->info;
3271 param->info_len = sidr_rep_msg->info_length;
3272 work->cm_event.private_data = &sidr_rep_msg->private_data;
3273 }
3275 static int cm_sidr_rep_handler(struct cm_work *work)
3276 {
3277 struct cm_sidr_rep_msg *sidr_rep_msg;
3278 struct cm_id_private *cm_id_priv;
3280 sidr_rep_msg = (struct cm_sidr_rep_msg *)
3281 work->mad_recv_wc->recv_buf.mad;
3282 cm_id_priv = cm_acquire_id(sidr_rep_msg->request_id, 0);
3283 if (!cm_id_priv)
3284 return -EINVAL; /* Unmatched reply. */
3286 spin_lock_irq(&cm_id_priv->lock);
3287 if (cm_id_priv->id.state != IB_CM_SIDR_REQ_SENT) {
3288 spin_unlock_irq(&cm_id_priv->lock);
3289 goto out;
3290 }
3291 cm_id_priv->id.state = IB_CM_IDLE;
3292 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
3293 spin_unlock_irq(&cm_id_priv->lock);
3295 cm_format_sidr_rep_event(work);
3296 cm_process_work(cm_id_priv, work);
3297 return 0;
3298 out:
3299 cm_deref_id(cm_id_priv);
3300 return -EINVAL;
3301 }
3303 static void cm_process_send_error(struct ib_mad_send_buf *msg,
3304 enum ib_wc_status wc_status)
3305 {
3306 struct cm_id_private *cm_id_priv;
3307 struct ib_cm_event cm_event;
3308 enum ib_cm_state state;
3309 int ret;
3311 memset(&cm_event, 0, sizeof cm_event);
3312 cm_id_priv = msg->context[0];
3314 /* Discard old sends or ones without a response. */
3315 spin_lock_irq(&cm_id_priv->lock);
3316 state = (enum ib_cm_state) (unsigned long) msg->context[1];
3317 if (msg != cm_id_priv->msg || state != cm_id_priv->id.state)
3318 goto discard;
3320 switch (state) {
3321 case IB_CM_REQ_SENT:
3322 case IB_CM_MRA_REQ_RCVD:
3323 cm_reset_to_idle(cm_id_priv);
3324 cm_event.event = IB_CM_REQ_ERROR;
3325 break;
3326 case IB_CM_REP_SENT:
3327 case IB_CM_MRA_REP_RCVD:
3328 cm_reset_to_idle(cm_id_priv);
3329 cm_event.event = IB_CM_REP_ERROR;
3330 break;
3331 case IB_CM_DREQ_SENT:
3332 cm_enter_timewait(cm_id_priv);
3333 cm_event.event = IB_CM_DREQ_ERROR;
3334 break;
3335 case IB_CM_SIDR_REQ_SENT:
3336 cm_id_priv->id.state = IB_CM_IDLE;
3337 cm_event.event = IB_CM_SIDR_REQ_ERROR;
3338 break;
3339 default:
3340 goto discard;
3341 }
3342 spin_unlock_irq(&cm_id_priv->lock);
3343 cm_event.param.send_status = wc_status;
3345 /* No other events can occur on the cm_id at this point. */
3346 ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &cm_event);
3347 cm_free_msg(msg);
3348 if (ret)
3349 ib_destroy_cm_id(&cm_id_priv->id);
3350 return;
3351 discard:
3352 spin_unlock_irq(&cm_id_priv->lock);
3353 cm_free_msg(msg);
3354 }
3356 static void cm_send_handler(struct ib_mad_agent *mad_agent,
3357 struct ib_mad_send_wc *mad_send_wc)
3358 {
3359 struct ib_mad_send_buf *msg = mad_send_wc->send_buf;
3360 struct cm_port *port;
3361 u16 attr_index;
3363 port = mad_agent->context;
3364 attr_index = be16_to_cpu(((struct ib_mad_hdr *)
3365 msg->mad)->attr_id) - CM_ATTR_ID_OFFSET;
3367 /*
3368 * If the send was in response to a received message (context[0] is not
3369 * set to a cm_id), and is not a REJ, then it is a send that was
3370 * manually retried.
3371 */
3372 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
3373 msg->retries = 1;
3375 atomic_long_add(1 + msg->retries,
3376 &port->counter_group[CM_XMIT].counter[attr_index]);
3377 if (msg->retries)
3378 atomic_long_add(msg->retries,
3379 &port->counter_group[CM_XMIT_RETRIES].
3380 counter[attr_index]);
3382 switch (mad_send_wc->status) {
3383 case IB_WC_SUCCESS:
3384 case IB_WC_WR_FLUSH_ERR:
3385 cm_free_msg(msg);
3386 break;
3387 default:
3388 if (msg->context[0] && msg->context[1])
3389 cm_process_send_error(msg, mad_send_wc->status);
3390 else
3391 cm_free_msg(msg);
3392 break;
3393 }
3394 }
3396 static void cm_work_handler(struct work_struct *_work)
3397 {
3398 struct cm_work *work = container_of(_work, struct cm_work, work.work);
3399 int ret;
3401 switch (work->cm_event.event) {
3402 case IB_CM_REQ_RECEIVED:
3403 ret = cm_req_handler(work);
3404 break;
3405 case IB_CM_MRA_RECEIVED:
3406 ret = cm_mra_handler(work);
3407 break;
3408 case IB_CM_REJ_RECEIVED:
3409 ret = cm_rej_handler(work);
3410 break;
3411 case IB_CM_REP_RECEIVED:
3412 ret = cm_rep_handler(work);
3413 break;
3414 case IB_CM_RTU_RECEIVED:
3415 ret = cm_rtu_handler(work);
3416 break;
3417 case IB_CM_USER_ESTABLISHED:
3418 ret = cm_establish_handler(work);
3419 break;
3420 case IB_CM_DREQ_RECEIVED:
3421 ret = cm_dreq_handler(work);
3422 break;
3423 case IB_CM_DREP_RECEIVED:
3424 ret = cm_drep_handler(work);
3425 break;
3426 case IB_CM_SIDR_REQ_RECEIVED:
3427 ret = cm_sidr_req_handler(work);
3428 break;
3429 case IB_CM_SIDR_REP_RECEIVED:
3430 ret = cm_sidr_rep_handler(work);
3431 break;
3432 case IB_CM_LAP_RECEIVED:
3433 ret = cm_lap_handler(work);
3434 break;
3435 case IB_CM_APR_RECEIVED:
3436 ret = cm_apr_handler(work);
3437 break;
3438 case IB_CM_TIMEWAIT_EXIT:
3439 ret = cm_timewait_handler(work);
3440 break;
3441 default:
3442 ret = -EINVAL;
3443 break;
3444 }
3445 if (ret)
3446 cm_free_work(work);
3447 }
3449 static int cm_establish(struct ib_cm_id *cm_id)
3450 {
3451 struct cm_id_private *cm_id_priv;
3452 struct cm_work *work;
3453 unsigned long flags;
3454 int ret = 0;
3455 struct cm_device *cm_dev;
3457 cm_dev = ib_get_client_data(cm_id->device, &cm_client);
3458 if (!cm_dev)
3459 return -ENODEV;
3461 work = kmalloc(sizeof *work, GFP_ATOMIC);
3462 if (!work)
3463 return -ENOMEM;
3465 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3466 spin_lock_irqsave(&cm_id_priv->lock, flags);
3467 switch (cm_id->state)
3468 {
3469 case IB_CM_REP_SENT:
3470 case IB_CM_MRA_REP_RCVD:
3471 cm_id->state = IB_CM_ESTABLISHED;
3472 break;
3473 case IB_CM_ESTABLISHED:
3474 ret = -EISCONN;
3475 break;
3476 default:
3477 ret = -EINVAL;
3478 break;
3479 }
3480 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3482 if (ret) {
3483 kfree(work);
3484 goto out;
3485 }
3487 /*
3488 * The CM worker thread may try to destroy the cm_id before it
3489 * can execute this work item. To prevent potential deadlock,
3490 * we need to find the cm_id once we're in the context of the
3491 * worker thread, rather than holding a reference on it.
3492 */
3493 INIT_DELAYED_WORK(&work->work, cm_work_handler);
3494 work->local_id = cm_id->local_id;
3495 work->remote_id = cm_id->remote_id;
3496 work->mad_recv_wc = NULL;
3497 work->cm_event.event = IB_CM_USER_ESTABLISHED;
3499 /* Check if the device started its remove_one */
3500 spin_lock_irqsave(&cm.lock, flags);
3501 if (!cm_dev->going_down) {
3502 queue_delayed_work(cm.wq, &work->work, 0);
3503 } else {
3504 kfree(work);
3505 ret = -ENODEV;
3506 }
3507 spin_unlock_irqrestore(&cm.lock, flags);
3509 out:
3510 return ret;
3511 }
3513 static int cm_migrate(struct ib_cm_id *cm_id)
3514 {
3515 struct cm_id_private *cm_id_priv;
3516 struct cm_av tmp_av;
3517 unsigned long flags;
3518 int tmp_send_port_not_ready;
3519 int ret = 0;
3521 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3522 spin_lock_irqsave(&cm_id_priv->lock, flags);
3523 if (cm_id->state == IB_CM_ESTABLISHED &&
3524 (cm_id->lap_state == IB_CM_LAP_UNINIT ||
3525 cm_id->lap_state == IB_CM_LAP_IDLE)) {
3526 cm_id->lap_state = IB_CM_LAP_IDLE;
3527 /* Swap address vector */
3528 tmp_av = cm_id_priv->av;
3529 cm_id_priv->av = cm_id_priv->alt_av;
3530 cm_id_priv->alt_av = tmp_av;
3531 /* Swap port send ready state */
3532 tmp_send_port_not_ready = cm_id_priv->prim_send_port_not_ready;
3533 cm_id_priv->prim_send_port_not_ready = cm_id_priv->altr_send_port_not_ready;
3534 cm_id_priv->altr_send_port_not_ready = tmp_send_port_not_ready;
3535 } else
3536 ret = -EINVAL;
3537 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3539 return ret;
3540 }
3542 int ib_cm_notify(struct ib_cm_id *cm_id, enum ib_event_type event)
3543 {
3544 int ret;
3546 switch (event) {
3547 case IB_EVENT_COMM_EST:
3548 ret = cm_establish(cm_id);
3549 break;
3550 case IB_EVENT_PATH_MIG:
3551 ret = cm_migrate(cm_id);
3552 break;
3553 default:
3554 ret = -EINVAL;
3555 }
3556 return ret;
3557 }
3558 EXPORT_SYMBOL(ib_cm_notify);
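/*
 * Editor's illustrative sketch (assumption): ULPs forward the relevant QP
 * async events so the CM state machine tracks connection establishment
 * (first receive arriving before the RTU) and hardware path migration.
 *
 *	static void my_qp_event_handler(struct ib_event *event, void *ctx)
 *	{
 *		struct ib_cm_id *id = ctx;	// hypothetical context
 *
 *		if (event->event == IB_EVENT_COMM_EST ||
 *		    event->event == IB_EVENT_PATH_MIG)
 *			ib_cm_notify(id, event->event);
 *	}
 */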
3560 static void cm_recv_handler(struct ib_mad_agent *mad_agent,
3561 struct ib_mad_recv_wc *mad_recv_wc)
3562 {
3563 struct cm_port *port = mad_agent->context;
3564 struct cm_work *work;
3565 enum ib_cm_event_type event;
3566 u16 attr_id;
3567 int paths = 0;
3568 int going_down = 0;
3570 switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
3571 case CM_REQ_ATTR_ID:
3572 paths = 1 + (((struct cm_req_msg *) mad_recv_wc->recv_buf.mad)->
3573 alt_local_lid != 0);
3574 event = IB_CM_REQ_RECEIVED;
3575 break;
3576 case CM_MRA_ATTR_ID:
3577 event = IB_CM_MRA_RECEIVED;
3578 break;
3579 case CM_REJ_ATTR_ID:
3580 event = IB_CM_REJ_RECEIVED;
3581 break;
3582 case CM_REP_ATTR_ID:
3583 event = IB_CM_REP_RECEIVED;
3584 break;
3585 case CM_RTU_ATTR_ID:
3586 event = IB_CM_RTU_RECEIVED;
3587 break;
3588 case CM_DREQ_ATTR_ID:
3589 event = IB_CM_DREQ_RECEIVED;
3590 break;
3591 case CM_DREP_ATTR_ID:
3592 event = IB_CM_DREP_RECEIVED;
3593 break;
3594 case CM_SIDR_REQ_ATTR_ID:
3595 event = IB_CM_SIDR_REQ_RECEIVED;
3596 break;
3597 case CM_SIDR_REP_ATTR_ID:
3598 event = IB_CM_SIDR_REP_RECEIVED;
3599 break;
3600 case CM_LAP_ATTR_ID:
3601 paths = 1;
3602 event = IB_CM_LAP_RECEIVED;
3603 break;
3604 case CM_APR_ATTR_ID:
3605 event = IB_CM_APR_RECEIVED;
3606 break;
3607 default:
3608 ib_free_recv_mad(mad_recv_wc);
3609 return;
3610 }
3612 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
3613 atomic_long_inc(&port->counter_group[CM_RECV].
3614 counter[attr_id - CM_ATTR_ID_OFFSET]);
3616 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
3617 GFP_KERNEL);
3618 if (!work) {
3619 ib_free_recv_mad(mad_recv_wc);
3620 return;
3621 }
3623 INIT_DELAYED_WORK(&work->work, cm_work_handler);
3624 work->cm_event.event = event;
3625 work->mad_recv_wc = mad_recv_wc;
3626 work->port = port;
3628 /* Check if the device started its remove_one */
3629 spin_lock_irq(&cm.lock);
3630 if (!port->cm_dev->going_down)
3631 queue_delayed_work(cm.wq, &work->work, 0);
3632 else
3633 going_down = 1;
3634 spin_unlock_irq(&cm.lock);
3636 if (going_down) {
3637 kfree(work);
3638 ib_free_recv_mad(mad_recv_wc);
3639 }
3640 }
3642 static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
3643 struct ib_qp_attr *qp_attr,
3644 int *qp_attr_mask)
3645 {
3646 unsigned long flags;
3647 int ret;
3649 spin_lock_irqsave(&cm_id_priv->lock, flags);
3650 switch (cm_id_priv->id.state) {
3651 case IB_CM_REQ_SENT:
3652 case IB_CM_MRA_REQ_RCVD:
3653 case IB_CM_REQ_RCVD:
3654 case IB_CM_MRA_REQ_SENT:
3655 case IB_CM_REP_RCVD:
3656 case IB_CM_MRA_REP_SENT:
3657 case IB_CM_REP_SENT:
3658 case IB_CM_MRA_REP_RCVD:
3659 case IB_CM_ESTABLISHED:
3660 *qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS |
3661 IB_QP_PKEY_INDEX | IB_QP_PORT;
3662 qp_attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE;
3663 if (cm_id_priv->responder_resources)
3664 qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ |
3665 IB_ACCESS_REMOTE_ATOMIC;
3666 qp_attr->pkey_index = cm_id_priv->av.pkey_index;
3667 qp_attr->port_num = cm_id_priv->av.port->port_num;
3668 ret = 0;
3669 break;
3670 default:
3671 ret = -EINVAL;
3672 break;
3673 }
3674 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3675 return ret;
3676 }
3678 static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
3679 struct ib_qp_attr *qp_attr,
3680 int *qp_attr_mask)
3681 {
3682 unsigned long flags;
3683 int ret;
3685 spin_lock_irqsave(&cm_id_priv->lock, flags);
3686 switch (cm_id_priv->id.state) {
3687 case IB_CM_REQ_RCVD:
3688 case IB_CM_MRA_REQ_SENT:
3689 case IB_CM_REP_RCVD:
3690 case IB_CM_MRA_REP_SENT:
3691 case IB_CM_REP_SENT:
3692 case IB_CM_MRA_REP_RCVD:
3693 case IB_CM_ESTABLISHED:
3694 *qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
3695 IB_QP_DEST_QPN | IB_QP_RQ_PSN;
3696 qp_attr->ah_attr = cm_id_priv->av.ah_attr;
3697 qp_attr->path_mtu = cm_id_priv->path_mtu;
3698 qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn);
3699 qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn);
3700 if (cm_id_priv->qp_type == IB_QPT_RC ||
3701 cm_id_priv->qp_type == IB_QPT_XRC_TGT) {
3702 *qp_attr_mask |= IB_QP_MAX_DEST_RD_ATOMIC |
3703 IB_QP_MIN_RNR_TIMER;
3704 qp_attr->max_dest_rd_atomic =
3705 cm_id_priv->responder_resources;
3706 qp_attr->min_rnr_timer = 0;
3707 }
3708 if (cm_id_priv->alt_av.ah_attr.dlid) {
3709 *qp_attr_mask |= IB_QP_ALT_PATH;
3710 qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
3711 qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
3712 qp_attr->alt_timeout = cm_id_priv->alt_av.timeout;
3713 qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
3714 }
3715 ret = 0;
3716 break;
3717 default:
3718 ret = -EINVAL;
3719 break;
3720 }
3721 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3722 return ret;
3723 }
3725 static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
3726 struct ib_qp_attr *qp_attr,
3727 int *qp_attr_mask)
3728 {
3729 unsigned long flags;
3730 int ret;
3732 spin_lock_irqsave(&cm_id_priv->lock, flags);
3733 switch (cm_id_priv->id.state) {
3734 /* Allow transition to RTS before sending REP */
3735 case IB_CM_REQ_RCVD:
3736 case IB_CM_MRA_REQ_SENT:
3737 /* Allow transition to RTS after receiving REP */
3738 case IB_CM_REP_RCVD:
3739 case IB_CM_MRA_REP_SENT:
3740 case IB_CM_REP_SENT:
3741 case IB_CM_MRA_REP_RCVD:
3742 case IB_CM_ESTABLISHED:
3743 if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT) {
3744 *qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN;
3745 qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
3746 switch (cm_id_priv->qp_type) {
3747 case IB_QPT_RC:
3748 case IB_QPT_XRC_INI:
3749 *qp_attr_mask |= IB_QP_RETRY_CNT | IB_QP_RNR_RETRY |
3750 IB_QP_MAX_QP_RD_ATOMIC;
3751 qp_attr->retry_cnt = cm_id_priv->retry_count;
3752 qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
3753 qp_attr->max_rd_atomic = cm_id_priv->initiator_depth;
3754 /* fall through */
3755 case IB_QPT_XRC_TGT:
3756 *qp_attr_mask |= IB_QP_TIMEOUT;
3757 qp_attr->timeout = cm_id_priv->av.timeout;
3758 break;
3759 default:
3760 break;
3761 }
3762 if (cm_id_priv->alt_av.ah_attr.dlid) {
3763 *qp_attr_mask |= IB_QP_PATH_MIG_STATE;
3764 qp_attr->path_mig_state = IB_MIG_REARM;
3765 }
3766 } else {
3767 *qp_attr_mask = IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE;
3768 qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
3769 qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
3770 qp_attr->alt_timeout = cm_id_priv->alt_av.timeout;
3771 qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
3772 qp_attr->path_mig_state = IB_MIG_REARM;
3773 }
3774 ret = 0;
3775 break;
3776 default:
3777 ret = -EINVAL;
3778 break;
3779 }
3780 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3781 return ret;
3782 }
3784 int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
3785 struct ib_qp_attr *qp_attr,
3786 int *qp_attr_mask)
3787 {
3788 struct cm_id_private *cm_id_priv;
3789 int ret;
3791 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3792 switch (qp_attr->qp_state) {
3793 case IB_QPS_INIT:
3794 ret = cm_init_qp_init_attr(cm_id_priv, qp_attr, qp_attr_mask);
3795 break;
3796 case IB_QPS_RTR:
3797 ret = cm_init_qp_rtr_attr(cm_id_priv, qp_attr, qp_attr_mask);
3798 break;
3799 case IB_QPS_RTS:
3800 ret = cm_init_qp_rts_attr(cm_id_priv, qp_attr, qp_attr_mask);
3801 break;
3802 default:
3803 ret = -EINVAL;
3804 break;
3805 }
3806 return ret;
3807 }
3808 EXPORT_SYMBOL(ib_cm_init_qp_attr);
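/*
 * Editor's illustrative sketch (assumption): the usual consumer pattern is
 * to let the CM fill in each transition's attributes and then apply them,
 * walking the QP through INIT, RTR and RTS as the handshake progresses.
 *
 *	static int my_modify_qp(struct ib_cm_id *id, struct ib_qp *qp,
 *				enum ib_qp_state state)
 *	{
 *		struct ib_qp_attr attr = { .qp_state = state };
 *		int mask = 0;
 *		int ret;
 *
 *		ret = ib_cm_init_qp_attr(id, &attr, &mask);
 *		return ret ? ret : ib_modify_qp(qp, &attr, mask);
 *	}
 */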
3810 static void cm_get_ack_delay(struct cm_device *cm_dev)
3811 {
3812 struct ib_device_attr attr;
3814 if (ib_query_device(cm_dev->ib_device, &attr))
3815 cm_dev->ack_delay = 0; /* acks will rely on packet life time */
3816 else
3817 cm_dev->ack_delay = attr.local_ca_ack_delay;
3818 }
3820 static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
3821 char *buf)
3822 {
3823 struct cm_counter_group *group;
3824 struct cm_counter_attribute *cm_attr;
3826 group = container_of(obj, struct cm_counter_group, obj);
3827 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
3829 return sprintf(buf, "%ld\n",
3830 atomic_long_read(&group->counter[cm_attr->index]));
3833 static const struct sysfs_ops cm_counter_ops = {
3834 .show = cm_show_counter
3835 };
3837 static struct kobj_type cm_counter_obj_type = {
3838 .sysfs_ops = &cm_counter_ops,
3839 .default_attrs = cm_counter_default_attrs
3840 };
3842 static void cm_release_port_obj(struct kobject *obj)
3843 {
3844 struct cm_port *cm_port;
3846 cm_port = container_of(obj, struct cm_port, port_obj);
3847 kfree(cm_port);
3848 }
3850 static struct kobj_type cm_port_obj_type = {
3851 .release = cm_release_port_obj
3852 };
3854 static char *cm_devnode(struct device *dev, umode_t *mode)
3855 {
3856 if (mode)
3857 *mode = 0666;
3858 return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
3859 }
3861 struct class cm_class = {
3862 .owner = THIS_MODULE,
3863 .name = "infiniband_cm",
3864 .devnode = cm_devnode,
3865 };
3866 EXPORT_SYMBOL(cm_class);
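/*
 * Editor's note (assumption): cm_class is exported so that other CM
 * consumers (historically the ib_ucm userspace module) can create their
 * char devices under the same "infiniband_cm" class and inherit
 * cm_devnode()'s /dev/infiniband/<name> naming and 0666 mode.
 */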
3868 static int cm_create_port_fs(struct cm_port *port)
3869 {
3870 int i, ret;
3872 ret = kobject_init_and_add(&port->port_obj, &cm_port_obj_type,
3873 &port->cm_dev->device->kobj,
3874 "%d", port->port_num);
3875 if (ret) {
3876 kfree(port);
3877 return ret;
3878 }
3880 for (i = 0; i < CM_COUNTER_GROUPS; i++) {
3881 ret = kobject_init_and_add(&port->counter_group[i].obj,
3882 &cm_counter_obj_type,
3883 &port->port_obj,
3884 "%s", counter_group_names[i]);
3885 if (ret)
3886 goto error;
3887 }
3889 return 0;
3891 error:
3892 while (i--)
3893 kobject_put(&port->counter_group[i].obj);
3894 kobject_put(&port->port_obj);
3895 return ret;
3896 }
3899 static void cm_remove_port_fs(struct cm_port *port)
3900 {
3901 int i;
3903 for (i = 0; i < CM_COUNTER_GROUPS; i++)
3904 kobject_put(&port->counter_group[i].obj);
3906 kobject_put(&port->port_obj);
3907 }
3909 static void cm_add_one(struct ib_device *ib_device)
3910 {
3911 struct cm_device *cm_dev;
3912 struct cm_port *port;
3913 struct ib_mad_reg_req reg_req = {
3914 .mgmt_class = IB_MGMT_CLASS_CM,
3915 .mgmt_class_version = IB_CM_CLASS_VERSION,
3916 };
3917 struct ib_port_modify port_modify = {
3918 .set_port_cap_mask = IB_PORT_CM_SUP
3919 };
3920 unsigned long flags;
3921 int ret;
3922 int count = 0;
3923 u8 i;
3925 cm_dev = kzalloc(sizeof(*cm_dev) + sizeof(*port) *
3926 ib_device->phys_port_cnt, GFP_KERNEL);
3927 if (!cm_dev)
3928 return;
3930 cm_dev->ib_device = ib_device;
3931 cm_get_ack_delay(cm_dev);
3932 cm_dev->going_down = 0;
3933 cm_dev->device = device_create(&cm_class, &ib_device->dev,
3934 MKDEV(0, 0), NULL,
3935 "%s", ib_device->name);
3936 if (IS_ERR(cm_dev->device)) {
3937 kfree(cm_dev);
3938 return;
3939 }
3941 set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
3942 for (i = 1; i <= ib_device->phys_port_cnt; i++) {
3943 if (!rdma_cap_ib_cm(ib_device, i))
3944 continue;
3946 port = kzalloc(sizeof *port, GFP_KERNEL);
3947 if (!port)
3948 goto error1;
3950 cm_dev->port[i-1] = port;
3951 port->cm_dev = cm_dev;
3952 port->port_num = i;
3954 INIT_LIST_HEAD(&port->cm_priv_prim_list);
3955 INIT_LIST_HEAD(&port->cm_priv_altr_list);
3957 ret = cm_create_port_fs(port);
3958 if (ret)
3959 goto error1;
3961 port->mad_agent = ib_register_mad_agent(ib_device, i,
3962 IB_QPT_GSI,
3963 &reg_req,
3964 0,
3965 cm_send_handler,
3966 cm_recv_handler,
3967 port,
3968 0);
3969 if (IS_ERR(port->mad_agent))
3970 goto error2;
3972 ret = ib_modify_port(ib_device, i, 0, &port_modify);
3973 if (ret)
3974 goto error3;
3976 count++;
3977 }
3979 if (!count)
3980 goto free;
3982 ib_set_client_data(ib_device, &cm_client, cm_dev);
3984 write_lock_irqsave(&cm.device_lock, flags);
3985 list_add_tail(&cm_dev->list, &cm.device_list);
3986 write_unlock_irqrestore(&cm.device_lock, flags);
3987 return;
3989 error3:
3990 ib_unregister_mad_agent(port->mad_agent);
3991 error2:
3992 cm_remove_port_fs(port);
3993 error1:
3994 port_modify.set_port_cap_mask = 0;
3995 port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
3996 while (--i) {
3997 if (!rdma_cap_ib_cm(ib_device, i))
3998 continue;
4000 port = cm_dev->port[i-1];
4001 ib_modify_port(ib_device, port->port_num, 0, &port_modify);
4002 ib_unregister_mad_agent(port->mad_agent);
4003 cm_remove_port_fs(port);
4004 }
4005 free:
4006 device_unregister(cm_dev->device);
4007 kfree(cm_dev);
4008 }
4010 static void cm_remove_one(struct ib_device *ib_device, void *client_data)
4011 {
4012 struct cm_device *cm_dev = client_data;
4013 struct cm_port *port;
4014 struct cm_id_private *cm_id_priv;
4015 struct ib_mad_agent *cur_mad_agent;
4016 struct ib_port_modify port_modify = {
4017 .clr_port_cap_mask = IB_PORT_CM_SUP
4018 };
4019 unsigned long flags;
4020 int i;
4022 if (!cm_dev)
4023 return;
4025 write_lock_irqsave(&cm.device_lock, flags);
4026 list_del(&cm_dev->list);
4027 write_unlock_irqrestore(&cm.device_lock, flags);
4029 spin_lock_irq(&cm.lock);
4030 cm_dev->going_down = 1;
4031 spin_unlock_irq(&cm.lock);
4033 for (i = 1; i <= ib_device->phys_port_cnt; i++) {
4034 if (!rdma_cap_ib_cm(ib_device, i))
4037 port = cm_dev->port[i-1];
4038 ib_modify_port(ib_device, port->port_num, 0, &port_modify);
4039 /* Mark all the cm_id's as not valid */
4040 spin_lock_irq(&cm.lock);
4041 list_for_each_entry(cm_id_priv, &port->cm_priv_altr_list, altr_list)
4042 cm_id_priv->altr_send_port_not_ready = 1;
4043 list_for_each_entry(cm_id_priv, &port->cm_priv_prim_list, prim_list)
4044 cm_id_priv->prim_send_port_not_ready = 1;
4045 spin_unlock_irq(&cm.lock);
4046 /*
4047 * We flush the queue here after setting going_down, which
4048 * verifies that no new work will be queued by the recv handler;
4049 * after that it is safe to call ib_unregister_mad_agent().
4050 */
4051 flush_workqueue(cm.wq);
4052 spin_lock_irq(&cm.state_lock);
4053 cur_mad_agent = port->mad_agent;
4054 port->mad_agent = NULL;
4055 spin_unlock_irq(&cm.state_lock);
4056 ib_unregister_mad_agent(cur_mad_agent);
4057 cm_remove_port_fs(port);
4060 device_unregister(cm_dev->device);
4061 kfree(cm_dev);
4062 }
4064 static int __init ib_cm_init(void)
4065 {
4066 int ret;
4068 memset(&cm, 0, sizeof cm);
4069 INIT_LIST_HEAD(&cm.device_list);
4070 rwlock_init(&cm.device_lock);
4071 spin_lock_init(&cm.lock);
4072 spin_lock_init(&cm.state_lock);
4073 cm.listen_service_table = RB_ROOT;
4074 cm.listen_service_id = be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
4075 cm.remote_id_table = RB_ROOT;
4076 cm.remote_qp_table = RB_ROOT;
4077 cm.remote_sidr_table = RB_ROOT;
4078 idr_init(&cm.local_id_table);
4079 get_random_bytes(&cm.random_id_operand, sizeof cm.random_id_operand);
4080 INIT_LIST_HEAD(&cm.timewait_list);
4082 ret = class_register(&cm_class);
4083 if (ret) {
4084 ret = -ENOMEM;
4085 goto error1;
4086 }
4088 cm.wq = create_workqueue("ib_cm");
4089 if (!cm.wq) {
4090 ret = -ENOMEM;
4091 goto error2;
4092 }
4094 ret = ib_register_client(&cm_client);
4095 if (ret)
4096 goto error3;
4098 return 0;
4099 error3:
4100 destroy_workqueue(cm.wq);
4101 error2:
4102 class_unregister(&cm_class);
4103 error1:
4104 idr_destroy(&cm.local_id_table);
4105 return ret;
4106 }
4108 static void __exit ib_cm_cleanup(void)
4109 {
4110 struct cm_timewait_info *timewait_info, *tmp;
4112 spin_lock_irq(&cm.lock);
4113 list_for_each_entry(timewait_info, &cm.timewait_list, list)
4114 cancel_delayed_work(&timewait_info->work.work);
4115 spin_unlock_irq(&cm.lock);
4117 ib_unregister_client(&cm_client);
4118 destroy_workqueue(cm.wq);
4120 list_for_each_entry_safe(timewait_info, tmp, &cm.timewait_list, list) {
4121 list_del(&timewait_info->list);
4122 kfree(timewait_info);
4123 }
4125 class_unregister(&cm_class);
4126 idr_destroy(&cm.local_id_table);
4127 }
4129 module_init(ib_cm_init);
4130 module_exit(ib_cm_cleanup);