/*
 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/gfp.h>

#include "c2.h"
#include "c2_vq.h"
#include "c2_status.h"
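
/*
 * Size of one completion-event message in the CQ message pool:
 * sizeof(struct c2wr_ce) rounded up to the next 32-byte boundary,
 * i.e. (size + 31) & ~31.
 */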
#define C2_CQ_MSG_SIZE ((sizeof(struct c2wr_ce) + 32-1) & ~(32-1))
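
/*
 * CQs are reference counted: c2_cq_get()/c2_cq_put() bracket every
 * lookup through qptr_array, and c2_free_cq() waits for the count to
 * reach zero before tearing the CQ down.
 */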
static struct c2_cq *c2_cq_get(struct c2_dev *c2dev, int cqn)
{
	struct c2_cq *cq;
	unsigned long flags;

	spin_lock_irqsave(&c2dev->lock, flags);
	cq = c2dev->qptr_array[cqn];
	if (!cq) {
		spin_unlock_irqrestore(&c2dev->lock, flags);
		return NULL;
	}
	atomic_inc(&cq->refcount);
	spin_unlock_irqrestore(&c2dev->lock, flags);
	return cq;
}

static void c2_cq_put(struct c2_cq *cq)
{
	if (atomic_dec_and_test(&cq->refcount))
		wake_up(&cq->wait);
}

void c2_cq_event(struct c2_dev *c2dev, u32 mq_index)
{
	struct c2_cq *cq;

	cq = c2_cq_get(c2dev, mq_index);
	if (!cq) {
		printk("discarding events on destroyed CQN=%d\n", mq_index);
		return;
	}

	(*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
	c2_cq_put(cq);
}
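
/*
 * Scrub the CQ message queue when a QP is being destroyed: any pending
 * completion that still refers to the dying QP has its qp_user_context
 * cleared so that c2_poll_one() will skip it.
 */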
void c2_cq_clean(struct c2_dev *c2dev, struct c2_qp *qp, u32 mq_index)
{
	struct c2_cq *cq;
	struct c2_mq *q;

	cq = c2_cq_get(c2dev, mq_index);
	if (!cq)
		return;

	spin_lock_irq(&cq->lock);
	q = &cq->mq;
	if (q && !c2_mq_empty(q)) {
		u16 priv = q->priv;
		struct c2wr_ce *msg;

		while (priv != be16_to_cpu(*q->shared)) {
			msg = (struct c2wr_ce *)
				(q->msg_pool.host + priv * q->msg_size);
			if (msg->qp_user_context == (u64) (unsigned long) qp) {
				msg->qp_user_context = (u64) 0;
			}
			priv = (priv + 1) % q->q_size;
		}
	}
	spin_unlock_irq(&cq->lock);

	c2_cq_put(cq);
}

static inline enum ib_wc_status c2_cqe_status_to_openib(u8 status)
{
	switch (status) {
	case C2_OK:
		return IB_WC_SUCCESS;
	case CCERR_FLUSHED:
		return IB_WC_WR_FLUSH_ERR;
	case CCERR_BASE_AND_BOUNDS_VIOLATION:
		return IB_WC_LOC_PROT_ERR;
	case CCERR_ACCESS_VIOLATION:
		return IB_WC_LOC_ACCESS_ERR;
	case CCERR_TOTAL_LENGTH_TOO_BIG:
		return IB_WC_LOC_LEN_ERR;
	case CCERR_INVALID_WINDOW:
		return IB_WC_MW_BIND_ERR;
	default:
		return IB_WC_GENERAL_ERR;
	}
}
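
/*
 * Pull one completion event off the CQ message queue and translate it
 * into an ib_wc entry; returns -EAGAIN when no completion is pending.
 */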
static inline int c2_poll_one(struct c2_dev *c2dev,
			      struct c2_cq *cq, struct ib_wc *entry)
{
	struct c2wr_ce *ce;
	struct c2_qp *qp;
	int is_recv = 0;

	ce = c2_mq_consume(&cq->mq);
	if (!ce)
		return -EAGAIN;

	/*
	 * If the qp returned is null then this qp has already
	 * been freed and we are unable to process the completion.
	 * Try pulling the next message.
	 */
	while ((qp =
		(struct c2_qp *) (unsigned long) ce->qp_user_context) == NULL) {
		c2_mq_free(&cq->mq);
		ce = c2_mq_consume(&cq->mq);
		if (!ce)
			return -EAGAIN;
	}

	entry->status = c2_cqe_status_to_openib(c2_wr_get_result(ce));
	entry->wr_id = ce->hdr.context;
	entry->qp = &qp->ibqp;
	entry->wc_flags = 0;
	entry->slid = 0;
	entry->sl = 0;
	entry->src_qp = 0;
	entry->dlid_path_bits = 0;
	entry->pkey_index = 0;

	switch (c2_wr_get_id(ce)) {
	case C2_WR_TYPE_SEND:
		entry->opcode = IB_WC_SEND;
		break;
	case C2_WR_TYPE_RDMA_WRITE:
		entry->opcode = IB_WC_RDMA_WRITE;
		break;
	case C2_WR_TYPE_RDMA_READ:
		entry->opcode = IB_WC_RDMA_READ;
		break;
	case C2_WR_TYPE_BIND_MW:
		entry->opcode = IB_WC_BIND_MW;
		break;
	case C2_WR_TYPE_RECV:
		entry->byte_len = be32_to_cpu(ce->bytes_rcvd);
		entry->opcode = IB_WC_RECV;
		is_recv = 1;
		break;
	default:
		break;
	}

	/* consume the WQEs */
	if (is_recv)
		c2_mq_lconsume(&qp->rq_mq, 1);
	else
		c2_mq_lconsume(&qp->sq_mq,
			       be32_to_cpu(c2_wr_get_wqe_count(ce)) + 1);

	/* free the message */
	c2_mq_free(&cq->mq);

	return 0;
}

int c2_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
{
	struct c2_dev *c2dev = to_c2dev(ibcq->device);
	struct c2_cq *cq = to_c2cq(ibcq);
	unsigned long flags;
	int npolled, err;

	spin_lock_irqsave(&cq->lock, flags);

	for (npolled = 0; npolled < num_entries; ++npolled) {
		err = c2_poll_one(c2dev, cq, entry + npolled);
		if (err)
			break;
	}

	spin_unlock_irqrestore(&cq->lock, flags);

	return npolled;
}

int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
{
	struct c2_mq_shared __iomem *shared;
	struct c2_cq *cq;
	unsigned long flags;
	int ret = 0;

	cq = to_c2cq(ibcq);
	shared = cq->mq.peer;

	if ((notify_flags & IB_CQ_SOLICITED_MASK) == IB_CQ_NEXT_COMP)
		writeb(C2_CQ_NOTIFICATION_TYPE_NEXT, &shared->notification_type);
	else if ((notify_flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED)
		writeb(C2_CQ_NOTIFICATION_TYPE_NEXT_SE, &shared->notification_type);
	else
		return -EINVAL;

	writeb(CQ_WAIT_FOR_DMA | CQ_ARMED, &shared->armed);

	/*
	 * Now read back shared->armed to make the PCI
	 * write synchronous. This is necessary for
	 * correct cq notification semantics.
	 */
	readb(&shared->armed);

	if (notify_flags & IB_CQ_REPORT_MISSED_EVENTS) {
		spin_lock_irqsave(&cq->lock, flags);
		ret = !c2_mq_empty(&cq->mq);
		spin_unlock_irqrestore(&cq->lock, flags);
	}

	return ret;
}
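
/*
 * Illustrative sketch (not part of this driver): a kernel consumer
 * would typically drain and re-arm its struct ib_cq *cq with the
 * standard verbs calls, re-polling whenever the arm reports missed
 * events.  The helper process_completion() below is hypothetical.
 *
 *	struct ib_wc wc;
 *
 *	do {
 *		while (ib_poll_cq(cq, 1, &wc) > 0)
 *			process_completion(&wc);
 *	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
 *				  IB_CQ_REPORT_MISSED_EVENTS) > 0);
 */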

static void c2_free_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq)
{
	dma_free_coherent(&c2dev->pcidev->dev, mq->q_size * mq->msg_size,
			  mq->msg_pool.host, dma_unmap_addr(mq, mapping));
}

static int c2_alloc_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq,
			   size_t q_size, size_t msg_size)
{
	u8 *pool_start;

	/* Guard the q_size * msg_size multiplication against overflow. */
	if (q_size > SIZE_MAX / msg_size)
		return -EINVAL;

	pool_start = dma_alloc_coherent(&c2dev->pcidev->dev, q_size * msg_size,
					&mq->host_dma, GFP_KERNEL);
	if (!pool_start)
		return -ENOMEM;

	c2_mq_rep_init(mq,
		       0,		/* index (currently unknown) */
		       q_size,
		       msg_size,
		       pool_start,
		       NULL,		/* peer (currently unknown) */
		       C2_MQ_HOST_TARGET);

	dma_unmap_addr_set(mq, mapping, mq->host_dma);

	return 0;
}
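
/*
 * Create a completion queue: allocate the host-side message pool and
 * shared pointer, then post a CCWR_CQ_CREATE request on the verbs
 * request queue and wait for the adapter's reply.
 */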
int c2_init_cq(struct c2_dev *c2dev, int entries,
	       struct c2_ucontext *ctx, struct c2_cq *cq)
{
	struct c2wr_cq_create_req wr;
	struct c2wr_cq_create_rep *reply;
	unsigned long peer_pa;
	struct c2_vq_req *vq_req;
	int err;

	might_sleep();

	cq->ibcq.cqe = entries - 1;
	cq->is_kernel = !ctx;

	/* Allocate a shared pointer */
	cq->mq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
				      &cq->mq.shared_dma, GFP_KERNEL);
	if (!cq->mq.shared)
		return -ENOMEM;

	/* Allocate pages for the message pool */
	err = c2_alloc_cq_buf(c2dev, &cq->mq, entries + 1, C2_CQ_MSG_SIZE);
	if (err)
		goto bail0;

	vq_req = vq_req_alloc(c2dev);
	if (!vq_req) {
		err = -ENOMEM;
		goto bail1;
	}

	memset(&wr, 0, sizeof(wr));
	c2_wr_set_id(&wr, CCWR_CQ_CREATE);
	wr.hdr.context = (unsigned long) vq_req;
	wr.rnic_handle = c2dev->adapter_handle;
	wr.msg_size = cpu_to_be32(cq->mq.msg_size);
	wr.depth = cpu_to_be32(cq->mq.q_size);
	wr.shared_ht = cpu_to_be64(cq->mq.shared_dma);
	wr.msg_pool = cpu_to_be64(cq->mq.host_dma);
	wr.user_context = (u64) (unsigned long) (cq);

	vq_req_get(c2dev, vq_req);

	err = vq_send_wr(c2dev, (union c2wr *) &wr);
	if (err) {
		vq_req_put(c2dev, vq_req);
		goto bail2;
	}

	err = vq_wait_for_reply(c2dev, vq_req);
	if (err)
		goto bail2;

	reply = (struct c2wr_cq_create_rep *) (unsigned long) (vq_req->reply_msg);
	if (!reply) {
		err = -ENOMEM;
		goto bail2;
	}

	if ((err = c2_errno(reply)) != 0)
		goto bail3;

	cq->adapter_handle = reply->cq_handle;
	cq->mq.index = be32_to_cpu(reply->mq_index);

	peer_pa = c2dev->pa + be32_to_cpu(reply->adapter_shared);
	cq->mq.peer = ioremap_nocache(peer_pa, PAGE_SIZE);
	if (!cq->mq.peer) {
		err = -ENOMEM;
		goto bail3;
	}

	vq_repbuf_free(c2dev, reply);
	vq_req_free(c2dev, vq_req);

	spin_lock_init(&cq->lock);
	atomic_set(&cq->refcount, 1);
	init_waitqueue_head(&cq->wait);

	/*
	 * Use the MQ index allocated by the adapter to
	 * store the CQ in the qptr_array
	 */
	cq->cqn = cq->mq.index;
	c2dev->qptr_array[cq->cqn] = cq;

	return 0;

bail3:
	vq_repbuf_free(c2dev, reply);
bail2:
	vq_req_free(c2dev, vq_req);
bail1:
	c2_free_cq_buf(c2dev, &cq->mq);
bail0:
	c2_free_mqsp(cq->mq.shared);

	return err;
}
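
/*
 * Destroy a completion queue: unhook it from qptr_array, wait for any
 * in-flight event handlers to drop their references, then ask the
 * adapter to destroy the CQ.
 */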
void c2_free_cq(struct c2_dev *c2dev, struct c2_cq *cq)
{
	int err;
	struct c2_vq_req *vq_req;
	struct c2wr_cq_destroy_req wr;
	struct c2wr_cq_destroy_rep *reply;

	might_sleep();

	/* Clear CQ from the qptr array */
	spin_lock_irq(&c2dev->lock);
	c2dev->qptr_array[cq->mq.index] = NULL;
	atomic_dec(&cq->refcount);
	spin_unlock_irq(&c2dev->lock);

	wait_event(cq->wait, !atomic_read(&cq->refcount));

	vq_req = vq_req_alloc(c2dev);
	if (!vq_req)
		goto bail0;

	memset(&wr, 0, sizeof(wr));
	c2_wr_set_id(&wr, CCWR_CQ_DESTROY);
	wr.hdr.context = (unsigned long) vq_req;
	wr.rnic_handle = c2dev->adapter_handle;
	wr.cq_handle = cq->adapter_handle;

	vq_req_get(c2dev, vq_req);

	err = vq_send_wr(c2dev, (union c2wr *) &wr);
	if (err) {
		vq_req_put(c2dev, vq_req);
		goto bail1;
	}

	err = vq_wait_for_reply(c2dev, vq_req);
	if (err)
		goto bail1;

	reply = (struct c2wr_cq_destroy_rep *) (unsigned long) (vq_req->reply_msg);
	if (reply)
		vq_repbuf_free(c2dev, reply);
bail1:
	vq_req_free(c2dev, vq_req);
bail0:
	if (cq->is_kernel)
		c2_free_cq_buf(c2dev, &cq->mq);
}