/*
 * Copyright (c) 2007, 2008, 2009 QLogic Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/uio.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/delay.h>

#include "qib.h"
#include "qib_user_sdma.h"
/* minimum size of header */
#define QIB_USER_SDMA_MIN_HEADER_LENGTH 64
/* expected size of headers (for dma_pool) */
#define QIB_USER_SDMA_EXP_HEADER_LENGTH 64
/* attempt to drain the queue for 5 seconds (500 passes of a 10 ms sleep) */
#define QIB_USER_SDMA_DRAIN_TIMEOUT 500
struct qib_user_sdma_pkt {
	u8  naddr;	/* dimension of addr (1..3) ... */
	u32 counter;	/* sdma pkts queued counter for this entry */
	u64 added;	/* global descq number of this packet's entry */

	struct {
		u32 offset;		/* offset for kvaddr, addr */
		u32 length;		/* length in page */
		u8  put_page;		/* should we put_page? */
		u8  dma_mapped;		/* is page dma_mapped? */
		struct page *page;	/* may be NULL (coherent mem) */
		void *kvaddr;		/* FIXME: only for pio hack */
		dma_addr_t addr;	/* dma addr of this fragment */
	} addr[4];	/* max pages, any more and we coalesce */

	struct list_head list;	/* list element */
};
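/*
 * Layout of addr[], as used by the code below: fragment 0 always
 * carries the packet header; payload either occupies fragments 1..3
 * as pinned user pages, or, when it would need more fragments than
 * the array allows, is copied into a single kernel page (see
 * qib_user_sdma_coalesce()) and carried as fragment 1 alone.
 */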
struct qib_user_sdma_queue {
	/*
	 * pkts sent to dma engine are queued on this
	 * list head.  the type of the elements of this
	 * list are struct qib_user_sdma_pkt...
	 */
	struct list_head sent;

	/* headers with expected length are allocated from here... */
	char header_cache_name[64];
	struct dma_pool *header_cache;

	/* packets are allocated from the slab cache... */
	char pkt_slab_name[64];
	struct kmem_cache *pkt_slab;

	/* as packets go on the queued queue, they are counted... */
	u32 counter;
	u32 sent_counter;

	/* dma page table */
	struct rb_root dma_pages_root;

	/* protect everything above... */
	struct mutex lock;
};
struct qib_user_sdma_queue *
qib_user_sdma_queue_create(struct device *dev, int unit, int ctxt, int sctxt)
{
	struct qib_user_sdma_queue *pq =
		kmalloc(sizeof(struct qib_user_sdma_queue), GFP_KERNEL);

	if (!pq)
		goto done;

	pq->counter = 0;
	pq->sent_counter = 0;
	INIT_LIST_HEAD(&pq->sent);
	mutex_init(&pq->lock);

	snprintf(pq->pkt_slab_name, sizeof(pq->pkt_slab_name),
		 "qib-user-sdma-pkts-%u-%02u.%02u", unit, ctxt, sctxt);
	pq->pkt_slab = kmem_cache_create(pq->pkt_slab_name,
					 sizeof(struct qib_user_sdma_pkt),
					 0, 0, NULL);
	if (!pq->pkt_slab)
		goto err_kfree;

	snprintf(pq->header_cache_name, sizeof(pq->header_cache_name),
		 "qib-user-sdma-headers-%u-%02u.%02u", unit, ctxt, sctxt);
	pq->header_cache = dma_pool_create(pq->header_cache_name, dev,
					   QIB_USER_SDMA_EXP_HEADER_LENGTH,
					   4, 0);
	if (!pq->header_cache)
		goto err_slab;

	pq->dma_pages_root = RB_ROOT;
	goto done;

err_slab:
	kmem_cache_destroy(pq->pkt_slab);
err_kfree:
	kfree(pq);
	pq = NULL;

done:
	return pq;
}
static void qib_user_sdma_init_frag(struct qib_user_sdma_pkt *pkt,
				    int i, size_t offset, size_t len,
				    int put_page, int dma_mapped,
				    struct page *page,
				    void *kvaddr, dma_addr_t dma_addr)
{
	pkt->addr[i].offset = offset;
	pkt->addr[i].length = len;
	pkt->addr[i].put_page = put_page;
	pkt->addr[i].dma_mapped = dma_mapped;
	pkt->addr[i].page = page;
	pkt->addr[i].kvaddr = kvaddr;
	pkt->addr[i].addr = dma_addr;
}
static void qib_user_sdma_init_header(struct qib_user_sdma_pkt *pkt,
				      u32 counter, size_t offset,
				      size_t len, int dma_mapped,
				      struct page *page,
				      void *kvaddr, dma_addr_t dma_addr)
{
	pkt->naddr = 1;
	pkt->counter = counter;
	qib_user_sdma_init_frag(pkt, 0, offset, len, 0, dma_mapped, page,
				kvaddr, dma_addr);
}
/* we have too many pages in the iovec, coalesce to a single page */
static int qib_user_sdma_coalesce(const struct qib_devdata *dd,
				  struct qib_user_sdma_pkt *pkt,
				  const struct iovec *iov,
				  unsigned long niov)
{
	struct page *page = alloc_page(GFP_KERNEL);
	void *mpage_save;
	char *mpage;
	int ret = 0;
	int i;
	int len = 0;
	dma_addr_t dma_addr;

	if (!page)
		return -ENOMEM;

	mpage = kmap(page);
	mpage_save = mpage;
	for (i = 0; i < niov; i++) {
		int cfur;

		cfur = copy_from_user(mpage,
				      iov[i].iov_base, iov[i].iov_len);
		if (cfur) {
			ret = -EFAULT;
			goto free_unmap;
		}

		mpage += iov[i].iov_len;
		len += iov[i].iov_len;
	}

	dma_addr = dma_map_page(&dd->pcidev->dev, page, 0, len,
				DMA_TO_DEVICE);
	if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
		ret = -ENOMEM;
		goto free_unmap;
	}

	/* the coalesced page becomes payload fragment 1 (0 is the header) */
	qib_user_sdma_init_frag(pkt, 1, 0, len, 0, 1, page, mpage_save,
				dma_addr);
	pkt->naddr = 2;
	return 0;

free_unmap:
	kunmap(page);
	__free_page(page);
	return ret;
}
/*
 * How many pages in this iovec element?
 */
static int qib_user_sdma_num_pages(const struct iovec *iov)
{
	const unsigned long addr = (unsigned long) iov->iov_base;
	const unsigned long len = iov->iov_len;
	const unsigned long spage = addr & PAGE_MASK;
	const unsigned long epage = (addr + len - 1) & PAGE_MASK;

	return 1 + ((epage - spage) >> PAGE_SHIFT);
}
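/*
 * Worked example (illustrative, assuming 4 KB pages): iov_base =
 * 0x1ff0 and iov_len = 0x20 give spage = 0x1000 and epage = 0x2000,
 * so the element spans 1 + ((0x2000 - 0x1000) >> 12) = 2 pages.
 */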
/*
 * Truncate length to page boundary.
 */
static int qib_user_sdma_page_length(unsigned long addr, unsigned long len)
{
	const unsigned long offset = addr & ~PAGE_MASK;

	return ((offset + len) > PAGE_SIZE) ? (PAGE_SIZE - offset) : len;
}
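/*
 * Worked example (illustrative, 4 KB pages): addr = 0x1ff0 gives
 * offset = 0xff0 within its page; with len = 0x100, offset + len =
 * 0x10f0 exceeds PAGE_SIZE, so only PAGE_SIZE - offset = 0x10 bytes
 * fit in this page and the remaining bytes fall to the next call.
 */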
static void qib_user_sdma_free_pkt_frag(struct device *dev,
					struct qib_user_sdma_queue *pq,
					struct qib_user_sdma_pkt *pkt,
					int frag)
{
	const int i = frag;

	if (pkt->addr[i].page) {
		if (pkt->addr[i].dma_mapped)
			dma_unmap_page(dev,
				       pkt->addr[i].addr,
				       pkt->addr[i].length,
				       DMA_TO_DEVICE);

		if (pkt->addr[i].kvaddr)
			kunmap(pkt->addr[i].page);

		if (pkt->addr[i].put_page)
			put_page(pkt->addr[i].page);
		else
			__free_page(pkt->addr[i].page);
	} else if (pkt->addr[i].kvaddr)
		/* free coherent mem from cache... */
		dma_pool_free(pq->header_cache,
			      pkt->addr[i].kvaddr, pkt->addr[i].addr);
}
/* return number of pages pinned... */
static int qib_user_sdma_pin_pages(const struct qib_devdata *dd,
				   struct qib_user_sdma_pkt *pkt,
				   unsigned long addr, int tlen, int npages)
{
	struct page *pages[2];
	int j;
	int ret;

	ret = get_user_pages_fast(addr, npages, 0, pages);
	if (ret != npages) {
		int i;

		for (i = 0; i < ret; i++)
			put_page(pages[i]);

		ret = -ENOMEM;
		goto done;
	}

	for (j = 0; j < npages; j++) {
		/* map the pages... */
		const int flen = qib_user_sdma_page_length(addr, tlen);
		dma_addr_t dma_addr =
			dma_map_page(&dd->pcidev->dev,
				     pages[j], 0, flen, DMA_TO_DEVICE);
		unsigned long fofs = addr & ~PAGE_MASK;

		if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
			ret = -ENOMEM;
			goto done;
		}

		qib_user_sdma_init_frag(pkt, pkt->naddr, fofs, flen, 1, 1,
					pages[j], kmap(pages[j]), dma_addr);

		pkt->naddr++;
		addr += flen;
		tlen -= flen;
	}

done:
	return ret;
}
static int qib_user_sdma_pin_pkt(const struct qib_devdata *dd,
				 struct qib_user_sdma_queue *pq,
				 struct qib_user_sdma_pkt *pkt,
				 const struct iovec *iov,
				 unsigned long niov)
{
	int ret = 0;
	unsigned long idx;

	for (idx = 0; idx < niov; idx++) {
		const int npages = qib_user_sdma_num_pages(iov + idx);
		const unsigned long addr = (unsigned long) iov[idx].iov_base;

		ret = qib_user_sdma_pin_pages(dd, pkt, addr,
					      iov[idx].iov_len, npages);
		if (ret < 0)
			goto free_pkt;
	}

	ret = 0;
	goto done;

free_pkt:
	for (idx = 0; idx < pkt->naddr; idx++)
		qib_user_sdma_free_pkt_frag(&dd->pcidev->dev, pq, pkt, idx);

done:
	return ret;
}
static int qib_user_sdma_init_payload(const struct qib_devdata *dd,
				      struct qib_user_sdma_queue *pq,
				      struct qib_user_sdma_pkt *pkt,
				      const struct iovec *iov,
				      unsigned long niov, int npages)
{
	int ret = 0;

	if (npages >= ARRAY_SIZE(pkt->addr))
		ret = qib_user_sdma_coalesce(dd, pkt, iov, niov);
	else
		ret = qib_user_sdma_pin_pkt(dd, pq, pkt, iov, niov);

	return ret;
}
/* free a packet list */
static void qib_user_sdma_free_pkt_list(struct device *dev,
					struct qib_user_sdma_queue *pq,
					struct list_head *list)
{
	struct qib_user_sdma_pkt *pkt, *pkt_next;

	list_for_each_entry_safe(pkt, pkt_next, list, list) {
		int i;

		for (i = 0; i < pkt->naddr; i++)
			qib_user_sdma_free_pkt_frag(dev, pq, pkt, i);

		kmem_cache_free(pq->pkt_slab, pkt);
	}

	INIT_LIST_HEAD(list);
}
/*
 * copy headers, coalesce etc -- pq->lock must be held
 *
 * we queue all the packets to list, returning the number of iovec
 * elements consumed.  list must be empty initially, as, if there is
 * an error we clean it...
 */
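/*
 * Expected iovec layout, as enforced by the checks below: element 0
 * of each packet is the header (PBC plus headers, 64..PAGE_SIZE
 * bytes, 4-byte aligned) and the following elements are payload
 * fragments whose dword counts must add up to the length the PBC
 * advertises.
 */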
static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
				    struct qib_user_sdma_queue *pq,
				    struct list_head *list,
				    const struct iovec *iov,
				    unsigned long niov,
				    int maxpkts)
{
	unsigned long idx = 0;
	int ret = 0;
	int npkts = 0;
	struct page *page = NULL;
	__le32 *pbc;
	dma_addr_t dma_addr;
	struct qib_user_sdma_pkt *pkt = NULL;
	size_t len;
	size_t nw;
	u32 counter = pq->counter;
	int dma_mapped = 0;

	while (idx < niov && npkts < maxpkts) {
		const unsigned long addr = (unsigned long) iov[idx].iov_base;
		const unsigned long idx_save = idx;
		unsigned pktnw;
		unsigned pktnwc;
		int nfrags = 0;
		int npages = 0;
		int cfur;

		dma_mapped = 0;
		len = iov[idx].iov_len;
		nw = len >> 2;
		page = NULL;
		pkt = kmem_cache_alloc(pq->pkt_slab, GFP_KERNEL);
		if (!pkt) {
			ret = -ENOMEM;
			goto free_list;
		}

		if (len < QIB_USER_SDMA_MIN_HEADER_LENGTH ||
		    len > PAGE_SIZE || len & 3 || addr & 3) {
			ret = -EINVAL;
			goto free_pkt;
		}

		if (len == QIB_USER_SDMA_EXP_HEADER_LENGTH)
			pbc = dma_pool_alloc(pq->header_cache, GFP_KERNEL,
					     &dma_addr);
		else
			pbc = NULL;

		if (!pbc) {
			page = alloc_page(GFP_KERNEL);
			if (!page) {
				ret = -ENOMEM;
				goto free_pkt;
			}
			pbc = kmap(page);
		}

		cfur = copy_from_user(pbc, iov[idx].iov_base, len);
		if (cfur) {
			ret = -EFAULT;
			goto free_pbc;
		}
		/*
		 * This assignment is a bit strange.  it's because
		 * the pbc counts the number of 32 bit words in the full
		 * packet _except_ the first word of the pbc itself...
		 */
		pktnwc = nw - 1;

		/*
		 * pktnw computation yields the number of 32 bit words
		 * that the caller has indicated in the PBC.  note that
		 * this is one less than the total number of words that
		 * goes to the send DMA engine as the first 32 bit word
		 * of the PBC itself is not counted.  Armed with this count,
		 * we can verify that the packet is consistent with the
		 * iovec lengths.
		 */
		pktnw = le32_to_cpu(*pbc) & QIB_PBC_LENGTH_MASK;
		if (pktnw < pktnwc || pktnw > pktnwc + (PAGE_SIZE >> 2)) {
			ret = -EINVAL;
			goto free_pbc;
		}
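		/*
		 * Worked example (illustrative): a 64-byte header is
		 * nw = 16 words, so pktnwc starts at 15; a PBC that
		 * advertises pktnw = 527 words leaves 512 words
		 * (2048 bytes) to be supplied by the payload
		 * fragments gathered below.
		 */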
		idx++;
		while (pktnwc < pktnw && idx < niov) {
			const size_t slen = iov[idx].iov_len;
			const unsigned long faddr =
				(unsigned long) iov[idx].iov_base;

			if (slen & 3 || faddr & 3 || !slen ||
			    slen > PAGE_SIZE) {
				ret = -EINVAL;
				goto free_pbc;
			}

			npages++;
			/* a fragment that spans two pages needs both */
			if ((faddr & PAGE_MASK) !=
			    ((faddr + slen - 1) & PAGE_MASK))
				npages++;

			pktnwc += slen >> 2;
			idx++;
			nfrags++;
		}

		if (pktnwc != pktnw) {
			ret = -EINVAL;
			goto free_pbc;
		}

		if (page) {
			/* the header was not from the dma_pool; map its page */
			dma_addr = dma_map_page(&dd->pcidev->dev,
						page, 0, len, DMA_TO_DEVICE);
			if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
				ret = -ENOMEM;
				goto free_pbc;
			}

			dma_mapped = 1;
		}

		qib_user_sdma_init_header(pkt, counter, 0, len, dma_mapped,
					  page, pbc, dma_addr);
		if (nfrags) {
			ret = qib_user_sdma_init_payload(dd, pq, pkt,
							 iov + idx_save + 1,
							 nfrags, npages);
			if (ret < 0)
				goto free_pbc_dma;
		}

		counter++;
		npkts++;

		list_add_tail(&pkt->list, list);
	}

	ret = idx;
	goto done;

free_pbc_dma:
	if (dma_mapped)
		dma_unmap_page(&dd->pcidev->dev, dma_addr, len, DMA_TO_DEVICE);
free_pbc:
	if (page) {
		kunmap(page);
		__free_page(page);
	} else
		dma_pool_free(pq->header_cache, pbc, dma_addr);
free_pkt:
	kmem_cache_free(pq->pkt_slab, pkt);
free_list:
	qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, list);
done:
	return ret;
}
static void qib_user_sdma_set_complete_counter(struct qib_user_sdma_queue *pq,
					       u32 c)
{
	pq->sent_counter = c;
}
/* try to clean out queue -- needs pq->lock */
static int qib_user_sdma_queue_clean(struct qib_pportdata *ppd,
				     struct qib_user_sdma_queue *pq)
{
	struct qib_devdata *dd = ppd->dd;
	struct list_head free_list;
	struct qib_user_sdma_pkt *pkt;
	struct qib_user_sdma_pkt *pkt_prev;
	int ret = 0;

	INIT_LIST_HEAD(&free_list);

	list_for_each_entry_safe(pkt, pkt_prev, &pq->sent, list) {
		s64 descd = ppd->sdma_descq_removed - pkt->added;

		if (descd < 0)
			break;

		list_move_tail(&pkt->list, &free_list);

		/* one more packet cleaned */
		ret++;
	}

	if (!list_empty(&free_list)) {
		u32 counter;

		pkt = list_entry(free_list.prev,
				 struct qib_user_sdma_pkt, list);
		counter = pkt->counter;

		qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
		qib_user_sdma_set_complete_counter(pq, counter);
	}

	return ret;
}
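/*
 * Completion test in qib_user_sdma_queue_clean(), illustrated with
 * assumed numbers: if the engine's sdma_descq_removed count has
 * reached 100, a packet whose added mark is 90 gives descd >= 0 and
 * is retired, while one marked 110 gives descd < 0 and stays on the
 * sent list for a later pass.
 */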
void qib_user_sdma_queue_destroy(struct qib_user_sdma_queue *pq)
{
	if (!pq)
		return;

	kmem_cache_destroy(pq->pkt_slab);
	dma_pool_destroy(pq->header_cache);
	kfree(pq);
}
/* clean descriptor queue, returns > 0 if some elements cleaned */
static int qib_user_sdma_hwqueue_clean(struct qib_pportdata *ppd)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&ppd->sdma_lock, flags);
	ret = qib_sdma_make_progress(ppd);
	spin_unlock_irqrestore(&ppd->sdma_lock, flags);

	return ret;
}
/* we're in close, drain packets so that we can cleanup successfully... */
void qib_user_sdma_queue_drain(struct qib_pportdata *ppd,
			       struct qib_user_sdma_queue *pq)
{
	struct qib_devdata *dd = ppd->dd;
	int i;

	if (!pq)
		return;

	for (i = 0; i < QIB_USER_SDMA_DRAIN_TIMEOUT; i++) {
		mutex_lock(&pq->lock);
		if (list_empty(&pq->sent)) {
			mutex_unlock(&pq->lock);
			break;
		}
		qib_user_sdma_hwqueue_clean(ppd);
		qib_user_sdma_queue_clean(ppd, pq);
		mutex_unlock(&pq->lock);
		msleep(10);
	}

	if (!list_empty(&pq->sent)) {
		struct list_head free_list;

		qib_dev_err(dd, "user sdma lists not empty: forcing!\n");
		INIT_LIST_HEAD(&free_list);
		mutex_lock(&pq->lock);
		list_splice_init(&pq->sent, &free_list);
		qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
		mutex_unlock(&pq->lock);
	}
}
static inline __le64 qib_sdma_make_desc0(struct qib_pportdata *ppd,
					 u64 addr, u64 dwlen, u64 dwoffset)
{
	u8 tmpgen;

	tmpgen = ppd->sdma_generation;

	return cpu_to_le64(/* SDmaPhyAddr[31:0] */
			   ((addr & 0xfffffffcULL) << 32) |
			   /* SDmaGeneration[1:0] */
			   ((tmpgen & 3ULL) << 30) |
			   /* SDmaDwordCount[10:0] */
			   ((dwlen & 0x7ffULL) << 16) |
			   /* SDmaBufOffset[12:2] */
			   (dwoffset & 0x7ffULL));
}
static inline __le64 qib_sdma_make_first_desc0(__le64 descq)
{
	return descq | cpu_to_le64(1ULL << 12);
}

static inline __le64 qib_sdma_make_last_desc0(__le64 descq)
{
	/* last */ /* dma head */
	return descq | cpu_to_le64(1ULL << 11 | 1ULL << 13);
}

static inline __le64 qib_sdma_make_desc1(u64 addr)
{
	/* SDmaPhyAddr[47:32] */
	return cpu_to_le64(addr >> 32);
}
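/*
 * Worked descriptor example (illustrative only): a fragment at bus
 * address 0x1_2345_6780 with dwlen = 16 and dwoffset = 0 yields
 *   qw[0] = (0x23456780ULL << 32) | (gen << 30) | (16 << 16)
 *   qw[1] = 0x1                        (SDmaPhyAddr[47:32])
 * with bit 12 set on the first descriptor of a packet and bits 11
 * and 13 (last, dma-head) set on the final one.
 */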
static void qib_user_sdma_send_frag(struct qib_pportdata *ppd,
				    struct qib_user_sdma_pkt *pkt, int idx,
				    unsigned ofs, u16 tail)
{
	const u64 addr = (u64) pkt->addr[idx].addr +
		(u64) pkt->addr[idx].offset;
	const u64 dwlen = (u64) pkt->addr[idx].length / 4;
	__le64 *descqp;
	__le64 descq0;

	descqp = &ppd->sdma_descq[tail].qw[0];

	descq0 = qib_sdma_make_desc0(ppd, addr, dwlen, ofs);
	if (idx == 0)
		descq0 = qib_sdma_make_first_desc0(descq0);
	if (idx == pkt->naddr - 1)
		descq0 = qib_sdma_make_last_desc0(descq0);

	descqp[0] = descq0;
	descqp[1] = qib_sdma_make_desc1(addr);
}
/* pq->lock must be held, get packets on the wire... */
static int qib_user_sdma_push_pkts(struct qib_pportdata *ppd,
				   struct qib_user_sdma_queue *pq,
				   struct list_head *pktlist)
{
	struct qib_devdata *dd = ppd->dd;
	int ret = 0;
	unsigned long flags;
	u16 tail;
	u8 generation;
	u64 descq_added;

	if (list_empty(pktlist))
		return 0;

	if (unlikely(!(ppd->lflags & QIBL_LINKACTIVE)))
		return -ECOMM;

	spin_lock_irqsave(&ppd->sdma_lock, flags);

	/* keep a copy for restoring purposes in case of problems */
	generation = ppd->sdma_generation;
	descq_added = ppd->sdma_descq_added;

	if (unlikely(!__qib_sdma_running(ppd))) {
		ret = -ECOMM;
		goto unlock;
	}

	tail = ppd->sdma_descq_tail;
	while (!list_empty(pktlist)) {
		struct qib_user_sdma_pkt *pkt =
			list_entry(pktlist->next, struct qib_user_sdma_pkt,
				   list);
		int i;
		unsigned ofs = 0;
		u16 dtail = tail;

		if (pkt->naddr > qib_sdma_descq_freecnt(ppd))
			goto unlock_check_tail;
		for (i = 0; i < pkt->naddr; i++) {
			qib_user_sdma_send_frag(ppd, pkt, i, ofs, tail);
			ofs += pkt->addr[i].length >> 2;

			if (++tail == ppd->sdma_descq_cnt) {
				tail = 0;
				++ppd->sdma_generation;
			}
		}

		if ((ofs << 2) > ppd->ibmaxlen) {
			ret = -EMSGSIZE;
			goto unlock;
		}

		/*
		 * If the packet is >= 2KB mtu equivalent, we have to use
		 * the large buffers, and have to mark each descriptor as
		 * part of a large buffer packet.
		 */
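		/*
		 * For illustration (values assumed): 2 KB PIO buffers
		 * give piosize2kmax_dwords = 512, so a 576-dword packet
		 * takes the large-buffer path below and bit 14 is set
		 * in qw[0] of each of its descriptors.
		 */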
		if (ofs > dd->piosize2kmax_dwords) {
			for (i = 0; i < pkt->naddr; i++) {
				ppd->sdma_descq[dtail].qw[0] |=
					cpu_to_le64(1ULL << 14);
				if (++dtail == ppd->sdma_descq_cnt)
					dtail = 0;
			}
		}

		ppd->sdma_descq_added += pkt->naddr;
		pkt->added = ppd->sdma_descq_added;
		list_move_tail(&pkt->list, &pq->sent);
		ret++;
	}
unlock_check_tail:
	/* advance the tail on the chip if necessary */
	if (ppd->sdma_descq_tail != tail)
		dd->f_sdma_update_tail(ppd, tail);

unlock:
	if (unlikely(ret < 0)) {
		ppd->sdma_generation = generation;
		ppd->sdma_descq_added = descq_added;
	}
	spin_unlock_irqrestore(&ppd->sdma_lock, flags);

	return ret;
}
int qib_user_sdma_writev(struct qib_ctxtdata *rcd,
			 struct qib_user_sdma_queue *pq,
			 const struct iovec *iov,
			 unsigned long dim)
{
	struct qib_devdata *dd = rcd->dd;
	struct qib_pportdata *ppd = rcd->ppd;
	int ret = 0;
	struct list_head list;
	int npkts = 0;

	INIT_LIST_HEAD(&list);

	mutex_lock(&pq->lock);

	/* why not -ECOMM like qib_user_sdma_push_pkts() below? */
	if (!qib_sdma_running(ppd))
		goto done_unlock;

	if (ppd->sdma_descq_added != ppd->sdma_descq_removed) {
		qib_user_sdma_hwqueue_clean(ppd);
		qib_user_sdma_queue_clean(ppd, pq);
	}

	while (dim) {
		const int mxp = 8;

		ret = qib_user_sdma_queue_pkts(dd, pq, &list, iov, dim, mxp);
		if (ret <= 0)
			goto done_unlock;

		dim -= ret;
		iov += ret;
		/* force packets onto the sdma hw queue... */
		if (!list_empty(&list)) {
			/*
			 * Lazily clean hw queue.  the 4 is a guess of about
			 * how many sdma descriptors a packet will take (it
			 * doesn't have to be perfect).
			 */
			if (qib_sdma_descq_freecnt(ppd) < ret * 4) {
				qib_user_sdma_hwqueue_clean(ppd);
				qib_user_sdma_queue_clean(ppd, pq);
			}

			ret = qib_user_sdma_push_pkts(ppd, pq, &list);
			if (ret < 0)
				goto done_unlock;

			npkts += ret;
			pq->counter += ret;
			/* packets remain on the list: ring full, stop */
			if (!list_empty(&list))
				goto done_unlock;
		}
	}

done_unlock:
	if (!list_empty(&list))
		qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &list);
	mutex_unlock(&pq->lock);

	return (ret < 0) ? ret : npkts;
}
int qib_user_sdma_make_progress(struct qib_pportdata *ppd,
				struct qib_user_sdma_queue *pq)
{
	int ret = 0;

	mutex_lock(&pq->lock);
	qib_user_sdma_hwqueue_clean(ppd);
	ret = qib_user_sdma_queue_clean(ppd, pq);
	mutex_unlock(&pq->lock);

	return ret;
}
u32 qib_user_sdma_complete_counter(const struct qib_user_sdma_queue *pq)
{
	return pq ? pq->sent_counter : 0;
}
u32 qib_user_sdma_inflight_counter(struct qib_user_sdma_queue *pq)
{
	return pq ? pq->counter : 0;
}