/*
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 Intel Corporation. All rights reserved.
 * Copyright (C) 2015 EMC Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 Intel Corporation. All rights reserved.
 * Copyright (C) 2015 EMC Corporation. All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * PCIe NTB Transport Linux driver
 *
 * Contact Information:
 * Jon Mason <jon.mason@intel.com>
 */
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/types.h>
#include "linux/ntb.h"
#include "linux/ntb_transport.h"

#define NTB_TRANSPORT_VERSION 4
#define NTB_TRANSPORT_VER "4"
#define NTB_TRANSPORT_NAME "ntb_transport"
#define NTB_TRANSPORT_DESC "Software Queue-Pair Transport over NTB"

MODULE_DESCRIPTION(NTB_TRANSPORT_DESC);
MODULE_VERSION(NTB_TRANSPORT_VER);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel Corporation");
static unsigned long max_mw_size;
module_param(max_mw_size, ulong, 0644);
MODULE_PARM_DESC(max_mw_size, "Limit size of large memory windows");

static unsigned int transport_mtu = 0x401E;
module_param(transport_mtu, uint, 0644);
MODULE_PARM_DESC(transport_mtu, "Maximum size of NTB transport packets");

static unsigned char max_num_clients;
module_param(max_num_clients, byte, 0644);
MODULE_PARM_DESC(max_num_clients, "Maximum number of NTB transport clients");

static unsigned int copy_bytes = 1024;
module_param(copy_bytes, uint, 0644);
MODULE_PARM_DESC(copy_bytes, "Threshold under which NTB will use the CPU to copy instead of DMA");
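/*
 * Example (hypothetical values, not from this file): the parameters above
 * can be set at module load time, e.g. to cap each memory window at 16 MiB
 * and raise the CPU-copy threshold to 4 KiB:
 *
 *	modprobe ntb_transport max_mw_size=0x1000000 copy_bytes=4096
 */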
static struct dentry *nt_debugfs_dir;

struct ntb_queue_entry {
	/* ntb_queue list reference */
	struct list_head entry;
	/* pointers to data to be transferred */
	void *cb_data;
	void *buf;
	unsigned int len;
	unsigned int flags;
	unsigned int index;

	struct ntb_transport_qp *qp;
	struct ntb_payload_header __iomem *tx_hdr;
	struct ntb_payload_header *rx_hdr;
};
struct ntb_rx_info {
	unsigned int entry;
};

struct ntb_transport_qp {
	struct ntb_transport_ctx *transport;
	struct ntb_dev *ndev;
	void *cb_data;
	struct dma_chan *dma_chan;

	bool client_ready;
	bool link_is_up;

	u8 qp_num;	/* Only 64 QPs are allowed, 0-63 */

	struct ntb_rx_info __iomem *rx_info;
	struct ntb_rx_info *remote_rx_info;

	void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data,
			   void *data, int len);
	struct list_head tx_free_q;
	spinlock_t ntb_tx_free_q_lock;
	void __iomem *tx_mw;
	dma_addr_t tx_mw_phys;
	unsigned int tx_index;
	unsigned int tx_max_entry;
	unsigned int tx_max_frame;

	void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data,
			   void *data, int len);
	struct list_head rx_pend_q;
	struct list_head rx_free_q;
	spinlock_t ntb_rx_pend_q_lock;
	spinlock_t ntb_rx_free_q_lock;
	void *rx_buff;
	unsigned int rx_index;
	unsigned int rx_max_entry;
	unsigned int rx_max_frame;
	dma_cookie_t last_cookie;
	struct tasklet_struct rxc_db_work;

	void (*event_handler)(void *data, int status);
	struct delayed_work link_work;
	struct work_struct link_cleanup;

	struct dentry *debugfs_dir;
	struct dentry *debugfs_stats;

	/* Stats */
	u64 rx_bytes;
	u64 rx_pkts;
	u64 rx_ring_empty;
	u64 rx_err_no_buf;
	u64 rx_err_oflow;
	u64 rx_err_ver;
	u64 rx_memcpy;
	u64 rx_async;
	u64 tx_bytes;
	u64 tx_pkts;
	u64 tx_ring_full;
	u64 tx_err_no_buf;
	u64 tx_memcpy;
	u64 tx_async;
};
struct ntb_transport_mw {
	phys_addr_t phys_addr;
	resource_size_t phys_size;
	resource_size_t xlat_align;
	resource_size_t xlat_align_size;
	void __iomem *vbase;
	size_t xlat_size;
	size_t buff_size;
	void *virt_addr;
	dma_addr_t dma_addr;
};
struct ntb_transport_client_dev {
	struct list_head entry;
	struct ntb_transport_ctx *nt;
	struct device dev;
};
struct ntb_transport_ctx {
	struct list_head entry;
	struct list_head client_devs;

	struct ntb_dev *ndev;

	struct ntb_transport_mw *mw_vec;
	struct ntb_transport_qp *qp_vec;
	unsigned int mw_count;
	unsigned int qp_count;
	u64 qp_bitmap;
	u64 qp_bitmap_free;

	bool link_is_up;
	struct delayed_work link_work;
	struct work_struct link_cleanup;
};
enum {
	DESC_DONE_FLAG = BIT(0),
	LINK_DOWN_FLAG = BIT(1),
};

struct ntb_payload_header {
	unsigned int ver;
	unsigned int len;
	unsigned int flags;
};
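/*
 * Scratchpad register layout assumed by the link negotiation below. The
 * enum itself was elided from this excerpt; this reconstruction follows
 * the "MW0_SZ_HIGH + (i * 2)" indexing used in ntb_transport_link_work,
 * so treat the exact ordering as an assumption.
 */
enum {
	VERSION = 0,
	QP_LINKS,
	NUM_QPS,
	NUM_MWS,
	MW0_SZ_HIGH,
	MW0_SZ_LOW,
	MW1_SZ_HIGH,
	MW1_SZ_LOW,
	MAX_SPAD,
};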
#define dev_client_dev(__dev) \
	container_of((__dev), struct ntb_transport_client_dev, dev)

#define drv_client(__drv) \
	container_of((__drv), struct ntb_transport_client, driver)

#define QP_TO_MW(nt, qp) ((qp) % nt->mw_count)
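/*
 * e.g. with mw_count == 2 (illustrative numbers only): qps 0, 2, 4, ...
 * share memory window 0 and qps 1, 3, 5, ... share memory window 1.
 */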
#define NTB_QP_DEF_NUM_ENTRIES 100
#define NTB_LINK_DOWN_TIMEOUT 10

static void ntb_transport_rxc_db(unsigned long data);
static const struct ntb_ctx_ops ntb_transport_ops;
static struct ntb_client ntb_transport_client;

static int ntb_transport_bus_match(struct device *dev,
				   struct device_driver *drv)
	return !strncmp(dev_name(dev), drv->name, strlen(drv->name));

static int ntb_transport_bus_probe(struct device *dev)
	const struct ntb_transport_client *client;

	client = drv_client(dev->driver);
	rc = client->probe(dev);

static int ntb_transport_bus_remove(struct device *dev)
	const struct ntb_transport_client *client;

	client = drv_client(dev->driver);

static struct bus_type ntb_transport_bus = {
	.name = "ntb_transport",
	.match = ntb_transport_bus_match,
	.probe = ntb_transport_bus_probe,
	.remove = ntb_transport_bus_remove,

static LIST_HEAD(ntb_transport_list);

static int ntb_bus_init(struct ntb_transport_ctx *nt)
	list_add(&nt->entry, &ntb_transport_list);

static void ntb_bus_remove(struct ntb_transport_ctx *nt)
	struct ntb_transport_client_dev *client_dev, *cd;

	list_for_each_entry_safe(client_dev, cd, &nt->client_devs, entry) {
		dev_err(client_dev->dev.parent, "%s still attached to bus, removing\n",
			dev_name(&client_dev->dev));
		list_del(&client_dev->entry);
		device_unregister(&client_dev->dev);

	list_del(&nt->entry);

static void ntb_transport_client_release(struct device *dev)
	struct ntb_transport_client_dev *client_dev;

	client_dev = dev_client_dev(dev);
/**
 * ntb_transport_unregister_client_dev - Unregister NTB client device
 * @device_name: Name of NTB client device
 *
 * Unregister an NTB client device with the NTB transport layer
 */
void ntb_transport_unregister_client_dev(char *device_name)
	struct ntb_transport_client_dev *client, *cd;
	struct ntb_transport_ctx *nt;

	list_for_each_entry(nt, &ntb_transport_list, entry)
		list_for_each_entry_safe(client, cd, &nt->client_devs, entry)
			if (!strncmp(dev_name(&client->dev), device_name,
				     strlen(device_name))) {
				list_del(&client->entry);
				device_unregister(&client->dev);

EXPORT_SYMBOL_GPL(ntb_transport_unregister_client_dev);

/**
 * ntb_transport_register_client_dev - Register NTB client device
 * @device_name: Name of NTB client device
 *
 * Register an NTB client device with the NTB transport layer
 */
int ntb_transport_register_client_dev(char *device_name)
	struct ntb_transport_client_dev *client_dev;
	struct ntb_transport_ctx *nt;

	if (list_empty(&ntb_transport_list))

	list_for_each_entry(nt, &ntb_transport_list, entry) {

		client_dev = kzalloc(sizeof(*client_dev),

		dev = &client_dev->dev;

		/* setup and register client devices */
		dev_set_name(dev, "%s%d", device_name, i);
		dev->bus = &ntb_transport_bus;
		dev->release = ntb_transport_client_release;
		dev->parent = &nt->ndev->dev;

		rc = device_register(dev);

		list_add_tail(&client_dev->entry, &nt->client_devs);

	ntb_transport_unregister_client_dev(device_name);

EXPORT_SYMBOL_GPL(ntb_transport_register_client_dev);
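/*
 * Example (a sketch; "my_dev" is a hypothetical name): a client module
 * typically registers a named device per transport instance at init time
 * and unregisters it on exit:
 *
 *	rc = ntb_transport_register_client_dev("my_dev");
 *	...
 *	ntb_transport_unregister_client_dev("my_dev");
 */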
/**
 * ntb_transport_register_client - Register NTB client driver
 * @drv: NTB client driver to be registered
 *
 * Register an NTB client driver with the NTB transport layer
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int ntb_transport_register_client(struct ntb_transport_client *drv)
	drv->driver.bus = &ntb_transport_bus;

	if (list_empty(&ntb_transport_list))

	return driver_register(&drv->driver);
EXPORT_SYMBOL_GPL(ntb_transport_register_client);
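/*
 * Example (a minimal sketch; my_client, my_probe and my_remove are
 * hypothetical client-driver symbols, not part of this driver):
 *
 *	static struct ntb_transport_client my_client = {
 *		.driver = {
 *			.name = "my_dev",
 *			.owner = THIS_MODULE,
 *		},
 *		.probe = my_probe,
 *		.remove = my_remove,
 *	};
 *
 *	rc = ntb_transport_register_client(&my_client);
 */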
/**
 * ntb_transport_unregister_client - Unregister NTB client driver
 * @drv: NTB client driver to be unregistered
 *
 * Unregister an NTB client driver with the NTB transport layer
 */
void ntb_transport_unregister_client(struct ntb_transport_client *drv)
	driver_unregister(&drv->driver);
EXPORT_SYMBOL_GPL(ntb_transport_unregister_client);

static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
	struct ntb_transport_qp *qp;

	ssize_t ret, out_offset, out_count;

	buf = kmalloc(out_count, GFP_KERNEL);

	qp = filp->private_data;

	out_offset += snprintf(buf + out_offset, out_count - out_offset,

	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_bytes - \t%llu\n", qp->rx_bytes);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_pkts - \t%llu\n", qp->rx_pkts);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_memcpy - \t%llu\n", qp->rx_memcpy);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_async - \t%llu\n", qp->rx_async);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_ring_empty - %llu\n", qp->rx_ring_empty);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_err_no_buf - %llu\n", qp->rx_err_no_buf);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_err_oflow - \t%llu\n", qp->rx_err_oflow);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_err_ver - \t%llu\n", qp->rx_err_ver);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_buff - \t%p\n", qp->rx_buff);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_index - \t%u\n", qp->rx_index);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_max_entry - \t%u\n", qp->rx_max_entry);

	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_bytes - \t%llu\n", qp->tx_bytes);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_pkts - \t%llu\n", qp->tx_pkts);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_memcpy - \t%llu\n", qp->tx_memcpy);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_async - \t%llu\n", qp->tx_async);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_ring_full - \t%llu\n", qp->tx_ring_full);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_err_no_buf - %llu\n", qp->tx_err_no_buf);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_mw - \t%p\n", qp->tx_mw);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_index - \t%u\n", qp->tx_index);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_max_entry - \t%u\n", qp->tx_max_entry);

	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       qp->link_is_up ? "Up" : "Down");
	if (out_offset > out_count)
		out_offset = out_count;

	ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);

static const struct file_operations ntb_qp_debugfs_stats = {
	.owner = THIS_MODULE,
	.read = debugfs_read,

static void ntb_list_add(spinlock_t *lock, struct list_head *entry,
			 struct list_head *list)

	spin_lock_irqsave(lock, flags);
	list_add_tail(entry, list);
	spin_unlock_irqrestore(lock, flags);

static struct ntb_queue_entry *ntb_list_rm(spinlock_t *lock,
					   struct list_head *list)
	struct ntb_queue_entry *entry;

	spin_lock_irqsave(lock, flags);
	if (list_empty(list)) {

	entry = list_first_entry(list, struct ntb_queue_entry, entry);
	list_del(&entry->entry);

	spin_unlock_irqrestore(lock, flags);
static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt,
	struct ntb_transport_qp *qp = &nt->qp_vec[qp_num];
	struct ntb_transport_mw *mw;
	unsigned int rx_size, num_qps_mw;
	unsigned int mw_num, mw_count, qp_count;

	mw_count = nt->mw_count;
	qp_count = nt->qp_count;

	mw_num = QP_TO_MW(nt, qp_num);
	mw = &nt->mw_vec[mw_num];

	if (qp_count % mw_count && mw_num + 1 < qp_count / mw_count)
		num_qps_mw = qp_count / mw_count + 1;
	else
		num_qps_mw = qp_count / mw_count;

	rx_size = (unsigned int)mw->xlat_size / num_qps_mw;
	qp->rx_buff = mw->virt_addr + rx_size * (qp_num / mw_count);
	rx_size -= sizeof(struct ntb_rx_info);

	qp->remote_rx_info = qp->rx_buff + rx_size;

	/* Due to housekeeping, there must be at least 2 buffers */
	qp->rx_max_frame = min(transport_mtu, rx_size / 2);
	qp->rx_max_entry = rx_size / qp->rx_max_frame;
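	/*
	 * Worked example (illustrative numbers): given a 64 KiB slice of the
	 * memory window, rx_size = 65536 - 4 = 65532 after the ntb_rx_info
	 * footer, so rx_max_frame = min(0x401E, 65532 / 2) = 16414 bytes and
	 * rx_max_entry = 65532 / 16414 = 3 frames in the receive ring.
	 */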
	qp->remote_rx_info->entry = qp->rx_max_entry - 1;

	/* Set up the hdr offsets with 0s */
	for (i = 0; i < qp->rx_max_entry; i++) {
		void *offset = (qp->rx_buff + qp->rx_max_frame * (i + 1) -
				sizeof(struct ntb_payload_header));
		memset(offset, 0, sizeof(struct ntb_payload_header));
static void ntb_free_mw(struct ntb_transport_ctx *nt, int num_mw)
	struct ntb_transport_mw *mw = &nt->mw_vec[num_mw];
	struct pci_dev *pdev = nt->ndev->pdev;

	ntb_mw_clear_trans(nt->ndev, num_mw);
	dma_free_coherent(&pdev->dev, mw->buff_size,
			  mw->virt_addr, mw->dma_addr);

	mw->virt_addr = NULL;

static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw,
	struct ntb_transport_mw *mw = &nt->mw_vec[num_mw];
	struct pci_dev *pdev = nt->ndev->pdev;
	unsigned int xlat_size, buff_size;

	xlat_size = round_up(size, mw->xlat_align_size);
	buff_size = round_up(size, mw->xlat_align);

	/* No need to re-setup */
	if (mw->xlat_size == xlat_size)

	ntb_free_mw(nt, num_mw);

	/* Alloc memory for receiving data. Must be aligned */
	mw->xlat_size = xlat_size;
	mw->buff_size = buff_size;

	mw->virt_addr = dma_alloc_coherent(&pdev->dev, buff_size,
					   &mw->dma_addr, GFP_KERNEL);
	if (!mw->virt_addr) {

		dev_err(&pdev->dev, "Unable to alloc MW buff of size %d\n",
	/*
	 * We must ensure that the memory address allocated is BAR size
	 * aligned in order for the XLAT register to take the value. This
	 * is a requirement of the hardware. It is recommended to set up CMA
	 * for BAR sizes equal to or greater than 4 MB; e.g. a 4 MB BAR needs
	 * a 4 MB-aligned DMA address.
	 */
	if (!IS_ALIGNED(mw->dma_addr, mw->xlat_align)) {
		dev_err(&pdev->dev, "DMA memory %pad is not aligned\n",

		ntb_free_mw(nt, num_mw);

	/* Notify HW the memory location of the receive buffer */
	rc = ntb_mw_set_trans(nt->ndev, num_mw, mw->dma_addr, mw->xlat_size);

		dev_err(&pdev->dev, "Unable to set mw%d translation", num_mw);
		ntb_free_mw(nt, num_mw);

static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp)
	struct ntb_transport_ctx *nt = qp->transport;
	struct pci_dev *pdev = nt->ndev->pdev;

	if (qp->link_is_up) {
		cancel_delayed_work_sync(&qp->link_work);

	dev_info(&pdev->dev, "qp %d: Link Down\n", qp->qp_num);
	qp->link_is_up = false;

	if (qp->event_handler)
		qp->event_handler(qp->cb_data, qp->link_is_up);

static void ntb_qp_link_cleanup_work(struct work_struct *work)
	struct ntb_transport_qp *qp = container_of(work,
						   struct ntb_transport_qp,

	struct ntb_transport_ctx *nt = qp->transport;

	ntb_qp_link_cleanup(qp);

		schedule_delayed_work(&qp->link_work,
				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));

static void ntb_qp_link_down(struct ntb_transport_qp *qp)
	schedule_work(&qp->link_cleanup);

static void ntb_transport_link_cleanup(struct ntb_transport_ctx *nt)
	struct ntb_transport_qp *qp;

	qp_bitmap_alloc = nt->qp_bitmap & ~nt->qp_bitmap_free;

	/* Pass along the info to any clients */
	for (i = 0; i < nt->qp_count; i++)
		if (qp_bitmap_alloc & BIT_ULL(i)) {

			ntb_qp_link_cleanup(qp);
			cancel_work_sync(&qp->link_cleanup);
			cancel_delayed_work_sync(&qp->link_work);

	cancel_delayed_work_sync(&nt->link_work);
	/*
	 * The scratchpad registers keep their values if the remote side
	 * goes down; blast them now to give them a sane value the next
	 * time they are accessed.
	 */
	for (i = 0; i < MAX_SPAD; i++)
		ntb_spad_write(nt->ndev, i, 0);
static void ntb_transport_link_cleanup_work(struct work_struct *work)
	struct ntb_transport_ctx *nt =
		container_of(work, struct ntb_transport_ctx, link_cleanup);

	ntb_transport_link_cleanup(nt);

static void ntb_transport_event_callback(void *data)
	struct ntb_transport_ctx *nt = data;

	if (ntb_link_is_up(nt->ndev, NULL, NULL) == 1)
		schedule_delayed_work(&nt->link_work, 0);

		schedule_work(&nt->link_cleanup);

static void ntb_transport_link_work(struct work_struct *work)
	struct ntb_transport_ctx *nt =
		container_of(work, struct ntb_transport_ctx, link_work.work);
	struct ntb_dev *ndev = nt->ndev;
	struct pci_dev *pdev = ndev->pdev;
	resource_size_t size;

	/* send the local info, in the opposite order of the way we read it */
	for (i = 0; i < nt->mw_count; i++) {
		size = nt->mw_vec[i].phys_size;

		if (max_mw_size && size > max_mw_size)

		spad = MW0_SZ_HIGH + (i * 2);
		ntb_peer_spad_write(ndev, spad, (u32)(size >> 32));

		spad = MW0_SZ_LOW + (i * 2);
		ntb_peer_spad_write(ndev, spad, (u32)size);

	ntb_peer_spad_write(ndev, NUM_MWS, nt->mw_count);

	ntb_peer_spad_write(ndev, NUM_QPS, nt->qp_count);

	ntb_peer_spad_write(ndev, VERSION, NTB_TRANSPORT_VERSION);

	/* Query the remote side for its info */
	val = ntb_peer_spad_read(ndev, VERSION);
	dev_dbg(&pdev->dev, "Remote version = %d\n", val);
	if (val != NTB_TRANSPORT_VERSION)

	val = ntb_peer_spad_read(ndev, NUM_QPS);
	dev_dbg(&pdev->dev, "Remote max number of qps = %d\n", val);
	if (val != nt->qp_count)

	val = ntb_peer_spad_read(ndev, NUM_MWS);
	dev_dbg(&pdev->dev, "Remote number of mws = %d\n", val);
	if (val != nt->mw_count)

	for (i = 0; i < nt->mw_count; i++) {

		val = ntb_peer_spad_read(ndev, MW0_SZ_HIGH + (i * 2));
		val64 = (u64)val << 32;

		val = ntb_peer_spad_read(ndev, MW0_SZ_LOW + (i * 2));

		dev_dbg(&pdev->dev, "Remote MW%d size = %#llx\n", i, val64);

		rc = ntb_set_mw(nt, i, val64);

	nt->link_is_up = true;

	for (i = 0; i < nt->qp_count; i++) {
		struct ntb_transport_qp *qp = &nt->qp_vec[i];

		ntb_transport_setup_qp_mw(nt, i);

		if (qp->client_ready)
			schedule_delayed_work(&qp->link_work, 0);

	for (i = 0; i < nt->mw_count; i++)

	if (ntb_link_is_up(ndev, NULL, NULL) == 1)
		schedule_delayed_work(&nt->link_work,
				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));

static void ntb_qp_link_work(struct work_struct *work)
	struct ntb_transport_qp *qp = container_of(work,
						   struct ntb_transport_qp,

	struct pci_dev *pdev = qp->ndev->pdev;
	struct ntb_transport_ctx *nt = qp->transport;

	WARN_ON(!nt->link_is_up);

	val = ntb_spad_read(nt->ndev, QP_LINKS);

	ntb_peer_spad_write(nt->ndev, QP_LINKS, val | BIT(qp->qp_num));

	/* query remote spad for qp ready bits */
	ntb_peer_spad_read(nt->ndev, QP_LINKS);
	dev_dbg(&pdev->dev, "Remote QP link status = %x\n", val);

	/* See if the remote side is up */
	if (val & BIT(qp->qp_num)) {
		dev_info(&pdev->dev, "qp %d: Link Up\n", qp->qp_num);
		qp->link_is_up = true;

		if (qp->event_handler)
			qp->event_handler(qp->cb_data, qp->link_is_up);
	} else if (nt->link_is_up)
		schedule_delayed_work(&qp->link_work,
				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
	struct ntb_transport_qp *qp;
	struct ntb_transport_mw *mw;

	resource_size_t mw_size;
	unsigned int num_qps_mw, tx_size;
	unsigned int mw_num, mw_count, qp_count;

	mw_count = nt->mw_count;
	qp_count = nt->qp_count;

	mw_num = QP_TO_MW(nt, qp_num);
	mw = &nt->mw_vec[mw_num];

	qp = &nt->qp_vec[qp_num];

	qp->link_is_up = false;
	qp->client_ready = false;
	qp->event_handler = NULL;

	if (qp_count % mw_count && mw_num + 1 < qp_count / mw_count)
		num_qps_mw = qp_count / mw_count + 1;
	else
		num_qps_mw = qp_count / mw_count;

	mw_base = nt->mw_vec[mw_num].phys_addr;
	mw_size = nt->mw_vec[mw_num].phys_size;

	tx_size = (unsigned int)mw_size / num_qps_mw;
	qp_offset = tx_size * (qp_num / mw_count);

	qp->tx_mw = nt->mw_vec[mw_num].vbase + qp_offset;

	qp->tx_mw_phys = mw_base + qp_offset;

	tx_size -= sizeof(struct ntb_rx_info);
	qp->rx_info = qp->tx_mw + tx_size;

	/* Due to housekeeping, there must be at least 2 buffers */
	qp->tx_max_frame = min(transport_mtu, tx_size / 2);
	qp->tx_max_entry = tx_size / qp->tx_max_frame;

	if (nt_debugfs_dir) {
		/* "qp63" needs five bytes including the NUL terminator */
		char debugfs_name[8];

		snprintf(debugfs_name, sizeof(debugfs_name), "qp%d", qp_num);
		qp->debugfs_dir = debugfs_create_dir(debugfs_name,

		qp->debugfs_stats = debugfs_create_file("stats", S_IRUSR,

							&ntb_qp_debugfs_stats);

		qp->debugfs_dir = NULL;
		qp->debugfs_stats = NULL;
	INIT_DELAYED_WORK(&qp->link_work, ntb_qp_link_work);
	INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup_work);

	spin_lock_init(&qp->ntb_rx_pend_q_lock);
	spin_lock_init(&qp->ntb_rx_free_q_lock);
	spin_lock_init(&qp->ntb_tx_free_q_lock);

	INIT_LIST_HEAD(&qp->rx_pend_q);
	INIT_LIST_HEAD(&qp->rx_free_q);
	INIT_LIST_HEAD(&qp->tx_free_q);

	tasklet_init(&qp->rxc_db_work, ntb_transport_rxc_db,
static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
	struct ntb_transport_ctx *nt;
	struct ntb_transport_mw *mw;
	unsigned int mw_count, qp_count;

	if (ntb_db_is_unsafe(ndev))
			 "doorbell is unsafe, proceed anyway...\n");
	if (ntb_spad_is_unsafe(ndev))
			 "scratchpad is unsafe, proceed anyway...\n");

	nt = kzalloc(sizeof(*nt), GFP_KERNEL);

	mw_count = ntb_mw_count(ndev);

	nt->mw_count = mw_count;

	nt->mw_vec = kcalloc(mw_count, sizeof(*nt->mw_vec), GFP_KERNEL);

	for (i = 0; i < mw_count; i++) {

		rc = ntb_mw_get_range(ndev, i, &mw->phys_addr, &mw->phys_size,
				      &mw->xlat_align, &mw->xlat_align_size);

		mw->vbase = ioremap(mw->phys_addr, mw->phys_size);

		mw->virt_addr = NULL;

	qp_bitmap = ntb_db_valid_mask(ndev);

	qp_count = ilog2(qp_bitmap);
	if (max_num_clients && max_num_clients < qp_count)
		qp_count = max_num_clients;
	else if (mw_count < qp_count)

	qp_bitmap &= BIT_ULL(qp_count) - 1;

	nt->qp_count = qp_count;
	nt->qp_bitmap = qp_bitmap;
	nt->qp_bitmap_free = qp_bitmap;

	nt->qp_vec = kcalloc(qp_count, sizeof(*nt->qp_vec), GFP_KERNEL);

	for (i = 0; i < qp_count; i++) {
		rc = ntb_transport_init_queue(nt, i);

	INIT_DELAYED_WORK(&nt->link_work, ntb_transport_link_work);
	INIT_WORK(&nt->link_cleanup, ntb_transport_link_cleanup_work);

	rc = ntb_set_ctx(ndev, nt, &ntb_transport_ops);

	INIT_LIST_HEAD(&nt->client_devs);
	rc = ntb_bus_init(nt);

	nt->link_is_up = false;
	ntb_link_enable(ndev, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
	ntb_link_event(ndev);

	ntb_clear_ctx(ndev);

	mw = &nt->mw_vec[i];

static void ntb_transport_free(struct ntb_client *self, struct ntb_dev *ndev)
	struct ntb_transport_ctx *nt = ndev->ctx;
	struct ntb_transport_qp *qp;
	u64 qp_bitmap_alloc;

	ntb_transport_link_cleanup(nt);
	cancel_work_sync(&nt->link_cleanup);
	cancel_delayed_work_sync(&nt->link_work);

	qp_bitmap_alloc = nt->qp_bitmap & ~nt->qp_bitmap_free;
	/* verify that all the qps are freed */
	for (i = 0; i < nt->qp_count; i++) {
		qp = &nt->qp_vec[i];
		if (qp_bitmap_alloc & BIT_ULL(i))
			ntb_transport_free_queue(qp);
		debugfs_remove_recursive(qp->debugfs_dir);

	ntb_link_disable(ndev);
	ntb_clear_ctx(ndev);

	for (i = nt->mw_count; i--; ) {

		iounmap(nt->mw_vec[i].vbase);
static void ntb_rx_copy_callback(void *data)
	struct ntb_queue_entry *entry = data;
	struct ntb_transport_qp *qp = entry->qp;
	void *cb_data = entry->cb_data;
	unsigned int len = entry->len;
	struct ntb_payload_header *hdr = entry->rx_hdr;

	iowrite32(entry->index, &qp->rx_info->entry);

	ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q);

	if (qp->rx_handler && qp->client_ready)
		qp->rx_handler(qp, qp->cb_data, cb_data, len);

static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset)
	void *buf = entry->buf;
	size_t len = entry->len;

	memcpy(buf, offset, len);

	/* Ensure that the data is fully copied out before clearing the flag */

	ntb_rx_copy_callback(entry);

static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset,
	struct dma_async_tx_descriptor *txd;
	struct ntb_transport_qp *qp = entry->qp;
	struct dma_chan *chan = qp->dma_chan;
	struct dma_device *device;
	size_t pay_off, buff_off;
	struct dmaengine_unmap_data *unmap;
	dma_cookie_t cookie;
	void *buf = entry->buf;

	if (len < copy_bytes)

	device = chan->device;
	pay_off = (size_t)offset & ~PAGE_MASK;
	buff_off = (size_t)buf & ~PAGE_MASK;

	if (!is_dma_copy_aligned(device, pay_off, buff_off, len))

	unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT);

	unmap->addr[0] = dma_map_page(device->dev, virt_to_page(offset),
				      pay_off, len, DMA_TO_DEVICE);
	if (dma_mapping_error(device->dev, unmap->addr[0]))

	unmap->addr[1] = dma_map_page(device->dev, virt_to_page(buf),
				      buff_off, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(device->dev, unmap->addr[1]))

	unmap->from_cnt = 1;

	txd = device->device_prep_dma_memcpy(chan, unmap->addr[1],
					     unmap->addr[0], len,
					     DMA_PREP_INTERRUPT);

	txd->callback = ntb_rx_copy_callback;
	txd->callback_param = entry;
	dma_set_unmap(txd, unmap);

	cookie = dmaengine_submit(txd);
	if (dma_submit_error(cookie))

	dmaengine_unmap_put(unmap);

	qp->last_cookie = cookie;

	dmaengine_unmap_put(unmap);

	dmaengine_unmap_put(unmap);

	/*
	 * If the callbacks come out of order, the writing of the index to the
	 * last completed will be out of order. This may result in the
	 * receive stalling forever.
	 */
	dma_sync_wait(chan, qp->last_cookie);

	ntb_memcpy_rx(entry, offset);

static int ntb_process_rxc(struct ntb_transport_qp *qp)
	struct ntb_payload_header *hdr;
	struct ntb_queue_entry *entry;

	offset = qp->rx_buff + qp->rx_max_frame * qp->rx_index;
	hdr = offset + qp->rx_max_frame - sizeof(struct ntb_payload_header);

	dev_dbg(&qp->ndev->pdev->dev, "qp %d: RX ver %u len %d flags %x\n",
		qp->qp_num, hdr->ver, hdr->len, hdr->flags);

	if (!(hdr->flags & DESC_DONE_FLAG)) {
		dev_dbg(&qp->ndev->pdev->dev, "done flag not set\n");
		qp->rx_ring_empty++;

	if (hdr->flags & LINK_DOWN_FLAG) {
		dev_dbg(&qp->ndev->pdev->dev, "link down flag set\n");
		ntb_qp_link_down(qp);

		iowrite32(qp->rx_index, &qp->rx_info->entry);

	if (hdr->ver != (u32)qp->rx_pkts) {
		dev_dbg(&qp->ndev->pdev->dev,
			"version mismatch, expected %llu - got %u\n",
			qp->rx_pkts, hdr->ver);

	entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);

		dev_dbg(&qp->ndev->pdev->dev, "no receive buffer\n");
		qp->rx_err_no_buf++;

	if (hdr->len > entry->len) {
		dev_dbg(&qp->ndev->pdev->dev,
			"receive buffer overflow! Wanted %d got %d\n",
			hdr->len, entry->len);

	dev_dbg(&qp->ndev->pdev->dev,
		"RX OK index %u ver %u size %d into buf size %d\n",
		qp->rx_index, hdr->ver, hdr->len, entry->len);

	qp->rx_bytes += hdr->len;

	entry->index = qp->rx_index;
	entry->rx_hdr = hdr;

	ntb_async_rx(entry, offset, hdr->len);

	qp->rx_index %= qp->rx_max_entry;
	/*
	 * FIXME: if this synchronous update of the rx_index gets ahead of the
	 * asynchronous ntb_rx_copy_callback of a previous entry, there are
	 * three possible hazards:
	 *
	 * 1) The peer might miss this update, but observe the update
	 * from the memcpy completion callback. In this case, the buffer will
	 * not be freed on the peer to be reused for a different packet. The
	 * successful rx of a later packet would clear the condition, but the
	 * condition could persist if several rx fail in a row.
	 *
	 * 2) The peer may observe this update before the asynchronous copy of
	 * prior packets is completed. The peer may overwrite the buffers of
	 * the prior packets before they are copied.
	 *
	 * 3) Both: the peer may observe the update, and then observe the index
	 * decrement by the asynchronous completion callback. Who knows what
	 * badness that will cause.
	 */
	iowrite32(qp->rx_index, &qp->rx_info->entry);
static void ntb_transport_rxc_db(unsigned long data)
	struct ntb_transport_qp *qp = (void *)data;

	dev_dbg(&qp->ndev->pdev->dev, "%s: doorbell %d received\n",
		__func__, qp->qp_num);

	/*
	 * Limit the number of packets processed in a single interrupt to
	 * provide fairness to others
	 */
	for (i = 0; i < qp->rx_max_entry; i++) {
		rc = ntb_process_rxc(qp);

		dma_async_issue_pending(qp->dma_chan);

	if (i == qp->rx_max_entry) {
		/* there is more work to do */
		tasklet_schedule(&qp->rxc_db_work);
	} else if (ntb_db_read(qp->ndev) & BIT_ULL(qp->qp_num)) {
		/* the doorbell bit is set: clear it */
		ntb_db_clear(qp->ndev, BIT_ULL(qp->qp_num));
		/* ntb_db_read ensures ntb_db_clear write is committed */
		ntb_db_read(qp->ndev);

		/*
		 * An interrupt may have arrived between finishing
		 * ntb_process_rxc and clearing the doorbell bit:
		 * there might be some more work to do.
		 */
		tasklet_schedule(&qp->rxc_db_work);

static void ntb_tx_copy_callback(void *data)
	struct ntb_queue_entry *entry = data;
	struct ntb_transport_qp *qp = entry->qp;
	struct ntb_payload_header __iomem *hdr = entry->tx_hdr;

	iowrite32(entry->flags | DESC_DONE_FLAG, &hdr->flags);

	ntb_peer_db_set(qp->ndev, BIT_ULL(qp->qp_num));

	/*
	 * The entry length can only be zero if the packet is intended to be a
	 * "link down" or similar. Since no payload is being sent in these
	 * cases, there is nothing to add to the completion queue.
	 */
	if (entry->len > 0) {
		qp->tx_bytes += entry->len;

			qp->tx_handler(qp, qp->cb_data, entry->cb_data,

	ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, &qp->tx_free_q);

static void ntb_memcpy_tx(struct ntb_queue_entry *entry, void __iomem *offset)
	memcpy_toio(offset, entry->buf, entry->len);

	/* Ensure that the data is fully copied out before setting the flags */

	ntb_tx_copy_callback(entry);

static void ntb_async_tx(struct ntb_transport_qp *qp,
			 struct ntb_queue_entry *entry)
	struct ntb_payload_header __iomem *hdr;
	struct dma_async_tx_descriptor *txd;
	struct dma_chan *chan = qp->dma_chan;
	struct dma_device *device;
	size_t dest_off, buff_off;
	struct dmaengine_unmap_data *unmap;

	dma_cookie_t cookie;
	void __iomem *offset;
	size_t len = entry->len;
	void *buf = entry->buf;

	offset = qp->tx_mw + qp->tx_max_frame * qp->tx_index;
	hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header);
	entry->tx_hdr = hdr;

	iowrite32(entry->len, &hdr->len);
	iowrite32((u32)qp->tx_pkts, &hdr->ver);

	if (len < copy_bytes)

	device = chan->device;
	dest = qp->tx_mw_phys + qp->tx_max_frame * qp->tx_index;
	buff_off = (size_t)buf & ~PAGE_MASK;
	dest_off = (size_t)dest & ~PAGE_MASK;

	if (!is_dma_copy_aligned(device, buff_off, dest_off, len))

	unmap = dmaengine_get_unmap_data(device->dev, 1, GFP_NOWAIT);

	unmap->addr[0] = dma_map_page(device->dev, virt_to_page(buf),
				      buff_off, len, DMA_TO_DEVICE);
	if (dma_mapping_error(device->dev, unmap->addr[0]))

	txd = device->device_prep_dma_memcpy(chan, dest, unmap->addr[0], len,
					     DMA_PREP_INTERRUPT);

	txd->callback = ntb_tx_copy_callback;
	txd->callback_param = entry;
	dma_set_unmap(txd, unmap);

	cookie = dmaengine_submit(txd);
	if (dma_submit_error(cookie))

	dmaengine_unmap_put(unmap);

	dma_async_issue_pending(chan);

	dmaengine_unmap_put(unmap);

	dmaengine_unmap_put(unmap);

	ntb_memcpy_tx(entry, offset);
static int ntb_process_tx(struct ntb_transport_qp *qp,
			  struct ntb_queue_entry *entry)
	if (qp->tx_index == qp->remote_rx_info->entry) {

	if (entry->len > qp->tx_max_frame - sizeof(struct ntb_payload_header)) {
		qp->tx_handler(qp, qp->cb_data, NULL, -EIO);

		ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,

	ntb_async_tx(qp, entry);

	qp->tx_index %= qp->tx_max_entry;
static void ntb_send_link_down(struct ntb_transport_qp *qp)
	struct pci_dev *pdev = qp->ndev->pdev;
	struct ntb_queue_entry *entry;

	if (!qp->link_is_up)

	qp->link_is_up = false;
	dev_info(&pdev->dev, "qp %d: Link Down\n", qp->qp_num);

	for (i = 0; i < NTB_LINK_DOWN_TIMEOUT; i++) {
		entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);

	entry->cb_data = NULL;

	entry->flags = LINK_DOWN_FLAG;

	rc = ntb_process_tx(qp, entry);

		dev_err(&pdev->dev, "ntb: QP%d unable to send linkdown msg\n",

/**
 * ntb_transport_create_queue - Create a new NTB transport layer queue
 * @rx_handler: receive callback function
 * @tx_handler: transmit callback function
 * @event_handler: event callback function
 *
 * Create a new NTB transport layer queue and provide the queue with a callback
 * routine for both transmit and receive. The receive callback routine will be
 * used to pass up data when the transport has received it on the queue. The
 * transmit callback routine will be called when the transport has completed the
 * transmission of the data on the queue and the data is ready to be freed.
 *
 * RETURNS: pointer to newly created ntb_queue, NULL on error.
 */
struct ntb_transport_qp *
ntb_transport_create_queue(void *data, struct device *client_dev,
			   const struct ntb_queue_handlers *handlers)
	struct ntb_dev *ndev;
	struct pci_dev *pdev;
	struct ntb_transport_ctx *nt;
	struct ntb_queue_entry *entry;
	struct ntb_transport_qp *qp;

	unsigned int free_queue;

	ndev = dev_ntb(client_dev->parent);

	free_queue = ffs(nt->qp_bitmap_free);

	/* decrement free_queue to make it zero based */

	qp = &nt->qp_vec[free_queue];
	qp_bit = BIT_ULL(qp->qp_num);

	nt->qp_bitmap_free &= ~qp_bit;
	qp->rx_handler = handlers->rx_handler;
	qp->tx_handler = handlers->tx_handler;
	qp->event_handler = handlers->event_handler;

	qp->dma_chan = dma_find_channel(DMA_MEMCPY);
	if (!qp->dma_chan) {

		dev_info(&pdev->dev, "Unable to allocate DMA channel, using CPU instead\n");

	for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
		entry = kzalloc(sizeof(*entry), GFP_ATOMIC);

		ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry,

	for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
		entry = kzalloc(sizeof(*entry), GFP_ATOMIC);

		ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,

	ntb_db_clear(qp->ndev, qp_bit);
	ntb_db_clear_mask(qp->ndev, qp_bit);

	dev_info(&pdev->dev, "NTB Transport QP %d created\n", qp->qp_num);

	while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))

	while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))

	nt->qp_bitmap_free |= qp_bit;

EXPORT_SYMBOL_GPL(ntb_transport_create_queue);
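/*
 * Example usage (a minimal sketch; my_rx, my_tx, my_event, priv and buf_len
 * are hypothetical client symbols, not part of this driver):
 *
 *	static void my_rx(struct ntb_transport_qp *qp, void *qp_data,
 *			  void *data, int len)
 *	{
 *		consume the payload, then repost the buffer:
 *		ntb_transport_rx_enqueue(qp, data, data, buf_len);
 *	}
 *
 *	static const struct ntb_queue_handlers my_handlers = {
 *		.rx_handler = my_rx,
 *		.tx_handler = my_tx,
 *		.event_handler = my_event,
 *	};
 *	...
 *	qp = ntb_transport_create_queue(priv, client_dev, &my_handlers);
 *	if (!qp)
 *		return -EIO;
 *	ntb_transport_link_up(qp);
 */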
/**
 * ntb_transport_free_queue - Frees NTB transport queue
 * @qp: NTB queue to be freed
 *
 * Frees NTB transport queue
 */
void ntb_transport_free_queue(struct ntb_transport_qp *qp)
	struct ntb_transport_ctx *nt = qp->transport;
	struct pci_dev *pdev;
	struct ntb_queue_entry *entry;

	pdev = qp->ndev->pdev;
		struct dma_chan *chan = qp->dma_chan;

		/*
		 * Setting the dma_chan pointer to NULL forces any new traffic
		 * to be processed by the CPU instead of the DMA engine.
		 */
		qp->dma_chan = NULL;

		/*
		 * Try to be nice and wait for any queued DMA engine
		 * transactions to process before smashing it with a rock.
		 */
		dma_sync_wait(chan, qp->last_cookie);
		dmaengine_terminate_all(chan);
	qp_bit = BIT_ULL(qp->qp_num);

	ntb_db_set_mask(qp->ndev, qp_bit);
	tasklet_disable(&qp->rxc_db_work);

	cancel_delayed_work_sync(&qp->link_work);

	qp->rx_handler = NULL;
	qp->tx_handler = NULL;
	qp->event_handler = NULL;

	while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))

	while ((entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q))) {
		dev_warn(&pdev->dev, "Freeing item from a non-empty queue\n");

	while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))

	nt->qp_bitmap_free |= qp_bit;

	dev_info(&pdev->dev, "NTB Transport QP %d freed\n", qp->qp_num);
EXPORT_SYMBOL_GPL(ntb_transport_free_queue);
/**
 * ntb_transport_rx_remove - Dequeues enqueued rx packet
 * @qp: NTB queue to be freed
 * @len: pointer to variable to write enqueued buffer's length
 *
 * Dequeues unused buffers from receive queue. Should only be used during
 * shutdown of the qp.
 *
 * RETURNS: NULL on error, or a pointer to the dequeued buffer on success.
 */
void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len)
	struct ntb_queue_entry *entry;

	if (!qp || qp->client_ready)

	entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);

	buf = entry->cb_data;

	ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q);

EXPORT_SYMBOL_GPL(ntb_transport_rx_remove);
/**
 * ntb_transport_rx_enqueue - Enqueue a new NTB queue entry
 * @qp: NTB transport layer queue the entry is to be enqueued on
 * @cb: per buffer pointer for callback function to use
 * @data: pointer to data buffer that incoming packets will be copied into
 * @len: length of the data buffer
 *
 * Enqueue a new receive buffer, into which an NTB payload can be received,
 * onto the transport queue.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
	struct ntb_queue_entry *entry;

	entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q);

	entry->cb_data = cb;

	ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry, &qp->rx_pend_q);

EXPORT_SYMBOL_GPL(ntb_transport_rx_enqueue);
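/*
 * Example (sketch; NUM_BUFS and the allocation scheme are hypothetical):
 *
 *	for (i = 0; i < NUM_BUFS; i++) {
 *		void *buf = kmalloc(ntb_transport_max_size(qp), GFP_KERNEL);
 *
 *		if (!buf)
 *			break;
 *		rc = ntb_transport_rx_enqueue(qp, buf, buf,
 *					      ntb_transport_max_size(qp));
 *	}
 */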
/**
 * ntb_transport_tx_enqueue - Enqueue a new NTB queue entry
 * @qp: NTB transport layer queue the entry is to be enqueued on
 * @cb: per buffer pointer for callback function to use
 * @data: pointer to data buffer that will be sent
 * @len: length of the data buffer
 *
 * Enqueue a new transmit buffer onto the transport queue from which an NTB
 * payload will be transmitted. This assumes that a lock is being held to
 * serialize access to the qp.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
	struct ntb_queue_entry *entry;

	if (!qp || !qp->link_is_up || !len)

	entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);

		qp->tx_err_no_buf++;

	entry->cb_data = cb;

	rc = ntb_process_tx(qp, entry);

		ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,

EXPORT_SYMBOL_GPL(ntb_transport_tx_enqueue);
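/*
 * Example (sketch; skb is a hypothetical client buffer that must remain
 * valid until the tx_handler callback signals completion):
 *
 *	rc = ntb_transport_tx_enqueue(qp, skb, skb->data, skb->len);
 *	if (rc)
 *		dev_kfree_skb(skb);
 */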
/**
 * ntb_transport_link_up - Notify NTB transport of client readiness to use queue
 * @qp: NTB transport layer queue to be enabled
 *
 * Notify NTB transport layer of client readiness to use queue
 */
void ntb_transport_link_up(struct ntb_transport_qp *qp)

	qp->client_ready = true;

	if (qp->transport->link_is_up)
		schedule_delayed_work(&qp->link_work, 0);
EXPORT_SYMBOL_GPL(ntb_transport_link_up);

/**
 * ntb_transport_link_down - Notify NTB transport to no longer enqueue data
 * @qp: NTB transport layer queue to be disabled
 *
 * Notify NTB transport layer of client's desire to no longer receive data on
 * transport queue specified. It is the client's responsibility to ensure all
 * entries on queue are purged or otherwise handled appropriately.
 */
void ntb_transport_link_down(struct ntb_transport_qp *qp)
	struct pci_dev *pdev;

	pdev = qp->ndev->pdev;
	qp->client_ready = false;

	val = ntb_spad_read(qp->ndev, QP_LINKS);

	ntb_peer_spad_write(qp->ndev, QP_LINKS,
			    val & ~BIT(qp->qp_num));

		ntb_send_link_down(qp);

	cancel_delayed_work_sync(&qp->link_work);
EXPORT_SYMBOL_GPL(ntb_transport_link_down);

/**
 * ntb_transport_link_query - Query transport link state
 * @qp: NTB transport layer queue to be queried
 *
 * Query connectivity to the remote system of the NTB transport queue
 *
 * RETURNS: true for link up or false for link down
 */
bool ntb_transport_link_query(struct ntb_transport_qp *qp)

	return qp->link_is_up;
EXPORT_SYMBOL_GPL(ntb_transport_link_query);
/**
 * ntb_transport_qp_num - Query the qp number
 * @qp: NTB transport layer queue to be queried
 *
 * Query qp number of the NTB transport queue
 *
 * RETURNS: a zero-based number specifying the qp number
 */
unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp)

EXPORT_SYMBOL_GPL(ntb_transport_qp_num);
/**
 * ntb_transport_max_size - Query the max payload size of a qp
 * @qp: NTB transport layer queue to be queried
 *
 * Query the maximum payload size permissible on the given qp
 *
 * RETURNS: the max payload size of a qp
 */
unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp)

		return qp->tx_max_frame - sizeof(struct ntb_payload_header);

	/* If DMA engine usage is possible, try to find the max size for that */
	max = qp->tx_max_frame - sizeof(struct ntb_payload_header);
	max -= max % (1 << qp->dma_chan->device->copy_align);

EXPORT_SYMBOL_GPL(ntb_transport_max_size);
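/*
 * Example (sketch): a client sizing its receive buffers from the qp rather
 * than from transport_mtu directly:
 *
 *	size_t buf_len = ntb_transport_max_size(qp);
 *	void *buf = kmalloc(buf_len, GFP_KERNEL);
 *
 *	if (buf)
 *		rc = ntb_transport_rx_enqueue(qp, buf, buf, buf_len);
 */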
static void ntb_transport_doorbell_callback(void *data, int vector)
	struct ntb_transport_ctx *nt = data;
	struct ntb_transport_qp *qp;

	unsigned int qp_num;

	db_bits = (nt->qp_bitmap & ~nt->qp_bitmap_free &
		   ntb_db_vector_mask(nt->ndev, vector));

		qp_num = __ffs(db_bits);
		qp = &nt->qp_vec[qp_num];

		tasklet_schedule(&qp->rxc_db_work);

		db_bits &= ~BIT_ULL(qp_num);

static const struct ntb_ctx_ops ntb_transport_ops = {
	.link_event = ntb_transport_event_callback,
	.db_event = ntb_transport_doorbell_callback,

static struct ntb_client ntb_transport_client = {

	.probe = ntb_transport_probe,
	.remove = ntb_transport_free,

static int __init ntb_transport_init(void)

	if (debugfs_initialized())
		nt_debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);

	rc = bus_register(&ntb_transport_bus);

	rc = ntb_register_client(&ntb_transport_client);

	bus_unregister(&ntb_transport_bus);

	debugfs_remove_recursive(nt_debugfs_dir);

module_init(ntb_transport_init);

static void __exit ntb_transport_exit(void)
	debugfs_remove_recursive(nt_debugfs_dir);

	ntb_unregister_client(&ntb_transport_client);
	bus_unregister(&ntb_transport_bus);
module_exit(ntb_transport_exit);