it's a Per-CPU variable.
Default: 64
-low_latency_read
+busy_read
----------------
Low latency busy poll timeout for socket reads. (needs CONFIG_NET_LL_RX_POLL)
Approximate time in us to busy loop waiting for packets on the device queue.
-This sets the default value of the SO_LL socket option.
-Can be set or overridden per socket by setting socket option SO_LL, which is
-the preferred method of enabling.
-If you need to enable the feature globally via sysctl, a value of 50 is recommended.
+This sets the default value of the SO_BUSY_POLL socket option.
+Can be set or overridden per socket by setting socket option SO_BUSY_POLL,
+which is the preferred method of enabling. If you need to enable the feature
+globally via sysctl, a value of 50 is recommended.
Will increase power usage.
Default: 0 (off)
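As an illustration (not part of this patch), a minimal userspace sketch of
the per-socket path; SO_BUSY_POLL uses the asm-generic value 46 introduced
below, and fd is assumed to be an already-created socket:

	#include <sys/socket.h>
	#include <stdio.h>

	#ifndef SO_BUSY_POLL
	#define SO_BUSY_POLL 46	/* asm-generic/socket.h */
	#endif

	/* Request ~50 us of busy polling on reads for this socket. */
	static int enable_busy_poll(int fd)
	{
		unsigned int usecs = 50;

		if (setsockopt(fd, SOL_SOCKET, SO_BUSY_POLL,
			       &usecs, sizeof(usecs)) < 0) {
			perror("setsockopt(SO_BUSY_POLL)");
			return -1;
		}
		return 0;
	}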
-low_latency_poll
+busy_poll
----------------
Low latency busy poll timeout for poll and select. (needs CONFIG_NET_LL_RX_POLL)
Approximate time in us to busy loop waiting for events.
Recommended value depends on the number of sockets you poll on.
For several sockets 50, for several hundreds 100.
For more than that you probably want to use epoll.
-Note that only sockets with SO_LL set will be busy polled, so you want to either
-selectively set SO_LL on those sockets or set sysctl.net.low_latency_read globally.
+Note that only sockets with SO_BUSY_POLL set will be busy polled,
+so you want to either selectively set SO_BUSY_POLL on those sockets or set
+the net.core.busy_read sysctl globally.
Will increase power usage.
Default: 0 (off)
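Equivalently, a hedged sketch of the global path, assuming the sysctls are
exposed under /proc/sys/net/core/ as registered in sysctl_net_core.c below:

	#include <stdio.h>

	/* Write a microsecond budget to one of the busy-poll sysctls,
	 * e.g. set_busy_sysctl("busy_read", 50).
	 */
	static int set_busy_sysctl(const char *name, unsigned int usecs)
	{
		char path[64];
		FILE *f;

		snprintf(path, sizeof(path), "/proc/sys/net/core/%s", name);
		f = fopen(path, "w");
		if (!f)
			return -1;
		fprintf(f, "%u\n", usecs);
		return fclose(f);
	}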
F: drivers/net/ethernet/qlogic/qla3xxx.*
QLOGIC QLCNIC (1/10)Gb ETHERNET DRIVER
+M: Himanshu Madhani <himanshu.madhani@qlogic.com>
M: Rajesh Borundia <rajesh.borundia@qlogic.com>
M: Shahed Shaikh <shahed.shaikh@qlogic.com>
M: Jitendra Kalsaria <jitendra.kalsaria@qlogic.com>
M: Sony Chacko <sony.chacko@qlogic.com>
+M: Sucheta Chakraborty <sucheta.chakraborty@qlogic.com>
M: linux-driver@qlogic.com
L: netdev@vger.kernel.org
S: Supported
#define SO_SELECT_ERR_QUEUE 45
-#define SO_LL 46
+#define SO_BUSY_POLL 46
#endif /* _UAPI_ASM_SOCKET_H */
#define SO_SELECT_ERR_QUEUE 45
-#define SO_LL 46
+#define SO_BUSY_POLL 46
#endif /* __ASM_AVR32_SOCKET_H */
#define SO_SELECT_ERR_QUEUE 45
-#define SO_LL 46
+#define SO_BUSY_POLL 46
#endif /* _ASM_SOCKET_H */
#define SO_SELECT_ERR_QUEUE 45
-#define SO_LL 46
+#define SO_BUSY_POLL 46
#endif /* _ASM_SOCKET_H */
#define SO_SELECT_ERR_QUEUE 45
-#define SO_LL 46
+#define SO_BUSY_POLL 46
#endif /* _ASM_SOCKET_H */
#define SO_SELECT_ERR_QUEUE 45
-#define SO_LL 46
+#define SO_BUSY_POLL 46
#endif /* _ASM_IA64_SOCKET_H */
#define SO_SELECT_ERR_QUEUE 45
-#define SO_LL 46
+#define SO_BUSY_POLL 46
#endif /* _ASM_M32R_SOCKET_H */
#define SO_SELECT_ERR_QUEUE 45
-#define SO_LL 46
+#define SO_BUSY_POLL 46
#endif /* _UAPI_ASM_SOCKET_H */
#define SO_SELECT_ERR_QUEUE 45
-#define SO_LL 46
+#define SO_BUSY_POLL 46
#endif /* _ASM_SOCKET_H */
#define SO_SELECT_ERR_QUEUE 0x4026
-#define SO_LL 0x4027
+#define SO_BUSY_POLL 0x4027
/* O_NONBLOCK clashes with the bits used for socket types. Therefore we
* have to define SOCK_NONBLOCK to a different value here.
#define SO_SELECT_ERR_QUEUE 45
-#define SO_LL 46
+#define SO_BUSY_POLL 46
#endif /* _ASM_POWERPC_SOCKET_H */
#define SO_SELECT_ERR_QUEUE 45
-#define SO_LL 46
+#define SO_BUSY_POLL 46
#endif /* _ASM_SOCKET_H */
#define SO_SELECT_ERR_QUEUE 0x0029
-#define SO_LL 0x0030
+#define SO_BUSY_POLL 0x0030
/* Security levels - as per NRL IPv6 - don't actually do anything */
#define SO_SECURITY_AUTHENTICATION 0x5001
#define SO_SELECT_ERR_QUEUE 45
-#define SO_LL 46
+#define SO_BUSY_POLL 46
#endif /* _XTENSA_SOCKET_H */
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_device.h>
-#include <linux/pinctrl/consumer.h>
#include <linux/can/dev.h>
struct c_can_priv *priv;
const struct of_device_id *match;
const struct platform_device_id *id;
- struct pinctrl *pinctrl;
struct resource *mem, *res;
int irq;
struct clk *clk;
id = platform_get_device_id(pdev);
}
- pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
- if (IS_ERR(pinctrl))
- dev_warn(&pdev->dev,
- "failed to configure pins from driver\n");
-
/* get the appropriate clk */
clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(clk)) {
rtnl_lock();
err = __rtnl_link_register(&dummy_link_ops);
+ if (err < 0)
+ goto out;
for (i = 0; i < numdummies && !err; i++) {
err = dummy_init_one();
}
if (err < 0)
__rtnl_link_unregister(&dummy_link_ops);
+
+out:
rtnl_unlock();
return err;
SET_NETDEV_DEV(netdev, &pdev->dev);
alx = netdev_priv(netdev);
+ spin_lock_init(&alx->hw.mdio_lock);
+ spin_lock_init(&alx->irq_lock);
alx->dev = netdev;
alx->hw.pdev = pdev;
alx->msg_enable = NETIF_MSG_LINK | NETIF_MSG_HW | NETIF_MSG_IFUP |
INIT_WORK(&alx->link_check_wk, alx_link_check);
INIT_WORK(&alx->reset_wk, alx_reset);
- spin_lock_init(&alx->hw.mdio_lock);
- spin_lock_init(&alx->irq_lock);
-
netif_carrier_off(netdev);
err = register_netdev(netdev);
return 0;
}
-static void atl1e_tx_map(struct atl1e_adapter *adapter,
- struct sk_buff *skb, struct atl1e_tpd_desc *tpd)
+static int atl1e_tx_map(struct atl1e_adapter *adapter,
+ struct sk_buff *skb, struct atl1e_tpd_desc *tpd)
{
struct atl1e_tpd_desc *use_tpd = NULL;
struct atl1e_tx_buffer *tx_buffer = NULL;
u16 nr_frags;
u16 f;
int segment;
+ int ring_start = adapter->tx_ring.next_to_use;
nr_frags = skb_shinfo(skb)->nr_frags;
segment = (tpd->word3 >> TPD_SEGMENT_EN_SHIFT) & TPD_SEGMENT_EN_MASK;
tx_buffer->length = map_len;
tx_buffer->dma = pci_map_single(adapter->pdev,
skb->data, hdr_len, PCI_DMA_TODEVICE);
+ if (dma_mapping_error(&adapter->pdev->dev, tx_buffer->dma))
+ return -ENOSPC;
+
ATL1E_SET_PCIMAP_TYPE(tx_buffer, ATL1E_TX_PCIMAP_SINGLE);
mapped_len += map_len;
use_tpd->buffer_addr = cpu_to_le64(tx_buffer->dma);
tx_buffer->dma =
pci_map_single(adapter->pdev, skb->data + mapped_len,
map_len, PCI_DMA_TODEVICE);
+
+ if (dma_mapping_error(&adapter->pdev->dev, tx_buffer->dma)) {
+ /* Reset the tx ring's next_to_use pointer */
+ adapter->tx_ring.next_to_use = ring_start;
+ return -ENOSPC;
+ }
+
ATL1E_SET_PCIMAP_TYPE(tx_buffer, ATL1E_TX_PCIMAP_SINGLE);
mapped_len += map_len;
use_tpd->buffer_addr = cpu_to_le64(tx_buffer->dma);
(i * MAX_TX_BUF_LEN),
tx_buffer->length,
DMA_TO_DEVICE);
+
+ if (dma_mapping_error(&adapter->pdev->dev, tx_buffer->dma)) {
+ /* Reset the ring's next_to_use pointer */
+ adapter->tx_ring.next_to_use = ring_start;
+ return -ENOSPC;
+ }
+
ATL1E_SET_PCIMAP_TYPE(tx_buffer, ATL1E_TX_PCIMAP_PAGE);
use_tpd->buffer_addr = cpu_to_le64(tx_buffer->dma);
use_tpd->word2 = (use_tpd->word2 & (~TPD_BUFLEN_MASK)) |
/* The last buffer info contain the skb address,
so it will be free after unmap */
tx_buffer->skb = skb;
+ return 0;
}
static void atl1e_tx_queue(struct atl1e_adapter *adapter, u16 count,
return NETDEV_TX_OK;
}
- atl1e_tx_map(adapter, skb, tpd);
+ if (atl1e_tx_map(adapter, skb, tpd))
+ goto out;
+
atl1e_tx_queue(adapter, tpd_req, tpd);
netdev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
+out:
spin_unlock_irqrestore(&adapter->tx_lock, flags);
return NETDEV_TX_OK;
}
#include <net/tcp.h>
#include <net/ipv6.h>
#include <net/ip6_checksum.h>
-#include <net/ll_poll.h>
+#include <net/busy_poll.h>
#include <linux/prefetch.h>
#include "bnx2x_cmn.h"
#include "bnx2x_init.h"
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
le16_to_cpu(cqe_fp->vlan_tag));
- skb_mark_ll(skb, &fp->napi);
+ skb_mark_napi_id(skb, &fp->napi);
if (bnx2x_fp_ll_polling(fp))
netif_receive_skb(skb);
/* outer IP header info */
if (xmit_type & XMIT_CSUM_V4) {
struct iphdr *iph = ip_hdr(skb);
- u16 csum = (__force u16)(~iph->check) -
- (__force u16)iph->tot_len -
- (__force u16)iph->frag_off;
+ u32 csum = (__force u32)(~iph->check) -
+ (__force u32)iph->tot_len -
+ (__force u32)iph->frag_off;
pbd2->fw_ip_csum_wo_len_flags_frag =
bswab16(csum_fold((__force __wsum)csum));
#endif
#ifdef CONFIG_NET_LL_RX_POLL
- .ndo_ll_poll = bnx2x_low_latency_recv,
+ .ndo_busy_poll = bnx2x_low_latency_recv,
#endif
};
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_net.h>
-#include <linux/pinctrl/consumer.h>
#include "macb.h"
struct resource *regs;
struct net_device *dev;
struct phy_device *phydev;
- struct pinctrl *pinctrl;
struct macb *lp;
int res;
u32 reg;
if (!regs)
return -ENOENT;
- pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
- if (IS_ERR(pinctrl)) {
- res = PTR_ERR(pinctrl);
- if (res == -EPROBE_DEFER)
- return res;
-
- dev_warn(&pdev->dev, "No pinctrl provided\n");
- }
-
dev = alloc_etherdev(sizeof(struct macb));
if (!dev)
return -ENOMEM;
#include <linux/dca.h>
#endif
-#include <net/ll_poll.h>
+#include <net/busy_poll.h>
#ifdef CONFIG_NET_LL_RX_POLL
#define LL_EXTENDED_STATS
}
#endif /* IXGBE_FCOE */
- skb_mark_ll(skb, &q_vector->napi);
+ skb_mark_napi_id(skb, &q_vector->napi);
ixgbe_rx_skb(q_vector, skb);
/* update budget accounting */
.ndo_poll_controller = ixgbe_netpoll,
#endif
#ifdef CONFIG_NET_LL_RX_POLL
- .ndo_ll_poll = ixgbe_low_latency_recv,
+ .ndo_busy_poll = ixgbe_low_latency_recv,
#endif
#ifdef IXGBE_FCOE
.ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get,
#include <linux/slab.h>
#include <linux/hash.h>
#include <net/ip.h>
-#include <net/ll_poll.h>
+#include <net/busy_poll.h>
#include <linux/mlx4/driver.h>
#include <linux/mlx4/device.h>
.ndo_rx_flow_steer = mlx4_en_filter_rfs,
#endif
#ifdef CONFIG_NET_LL_RX_POLL
- .ndo_ll_poll = mlx4_en_low_latency_recv,
+ .ndo_busy_poll = mlx4_en_low_latency_recv,
#endif
};
*
*/
-#include <net/ll_poll.h>
+#include <net/busy_poll.h>
#include <linux/mlx4/cq.h>
#include <linux/slab.h>
#include <linux/mlx4/qp.h>
timestamp);
}
- skb_mark_ll(skb, &cq->napi);
+ skb_mark_napi_id(skb, &cq->napi);
/* Push it up the stack */
netif_receive_skb(skb);
#define FIRMWARE_8105E_1 "rtl_nic/rtl8105e-1.fw"
#define FIRMWARE_8402_1 "rtl_nic/rtl8402-1.fw"
#define FIRMWARE_8411_1 "rtl_nic/rtl8411-1.fw"
+#define FIRMWARE_8411_2 "rtl_nic/rtl8411-2.fw"
#define FIRMWARE_8106E_1 "rtl_nic/rtl8106e-1.fw"
#define FIRMWARE_8106E_2 "rtl_nic/rtl8106e-2.fw"
#define FIRMWARE_8168G_2 "rtl_nic/rtl8168g-2.fw"
RTL_GIGA_MAC_VER_41,
RTL_GIGA_MAC_VER_42,
RTL_GIGA_MAC_VER_43,
+ RTL_GIGA_MAC_VER_44,
RTL_GIGA_MAC_NONE = 0xff,
};
[RTL_GIGA_MAC_VER_43] =
_R("RTL8106e", RTL_TD_1, FIRMWARE_8106E_2,
JUMBO_1K, true),
+ [RTL_GIGA_MAC_VER_44] =
+ _R("RTL8411", RTL_TD_1, FIRMWARE_8411_2,
+ JUMBO_9K, false),
};
#undef _R
#define CSIAR_FUNC_CARD 0x00000000
#define CSIAR_FUNC_SDIO 0x00010000
#define CSIAR_FUNC_NIC 0x00020000
+#define CSIAR_FUNC_NIC2 0x00010000
PMCH = 0x6f,
EPHYAR = 0x80,
#define EPHYAR_FLAG 0x80000000
MODULE_FIRMWARE(FIRMWARE_8168F_2);
MODULE_FIRMWARE(FIRMWARE_8402_1);
MODULE_FIRMWARE(FIRMWARE_8411_1);
+MODULE_FIRMWARE(FIRMWARE_8411_2);
MODULE_FIRMWARE(FIRMWARE_8106E_1);
MODULE_FIRMWARE(FIRMWARE_8106E_2);
MODULE_FIRMWARE(FIRMWARE_8168G_2);
int mac_version;
} mac_info[] = {
/* 8168G family. */
+ { 0x7cf00000, 0x5c800000, RTL_GIGA_MAC_VER_44 },
{ 0x7cf00000, 0x50900000, RTL_GIGA_MAC_VER_42 },
{ 0x7cf00000, 0x4c100000, RTL_GIGA_MAC_VER_41 },
{ 0x7cf00000, 0x4c000000, RTL_GIGA_MAC_VER_40 },
break;
case RTL_GIGA_MAC_VER_42:
case RTL_GIGA_MAC_VER_43:
+ case RTL_GIGA_MAC_VER_44:
rtl8168g_2_hw_phy_config(tp);
break;
case RTL_GIGA_MAC_VER_41:
case RTL_GIGA_MAC_VER_42:
case RTL_GIGA_MAC_VER_43:
+ case RTL_GIGA_MAC_VER_44:
ops->write = r8168g_mdio_write;
ops->read = r8168g_mdio_read;
break;
case RTL_GIGA_MAC_VER_41:
case RTL_GIGA_MAC_VER_42:
case RTL_GIGA_MAC_VER_43:
+ case RTL_GIGA_MAC_VER_44:
RTL_W32(RxConfig, RTL_R32(RxConfig) |
AcceptBroadcast | AcceptMulticast | AcceptMyPhys);
break;
case RTL_GIGA_MAC_VER_40:
case RTL_GIGA_MAC_VER_41:
case RTL_GIGA_MAC_VER_42:
+ case RTL_GIGA_MAC_VER_44:
ops->down = r8168_pll_power_down;
ops->up = r8168_pll_power_up;
break;
case RTL_GIGA_MAC_VER_41:
case RTL_GIGA_MAC_VER_42:
case RTL_GIGA_MAC_VER_43:
+ case RTL_GIGA_MAC_VER_44:
RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST | RX_EARLY_OFF);
break;
default:
case RTL_GIGA_MAC_VER_41:
case RTL_GIGA_MAC_VER_42:
case RTL_GIGA_MAC_VER_43:
+ case RTL_GIGA_MAC_VER_44:
default:
ops->disable = NULL;
ops->enable = NULL;
tp->mac_version == RTL_GIGA_MAC_VER_41 ||
tp->mac_version == RTL_GIGA_MAC_VER_42 ||
tp->mac_version == RTL_GIGA_MAC_VER_43 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_44 ||
tp->mac_version == RTL_GIGA_MAC_VER_38) {
RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666);
RTL_R32(CSIDR) : ~0;
}
+static void r8411_csi_write(struct rtl8169_private *tp, int addr, int value)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ RTL_W32(CSIDR, value);
+ RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
+ CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT |
+ CSIAR_FUNC_NIC2);
+
+ rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100);
+}
+
+static u32 r8411_csi_read(struct rtl8169_private *tp, int addr)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) | CSIAR_FUNC_NIC2 |
+ CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
+
+ return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ?
+ RTL_R32(CSIDR) : ~0;
+}
+
static void rtl_init_csi_ops(struct rtl8169_private *tp)
{
struct csi_ops *ops = &tp->csi_ops;
ops->read = r8402_csi_read;
break;
+ case RTL_GIGA_MAC_VER_44:
+ ops->write = r8411_csi_write;
+ ops->read = r8411_csi_read;
+ break;
+
default:
ops->write = r8169_csi_write;
ops->read = r8169_csi_read;
rtl_ephy_init(tp, e_info_8168g_2, ARRAY_SIZE(e_info_8168g_2));
}
+static void rtl_hw_start_8411_2(struct rtl8169_private *tp)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+ static const struct ephy_info e_info_8411_2[] = {
+ { 0x00, 0x0000, 0x0008 },
+ { 0x0c, 0x3df0, 0x0200 },
+ { 0x0f, 0xffff, 0x5200 },
+ { 0x19, 0x0020, 0x0000 },
+ { 0x1e, 0x0000, 0x2000 }
+ };
+
+ rtl_hw_start_8168g_1(tp);
+
+ /* disable ASPM and clock request before accessing ephy */
+ RTL_W8(Config2, RTL_R8(Config2) & ~ClkReqEn);
+ RTL_W8(Config5, RTL_R8(Config5) & ~ASPM_en);
+ rtl_ephy_init(tp, e_info_8411_2, ARRAY_SIZE(e_info_8411_2));
+}
+
static void rtl_hw_start_8168(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
rtl_hw_start_8168g_2(tp);
break;
+ case RTL_GIGA_MAC_VER_44:
+ rtl_hw_start_8411_2(tp);
+ break;
+
default:
printk(KERN_ERR PFX "%s: unknown chipset (mac_version = %d).\n",
dev->name, tp->mac_version);
case RTL_GIGA_MAC_VER_41:
case RTL_GIGA_MAC_VER_42:
case RTL_GIGA_MAC_VER_43:
+ case RTL_GIGA_MAC_VER_44:
rtl_hw_init_8168g(tp);
break;
config SH_ETH
tristate "Renesas SuperH Ethernet support"
+ depends on HAS_DMA
select CRC32
select MII
select MDIO_BITBANG
rp->rx_skbuff_dma[i] =
pci_map_single(rp->pdev, skb->data, rp->rx_buf_sz,
PCI_DMA_FROMDEVICE);
-
+ if (dma_mapping_error(&rp->pdev->dev, rp->rx_skbuff_dma[i])) {
+ rp->rx_skbuff_dma[i] = 0;
+ dev_kfree_skb(skb);
+ break;
+ }
rp->rx_ring[i].addr = cpu_to_le32(rp->rx_skbuff_dma[i]);
rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
}
rp->tx_skbuff_dma[entry] =
pci_map_single(rp->pdev, skb->data, skb->len,
PCI_DMA_TODEVICE);
+ if (dma_mapping_error(&rp->pdev->dev, rp->tx_skbuff_dma[entry])) {
+ dev_kfree_skb(skb);
+ rp->tx_skbuff_dma[entry] = 0;
+ dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_skbuff_dma[entry]);
}
pci_map_single(rp->pdev, skb->data,
rp->rx_buf_sz,
PCI_DMA_FROMDEVICE);
+ if (dma_mapping_error(&rp->pdev->dev, rp->rx_skbuff_dma[entry])) {
+ dev_kfree_skb(skb);
+ rp->rx_skbuff_dma[entry] = 0;
+ break;
+ }
rp->rx_ring[entry].addr = cpu_to_le32(rp->rx_skbuff_dma[entry]);
}
rp->rx_ring[entry].rx_status = cpu_to_le32(DescOwn);
#include <linux/spi/spi.h>
#include <linux/interrupt.h>
#include <linux/module.h>
-#include <linux/pinctrl/consumer.h>
#include <net/wpan-phy.h>
#include <net/mac802154.h>
#include <net/ieee802154.h>
int ret = -ENOMEM;
u8 val;
struct mrf24j40 *devrec;
- struct pinctrl *pinctrl;
printk(KERN_INFO "mrf24j40: probe(). IRQ: %d\n", spi->irq);
if (!devrec->buf)
goto err_buf;
- pinctrl = devm_pinctrl_get_select_default(&spi->dev);
- if (IS_ERR(pinctrl))
- dev_warn(&spi->dev,
- "pinctrl pins are not configured from the driver");
-
spi->mode = SPI_MODE_0; /* TODO: Is this appropriate for right here? */
if (spi->max_speed_hz > MAX_SPI_SPEED_HZ)
spi->max_speed_hz = MAX_SPI_SPEED_HZ;
rtnl_lock();
err = __rtnl_link_register(&ifb_link_ops);
+ if (err < 0)
+ goto out;
- for (i = 0; i < numifbs && !err; i++)
+ for (i = 0; i < numifbs && !err; i++) {
err = ifb_init_one(i);
+ cond_resched();
+ }
if (err)
__rtnl_link_unregister(&ifb_link_ops);
+
+out:
rtnl_unlock();
return err;
int vnet_hdr_len = 0;
int copylen = 0;
bool zerocopy = false;
+ size_t linear;
if (q->flags & IFF_VNET_HDR) {
vnet_hdr_len = q->vnet_hdr_sz;
copylen = vnet_hdr.hdr_len;
if (!copylen)
copylen = GOODCOPY_LEN;
- } else
+ linear = copylen;
+ } else {
copylen = len;
+ linear = vnet_hdr.hdr_len;
+ }
skb = macvtap_alloc_skb(&q->sk, NET_IP_ALIGN, copylen,
- vnet_hdr.hdr_len, noblock, &err);
+ linear, noblock, &err);
if (!skb)
goto err;
static struct mdio_device_id __maybe_unused atheros_tbl[] = {
{ 0x004dd076, 0xffffffef },
+ { 0x004dd074, 0xffffffef },
{ 0x004dd072, 0xffffffef },
{ }
};
{
struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) };
struct sk_buff *skb;
- size_t len = total_len, align = NET_SKB_PAD;
+ size_t len = total_len, align = NET_SKB_PAD, linear;
struct virtio_net_hdr gso = { 0 };
int offset = 0;
int copylen;
copylen = gso.hdr_len;
if (!copylen)
copylen = GOODCOPY_LEN;
- } else
+ linear = copylen;
+ } else {
copylen = len;
+ linear = gso.hdr_len;
+ }
- skb = tun_alloc_skb(tfile, align, copylen, gso.hdr_len, noblock);
+ skb = tun_alloc_skb(tfile, align, copylen, linear, noblock);
if (IS_ERR(skb)) {
if (PTR_ERR(skb) != -EAGAIN)
tun->dev->stats.rx_dropped++;
obj-$(CONFIG_USB_NET_AX8817X) += asix.o
asix-y := asix_devices.o asix_common.o ax88172a.o
obj-$(CONFIG_USB_NET_AX88179_178A) += ax88179_178a.o
-obj-$(CONFIG_USB_NET_CDCETHER) += cdc_ether.o
+obj-$(CONFIG_USB_NET_CDCETHER) += cdc_ether.o r815x.o
obj-$(CONFIG_USB_NET_CDC_EEM) += cdc_eem.o
obj-$(CONFIG_USB_NET_DM9601) += dm9601.o
obj-$(CONFIG_USB_NET_SMSC75XX) += smsc75xx.o
},
/* Realtek RTL8152 Based USB 2.0 Ethernet Adapters */
-#if defined(CONFIG_USB_RTL8152) || defined(CONFIG_USB_RTL8152_MODULE)
{
USB_DEVICE_AND_INTERFACE_INFO(REALTEK_VENDOR_ID, 0x8152, USB_CLASS_COMM,
USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
.driver_info = 0,
},
-#endif
+
+/* Realtek RTL8153 Based USB 3.0 Ethernet Adapters */
+{
+ USB_DEVICE_AND_INTERFACE_INFO(REALTEK_VENDOR_ID, 0x8153, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+ .driver_info = 0,
+},
/*
* WHITELIST!!!
struct r8152 *tp = netdev_priv(netdev);
struct net_device_stats *stats = rtl8152_get_stats(netdev);
struct tx_desc *tx_desc;
- int len, res;
+ unsigned int len;
+ int res;
netif_stop_queue(netdev);
len = skb->len;
--- /dev/null
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/mii.h>
+#include <linux/usb.h>
+#include <linux/usb/cdc.h>
+#include <linux/usb/usbnet.h>
+
+#define RTL815x_REQT_READ 0xc0
+#define RTL815x_REQT_WRITE 0x40
+#define RTL815x_REQ_GET_REGS 0x05
+#define RTL815x_REQ_SET_REGS 0x05
+
+#define MCU_TYPE_PLA 0x0100
+#define OCP_BASE 0xe86c
+#define BASE_MII 0xa400
+
+#define BYTE_EN_DWORD 0xff
+#define BYTE_EN_WORD 0x33
+#define BYTE_EN_BYTE 0x11
+
+#define R815x_PHY_ID 32
+#define REALTEK_VENDOR_ID 0x0bda
+
+
+static int pla_read_word(struct usb_device *udev, u16 index)
+{
+ int data, ret;
+ u8 shift = index & 2;
+ __le32 ocp_data;
+
+ index &= ~3;
+
+ ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
+ RTL815x_REQ_GET_REGS, RTL815x_REQT_READ,
+ index, MCU_TYPE_PLA, &ocp_data, sizeof(ocp_data),
+ 500);
+ if (ret < 0)
+ return ret;
+
+ data = __le32_to_cpu(ocp_data);
+ data >>= (shift * 8);
+ data &= 0xffff;
+
+ return data;
+}
+
+static int pla_write_word(struct usb_device *udev, u16 index, u32 data)
+{
+ __le32 ocp_data;
+ u32 mask = 0xffff;
+ u16 byen = BYTE_EN_WORD;
+ u8 shift = index & 2;
+ int ret;
+
+ data &= mask;
+
+ if (shift) {
+ byen <<= shift;
+ mask <<= (shift * 8);
+ data <<= (shift * 8);
+ index &= ~3;
+ }
+
+ ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
+ RTL815x_REQ_GET_REGS, RTL815x_REQT_READ,
+ index, MCU_TYPE_PLA, &ocp_data, sizeof(ocp_data),
+ 500);
+ if (ret < 0)
+ return ret;
+
+ data |= __le32_to_cpu(ocp_data) & ~mask;
+ ocp_data = __cpu_to_le32(data);
+
+ ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
+ RTL815x_REQ_SET_REGS, RTL815x_REQT_WRITE,
+ index, MCU_TYPE_PLA | byen, &ocp_data,
+ sizeof(ocp_data), 500);
+
+ return ret;
+}
+
+static int ocp_reg_read(struct usbnet *dev, u16 addr)
+{
+ u16 ocp_base, ocp_index;
+ int ret;
+
+ ocp_base = addr & 0xf000;
+ ret = pla_write_word(dev->udev, OCP_BASE, ocp_base);
+ if (ret < 0)
+ goto out;
+
+ ocp_index = (addr & 0x0fff) | 0xb000;
+ ret = pla_read_word(dev->udev, ocp_index);
+
+out:
+ return ret;
+}
+
+static int ocp_reg_write(struct usbnet *dev, u16 addr, u16 data)
+{
+ u16 ocp_base, ocp_index;
+ int ret;
+
+ ocp_base = addr & 0xf000;
+ ret = pla_write_word(dev->udev, OCP_BASE, ocp_base);
+ if (ret < 0)
+ goto out1;
+
+ ocp_index = (addr & 0x0fff) | 0xb000;
+ ret = pla_write_word(dev->udev, ocp_index, data);
+
+out1:
+ return ret;
+}
+
+static int r815x_mdio_read(struct net_device *netdev, int phy_id, int reg)
+{
+ struct usbnet *dev = netdev_priv(netdev);
+
+ if (phy_id != R815x_PHY_ID)
+ return -EINVAL;
+
+ return ocp_reg_read(dev, BASE_MII + reg * 2);
+}
+
+static
+void r815x_mdio_write(struct net_device *netdev, int phy_id, int reg, int val)
+{
+ struct usbnet *dev = netdev_priv(netdev);
+
+ if (phy_id != R815x_PHY_ID)
+ return;
+
+ ocp_reg_write(dev, BASE_MII + reg * 2, val);
+}
+
+static int r8153_bind(struct usbnet *dev, struct usb_interface *intf)
+{
+ int status;
+
+ status = usbnet_cdc_bind(dev, intf);
+ if (status < 0)
+ return status;
+
+ dev->mii.dev = dev->net;
+ dev->mii.mdio_read = r815x_mdio_read;
+ dev->mii.mdio_write = r815x_mdio_write;
+ dev->mii.phy_id_mask = 0x3f;
+ dev->mii.reg_num_mask = 0x1f;
+ dev->mii.phy_id = R815x_PHY_ID;
+ dev->mii.supports_gmii = 1;
+
+ return 0;
+}
+
+static int r8152_bind(struct usbnet *dev, struct usb_interface *intf)
+{
+ int status;
+
+ status = usbnet_cdc_bind(dev, intf);
+ if (status < 0)
+ return status;
+
+ dev->mii.dev = dev->net;
+ dev->mii.mdio_read = r815x_mdio_read;
+ dev->mii.mdio_write = r815x_mdio_write;
+ dev->mii.phy_id_mask = 0x3f;
+ dev->mii.reg_num_mask = 0x1f;
+ dev->mii.phy_id = R815x_PHY_ID;
+ dev->mii.supports_gmii = 0;
+
+ return 0;
+}
+
+static const struct driver_info r8152_info = {
+ .description = "RTL8152 ECM Device",
+ .flags = FLAG_ETHER | FLAG_POINTTOPOINT,
+ .bind = r8152_bind,
+ .unbind = usbnet_cdc_unbind,
+ .status = usbnet_cdc_status,
+ .manage_power = usbnet_manage_power,
+};
+
+static const struct driver_info r8153_info = {
+ .description = "RTL8153 ECM Device",
+ .flags = FLAG_ETHER | FLAG_POINTTOPOINT,
+ .bind = r8153_bind,
+ .unbind = usbnet_cdc_unbind,
+ .status = usbnet_cdc_status,
+ .manage_power = usbnet_manage_power,
+};
+
+static const struct usb_device_id products[] = {
+{
+ USB_DEVICE_AND_INTERFACE_INFO(REALTEK_VENDOR_ID, 0x8152, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+#if defined(CONFIG_USB_RTL8152) || defined(CONFIG_USB_RTL8152_MODULE)
+ .driver_info = 0,
+#else
+ .driver_info = (unsigned long) &r8152_info,
+#endif
+},
+
+{
+ USB_DEVICE_AND_INTERFACE_INFO(REALTEK_VENDOR_ID, 0x8153, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+#if defined(CONFIG_USB_RTL8153) || defined(CONFIG_USB_RTL8153_MODULE)
+ .driver_info = 0,
+#else
+ .driver_info = (unsigned long) &r8153_info,
+#endif
+},
+
+ { }, /* END */
+};
+MODULE_DEVICE_TABLE(usb, products);
+
+static struct usb_driver r815x_driver = {
+ .name = "r815x",
+ .id_table = products,
+ .probe = usbnet_probe,
+ .disconnect = usbnet_disconnect,
+ .suspend = usbnet_suspend,
+ .resume = usbnet_resume,
+ .reset_resume = usbnet_resume,
+ .supports_autosuspend = 1,
+ .disable_hub_initiated_lpm = 1,
+};
+
+module_usb_driver(r815x_driver);
+
+MODULE_AUTHOR("Hayes Wang");
+MODULE_DESCRIPTION("Realtek USB ECM device");
+MODULE_LICENSE("GPL");
static void __exit vxlan_cleanup_module(void)
{
- unregister_pernet_device(&vxlan_net_ops);
rtnl_link_unregister(&vxlan_link_ops);
destroy_workqueue(vxlan_wq);
+ unregister_pernet_device(&vxlan_net_ops);
rcu_barrier();
}
module_exit(vxlan_cleanup_module);
#include <linux/hrtimer.h>
#include <linux/sched/rt.h>
#include <linux/freezer.h>
-#include <net/ll_poll.h>
+#include <net/busy_poll.h>
#include <asm/uaccess.h>
void (*ndo_netpoll_cleanup)(struct net_device *dev);
#endif
#ifdef CONFIG_NET_LL_RX_POLL
- int (*ndo_ll_poll)(struct napi_struct *dev);
+ int (*ndo_busy_poll)(struct napi_struct *dev);
#endif
int (*ndo_set_vf_mac)(struct net_device *dev,
int queue, u8 *mac);
--- /dev/null
+/*
+ * net busy poll support
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Author: Eliezer Tamir
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ */
+
+#ifndef _LINUX_NET_BUSY_POLL_H
+#define _LINUX_NET_BUSY_POLL_H
+
+#include <linux/netdevice.h>
+#include <net/ip.h>
+
+#ifdef CONFIG_NET_LL_RX_POLL
+
+struct napi_struct;
+extern unsigned int sysctl_net_busy_read __read_mostly;
+extern unsigned int sysctl_net_busy_poll __read_mostly;
+
+/* return values from ndo_busy_poll */
+#define LL_FLUSH_FAILED -1
+#define LL_FLUSH_BUSY -2
+
+static inline bool net_busy_loop_on(void)
+{
+ return sysctl_net_busy_poll;
+}
+
+/* a wrapper to make debug_smp_processor_id() happy
+ * we can use sched_clock() because we don't care much about precision
+ * we only care that the average is bounded
+ */
+#ifdef CONFIG_DEBUG_PREEMPT
+static inline u64 busy_loop_us_clock(void)
+{
+ u64 rc;
+
+ preempt_disable_notrace();
+ rc = sched_clock();
+ preempt_enable_no_resched_notrace();
+
+ return rc >> 10;
+}
+#else /* CONFIG_DEBUG_PREEMPT */
+static inline u64 busy_loop_us_clock(void)
+{
+ return sched_clock() >> 10;
+}
+#endif /* CONFIG_DEBUG_PREEMPT */
+
+static inline unsigned long sk_busy_loop_end_time(struct sock *sk)
+{
+ return busy_loop_us_clock() + ACCESS_ONCE(sk->sk_ll_usec);
+}
+
+/* in poll/select we use the global sysctl_net_busy_poll value */
+static inline unsigned long busy_loop_end_time(void)
+{
+ return busy_loop_us_clock() + ACCESS_ONCE(sysctl_net_busy_poll);
+}
+
+static inline bool sk_can_busy_loop(struct sock *sk)
+{
+ return sk->sk_ll_usec && sk->sk_napi_id &&
+ !need_resched() && !signal_pending(current);
+}
+
+
+static inline bool busy_loop_timeout(unsigned long end_time)
+{
+ unsigned long now = busy_loop_us_clock();
+
+ return time_after(now, end_time);
+}
+
+/* when used in sock_poll() nonblock is known at compile time to be true
+ * so the loop and end_time will be optimized out
+ */
+static inline bool sk_busy_loop(struct sock *sk, int nonblock)
+{
+ unsigned long end_time = !nonblock ? sk_busy_loop_end_time(sk) : 0;
+ const struct net_device_ops *ops;
+ struct napi_struct *napi;
+ int rc = false;
+
+ /*
+ * rcu read lock for napi hash
+ * bh so we don't race with net_rx_action
+ */
+ rcu_read_lock_bh();
+
+ napi = napi_by_id(sk->sk_napi_id);
+ if (!napi)
+ goto out;
+
+ ops = napi->dev->netdev_ops;
+ if (!ops->ndo_busy_poll)
+ goto out;
+
+ do {
+ rc = ops->ndo_busy_poll(napi);
+
+ if (rc == LL_FLUSH_FAILED)
+ break; /* permanent failure */
+
+ if (rc > 0)
+ /* local BHs are disabled, so it is OK to use _BH */
+ NET_ADD_STATS_BH(sock_net(sk),
+ LINUX_MIB_LOWLATENCYRXPACKETS, rc);
+
+ } while (!nonblock && skb_queue_empty(&sk->sk_receive_queue) &&
+ !need_resched() && !busy_loop_timeout(end_time));
+
+ rc = !skb_queue_empty(&sk->sk_receive_queue);
+out:
+ rcu_read_unlock_bh();
+ return rc;
+}
+
+/* used in the NIC receive handler to mark the skb */
+static inline void skb_mark_napi_id(struct sk_buff *skb,
+ struct napi_struct *napi)
+{
+ skb->napi_id = napi->napi_id;
+}
+
+/* used in the protocol handler to propagate the napi_id to the socket */
+static inline void sk_mark_napi_id(struct sock *sk, struct sk_buff *skb)
+{
+ sk->sk_napi_id = skb->napi_id;
+}
+
+#else /* CONFIG_NET_LL_RX_POLL */
+static inline unsigned long net_busy_loop_on(void)
+{
+ return 0;
+}
+
+static inline unsigned long busy_loop_end_time(void)
+{
+ return 0;
+}
+
+static inline bool sk_can_busy_loop(struct sock *sk)
+{
+ return false;
+}
+
+static inline bool sk_busy_loop(struct sock *sk, int nonblock)
+{
+ return false;
+}
+
+static inline void skb_mark_napi_id(struct sk_buff *skb,
+ struct napi_struct *napi)
+{
+}
+
+static inline void sk_mark_napi_id(struct sock *sk, struct sk_buff *skb)
+{
+}
+
+static inline bool busy_loop_timeout(unsigned long end_time)
+{
+ return true;
+}
+
+#endif /* CONFIG_NET_LL_RX_POLL */
+#endif /* _LINUX_NET_BUSY_POLL_H */
+++ /dev/null
-/*
- * Low Latency Sockets
- * Copyright(c) 2013 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Author: Eliezer Tamir
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- */
-
-#ifndef _LINUX_NET_LL_POLL_H
-#define _LINUX_NET_LL_POLL_H
-
-#include <linux/netdevice.h>
-#include <net/ip.h>
-
-#ifdef CONFIG_NET_LL_RX_POLL
-
-struct napi_struct;
-extern unsigned int sysctl_net_ll_read __read_mostly;
-extern unsigned int sysctl_net_ll_poll __read_mostly;
-
-/* return values from ndo_ll_poll */
-#define LL_FLUSH_FAILED -1
-#define LL_FLUSH_BUSY -2
-
-static inline bool net_busy_loop_on(void)
-{
- return sysctl_net_ll_poll;
-}
-
-/* a wrapper to make debug_smp_processor_id() happy
- * we can use sched_clock() because we don't care much about precision
- * we only care that the average is bounded
- */
-#ifdef CONFIG_DEBUG_PREEMPT
-static inline u64 busy_loop_us_clock(void)
-{
- u64 rc;
-
- preempt_disable_notrace();
- rc = sched_clock();
- preempt_enable_no_resched_notrace();
-
- return rc >> 10;
-}
-#else /* CONFIG_DEBUG_PREEMPT */
-static inline u64 busy_loop_us_clock(void)
-{
- return sched_clock() >> 10;
-}
-#endif /* CONFIG_DEBUG_PREEMPT */
-
-static inline unsigned long sk_busy_loop_end_time(struct sock *sk)
-{
- return busy_loop_us_clock() + ACCESS_ONCE(sk->sk_ll_usec);
-}
-
-/* in poll/select we use the global sysctl_net_ll_poll value */
-static inline unsigned long busy_loop_end_time(void)
-{
- return busy_loop_us_clock() + ACCESS_ONCE(sysctl_net_ll_poll);
-}
-
-static inline bool sk_can_busy_loop(struct sock *sk)
-{
- return sk->sk_ll_usec && sk->sk_napi_id &&
- !need_resched() && !signal_pending(current);
-}
-
-
-static inline bool busy_loop_timeout(unsigned long end_time)
-{
- unsigned long now = busy_loop_us_clock();
-
- return time_after(now, end_time);
-}
-
-/* when used in sock_poll() nonblock is known at compile time to be true
- * so the loop and end_time will be optimized out
- */
-static inline bool sk_busy_loop(struct sock *sk, int nonblock)
-{
- unsigned long end_time = !nonblock ? sk_busy_loop_end_time(sk) : 0;
- const struct net_device_ops *ops;
- struct napi_struct *napi;
- int rc = false;
-
- /*
- * rcu read lock for napi hash
- * bh so we don't race with net_rx_action
- */
- rcu_read_lock_bh();
-
- napi = napi_by_id(sk->sk_napi_id);
- if (!napi)
- goto out;
-
- ops = napi->dev->netdev_ops;
- if (!ops->ndo_ll_poll)
- goto out;
-
- do {
- rc = ops->ndo_ll_poll(napi);
-
- if (rc == LL_FLUSH_FAILED)
- break; /* permanent failure */
-
- if (rc > 0)
- /* local bh are disabled so it is ok to use _BH */
- NET_ADD_STATS_BH(sock_net(sk),
- LINUX_MIB_LOWLATENCYRXPACKETS, rc);
-
- } while (!nonblock && skb_queue_empty(&sk->sk_receive_queue) &&
- !need_resched() && !busy_loop_timeout(end_time));
-
- rc = !skb_queue_empty(&sk->sk_receive_queue);
-out:
- rcu_read_unlock_bh();
- return rc;
-}
-
-/* used in the NIC receive handler to mark the skb */
-static inline void skb_mark_ll(struct sk_buff *skb, struct napi_struct *napi)
-{
- skb->napi_id = napi->napi_id;
-}
-
-/* used in the protocol hanlder to propagate the napi_id to the socket */
-static inline void sk_mark_ll(struct sock *sk, struct sk_buff *skb)
-{
- sk->sk_napi_id = skb->napi_id;
-}
-
-#else /* CONFIG_NET_LL_RX_POLL */
-static inline unsigned long net_busy_loop_on(void)
-{
- return 0;
-}
-
-static inline unsigned long busy_loop_end_time(void)
-{
- return 0;
-}
-
-static inline bool sk_can_busy_loop(struct sock *sk)
-{
- return false;
-}
-
-static inline bool sk_busy_poll(struct sock *sk, int nonblock)
-{
- return false;
-}
-
-static inline void skb_mark_ll(struct sk_buff *skb, struct napi_struct *napi)
-{
-}
-
-static inline void sk_mark_ll(struct sock *sk, struct sk_buff *skb)
-{
-}
-
-static inline bool busy_loop_timeout(unsigned long end_time)
-{
- return true;
-}
-
-#endif /* CONFIG_NET_LL_RX_POLL */
-#endif /* _LINUX_NET_LL_POLL_H */
#define SO_SELECT_ERR_QUEUE 45
-#define SO_LL 46
+#define SO_BUSY_POLL 46
#endif /* __ASM_GENERIC_SOCKET_H */
*/
void p9_release_pages(struct page **pages, int nr_pages)
{
- int i = 0;
- while (pages[i] && nr_pages--) {
- put_page(pages[i]);
- i++;
- }
+ int i;
+
+ for (i = 0; i < nr_pages; i++)
+ if (pages[i])
+ put_page(pages[i]);
}
EXPORT_SYMBOL(p9_release_pages);
#include <net/sock.h>
#include <net/tcp_states.h>
#include <trace/events/skb.h>
-#include <net/ll_poll.h>
+#include <net/busy_poll.h>
/*
* Is a socket 'connection oriented' ?
}
static netdev_features_t harmonize_features(struct sk_buff *skb,
- __be16 protocol, netdev_features_t features)
+ netdev_features_t features)
{
if (skb->ip_summed != CHECKSUM_NONE &&
- !can_checksum_protocol(features, protocol)) {
+ !can_checksum_protocol(features, skb_network_protocol(skb))) {
features &= ~NETIF_F_ALL_CSUM;
} else if (illegal_highdma(skb->dev, skb)) {
features &= ~NETIF_F_SG;
struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
protocol = veh->h_vlan_encapsulated_proto;
} else if (!vlan_tx_tag_present(skb)) {
- return harmonize_features(skb, protocol, features);
+ return harmonize_features(skb, features);
}
features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_STAG_TX);
- if (protocol != htons(ETH_P_8021Q) && protocol != htons(ETH_P_8021AD)) {
- return harmonize_features(skb, protocol, features);
- } else {
+ if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD))
features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST |
NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_STAG_TX;
- return harmonize_features(skb, protocol, features);
- }
+
+ return harmonize_features(skb, features);
}
EXPORT_SYMBOL(netif_skb_features);
page = alloc_page(gfp_mask);
if (!page) {
while (head) {
- struct page *next = (struct page *)head->private;
+ struct page *next = (struct page *)page_private(head);
put_page(head);
head = next;
}
memcpy(page_address(page),
vaddr + f->page_offset, skb_frag_size(f));
kunmap_atomic(vaddr);
- page->private = (unsigned long)head;
+ set_page_private(page, (unsigned long)head);
head = page;
}
for (i = num_frags - 1; i >= 0; i--) {
__skb_fill_page_desc(skb, i, head, 0,
skb_shinfo(skb)->frags[i].size);
- head = (struct page *)head->private;
+ head = (struct page *)page_private(head);
}
skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
#include <net/tcp.h>
#endif
-#include <net/ll_poll.h>
+#include <net/busy_poll.h>
static DEFINE_MUTEX(proto_list_mutex);
static LIST_HEAD(proto_list);
break;
#ifdef CONFIG_NET_LL_RX_POLL
- case SO_LL:
+ case SO_BUSY_POLL:
/* allow unprivileged users to decrease the value */
if ((val > sk->sk_ll_usec) && !capable(CAP_NET_ADMIN))
ret = -EPERM;
break;
#ifdef CONFIG_NET_LL_RX_POLL
- case SO_LL:
+ case SO_BUSY_POLL:
v.val = sk->sk_ll_usec;
break;
#endif
#ifdef CONFIG_NET_LL_RX_POLL
sk->sk_napi_id = 0;
- sk->sk_ll_usec = sysctl_net_ll_read;
+ sk->sk_ll_usec = sysctl_net_busy_read;
#endif
/*
#include <net/ip.h>
#include <net/sock.h>
#include <net/net_ratelimit.h>
-#include <net/ll_poll.h>
+#include <net/busy_poll.h>
static int one = 1;
#endif /* CONFIG_NET_FLOW_LIMIT */
#ifdef CONFIG_NET_LL_RX_POLL
{
- .procname = "low_latency_poll",
- .data = &sysctl_net_ll_poll,
+ .procname = "busy_poll",
+ .data = &sysctl_net_busy_poll,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec
},
{
- .procname = "low_latency_read",
- .data = &sysctl_net_ll_read,
+ .procname = "busy_read",
+ .data = &sysctl_net_busy_read,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec
if (opt_vlen <= 0)
goto bad_option_value;
- ret = strict_strtoul(eq, 10, &derrno);
+ ret = kstrtoul(eq, 10, &derrno);
if (ret < 0)
goto bad_option_value;
}
__skb_push(skb, tnl_hlen - ghl);
+ skb_reset_inner_headers(skb);
+ skb->encapsulation = 1;
+
skb_reset_mac_header(skb);
skb_set_network_header(skb, mac_len);
skb->mac_len = mac_len;
lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
spin_lock_bh(lock);
- done =__sk_nulls_del_node_init_rcu(sk);
+ done = __sk_nulls_del_node_init_rcu(sk);
if (done)
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
spin_unlock_bh(lock);
struct rtable *rt, __be16 df)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
- int pkt_size = skb->len - tunnel->hlen;
+ int pkt_size = skb->len - tunnel->hlen - dev->hard_header_len;
int mtu;
if (df)
#include <asm/uaccess.h>
#include <asm/ioctls.h>
-#include <net/ll_poll.h>
+#include <net/busy_poll.h>
int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT;
#include <net/netdma.h>
#include <net/secure_seq.h>
#include <net/tcp_memcontrol.h>
-#include <net/ll_poll.h>
+#include <net/busy_poll.h>
#include <linux/inet.h>
#include <linux/ipv6.h>
if (sk_filter(sk, skb))
goto discard_and_relse;
- sk_mark_ll(sk, skb);
+ sk_mark_napi_id(sk, skb);
skb->dev = NULL;
bh_lock_sock_nested(sk);
* see tcp_input.c tcp_sacktag_write_queue().
*/
TCP_SKB_CB(skb)->ack_seq = tp->snd_nxt;
+ } else {
+ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL);
}
return err;
}
if (sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))
continue;
- if (tcp_retransmit_skb(sk, skb)) {
- NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL);
+ if (tcp_retransmit_skb(sk, skb))
return;
- }
+
NET_INC_STATS_BH(sock_net(sk), mib_idx);
if (tcp_in_cwnd_reduction(sk))
#include <trace/events/udp.h>
#include <linux/static_key.h>
#include <trace/events/skb.h>
-#include <net/ll_poll.h>
+#include <net/busy_poll.h>
#include "udp_impl.h"
struct udp_table udp_table __read_mostly;
if (sk != NULL) {
int ret;
- sk_mark_ll(sk, skb);
+ sk_mark_napi_id(sk, skb);
ret = udp_queue_rcv_skb(sk, skb);
sock_put(sk);
struct udphdr *uh;
int udp_offset = outer_hlen - tnl_hlen;
+ skb_reset_inner_headers(skb);
+ skb->encapsulation = 1;
+
skb->mac_len = mac_len;
skb_push(skb, outer_hlen);
uh->check = CSUM_MANGLED_0;
}
- skb->ip_summed = CHECKSUM_NONE;
skb->protocol = protocol;
} while ((skb = skb->next));
out:
return ln;
}
+static inline bool rt6_qualify_for_ecmp(struct rt6_info *rt)
+{
+ return (rt->rt6i_flags & (RTF_GATEWAY|RTF_ADDRCONF|RTF_DYNAMIC)) ==
+ RTF_GATEWAY;
+}
+
/*
* Insert routing information in a node.
*/
int add = (!info->nlh ||
(info->nlh->nlmsg_flags & NLM_F_CREATE));
int found = 0;
+ bool rt_can_ecmp = rt6_qualify_for_ecmp(rt);
ins = &fn->leaf;
* To avoid long list, we only had siblings if the
* route have a gateway.
*/
- if (rt->rt6i_flags & RTF_GATEWAY &&
- !(rt->rt6i_flags & RTF_EXPIRES) &&
- !(iter->rt6i_flags & RTF_EXPIRES))
+ if (rt_can_ecmp &&
+ rt6_qualify_for_ecmp(iter))
rt->rt6i_nsiblings++;
}
/* Find the first route that have the same metric */
sibling = fn->leaf;
while (sibling) {
- if (sibling->rt6i_metric == rt->rt6i_metric) {
+ if (sibling->rt6i_metric == rt->rt6i_metric &&
+ rt6_qualify_for_ecmp(sibling)) {
list_add_tail(&rt->rt6i_siblings,
&sibling->rt6i_siblings);
break;
#include <linux/sysctl.h>
#endif
+enum rt6_nud_state {
+ RT6_NUD_FAIL_HARD = -2,
+ RT6_NUD_FAIL_SOFT = -1,
+ RT6_NUD_SUCCEED = 1
+};
+
static struct rt6_info *ip6_rt_copy(struct rt6_info *ort,
const struct in6_addr *dest);
static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie);
return 0;
}
-static inline bool rt6_check_neigh(struct rt6_info *rt)
+static inline enum rt6_nud_state rt6_check_neigh(struct rt6_info *rt)
{
struct neighbour *neigh;
- bool ret = false;
+ enum rt6_nud_state ret = RT6_NUD_FAIL_HARD;
if (rt->rt6i_flags & RTF_NONEXTHOP ||
!(rt->rt6i_flags & RTF_GATEWAY))
- return true;
+ return RT6_NUD_SUCCEED;
rcu_read_lock_bh();
neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
if (neigh) {
read_lock(&neigh->lock);
if (neigh->nud_state & NUD_VALID)
- ret = true;
+ ret = RT6_NUD_SUCCEED;
#ifdef CONFIG_IPV6_ROUTER_PREF
else if (!(neigh->nud_state & NUD_FAILED))
- ret = true;
+ ret = RT6_NUD_SUCCEED;
#endif
read_unlock(&neigh->lock);
- } else if (IS_ENABLED(CONFIG_IPV6_ROUTER_PREF)) {
- ret = true;
+ } else {
+ ret = IS_ENABLED(CONFIG_IPV6_ROUTER_PREF) ?
+ RT6_NUD_SUCCEED : RT6_NUD_FAIL_SOFT;
}
rcu_read_unlock_bh();
m = rt6_check_dev(rt, oif);
if (!m && (strict & RT6_LOOKUP_F_IFACE))
- return -1;
+ return RT6_NUD_FAIL_HARD;
#ifdef CONFIG_IPV6_ROUTER_PREF
m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(rt->rt6i_flags)) << 2;
#endif
- if (!rt6_check_neigh(rt) && (strict & RT6_LOOKUP_F_REACHABLE))
- return -1;
+ if (strict & RT6_LOOKUP_F_REACHABLE) {
+ int n = rt6_check_neigh(rt);
+ if (n < 0)
+ return n;
+ }
return m;
}
static struct rt6_info *find_match(struct rt6_info *rt, int oif, int strict,
- int *mpri, struct rt6_info *match)
+ int *mpri, struct rt6_info *match,
+ bool *do_rr)
{
int m;
+ bool match_do_rr = false;
if (rt6_check_expired(rt))
goto out;
m = rt6_score_route(rt, oif, strict);
- if (m < 0)
+ if (m == RT6_NUD_FAIL_SOFT && !IS_ENABLED(CONFIG_IPV6_ROUTER_PREF)) {
+ match_do_rr = true;
+ m = 0; /* lowest valid score */
+ } else if (m < 0) {
goto out;
+ }
+
+ if (strict & RT6_LOOKUP_F_REACHABLE)
+ rt6_probe(rt);
if (m > *mpri) {
- if (strict & RT6_LOOKUP_F_REACHABLE)
- rt6_probe(match);
+ *do_rr = match_do_rr;
*mpri = m;
match = rt;
- } else if (strict & RT6_LOOKUP_F_REACHABLE) {
- rt6_probe(rt);
}
-
out:
return match;
}
static struct rt6_info *find_rr_leaf(struct fib6_node *fn,
struct rt6_info *rr_head,
- u32 metric, int oif, int strict)
+ u32 metric, int oif, int strict,
+ bool *do_rr)
{
struct rt6_info *rt, *match;
int mpri = -1;
match = NULL;
for (rt = rr_head; rt && rt->rt6i_metric == metric;
rt = rt->dst.rt6_next)
- match = find_match(rt, oif, strict, &mpri, match);
+ match = find_match(rt, oif, strict, &mpri, match, do_rr);
for (rt = fn->leaf; rt && rt != rr_head && rt->rt6i_metric == metric;
rt = rt->dst.rt6_next)
- match = find_match(rt, oif, strict, &mpri, match);
+ match = find_match(rt, oif, strict, &mpri, match, do_rr);
return match;
}
{
struct rt6_info *match, *rt0;
struct net *net;
+ bool do_rr = false;
rt0 = fn->rr_ptr;
if (!rt0)
fn->rr_ptr = rt0 = fn->leaf;
- match = find_rr_leaf(fn, rt0, rt0->rt6i_metric, oif, strict);
+ match = find_rr_leaf(fn, rt0, rt0->rt6i_metric, oif, strict,
+ &do_rr);
- if (!match &&
- (strict & RT6_LOOKUP_F_REACHABLE)) {
+ if (do_rr) {
struct rt6_info *next = rt0->dst.rt6_next;
/* no entries matched; do round-robin */
rt = (struct rt6_info *) skb_dst(skb);
if (rt) {
- if (rt->rt6i_flags & RTF_CACHE)
- rt6_update_expires(rt, 0);
- else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT))
+ if (rt->rt6i_flags & RTF_CACHE) {
+ dst_hold(&rt->dst);
+ if (ip6_del_rt(rt))
+ dst_free(&rt->dst);
+ } else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT)) {
rt->rt6i_node->fn_sernum = -1;
+ }
}
}
#include <net/inet_common.h>
#include <net/secure_seq.h>
#include <net/tcp_memcontrol.h>
-#include <net/ll_poll.h>
+#include <net/busy_poll.h>
#include <asm/uaccess.h>
if (sk_filter(sk, skb))
goto discard_and_relse;
- sk_mark_ll(sk, skb);
+ sk_mark_napi_id(sk, skb);
skb->dev = NULL;
bh_lock_sock_nested(sk);
#include <net/ip6_checksum.h>
#include <net/xfrm.h>
#include <net/inet6_hashtables.h>
-#include <net/ll_poll.h>
+#include <net/busy_poll.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
if (sk != NULL) {
int ret;
- sk_mark_ll(sk, skb);
+ sk_mark_napi_id(sk, skb);
ret = udpv6_queue_rcv_skb(sk, skb);
sock_put(sk);
unsigned long old_vslot = q->oldV >> q->min_slot_shift;
if (vslot != old_vslot) {
- unsigned long mask = (1ULL << fls(vslot ^ old_vslot)) - 1;
+ unsigned long mask;
+ int last_flip_pos = fls(vslot ^ old_vslot);
+
+ if (last_flip_pos > 31) /* higher than the number of groups */
+ mask = ~0UL; /* make all groups eligible */
+ else
+ mask = (1UL << last_flip_pos) - 1;
+
qfq_move_groups(q, mask, IR, ER);
qfq_move_groups(q, mask, IB, EB);
}
agg->F = agg->S + (u64)service_received * agg->inv_w;
}
-static inline void qfq_update_agg_ts(struct qfq_sched *q,
- struct qfq_aggregate *agg,
- enum update_reason reason);
+/* Assign a reasonable start time for a new aggregate in group i.
+ * Admissible values for \hat(F) are multiples of \sigma_i
+ * no greater than V+\sigma_i . Larger values mean that
+ * we had a wraparound so we consider the timestamp to be stale.
+ *
+ * If F is not stale and F >= V then we set S = F.
+ * Otherwise we should assign S = V, but this may violate
+ * the ordering in EB (see [2]). So, if we have groups in ER,
+ * set S to the F_j of the first group j which would be blocking us.
+ * We are guaranteed not to move S backward because
+ * otherwise our group i would still be blocked.
+ */
+static void qfq_update_start(struct qfq_sched *q, struct qfq_aggregate *agg)
+{
+ unsigned long mask;
+ u64 limit, roundedF;
+ int slot_shift = agg->grp->slot_shift;
+
+ roundedF = qfq_round_down(agg->F, slot_shift);
+ limit = qfq_round_down(q->V, slot_shift) + (1ULL << slot_shift);
+
+ if (!qfq_gt(agg->F, q->V) || qfq_gt(roundedF, limit)) {
+ /* timestamp was stale */
+ mask = mask_from(q->bitmaps[ER], agg->grp->index);
+ if (mask) {
+ struct qfq_group *next = qfq_ffs(q, mask);
+ if (qfq_gt(roundedF, next->F)) {
+ if (qfq_gt(limit, next->F))
+ agg->S = next->F;
+ else /* preserve timestamp correctness */
+ agg->S = limit;
+ return;
+ }
+ }
+ agg->S = q->V;
+ } else /* timestamp is not stale */
+ agg->S = agg->F;
+}
+
+/* Update the timestamps of agg before scheduling/rescheduling it for
+ * service. In particular, assign to agg->F its maximum possible
+ * value, i.e., the virtual finish time with which the aggregate
+ * should be labeled if it used all its budget once in service.
+ */
+static inline void
+qfq_update_agg_ts(struct qfq_sched *q,
+ struct qfq_aggregate *agg, enum update_reason reason)
+{
+ if (reason != requeue)
+ qfq_update_start(q, agg);
+ else /* just charge agg for the service received */
+ agg->S = agg->F;
+
+ agg->F = agg->S + (u64)agg->budgetmax * agg->inv_w;
+}
static void qfq_schedule_agg(struct qfq_sched *q, struct qfq_aggregate *agg);
return agg;
}
-/*
- * Assign a reasonable start time for a new aggregate in group i.
- * Admissible values for \hat(F) are multiples of \sigma_i
- * no greater than V+\sigma_i . Larger values mean that
- * we had a wraparound so we consider the timestamp to be stale.
- *
- * If F is not stale and F >= V then we set S = F.
- * Otherwise we should assign S = V, but this may violate
- * the ordering in EB (see [2]). So, if we have groups in ER,
- * set S to the F_j of the first group j which would be blocking us.
- * We are guaranteed not to move S backward because
- * otherwise our group i would still be blocked.
- */
-static void qfq_update_start(struct qfq_sched *q, struct qfq_aggregate *agg)
-{
- unsigned long mask;
- u64 limit, roundedF;
- int slot_shift = agg->grp->slot_shift;
-
- roundedF = qfq_round_down(agg->F, slot_shift);
- limit = qfq_round_down(q->V, slot_shift) + (1ULL << slot_shift);
-
- if (!qfq_gt(agg->F, q->V) || qfq_gt(roundedF, limit)) {
- /* timestamp was stale */
- mask = mask_from(q->bitmaps[ER], agg->grp->index);
- if (mask) {
- struct qfq_group *next = qfq_ffs(q, mask);
- if (qfq_gt(roundedF, next->F)) {
- if (qfq_gt(limit, next->F))
- agg->S = next->F;
- else /* preserve timestamp correctness */
- agg->S = limit;
- return;
- }
- }
- agg->S = q->V;
- } else /* timestamp is not stale */
- agg->S = agg->F;
-}
-
-/*
- * Update the timestamps of agg before scheduling/rescheduling it for
- * service. In particular, assign to agg->F its maximum possible
- * value, i.e., the virtual finish time with which the aggregate
- * should be labeled if it used all its budget once in service.
- */
-static inline void
-qfq_update_agg_ts(struct qfq_sched *q,
- struct qfq_aggregate *agg, enum update_reason reason)
-{
- if (reason != requeue)
- qfq_update_start(q, agg);
- else /* just charge agg for the service received */
- agg->S = agg->F;
-
- agg->F = agg->S + (u64)agg->budgetmax * agg->inv_w;
-}
-
-static void qfq_schedule_agg(struct qfq_sched *, struct qfq_aggregate *);
-
static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
struct qfq_sched *q = qdisc_priv(sch);
#include <linux/route.h>
#include <linux/sockios.h>
#include <linux/atalk.h>
-#include <net/ll_poll.h>
+#include <net/busy_poll.h>
#ifdef CONFIG_NET_LL_RX_POLL
-unsigned int sysctl_net_ll_read __read_mostly;
-unsigned int sysctl_net_ll_poll __read_mostly;
+unsigned int sysctl_net_busy_read __read_mostly;
+unsigned int sysctl_net_busy_poll __read_mostly;
#endif
static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
if (str_size < 60) /* 60 = 19 * strlen("xx:") + strlen("xx\0") */
return 1;
- sprintf(str_buf, "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:"
- "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
- a->value[0], a->value[1], a->value[2], a->value[3],
- a->value[4], a->value[5], a->value[6], a->value[7],
- a->value[8], a->value[9], a->value[10], a->value[11],
- a->value[12], a->value[13], a->value[14], a->value[15],
- a->value[16], a->value[17], a->value[18], a->value[19]);
+ sprintf(str_buf, "%20phC", a->value);
return 0;
}