};
vmac-phy {
- compatible = "vmac-phy";
+ compatible = "rockchip,vmac-phy";
power-gpios = <&gpio0 GPIO_C0 GPIO_ACTIVE_HIGH>;
};
};
+++ /dev/null
-#
-# rockchip device configuration
-#
-
-config NET_VENDOR_ROCKCHIP
- bool "Rockchip devices"
- default y
- depends on HAS_IOMEM
- ---help---
- Rockchip devices
-
-if NET_VENDOR_ROCKCHIP
-
-source "drivers/net/ethernet/rk/vmac/Kconfig"
-
-endif # NET_VENDOR_ROCKCHIP
+++ /dev/null
-#
-# Makefile for the rockchip device drivers.
-#
-
-obj-$(CONFIG_RK_VMAC_ETH) += vmac/
+++ /dev/null
-config RK_VMAC_ETH
- bool "Rockchip 10/100 Ethernet driver"
- depends on HAS_IOMEM && HAS_DMA
- select NET_CORE
- select MII
- select PHYLIB
- select CRC32
- ---help---
- Rockchip 10/100 VMAC Ethernet driver.
-
+++ /dev/null
-obj-$(CONFIG_RK_VMAC_ETH) += rk29_vmac.o
-obj-$(CONFIG_RK_VMAC_ETH) += rk29_vmac_phy.o
+++ /dev/null
-/*\r
- * linux/arch/arc/drivers/arcvmac.c\r
- *\r
- * Copyright (C) 2003-2006 Codito Technologies, for linux-2.4 port\r
- * Copyright (C) 2006-2007 Celunite Inc, for linux-2.6 port\r
- * Copyright (C) 2007-2008 Sagem Communications, Fehmi HAFSI\r
- * Copyright (C) 2009 Sagem Communications, Andreas Fenkart\r
- * All Rights Reserved.\r
- *\r
- * This program is free software; you can redistribute it and/or modify\r
- * it under the terms of the GNU General Public License as published by\r
- * the Free Software Foundation; either version 2 of the License, or\r
- * (at your option) any later version.\r
- *\r
- * This program is distributed in the hope that it will be useful,\r
- * but WITHOUT ANY WARRANTY; without even the implied warranty of\r
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r
- * GNU General Public License for more details.\r
- *\r
- * You should have received a copy of the GNU General Public License\r
- * along with this program; if not, write to the Free Software\r
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\r
- *\r
- * external PHY support based on dnet.c\r
- * ring management based on bcm63xx_enet.c\r
- *\r
- * Authors: amit.bhor@celunite.com, sameer.dhavale@celunite.com\r
- */\r
-\r
-#define DEBUG\r
-\r
-#include <linux/clk.h>\r
-#include <linux/crc32.h>\r
-#include <linux/delay.h>\r
-#include <linux/dma-mapping.h>\r
-#include <linux/etherdevice.h>\r
-#include <linux/init.h>\r
-#include <linux/io.h>\r
-#include <linux/kernel.h>\r
-#include <linux/module.h>\r
-#include <linux/moduleparam.h>\r
-#include <linux/netdevice.h>\r
-#include <linux/phy.h>\r
-#include <linux/platform_device.h>\r
-#include <linux/slab.h>\r
-#include <linux/types.h>\r
-#include <linux/wakelock.h>\r
-#include <linux/version.h>\r
-#include <linux/gpio.h>\r
-#include <asm/irq.h>\r
-#include <linux/interrupt.h>\r
-#include <linux/completion.h>\r
-#include <linux/of.h>\r
-#include <linux/of_platform.h>\r
-\r
-#include "rk29_vmac.h"\r
-\r
-//static struct wake_lock idlelock; /* add by lyx @ 20110302 */\r
-\r
-/* Register access macros */\r
-#define vmac_writel(port, value, reg) \\r
- writel((value), (port)->regs + reg##_OFFSET)\r
-#define vmac_readl(port, reg) readl((port)->regs + reg##_OFFSET)\r
-\r
-static unsigned char *read_mac_reg(struct net_device *dev,\r
- unsigned char hwaddr[ETH_ALEN])\r
-{\r
- struct vmac_priv *ap = netdev_priv(dev);\r
- unsigned mac_lo, mac_hi;\r
-\r
- WARN_ON(!hwaddr);\r
- mac_lo = vmac_readl(ap, ADDRL);\r
- mac_hi = vmac_readl(ap, ADDRH);\r
-\r
- hwaddr[0] = (mac_lo >> 0) & 0xff;\r
- hwaddr[1] = (mac_lo >> 8) & 0xff;\r
- hwaddr[2] = (mac_lo >> 16) & 0xff;\r
- hwaddr[3] = (mac_lo >> 24) & 0xff;\r
- hwaddr[4] = (mac_hi >> 0) & 0xff;\r
- hwaddr[5] = (mac_hi >> 8) & 0xff;\r
- return hwaddr;\r
-}\r
-\r
-static void write_mac_reg(struct net_device *dev, unsigned char* hwaddr)\r
-{\r
- struct vmac_priv *ap = netdev_priv(dev);\r
- unsigned mac_lo, mac_hi;\r
-\r
- mac_lo = hwaddr[3] << 24 | hwaddr[2] << 16 | hwaddr[1] << 8 | hwaddr[0];\r
- mac_hi = hwaddr[5] << 8 | hwaddr[4];\r
-\r
- vmac_writel(ap, mac_lo, ADDRL);\r
- vmac_writel(ap, mac_hi, ADDRH);\r
-}\r
-\r
-static void vmac_mdio_xmit(struct vmac_priv *ap, unsigned val)\r
-{\r
- init_completion(&ap->mdio_complete);\r
- vmac_writel(ap, val, MDIO_DATA);\r
- if(!wait_for_completion_timeout(&ap->mdio_complete, msecs_to_jiffies(1000)))\r
- printk("Time out for waiting mdio completion\n");\r
-}\r
-\r
-static int vmac_mdio_read(struct mii_bus *bus, int phy_id, int phy_reg)\r
-{\r
- struct vmac_priv *vmac = bus->priv;\r
- unsigned int val;\r
- /* only 5 bits allowed for phy-addr and reg_offset */\r
- WARN_ON(phy_id & ~0x1f || phy_reg & ~0x1f);\r
-\r
- val = MDIO_BASE | MDIO_OP_READ;\r
- val |= phy_id << 23 | phy_reg << 18;\r
- vmac_mdio_xmit(vmac, val);\r
-\r
- val = vmac_readl(vmac, MDIO_DATA);\r
- return val & MDIO_DATA_MASK;\r
-}\r
-\r
-static int vmac_mdio_write(struct mii_bus *bus, int phy_id, int phy_reg,\r
- u16 value)\r
-{\r
- struct vmac_priv *vmac = bus->priv;\r
- unsigned int val;\r
- /* only 5 bits allowed for phy-addr and reg_offset */\r
- WARN_ON(phy_id & ~0x1f || phy_reg & ~0x1f);\r
-\r
- val = MDIO_BASE | MDIO_OP_WRITE;\r
- val |= phy_id << 23 | phy_reg << 18;\r
- val |= (value & MDIO_DATA_MASK);\r
- vmac_mdio_xmit(vmac, val);\r
- return 0;\r
-}\r
-\r
-static void vmac_handle_link_change(struct net_device *dev)\r
-{\r
- struct vmac_priv *ap = netdev_priv(dev);\r
- struct phy_device *phydev = ap->phy_dev;\r
- unsigned long flags;\r
- int report_change = 0;\r
- struct rk29_vmac_platform_data *pdata = ap->pdev->dev.platform_data;\r
-\r
- spin_lock_irqsave(&ap->lock, flags);\r
-\r
- if (phydev->duplex != ap->duplex) {\r
- unsigned tmp;\r
-\r
- tmp = vmac_readl(ap, CONTROL);\r
-\r
- if (phydev->duplex)\r
- tmp |= ENFL_MASK;\r
- else\r
- tmp &= ~ENFL_MASK;\r
-\r
- vmac_writel(ap, tmp, CONTROL);\r
-\r
- ap->duplex = phydev->duplex;\r
- report_change = 1;\r
- }\r
-\r
- if (phydev->speed != ap->speed) {\r
- ap->speed = phydev->speed;\r
- report_change = 1;\r
- }\r
-\r
- if (pdata && pdata->rmii_speed_switch)\r
- pdata->rmii_speed_switch(phydev->speed);\r
-\r
- if (phydev->link != ap->link) {\r
- ap->link = phydev->link;\r
- report_change = 1;\r
- }\r
-\r
- spin_unlock_irqrestore(&ap->lock, flags);\r
-\r
- if (report_change)\r
- phy_print_status(ap->phy_dev);\r
-}\r
-\r
-static int vmac_mii_probe(struct net_device *dev)\r
-{\r
- struct vmac_priv *ap = netdev_priv(dev);\r
- struct phy_device *phydev = NULL; \r
- //struct clk *sys_clk;\r
- //unsigned long clock_rate;\r
- int phy_addr, err;\r
-\r
-\r
-#if defined (CONFIG_PHY_PORT_NUM) && (CONFIG_PHY_PORT_NUM != 0)\r
- if (ap->mii_bus->phy_map[CONFIG_PHY_PORT_NUM])\r
- phydev = ap->mii_bus->phy_map[CONFIG_PHY_PORT_NUM];\r
-#else\r
- /* find the first phy */\r
- for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {\r
- if (ap->mii_bus->phy_map[phy_addr]) {\r
- phydev = ap->mii_bus->phy_map[phy_addr];\r
- break;\r
- }\r
- }\r
-#endif\r
-\r
- if (!phydev) {\r
- dev_err(&dev->dev, "no PHY found\n");\r
- return -ENODEV;\r
- }\r
-\r
- /* add pin_irq, if avail */\r
- phydev = phy_connect(dev, dev_name(&phydev->dev),\r
- &vmac_handle_link_change,\r
- PHY_INTERFACE_MODE_RMII);\r
- if (IS_ERR(phydev)) {\r
- err = PTR_ERR(phydev);\r
- dev_err(&dev->dev, "could not attach to PHY %d\n", err);\r
- goto err_out;\r
- }\r
-\r
- phydev->supported &= PHY_BASIC_FEATURES;\r
- phydev->supported |= SUPPORTED_Asym_Pause | SUPPORTED_Pause;\r
-\r
- phydev->advertising = phydev->supported;\r
-\r
- ap->link = 0;\r
- ap->speed = 0;\r
- ap->duplex = -1;\r
- ap->phy_dev = phydev;\r
-\r
- return 0;\r
-//err_disconnect:\r
-// phy_disconnect(phydev);\r
-err_out:\r
- return err;\r
-}\r
-\r
-static int vmac_mii_init(struct vmac_priv *ap)\r
-{\r
- int err, i;\r
-\r
- ap->mii_bus = mdiobus_alloc();\r
- \r
- if (ap->mii_bus == NULL)\r
- return -ENOMEM;\r
-\r
- ap->mii_bus->name = "vmac_mii_bus";\r
- ap->mii_bus->read = &vmac_mdio_read;\r
- ap->mii_bus->write = &vmac_mdio_write;\r
-\r
- snprintf(ap->mii_bus->id, MII_BUS_ID_SIZE, "%x", 0);\r
-\r
- ap->mii_bus->priv = ap;\r
-\r
- err = -ENOMEM;\r
- ap->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);\r
- if (!ap->mii_bus->irq)\r
- goto err_out;\r
-\r
- for (i = 0; i < PHY_MAX_ADDR; i++)\r
- ap->mii_bus->irq[i] = PHY_POLL;\r
-\r
-#if 0\r
- /* FIXME: what is it used for? */\r
- platform_set_drvdata(ap->dev, ap->mii_bus);\r
-#endif\r
-\r
- err = mdiobus_register(ap->mii_bus);\r
- if (err)\r
- goto err_out_free_mdio_irq;\r
-\r
- err = vmac_mii_probe(ap->dev);\r
- if (err)\r
- goto err_out_unregister_bus;\r
-\r
- return 0;\r
-\r
-err_out_unregister_bus:\r
- mdiobus_unregister(ap->mii_bus);\r
-err_out_free_mdio_irq:\r
- kfree(ap->mii_bus->irq);\r
-err_out:\r
- mdiobus_free(ap->mii_bus);\r
- ap->mii_bus = NULL;\r
- return err;\r
-}\r
-\r
-static void vmac_mii_exit(struct net_device *dev)\r
-{\r
- struct vmac_priv *ap = netdev_priv(dev);\r
-\r
- if (ap->phy_dev)\r
- phy_disconnect(ap->phy_dev);\r
- if (ap->mii_bus) {\r
- mdiobus_unregister(ap->mii_bus);\r
- kfree(ap->mii_bus->irq);\r
- mdiobus_free(ap->mii_bus);\r
- ap->mii_bus = NULL;\r
- }\r
-}\r
-\r
-static int vmacether_get_settings(struct net_device *dev,\r
- struct ethtool_cmd *cmd)\r
-{\r
- struct vmac_priv *ap = netdev_priv(dev);\r
- struct phy_device *phydev = ap->phy_dev;\r
-\r
- if (!phydev)\r
- return -ENODEV;\r
-\r
- return phy_ethtool_gset(phydev, cmd);\r
-}\r
-\r
-static int vmacether_set_settings(struct net_device *dev,\r
- struct ethtool_cmd *cmd)\r
-{\r
- struct vmac_priv *ap = netdev_priv(dev);\r
- struct phy_device *phydev = ap->phy_dev;\r
-\r
- if (!phydev)\r
- return -ENODEV;\r
-\r
- return phy_ethtool_sset(phydev, cmd);\r
-}\r
-\r
-static int vmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)\r
-{\r
- struct vmac_priv *ap = netdev_priv(dev);\r
- struct phy_device *phydev = ap->phy_dev;\r
-\r
- if (!netif_running(dev))\r
- return -EINVAL;\r
-\r
- if (!phydev)\r
- return -ENODEV;\r
-\r
- return phy_mii_ioctl(phydev, rq, cmd);\r
-}\r
-\r
-static void vmacether_get_drvinfo(struct net_device *dev,\r
- struct ethtool_drvinfo *info)\r
-{\r
- struct vmac_priv *ap = netdev_priv(dev);\r
-\r
- strlcpy(info->driver, VMAC_NAME, sizeof(info->driver));\r
- strlcpy(info->version, VMAC_VERSION, sizeof(info->version));\r
- snprintf(info->bus_info, sizeof(info->bus_info),\r
- "platform 0x%x", ap->mem_base);\r
-}\r
-\r
-static int update_error_counters(struct net_device *dev, int status)\r
-{\r
- struct vmac_priv *ap = netdev_priv(dev);\r
- dev_dbg(&ap->pdev->dev, "rx error counter overrun. status = 0x%x\n",\r
- status);\r
-\r
- /* programming error */\r
- WARN_ON(status & TXCH_MASK);\r
- WARN_ON(!(status & (MSER_MASK | RXCR_MASK | RXFR_MASK | RXFL_MASK)));\r
-\r
- if (status & MSER_MASK)\r
- ap->stats.rx_over_errors += 256; /* ran out of BD */\r
- if (status & RXCR_MASK)\r
- ap->stats.rx_crc_errors += 256;\r
- if (status & RXFR_MASK)\r
- ap->stats.rx_frame_errors += 256;\r
- if (status & RXFL_MASK)\r
- ap->stats.rx_fifo_errors += 256;\r
-\r
- return 0;\r
-}\r
-\r
-static void update_tx_errors(struct net_device *dev, int status)\r
-{\r
- struct vmac_priv *ap = netdev_priv(dev);\r
-\r
- if (status & UFLO)\r
- ap->stats.tx_fifo_errors++;\r
-\r
- if (ap->duplex)\r
- return;\r
-\r
- /* half duplex flags */\r
- if (status & LTCL)\r
- ap->stats.tx_window_errors++;\r
- if (status & RETRY_CT)\r
- ap->stats.collisions += (status & RETRY_CT) >> 24;\r
- if (status & DROP) /* too many retries */\r
- ap->stats.tx_aborted_errors++;\r
- if (status & DEFER)\r
- dev_vdbg(&ap->pdev->dev, "\"defer to traffic\"\n");\r
- if (status & CARLOSS)\r
- ap->stats.tx_carrier_errors++;\r
-}\r
-\r
-static int vmac_rx_reclaim_force(struct net_device *dev)\r
-{\r
- struct vmac_priv *ap = netdev_priv(dev);\r
- int ct;\r
-\r
- ct = 0;\r
-\r
- dev_dbg(&ap->pdev->dev, "%s need to release %d rx sk_buff\n",\r
- __func__, fifo_used(&ap->rx_ring));\r
-\r
- while (!fifo_empty(&ap->rx_ring) && ct++ < ap->rx_ring.size) {\r
- struct vmac_buffer_desc *desc;\r
- struct sk_buff *skb;\r
- int desc_idx;\r
-\r
- desc_idx = ap->rx_ring.tail;\r
- desc = &ap->rxbd[desc_idx];\r
- fifo_inc_tail(&ap->rx_ring);\r
-\r
- if (!ap->rx_skbuff[desc_idx]) {\r
- dev_err(&ap->pdev->dev, "non-populated rx_skbuff found %d\n",\r
- desc_idx);\r
- continue;\r
- }\r
-\r
- skb = ap->rx_skbuff[desc_idx];\r
- ap->rx_skbuff[desc_idx] = NULL;\r
-\r
- dma_unmap_single(&ap->pdev->dev, desc->data, skb->len,\r
- DMA_TO_DEVICE);\r
-\r
- dev_kfree_skb(skb);\r
- }\r
-\r
- if (!fifo_empty(&ap->rx_ring)) {\r
- dev_err(&ap->pdev->dev, "failed to reclaim %d rx sk_buff\n",\r
- fifo_used(&ap->rx_ring));\r
- }\r
-\r
- return 0;\r
-}\r
-\r
-static int vmac_rx_refill(struct net_device *dev)\r
-{\r
- struct vmac_priv *ap = netdev_priv(dev);\r
-\r
- WARN_ON(fifo_full(&ap->rx_ring));\r
-\r
- while (!fifo_full(&ap->rx_ring)) {\r
- struct vmac_buffer_desc *desc;\r
- struct sk_buff *skb;\r
- dma_addr_t p;\r
- int desc_idx;\r
-\r
- desc_idx = ap->rx_ring.head;\r
- desc = &ap->rxbd[desc_idx];\r
-\r
- /* make sure we read the actual descriptor status */\r
- rmb();\r
-\r
- if (ap->rx_skbuff[desc_idx]) {\r
- /* dropped packet / buffer chaining */\r
- fifo_inc_head(&ap->rx_ring);\r
-\r
- /* return to DMA */\r
- wmb();\r
- desc->info = OWN_MASK | ap->rx_skb_size;\r
- continue;\r
- }\r
-\r
- skb = netdev_alloc_skb(dev, ap->rx_skb_size + 2);\r
- if (!skb) {\r
- dev_info(&ap->pdev->dev, "failed to allocate rx_skb, skb's left %d\n",\r
- fifo_used(&ap->rx_ring));\r
- break;\r
- }\r
-\r
- /* IP header Alignment (14 byte Ethernet header) */\r
- skb_reserve(skb, 2);\r
- WARN_ON(skb->len != 0); /* nothing received yet */\r
-\r
- ap->rx_skbuff[desc_idx] = skb;\r
-\r
- p = dma_map_single(&ap->pdev->dev, skb->data, ap->rx_skb_size,\r
- DMA_FROM_DEVICE);\r
-\r
- desc->data = p;\r
-\r
- wmb();\r
- desc->info = OWN_MASK | ap->rx_skb_size;\r
-\r
- fifo_inc_head(&ap->rx_ring);\r
- }\r
-\r
- /* If rx ring is still empty, set a timer to try allocating\r
- * again at a later time. */\r
- if (fifo_empty(&ap->rx_ring) && netif_running(dev)) {\r
- dev_warn(&ap->pdev->dev, "unable to refill rx ring\n");\r
- ap->rx_timeout.expires = jiffies + HZ;\r
- add_timer(&ap->rx_timeout);\r
- }\r
-\r
- return 0;\r
-}\r
-\r
-/*\r
- * timer callback to defer refill rx queue in case we're OOM\r
- */\r
-static void vmac_refill_rx_timer(unsigned long data)\r
-{\r
- struct net_device *dev;\r
- struct vmac_priv *ap;\r
-\r
- dev = (struct net_device *)data;\r
- ap = netdev_priv(dev);\r
-\r
- spin_lock(&ap->rx_lock);\r
- vmac_rx_refill(dev);\r
- spin_unlock(&ap->rx_lock);\r
-}\r
-\r
-/* merge buffer chaining */\r
-struct sk_buff *vmac_merge_rx_buffers(struct net_device *dev,\r
- struct vmac_buffer_desc *after,\r
- int pkt_len) /* data */\r
-{\r
- struct vmac_priv *ap = netdev_priv(dev);\r
- struct sk_buff *merge_skb, *cur_skb;\r
- struct dma_fifo *rx_ring;\r
- struct vmac_buffer_desc *desc;\r
-\r
- rx_ring = &ap->rx_ring;\r
- desc = &ap->rxbd[rx_ring->tail];\r
-\r
- WARN_ON(desc == after);\r
-\r
- /* strip FCS */\r
- pkt_len -= 4;\r
-\r
- /* IP header Alignment (14 byte Ethernet header) */\r
- merge_skb = netdev_alloc_skb(dev, pkt_len + 2);\r
- if (!merge_skb) {\r
- dev_err(&ap->pdev->dev, "failed to allocate merged rx_skb, rx skb's left %d\n",\r
- fifo_used(rx_ring));\r
-\r
- return NULL;\r
- }\r
-\r
- skb_reserve(merge_skb, 2);\r
-\r
- while (desc != after && pkt_len) {\r
- struct vmac_buffer_desc *desc;\r
- int buf_len, valid;\r
-\r
- /* desc needs wrapping */\r
- desc = &ap->rxbd[rx_ring->tail];\r
- cur_skb = ap->rx_skbuff[rx_ring->tail];\r
- WARN_ON(!cur_skb);\r
-\r
- dma_unmap_single(&ap->pdev->dev, desc->data, ap->rx_skb_size,\r
- DMA_FROM_DEVICE);\r
-\r
- /* do not copy FCS */\r
- buf_len = desc->info & LEN_MASK;\r
- valid = min(pkt_len, buf_len);\r
- pkt_len -= valid;\r
-\r
- memcpy(skb_put(merge_skb, valid), cur_skb->data, valid);\r
-\r
- fifo_inc_tail(rx_ring);\r
- }\r
-\r
- /* merging_pressure++ */\r
-\r
- if (unlikely(pkt_len != 0))\r
- dev_err(&ap->pdev->dev, "buffer chaining bytes missing %d\n",\r
- pkt_len);\r
-\r
- WARN_ON(desc != after);\r
-\r
- return merge_skb;\r
-}\r
-\r
-int vmac_rx_receive(struct net_device *dev, int budget)\r
-{\r
- struct vmac_priv *ap = netdev_priv(dev);\r
- struct vmac_buffer_desc *first;\r
- int processed, pkt_len, pkt_err;\r
- struct dma_fifo lookahead;\r
-\r
- processed = 0;\r
-\r
- first = NULL;\r
- pkt_err = pkt_len = 0;\r
-\r
- /* look ahead, till packet complete */\r
- lookahead = ap->rx_ring;\r
-\r
- do {\r
- struct vmac_buffer_desc *desc; /* cur_ */\r
- int desc_idx; /* cur_ */\r
- struct sk_buff *skb; /* pkt_ */\r
-\r
- desc_idx = lookahead.tail;\r
- desc = &ap->rxbd[desc_idx];\r
-\r
- /* make sure we read the actual descriptor status */\r
- rmb();\r
-\r
- /* break if dma ownership belongs to hw */\r
- if (desc->info & OWN_MASK) {\r
- ap->mac_rxring_head = vmac_readl(ap, MAC_RXRING_HEAD);\r
- break;\r
- }\r
-\r
- if (desc->info & FRST_MASK) {\r
- pkt_len = 0;\r
- pkt_err = 0;\r
-\r
- /* don't free current */\r
- ap->rx_ring.tail = lookahead.tail;\r
- first = desc;\r
- }\r
-\r
- fifo_inc_tail(&lookahead);\r
-\r
- /* check bd */\r
-\r
- pkt_len += desc->info & LEN_MASK;\r
- pkt_err |= (desc->info & BUFF);\r
-\r
- if (!(desc->info & LAST_MASK))\r
- continue;\r
-\r
- /* received complete packet */\r
-\r
- if (unlikely(pkt_err || !first)) {\r
- /* recycle buffers */\r
- ap->rx_ring.tail = lookahead.tail;\r
- continue;\r
- }\r
-\r
- WARN_ON(!(first->info & FRST_MASK) ||\r
- !(desc->info & LAST_MASK));\r
- WARN_ON(pkt_err);\r
-\r
- /* -- valid packet -- */\r
-\r
- if (first != desc) {\r
- skb = vmac_merge_rx_buffers(dev, desc, pkt_len);\r
-\r
- if (!skb) {\r
- /* kill packet */\r
- ap->rx_ring.tail = lookahead.tail;\r
- ap->rx_merge_error++;\r
- continue;\r
- }\r
- } else {\r
- dma_unmap_single(&ap->pdev->dev, desc->data,\r
- ap->rx_skb_size, DMA_FROM_DEVICE);\r
-\r
- skb = ap->rx_skbuff[desc_idx];\r
- ap->rx_skbuff[desc_idx] = NULL;\r
- /* desc->data != skb->data => desc->data DMA mapped */\r
-\r
- /* strip FCS */\r
- skb_put(skb, pkt_len - 4);\r
- }\r
-\r
- /* free buffers */\r
- ap->rx_ring.tail = lookahead.tail;\r
-\r
- WARN_ON(skb->len != pkt_len - 4);\r
- processed++;\r
- skb->dev = dev;\r
- skb->protocol = eth_type_trans(skb, dev);\r
- ap->stats.rx_packets++;\r
- ap->stats.rx_bytes += skb->len;\r
- dev->last_rx = jiffies;\r
- netif_rx(skb);\r
-\r
- } while (!fifo_empty(&lookahead) && (processed < budget));\r
-\r
- dev_vdbg(&ap->pdev->dev, "processed pkt %d, remaining rx buff %d\n",\r
- processed,\r
- fifo_used(&ap->rx_ring));\r
-\r
- if (processed || fifo_empty(&ap->rx_ring))\r
- vmac_rx_refill(dev);\r
-\r
- return processed;\r
-}\r
-\r
-static void vmac_toggle_irqmask(struct net_device *dev, int enable, int mask)\r
-{\r
- struct vmac_priv *ap = netdev_priv(dev);\r
- unsigned long tmp;\r
-\r
- tmp = vmac_readl(ap, ENABLE);\r
- if (enable)\r
- tmp |= mask;\r
- else\r
- tmp &= ~mask;\r
- vmac_writel(ap, tmp, ENABLE);\r
-}\r
-\r
-static void vmac_toggle_txint(struct net_device *dev, int enable)\r
-{\r
- struct vmac_priv *ap = netdev_priv(dev);\r
- unsigned long flags;\r
-\r
- spin_lock_irqsave(&ap->lock, flags);\r
- vmac_toggle_irqmask(dev, enable, TXINT_MASK);\r
- spin_unlock_irqrestore(&ap->lock, flags);\r
-}\r
-\r
-static void vmac_toggle_rxint(struct net_device *dev, int enable)\r
-{\r
- vmac_toggle_irqmask(dev, enable, RXINT_MASK);\r
-}\r
-\r
-static int vmac_poll(struct napi_struct *napi, int budget)\r
-{\r
- struct vmac_priv *ap;\r
- struct net_device *dev;\r
- int rx_work_done;\r
- unsigned long flags;\r
-\r
- ap = container_of(napi, struct vmac_priv, napi);\r
- dev = ap->dev;\r
-\r
- /* ack interrupt */\r
- vmac_writel(ap, RXINT_MASK, STAT);\r
-\r
- spin_lock(&ap->rx_lock);\r
- rx_work_done = vmac_rx_receive(dev, budget);\r
- spin_unlock(&ap->rx_lock);\r
-\r
-#ifdef VERBOSE_DEBUG\r
- if (printk_ratelimit()) {\r
- dev_vdbg(&ap->pdev->dev, "poll budget %d receive rx_work_done %d\n",\r
- budget,\r
- rx_work_done);\r
- }\r
-#endif\r
-\r
- if (rx_work_done >= budget) {\r
- /* rx queue is not yet empty/clean */\r
- return rx_work_done;\r
- }\r
-\r
- /* no more packet in rx/tx queue, remove device from poll\r
- * queue */\r
- spin_lock_irqsave(&ap->lock, flags);\r
- napi_complete(napi);\r
- vmac_toggle_rxint(dev, 1);\r
- spin_unlock_irqrestore(&ap->lock, flags);\r
-\r
- return rx_work_done;\r
-}\r
-\r
-static int vmac_tx_reclaim(struct net_device *dev, int force);\r
-\r
-static irqreturn_t vmac_intr(int irq, void *dev_instance)\r
-{\r
- struct net_device *dev = dev_instance;\r
- struct vmac_priv *ap = netdev_priv(dev);\r
- unsigned int status;\r
-\r
- spin_lock(&ap->lock);\r
-\r
- status = vmac_readl(ap, STAT);\r
- vmac_writel(ap, status, STAT);\r
-\r
-#ifdef DEBUG\r
- if (unlikely(ap->shutdown))\r
- dev_err(&ap->pdev->dev, "ISR during close\n");\r
-\r
- if (unlikely(!status & (RXINT_MASK|MDIO_MASK|ERR_MASK)))\r
- dev_err(&ap->pdev->dev, "No source of IRQ found\n");\r
-#endif\r
-\r
- if ((status & RXINT_MASK) &&\r
- (ap->mac_rxring_head !=\r
- vmac_readl(ap, MAC_RXRING_HEAD))) {\r
- vmac_toggle_rxint(dev, 0);\r
- napi_schedule(&ap->napi);\r
- }\r
-\r
- if (unlikely(netif_queue_stopped(dev) && (status & TXINT_MASK)))\r
- vmac_tx_reclaim(dev, 0);\r
-\r
- if (status & MDIO_MASK)\r
- complete(&ap->mdio_complete);\r
-\r
- if (unlikely(status & ERR_MASK))\r
- update_error_counters(dev, status);\r
-\r
- spin_unlock(&ap->lock);\r
-\r
- return IRQ_HANDLED;\r
-}\r
-\r
-static int vmac_tx_reclaim(struct net_device *dev, int force)\r
-{\r
- struct vmac_priv *ap = netdev_priv(dev);\r
- int released = 0;\r
-\r
- /* buffer chaining not used, see vmac_start_xmit */\r
-\r
- while (!fifo_empty(&ap->tx_ring)) {\r
- struct vmac_buffer_desc *desc;\r
- struct sk_buff *skb;\r
- int desc_idx;\r
-\r
- desc_idx = ap->tx_ring.tail;\r
- desc = &ap->txbd[desc_idx];\r
-\r
- /* ensure other field of the descriptor were not read\r
- * before we checked ownership */\r
- rmb();\r
-\r
- if ((desc->info & OWN_MASK) && !force)\r
- break;\r
-\r
- if (desc->info & ERR_MSK_TX) {\r
- update_tx_errors(dev, desc->info);\r
- /* recycle packet, let upper level deal with it */\r
- }\r
-\r
- skb = ap->tx_skbuff[desc_idx];\r
- ap->tx_skbuff[desc_idx] = NULL;\r
- WARN_ON(!skb);\r
-\r
- dma_unmap_single(&ap->pdev->dev, desc->data, skb->len,\r
- DMA_TO_DEVICE);\r
-\r
- dev_kfree_skb_any(skb);\r
-\r
- released++;\r
- fifo_inc_tail(&ap->tx_ring);\r
- }\r
-\r
- if (netif_queue_stopped(dev) && released) {\r
- netif_wake_queue(dev);\r
- vmac_toggle_txint(dev, 0);\r
- }\r
-\r
- if (unlikely(force && !fifo_empty(&ap->tx_ring))) {\r
- dev_err(&ap->pdev->dev, "failed to reclaim %d tx sk_buff\n",\r
- fifo_used(&ap->tx_ring));\r
- }\r
-\r
- return released;\r
-}\r
-\r
-int vmac_start_xmit(struct sk_buff *skb, struct net_device *dev)\r
-{\r
- struct vmac_priv *ap = netdev_priv(dev);\r
- struct vmac_buffer_desc *desc;\r
- unsigned int tmp;\r
-\r
- /* running under xmit lock */\r
-\r
- /* no scatter/gatter see features below */\r
- WARN_ON(skb_shinfo(skb)->nr_frags != 0);\r
- WARN_ON(skb->len > MAX_TX_BUFFER_LEN);\r
-\r
- if (unlikely(fifo_full(&ap->tx_ring))) {\r
- netif_stop_queue(dev);\r
- vmac_toggle_txint(dev, 1);\r
- dev_err(&ap->pdev->dev, "xmit called with no tx desc available\n");\r
- return NETDEV_TX_BUSY;\r
- }\r
-\r
- if (unlikely(skb->len < ETH_ZLEN)) {\r
- struct sk_buff *short_skb;\r
- short_skb = netdev_alloc_skb(dev, ETH_ZLEN);\r
- if (!short_skb)\r
- return NETDEV_TX_LOCKED;\r
-\r
- memset(short_skb->data, 0, ETH_ZLEN);\r
- memcpy(skb_put(short_skb, ETH_ZLEN), skb->data, skb->len);\r
- dev_kfree_skb(skb);\r
- skb = short_skb;\r
- }\r
-\r
- /* fill descriptor */\r
- ap->tx_skbuff[ap->tx_ring.head] = skb;\r
-\r
- desc = &ap->txbd[ap->tx_ring.head];\r
- desc->data = dma_map_single(&ap->pdev->dev, skb->data, skb->len,\r
- DMA_TO_DEVICE);\r
-\r
- /* dma might already be polling */\r
- wmb();\r
- desc->info = OWN_MASK | FRST_MASK | LAST_MASK | skb->len;\r
- wmb();\r
-\r
- /* kick tx dma */\r
- tmp = vmac_readl(ap, STAT);\r
- vmac_writel(ap, tmp | TXPL_MASK, STAT);\r
-\r
- ap->stats.tx_packets++;\r
- ap->stats.tx_bytes += skb->len;\r
- dev->trans_start = jiffies;\r
- fifo_inc_head(&ap->tx_ring);\r
-\r
- /* vmac_tx_reclaim independent of vmac_tx_timeout */\r
- if (fifo_used(&ap->tx_ring) > 8)\r
- vmac_tx_reclaim(dev, 0);\r
-\r
- /* stop queue if no more desc available */\r
- if (fifo_full(&ap->tx_ring)) {\r
- netif_stop_queue(dev);\r
- vmac_toggle_txint(dev, 1);\r
- }\r
-\r
- return NETDEV_TX_OK;\r
-}\r
-\r
-static int alloc_buffers(struct net_device *dev)\r
-{\r
- struct vmac_priv *ap = netdev_priv(dev);\r
- int err = -ENOMEM;\r
- int size;\r
-\r
- fifo_init(&ap->rx_ring, RX_BDT_LEN);\r
- fifo_init(&ap->tx_ring, TX_BDT_LEN);\r
-\r
- /* initialize skb list */\r
- memset(ap->rx_skbuff, 0, sizeof(ap->rx_skbuff));\r
- memset(ap->tx_skbuff, 0, sizeof(ap->tx_skbuff));\r
-\r
- /* allocate DMA received descriptors */\r
- size = sizeof(*ap->rxbd) * ap->rx_ring.size;\r
- ap->rxbd = dma_alloc_coherent(&ap->pdev->dev, size,\r
- &ap->rxbd_dma,\r
- GFP_KERNEL);\r
- if (ap->rxbd == NULL)\r
- goto err_out;\r
-\r
- /* allocate DMA transmit descriptors */\r
- size = sizeof(*ap->txbd) * ap->tx_ring.size;\r
- ap->txbd = dma_alloc_coherent(&ap->pdev->dev, size,\r
- &ap->txbd_dma,\r
- GFP_KERNEL);\r
- if (ap->txbd == NULL)\r
- goto err_free_rxbd;\r
-\r
- /* ensure 8-byte aligned */\r
- WARN_ON(((int)ap->txbd & 0x7) || ((int)ap->rxbd & 0x7));\r
-\r
- memset(ap->txbd, 0, sizeof(*ap->txbd) * ap->tx_ring.size);\r
- memset(ap->rxbd, 0, sizeof(*ap->rxbd) * ap->rx_ring.size);\r
-\r
- /* allocate rx skb */\r
- err = vmac_rx_refill(dev);\r
- if (err)\r
- goto err_free_txbd;\r
-\r
- return 0;\r
-\r
-err_free_txbd:\r
- dma_free_coherent(&ap->pdev->dev, sizeof(*ap->txbd) * ap->tx_ring.size,\r
- ap->txbd, ap->txbd_dma);\r
-err_free_rxbd:\r
- dma_free_coherent(&ap->pdev->dev, sizeof(*ap->rxbd) * ap->rx_ring.size,\r
- ap->rxbd, ap->rxbd_dma);\r
-err_out:\r
- return err;\r
-}\r
-\r
-static int free_buffers(struct net_device *dev)\r
-{\r
- struct vmac_priv *ap = netdev_priv(dev);\r
-\r
- /* free skbuff */\r
- vmac_tx_reclaim(dev, 1);\r
- vmac_rx_reclaim_force(dev);\r
-\r
- /* free DMA ring */\r
- dma_free_coherent(&ap->pdev->dev, sizeof(ap->txbd) * ap->tx_ring.size,\r
- ap->txbd, ap->txbd_dma);\r
- dma_free_coherent(&ap->pdev->dev, sizeof(ap->rxbd) * ap->rx_ring.size,\r
- ap->rxbd, ap->rxbd_dma);\r
-\r
- return 0;\r
-}\r
-\r
-static int vmac_hw_init(struct net_device *dev)\r
-{\r
- struct vmac_priv *priv = netdev_priv(dev);\r
-\r
- /* clear IRQ mask */\r
- vmac_writel(priv, 0, ENABLE);\r
-\r
- /* clear pending IRQ */\r
- vmac_writel(priv, 0xffffffff, STAT);\r
-\r
- /* Initialize logical address filter */\r
- vmac_writel(priv, 0x0, LAFL);\r
- vmac_writel(priv, 0x0, LAFH);\r
-\r
- return 0;\r
-}\r
-\r
-#ifdef DEBUG\r
-static int vmac_register_print(struct net_device *dev)\r
-{\r
- struct vmac_priv *ap = netdev_priv(dev);\r
-\r
- printk("func::%s vmac register %s value = 0x%x\n", __func__, "ID", vmac_readl(ap, ID));\r
- printk("func::%s vmac register %s value = 0x%x\n", __func__, "STAT", vmac_readl(ap, STAT));\r
- printk("func::%s vmac register %s value = 0x%x\n", __func__, "ENABLE", vmac_readl(ap, ENABLE));\r
- printk("func::%s vmac register %s value = 0x%x\n", __func__, "CONTROL", vmac_readl(ap, CONTROL));\r
- printk("func::%s vmac register %s value = 0x%x\n", __func__, "ADDRL", vmac_readl(ap, ADDRL));\r
- printk("func::%s vmac register %s value = 0x%x\n", __func__, "ADDRH", vmac_readl(ap, ADDRH));\r
- \r
- return 0;\r
-}\r
-#endif\r
-\r
-int vmac_open(struct net_device *dev)\r
-{\r
- struct vmac_priv *ap = netdev_priv(dev);\r
- struct phy_device *phydev;\r
- unsigned int temp;\r
- int err = 0;\r
- struct clk *mac_clk = NULL;\r
- struct clk *mac_parent = NULL;\r
- struct clk *arm_clk = NULL;\r
- struct rk29_vmac_platform_data *pdata = ap->pdev->dev.platform_data;\r
- unsigned char current_mac[6];\r
- int ret = 0;\r
- struct pinctrl_state *clkout_state;\r
-\r
- printk("enter func %s...\n", __func__);\r
-\r
- if (ap == NULL)\r
- return -ENODEV;\r
-\r
- wake_lock_timeout(&ap->resume_lock, 5*HZ);\r
-\r
- ap->shutdown = 0;\r
- \r
- // switch to rmii\r
- printk("ap->pdev->dev.pins->p = %p\n", ap->pdev->dev.pins->p);\r
- clkout_state = pinctrl_lookup_state(ap->pdev->dev.pins->p, "default");\r
- if (IS_ERR(clkout_state)) {\r
- dev_err(&ap->pdev->dev, "no clkout pinctrl state\n");\r
- goto err_out;\r
- }\r
- \r
- printk("in pinctrl_select_state.\n");\r
- pinctrl_select_state(ap->pdev->dev.pins->p, clkout_state);\r
- \r
- //set rmii ref clock 50MHz\r
- mac_clk = devm_clk_get(&ap->pdev->dev, "clk_mac");\r
- /*if (IS_ERR(mac_clk))\r
- mac_clk = NULL;\r
- arm_clk = clk_get(NULL, "arm_pll");\r
- if (IS_ERR(arm_clk))\r
- arm_clk = NULL;\r
- if (mac_clk) {\r
- mac_parent = clk_get_parent(mac_clk);\r
- if (IS_ERR(mac_parent))\r
- mac_parent = NULL;\r
- }\r
- if (arm_clk && mac_parent && (arm_clk == mac_parent))\r
- wake_lock(&idlelock);\r
-\r
- if(pdata && pdata->rmii_extclk_sel && pdata->rmii_extclk_sel())\r
- {\r
- struct clk * mac_clkin = NULL;\r
- mac_clkin = clk_get(NULL, "rmii_clkin");\r
- if (IS_ERR(mac_clkin)) {\r
- pr_err("mac_clkin get fail\n");\r
- }\r
- clk_set_parent(mac_clk, mac_clkin); \r
- }*/\r
- \r
- clk_set_rate(mac_clk, 50000000);\r
- clk_prepare_enable(mac_clk);\r
- //clk_enable(clk_get(NULL,"mii_rx"));\r
- //clk_enable(clk_get(NULL,"mii_tx"));\r
- //clk_enable(clk_get(NULL,"hclk_mac"));\r
-\r
- //phy power on\r
- if (pdata && pdata->rmii_power_control)\r
- pdata->rmii_power_control(1);\r
-\r
- msleep(1000);\r
-\r
- vmac_hw_init(dev);\r
-\r
-//$_rbox_$_modify_$_chenxiao\r
- if (is_valid_ether_addr(dev->dev_addr)){\r
- strlcpy(current_mac,dev->dev_addr,6);\r
- }\r
-\r
-#ifdef CONFIG_ETH_MAC_FROM_EEPROM\r
- ret = eeprom_read_data(0,dev->dev_addr,6);\r
- if (ret != 6){\r
- printk("read mac from Eeprom fail.\n");\r
- }else {\r
- if (is_valid_ether_addr(dev->dev_addr)){\r
- printk("eth_mac_from_eeprom***********:%X:%X:%X:%X:%X:%X\n",dev->dev_addr[0],\r
- dev->dev_addr[1],dev->dev_addr[2],dev->dev_addr[3],\r
- dev->dev_addr[4],dev->dev_addr[5] );\r
- }\r
- }\r
-#endif\r
-\r
-#ifdef CONFIG_ETH_MAC_FROM_IDB\r
- err = eth_mac_idb(dev->dev_addr);\r
- if (err) {\r
- printk("read mac from IDB fail.\n");\r
- } else {\r
- if (is_valid_ether_addr(dev->dev_addr)) {\r
- printk("eth_mac_from_idb***********:%X:%X:%X:%X:%X:%X\n",dev->dev_addr[0],\r
- dev->dev_addr[1],dev->dev_addr[2],dev->dev_addr[3],\r
- dev->dev_addr[4],dev->dev_addr[5] );\r
- }\r
- }\r
-#endif\r
-\r
-#ifdef CONFIG_ETH_MAC_FROM_WIFI_MAC\r
- err = eth_mac_wifi(dev->dev_addr);\r
- if (err) {\r
- printk("read mac from Wifi fail.\n");\r
- } else {\r
- if (is_valid_ether_addr(dev->dev_addr)) {\r
- printk("eth_mac_from_wifi_mac***********:%X:%X:%X:%X:%X:%X\n",dev->dev_addr[0],\r
- dev->dev_addr[1],dev->dev_addr[2],dev->dev_addr[3],\r
- dev->dev_addr[4],dev->dev_addr[5] );\r
- }\r
- }\r
-#endif\r
-\r
-#ifdef CONFIG_ETH_MAC_FROM_SECURE_CHIP\r
-\r
-#endif\r
- \r
-\r
- if (!is_valid_ether_addr(dev->dev_addr)) {\r
- strlcpy(dev->dev_addr,current_mac,6);\r
- printk("eth_mac_from_RANDOM***********:%X:%X:%X:%X:%X:%X\n",dev->dev_addr[0],\r
- dev->dev_addr[1],dev->dev_addr[2],dev->dev_addr[3],\r
- dev->dev_addr[4],dev->dev_addr[5] );\r
- }\r
-//add end \r
-\r
- /* mac address changed? */\r
- write_mac_reg(dev, dev->dev_addr);\r
-\r
- err = alloc_buffers(dev);\r
- if (err)\r
- goto err_out;\r
-\r
- err = request_irq(dev->irq, &vmac_intr, 0, dev->name, dev);\r
- if (err) {\r
- dev_err(&ap->pdev->dev, "Unable to request IRQ %d (error %d)\n",\r
- dev->irq, err);\r
- goto err_free_buffers;\r
- }\r
-\r
- /* install DMA ring pointers */\r
- vmac_writel(ap, ap->rxbd_dma, RXRINGPTR);\r
- vmac_writel(ap, ap->txbd_dma, TXRINGPTR);\r
-\r
- /* set poll rate to 1 ms */\r
- vmac_writel(ap, POLLRATE_TIME, POLLRATE);\r
-\r
- /* make sure we enable napi before rx interrupt */\r
- napi_enable(&ap->napi);\r
-\r
- /* IRQ mask */\r
- temp = RXINT_MASK | ERR_MASK | TXCH_MASK | MDIO_MASK;\r
- vmac_writel(ap, temp, ENABLE);\r
-\r
- /* Set control */\r
- temp = (RX_BDT_LEN << 24) | (TX_BDT_LEN << 16) | TXRN_MASK | RXRN_MASK;\r
- vmac_writel(ap, temp, CONTROL);\r
-\r
- /* enable, after all other bits are set */\r
- vmac_writel(ap, temp | EN_MASK, CONTROL);\r
- \r
- netif_start_queue(dev);\r
- netif_carrier_off(dev);\r
-\r
-#ifdef DEBUG\r
- vmac_register_print(dev);\r
-#endif\r
-\r
- /* register the PHY board fixup, if needed */\r
- err = vmac_mii_init(ap);\r
- if (err)\r
- goto err_free_irq;\r
-\r
- /* schedule a link state check */\r
- phy_start(ap->phy_dev);\r
-\r
- phydev = ap->phy_dev;\r
- dev_info(&ap->pdev->dev, "PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",\r
- phydev->drv->name, dev_name(&phydev->dev), phydev->irq);\r
-\r
- ap->suspending = 0;\r
- ap->open_flag = 1;\r
-\r
- return 0;\r
-\r
-err_free_irq:\r
- free_irq(dev->irq, dev);\r
-err_free_buffers:\r
- free_buffers(dev);\r
-err_out: \r
- //if (arm_clk && mac_parent && (arm_clk == mac_parent))\r
- // wake_unlock(&idlelock);\r
-\r
- return err;\r
-}\r
-\r
-int vmac_close(struct net_device *dev)\r
-{\r
- struct vmac_priv *ap = netdev_priv(dev);\r
- unsigned int temp;\r
- struct clk *mac_clk = NULL;\r
- struct clk *arm_clk = NULL;\r
- struct clk *mac_parent = NULL;\r
- struct rk29_vmac_platform_data *pdata = ap->pdev->dev.platform_data;\r
-\r
- printk("enter func %s...\n", __func__);\r
- \r
- if (ap->suspending == 1) \r
- return 0;\r
-\r
- ap->open_flag = 0;\r
-\r
- netif_stop_queue(dev);\r
- napi_disable(&ap->napi);\r
-\r
- /* stop running transfers */\r
- temp = vmac_readl(ap, CONTROL);\r
- temp &= ~(TXRN_MASK | RXRN_MASK);\r
- vmac_writel(ap, temp, CONTROL);\r
-\r
- del_timer_sync(&ap->rx_timeout);\r
-\r
- /* disable phy */\r
- phy_stop(ap->phy_dev);\r
- vmac_mii_exit(dev);\r
- netif_carrier_off(dev);\r
-\r
- /* disable interrupts */\r
- vmac_writel(ap, 0, ENABLE);\r
- free_irq(dev->irq, dev);\r
-\r
- /* turn off vmac */\r
- vmac_writel(ap, 0, CONTROL);\r
- /* vmac_reset_hw(vmac) */\r
-\r
- ap->shutdown = 1;\r
- wmb();\r
-\r
- free_buffers(dev);\r
-\r
- //phy power off\r
- if (pdata && pdata->rmii_power_control)\r
- pdata->rmii_power_control(0);\r
-\r
- //clock close\r
- /*mac_clk = clk_get(NULL, "mac_ref_div");\r
- if (IS_ERR(mac_clk))\r
- mac_clk = NULL;\r
- if (mac_clk) {\r
- mac_parent = clk_get_parent(mac_clk);\r
- if (IS_ERR(mac_parent))\r
- mac_parent = NULL;\r
- }\r
- arm_clk = clk_get(NULL, "arm_pll");\r
- if (IS_ERR(arm_clk))\r
- arm_clk = NULL;\r
-\r
- if (arm_clk && mac_parent && (arm_clk == mac_parent))\r
- wake_unlock(&idlelock);*/\r
- \r
- clk_disable(clk_get(&ap->pdev->dev,"clk_mac"));\r
- //clk_disable(clk_get(NULL,"mii_tx"));\r
- //clk_disable(clk_get(NULL,"hclk_mac"));\r
- //clk_disable(clk_get(NULL,"clk_mac_pll"));\r
-\r
- return 0;\r
-}\r
-\r
/*
 * Hard-stop path (used from the suspend flow): quiesce TX/RX DMA, detach
 * the PHY and MII bus, mask and release the interrupt, disable the MAC
 * core and free all ring buffers.  Unlike vmac_close() it does not touch
 * the open_flag/suspending bookkeeping, the PHY power rail or the MAC
 * clock — callers are expected to handle those separately.
 */
int vmac_shutdown(struct net_device *dev)
{
	struct vmac_priv *ap = netdev_priv(dev);
	unsigned int temp;

	printk("enter func %s...\n", __func__);

	netif_stop_queue(dev);
	napi_disable(&ap->napi);

	/* stop running transfers */
	temp = vmac_readl(ap, CONTROL);
	temp &= ~(TXRN_MASK | RXRN_MASK);
	vmac_writel(ap, temp, CONTROL);

	del_timer_sync(&ap->rx_timeout);

	/* disable phy */
	phy_stop(ap->phy_dev);
	vmac_mii_exit(dev);
	netif_carrier_off(dev);

	/* disable interrupts */
	vmac_writel(ap, 0, ENABLE);
	free_irq(dev->irq, dev);

	/* turn off vmac */
	vmac_writel(ap, 0, CONTROL);
	/* vmac_reset_hw(vmac) */

	/* publish the shutdown flag before tearing down the rings; wmb()
	 * orders the flag write against the buffer frees below */
	ap->shutdown = 1;
	wmb();

	free_buffers(dev);

	return 0;
}
-\r
/*
 * Fold the hardware error counters into ap->stats.  Caller must hold
 * ap->lock: RXERR/MISS are clear-on-read style accumulators, so two
 * concurrent readers would lose counts.
 */
void vmac_update_stats(struct vmac_priv *ap)
{
	struct net_device_stats *_stats = &ap->stats;
	unsigned long miss, rxerr;
	unsigned long rxfram, rxcrc, rxoflow;

	/* compare with /proc/net/dev,
	 * see net/core/dev.c:dev_seq_printf_stats */

	/* rx stats */
	rxerr = vmac_readl(ap, RXERR);
	miss = vmac_readl(ap, MISS);

	/* RXERR packs three 8-bit counters: CRC [7:0], framing [15:8],
	 * fifo overflow [23:16] (see RXERR_* masks in the header) */
	rxcrc = (rxerr & RXERR_CRC);
	rxfram = (rxerr & RXERR_FRM) >> 8;
	rxoflow = (rxerr & RXERR_OFLO) >> 16;

	_stats->rx_length_errors = 0;
	_stats->rx_over_errors += miss;
	_stats->rx_crc_errors += rxcrc;
	_stats->rx_frame_errors += rxfram;
	_stats->rx_fifo_errors += rxoflow;
	_stats->rx_missed_errors = 0;

	/* TODO check rx_dropped/rx_errors/tx_dropped/tx_errors have not
	 * been updated elsewhere */
	_stats->rx_dropped = _stats->rx_over_errors +
		_stats->rx_fifo_errors +
		ap->rx_merge_error;

	_stats->rx_errors = _stats->rx_length_errors + _stats->rx_crc_errors +
		_stats->rx_frame_errors +
		_stats->rx_missed_errors +
		_stats->rx_dropped;

	/* tx stats */
	_stats->tx_dropped = 0; /* otherwise queue stopped */

	_stats->tx_errors = _stats->tx_aborted_errors +
		_stats->tx_carrier_errors +
		_stats->tx_fifo_errors +
		_stats->tx_heartbeat_errors +
		_stats->tx_window_errors +
		_stats->tx_dropped +
		ap->tx_timeout_error;
}
-\r
-struct net_device_stats *vmac_stats(struct net_device *dev)\r
-{\r
- struct vmac_priv *ap = netdev_priv(dev);\r
- unsigned long flags;\r
-\r
- spin_lock_irqsave(&ap->lock, flags);\r
- vmac_update_stats(ap);\r
- spin_unlock_irqrestore(&ap->lock, flags);\r
-\r
- return &ap->stats;\r
-}\r
-\r
/*
 * ndo_tx_timeout hook: the TX queue made no progress for the watchdog
 * interval.  Recover by acking a possibly-lost TX interrupt, reclaiming
 * completed descriptors and restarting the queue.  Runs with ap->lock
 * held to serialize against the interrupt/reclaim paths.
 */
void vmac_tx_timeout(struct net_device *dev)
{
	struct vmac_priv *ap = netdev_priv(dev);
	unsigned int status;
	unsigned long flags;

	spin_lock_irqsave(&ap->lock, flags);

	/* queue did not progress for timeo jiffies */
	WARN_ON(!netif_queue_stopped(dev));
	WARN_ON(!fifo_full(&ap->tx_ring));

	/* TX IRQ lost? */
	status = vmac_readl(ap, STAT);
	if (status & TXINT_MASK) {
		dev_err(&ap->pdev->dev, "lost tx interrupt, IRQ mask %x\n",
			vmac_readl(ap, ENABLE));
		/* write-1-to-clear the stale TX interrupt status */
		vmac_writel(ap, TXINT_MASK, STAT);
	}

	/* TODO RX/MDIO/ERR as well? */

	vmac_tx_reclaim(dev, 0);
	if (fifo_full(&ap->tx_ring))
		dev_err(&ap->pdev->dev, "DMA state machine not active\n");

	/* We can accept TX packets again */
	ap->tx_timeout_error++;
	dev->trans_start = jiffies;
	netif_wake_queue(dev);

	spin_unlock_irqrestore(&ap->lock, flags);
}
-\r
/*
 * Build the 64-bit logical-address (hash) filter from the device's
 * multicast list: each multicast address selects one bit via the top
 * 6 bits of its little-endian CRC32.  Caller writes the result to the
 * LAFL/LAFH registers.
 *
 * NOTE(review): the pre-2.6.34 branch walks dev->dev_addrs (the device
 * address list) rather than the multicast list proper — confirm this is
 * intentional for the old-kernel builds this driver targeted.
 */
static void create_multicast_filter(struct net_device *dev,
	unsigned long *bitmask)
{
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 34))
	struct netdev_hw_addr *ha;
	unsigned long crc;
	char *addrs;
	struct netdev_hw_addr_list *list = &dev->dev_addrs;

	WARN_ON(dev->mc_count == 0);
	WARN_ON(dev->flags & IFF_ALLMULTI);

	bitmask[0] = bitmask[1] = 0;

	list_for_each_entry(ha, &list->list, list) {
		addrs = ha->addr;

		/* skip non-multicast addresses (multicast = group bit set
		 * in the first octet) */
		if (!(*addrs & 1))
			continue;

		crc = ether_crc_le(ETH_ALEN, addrs);
		/* top 6 bits of the CRC index one of 64 filter bits */
		set_bit(crc >> 26, bitmask);
	}
#else
	struct netdev_hw_addr *ha;
	unsigned long crc;
	char *addrs;

	WARN_ON(netdev_mc_count(dev) == 0);
	WARN_ON(dev->flags & IFF_ALLMULTI);

	bitmask[0] = bitmask[1] = 0;

	netdev_for_each_mc_addr(ha, dev) {
		addrs = ha->addr;

		/* skip non-multicast addresses */
		if (!(*addrs & 1))
			continue;

		crc = ether_crc_le(ETH_ALEN, addrs);
		set_bit(crc >> 26, bitmask);
	}
#endif
}
-static void vmac_set_multicast_list(struct net_device *dev)\r
-{\r
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 34))\r
- struct vmac_priv *ap = netdev_priv(dev);\r
- unsigned long flags, bitmask[2];\r
- int promisc, reg;\r
-\r
- //printk("-----------------func %s-------------------\n", __func__);\r
-\r
- spin_lock_irqsave(&ap->lock, flags);\r
-\r
- promisc = !!(dev->flags & IFF_PROMISC);\r
- reg = vmac_readl(ap, CONTROL);\r
- if (promisc != !!(reg & PROM_MASK)) {\r
- reg ^= PROM_MASK;\r
- vmac_writel(ap, reg, CONTROL);\r
- }\r
-\r
- if (dev->flags & IFF_ALLMULTI)\r
- memset(bitmask, 1, sizeof(bitmask));\r
- else if (dev->mc_count == 0)\r
- memset(bitmask, 0, sizeof(bitmask));\r
- else\r
- create_multicast_filter(dev, bitmask);\r
-\r
- vmac_writel(ap, bitmask[0], LAFL);\r
- vmac_writel(ap, bitmask[1], LAFH);\r
-\r
- spin_unlock_irqrestore(&ap->lock, flags);\r
-#else\r
- struct vmac_priv *ap = netdev_priv(dev);\r
- unsigned long flags, bitmask[2];\r
- int promisc, reg;\r
-\r
- spin_lock_irqsave(&ap->lock, flags);\r
-\r
- promisc = !!(dev->flags & IFF_PROMISC);\r
- reg = vmac_readl(ap, CONTROL);\r
- if (promisc != !!(reg & PROM_MASK)) {\r
- reg ^= PROM_MASK;\r
- vmac_writel(ap, reg, CONTROL);\r
- }\r
-\r
- if (dev->flags & IFF_ALLMULTI)\r
- memset(bitmask, 1, sizeof(bitmask));\r
- else if (netdev_mc_count(dev) == 0)\r
- memset(bitmask, 0, sizeof(bitmask));\r
- else\r
- create_multicast_filter(dev, bitmask);\r
-\r
- vmac_writel(ap, bitmask[0], LAFL);\r
- vmac_writel(ap, bitmask[1], LAFH);\r
-\r
- spin_unlock_irqrestore(&ap->lock, flags);\r
-#endif\r
-}\r
-\r
/* ethtool hooks: settings/drvinfo are implemented earlier in this file;
 * link state comes from the generic MII helper */
static struct ethtool_ops vmac_ethtool_ops = {
	.get_settings = vmacether_get_settings,
	.set_settings = vmacether_set_settings,
	.get_drvinfo = vmacether_get_drvinfo,
	.get_link = ethtool_op_get_link,
};
-\r
/* net_device callbacks; multicast-list handling is present in the file
 * (vmac_set_multicast_list) but deliberately left unwired here */
static const struct net_device_ops vmac_netdev_ops = {
	.ndo_open = vmac_open,
	.ndo_stop = vmac_close,
	.ndo_get_stats = vmac_stats,
	.ndo_start_xmit = vmac_start_xmit,
	.ndo_do_ioctl = vmac_ioctl,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_tx_timeout = vmac_tx_timeout,
	//.ndo_set_multicast_list = vmac_set_multicast_list,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_change_mtu = eth_change_mtu,
};
-\r
-static int vmac_probe(struct platform_device *pdev)\r
-{\r
- struct net_device *dev;\r
- struct vmac_priv *ap;\r
- struct resource *res;\r
- unsigned int mem_base, mem_size, irq;\r
- int err;\r
- struct rk29_vmac_platform_data *pdata;\r
- struct device_node *np = pdev->dev.of_node;\r
- \r
- printk("vmac_probe.\n");\r
- dev_dbg(&pdev->dev, "vmac_probe 1.\n");\r
- \r
- pdev->dev.platform_data = &board_vmac_data;\r
- pdata = pdev->dev.platform_data;\r
-\r
- dev = alloc_etherdev(sizeof(*ap));\r
- if (!dev) {\r
- dev_err(&pdev->dev, "etherdev alloc failed, aborting.\n");\r
- return -ENOMEM;\r
- }\r
-\r
- ap = netdev_priv(dev);\r
-\r
- err = -ENODEV;\r
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);\r
- if (!res) {\r
- dev_err(&pdev->dev, "no mmio resource defined\n");\r
- goto err_out;\r
- }\r
- mem_base = res->start;\r
- mem_size = resource_size(res);\r
- irq = platform_get_irq(pdev, 0);\r
-\r
- /*err = -EBUSY;\r
- if (!devm_request_mem_region(&pdev->dev, mem_base, mem_size, VMAC_NAME)) {\r
- dev_err(&pdev->dev, "no memory region available\n");\r
- goto err_out;\r
- }*/\r
-\r
- err = -ENOMEM;\r
- ap->regs = devm_ioremap_resource(&pdev->dev, res);\r
- if (!ap->regs) {\r
- dev_err(&pdev->dev, "failed to map registers, aborting.\n");\r
- goto err_out_release_mem;\r
- }\r
- \r
- printk("mem_base = 0x%08x, mem_size = 0x%08x, irq = %d, regs = 0x%08x\n", \r
- mem_base, mem_size, irq, ap->regs);\r
-\r
- /* no checksum support, hence no scatter/gather */\r
- dev->features |= NETIF_F_HIGHDMA;\r
-\r
- spin_lock_init(&ap->lock);\r
-\r
- SET_NETDEV_DEV(dev, &pdev->dev);\r
- ap->dev = dev;\r
- ap->pdev = pdev;\r
-\r
- /* init rx timeout (used for oom) */\r
- init_timer(&ap->rx_timeout);\r
- ap->rx_timeout.function = vmac_refill_rx_timer;\r
- ap->rx_timeout.data = (unsigned long)dev;\r
-\r
- netif_napi_add(dev, &ap->napi, vmac_poll, 2);\r
- dev->netdev_ops = &vmac_netdev_ops;\r
- dev->ethtool_ops = &vmac_ethtool_ops;\r
- dev->irq = irq;\r
-\r
- dev->flags |= IFF_MULTICAST;////////////////////\r
-\r
- dev->base_addr = (unsigned long)ap->regs;\r
- ap->mem_base = mem_base;\r
-\r
- /* prevent buffer chaining, favor speed over space */\r
- ap->rx_skb_size = ETH_FRAME_LEN + VMAC_BUFFER_PAD;\r
-\r
- /* private struct functional */\r
-\r
- /* mac address intialize, set vmac_open */\r
- read_mac_reg(dev, dev->dev_addr);\r
-\r
- if (!is_valid_ether_addr(dev->dev_addr))\r
- random_ether_addr(dev->dev_addr);\r
-\r
- err = register_netdev(dev);\r
- if (err) {\r
- dev_err(&pdev->dev, "Cannot register net device, aborting.\n");\r
- goto err_out_iounmap;\r
- }\r
-\r
- dev_info(&pdev->dev, "ARC VMAC at 0x%08x irq %d %pM\n", mem_base,\r
- dev->irq, dev->dev_addr);\r
- platform_set_drvdata(pdev, dev);\r
-\r
- ap->suspending = 0;\r
- ap->open_flag = 0;\r
- //wake_lock_init(&idlelock, WAKE_LOCK_IDLE, "vmac");\r
- wake_lock_init(&ap->resume_lock, WAKE_LOCK_SUSPEND, "vmac_resume");\r
-\r
- //config rk29 vmac as rmii, 100MHz \r
- if (pdata && pdata->vmac_register_set)\r
- pdata->vmac_register_set();\r
-\r
- //power gpio init, phy power off default for power reduce\r
- if (pdata && pdata->rmii_io_init)\r
- pdata->rmii_io_init();\r
-\r
- return 0;\r
-\r
-err_out_iounmap:\r
- iounmap(ap->regs);\r
-err_out_release_mem:\r
- release_mem_region(mem_base, mem_size);\r
-err_out:\r
- free_netdev(dev);\r
- return err;\r
-}\r
-\r
-static int vmac_remove(struct platform_device *pdev)\r
-{\r
- struct net_device *dev;\r
- struct vmac_priv *ap;\r
- struct resource *res;\r
- struct rk29_vmac_platform_data *pdata = pdev->dev.platform_data;\r
-\r
- //wake_lock_destroy(&idlelock);\r
-\r
- //power gpio deinit, phy power off\r
- if (pdata && pdata->rmii_io_deinit)\r
- pdata->rmii_io_deinit();\r
-\r
- dev = platform_get_drvdata(pdev);\r
- if (!dev) {\r
- dev_err(&pdev->dev, "%s no valid dev found\n", __func__);\r
- return 0;\r
- }\r
-\r
- ap = netdev_priv(dev);\r
-\r
- /* MAC */\r
- unregister_netdev(dev);\r
- iounmap(ap->regs);\r
-\r
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);\r
- release_mem_region(res->start, resource_size(res));\r
-\r
- platform_set_drvdata(pdev, NULL);\r
- free_netdev(dev);\r
- return 0;\r
-}\r
-\r
-static void rk29_vmac_power_off(struct net_device *dev)\r
-{\r
- struct vmac_priv *ap = netdev_priv(dev);\r
- struct rk29_vmac_platform_data *pdata = ap->pdev->dev.platform_data;\r
-\r
- printk("enter func %s...\n", __func__);\r
-\r
- //phy power off\r
- if (pdata && pdata->rmii_power_control)\r
- pdata->rmii_power_control(0);\r
-\r
- //clock close\r
- clk_disable(clk_get(&ap->pdev->dev,"clk_mac"));\r
- //clk_disable(clk_get(NULL,"mii_tx"));\r
- //clk_disable(clk_get(NULL,"hclk_mac"));\r
- //clk_disable(clk_get(NULL,"clk_mac_pll"));\r
-\r
-}\r
-\r
-static int\r
-rk29_vmac_suspend(struct device *dev)\r
-{\r
- struct platform_device *pdev = to_platform_device(dev);\r
- struct net_device *ndev = platform_get_drvdata(pdev);\r
- struct vmac_priv *ap = netdev_priv(ndev);\r
- \r
- if (ndev) {\r
- if (ap->open_flag == 1) {\r
- netif_stop_queue(ndev);\r
- netif_device_detach(ndev);\r
- if (ap->suspending == 0) {\r
-//$_rbox_$_modify_$_chenzhi: for ethernet sleep\r
-#if 0\r
- vmac_shutdown(ndev);\r
- rk29_vmac_power_off(ndev);\r
-#endif\r
- ap->suspending = 1;\r
- }\r
- }\r
- }\r
- return 0;\r
-}\r
-\r
-static int\r
-rk29_vmac_resume(struct device *dev)\r
-{\r
- struct platform_device *pdev = to_platform_device(dev);\r
- struct net_device *ndev = platform_get_drvdata(pdev);\r
- struct vmac_priv *ap = netdev_priv(ndev);\r
- \r
- if (ndev) {\r
- if (ap->open_flag == 1) {\r
- netif_device_attach(ndev);\r
- netif_start_queue(ndev);\r
-//$_rbox_$_modify_$_chenzhi: \r
-//$_rbox_$_modify_$_begin\r
- if (ap->suspending == 1) {\r
- ap->suspending = 0;\r
- }\r
-//$_rbox_$_modify_$_end\r
- }\r
- }\r
- return 0;\r
-}\r
-\r
-static struct dev_pm_ops rk29_vmac_pm_ops = {\r
- .suspend = rk29_vmac_suspend,\r
- .resume = rk29_vmac_resume,\r
-};\r
-\r
/* device-tree match table: bound against "rockchip,vmac" nodes */
static const struct of_device_id rockchip_vmac_of_match[] = {
	{ .compatible = "rockchip,vmac", .data = NULL, },
	{},
};
MODULE_DEVICE_TABLE(of, rockchip_vmac_of_match);
-\r
/* platform driver glue.  NOTE(review): the driver .name contains a
 * comma ("rockchip,vmac"), which is unusual for a platform-bus name —
 * confirm no non-DT board file expects a plain name here. */
static struct platform_driver rockchip_vmac_driver = {
	.probe = vmac_probe,
	.remove = vmac_remove,
	.driver = {
		.owner = THIS_MODULE,
		.name = "rockchip,vmac",
		.pm = &rk29_vmac_pm_ops,
		.of_match_table = of_match_ptr(rockchip_vmac_of_match),
	},
};
-\r
/* module init: register the platform driver (probe runs when a matching
 * device/DT node is found) */
static int __init vmac_init(void)
{
	printk("vmac_init.\n");
	return platform_driver_register(&rockchip_vmac_driver);
}

/* module exit: unregister; remove() runs for any bound device */
static void __exit vmac_exit(void)
{
	platform_driver_unregister(&rockchip_vmac_driver);
}

module_init(vmac_init);
module_exit(vmac_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RK29 VMAC Ethernet driver");
MODULE_AUTHOR("amit.bhor@celunite.com, sameer.dhavale@celunite.com, andreas.fenkart@streamunlimited.com");
+++ /dev/null
-/*\r
- * linux/arch/arc/drivers/arcvmac.h\r
- *\r
- * Copyright (C) 2003-2006 Codito Technologies, for linux-2.4 port\r
- * Copyright (C) 2006-2007 Celunite Inc, for linux-2.6 port\r
- * Copyright (C) 2007-2008 Sagem Communications, Fehmi HAFSI\r
- * Copyright (C) 2009 Sagem Communications, Andreas Fenkart\r
- * All Rights Reserved.\r
- *\r
- This program is free software; you can redistribute it and/or modify\r
- it under the terms of the GNU General Public License as published by\r
- the Free Software Foundation; either version 2 of the License, or\r
- (at your option) any later version.\r
-\r
- This program is distributed in the hope that it will be useful,\r
- but WITHOUT ANY WARRANTY; without even the implied warranty of\r
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r
- GNU General Public License for more details.\r
-\r
- You should have received a copy of the GNU General Public License\r
- along with this program; if not, write to the Free Software\r
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\r
-\r
- * Authors: amit.bhor@celunite.com, sameer.dhavale@celunite.com\r
- */\r
-\r
-#ifndef _ARCVMAC_H\r
-#define _ARCVMAC_H\r
-\r
-#define VMAC_NAME "rockchip,vmac"\r
-#define VMAC_VERSION "1.0"\r
-\r
-/* Buffer descriptors */\r
-#ifdef CONFIG_ARCH_RK29\r
-#define TX_BDT_LEN 16 /* Number of receive BD's */\r
-#else\r
-#define TX_BDT_LEN 255 /* Number of receive BD's */\r
-#endif\r
-#define RX_BDT_LEN 255 /* Number of transmit BD's */\r
-\r
-/* BD poll rate, in 1024 cycles. @100Mhz: x * 1024 cy * 10ns = 1ms */\r
-#define POLLRATE_TIME 200\r
-\r
-/* next power of two, bigger than ETH_FRAME_LEN + VLAN */\r
-#define MAX_RX_BUFFER_LEN 0x800 /* 2^11 = 2048 = 0x800 */\r
-#define MAX_TX_BUFFER_LEN 0x800 /* 2^11 = 2048 = 0x800 */\r
-\r
-/* 14 bytes of ethernet header, 4 bytes VLAN, FCS,\r
- * plus extra pad to prevent buffer chaining of\r
- * maximum sized ethernet packets (1514 bytes) */\r
-#define VMAC_BUFFER_PAD (ETH_HLEN + 4 + ETH_FCS_LEN + 4)\r
-\r
-/* VMAC register definitions, offsets in the ref manual are in bytes */\r
-#define ID_OFFSET (0x00/0x4)\r
-#define STAT_OFFSET (0x04/0x4)\r
-#define ENABLE_OFFSET (0x08/0x4)\r
-#define CONTROL_OFFSET (0x0c/0x4)\r
-#define POLLRATE_OFFSET (0x10/0x4)\r
-#define RXERR_OFFSET (0x14/0x4)\r
-#define MISS_OFFSET (0x18/0x4)\r
-#define TXRINGPTR_OFFSET (0x1c/0x4)\r
-#define RXRINGPTR_OFFSET (0x20/0x4)\r
-#define ADDRL_OFFSET (0x24/0x4)\r
-#define ADDRH_OFFSET (0x28/0x4)\r
-#define LAFL_OFFSET (0x2c/0x4)\r
-#define LAFH_OFFSET (0x30/0x4)\r
-#define MDIO_DATA_OFFSET (0x34/0x4)\r
-#define MAC_TXRING_HEAD_OFFSET (0x38/0x4)\r
-#define MAC_RXRING_HEAD_OFFSET (0x3C/0x4)\r
-\r
-/* STATUS and ENABLE register bit masks */\r
-#define TXINT_MASK (1<<0) /* Transmit interrupt */\r
-#define RXINT_MASK (1<<1) /* Receive interrupt */\r
-#define ERR_MASK (1<<2) /* Error interrupt */\r
-#define TXCH_MASK (1<<3) /* Transmit chaining error interrupt */\r
-#define MSER_MASK (1<<4) /* Missed packet counter error */\r
-#define RXCR_MASK (1<<8) /* RXCRCERR counter rolled over */\r
-#define RXFR_MASK (1<<9) /* RXFRAMEERR counter rolled over */\r
-#define RXFL_MASK (1<<10) /* RXOFLOWERR counter rolled over */\r
-#define MDIO_MASK (1<<12) /* MDIO complete */\r
-#define TXPL_MASK (1<<31) /* TXPOLL */\r
-\r
-/* CONTROL register bitmasks */\r
-#define EN_MASK (1<<0) /* VMAC enable */\r
-#define TXRN_MASK (1<<3) /* TX enable */\r
-#define RXRN_MASK (1<<4) /* RX enable */\r
-#define DSBC_MASK (1<<8) /* Disable receive broadcast */\r
-#define ENFL_MASK (1<<10) /* Enable Full Duplex */ ///////\r
-#define PROM_MASK (1<<11) /* Promiscuous mode */\r
-\r
-/* RXERR register bitmasks */\r
-#define RXERR_CRC 0x000000ff\r
-#define RXERR_FRM 0x0000ff00\r
-#define RXERR_OFLO 0x00ff0000 /* fifo overflow */\r
-\r
-/* MDIO data register bit masks */\r
-#define MDIO_SFD 0xC0000000\r
-#define MDIO_OP 0x30000000\r
-#define MDIO_ID_MASK 0x0F800000\r
-#define MDIO_REG_MASK 0x007C0000\r
-#define MDIO_TA 0x00030000\r
-#define MDIO_DATA_MASK 0x0000FFFF\r
-\r
-#define MDIO_BASE 0x40020000\r
-#define MDIO_OP_READ 0x20000000\r
-#define MDIO_OP_WRITE 0x10000000\r
-\r
-/* Buffer descriptor INFO bit masks */\r
-#define OWN_MASK (1<<31) /* ownership of buffer, 0 CPU, 1 DMA */\r
-#define BUFF (1<<30) /* buffer invalid, rx */\r
-#define UFLO (1<<29) /* underflow, tx */\r
-#define LTCL (1<<28) /* late collision, tx */\r
-#define RETRY_CT (0xf<<24) /* tx */\r
-#define DROP (1<<23) /* drop, more than 16 retries, tx */\r
-#define DEFER (1<<22) /* traffic on the wire, tx */\r
-#define CARLOSS (1<<21) /* carrier loss while transmission, tx, rx? */\r
-/* 20:19 reserved */\r
-#define ADCR (1<<18) /* add crc, ignored if not disaddcrc */\r
-#define LAST_MASK (1<<17) /* Last buffer in chain */\r
-#define FRST_MASK (1<<16) /* First buffer in chain */\r
-/* 15:11 reserved */\r
-#define LEN_MASK 0x000007FF\r
-\r
-#define ERR_MSK_TX 0x3fe00000 /* UFLO | LTCL | RTRY | DROP | DEFER | CRLS */\r
-\r
-\r
-/* arcvmac private data structures */\r
/* hardware DMA buffer descriptor: packed status/length word (see the
 * "Buffer descriptor INFO bit masks" above) plus the DMA address of the
 * data buffer; layout is fixed by the VMAC core */
struct vmac_buffer_desc {
	unsigned int info;
	dma_addr_t data;
};

/* software ring bookkeeping for a descriptor ring; one slot is kept as
 * a sentinel so full and empty states are distinguishable (see the
 * comment block below) */
struct dma_fifo {
	int head; /* head */
	int tail; /* tail */
	int size;
};
-\r
/* per-device driver state, allocated as netdev_priv() */
struct vmac_priv {
	struct net_device *dev;
	struct platform_device *pdev;
	struct net_device_stats stats;

	spinlock_t lock; /* TODO revisit */
	struct completion mdio_complete;	/* signalled by MDIO irq */

	/* base address of register set (ioremapped MMIO) */
	int *regs;
	unsigned int mem_base;			/* physical base, for logging */

	/* DMA ring buffers: cpu pointers + device addresses */
	struct vmac_buffer_desc *rxbd;
	dma_addr_t rxbd_dma;

	struct vmac_buffer_desc *txbd;
	dma_addr_t txbd_dma;

	/* socket buffers, parallel to the descriptor rings */
	struct sk_buff *rx_skbuff[RX_BDT_LEN];
	struct sk_buff *tx_skbuff[TX_BDT_LEN];
	int rx_skb_size;			/* rx alloc size incl. pad */

	/* skb / dma desc managing */
	struct dma_fifo rx_ring;
	struct dma_fifo tx_ring;

	/* descriptor last polled/processed by the VMAC */
	unsigned long mac_rxring_head;
	/* used when rx skb allocation failed, so we defer rx queue
	 * refill */
	struct timer_list rx_timeout;

	/* lock rx_timeout against rx normal operation */
	spinlock_t rx_lock;

	struct napi_struct napi;

	/* rx buffer chaining */
	int rx_merge_error;
	int tx_timeout_error;

	/* PHY stuff */
	struct mii_bus *mii_bus;
	struct phy_device *phy_dev;

	/* cached link parameters, compared in the adjust-link callback */
	int link;
	int speed;
	int duplex;

	int open_flag;				/* interface is up */
	int suspending;				/* suspend in flight */
	struct wake_lock resume_lock;

	/* debug */
	int shutdown;
};
-\r
-/* DMA ring management */\r
-\r
-/* for a fifo with size n,\r
- * - [0..n] fill levels are n + 1 states\r
- * - there are only n different deltas (head - tail) values\r
- * => not all fill levels can be represented with head, tail\r
- * pointers only\r
- * we give up the n fill level, aka fifo full */\r
-\r
-/* sacrifice one elt as a sentinel */\r
-static inline int fifo_used(struct dma_fifo *f);\r
-static inline int fifo_inc_ct(int ct, int size);\r
-static inline void fifo_dump(struct dma_fifo *fifo);\r
-\r
-static inline int fifo_empty(struct dma_fifo *f)\r
-{\r
- return (f->head == f->tail);\r
-}\r
-\r
-static inline int fifo_free(struct dma_fifo *f)\r
-{\r
- int free;\r
-\r
- free = f->tail - f->head;\r
- if (free <= 0)\r
- free += f->size;\r
-\r
- return free;\r
-}\r
-\r
-static inline int fifo_used(struct dma_fifo *f)\r
-{\r
- int used;\r
-\r
- used = f->head - f->tail;\r
- if (used < 0)\r
- used += f->size;\r
-\r
- return used;\r
-}\r
-\r
-static inline int fifo_full(struct dma_fifo *f)\r
-{\r
- return (fifo_used(f) + 1) == f->size;\r
-}\r
-\r
-/* manipulate */\r
-static inline void fifo_init(struct dma_fifo *fifo, int size)\r
-{\r
- fifo->size = size;\r
- fifo->head = fifo->tail = 0; /* empty */\r
-}\r
-\r
/* advance the producer index with wrap-around; caller must guarantee
 * the fifo is not full (BUG_ON enforces the invariant) */
static inline void fifo_inc_head(struct dma_fifo *fifo)
{
	BUG_ON(fifo_full(fifo));
	fifo->head = fifo_inc_ct(fifo->head, fifo->size);
}
-\r
/* advance the consumer index with wrap-around; caller must guarantee
 * the fifo is not empty (BUG_ON enforces the invariant) */
static inline void fifo_inc_tail(struct dma_fifo *fifo)
{
	BUG_ON(fifo_empty(fifo));
	fifo->tail = fifo_inc_ct(fifo->tail, fifo->size);
}
-\r
-/* internal funcs */\r
-static inline void fifo_dump(struct dma_fifo *fifo)\r
-{\r
- printk(KERN_INFO "fifo: head %d, tail %d, size %d\n", fifo->head,\r
- fifo->tail,\r
- fifo->size);\r
-}\r
-\r
/* next ring index after ct, wrapping to 0 when it reaches size */
static inline int fifo_inc_ct(int ct, int size)
{
	int next = ct + 1;

	return (next == size) ? 0 : next;
}
-\r
-/*vmac*/\r
/* board-specific hooks for the vmac driver; every callback is optional
 * (callers NULL-check before invoking).  Returns follow the 0/negative
 * kernel convention. */
struct rk29_vmac_platform_data {
	int (*vmac_register_set)(void);		/* configure GRF for RMII */
	int (*rmii_io_init)(void);		/* pin/pad setup at probe */
	int (*rmii_io_deinit)(void);		/* pin/pad teardown at remove */
	int (*rmii_power_control)(int enable);	/* PHY power rail on/off */
	int (*rmii_speed_switch)(int speed);	/* 10 vs 100 Mbit GRF switch */
};
-\r
-extern struct rk29_vmac_platform_data board_vmac_data;\r
-\r
-#endif /* _ARCVMAC_H */\r
+++ /dev/null
-
-#include <linux/clk.h>
-#include <linux/crc32.h>
-#include <linux/delay.h>
-#include <linux/dma-mapping.h>
-#include <linux/etherdevice.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/netdevice.h>
-#include <linux/phy.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/types.h>
-#include <linux/wakelock.h>
-#include <linux/version.h>
-#include <linux/gpio.h>
-#include <linux/of_gpio.h>
-#include <asm/irq.h>
-#include <linux/interrupt.h>
-#include <linux/completion.h>
-
-#include <linux/rockchip/iomap.h>
-#include <linux/rockchip/grf.h>
-
-#include "rk29_vmac.h"
-
/* PHY power-pin description: the gpio number and the level that means
 * "powered" (derived from the DT active-low flag) */
struct vmac_phy_data {
	int power_io;		/* gpio number, from "power-gpios" */
	int power_io_enable;	/* value to drive for power-on */
};
/* file-scope instance filled from DT when no platform_data is supplied;
 * NOTE(review): non-static — confirm nothing externs it before hiding it */
struct vmac_phy_data g_vmac_phy_data;
-
-#define grf_readl(offset) readl_relaxed(RK_GRF_VIRT + offset)
-#define grf_writel(v, offset) do { writel_relaxed(v, RK_GRF_VIRT + offset); dsb(); } while (0)
-
static int rk30_vmac_register_set(void)
{
	//config rk30 vmac as rmii
	/* upper halfword is the write-enable mask for the low bits
	 * (GRF registers use this mask-and-value convention); value 0x2
	 * presumably selects RMII mode — see the RK3188 GRF_SOC_CON1
	 * register description to confirm */
	writel_relaxed(0x3 << 16 | 0x2, RK_GRF_VIRT + RK3188_GRF_SOC_CON1);
	return 0;
}
-
static int rk30_rmii_io_init(void)
{
	printk("enter %s \n",__func__);

	//rk3188 gpio3 and sdio drive strength ,
	/* mask-and-value write: raise pad drive strength for the RMII
	 * pins (0x0f with write-enable mask in the upper halfword) */
	grf_writel((0x0f<<16)|0x0f, RK3188_GRF_IO_CON3);

	return 0;
}
-
/* teardown counterpart of rk30_rmii_io_init; nothing to undo beyond
 * logging (PHY power-down happens via rmii_power_control) */
static int rk30_rmii_io_deinit(void)
{
	//phy power down
	printk("enter %s \n",__func__);
	return 0;
}
-
-static int rk30_rmii_power_control(int enable)
-{
- struct vmac_phy_data *pdata = &g_vmac_phy_data;
-
- printk("enter %s ,enable = %d \n",__func__,enable);
- if (enable) {
- if (gpio_is_valid(pdata->power_io)) {
- gpio_direction_output(pdata->power_io, pdata->power_io_enable);
- gpio_set_value(pdata->power_io, pdata->power_io_enable);
- }
- }else {
- if (gpio_is_valid(pdata->power_io)) {
- gpio_direction_output(pdata->power_io, !pdata->power_io_enable);
- gpio_set_value(pdata->power_io, !pdata->power_io_enable);
- }
- }
- return 0;
-}
-
-#define BIT_EMAC_SPEED_100M (1 << 1)
-#define BIT_EMAC_SPEED_10M (0 << 1)
-static int rk29_vmac_speed_switch(int speed)
-{
- //printk("%s: speed = %d\n", __func__, speed);
- if (10 == speed) {
- writel_relaxed((2<<16)|BIT_EMAC_SPEED_10M, RK_GRF_VIRT + RK3188_GRF_SOC_CON1);
- } else {
- writel_relaxed((2<<16)|BIT_EMAC_SPEED_100M, RK_GRF_VIRT + RK3188_GRF_SOC_CON1);
- }
- return 0;
-}
-
/* board hook table consumed by the vmac core driver (it installs this
 * as pdev->dev.platform_data in vmac_probe) */
struct rk29_vmac_platform_data board_vmac_data = {
	.vmac_register_set = rk30_vmac_register_set,
	.rmii_io_init = rk30_rmii_io_init,
	.rmii_io_deinit = rk30_rmii_io_deinit,
	.rmii_power_control = rk30_rmii_power_control,
	.rmii_speed_switch = rk29_vmac_speed_switch,
};
-
-static int vmac_phy_probe(struct platform_device *pdev)
-{
- struct vmac_phy_data *pdata = pdev->dev.platform_data;
- enum of_gpio_flags flags;
- int ret = 0, err;
- struct device_node *node = pdev->dev.of_node;
-
- printk("enter %s \n",__func__);
- if (!pdata) {
- pdata = &g_vmac_phy_data;
-
- pdata->power_io = of_get_named_gpio_flags(node, "power-gpios", 0, &flags);
- if (!gpio_is_valid(pdata->power_io)) {
- printk("%s: Get power-gpios failed.\n", __func__);
- return -EINVAL;
- }
-
- if(flags & OF_GPIO_ACTIVE_LOW)
- pdata->power_io_enable = 0;
- else
- pdata->power_io_enable = 1;
- }
-
- // disable power
- /*err = gpio_request(pdata->power_io, "vmac_phy_power");
- if (err) {
- printk("%s: Request vmac phy power pin failed.\n", __func__);
- return -EINVAL;
- }*/
-
- gpio_direction_output(pdata->power_io, !pdata->power_io_enable);
- gpio_set_value(pdata->power_io, !pdata->power_io_enable);
-
- return ret;
-}
-
-static int vmac_phy_remove(struct platform_device *pdev)
-{
- struct vmac_phy_data *pdata = pdev->dev.platform_data;
-
- printk("enter %s \n",__func__);
- if (gpio_is_valid(pdata->power_io))
- gpio_free(pdata->power_io);
-
- return 0;
-}
-
-static struct of_device_id vmac_phy_of_match[] = {
- { .compatible = "vmac-phy" },
- { }
-};
-MODULE_DEVICE_TABLE(of, vmac_phy_of_match);
-
/* platform driver glue for the PHY power device */
static struct platform_driver vmac_phy_driver = {
	.driver = {
		.name = "vmac-phy",
		.owner = THIS_MODULE,
		.of_match_table = of_match_ptr(vmac_phy_of_match),
	},
	.probe = vmac_phy_probe,
	.remove = vmac_phy_remove,
};
-
-module_platform_driver(vmac_phy_driver);
-
-MODULE_DESCRIPTION("VMAC PHY Power Driver");
-MODULE_LICENSE("GPL");
\ No newline at end of file
--- /dev/null
+#
+# rockchip device configuration
+#
+
+config NET_VENDOR_ROCKCHIP
+ bool "Rockchip devices"
+ default y
+ depends on HAS_IOMEM
+ ---help---
+ Rockchip devices
+
+if NET_VENDOR_ROCKCHIP
+
+source "drivers/net/ethernet/rockchip/vmac/Kconfig"
+source "drivers/net/ethernet/rockchip/gmac/Kconfig"
+
+endif # NET_VENDOR_ROCKCHIP
--- /dev/null
+#
+# Makefile for the rockchip device drivers.
+#
+
+obj-$(CONFIG_RK_VMAC_ETH) += vmac/
+obj-$(CONFIG_RK_GMAC_ETH) += gmac/
--- /dev/null
+config RK_GMAC_ETH
+ tristate "Rockchip 10/100/1000 Ethernet driver"
+ depends on HAS_IOMEM && HAS_DMA
+ select NET_CORE
+ select MII
+ select PHYLIB
+ select CRC32
+ select PTP_1588_CLOCK
+ ---help---
+ Rockchip 10/100/1000 Ethernet driver.
+
+if RK_GMAC_ETH
+
+config GMAC_DEBUG_FS
+ bool "Enable monitoring via sysFS "
+ default n
+ depends on RK_GMAC_ETH && DEBUG_FS
+ ---help---
+ The gmac entry in /sys reports DMA TX/RX rings
+ or (if supported) the HW cap register.
+
+config GMAC_DA
+ bool "GMAC DMA arbitration scheme"
+ default n
+ ---help---
+ Selecting this option, rx has priority over Tx (only for Giga
+ Ethernet device).
+ By default, the DMA arbitration scheme is based on Round-robin
+ (rx:tx priority is 1:1).
+
+endif
--- /dev/null
+obj-$(CONFIG_RK_GMAC_ETH) += stmmac.o
+stmmac-$(CONFIG_RK_GMAC_ETH) += stmmac_platform.o
+stmmac-$(CONFIG_STMMAC_PCI) += stmmac_pci.o
+stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \
+ chain_mode.o dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \
+ dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o \
+ mmc_core.o stmmac_hwtstamp.o stmmac_ptp.o $(stmmac-y)
--- /dev/null
+/*******************************************************************************
+ Specialised functions for managing Chained mode
+
+ Copyright(C) 2011 STMicroelectronics Ltd
+
+ It defines all the functions used to handle the normal/enhanced
+ descriptors in case of the DMA is configured to work in chained or
+ in ring mode.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include "stmmac.h"
+
+/* Map and prepare TX descriptors for a frame whose linear (head) data
+ * exceeds the per-descriptor buffer limit, splitting it across as many
+ * chained descriptors as needed.
+ * @p:    struct stmmac_priv * (void * to keep the mode-ops table generic)
+ * @skb:  frame to transmit; only skb_headlen() bytes are handled here
+ * @csum: checksum-insertion flag forwarded to prepare_tx_desc()
+ * Returns the ring index of the last descriptor consumed.
+ *
+ * NOTE(review): dma_map_single() results are never checked with
+ * dma_mapping_error() -- confirm against the platform's DMA API rules.
+ * NOTE(review): the own bit of the FIRST descriptor is not set here;
+ * presumably the caller sets it last to publish the whole chain -- verify.
+ */
+static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
+{
+ struct stmmac_priv *priv = (struct stmmac_priv *)p;
+ unsigned int txsize = priv->dma_tx_size;
+ unsigned int entry = priv->cur_tx % txsize;
+ struct dma_desc *desc = priv->dma_tx + entry;
+ unsigned int nopaged_len = skb_headlen(skb);
+ unsigned int bmax;
+ unsigned int i = 1, len;
+
+ /* Enhanced descriptors carry up to 8KiB per buffer, normal ones 2KiB. */
+ if (priv->plat->enh_desc)
+ bmax = BUF_SIZE_8KiB;
+ else
+ bmax = BUF_SIZE_2KiB;
+
+ /* Bytes still to map after the first, full-size descriptor. */
+ len = nopaged_len - bmax;
+
+ desc->des2 = dma_map_single(priv->device, skb->data,
+ bmax, DMA_TO_DEVICE);
+ priv->tx_skbuff_dma[entry] = desc->des2;
+ priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, STMMAC_CHAIN_MODE);
+
+ while (len != 0) {
+ /* Advance to the next descriptor, wrapping around the ring. */
+ entry = (++priv->cur_tx) % txsize;
+ desc = priv->dma_tx + entry;
+
+ if (len > bmax) {
+ /* Middle chunk: another full bmax-sized buffer. */
+ desc->des2 = dma_map_single(priv->device,
+ (skb->data + bmax * i),
+ bmax, DMA_TO_DEVICE);
+ priv->tx_skbuff_dma[entry] = desc->des2;
+ priv->hw->desc->prepare_tx_desc(desc, 0, bmax, csum,
+ STMMAC_CHAIN_MODE);
+ priv->hw->desc->set_tx_owner(desc);
+ priv->tx_skbuff[entry] = NULL;
+ len -= bmax;
+ i++;
+ } else {
+ /* Final chunk: map the residual bytes and stop. */
+ desc->des2 = dma_map_single(priv->device,
+ (skb->data + bmax * i), len,
+ DMA_TO_DEVICE);
+ priv->tx_skbuff_dma[entry] = desc->des2;
+ priv->hw->desc->prepare_tx_desc(desc, 0, len, csum,
+ STMMAC_CHAIN_MODE);
+ priv->hw->desc->set_tx_owner(desc);
+ priv->tx_skbuff[entry] = NULL;
+ len = 0;
+ }
+ }
+ return entry;
+}
+
+/* Return 1 when @len cannot fit in a single descriptor buffer (8KiB with
+ * enhanced descriptors, 2KiB with normal ones), 0 otherwise. */
+static unsigned int stmmac_is_jumbo_frm(int len, int enh_desc)
+{
+ unsigned int ret = 0;
+
+ if ((enh_desc && (len > BUF_SIZE_8KiB)) ||
+ (!enh_desc && (len > BUF_SIZE_2KiB))) {
+ ret = 1;
+ }
+
+ return ret;
+}
+
+/* Pre-link every descriptor's des3 with the physical address of the next
+ * descriptor, wrapping the last entry back to the head of the ring.
+ * @des:         CPU address of the first descriptor
+ * @phy_addr:    DMA (physical) address of that first descriptor
+ * @size:        number of descriptors in the ring
+ * @extend_desc: non-zero when dma_extended_desc entries are in use
+ */
+static void stmmac_init_dma_chain(void *des, dma_addr_t phy_addr,
+ unsigned int size, unsigned int extend_desc)
+{
+ /*
+ * In chained mode the des3 points to the next element in the ring.
+ * The latest element has to point to the head.
+ */
+ int i;
+ dma_addr_t dma_phy = phy_addr;
+
+ if (extend_desc) {
+ struct dma_extended_desc *p = (struct dma_extended_desc *)des;
+ for (i = 0; i < (size - 1); i++) {
+ dma_phy += sizeof(struct dma_extended_desc);
+ p->basic.des3 = (unsigned int)dma_phy;
+ p++;
+ }
+ /* Close the chain: last descriptor points back to the first. */
+ p->basic.des3 = (unsigned int)phy_addr;
+
+ } else {
+ struct dma_desc *p = (struct dma_desc *)des;
+ for (i = 0; i < (size - 1); i++) {
+ dma_phy += sizeof(struct dma_desc);
+ p->des3 = (unsigned int)dma_phy;
+ p++;
+ }
+ /* Close the chain: last descriptor points back to the first. */
+ p->des3 = (unsigned int)phy_addr;
+ }
+}
+
+/* Re-arm the des3 next-descriptor pointer of an RX descriptor after the
+ * hardware may have overwritten it with a timestamp (only possible with
+ * RX timestamping enabled on non-extended descriptors), preserving the
+ * explicit chain built by stmmac_init_dma_chain(). */
+static void stmmac_refill_desc3(void *priv_ptr, struct dma_desc *p)
+{
+ struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr;
+
+ if (priv->hwts_rx_en && !priv->extend_desc)
+ /* NOTE: Device will overwrite des3 with timestamp value if
+ * 1588-2002 time stamping is enabled, hence reinitialize it
+ * to keep explicit chaining in the descriptor.
+ */
+ p->des3 = (unsigned int)(priv->dma_rx_phy +
+ (((priv->dirty_rx) + 1) %
+ priv->dma_rx_size) *
+ sizeof(struct dma_desc));
+}
+
+/* TX counterpart of stmmac_refill_desc3(): when a last-segment descriptor
+ * may carry a timestamp in des3, rewrite the next-descriptor pointer to
+ * keep the TX chain intact. */
+static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p)
+{
+ struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr;
+
+ if (priv->hw->desc->get_tx_ls(p) && !priv->extend_desc)
+ /* NOTE: Device will overwrite des3 with timestamp value if
+ * 1588-2002 time stamping is enabled, hence reinitialize it
+ * to keep explicit chaining in the descriptor.
+ */
+ p->des3 = (unsigned int)(priv->dma_tx_phy +
+ (((priv->dirty_tx + 1) %
+ priv->dma_tx_size) *
+ sizeof(struct dma_desc)));
+}
+
+/* Chain-mode callback table; plugged into mac_device_info->chain
+ * (declared extern in common.h). */
+const struct stmmac_chain_mode_ops chain_mode_ops = {
+ .init = stmmac_init_dma_chain,
+ .is_jumbo_frm = stmmac_is_jumbo_frm,
+ .jumbo_frm = stmmac_jumbo_frm,
+ .refill_desc3 = stmmac_refill_desc3,
+ .clean_desc3 = stmmac_clean_desc3,
+};
--- /dev/null
+/*******************************************************************************
+ STMMAC Common Header File
+
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#ifndef __COMMON_H__
+#define __COMMON_H__
+
+#include <linux/etherdevice.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#define STMMAC_VLAN_TAG_USED
+#include <linux/if_vlan.h>
+#endif
+
+#include "descs.h"
+#include "mmc.h"
+
+#undef CHIP_DEBUG_PRINT
+/* Turn-on extra printk debug for MAC core, dma and descriptors */
+/* #define CHIP_DEBUG_PRINT */
+
+#ifdef CHIP_DEBUG_PRINT
+#define CHIP_DBG(fmt, args...) printk(fmt, ## args)
+#else
+#define CHIP_DBG(fmt, args...) do { } while (0)
+#endif
+
+/* Synopsys Core versions */
+#define DWMAC_CORE_3_40 0x34
+#define DWMAC_CORE_3_50 0x35
+
+#undef FRAME_FILTER_DEBUG
+/* #define FRAME_FILTER_DEBUG */
+
+struct stmmac_extra_stats {
+ /* Transmit errors */
+ unsigned long tx_underflow ____cacheline_aligned;
+ unsigned long tx_carrier;
+ unsigned long tx_losscarrier;
+ unsigned long vlan_tag;
+ unsigned long tx_deferred;
+ unsigned long tx_vlan;
+ unsigned long tx_jabber;
+ unsigned long tx_frame_flushed;
+ unsigned long tx_payload_error;
+ unsigned long tx_ip_header_error;
+ /* Receive errors */
+ unsigned long rx_desc;
+ unsigned long sa_filter_fail;
+ unsigned long overflow_error;
+ unsigned long ipc_csum_error;
+ unsigned long rx_collision;
+ unsigned long rx_crc;
+ unsigned long dribbling_bit;
+ unsigned long rx_length;
+ unsigned long rx_mii;
+ unsigned long rx_multicast;
+ unsigned long rx_gmac_overflow;
+ unsigned long rx_watchdog;
+ unsigned long da_rx_filter_fail;
+ unsigned long sa_rx_filter_fail;
+ unsigned long rx_missed_cntr;
+ unsigned long rx_overflow_cntr;
+ unsigned long rx_vlan;
+ /* Tx/Rx IRQ error info */
+ unsigned long tx_undeflow_irq;
+ unsigned long tx_process_stopped_irq;
+ unsigned long tx_jabber_irq;
+ unsigned long rx_overflow_irq;
+ unsigned long rx_buf_unav_irq;
+ unsigned long rx_process_stopped_irq;
+ unsigned long rx_watchdog_irq;
+ unsigned long tx_early_irq;
+ unsigned long fatal_bus_error_irq;
+ /* Tx/Rx IRQ Events */
+ unsigned long rx_early_irq;
+ unsigned long threshold;
+ unsigned long tx_pkt_n;
+ unsigned long rx_pkt_n;
+ unsigned long normal_irq_n;
+ unsigned long rx_normal_irq_n;
+ unsigned long napi_poll;
+ unsigned long tx_normal_irq_n;
+ unsigned long tx_clean;
+ unsigned long tx_reset_ic_bit;
+ unsigned long irq_receive_pmt_irq_n;
+ /* MMC info */
+ unsigned long mmc_tx_irq_n;
+ unsigned long mmc_rx_irq_n;
+ unsigned long mmc_rx_csum_offload_irq_n;
+ /* EEE */
+ unsigned long irq_tx_path_in_lpi_mode_n;
+ unsigned long irq_tx_path_exit_lpi_mode_n;
+ unsigned long irq_rx_path_in_lpi_mode_n;
+ unsigned long irq_rx_path_exit_lpi_mode_n;
+ unsigned long phy_eee_wakeup_error_n;
+ /* Extended RDES status */
+ unsigned long ip_hdr_err;
+ unsigned long ip_payload_err;
+ unsigned long ip_csum_bypassed;
+ unsigned long ipv4_pkt_rcvd;
+ unsigned long ipv6_pkt_rcvd;
+ unsigned long rx_msg_type_ext_no_ptp;
+ unsigned long rx_msg_type_sync;
+ unsigned long rx_msg_type_follow_up;
+ unsigned long rx_msg_type_delay_req;
+ unsigned long rx_msg_type_delay_resp;
+ unsigned long rx_msg_type_pdelay_req;
+ unsigned long rx_msg_type_pdelay_resp;
+ unsigned long rx_msg_type_pdelay_follow_up;
+ unsigned long ptp_frame_type;
+ unsigned long ptp_ver;
+ unsigned long timestamp_dropped;
+ unsigned long av_pkt_rcvd;
+ unsigned long av_tagged_pkt_rcvd;
+ unsigned long vlan_tag_priority_val;
+ unsigned long l3_filter_match;
+ unsigned long l4_filter_match;
+ unsigned long l3_l4_filter_no_match;
+ /* PCS */
+ unsigned long irq_pcs_ane_n;
+ unsigned long irq_pcs_link_n;
+ unsigned long irq_rgmii_n;
+ unsigned long pcs_link;
+ unsigned long pcs_duplex;
+ unsigned long pcs_speed;
+};
+
+/* CSR Frequency Access Defines*/
+#define CSR_F_35M 35000000
+#define CSR_F_60M 60000000
+#define CSR_F_100M 100000000
+#define CSR_F_150M 150000000
+#define CSR_F_250M 250000000
+#define CSR_F_300M 300000000
+
+#define MAC_CSR_H_FRQ_MASK 0x20
+
+#define HASH_TABLE_SIZE 64
+#define PAUSE_TIME 0x200
+
+/* Flow Control defines */
+#define FLOW_OFF 0
+#define FLOW_RX 1
+#define FLOW_TX 2
+#define FLOW_AUTO (FLOW_TX | FLOW_RX)
+
+/* PCS defines */
+#define STMMAC_PCS_RGMII (1 << 0)
+#define STMMAC_PCS_SGMII (1 << 1)
+#define STMMAC_PCS_TBI (1 << 2)
+#define STMMAC_PCS_RTBI (1 << 3)
+
+#define SF_DMA_MODE 1 /* DMA STORE-AND-FORWARD Operation Mode */
+
+/* DMA HW feature register fields */
+#define DMA_HW_FEAT_MIISEL 0x00000001 /* 10/100 Mbps Support */
+#define DMA_HW_FEAT_GMIISEL 0x00000002 /* 1000 Mbps Support */
+#define DMA_HW_FEAT_HDSEL 0x00000004 /* Half-Duplex Support */
+#define DMA_HW_FEAT_EXTHASHEN 0x00000008 /* Expanded DA Hash Filter */
+#define DMA_HW_FEAT_HASHSEL 0x00000010 /* HASH Filter */
+#define DMA_HW_FEAT_ADDMAC 0x00000020 /* Multiple MAC Addr Reg */
+#define DMA_HW_FEAT_PCSSEL 0x00000040 /* PCS registers */
+#define DMA_HW_FEAT_L3L4FLTREN 0x00000080 /* Layer 3 & Layer 4 Feature */
+#define DMA_HW_FEAT_SMASEL 0x00000100 /* SMA(MDIO) Interface */
+#define DMA_HW_FEAT_RWKSEL 0x00000200 /* PMT Remote Wakeup */
+#define DMA_HW_FEAT_MGKSEL 0x00000400 /* PMT Magic Packet */
+#define DMA_HW_FEAT_MMCSEL 0x00000800 /* RMON Module */
+#define DMA_HW_FEAT_TSVER1SEL 0x00001000 /* Only IEEE 1588-2002 */
+#define DMA_HW_FEAT_TSVER2SEL 0x00002000 /* IEEE 1588-2008 PTPv2 */
+#define DMA_HW_FEAT_EEESEL 0x00004000 /* Energy Efficient Ethernet */
+#define DMA_HW_FEAT_AVSEL 0x00008000 /* AV Feature */
+#define DMA_HW_FEAT_TXCOESEL 0x00010000 /* Checksum Offload in Tx */
+#define DMA_HW_FEAT_RXTYP1COE 0x00020000 /* IP COE (Type 1) in Rx */
+#define DMA_HW_FEAT_RXTYP2COE 0x00040000 /* IP COE (Type 2) in Rx */
+#define DMA_HW_FEAT_RXFIFOSIZE 0x00080000 /* Rx FIFO > 2048 Bytes */
+#define DMA_HW_FEAT_RXCHCNT 0x00300000 /* No. additional Rx Channels */
+#define DMA_HW_FEAT_TXCHCNT 0x00c00000 /* No. additional Tx Channels */
+#define DMA_HW_FEAT_ENHDESSEL 0x01000000 /* Alternate Descriptor */
+/* Timestamping with Internal System Time */
+#define DMA_HW_FEAT_INTTSEN 0x02000000
+#define DMA_HW_FEAT_FLEXIPPSEN 0x04000000 /* Flexible PPS Output */
+#define DMA_HW_FEAT_SAVLANINS 0x08000000 /* Source Addr or VLAN */
+#define DMA_HW_FEAT_ACTPHYIF 0x70000000 /* Active/selected PHY iface */
+#define DEFAULT_DMA_PBL 8
+
+/* Max/Min RI Watchdog Timer count value */
+#define MAX_DMA_RIWT 0xff
+#define MIN_DMA_RIWT 0x20
+/* Tx coalesce parameters */
+#define STMMAC_COAL_TX_TIMER 40000
+#define STMMAC_MAX_COAL_TX_TICK 100000
+#define STMMAC_TX_MAX_FRAMES 256
+#define STMMAC_TX_FRAMES 64
+
+/* Rx IPC status */
+enum rx_frame_status {
+ good_frame = 0,
+ discard_frame = 1,
+ csum_none = 2,
+ llc_snap = 4,
+};
+
+enum dma_irq_status {
+ tx_hard_error = 0x1,
+ tx_hard_error_bump_tc = 0x2,
+ handle_rx = 0x4,
+ handle_tx = 0x8,
+};
+
+#define CORE_IRQ_TX_PATH_IN_LPI_MODE (1 << 1)
+#define CORE_IRQ_TX_PATH_EXIT_LPI_MODE (1 << 2)
+#define CORE_IRQ_RX_PATH_IN_LPI_MODE (1 << 3)
+#define CORE_IRQ_RX_PATH_EXIT_LPI_MODE (1 << 4)
+
+#define CORE_PCS_ANE_COMPLETE (1 << 5)
+#define CORE_PCS_LINK_STATUS (1 << 6)
+#define CORE_RGMII_IRQ (1 << 7)
+
+struct rgmii_adv {
+ unsigned int pause;
+ unsigned int duplex;
+ unsigned int lp_pause;
+ unsigned int lp_duplex;
+};
+
+#define STMMAC_PCS_PAUSE 1
+#define STMMAC_PCS_ASYM_PAUSE 2
+
+/* DMA HW capabilities */
+struct dma_features {
+ unsigned int mbps_10_100;
+ unsigned int mbps_1000;
+ unsigned int half_duplex;
+ unsigned int hash_filter;
+ unsigned int multi_addr;
+ unsigned int pcs;
+ unsigned int sma_mdio;
+ unsigned int pmt_remote_wake_up;
+ unsigned int pmt_magic_frame;
+ unsigned int rmon;
+ /* IEEE 1588-2002 */
+ unsigned int time_stamp;
+ /* IEEE 1588-2008 */
+ unsigned int atime_stamp;
+ /* 802.3az - Energy-Efficient Ethernet (EEE) */
+ unsigned int eee;
+ unsigned int av;
+ /* TX and RX csum */
+ unsigned int tx_coe;
+ unsigned int rx_coe_type1;
+ unsigned int rx_coe_type2;
+ unsigned int rxfifo_over_2048;
+ /* TX and RX number of channels */
+ unsigned int number_rx_channel;
+ unsigned int number_tx_channel;
+ /* Alternate (enhanced) DESC mode */
+ unsigned int enh_desc;
+};
+
+/* GMAC TX FIFO is 8K, Rx FIFO is 16K */
+#define BUF_SIZE_16KiB 16384
+#define BUF_SIZE_8KiB 8192
+#define BUF_SIZE_4KiB 4096
+#define BUF_SIZE_2KiB 2048
+
+/* Power Down and WOL */
+#define PMT_NOT_SUPPORTED 0
+#define PMT_SUPPORTED 1
+
+/* Common MAC defines */
+#define MAC_CTRL_REG 0x00000000 /* MAC Control */
+#define MAC_ENABLE_TX 0x00000008 /* Transmitter Enable */
+#define MAC_RNABLE_RX 0x00000004 /* Receiver Enable */
+
+/* Default LPI timers */
+#define STMMAC_DEFAULT_LIT_LS 0x3E8
+#define STMMAC_DEFAULT_TWT_LS 0x0
+
+#define STMMAC_CHAIN_MODE 0x1
+#define STMMAC_RING_MODE 0x2
+
+struct stmmac_desc_ops {
+ /* DMA RX descriptor ring initialization */
+ void (*init_rx_desc) (struct dma_desc *p, int disable_rx_ic, int mode,
+ int end);
+ /* DMA TX descriptor ring initialization */
+ void (*init_tx_desc) (struct dma_desc *p, int mode, int end);
+
+ /* Invoked by the xmit function to prepare the tx descriptor */
+ void (*prepare_tx_desc) (struct dma_desc *p, int is_fs, int len,
+ int csum_flag, int mode);
+ /* Set/get the owner of the descriptor */
+ void (*set_tx_owner) (struct dma_desc *p);
+ int (*get_tx_owner) (struct dma_desc *p);
+ /* Invoked by the xmit function to close the tx descriptor */
+ void (*close_tx_desc) (struct dma_desc *p);
+ /* Clean the tx descriptor as soon as the tx irq is received */
+ void (*release_tx_desc) (struct dma_desc *p, int mode);
+ /* Clear interrupt on tx frame completion. When this bit is
+ * set an interrupt happens as soon as the frame is transmitted */
+ void (*clear_tx_ic) (struct dma_desc *p);
+ /* Last tx segment reports the transmit status */
+ int (*get_tx_ls) (struct dma_desc *p);
+ /* Return the transmit status looking at the TDES1 */
+ int (*tx_status) (void *data, struct stmmac_extra_stats *x,
+ struct dma_desc *p, void __iomem *ioaddr);
+ /* Get the buffer size from the descriptor */
+ int (*get_tx_len) (struct dma_desc *p);
+ /* Handle extra events on specific interrupts hw dependent */
+ int (*get_rx_owner) (struct dma_desc *p);
+ void (*set_rx_owner) (struct dma_desc *p);
+ /* Get the receive frame size */
+ int (*get_rx_frame_len) (struct dma_desc *p, int rx_coe_type);
+ /* Return the reception status looking at the RDES1 */
+ int (*rx_status) (void *data, struct stmmac_extra_stats *x,
+ struct dma_desc *p);
+ void (*rx_extended_status) (void *data, struct stmmac_extra_stats *x,
+ struct dma_extended_desc *p);
+ /* Set tx timestamp enable bit */
+ void (*enable_tx_timestamp) (struct dma_desc *p);
+ /* get tx timestamp status */
+ int (*get_tx_timestamp_status) (struct dma_desc *p);
+ /* get timestamp value */
+ u64(*get_timestamp) (void *desc, u32 ats);
+ /* get rx timestamp status */
+ int (*get_rx_timestamp_status) (void *desc, u32 ats);
+};
+
+struct stmmac_dma_ops {
+ /* DMA core initialization */
+ int (*init) (void __iomem *ioaddr, int pbl, int fb, int mb,
+ int burst_len, u32 dma_tx, u32 dma_rx, int atds);
+ /* Dump DMA registers */
+ void (*dump_regs) (void __iomem *ioaddr);
+ /* Set tx/rx threshold in the csr6 register
+ * An invalid value enables the store-and-forward mode */
+ void (*dma_mode) (void __iomem *ioaddr, int txmode, int rxmode);
+ /* To track extra statistic (if supported) */
+ void (*dma_diagnostic_fr) (void *data, struct stmmac_extra_stats *x,
+ void __iomem *ioaddr);
+ void (*enable_dma_transmission) (void __iomem *ioaddr);
+ void (*enable_dma_irq) (void __iomem *ioaddr);
+ void (*disable_dma_irq) (void __iomem *ioaddr);
+ void (*start_tx) (void __iomem *ioaddr);
+ void (*stop_tx) (void __iomem *ioaddr);
+ void (*start_rx) (void __iomem *ioaddr);
+ void (*stop_rx) (void __iomem *ioaddr);
+ int (*dma_interrupt) (void __iomem *ioaddr,
+ struct stmmac_extra_stats *x);
+ /* If supported then get the optional core features */
+ unsigned int (*get_hw_feature) (void __iomem *ioaddr);
+ /* Program the HW RX Watchdog */
+ void (*rx_watchdog) (void __iomem *ioaddr, u32 riwt);
+};
+
+struct stmmac_ops {
+ /* MAC core initialization */
+ void (*core_init) (void __iomem *ioaddr);
+ /* Enable and verify that the IPC module is supported */
+ int (*rx_ipc) (void __iomem *ioaddr);
+ /* Dump MAC registers */
+ void (*dump_regs) (void __iomem *ioaddr);
+ /* Handle extra events on specific interrupts hw dependent */
+ int (*host_irq_status) (void __iomem *ioaddr,
+ struct stmmac_extra_stats *x);
+ /* Multicast filter setting */
+ void (*set_filter) (struct net_device *dev, int id);
+ /* Flow control setting */
+ void (*flow_ctrl) (void __iomem *ioaddr, unsigned int duplex,
+ unsigned int fc, unsigned int pause_time);
+ /* Set power management mode (e.g. magic frame) */
+ void (*pmt) (void __iomem *ioaddr, unsigned long mode);
+ /* Set/Get Unicast MAC addresses */
+ void (*set_umac_addr) (void __iomem *ioaddr, unsigned char *addr,
+ unsigned int reg_n);
+ void (*get_umac_addr) (void __iomem *ioaddr, unsigned char *addr,
+ unsigned int reg_n);
+ void (*set_eee_mode) (void __iomem *ioaddr);
+ void (*reset_eee_mode) (void __iomem *ioaddr);
+ void (*set_eee_timer) (void __iomem *ioaddr, int ls, int tw);
+ void (*set_eee_pls) (void __iomem *ioaddr, int link);
+ void (*ctrl_ane) (void __iomem *ioaddr, bool restart);
+ void (*get_adv) (void __iomem *ioaddr, struct rgmii_adv *adv);
+};
+
+/* Hardware timestamping hooks (IEEE 1588 / PTP clock programming). */
+struct stmmac_hwtimestamp {
+ void (*config_hw_tstamping) (void __iomem *ioaddr, u32 data);
+ void (*config_sub_second_increment) (void __iomem *ioaddr);
+ int (*init_systime) (void __iomem *ioaddr, u32 sec, u32 nsec);
+ int (*config_addend) (void __iomem *ioaddr, u32 addend);
+ /* add_sub selects whether the sec/nsec delta is added or subtracted */
+ int (*adjust_systime) (void __iomem *ioaddr, u32 sec, u32 nsec,
+ int add_sub);
+ u64(*get_systime) (void __iomem *ioaddr);
+};
+
+struct mac_link {
+ int port;
+ int duplex;
+ int speed;
+};
+
+struct mii_regs {
+ unsigned int addr; /* MII Address */
+ unsigned int data; /* MII Data */
+};
+
+struct stmmac_ring_mode_ops {
+ unsigned int (*is_jumbo_frm) (int len, int ehn_desc);
+ unsigned int (*jumbo_frm) (void *priv, struct sk_buff *skb, int csum);
+ void (*refill_desc3) (void *priv, struct dma_desc *p);
+ void (*init_desc3) (struct dma_desc *p);
+ void (*clean_desc3) (void *priv, struct dma_desc *p);
+ int (*set_16kib_bfsize) (int mtu);
+};
+
+struct stmmac_chain_mode_ops {
+ void (*init) (void *des, dma_addr_t phy_addr, unsigned int size,
+ unsigned int extend_desc);
+ unsigned int (*is_jumbo_frm) (int len, int ehn_desc);
+ unsigned int (*jumbo_frm) (void *priv, struct sk_buff *skb, int csum);
+ void (*refill_desc3) (void *priv, struct dma_desc *p);
+ void (*clean_desc3) (void *priv, struct dma_desc *p);
+};
+
+/* Per-MAC bundle of operation tables and identification data, filled in
+ * by dwmac1000_setup()/dwmac100_setup() (declared below). */
+struct mac_device_info {
+ const struct stmmac_ops *mac;
+ const struct stmmac_desc_ops *desc;
+ const struct stmmac_dma_ops *dma;
+ const struct stmmac_ring_mode_ops *ring;
+ const struct stmmac_chain_mode_ops *chain;
+ const struct stmmac_hwtimestamp *ptp;
+ struct mii_regs mii; /* MII register Addresses */
+ struct mac_link link;
+ unsigned int synopsys_uid;
+};
+
+struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr);
+struct mac_device_info *dwmac100_setup(void __iomem *ioaddr);
+
+extern void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6],
+ unsigned int high, unsigned int low);
+extern void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
+ unsigned int high, unsigned int low);
+
+extern void stmmac_set_mac(void __iomem *ioaddr, bool enable);
+
+extern void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr);
+extern const struct stmmac_ring_mode_ops ring_mode_ops;
+extern const struct stmmac_chain_mode_ops chain_mode_ops;
+
+#endif /* __COMMON_H__ */
--- /dev/null
+/*******************************************************************************
+ Header File to describe the DMA descriptors.
+ Enhanced descriptors have been introduced in the case of DWMAC1000 Cores.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#ifndef __DESCS_H__
+#define __DESCS_H__
+
+/* Basic descriptor structure for normal and alternate descriptors */
+struct dma_desc {
+ /* Receive descriptor */
+ union {
+ struct {
+ /* RDES0 */
+ u32 payload_csum_error:1;
+ u32 crc_error:1;
+ u32 dribbling:1;
+ u32 mii_error:1;
+ u32 receive_watchdog:1;
+ u32 frame_type:1;
+ u32 collision:1;
+ u32 ipc_csum_error:1;
+ u32 last_descriptor:1;
+ u32 first_descriptor:1;
+ u32 vlan_tag:1;
+ u32 overflow_error:1;
+ u32 length_error:1;
+ u32 sa_filter_fail:1;
+ u32 descriptor_error:1;
+ u32 error_summary:1;
+ u32 frame_length:14;
+ u32 da_filter_fail:1;
+ u32 own:1;
+ /* RDES1 */
+ u32 buffer1_size:11;
+ u32 buffer2_size:11;
+ u32 reserved1:2;
+ u32 second_address_chained:1;
+ u32 end_ring:1;
+ u32 reserved2:5;
+ u32 disable_ic:1;
+
+ } rx;
+ struct {
+ /* RDES0 */
+ u32 rx_mac_addr:1;
+ u32 crc_error:1;
+ u32 dribbling:1;
+ u32 error_gmii:1;
+ u32 receive_watchdog:1;
+ u32 frame_type:1;
+ u32 late_collision:1;
+ u32 ipc_csum_error:1;
+ u32 last_descriptor:1;
+ u32 first_descriptor:1;
+ u32 vlan_tag:1;
+ u32 overflow_error:1;
+ u32 length_error:1;
+ u32 sa_filter_fail:1;
+ u32 descriptor_error:1;
+ u32 error_summary:1;
+ u32 frame_length:14;
+ u32 da_filter_fail:1;
+ u32 own:1;
+ /* RDES1 */
+ u32 buffer1_size:13;
+ u32 reserved1:1;
+ u32 second_address_chained:1;
+ u32 end_ring:1;
+ u32 buffer2_size:13;
+ u32 reserved2:2;
+ u32 disable_ic:1;
+ } erx; /* -- enhanced -- */
+
+ /* Transmit descriptor */
+ struct {
+ /* TDES0 */
+ u32 deferred:1;
+ u32 underflow_error:1;
+ u32 excessive_deferral:1;
+ u32 collision_count:4;
+ u32 vlan_frame:1;
+ u32 excessive_collisions:1;
+ u32 late_collision:1;
+ u32 no_carrier:1;
+ u32 loss_carrier:1;
+ u32 payload_error:1;
+ u32 frame_flushed:1;
+ u32 jabber_timeout:1;
+ u32 error_summary:1;
+ u32 ip_header_error:1;
+ u32 time_stamp_status:1;
+ u32 reserved1:13;
+ u32 own:1;
+ /* TDES1 */
+ u32 buffer1_size:11;
+ u32 buffer2_size:11;
+ u32 time_stamp_enable:1;
+ u32 disable_padding:1;
+ u32 second_address_chained:1;
+ u32 end_ring:1;
+ u32 crc_disable:1;
+ u32 checksum_insertion:2;
+ u32 first_segment:1;
+ u32 last_segment:1;
+ u32 interrupt:1;
+ } tx;
+ struct {
+ /* TDES0 */
+ u32 deferred:1;
+ u32 underflow_error:1;
+ u32 excessive_deferral:1;
+ u32 collision_count:4;
+ u32 vlan_frame:1;
+ u32 excessive_collisions:1;
+ u32 late_collision:1;
+ u32 no_carrier:1;
+ u32 loss_carrier:1;
+ u32 payload_error:1;
+ u32 frame_flushed:1;
+ u32 jabber_timeout:1;
+ u32 error_summary:1;
+ u32 ip_header_error:1;
+ u32 time_stamp_status:1;
+ u32 reserved1:2;
+ u32 second_address_chained:1;
+ u32 end_ring:1;
+ u32 checksum_insertion:2;
+ u32 reserved2:1;
+ u32 time_stamp_enable:1;
+ u32 disable_padding:1;
+ u32 crc_disable:1;
+ u32 first_segment:1;
+ u32 last_segment:1;
+ u32 interrupt:1;
+ u32 own:1;
+ /* TDES1 */
+ u32 buffer1_size:13;
+ u32 reserved3:3;
+ u32 buffer2_size:13;
+ u32 reserved4:3;
+ } etx; /* -- enhanced -- */
+ } des01;
+ unsigned int des2;
+ unsigned int des3;
+};
+
+/* Extended descriptor structure (supported by new SYNP GMAC generations) */
+struct dma_extended_desc {
+ struct dma_desc basic;
+ union {
+ struct {
+ u32 ip_payload_type:3;
+ u32 ip_hdr_err:1;
+ u32 ip_payload_err:1;
+ u32 ip_csum_bypassed:1;
+ u32 ipv4_pkt_rcvd:1;
+ u32 ipv6_pkt_rcvd:1;
+ u32 msg_type:4;
+ u32 ptp_frame_type:1;
+ u32 ptp_ver:1;
+ u32 timestamp_dropped:1;
+ u32 reserved:1;
+ u32 av_pkt_rcvd:1;
+ u32 av_tagged_pkt_rcvd:1;
+ u32 vlan_tag_priority_val:3;
+ u32 reserved3:3;
+ u32 l3_filter_match:1;
+ u32 l4_filter_match:1;
+ u32 l3_l4_filter_no_match:2;
+ u32 reserved4:4;
+ } erx;
+ struct {
+ u32 reserved;
+ } etx;
+ } des4;
+ unsigned int des5; /* Reserved */
+ unsigned int des6; /* Tx/Rx Timestamp Low */
+ unsigned int des7; /* Tx/Rx Timestamp High */
+};
+
+/* Transmit checksum insertion control */
+enum tdes_csum_insertion {
+ cic_disabled = 0, /* Checksum Insertion Control */
+ cic_only_ip = 1, /* Only IP header */
+ /* IP header but pseudoheader is not calculated */
+ cic_no_pseudoheader = 2,
+ cic_full = 3, /* IP header and pseudoheader */
+};
+
+/* Extended RDES4 definitions */
+#define RDES_EXT_NO_PTP 0
+#define RDES_EXT_SYNC 0x1
+#define RDES_EXT_FOLLOW_UP 0x2
+#define RDES_EXT_DELAY_REQ 0x3
+#define RDES_EXT_DELAY_RESP 0x4
+#define RDES_EXT_PDELAY_REQ 0x5
+#define RDES_EXT_PDELAY_RESP 0x6
+#define RDES_EXT_PDELAY_FOLLOW_UP 0x7
+
+#endif /* __DESCS_H__ */
--- /dev/null
+/*******************************************************************************
+ Header File to describe Normal/enhanced descriptor functions used for RING
+ and CHAINED modes.
+
+ Copyright(C) 2011 STMicroelectronics Ltd
+
+ It defines all the functions used to handle the normal/enhanced
+ descriptors in case of the DMA is configured to work in chained or
+ in ring mode.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#ifndef __DESC_COM_H__
+#define __DESC_COM_H__
+
+/* Specific functions used for Ring mode */
+
+/* Enhanced descriptors */
+/* Ring mode, enhanced RX: advertise a full (8KiB - 1) buffer2 area and
+ * flag the end-of-ring bit on the last descriptor. */
+static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end)
+{
+ p->des01.erx.buffer2_size = BUF_SIZE_8KiB - 1;
+ if (end)
+ p->des01.erx.end_ring = 1;
+}
+
+/* Ring mode, enhanced TX: only the last descriptor needs end_ring set. */
+static inline void ehn_desc_tx_set_on_ring(struct dma_desc *p, int end)
+{
+ if (end)
+ p->des01.etx.end_ring = 1;
+}
+
+/* Ring mode, enhanced TX: (re)write the end-of-ring flag to @ter. */
+static inline void enh_desc_end_tx_desc_on_ring(struct dma_desc *p, int ter)
+{
+ p->des01.etx.end_ring = ter;
+}
+
+/* Ring mode, enhanced TX: lengths above 4KiB are split between buffer1
+ * (capped at 4KiB) and buffer2 (remainder). */
+static inline void enh_set_tx_desc_len_on_ring(struct dma_desc *p, int len)
+{
+ if (unlikely(len > BUF_SIZE_4KiB)) {
+ p->des01.etx.buffer1_size = BUF_SIZE_4KiB;
+ p->des01.etx.buffer2_size = len - BUF_SIZE_4KiB;
+ } else
+ p->des01.etx.buffer1_size = len;
+}
+
+/* Normal descriptors */
+/* Ring mode, normal RX: advertise a (2KiB - 1) buffer2 area and flag the
+ * end-of-ring bit on the last descriptor. */
+static inline void ndesc_rx_set_on_ring(struct dma_desc *p, int end)
+{
+ p->des01.rx.buffer2_size = BUF_SIZE_2KiB - 1;
+ if (end)
+ p->des01.rx.end_ring = 1;
+}
+
+/* Ring mode, normal TX: only the last descriptor needs end_ring set. */
+static inline void ndesc_tx_set_on_ring(struct dma_desc *p, int end)
+{
+ if (end)
+ p->des01.tx.end_ring = 1;
+}
+
+/* Ring mode, normal TX: (re)write the end-of-ring flag to @ter. */
+static inline void ndesc_end_tx_desc_on_ring(struct dma_desc *p, int ter)
+{
+ p->des01.tx.end_ring = ter;
+}
+
+/* Ring mode, normal TX: lengths above 2KiB are split between buffer1
+ * (capped at 2KiB - 1, the maximum an 11-bit size field can hold) and
+ * buffer2 (remainder).
+ * Fix: the original wrote the *enhanced* (etx) TDES1 bitfields here.
+ * The enhanced layout uses 13-bit size fields at different bit offsets
+ * than the normal (tx) layout's 11-bit fields, so writing etx from the
+ * normal-descriptor path corrupts TDES1 on this hardware. Use the tx
+ * union member, matching every other norm_/ndesc_ helper in this file.
+ */
+static inline void norm_set_tx_desc_len_on_ring(struct dma_desc *p, int len)
+{
+ if (unlikely(len > BUF_SIZE_2KiB)) {
+ p->des01.tx.buffer1_size = BUF_SIZE_2KiB - 1;
+ p->des01.tx.buffer2_size = len - p->des01.tx.buffer1_size;
+ } else
+ p->des01.tx.buffer1_size = len;
+}
+
+/* Specific functions used for Chain mode */
+
+/* Enhanced descriptors */
+/* Chain mode, enhanced RX: mark buffer2 as a pointer to the next
+ * descriptor. @end is unused (kept for a signature common with ring). */
+static inline void ehn_desc_rx_set_on_chain(struct dma_desc *p, int end)
+{
+ p->des01.erx.second_address_chained = 1;
+}
+
+/* Chain mode, enhanced TX: mark buffer2 as a pointer to the next
+ * descriptor. @end is unused (kept for a signature common with ring). */
+static inline void ehn_desc_tx_set_on_chain(struct dma_desc *p, int end)
+{
+ p->des01.etx.second_address_chained = 1;
+}
+
+/* Chain mode, enhanced TX: chained descriptors have no end-of-ring bit;
+ * just keep the chained flag set. @ter is unused. */
+static inline void enh_desc_end_tx_desc_on_chain(struct dma_desc *p, int ter)
+{
+ p->des01.etx.second_address_chained = 1;
+}
+
+/* Chain mode, enhanced TX: a single buffer carries the whole length
+ * (buffer2 holds the next-descriptor pointer). */
+static inline void enh_set_tx_desc_len_on_chain(struct dma_desc *p, int len)
+{
+ p->des01.etx.buffer1_size = len;
+}
+
+/* Normal descriptors */
+/* Chain mode, normal RX: mark buffer2 as a pointer to the next
+ * descriptor. @end is unused (kept for a signature common with ring). */
+static inline void ndesc_rx_set_on_chain(struct dma_desc *p, int end)
+{
+ p->des01.rx.second_address_chained = 1;
+}
+
+/* Chain mode, normal TX: mark buffer2 as a pointer to the next
+ * descriptor. @ring_size is unused. */
+static inline void ndesc_tx_set_on_chain(struct dma_desc *p, int ring_size)
+{
+ p->des01.tx.second_address_chained = 1;
+}
+
+/* Chain mode, normal TX: chained descriptors have no end-of-ring bit;
+ * just keep the chained flag set. @ter is unused. */
+static inline void ndesc_end_tx_desc_on_chain(struct dma_desc *p, int ter)
+{
+ p->des01.tx.second_address_chained = 1;
+}
+
+/* Chain mode, normal TX: a single buffer carries the whole length
+ * (buffer2 holds the next-descriptor pointer). */
+static inline void norm_set_tx_desc_len_on_chain(struct dma_desc *p, int len)
+{
+ p->des01.tx.buffer1_size = len;
+}
+#endif /* __DESC_COM_H__ */
--- /dev/null
+/*******************************************************************************
+ MAC 10/100 Header File
+
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#ifndef __DWMAC100_H__
+#define __DWMAC100_H__
+
+#include <linux/phy.h>
+#include "common.h"
+
+/*----------------------------------------------------------------------------
+ * MAC BLOCK defines
+ *---------------------------------------------------------------------------*/
+/* MAC CSR offset */
+#define MAC_CONTROL 0x00000000 /* MAC Control */
+#define MAC_ADDR_HIGH 0x00000004 /* MAC Address High */
+#define MAC_ADDR_LOW 0x00000008 /* MAC Address Low */
+#define MAC_HASH_HIGH 0x0000000c /* Multicast Hash Table High */
+#define MAC_HASH_LOW 0x00000010 /* Multicast Hash Table Low */
+#define MAC_MII_ADDR 0x00000014 /* MII Address */
+#define MAC_MII_DATA 0x00000018 /* MII Data */
+#define MAC_FLOW_CTRL 0x0000001c /* Flow Control */
+#define MAC_VLAN1 0x00000020 /* VLAN1 Tag */
+#define MAC_VLAN2 0x00000024 /* VLAN2 Tag */
+
+/* MAC CTRL defines */
+#define MAC_CONTROL_RA 0x80000000 /* Receive All Mode */
+#define MAC_CONTROL_BLE 0x40000000 /* Endian Mode */
+#define MAC_CONTROL_HBD 0x10000000 /* Heartbeat Disable */
+#define MAC_CONTROL_PS 0x08000000 /* Port Select */
+#define MAC_CONTROL_DRO 0x00800000 /* Disable Receive Own */
+#define MAC_CONTROL_EXT_LOOPBACK 0x00400000 /* Reserved (ext loopback?) */
+#define MAC_CONTROL_OM 0x00200000 /* Loopback Operating Mode */
+#define MAC_CONTROL_F 0x00100000 /* Full Duplex Mode */
+#define MAC_CONTROL_PM 0x00080000 /* Pass All Multicast */
+#define MAC_CONTROL_PR 0x00040000 /* Promiscuous Mode */
+#define MAC_CONTROL_IF 0x00020000 /* Inverse Filtering */
+#define MAC_CONTROL_PB 0x00010000 /* Pass Bad Frames */
+#define MAC_CONTROL_HO 0x00008000 /* Hash Only Filtering Mode */
+#define MAC_CONTROL_HP 0x00002000 /* Hash/Perfect Filtering Mode */
+#define MAC_CONTROL_LCC 0x00001000 /* Late Collision Control */
+#define MAC_CONTROL_DBF 0x00000800 /* Disable Broadcast Frames */
+#define MAC_CONTROL_DRTY 0x00000400 /* Disable Retry */
+#define MAC_CONTROL_ASTP 0x00000100 /* Automatic Pad Stripping */
+#define MAC_CONTROL_BOLMT_10 0x00000000 /* Back Off Limit 10 */
+#define MAC_CONTROL_BOLMT_8 0x00000040 /* Back Off Limit 8 */
+#define MAC_CONTROL_BOLMT_4 0x00000080 /* Back Off Limit 4 */
+#define MAC_CONTROL_BOLMT_1 0x000000c0 /* Back Off Limit 1 */
+#define MAC_CONTROL_DC 0x00000020 /* Deferral Check */
+#define MAC_CONTROL_TE 0x00000008 /* Transmitter Enable */
+#define MAC_CONTROL_RE 0x00000004 /* Receiver Enable */
+
+#define MAC_CORE_INIT (MAC_CONTROL_HBD | MAC_CONTROL_ASTP)
+
+/* MAC FLOW CTRL defines */
+#define MAC_FLOW_CTRL_PT_MASK 0xffff0000 /* Pause Time Mask */
+#define MAC_FLOW_CTRL_PT_SHIFT 16
+#define MAC_FLOW_CTRL_PASS 0x00000004 /* Pass Control Frames */
+#define MAC_FLOW_CTRL_ENABLE 0x00000002 /* Flow Control Enable */
+#define MAC_FLOW_CTRL_PAUSE 0x00000001 /* Flow Control Busy ... */
+
+/* MII ADDR defines */
+#define MAC_MII_ADDR_WRITE 0x00000002 /* MII Write */
+#define MAC_MII_ADDR_BUSY 0x00000001 /* MII Busy */
+
+/*----------------------------------------------------------------------------
+ * DMA BLOCK defines
+ *---------------------------------------------------------------------------*/
+
+/* DMA Bus Mode register defines */
+#define DMA_BUS_MODE_DBO 0x00100000 /* Descriptor Byte Ordering */
+#define DMA_BUS_MODE_BLE 0x00000080 /* Big Endian/Little Endian */
+#define DMA_BUS_MODE_PBL_MASK 0x00003f00 /* Programmable Burst Len */
+#define DMA_BUS_MODE_PBL_SHIFT 8
+#define DMA_BUS_MODE_DSL_MASK 0x0000007c /* Descriptor Skip Length */
+#define DMA_BUS_MODE_DSL_SHIFT 2 /* (in DWORDS) */
+#define DMA_BUS_MODE_BAR_BUS 0x00000002 /* Bar-Bus Arbitration */
+#define DMA_BUS_MODE_SFT_RESET 0x00000001 /* Software Reset */
+#define DMA_BUS_MODE_DEFAULT 0x00000000
+
+/* DMA Control register defines */
+#define DMA_CONTROL_SF 0x00200000 /* Store And Forward */
+
+/* Transmit Threshold Control */
+enum ttc_control {
+ DMA_CONTROL_TTC_DEFAULT = 0x00000000, /* Threshold is 32 DWORDS */
+ DMA_CONTROL_TTC_64 = 0x00004000, /* Threshold is 64 DWORDS */
+ DMA_CONTROL_TTC_128 = 0x00008000, /* Threshold is 128 DWORDS */
+ DMA_CONTROL_TTC_256 = 0x0000c000, /* Threshold is 256 DWORDS */
+ DMA_CONTROL_TTC_18 = 0x00400000, /* Threshold is 18 DWORDS */
+ DMA_CONTROL_TTC_24 = 0x00404000, /* Threshold is 24 DWORDS */
+ DMA_CONTROL_TTC_32 = 0x00408000, /* Threshold is 32 DWORDS */
+ DMA_CONTROL_TTC_40 = 0x0040c000, /* Threshold is 40 DWORDS */
+ DMA_CONTROL_SE = 0x00000008, /* Stop On Empty */
+ DMA_CONTROL_OSF = 0x00000004, /* Operate On 2nd Frame */
+};
+
+/* STMAC110 DMA Missed Frame Counter register defines */
+#define DMA_MISSED_FRAME_OVE 0x10000000 /* FIFO Overflow Overflow */
+#define DMA_MISSED_FRAME_OVE_CNTR 0x0ffe0000 /* Overflow Frame Counter */
+#define DMA_MISSED_FRAME_OVE_M 0x00010000 /* Missed Frame Overflow */
+#define DMA_MISSED_FRAME_M_CNTR 0x0000ffff /* Missed Frame Counter */
+
+extern const struct stmmac_dma_ops dwmac100_dma_ops;
+
+#endif /* __DWMAC100_H__ */
--- /dev/null
+/*******************************************************************************
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+#ifndef __DWMAC1000_H__
+#define __DWMAC1000_H__
+
+#include <linux/phy.h>
+#include "common.h"
+
+#define GMAC_CONTROL 0x00000000 /* Configuration */
+#define GMAC_FRAME_FILTER 0x00000004 /* Frame Filter */
+#define GMAC_HASH_HIGH 0x00000008 /* Multicast Hash Table High */
+#define GMAC_HASH_LOW 0x0000000c /* Multicast Hash Table Low */
+#define GMAC_MII_ADDR 0x00000010 /* MII Address */
+#define GMAC_MII_DATA 0x00000014 /* MII Data */
+#define GMAC_FLOW_CTRL 0x00000018 /* Flow Control */
+#define GMAC_VLAN_TAG 0x0000001c /* VLAN Tag */
+#define GMAC_VERSION 0x00000020 /* GMAC CORE Version */
+#define GMAC_WAKEUP_FILTER 0x00000028 /* Wake-up Frame Filter */
+
+#define GMAC_INT_STATUS 0x00000038 /* interrupt status register */
+enum dwmac1000_irq_status {
+ lpiis_irq = 0x400,
+ time_stamp_irq = 0x0200,
+ mmc_rx_csum_offload_irq = 0x0080,
+ mmc_tx_irq = 0x0040,
+ mmc_rx_irq = 0x0020,
+ mmc_irq = 0x0010,
+ pmt_irq = 0x0008,
+ pcs_ane_irq = 0x0004,
+ pcs_link_irq = 0x0002,
+ rgmii_irq = 0x0001,
+};
+#define GMAC_INT_MASK 0x0000003c /* interrupt mask register */
+
+/* PMT Control and Status */
+#define GMAC_PMT 0x0000002c
+enum power_event {
+ pointer_reset = 0x80000000,
+ global_unicast = 0x00000200,
+ wake_up_rx_frame = 0x00000040,
+ magic_frame = 0x00000020,
+ wake_up_frame_en = 0x00000004,
+ magic_pkt_en = 0x00000002,
+ power_down = 0x00000001,
+};
+
+/* Energy Efficient Ethernet (EEE)
+ *
+ * LPI status, timer and control register offset
+ */
+#define LPI_CTRL_STATUS 0x0030
+#define LPI_TIMER_CTRL 0x0034
+
+/* LPI control and status defines */
+#define LPI_CTRL_STATUS_LPITXA 0x00080000 /* Enable LPI TX Automate */
+#define LPI_CTRL_STATUS_PLSEN 0x00040000 /* Enable PHY Link Status */
+#define LPI_CTRL_STATUS_PLS 0x00020000 /* PHY Link Status */
+#define LPI_CTRL_STATUS_LPIEN 0x00010000 /* LPI Enable */
+#define LPI_CTRL_STATUS_RLPIST 0x00000200 /* Receive LPI state */
+#define LPI_CTRL_STATUS_TLPIST 0x00000100 /* Transmit LPI state */
+#define LPI_CTRL_STATUS_RLPIEX 0x00000008 /* Receive LPI Exit */
+#define LPI_CTRL_STATUS_RLPIEN 0x00000004 /* Receive LPI Entry */
+#define LPI_CTRL_STATUS_TLPIEX 0x00000002 /* Transmit LPI Exit */
+#define LPI_CTRL_STATUS_TLPIEN 0x00000001 /* Transmit LPI Entry */
+
+/* GMAC HW ADDR regs */
+#define GMAC_ADDR_HIGH(reg) (((reg > 15) ? 0x00000800 : 0x00000040) + \
+ (reg * 8))
+#define GMAC_ADDR_LOW(reg) (((reg > 15) ? 0x00000804 : 0x00000044) + \
+ (reg * 8))
+#define GMAC_MAX_PERFECT_ADDRESSES 32
+
+/* PCS registers (AN/TBI/SGMII/RGMII) offset */
+#define GMAC_AN_CTRL 0x000000c0 /* AN control */
+#define GMAC_AN_STATUS 0x000000c4 /* AN status */
+#define GMAC_ANE_ADV 0x000000c8 /* Auto-Neg. Advertisement */
+#define GMAC_ANE_LPA 0x000000cc /* Auto-Neg. link partener ability */
+#define GMAC_ANE_EXP 0x000000d0 /* ANE expansion */
+#define GMAC_TBI 0x000000d4 /* TBI extend status */
+#define GMAC_S_R_GMII 0x000000d8 /* SGMII RGMII status */
+
+/* AN Configuration defines */
+#define GMAC_AN_CTRL_RAN 0x00000200 /* Restart Auto-Negotiation */
+#define GMAC_AN_CTRL_ANE 0x00001000 /* Auto-Negotiation Enable */
+#define GMAC_AN_CTRL_ELE 0x00004000 /* External Loopback Enable */
+#define GMAC_AN_CTRL_ECD 0x00010000 /* Enable Comma Detect */
+#define GMAC_AN_CTRL_LR 0x00020000 /* Lock to Reference */
+#define GMAC_AN_CTRL_SGMRAL 0x00040000 /* SGMII RAL Control */
+
+/* AN Status defines */
+#define GMAC_AN_STATUS_LS 0x00000004 /* Link Status 0:down 1:up */
+#define GMAC_AN_STATUS_ANA 0x00000008 /* Auto-Negotiation Ability */
+#define GMAC_AN_STATUS_ANC 0x00000020 /* Auto-Negotiation Complete */
+#define GMAC_AN_STATUS_ES 0x00000100 /* Extended Status */
+
+/* Register 54 (SGMII/RGMII status register) */
+#define GMAC_S_R_GMII_LINK 0x8
+#define GMAC_S_R_GMII_SPEED 0x5
+#define GMAC_S_R_GMII_SPEED_SHIFT 0x1
+#define GMAC_S_R_GMII_MODE 0x1
+#define GMAC_S_R_GMII_SPEED_125 2
+#define GMAC_S_R_GMII_SPEED_25 1
+
+/* Common ADV and LPA defines */
+#define GMAC_ANE_FD (1 << 5)
+#define GMAC_ANE_HD (1 << 6)
+#define GMAC_ANE_PSE (3 << 7)
+#define GMAC_ANE_PSE_SHIFT 7
+
+/* GMAC Configuration defines */
+#define GMAC_CONTROL_TC	0x01000000	/* Transmit Conf. in RGMII/SGMII */
+#define GMAC_CONTROL_WD	0x00800000	/* Disable Watchdog on receive */
+#define GMAC_CONTROL_JD	0x00400000	/* Jabber disable */
+#define GMAC_CONTROL_BE 0x00200000 /* Frame Burst Enable */
+#define GMAC_CONTROL_JE 0x00100000 /* Jumbo frame */
+enum inter_frame_gap {
+ GMAC_CONTROL_IFG_88 = 0x00040000,
+ GMAC_CONTROL_IFG_80 = 0x00020000,
+ GMAC_CONTROL_IFG_40 = 0x000e0000,
+};
+#define GMAC_CONTROL_DCRS 0x00010000 /* Disable carrier sense */
+#define GMAC_CONTROL_PS 0x00008000 /* Port Select 0:GMI 1:MII */
+#define GMAC_CONTROL_FES 0x00004000 /* Speed 0:10 1:100 */
+#define GMAC_CONTROL_DO 0x00002000 /* Disable Rx Own */
+#define GMAC_CONTROL_LM 0x00001000 /* Loop-back mode */
+#define GMAC_CONTROL_DM 0x00000800 /* Duplex Mode */
+#define GMAC_CONTROL_IPC 0x00000400 /* Checksum Offload */
+#define GMAC_CONTROL_DR 0x00000200 /* Disable Retry */
+#define GMAC_CONTROL_LUD 0x00000100 /* Link up/down */
+#define GMAC_CONTROL_ACS 0x00000080 /* Auto Pad/FCS Stripping */
+#define GMAC_CONTROL_DC 0x00000010 /* Deferral Check */
+#define GMAC_CONTROL_TE 0x00000008 /* Transmitter Enable */
+#define GMAC_CONTROL_RE 0x00000004 /* Receiver Enable */
+
+#define GMAC_CORE_INIT (GMAC_CONTROL_JD | GMAC_CONTROL_PS | GMAC_CONTROL_ACS | \
+ GMAC_CONTROL_JE | GMAC_CONTROL_BE)
+
+/* GMAC Frame Filter defines */
+#define GMAC_FRAME_FILTER_PR 0x00000001 /* Promiscuous Mode */
+#define GMAC_FRAME_FILTER_HUC 0x00000002 /* Hash Unicast */
+#define GMAC_FRAME_FILTER_HMC 0x00000004 /* Hash Multicast */
+#define GMAC_FRAME_FILTER_DAIF 0x00000008 /* DA Inverse Filtering */
+#define GMAC_FRAME_FILTER_PM 0x00000010 /* Pass all multicast */
+#define GMAC_FRAME_FILTER_DBF 0x00000020 /* Disable Broadcast frames */
+#define GMAC_FRAME_FILTER_SAIF 0x00000100 /* Inverse Filtering */
+#define GMAC_FRAME_FILTER_SAF 0x00000200 /* Source Address Filter */
+#define GMAC_FRAME_FILTER_HPF 0x00000400 /* Hash or perfect Filter */
+#define GMAC_FRAME_FILTER_RA 0x80000000 /* Receive all mode */
+/* GMII ADDR defines */
+#define GMAC_MII_ADDR_WRITE 0x00000002 /* MII Write */
+#define GMAC_MII_ADDR_BUSY 0x00000001 /* MII Busy */
+/* GMAC FLOW CTRL defines */
+#define GMAC_FLOW_CTRL_PT_MASK 0xffff0000 /* Pause Time Mask */
+#define GMAC_FLOW_CTRL_PT_SHIFT 16
+#define GMAC_FLOW_CTRL_RFE 0x00000004 /* Rx Flow Control Enable */
+#define GMAC_FLOW_CTRL_TFE 0x00000002 /* Tx Flow Control Enable */
+#define GMAC_FLOW_CTRL_FCB_BPA 0x00000001 /* Flow Control Busy ... */
+
+/*--- DMA BLOCK defines ---*/
+/* DMA Bus Mode register defines */
+#define DMA_BUS_MODE_SFT_RESET 0x00000001 /* Software Reset */
+#define DMA_BUS_MODE_DA 0x00000002 /* Arbitration scheme */
+#define DMA_BUS_MODE_DSL_MASK 0x0000007c /* Descriptor Skip Length */
+#define DMA_BUS_MODE_DSL_SHIFT 2 /* (in DWORDS) */
+/* Programmable burst length (passed through platform) */
+#define DMA_BUS_MODE_PBL_MASK 0x00003f00 /* Programmable Burst Len */
+#define DMA_BUS_MODE_PBL_SHIFT 8
+#define DMA_BUS_MODE_ATDS 0x00000080 /* Alternate Descriptor Size */
+
+enum rx_tx_priority_ratio {
+ double_ratio = 0x00004000, /* 2:1 */
+ triple_ratio = 0x00008000, /* 3:1 */
+ quadruple_ratio = 0x0000c000, /* 4:1 */
+};
+
+#define DMA_BUS_MODE_FB 0x00010000 /* Fixed burst */
+#define DMA_BUS_MODE_MB 0x04000000 /* Mixed burst */
+#define DMA_BUS_MODE_RPBL_MASK 0x003e0000 /* Rx-Programmable Burst Len */
+#define DMA_BUS_MODE_RPBL_SHIFT 17
+#define DMA_BUS_MODE_USP 0x00800000
+#define DMA_BUS_MODE_PBL 0x01000000
+#define DMA_BUS_MODE_AAL 0x02000000
+
+/* DMA CRS Control and Status Register Mapping */
+#define DMA_HOST_TX_DESC 0x00001048 /* Current Host Tx descriptor */
+#define DMA_HOST_RX_DESC 0x0000104c /* Current Host Rx descriptor */
+/* DMA Bus Mode register defines */
+#define DMA_BUS_PR_RATIO_MASK 0x0000c000 /* Rx/Tx priority ratio */
+#define DMA_BUS_PR_RATIO_SHIFT 14
+#define DMA_BUS_FB 0x00010000 /* Fixed Burst */
+
+/* DMA operation mode defines (start/stop tx/rx are placed in common header)*/
+/* Disable Drop TCP/IP csum error */
+#define DMA_CONTROL_DT 0x04000000
+#define DMA_CONTROL_RSF 0x02000000 /* Receive Store and Forward */
+#define DMA_CONTROL_DFF 0x01000000 /* Disable flushing */
+/* Threshold for Activating the FC */
+enum rfa {
+ act_full_minus_1 = 0x00800000,
+ act_full_minus_2 = 0x00800200,
+ act_full_minus_3 = 0x00800400,
+ act_full_minus_4 = 0x00800600,
+};
+/* Threshold for Deactivating the FC */
+enum rfd {
+ deac_full_minus_1 = 0x00400000,
+ deac_full_minus_2 = 0x00400800,
+ deac_full_minus_3 = 0x00401000,
+ deac_full_minus_4 = 0x00401800,
+};
+#define DMA_CONTROL_TSF 0x00200000 /* Transmit Store and Forward */
+
+enum ttc_control {
+ DMA_CONTROL_TTC_64 = 0x00000000,
+ DMA_CONTROL_TTC_128 = 0x00004000,
+ DMA_CONTROL_TTC_192 = 0x00008000,
+ DMA_CONTROL_TTC_256 = 0x0000c000,
+ DMA_CONTROL_TTC_40 = 0x00010000,
+ DMA_CONTROL_TTC_32 = 0x00014000,
+ DMA_CONTROL_TTC_24 = 0x00018000,
+ DMA_CONTROL_TTC_16 = 0x0001c000,
+};
+#define DMA_CONTROL_TC_TX_MASK 0xfffe3fff
+
+#define DMA_CONTROL_EFC 0x00000100
+#define DMA_CONTROL_FEF 0x00000080
+#define DMA_CONTROL_FUF 0x00000040
+
+enum rtc_control {
+ DMA_CONTROL_RTC_64 = 0x00000000,
+ DMA_CONTROL_RTC_32 = 0x00000008,
+ DMA_CONTROL_RTC_96 = 0x00000010,
+ DMA_CONTROL_RTC_128 = 0x00000018,
+};
+#define DMA_CONTROL_TC_RX_MASK 0xffffffe7
+
+#define DMA_CONTROL_OSF 0x00000004 /* Operate on second frame */
+
+/* MMC registers offset */
+#define GMAC_MMC_CTRL 0x100
+#define GMAC_MMC_RX_INTR 0x104
+#define GMAC_MMC_TX_INTR 0x108
+#define GMAC_MMC_RX_CSUM_OFFLOAD 0x208
+
+extern const struct stmmac_dma_ops dwmac1000_dma_ops;
+#endif /* __DWMAC1000_H__ */
--- /dev/null
+/*******************************************************************************
+ This is the driver for the GMAC on-chip Ethernet controller for ST SoCs.
+ DWC Ether MAC 10/100/1000 Universal version 3.41a has been used for
+ developing this code.
+
+ This only implements the mac core functions for this chip.
+
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/crc32.h>
+#include <linux/slab.h>
+#include <linux/ethtool.h>
+#include <asm/io.h>
+#include "dwmac1000.h"
+
+/* Apply the default core configuration and mask the GMAC interrupts. */
+static void dwmac1000_core_init(void __iomem *ioaddr)
+{
+	/* OR the default core settings into the configuration register */
+	u32 cfg = readl(ioaddr + GMAC_CONTROL) | GMAC_CORE_INIT;
+
+	writel(cfg, ioaddr + GMAC_CONTROL);
+
+	/* Mask GMAC interrupts */
+	writel(0x207, ioaddr + GMAC_INT_MASK);
+
+#ifdef STMMAC_VLAN_TAG_USED
+	/* Tag detection without filtering */
+	writel(0x0, ioaddr + GMAC_VLAN_TAG);
+#endif
+}
+
+/* Try to enable RX checksum offload; returns non-zero when the core
+ * actually latched the IPC bit (i.e. the feature is present).
+ */
+static int dwmac1000_rx_ipc_enable(void __iomem *ioaddr)
+{
+	u32 ctrl = readl(ioaddr + GMAC_CONTROL);
+
+	writel(ctrl | GMAC_CONTROL_IPC, ioaddr + GMAC_CONTROL);
+
+	/* Read back: the bit stays clear on cores without the feature */
+	ctrl = readl(ioaddr + GMAC_CONTROL);
+
+	return !!(ctrl & GMAC_CONTROL_IPC);
+}
+
+/* Dump the first 55 words of the MAC CSR space for debugging. */
+static void dwmac1000_dump_regs(void __iomem *ioaddr)
+{
+	int offset;
+
+	pr_info("\tDWMAC1000 regs (base addr = 0x%p)\n", ioaddr);
+
+	for (offset = 0; offset < 55 * 4; offset += 4)
+		pr_info("\tReg No. %d (offset 0x%x): 0x%08x\n", offset / 4,
+			offset, readl(ioaddr + offset));
+}
+
+/* Write MAC address @addr into perfect-filter slot @reg_n. */
+static void dwmac1000_set_umac_addr(void __iomem *ioaddr, unsigned char *addr,
+				    unsigned int reg_n)
+{
+	stmmac_set_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
+			    GMAC_ADDR_LOW(reg_n));
+}
+
+/* Read the MAC address stored in perfect-filter slot @reg_n into @addr. */
+static void dwmac1000_get_umac_addr(void __iomem *ioaddr, unsigned char *addr,
+				    unsigned int reg_n)
+{
+	stmmac_get_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
+			    GMAC_ADDR_LOW(reg_n));
+}
+
+/* dwmac1000_set_filter - program the GMAC receive frame filter.
+ * @dev: net device whose flags and uc/mc address lists drive the filter
+ * @id: synopsys core id; cores newer than 3.40 expose a larger
+ *      perfect-filter table
+ *
+ * Selects between promiscuous, pass-all-multicast, hash filtering and
+ * perfect (unicast) filtering, then writes the hash table and the
+ * frame filter registers.
+ */
+static void dwmac1000_set_filter(struct net_device *dev, int id)
+{
+	void __iomem *ioaddr = (void __iomem *)dev->base_addr;
+	unsigned int value = 0;
+	unsigned int perfect_addr_number;
+
+	CHIP_DBG(KERN_INFO "%s: # mcasts %d, # unicast %d\n",
+		 __func__, netdev_mc_count(dev), netdev_uc_count(dev));
+
+	if (dev->flags & IFF_PROMISC)
+		value = GMAC_FRAME_FILTER_PR;
+	else if ((netdev_mc_count(dev) > HASH_TABLE_SIZE)
+		 || (dev->flags & IFF_ALLMULTI)) {
+		value = GMAC_FRAME_FILTER_PM;	/* pass all multi */
+		/* Accept every hash bucket */
+		writel(0xffffffff, ioaddr + GMAC_HASH_HIGH);
+		writel(0xffffffff, ioaddr + GMAC_HASH_LOW);
+	} else if (!netdev_mc_empty(dev)) {
+		u32 mc_filter[2];
+		struct netdev_hw_addr *ha;
+
+		/* Hash filter for multicast */
+		value = GMAC_FRAME_FILTER_HMC;
+
+		memset(mc_filter, 0, sizeof(mc_filter));
+		netdev_for_each_mc_addr(ha, dev) {
+			/* The upper 6 bits of the calculated CRC are used to
+			 * index the contents of the hash table
+			 */
+			int bit_nr = bitrev32(~crc32_le(~0, ha->addr, 6)) >> 26;
+			/* The most significant bit determines the register to
+			 * use (H/L) while the other 5 bits determine the bit
+			 * within the register.
+			 */
+			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
+		}
+		writel(mc_filter[0], ioaddr + GMAC_HASH_LOW);
+		writel(mc_filter[1], ioaddr + GMAC_HASH_HIGH);
+	}
+
+	/* Extra 16 regs are available in cores newer than the 3.40. */
+	if (id > DWMAC_CORE_3_40)
+		perfect_addr_number = GMAC_MAX_PERFECT_ADDRESSES;
+	else
+		perfect_addr_number = GMAC_MAX_PERFECT_ADDRESSES / 2;
+
+	/* Handle multiple unicast addresses (perfect filtering) */
+	if (netdev_uc_count(dev) > perfect_addr_number)
+		/* Fall back to promiscuous mode when more unicast addrs
+		 * are required than the perfect filter can hold
+		 */
+		value |= GMAC_FRAME_FILTER_PR;
+	else {
+		/* Start at slot 1 (slot 0 presumably holds the device's
+		 * own address - verify against the common setup code)
+		 */
+		int reg = 1;
+		struct netdev_hw_addr *ha;
+
+		netdev_for_each_uc_addr(ha, dev) {
+			dwmac1000_set_umac_addr(ioaddr, ha->addr, reg);
+			reg++;
+		}
+	}
+
+#ifdef FRAME_FILTER_DEBUG
+	/* Enable Receive all mode (to debug filtering_fail errors) */
+	value |= GMAC_FRAME_FILTER_RA;
+#endif
+	writel(value, ioaddr + GMAC_FRAME_FILTER);
+
+	CHIP_DBG(KERN_INFO "\tFilter: 0x%08x\n\tHash: HI 0x%08x, LO 0x%08x\n",
+		 readl(ioaddr + GMAC_FRAME_FILTER),
+		 readl(ioaddr + GMAC_HASH_HIGH), readl(ioaddr + GMAC_HASH_LOW));
+}
+
+/* Program the GMAC flow control register: RX/TX pause enables plus the
+ * pause time (the latter only meaningful in full duplex).
+ */
+static void dwmac1000_flow_ctrl(void __iomem *ioaddr, unsigned int duplex,
+				unsigned int fc, unsigned int pause_time)
+{
+	unsigned int reg = 0;
+
+	CHIP_DBG(KERN_DEBUG "GMAC Flow-Control:\n");
+
+	if (fc & FLOW_RX) {
+		CHIP_DBG(KERN_DEBUG "\tReceive Flow-Control ON\n");
+		reg |= GMAC_FLOW_CTRL_RFE;
+	}
+	if (fc & FLOW_TX) {
+		CHIP_DBG(KERN_DEBUG "\tTransmit Flow-Control ON\n");
+		reg |= GMAC_FLOW_CTRL_TFE;
+	}
+
+	if (duplex) {
+		CHIP_DBG(KERN_DEBUG "\tduplex mode: PAUSE %d\n", pause_time);
+		reg |= pause_time << GMAC_FLOW_CTRL_PT_SHIFT;
+	}
+
+	writel(reg, ioaddr + GMAC_FLOW_CTRL);
+}
+
+/* Configure the PMT (wake-on-LAN) register from the WAKE_* mode mask. */
+static void dwmac1000_pmt(void __iomem *ioaddr, unsigned long mode)
+{
+	unsigned int cfg = 0;
+
+	/* Magic-packet wake-up: power down until a magic frame arrives */
+	if (mode & WAKE_MAGIC) {
+		CHIP_DBG(KERN_DEBUG "GMAC: WOL Magic frame\n");
+		cfg |= power_down | magic_pkt_en;
+	}
+	/* Wake on global unicast frames */
+	if (mode & WAKE_UCAST) {
+		CHIP_DBG(KERN_DEBUG "GMAC: WOL on global unicast\n");
+		cfg |= global_unicast;
+	}
+
+	writel(cfg, ioaddr + GMAC_PMT);
+}
+
+/* dwmac1000_irq_status - read and acknowledge GMAC core interrupts.
+ * @ioaddr: register base
+ * @x: extra stats, bumped per handled interrupt source
+ *
+ * Handles MMC, PMT, EEE/LPI, PCS and RGMII/SGMII events.  Several
+ * sources are read-to-clear, so the readl() order below matters.
+ * Returns the last value read from LPI_CTRL_STATUS, or 0 when no LPI
+ * event was pending.
+ */
+static int dwmac1000_irq_status(void __iomem *ioaddr,
+				struct stmmac_extra_stats *x)
+{
+	u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);
+	int ret = 0;
+
+	/* Not used events (e.g. MMC interrupts) are not handled. */
+	if ((intr_status & mmc_tx_irq)) {
+		CHIP_DBG(KERN_INFO "GMAC: MMC tx interrupt: 0x%08x\n",
+			 readl(ioaddr + GMAC_MMC_TX_INTR));
+		x->mmc_tx_irq_n++;
+	}
+	if (unlikely(intr_status & mmc_rx_irq)) {
+		CHIP_DBG(KERN_INFO "GMAC: MMC rx interrupt: 0x%08x\n",
+			 readl(ioaddr + GMAC_MMC_RX_INTR));
+		x->mmc_rx_irq_n++;
+	}
+	if (unlikely(intr_status & mmc_rx_csum_offload_irq)) {
+		CHIP_DBG(KERN_INFO "GMAC: MMC rx csum offload: 0x%08x\n",
+			 readl(ioaddr + GMAC_MMC_RX_CSUM_OFFLOAD));
+		x->mmc_rx_csum_offload_irq_n++;
+	}
+	if (unlikely(intr_status & pmt_irq)) {
+		CHIP_DBG(KERN_INFO "GMAC: received Magic frame\n");
+		/* clear the PMT bits 5 and 6 by reading the PMT status reg */
+		readl(ioaddr + GMAC_PMT);
+		x->irq_receive_pmt_irq_n++;
+	}
+	/* MAC trx/rx EEE LPI entry/exit interrupts */
+	if (intr_status & lpiis_irq) {
+		/* Clean LPI interrupt by reading the Reg 12 */
+		ret = readl(ioaddr + LPI_CTRL_STATUS);
+
+		if (ret & LPI_CTRL_STATUS_TLPIEN) {
+			CHIP_DBG(KERN_INFO "GMAC TX entered in LPI\n");
+			x->irq_tx_path_in_lpi_mode_n++;
+		}
+		if (ret & LPI_CTRL_STATUS_TLPIEX) {
+			CHIP_DBG(KERN_INFO "GMAC TX exit from LPI\n");
+			x->irq_tx_path_exit_lpi_mode_n++;
+		}
+		if (ret & LPI_CTRL_STATUS_RLPIEN) {
+			CHIP_DBG(KERN_INFO "GMAC RX entered in LPI\n");
+			x->irq_rx_path_in_lpi_mode_n++;
+		}
+		if (ret & LPI_CTRL_STATUS_RLPIEX) {
+			CHIP_DBG(KERN_INFO "GMAC RX exit from LPI\n");
+			x->irq_rx_path_exit_lpi_mode_n++;
+		}
+	}
+
+	if ((intr_status & pcs_ane_irq) || (intr_status & pcs_link_irq)) {
+		CHIP_DBG(KERN_INFO "GMAC PCS ANE IRQ\n");
+		/* read-to-clear the AN status */
+		readl(ioaddr + GMAC_AN_STATUS);
+		x->irq_pcs_ane_n++;
+	}
+	if (intr_status & rgmii_irq) {
+		u32 status = readl(ioaddr + GMAC_S_R_GMII);
+		CHIP_DBG(KERN_INFO "GMAC RGMII/SGMII interrupt\n");
+		x->irq_rgmii_n++;
+
+		/* Save and dump the link status. */
+		if (status & GMAC_S_R_GMII_LINK) {
+			int speed_value = (status & GMAC_S_R_GMII_SPEED) >>
+			    GMAC_S_R_GMII_SPEED_SHIFT;
+			x->pcs_duplex = (status & GMAC_S_R_GMII_MODE);
+
+			if (speed_value == GMAC_S_R_GMII_SPEED_125)
+				x->pcs_speed = SPEED_1000;
+			else if (speed_value == GMAC_S_R_GMII_SPEED_25)
+				x->pcs_speed = SPEED_100;
+			else
+				x->pcs_speed = SPEED_10;
+
+			x->pcs_link = 1;
+			pr_debug("Link is Up - %d/%s\n", (int)x->pcs_speed,
+				 x->pcs_duplex ? "Full" : "Half");
+		} else {
+			x->pcs_link = 0;
+			pr_debug("Link is Down\n");
+		}
+	}
+
+	return ret;
+}
+
+/* Enable LPI (Energy Efficient Ethernet) with automatic TX entry. */
+static void dwmac1000_set_eee_mode(void __iomem *ioaddr)
+{
+	u32 lpi;
+
+	/* Enable the link status receive on RGMII, SGMII ore SMII
+	 * receive path and instruct the transmit to enter in LPI
+	 * state.
+	 */
+	lpi = readl(ioaddr + LPI_CTRL_STATUS);
+	writel(lpi | LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_LPITXA,
+	       ioaddr + LPI_CTRL_STATUS);
+}
+
+/* Disable LPI: clear both the LPI enable and the TX automate bits. */
+static void dwmac1000_reset_eee_mode(void __iomem *ioaddr)
+{
+	u32 lpi = readl(ioaddr + LPI_CTRL_STATUS);
+
+	lpi &= ~(LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_LPITXA);
+	writel(lpi, ioaddr + LPI_CTRL_STATUS);
+}
+
+/* Mirror the PHY link state (@link) into the PLS bit of the LPI
+ * control/status register.
+ */
+static void dwmac1000_set_eee_pls(void __iomem *ioaddr, int link)
+{
+	u32 lpi = readl(ioaddr + LPI_CTRL_STATUS);
+
+	if (link)
+		lpi |= LPI_CTRL_STATUS_PLS;
+	else
+		lpi &= ~LPI_CTRL_STATUS_PLS;
+
+	writel(lpi, ioaddr + LPI_CTRL_STATUS);
+}
+
+/* Program the two LPI timers packed into LPI_TIMER_CTRL:
+ *  LS (ls & 0x7ff, shifted to bit 16): minimum time (ms) for which the
+ *      link status from the PHY should be ok before transmitting the
+ *      LPI pattern.
+ *  TW (tw & 0xffff, low half): minimum time (us) for which the core
+ *      waits after it has stopped transmitting the LPI pattern.
+ */
+static void dwmac1000_set_eee_timer(void __iomem *ioaddr, int ls, int tw)
+{
+	int regval = ((ls & 0x7ff) << 16) | (tw & 0xffff);
+
+	writel(regval, ioaddr + LPI_TIMER_CTRL);
+}
+
+/* Enable (and optionally restart) PCS auto-negotiation.
+ * The AN control register is programmed from scratch; the previous
+ * contents are intentionally not preserved, so the old read-modify
+ * sequence was a dead read and has been dropped.
+ */
+static void dwmac1000_ctrl_ane(void __iomem *ioaddr, bool restart)
+{
+	/* auto negotiation enable and External Loopback enable */
+	u32 value = GMAC_AN_CTRL_ANE | GMAC_AN_CTRL_ELE;
+
+	if (restart)
+		value |= GMAC_AN_CTRL_RAN;
+
+	writel(value, ioaddr + GMAC_AN_CTRL);
+}
+
+/* Fill @adv with the locally advertised and link-partner abilities
+ * read from the PCS ANE registers.  The link-partner duplex bits are
+ * now accumulated with |= like the local side, instead of the second
+ * assignment overwriting the first.
+ */
+static void dwmac1000_get_adv(void __iomem *ioaddr, struct rgmii_adv *adv)
+{
+	u32 value = readl(ioaddr + GMAC_ANE_ADV);
+
+	/* Local advertised duplex abilities */
+	if (value & GMAC_ANE_FD)
+		adv->duplex = DUPLEX_FULL;
+	if (value & GMAC_ANE_HD)
+		adv->duplex |= DUPLEX_HALF;
+
+	adv->pause = (value & GMAC_ANE_PSE) >> GMAC_ANE_PSE_SHIFT;
+
+	/* Link partner abilities */
+	value = readl(ioaddr + GMAC_ANE_LPA);
+
+	if (value & GMAC_ANE_FD)
+		adv->lp_duplex = DUPLEX_FULL;
+	if (value & GMAC_ANE_HD)
+		adv->lp_duplex |= DUPLEX_HALF;
+
+	adv->lp_pause = (value & GMAC_ANE_PSE) >> GMAC_ANE_PSE_SHIFT;
+}
+
+/* MAC-core callbacks exported to the common stmmac layer. */
+static const struct stmmac_ops dwmac1000_ops = {
+	.core_init = dwmac1000_core_init,
+	.rx_ipc = dwmac1000_rx_ipc_enable,
+	.dump_regs = dwmac1000_dump_regs,
+	.host_irq_status = dwmac1000_irq_status,
+	.set_filter = dwmac1000_set_filter,
+	.flow_ctrl = dwmac1000_flow_ctrl,
+	.pmt = dwmac1000_pmt,
+	.set_umac_addr = dwmac1000_set_umac_addr,
+	.get_umac_addr = dwmac1000_get_umac_addr,
+	.set_eee_mode = dwmac1000_set_eee_mode,
+	.reset_eee_mode = dwmac1000_reset_eee_mode,
+	.set_eee_timer = dwmac1000_set_eee_timer,
+	.set_eee_pls = dwmac1000_set_eee_pls,
+	.ctrl_ane = dwmac1000_ctrl_ane,
+	.get_adv = dwmac1000_get_adv,
+};
+
+/* Allocate and initialize the mac_device_info for a dwmac1000 core.
+ * Returns NULL on allocation failure; the caller owns (and frees) the
+ * returned structure.
+ */
+struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr)
+{
+	struct mac_device_info *mac;
+	u32 hwid = readl(ioaddr + GMAC_VERSION);
+
+	/* Idiomatic allocation: size from the dereferenced pointer,
+	 * not from a (const-qualified) type name.
+	 */
+	mac = kzalloc(sizeof(*mac), GFP_KERNEL);
+	if (!mac)
+		return NULL;
+
+	mac->mac = &dwmac1000_ops;
+	mac->dma = &dwmac1000_dma_ops;
+
+	/* GMAC_CONTROL bits the common link code toggles */
+	mac->link.port = GMAC_CONTROL_PS;
+	mac->link.duplex = GMAC_CONTROL_DM;
+	mac->link.speed = GMAC_CONTROL_FES;
+	mac->mii.addr = GMAC_MII_ADDR;
+	mac->mii.data = GMAC_MII_DATA;
+	mac->synopsys_uid = hwid;
+
+	return mac;
+}
--- /dev/null
+/*******************************************************************************
+ This is the driver for the GMAC on-chip Ethernet controller for ST SoCs.
+ DWC Ether MAC 10/100/1000 Universal version 3.41a has been used for
+ developing this code.
+
+ This contains the functions to handle the dma.
+
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <asm/io.h>
+#include "dwmac1000.h"
+#include "dwmac_dma.h"
+
+/* dwmac1000_dma_init - software-reset the DMA and program its CSRs.
+ * @ioaddr: register base
+ * @pbl: programmable burst length (applied to both TX and RX)
+ * @fb: enable fixed burst mode
+ * @mb: enable mixed burst mode (no effect when @fb is set)
+ * @burst_len: value written to the AXI bus-mode register
+ * @dma_tx: physical base address of the TX descriptor ring
+ * @dma_rx: physical base address of the RX descriptor ring
+ * @atds: enable the alternate (enhanced) descriptor size
+ *
+ * Returns 0 on success, -EBUSY if the software reset never clears.
+ */
+static int dwmac1000_dma_init(void __iomem *ioaddr, int pbl, int fb, int mb,
+			      int burst_len, u32 dma_tx, u32 dma_rx, int atds)
+{
+	u32 value = readl(ioaddr + DMA_BUS_MODE);
+	int limit;
+
+	/* DMA SW reset: poll up to 10 x 10ms for the bit to self-clear */
+	value |= DMA_BUS_MODE_SFT_RESET;
+	writel(value, ioaddr + DMA_BUS_MODE);
+	limit = 10;
+	while (limit--) {
+		if (!(readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET))
+			break;
+		mdelay(10);
+	}
+	if (limit < 0)
+		return -EBUSY;
+
+	/*
+	 * Set the DMA PBL (Programmable Burst Length) mode
+	 * Before stmmac core 3.50 this mode bit was 4xPBL, and
+	 * post 3.5 mode bit acts as 8*PBL.
+	 * For core rev < 3.5, when the core is set for 4xPBL mode, the
+	 * DMA transfers the data in 4, 8, 16, 32, 64 & 128 beats
+	 * depending on pbl value.
+	 * For core rev > 3.5, when the core is set for 8xPBL mode, the
+	 * DMA transfers the data in 8, 16, 32, 64, 128 & 256 beats
+	 * depending on pbl value.
+	 */
+	value = DMA_BUS_MODE_PBL | ((pbl << DMA_BUS_MODE_PBL_SHIFT) |
+		(pbl << DMA_BUS_MODE_RPBL_SHIFT));
+
+	/* Set the Fixed burst mode */
+	if (fb)
+		value |= DMA_BUS_MODE_FB;
+
+	/* Mixed Burst has no effect when fb is set */
+	if (mb)
+		value |= DMA_BUS_MODE_MB;
+
+#ifdef CONFIG_GMAC_DA
+	value |= DMA_BUS_MODE_DA;	/* Rx has priority over tx */
+#endif
+
+	if (atds)
+		value |= DMA_BUS_MODE_ATDS;
+
+	writel(value, ioaddr + DMA_BUS_MODE);
+
+	/* In case of GMAC AXI configuration, program the DMA_AXI_BUS_MODE
+	 * for supported bursts.
+	 *
+	 * Note: This is applicable only for revision GMACv3.61a. For
+	 * older version this register is reserved and shall have no
+	 * effect.
+	 *
+	 * Note:
+	 * For Fixed Burst Mode: if we directly write 0xFF to this
+	 * register using the configurations pass from platform code,
+	 * this would ensure that all bursts supported by core are set
+	 * and those which are not supported would remain ineffective.
+	 *
+	 * For Non Fixed Burst Mode: provide the maximum value of the
+	 * burst length. Any burst equal or below the provided burst
+	 * length would be allowed to perform.
+	 */
+	writel(burst_len, ioaddr + DMA_AXI_BUS_MODE);
+
+	/* Mask interrupts by writing to CSR7 */
+	writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
+
+	/* RX/TX descriptor base address lists must be written into
+	 * DMA CSR3 and CSR4, respectively
+	 */
+	writel(dma_tx, ioaddr + DMA_TX_BASE_ADDR);
+	writel(dma_rx, ioaddr + DMA_RCV_BASE_ADDR);
+
+	return 0;
+}
+
+/* dwmac1000_dma_operation_mode - program CSR6 (operation mode).
+ * @ioaddr: register base
+ * @txmode: SF_DMA_MODE for TX store-and-forward, otherwise a TX
+ *          threshold in bytes (mapped to the nearest TTC encoding)
+ * @rxmode: SF_DMA_MODE for RX store-and-forward, otherwise an RX
+ *          threshold in bytes (mapped to the nearest RTC encoding)
+ */
+static void dwmac1000_dma_operation_mode(void __iomem *ioaddr, int txmode,
+					 int rxmode)
+{
+	u32 csr6 = readl(ioaddr + DMA_CONTROL);
+
+	if (txmode == SF_DMA_MODE) {
+		CHIP_DBG(KERN_DEBUG "GMAC: enable TX store and forward mode\n");
+		/* Transmit COE type 2 cannot be done in cut-through mode. */
+		csr6 |= DMA_CONTROL_TSF;
+		/* Operating on second frame increase the performance
+		 * especially when transmit store-and-forward is used.
+		 */
+		csr6 |= DMA_CONTROL_OSF;
+	} else {
+		CHIP_DBG(KERN_DEBUG "GMAC: disabling TX SF (threshold %d)\n",
+			 txmode);
+		csr6 &= ~DMA_CONTROL_TSF;
+		/* Clear the old threshold bits before setting new ones */
+		csr6 &= DMA_CONTROL_TC_TX_MASK;
+		/* Set the transmit threshold */
+		if (txmode <= 32)
+			csr6 |= DMA_CONTROL_TTC_32;
+		else if (txmode <= 64)
+			csr6 |= DMA_CONTROL_TTC_64;
+		else if (txmode <= 128)
+			csr6 |= DMA_CONTROL_TTC_128;
+		else if (txmode <= 192)
+			csr6 |= DMA_CONTROL_TTC_192;
+		else
+			csr6 |= DMA_CONTROL_TTC_256;
+	}
+
+	if (rxmode == SF_DMA_MODE) {
+		CHIP_DBG(KERN_DEBUG "GMAC: enable RX store and forward mode\n");
+		csr6 |= DMA_CONTROL_RSF;
+	} else {
+		CHIP_DBG(KERN_DEBUG "GMAC: disable RX SF mode (threshold %d)\n",
+			 rxmode);
+		csr6 &= ~DMA_CONTROL_RSF;
+		/* Clear the old threshold bits before setting new ones */
+		csr6 &= DMA_CONTROL_TC_RX_MASK;
+		if (rxmode <= 32)
+			csr6 |= DMA_CONTROL_RTC_32;
+		else if (rxmode <= 64)
+			csr6 |= DMA_CONTROL_RTC_64;
+		else if (rxmode <= 96)
+			csr6 |= DMA_CONTROL_RTC_96;
+		else
+			csr6 |= DMA_CONTROL_RTC_128;
+	}
+
+	writel(csr6, ioaddr + DMA_CONTROL);
+}
+
+/* Dump the DMA CSR block for debugging; registers 9-17 are skipped. */
+static void dwmac1000_dump_dma_regs(void __iomem *ioaddr)
+{
+ int i;
+ pr_info(" DMA registers\n");
+ for (i = 0; i < 22; i++) {
+ if ((i < 9) || (i > 17)) {
+ int offset = i * 4;
+ pr_err("\t Reg No. %d (offset 0x%x): 0x%08x\n", i,
+ (DMA_BUS_MODE + offset),
+ readl(ioaddr + DMA_BUS_MODE + offset));
+ }
+ }
+}
+
+/* Read the HW feature register advertising the core's optional blocks. */
+static unsigned int dwmac1000_get_hw_feature(void __iomem *ioaddr)
+{
+ return readl(ioaddr + DMA_HW_FEATURE);
+}
+
+/* Program the RX interrupt watchdog (RIWT) register. */
+static void dwmac1000_rx_watchdog(void __iomem *ioaddr, u32 riwt)
+{
+ writel(riwt, ioaddr + DMA_RX_WATCHDOG);
+}
+
+/* DMA callbacks used by the stmmac core for the dwmac1000 (GMAC). */
+const struct stmmac_dma_ops dwmac1000_dma_ops = {
+ .init = dwmac1000_dma_init,
+ .dump_regs = dwmac1000_dump_dma_regs,
+ .dma_mode = dwmac1000_dma_operation_mode,
+ .enable_dma_transmission = dwmac_enable_dma_transmission,
+ .enable_dma_irq = dwmac_enable_dma_irq,
+ .disable_dma_irq = dwmac_disable_dma_irq,
+ .start_tx = dwmac_dma_start_tx,
+ .stop_tx = dwmac_dma_stop_tx,
+ .start_rx = dwmac_dma_start_rx,
+ .stop_rx = dwmac_dma_stop_rx,
+ .dma_interrupt = dwmac_dma_interrupt,
+ .get_hw_feature = dwmac1000_get_hw_feature,
+ .rx_watchdog = dwmac1000_rx_watchdog,
+};
--- /dev/null
+/*******************************************************************************
+ This is the driver for the MAC 10/100 on-chip Ethernet controller
+ currently tested on all the ST boards based on STb7109 and stx7200 SoCs.
+
+ DWC Ether MAC 10/100 Universal version 4.0 has been used for developing
+ this code.
+
+ This only implements the mac core functions for this chip.
+
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/crc32.h>
+#include <asm/io.h>
+#include "dwmac100.h"
+
+/* Enable the MAC core with the default control bits; also program the
+ * VLAN1 ethertype register when VLAN tag support is compiled in.
+ */
+static void dwmac100_core_init(void __iomem *ioaddr)
+{
+ u32 value = readl(ioaddr + MAC_CONTROL);
+
+ writel((value | MAC_CORE_INIT), ioaddr + MAC_CONTROL);
+
+#ifdef STMMAC_VLAN_TAG_USED
+ writel(ETH_P_8021Q, ioaddr + MAC_VLAN1);
+#endif
+}
+
+/* Dump the MAC 10/100 CSR block for debugging. */
+static void dwmac100_dump_mac_regs(void __iomem *ioaddr)
+{
+ pr_info("\t----------------------------------------------\n"
+ "\t DWMAC 100 CSR (base addr = 0x%p)\n"
+ "\t----------------------------------------------\n", ioaddr);
+ pr_info("\tcontrol reg (offset 0x%x): 0x%08x\n", MAC_CONTROL,
+ readl(ioaddr + MAC_CONTROL));
+ /* Fix: dropped the stray trailing space after "\n" so this line
+ * matches the format of every other register dump below.
+ */
+ pr_info("\taddr HI (offset 0x%x): 0x%08x\n", MAC_ADDR_HIGH,
+ readl(ioaddr + MAC_ADDR_HIGH));
+ pr_info("\taddr LO (offset 0x%x): 0x%08x\n", MAC_ADDR_LOW,
+ readl(ioaddr + MAC_ADDR_LOW));
+ pr_info("\tmulticast hash HI (offset 0x%x): 0x%08x\n",
+ MAC_HASH_HIGH, readl(ioaddr + MAC_HASH_HIGH));
+ pr_info("\tmulticast hash LO (offset 0x%x): 0x%08x\n",
+ MAC_HASH_LOW, readl(ioaddr + MAC_HASH_LOW));
+ pr_info("\tflow control (offset 0x%x): 0x%08x\n",
+ MAC_FLOW_CTRL, readl(ioaddr + MAC_FLOW_CTRL));
+ pr_info("\tVLAN1 tag (offset 0x%x): 0x%08x\n", MAC_VLAN1,
+ readl(ioaddr + MAC_VLAN1));
+ pr_info("\tVLAN2 tag (offset 0x%x): 0x%08x\n", MAC_VLAN2,
+ readl(ioaddr + MAC_VLAN2));
+}
+
+/* No RX IPC (checksum offload) engine on this core: always reports 0. */
+static int dwmac100_rx_ipc_enable(void __iomem *ioaddr)
+{
+ return 0;
+}
+
+/* No MAC-level interrupt sources are handled for this core: stub. */
+static int dwmac100_irq_status(void __iomem *ioaddr,
+ struct stmmac_extra_stats *x)
+{
+ return 0;
+}
+
+/* Program the unicast MAC address; reg_n is ignored since this core
+ * exposes a single address register pair (MAC_ADDR_HIGH/LOW).
+ */
+static void dwmac100_set_umac_addr(void __iomem *ioaddr, unsigned char *addr,
+ unsigned int reg_n)
+{
+ stmmac_set_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW);
+}
+
+/* Read back the unicast MAC address; reg_n is ignored (single pair). */
+static void dwmac100_get_umac_addr(void __iomem *ioaddr, unsigned char *addr,
+ unsigned int reg_n)
+{
+ stmmac_get_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW);
+}
+
+/* Configure RX frame filtering from dev->flags and the multicast list:
+ * promiscuous, pass-all-multicast, no-multicast, or perfect unicast
+ * filtering plus a CRC-based multicast hash filter.
+ */
+static void dwmac100_set_filter(struct net_device *dev, int id)
+{
+ void __iomem *ioaddr = (void __iomem *)dev->base_addr;
+ u32 value = readl(ioaddr + MAC_CONTROL);
+
+ if (dev->flags & IFF_PROMISC) {
+ value |= MAC_CONTROL_PR;
+ value &= ~(MAC_CONTROL_PM | MAC_CONTROL_IF | MAC_CONTROL_HO |
+ MAC_CONTROL_HP);
+ } else if ((netdev_mc_count(dev) > HASH_TABLE_SIZE)
+ || (dev->flags & IFF_ALLMULTI)) {
+ value |= MAC_CONTROL_PM;
+ value &= ~(MAC_CONTROL_PR | MAC_CONTROL_IF | MAC_CONTROL_HO);
+ writel(0xffffffff, ioaddr + MAC_HASH_HIGH);
+ writel(0xffffffff, ioaddr + MAC_HASH_LOW);
+ } else if (netdev_mc_empty(dev)) { /* no multicast */
+ value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR | MAC_CONTROL_IF |
+ MAC_CONTROL_HO | MAC_CONTROL_HP);
+ } else {
+ u32 mc_filter[2];
+ struct netdev_hw_addr *ha;
+
+ /* Perfect filter mode for physical address and Hash
+ * filter for multicast
+ */
+ value |= MAC_CONTROL_HP;
+ value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR |
+ MAC_CONTROL_IF | MAC_CONTROL_HO);
+
+ memset(mc_filter, 0, sizeof(mc_filter));
+ netdev_for_each_mc_addr(ha, dev) {
+ /* The upper 6 bits of the calculated CRC are used to
+ * index the contents of the hash table
+ */
+ int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
+ /* The most significant bit determines the register to
+ * use (H/L) while the other 5 bits determine the bit
+ * within the register.
+ */
+ mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
+ }
+ writel(mc_filter[0], ioaddr + MAC_HASH_LOW);
+ writel(mc_filter[1], ioaddr + MAC_HASH_HIGH);
+ }
+
+ writel(value, ioaddr + MAC_CONTROL);
+
+ CHIP_DBG(KERN_INFO "%s: Filter: 0x%08x Hash: HI 0x%08x, LO 0x%08x\n",
+ __func__, readl(ioaddr + MAC_CONTROL),
+ readl(ioaddr + MAC_HASH_HIGH), readl(ioaddr + MAC_HASH_LOW));
+}
+
+/* Enable flow control; the pause time is only programmed in full duplex. */
+static void dwmac100_flow_ctrl(void __iomem *ioaddr, unsigned int duplex,
+ unsigned int fc, unsigned int pause_time)
+{
+ unsigned int flow = MAC_FLOW_CTRL_ENABLE;
+
+ if (duplex)
+ flow |= (pause_time << MAC_FLOW_CTRL_PT_SHIFT);
+ writel(flow, ioaddr + MAC_FLOW_CTRL);
+}
+
+/* No PMT module supported on ST boards with this Eth chip. */
+static void dwmac100_pmt(void __iomem *ioaddr, unsigned long mode)
+{
+ /* Intentionally empty: no Wake-on-LAN hardware to program. */
+}
+
+/* MAC core callbacks for the DWMAC 100. */
+static const struct stmmac_ops dwmac100_ops = {
+ .core_init = dwmac100_core_init,
+ .rx_ipc = dwmac100_rx_ipc_enable,
+ .dump_regs = dwmac100_dump_mac_regs,
+ .host_irq_status = dwmac100_irq_status,
+ .set_filter = dwmac100_set_filter,
+ .flow_ctrl = dwmac100_flow_ctrl,
+ .pmt = dwmac100_pmt,
+ .set_umac_addr = dwmac100_set_umac_addr,
+ .get_umac_addr = dwmac100_get_umac_addr,
+};
+
+/* Allocate and populate the mac_device_info descriptor for the DWMAC 100.
+ * Returns NULL if the allocation fails; the caller owns the result.
+ */
+struct mac_device_info *dwmac100_setup(void __iomem *ioaddr)
+{
+ struct mac_device_info *mac;
+
+ /* sizeof(*mac) ties the allocation to the pointer's type. */
+ mac = kzalloc(sizeof(*mac), GFP_KERNEL);
+ if (!mac)
+ return NULL;
+
+ pr_info("\tDWMAC100\n");
+
+ mac->mac = &dwmac100_ops;
+ mac->dma = &dwmac100_dma_ops;
+
+ mac->link.port = MAC_CONTROL_PS;
+ mac->link.duplex = MAC_CONTROL_F;
+ mac->link.speed = 0;
+ mac->mii.addr = MAC_MII_ADDR;
+ mac->mii.data = MAC_MII_DATA;
+ mac->synopsys_uid = 0;
+
+ return mac;
+}
--- /dev/null
+/*******************************************************************************
+ This is the driver for the MAC 10/100 on-chip Ethernet controller
+ currently tested on all the ST boards based on STb7109 and stx7200 SoCs.
+
+ DWC Ether MAC 10/100 Universal version 4.0 has been used for developing
+ this code.
+
+ This contains the functions to handle the dma.
+
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <asm/io.h>
+#include "dwmac100.h"
+#include "dwmac_dma.h"
+
+/* Reset the DMA engine, then program the bus mode, the interrupt mask and
+ * the RX/TX descriptor list base addresses. The fb, mb, burst_len and atds
+ * arguments only exist to match the shared stmmac_dma_ops signature and
+ * are unused on this core. Returns 0 or -EBUSY if the reset never clears.
+ */
+static int dwmac100_dma_init(void __iomem *ioaddr, int pbl, int fb, int mb,
+ int burst_len, u32 dma_tx, u32 dma_rx, int atds)
+{
+ u32 value = readl(ioaddr + DMA_BUS_MODE);
+ int limit;
+
+ /* DMA SW reset */
+ value |= DMA_BUS_MODE_SFT_RESET;
+ writel(value, ioaddr + DMA_BUS_MODE);
+ /* Poll the self-clearing reset bit, 10 tries of 10 ms each. */
+ limit = 10;
+ while (limit--) {
+ if (!(readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET))
+ break;
+ mdelay(10);
+ }
+ if (limit < 0)
+ return -EBUSY;
+
+ /* Enable Application Access by writing to DMA CSR0 */
+ writel(DMA_BUS_MODE_DEFAULT | (pbl << DMA_BUS_MODE_PBL_SHIFT),
+ ioaddr + DMA_BUS_MODE);
+
+ /* Mask interrupts by writing to CSR7 */
+ writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
+
+ /* RX/TX descriptor base addr lists must be written into
+ * DMA CSR3 and CSR4, respectively
+ */
+ writel(dma_tx, ioaddr + DMA_TX_BASE_ADDR);
+ writel(dma_rx, ioaddr + DMA_RCV_BASE_ADDR);
+
+ return 0;
+}
+
+/* Store and Forward capability is not used at all.
+ *
+ * The transmit threshold can be programmed by setting the TTC bits in the DMA
+ * control register.
+ */
+static void dwmac100_dma_operation_mode(void __iomem *ioaddr, int txmode,
+ int rxmode)
+{
+ u32 csr6 = readl(ioaddr + DMA_CONTROL);
+
+ /* rxmode is ignored: only the TX threshold is programmable here. */
+ if (txmode <= 32)
+ csr6 |= DMA_CONTROL_TTC_32;
+ else if (txmode <= 64)
+ csr6 |= DMA_CONTROL_TTC_64;
+ else
+ csr6 |= DMA_CONTROL_TTC_128;
+
+ writel(csr6, ioaddr + DMA_CONTROL);
+}
+
+/* Dump CSR0-CSR8 plus the current buffer pointers (CSR20/CSR21). */
+static void dwmac100_dump_dma_regs(void __iomem *ioaddr)
+{
+ int i;
+
+ CHIP_DBG(KERN_DEBUG "DWMAC 100 DMA CSR\n");
+ for (i = 0; i < 9; i++)
+ pr_debug("\t CSR%d (offset 0x%x): 0x%08x\n", i,
+ (DMA_BUS_MODE + i * 4),
+ readl(ioaddr + DMA_BUS_MODE + i * 4));
+ CHIP_DBG(KERN_DEBUG "\t CSR20 (offset 0x%x): 0x%08x\n",
+ DMA_CUR_TX_BUF_ADDR, readl(ioaddr + DMA_CUR_TX_BUF_ADDR));
+ CHIP_DBG(KERN_DEBUG "\t CSR21 (offset 0x%x): 0x%08x\n",
+ DMA_CUR_RX_BUF_ADDR, readl(ioaddr + DMA_CUR_RX_BUF_ADDR));
+}
+
+/* DMA controller has two counters to track the number of the missed frames. */
+static void dwmac100_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x,
+ void __iomem *ioaddr)
+{
+ struct net_device_stats *stats = (struct net_device_stats *)data;
+ u32 csr8 = readl(ioaddr + DMA_MISSED_FRAME_CTR);
+
+ if (unlikely(csr8)) {
+ if (csr8 & DMA_MISSED_FRAME_OVE) {
+ /* Overflow counter rolled over: add its full range. */
+ stats->rx_over_errors += 0x800;
+ x->rx_overflow_cntr += 0x800;
+ } else {
+ unsigned int ove_cntr;
+ ove_cntr = ((csr8 & DMA_MISSED_FRAME_OVE_CNTR) >> 17);
+ stats->rx_over_errors += ove_cntr;
+ x->rx_overflow_cntr += ove_cntr;
+ }
+
+ if (csr8 & DMA_MISSED_FRAME_OVE_M) {
+ /* Missed-frame counter rolled over: add its range. */
+ stats->rx_missed_errors += 0xffff;
+ x->rx_missed_cntr += 0xffff;
+ } else {
+ unsigned int miss_f = (csr8 & DMA_MISSED_FRAME_M_CNTR);
+ stats->rx_missed_errors += miss_f;
+ x->rx_missed_cntr += miss_f;
+ }
+ }
+}
+
+/* DMA callbacks used by the stmmac core for the DWMAC 100. */
+const struct stmmac_dma_ops dwmac100_dma_ops = {
+ .init = dwmac100_dma_init,
+ .dump_regs = dwmac100_dump_dma_regs,
+ .dma_mode = dwmac100_dma_operation_mode,
+ .dma_diagnostic_fr = dwmac100_dma_diagnostic_fr,
+ .enable_dma_transmission = dwmac_enable_dma_transmission,
+ .enable_dma_irq = dwmac_enable_dma_irq,
+ .disable_dma_irq = dwmac_disable_dma_irq,
+ .start_tx = dwmac_dma_start_tx,
+ .stop_tx = dwmac_dma_stop_tx,
+ .start_rx = dwmac_dma_start_rx,
+ .stop_rx = dwmac_dma_stop_rx,
+ .dma_interrupt = dwmac_dma_interrupt,
+};
--- /dev/null
+/*******************************************************************************
+ DWMAC DMA Header file.
+
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#ifndef __DWMAC_DMA_H__
+#define __DWMAC_DMA_H__
+
+/* DMA CSR Control and Status Register Mapping */
+#define DMA_BUS_MODE 0x00001000 /* Bus Mode */
+#define DMA_XMT_POLL_DEMAND 0x00001004 /* Transmit Poll Demand */
+#define DMA_RCV_POLL_DEMAND 0x00001008 /* Received Poll Demand */
+#define DMA_RCV_BASE_ADDR 0x0000100c /* Receive List Base */
+#define DMA_TX_BASE_ADDR 0x00001010 /* Transmit List Base */
+#define DMA_STATUS 0x00001014 /* Status Register */
+#define DMA_CONTROL 0x00001018 /* Ctrl (Operational Mode) */
+#define DMA_INTR_ENA 0x0000101c /* Interrupt Enable */
+#define DMA_MISSED_FRAME_CTR 0x00001020 /* Missed Frame Counter */
+/* Rx watchdog register */
+#define DMA_RX_WATCHDOG 0x00001024
+/* AXI Bus Mode */
+#define DMA_AXI_BUS_MODE 0x00001028
+#define DMA_CUR_TX_BUF_ADDR 0x00001050 /* Current Host Tx Buffer */
+#define DMA_CUR_RX_BUF_ADDR 0x00001054 /* Current Host Rx Buffer */
+#define DMA_HW_FEATURE 0x00001058 /* HW Feature Register */
+
+/* DMA Control register defines */
+#define DMA_CONTROL_ST 0x00002000 /* Start/Stop Transmission */
+#define DMA_CONTROL_SR 0x00000002 /* Start/Stop Receive */
+
+/* DMA Normal interrupt */
+#define DMA_INTR_ENA_NIE 0x00010000 /* Normal Summary */
+#define DMA_INTR_ENA_TIE 0x00000001 /* Transmit Interrupt */
+#define DMA_INTR_ENA_TUE 0x00000004 /* Transmit Buffer Unavailable */
+#define DMA_INTR_ENA_RIE 0x00000040 /* Receive Interrupt */
+#define DMA_INTR_ENA_ERE 0x00004000 /* Early Receive */
+
+#define DMA_INTR_NORMAL (DMA_INTR_ENA_NIE | DMA_INTR_ENA_RIE | \
+ DMA_INTR_ENA_TIE)
+
+/* DMA Abnormal interrupt */
+#define DMA_INTR_ENA_AIE 0x00008000 /* Abnormal Summary */
+#define DMA_INTR_ENA_FBE 0x00002000 /* Fatal Bus Error */
+#define DMA_INTR_ENA_ETE 0x00000400 /* Early Transmit */
+#define DMA_INTR_ENA_RWE 0x00000200 /* Receive Watchdog */
+#define DMA_INTR_ENA_RSE 0x00000100 /* Receive Stopped */
+#define DMA_INTR_ENA_RUE 0x00000080 /* Receive Buffer Unavailable */
+#define DMA_INTR_ENA_UNE 0x00000020 /* Tx Underflow */
+#define DMA_INTR_ENA_OVE 0x00000010 /* Receive Overflow */
+#define DMA_INTR_ENA_TJE 0x00000008 /* Transmit Jabber */
+#define DMA_INTR_ENA_TSE 0x00000002 /* Transmit Stopped */
+
+#define DMA_INTR_ABNORMAL (DMA_INTR_ENA_AIE | DMA_INTR_ENA_FBE | \
+ DMA_INTR_ENA_UNE)
+
+/* DMA default interrupt mask */
+#define DMA_INTR_DEFAULT_MASK (DMA_INTR_NORMAL | DMA_INTR_ABNORMAL)
+
+/* DMA Status register defines */
+#define DMA_STATUS_GLPII 0x40000000 /* GMAC LPI interrupt */
+#define DMA_STATUS_GPI 0x10000000 /* PMT interrupt */
+#define DMA_STATUS_GMI 0x08000000 /* MMC interrupt */
+#define DMA_STATUS_GLI 0x04000000 /* GMAC Line interface int */
+#define DMA_STATUS_EB_MASK 0x00380000 /* Error Bits Mask */
+#define DMA_STATUS_EB_TX_ABORT 0x00080000 /* Error Bits - TX Abort */
+#define DMA_STATUS_EB_RX_ABORT 0x00100000 /* Error Bits - RX Abort */
+#define DMA_STATUS_TS_MASK 0x00700000 /* Transmit Process State */
+#define DMA_STATUS_TS_SHIFT 20
+#define DMA_STATUS_RS_MASK 0x000e0000 /* Receive Process State */
+#define DMA_STATUS_RS_SHIFT 17
+#define DMA_STATUS_NIS 0x00010000 /* Normal Interrupt Summary */
+#define DMA_STATUS_AIS 0x00008000 /* Abnormal Interrupt Summary */
+#define DMA_STATUS_ERI 0x00004000 /* Early Receive Interrupt */
+#define DMA_STATUS_FBI 0x00002000 /* Fatal Bus Error Interrupt */
+#define DMA_STATUS_ETI 0x00000400 /* Early Transmit Interrupt */
+#define DMA_STATUS_RWT 0x00000200 /* Receive Watchdog Timeout */
+#define DMA_STATUS_RPS 0x00000100 /* Receive Process Stopped */
+#define DMA_STATUS_RU 0x00000080 /* Receive Buffer Unavailable */
+#define DMA_STATUS_RI 0x00000040 /* Receive Interrupt */
+#define DMA_STATUS_UNF 0x00000020 /* Transmit Underflow */
+#define DMA_STATUS_OVF 0x00000010 /* Receive Overflow */
+#define DMA_STATUS_TJT 0x00000008 /* Transmit Jabber Timeout */
+#define DMA_STATUS_TU 0x00000004 /* Transmit Buffer Unavailable */
+#define DMA_STATUS_TPS 0x00000002 /* Transmit Process Stopped */
+#define DMA_STATUS_TI 0x00000001 /* Transmit Interrupt */
+#define DMA_CONTROL_FTF 0x00100000 /* Flush transmit FIFO */
+
+/* Shared DMA helpers implemented in dwmac_lib.c */
+extern void dwmac_enable_dma_transmission(void __iomem *ioaddr);
+extern void dwmac_enable_dma_irq(void __iomem *ioaddr);
+extern void dwmac_disable_dma_irq(void __iomem *ioaddr);
+extern void dwmac_dma_start_tx(void __iomem *ioaddr);
+extern void dwmac_dma_stop_tx(void __iomem *ioaddr);
+extern void dwmac_dma_start_rx(void __iomem *ioaddr);
+extern void dwmac_dma_stop_rx(void __iomem *ioaddr);
+extern int dwmac_dma_interrupt(void __iomem *ioaddr,
+ struct stmmac_extra_stats *x);
+
+#endif /* __DWMAC_DMA_H__ */
--- /dev/null
+/*******************************************************************************
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/io.h>
+#include "common.h"
+#include "dwmac_dma.h"
+
+/* Define DWMAC_DMA_DEBUG to get verbose DMA tracing via printk. */
+#undef DWMAC_DMA_DEBUG
+#ifdef DWMAC_DMA_DEBUG
+#define DWMAC_LIB_DBG(fmt, args...) printk(fmt, ## args)
+#else
+#define DWMAC_LIB_DBG(fmt, args...) do { } while (0)
+#endif
+
+/* Address Enable bit in the MAC address high registers. */
+#define GMAC_HI_REG_AE 0x80000000
+
+/* CSR1 enables the transmit DMA to check for new descriptor */
+void dwmac_enable_dma_transmission(void __iomem *ioaddr)
+{
+ /* Writing the TX poll demand register kicks a descriptor re-read. */
+ writel(1, ioaddr + DMA_XMT_POLL_DEMAND);
+}
+
+/* Unmask the default set of DMA interrupts (CSR7). */
+void dwmac_enable_dma_irq(void __iomem *ioaddr)
+{
+ writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
+}
+
+/* Mask all DMA interrupts by clearing CSR7. */
+void dwmac_disable_dma_irq(void __iomem *ioaddr)
+{
+ writel(0, ioaddr + DMA_INTR_ENA);
+}
+
+/* Set the ST bit in CSR6 to start the transmit DMA. */
+void dwmac_dma_start_tx(void __iomem *ioaddr)
+{
+ writel(readl(ioaddr + DMA_CONTROL) | DMA_CONTROL_ST,
+ ioaddr + DMA_CONTROL);
+}
+
+/* Clear the ST bit in CSR6 to stop the transmit DMA. */
+void dwmac_dma_stop_tx(void __iomem *ioaddr)
+{
+ writel(readl(ioaddr + DMA_CONTROL) & ~DMA_CONTROL_ST,
+ ioaddr + DMA_CONTROL);
+}
+
+/* Set the SR bit in CSR6 to start the receive DMA. */
+void dwmac_dma_start_rx(void __iomem *ioaddr)
+{
+ u32 value = readl(ioaddr + DMA_CONTROL);
+ value |= DMA_CONTROL_SR;
+ writel(value, ioaddr + DMA_CONTROL);
+}
+
+/* Clear the SR bit in CSR6 to stop the receive DMA. */
+void dwmac_dma_stop_rx(void __iomem *ioaddr)
+{
+ u32 value = readl(ioaddr + DMA_CONTROL);
+ value &= ~DMA_CONTROL_SR;
+ writel(value, ioaddr + DMA_CONTROL);
+}
+
+#ifdef DWMAC_DMA_DEBUG
+/* Decode and print the TX process state field of CSR5 (debug only). */
+static void show_tx_process_state(unsigned int status)
+{
+ unsigned int state;
+ state = (status & DMA_STATUS_TS_MASK) >> DMA_STATUS_TS_SHIFT;
+
+ switch (state) {
+ case 0:
+ pr_info("- TX (Stopped): Reset or Stop command\n");
+ break;
+ case 1:
+ pr_info("- TX (Running):Fetching the Tx desc\n");
+ break;
+ case 2:
+ pr_info("- TX (Running): Waiting for end of tx\n");
+ break;
+ case 3:
+ pr_info("- TX (Running): Reading the data "
+ "and queuing the data into the Tx buf\n");
+ break;
+ case 6:
+ pr_info("- TX (Suspended): Tx Buff Underflow "
+ "or an unavailable Transmit descriptor\n");
+ break;
+ case 7:
+ pr_info("- TX (Running): Closing Tx descriptor\n");
+ break;
+ default:
+ break;
+ }
+}
+
+/* Decode and print the RX process state field of CSR5 (debug only). */
+static void show_rx_process_state(unsigned int status)
+{
+ unsigned int state;
+ state = (status & DMA_STATUS_RS_MASK) >> DMA_STATUS_RS_SHIFT;
+
+ switch (state) {
+ case 0:
+ pr_info("- RX (Stopped): Reset or Stop command\n");
+ break;
+ case 1:
+ pr_info("- RX (Running): Fetching the Rx desc\n");
+ break;
+ case 2:
+ pr_info("- RX (Running):Checking for end of pkt\n");
+ break;
+ case 3:
+ pr_info("- RX (Running): Waiting for Rx pkt\n");
+ break;
+ case 4:
+ pr_info("- RX (Suspended): Unavailable Rx buf\n");
+ break;
+ case 5:
+ pr_info("- RX (Running): Closing Rx descriptor\n");
+ break;
+ case 6:
+ pr_info("- RX(Running): Flushing the current frame"
+ " from the Rx buf\n");
+ break;
+ case 7:
+ pr_info("- RX (Running): Queuing the Rx frame"
+ " from the Rx buf into memory\n");
+ break;
+ default:
+ break;
+ }
+}
+#endif
+
+/* Service the DMA status register (CSR5): update the extra-stats counters,
+ * translate abnormal/normal interrupt causes into the tx_hard_error /
+ * handle_rx / handle_tx return codes, and acknowledge the interrupt.
+ */
+int dwmac_dma_interrupt(void __iomem *ioaddr,
+ struct stmmac_extra_stats *x)
+{
+ int ret = 0;
+ /* read the status register (CSR5) */
+ u32 intr_status = readl(ioaddr + DMA_STATUS);
+
+ DWMAC_LIB_DBG(KERN_INFO "%s: [CSR5: 0x%08x]\n", __func__, intr_status);
+#ifdef DWMAC_DMA_DEBUG
+ /* It displays the DMA process states (CSR5 register) */
+ show_tx_process_state(intr_status);
+ show_rx_process_state(intr_status);
+#endif
+ /* ABNORMAL interrupts */
+ if (unlikely(intr_status & DMA_STATUS_AIS)) {
+ DWMAC_LIB_DBG(KERN_INFO "CSR5[15] DMA ABNORMAL IRQ: ");
+ if (unlikely(intr_status & DMA_STATUS_UNF)) {
+ DWMAC_LIB_DBG(KERN_INFO "transmit underflow\n");
+ /* Underflow: ask the caller to bump the TX threshold. */
+ ret = tx_hard_error_bump_tc;
+ x->tx_undeflow_irq++;
+ }
+ if (unlikely(intr_status & DMA_STATUS_TJT)) {
+ DWMAC_LIB_DBG(KERN_INFO "transmit jabber\n");
+ x->tx_jabber_irq++;
+ }
+ if (unlikely(intr_status & DMA_STATUS_OVF)) {
+ DWMAC_LIB_DBG(KERN_INFO "recv overflow\n");
+ x->rx_overflow_irq++;
+ }
+ if (unlikely(intr_status & DMA_STATUS_RU)) {
+ DWMAC_LIB_DBG(KERN_INFO "receive buffer unavailable\n");
+ x->rx_buf_unav_irq++;
+ }
+ if (unlikely(intr_status & DMA_STATUS_RPS)) {
+ DWMAC_LIB_DBG(KERN_INFO "receive process stopped\n");
+ x->rx_process_stopped_irq++;
+ }
+ if (unlikely(intr_status & DMA_STATUS_RWT)) {
+ DWMAC_LIB_DBG(KERN_INFO "receive watchdog\n");
+ x->rx_watchdog_irq++;
+ }
+ if (unlikely(intr_status & DMA_STATUS_ETI)) {
+ DWMAC_LIB_DBG(KERN_INFO "transmit early interrupt\n");
+ x->tx_early_irq++;
+ }
+ if (unlikely(intr_status & DMA_STATUS_TPS)) {
+ DWMAC_LIB_DBG(KERN_INFO "transmit process stopped\n");
+ x->tx_process_stopped_irq++;
+ ret = tx_hard_error;
+ }
+ if (unlikely(intr_status & DMA_STATUS_FBI)) {
+ DWMAC_LIB_DBG(KERN_INFO "fatal bus error\n");
+ x->fatal_bus_error_irq++;
+ ret = tx_hard_error;
+ }
+ }
+ /* TX/RX NORMAL interrupts */
+ if (likely(intr_status & DMA_STATUS_NIS)) {
+ x->normal_irq_n++;
+ if (likely(intr_status & DMA_STATUS_RI)) {
+ u32 value = readl(ioaddr + DMA_INTR_ENA);
+ /* to schedule NAPI on real RIE event. */
+ if (likely(value & DMA_INTR_ENA_RIE)) {
+ x->rx_normal_irq_n++;
+ ret |= handle_rx;
+ }
+ }
+ if (likely(intr_status & DMA_STATUS_TI)) {
+ x->tx_normal_irq_n++;
+ ret |= handle_tx;
+ }
+ if (unlikely(intr_status & DMA_STATUS_ERI))
+ x->rx_early_irq++;
+ }
+ /* Optional hardware blocks, interrupts should be disabled */
+ if (unlikely(intr_status &
+ (DMA_STATUS_GPI | DMA_STATUS_GMI | DMA_STATUS_GLI)))
+ pr_info("%s: unexpected status %08x\n", __func__, intr_status);
+
+ /* Clear the interrupt by writing a logic 1 to the CSR5[15-0] */
+ writel((intr_status & 0x1ffff), ioaddr + DMA_STATUS);
+
+ DWMAC_LIB_DBG(KERN_INFO "\n\n");
+ return ret;
+}
+
+/* Set FTF in CSR6 and busy-wait until the hardware clears it again.
+ * NOTE(review): there is no timeout here - a stuck FTF bit would spin
+ * forever; confirm the hardware always self-clears it.
+ */
+void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr)
+{
+ u32 csr6 = readl(ioaddr + DMA_CONTROL);
+ writel((csr6 | DMA_CONTROL_FTF), ioaddr + DMA_CONTROL);
+
+ do {} while ((readl(ioaddr + DMA_CONTROL) & DMA_CONTROL_FTF));
+}
+
+/* Program a MAC address into the given high/low register pair. */
+void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6],
+ unsigned int high, unsigned int low)
+{
+ unsigned long data;
+
+ data = (addr[5] << 8) | addr[4];
+ /* For MAC Addr registers we have to set the Address Enable (AE)
+ * bit that has no effect on the High Reg 0 where the bit 31 (MO)
+ * is RO.
+ */
+ writel(data | GMAC_HI_REG_AE, ioaddr + high);
+ data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
+ writel(data, ioaddr + low);
+}
+
+/* Enable disable MAC RX/TX */
+void stmmac_set_mac(void __iomem *ioaddr, bool enable)
+{
+ u32 value = readl(ioaddr + MAC_CTRL_REG);
+
+ /* MAC_RNABLE_RX (sic) is the RX-enable control bit. */
+ if (enable)
+ value |= MAC_RNABLE_RX | MAC_ENABLE_TX;
+ else
+ value &= ~(MAC_ENABLE_TX | MAC_RNABLE_RX);
+
+ writel(value, ioaddr + MAC_CTRL_REG);
+}
+
+/* Retrieve the MAC address programmed into the high/low register pair. */
+void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
+ unsigned int high, unsigned int low)
+{
+ unsigned int hi = readl(ioaddr + high);
+ unsigned int lo = readl(ioaddr + low);
+ int i;
+
+ /* Bytes 0-3 live in the low word, bytes 4-5 in the high word. */
+ for (i = 0; i < 4; i++)
+ addr[i] = (lo >> (8 * i)) & 0xff;
+ addr[4] = hi & 0xff;
+ addr[5] = (hi >> 8) & 0xff;
+}
+
--- /dev/null
+/*******************************************************************************
+ This contains the functions to handle the enhanced descriptors.
+
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/stmmac.h>
+#include "common.h"
+#include "descs_com.h"
+
+/* Parse the enhanced TX descriptor status: update the error/extra stats,
+ * flush the TX FIFO for the error conditions that require it, and return
+ * 0 on success or -1 when the error summary bit was set.
+ */
+static int enh_desc_get_tx_status(void *data, struct stmmac_extra_stats *x,
+ struct dma_desc *p, void __iomem *ioaddr)
+{
+ int ret = 0;
+ struct net_device_stats *stats = (struct net_device_stats *)data;
+
+ if (unlikely(p->des01.etx.error_summary)) {
+ CHIP_DBG(KERN_ERR "GMAC TX error... 0x%08x\n", p->des01.etx);
+ if (unlikely(p->des01.etx.jabber_timeout)) {
+ CHIP_DBG(KERN_ERR "\tjabber_timeout error\n");
+ x->tx_jabber++;
+ }
+
+ if (unlikely(p->des01.etx.frame_flushed)) {
+ CHIP_DBG(KERN_ERR "\tframe_flushed error\n");
+ x->tx_frame_flushed++;
+ dwmac_dma_flush_tx_fifo(ioaddr);
+ }
+
+ if (unlikely(p->des01.etx.loss_carrier)) {
+ CHIP_DBG(KERN_ERR "\tloss_carrier error\n");
+ x->tx_losscarrier++;
+ stats->tx_carrier_errors++;
+ }
+ if (unlikely(p->des01.etx.no_carrier)) {
+ CHIP_DBG(KERN_ERR "\tno_carrier error\n");
+ x->tx_carrier++;
+ stats->tx_carrier_errors++;
+ }
+ if (unlikely(p->des01.etx.late_collision)) {
+ CHIP_DBG(KERN_ERR "\tlate_collision error\n");
+ stats->collisions += p->des01.etx.collision_count;
+ }
+ if (unlikely(p->des01.etx.excessive_collisions)) {
+ CHIP_DBG(KERN_ERR "\texcessive_collisions\n");
+ stats->collisions += p->des01.etx.collision_count;
+ }
+ if (unlikely(p->des01.etx.excessive_deferral)) {
+ CHIP_DBG(KERN_INFO "\texcessive tx_deferral\n");
+ x->tx_deferred++;
+ }
+
+ if (unlikely(p->des01.etx.underflow_error)) {
+ CHIP_DBG(KERN_ERR "\tunderflow error\n");
+ dwmac_dma_flush_tx_fifo(ioaddr);
+ x->tx_underflow++;
+ }
+
+ if (unlikely(p->des01.etx.ip_header_error)) {
+ CHIP_DBG(KERN_ERR "\tTX IP header csum error\n");
+ x->tx_ip_header_error++;
+ }
+
+ if (unlikely(p->des01.etx.payload_error)) {
+ CHIP_DBG(KERN_ERR "\tAddr/Payload csum error\n");
+ x->tx_payload_error++;
+ dwmac_dma_flush_tx_fifo(ioaddr);
+ }
+
+ ret = -1;
+ }
+
+ if (unlikely(p->des01.etx.deferred)) {
+ CHIP_DBG(KERN_INFO "GMAC TX status: tx deferred\n");
+ x->tx_deferred++;
+ }
+#ifdef STMMAC_VLAN_TAG_USED
+ if (p->des01.etx.vlan_frame) {
+ CHIP_DBG(KERN_INFO "GMAC TX status: VLAN frame\n");
+ x->tx_vlan++;
+ }
+#endif
+
+ return ret;
+}
+
+/* Return the byte count programmed in buffer 1 of the TX descriptor. */
+static int enh_desc_get_tx_len(struct dma_desc *p)
+{
+ return p->des01.etx.buffer1_size;
+}
+
+/* Map the RDES0 COE status bits (frame type / IPC error / payload error)
+ * onto the frame disposition used by the RX path (good_frame, csum_none,
+ * llc_snap or discard_frame).
+ */
+static int enh_desc_coe_rdes0(int ipc_err, int type, int payload_err)
+{
+ int ret = good_frame;
+ u32 status = (type << 2 | ipc_err << 1 | payload_err) & 0x7;
+
+ /* bits 5 7 0 | Frame status
+ * ----------------------------------------------------------
+ * 0 0 0 | IEEE 802.3 Type frame (length < 1536 octects)
+ * 1 0 0 | IPv4/6 No CSUM errorS.
+ * 1 0 1 | IPv4/6 CSUM PAYLOAD error
+ * 1 1 0 | IPv4/6 CSUM IP HR error
+ * 1 1 1 | IPv4/6 IP PAYLOAD AND HEADER errorS
+ * 0 0 1 | IPv4/6 unsupported IP PAYLOAD
+ * 0 1 1 | COE bypassed.. no IPv4/6 frame
+ * 0 1 0 | Reserved.
+ */
+ if (status == 0x0) {
+ CHIP_DBG(KERN_INFO "RX Des0 status: IEEE 802.3 Type frame.\n");
+ ret = llc_snap;
+ } else if (status == 0x4) {
+ CHIP_DBG(KERN_INFO "RX Des0 status: IPv4/6 No CSUM errorS.\n");
+ ret = good_frame;
+ } else if (status == 0x5) {
+ CHIP_DBG(KERN_ERR "RX Des0 status: IPv4/6 Payload Error.\n");
+ ret = csum_none;
+ } else if (status == 0x6) {
+ CHIP_DBG(KERN_ERR "RX Des0 status: IPv4/6 Header Error.\n");
+ ret = csum_none;
+ } else if (status == 0x7) {
+ CHIP_DBG(KERN_ERR
+ "RX Des0 status: IPv4/6 Header and Payload Error.\n");
+ ret = csum_none;
+ } else if (status == 0x1) {
+ CHIP_DBG(KERN_ERR
+ "RX Des0 status: IPv4/6 unsupported IP PAYLOAD.\n");
+ ret = discard_frame;
+ } else if (status == 0x3) {
+ CHIP_DBG(KERN_ERR "RX Des0 status: No IPv4, IPv6 frame.\n");
+ ret = discard_frame;
+ }
+ return ret;
+}
+
+/* Update the extended (PTP / filtering) RX statistics counters from the
+ * extended descriptor status word (des4).
+ */
+static void enh_desc_get_ext_status(void *data, struct stmmac_extra_stats *x,
+ struct dma_extended_desc *p)
+{
+ if (unlikely(p->basic.des01.erx.rx_mac_addr)) {
+ if (p->des4.erx.ip_hdr_err)
+ x->ip_hdr_err++;
+ if (p->des4.erx.ip_payload_err)
+ x->ip_payload_err++;
+ if (p->des4.erx.ip_csum_bypassed)
+ x->ip_csum_bypassed++;
+ if (p->des4.erx.ipv4_pkt_rcvd)
+ x->ipv4_pkt_rcvd++;
+ if (p->des4.erx.ipv6_pkt_rcvd)
+ x->ipv6_pkt_rcvd++;
+ if (p->des4.erx.msg_type == RDES_EXT_SYNC)
+ x->rx_msg_type_sync++;
+ else if (p->des4.erx.msg_type == RDES_EXT_FOLLOW_UP)
+ x->rx_msg_type_follow_up++;
+ else if (p->des4.erx.msg_type == RDES_EXT_DELAY_REQ)
+ x->rx_msg_type_delay_req++;
+ else if (p->des4.erx.msg_type == RDES_EXT_DELAY_RESP)
+ x->rx_msg_type_delay_resp++;
+ /* Bug fix: this branch used to re-test RDES_EXT_DELAY_REQ
+ * (already matched above), so rx_msg_type_pdelay_req could
+ * never increment. Test the Pdelay_Req type instead.
+ */
+ else if (p->des4.erx.msg_type == RDES_EXT_PDELAY_REQ)
+ x->rx_msg_type_pdelay_req++;
+ else if (p->des4.erx.msg_type == RDES_EXT_PDELAY_RESP)
+ x->rx_msg_type_pdelay_resp++;
+ else if (p->des4.erx.msg_type == RDES_EXT_PDELAY_FOLLOW_UP)
+ x->rx_msg_type_pdelay_follow_up++;
+ else
+ x->rx_msg_type_ext_no_ptp++;
+ if (p->des4.erx.ptp_frame_type)
+ x->ptp_frame_type++;
+ if (p->des4.erx.ptp_ver)
+ x->ptp_ver++;
+ if (p->des4.erx.timestamp_dropped)
+ x->timestamp_dropped++;
+ if (p->des4.erx.av_pkt_rcvd)
+ x->av_pkt_rcvd++;
+ if (p->des4.erx.av_tagged_pkt_rcvd)
+ x->av_tagged_pkt_rcvd++;
+ if (p->des4.erx.vlan_tag_priority_val)
+ x->vlan_tag_priority_val++;
+ if (p->des4.erx.l3_filter_match)
+ x->l3_filter_match++;
+ if (p->des4.erx.l4_filter_match)
+ x->l4_filter_match++;
+ if (p->des4.erx.l3_l4_filter_no_match)
+ x->l3_l4_filter_no_match++;
+ }
+}
+
+/* Check the RX status of an enhanced descriptor, update the device and
+ * extra statistics and classify the frame for the caller:
+ * good_frame / csum_none / discard_frame.
+ *
+ * Fixes:
+ * - the csum classification used to overwrite 'ret' unconditionally,
+ *   silently upgrading a frame already marked discard_frame by the
+ *   error summary; it is now only applied to clean frames.
+ * - a late collision incremented stats->collisions twice; count it
+ *   once, consistent with ndesc_get_rx_status().
+ */
+static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
+				  struct dma_desc *p)
+{
+	int ret = good_frame;
+	struct net_device_stats *stats = (struct net_device_stats *)data;
+
+	if (unlikely(p->des01.erx.error_summary)) {
+		CHIP_DBG(KERN_ERR "GMAC RX Error Summary 0x%08x\n",
+			 p->des01.erx);
+		if (unlikely(p->des01.erx.descriptor_error)) {
+			CHIP_DBG(KERN_ERR "\tdescriptor error\n");
+			x->rx_desc++;
+			stats->rx_length_errors++;
+		}
+		if (unlikely(p->des01.erx.overflow_error)) {
+			CHIP_DBG(KERN_ERR "\toverflow error\n");
+			x->rx_gmac_overflow++;
+		}
+
+		if (unlikely(p->des01.erx.ipc_csum_error))
+			CHIP_DBG(KERN_ERR "\tIPC Csum Error/Giant frame\n");
+
+		if (unlikely(p->des01.erx.late_collision)) {
+			CHIP_DBG(KERN_ERR "\tlate_collision error\n");
+			stats->collisions++;
+		}
+		if (unlikely(p->des01.erx.receive_watchdog)) {
+			CHIP_DBG(KERN_ERR "\treceive_watchdog error\n");
+			x->rx_watchdog++;
+		}
+		if (unlikely(p->des01.erx.error_gmii)) {
+			CHIP_DBG(KERN_ERR "\tReceive Error\n");
+			x->rx_mii++;
+		}
+		if (unlikely(p->des01.erx.crc_error)) {
+			CHIP_DBG(KERN_ERR "\tCRC error\n");
+			x->rx_crc++;
+			stats->rx_crc_errors++;
+		}
+		ret = discard_frame;
+	}
+
+	/* After a payload csum error, the ES bit is set.
+	 * It doesn't match with the information reported into the databook.
+	 * At any rate, we need to understand if the CSUM hw computation is ok
+	 * and report this info to the upper layers.  Do not let it clobber a
+	 * discard status already set above. */
+	if (likely(ret == good_frame))
+		ret = enh_desc_coe_rdes0(p->des01.erx.ipc_csum_error,
+					 p->des01.erx.frame_type,
+					 p->des01.erx.rx_mac_addr);
+
+	if (unlikely(p->des01.erx.dribbling)) {
+		CHIP_DBG(KERN_ERR "GMAC RX: dribbling error\n");
+		x->dribbling_bit++;
+	}
+	if (unlikely(p->des01.erx.sa_filter_fail)) {
+		CHIP_DBG(KERN_ERR "GMAC RX : Source Address filter fail\n");
+		x->sa_rx_filter_fail++;
+		ret = discard_frame;
+	}
+	if (unlikely(p->des01.erx.da_filter_fail)) {
+		CHIP_DBG(KERN_ERR "GMAC RX : Dest Address filter fail\n");
+		x->da_rx_filter_fail++;
+		ret = discard_frame;
+	}
+	if (unlikely(p->des01.erx.length_error)) {
+		CHIP_DBG(KERN_ERR "GMAC RX: length_error error\n");
+		x->rx_length++;
+		ret = discard_frame;
+	}
+#ifdef STMMAC_VLAN_TAG_USED
+	if (p->des01.erx.vlan_tag) {
+		CHIP_DBG(KERN_INFO "GMAC RX: VLAN frame tagged\n");
+		x->rx_vlan++;
+	}
+#endif
+
+	return ret;
+}
+
+/* Initialize an enhanced RX descriptor: program the buffer size, link
+ * it into chain or ring mode, optionally suppress the RX-complete
+ * interrupt, and finally hand ownership to the DMA. */
+static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
+				  int mode, int end)
+{
+	p->des01.erx.own = 1;
+	p->des01.erx.buffer1_size = BUF_SIZE_8KiB - 1;
+
+	if (mode == STMMAC_CHAIN_MODE)
+		ehn_desc_rx_set_on_chain(p, end);
+	else
+		ehn_desc_rx_set_on_ring(p, end);
+
+	/* 'end' marks the last descriptor of the ring/chain */
+	if (disable_rx_ic)
+		p->des01.erx.disable_ic = 1;
+}
+
+/* Initialize an enhanced TX descriptor: keep ownership on the CPU side
+ * (own = 0) and link it into chain or ring mode. */
+static void enh_desc_init_tx_desc(struct dma_desc *p, int mode, int end)
+{
+	p->des01.etx.own = 0;
+	if (mode == STMMAC_CHAIN_MODE)
+		ehn_desc_tx_set_on_chain(p, end);
+	else
+		ehn_desc_tx_set_on_ring(p, end);
+}
+
+/* Ownership accessors for enhanced descriptors.  The 'own' bit grants
+ * the descriptor to the DMA (1) or to the CPU (0). */
+static int enh_desc_get_tx_owner(struct dma_desc *p)
+{
+	return p->des01.etx.own;
+}
+
+static int enh_desc_get_rx_owner(struct dma_desc *p)
+{
+	return p->des01.erx.own;
+}
+
+static void enh_desc_set_tx_owner(struct dma_desc *p)
+{
+	p->des01.etx.own = 1;
+}
+
+static void enh_desc_set_rx_owner(struct dma_desc *p)
+{
+	p->des01.erx.own = 1;
+}
+
+/* Non-zero when this descriptor carries the last segment of a frame. */
+static int enh_desc_get_tx_ls(struct dma_desc *p)
+{
+	return p->des01.etx.last_segment;
+}
+
+/* Reclaim a completed TX descriptor: clear the status/control words
+ * (everything before des2, i.e. the buffer pointers survive) while
+ * preserving the end-of-ring marker. */
+static void enh_desc_release_tx_desc(struct dma_desc *p, int mode)
+{
+	int ter = p->des01.etx.end_ring;
+
+	memset(p, 0, offsetof(struct dma_desc, des2));
+	if (mode == STMMAC_CHAIN_MODE)
+		enh_desc_end_tx_desc_on_chain(p, ter);
+	else
+		enh_desc_end_tx_desc_on_ring(p, ter);
+}
+
+/* Program an enhanced TX descriptor for transmission: mark whether it
+ * is the first segment of the frame, set the buffer length for the
+ * current mode and request full HW checksum insertion if asked. */
+static void enh_desc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
+				     int csum_flag, int mode)
+{
+	p->des01.etx.first_segment = is_fs;
+
+	if (mode == STMMAC_CHAIN_MODE)
+		enh_set_tx_desc_len_on_chain(p, len);
+	else
+		enh_set_tx_desc_len_on_ring(p, len);
+
+	/* cic_full: IP header + payload checksum computed in hardware */
+	if (likely(csum_flag))
+		p->des01.etx.checksum_insertion = cic_full;
+}
+
+/* Suppress the transmit-complete interrupt for this descriptor. */
+static void enh_desc_clear_tx_ic(struct dma_desc *p)
+{
+	p->des01.etx.interrupt = 0;
+}
+
+/* Close a frame: flag the last segment and request a TX interrupt. */
+static void enh_desc_close_tx_desc(struct dma_desc *p)
+{
+	p->des01.etx.last_segment = 1;
+	p->des01.etx.interrupt = 1;
+}
+
+/* Return the received frame length from the descriptor. */
+static int enh_desc_get_rx_frame_len(struct dma_desc *p, int rx_coe_type)
+{
+	/* The type-1 checksum offload engines append the checksum at
+	 * the end of frame and the two bytes of checksum are added in
+	 * the length.
+	 * Adjust for that in the framelen for type-1 checksum offload
+	 * engines. */
+	if (rx_coe_type == STMMAC_RX_COE_TYPE1)
+		return p->des01.erx.frame_length - 2;
+	else
+		return p->des01.erx.frame_length;
+}
+
+/* Ask the MAC to capture a transmit timestamp for this descriptor. */
+static void enh_desc_enable_tx_timestamp(struct dma_desc *p)
+{
+	p->des01.etx.time_stamp_enable = 1;
+}
+
+/* Non-zero once the HW has written the TX timestamp back. */
+static int enh_desc_get_tx_timestamp_status(struct dma_desc *p)
+{
+	return p->des01.etx.time_stamp_status;
+}
+
+/* Read back the HW timestamp as nanoseconds.  With alternate/extended
+ * descriptors (ats) the stamp lives in des6 (ns) / des7 (s); otherwise
+ * the DMA reuses the buffer pointers des2 (ns) / des3 (s). */
+static u64 enh_desc_get_timestamp(void *desc, u32 ats)
+{
+	u64 ns;
+
+	if (ats) {
+		struct dma_extended_desc *p = (struct dma_extended_desc *)desc;
+		ns = p->des6;
+		/* convert high/sec time stamp value to nanosecond */
+		ns += p->des7 * 1000000000ULL;
+	} else {
+		struct dma_desc *p = (struct dma_desc *)desc;
+		ns = p->des2;
+		ns += p->des3 * 1000000000ULL;
+	}
+
+	return ns;
+}
+
+/* Return 1 if a valid RX timestamp is available for this descriptor.
+ * NOTE(review): in the ats path the bit read through erx.ipc_csum_error
+ * is presumably the RDES0 "timestamp available" flag, which shares that
+ * bit position in extended-descriptor mode -- confirm against the
+ * databook. */
+static int enh_desc_get_rx_timestamp_status(void *desc, u32 ats)
+{
+	if (ats) {
+		struct dma_extended_desc *p = (struct dma_extended_desc *)desc;
+		return p->basic.des01.erx.ipc_csum_error;
+	} else {
+		struct dma_desc *p = (struct dma_desc *)desc;
+		if ((p->des2 == 0xffffffff) && (p->des3 == 0xffffffff))
+			/* timestamp is corrupted, hence don't store it */
+			return 0;
+		else
+			return 1;
+	}
+}
+
+/* Descriptor-operation vtable used by the core when the platform is
+ * configured for enhanced descriptors (plat->enh_desc). */
+const struct stmmac_desc_ops enh_desc_ops = {
+	.tx_status = enh_desc_get_tx_status,
+	.rx_status = enh_desc_get_rx_status,
+	.get_tx_len = enh_desc_get_tx_len,
+	.init_rx_desc = enh_desc_init_rx_desc,
+	.init_tx_desc = enh_desc_init_tx_desc,
+	.get_tx_owner = enh_desc_get_tx_owner,
+	.get_rx_owner = enh_desc_get_rx_owner,
+	.release_tx_desc = enh_desc_release_tx_desc,
+	.prepare_tx_desc = enh_desc_prepare_tx_desc,
+	.clear_tx_ic = enh_desc_clear_tx_ic,
+	.close_tx_desc = enh_desc_close_tx_desc,
+	.get_tx_ls = enh_desc_get_tx_ls,
+	.set_tx_owner = enh_desc_set_tx_owner,
+	.set_rx_owner = enh_desc_set_rx_owner,
+	.get_rx_frame_len = enh_desc_get_rx_frame_len,
+	.rx_extended_status = enh_desc_get_ext_status,
+	.enable_tx_timestamp = enh_desc_enable_tx_timestamp,
+	.get_tx_timestamp_status = enh_desc_get_tx_timestamp_status,
+	.get_timestamp = enh_desc_get_timestamp,
+	.get_rx_timestamp_status = enh_desc_get_rx_timestamp_status,
+};
--- /dev/null
+---- STMMAC_DEBUG_FS Matches (0 in 0 files) ----\r
--- /dev/null
+/*******************************************************************************
+ MMC Header file
+
+ Copyright (C) 2011 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#ifndef __MMC_H__
+#define __MMC_H__
+
+/* MMC control register */
+/* When set, all counters are reset */
+#define MMC_CNTRL_COUNTER_RESET 0x1
+/* When set, do not roll over zero after reaching the max value*/
+#define MMC_CNTRL_COUNTER_STOP_ROLLOVER 0x2
+#define MMC_CNTRL_RESET_ON_READ 0x4 /* Reset after reading */
+#define MMC_CNTRL_COUNTER_FREEZER 0x8 /* Freeze counter values to the
+ * current value.*/
+#define MMC_CNTRL_PRESET 0x10
+#define MMC_CNTRL_FULL_HALF_PRESET 0x20
+
+/* Software accumulators for the MAC Management Counters.  The HW
+ * registers reset on read (see dwmac_mmc_read()), so each field keeps
+ * the running total.  Field names mirror the register macros in
+ * mmc_core.c -- including the intentional 'crc_errror' spelling, which
+ * matches MMC_RX_CRC_ERRROR. */
+struct stmmac_counters {
+	unsigned int mmc_tx_octetcount_gb;
+	unsigned int mmc_tx_framecount_gb;
+	unsigned int mmc_tx_broadcastframe_g;
+	unsigned int mmc_tx_multicastframe_g;
+	unsigned int mmc_tx_64_octets_gb;
+	unsigned int mmc_tx_65_to_127_octets_gb;
+	unsigned int mmc_tx_128_to_255_octets_gb;
+	unsigned int mmc_tx_256_to_511_octets_gb;
+	unsigned int mmc_tx_512_to_1023_octets_gb;
+	unsigned int mmc_tx_1024_to_max_octets_gb;
+	unsigned int mmc_tx_unicast_gb;
+	unsigned int mmc_tx_multicast_gb;
+	unsigned int mmc_tx_broadcast_gb;
+	unsigned int mmc_tx_underflow_error;
+	unsigned int mmc_tx_singlecol_g;
+	unsigned int mmc_tx_multicol_g;
+	unsigned int mmc_tx_deferred;
+	unsigned int mmc_tx_latecol;
+	unsigned int mmc_tx_exesscol;
+	unsigned int mmc_tx_carrier_error;
+	unsigned int mmc_tx_octetcount_g;
+	unsigned int mmc_tx_framecount_g;
+	unsigned int mmc_tx_excessdef;
+	unsigned int mmc_tx_pause_frame;
+	unsigned int mmc_tx_vlan_frame_g;
+
+	/* MMC RX counter registers */
+	unsigned int mmc_rx_framecount_gb;
+	unsigned int mmc_rx_octetcount_gb;
+	unsigned int mmc_rx_octetcount_g;
+	unsigned int mmc_rx_broadcastframe_g;
+	unsigned int mmc_rx_multicastframe_g;
+	unsigned int mmc_rx_crc_errror;
+	unsigned int mmc_rx_align_error;
+	unsigned int mmc_rx_run_error;
+	unsigned int mmc_rx_jabber_error;
+	unsigned int mmc_rx_undersize_g;
+	unsigned int mmc_rx_oversize_g;
+	unsigned int mmc_rx_64_octets_gb;
+	unsigned int mmc_rx_65_to_127_octets_gb;
+	unsigned int mmc_rx_128_to_255_octets_gb;
+	unsigned int mmc_rx_256_to_511_octets_gb;
+	unsigned int mmc_rx_512_to_1023_octets_gb;
+	unsigned int mmc_rx_1024_to_max_octets_gb;
+	unsigned int mmc_rx_unicast_g;
+	unsigned int mmc_rx_length_error;
+	unsigned int mmc_rx_autofrangetype;
+	unsigned int mmc_rx_pause_frames;
+	unsigned int mmc_rx_fifo_overflow;
+	unsigned int mmc_rx_vlan_frames_gb;
+	unsigned int mmc_rx_watchdog_error;
+	/* IPC */
+	unsigned int mmc_rx_ipc_intr_mask;
+	unsigned int mmc_rx_ipc_intr;
+	/* IPv4 */
+	unsigned int mmc_rx_ipv4_gd;
+	unsigned int mmc_rx_ipv4_hderr;
+	unsigned int mmc_rx_ipv4_nopay;
+	unsigned int mmc_rx_ipv4_frag;
+	unsigned int mmc_rx_ipv4_udsbl;
+
+	unsigned int mmc_rx_ipv4_gd_octets;
+	unsigned int mmc_rx_ipv4_hderr_octets;
+	unsigned int mmc_rx_ipv4_nopay_octets;
+	unsigned int mmc_rx_ipv4_frag_octets;
+	unsigned int mmc_rx_ipv4_udsbl_octets;
+
+	/* IPV6 */
+	unsigned int mmc_rx_ipv6_gd_octets;
+	unsigned int mmc_rx_ipv6_hderr_octets;
+	unsigned int mmc_rx_ipv6_nopay_octets;
+
+	unsigned int mmc_rx_ipv6_gd;
+	unsigned int mmc_rx_ipv6_hderr;
+	unsigned int mmc_rx_ipv6_nopay;
+
+	/* Protocols */
+	unsigned int mmc_rx_udp_gd;
+	unsigned int mmc_rx_udp_err;
+	unsigned int mmc_rx_tcp_gd;
+	unsigned int mmc_rx_tcp_err;
+	unsigned int mmc_rx_icmp_gd;
+	unsigned int mmc_rx_icmp_err;
+
+	unsigned int mmc_rx_udp_gd_octets;
+	unsigned int mmc_rx_udp_err_octets;
+	unsigned int mmc_rx_tcp_gd_octets;
+	unsigned int mmc_rx_tcp_err_octets;
+	unsigned int mmc_rx_icmp_gd_octets;
+	unsigned int mmc_rx_icmp_err_octets;
+};
+
+/* Implemented in mmc_core.c */
+extern void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode);
+extern void dwmac_mmc_intr_all_mask(void __iomem *ioaddr);
+extern void dwmac_mmc_read(void __iomem *ioaddr, struct stmmac_counters *mmc);
+
+#endif /* __MMC_H__ */
--- /dev/null
+/*******************************************************************************
+ DWMAC Management Counters
+
+ Copyright (C) 2011 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include "mmc.h"
+
+/* MAC Management Counters register offset */
+
+#define MMC_CNTRL 0x00000100 /* MMC Control */
+#define MMC_RX_INTR 0x00000104 /* MMC RX Interrupt */
+#define MMC_TX_INTR 0x00000108 /* MMC TX Interrupt */
+#define MMC_RX_INTR_MASK 0x0000010c /* MMC Interrupt Mask */
+#define MMC_TX_INTR_MASK 0x00000110 /* MMC Interrupt Mask */
+#define MMC_DEFAULT_MASK 0xffffffff
+
+/* MMC TX counter registers */
+
+/* Note:
+ * _GB register stands for good and bad frames
+ * _G is for good only.
+ */
+#define MMC_TX_OCTETCOUNT_GB 0x00000114
+#define MMC_TX_FRAMECOUNT_GB 0x00000118
+#define MMC_TX_BROADCASTFRAME_G 0x0000011c
+#define MMC_TX_MULTICASTFRAME_G 0x00000120
+#define MMC_TX_64_OCTETS_GB 0x00000124
+#define MMC_TX_65_TO_127_OCTETS_GB 0x00000128
+#define MMC_TX_128_TO_255_OCTETS_GB 0x0000012c
+#define MMC_TX_256_TO_511_OCTETS_GB 0x00000130
+#define MMC_TX_512_TO_1023_OCTETS_GB 0x00000134
+#define MMC_TX_1024_TO_MAX_OCTETS_GB 0x00000138
+#define MMC_TX_UNICAST_GB 0x0000013c
+#define MMC_TX_MULTICAST_GB 0x00000140
+#define MMC_TX_BROADCAST_GB 0x00000144
+#define MMC_TX_UNDERFLOW_ERROR 0x00000148
+#define MMC_TX_SINGLECOL_G 0x0000014c
+#define MMC_TX_MULTICOL_G 0x00000150
+#define MMC_TX_DEFERRED 0x00000154
+#define MMC_TX_LATECOL 0x00000158
+#define MMC_TX_EXESSCOL 0x0000015c
+#define MMC_TX_CARRIER_ERROR 0x00000160
+#define MMC_TX_OCTETCOUNT_G 0x00000164
+#define MMC_TX_FRAMECOUNT_G 0x00000168
+#define MMC_TX_EXCESSDEF 0x0000016c
+#define MMC_TX_PAUSE_FRAME 0x00000170
+#define MMC_TX_VLAN_FRAME_G 0x00000174
+
+/* MMC RX counter registers */
+#define MMC_RX_FRAMECOUNT_GB 0x00000180
+#define MMC_RX_OCTETCOUNT_GB 0x00000184
+#define MMC_RX_OCTETCOUNT_G 0x00000188
+#define MMC_RX_BROADCASTFRAME_G 0x0000018c
+#define MMC_RX_MULTICASTFRAME_G 0x00000190
+#define MMC_RX_CRC_ERRROR 0x00000194
+#define MMC_RX_ALIGN_ERROR 0x00000198
+#define MMC_RX_RUN_ERROR 0x0000019C
+#define MMC_RX_JABBER_ERROR 0x000001A0
+#define MMC_RX_UNDERSIZE_G 0x000001A4
+#define MMC_RX_OVERSIZE_G 0x000001A8
+#define MMC_RX_64_OCTETS_GB 0x000001AC
+#define MMC_RX_65_TO_127_OCTETS_GB 0x000001b0
+#define MMC_RX_128_TO_255_OCTETS_GB 0x000001b4
+#define MMC_RX_256_TO_511_OCTETS_GB 0x000001b8
+#define MMC_RX_512_TO_1023_OCTETS_GB 0x000001bc
+#define MMC_RX_1024_TO_MAX_OCTETS_GB 0x000001c0
+#define MMC_RX_UNICAST_G 0x000001c4
+#define MMC_RX_LENGTH_ERROR 0x000001c8
+#define MMC_RX_AUTOFRANGETYPE 0x000001cc
+#define MMC_RX_PAUSE_FRAMES 0x000001d0
+#define MMC_RX_FIFO_OVERFLOW 0x000001d4
+#define MMC_RX_VLAN_FRAMES_GB 0x000001d8
+#define MMC_RX_WATCHDOG_ERROR 0x000001dc
+/* IPC*/
+#define MMC_RX_IPC_INTR_MASK 0x00000200
+#define MMC_RX_IPC_INTR 0x00000208
+/* IPv4*/
+#define MMC_RX_IPV4_GD 0x00000210
+#define MMC_RX_IPV4_HDERR 0x00000214
+#define MMC_RX_IPV4_NOPAY 0x00000218
+#define MMC_RX_IPV4_FRAG 0x0000021C
+#define MMC_RX_IPV4_UDSBL 0x00000220
+
+#define MMC_RX_IPV4_GD_OCTETS 0x00000250
+#define MMC_RX_IPV4_HDERR_OCTETS 0x00000254
+#define MMC_RX_IPV4_NOPAY_OCTETS 0x00000258
+#define MMC_RX_IPV4_FRAG_OCTETS 0x0000025c
+#define MMC_RX_IPV4_UDSBL_OCTETS 0x00000260
+
+/* IPV6*/
+#define MMC_RX_IPV6_GD_OCTETS 0x00000264
+#define MMC_RX_IPV6_HDERR_OCTETS 0x00000268
+#define MMC_RX_IPV6_NOPAY_OCTETS 0x0000026c
+
+#define MMC_RX_IPV6_GD 0x00000224
+#define MMC_RX_IPV6_HDERR 0x00000228
+#define MMC_RX_IPV6_NOPAY 0x0000022c
+
+/* Protocols*/
+#define MMC_RX_UDP_GD 0x00000230
+#define MMC_RX_UDP_ERR 0x00000234
+#define MMC_RX_TCP_GD 0x00000238
+#define MMC_RX_TCP_ERR 0x0000023c
+#define MMC_RX_ICMP_GD 0x00000240
+#define MMC_RX_ICMP_ERR 0x00000244
+
+#define MMC_RX_UDP_GD_OCTETS 0x00000270
+#define MMC_RX_UDP_ERR_OCTETS 0x00000274
+#define MMC_RX_TCP_GD_OCTETS 0x00000278
+#define MMC_RX_TCP_ERR_OCTETS 0x0000027c
+#define MMC_RX_ICMP_GD_OCTETS 0x00000280
+#define MMC_RX_ICMP_ERR_OCTETS 0x00000284
+
+/* Read-modify-write the MMC control register.  'mode' is an OR of the
+ * MMC_CNTRL_* flags above; 0x3F keeps only the six defined control
+ * bits.  Bits are only ever set here, never cleared. */
+void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
+{
+	u32 value = readl(ioaddr + MMC_CNTRL);
+
+	value |= (mode & 0x3F);
+
+	writel(value, ioaddr + MMC_CNTRL);
+
+	pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
+		 MMC_CNTRL, value);
+}
+
+/* Mask all MMC interrupts (RX, TX and IPC) so counter wrap events do
+ * not raise IRQs. */
+void dwmac_mmc_intr_all_mask(void __iomem *ioaddr)
+{
+	writel(MMC_DEFAULT_MASK, ioaddr + MMC_RX_INTR_MASK);
+	writel(MMC_DEFAULT_MASK, ioaddr + MMC_TX_INTR_MASK);
+	writel(MMC_DEFAULT_MASK, ioaddr + MMC_RX_IPC_INTR_MASK);
+}
+
+/* This reads the MAC core counters (if actually supported).
+ * By default the MMC core is programmed to reset each
+ * counter after a read.  So every field of the mmc struct
+ * has to be incremented (accumulated), not assigned.
+ */
+void dwmac_mmc_read(void __iomem *ioaddr, struct stmmac_counters *mmc)
+{
+	mmc->mmc_tx_octetcount_gb += readl(ioaddr + MMC_TX_OCTETCOUNT_GB);
+	mmc->mmc_tx_framecount_gb += readl(ioaddr + MMC_TX_FRAMECOUNT_GB);
+	mmc->mmc_tx_broadcastframe_g += readl(ioaddr + MMC_TX_BROADCASTFRAME_G);
+	mmc->mmc_tx_multicastframe_g += readl(ioaddr + MMC_TX_MULTICASTFRAME_G);
+	mmc->mmc_tx_64_octets_gb += readl(ioaddr + MMC_TX_64_OCTETS_GB);
+	mmc->mmc_tx_65_to_127_octets_gb +=
+	    readl(ioaddr + MMC_TX_65_TO_127_OCTETS_GB);
+	mmc->mmc_tx_128_to_255_octets_gb +=
+	    readl(ioaddr + MMC_TX_128_TO_255_OCTETS_GB);
+	mmc->mmc_tx_256_to_511_octets_gb +=
+	    readl(ioaddr + MMC_TX_256_TO_511_OCTETS_GB);
+	mmc->mmc_tx_512_to_1023_octets_gb +=
+	    readl(ioaddr + MMC_TX_512_TO_1023_OCTETS_GB);
+	mmc->mmc_tx_1024_to_max_octets_gb +=
+	    readl(ioaddr + MMC_TX_1024_TO_MAX_OCTETS_GB);
+	mmc->mmc_tx_unicast_gb += readl(ioaddr + MMC_TX_UNICAST_GB);
+	mmc->mmc_tx_multicast_gb += readl(ioaddr + MMC_TX_MULTICAST_GB);
+	mmc->mmc_tx_broadcast_gb += readl(ioaddr + MMC_TX_BROADCAST_GB);
+	mmc->mmc_tx_underflow_error += readl(ioaddr + MMC_TX_UNDERFLOW_ERROR);
+	mmc->mmc_tx_singlecol_g += readl(ioaddr + MMC_TX_SINGLECOL_G);
+	mmc->mmc_tx_multicol_g += readl(ioaddr + MMC_TX_MULTICOL_G);
+	mmc->mmc_tx_deferred += readl(ioaddr + MMC_TX_DEFERRED);
+	mmc->mmc_tx_latecol += readl(ioaddr + MMC_TX_LATECOL);
+	mmc->mmc_tx_exesscol += readl(ioaddr + MMC_TX_EXESSCOL);
+	mmc->mmc_tx_carrier_error += readl(ioaddr + MMC_TX_CARRIER_ERROR);
+	mmc->mmc_tx_octetcount_g += readl(ioaddr + MMC_TX_OCTETCOUNT_G);
+	mmc->mmc_tx_framecount_g += readl(ioaddr + MMC_TX_FRAMECOUNT_G);
+	mmc->mmc_tx_excessdef += readl(ioaddr + MMC_TX_EXCESSDEF);
+	mmc->mmc_tx_pause_frame += readl(ioaddr + MMC_TX_PAUSE_FRAME);
+	mmc->mmc_tx_vlan_frame_g += readl(ioaddr + MMC_TX_VLAN_FRAME_G);
+
+	/* MMC RX counter registers */
+	mmc->mmc_rx_framecount_gb += readl(ioaddr + MMC_RX_FRAMECOUNT_GB);
+	mmc->mmc_rx_octetcount_gb += readl(ioaddr + MMC_RX_OCTETCOUNT_GB);
+	mmc->mmc_rx_octetcount_g += readl(ioaddr + MMC_RX_OCTETCOUNT_G);
+	mmc->mmc_rx_broadcastframe_g += readl(ioaddr + MMC_RX_BROADCASTFRAME_G);
+	mmc->mmc_rx_multicastframe_g += readl(ioaddr + MMC_RX_MULTICASTFRAME_G);
+	mmc->mmc_rx_crc_errror += readl(ioaddr + MMC_RX_CRC_ERRROR);
+	mmc->mmc_rx_align_error += readl(ioaddr + MMC_RX_ALIGN_ERROR);
+	mmc->mmc_rx_run_error += readl(ioaddr + MMC_RX_RUN_ERROR);
+	mmc->mmc_rx_jabber_error += readl(ioaddr + MMC_RX_JABBER_ERROR);
+	mmc->mmc_rx_undersize_g += readl(ioaddr + MMC_RX_UNDERSIZE_G);
+	mmc->mmc_rx_oversize_g += readl(ioaddr + MMC_RX_OVERSIZE_G);
+	mmc->mmc_rx_64_octets_gb += readl(ioaddr + MMC_RX_64_OCTETS_GB);
+	mmc->mmc_rx_65_to_127_octets_gb +=
+	    readl(ioaddr + MMC_RX_65_TO_127_OCTETS_GB);
+	mmc->mmc_rx_128_to_255_octets_gb +=
+	    readl(ioaddr + MMC_RX_128_TO_255_OCTETS_GB);
+	mmc->mmc_rx_256_to_511_octets_gb +=
+	    readl(ioaddr + MMC_RX_256_TO_511_OCTETS_GB);
+	mmc->mmc_rx_512_to_1023_octets_gb +=
+	    readl(ioaddr + MMC_RX_512_TO_1023_OCTETS_GB);
+	mmc->mmc_rx_1024_to_max_octets_gb +=
+	    readl(ioaddr + MMC_RX_1024_TO_MAX_OCTETS_GB);
+	mmc->mmc_rx_unicast_g += readl(ioaddr + MMC_RX_UNICAST_G);
+	mmc->mmc_rx_length_error += readl(ioaddr + MMC_RX_LENGTH_ERROR);
+	mmc->mmc_rx_autofrangetype += readl(ioaddr + MMC_RX_AUTOFRANGETYPE);
+	mmc->mmc_rx_pause_frames += readl(ioaddr + MMC_RX_PAUSE_FRAMES);
+	mmc->mmc_rx_fifo_overflow += readl(ioaddr + MMC_RX_FIFO_OVERFLOW);
+	mmc->mmc_rx_vlan_frames_gb += readl(ioaddr + MMC_RX_VLAN_FRAMES_GB);
+	mmc->mmc_rx_watchdog_error += readl(ioaddr + MMC_RX_WATCHDOG_ERROR);
+	/* IPC */
+	mmc->mmc_rx_ipc_intr_mask += readl(ioaddr + MMC_RX_IPC_INTR_MASK);
+	mmc->mmc_rx_ipc_intr += readl(ioaddr + MMC_RX_IPC_INTR);
+	/* IPv4 */
+	mmc->mmc_rx_ipv4_gd += readl(ioaddr + MMC_RX_IPV4_GD);
+	mmc->mmc_rx_ipv4_hderr += readl(ioaddr + MMC_RX_IPV4_HDERR);
+	mmc->mmc_rx_ipv4_nopay += readl(ioaddr + MMC_RX_IPV4_NOPAY);
+	mmc->mmc_rx_ipv4_frag += readl(ioaddr + MMC_RX_IPV4_FRAG);
+	mmc->mmc_rx_ipv4_udsbl += readl(ioaddr + MMC_RX_IPV4_UDSBL);
+
+	mmc->mmc_rx_ipv4_gd_octets += readl(ioaddr + MMC_RX_IPV4_GD_OCTETS);
+	mmc->mmc_rx_ipv4_hderr_octets +=
+	    readl(ioaddr + MMC_RX_IPV4_HDERR_OCTETS);
+	mmc->mmc_rx_ipv4_nopay_octets +=
+	    readl(ioaddr + MMC_RX_IPV4_NOPAY_OCTETS);
+	mmc->mmc_rx_ipv4_frag_octets += readl(ioaddr + MMC_RX_IPV4_FRAG_OCTETS);
+	mmc->mmc_rx_ipv4_udsbl_octets +=
+	    readl(ioaddr + MMC_RX_IPV4_UDSBL_OCTETS);
+
+	/* IPV6 */
+	mmc->mmc_rx_ipv6_gd_octets += readl(ioaddr + MMC_RX_IPV6_GD_OCTETS);
+	mmc->mmc_rx_ipv6_hderr_octets +=
+	    readl(ioaddr + MMC_RX_IPV6_HDERR_OCTETS);
+	mmc->mmc_rx_ipv6_nopay_octets +=
+	    readl(ioaddr + MMC_RX_IPV6_NOPAY_OCTETS);
+
+	mmc->mmc_rx_ipv6_gd += readl(ioaddr + MMC_RX_IPV6_GD);
+	mmc->mmc_rx_ipv6_hderr += readl(ioaddr + MMC_RX_IPV6_HDERR);
+	mmc->mmc_rx_ipv6_nopay += readl(ioaddr + MMC_RX_IPV6_NOPAY);
+
+	/* Protocols */
+	mmc->mmc_rx_udp_gd += readl(ioaddr + MMC_RX_UDP_GD);
+	mmc->mmc_rx_udp_err += readl(ioaddr + MMC_RX_UDP_ERR);
+	mmc->mmc_rx_tcp_gd += readl(ioaddr + MMC_RX_TCP_GD);
+	mmc->mmc_rx_tcp_err += readl(ioaddr + MMC_RX_TCP_ERR);
+	mmc->mmc_rx_icmp_gd += readl(ioaddr + MMC_RX_ICMP_GD);
+	mmc->mmc_rx_icmp_err += readl(ioaddr + MMC_RX_ICMP_ERR);
+
+	mmc->mmc_rx_udp_gd_octets += readl(ioaddr + MMC_RX_UDP_GD_OCTETS);
+	mmc->mmc_rx_udp_err_octets += readl(ioaddr + MMC_RX_UDP_ERR_OCTETS);
+	mmc->mmc_rx_tcp_gd_octets += readl(ioaddr + MMC_RX_TCP_GD_OCTETS);
+	mmc->mmc_rx_tcp_err_octets += readl(ioaddr + MMC_RX_TCP_ERR_OCTETS);
+	mmc->mmc_rx_icmp_gd_octets += readl(ioaddr + MMC_RX_ICMP_GD_OCTETS);
+	mmc->mmc_rx_icmp_err_octets += readl(ioaddr + MMC_RX_ICMP_ERR_OCTETS);
+}
--- /dev/null
+/*******************************************************************************
+ This contains the functions to handle the normal descriptors.
+
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/stmmac.h>
+#include "common.h"
+#include "descs_com.h"
+
+/* Check the TX status of a normal descriptor and update device /
+ * extra statistics.  Returns 0 on a clean transmission, -1 if the
+ * error summary bit is set.
+ *
+ * Fix: the VLAN bit was read through the enhanced union view
+ * (des01.etx.vlan_frame); this is the normal-descriptor handler, so
+ * the 'tx' layout must be used -- the enhanced bit positions differ.
+ */
+static int ndesc_get_tx_status(void *data, struct stmmac_extra_stats *x,
+			       struct dma_desc *p, void __iomem *ioaddr)
+{
+	int ret = 0;
+	struct net_device_stats *stats = (struct net_device_stats *)data;
+
+	if (unlikely(p->des01.tx.error_summary)) {
+		if (unlikely(p->des01.tx.underflow_error)) {
+			x->tx_underflow++;
+			stats->tx_fifo_errors++;
+		}
+		if (unlikely(p->des01.tx.no_carrier)) {
+			x->tx_carrier++;
+			stats->tx_carrier_errors++;
+		}
+		if (unlikely(p->des01.tx.loss_carrier)) {
+			x->tx_losscarrier++;
+			stats->tx_carrier_errors++;
+		}
+		if (unlikely((p->des01.tx.excessive_deferral) ||
+			     (p->des01.tx.excessive_collisions) ||
+			     (p->des01.tx.late_collision)))
+			stats->collisions += p->des01.tx.collision_count;
+		ret = -1;
+	}
+
+	if (p->des01.tx.vlan_frame) {
+		CHIP_DBG(KERN_INFO "GMAC TX status: VLAN frame\n");
+		x->tx_vlan++;
+	}
+
+	if (unlikely(p->des01.tx.deferred))
+		x->tx_deferred++;
+
+	return ret;
+}
+
+/* Return the buffer length programmed into a normal TX descriptor. */
+static int ndesc_get_tx_len(struct dma_desc *p)
+{
+	return p->des01.tx.buffer1_size;
+}
+
+/* This function verifies if each incoming frame has some errors
+ * and, if required, updates the multicast statistics.
+ * In case of success, it returns good_frame because the GMAC device
+ * is supposed to be able to compute the csum in HW. */
+static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x,
+			       struct dma_desc *p)
+{
+	int ret = good_frame;
+	struct net_device_stats *stats = (struct net_device_stats *)data;
+
+	/* A frame that did not fit in one buffer spans descriptors and
+	 * is not supported here: drop it. */
+	if (unlikely(p->des01.rx.last_descriptor == 0)) {
+		pr_warn("%s: Oversized frame spanned multiple buffers\n",
+			__func__);
+		stats->rx_length_errors++;
+		return discard_frame;
+	}
+
+	if (unlikely(p->des01.rx.error_summary)) {
+		if (unlikely(p->des01.rx.descriptor_error))
+			x->rx_desc++;
+		if (unlikely(p->des01.rx.sa_filter_fail))
+			x->sa_filter_fail++;
+		if (unlikely(p->des01.rx.overflow_error))
+			x->overflow_error++;
+		if (unlikely(p->des01.rx.ipc_csum_error))
+			x->ipc_csum_error++;
+		if (unlikely(p->des01.rx.collision)) {
+			x->rx_collision++;
+			stats->collisions++;
+		}
+		if (unlikely(p->des01.rx.crc_error)) {
+			x->rx_crc++;
+			stats->rx_crc_errors++;
+		}
+		ret = discard_frame;
+	}
+	/* dribbling is only a statistic, not a reason to drop */
+	if (unlikely(p->des01.rx.dribbling))
+		x->dribbling_bit++;
+
+	if (unlikely(p->des01.rx.length_error)) {
+		x->rx_length++;
+		ret = discard_frame;
+	}
+	if (unlikely(p->des01.rx.mii_error)) {
+		x->rx_mii++;
+		ret = discard_frame;
+	}
+#ifdef STMMAC_VLAN_TAG_USED
+	if (p->des01.rx.vlan_tag)
+		x->vlan_tag++;
+#endif
+	return ret;
+}
+
+/* Initialize a normal RX descriptor: program the (2KiB) buffer size,
+ * link it into chain or ring mode, optionally suppress the RX
+ * interrupt, and hand ownership to the DMA. */
+static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode,
+			       int end)
+{
+	p->des01.rx.own = 1;
+	p->des01.rx.buffer1_size = BUF_SIZE_2KiB - 1;
+
+	if (mode == STMMAC_CHAIN_MODE)
+		ndesc_rx_set_on_chain(p, end);
+	else
+		ndesc_rx_set_on_ring(p, end);
+
+	if (disable_rx_ic)
+		p->des01.rx.disable_ic = 1;
+}
+
+/* Initialize a normal TX descriptor: keep CPU ownership (own = 0) and
+ * link it into chain or ring mode. */
+static void ndesc_init_tx_desc(struct dma_desc *p, int mode, int end)
+{
+	p->des01.tx.own = 0;
+	if (mode == STMMAC_CHAIN_MODE)
+		ndesc_tx_set_on_chain(p, end);
+	else
+		ndesc_tx_set_on_ring(p, end);
+}
+
+/* Ownership accessors for normal descriptors: the 'own' bit grants the
+ * descriptor to the DMA (1) or to the CPU (0). */
+static int ndesc_get_tx_owner(struct dma_desc *p)
+{
+	return p->des01.tx.own;
+}
+
+static int ndesc_get_rx_owner(struct dma_desc *p)
+{
+	return p->des01.rx.own;
+}
+
+static void ndesc_set_tx_owner(struct dma_desc *p)
+{
+	p->des01.tx.own = 1;
+}
+
+static void ndesc_set_rx_owner(struct dma_desc *p)
+{
+	p->des01.rx.own = 1;
+}
+
+/* Non-zero when this descriptor carries the last segment of a frame. */
+static int ndesc_get_tx_ls(struct dma_desc *p)
+{
+	return p->des01.tx.last_segment;
+}
+
+/* Reclaim a completed normal TX descriptor: clear status/control words
+ * (everything before des2, preserving the buffer pointers) and restore
+ * the end-of-ring marker. */
+static void ndesc_release_tx_desc(struct dma_desc *p, int mode)
+{
+	int ter = p->des01.tx.end_ring;
+
+	memset(p, 0, offsetof(struct dma_desc, des2));
+	if (mode == STMMAC_CHAIN_MODE)
+		ndesc_end_tx_desc_on_chain(p, ter);
+	else
+		ndesc_end_tx_desc_on_ring(p, ter);
+}
+
+/* Program a normal TX descriptor: first-segment flag, buffer length
+ * for the current mode, and full HW checksum insertion if requested. */
+static void ndesc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
+				  int csum_flag, int mode)
+{
+	p->des01.tx.first_segment = is_fs;
+	if (mode == STMMAC_CHAIN_MODE)
+		norm_set_tx_desc_len_on_chain(p, len);
+	else
+		norm_set_tx_desc_len_on_ring(p, len);
+
+	if (likely(csum_flag))
+		p->des01.tx.checksum_insertion = cic_full;
+}
+
+/* Suppress the transmit-complete interrupt for this descriptor. */
+static void ndesc_clear_tx_ic(struct dma_desc *p)
+{
+	p->des01.tx.interrupt = 0;
+}
+
+/* Close a frame: flag the last segment and request a TX interrupt. */
+static void ndesc_close_tx_desc(struct dma_desc *p)
+{
+	p->des01.tx.last_segment = 1;
+	p->des01.tx.interrupt = 1;
+}
+
+/* Return the received frame length from the descriptor. */
+static int ndesc_get_rx_frame_len(struct dma_desc *p, int rx_coe_type)
+{
+	/* The type-1 checksum offload engines append the checksum at
+	 * the end of frame and the two bytes of checksum are added in
+	 * the length.
+	 * Adjust for that in the framelen for type-1 checksum offload
+	 * engines. */
+	if (rx_coe_type == STMMAC_RX_COE_TYPE1)
+		return p->des01.rx.frame_length - 2;
+	else
+		return p->des01.rx.frame_length;
+}
+
+/* Ask the MAC to capture a transmit timestamp for this descriptor. */
+static void ndesc_enable_tx_timestamp(struct dma_desc *p)
+{
+	p->des01.tx.time_stamp_enable = 1;
+}
+
+/* Non-zero once the HW has written the TX timestamp back. */
+static int ndesc_get_tx_timestamp_status(struct dma_desc *p)
+{
+	return p->des01.tx.time_stamp_status;
+}
+
+/* Read back the HW timestamp as nanoseconds.  Normal descriptors have
+ * no extended layout, so the DMA reuses des2 (ns) / des3 (s); 'ats' is
+ * ignored here and only kept for interface symmetry with enh_desc. */
+static u64 ndesc_get_timestamp(void *desc, u32 ats)
+{
+	struct dma_desc *p = (struct dma_desc *)desc;
+	u64 ns;
+
+	ns = p->des2;
+	/* convert high/sec time stamp value to nanosecond */
+	ns += p->des3 * 1000000000ULL;
+
+	return ns;
+}
+
+/* Return 1 if this descriptor holds a usable RX timestamp; all-ones in
+ * both words marks a corrupted stamp. */
+static int ndesc_get_rx_timestamp_status(void *desc, u32 ats)
+{
+	struct dma_desc *p = (struct dma_desc *)desc;
+
+	if ((p->des2 == 0xffffffff) && (p->des3 == 0xffffffff))
+		/* timestamp is corrupted, hence don't store it */
+		return 0;
+	else
+		return 1;
+}
+
+/* Descriptor-operation vtable used by the core when the platform uses
+ * normal (non-enhanced) descriptors.  Note: no .rx_extended_status --
+ * normal descriptors have no extended status word. */
+const struct stmmac_desc_ops ndesc_ops = {
+	.tx_status = ndesc_get_tx_status,
+	.rx_status = ndesc_get_rx_status,
+	.get_tx_len = ndesc_get_tx_len,
+	.init_rx_desc = ndesc_init_rx_desc,
+	.init_tx_desc = ndesc_init_tx_desc,
+	.get_tx_owner = ndesc_get_tx_owner,
+	.get_rx_owner = ndesc_get_rx_owner,
+	.release_tx_desc = ndesc_release_tx_desc,
+	.prepare_tx_desc = ndesc_prepare_tx_desc,
+	.clear_tx_ic = ndesc_clear_tx_ic,
+	.close_tx_desc = ndesc_close_tx_desc,
+	.get_tx_ls = ndesc_get_tx_ls,
+	.set_tx_owner = ndesc_set_tx_owner,
+	.set_rx_owner = ndesc_set_rx_owner,
+	.get_rx_frame_len = ndesc_get_rx_frame_len,
+	.enable_tx_timestamp = ndesc_enable_tx_timestamp,
+	.get_tx_timestamp_status = ndesc_get_tx_timestamp_status,
+	.get_timestamp = ndesc_get_timestamp,
+	.get_rx_timestamp_status = ndesc_get_rx_timestamp_status,
+};
--- /dev/null
+/*******************************************************************************
+ Specialised functions for managing Ring mode
+
+ Copyright(C) 2011 STMicroelectronics Ltd
+
+ It defines all the functions used to handle the normal/enhanced
+ descriptors in case of the DMA is configured to work in chained or
+ in ring mode.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include "stmmac.h"
+
+/* Map an over-sized linear skb into one or two TX descriptors in ring
+ * mode, using des2/des3 as a split buffer pair.  Returns the index of
+ * the last descriptor used.
+ * NOTE(review): dma_map_single() results are not checked with
+ * dma_mapping_error() -- confirm against the rest of the driver.
+ * NOTE(review): des3 = des2 + BUF_SIZE_4KiB even when bmax is 8KiB;
+ * presumably the HW treats des3 as the second half of the buffer --
+ * confirm with the databook. */
+static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
+{
+	struct stmmac_priv *priv = (struct stmmac_priv *)p;
+	unsigned int txsize = priv->dma_tx_size;
+	unsigned int entry = priv->cur_tx % txsize;
+	struct dma_desc *desc = priv->dma_tx + entry;
+	unsigned int nopaged_len = skb_headlen(skb);
+	unsigned int bmax, len;
+
+	/* per-descriptor buffer limit depends on the descriptor layout */
+	if (priv->plat->enh_desc)
+		bmax = BUF_SIZE_8KiB;
+	else
+		bmax = BUF_SIZE_2KiB;
+
+	/* remainder after the first descriptor; only meaningful in the
+	 * two-descriptor branch below where nopaged_len > bmax */
+	len = nopaged_len - bmax;
+
+	if (nopaged_len > BUF_SIZE_8KiB) {
+
+		desc->des2 = dma_map_single(priv->device, skb->data,
+					    bmax, DMA_TO_DEVICE);
+		priv->tx_skbuff_dma[entry] = desc->des2;
+		desc->des3 = desc->des2 + BUF_SIZE_4KiB;
+		priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum,
+						STMMAC_RING_MODE);
+		wmb();
+		entry = (++priv->cur_tx) % txsize;
+		desc = priv->dma_tx + entry;
+
+		desc->des2 = dma_map_single(priv->device, skb->data + bmax,
+					    len, DMA_TO_DEVICE);
+		priv->tx_skbuff_dma[entry] = desc->des2;
+		desc->des3 = desc->des2 + BUF_SIZE_4KiB;
+		priv->hw->desc->prepare_tx_desc(desc, 0, len, csum,
+						STMMAC_RING_MODE);
+		wmb();
+		/* second descriptor is handed to the DMA here; the first
+		 * one is released by the caller to avoid a race */
+		priv->hw->desc->set_tx_owner(desc);
+		priv->tx_skbuff[entry] = NULL;
+	} else {
+		desc->des2 = dma_map_single(priv->device, skb->data,
+					    nopaged_len, DMA_TO_DEVICE);
+		priv->tx_skbuff_dma[entry] = desc->des2;
+		desc->des3 = desc->des2 + BUF_SIZE_4KiB;
+		priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, csum,
+						STMMAC_RING_MODE);
+	}
+
+	return entry;
+}
+
+/* Tell the caller whether @len needs the jumbo (multi-buffer) TX path.
+ * @enh_desc is accepted for interface symmetry and unused in ring mode.
+ */
+static unsigned int stmmac_is_jumbo_frm(int len, int enh_desc)
+{
+ return (len >= BUF_SIZE_4KiB) ? 1 : 0;
+}
+
+/* RX refill hook: on GMAC with big buffers, DES3 doubles as the second
+ * half of a split receive buffer and must be rebuilt from DES2.
+ */
+static void stmmac_refill_desc3(void *priv_ptr, struct dma_desc *p)
+{
+ struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr;
+
+ if (unlikely(priv->plat->has_gmac))
+ /* Fill DES3 in case of RING mode */
+ if (priv->dma_buf_sz >= BUF_SIZE_8KiB)
+ p->des3 = p->des2 + BUF_SIZE_8KiB;
+}
+
+/* In ring mode we need to fill the desc3 because it is used as buffer */
+static void stmmac_init_desc3(struct dma_desc *p)
+{
+ /* second buffer pointer sits 8KiB past the first */
+ p->des3 = p->des2 + BUF_SIZE_8KiB;
+}
+
+/* Clear a stale DES3 second-buffer pointer before the descriptor is
+ * reused; the branch avoids a needless write in the common case.
+ */
+static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p)
+{
+ if (unlikely(p->des3))
+ p->des3 = 0;
+}
+
+/* Pick a 16KiB DMA buffer size when @mtu will not fit into 8KiB
+ * buffers; return 0 to let the caller keep its default size.
+ */
+static int stmmac_set_16kib_bfsize(int mtu)
+{
+ return unlikely(mtu >= BUF_SIZE_8KiB) ? BUF_SIZE_16KiB : 0;
+}
+
+/* Ring-mode callbacks plugged into the core descriptor handling. */
+const struct stmmac_ring_mode_ops ring_mode_ops = {
+ .is_jumbo_frm = stmmac_is_jumbo_frm,
+ .jumbo_frm = stmmac_jumbo_frm,
+ .refill_desc3 = stmmac_refill_desc3,
+ .init_desc3 = stmmac_init_desc3,
+ .clean_desc3 = stmmac_clean_desc3,
+ .set_16kib_bfsize = stmmac_set_16kib_bfsize,
+};
--- /dev/null
+/*******************************************************************************
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#ifndef __STMMAC_H__
+#define __STMMAC_H__
+
+#define STMMAC_RESOURCE_NAME "stmmaceth"
+#define DRV_MODULE_VERSION "March_2013"
+
+#include <linux/clk.h>
+#include <linux/stmmac.h>
+#include <linux/phy.h>
+#include <linux/pci.h>
+#include "common.h"
+#include <linux/ptp_clock_kernel.h>
+
+/* Per-device driver state, embedded in the net_device private area. */
+struct stmmac_priv {
+ /* Frequently used values are kept adjacent for cache effect */
+ struct dma_extended_desc *dma_etx ____cacheline_aligned_in_smp;
+ struct dma_desc *dma_tx;
+ struct sk_buff **tx_skbuff;
+ unsigned int cur_tx;
+ unsigned int dirty_tx;
+ unsigned int dma_tx_size;
+ u32 tx_count_frames;
+ u32 tx_coal_frames;
+ u32 tx_coal_timer;
+ dma_addr_t *tx_skbuff_dma;
+ dma_addr_t dma_tx_phy;
+ int tx_coalesce;
+ int hwts_tx_en;
+ spinlock_t tx_lock;
+ bool tx_path_in_lpi_mode;
+ struct timer_list txtimer;
+
+ /* RX ring state */
+ struct dma_desc *dma_rx ____cacheline_aligned_in_smp;
+ struct dma_extended_desc *dma_erx;
+ struct sk_buff **rx_skbuff;
+ unsigned int cur_rx;
+ unsigned int dirty_rx;
+ unsigned int dma_rx_size;
+ unsigned int dma_buf_sz;
+ u32 rx_riwt;
+ int hwts_rx_en;
+ dma_addr_t *rx_skbuff_dma;
+ dma_addr_t dma_rx_phy;
+
+ struct napi_struct napi ____cacheline_aligned_in_smp;
+
+ /* device handles and core state */
+ void __iomem *ioaddr;
+ struct net_device *dev;
+ struct device *device;
+ struct mac_device_info *hw;
+ int no_csum_insertion;
+ spinlock_t lock;
+
+ /* PHY / link state */
+ struct phy_device *phydev ____cacheline_aligned_in_smp;
+ int oldlink;
+ int speed;
+ int oldduplex;
+ unsigned int flow_ctrl;
+ unsigned int pause;
+ struct mii_bus *mii;
+ int mii_irq[PHY_MAX_ADDR];
+
+ /* statistics, platform data, feature flags, EEE and PTP state */
+ struct stmmac_extra_stats xstats ____cacheline_aligned_in_smp;
+ struct plat_stmmacenet_data *plat;
+ struct dma_features dma_cap;
+ struct stmmac_counters mmc;
+ int hw_cap_support;
+ int synopsys_id;
+ u32 msg_enable;
+ int wolopts;
+ int wol_irq;
+ struct clk *stmmac_clk;
+ int clk_csr;
+ struct timer_list eee_ctrl_timer;
+ int lpi_irq;
+ int eee_enabled;
+ int eee_active;
+ int tx_lpi_timer;
+ int pcs;
+ unsigned int mode;
+ int extend_desc;
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_clock_ops;
+ unsigned int default_addend;
+ u32 adv_ts;
+ int use_riwt;
+ spinlock_t ptp_lock;
+};
+
+extern int phyaddr;
+
+extern int stmmac_mdio_unregister(struct net_device *ndev);
+extern int stmmac_mdio_register(struct net_device *ndev);
+extern void stmmac_set_ethtool_ops(struct net_device *netdev);
+extern const struct stmmac_desc_ops enh_desc_ops;
+extern const struct stmmac_desc_ops ndesc_ops;
+extern const struct stmmac_hwtimestamp stmmac_ptp;
+extern int stmmac_ptp_register(struct stmmac_priv *priv);
+extern void stmmac_ptp_unregister(struct stmmac_priv *priv);
+int stmmac_freeze(struct net_device *ndev);
+int stmmac_restore(struct net_device *ndev);
+int stmmac_resume(struct net_device *ndev);
+int stmmac_suspend(struct net_device *ndev);
+int stmmac_dvr_remove(struct net_device *ndev);
+struct stmmac_priv *stmmac_dvr_probe(struct device *device,
+ struct plat_stmmacenet_data *plat_dat,
+ void __iomem *addr);
+void stmmac_disable_eee_mode(struct stmmac_priv *priv);
+bool stmmac_eee_init(struct stmmac_priv *priv);
+
+extern struct platform_driver stmmac_pltfr_driver;
+/* Register the platform-bus glue driver; logs and propagates failure. */
+static inline int stmmac_register_platform(void)
+{
+ int err;
+
+ err = platform_driver_register(&stmmac_pltfr_driver);
+ if (err)
+ pr_err("stmmac: failed to register the platform driver\n");
+
+ return err;
+}
+
+/* Tear down the platform-bus glue driver. */
+static inline void stmmac_unregister_platform(void)
+{
+ platform_driver_unregister(&stmmac_pltfr_driver);
+}
+
+#ifdef CONFIG_STMMAC_PCI
+extern struct pci_driver stmmac_pci_driver;
+/* Register the PCI glue driver; logs and propagates failure. */
+static inline int stmmac_register_pci(void)
+{
+ int err;
+
+ err = pci_register_driver(&stmmac_pci_driver);
+ if (err)
+ pr_err("stmmac: failed to register the PCI driver\n");
+
+ return err;
+}
+
+static inline void stmmac_unregister_pci(void)
+{
+ pci_unregister_driver(&stmmac_pci_driver);
+}
+#else
+/* PCI support compiled out: stubs keep the call sites unconditional. */
+static inline int stmmac_register_pci(void)
+{
+ pr_debug("stmmac: do not register the PCI driver\n");
+
+ return 0;
+}
+
+static inline void stmmac_unregister_pci(void)
+{
+}
+#endif /* CONFIG_STMMAC_PCI */
+
+#endif /* __STMMAC_H__ */
--- /dev/null
+/*******************************************************************************
+ STMMAC Ethtool support
+
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/interrupt.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+#include <linux/net_tstamp.h>
+#include <asm/io.h>
+
+#include "stmmac.h"
+#include "dwmac_dma.h"
+
+#define REG_SPACE_SIZE 0x1054
+#define MAC100_ETHTOOL_NAME "st_mac100"
+#define GMAC_ETHTOOL_NAME "st_gmac"
+
+struct stmmac_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int sizeof_stat;
+ int stat_offset;
+};
+
+#define STMMAC_STAT(m) \
+ { #m, FIELD_SIZEOF(struct stmmac_extra_stats, m), \
+ offsetof(struct stmmac_priv, xstats.m)}
+
+static const struct stmmac_stats stmmac_gstrings_stats[] = {
+ /* Transmit errors */
+ STMMAC_STAT(tx_underflow),
+ STMMAC_STAT(tx_carrier),
+ STMMAC_STAT(tx_losscarrier),
+ STMMAC_STAT(vlan_tag),
+ STMMAC_STAT(tx_deferred),
+ STMMAC_STAT(tx_vlan),
+ STMMAC_STAT(tx_jabber),
+ STMMAC_STAT(tx_frame_flushed),
+ STMMAC_STAT(tx_payload_error),
+ STMMAC_STAT(tx_ip_header_error),
+ /* Receive errors */
+ STMMAC_STAT(rx_desc),
+ STMMAC_STAT(sa_filter_fail),
+ STMMAC_STAT(overflow_error),
+ STMMAC_STAT(ipc_csum_error),
+ STMMAC_STAT(rx_collision),
+ STMMAC_STAT(rx_crc),
+ STMMAC_STAT(dribbling_bit),
+ STMMAC_STAT(rx_length),
+ STMMAC_STAT(rx_mii),
+ STMMAC_STAT(rx_multicast),
+ STMMAC_STAT(rx_gmac_overflow),
+ STMMAC_STAT(rx_watchdog),
+ STMMAC_STAT(da_rx_filter_fail),
+ STMMAC_STAT(sa_rx_filter_fail),
+ STMMAC_STAT(rx_missed_cntr),
+ STMMAC_STAT(rx_overflow_cntr),
+ STMMAC_STAT(rx_vlan),
+ /* Tx/Rx IRQ error info */
+ STMMAC_STAT(tx_undeflow_irq),
+ STMMAC_STAT(tx_process_stopped_irq),
+ STMMAC_STAT(tx_jabber_irq),
+ STMMAC_STAT(rx_overflow_irq),
+ STMMAC_STAT(rx_buf_unav_irq),
+ STMMAC_STAT(rx_process_stopped_irq),
+ STMMAC_STAT(rx_watchdog_irq),
+ STMMAC_STAT(tx_early_irq),
+ STMMAC_STAT(fatal_bus_error_irq),
+ /* Tx/Rx IRQ Events */
+ STMMAC_STAT(rx_early_irq),
+ STMMAC_STAT(threshold),
+ STMMAC_STAT(tx_pkt_n),
+ STMMAC_STAT(rx_pkt_n),
+ STMMAC_STAT(normal_irq_n),
+ STMMAC_STAT(rx_normal_irq_n),
+ STMMAC_STAT(napi_poll),
+ STMMAC_STAT(tx_normal_irq_n),
+ STMMAC_STAT(tx_clean),
+ STMMAC_STAT(tx_reset_ic_bit),
+ STMMAC_STAT(irq_receive_pmt_irq_n),
+ /* MMC info */
+ STMMAC_STAT(mmc_tx_irq_n),
+ STMMAC_STAT(mmc_rx_irq_n),
+ STMMAC_STAT(mmc_rx_csum_offload_irq_n),
+ /* EEE */
+ STMMAC_STAT(irq_tx_path_in_lpi_mode_n),
+ STMMAC_STAT(irq_tx_path_exit_lpi_mode_n),
+ STMMAC_STAT(irq_rx_path_in_lpi_mode_n),
+ STMMAC_STAT(irq_rx_path_exit_lpi_mode_n),
+ STMMAC_STAT(phy_eee_wakeup_error_n),
+ /* Extended RDES status */
+ STMMAC_STAT(ip_hdr_err),
+ STMMAC_STAT(ip_payload_err),
+ STMMAC_STAT(ip_csum_bypassed),
+ STMMAC_STAT(ipv4_pkt_rcvd),
+ STMMAC_STAT(ipv6_pkt_rcvd),
+ STMMAC_STAT(rx_msg_type_ext_no_ptp),
+ STMMAC_STAT(rx_msg_type_sync),
+ STMMAC_STAT(rx_msg_type_follow_up),
+ STMMAC_STAT(rx_msg_type_delay_req),
+ STMMAC_STAT(rx_msg_type_delay_resp),
+ STMMAC_STAT(rx_msg_type_pdelay_req),
+ STMMAC_STAT(rx_msg_type_pdelay_resp),
+ STMMAC_STAT(rx_msg_type_pdelay_follow_up),
+ STMMAC_STAT(ptp_frame_type),
+ STMMAC_STAT(ptp_ver),
+ STMMAC_STAT(timestamp_dropped),
+ STMMAC_STAT(av_pkt_rcvd),
+ STMMAC_STAT(av_tagged_pkt_rcvd),
+ STMMAC_STAT(vlan_tag_priority_val),
+ STMMAC_STAT(l3_filter_match),
+ STMMAC_STAT(l4_filter_match),
+ STMMAC_STAT(l3_l4_filter_no_match),
+ /* PCS */
+ STMMAC_STAT(irq_pcs_ane_n),
+ STMMAC_STAT(irq_pcs_link_n),
+ STMMAC_STAT(irq_rgmii_n),
+};
+#define STMMAC_STATS_LEN ARRAY_SIZE(stmmac_gstrings_stats)
+
+/* HW MAC Management counters (if supported) */
+#define STMMAC_MMC_STAT(m) \
+ { #m, FIELD_SIZEOF(struct stmmac_counters, m), \
+ offsetof(struct stmmac_priv, mmc.m)}
+
+static const struct stmmac_stats stmmac_mmc[] = {
+ STMMAC_MMC_STAT(mmc_tx_octetcount_gb),
+ STMMAC_MMC_STAT(mmc_tx_framecount_gb),
+ STMMAC_MMC_STAT(mmc_tx_broadcastframe_g),
+ STMMAC_MMC_STAT(mmc_tx_multicastframe_g),
+ STMMAC_MMC_STAT(mmc_tx_64_octets_gb),
+ STMMAC_MMC_STAT(mmc_tx_65_to_127_octets_gb),
+ STMMAC_MMC_STAT(mmc_tx_128_to_255_octets_gb),
+ STMMAC_MMC_STAT(mmc_tx_256_to_511_octets_gb),
+ STMMAC_MMC_STAT(mmc_tx_512_to_1023_octets_gb),
+ STMMAC_MMC_STAT(mmc_tx_1024_to_max_octets_gb),
+ STMMAC_MMC_STAT(mmc_tx_unicast_gb),
+ STMMAC_MMC_STAT(mmc_tx_multicast_gb),
+ STMMAC_MMC_STAT(mmc_tx_broadcast_gb),
+ STMMAC_MMC_STAT(mmc_tx_underflow_error),
+ STMMAC_MMC_STAT(mmc_tx_singlecol_g),
+ STMMAC_MMC_STAT(mmc_tx_multicol_g),
+ STMMAC_MMC_STAT(mmc_tx_deferred),
+ STMMAC_MMC_STAT(mmc_tx_latecol),
+ STMMAC_MMC_STAT(mmc_tx_exesscol),
+ STMMAC_MMC_STAT(mmc_tx_carrier_error),
+ STMMAC_MMC_STAT(mmc_tx_octetcount_g),
+ STMMAC_MMC_STAT(mmc_tx_framecount_g),
+ STMMAC_MMC_STAT(mmc_tx_excessdef),
+ STMMAC_MMC_STAT(mmc_tx_pause_frame),
+ STMMAC_MMC_STAT(mmc_tx_vlan_frame_g),
+ STMMAC_MMC_STAT(mmc_rx_framecount_gb),
+ STMMAC_MMC_STAT(mmc_rx_octetcount_gb),
+ STMMAC_MMC_STAT(mmc_rx_octetcount_g),
+ STMMAC_MMC_STAT(mmc_rx_broadcastframe_g),
+ STMMAC_MMC_STAT(mmc_rx_multicastframe_g),
+ STMMAC_MMC_STAT(mmc_rx_crc_errror),
+ STMMAC_MMC_STAT(mmc_rx_align_error),
+ STMMAC_MMC_STAT(mmc_rx_run_error),
+ STMMAC_MMC_STAT(mmc_rx_jabber_error),
+ STMMAC_MMC_STAT(mmc_rx_undersize_g),
+ STMMAC_MMC_STAT(mmc_rx_oversize_g),
+ STMMAC_MMC_STAT(mmc_rx_64_octets_gb),
+ STMMAC_MMC_STAT(mmc_rx_65_to_127_octets_gb),
+ STMMAC_MMC_STAT(mmc_rx_128_to_255_octets_gb),
+ STMMAC_MMC_STAT(mmc_rx_256_to_511_octets_gb),
+ STMMAC_MMC_STAT(mmc_rx_512_to_1023_octets_gb),
+ STMMAC_MMC_STAT(mmc_rx_1024_to_max_octets_gb),
+ STMMAC_MMC_STAT(mmc_rx_unicast_g),
+ STMMAC_MMC_STAT(mmc_rx_length_error),
+ STMMAC_MMC_STAT(mmc_rx_autofrangetype),
+ STMMAC_MMC_STAT(mmc_rx_pause_frames),
+ STMMAC_MMC_STAT(mmc_rx_fifo_overflow),
+ STMMAC_MMC_STAT(mmc_rx_vlan_frames_gb),
+ STMMAC_MMC_STAT(mmc_rx_watchdog_error),
+ STMMAC_MMC_STAT(mmc_rx_ipc_intr_mask),
+ STMMAC_MMC_STAT(mmc_rx_ipc_intr),
+ STMMAC_MMC_STAT(mmc_rx_ipv4_gd),
+ STMMAC_MMC_STAT(mmc_rx_ipv4_hderr),
+ STMMAC_MMC_STAT(mmc_rx_ipv4_nopay),
+ STMMAC_MMC_STAT(mmc_rx_ipv4_frag),
+ STMMAC_MMC_STAT(mmc_rx_ipv4_udsbl),
+ STMMAC_MMC_STAT(mmc_rx_ipv4_gd_octets),
+ STMMAC_MMC_STAT(mmc_rx_ipv4_hderr_octets),
+ STMMAC_MMC_STAT(mmc_rx_ipv4_nopay_octets),
+ STMMAC_MMC_STAT(mmc_rx_ipv4_frag_octets),
+ STMMAC_MMC_STAT(mmc_rx_ipv4_udsbl_octets),
+ STMMAC_MMC_STAT(mmc_rx_ipv6_gd_octets),
+ STMMAC_MMC_STAT(mmc_rx_ipv6_hderr_octets),
+ STMMAC_MMC_STAT(mmc_rx_ipv6_nopay_octets),
+ STMMAC_MMC_STAT(mmc_rx_ipv6_gd),
+ STMMAC_MMC_STAT(mmc_rx_ipv6_hderr),
+ STMMAC_MMC_STAT(mmc_rx_ipv6_nopay),
+ STMMAC_MMC_STAT(mmc_rx_udp_gd),
+ STMMAC_MMC_STAT(mmc_rx_udp_err),
+ STMMAC_MMC_STAT(mmc_rx_tcp_gd),
+ STMMAC_MMC_STAT(mmc_rx_tcp_err),
+ STMMAC_MMC_STAT(mmc_rx_icmp_gd),
+ STMMAC_MMC_STAT(mmc_rx_icmp_err),
+ STMMAC_MMC_STAT(mmc_rx_udp_gd_octets),
+ STMMAC_MMC_STAT(mmc_rx_udp_err_octets),
+ STMMAC_MMC_STAT(mmc_rx_tcp_gd_octets),
+ STMMAC_MMC_STAT(mmc_rx_tcp_err_octets),
+ STMMAC_MMC_STAT(mmc_rx_icmp_gd_octets),
+ STMMAC_MMC_STAT(mmc_rx_icmp_err_octets),
+};
+#define STMMAC_MMC_STATS_LEN ARRAY_SIZE(stmmac_mmc)
+
+/* ethtool ->get_drvinfo: report driver flavour (mac100 vs gmac) and
+ * module version.
+ */
+static void stmmac_ethtool_getdrvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ const char *name = priv->plat->has_gmac ?
+ GMAC_ETHTOOL_NAME : MAC100_ETHTOOL_NAME;
+
+ strlcpy(info->driver, name, sizeof(info->driver));
+ strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
+}
+
+/* ethtool ->get_settings: in RGMII/SGMII PCS mode report link state
+ * straight from the PCS AN registers; otherwise defer to the PHY layer.
+ * Fix: cmd->autoneg holds AUTONEG_ENABLE/AUTONEG_DISABLE, not an
+ * ADVERTISED_* bitmask -- the original stored ADVERTISED_Autoneg (0x40)
+ * which userspace ethtool does not recognise as "enabled".
+ */
+static int stmmac_ethtool_getsettings(struct net_device *dev,
+ struct ethtool_cmd *cmd)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ struct phy_device *phy = priv->phydev;
+ int rc;
+
+ if ((priv->pcs & STMMAC_PCS_RGMII) || (priv->pcs & STMMAC_PCS_SGMII)) {
+ struct rgmii_adv adv;
+
+ if (!priv->xstats.pcs_link) {
+ ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
+ cmd->duplex = DUPLEX_UNKNOWN;
+ return 0;
+ }
+ cmd->duplex = priv->xstats.pcs_duplex;
+
+ ethtool_cmd_speed_set(cmd, priv->xstats.pcs_speed);
+
+ /* Get and convert ADV/LP_ADV from the HW AN registers */
+ if (priv->hw->mac->get_adv)
+ priv->hw->mac->get_adv(priv->ioaddr, &adv);
+ else
+ return -EOPNOTSUPP; /* should never happen indeed */
+
+ /* Encoding of PSE bits is defined in 802.3z, 37.2.1.4 */
+
+ if (adv.pause & STMMAC_PCS_PAUSE)
+ cmd->advertising |= ADVERTISED_Pause;
+ if (adv.pause & STMMAC_PCS_ASYM_PAUSE)
+ cmd->advertising |= ADVERTISED_Asym_Pause;
+ if (adv.lp_pause & STMMAC_PCS_PAUSE)
+ cmd->lp_advertising |= ADVERTISED_Pause;
+ if (adv.lp_pause & STMMAC_PCS_ASYM_PAUSE)
+ cmd->lp_advertising |= ADVERTISED_Asym_Pause;
+
+ /* Reg49[3] always set because ANE is always supported */
+ cmd->autoneg = AUTONEG_ENABLE;
+ cmd->supported |= SUPPORTED_Autoneg;
+ cmd->advertising |= ADVERTISED_Autoneg;
+ cmd->lp_advertising |= ADVERTISED_Autoneg;
+
+ if (adv.duplex) {
+ cmd->supported |= (SUPPORTED_1000baseT_Full |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_10baseT_Full);
+ cmd->advertising |= (ADVERTISED_1000baseT_Full |
+ ADVERTISED_100baseT_Full |
+ ADVERTISED_10baseT_Full);
+ } else {
+ cmd->supported |= (SUPPORTED_1000baseT_Half |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_10baseT_Half);
+ cmd->advertising |= (ADVERTISED_1000baseT_Half |
+ ADVERTISED_100baseT_Half |
+ ADVERTISED_10baseT_Half);
+ }
+ if (adv.lp_duplex)
+ cmd->lp_advertising |= (ADVERTISED_1000baseT_Full |
+ ADVERTISED_100baseT_Full |
+ ADVERTISED_10baseT_Full);
+ else
+ cmd->lp_advertising |= (ADVERTISED_1000baseT_Half |
+ ADVERTISED_100baseT_Half |
+ ADVERTISED_10baseT_Half);
+ cmd->port = PORT_OTHER;
+
+ return 0;
+ }
+
+ if (phy == NULL) {
+ pr_err("%s: %s: PHY is not registered\n",
+ __func__, dev->name);
+ return -ENODEV;
+ }
+ if (!netif_running(dev)) {
+ pr_err("%s: interface is disabled: we cannot track "
+ "link speed / duplex setting\n", dev->name);
+ return -EBUSY;
+ }
+ cmd->transceiver = XCVR_INTERNAL;
+ spin_lock_irq(&priv->lock);
+ rc = phy_ethtool_gset(phy, cmd);
+ spin_unlock_irq(&priv->lock);
+ return rc;
+}
+
+/* ethtool ->set_settings: in RGMII/SGMII PCS mode only restarting
+ * auto-negotiation is supported; otherwise defer to the PHY layer.
+ * Cleanup: the advertising "mask" computed in the original was never
+ * read (dead code), and the inner AUTONEG_ENABLE test was always true
+ * after the preceding guard -- both removed, behavior unchanged.
+ */
+static int stmmac_ethtool_setsettings(struct net_device *dev,
+ struct ethtool_cmd *cmd)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ struct phy_device *phy = priv->phydev;
+ int rc;
+
+ if ((priv->pcs & STMMAC_PCS_RGMII) || (priv->pcs & STMMAC_PCS_SGMII)) {
+ /* Only support ANE */
+ if (cmd->autoneg != AUTONEG_ENABLE)
+ return -EINVAL;
+
+ spin_lock(&priv->lock);
+ if (priv->hw->mac->ctrl_ane)
+ priv->hw->mac->ctrl_ane(priv->ioaddr, 1);
+ spin_unlock(&priv->lock);
+
+ return 0;
+ }
+
+ spin_lock(&priv->lock);
+ rc = phy_ethtool_sset(phy, cmd);
+ spin_unlock(&priv->lock);
+
+ return rc;
+}
+
+/* ethtool ->get_msglevel: return the driver's message-enable mask. */
+static u32 stmmac_ethtool_getmsglevel(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ return priv->msg_enable;
+}
+
+/* ethtool ->set_msglevel: store the new message-enable mask. */
+static void stmmac_ethtool_setmsglevel(struct net_device *dev, u32 level)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ priv->msg_enable = level;
+
+}
+
+/* ethtool ->begin hook: refuse any ethtool op while the interface is
+ * down.
+ */
+static int stmmac_check_if_running(struct net_device *dev)
+{
+ return netif_running(dev) ? 0 : -EBUSY;
+}
+
+/* Size in bytes of the dump produced by stmmac_ethtool_gregs(). */
+static int stmmac_ethtool_get_regs_len(struct net_device *dev)
+{
+ return REG_SPACE_SIZE;
+}
+
+/* Dump MAC and DMA registers into @space (zero-padded to REG_SPACE_SIZE).
+ * mac100: 12 MAC words, then 9 DMA words starting at DMA_BUS_MODE, plus
+ * the two current buffer pointers; gmac: 55 MAC words then 22 DMA words.
+ */
+static void stmmac_ethtool_gregs(struct net_device *dev,
+ struct ethtool_regs *regs, void *space)
+{
+ int i;
+ u32 *reg_space = (u32 *) space;
+
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ memset(reg_space, 0x0, REG_SPACE_SIZE);
+
+ if (!priv->plat->has_gmac) {
+ /* MAC registers */
+ for (i = 0; i < 12; i++)
+ reg_space[i] = readl(priv->ioaddr + (i * 4));
+ /* DMA registers */
+ for (i = 0; i < 9; i++)
+ reg_space[i + 12] =
+ readl(priv->ioaddr + (DMA_BUS_MODE + (i * 4)));
+ reg_space[22] = readl(priv->ioaddr + DMA_CUR_TX_BUF_ADDR);
+ reg_space[23] = readl(priv->ioaddr + DMA_CUR_RX_BUF_ADDR);
+ } else {
+ /* MAC registers */
+ for (i = 0; i < 55; i++)
+ reg_space[i] = readl(priv->ioaddr + (i * 4));
+ /* DMA registers */
+ for (i = 0; i < 22; i++)
+ reg_space[i + 55] =
+ readl(priv->ioaddr + (DMA_BUS_MODE + (i * 4)));
+ }
+}
+
+/* Report the current flow-control configuration.
+ * NOTE(review): priv->phydev is dereferenced without a NULL check --
+ * presumably only reachable once the PHY is attached; verify callers.
+ */
+static void
+stmmac_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct stmmac_priv *priv = netdev_priv(netdev);
+
+ if (priv->pcs) /* FIXME */
+ return;
+
+ spin_lock(&priv->lock);
+
+ pause->rx_pause = 0;
+ pause->tx_pause = 0;
+ pause->autoneg = priv->phydev->autoneg;
+
+ if (priv->flow_ctrl & FLOW_RX)
+ pause->rx_pause = 1;
+ if (priv->flow_ctrl & FLOW_TX)
+ pause->tx_pause = 1;
+
+ spin_unlock(&priv->lock);
+}
+
+/* Apply a new flow-control configuration; with autoneg the PHY restarts
+ * negotiation, otherwise the MAC flow-control registers are programmed
+ * directly.
+ * NOTE(review): phy is dereferenced without a NULL check -- presumably
+ * only reachable with a PHY attached; verify callers.
+ */
+static int
+stmmac_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct stmmac_priv *priv = netdev_priv(netdev);
+ struct phy_device *phy = priv->phydev;
+ int new_pause = FLOW_OFF;
+ int ret = 0;
+
+ if (priv->pcs) /* FIXME */
+ return -EOPNOTSUPP;
+
+ spin_lock(&priv->lock);
+
+ if (pause->rx_pause)
+ new_pause |= FLOW_RX;
+ if (pause->tx_pause)
+ new_pause |= FLOW_TX;
+
+ priv->flow_ctrl = new_pause;
+ phy->autoneg = pause->autoneg;
+
+ if (phy->autoneg) {
+ if (netif_running(netdev))
+ ret = phy_start_aneg(phy);
+ } else
+ priv->hw->mac->flow_ctrl(priv->ioaddr, phy->duplex,
+ priv->flow_ctrl, priv->pause);
+ spin_unlock(&priv->lock);
+ return ret;
+}
+
+/* Fill @data with MMC hardware counters (GMAC with RMON only) followed
+ * by the driver's extra stats; order must match stmmac_get_strings().
+ */
+static void stmmac_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *dummy, u64 *data)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ int i, j = 0;
+
+ /* Update the DMA HW counters for dwmac10/100 */
+ if (!priv->plat->has_gmac)
+ priv->hw->dma->dma_diagnostic_fr(&dev->stats,
+ (void *) &priv->xstats,
+ priv->ioaddr);
+ else {
+ /* If supported, for new GMAC chips expose the MMC counters */
+ if (priv->dma_cap.rmon) {
+ dwmac_mmc_read(priv->ioaddr, &priv->mmc);
+
+ for (i = 0; i < STMMAC_MMC_STATS_LEN; i++) {
+ char *p;
+ p = (char *)priv + stmmac_mmc[i].stat_offset;
+
+ data[j++] = (stmmac_mmc[i].sizeof_stat ==
+ sizeof(u64)) ? (*(u64 *)p) :
+ (*(u32 *)p);
+ }
+ }
+ if (priv->eee_enabled) {
+ int val = phy_get_eee_err(priv->phydev);
+ if (val)
+ priv->xstats.phy_eee_wakeup_error_n = val;
+ }
+ }
+ for (i = 0; i < STMMAC_STATS_LEN; i++) {
+ char *p = (char *)priv + stmmac_gstrings_stats[i].stat_offset;
+ data[j++] = (stmmac_gstrings_stats[i].sizeof_stat ==
+ sizeof(u64)) ? (*(u64 *)p) : (*(u32 *)p);
+ }
+}
+
+/* Report how many stat strings/values ETH_SS_STATS carries (extra
+ * stats, plus the MMC counters when RMON is supported).
+ */
+static int stmmac_get_sset_count(struct net_device *netdev, int sset)
+{
+ struct stmmac_priv *priv = netdev_priv(netdev);
+
+ if (sset != ETH_SS_STATS)
+ return -EOPNOTSUPP;
+
+ return STMMAC_STATS_LEN +
+ (priv->dma_cap.rmon ? STMMAC_MMC_STATS_LEN : 0);
+}
+
+/* Emit stat names in the same order stmmac_get_ethtool_stats() fills
+ * values: MMC counters first (if RMON is supported), then extra stats.
+ */
+static void stmmac_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+ int i;
+ u8 *p = data;
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ if (priv->dma_cap.rmon)
+ for (i = 0; i < STMMAC_MMC_STATS_LEN; i++) {
+ memcpy(p, stmmac_mmc[i].stat_string,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < STMMAC_STATS_LEN; i++) {
+ memcpy(p, stmmac_gstrings_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+}
+
+/* Currently only support WOL through Magic packet. */
+/* Report Wake-on-LAN support (magic packet and unicast) only when the
+ * device is wake-up capable.
+ */
+static void stmmac_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ spin_lock_irq(&priv->lock);
+ if (device_can_wakeup(priv->device)) {
+ wol->supported = WAKE_MAGIC | WAKE_UCAST;
+ wol->wolopts = priv->wolopts;
+ }
+ spin_unlock_irq(&priv->lock);
+}
+
+/* Configure Wake-on-LAN; arms or disarms the dedicated WoL IRQ as a
+ * system wake-up source to match the requested options.
+ */
+static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ u32 support = WAKE_MAGIC | WAKE_UCAST;
+
+ /* By default almost all GMAC devices support the WoL via
+ * magic frame but we can disable it if the HW capability
+ * register shows no support for pmt_magic_frame. */
+ if ((priv->hw_cap_support) && (!priv->dma_cap.pmt_magic_frame))
+ wol->wolopts &= ~WAKE_MAGIC;
+
+ if (!device_can_wakeup(priv->device))
+ return -EINVAL;
+
+ if (wol->wolopts & ~support)
+ return -EINVAL;
+
+ if (wol->wolopts) {
+ pr_info("stmmac: wakeup enable\n");
+ device_set_wakeup_enable(priv->device, 1);
+ enable_irq_wake(priv->wol_irq);
+ } else {
+ device_set_wakeup_enable(priv->device, 0);
+ disable_irq_wake(priv->wol_irq);
+ }
+
+ spin_lock_irq(&priv->lock);
+ priv->wolopts = wol->wolopts;
+ spin_unlock_irq(&priv->lock);
+
+ return 0;
+}
+
+/* Report Energy-Efficient-Ethernet state; link-partner details are
+ * delegated to the PHY layer.
+ */
+static int stmmac_ethtool_op_get_eee(struct net_device *dev,
+ struct ethtool_eee *edata)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ if (!priv->dma_cap.eee)
+ return -EOPNOTSUPP;
+
+ edata->eee_enabled = priv->eee_enabled;
+ edata->eee_active = priv->eee_active;
+ edata->tx_lpi_timer = priv->tx_lpi_timer;
+
+ return phy_ethtool_get_eee(priv->phydev, edata);
+}
+
+/* Enable or disable Energy-Efficient-Ethernet; enabling re-runs
+ * stmmac_eee_init() to validate HW/PHY support before committing.
+ */
+static int stmmac_ethtool_op_set_eee(struct net_device *dev,
+ struct ethtool_eee *edata)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ priv->eee_enabled = edata->eee_enabled;
+
+ if (!priv->eee_enabled)
+ stmmac_disable_eee_mode(priv);
+ else {
+ /* We are asking for enabling the EEE but it is safe
+ * to verify all by invoking the eee_init function.
+ * In case of failure it will return an error.
+ */
+ priv->eee_enabled = stmmac_eee_init(priv);
+ if (!priv->eee_enabled)
+ return -EOPNOTSUPP;
+
+ /* Do not change tx_lpi_timer in case of failure */
+ priv->tx_lpi_timer = edata->tx_lpi_timer;
+ }
+
+ return phy_ethtool_set_eee(priv->phydev, edata);
+}
+
+/* Convert microseconds to RX interrupt-watchdog units (256 CSR clock
+ * cycles each); returns 0 when the clock rate is unknown.
+ */
+static u32 stmmac_usec2riwt(u32 usec, struct stmmac_priv *priv)
+{
+ unsigned long rate = clk_get_rate(priv->stmmac_clk);
+ unsigned long cycles_per_us;
+
+ if (rate == 0)
+ return 0;
+
+ cycles_per_us = rate / 1000000;
+ return (usec * cycles_per_us) / 256;
+}
+
+/* Convert RX interrupt-watchdog units (256 CSR clock cycles each) back
+ * to microseconds; returns 0 when the clock rate is unknown.
+ */
+static u32 stmmac_riwt2usec(u32 riwt, struct stmmac_priv *priv)
+{
+ unsigned long rate = clk_get_rate(priv->stmmac_clk);
+ unsigned long cycles_per_us;
+
+ if (rate == 0)
+ return 0;
+
+ cycles_per_us = rate / 1000000;
+ return (riwt * 256) / cycles_per_us;
+}
+
+/* Report TX coalescing (timer/frames) and, when the RX watchdog is in
+ * use, the RX coalescing time converted back to microseconds.
+ */
+static int stmmac_get_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *ec)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ ec->tx_coalesce_usecs = priv->tx_coal_timer;
+ ec->tx_max_coalesced_frames = priv->tx_coal_frames;
+
+ if (priv->use_riwt)
+ ec->rx_coalesce_usecs = stmmac_riwt2usec(priv->rx_riwt, priv);
+
+ return 0;
+}
+
+/* Apply TX coalescing limits and program the RX interrupt watchdog.
+ * NOTE(review): the riwt range check runs before the !use_riwt bail-out,
+ * so unsupported setups may see -EINVAL instead of -EOPNOTSUPP for
+ * out-of-range values -- confirm the intended precedence.
+ */
+static int stmmac_set_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *ec)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ unsigned int rx_riwt;
+
+ /* Check not supported parameters */
+ if ((ec->rx_max_coalesced_frames) || (ec->rx_coalesce_usecs_irq) ||
+ (ec->rx_max_coalesced_frames_irq) || (ec->tx_coalesce_usecs_irq) ||
+ (ec->use_adaptive_rx_coalesce) || (ec->use_adaptive_tx_coalesce) ||
+ (ec->pkt_rate_low) || (ec->rx_coalesce_usecs_low) ||
+ (ec->rx_max_coalesced_frames_low) || (ec->tx_coalesce_usecs_high) ||
+ (ec->tx_max_coalesced_frames_low) || (ec->pkt_rate_high) ||
+ (ec->tx_coalesce_usecs_low) || (ec->rx_coalesce_usecs_high) ||
+ (ec->rx_max_coalesced_frames_high) ||
+ (ec->tx_max_coalesced_frames_irq) ||
+ (ec->stats_block_coalesce_usecs) ||
+ (ec->tx_max_coalesced_frames_high) || (ec->rate_sample_interval))
+ return -EOPNOTSUPP;
+
+ if (ec->rx_coalesce_usecs == 0)
+ return -EINVAL;
+
+ if ((ec->tx_coalesce_usecs == 0) &&
+ (ec->tx_max_coalesced_frames == 0))
+ return -EINVAL;
+
+ if ((ec->tx_coalesce_usecs > STMMAC_COAL_TX_TIMER) ||
+ (ec->tx_max_coalesced_frames > STMMAC_TX_MAX_FRAMES))
+ return -EINVAL;
+
+ rx_riwt = stmmac_usec2riwt(ec->rx_coalesce_usecs, priv);
+
+ if ((rx_riwt > MAX_DMA_RIWT) || (rx_riwt < MIN_DMA_RIWT))
+ return -EINVAL;
+ else if (!priv->use_riwt)
+ return -EOPNOTSUPP;
+
+ /* Only copy relevant parameters, ignore all others. */
+ priv->tx_coal_frames = ec->tx_max_coalesced_frames;
+ priv->tx_coal_timer = ec->tx_coalesce_usecs;
+ priv->rx_riwt = rx_riwt;
+ priv->hw->dma->rx_watchdog(priv->ioaddr, priv->rx_riwt);
+
+ return 0;
+}
+
+/* Report hardware timestamping capabilities when both TX and RX HW
+ * timestamping are enabled; otherwise fall back to the generic helper.
+ */
+static int stmmac_get_ts_info(struct net_device *dev,
+ struct ethtool_ts_info *info)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ if ((priv->hwts_tx_en) && (priv->hwts_rx_en)) {
+
+ info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ if (priv->ptp_clock)
+ info->phc_index = ptp_clock_index(priv->ptp_clock);
+
+ info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
+
+ info->rx_filters = ((1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_ALL));
+ return 0;
+ } else
+ return ethtool_op_get_ts_info(dev, info);
+}
+
+/* ethtool entry points; ->begin gates every op on the interface being up. */
+static const struct ethtool_ops stmmac_ethtool_ops = {
+ .begin = stmmac_check_if_running,
+ .get_drvinfo = stmmac_ethtool_getdrvinfo,
+ .get_settings = stmmac_ethtool_getsettings,
+ .set_settings = stmmac_ethtool_setsettings,
+ .get_msglevel = stmmac_ethtool_getmsglevel,
+ .set_msglevel = stmmac_ethtool_setmsglevel,
+ .get_regs = stmmac_ethtool_gregs,
+ .get_regs_len = stmmac_ethtool_get_regs_len,
+ .get_link = ethtool_op_get_link,
+ .get_pauseparam = stmmac_get_pauseparam,
+ .set_pauseparam = stmmac_set_pauseparam,
+ .get_ethtool_stats = stmmac_get_ethtool_stats,
+ .get_strings = stmmac_get_strings,
+ .get_wol = stmmac_get_wol,
+ .set_wol = stmmac_set_wol,
+ .get_eee = stmmac_ethtool_op_get_eee,
+ .set_eee = stmmac_ethtool_op_set_eee,
+ .get_sset_count = stmmac_get_sset_count,
+ .get_ts_info = stmmac_get_ts_info,
+ .get_coalesce = stmmac_get_coalesce,
+ .set_coalesce = stmmac_set_coalesce,
+};
+
+/* Attach the ethtool callbacks to @netdev. */
+void stmmac_set_ethtool_ops(struct net_device *netdev)
+{
+ SET_ETHTOOL_OPS(netdev, &stmmac_ethtool_ops);
+}
--- /dev/null
+/*******************************************************************************
+ Copyright (C) 2013 Vayavya Labs Pvt Ltd
+
+ This implements all the API for managing HW timestamp & PTP.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Rayagond Kokatanur <rayagond@vayavyalabs.com>
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/io.h>
+#include <linux/delay.h>
+#include "common.h"
+#include "stmmac_ptp.h"
+
+/* Write @data to the Timestamp Control Register (PTP_TCR). */
+static void stmmac_config_hw_tstamping(void __iomem *ioaddr, u32 data)
+{
+ writel(data, ioaddr + PTP_TCR);
+}
+
+/* Program the sub-second increment register; the PTP reference clock is
+ * hard-coded to 50MHz below, and digital-rollover mode scales the
+ * increment for ~0.465ns resolution.
+ */
+static void stmmac_config_sub_second_increment(void __iomem *ioaddr)
+{
+ u32 value = readl(ioaddr + PTP_TCR);
+ unsigned long data;
+
+ /* Convert the ptp_clock to nano second
+ * formula = (1/ptp_clock) * 1000000000
+ * where, ptp_clock = 50MHz.
+ */
+ data = (1000000000ULL / 50000000);
+
+ /* 0.465ns accuracy */
+ if (value & PTP_TCR_TSCTRLSSR)
+ data = (data * 100) / 465;
+
+ writel(data, ioaddr + PTP_SSIR);
+}
+
+/* Load @sec/@nsec into the system-time update registers and trigger the
+ * TSINIT command; polls up to 10 x 10ms for the bit to self-clear.
+ * Returns 0 on success or -EBUSY if the hardware never completed.
+ */
+static int stmmac_init_systime(void __iomem *ioaddr, u32 sec, u32 nsec)
+{
+ int limit;
+ u32 value;
+
+ writel(sec, ioaddr + PTP_STSUR);
+ writel(nsec, ioaddr + PTP_STNSUR);
+ /* issue command to initialize the system time value */
+ value = readl(ioaddr + PTP_TCR);
+ value |= PTP_TCR_TSINIT;
+ writel(value, ioaddr + PTP_TCR);
+
+ /* wait for present system time initialize to complete */
+ limit = 10;
+ while (limit--) {
+ if (!(readl(ioaddr + PTP_TCR) & PTP_TCR_TSINIT))
+ break;
+ mdelay(10);
+ }
+ if (limit < 0)
+ return -EBUSY;
+
+ return 0;
+}
+
+/* Write the frequency-compensation @addend into the Timestamp Addend
+ * Register and latch it with the TSADDREG command; polls up to 10 x 10ms
+ * for completion. Returns 0 on success, -EBUSY on timeout.
+ */
+static int stmmac_config_addend(void __iomem *ioaddr, u32 addend)
+{
+ u32 value;
+ int limit;
+
+ writel(addend, ioaddr + PTP_TAR);
+ /* issue command to update the addend value */
+ value = readl(ioaddr + PTP_TCR);
+ value |= PTP_TCR_TSADDREG;
+ writel(value, ioaddr + PTP_TCR);
+
+ /* wait for present addend update to complete */
+ limit = 10;
+ while (limit--) {
+ if (!(readl(ioaddr + PTP_TCR) & PTP_TCR_TSADDREG))
+ break;
+ mdelay(10);
+ }
+ if (limit < 0)
+ return -EBUSY;
+
+ return 0;
+}
+
+/* Apply a @sec/@nsec offset to the hardware clock; @add_sub selects the
+ * direction via the ADDSUB bit folded into the nanoseconds register.
+ * Polls up to 10 x 10ms for TSUPDT to clear; returns 0 or -EBUSY.
+ */
+static int stmmac_adjust_systime(void __iomem *ioaddr, u32 sec, u32 nsec,
+ int add_sub)
+{
+ u32 value;
+ int limit;
+
+ writel(sec, ioaddr + PTP_STSUR);
+ writel(((add_sub << PTP_STNSUR_ADDSUB_SHIFT) | nsec),
+ ioaddr + PTP_STNSUR);
+ /* issue command to initialize the system time value */
+ value = readl(ioaddr + PTP_TCR);
+ value |= PTP_TCR_TSUPDT;
+ writel(value, ioaddr + PTP_TCR);
+
+ /* wait for present system time adjust/update to complete */
+ limit = 10;
+ while (limit--) {
+ if (!(readl(ioaddr + PTP_TCR) & PTP_TCR_TSUPDT))
+ break;
+ mdelay(10);
+ }
+ if (limit < 0)
+ return -EBUSY;
+
+ return 0;
+}
+
+/* Read the current hardware time as nanoseconds since the epoch the
+ * clock was initialized with.
+ * NOTE(review): STNSR and STSR are read non-atomically; a seconds
+ * rollover between the two reads could skew the result - confirm the
+ * hardware latches a coherent snapshot.
+ */
+static u64 stmmac_get_systime(void __iomem *ioaddr)
+{
+ u64 ns;
+
+ ns = readl(ioaddr + PTP_STNSR);
+ /* convert sec time value to nanosecond */
+ ns += readl(ioaddr + PTP_STSR) * 1000000000ULL;
+
+ return ns;
+}
+
+/* HW timestamping callback table exported to the core driver. */
+const struct stmmac_hwtimestamp stmmac_ptp = {
+ .config_hw_tstamping = stmmac_config_hw_tstamping,
+ .init_systime = stmmac_init_systime,
+ .config_sub_second_increment = stmmac_config_sub_second_increment,
+ .config_addend = stmmac_config_addend,
+ .adjust_systime = stmmac_adjust_systime,
+ .get_systime = stmmac_get_systime,
+};
--- /dev/null
+/*******************************************************************************
+ This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
+ ST Ethernet IPs are built around a Synopsys IP Core.
+
+ Copyright(C) 2007-2011 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+
+ Documentation available at:
+ http://www.stlinux.com
+ Support available at:
+ https://bugzilla.stlinux.com/
+*******************************************************************************/
+
+#include <linux/clk.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/skbuff.h>
+#include <linux/ethtool.h>
+#include <linux/if_ether.h>
+#include <linux/crc32.h>
+#include <linux/mii.h>
+#include <linux/if.h>
+#include <linux/if_vlan.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/prefetch.h>
+#ifdef CONFIG_GMAC_DEBUG_FS
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#endif /* CONFIG_GMAC_DEBUG_FS */
+#include <linux/net_tstamp.h>
+#include "stmmac_ptp.h"
+#include "stmmac.h"
+
+/* Compile-time debug switches. DBG() is gated on the netif_msg level and
+ * expects a local 'priv' to be in scope at the call site; RX_DBG()/TX_DBG()
+ * are unconditional printk()s when their switch is defined.
+ */
+#undef STMMAC_DEBUG
+/*#define STMMAC_DEBUG*/
+#ifdef STMMAC_DEBUG
+#define DBG(nlevel, klevel, fmt, args...) \
+ ((void)(netif_msg_##nlevel(priv) && \
+ printk(KERN_##klevel fmt, ## args)))
+#else
+#define DBG(nlevel, klevel, fmt, args...) do { } while (0)
+#endif
+
+#undef STMMAC_RX_DEBUG
+/*#define STMMAC_RX_DEBUG*/
+#ifdef STMMAC_RX_DEBUG
+#define RX_DBG(fmt, args...) printk(fmt, ## args)
+#else
+#define RX_DBG(fmt, args...) do { } while (0)
+#endif
+
+#undef STMMAC_XMIT_DEBUG
+/*#define STMMAC_XMIT_DEBUG*/
+#ifdef STMMAC_XMIT_DEBUG
+#define TX_DBG(fmt, args...) printk(fmt, ## args)
+#else
+#define TX_DBG(fmt, args...) do { } while (0)
+#endif
+
+#define STMMAC_ALIGN(x) L1_CACHE_ALIGN(x)
+#define JUMBO_LEN 9000
+
+/* Module parameters */
+#define TX_TIMEO 5000
+static int watchdog = TX_TIMEO;
+module_param(watchdog, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
+
+static int debug = -1;
+module_param(debug, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
+
+int phyaddr = -1;
+module_param(phyaddr, int, S_IRUGO);
+MODULE_PARM_DESC(phyaddr, "Physical device address");
+
+#define DMA_TX_SIZE 256
+static int dma_txsize = DMA_TX_SIZE;
+module_param(dma_txsize, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(dma_txsize, "Number of descriptors in the TX list");
+
+#define DMA_RX_SIZE 256
+static int dma_rxsize = DMA_RX_SIZE;
+module_param(dma_rxsize, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(dma_rxsize, "Number of descriptors in the RX list");
+
+static int flow_ctrl = FLOW_OFF;
+module_param(flow_ctrl, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
+
+static int pause = PAUSE_TIME;
+module_param(pause, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(pause, "Flow Control Pause Time");
+
+#define TC_DEFAULT 64
+static int tc = TC_DEFAULT;
+module_param(tc, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(tc, "DMA threshold control value");
+
+#define DMA_BUFFER_SIZE BUF_SIZE_2KiB
+static int buf_sz = DMA_BUFFER_SIZE;
+module_param(buf_sz, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(buf_sz, "DMA buffer size");
+
+static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
+ NETIF_MSG_LINK | NETIF_MSG_IFUP |
+ NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
+
+#define STMMAC_DEFAULT_LPI_TIMER 1000
+static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
+module_param(eee_timer, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
+#define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))
+
+/* By default the driver will use the ring mode to manage tx and rx descriptors
+ * but passing this value so user can force to use the chain instead of the ring
+ */
+/* NOTE(review): chain_mode is declared 'unsigned int' but registered with
+ * module_param(..., int, ...). The mismatched types trip __param_check()
+ * at build time - confirm and make both 'int' or both 'uint'.
+ */
+static unsigned int chain_mode;
+module_param(chain_mode, int, S_IRUGO);
+MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
+
+static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
+
+#ifdef CONFIG_GMAC_DEBUG_FS
+static int stmmac_init_fs(struct net_device *dev);
+static void stmmac_exit_fs(void);
+#endif
+
+/* Deadline, in jiffies, that is x microseconds from now. */
+#define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
+
+/**
+ * stmmac_verify_args - verify the driver parameters.
+ * Description: it verifies if some wrong parameter is passed to the driver.
+ * Note that wrong parameters are replaced with the default values.
+ */
+static void stmmac_verify_args(void)
+{
+ if (unlikely(watchdog < 0))
+ watchdog = TX_TIMEO;
+ if (unlikely(dma_rxsize < 0))
+ dma_rxsize = DMA_RX_SIZE;
+ if (unlikely(dma_txsize < 0))
+ dma_txsize = DMA_TX_SIZE;
+ if (unlikely((buf_sz < DMA_BUFFER_SIZE) || (buf_sz > BUF_SIZE_16KiB)))
+ buf_sz = DMA_BUFFER_SIZE;
+ if (unlikely(flow_ctrl > 1))
+ flow_ctrl = FLOW_AUTO;
+ /* NOTE(review): likely() on this out-of-range branch looks inverted
+ * (a negative flow_ctrl should be rare) - confirm intent; the hint
+ * only affects branch layout, not behavior.
+ */
+ else if (likely(flow_ctrl < 0))
+ flow_ctrl = FLOW_OFF;
+ if (unlikely((pause < 0) || (pause > 0xffff)))
+ pause = PAUSE_TIME;
+ if (eee_timer < 0)
+ eee_timer = STMMAC_DEFAULT_LPI_TIMER;
+}
+
+/**
+ * stmmac_clk_csr_set - dynamically set the MDC clock
+ * @priv: driver private structure
+ * Description: this is to dynamically set the MDC clock according to the csr
+ * clock input.
+ * Note:
+ * If a specific clk_csr value is passed from the platform
+ * this means that the CSR Clock Range selection cannot be
+ * changed at run-time and it is fixed (as reported in the driver
+ * documentation). Viceversa the driver will try to set the MDC
+ * clock dynamically according to the actual clock input.
+ */
+static void stmmac_clk_csr_set(struct stmmac_priv *priv)
+{
+ u32 clk_rate;
+
+ clk_rate = clk_get_rate(priv->stmmac_clk);
+
+ /* Platform provided default clk_csr would be assumed valid
+ * for all other cases except for the below mentioned ones.
+ * For values higher than the IEEE 802.3 specified frequency
+ * we can not estimate the proper divider as it is not known
+ * the frequency of clk_csr_i. So we do not change the default
+ * divider.
+ */
+ /* A clk_csr with the high-frequency mask set is platform-fixed and
+ * is deliberately left untouched.
+ */
+ if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
+ if (clk_rate < CSR_F_35M)
+ priv->clk_csr = STMMAC_CSR_20_35M;
+ else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
+ priv->clk_csr = STMMAC_CSR_35_60M;
+ else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
+ priv->clk_csr = STMMAC_CSR_60_100M;
+ else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
+ priv->clk_csr = STMMAC_CSR_100_150M;
+ else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
+ priv->clk_csr = STMMAC_CSR_150_250M;
+ else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
+ priv->clk_csr = STMMAC_CSR_250_300M;
+ }
+}
+
+#if defined(STMMAC_XMIT_DEBUG) || defined(STMMAC_RX_DEBUG)
+/* Hex-dump @len bytes of @buf, 16 bytes per row, for debug builds only.
+ * NOTE(review): each pr_info() may start a new log line on kernels that
+ * require KERN_CONT for continuations - confirm the output formatting.
+ */
+static void print_pkt(unsigned char *buf, int len)
+{
+ int j;
+ pr_info("len = %d byte, buf addr: 0x%p", len, buf);
+ for (j = 0; j < len; j++) {
+ if ((j % 16) == 0)
+ pr_info("\n %03x:", j);
+ pr_info(" %02x", buf[j]);
+ }
+ pr_info("\n");
+}
+#endif
+
+/* minimum number of free TX descriptors required to wake up TX process */
+#define STMMAC_TX_THRESH(x) (x->dma_tx_size/4)
+
+/* Number of free TX descriptors. The unsigned arithmetic handles the
+ * cur_tx/dirty_tx wrap-around; the -1 keeps one slot unused so a full
+ * ring is distinguishable from an empty one.
+ */
+static inline u32 stmmac_tx_avail(struct stmmac_priv *priv)
+{
+ return priv->dirty_tx + priv->dma_tx_size - priv->cur_tx - 1;
+}
+
+/**
+ * stmmac_hw_fix_mac_speed: callback for speed selection
+ * @priv: driver private structure
+ * Description: on some platforms (e.g. ST), some HW system configuraton
+ * registers have to be set according to the link speed negotiated.
+ * Silently does nothing when the platform provides no fix_mac_speed hook.
+ */
+static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
+{
+ struct phy_device *phydev = priv->phydev;
+
+ if (likely(priv->plat->fix_mac_speed))
+ priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
+}
+
+/**
+ * stmmac_enable_eee_mode: Check and enter in LPI mode
+ * @priv: driver private structure
+ * Description: this function is to verify and enter in LPI mode for EEE.
+ * LPI is only entered when the TX ring is fully drained
+ * (dirty_tx == cur_tx) and we are not already in LPI.
+ */
+static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
+{
+ /* Check and enter in LPI mode */
+ if ((priv->dirty_tx == priv->cur_tx) &&
+ (priv->tx_path_in_lpi_mode == false))
+ priv->hw->mac->set_eee_mode(priv->ioaddr);
+}
+
+/**
+ * stmmac_disable_eee_mode: disable/exit from EEE
+ * @priv: driver private structure
+ * Description: this function is to exit and disable EEE in case of
+ * LPI state is true. This is called by the xmit.
+ * Also stops the SW LPI timer so it cannot re-enter LPI behind us.
+ */
+void stmmac_disable_eee_mode(struct stmmac_priv *priv)
+{
+ priv->hw->mac->reset_eee_mode(priv->ioaddr);
+ del_timer_sync(&priv->eee_ctrl_timer);
+ priv->tx_path_in_lpi_mode = false;
+}
+
+/**
+ * stmmac_eee_ctrl_timer: EEE TX SW timer.
+ * @arg : data hook
+ * Description:
+ * if there is no data transfer and if we are not in LPI state,
+ * then MAC Transmitter can be moved to LPI state.
+ * Self-rearms every eee_timer msecs.
+ */
+static void stmmac_eee_ctrl_timer(unsigned long arg)
+{
+ struct stmmac_priv *priv = (struct stmmac_priv *)arg;
+
+ stmmac_enable_eee_mode(priv);
+ mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
+}
+
+/**
+ * stmmac_eee_init: init EEE
+ * @priv: driver private structure
+ * Description:
+ * If the EEE support has been enabled while configuring the driver,
+ * if the GMAC actually supports the EEE (from the HW cap reg) and the
+ * phy can also manage EEE, so enable the LPI state and start the timer
+ * to verify if the tx path can enter in LPI state.
+ * Return: true if EEE was actually enabled, false otherwise.
+ */
+bool stmmac_eee_init(struct stmmac_priv *priv)
+{
+ bool ret = false;
+
+ /* Using PCS we cannot dial with the phy registers at this stage
+ * so we do not support extra feature like EEE.
+ */
+ if ((priv->pcs == STMMAC_PCS_RGMII) || (priv->pcs == STMMAC_PCS_TBI) ||
+ (priv->pcs == STMMAC_PCS_RTBI))
+ goto out;
+
+ /* MAC core supports the EEE feature. */
+ if (priv->dma_cap.eee) {
+ /* Check if the PHY supports EEE */
+ if (phy_init_eee(priv->phydev, 1))
+ goto out;
+
+ /* First call: arm the LPI SW timer and program the HW LPI
+ * timers; later calls just resync HW EEE with the link state.
+ */
+ if (!priv->eee_active) {
+ priv->eee_active = 1;
+ init_timer(&priv->eee_ctrl_timer);
+ priv->eee_ctrl_timer.function = stmmac_eee_ctrl_timer;
+ priv->eee_ctrl_timer.data = (unsigned long)priv;
+ priv->eee_ctrl_timer.expires = STMMAC_LPI_T(eee_timer);
+ add_timer(&priv->eee_ctrl_timer);
+
+ priv->hw->mac->set_eee_timer(priv->ioaddr,
+ STMMAC_DEFAULT_LIT_LS,
+ priv->tx_lpi_timer);
+ } else
+ /* Set HW EEE according to the speed */
+ priv->hw->mac->set_eee_pls(priv->ioaddr,
+ priv->phydev->link);
+
+ pr_info("stmmac: Energy-Efficient Ethernet initialized\n");
+
+ ret = true;
+ }
+out:
+ return ret;
+}
+
+/* stmmac_get_tx_hwtstamp: get HW TX timestamps
+ * @priv: driver private structure
+ * @entry : descriptor index to be used.
+ * @skb : the socket buffer
+ * Description :
+ * This function will read timestamp from the descriptor & pass it to stack.
+ * and also perform some sanity checks.
+ */
+static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
+ unsigned int entry, struct sk_buff *skb)
+{
+ struct skb_shared_hwtstamps shhwtstamp;
+ u64 ns;
+ void *desc = NULL;
+
+ if (!priv->hwts_tx_en)
+ return;
+
+ /* exit if skb doesn't support hw tstamp */
+ if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
+ return;
+
+ /* extended descriptors are used when advanced (v2) timestamping is on */
+ if (priv->adv_ts)
+ desc = (priv->dma_etx + entry);
+ else
+ desc = (priv->dma_tx + entry);
+
+ /* check tx tstamp status */
+ if (!priv->hw->desc->get_tx_timestamp_status((struct dma_desc *)desc))
+ return;
+
+ /* get the valid tstamp */
+ ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts);
+
+ memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
+ shhwtstamp.hwtstamp = ns_to_ktime(ns);
+ /* pass tstamp to stack */
+ skb_tstamp_tx(skb, &shhwtstamp);
+
+ return;
+}
+
+/* stmmac_get_rx_hwtstamp: get HW RX timestamps
+ * @priv: driver private structure
+ * @entry : descriptor index to be used.
+ * @skb : the socket buffer
+ * Description :
+ * This function will read received packet's timestamp from the descriptor
+ * and pass it to stack. It also perform some sanity checks.
+ */
+static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv,
+ unsigned int entry, struct sk_buff *skb)
+{
+ struct skb_shared_hwtstamps *shhwtstamp = NULL;
+ u64 ns;
+ void *desc = NULL;
+
+ if (!priv->hwts_rx_en)
+ return;
+
+ /* extended descriptors are used when advanced (v2) timestamping is on */
+ if (priv->adv_ts)
+ desc = (priv->dma_erx + entry);
+ else
+ desc = (priv->dma_rx + entry);
+
+ /* exit if rx tstamp is not valid */
+ if (!priv->hw->desc->get_rx_timestamp_status(desc, priv->adv_ts))
+ return;
+
+ /* get valid tstamp */
+ ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts);
+ shhwtstamp = skb_hwtstamps(skb);
+ memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
+ shhwtstamp->hwtstamp = ns_to_ktime(ns);
+}
+
+/**
+ * stmmac_hwtstamp_ioctl - control hardware timestamping.
+ * @dev: device pointer.
+ * @ifr: An IOCTL specefic structure, that can contain a pointer to
+ * a proprietary structure used to pass information to the driver.
+ * Description:
+ * This function configures the MAC to enable/disable both outgoing(TX)
+ * and incoming(RX) packets time stamping based on user input.
+ * Return Value:
+ * 0 on success and an appropriate -ve integer on failure.
+ */
+static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ struct hwtstamp_config config;
+ struct timespec now;
+ u64 temp = 0;
+ u32 ptp_v2 = 0;
+ u32 tstamp_all = 0;
+ u32 ptp_over_ipv4_udp = 0;
+ u32 ptp_over_ipv6_udp = 0;
+ u32 ptp_over_ethernet = 0;
+ u32 snap_type_sel = 0;
+ u32 ts_master_en = 0;
+ u32 ts_event_en = 0;
+ u32 value = 0;
+
+ if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
+ netdev_alert(priv->dev, "No support for HW time stamping\n");
+ priv->hwts_tx_en = 0;
+ priv->hwts_rx_en = 0;
+
+ return -EOPNOTSUPP;
+ }
+
+ if (copy_from_user(&config, ifr->ifr_data,
+ sizeof(struct hwtstamp_config)))
+ return -EFAULT;
+
+ pr_debug("%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
+ __func__, config.flags, config.tx_type, config.rx_filter);
+
+ /* reserved for future extensions */
+ if (config.flags)
+ return -EINVAL;
+
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ priv->hwts_tx_en = 0;
+ break;
+ case HWTSTAMP_TX_ON:
+ priv->hwts_tx_en = 1;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ /* With advanced (v2) timestamping the requested filter is mapped to
+ * the matching TCR bit combination; without it, anything other than
+ * "none" is coerced to the all-V1-L4-events filter.
+ */
+ if (priv->adv_ts) {
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ /* time stamp no incoming packet at all */
+ config.rx_filter = HWTSTAMP_FILTER_NONE;
+ break;
+
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ /* PTP v1, UDP, any kind of event packet */
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+ /* take time stamp for all event messages */
+ snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
+
+ ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
+ ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
+ break;
+
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ /* PTP v1, UDP, Sync packet */
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
+ /* take time stamp for SYNC messages only */
+ ts_event_en = PTP_TCR_TSEVNTENA;
+
+ ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
+ ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
+ break;
+
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ /* PTP v1, UDP, Delay_req packet */
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
+ /* take time stamp for Delay_Req messages only */
+ ts_master_en = PTP_TCR_TSMSTRENA;
+ ts_event_en = PTP_TCR_TSEVNTENA;
+
+ ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
+ ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
+ break;
+
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ /* PTP v2, UDP, any kind of event packet */
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
+ ptp_v2 = PTP_TCR_TSVER2ENA;
+ /* take time stamp for all event messages */
+ snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
+
+ ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
+ ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
+ break;
+
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ /* PTP v2, UDP, Sync packet */
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
+ ptp_v2 = PTP_TCR_TSVER2ENA;
+ /* take time stamp for SYNC messages only */
+ ts_event_en = PTP_TCR_TSEVNTENA;
+
+ ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
+ ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
+ break;
+
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ /* PTP v2, UDP, Delay_req packet */
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
+ ptp_v2 = PTP_TCR_TSVER2ENA;
+ /* take time stamp for Delay_Req messages only */
+ ts_master_en = PTP_TCR_TSMSTRENA;
+ ts_event_en = PTP_TCR_TSEVNTENA;
+
+ ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
+ ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
+ break;
+
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ /* PTP v2/802.AS1 any layer, any kind of event packet */
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ ptp_v2 = PTP_TCR_TSVER2ENA;
+ /* take time stamp for all event messages */
+ snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
+
+ ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
+ ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
+ ptp_over_ethernet = PTP_TCR_TSIPENA;
+ break;
+
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ /* PTP v2/802.AS1, any layer, Sync packet */
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
+ ptp_v2 = PTP_TCR_TSVER2ENA;
+ /* take time stamp for SYNC messages only */
+ ts_event_en = PTP_TCR_TSEVNTENA;
+
+ ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
+ ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
+ ptp_over_ethernet = PTP_TCR_TSIPENA;
+ break;
+
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ /* PTP v2/802.AS1, any layer, Delay_req packet */
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
+ ptp_v2 = PTP_TCR_TSVER2ENA;
+ /* take time stamp for Delay_Req messages only */
+ ts_master_en = PTP_TCR_TSMSTRENA;
+ ts_event_en = PTP_TCR_TSEVNTENA;
+
+ ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
+ ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
+ ptp_over_ethernet = PTP_TCR_TSIPENA;
+ break;
+
+ case HWTSTAMP_FILTER_ALL:
+ /* time stamp any incoming packet */
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ tstamp_all = PTP_TCR_TSENALL;
+ break;
+
+ default:
+ return -ERANGE;
+ }
+ } else {
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ config.rx_filter = HWTSTAMP_FILTER_NONE;
+ break;
+ default:
+ /* PTP v1, UDP, any kind of event packet */
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+ break;
+ }
+ }
+ /* RX timestamping is enabled iff some filter was selected */
+ priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
+
+ if (!priv->hwts_tx_en && !priv->hwts_rx_en)
+ priv->hw->ptp->config_hw_tstamping(priv->ioaddr, 0);
+ else {
+ value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
+ tstamp_all | ptp_v2 | ptp_over_ethernet |
+ ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
+ ts_master_en | snap_type_sel);
+
+ priv->hw->ptp->config_hw_tstamping(priv->ioaddr, value);
+
+ /* program Sub Second Increment reg */
+ priv->hw->ptp->config_sub_second_increment(priv->ioaddr);
+
+ /* calculate default added value:
+ * formula is :
+ * addend = (2^32)/freq_div_ratio;
+ * where, freq_div_ratio = STMMAC_SYSCLOCK/50MHz
+ * hence, addend = ((2^32) * 50MHz)/STMMAC_SYSCLOCK;
+ * NOTE: STMMAC_SYSCLOCK should be >= 50MHz to
+ * achive 20ns accuracy.
+ *
+ * 2^x * y == (y << x), hence
+ * 2^32 * 50000000 ==> (50000000 << 32)
+ */
+ temp = (u64) (50000000ULL << 32);
+ priv->default_addend = div_u64(temp, STMMAC_SYSCLOCK);
+ priv->hw->ptp->config_addend(priv->ioaddr,
+ priv->default_addend);
+
+ /* initialize system time */
+ getnstimeofday(&now);
+ priv->hw->ptp->init_systime(priv->ioaddr, now.tv_sec,
+ now.tv_nsec);
+ }
+
+ return copy_to_user(ifr->ifr_data, &config,
+ sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
+}
+
+/**
+ * stmmac_init_ptp: init PTP
+ * @priv: driver private structure
+ * Description: this is to verify if the HW supports the PTPv1 or v2.
+ * This is done by looking at the HW cap. register.
+ * Also it registers the ptp driver.
+ * Return: 0 on success, -EOPNOTSUPP when no timestamp unit is present,
+ * or the error from stmmac_ptp_register().
+ */
+static int stmmac_init_ptp(struct stmmac_priv *priv)
+{
+ if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
+ return -EOPNOTSUPP;
+
+ /* Select the timestamping flavour unconditionally. The original code
+ * assigned priv->adv_ts only inside the netif_msg_hw() branch, so the
+ * advanced (IEEE 1588-2008) timestamp support silently depended on
+ * the debug message level.
+ */
+ priv->adv_ts = 0;
+ if (priv->dma_cap.atime_stamp && priv->extend_desc)
+ priv->adv_ts = 1;
+
+ if (netif_msg_hw(priv)) {
+ if (priv->dma_cap.time_stamp)
+ pr_debug("IEEE 1588-2002 Time Stamp supported\n");
+ if (priv->adv_ts)
+ pr_debug
+ ("IEEE 1588-2008 Advanced Time Stamp supported\n");
+ }
+
+ priv->hw->ptp = &stmmac_ptp;
+ priv->hwts_tx_en = 0;
+ priv->hwts_rx_en = 0;
+
+ return stmmac_ptp_register(priv);
+}
+
+/* Tear down the PTP clock registered by stmmac_init_ptp(). */
+static void stmmac_release_ptp(struct stmmac_priv *priv)
+{
+ stmmac_ptp_unregister(priv);
+}
+
+/**
+ * stmmac_adjust_link
+ * @dev: net device structure
+ * Description: it adjusts the link parameters.
+ * Called by phylib whenever the PHY reports a link change; updates the
+ * MAC control register (duplex/speed/port bits), flow control and EEE
+ * state under priv->lock.
+ */
+static void stmmac_adjust_link(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ struct phy_device *phydev = priv->phydev;
+ unsigned long flags;
+ int new_state = 0;
+ unsigned int fc = priv->flow_ctrl, pause_time = priv->pause;
+
+ if (phydev == NULL)
+ return;
+
+ DBG(probe, DEBUG, "stmmac_adjust_link: called. address %d link %d\n",
+ phydev->addr, phydev->link);
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ if (phydev->link) {
+ u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
+
+ /* Now we make sure that we can be in full duplex mode.
+ * If not, we operate in half-duplex mode. */
+ if (phydev->duplex != priv->oldduplex) {
+ new_state = 1;
+ if (!(phydev->duplex))
+ ctrl &= ~priv->hw->link.duplex;
+ else
+ ctrl |= priv->hw->link.duplex;
+ priv->oldduplex = phydev->duplex;
+ }
+ /* Flow Control operation */
+ if (phydev->pause)
+ priv->hw->mac->flow_ctrl(priv->ioaddr, phydev->duplex,
+ fc, pause_time);
+
+ if (phydev->speed != priv->speed) {
+ new_state = 1;
+ switch (phydev->speed) {
+ case 1000:
+ /* GMII port bit cleared selects gigabit */
+ if (likely(priv->plat->has_gmac))
+ ctrl &= ~priv->hw->link.port;
+ stmmac_hw_fix_mac_speed(priv);
+ break;
+ case 100:
+ case 10:
+ if (priv->plat->has_gmac) {
+ ctrl |= priv->hw->link.port;
+ if (phydev->speed == SPEED_100) {
+ ctrl |= priv->hw->link.speed;
+ } else {
+ ctrl &= ~(priv->hw->link.speed);
+ }
+ } else {
+ ctrl &= ~priv->hw->link.port;
+ }
+ stmmac_hw_fix_mac_speed(priv);
+ break;
+ default:
+ if (netif_msg_link(priv))
+ pr_warn("%s: Speed (%d) not 10/100\n",
+ dev->name, phydev->speed);
+ break;
+ }
+
+ priv->speed = phydev->speed;
+ }
+
+ writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
+
+ if (!priv->oldlink) {
+ new_state = 1;
+ priv->oldlink = 1;
+ }
+ } else if (priv->oldlink) {
+ /* link went down: reset the cached link parameters */
+ new_state = 1;
+ priv->oldlink = 0;
+ priv->speed = 0;
+ priv->oldduplex = -1;
+ }
+
+ if (new_state && netif_msg_link(priv))
+ phy_print_status(phydev);
+
+ /* At this stage, it could be needed to setup the EEE or adjust some
+ * MAC related HW registers.
+ */
+ priv->eee_enabled = stmmac_eee_init(priv);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ DBG(probe, DEBUG, "stmmac_adjust_link: exiting\n");
+}
+
+/**
+ * stmmac_check_pcs_mode: verify if RGMII/SGMII is supported
+ * @priv: driver private structure
+ * Description: this is to verify if the HW supports the PCS.
+ * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
+ * configured for the TBI, RTBI, or SGMII PHY interface.
+ */
+static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
+{
+ int interface = priv->plat->interface;
+
+ if (priv->dma_cap.pcs) {
+ /* phy_interface_t values are plain enum constants, not bit
+ * flags, so they must be compared with '==': the previous
+ * bitwise '&' tests matched unrelated interface modes whose
+ * enum values happen to share bits.
+ */
+ if ((interface == PHY_INTERFACE_MODE_RGMII) ||
+ (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
+ (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
+ (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
+ pr_debug("STMMAC: PCS RGMII support enable\n");
+ priv->pcs = STMMAC_PCS_RGMII;
+ } else if (interface == PHY_INTERFACE_MODE_SGMII) {
+ pr_debug("STMMAC: PCS SGMII support enable\n");
+ priv->pcs = STMMAC_PCS_SGMII;
+ }
+ }
+}
+
+/**
+ * stmmac_init_phy - PHY initialization
+ * @dev: net device structure
+ * Description: it initializes the driver's PHY state, and attaches the PHY
+ * to the mac driver.
+ * Return value:
+ * 0 on success, PTR_ERR() from phy_connect() on attach failure, or
+ * -ENODEV when the probed PHY reads back a zero UID.
+ */
+static int stmmac_init_phy(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ struct phy_device *phydev;
+ char phy_id_fmt[MII_BUS_ID_SIZE + 3];
+ char bus_id[MII_BUS_ID_SIZE];
+ int interface = priv->plat->interface;
+ priv->oldlink = 0;
+ priv->speed = 0;
+ priv->oldduplex = -1;
+
+ /* Build the "<bus>-<id>:<addr>" device name phylib expects */
+ if (priv->plat->phy_bus_name)
+ snprintf(bus_id, MII_BUS_ID_SIZE, "%s-%x",
+ priv->plat->phy_bus_name, priv->plat->bus_id);
+ else
+ snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
+ priv->plat->bus_id);
+
+ snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
+ priv->plat->phy_addr);
+ pr_debug("stmmac_init_phy: trying to attach to %s\n", phy_id_fmt);
+
+ phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link, interface);
+
+ if (IS_ERR(phydev)) {
+ pr_err("%s: Could not attach to PHY\n", dev->name);
+ return PTR_ERR(phydev);
+ }
+
+ /* Stop Advertising 1000BASE Capability if interface is not GMII */
+ if ((interface == PHY_INTERFACE_MODE_MII) ||
+ (interface == PHY_INTERFACE_MODE_RMII))
+ phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
+ SUPPORTED_1000baseT_Full);
+
+ /*
+ * Broken HW is sometimes missing the pull-up resistor on the
+ * MDIO line, which results in reads to non-existent devices returning
+ * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
+ * device as well.
+ * Note: phydev->phy_id is the result of reading the UID PHY registers.
+ */
+ if (phydev->phy_id == 0) {
+ phy_disconnect(phydev);
+ return -ENODEV;
+ }
+ pr_debug("stmmac_init_phy: %s: attached to PHY (UID 0x%x)"
+ " Link = %d\n", dev->name, phydev->phy_id, phydev->link);
+
+ priv->phydev = phydev;
+
+ return 0;
+}
+
+/**
+ * stmmac_display_ring: display ring
+ * @head: pointer to the head of the ring passed.
+ * @size: size of the ring.
+ * @extend_desc: to verify if extended descriptors are used.
+ * Description: display the control/status and buffer descriptors.
+ * Dumps the first 64 bits of each descriptor plus its des2/des3 buffer
+ * pointers; the physical address is truncated to 32 bits for printing.
+ */
+static void stmmac_display_ring(void *head, int size, int extend_desc)
+{
+ int i;
+ struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
+ struct dma_desc *p = (struct dma_desc *)head;
+
+ for (i = 0; i < size; i++) {
+ u64 x;
+ if (extend_desc) {
+ x = *(u64 *) ep;
+ pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
+ i, (unsigned int)virt_to_phys(ep),
+ (unsigned int)x, (unsigned int)(x >> 32),
+ ep->basic.des2, ep->basic.des3);
+ ep++;
+ } else {
+ x = *(u64 *) p;
+ pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x",
+ i, (unsigned int)virt_to_phys(p),
+ (unsigned int)x, (unsigned int)(x >> 32),
+ p->des2, p->des3);
+ p++;
+ }
+ pr_info("\n");
+ }
+}
+
+/* Dump both RX and TX descriptor rings, in whichever descriptor layout
+ * (basic or extended) the driver is currently using.
+ */
+static void stmmac_display_rings(struct stmmac_priv *priv)
+{
+ unsigned int txsize = priv->dma_tx_size;
+ unsigned int rxsize = priv->dma_rx_size;
+
+ if (priv->extend_desc) {
+ pr_info("Extended RX descriptor ring:\n");
+ stmmac_display_ring((void *)priv->dma_erx, rxsize, 1);
+ pr_info("Extended TX descriptor ring:\n");
+ stmmac_display_ring((void *)priv->dma_etx, txsize, 1);
+ } else {
+ pr_info("RX descriptor ring:\n");
+ stmmac_display_ring((void *)priv->dma_rx, rxsize, 0);
+ pr_info("TX descriptor ring:\n");
+ stmmac_display_ring((void *)priv->dma_tx, txsize, 0);
+ }
+}
+
+/* Pick the DMA buffer size matching @mtu (2/4/8 KiB steps).
+ * NOTE(review): @bufsize only seeds ret and every branch overwrites it,
+ * so the parameter is effectively unused - confirm whether callers rely
+ * on it.
+ */
+static int stmmac_set_bfsize(int mtu, int bufsize)
+{
+ int ret = bufsize;
+
+ if (mtu >= BUF_SIZE_4KiB)
+ ret = BUF_SIZE_8KiB;
+ else if (mtu >= BUF_SIZE_2KiB)
+ ret = BUF_SIZE_4KiB;
+ else if (mtu >= DMA_BUFFER_SIZE)
+ ret = BUF_SIZE_2KiB;
+ else
+ ret = DMA_BUFFER_SIZE;
+
+ return ret;
+}
+
+/**
+ * stmmac_clear_descriptors: clear descriptors
+ * @priv: driver private structure
+ * Description: this function is called to clear the tx and rx descriptors
+ * in case of both basic and extended descriptors are used.
+ * The last descriptor of each ring is flagged (i == size - 1) so the
+ * init callbacks can mark the ring end.
+ */
+static void stmmac_clear_descriptors(struct stmmac_priv *priv)
+{
+ int i;
+ unsigned int txsize = priv->dma_tx_size;
+ unsigned int rxsize = priv->dma_rx_size;
+
+ /* Clear the Rx/Tx descriptors */
+ for (i = 0; i < rxsize; i++)
+ if (priv->extend_desc)
+ priv->hw->desc->init_rx_desc(&priv->dma_erx[i].basic,
+ priv->use_riwt, priv->mode,
+ (i == rxsize - 1));
+ else
+ priv->hw->desc->init_rx_desc(&priv->dma_rx[i],
+ priv->use_riwt, priv->mode,
+ (i == rxsize - 1));
+ for (i = 0; i < txsize; i++)
+ if (priv->extend_desc)
+ priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic,
+ priv->mode,
+ (i == txsize - 1));
+ else
+ priv->hw->desc->init_tx_desc(&priv->dma_tx[i],
+ priv->mode,
+ (i == txsize - 1));
+}
+
+/* Allocate and DMA-map one RX skb for ring slot @i, storing the mapping
+ * in descriptor @p. Returns 0 on success, 1 when the skb allocation fails.
+ */
+static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
+ int i)
+{
+ struct sk_buff *skb;
+
+ skb = __netdev_alloc_skb(priv->dev, priv->dma_buf_sz + NET_IP_ALIGN,
+ GFP_KERNEL);
+ if (unlikely(skb == NULL)) {
+ pr_err("%s: Rx init fails; skb is NULL\n", __func__);
+ return 1;
+ }
+ skb_reserve(skb, NET_IP_ALIGN);
+ priv->rx_skbuff[i] = skb;
+ /* NOTE(review): the dma_map_single() result is not checked with
+ * dma_mapping_error() - confirm on platforms with IOMMU/swiotlb.
+ */
+ priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
+ priv->dma_buf_sz,
+ DMA_FROM_DEVICE);
+
+ p->des2 = priv->rx_skbuff_dma[i];
+
+ /* 16KiB ring mode needs the second buffer pointer (des3) primed */
+ if ((priv->mode == STMMAC_RING_MODE) &&
+ (priv->dma_buf_sz == BUF_SIZE_16KiB))
+ priv->hw->ring->init_desc3(p);
+
+ return 0;
+}
+
+/**
+ * init_dma_desc_rings - init the RX/TX descriptor rings
+ * @dev: net device structure
+ * Description: this function initializes the DMA RX/TX descriptors
+ * and allocates the socket buffers. It suppors the chained and ring
+ * modes.
+ */
+static void init_dma_desc_rings(struct net_device *dev)
+{
+ int i;
+ struct stmmac_priv *priv = netdev_priv(dev);
+ unsigned int txsize = priv->dma_tx_size;
+ unsigned int rxsize = priv->dma_rx_size;
+ unsigned int bfsize = 0;
+
+ /* Set the max buffer size according to the DESC mode
+ * and the MTU. Note that RING mode allows 16KiB bsize.
+ */
+ if (priv->mode == STMMAC_RING_MODE)
+ bfsize = priv->hw->ring->set_16kib_bfsize(dev->mtu);
+
+ if (bfsize < BUF_SIZE_16KiB)
+ bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
+
+ DBG(probe, INFO, "stmmac: txsize %d, rxsize %d, bfsize %d\n",
+ txsize, rxsize, bfsize);
+
+ if (priv->extend_desc) {
+ priv->dma_erx = dma_alloc_coherent(priv->device, rxsize *
+ sizeof(struct
+ dma_extended_desc),
+ &priv->dma_rx_phy,
+ GFP_KERNEL);
+ priv->dma_etx = dma_alloc_coherent(priv->device, txsize *
+ sizeof(struct
+ dma_extended_desc),
+ &priv->dma_tx_phy,
+ GFP_KERNEL);
+ if ((!priv->dma_erx) || (!priv->dma_etx))
+ return;
+ } else {
+ priv->dma_rx = dma_alloc_coherent(priv->device, rxsize *
+ sizeof(struct dma_desc),
+ &priv->dma_rx_phy,
+ GFP_KERNEL);
+ priv->dma_tx = dma_alloc_coherent(priv->device, txsize *
+ sizeof(struct dma_desc),
+ &priv->dma_tx_phy,
+ GFP_KERNEL);
+ if ((!priv->dma_rx) || (!priv->dma_tx))
+ return;
+ }
+
+ priv->rx_skbuff_dma = kmalloc_array(rxsize, sizeof(dma_addr_t),
+ GFP_KERNEL);
+ priv->rx_skbuff = kmalloc_array(rxsize, sizeof(struct sk_buff *),
+ GFP_KERNEL);
+ priv->tx_skbuff_dma = kmalloc_array(txsize, sizeof(dma_addr_t),
+ GFP_KERNEL);
+ priv->tx_skbuff = kmalloc_array(txsize, sizeof(struct sk_buff *),
+ GFP_KERNEL);
+ if (netif_msg_drv(priv))
+ pr_debug("(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n", __func__,
+ (u32) priv->dma_rx_phy, (u32) priv->dma_tx_phy);
+
+ /* RX INITIALIZATION */
+ DBG(probe, INFO, "stmmac: SKB addresses:\nskb\t\tskb data\tdma data\n");
+ for (i = 0; i < rxsize; i++) {
+ struct dma_desc *p;
+ if (priv->extend_desc)
+ p = &((priv->dma_erx + i)->basic);
+ else
+ p = priv->dma_rx + i;
+
+ if (stmmac_init_rx_buffers(priv, p, i))
+ break;
+
+ DBG(probe, INFO, "[%p]\t[%p]\t[%x]\n", priv->rx_skbuff[i],
+ priv->rx_skbuff[i]->data, priv->rx_skbuff_dma[i]);
+ }
+ priv->cur_rx = 0;
+ priv->dirty_rx = (unsigned int)(i - rxsize);
+ priv->dma_buf_sz = bfsize;
+ buf_sz = bfsize;
+
+ /* Setup the chained descriptor addresses */
+ if (priv->mode == STMMAC_CHAIN_MODE) {
+ if (priv->extend_desc) {
+ priv->hw->chain->init(priv->dma_erx, priv->dma_rx_phy,
+ rxsize, 1);
+ priv->hw->chain->init(priv->dma_etx, priv->dma_tx_phy,
+ txsize, 1);
+ } else {
+ priv->hw->chain->init(priv->dma_rx, priv->dma_rx_phy,
+ rxsize, 0);
+ priv->hw->chain->init(priv->dma_tx, priv->dma_tx_phy,
+ txsize, 0);
+ }
+ }
+
+ /* TX INITIALIZATION */
+ for (i = 0; i < txsize; i++) {
+ struct dma_desc *p;
+ if (priv->extend_desc)
+ p = &((priv->dma_etx + i)->basic);
+ else
+ p = priv->dma_tx + i;
+ p->des2 = 0;
+ priv->tx_skbuff_dma[i] = 0;
+ priv->tx_skbuff[i] = NULL;
+ }
+
+ priv->dirty_tx = 0;
+ priv->cur_tx = 0;
+
+ stmmac_clear_descriptors(priv);
+
+ if (netif_msg_hw(priv))
+ stmmac_display_rings(priv);
+}
+
+static void dma_free_rx_skbufs(struct stmmac_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->dma_rx_size; i++) {
+ if (priv->rx_skbuff[i]) {
+ dma_unmap_single(priv->device, priv->rx_skbuff_dma[i],
+ priv->dma_buf_sz, DMA_FROM_DEVICE);
+ dev_kfree_skb_any(priv->rx_skbuff[i]);
+ }
+ priv->rx_skbuff[i] = NULL;
+ }
+}
+
+static void dma_free_tx_skbufs(struct stmmac_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->dma_tx_size; i++) {
+ if (priv->tx_skbuff[i] != NULL) {
+ struct dma_desc *p;
+ if (priv->extend_desc)
+ p = &((priv->dma_etx + i)->basic);
+ else
+ p = priv->dma_tx + i;
+
+ if (priv->tx_skbuff_dma[i])
+ dma_unmap_single(priv->device,
+ priv->tx_skbuff_dma[i],
+ priv->hw->desc->get_tx_len(p),
+ DMA_TO_DEVICE);
+ dev_kfree_skb_any(priv->tx_skbuff[i]);
+ priv->tx_skbuff[i] = NULL;
+ priv->tx_skbuff_dma[i] = 0;
+ }
+ }
+}
+
+static void free_dma_desc_resources(struct stmmac_priv *priv)
+{
+ /* Release the DMA TX/RX socket buffers */
+ dma_free_rx_skbufs(priv);
+ dma_free_tx_skbufs(priv);
+
+ /* Free DMA regions of consistent memory previously allocated */
+ if (!priv->extend_desc) {
+ dma_free_coherent(priv->device,
+ priv->dma_tx_size * sizeof(struct dma_desc),
+ priv->dma_tx, priv->dma_tx_phy);
+ dma_free_coherent(priv->device,
+ priv->dma_rx_size * sizeof(struct dma_desc),
+ priv->dma_rx, priv->dma_rx_phy);
+ } else {
+ dma_free_coherent(priv->device, priv->dma_tx_size *
+ sizeof(struct dma_extended_desc),
+ priv->dma_etx, priv->dma_tx_phy);
+ dma_free_coherent(priv->device, priv->dma_rx_size *
+ sizeof(struct dma_extended_desc),
+ priv->dma_erx, priv->dma_rx_phy);
+ }
+ kfree(priv->rx_skbuff_dma);
+ kfree(priv->rx_skbuff);
+ kfree(priv->tx_skbuff_dma);
+ kfree(priv->tx_skbuff);
+}
+
+/**
+ * stmmac_dma_operation_mode - HW DMA operation mode
+ * @priv: driver private structure
+ * Description: it sets the DMA operation mode: tx/rx DMA thresholds
+ * or Store-And-Forward capability.
+ */
+static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
+{
+ if (likely(priv->plat->force_sf_dma_mode ||
+ ((priv->plat->tx_coe) && (!priv->no_csum_insertion)))) {
+ /*
+ * In case of GMAC, SF mode can be enabled
+ * to perform the TX COE in HW. This depends on:
+ * 1) TX COE if actually supported
+ * 2) There is no bugged Jumbo frame support
+ * that needs to not insert csum in the TDES.
+ */
+ priv->hw->dma->dma_mode(priv->ioaddr, SF_DMA_MODE, SF_DMA_MODE);
+ tc = SF_DMA_MODE;
+ } else
+ priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE);
+}
+
+/**
+ * stmmac_tx_clean:
+ * @priv: driver private structure
+ * Description: it reclaims resources after transmission completes.
+ */
+static void stmmac_tx_clean(struct stmmac_priv *priv)
+{
+ unsigned int txsize = priv->dma_tx_size;
+
+ spin_lock(&priv->tx_lock);
+
+ priv->xstats.tx_clean++;
+
+ while (priv->dirty_tx != priv->cur_tx) {
+ int last;
+ unsigned int entry = priv->dirty_tx % txsize;
+ struct sk_buff *skb = priv->tx_skbuff[entry];
+ struct dma_desc *p;
+
+ if (priv->extend_desc)
+ p = (struct dma_desc *)(priv->dma_etx + entry);
+ else
+ p = priv->dma_tx + entry;
+
+ /* Check if the descriptor is owned by the DMA. */
+ if (priv->hw->desc->get_tx_owner(p))
+ break;
+
+ /* Verify tx error by looking at the last segment. */
+ last = priv->hw->desc->get_tx_ls(p);
+ if (likely(last)) {
+ int tx_error =
+ priv->hw->desc->tx_status(&priv->dev->stats,
+ &priv->xstats, p,
+ priv->ioaddr);
+ if (likely(tx_error == 0)) {
+ priv->dev->stats.tx_packets++;
+ priv->xstats.tx_pkt_n++;
+ } else
+ priv->dev->stats.tx_errors++;
+
+ stmmac_get_tx_hwtstamp(priv, entry, skb);
+ }
+ TX_DBG("%s: curr %d, dirty %d\n", __func__,
+ priv->cur_tx, priv->dirty_tx);
+
+ if (likely(priv->tx_skbuff_dma[entry])) {
+ dma_unmap_single(priv->device,
+ priv->tx_skbuff_dma[entry],
+ priv->hw->desc->get_tx_len(p),
+ DMA_TO_DEVICE);
+ priv->tx_skbuff_dma[entry] = 0;
+ }
+ priv->hw->ring->clean_desc3(priv, p);
+
+ if (likely(skb != NULL)) {
+ dev_kfree_skb(skb);
+ priv->tx_skbuff[entry] = NULL;
+ }
+
+ priv->hw->desc->release_tx_desc(p, priv->mode);
+
+ priv->dirty_tx++;
+ }
+ if (unlikely(netif_queue_stopped(priv->dev) &&
+ stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv))) {
+ netif_tx_lock(priv->dev);
+ if (netif_queue_stopped(priv->dev) &&
+ stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv)) {
+ TX_DBG("%s: restart transmit\n", __func__);
+ netif_wake_queue(priv->dev);
+ }
+ netif_tx_unlock(priv->dev);
+ }
+
+ if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
+ stmmac_enable_eee_mode(priv);
+ mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
+ }
+ spin_unlock(&priv->tx_lock);
+}
+
+static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv)
+{
+ priv->hw->dma->enable_dma_irq(priv->ioaddr);
+}
+
+static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv)
+{
+ priv->hw->dma->disable_dma_irq(priv->ioaddr);
+}
+
+/**
+ * stmmac_tx_err: irq tx error mng function
+ * @priv: driver private structure
+ * Description: it cleans the descriptors and restarts the transmission
+ * in case of errors.
+ */
+static void stmmac_tx_err(struct stmmac_priv *priv)
+{
+ int i;
+ int txsize = priv->dma_tx_size;
+ netif_stop_queue(priv->dev);
+
+ priv->hw->dma->stop_tx(priv->ioaddr);
+ dma_free_tx_skbufs(priv);
+ for (i = 0; i < txsize; i++)
+ if (priv->extend_desc)
+ priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic,
+ priv->mode,
+ (i == txsize - 1));
+ else
+ priv->hw->desc->init_tx_desc(&priv->dma_tx[i],
+ priv->mode,
+ (i == txsize - 1));
+ priv->dirty_tx = 0;
+ priv->cur_tx = 0;
+ priv->hw->dma->start_tx(priv->ioaddr);
+
+ priv->dev->stats.tx_errors++;
+ netif_wake_queue(priv->dev);
+}
+
+/**
+ * stmmac_dma_interrupt: DMA ISR
+ * @priv: driver private structure
+ * Description: this is the DMA ISR. It is called by the main ISR.
+ * It calls the dwmac dma routine to understand which type of interrupt
+ * happened. In case there is a Normal interrupt and either a TX or RX
+ * interrupt happened, the NAPI is scheduled.
+ */
+static void stmmac_dma_interrupt(struct stmmac_priv *priv)
+{
+ int status;
+
+ status = priv->hw->dma->dma_interrupt(priv->ioaddr, &priv->xstats);
+ if (likely((status & handle_rx)) || (status & handle_tx)) {
+ if (likely(napi_schedule_prep(&priv->napi))) {
+ stmmac_disable_dma_irq(priv);
+ __napi_schedule(&priv->napi);
+ }
+ }
+ if (unlikely(status & tx_hard_error_bump_tc)) {
+ /* Try to bump up the dma threshold on this failure */
+ if (unlikely(tc != SF_DMA_MODE) && (tc <= 256)) {
+ tc += 64;
+ priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE);
+ priv->xstats.threshold = tc;
+ }
+ } else if (unlikely(status == tx_hard_error))
+ stmmac_tx_err(priv);
+}
+
+/**
+ * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
+ * @priv: driver private structure
+ * Description: this masks the MMC irq, in fact, the counters are managed in SW.
+ */
+static void stmmac_mmc_setup(struct stmmac_priv *priv)
+{
+ unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
+ MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
+
+ dwmac_mmc_intr_all_mask(priv->ioaddr);
+
+ if (priv->dma_cap.rmon) {
+ dwmac_mmc_ctrl(priv->ioaddr, mode);
+ memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
+ } else
+ pr_info(" No MAC Management Counters available\n");
+}
+
+static u32 stmmac_get_synopsys_id(struct stmmac_priv *priv)
+{
+ u32 hwid = priv->hw->synopsys_uid;
+
+ /* Check Synopsys Id (not available on old chips) */
+ if (likely(hwid)) {
+ u32 uid = ((hwid & 0x0000ff00) >> 8);
+ u32 synid = (hwid & 0x000000ff);
+
+ pr_info("stmmac - user ID: 0x%x, Synopsys ID: 0x%x\n",
+ uid, synid);
+
+ return synid;
+ }
+ return 0;
+}
+
+/**
+ * stmmac_selec_desc_mode: to select among: normal/alternate/extend descriptors
+ * @priv: driver private structure
+ * Description: select the Enhanced/Alternate or Normal descriptors.
+ * In case of Enhanced/Alternate, it checks whether the extended descriptors
+ * are supported by the HW cap. register.
+ */
+static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
+{
+ if (priv->plat->enh_desc) {
+ pr_info(" Enhanced/Alternate descriptors\n");
+
+ /* GMAC older than 3.50 has no extended descriptors */
+ if (priv->synopsys_id >= DWMAC_CORE_3_50) {
+ pr_info("\tEnabled extended descriptors\n");
+ priv->extend_desc = 1;
+ } else
+ pr_warn("Extended descriptors not supported\n");
+
+ priv->hw->desc = &enh_desc_ops;
+ } else {
+ pr_info(" Normal descriptors\n");
+ priv->hw->desc = &ndesc_ops;
+ }
+}
+
+/**
+ * stmmac_get_hw_features: get MAC capabilities from the HW cap. register.
+ * @priv: driver private structure
+ * Description:
+ * new GMAC chip generations have a new register to indicate the
+ * presence of the optional feature/functions.
+ * This can be also used to override the value passed through the
+ * platform and necessary for old MAC10/100 and GMAC chips.
+ */
+static int stmmac_get_hw_features(struct stmmac_priv *priv)
+{
+ u32 hw_cap = 0;
+
+ if (priv->hw->dma->get_hw_feature) {
+ hw_cap = priv->hw->dma->get_hw_feature(priv->ioaddr);
+
+ priv->dma_cap.mbps_10_100 = (hw_cap & DMA_HW_FEAT_MIISEL);
+ priv->dma_cap.mbps_1000 = (hw_cap & DMA_HW_FEAT_GMIISEL) >> 1;
+ priv->dma_cap.half_duplex = (hw_cap & DMA_HW_FEAT_HDSEL) >> 2;
+ priv->dma_cap.hash_filter = (hw_cap & DMA_HW_FEAT_HASHSEL) >> 4;
+ priv->dma_cap.multi_addr = (hw_cap & DMA_HW_FEAT_ADDMAC) >> 5;
+ priv->dma_cap.pcs = (hw_cap & DMA_HW_FEAT_PCSSEL) >> 6;
+ priv->dma_cap.sma_mdio = (hw_cap & DMA_HW_FEAT_SMASEL) >> 8;
+ priv->dma_cap.pmt_remote_wake_up =
+ (hw_cap & DMA_HW_FEAT_RWKSEL) >> 9;
+ priv->dma_cap.pmt_magic_frame =
+ (hw_cap & DMA_HW_FEAT_MGKSEL) >> 10;
+ /* MMC */
+ priv->dma_cap.rmon = (hw_cap & DMA_HW_FEAT_MMCSEL) >> 11;
+ /* IEEE 1588-2002 */
+ priv->dma_cap.time_stamp =
+ (hw_cap & DMA_HW_FEAT_TSVER1SEL) >> 12;
+ /* IEEE 1588-2008 */
+ priv->dma_cap.atime_stamp =
+ (hw_cap & DMA_HW_FEAT_TSVER2SEL) >> 13;
+ /* 802.3az - Energy-Efficient Ethernet (EEE) */
+ priv->dma_cap.eee = (hw_cap & DMA_HW_FEAT_EEESEL) >> 14;
+ priv->dma_cap.av = (hw_cap & DMA_HW_FEAT_AVSEL) >> 15;
+ /* TX and RX csum */
+ priv->dma_cap.tx_coe = (hw_cap & DMA_HW_FEAT_TXCOESEL) >> 16;
+ priv->dma_cap.rx_coe_type1 =
+ (hw_cap & DMA_HW_FEAT_RXTYP1COE) >> 17;
+ priv->dma_cap.rx_coe_type2 =
+ (hw_cap & DMA_HW_FEAT_RXTYP2COE) >> 18;
+ priv->dma_cap.rxfifo_over_2048 =
+ (hw_cap & DMA_HW_FEAT_RXFIFOSIZE) >> 19;
+ /* TX and RX number of channels */
+ priv->dma_cap.number_rx_channel =
+ (hw_cap & DMA_HW_FEAT_RXCHCNT) >> 20;
+ priv->dma_cap.number_tx_channel =
+ (hw_cap & DMA_HW_FEAT_TXCHCNT) >> 22;
+ /* Alternate (enhanced) DESC mode */
+ priv->dma_cap.enh_desc = (hw_cap & DMA_HW_FEAT_ENHDESSEL) >> 24;
+ }
+
+ return hw_cap;
+}
+
+/**
+ * stmmac_check_ether_addr: check if the MAC addr is valid
+ * @priv: driver private structure
+ * Description:
+ * it is to verify if the MAC address is valid, in case of failures it
+ * generates a random MAC address
+ */
+static void stmmac_check_ether_addr(struct stmmac_priv *priv)
+{
+ if (!is_valid_ether_addr(priv->dev->dev_addr)) {
+ priv->hw->mac->get_umac_addr((void __iomem *)
+ priv->dev->base_addr,
+ priv->dev->dev_addr, 0);
+ if (!is_valid_ether_addr(priv->dev->dev_addr))
+ eth_hw_addr_random(priv->dev);
+ }
+ pr_warn("%s: device MAC address %pM\n", priv->dev->name,
+ priv->dev->dev_addr);
+}
+
+/**
+ * stmmac_init_dma_engine: DMA init.
+ * @priv: driver private structure
+ * Description:
+ * It inits the DMA invoking the specific MAC/GMAC callback.
+ * Some DMA parameters can be passed from the platform;
+ * in case of these are not passed a default is kept for the MAC or GMAC.
+ */
+static int stmmac_init_dma_engine(struct stmmac_priv *priv)
+{
+ int pbl = DEFAULT_DMA_PBL, fixed_burst = 0, burst_len = 0;
+ int mixed_burst = 0;
+ int atds = 0;
+
+ if (priv->plat->dma_cfg) {
+ pbl = priv->plat->dma_cfg->pbl;
+ fixed_burst = priv->plat->dma_cfg->fixed_burst;
+ mixed_burst = priv->plat->dma_cfg->mixed_burst;
+ burst_len = priv->plat->dma_cfg->burst_len;
+ }
+
+ if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
+ atds = 1;
+
+ return priv->hw->dma->init(priv->ioaddr, pbl, fixed_burst, mixed_burst,
+ burst_len, priv->dma_tx_phy,
+ priv->dma_rx_phy, atds);
+}
+
+/**
+ * stmmac_tx_timer: mitigation sw timer for tx.
+ * @data: data pointer
+ * Description:
+ * This is the timer handler to directly invoke the stmmac_tx_clean.
+ */
+static void stmmac_tx_timer(unsigned long data)
+{
+ struct stmmac_priv *priv = (struct stmmac_priv *)data;
+
+ stmmac_tx_clean(priv);
+}
+
+/**
+ * stmmac_init_tx_coalesce: init tx mitigation options.
+ * @priv: driver private structure
+ * Description:
+ * This inits the transmit coalesce parameters: i.e. timer rate,
+ * timer handler and default threshold used for enabling the
+ * interrupt on completion bit.
+ */
+static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
+{
+ priv->tx_coal_frames = STMMAC_TX_FRAMES;
+ priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
+ init_timer(&priv->txtimer);
+ priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer);
+ priv->txtimer.data = (unsigned long)priv;
+ priv->txtimer.function = stmmac_tx_timer;
+ add_timer(&priv->txtimer);
+}
+
+/**
+ * stmmac_open - open entry point of the driver
+ * @dev : pointer to the device structure.
+ * Description:
+ * This function is the open entry point of the driver.
+ * Return value:
+ * 0 on success and an appropriate (-)ve integer as defined in errno.h
+ * file on failure.
+ */
+static int stmmac_open(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ int ret;
+
+ clk_prepare_enable(priv->stmmac_clk);
+
+ stmmac_check_ether_addr(priv);
+
+ if (priv->pcs != STMMAC_PCS_RGMII && priv->pcs != STMMAC_PCS_TBI &&
+ priv->pcs != STMMAC_PCS_RTBI) {
+ ret = stmmac_init_phy(dev);
+ if (ret) {
+ pr_err("%s: Cannot attach to PHY (error: %d)\n",
+ __func__, ret);
+ goto open_error;
+ }
+ }
+
+ /* Create and initialize the TX/RX descriptors chains. */
+ priv->dma_tx_size = STMMAC_ALIGN(dma_txsize);
+ priv->dma_rx_size = STMMAC_ALIGN(dma_rxsize);
+ priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
+ init_dma_desc_rings(dev);
+
+ /* DMA initialization and SW reset */
+ ret = stmmac_init_dma_engine(priv);
+ if (ret < 0) {
+ pr_err("%s: DMA initialization failed\n", __func__);
+ goto open_error;
+ }
+
+ /* Copy the MAC addr into the HW */
+ priv->hw->mac->set_umac_addr(priv->ioaddr, dev->dev_addr, 0);
+
+ /* If required, perform hw setup of the bus. */
+ if (priv->plat->bus_setup)
+ priv->plat->bus_setup(priv->ioaddr);
+
+ /* Initialize the MAC Core */
+ priv->hw->mac->core_init(priv->ioaddr);
+
+ /* Request the IRQ lines */
+ ret = request_irq(dev->irq, stmmac_interrupt,
+ IRQF_SHARED, dev->name, dev);
+ if (unlikely(ret < 0)) {
+ pr_err("%s: ERROR: allocating the IRQ %d (error: %d)\n",
+ __func__, dev->irq, ret);
+ goto open_error;
+ }
+
+ /* Request the Wake IRQ in case of another line is used for WoL */
+ if (priv->wol_irq != dev->irq) {
+ ret = request_irq(priv->wol_irq, stmmac_interrupt,
+ IRQF_SHARED, dev->name, dev);
+ if (unlikely(ret < 0)) {
+ pr_err("%s: ERROR: allocating the WoL IRQ %d (%d)\n",
+ __func__, priv->wol_irq, ret);
+ goto open_error_wolirq;
+ }
+ }
+
+ /* Request the IRQ lines */
+ if (priv->lpi_irq != -ENXIO) {
+ ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
+ dev->name, dev);
+ if (unlikely(ret < 0)) {
+ pr_err("%s: ERROR: allocating the LPI IRQ %d (%d)\n",
+ __func__, priv->lpi_irq, ret);
+ goto open_error_lpiirq;
+ }
+ }
+
+ /* Enable the MAC Rx/Tx */
+ stmmac_set_mac(priv->ioaddr, true);
+
+ /* Set the HW DMA mode and the COE */
+ stmmac_dma_operation_mode(priv);
+
+ /* Extra statistics */
+ memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
+ priv->xstats.threshold = tc;
+
+ stmmac_mmc_setup(priv);
+
+ ret = stmmac_init_ptp(priv);
+ if (ret)
+ pr_warn("%s: failed PTP initialisation\n", __func__);
+
+#ifdef CONFIG_GMAC_DEBUG_FS
+ ret = stmmac_init_fs(dev);
+ if (ret < 0)
+ pr_warn("%s: failed debugFS registration\n", __func__);
+#endif
+ /* Start the ball rolling... */
+ DBG(probe, DEBUG, "%s: DMA RX/TX processes started...\n", dev->name);
+ priv->hw->dma->start_tx(priv->ioaddr);
+ priv->hw->dma->start_rx(priv->ioaddr);
+
+ /* Dump DMA/MAC registers */
+ if (netif_msg_hw(priv)) {
+ priv->hw->mac->dump_regs(priv->ioaddr);
+ priv->hw->dma->dump_regs(priv->ioaddr);
+ }
+
+ if (priv->phydev)
+ phy_start(priv->phydev);
+
+ priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
+
+ priv->eee_enabled = stmmac_eee_init(priv);
+
+ stmmac_init_tx_coalesce(priv);
+
+ if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
+ priv->rx_riwt = MAX_DMA_RIWT;
+ priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT);
+ }
+
+ if (priv->pcs && priv->hw->mac->ctrl_ane)
+ priv->hw->mac->ctrl_ane(priv->ioaddr, 0);
+
+ napi_enable(&priv->napi);
+ netif_start_queue(dev);
+
+ return 0;
+
+open_error_lpiirq:
+ if (priv->wol_irq != dev->irq)
+ free_irq(priv->wol_irq, dev);
+
+open_error_wolirq:
+ free_irq(dev->irq, dev);
+
+open_error:
+ if (priv->phydev)
+ phy_disconnect(priv->phydev);
+
+ clk_disable_unprepare(priv->stmmac_clk);
+
+ return ret;
+}
+
+/**
+ * stmmac_release - close entry point of the driver
+ * @dev : device pointer.
+ * Description:
+ * This is the stop entry point of the driver.
+ */
+static int stmmac_release(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ if (priv->eee_enabled)
+ del_timer_sync(&priv->eee_ctrl_timer);
+
+ /* Stop and disconnect the PHY */
+ if (priv->phydev) {
+ phy_stop(priv->phydev);
+ phy_disconnect(priv->phydev);
+ priv->phydev = NULL;
+ }
+
+ netif_stop_queue(dev);
+
+ napi_disable(&priv->napi);
+
+ del_timer_sync(&priv->txtimer);
+
+ /* Free the IRQ lines */
+ free_irq(dev->irq, dev);
+ if (priv->wol_irq != dev->irq)
+ free_irq(priv->wol_irq, dev);
+ if (priv->lpi_irq != -ENXIO)
+ free_irq(priv->lpi_irq, dev);
+
+ /* Stop TX/RX DMA and clear the descriptors */
+ priv->hw->dma->stop_tx(priv->ioaddr);
+ priv->hw->dma->stop_rx(priv->ioaddr);
+
+ /* Release and free the Rx/Tx resources */
+ free_dma_desc_resources(priv);
+
+ /* Disable the MAC Rx/Tx */
+ stmmac_set_mac(priv->ioaddr, false);
+
+ netif_carrier_off(dev);
+
+#ifdef CONFIG_GMAC_DEBUG_FS
+ stmmac_exit_fs();
+#endif
+ clk_disable_unprepare(priv->stmmac_clk);
+
+ stmmac_release_ptp(priv);
+
+ return 0;
+}
+
+/**
+ * stmmac_xmit: Tx entry point of the driver
+ * @skb : the socket buffer
+ * @dev : device pointer
+ * Description : this is the tx entry point of the driver.
+ * It programs the chain or the ring and supports oversized frames
+ * and SG feature.
+ */
+static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ unsigned int txsize = priv->dma_tx_size;
+ unsigned int entry;
+ int i, csum_insertion = 0, is_jumbo = 0;
+ int nfrags = skb_shinfo(skb)->nr_frags;
+ struct dma_desc *desc, *first;
+ unsigned int nopaged_len = skb_headlen(skb);
+
+ if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) {
+ if (!netif_queue_stopped(dev)) {
+ netif_stop_queue(dev);
+ /* This is a hard error, log it. */
+ pr_err("%s: Tx Ring full when queue awake\n", __func__);
+ }
+ return NETDEV_TX_BUSY;
+ }
+
+ spin_lock(&priv->tx_lock);
+
+ if (priv->tx_path_in_lpi_mode)
+ stmmac_disable_eee_mode(priv);
+
+ entry = priv->cur_tx % txsize;
+
+#ifdef STMMAC_XMIT_DEBUG
+ if ((skb->len > ETH_FRAME_LEN) || nfrags)
+ pr_debug("%s: [entry %d]: skb addr %p len: %d nopagedlen: %d\n"
+ "\tn_frags: %d - ip_summed: %d - %s gso\n"
+ "\ttx_count_frames %d\n", __func__, entry,
+ skb, skb->len, nopaged_len, nfrags, skb->ip_summed,
+ !skb_is_gso(skb) ? "isn't" : "is",
+ priv->tx_count_frames);
+#endif
+
+ csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
+
+ if (priv->extend_desc)
+ desc = (struct dma_desc *)(priv->dma_etx + entry);
+ else
+ desc = priv->dma_tx + entry;
+
+ first = desc;
+
+#ifdef STMMAC_XMIT_DEBUG
+ if ((nfrags > 0) || (skb->len > ETH_FRAME_LEN))
+ pr_debug("\tskb len: %d, nopaged_len: %d,\n"
+ "\t\tn_frags: %d, ip_summed: %d\n",
+ skb->len, nopaged_len, nfrags, skb->ip_summed);
+#endif
+ priv->tx_skbuff[entry] = skb;
+
+ /* To program the descriptors according to the size of the frame */
+ if (priv->mode == STMMAC_RING_MODE) {
+ is_jumbo = priv->hw->ring->is_jumbo_frm(skb->len,
+ priv->plat->enh_desc);
+ if (unlikely(is_jumbo))
+ entry = priv->hw->ring->jumbo_frm(priv, skb,
+ csum_insertion);
+ } else {
+ is_jumbo = priv->hw->chain->is_jumbo_frm(skb->len,
+ priv->plat->enh_desc);
+ if (unlikely(is_jumbo))
+ entry = priv->hw->chain->jumbo_frm(priv, skb,
+ csum_insertion);
+ }
+ if (likely(!is_jumbo)) {
+ desc->des2 = dma_map_single(priv->device, skb->data,
+ nopaged_len, DMA_TO_DEVICE);
+ priv->tx_skbuff_dma[entry] = desc->des2;
+ priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len,
+ csum_insertion, priv->mode);
+ } else
+ desc = first;
+
+ for (i = 0; i < nfrags; i++) {
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ int len = skb_frag_size(frag);
+
+ entry = (++priv->cur_tx) % txsize;
+ if (priv->extend_desc)
+ desc = (struct dma_desc *)(priv->dma_etx + entry);
+ else
+ desc = priv->dma_tx + entry;
+
+ TX_DBG("\t[entry %d] segment len: %d\n", entry, len);
+ desc->des2 = skb_frag_dma_map(priv->device, frag, 0, len,
+ DMA_TO_DEVICE);
+ priv->tx_skbuff_dma[entry] = desc->des2;
+ priv->tx_skbuff[entry] = NULL;
+ priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion,
+ priv->mode);
+ wmb();
+ priv->hw->desc->set_tx_owner(desc);
+ wmb();
+ }
+
+ /* Finalize the latest segment. */
+ priv->hw->desc->close_tx_desc(desc);
+
+ wmb();
+ /* According to the coalesce parameter the IC bit for the latest
+ * segment could be reset and the timer re-started to invoke the
+ * stmmac_tx function. This approach takes care about the fragments.
+ */
+ priv->tx_count_frames += nfrags + 1;
+ if (priv->tx_coal_frames > priv->tx_count_frames) {
+ priv->hw->desc->clear_tx_ic(desc);
+ priv->xstats.tx_reset_ic_bit++;
+ TX_DBG("\t[entry %d]: tx_count_frames %d\n", entry,
+ priv->tx_count_frames);
+ mod_timer(&priv->txtimer,
+ STMMAC_COAL_TIMER(priv->tx_coal_timer));
+ } else
+ priv->tx_count_frames = 0;
+
+	/* To avoid race conditions */
+ priv->hw->desc->set_tx_owner(first);
+ wmb();
+
+ priv->cur_tx++;
+
+#ifdef STMMAC_XMIT_DEBUG
+ if (netif_msg_pktdata(priv)) {
+ pr_info("%s: curr %d dirty=%d entry=%d, first=%p, nfrags=%d",
+ __func__, (priv->cur_tx % txsize),
+ (priv->dirty_tx % txsize), entry, first, nfrags);
+ if (priv->extend_desc)
+ stmmac_display_ring((void *)priv->dma_etx, txsize, 1);
+ else
+ stmmac_display_ring((void *)priv->dma_tx, txsize, 0);
+
+ pr_info(">>> frame to be transmitted: ");
+ print_pkt(skb->data, skb->len);
+ }
+#endif
+ if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) {
+ TX_DBG("%s: stop transmitted packets\n", __func__);
+ netif_stop_queue(dev);
+ }
+
+ dev->stats.tx_bytes += skb->len;
+
+ if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ priv->hwts_tx_en)) {
+ /* declare that device is doing timestamping */
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ priv->hw->desc->enable_tx_timestamp(first);
+ }
+
+ if (!priv->hwts_tx_en)
+ skb_tx_timestamp(skb);
+
+ priv->hw->dma->enable_dma_transmission(priv->ioaddr);
+
+ spin_unlock(&priv->tx_lock);
+
+ return NETDEV_TX_OK;
+}
+
+/**
+ * stmmac_rx_refill: refill used skb preallocated buffers
+ * @priv: driver private structure
+ * Description : this is to reallocate the skb for the reception process
+ * that is based on zero-copy.
+ */
+static inline void stmmac_rx_refill(struct stmmac_priv *priv)
+{
+ unsigned int rxsize = priv->dma_rx_size;
+ int bfsize = priv->dma_buf_sz;
+
+ for (; priv->cur_rx - priv->dirty_rx > 0; priv->dirty_rx++) {
+ unsigned int entry = priv->dirty_rx % rxsize;
+ struct dma_desc *p;
+
+ if (priv->extend_desc)
+ p = (struct dma_desc *)(priv->dma_erx + entry);
+ else
+ p = priv->dma_rx + entry;
+
+ if (likely(priv->rx_skbuff[entry] == NULL)) {
+ struct sk_buff *skb;
+
+ skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
+
+ if (unlikely(skb == NULL))
+ break;
+
+ priv->rx_skbuff[entry] = skb;
+ priv->rx_skbuff_dma[entry] =
+ dma_map_single(priv->device, skb->data, bfsize,
+ DMA_FROM_DEVICE);
+
+ p->des2 = priv->rx_skbuff_dma[entry];
+
+ priv->hw->ring->refill_desc3(priv, p);
+
+ RX_DBG(KERN_INFO "\trefill entry #%d\n", entry);
+ }
+ wmb();
+ priv->hw->desc->set_rx_owner(p);
+ wmb();
+ }
+}
+
+/**
+ * stmmac_rx: receive and process the frames from the ring
+ * @priv: driver private structure
+ * @limit: napi budget.
+ * Description : this is the function called by the napi poll method.
+ * It gets all the frames inside the ring.
+ */
+static int stmmac_rx(struct stmmac_priv *priv, int limit)
+{
+ unsigned int rxsize = priv->dma_rx_size;
+ unsigned int entry = priv->cur_rx % rxsize;
+ unsigned int next_entry;
+ unsigned int count = 0;
+ int coe = priv->plat->rx_coe;
+
+#ifdef STMMAC_RX_DEBUG
+ if (netif_msg_hw(priv)) {
+ pr_debug(">>> stmmac_rx: descriptor ring:\n");
+ if (priv->extend_desc)
+ stmmac_display_ring((void *)priv->dma_erx, rxsize, 1);
+ else
+ stmmac_display_ring((void *)priv->dma_rx, rxsize, 0);
+ }
+#endif
+ while (count < limit) {
+ int status;
+ struct dma_desc *p;
+
+ if (priv->extend_desc)
+ p = (struct dma_desc *)(priv->dma_erx + entry);
+ else
+ p = priv->dma_rx + entry;
+
+ if (priv->hw->desc->get_rx_owner(p))
+ break;
+
+ count++;
+
+ next_entry = (++priv->cur_rx) % rxsize;
+ if (priv->extend_desc)
+ prefetch(priv->dma_erx + next_entry);
+ else
+ prefetch(priv->dma_rx + next_entry);
+
+ /* read the status of the incoming frame */
+ status = priv->hw->desc->rx_status(&priv->dev->stats,
+ &priv->xstats, p);
+ if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status))
+ priv->hw->desc->rx_extended_status(&priv->dev->stats,
+ &priv->xstats,
+ priv->dma_erx +
+ entry);
+ if (unlikely(status == discard_frame)) {
+ priv->dev->stats.rx_errors++;
+ if (priv->hwts_rx_en && !priv->extend_desc) {
+			/* DESC2 & DESC3 will be overwritten by device
+ * with timestamp value, hence reinitialize
+ * them in stmmac_rx_refill() function so that
+ * device can reuse it.
+ */
+ priv->rx_skbuff[entry] = NULL;
+ dma_unmap_single(priv->device,
+ priv->rx_skbuff_dma[entry],
+ priv->dma_buf_sz,
+ DMA_FROM_DEVICE);
+ }
+ } else {
+ struct sk_buff *skb;
+ int frame_len;
+
+ frame_len = priv->hw->desc->get_rx_frame_len(p, coe);
+
+ /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
+ * Type frames (LLC/LLC-SNAP)
+ */
+ if (unlikely(status != llc_snap))
+ frame_len -= ETH_FCS_LEN;
+#ifdef STMMAC_RX_DEBUG
+ if (frame_len > ETH_FRAME_LEN)
+ pr_debug("\tRX frame size %d, COE status: %d\n",
+ frame_len, status);
+
+ if (netif_msg_hw(priv))
+ pr_debug("\tdesc: %p [entry %d] buff=0x%x\n",
+ p, entry, p->des2);
+#endif
+ skb = priv->rx_skbuff[entry];
+ if (unlikely(!skb)) {
+ pr_err("%s: Inconsistent Rx descriptor chain\n",
+ priv->dev->name);
+ priv->dev->stats.rx_dropped++;
+ break;
+ }
+ prefetch(skb->data - NET_IP_ALIGN);
+ priv->rx_skbuff[entry] = NULL;
+
+ stmmac_get_rx_hwtstamp(priv, entry, skb);
+
+ skb_put(skb, frame_len);
+ dma_unmap_single(priv->device,
+ priv->rx_skbuff_dma[entry],
+ priv->dma_buf_sz, DMA_FROM_DEVICE);
+#ifdef STMMAC_RX_DEBUG
+ if (netif_msg_pktdata(priv)) {
+ pr_info(" frame received (%dbytes)", frame_len);
+ print_pkt(skb->data, frame_len);
+ }
+#endif
+ skb->protocol = eth_type_trans(skb, priv->dev);
+
+ if (unlikely(!coe))
+ skb_checksum_none_assert(skb);
+ else
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ napi_gro_receive(&priv->napi, skb);
+
+ priv->dev->stats.rx_packets++;
+ priv->dev->stats.rx_bytes += frame_len;
+ }
+ entry = next_entry;
+ }
+
+ stmmac_rx_refill(priv);
+
+ priv->xstats.rx_pkt_n += count;
+
+ return count;
+}
+
+/**
+ * stmmac_poll - stmmac poll method (NAPI)
+ * @napi : pointer to the napi structure.
+ * @budget : maximum number of packets that the current CPU can receive from
+ * all interfaces.
+ * Description :
+ * To look at the incoming frames and clear the tx resources.
+ */
+static int stmmac_poll(struct napi_struct *napi, int budget)
+{
+ struct stmmac_priv *priv = container_of(napi, struct stmmac_priv, napi);
+ int work_done = 0;
+
+ priv->xstats.napi_poll++;
+ stmmac_tx_clean(priv);
+
+ work_done = stmmac_rx(priv, budget);
+ if (work_done < budget) {
+ napi_complete(napi);
+ stmmac_enable_dma_irq(priv);
+ }
+ return work_done;
+}
+
+/**
+ * stmmac_tx_timeout
+ * @dev : Pointer to net device structure
+ * Description: this function is called when a packet transmission fails to
+ * complete within a reasonable time. The driver will mark the error in the
+ * netdev structure and arrange for the device to be reset to a sane state
+ * in order to transmit a new packet.
+ */
+static void stmmac_tx_timeout(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ /* Clear Tx resources and restart transmitting again */
+ stmmac_tx_err(priv);
+}
+
+/* Configuration changes (passed on by ifconfig) */
+static int stmmac_config(struct net_device *dev, struct ifmap *map)
+{
+	if (dev->flags & IFF_UP)	/* can't act on a running interface */
+		return -EBUSY;
+
+	/* Don't allow changing the I/O address */
+	if (map->base_addr != dev->base_addr) {
+		pr_warn("%s: can't change I/O address\n", dev->name);
+		return -EOPNOTSUPP;
+	}
+
+	/* Don't allow changing the IRQ */
+	if (map->irq != dev->irq) {
+		pr_warn("%s: can't change IRQ number %d\n", dev->name, dev->irq);
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+
+/**
+ * stmmac_set_rx_mode - entry point for multicast addressing
+ * @dev : pointer to the device structure
+ * Description:
+ * This function is a driver entry point which gets called by the kernel
+ * whenever multicast addresses must be enabled/disabled.
+ * Return value:
+ * void.
+ */
+static void stmmac_set_rx_mode(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ spin_lock(&priv->lock);
+ priv->hw->mac->set_filter(dev, priv->synopsys_id);
+ spin_unlock(&priv->lock);
+}
+
+/**
+ * stmmac_change_mtu - entry point to change MTU size for the device.
+ * @dev : device pointer.
+ * @new_mtu : the new MTU size for the device.
+ * Description: the Maximum Transfer Unit (MTU) is used by the network layer
+ * to drive packet transmission. Ethernet has an MTU of 1500 octets
+ * (ETH_DATA_LEN). This value can be changed with ifconfig.
+ * Return value:
+ * 0 on success and an appropriate (-)ve integer as defined in errno.h
+ * file on failure.
+ */
+static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ int max_mtu;
+
+ if (netif_running(dev)) {
+ pr_err("%s: must be stopped to change its MTU\n", dev->name);
+ return -EBUSY;
+ }
+
+ if (priv->plat->enh_desc)
+ max_mtu = JUMBO_LEN;
+ else
+ max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
+
+ if ((new_mtu < 46) || (new_mtu > max_mtu)) {
+ pr_err("%s: invalid MTU, max MTU is: %d\n", dev->name, max_mtu);
+ return -EINVAL;
+ }
+
+ dev->mtu = new_mtu;
+ netdev_update_features(dev);
+
+ return 0;
+}
+
+static netdev_features_t stmmac_fix_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
+ features &= ~NETIF_F_RXCSUM;
+ else if (priv->plat->rx_coe == STMMAC_RX_COE_TYPE1)
+ features &= ~NETIF_F_IPV6_CSUM;
+ if (!priv->plat->tx_coe)
+ features &= ~NETIF_F_ALL_CSUM;
+
+ /* Some GMAC devices have a bugged Jumbo frame support that
+ * needs to have the Tx COE disabled for oversized frames
+ * (due to limited buffer sizes). In this case we disable
+ * the TX csum insertionin the TDES and not use SF.
+ */
+ if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
+ features &= ~NETIF_F_ALL_CSUM;
+
+ return features;
+}
+
+/**
+ * stmmac_interrupt - main ISR
+ * @irq: interrupt number.
+ * @dev_id: to pass the net device pointer.
+ * Description: this is the main driver interrupt service routine.
+ * It calls the DMA ISR and also the core ISR to manage PMT, MMC, LPI
+ * interrupts.
+ */
+static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ if (unlikely(!dev)) {
+ pr_err("%s: invalid dev pointer\n", __func__);
+ return IRQ_NONE;
+ }
+
+ /* To handle GMAC own interrupts */
+ if (priv->plat->has_gmac) {
+ int status = priv->hw->mac->host_irq_status((void __iomem *)
+ dev->base_addr,
+ &priv->xstats);
+ if (unlikely(status)) {
+ /* For LPI we need to save the tx status */
+ if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
+ priv->tx_path_in_lpi_mode = true;
+ if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
+ priv->tx_path_in_lpi_mode = false;
+ }
+ }
+
+ /* To handle DMA interrupts */
+ stmmac_dma_interrupt(priv);
+
+ return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/* Polling receive - used by NETCONSOLE and other diagnostic tools
+ * to allow network I/O with interrupts disabled.
+ */
+static void stmmac_poll_controller(struct net_device *dev)
+{
+ disable_irq(dev->irq);
+ stmmac_interrupt(dev->irq, dev);
+ enable_irq(dev->irq);
+}
+#endif
+
+/**
+ * stmmac_ioctl - Entry point for the Ioctl
+ * @dev: Device pointer.
+ * @rq: An IOCTL specefic structure, that can contain a pointer to
+ * a proprietary structure used to pass information to the driver.
+ * @cmd: IOCTL command
+ * Description:
+ * Currently it supports the phy_mii_ioctl(...) and HW time stamping.
+ */
+static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ int ret = -EOPNOTSUPP;
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ case SIOCGMIIREG:
+ case SIOCSMIIREG:
+ if (!priv->phydev)
+ return -EINVAL;
+ ret = phy_mii_ioctl(priv->phydev, rq, cmd);
+ break;
+ case SIOCSHWTSTAMP:
+ ret = stmmac_hwtstamp_ioctl(dev, rq);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+#ifdef CONFIG_GMAC_DEBUG_FS
+static struct dentry *stmmac_fs_dir;
+static struct dentry *stmmac_rings_status;
+static struct dentry *stmmac_dma_cap;
+
+static void sysfs_display_ring(void *head, int size, int extend_desc,
+			       struct seq_file *seq)
+{
+	int i;
+	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
+	struct dma_desc *p = (struct dma_desc *)head;
+
+	for (i = 0; i < size; i++) {
+		u64 x;
+		if (extend_desc) {
+			x = *(u64 *) ep;
+			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
+				   i, (unsigned int)virt_to_phys(ep),
+				   (unsigned int)x, (unsigned int)(x >> 32),
+				   ep->basic.des2, ep->basic.des3);
+			ep++;
+		} else {
+			x = *(u64 *) p;
+			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
+				   i, (unsigned int)virt_to_phys(p),
+				   (unsigned int)x, (unsigned int)(x >> 32),
+				   p->des2, p->des3);
+			p++;
+		}
+		seq_printf(seq, "\n");
+	}
+}
+
+static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
+{
+ struct net_device *dev = seq->private;
+ struct stmmac_priv *priv = netdev_priv(dev);
+ unsigned int txsize = priv->dma_tx_size;
+ unsigned int rxsize = priv->dma_rx_size;
+
+ if (priv->extend_desc) {
+ seq_printf(seq, "Extended RX descriptor ring:\n");
+ sysfs_display_ring((void *)priv->dma_erx, rxsize, 1, seq);
+ seq_printf(seq, "Extended TX descriptor ring:\n");
+ sysfs_display_ring((void *)priv->dma_etx, txsize, 1, seq);
+ } else {
+ seq_printf(seq, "RX descriptor ring:\n");
+ sysfs_display_ring((void *)priv->dma_rx, rxsize, 0, seq);
+ seq_printf(seq, "TX descriptor ring:\n");
+ sysfs_display_ring((void *)priv->dma_tx, txsize, 0, seq);
+ }
+
+ return 0;
+}
+
+static int stmmac_sysfs_ring_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, stmmac_sysfs_ring_read, inode->i_private);
+}
+
+static const struct file_operations stmmac_rings_status_fops = {
+ .owner = THIS_MODULE,
+ .open = stmmac_sysfs_ring_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
+{
+ struct net_device *dev = seq->private;
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ if (!priv->hw_cap_support) {
+ seq_printf(seq, "DMA HW features not supported\n");
+ return 0;
+ }
+
+ seq_printf(seq, "==============================\n");
+ seq_printf(seq, "\tDMA HW features\n");
+ seq_printf(seq, "==============================\n");
+
+ seq_printf(seq, "\t10/100 Mbps %s\n",
+ (priv->dma_cap.mbps_10_100) ? "Y" : "N");
+ seq_printf(seq, "\t1000 Mbps %s\n",
+ (priv->dma_cap.mbps_1000) ? "Y" : "N");
+ seq_printf(seq, "\tHalf duple %s\n",
+ (priv->dma_cap.half_duplex) ? "Y" : "N");
+ seq_printf(seq, "\tHash Filter: %s\n",
+ (priv->dma_cap.hash_filter) ? "Y" : "N");
+ seq_printf(seq, "\tMultiple MAC address registers: %s\n",
+ (priv->dma_cap.multi_addr) ? "Y" : "N");
+ seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfatces): %s\n",
+ (priv->dma_cap.pcs) ? "Y" : "N");
+ seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
+ (priv->dma_cap.sma_mdio) ? "Y" : "N");
+ seq_printf(seq, "\tPMT Remote wake up: %s\n",
+ (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
+ seq_printf(seq, "\tPMT Magic Frame: %s\n",
+ (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
+ seq_printf(seq, "\tRMON module: %s\n",
+ (priv->dma_cap.rmon) ? "Y" : "N");
+ seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
+ (priv->dma_cap.time_stamp) ? "Y" : "N");
+ seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp:%s\n",
+ (priv->dma_cap.atime_stamp) ? "Y" : "N");
+ seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE) %s\n",
+ (priv->dma_cap.eee) ? "Y" : "N");
+ seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
+ seq_printf(seq, "\tChecksum Offload in TX: %s\n",
+ (priv->dma_cap.tx_coe) ? "Y" : "N");
+ seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
+ (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
+ seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
+ (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
+ seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
+ (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
+ seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
+ priv->dma_cap.number_rx_channel);
+ seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
+ priv->dma_cap.number_tx_channel);
+ seq_printf(seq, "\tEnhanced descriptors: %s\n",
+ (priv->dma_cap.enh_desc) ? "Y" : "N");
+
+ return 0;
+}
+
+static int stmmac_sysfs_dma_cap_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, stmmac_sysfs_dma_cap_read, inode->i_private);
+}
+
+static const struct file_operations stmmac_dma_cap_fops = {
+ .owner = THIS_MODULE,
+ .open = stmmac_sysfs_dma_cap_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int stmmac_init_fs(struct net_device *dev)
+{
+ /* Create debugfs entries */
+ stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
+
+ if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
+ pr_err("ERROR %s, debugfs create directory failed\n",
+ STMMAC_RESOURCE_NAME);
+
+ return -ENOMEM;
+ }
+
+ /* Entry to report DMA RX/TX rings */
+ stmmac_rings_status = debugfs_create_file("descriptors_status",
+ S_IRUGO, stmmac_fs_dir, dev,
+ &stmmac_rings_status_fops);
+
+ if (!stmmac_rings_status || IS_ERR(stmmac_rings_status)) {
+ pr_info("ERROR creating stmmac ring debugfs file\n");
+ debugfs_remove(stmmac_fs_dir);
+
+ return -ENOMEM;
+ }
+
+ /* Entry to report the DMA HW features */
+ stmmac_dma_cap = debugfs_create_file("dma_cap", S_IRUGO, stmmac_fs_dir,
+ dev, &stmmac_dma_cap_fops);
+
+ if (!stmmac_dma_cap || IS_ERR(stmmac_dma_cap)) {
+ pr_info("ERROR creating stmmac MMC debugfs file\n");
+ debugfs_remove(stmmac_rings_status);
+ debugfs_remove(stmmac_fs_dir);
+
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void stmmac_exit_fs(void)
+{
+ debugfs_remove(stmmac_rings_status);
+ debugfs_remove(stmmac_dma_cap);
+ debugfs_remove(stmmac_fs_dir);
+}
+#endif /* CONFIG_GMAC_DEBUG_FS */
+
+static const struct net_device_ops stmmac_netdev_ops = {
+ .ndo_open = stmmac_open,
+ .ndo_start_xmit = stmmac_xmit,
+ .ndo_stop = stmmac_release,
+ .ndo_change_mtu = stmmac_change_mtu,
+ .ndo_fix_features = stmmac_fix_features,
+ .ndo_set_rx_mode = stmmac_set_rx_mode,
+ .ndo_tx_timeout = stmmac_tx_timeout,
+ .ndo_do_ioctl = stmmac_ioctl,
+ .ndo_set_config = stmmac_config,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = stmmac_poll_controller,
+#endif
+ .ndo_set_mac_address = eth_mac_addr,
+};
+
+/**
+ * stmmac_hw_init - Init the MAC device
+ * @priv: driver private structure
+ * Description: this function detects which MAC device
+ * (GMAC/MAC10-100) has to attached, checks the HW capability
+ * (if supported) and sets the driver's features (for example
+ * to use the ring or chaine mode or support the normal/enh
+ * descriptor structure).
+ */
+static int stmmac_hw_init(struct stmmac_priv *priv)
+{
+ int ret;
+ struct mac_device_info *mac;
+
+ /* Identify the MAC HW device */
+ if (priv->plat->has_gmac) {
+ priv->dev->priv_flags |= IFF_UNICAST_FLT;
+ mac = dwmac1000_setup(priv->ioaddr);
+ } else {
+ mac = dwmac100_setup(priv->ioaddr);
+ }
+ if (!mac)
+ return -ENOMEM;
+
+ priv->hw = mac;
+
+ /* Get and dump the chip ID */
+ priv->synopsys_id = stmmac_get_synopsys_id(priv);
+
+ /* To use alternate (extended) or normal descriptor structures */
+ stmmac_selec_desc_mode(priv);
+
+ /* To use the chained or ring mode */
+ if (chain_mode) {
+ priv->hw->chain = &chain_mode_ops;
+ pr_info(" Chain mode enabled\n");
+ priv->mode = STMMAC_CHAIN_MODE;
+ } else {
+ priv->hw->ring = &ring_mode_ops;
+ pr_info(" Ring mode enabled\n");
+ priv->mode = STMMAC_RING_MODE;
+ }
+
+ /* Get the HW capability (new GMAC newer than 3.50a) */
+ priv->hw_cap_support = stmmac_get_hw_features(priv);
+ if (priv->hw_cap_support) {
+ pr_info(" DMA HW capability register supported");
+
+ /* We can override some gmac/dma configuration fields: e.g.
+ * enh_desc, tx_coe (e.g. that are passed through the
+ * platform) with the values from the HW capability
+ * register (if supported).
+ */
+ priv->plat->enh_desc = priv->dma_cap.enh_desc;
+ priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
+
+ priv->plat->tx_coe = priv->dma_cap.tx_coe;
+
+ if (priv->dma_cap.rx_coe_type2)
+ priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
+ else if (priv->dma_cap.rx_coe_type1)
+ priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
+
+ } else
+ pr_info(" No HW DMA feature register supported");
+
+ ret = priv->hw->mac->rx_ipc(priv->ioaddr);
+ if (!ret) {
+ pr_warn(" RX IPC Checksum Offload not configured.\n");
+ priv->plat->rx_coe = STMMAC_RX_COE_NONE;
+ }
+
+ if (priv->plat->rx_coe)
+ pr_info(" RX Checksum Offload Engine supported (type %d)\n",
+ priv->plat->rx_coe);
+ if (priv->plat->tx_coe)
+ pr_info(" TX Checksum insertion supported\n");
+
+ if (priv->plat->pmt) {
+ pr_info(" Wake-Up On Lan supported\n");
+ device_set_wakeup_capable(priv->device, 1);
+ }
+
+ return 0;
+}
+
+/**
+ * stmmac_dvr_probe
+ * @device: device pointer
+ * @plat_dat: platform data pointer
+ * @addr: iobase memory address
+ * Description: this is the main probe function used to
+ * call the alloc_etherdev, allocate the priv structure.
+ */
+struct stmmac_priv *stmmac_dvr_probe(struct device *device,
+ struct plat_stmmacenet_data *plat_dat,
+ void __iomem *addr)
+{
+ int ret = 0;
+ struct net_device *ndev = NULL;
+ struct stmmac_priv *priv;
+
+ ndev = alloc_etherdev(sizeof(struct stmmac_priv));
+ if (!ndev)
+ return NULL;
+
+ SET_NETDEV_DEV(ndev, device);
+
+ priv = netdev_priv(ndev);
+ priv->device = device;
+ priv->dev = ndev;
+
+ ether_setup(ndev);
+
+ stmmac_set_ethtool_ops(ndev);
+ priv->pause = pause;
+ priv->plat = plat_dat;
+ priv->ioaddr = addr;
+ priv->dev->base_addr = (unsigned long)addr;
+
+ /* Verify driver arguments */
+ stmmac_verify_args();
+
+ /* Override with kernel parameters if supplied XXX CRS XXX
+ * this needs to have multiple instances
+ */
+ if ((phyaddr >= 0) && (phyaddr <= 31))
+ priv->plat->phy_addr = phyaddr;
+
+ /* Init MAC and get the capabilities */
+ ret = stmmac_hw_init(priv);
+ if (ret)
+ goto error_free_netdev;
+
+ ndev->netdev_ops = &stmmac_netdev_ops;
+
+ ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_RXCSUM;
+ ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
+ ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
+#ifdef STMMAC_VLAN_TAG_USED
+ /* Both mac100 and gmac support receive VLAN tag detection */
+ ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
+#endif
+ priv->msg_enable = netif_msg_init(debug, default_msg_level);
+
+ if (flow_ctrl)
+ priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */
+
+ /* Rx Watchdog is available in the COREs newer than the 3.40.
+ * In some case, for example on bugged HW this feature
+ * has to be disable and this can be done by passing the
+ * riwt_off field from the platform.
+ */
+ if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) {
+ priv->use_riwt = 1;
+ pr_info(" Enable RX Mitigation via HW Watchdog Timer\n");
+ }
+
+ netif_napi_add(ndev, &priv->napi, stmmac_poll, 64);
+
+ spin_lock_init(&priv->lock);
+ spin_lock_init(&priv->tx_lock);
+
+ ret = register_netdev(ndev);
+ if (ret) {
+ pr_err("%s: ERROR %i registering the device\n", __func__, ret);
+ goto error_netdev_register;
+ }
+
+ priv->stmmac_clk = clk_get(priv->device, STMMAC_RESOURCE_NAME);
+ if (IS_ERR(priv->stmmac_clk)) {
+ pr_warn("%s: warning: cannot get CSR clock\n", __func__);
+ goto error_clk_get;
+ }
+
+ /* If a specific clk_csr value is passed from the platform
+ * this means that the CSR Clock Range selection cannot be
+ * changed at run-time and it is fixed. Viceversa the driver'll try to
+ * set the MDC clock dynamically according to the csr actual
+ * clock input.
+ */
+ if (!priv->plat->clk_csr)
+ stmmac_clk_csr_set(priv);
+ else
+ priv->clk_csr = priv->plat->clk_csr;
+
+ stmmac_check_pcs_mode(priv);
+
+ if (priv->pcs != STMMAC_PCS_RGMII && priv->pcs != STMMAC_PCS_TBI &&
+ priv->pcs != STMMAC_PCS_RTBI) {
+ /* MDIO bus Registration */
+ ret = stmmac_mdio_register(ndev);
+ if (ret < 0) {
+ pr_debug("%s: MDIO bus (id: %d) registration failed",
+ __func__, priv->plat->bus_id);
+ goto error_mdio_register;
+ }
+ }
+
+ return priv;
+
+error_mdio_register:
+ clk_put(priv->stmmac_clk);
+error_clk_get:
+ unregister_netdev(ndev);
+error_netdev_register:
+ netif_napi_del(&priv->napi);
+error_free_netdev:
+ free_netdev(ndev);
+
+ return NULL;
+}
+
+/**
+ * stmmac_dvr_remove
+ * @ndev: net device pointer
+ * Description: this function resets the TX/RX processes, disables the MAC RX/TX
+ * changes the link status, releases the DMA descriptor rings.
+ */
+int stmmac_dvr_remove(struct net_device *ndev)
+{
+ struct stmmac_priv *priv = netdev_priv(ndev);
+
+ pr_info("%s:\n\tremoving driver", __func__);
+
+ priv->hw->dma->stop_rx(priv->ioaddr);
+ priv->hw->dma->stop_tx(priv->ioaddr);
+
+ stmmac_set_mac(priv->ioaddr, false);
+ if (priv->pcs != STMMAC_PCS_RGMII && priv->pcs != STMMAC_PCS_TBI &&
+ priv->pcs != STMMAC_PCS_RTBI)
+ stmmac_mdio_unregister(ndev);
+ netif_carrier_off(ndev);
+ unregister_netdev(ndev);
+ free_netdev(ndev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+int stmmac_suspend(struct net_device *ndev)
+{
+	struct stmmac_priv *priv = netdev_priv(ndev);
+	unsigned long flags;
+
+	if (!ndev || !netif_running(ndev))
+		return 0;
+
+	if (priv->phydev)
+		phy_stop(priv->phydev);
+
+	/* napi_disable() may sleep; it must not run under the spinlock */
+	napi_disable(&priv->napi);
+	spin_lock_irqsave(&priv->lock, flags);
+
+	netif_device_detach(ndev);
+	netif_stop_queue(ndev);
+
+	/* Stop TX/RX DMA */
+	priv->hw->dma->stop_tx(priv->ioaddr);
+	priv->hw->dma->stop_rx(priv->ioaddr);
+
+	stmmac_clear_descriptors(priv);
+
+	/* Enable Power down mode by programming the PMT regs */
+	if (device_may_wakeup(priv->device))
+		priv->hw->mac->pmt(priv->ioaddr, priv->wolopts);
+	else {
+		stmmac_set_mac(priv->ioaddr, false);
+		/* Disable clock in case of PWM is off */
+		clk_disable_unprepare(priv->stmmac_clk);
+	}
+	spin_unlock_irqrestore(&priv->lock, flags);
+	return 0;
+}
+
+int stmmac_resume(struct net_device *ndev)
+{
+	struct stmmac_priv *priv = netdev_priv(ndev);
+	unsigned long flags;
+
+	if (!netif_running(ndev))
+		return 0;
+
+	spin_lock_irqsave(&priv->lock, flags);
+
+	/* Power Down bit, into the PM register, is cleared
+	 * automatically as soon as a magic packet or a Wake-up frame
+	 * is received. Anyway, it's better to manually clear
+	 * this bit because it can generate problems while resuming
+	 * from other devices (e.g. serial console).
+	 */
+	if (device_may_wakeup(priv->device))
+		priv->hw->mac->pmt(priv->ioaddr, 0);
+	else
+		/* enable the clk previously disabled */
+		clk_prepare_enable(priv->stmmac_clk);
+
+	netif_device_attach(ndev);
+
+	/* Enable the MAC and DMA */
+	stmmac_set_mac(priv->ioaddr, true);
+	priv->hw->dma->start_tx(priv->ioaddr);
+	priv->hw->dma->start_rx(priv->ioaddr);
+
+	napi_enable(&priv->napi);
+
+	netif_start_queue(ndev);
+
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	if (priv->phydev)
+		phy_start(priv->phydev);
+
+	return 0;
+}
+
+int stmmac_freeze(struct net_device *ndev)
+{
+ if (!ndev || !netif_running(ndev))
+ return 0;
+
+ return stmmac_release(ndev);
+}
+
+int stmmac_restore(struct net_device *ndev)
+{
+ if (!ndev || !netif_running(ndev))
+ return 0;
+
+ return stmmac_open(ndev);
+}
+#endif /* CONFIG_PM */
+
+/* Driver can be configured w/ and w/ both PCI and Platf drivers
+ * depending on the configuration selected.
+ */
+static int __init stmmac_init(void)
+{
+ int ret;
+
+ ret = stmmac_register_platform();
+ if (ret)
+ goto err;
+ ret = stmmac_register_pci();
+ if (ret)
+ goto err_pci;
+ return 0;
+err_pci:
+ stmmac_unregister_platform();
+err:
+ pr_err("stmmac: driver registration failed\n");
+ return ret;
+}
+
+static void __exit stmmac_exit(void)
+{
+ stmmac_unregister_platform();
+ stmmac_unregister_pci();
+}
+
+module_init(stmmac_init);
+module_exit(stmmac_exit);
+
+#ifndef MODULE
+static int __init stmmac_cmdline_opt(char *str)
+{
+ char *opt;
+
+ if (!str || !*str)
+ return -EINVAL;
+ while ((opt = strsep(&str, ",")) != NULL) {
+ if (!strncmp(opt, "debug:", 6)) {
+ if (kstrtoint(opt + 6, 0, &debug))
+ goto err;
+ } else if (!strncmp(opt, "phyaddr:", 8)) {
+ if (kstrtoint(opt + 8, 0, &phyaddr))
+ goto err;
+ } else if (!strncmp(opt, "dma_txsize:", 11)) {
+ if (kstrtoint(opt + 11, 0, &dma_txsize))
+ goto err;
+ } else if (!strncmp(opt, "dma_rxsize:", 11)) {
+ if (kstrtoint(opt + 11, 0, &dma_rxsize))
+ goto err;
+ } else if (!strncmp(opt, "buf_sz:", 7)) {
+ if (kstrtoint(opt + 7, 0, &buf_sz))
+ goto err;
+ } else if (!strncmp(opt, "tc:", 3)) {
+ if (kstrtoint(opt + 3, 0, &tc))
+ goto err;
+ } else if (!strncmp(opt, "watchdog:", 9)) {
+ if (kstrtoint(opt + 9, 0, &watchdog))
+ goto err;
+ } else if (!strncmp(opt, "flow_ctrl:", 10)) {
+ if (kstrtoint(opt + 10, 0, &flow_ctrl))
+ goto err;
+ } else if (!strncmp(opt, "pause:", 6)) {
+ if (kstrtoint(opt + 6, 0, &pause))
+ goto err;
+ } else if (!strncmp(opt, "eee_timer:", 10)) {
+ if (kstrtoint(opt + 10, 0, &eee_timer))
+ goto err;
+ } else if (!strncmp(opt, "chain_mode:", 11)) {
+ if (kstrtoint(opt + 11, 0, &chain_mode))
+ goto err;
+ }
+ }
+ return 0;
+
+err:
+ pr_err("%s: ERROR broken module parameter conversion", __func__);
+ return -EINVAL;
+}
+
+__setup("stmmaceth=", stmmac_cmdline_opt);
+#endif /* MODULE */
+
+MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
+MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
+MODULE_LICENSE("GPL");
--- /dev/null
+/*******************************************************************************
+ STMMAC Ethernet Driver -- MDIO bus implementation
+ Provides Bus interface for MII registers
+
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Carl Shaw <carl.shaw@st.com>
+ Maintainer: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/mii.h>
+#include <linux/phy.h>
+#include <linux/slab.h>
+#include <asm/io.h>
+
+#include "stmmac.h"
+
+#define MII_BUSY 0x00000001
+#define MII_WRITE 0x00000002
+
+static int stmmac_mdio_busy_wait(void __iomem *ioaddr, unsigned int mii_addr)
+{
+ unsigned long curr;
+ unsigned long finish = jiffies + 3 * HZ;
+
+ do {
+ curr = jiffies;
+ if (readl(ioaddr + mii_addr) & MII_BUSY)
+ cpu_relax();
+ else
+ return 0;
+ } while (!time_after_eq(curr, finish));
+
+ return -EBUSY;
+}
+
+/**
+ * stmmac_mdio_read
+ * @bus: points to the mii_bus structure
+ * @phyaddr: MII addr reg bits 15-11
+ * @phyreg: MII addr reg bits 10-6
+ * Description: it reads data from the MII register from within the phy device.
+ * For the 7111 GMAC, we must set the bit 0 in the MII address register while
+ * accessing the PHY registers.
+ * Fortunately, it seems this has no drawback for the 7109 MAC.
+ */
+static int stmmac_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg)
+{
+ struct net_device *ndev = bus->priv;
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ unsigned int mii_address = priv->hw->mii.addr;
+ unsigned int mii_data = priv->hw->mii.data;
+
+ int data;
+ u16 regValue = (((phyaddr << 11) & (0x0000F800)) |
+ ((phyreg << 6) & (0x000007C0)));
+ regValue |= MII_BUSY | ((priv->clk_csr & 0xF) << 2);
+
+ if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address))
+ return -EBUSY;
+
+ writel(regValue, priv->ioaddr + mii_address);
+
+ if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address))
+ return -EBUSY;
+
+ /* Read the data from the MII data register */
+ data = (int)readl(priv->ioaddr + mii_data);
+
+ return data;
+}
+
+/**
+ * stmmac_mdio_write
+ * @bus: points to the mii_bus structure
+ * @phyaddr: MII addr reg bits 15-11
+ * @phyreg: MII addr reg bits 10-6
+ * @phydata: phy data
+ * Description: it writes the data into the MII register from within the device.
+ */
+static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
+ u16 phydata)
+{
+ struct net_device *ndev = bus->priv;
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ unsigned int mii_address = priv->hw->mii.addr;
+ unsigned int mii_data = priv->hw->mii.data;
+
+ u16 value =
+ (((phyaddr << 11) & (0x0000F800)) | ((phyreg << 6) & (0x000007C0)))
+ | MII_WRITE;
+
+ value |= MII_BUSY | ((priv->clk_csr & 0xF) << 2);
+
+ /* Wait until any existing MII operation is complete */
+ if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address))
+ return -EBUSY;
+
+ /* Set the MII address register to write */
+ writel(phydata, priv->ioaddr + mii_data);
+ writel(value, priv->ioaddr + mii_address);
+
+ /* Wait until any existing MII operation is complete */
+ return stmmac_mdio_busy_wait(priv->ioaddr, mii_address);
+}
+
+/**
+ * stmmac_mdio_reset
+ * @bus: points to the mii_bus structure
+ * Description: reset the MII bus
+ */
+static int stmmac_mdio_reset(struct mii_bus *bus)
+{
+ struct net_device *ndev = bus->priv;
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ unsigned int mii_address = priv->hw->mii.addr;
+
+ if (priv->plat->mdio_bus_data->phy_reset) {
+ pr_debug("stmmac_mdio_reset: calling phy_reset\n");
+ priv->plat->mdio_bus_data->phy_reset(priv->plat->bsp_priv);
+ }
+
+ /* This is a workaround for problems with the STE101P PHY.
+ * It doesn't complete its reset until at least one clock cycle
+ * on MDC, so perform a dummy mdio read.
+ */
+ writel(0, priv->ioaddr + mii_address);
+ return 0;
+}
+
+/**
+ * stmmac_mdio_register
+ * @ndev: net device structure
+ * Description: it registers the MII bus
+ */
+int stmmac_mdio_register(struct net_device *ndev)
+{
+	int err = 0;
+	struct mii_bus *new_bus;
+	int *irqlist;
+	struct stmmac_priv *priv = netdev_priv(ndev);
+	struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data;
+	int addr, found;
+
+	if (!mdio_bus_data)
+		return 0;
+
+	new_bus = mdiobus_alloc();
+	if (new_bus == NULL)
+		return -ENOMEM;
+
+	if (mdio_bus_data->irqs)
+		irqlist = mdio_bus_data->irqs;
+	else
+		irqlist = priv->mii_irq;
+
+	new_bus->name = "stmmac";
+	new_bus->read = &stmmac_mdio_read;
+	new_bus->write = &stmmac_mdio_write;
+	new_bus->reset = &stmmac_mdio_reset;
+	snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s-%x",
+		 new_bus->name, priv->plat->bus_id);
+	new_bus->priv = ndev;
+	new_bus->irq = irqlist;
+	new_bus->phy_mask = mdio_bus_data->phy_mask;
+	new_bus->parent = priv->device;
+	err = mdiobus_register(new_bus);
+	if (err != 0) {
+		pr_err("%s: Cannot register as MDIO bus\n", new_bus->name);
+		goto bus_register_fail;
+	}
+
+	found = 0;
+	for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
+		struct phy_device *phydev = new_bus->phy_map[addr];
+		if (phydev) {
+			int act = 0;
+			char irq_num[12];
+			char *irq_str;
+
+			/*
+			 * If an IRQ was provided to be assigned after
+			 * the bus probe, do it here.
+			 */
+			if ((mdio_bus_data->irqs == NULL) &&
+			    (mdio_bus_data->probed_phy_irq > 0)) {
+				irqlist[addr] = mdio_bus_data->probed_phy_irq;
+				phydev->irq = mdio_bus_data->probed_phy_irq;
+			}
+
+			/*
+			 * If we're going to bind the MAC to this PHY bus,
+			 * and no PHY number was provided to the MAC,
+			 * use the one probed here.
+			 */
+			if (priv->plat->phy_addr == -1)
+				priv->plat->phy_addr = addr;
+
+			act = (priv->plat->phy_addr == addr);
+			switch (phydev->irq) {
+			case PHY_POLL:
+				irq_str = "POLL";
+				break;
+			case PHY_IGNORE_INTERRUPT:
+				irq_str = "IGNORE";
+				break;
+			default:
+				snprintf(irq_num, sizeof(irq_num), "%d", phydev->irq);
+				irq_str = irq_num;
+				break;
+			}
+			pr_info("%s: PHY ID %08x at %d IRQ %s (%s)%s\n",
+				ndev->name, phydev->phy_id, addr,
+				irq_str, dev_name(&phydev->dev),
+				act ? " active" : "");
+			found = 1;
+		}
+	}
+
+	if (!found) {
+		pr_warn("%s: No PHY found\n", ndev->name);
+		mdiobus_unregister(new_bus);
+		mdiobus_free(new_bus);
+		return -ENODEV;
+	}
+
+	priv->mii = new_bus;
+
+	return 0;
+
+bus_register_fail:
+	mdiobus_free(new_bus);
+	return err;
+}
+
+/**
+ * stmmac_mdio_unregister
+ * @ndev: net device structure
+ * Description: it unregisters the MII bus
+ */
+int stmmac_mdio_unregister(struct net_device *ndev)
+{
+ struct stmmac_priv *priv = netdev_priv(ndev);
+
+ if (!priv->mii)
+ return 0;
+
+ mdiobus_unregister(priv->mii);
+ priv->mii->priv = NULL;
+ mdiobus_free(priv->mii);
+ priv->mii = NULL;
+
+ return 0;
+}
--- /dev/null
+/*******************************************************************************
+ This contains the functions to handle the pci driver.
+
+ Copyright (C) 2011-2012 Vayavya Labs Pvt Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Rayagond Kokatanur <rayagond@vayavyalabs.com>
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/pci.h>
+#include "stmmac.h"
+
+struct plat_stmmacenet_data plat_dat;
+struct stmmac_mdio_bus_data mdio_data;
+struct stmmac_dma_cfg dma_cfg;
+
+static void stmmac_default_data(void)
+{
+ memset(&plat_dat, 0, sizeof(struct plat_stmmacenet_data));
+ plat_dat.bus_id = 1;
+ plat_dat.phy_addr = 0;
+ plat_dat.interface = PHY_INTERFACE_MODE_GMII;
+ plat_dat.clk_csr = 2; /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
+ plat_dat.has_gmac = 1;
+ plat_dat.force_sf_dma_mode = 1;
+
+ mdio_data.phy_reset = NULL;
+ mdio_data.phy_mask = 0;
+ plat_dat.mdio_bus_data = &mdio_data;
+
+ dma_cfg.pbl = 32;
+ dma_cfg.burst_len = DMA_AXI_BLEN_256;
+ plat_dat.dma_cfg = &dma_cfg;
+}
+
+/**
+ * stmmac_pci_probe
+ *
+ * @pdev: pci device pointer
+ * @id: pointer to table of device id/id's.
+ *
+ * Description: This probing function gets called for all PCI devices which
+ * match the ID table and are not "owned" by other driver yet. This function
+ * gets passed a "struct pci_dev *" for each device whose entry in the ID table
+ * matches the device. The probe functions returns zero when the driver choose
+ * to take "ownership" of the device or an error code(-ve no) otherwise.
+ */
+static int stmmac_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ int ret = 0;
+ void __iomem *addr = NULL;
+ struct stmmac_priv *priv = NULL;
+ int i;
+
+ /* Enable pci device */
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ pr_err("%s : ERROR: failed to enable %s device\n", __func__,
+ pci_name(pdev));
+ return ret;
+ }
+ if (pci_request_regions(pdev, STMMAC_RESOURCE_NAME)) {
+ pr_err("%s: ERROR: failed to get PCI region\n", __func__);
+ ret = -ENODEV;
+ goto err_out_req_reg_failed;
+ }
+
+ /* Map the first BAR with a non-zero length */
+ for (i = 0; i <= 5; i++) {
+ if (pci_resource_len(pdev, i) == 0)
+ continue;
+ addr = pci_iomap(pdev, i, 0);
+ if (addr == NULL) {
+ pr_err("%s: ERROR: cannot map register memory aborting",
+ __func__);
+ ret = -EIO;
+ goto err_out_map_failed;
+ }
+ break;
+ }
+ /* If every BAR was empty, addr is still NULL: bail out instead of
+ * handing a NULL register base to the core driver. */
+ if (addr == NULL) {
+ pr_err("%s: ERROR: no usable memory BAR found\n", __func__);
+ ret = -ENODEV;
+ goto err_out_map_failed;
+ }
+ pci_set_master(pdev);
+
+ stmmac_default_data();
+
+ priv = stmmac_dvr_probe(&(pdev->dev), &plat_dat, addr);
+ if (!priv) {
+ pr_err("%s: main driver probe failed", __func__);
+ ret = -ENODEV;
+ goto err_out;
+ }
+ priv->dev->irq = pdev->irq;
+ priv->wol_irq = pdev->irq;
+
+ pci_set_drvdata(pdev, priv->dev);
+
+ pr_debug("STMMAC platform driver registration completed");
+
+ return 0;
+
+err_out:
+ pci_clear_master(pdev);
+ /* the mapping was leaked on this error path before */
+ pci_iounmap(pdev, addr);
+err_out_map_failed:
+ pci_release_regions(pdev);
+err_out_req_reg_failed:
+ pci_disable_device(pdev);
+
+ return ret;
+}
+
+/**
+ * stmmac_pci_remove
+ *
+ * @pdev: platform device pointer
+ * Description: this function calls the main to free the net resources
+ * and releases the PCI resources.
+ */
+static void stmmac_pci_remove(struct pci_dev *pdev)
+{
+ struct net_device *ndev = pci_get_drvdata(pdev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+
+ stmmac_dvr_remove(ndev);
+
+ pci_set_drvdata(pdev, NULL);
+ /* NOTE(review): priv->ioaddr is read after stmmac_dvr_remove(); if the
+ * core frees the netdev there, this is a use-after-free - confirm. */
+ pci_iounmap(pdev, priv->ioaddr);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+}
+
+#ifdef CONFIG_PM
+/* Legacy PCI suspend: stop the MAC, then save config space and enter the
+ * D-state chosen for @state. Returns the stmmac_suspend() result. */
+static int stmmac_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct net_device *ndev = pci_get_drvdata(pdev);
+ int ret;
+
+ ret = stmmac_suspend(ndev);
+ pci_save_state(pdev);
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
+
+ return ret;
+}
+
+/* Legacy PCI resume: return to D0, restore config space, restart the MAC. */
+static int stmmac_pci_resume(struct pci_dev *pdev)
+{
+ struct net_device *ndev = pci_get_drvdata(pdev);
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+
+ return stmmac_resume(ndev);
+}
+#endif
+
+#define STMMAC_VENDOR_ID 0x700
+#define STMMAC_DEVICE_ID 0x1108
+
+/* Devices served: the synthetic STMMAC vendor/device pair above plus the
+ * STMicroelectronics MAC id from the PCI id database. */
+static DEFINE_PCI_DEVICE_TABLE(stmmac_id_table) = {
+ {PCI_DEVICE(STMMAC_VENDOR_ID, STMMAC_DEVICE_ID)},
+ {PCI_DEVICE(PCI_VENDOR_ID_STMICRO, PCI_DEVICE_ID_STMICRO_MAC)},
+ {}
+};
+
+MODULE_DEVICE_TABLE(pci, stmmac_id_table);
+
+/* Uses the legacy pci_driver suspend/resume callbacks (not dev_pm_ops). */
+struct pci_driver stmmac_pci_driver = {
+ .name = STMMAC_RESOURCE_NAME,
+ .id_table = stmmac_id_table,
+ .probe = stmmac_pci_probe,
+ .remove = stmmac_pci_remove,
+#ifdef CONFIG_PM
+ .suspend = stmmac_pci_suspend,
+ .resume = stmmac_pci_resume,
+#endif
+};
+
+MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet PCI driver");
+MODULE_AUTHOR("Rayagond Kokatanur <rayagond.kokatanur@vayavyalabs.com>");
+MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
+MODULE_LICENSE("GPL");
--- /dev/null
+/*******************************************************************************
+ This contains the functions to handle the platform driver.
+
+ Copyright (C) 2007-2011 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include "stmmac.h"
+
+#ifdef CONFIG_OF
+/* Fill @plat and @mac from the device-tree node of @pdev.
+ * Returns 0 on success, -ENODEV without a DT node, -ENOMEM on allocation
+ * failure. Only the properties needed on SPEAr600 are handled. */
+static int stmmac_probe_config_dt(struct platform_device *pdev,
+ struct plat_stmmacenet_data *plat,
+ const char **mac)
+{
+ struct device_node *np = pdev->dev.of_node;
+
+ if (!np)
+ return -ENODEV;
+
+ *mac = of_get_mac_address(np);
+ plat->interface = of_get_phy_mode(np);
+ plat->mdio_bus_data = devm_kzalloc(&pdev->dev,
+ sizeof(struct stmmac_mdio_bus_data),
+ GFP_KERNEL);
+ /* the allocation result was previously used without a check */
+ if (!plat->mdio_bus_data)
+ return -ENOMEM;
+
+ /*
+ * Currently only the properties needed on SPEAr600
+ * are provided. All other properties should be added
+ * once needed on other platforms.
+ */
+ if (of_device_is_compatible(np, "st,spear600-gmac") ||
+ of_device_is_compatible(np, "snps,dwmac-3.70a") ||
+ of_device_is_compatible(np, "snps,dwmac")) {
+ plat->has_gmac = 1;
+ plat->pmt = 1;
+ }
+
+ return 0;
+}
+#else
+/* Stub used when the kernel is built without device-tree support. */
+static int stmmac_probe_config_dt(struct platform_device *pdev,
+ struct plat_stmmacenet_data *plat,
+ const char **mac)
+{
+ return -ENOSYS;
+}
+#endif /* CONFIG_OF */
+
+/**
+ * stmmac_pltfr_probe
+ * @pdev: platform device pointer
+ * Description: platform_device probe function. It allocates
+ * the necessary resources and invokes the main to init
+ * the net device, register the mdio bus etc.
+ */
+static int stmmac_pltfr_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct resource *res;
+ struct device *dev = &pdev->dev;
+ void __iomem *addr = NULL;
+ struct stmmac_priv *priv = NULL;
+ struct plat_stmmacenet_data *plat_dat = NULL;
+ const char *mac = NULL;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ addr = devm_ioremap_resource(dev, res);
+ if (IS_ERR(addr))
+ return PTR_ERR(addr);
+
+ if (pdev->dev.of_node) {
+ plat_dat = devm_kzalloc(&pdev->dev,
+ sizeof(struct plat_stmmacenet_data),
+ GFP_KERNEL);
+ if (!plat_dat) {
+ pr_err("%s: ERROR: no memory", __func__);
+ return -ENOMEM;
+ }
+
+ ret = stmmac_probe_config_dt(pdev, plat_dat, &mac);
+ if (ret) {
+ pr_err("%s: main dt probe failed", __func__);
+ return ret;
+ }
+ } else {
+ plat_dat = pdev->dev.platform_data;
+ }
+
+ /* A non-DT device registered without platform data would have made
+ * the plat_dat->init dereference below crash. */
+ if (!plat_dat) {
+ pr_err("%s: ERROR: no platform data supplied", __func__);
+ return -EINVAL;
+ }
+
+ /* Custom initialisation (if needed)*/
+ if (plat_dat->init) {
+ ret = plat_dat->init(pdev);
+ if (unlikely(ret))
+ return ret;
+ }
+
+ priv = stmmac_dvr_probe(&(pdev->dev), plat_dat, addr);
+ if (!priv) {
+ pr_err("%s: main driver probe failed", __func__);
+ return -ENODEV;
+ }
+
+ /* Get MAC address if available (DT) */
+ if (mac)
+ memcpy(priv->dev->dev_addr, mac, ETH_ALEN);
+
+ /* Get the MAC information */
+ priv->dev->irq = platform_get_irq_byname(pdev, "macirq");
+ if (priv->dev->irq == -ENXIO) {
+ pr_err("%s: ERROR: MAC IRQ configuration "
+ "information not found\n", __func__);
+ return -ENXIO;
+ }
+
+ /*
+ * On some platforms e.g. SPEAr the wake up irq differs from the mac irq
+ * The external wake up irq can be passed through the platform code
+ * named as "eth_wake_irq"
+ *
+ * In case the wake up interrupt is not passed from the platform
+ * so the driver will continue to use the mac irq (ndev->irq)
+ */
+ priv->wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq");
+ if (priv->wol_irq == -ENXIO)
+ priv->wol_irq = priv->dev->irq;
+
+ priv->lpi_irq = platform_get_irq_byname(pdev, "eth_lpi");
+
+ platform_set_drvdata(pdev, priv->dev);
+
+ pr_debug("STMMAC platform driver registration completed");
+
+ return 0;
+}
+
+/**
+ * stmmac_pltfr_remove
+ * @pdev: platform device pointer
+ * Description: this function calls the main to free the net resources
+ * and calls the platforms hook and release the resources (e.g. mem).
+ */
+static int stmmac_pltfr_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ int ret = stmmac_dvr_remove(ndev);
+
+ /* board-specific teardown hook, the counterpart of plat->init */
+ if (priv->plat->exit)
+ priv->plat->exit(pdev);
+
+ platform_set_drvdata(pdev, NULL);
+
+ return ret;
+}
+
+#ifdef CONFIG_PM
+/* dev_pm_ops .suspend: delegate to the core stmmac_suspend(). */
+static int stmmac_pltfr_suspend(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+
+ return stmmac_suspend(ndev);
+}
+
+/* dev_pm_ops .resume: delegate to the core stmmac_resume(). */
+static int stmmac_pltfr_resume(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+
+ return stmmac_resume(ndev);
+}
+
+/* Hibernation freeze: stop the MAC, then run the board exit hook.
+ * Uses priv->plat (always valid, same as stmmac_pltfr_remove) rather than
+ * dev_get_platdata(), which is NULL for devices probed via device tree. */
+int stmmac_pltfr_freeze(struct device *dev)
+{
+ int ret;
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ struct platform_device *pdev = to_platform_device(dev);
+
+ ret = stmmac_freeze(ndev);
+ if (priv->plat->exit)
+ priv->plat->exit(pdev);
+
+ return ret;
+}
+
+/* Hibernation thaw/restore: re-run the board init hook, then restart the
+ * MAC. Uses priv->plat instead of dev_get_platdata(), which is NULL for
+ * devices probed via device tree. */
+int stmmac_pltfr_restore(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ struct platform_device *pdev = to_platform_device(dev);
+
+ if (priv->plat->init)
+ priv->plat->init(pdev);
+
+ return stmmac_restore(ndev);
+}
+
+/* .thaw and .restore share one handler: both must re-run the platform
+ * init hook and restart the MAC. Without CONFIG_PM an empty ops struct
+ * keeps the driver definition below valid. */
+static const struct dev_pm_ops stmmac_pltfr_pm_ops = {
+ .suspend = stmmac_pltfr_suspend,
+ .resume = stmmac_pltfr_resume,
+ .freeze = stmmac_pltfr_freeze,
+ .thaw = stmmac_pltfr_restore,
+ .restore = stmmac_pltfr_restore,
+};
+#else
+static const struct dev_pm_ops stmmac_pltfr_pm_ops;
+#endif /* CONFIG_PM */
+
+/* Device-tree compatibles handled by this platform driver. */
+static const struct of_device_id stmmac_dt_ids[] = {
+ { .compatible = "st,spear600-gmac"},
+ { .compatible = "snps,dwmac-3.70a"},
+ { .compatible = "snps,dwmac"},
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, stmmac_dt_ids);
+
+struct platform_driver stmmac_pltfr_driver = {
+ .probe = stmmac_pltfr_probe,
+ .remove = stmmac_pltfr_remove,
+ .driver = {
+ .name = STMMAC_RESOURCE_NAME,
+ .owner = THIS_MODULE,
+ .pm = &stmmac_pltfr_pm_ops,
+ .of_match_table = of_match_ptr(stmmac_dt_ids),
+ },
+};
+
+MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet PLATFORM driver");
+MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
+MODULE_LICENSE("GPL");
--- /dev/null
+/*******************************************************************************
+ PTP 1588 clock using the STMMAC.
+
+ Copyright (C) 2013 Vayavya Labs Pvt Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Rayagond Kokatanur <rayagond@vayavyalabs.com>
+*******************************************************************************/
+#include "stmmac.h"
+#include "stmmac_ptp.h"
+
+/**
+ * stmmac_adjust_freq
+ *
+ * @ptp: pointer to ptp_clock_info structure
+ * @ppb: desired period change in parts per billion
+ *
+ * Description: this function will adjust the frequency of hardware clock.
+ */
+static int stmmac_adjust_freq(struct ptp_clock_info *ptp, s32 ppb)
+{
+ struct stmmac_priv *priv =
+ container_of(ptp, struct stmmac_priv, ptp_clock_ops);
+ unsigned long flags;
+ u32 diff, addend;
+ int neg_adj = 0;
+ u64 adj;
+
+ if (ppb < 0) {
+ neg_adj = 1;
+ ppb = -ppb;
+ }
+
+ /* scale the default addend by ppb / 1e9 and apply the sign */
+ addend = priv->default_addend;
+ adj = addend;
+ adj *= ppb;
+ diff = div_u64(adj, 1000000000ULL);
+ addend = neg_adj ? (addend - diff) : (addend + diff);
+
+ spin_lock_irqsave(&priv->ptp_lock, flags);
+
+ priv->hw->ptp->config_addend(priv->ioaddr, addend);
+
+ /* release the lock taken above (previously unlocked priv->lock,
+ * leaving ptp_lock held forever and corrupting priv->lock state) */
+ spin_unlock_irqrestore(&priv->ptp_lock, flags);
+
+ return 0;
+}
+
+/**
+ * stmmac_adjust_time
+ *
+ * @ptp: pointer to ptp_clock_info structure
+ * @delta: desired change in nanoseconds
+ *
+ * Description: this function will shift/adjust the hardware clock time.
+ */
+static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct stmmac_priv *priv =
+ container_of(ptp, struct stmmac_priv, ptp_clock_ops);
+ unsigned long flags;
+ u32 sec, nsec;
+ u32 quotient, remainder;
+ int neg_adj = 0;
+
+ if (delta < 0) {
+ neg_adj = 1;
+ delta = -delta;
+ }
+
+ /* split |delta| into whole seconds and leftover nanoseconds */
+ quotient = div_u64_rem(delta, 1000000000ULL, &remainder);
+ sec = quotient;
+ nsec = remainder;
+
+ spin_lock_irqsave(&priv->ptp_lock, flags);
+
+ priv->hw->ptp->adjust_systime(priv->ioaddr, sec, nsec, neg_adj);
+
+ /* release the lock taken above (previously unlocked priv->lock,
+ * leaving ptp_lock held forever and corrupting priv->lock state) */
+ spin_unlock_irqrestore(&priv->ptp_lock, flags);
+
+ return 0;
+}
+
+/**
+ * stmmac_get_time
+ *
+ * @ptp: pointer to ptp_clock_info structure
+ * @ts: pointer to hold time/result
+ *
+ * Description: this function will read the current time from the
+ * hardware clock and store it in @ts.
+ */
+static int stmmac_get_time(struct ptp_clock_info *ptp, struct timespec *ts)
+{
+ struct stmmac_priv *priv =
+ container_of(ptp, struct stmmac_priv, ptp_clock_ops);
+ unsigned long flags;
+ u64 ns;
+ u32 reminder;
+
+ spin_lock_irqsave(&priv->ptp_lock, flags);
+
+ ns = priv->hw->ptp->get_systime(priv->ioaddr);
+
+ spin_unlock_irqrestore(&priv->ptp_lock, flags);
+
+ /* split the nanosecond counter into the timespec fields */
+ ts->tv_sec = div_u64_rem(ns, 1000000000ULL, &reminder);
+ ts->tv_nsec = reminder;
+
+ return 0;
+}
+
+/**
+ * stmmac_set_time
+ *
+ * @ptp: pointer to ptp_clock_info structure
+ * @ts: time value to set
+ *
+ * Description: this function will set the current time on the
+ * hardware clock.
+ */
+static int stmmac_set_time(struct ptp_clock_info *ptp,
+ const struct timespec *ts)
+{
+ struct stmmac_priv *priv =
+ container_of(ptp, struct stmmac_priv, ptp_clock_ops);
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->ptp_lock, flags);
+
+ /* program seconds/nanoseconds directly into the system-time regs */
+ priv->hw->ptp->init_systime(priv->ioaddr, ts->tv_sec, ts->tv_nsec);
+
+ spin_unlock_irqrestore(&priv->ptp_lock, flags);
+
+ return 0;
+}
+
+/* Ancillary clock features (alarms, external timestamps, PPS) are not
+ * supported; always reject the request. */
+static int stmmac_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ return -EOPNOTSUPP;
+}
+
+/* structure describing a PTP hardware clock */
+static struct ptp_clock_info stmmac_ptp_clock_ops = {
+ .owner = THIS_MODULE,
+ .name = "stmmac_ptp_clock",
+ .max_adj = 62500000, /* matches STMMAC_SYSCLOCK in stmmac_ptp.h */
+ .n_alarm = 0,
+ .n_ext_ts = 0,
+ .n_per_out = 0,
+ .pps = 0,
+ .adjfreq = stmmac_adjust_freq,
+ .adjtime = stmmac_adjust_time,
+ .gettime = stmmac_get_time,
+ .settime = stmmac_set_time,
+ .enable = stmmac_enable,
+};
+
+/**
+ * stmmac_ptp_register
+ * @priv: driver private structure
+ * Description: this function will register the ptp clock driver
+ * to kernel. It also does some house keeping work.
+ *
+ * Registration failure is treated as non-fatal: priv->ptp_clock is set
+ * to NULL and 0 is still returned, so the MAC keeps working without a
+ * PTP clock.
+ */
+int stmmac_ptp_register(struct stmmac_priv *priv)
+{
+ spin_lock_init(&priv->ptp_lock);
+ priv->ptp_clock_ops = stmmac_ptp_clock_ops;
+
+ priv->ptp_clock = ptp_clock_register(&priv->ptp_clock_ops,
+ priv->device);
+ if (IS_ERR(priv->ptp_clock)) {
+ priv->ptp_clock = NULL;
+ pr_err("ptp_clock_register() failed on %s\n", priv->dev->name);
+ } else
+ pr_debug("Added PTP HW clock successfully on %s\n",
+ priv->dev->name);
+
+ return 0;
+}
+
+/**
+ * stmmac_ptp_unregister
+ * @priv: driver private structure
+ * Description: this function will remove/unregister the ptp clock driver
+ * from the kernel. Safe to call when registration failed (ptp_clock NULL).
+ */
+void stmmac_ptp_unregister(struct stmmac_priv *priv)
+{
+ if (priv->ptp_clock) {
+ ptp_clock_unregister(priv->ptp_clock);
+ pr_debug("Removed PTP HW clock successfully on %s\n",
+ priv->dev->name);
+ }
+}
--- /dev/null
+/******************************************************************************
+ PTP Header file
+
+ Copyright (C) 2013 Vayavya Labs Pvt Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Rayagond Kokatanur <rayagond@vayavyalabs.com>
+******************************************************************************/
+
+#ifndef __STMMAC_PTP_H__
+#define __STMMAC_PTP_H__
+
+#define STMMAC_SYSCLOCK 62500000
+
+/* IEEE 1588 PTP register offsets */
+#define PTP_TCR 0x0700 /* Timestamp Control Reg */
+#define PTP_SSIR 0x0704 /* Sub-Second Increment Reg */
+#define PTP_STSR 0x0708 /* System Time - Seconds Reg */
+#define PTP_STNSR 0x070C /* System Time - Nanoseconds Reg */
+#define PTP_STSUR 0x0710 /* System Time - Seconds Update Reg */
+#define PTP_STNSUR 0x0714 /* System Time - Nanoseconds Update Reg */
+#define PTP_TAR 0x0718 /* Timestamp Addend Reg */
+#define PTP_TTSR 0x071C /* Target Time Seconds Reg */
+#define PTP_TTNSR 0x0720 /* Target Time Nanoseconds Reg */
+#define PTP_STHWSR 0x0724 /* System Time - Higher Word Seconds Reg */
+#define PTP_TSR 0x0728 /* Timestamp Status */
+
+#define PTP_STNSUR_ADDSUB_SHIFT 31
+
+/* PTP TCR defines */
+#define PTP_TCR_TSENA 0x00000001 /* Timestamp Enable */
+#define PTP_TCR_TSCFUPDT 0x00000002 /* Timestamp Fine/Coarse Update */
+#define PTP_TCR_TSINIT 0x00000004 /* Timestamp Initialize */
+#define PTP_TCR_TSUPDT 0x00000008 /* Timestamp Update */
+/* Timestamp Interrupt Trigger Enable */
+#define PTP_TCR_TSTRIG 0x00000010
+#define PTP_TCR_TSADDREG 0x00000020 /* Addend Reg Update */
+#define PTP_TCR_TSENALL 0x00000100 /* Enable Timestamp for All Frames */
+/* Timestamp Digital or Binary Rollover Control */
+#define PTP_TCR_TSCTRLSSR 0x00000200
+
+/* Enable PTP packet Processing for Version 2 Format */
+#define PTP_TCR_TSVER2ENA 0x00000400
+/* Enable Processing of PTP over Ethernet Frames */
+#define PTP_TCR_TSIPENA 0x00000800
+/* Enable Processing of PTP Frames Sent over IPv6-UDP */
+#define PTP_TCR_TSIPV6ENA 0x00001000
+/* Enable Processing of PTP Frames Sent over IPv4-UDP */
+#define PTP_TCR_TSIPV4ENA 0x00002000
+/* Enable Timestamp Snapshot for Event Messages */
+#define PTP_TCR_TSEVNTENA 0x00004000
+/* Enable Snapshot for Messages Relevant to Master */
+#define PTP_TCR_TSMSTRENA 0x00008000
+/* Select PTP packets for Taking Snapshots */
+#define PTP_TCR_SNAPTYPSEL_1 0x00010000
+/* Enable MAC address for PTP Frame Filtering */
+#define PTP_TCR_TSENMACADDR 0x00040000
+
+#endif /* __STMMAC_PTP_H__ */
--- /dev/null
+config RK_VMAC_ETH
+ bool "Rockchip 10/100 Ethernet driver"
+ depends on HAS_IOMEM && HAS_DMA
+ select NET_CORE
+ select MII
+ select PHYLIB
+ select CRC32
+ ---help---
+ Rockchip 10/100 VMAC Ethernet driver.
+
--- /dev/null
+obj-$(CONFIG_RK_VMAC_ETH) += rk29_vmac.o
+obj-$(CONFIG_RK_VMAC_ETH) += rk29_vmac_phy.o
--- /dev/null
+/*\r
+ * linux/arch/arc/drivers/arcvmac.c\r
+ *\r
+ * Copyright (C) 2003-2006 Codito Technologies, for linux-2.4 port\r
+ * Copyright (C) 2006-2007 Celunite Inc, for linux-2.6 port\r
+ * Copyright (C) 2007-2008 Sagem Communications, Fehmi HAFSI\r
+ * Copyright (C) 2009 Sagem Communications, Andreas Fenkart\r
+ * All Rights Reserved.\r
+ *\r
+ * This program is free software; you can redistribute it and/or modify\r
+ * it under the terms of the GNU General Public License as published by\r
+ * the Free Software Foundation; either version 2 of the License, or\r
+ * (at your option) any later version.\r
+ *\r
+ * This program is distributed in the hope that it will be useful,\r
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of\r
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r
+ * GNU General Public License for more details.\r
+ *\r
+ * You should have received a copy of the GNU General Public License\r
+ * along with this program; if not, write to the Free Software\r
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\r
+ *\r
+ * external PHY support based on dnet.c\r
+ * ring management based on bcm63xx_enet.c\r
+ *\r
+ * Authors: amit.bhor@celunite.com, sameer.dhavale@celunite.com\r
+ */\r
+\r
+#define DEBUG\r
+\r
+#include <linux/clk.h>\r
+#include <linux/crc32.h>\r
+#include <linux/delay.h>\r
+#include <linux/dma-mapping.h>\r
+#include <linux/etherdevice.h>\r
+#include <linux/init.h>\r
+#include <linux/io.h>\r
+#include <linux/kernel.h>\r
+#include <linux/module.h>\r
+#include <linux/moduleparam.h>\r
+#include <linux/netdevice.h>\r
+#include <linux/phy.h>\r
+#include <linux/platform_device.h>\r
+#include <linux/slab.h>\r
+#include <linux/types.h>\r
+#include <linux/wakelock.h>\r
+#include <linux/version.h>\r
+#include <linux/gpio.h>\r
+#include <asm/irq.h>\r
+#include <linux/interrupt.h>\r
+#include <linux/completion.h>\r
+#include <linux/of.h>\r
+#include <linux/of_platform.h>\r
+\r
+#include "rk29_vmac.h"\r
+\r
+//static struct wake_lock idlelock; /* add by lyx @ 20110302 */\r
+\r
+/* Register access macros */\r
+#define vmac_writel(port, value, reg) \\r
+ writel((value), (port)->regs + reg##_OFFSET)\r
+#define vmac_readl(port, reg) readl((port)->regs + reg##_OFFSET)\r
+\r
+/* Read the station address from the ADDRL/ADDRH registers into hwaddr\r
+ * (ADDRL holds bytes 0-3, ADDRH bytes 4-5) and return hwaddr. */\r
+static unsigned char *read_mac_reg(struct net_device *dev,\r
+ unsigned char hwaddr[ETH_ALEN])\r
+{\r
+ struct vmac_priv *ap = netdev_priv(dev);\r
+ unsigned mac_lo, mac_hi;\r
+\r
+ WARN_ON(!hwaddr);\r
+ mac_lo = vmac_readl(ap, ADDRL);\r
+ mac_hi = vmac_readl(ap, ADDRH);\r
+\r
+ hwaddr[0] = (mac_lo >> 0) & 0xff;\r
+ hwaddr[1] = (mac_lo >> 8) & 0xff;\r
+ hwaddr[2] = (mac_lo >> 16) & 0xff;\r
+ hwaddr[3] = (mac_lo >> 24) & 0xff;\r
+ hwaddr[4] = (mac_hi >> 0) & 0xff;\r
+ hwaddr[5] = (mac_hi >> 8) & 0xff;\r
+ return hwaddr;\r
+}\r
+\r
+/* Program the station address: pack hwaddr bytes 0-3 into ADDRL and\r
+ * bytes 4-5 into ADDRH (inverse of read_mac_reg). */\r
+static void write_mac_reg(struct net_device *dev, unsigned char* hwaddr)\r
+{\r
+ struct vmac_priv *ap = netdev_priv(dev);\r
+ unsigned mac_lo, mac_hi;\r
+\r
+ mac_lo = hwaddr[3] << 24 | hwaddr[2] << 16 | hwaddr[1] << 8 | hwaddr[0];\r
+ mac_hi = hwaddr[5] << 8 | hwaddr[4];\r
+\r
+ vmac_writel(ap, mac_lo, ADDRL);\r
+ vmac_writel(ap, mac_hi, ADDRH);\r
+}\r
+\r
+/* Issue one MDIO command word and wait up to 1s for ap->mdio_complete\r
+ * to be signalled (by the MDIO-done interrupt handler elsewhere). */\r
+static void vmac_mdio_xmit(struct vmac_priv *ap, unsigned val)\r
+{\r
+ init_completion(&ap->mdio_complete);\r
+ vmac_writel(ap, val, MDIO_DATA);\r
+ if (!wait_for_completion_timeout(&ap->mdio_complete,\r
+ msecs_to_jiffies(1000)))\r
+ /* printk previously lacked a log level */\r
+ printk(KERN_ERR "Time out for waiting mdio completion\n");\r
+}\r
+\r
+/* mii_bus .read: build a read command (phy id and register packed into\r
+ * bits 23/18), transmit it, then return the low data bits of MDIO_DATA. */\r
+static int vmac_mdio_read(struct mii_bus *bus, int phy_id, int phy_reg)\r
+{\r
+ struct vmac_priv *vmac = bus->priv;\r
+ unsigned int val;\r
+ /* only 5 bits allowed for phy-addr and reg_offset */\r
+ WARN_ON(phy_id & ~0x1f || phy_reg & ~0x1f);\r
+\r
+ val = MDIO_BASE | MDIO_OP_READ;\r
+ val |= phy_id << 23 | phy_reg << 18;\r
+ vmac_mdio_xmit(vmac, val);\r
+\r
+ val = vmac_readl(vmac, MDIO_DATA);\r
+ return val & MDIO_DATA_MASK;\r
+}\r
+\r
+/* mii_bus .write: build a write command carrying @value in the data\r
+ * bits and transmit it; always reports success. */\r
+static int vmac_mdio_write(struct mii_bus *bus, int phy_id, int phy_reg,\r
+ u16 value)\r
+{\r
+ struct vmac_priv *vmac = bus->priv;\r
+ unsigned int val;\r
+ /* only 5 bits allowed for phy-addr and reg_offset */\r
+ WARN_ON(phy_id & ~0x1f || phy_reg & ~0x1f);\r
+\r
+ val = MDIO_BASE | MDIO_OP_WRITE;\r
+ val |= phy_id << 23 | phy_reg << 18;\r
+ val |= (value & MDIO_DATA_MASK);\r
+ vmac_mdio_xmit(vmac, val);\r
+ return 0;\r
+}\r
+\r
+/* phylib link-change callback: mirror PHY duplex/speed/link into the\r
+ * driver state (and the CONTROL register for duplex), invoke the board's\r
+ * RMII speed-switch hook, and print status when anything changed. */\r
+static void vmac_handle_link_change(struct net_device *dev)\r
+{\r
+ struct vmac_priv *ap = netdev_priv(dev);\r
+ struct phy_device *phydev = ap->phy_dev;\r
+ unsigned long flags;\r
+ int report_change = 0;\r
+ struct rk29_vmac_platform_data *pdata = ap->pdev->dev.platform_data;\r
+\r
+ spin_lock_irqsave(&ap->lock, flags);\r
+\r
+ if (phydev->duplex != ap->duplex) {\r
+ unsigned tmp;\r
+\r
+ tmp = vmac_readl(ap, CONTROL);\r
+\r
+ /* set/clear the ENFL bit to follow the PHY duplex mode */\r
+ if (phydev->duplex)\r
+ tmp |= ENFL_MASK;\r
+ else\r
+ tmp &= ~ENFL_MASK;\r
+\r
+ vmac_writel(ap, tmp, CONTROL);\r
+\r
+ ap->duplex = phydev->duplex;\r
+ report_change = 1;\r
+ }\r
+\r
+ if (phydev->speed != ap->speed) {\r
+ ap->speed = phydev->speed;\r
+ report_change = 1;\r
+ }\r
+\r
+ /* optional board hook to reconfigure RMII clocking for the speed */\r
+ if (pdata && pdata->rmii_speed_switch)\r
+ pdata->rmii_speed_switch(phydev->speed);\r
+\r
+ if (phydev->link != ap->link) {\r
+ ap->link = phydev->link;\r
+ report_change = 1;\r
+ }\r
+\r
+ spin_unlock_irqrestore(&ap->lock, flags);\r
+\r
+ if (report_change)\r
+ phy_print_status(ap->phy_dev);\r
+}\r
+\r
+/* Locate a PHY on the MDIO bus (fixed port if CONFIG_PHY_PORT_NUM is\r
+ * set, otherwise the first one found), connect it in RMII mode with\r
+ * vmac_handle_link_change as callback, and restrict advertisement to\r
+ * 10/100 plus pause. Returns 0 or a negative errno. */\r
+static int vmac_mii_probe(struct net_device *dev)\r
+{\r
+ struct vmac_priv *ap = netdev_priv(dev);\r
+ struct phy_device *phydev = NULL; \r
+ //struct clk *sys_clk;\r
+ //unsigned long clock_rate;\r
+ int phy_addr, err;\r
+\r
+\r
+#if defined (CONFIG_PHY_PORT_NUM) && (CONFIG_PHY_PORT_NUM != 0)\r
+ if (ap->mii_bus->phy_map[CONFIG_PHY_PORT_NUM])\r
+ phydev = ap->mii_bus->phy_map[CONFIG_PHY_PORT_NUM];\r
+#else\r
+ /* find the first phy */\r
+ for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {\r
+ if (ap->mii_bus->phy_map[phy_addr]) {\r
+ phydev = ap->mii_bus->phy_map[phy_addr];\r
+ break;\r
+ }\r
+ }\r
+#endif\r
+\r
+ if (!phydev) {\r
+ dev_err(&dev->dev, "no PHY found\n");\r
+ return -ENODEV;\r
+ }\r
+\r
+ /* add pin_irq, if avail */\r
+ phydev = phy_connect(dev, dev_name(&phydev->dev),\r
+ &vmac_handle_link_change,\r
+ PHY_INTERFACE_MODE_RMII);\r
+ if (IS_ERR(phydev)) {\r
+ err = PTR_ERR(phydev);\r
+ dev_err(&dev->dev, "could not attach to PHY %d\n", err);\r
+ goto err_out;\r
+ }\r
+\r
+ /* limit capabilities to 10/100 and add pause frame support */\r
+ phydev->supported &= PHY_BASIC_FEATURES;\r
+ phydev->supported |= SUPPORTED_Asym_Pause | SUPPORTED_Pause;\r
+\r
+ phydev->advertising = phydev->supported;\r
+\r
+ /* force a first link-change report once the PHY comes up */\r
+ ap->link = 0;\r
+ ap->speed = 0;\r
+ ap->duplex = -1;\r
+ ap->phy_dev = phydev;\r
+\r
+ return 0;\r
+//err_disconnect:\r
+// phy_disconnect(phydev);\r
+err_out:\r
+ return err;\r
+}\r
+\r
+/* Allocate, set up and register the MDIO bus (all PHYs polled), then\r
+ * probe/attach the PHY via vmac_mii_probe(). Unwinds every allocation\r
+ * on failure and leaves ap->mii_bus NULL. */\r
+static int vmac_mii_init(struct vmac_priv *ap)\r
+{\r
+ int err, i;\r
+\r
+ ap->mii_bus = mdiobus_alloc();\r
+ \r
+ if (ap->mii_bus == NULL)\r
+ return -ENOMEM;\r
+\r
+ ap->mii_bus->name = "vmac_mii_bus";\r
+ ap->mii_bus->read = &vmac_mdio_read;\r
+ ap->mii_bus->write = &vmac_mdio_write;\r
+\r
+ snprintf(ap->mii_bus->id, MII_BUS_ID_SIZE, "%x", 0);\r
+\r
+ ap->mii_bus->priv = ap;\r
+\r
+ err = -ENOMEM;\r
+ ap->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);\r
+ if (!ap->mii_bus->irq)\r
+ goto err_out;\r
+\r
+ /* no interrupt lines wired to the PHYs: poll all of them */\r
+ for (i = 0; i < PHY_MAX_ADDR; i++)\r
+ ap->mii_bus->irq[i] = PHY_POLL;\r
+\r
+#if 0\r
+ /* FIXME: what is it used for? */\r
+ platform_set_drvdata(ap->dev, ap->mii_bus);\r
+#endif\r
+\r
+ err = mdiobus_register(ap->mii_bus);\r
+ if (err)\r
+ goto err_out_free_mdio_irq;\r
+\r
+ err = vmac_mii_probe(ap->dev);\r
+ if (err)\r
+ goto err_out_unregister_bus;\r
+\r
+ return 0;\r
+\r
+err_out_unregister_bus:\r
+ mdiobus_unregister(ap->mii_bus);\r
+err_out_free_mdio_irq:\r
+ kfree(ap->mii_bus->irq);\r
+err_out:\r
+ mdiobus_free(ap->mii_bus);\r
+ ap->mii_bus = NULL;\r
+ return err;\r
+}\r
+\r
+/* Tear down what vmac_mii_init()/vmac_mii_probe() set up: disconnect\r
+ * the PHY, then unregister and free the MDIO bus. */\r
+static void vmac_mii_exit(struct net_device *dev)\r
+{\r
+ struct vmac_priv *ap = netdev_priv(dev);\r
+\r
+ if (ap->phy_dev)\r
+ phy_disconnect(ap->phy_dev);\r
+ if (ap->mii_bus) {\r
+ mdiobus_unregister(ap->mii_bus);\r
+ kfree(ap->mii_bus->irq);\r
+ mdiobus_free(ap->mii_bus);\r
+ ap->mii_bus = NULL;\r
+ }\r
+}\r
+\r
+/* ethtool get_settings: delegate to phylib, -ENODEV if no PHY bound. */\r
+static int vmacether_get_settings(struct net_device *dev,\r
+ struct ethtool_cmd *cmd)\r
+{\r
+ struct vmac_priv *ap = netdev_priv(dev);\r
+\r
+ return ap->phy_dev ? phy_ethtool_gset(ap->phy_dev, cmd) : -ENODEV;\r
+}\r
+\r
+/* ethtool set_settings: delegate to phylib, -ENODEV if no PHY bound. */\r
+static int vmacether_set_settings(struct net_device *dev,\r
+ struct ethtool_cmd *cmd)\r
+{\r
+ struct vmac_priv *ap = netdev_priv(dev);\r
+\r
+ return ap->phy_dev ? phy_ethtool_sset(ap->phy_dev, cmd) : -ENODEV;\r
+}\r
+\r
+/* ndo_do_ioctl: forward MII ioctls to phylib. The interface must be up\r
+ * (-EINVAL otherwise) and a PHY must be attached (-ENODEV otherwise). */\r
+static int vmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)\r
+{\r
+ struct vmac_priv *priv = netdev_priv(dev);\r
+\r
+ if (!netif_running(dev))\r
+ return -EINVAL;\r
+\r
+ if (!priv->phy_dev)\r
+ return -ENODEV;\r
+\r
+ return phy_mii_ioctl(priv->phy_dev, rq, cmd);\r
+}\r
+\r
+/* ethtool get_drvinfo: report driver name/version and the MMIO base\r
+ * address as the bus-info string. */\r
+static void vmacether_get_drvinfo(struct net_device *dev,\r
+ struct ethtool_drvinfo *info)\r
+{\r
+ struct vmac_priv *ap = netdev_priv(dev);\r
+\r
+ strlcpy(info->driver, VMAC_NAME, sizeof(info->driver));\r
+ strlcpy(info->version, VMAC_VERSION, sizeof(info->version));\r
+ snprintf(info->bus_info, sizeof(info->bus_info),\r
+ "platform 0x%x", ap->mem_base);\r
+}\r
+\r
+/* Credit rx error statistics from a counter-overrun status word; the\r
+ * driver adds 256 events per set bit. Always returns 0. */\r
+static int update_error_counters(struct net_device *dev, int status)\r
+{\r
+ struct vmac_priv *ap = netdev_priv(dev);\r
+ dev_dbg(&ap->pdev->dev, "rx error counter overrun. status = 0x%x\n",\r
+ status);\r
+\r
+ /* programming error */\r
+ WARN_ON(status & TXCH_MASK);\r
+ WARN_ON(!(status & (MSER_MASK | RXCR_MASK | RXFR_MASK | RXFL_MASK)));\r
+\r
+ if (status & MSER_MASK)\r
+ ap->stats.rx_over_errors += 256; /* ran out of BD */\r
+ if (status & RXCR_MASK)\r
+ ap->stats.rx_crc_errors += 256;\r
+ if (status & RXFR_MASK)\r
+ ap->stats.rx_frame_errors += 256;\r
+ if (status & RXFL_MASK)\r
+ ap->stats.rx_fifo_errors += 256;\r
+\r
+ return 0;\r
+}\r
+\r
+/* Account tx errors from a transmit descriptor status word; the\r
+ * collision/late-collision/retry fields only apply in half duplex. */\r
+static void update_tx_errors(struct net_device *dev, int status)\r
+{\r
+ struct vmac_priv *ap = netdev_priv(dev);\r
+\r
+ if (status & UFLO)\r
+ ap->stats.tx_fifo_errors++;\r
+\r
+ if (ap->duplex)\r
+ return;\r
+\r
+ /* half duplex flags */\r
+ if (status & LTCL)\r
+ ap->stats.tx_window_errors++;\r
+ if (status & RETRY_CT)\r
+ ap->stats.collisions += (status & RETRY_CT) >> 24;\r
+ if (status & DROP) /* too many retries */\r
+ ap->stats.tx_aborted_errors++;\r
+ if (status & DEFER)\r
+ dev_vdbg(&ap->pdev->dev, "\"defer to traffic\"\n");\r
+ if (status & CARLOSS)\r
+ ap->stats.tx_carrier_errors++;\r
+}\r
+\r
+/* Forcefully release every sk_buff still held by the rx ring (teardown\r
+ * path), unmapping each DMA buffer before freeing the skb. */\r
+static int vmac_rx_reclaim_force(struct net_device *dev)\r
+{\r
+ struct vmac_priv *ap = netdev_priv(dev);\r
+ int ct;\r
+\r
+ ct = 0;\r
+\r
+ dev_dbg(&ap->pdev->dev, "%s need to release %d rx sk_buff\n",\r
+ __func__, fifo_used(&ap->rx_ring));\r
+\r
+ while (!fifo_empty(&ap->rx_ring) && ct++ < ap->rx_ring.size) {\r
+ struct vmac_buffer_desc *desc;\r
+ struct sk_buff *skb;\r
+ int desc_idx;\r
+\r
+ desc_idx = ap->rx_ring.tail;\r
+ desc = &ap->rxbd[desc_idx];\r
+ fifo_inc_tail(&ap->rx_ring);\r
+\r
+ if (!ap->rx_skbuff[desc_idx]) {\r
+ dev_err(&ap->pdev->dev, "non-populated rx_skbuff found %d\n",\r
+ desc_idx);\r
+ continue;\r
+ }\r
+\r
+ skb = ap->rx_skbuff[desc_idx];\r
+ ap->rx_skbuff[desc_idx] = NULL;\r
+\r
+ /* rx buffers are mapped in vmac_rx_refill() with size\r
+ * ap->rx_skb_size and direction DMA_FROM_DEVICE; the unmap\r
+ * must match (was skb->len / DMA_TO_DEVICE). */\r
+ dma_unmap_single(&ap->pdev->dev, desc->data, ap->rx_skb_size,\r
+ DMA_FROM_DEVICE);\r
+\r
+ dev_kfree_skb(skb);\r
+ }\r
+\r
+ if (!fifo_empty(&ap->rx_ring)) {\r
+ dev_err(&ap->pdev->dev, "failed to reclaim %d rx sk_buff\n",\r
+ fifo_used(&ap->rx_ring));\r
+ }\r
+\r
+ return 0;\r
+}\r
+\r
+/* Repopulate the rx ring: give dropped-packet slots back to the DMA\r
+ * engine and allocate/map fresh skbs for empty slots. If the ring could\r
+ * not be filled at all, arm rx_timeout to retry in one second. */\r
+static int vmac_rx_refill(struct net_device *dev)\r
+{\r
+ struct vmac_priv *ap = netdev_priv(dev);\r
+\r
+ WARN_ON(fifo_full(&ap->rx_ring));\r
+\r
+ while (!fifo_full(&ap->rx_ring)) {\r
+ struct vmac_buffer_desc *desc;\r
+ struct sk_buff *skb;\r
+ dma_addr_t p;\r
+ int desc_idx;\r
+\r
+ desc_idx = ap->rx_ring.head;\r
+ desc = &ap->rxbd[desc_idx];\r
+\r
+ /* make sure we read the actual descriptor status */\r
+ rmb();\r
+\r
+ if (ap->rx_skbuff[desc_idx]) {\r
+ /* dropped packet / buffer chaining */\r
+ fifo_inc_head(&ap->rx_ring);\r
+\r
+ /* return to DMA */\r
+ wmb();\r
+ desc->info = OWN_MASK | ap->rx_skb_size;\r
+ continue;\r
+ }\r
+\r
+ skb = netdev_alloc_skb(dev, ap->rx_skb_size + 2);\r
+ if (!skb) {\r
+ dev_info(&ap->pdev->dev, "failed to allocate rx_skb, skb's left %d\n",\r
+ fifo_used(&ap->rx_ring));\r
+ break;\r
+ }\r
+\r
+ /* IP header Alignment (14 byte Ethernet header) */\r
+ skb_reserve(skb, 2);\r
+ WARN_ON(skb->len != 0); /* nothing received yet */\r
+\r
+ ap->rx_skbuff[desc_idx] = skb;\r
+\r
+ /* NOTE(review): the mapping result is not checked with\r
+ * dma_mapping_error() - confirm whether that is needed here. */\r
+ p = dma_map_single(&ap->pdev->dev, skb->data, ap->rx_skb_size,\r
+ DMA_FROM_DEVICE);\r
+\r
+ desc->data = p;\r
+\r
+ /* publish the buffer address before handing ownership to DMA */\r
+ wmb();\r
+ desc->info = OWN_MASK | ap->rx_skb_size;\r
+\r
+ fifo_inc_head(&ap->rx_ring);\r
+ }\r
+\r
+ /* If rx ring is still empty, set a timer to try allocating\r
+ * again at a later time. */\r
+ if (fifo_empty(&ap->rx_ring) && netif_running(dev)) {\r
+ dev_warn(&ap->pdev->dev, "unable to refill rx ring\n");\r
+ ap->rx_timeout.expires = jiffies + HZ;\r
+ add_timer(&ap->rx_timeout);\r
+ }\r
+\r
+ return 0;\r
+}\r
+\r
+/*\r
+ * timer callback to defer refill rx queue in case we're OOM\r
+ */\r
+static void vmac_refill_rx_timer(unsigned long data)\r
+{\r
+ struct net_device *dev;\r
+ struct vmac_priv *ap;\r
+\r
+ dev = (struct net_device *)data;\r
+ ap = netdev_priv(dev);\r
+\r
+ spin_lock(&ap->rx_lock);\r
+ vmac_rx_refill(dev);\r
+ spin_unlock(&ap->rx_lock);\r
+}\r
+\r
+/* merge buffer chaining */\r
+struct sk_buff *vmac_merge_rx_buffers(struct net_device *dev,\r
+ struct vmac_buffer_desc *after,\r
+ int pkt_len) /* data */\r
+{\r
+ struct vmac_priv *ap = netdev_priv(dev);\r
+ struct sk_buff *merge_skb, *cur_skb;\r
+ struct dma_fifo *rx_ring;\r
+ struct vmac_buffer_desc *desc;\r
+\r
+ rx_ring = &ap->rx_ring;\r
+ desc = &ap->rxbd[rx_ring->tail];\r
+\r
+ WARN_ON(desc == after);\r
+\r
+ /* strip FCS */\r
+ pkt_len -= 4;\r
+\r
+ /* IP header Alignment (14 byte Ethernet header) */\r
+ merge_skb = netdev_alloc_skb(dev, pkt_len + 2);\r
+ if (!merge_skb) {\r
+ dev_err(&ap->pdev->dev, "failed to allocate merged rx_skb, rx skb's left %d\n",\r
+ fifo_used(rx_ring));\r
+\r
+ return NULL;\r
+ }\r
+\r
+ skb_reserve(merge_skb, 2);\r
+\r
+ while (desc != after && pkt_len) {\r
+ struct vmac_buffer_desc *desc;\r
+ int buf_len, valid;\r
+\r
+ /* desc needs wrapping */\r
+ desc = &ap->rxbd[rx_ring->tail];\r
+ cur_skb = ap->rx_skbuff[rx_ring->tail];\r
+ WARN_ON(!cur_skb);\r
+\r
+ dma_unmap_single(&ap->pdev->dev, desc->data, ap->rx_skb_size,\r
+ DMA_FROM_DEVICE);\r
+\r
+ /* do not copy FCS */\r
+ buf_len = desc->info & LEN_MASK;\r
+ valid = min(pkt_len, buf_len);\r
+ pkt_len -= valid;\r
+\r
+ memcpy(skb_put(merge_skb, valid), cur_skb->data, valid);\r
+\r
+ fifo_inc_tail(rx_ring);\r
+ }\r
+\r
+ /* merging_pressure++ */\r
+\r
+ if (unlikely(pkt_len != 0))\r
+ dev_err(&ap->pdev->dev, "buffer chaining bytes missing %d\n",\r
+ pkt_len);\r
+\r
+ WARN_ON(desc != after);\r
+\r
+ return merge_skb;\r
+}\r
+\r
/*
 * Receive up to @budget completed packets from the rx ring.
 *
 * Uses a local "lookahead" copy of the ring to scan descriptors until
 * a LAST-marked descriptor completes a packet; only then is the real
 * ring tail advanced, so partially received chains are never freed.
 * Single-descriptor packets are handed up zero-copy; chained packets
 * are linearized via vmac_merge_rx_buffers().  Returns the number of
 * packets passed to the stack.
 *
 * Caller must hold ap->rx_lock (see vmac_poll).
 */
int vmac_rx_receive(struct net_device *dev, int budget)
{
	struct vmac_priv *ap = netdev_priv(dev);
	struct vmac_buffer_desc *first;
	int processed, pkt_len, pkt_err;
	struct dma_fifo lookahead;

	processed = 0;

	first = NULL;
	pkt_err = pkt_len = 0;

	/* look ahead, till packet complete */
	lookahead = ap->rx_ring;

	do {
		struct vmac_buffer_desc *desc; /* cur_ */
		int desc_idx; /* cur_ */
		struct sk_buff *skb; /* pkt_ */

		desc_idx = lookahead.tail;
		desc = &ap->rxbd[desc_idx];

		/* make sure we read the actual descriptor status */
		rmb();

		/* break if dma ownership belongs to hw */
		if (desc->info & OWN_MASK) {
			/* snapshot hw head so vmac_intr can detect
			 * progress since this poll */
			ap->mac_rxring_head = vmac_readl(ap, MAC_RXRING_HEAD);
			break;
		}

		if (desc->info & FRST_MASK) {
			/* start of a (possibly chained) packet */
			pkt_len = 0;
			pkt_err = 0;

			/* don't free current */
			ap->rx_ring.tail = lookahead.tail;
			first = desc;
		}

		fifo_inc_tail(&lookahead);

		/* accumulate length / error bits of this fragment */

		pkt_len += desc->info & LEN_MASK;
		pkt_err |= (desc->info & BUFF);

		if (!(desc->info & LAST_MASK))
			continue;

		/* received complete packet */

		if (unlikely(pkt_err || !first)) {
			/* recycle buffers */
			ap->rx_ring.tail = lookahead.tail;
			continue;
		}

		WARN_ON(!(first->info & FRST_MASK) ||
		    !(desc->info & LAST_MASK));
		WARN_ON(pkt_err);

		/* -- valid packet -- */

		if (first != desc) {
			/* buffer chaining: copy fragments into one skb */
			skb = vmac_merge_rx_buffers(dev, desc, pkt_len);

			if (!skb) {
				/* kill packet */
				ap->rx_ring.tail = lookahead.tail;
				ap->rx_merge_error++;
				continue;
			}
		} else {
			dma_unmap_single(&ap->pdev->dev, desc->data,
			    ap->rx_skb_size, DMA_FROM_DEVICE);

			skb = ap->rx_skbuff[desc_idx];
			ap->rx_skbuff[desc_idx] = NULL;
			/* desc->data != skb->data => desc->data DMA mapped */

			/* strip FCS */
			skb_put(skb, pkt_len - 4);
		}

		/* free buffers */
		ap->rx_ring.tail = lookahead.tail;

		WARN_ON(skb->len != pkt_len - 4);
		processed++;
		skb->dev = dev;
		skb->protocol = eth_type_trans(skb, dev);
		ap->stats.rx_packets++;
		ap->stats.rx_bytes += skb->len;
		dev->last_rx = jiffies;
		netif_rx(skb);

	} while (!fifo_empty(&lookahead) && (processed < budget));

	dev_vdbg(&ap->pdev->dev, "processed pkt %d, remaining rx buff %d\n",
	    processed,
	    fifo_used(&ap->rx_ring));

	if (processed || fifo_empty(&ap->rx_ring))
		vmac_rx_refill(dev);

	return processed;
}
+\r
+static void vmac_toggle_irqmask(struct net_device *dev, int enable, int mask)\r
+{\r
+ struct vmac_priv *ap = netdev_priv(dev);\r
+ unsigned long tmp;\r
+\r
+ tmp = vmac_readl(ap, ENABLE);\r
+ if (enable)\r
+ tmp |= mask;\r
+ else\r
+ tmp &= ~mask;\r
+ vmac_writel(ap, tmp, ENABLE);\r
+}\r
+\r
+static void vmac_toggle_txint(struct net_device *dev, int enable)\r
+{\r
+ struct vmac_priv *ap = netdev_priv(dev);\r
+ unsigned long flags;\r
+\r
+ spin_lock_irqsave(&ap->lock, flags);\r
+ vmac_toggle_irqmask(dev, enable, TXINT_MASK);\r
+ spin_unlock_irqrestore(&ap->lock, flags);\r
+}\r
+\r
/* Enable/disable the RX interrupt.  Unlike vmac_toggle_txint() this
 * takes no lock itself - callers (vmac_intr, vmac_poll) already hold
 * ap->lock.  NOTE(review): verify all call sites do hold it. */
static void vmac_toggle_rxint(struct net_device *dev, int enable)
{
	vmac_toggle_irqmask(dev, enable, RXINT_MASK);
}
+\r
/*
 * NAPI poll: ack the rx interrupt, drain up to @budget packets from
 * the rx ring, and - if the ring was fully drained - complete NAPI
 * and re-enable the rx interrupt under ap->lock so the enable cannot
 * race with vmac_intr.  Returns the number of packets processed.
 */
static int vmac_poll(struct napi_struct *napi, int budget)
{
	struct vmac_priv *ap;
	struct net_device *dev;
	int rx_work_done;
	unsigned long flags;

	ap = container_of(napi, struct vmac_priv, napi);
	dev = ap->dev;

	/* ack interrupt */
	vmac_writel(ap, RXINT_MASK, STAT);

	spin_lock(&ap->rx_lock);
	rx_work_done = vmac_rx_receive(dev, budget);
	spin_unlock(&ap->rx_lock);

#ifdef VERBOSE_DEBUG
	if (printk_ratelimit()) {
		dev_vdbg(&ap->pdev->dev, "poll budget %d receive rx_work_done %d\n",
		    budget,
		    rx_work_done);
	}
#endif

	if (rx_work_done >= budget) {
		/* rx queue is not yet empty/clean; stay on poll list */
		return rx_work_done;
	}

	/* no more packet in rx/tx queue, remove device from poll
	 * queue */
	spin_lock_irqsave(&ap->lock, flags);
	napi_complete(napi);
	vmac_toggle_rxint(dev, 1);
	spin_unlock_irqrestore(&ap->lock, flags);

	return rx_work_done;
}
+\r
+static int vmac_tx_reclaim(struct net_device *dev, int force);\r
+\r
+static irqreturn_t vmac_intr(int irq, void *dev_instance)\r
+{\r
+ struct net_device *dev = dev_instance;\r
+ struct vmac_priv *ap = netdev_priv(dev);\r
+ unsigned int status;\r
+\r
+ spin_lock(&ap->lock);\r
+\r
+ status = vmac_readl(ap, STAT);\r
+ vmac_writel(ap, status, STAT);\r
+\r
+#ifdef DEBUG\r
+ if (unlikely(ap->shutdown))\r
+ dev_err(&ap->pdev->dev, "ISR during close\n");\r
+\r
+ if (unlikely(!status & (RXINT_MASK|MDIO_MASK|ERR_MASK)))\r
+ dev_err(&ap->pdev->dev, "No source of IRQ found\n");\r
+#endif\r
+\r
+ if ((status & RXINT_MASK) &&\r
+ (ap->mac_rxring_head !=\r
+ vmac_readl(ap, MAC_RXRING_HEAD))) {\r
+ vmac_toggle_rxint(dev, 0);\r
+ napi_schedule(&ap->napi);\r
+ }\r
+\r
+ if (unlikely(netif_queue_stopped(dev) && (status & TXINT_MASK)))\r
+ vmac_tx_reclaim(dev, 0);\r
+\r
+ if (status & MDIO_MASK)\r
+ complete(&ap->mdio_complete);\r
+\r
+ if (unlikely(status & ERR_MASK))\r
+ update_error_counters(dev, status);\r
+\r
+ spin_unlock(&ap->lock);\r
+\r
+ return IRQ_HANDLED;\r
+}\r
+\r
/*
 * Release completed tx descriptors: unmap and free each transmitted
 * skb from the ring tail.  With @force set, hardware ownership is
 * ignored and everything is reclaimed (used at teardown).  Wakes the
 * queue and masks the tx interrupt again once room was freed.
 * Returns the number of descriptors released.
 */
static int vmac_tx_reclaim(struct net_device *dev, int force)
{
	struct vmac_priv *ap = netdev_priv(dev);
	int released = 0;

	/* buffer chaining not used, see vmac_start_xmit */

	while (!fifo_empty(&ap->tx_ring)) {
		struct vmac_buffer_desc *desc;
		struct sk_buff *skb;
		int desc_idx;

		desc_idx = ap->tx_ring.tail;
		desc = &ap->txbd[desc_idx];

		/* ensure other field of the descriptor were not read
		 * before we checked ownership */
		rmb();

		if ((desc->info & OWN_MASK) && !force)
			break;

		if (desc->info & ERR_MSK_TX) {
			update_tx_errors(dev, desc->info);
			/* recycle packet, let upper level deal with it */
		}

		skb = ap->tx_skbuff[desc_idx];
		ap->tx_skbuff[desc_idx] = NULL;
		WARN_ON(!skb);

		/* tx buffers were mapped with skb->len in
		 * vmac_start_xmit, so this unmap matches */
		dma_unmap_single(&ap->pdev->dev, desc->data, skb->len,
		    DMA_TO_DEVICE);

		dev_kfree_skb_any(skb);

		released++;
		fifo_inc_tail(&ap->tx_ring);
	}

	if (netif_queue_stopped(dev) && released) {
		netif_wake_queue(dev);
		/* tx interrupt only needed while the queue is full */
		vmac_toggle_txint(dev, 0);
	}

	if (unlikely(force && !fifo_empty(&ap->tx_ring))) {
		dev_err(&ap->pdev->dev, "failed to reclaim %d tx sk_buff\n",
		    fifo_used(&ap->tx_ring));
	}

	return released;
}
+\r
+int vmac_start_xmit(struct sk_buff *skb, struct net_device *dev)\r
+{\r
+ struct vmac_priv *ap = netdev_priv(dev);\r
+ struct vmac_buffer_desc *desc;\r
+ unsigned int tmp;\r
+\r
+ /* running under xmit lock */\r
+\r
+ /* no scatter/gatter see features below */\r
+ WARN_ON(skb_shinfo(skb)->nr_frags != 0);\r
+ WARN_ON(skb->len > MAX_TX_BUFFER_LEN);\r
+\r
+ if (unlikely(fifo_full(&ap->tx_ring))) {\r
+ netif_stop_queue(dev);\r
+ vmac_toggle_txint(dev, 1);\r
+ dev_err(&ap->pdev->dev, "xmit called with no tx desc available\n");\r
+ return NETDEV_TX_BUSY;\r
+ }\r
+\r
+ if (unlikely(skb->len < ETH_ZLEN)) {\r
+ struct sk_buff *short_skb;\r
+ short_skb = netdev_alloc_skb(dev, ETH_ZLEN);\r
+ if (!short_skb)\r
+ return NETDEV_TX_LOCKED;\r
+\r
+ memset(short_skb->data, 0, ETH_ZLEN);\r
+ memcpy(skb_put(short_skb, ETH_ZLEN), skb->data, skb->len);\r
+ dev_kfree_skb(skb);\r
+ skb = short_skb;\r
+ }\r
+\r
+ /* fill descriptor */\r
+ ap->tx_skbuff[ap->tx_ring.head] = skb;\r
+\r
+ desc = &ap->txbd[ap->tx_ring.head];\r
+ desc->data = dma_map_single(&ap->pdev->dev, skb->data, skb->len,\r
+ DMA_TO_DEVICE);\r
+\r
+ /* dma might already be polling */\r
+ wmb();\r
+ desc->info = OWN_MASK | FRST_MASK | LAST_MASK | skb->len;\r
+ wmb();\r
+\r
+ /* kick tx dma */\r
+ tmp = vmac_readl(ap, STAT);\r
+ vmac_writel(ap, tmp | TXPL_MASK, STAT);\r
+\r
+ ap->stats.tx_packets++;\r
+ ap->stats.tx_bytes += skb->len;\r
+ dev->trans_start = jiffies;\r
+ fifo_inc_head(&ap->tx_ring);\r
+\r
+ /* vmac_tx_reclaim independent of vmac_tx_timeout */\r
+ if (fifo_used(&ap->tx_ring) > 8)\r
+ vmac_tx_reclaim(dev, 0);\r
+\r
+ /* stop queue if no more desc available */\r
+ if (fifo_full(&ap->tx_ring)) {\r
+ netif_stop_queue(dev);\r
+ vmac_toggle_txint(dev, 1);\r
+ }\r
+\r
+ return NETDEV_TX_OK;\r
+}\r
+\r
/*
 * Allocate the coherent rx/tx descriptor rings and pre-fill the rx
 * ring with skbs.  Returns 0 or -ENOMEM; on failure everything
 * allocated so far is released again (goto-cleanup chain).
 */
static int alloc_buffers(struct net_device *dev)
{
	struct vmac_priv *ap = netdev_priv(dev);
	int err = -ENOMEM;
	int size;

	fifo_init(&ap->rx_ring, RX_BDT_LEN);
	fifo_init(&ap->tx_ring, TX_BDT_LEN);

	/* initialize skb list */
	memset(ap->rx_skbuff, 0, sizeof(ap->rx_skbuff));
	memset(ap->tx_skbuff, 0, sizeof(ap->tx_skbuff));

	/* allocate DMA received descriptors */
	size = sizeof(*ap->rxbd) * ap->rx_ring.size;
	ap->rxbd = dma_alloc_coherent(&ap->pdev->dev, size,
	    &ap->rxbd_dma,
	    GFP_KERNEL);
	if (ap->rxbd == NULL)
		goto err_out;

	/* allocate DMA transmit descriptors */
	size = sizeof(*ap->txbd) * ap->tx_ring.size;
	ap->txbd = dma_alloc_coherent(&ap->pdev->dev, size,
	    &ap->txbd_dma,
	    GFP_KERNEL);
	if (ap->txbd == NULL)
		goto err_free_rxbd;

	/* hardware requires 8-byte aligned descriptor rings */
	WARN_ON(((int)ap->txbd & 0x7) || ((int)ap->rxbd & 0x7));

	memset(ap->txbd, 0, sizeof(*ap->txbd) * ap->tx_ring.size);
	memset(ap->rxbd, 0, sizeof(*ap->rxbd) * ap->rx_ring.size);

	/* allocate rx skb */
	err = vmac_rx_refill(dev);
	if (err)
		goto err_free_txbd;

	return 0;

err_free_txbd:
	dma_free_coherent(&ap->pdev->dev, sizeof(*ap->txbd) * ap->tx_ring.size,
	    ap->txbd, ap->txbd_dma);
err_free_rxbd:
	dma_free_coherent(&ap->pdev->dev, sizeof(*ap->rxbd) * ap->rx_ring.size,
	    ap->rxbd, ap->rxbd_dma);
err_out:
	return err;
}
+\r
+static int free_buffers(struct net_device *dev)\r
+{\r
+ struct vmac_priv *ap = netdev_priv(dev);\r
+\r
+ /* free skbuff */\r
+ vmac_tx_reclaim(dev, 1);\r
+ vmac_rx_reclaim_force(dev);\r
+\r
+ /* free DMA ring */\r
+ dma_free_coherent(&ap->pdev->dev, sizeof(ap->txbd) * ap->tx_ring.size,\r
+ ap->txbd, ap->txbd_dma);\r
+ dma_free_coherent(&ap->pdev->dev, sizeof(ap->rxbd) * ap->rx_ring.size,\r
+ ap->rxbd, ap->rxbd_dma);\r
+\r
+ return 0;\r
+}\r
+\r
+static int vmac_hw_init(struct net_device *dev)\r
+{\r
+ struct vmac_priv *priv = netdev_priv(dev);\r
+\r
+ /* clear IRQ mask */\r
+ vmac_writel(priv, 0, ENABLE);\r
+\r
+ /* clear pending IRQ */\r
+ vmac_writel(priv, 0xffffffff, STAT);\r
+\r
+ /* Initialize logical address filter */\r
+ vmac_writel(priv, 0x0, LAFL);\r
+ vmac_writel(priv, 0x0, LAFH);\r
+\r
+ return 0;\r
+}\r
+\r
#ifdef DEBUG
/* Dump the main MAC registers (debug builds only).  Returns 0. */
static int vmac_register_print(struct net_device *dev)
{
	struct vmac_priv *ap = netdev_priv(dev);

	/* printk without a level falls back to the default console
	 * level; these are pure debug dumps, so tag them KERN_DEBUG */
	printk(KERN_DEBUG "func::%s vmac register %s value = 0x%x\n", __func__, "ID", vmac_readl(ap, ID));
	printk(KERN_DEBUG "func::%s vmac register %s value = 0x%x\n", __func__, "STAT", vmac_readl(ap, STAT));
	printk(KERN_DEBUG "func::%s vmac register %s value = 0x%x\n", __func__, "ENABLE", vmac_readl(ap, ENABLE));
	printk(KERN_DEBUG "func::%s vmac register %s value = 0x%x\n", __func__, "CONTROL", vmac_readl(ap, CONTROL));
	printk(KERN_DEBUG "func::%s vmac register %s value = 0x%x\n", __func__, "ADDRL", vmac_readl(ap, ADDRL));
	printk(KERN_DEBUG "func::%s vmac register %s value = 0x%x\n", __func__, "ADDRH", vmac_readl(ap, ADDRH));

	return 0;
}
#endif
+\r
+int vmac_open(struct net_device *dev)\r
+{\r
+ struct vmac_priv *ap = netdev_priv(dev);\r
+ struct phy_device *phydev;\r
+ unsigned int temp;\r
+ int err = 0;\r
+ struct clk *mac_clk = NULL;\r
+ struct clk *mac_parent = NULL;\r
+ struct clk *arm_clk = NULL;\r
+ struct rk29_vmac_platform_data *pdata = ap->pdev->dev.platform_data;\r
+ unsigned char current_mac[6];\r
+ int ret = 0;\r
+ struct pinctrl_state *clkout_state;\r
+\r
+ printk("enter func %s...\n", __func__);\r
+\r
+ if (ap == NULL)\r
+ return -ENODEV;\r
+\r
+ wake_lock_timeout(&ap->resume_lock, 5*HZ);\r
+\r
+ ap->shutdown = 0;\r
+ \r
+ // switch to rmii\r
+ printk("ap->pdev->dev.pins->p = %p\n", ap->pdev->dev.pins->p);\r
+ clkout_state = pinctrl_lookup_state(ap->pdev->dev.pins->p, "default");\r
+ if (IS_ERR(clkout_state)) {\r
+ dev_err(&ap->pdev->dev, "no clkout pinctrl state\n");\r
+ goto err_out;\r
+ }\r
+ \r
+ printk("in pinctrl_select_state.\n");\r
+ pinctrl_select_state(ap->pdev->dev.pins->p, clkout_state);\r
+ \r
+ //set rmii ref clock 50MHz\r
+ mac_clk = devm_clk_get(&ap->pdev->dev, "clk_mac");\r
+ /*if (IS_ERR(mac_clk))\r
+ mac_clk = NULL;\r
+ arm_clk = clk_get(NULL, "arm_pll");\r
+ if (IS_ERR(arm_clk))\r
+ arm_clk = NULL;\r
+ if (mac_clk) {\r
+ mac_parent = clk_get_parent(mac_clk);\r
+ if (IS_ERR(mac_parent))\r
+ mac_parent = NULL;\r
+ }\r
+ if (arm_clk && mac_parent && (arm_clk == mac_parent))\r
+ wake_lock(&idlelock);\r
+\r
+ if(pdata && pdata->rmii_extclk_sel && pdata->rmii_extclk_sel())\r
+ {\r
+ struct clk * mac_clkin = NULL;\r
+ mac_clkin = clk_get(NULL, "rmii_clkin");\r
+ if (IS_ERR(mac_clkin)) {\r
+ pr_err("mac_clkin get fail\n");\r
+ }\r
+ clk_set_parent(mac_clk, mac_clkin); \r
+ }*/\r
+ \r
+ clk_set_rate(mac_clk, 50000000);\r
+ clk_prepare_enable(mac_clk);\r
+ //clk_enable(clk_get(NULL,"mii_rx"));\r
+ //clk_enable(clk_get(NULL,"mii_tx"));\r
+ //clk_enable(clk_get(NULL,"hclk_mac"));\r
+\r
+ //phy power on\r
+ if (pdata && pdata->rmii_power_control)\r
+ pdata->rmii_power_control(1);\r
+\r
+ msleep(1000);\r
+\r
+ vmac_hw_init(dev);\r
+\r
+//$_rbox_$_modify_$_chenxiao\r
+ if (is_valid_ether_addr(dev->dev_addr)){\r
+ strlcpy(current_mac,dev->dev_addr,6);\r
+ }\r
+\r
+#ifdef CONFIG_ETH_MAC_FROM_EEPROM\r
+ ret = eeprom_read_data(0,dev->dev_addr,6);\r
+ if (ret != 6){\r
+ printk("read mac from Eeprom fail.\n");\r
+ }else {\r
+ if (is_valid_ether_addr(dev->dev_addr)){\r
+ printk("eth_mac_from_eeprom***********:%X:%X:%X:%X:%X:%X\n",dev->dev_addr[0],\r
+ dev->dev_addr[1],dev->dev_addr[2],dev->dev_addr[3],\r
+ dev->dev_addr[4],dev->dev_addr[5] );\r
+ }\r
+ }\r
+#endif\r
+\r
+#ifdef CONFIG_ETH_MAC_FROM_IDB\r
+ err = eth_mac_idb(dev->dev_addr);\r
+ if (err) {\r
+ printk("read mac from IDB fail.\n");\r
+ } else {\r
+ if (is_valid_ether_addr(dev->dev_addr)) {\r
+ printk("eth_mac_from_idb***********:%X:%X:%X:%X:%X:%X\n",dev->dev_addr[0],\r
+ dev->dev_addr[1],dev->dev_addr[2],dev->dev_addr[3],\r
+ dev->dev_addr[4],dev->dev_addr[5] );\r
+ }\r
+ }\r
+#endif\r
+\r
+#ifdef CONFIG_ETH_MAC_FROM_WIFI_MAC\r
+ err = eth_mac_wifi(dev->dev_addr);\r
+ if (err) {\r
+ printk("read mac from Wifi fail.\n");\r
+ } else {\r
+ if (is_valid_ether_addr(dev->dev_addr)) {\r
+ printk("eth_mac_from_wifi_mac***********:%X:%X:%X:%X:%X:%X\n",dev->dev_addr[0],\r
+ dev->dev_addr[1],dev->dev_addr[2],dev->dev_addr[3],\r
+ dev->dev_addr[4],dev->dev_addr[5] );\r
+ }\r
+ }\r
+#endif\r
+\r
+#ifdef CONFIG_ETH_MAC_FROM_SECURE_CHIP\r
+\r
+#endif\r
+ \r
+\r
+ if (!is_valid_ether_addr(dev->dev_addr)) {\r
+ strlcpy(dev->dev_addr,current_mac,6);\r
+ printk("eth_mac_from_RANDOM***********:%X:%X:%X:%X:%X:%X\n",dev->dev_addr[0],\r
+ dev->dev_addr[1],dev->dev_addr[2],dev->dev_addr[3],\r
+ dev->dev_addr[4],dev->dev_addr[5] );\r
+ }\r
+//add end \r
+\r
+ /* mac address changed? */\r
+ write_mac_reg(dev, dev->dev_addr);\r
+\r
+ err = alloc_buffers(dev);\r
+ if (err)\r
+ goto err_out;\r
+\r
+ err = request_irq(dev->irq, &vmac_intr, 0, dev->name, dev);\r
+ if (err) {\r
+ dev_err(&ap->pdev->dev, "Unable to request IRQ %d (error %d)\n",\r
+ dev->irq, err);\r
+ goto err_free_buffers;\r
+ }\r
+\r
+ /* install DMA ring pointers */\r
+ vmac_writel(ap, ap->rxbd_dma, RXRINGPTR);\r
+ vmac_writel(ap, ap->txbd_dma, TXRINGPTR);\r
+\r
+ /* set poll rate to 1 ms */\r
+ vmac_writel(ap, POLLRATE_TIME, POLLRATE);\r
+\r
+ /* make sure we enable napi before rx interrupt */\r
+ napi_enable(&ap->napi);\r
+\r
+ /* IRQ mask */\r
+ temp = RXINT_MASK | ERR_MASK | TXCH_MASK | MDIO_MASK;\r
+ vmac_writel(ap, temp, ENABLE);\r
+\r
+ /* Set control */\r
+ temp = (RX_BDT_LEN << 24) | (TX_BDT_LEN << 16) | TXRN_MASK | RXRN_MASK;\r
+ vmac_writel(ap, temp, CONTROL);\r
+\r
+ /* enable, after all other bits are set */\r
+ vmac_writel(ap, temp | EN_MASK, CONTROL);\r
+ \r
+ netif_start_queue(dev);\r
+ netif_carrier_off(dev);\r
+\r
+#ifdef DEBUG\r
+ vmac_register_print(dev);\r
+#endif\r
+\r
+ /* register the PHY board fixup, if needed */\r
+ err = vmac_mii_init(ap);\r
+ if (err)\r
+ goto err_free_irq;\r
+\r
+ /* schedule a link state check */\r
+ phy_start(ap->phy_dev);\r
+\r
+ phydev = ap->phy_dev;\r
+ dev_info(&ap->pdev->dev, "PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",\r
+ phydev->drv->name, dev_name(&phydev->dev), phydev->irq);\r
+\r
+ ap->suspending = 0;\r
+ ap->open_flag = 1;\r
+\r
+ return 0;\r
+\r
+err_free_irq:\r
+ free_irq(dev->irq, dev);\r
+err_free_buffers:\r
+ free_buffers(dev);\r
+err_out: \r
+ //if (arm_clk && mac_parent && (arm_clk == mac_parent))\r
+ // wake_unlock(&idlelock);\r
+\r
+ return err;\r
+}\r
+\r
+int vmac_close(struct net_device *dev)\r
+{\r
+ struct vmac_priv *ap = netdev_priv(dev);\r
+ unsigned int temp;\r
+ struct clk *mac_clk = NULL;\r
+ struct clk *arm_clk = NULL;\r
+ struct clk *mac_parent = NULL;\r
+ struct rk29_vmac_platform_data *pdata = ap->pdev->dev.platform_data;\r
+\r
+ printk("enter func %s...\n", __func__);\r
+ \r
+ if (ap->suspending == 1) \r
+ return 0;\r
+\r
+ ap->open_flag = 0;\r
+\r
+ netif_stop_queue(dev);\r
+ napi_disable(&ap->napi);\r
+\r
+ /* stop running transfers */\r
+ temp = vmac_readl(ap, CONTROL);\r
+ temp &= ~(TXRN_MASK | RXRN_MASK);\r
+ vmac_writel(ap, temp, CONTROL);\r
+\r
+ del_timer_sync(&ap->rx_timeout);\r
+\r
+ /* disable phy */\r
+ phy_stop(ap->phy_dev);\r
+ vmac_mii_exit(dev);\r
+ netif_carrier_off(dev);\r
+\r
+ /* disable interrupts */\r
+ vmac_writel(ap, 0, ENABLE);\r
+ free_irq(dev->irq, dev);\r
+\r
+ /* turn off vmac */\r
+ vmac_writel(ap, 0, CONTROL);\r
+ /* vmac_reset_hw(vmac) */\r
+\r
+ ap->shutdown = 1;\r
+ wmb();\r
+\r
+ free_buffers(dev);\r
+\r
+ //phy power off\r
+ if (pdata && pdata->rmii_power_control)\r
+ pdata->rmii_power_control(0);\r
+\r
+ //clock close\r
+ /*mac_clk = clk_get(NULL, "mac_ref_div");\r
+ if (IS_ERR(mac_clk))\r
+ mac_clk = NULL;\r
+ if (mac_clk) {\r
+ mac_parent = clk_get_parent(mac_clk);\r
+ if (IS_ERR(mac_parent))\r
+ mac_parent = NULL;\r
+ }\r
+ arm_clk = clk_get(NULL, "arm_pll");\r
+ if (IS_ERR(arm_clk))\r
+ arm_clk = NULL;\r
+\r
+ if (arm_clk && mac_parent && (arm_clk == mac_parent))\r
+ wake_unlock(&idlelock);*/\r
+ \r
+ clk_disable(clk_get(&ap->pdev->dev,"clk_mac"));\r
+ //clk_disable(clk_get(NULL,"mii_tx"));\r
+ //clk_disable(clk_get(NULL,"hclk_mac"));\r
+ //clk_disable(clk_get(NULL,"clk_mac_pll"));\r
+\r
+ return 0;\r
+}\r
+\r
/*
 * Unconditional hardware teardown (used around suspend): same sequence
 * as vmac_close() but without the suspend/open_flag bookkeeping and
 * without touching PHY power or clocks.
 * NOTE(review): largely duplicates vmac_close() - candidate for a
 * shared helper.
 */
int vmac_shutdown(struct net_device *dev)
{
	struct vmac_priv *ap = netdev_priv(dev);
	unsigned int temp;

	printk("enter func %s...\n", __func__);

	netif_stop_queue(dev);
	napi_disable(&ap->napi);

	/* stop running transfers */
	temp = vmac_readl(ap, CONTROL);
	temp &= ~(TXRN_MASK | RXRN_MASK);
	vmac_writel(ap, temp, CONTROL);

	del_timer_sync(&ap->rx_timeout);

	/* disable phy */
	phy_stop(ap->phy_dev);
	vmac_mii_exit(dev);
	netif_carrier_off(dev);

	/* disable interrupts */
	vmac_writel(ap, 0, ENABLE);
	free_irq(dev->irq, dev);

	/* turn off vmac */
	vmac_writel(ap, 0, CONTROL);
	/* vmac_reset_hw(vmac) */

	/* flag must reach the ISR before buffers disappear */
	ap->shutdown = 1;
	wmb();

	free_buffers(dev);

	return 0;
}
+\r
/*
 * Fold the hardware error counters (RXERR, MISS) into ap->stats and
 * recompute the derived rx/tx aggregate error fields.
 * NOTE(review): assumes RXERR/MISS are cumulative reads - confirm
 * whether they are clear-on-read before reordering anything here.
 * Caller must hold ap->lock (see vmac_stats).
 */
void vmac_update_stats(struct vmac_priv *ap)
{
	struct net_device_stats *_stats = &ap->stats;
	unsigned long miss, rxerr;
	unsigned long rxfram, rxcrc, rxoflow;

	/* compare with /proc/net/dev,
	 * see net/core/dev.c:dev_seq_printf_stats */

	/* rx stats */
	rxerr = vmac_readl(ap, RXERR);
	miss = vmac_readl(ap, MISS);

	/* RXERR packs three 8-bit counters: crc | frame<<8 | oflow<<16 */
	rxcrc = (rxerr & RXERR_CRC);
	rxfram = (rxerr & RXERR_FRM) >> 8;
	rxoflow = (rxerr & RXERR_OFLO) >> 16;

	_stats->rx_length_errors = 0;
	_stats->rx_over_errors += miss;
	_stats->rx_crc_errors += rxcrc;
	_stats->rx_frame_errors += rxfram;
	_stats->rx_fifo_errors += rxoflow;
	_stats->rx_missed_errors = 0;

	/* TODO check rx_dropped/rx_errors/tx_dropped/tx_errors have not
	 * been updated elsewhere */
	_stats->rx_dropped = _stats->rx_over_errors +
	    _stats->rx_fifo_errors +
	    ap->rx_merge_error;

	_stats->rx_errors = _stats->rx_length_errors + _stats->rx_crc_errors +
	    _stats->rx_frame_errors +
	    _stats->rx_missed_errors +
	    _stats->rx_dropped;

	/* tx stats */
	_stats->tx_dropped = 0; /* otherwise queue stopped */

	_stats->tx_errors = _stats->tx_aborted_errors +
	    _stats->tx_carrier_errors +
	    _stats->tx_fifo_errors +
	    _stats->tx_heartbeat_errors +
	    _stats->tx_window_errors +
	    _stats->tx_dropped +
	    ap->tx_timeout_error;
}
+\r
+struct net_device_stats *vmac_stats(struct net_device *dev)\r
+{\r
+ struct vmac_priv *ap = netdev_priv(dev);\r
+ unsigned long flags;\r
+\r
+ spin_lock_irqsave(&ap->lock, flags);\r
+ vmac_update_stats(ap);\r
+ spin_unlock_irqrestore(&ap->lock, flags);\r
+\r
+ return &ap->stats;\r
+}\r
+\r
/*
 * ndo_tx_timeout callback: the stack saw no tx progress for the
 * watchdog period.  Re-ack a possibly lost TX interrupt, attempt a
 * reclaim, log if the DMA engine appears stuck, then restart the
 * queue so traffic can resume.
 */
void vmac_tx_timeout(struct net_device *dev)
{
	struct vmac_priv *ap = netdev_priv(dev);
	unsigned int status;
	unsigned long flags;

	spin_lock_irqsave(&ap->lock, flags);

	/* queue did not progress for timeo jiffies */
	WARN_ON(!netif_queue_stopped(dev));
	WARN_ON(!fifo_full(&ap->tx_ring));

	/* TX IRQ lost? */
	status = vmac_readl(ap, STAT);
	if (status & TXINT_MASK) {
		dev_err(&ap->pdev->dev, "lost tx interrupt, IRQ mask %x\n",
		    vmac_readl(ap, ENABLE));
		vmac_writel(ap, TXINT_MASK, STAT);
	}

	/* TODO RX/MDIO/ERR as well? */

	vmac_tx_reclaim(dev, 0);
	if (fifo_full(&ap->tx_ring))
		dev_err(&ap->pdev->dev, "DMA state machine not active\n");

	/* We can accept TX packets again */
	ap->tx_timeout_error++;
	dev->trans_start = jiffies;
	netif_wake_queue(dev);

	spin_unlock_irqrestore(&ap->lock, flags);
}
+\r
/*
 * Build the 64-bit logical address (multicast hash) filter bitmask
 * from the device's multicast list: bit index = top 6 bits of the
 * little-endian CRC32 of each multicast address.
 * Caller guarantees the list is non-empty and IFF_ALLMULTI is unset.
 */
static void create_multicast_filter(struct net_device *dev,
    unsigned long *bitmask)
{
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 34))
	struct netdev_hw_addr *ha;
	unsigned long crc;
	char *addrs;
	/* NOTE(review): dev->dev_addrs is the *unicast* address list;
	 * the mc list on pre-2.6.34 kernels lives in dev->mc_list.
	 * This legacy branch looks wrong - verify before relying on it. */
	struct netdev_hw_addr_list *list = &dev->dev_addrs;

	WARN_ON(dev->mc_count == 0);
	WARN_ON(dev->flags & IFF_ALLMULTI);

	bitmask[0] = bitmask[1] = 0;

	list_for_each_entry(ha, &list->list, list) {
		addrs = ha->addr;

		/* skip non-multicast addresses */
		if (!(*addrs & 1))
			continue;

		crc = ether_crc_le(ETH_ALEN, addrs);
		set_bit(crc >> 26, bitmask);

	}
#else
	struct netdev_hw_addr *ha;
	unsigned long crc;
	char *addrs;

	WARN_ON(netdev_mc_count(dev) == 0);
	WARN_ON(dev->flags & IFF_ALLMULTI);

	bitmask[0] = bitmask[1] = 0;

	netdev_for_each_mc_addr(ha, dev) {
		addrs = ha->addr;

		/* skip non-multicast addresses */
		if (!(*addrs & 1))
			continue;

		crc = ether_crc_le(ETH_ALEN, addrs);
		set_bit(crc >> 26, bitmask);
	}
#endif
}
+static void vmac_set_multicast_list(struct net_device *dev)\r
+{\r
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 34))\r
+ struct vmac_priv *ap = netdev_priv(dev);\r
+ unsigned long flags, bitmask[2];\r
+ int promisc, reg;\r
+\r
+ //printk("-----------------func %s-------------------\n", __func__);\r
+\r
+ spin_lock_irqsave(&ap->lock, flags);\r
+\r
+ promisc = !!(dev->flags & IFF_PROMISC);\r
+ reg = vmac_readl(ap, CONTROL);\r
+ if (promisc != !!(reg & PROM_MASK)) {\r
+ reg ^= PROM_MASK;\r
+ vmac_writel(ap, reg, CONTROL);\r
+ }\r
+\r
+ if (dev->flags & IFF_ALLMULTI)\r
+ memset(bitmask, 1, sizeof(bitmask));\r
+ else if (dev->mc_count == 0)\r
+ memset(bitmask, 0, sizeof(bitmask));\r
+ else\r
+ create_multicast_filter(dev, bitmask);\r
+\r
+ vmac_writel(ap, bitmask[0], LAFL);\r
+ vmac_writel(ap, bitmask[1], LAFH);\r
+\r
+ spin_unlock_irqrestore(&ap->lock, flags);\r
+#else\r
+ struct vmac_priv *ap = netdev_priv(dev);\r
+ unsigned long flags, bitmask[2];\r
+ int promisc, reg;\r
+\r
+ spin_lock_irqsave(&ap->lock, flags);\r
+\r
+ promisc = !!(dev->flags & IFF_PROMISC);\r
+ reg = vmac_readl(ap, CONTROL);\r
+ if (promisc != !!(reg & PROM_MASK)) {\r
+ reg ^= PROM_MASK;\r
+ vmac_writel(ap, reg, CONTROL);\r
+ }\r
+\r
+ if (dev->flags & IFF_ALLMULTI)\r
+ memset(bitmask, 1, sizeof(bitmask));\r
+ else if (netdev_mc_count(dev) == 0)\r
+ memset(bitmask, 0, sizeof(bitmask));\r
+ else\r
+ create_multicast_filter(dev, bitmask);\r
+\r
+ vmac_writel(ap, bitmask[0], LAFL);\r
+ vmac_writel(ap, bitmask[1], LAFH);\r
+\r
+ spin_unlock_irqrestore(&ap->lock, flags);\r
+#endif\r
+}\r
+\r
/* ethtool hooks: get/set link settings, driver info, link state.
 * NOTE(review): could be "static const" if dev->ethtool_ops takes a
 * const pointer on this kernel - confirm before changing. */
static struct ethtool_ops vmac_ethtool_ops = {
	.get_settings = vmacether_get_settings,
	.set_settings = vmacether_set_settings,
	.get_drvinfo = vmacether_get_drvinfo,
	.get_link = ethtool_op_get_link,
};
+\r
/* net_device callbacks.  Note vmac_set_multicast_list is currently
 * not wired up (commented out), so the multicast filter is never
 * reprogrammed after open. */
static const struct net_device_ops vmac_netdev_ops = {
	.ndo_open = vmac_open,
	.ndo_stop = vmac_close,
	.ndo_get_stats = vmac_stats,
	.ndo_start_xmit = vmac_start_xmit,
	.ndo_do_ioctl = vmac_ioctl,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_tx_timeout = vmac_tx_timeout,
	//.ndo_set_multicast_list = vmac_set_multicast_list,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_change_mtu = eth_change_mtu,
};
+\r
+static int vmac_probe(struct platform_device *pdev)\r
+{\r
+ struct net_device *dev;\r
+ struct vmac_priv *ap;\r
+ struct resource *res;\r
+ unsigned int mem_base, mem_size, irq;\r
+ int err;\r
+ struct rk29_vmac_platform_data *pdata;\r
+ struct device_node *np = pdev->dev.of_node;\r
+ \r
+ printk("vmac_probe.\n");\r
+ dev_dbg(&pdev->dev, "vmac_probe 1.\n");\r
+ \r
+ pdev->dev.platform_data = &board_vmac_data;\r
+ pdata = pdev->dev.platform_data;\r
+\r
+ dev = alloc_etherdev(sizeof(*ap));\r
+ if (!dev) {\r
+ dev_err(&pdev->dev, "etherdev alloc failed, aborting.\n");\r
+ return -ENOMEM;\r
+ }\r
+\r
+ ap = netdev_priv(dev);\r
+\r
+ err = -ENODEV;\r
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);\r
+ if (!res) {\r
+ dev_err(&pdev->dev, "no mmio resource defined\n");\r
+ goto err_out;\r
+ }\r
+ mem_base = res->start;\r
+ mem_size = resource_size(res);\r
+ irq = platform_get_irq(pdev, 0);\r
+\r
+ /*err = -EBUSY;\r
+ if (!devm_request_mem_region(&pdev->dev, mem_base, mem_size, VMAC_NAME)) {\r
+ dev_err(&pdev->dev, "no memory region available\n");\r
+ goto err_out;\r
+ }*/\r
+\r
+ err = -ENOMEM;\r
+ ap->regs = devm_ioremap_resource(&pdev->dev, res);\r
+ if (!ap->regs) {\r
+ dev_err(&pdev->dev, "failed to map registers, aborting.\n");\r
+ goto err_out_release_mem;\r
+ }\r
+ \r
+ printk("mem_base = 0x%08x, mem_size = 0x%08x, irq = %d, regs = 0x%08x\n", \r
+ mem_base, mem_size, irq, ap->regs);\r
+\r
+ /* no checksum support, hence no scatter/gather */\r
+ dev->features |= NETIF_F_HIGHDMA;\r
+\r
+ spin_lock_init(&ap->lock);\r
+\r
+ SET_NETDEV_DEV(dev, &pdev->dev);\r
+ ap->dev = dev;\r
+ ap->pdev = pdev;\r
+\r
+ /* init rx timeout (used for oom) */\r
+ init_timer(&ap->rx_timeout);\r
+ ap->rx_timeout.function = vmac_refill_rx_timer;\r
+ ap->rx_timeout.data = (unsigned long)dev;\r
+\r
+ netif_napi_add(dev, &ap->napi, vmac_poll, 2);\r
+ dev->netdev_ops = &vmac_netdev_ops;\r
+ dev->ethtool_ops = &vmac_ethtool_ops;\r
+ dev->irq = irq;\r
+\r
+ dev->flags |= IFF_MULTICAST;////////////////////\r
+\r
+ dev->base_addr = (unsigned long)ap->regs;\r
+ ap->mem_base = mem_base;\r
+\r
+ /* prevent buffer chaining, favor speed over space */\r
+ ap->rx_skb_size = ETH_FRAME_LEN + VMAC_BUFFER_PAD;\r
+\r
+ /* private struct functional */\r
+\r
+ /* mac address intialize, set vmac_open */\r
+ read_mac_reg(dev, dev->dev_addr);\r
+\r
+ if (!is_valid_ether_addr(dev->dev_addr))\r
+ random_ether_addr(dev->dev_addr);\r
+\r
+ err = register_netdev(dev);\r
+ if (err) {\r
+ dev_err(&pdev->dev, "Cannot register net device, aborting.\n");\r
+ goto err_out_iounmap;\r
+ }\r
+\r
+ dev_info(&pdev->dev, "ARC VMAC at 0x%08x irq %d %pM\n", mem_base,\r
+ dev->irq, dev->dev_addr);\r
+ platform_set_drvdata(pdev, dev);\r
+\r
+ ap->suspending = 0;\r
+ ap->open_flag = 0;\r
+ //wake_lock_init(&idlelock, WAKE_LOCK_IDLE, "vmac");\r
+ wake_lock_init(&ap->resume_lock, WAKE_LOCK_SUSPEND, "vmac_resume");\r
+\r
+ //config rk29 vmac as rmii, 100MHz \r
+ if (pdata && pdata->vmac_register_set)\r
+ pdata->vmac_register_set();\r
+\r
+ //power gpio init, phy power off default for power reduce\r
+ if (pdata && pdata->rmii_io_init)\r
+ pdata->rmii_io_init();\r
+\r
+ return 0;\r
+\r
+err_out_iounmap:\r
+ iounmap(ap->regs);\r
+err_out_release_mem:\r
+ release_mem_region(mem_base, mem_size);\r
+err_out:\r
+ free_netdev(dev);\r
+ return err;\r
+}\r
+\r
+/* Platform-driver remove: tear down in reverse order of vmac_probe.\r
+ * Powers down the external PHY first, then unregisters the netdev,\r
+ * unmaps the registers and releases the MEM region. */\r
+static int vmac_remove(struct platform_device *pdev)\r
+{\r
+	struct net_device *dev;\r
+	struct vmac_priv *ap;\r
+	struct resource *res;\r
+	struct rk29_vmac_platform_data *pdata = pdev->dev.platform_data;\r
+\r
+	//wake_lock_destroy(&idlelock);\r
+\r
+	/* power gpio deinit, phy power off */\r
+	if (pdata && pdata->rmii_io_deinit)\r
+		pdata->rmii_io_deinit();\r
+\r
+	dev = platform_get_drvdata(pdev);\r
+	if (!dev) {\r
+		dev_err(&pdev->dev, "%s no valid dev found\n", __func__);\r
+		return 0;\r
+	}\r
+\r
+	ap = netdev_priv(dev);\r
+\r
+	/* MAC */\r
+	unregister_netdev(dev);\r
+	iounmap(ap->regs);\r
+\r
+	/* guard against a missing MEM resource instead of dereferencing\r
+	 * a NULL pointer from platform_get_resource() */\r
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);\r
+	if (res)\r
+		release_mem_region(res->start, resource_size(res));\r
+\r
+	platform_set_drvdata(pdev, NULL);\r
+	free_netdev(dev);\r
+	return 0;\r
+}\r
+\r
+/* Power down the PHY (via board callback) and gate the MAC clock.\r
+ * Counterpart of the power-on path used by vmac_open. */\r
+static void rk29_vmac_power_off(struct net_device *dev)\r
+{\r
+	struct vmac_priv *ap = netdev_priv(dev);\r
+	struct rk29_vmac_platform_data *pdata = ap->pdev->dev.platform_data;\r
+	struct clk *mac_clk;\r
+\r
+	printk("enter func %s...\n", __func__);\r
+\r
+	/* phy power off */\r
+	if (pdata && pdata->rmii_power_control)\r
+		pdata->rmii_power_control(0);\r
+\r
+	/* clock close: clk_get() takes a reference that must be released\r
+	 * with clk_put(), otherwise a reference is leaked on every call;\r
+	 * also check for clk_get() failure before using the handle */\r
+	mac_clk = clk_get(&ap->pdev->dev, "clk_mac");\r
+	if (!IS_ERR(mac_clk)) {\r
+		clk_disable(mac_clk);\r
+		clk_put(mac_clk);\r
+	}\r
+	//clk_disable(clk_get(NULL,"mii_tx"));\r
+	//clk_disable(clk_get(NULL,"hclk_mac"));\r
+	//clk_disable(clk_get(NULL,"clk_mac_pll"));\r
+\r
+}\r
+\r
+/* dev_pm_ops .suspend hook: detach the interface from the stack.\r
+ * The actual MAC shutdown/power-off is intentionally compiled out\r
+ * (see the #if 0 below); only the suspending flag is tracked. */\r
+static int\r
+rk29_vmac_suspend(struct device *dev)\r
+{\r
+	struct platform_device *pdev = to_platform_device(dev);\r
+	struct net_device *ndev = platform_get_drvdata(pdev);\r
+	struct vmac_priv *ap;\r
+\r
+	/* only touch the private area after the NULL check; the original\r
+	 * code called netdev_priv(ndev) before testing ndev */\r
+	if (ndev) {\r
+		ap = netdev_priv(ndev);\r
+		if (ap->open_flag == 1) {\r
+			netif_stop_queue(ndev);\r
+			netif_device_detach(ndev);\r
+			if (ap->suspending == 0) {\r
+//$_rbox_$_modify_$_chenzhi: for ethernet sleep\r
+#if 0\r
+				vmac_shutdown(ndev);\r
+				rk29_vmac_power_off(ndev);\r
+#endif\r
+				ap->suspending = 1;\r
+			}\r
+		}\r
+	}\r
+	return 0;\r
+}\r
+\r
+/* dev_pm_ops .resume hook: re-attach the interface and clear the\r
+ * suspending flag set by rk29_vmac_suspend(). */\r
+static int\r
+rk29_vmac_resume(struct device *dev)\r
+{\r
+	struct platform_device *pdev = to_platform_device(dev);\r
+	struct net_device *ndev = platform_get_drvdata(pdev);\r
+	struct vmac_priv *ap;\r
+\r
+	/* only touch the private area after the NULL check; the original\r
+	 * code called netdev_priv(ndev) before testing ndev */\r
+	if (ndev) {\r
+		ap = netdev_priv(ndev);\r
+		if (ap->open_flag == 1) {\r
+			netif_device_attach(ndev);\r
+			netif_start_queue(ndev);\r
+//$_rbox_$_modify_$_chenzhi: \r
+//$_rbox_$_modify_$_begin\r
+			if (ap->suspending == 1) {\r
+				ap->suspending = 0;\r
+			}\r
+//$_rbox_$_modify_$_end\r
+		}\r
+	}\r
+	return 0;\r
+}\r
+\r
+/* PM callbacks wired into rockchip_vmac_driver.driver.pm below;\r
+ * const because struct device_driver.pm is a const pointer */\r
+static const struct dev_pm_ops rk29_vmac_pm_ops = {\r
+	.suspend = rk29_vmac_suspend,\r
+	.resume = rk29_vmac_resume,\r
+};\r
+\r
+/* devicetree match table for the MAC node ("rockchip,vmac") */\r
+static const struct of_device_id rockchip_vmac_of_match[] = {\r
+ { .compatible = "rockchip,vmac", .data = NULL, },\r
+ {},\r
+};\r
+\r
+/* platform driver glue: probe/remove plus PM ops and OF matching */\r
+static struct platform_driver rockchip_vmac_driver = {\r
+ .probe = vmac_probe,\r
+ .remove = vmac_remove,\r
+ .driver = {\r
+ .owner = THIS_MODULE,\r
+ .name = "rockchip,vmac",\r
+ .pm = &rk29_vmac_pm_ops,\r
+ .of_match_table = of_match_ptr(rockchip_vmac_of_match),\r
+ },\r
+};\r
+\r
+/* module entry point: register the VMAC platform driver */\r
+static int __init vmac_init(void)\r
+{\r
+ printk("vmac_init.\n");\r
+ return platform_driver_register(&rockchip_vmac_driver);\r
+}\r
+\r
+/* module exit point: unregister the VMAC platform driver */\r
+static void __exit vmac_exit(void)\r
+{\r
+ platform_driver_unregister(&rockchip_vmac_driver);\r
+}\r
+\r
+module_init(vmac_init);\r
+module_exit(vmac_exit);\r
+\r
+MODULE_LICENSE("GPL");\r
+MODULE_DESCRIPTION("RK29 VMAC Ethernet driver");\r
+MODULE_AUTHOR("amit.bhor@celunite.com, sameer.dhavale@celunite.com, andreas.fenkart@streamunlimited.com");\r
--- /dev/null
+/*\r
+ * linux/arch/arc/drivers/arcvmac.h\r
+ *\r
+ * Copyright (C) 2003-2006 Codito Technologies, for linux-2.4 port\r
+ * Copyright (C) 2006-2007 Celunite Inc, for linux-2.6 port\r
+ * Copyright (C) 2007-2008 Sagem Communications, Fehmi HAFSI\r
+ * Copyright (C) 2009 Sagem Communications, Andreas Fenkart\r
+ * All Rights Reserved.\r
+ *\r
+ * This program is free software; you can redistribute it and/or modify\r
+ * it under the terms of the GNU General Public License as published by\r
+ * the Free Software Foundation; either version 2 of the License, or\r
+ * (at your option) any later version.\r
+ *\r
+ * This program is distributed in the hope that it will be useful,\r
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of\r
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r
+ * GNU General Public License for more details.\r
+ *\r
+ * You should have received a copy of the GNU General Public License\r
+ * along with this program; if not, write to the Free Software\r
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\r
+ *\r
+ * Authors: amit.bhor@celunite.com, sameer.dhavale@celunite.com\r
+ */\r
+\r
+#ifndef _ARCVMAC_H\r
+#define _ARCVMAC_H\r
+\r
+#define VMAC_NAME "rockchip,vmac"\r
+#define VMAC_VERSION "1.0"\r
+\r
+/* Buffer descriptors */\r
+#ifdef CONFIG_ARCH_RK29\r
+#define TX_BDT_LEN 16 /* Number of transmit BD's */\r
+#else\r
+#define TX_BDT_LEN 255 /* Number of transmit BD's */\r
+#endif\r
+#define RX_BDT_LEN 255 /* Number of receive BD's */\r
+\r
+/* BD poll rate, in 1024 cycles. @100Mhz: x * 1024 cy * 10ns = 1ms */\r
+#define POLLRATE_TIME 200\r
+\r
+/* next power of two, bigger than ETH_FRAME_LEN + VLAN */\r
+#define MAX_RX_BUFFER_LEN 0x800 /* 2^11 = 2048 = 0x800 */\r
+#define MAX_TX_BUFFER_LEN 0x800 /* 2^11 = 2048 = 0x800 */\r
+\r
+/* 14 bytes of ethernet header, 4 bytes VLAN, FCS,\r
+ * plus extra pad to prevent buffer chaining of\r
+ * maximum sized ethernet packets (1514 bytes) */\r
+#define VMAC_BUFFER_PAD (ETH_HLEN + 4 + ETH_FCS_LEN + 4)\r
+\r
+/* VMAC register definitions, offsets in the ref manual are in bytes */\r
+#define ID_OFFSET (0x00/0x4)\r
+#define STAT_OFFSET (0x04/0x4)\r
+#define ENABLE_OFFSET (0x08/0x4)\r
+#define CONTROL_OFFSET (0x0c/0x4)\r
+#define POLLRATE_OFFSET (0x10/0x4)\r
+#define RXERR_OFFSET (0x14/0x4)\r
+#define MISS_OFFSET (0x18/0x4)\r
+#define TXRINGPTR_OFFSET (0x1c/0x4)\r
+#define RXRINGPTR_OFFSET (0x20/0x4)\r
+#define ADDRL_OFFSET (0x24/0x4)\r
+#define ADDRH_OFFSET (0x28/0x4)\r
+#define LAFL_OFFSET (0x2c/0x4)\r
+#define LAFH_OFFSET (0x30/0x4)\r
+#define MDIO_DATA_OFFSET (0x34/0x4)\r
+#define MAC_TXRING_HEAD_OFFSET (0x38/0x4)\r
+#define MAC_RXRING_HEAD_OFFSET (0x3C/0x4)\r
+\r
+/* STATUS and ENABLE register bit masks */\r
+#define TXINT_MASK (1<<0) /* Transmit interrupt */\r
+#define RXINT_MASK (1<<1) /* Receive interrupt */\r
+#define ERR_MASK (1<<2) /* Error interrupt */\r
+#define TXCH_MASK (1<<3) /* Transmit chaining error interrupt */\r
+#define MSER_MASK (1<<4) /* Missed packet counter error */\r
+#define RXCR_MASK (1<<8) /* RXCRCERR counter rolled over */\r
+#define RXFR_MASK (1<<9) /* RXFRAMEERR counter rolled over */\r
+#define RXFL_MASK (1<<10) /* RXOFLOWERR counter rolled over */\r
+#define MDIO_MASK (1<<12) /* MDIO complete */\r
+#define TXPL_MASK (1<<31) /* TXPOLL */\r
+\r
+/* CONTROL register bitmasks */\r
+#define EN_MASK (1<<0) /* VMAC enable */\r
+#define TXRN_MASK (1<<3) /* TX enable */\r
+#define RXRN_MASK (1<<4) /* RX enable */\r
+#define DSBC_MASK (1<<8) /* Disable receive broadcast */\r
+#define ENFL_MASK (1<<10) /* Enable Full Duplex */ ///////\r
+#define PROM_MASK (1<<11) /* Promiscuous mode */\r
+\r
+/* RXERR register bitmasks */\r
+#define RXERR_CRC 0x000000ff\r
+#define RXERR_FRM 0x0000ff00\r
+#define RXERR_OFLO 0x00ff0000 /* fifo overflow */\r
+\r
+/* MDIO data register bit masks */\r
+#define MDIO_SFD 0xC0000000\r
+#define MDIO_OP 0x30000000\r
+#define MDIO_ID_MASK 0x0F800000\r
+#define MDIO_REG_MASK 0x007C0000\r
+#define MDIO_TA 0x00030000\r
+#define MDIO_DATA_MASK 0x0000FFFF\r
+\r
+#define MDIO_BASE 0x40020000\r
+#define MDIO_OP_READ 0x20000000\r
+#define MDIO_OP_WRITE 0x10000000\r
+\r
+/* Buffer descriptor INFO bit masks */\r
+#define OWN_MASK (1<<31) /* ownership of buffer, 0 CPU, 1 DMA */\r
+#define BUFF (1<<30) /* buffer invalid, rx */\r
+#define UFLO (1<<29) /* underflow, tx */\r
+#define LTCL (1<<28) /* late collision, tx */\r
+#define RETRY_CT (0xf<<24) /* tx */\r
+#define DROP (1<<23) /* drop, more than 16 retries, tx */\r
+#define DEFER (1<<22) /* traffic on the wire, tx */\r
+#define CARLOSS (1<<21) /* carrier loss while transmission, tx, rx? */\r
+/* 20:19 reserved */\r
+#define ADCR (1<<18) /* add crc, ignored if not disaddcrc */\r
+#define LAST_MASK (1<<17) /* Last buffer in chain */\r
+#define FRST_MASK (1<<16) /* First buffer in chain */\r
+/* 15:11 reserved */\r
+#define LEN_MASK 0x000007FF\r
+\r
+#define ERR_MSK_TX 0x3fe00000 /* UFLO | LTCL | RTRY | DROP | DEFER | CRLS */\r
+\r
+\r
+/* arcvmac private data structures */\r
+/* hardware DMA buffer descriptor: INFO word (flags/length, see the\r
+ * bit masks above) plus the DMA address of the data buffer */\r
+struct vmac_buffer_desc {\r
+ unsigned int info;\r
+ dma_addr_t data;\r
+};\r
+\r
+/* ring-buffer bookkeeping for the BD rings; one slot is sacrificed\r
+ * as a sentinel so head == tail always means empty (see below) */\r
+struct dma_fifo {\r
+ int head; /* head */\r
+ int tail; /* tail */\r
+ int size;\r
+};\r
+\r
+/* per-device driver state, obtained via netdev_priv(dev) */\r
+struct vmac_priv {\r
+ struct net_device *dev;\r
+ struct platform_device *pdev;\r
+ struct net_device_stats stats;\r
+\r
+ spinlock_t lock; /* TODO revisit */\r
+ struct completion mdio_complete;\r
+\r
+ /* base address of register set */\r
+ int *regs;\r
+ unsigned int mem_base;\r
+\r
+ /* DMA ring buffers */\r
+ struct vmac_buffer_desc *rxbd;\r
+ dma_addr_t rxbd_dma;\r
+\r
+ struct vmac_buffer_desc *txbd;\r
+ dma_addr_t txbd_dma;\r
+\r
+ /* socket buffers */\r
+ struct sk_buff *rx_skbuff[RX_BDT_LEN];\r
+ struct sk_buff *tx_skbuff[TX_BDT_LEN];\r
+ int rx_skb_size;\r
+\r
+ /* skb / dma desc managing */\r
+ struct dma_fifo rx_ring;\r
+ struct dma_fifo tx_ring;\r
+\r
+ /* descriptor last polled/processed by the VMAC */\r
+ unsigned long mac_rxring_head;\r
+ /* used when rx skb allocation failed, so we defer rx queue\r
+ * refill */\r
+ struct timer_list rx_timeout;\r
+\r
+ /* lock rx_timeout against rx normal operation */\r
+ spinlock_t rx_lock;\r
+\r
+ struct napi_struct napi;\r
+\r
+ /* rx buffer chaining */\r
+ int rx_merge_error;\r
+ int tx_timeout_error;\r
+\r
+ /* PHY stuff */\r
+ struct mii_bus *mii_bus;\r
+ struct phy_device *phy_dev;\r
+\r
+ int link;\r
+ int speed;\r
+ int duplex;\r
+\r
+ int open_flag; /* 1 while the interface is open */\r
+ int suspending; /* 1 between suspend and resume */\r
+ struct wake_lock resume_lock;\r
+\r
+ /* debug */\r
+ int shutdown;\r
+};\r
+\r
+/* DMA ring management */\r
+\r
+/* for a fifo with size n,\r
+ * - [0..n] fill levels are n + 1 states\r
+ * - there are only n different deltas (head - tail) values\r
+ * => not all fill levels can be represented with head, tail\r
+ * pointers only\r
+ * we give up the n fill level, aka fifo full */\r
+\r
+/* sacrifice one elt as a sentinel */\r
+static inline int fifo_used(struct dma_fifo *f);\r
+static inline int fifo_inc_ct(int ct, int size);\r
+static inline void fifo_dump(struct dma_fifo *fifo);\r
+\r
+/* true when the fifo holds no elements (head caught up with tail) */\r
+static inline int fifo_empty(struct dma_fifo *f)\r
+{\r
+ return (f->head == f->tail);\r
+}\r
+\r
+/* number of free slots; the sentinel slot is included in the count,\r
+ * so an empty fifo reports size (not size - 1) */\r
+static inline int fifo_free(struct dma_fifo *f)\r
+{\r
+ int free;\r
+\r
+ free = f->tail - f->head;\r
+ if (free <= 0)\r
+ free += f->size;\r
+\r
+ return free;\r
+}\r
+\r
+/* number of occupied slots (0 when empty, at most size - 1) */\r
+static inline int fifo_used(struct dma_fifo *f)\r
+{\r
+ int used;\r
+\r
+ used = f->head - f->tail;\r
+ if (used < 0)\r
+ used += f->size;\r
+\r
+ return used;\r
+}\r
+\r
+/* full at used + 1 == size: one slot is kept as a sentinel so that\r
+ * "full" and "empty" remain distinguishable from head/tail alone */\r
+static inline int fifo_full(struct dma_fifo *f)\r
+{\r
+ return (fifo_used(f) + 1) == f->size;\r
+}\r
+\r
+/* manipulate */\r
+/* manipulate */\r
+/* reset the fifo to empty with the given capacity */\r
+static inline void fifo_init(struct dma_fifo *fifo, int size)\r
+{\r
+ fifo->size = size;\r
+ fifo->head = fifo->tail = 0; /* empty */\r
+}\r
+\r
+/* advance head (producer side); BUGs if the fifo is already full */\r
+static inline void fifo_inc_head(struct dma_fifo *fifo)\r
+{\r
+ BUG_ON(fifo_full(fifo));\r
+ fifo->head = fifo_inc_ct(fifo->head, fifo->size);\r
+}\r
+\r
+/* advance tail (consumer side); BUGs if the fifo is already empty */\r
+static inline void fifo_inc_tail(struct dma_fifo *fifo)\r
+{\r
+ BUG_ON(fifo_empty(fifo));\r
+ fifo->tail = fifo_inc_ct(fifo->tail, fifo->size);\r
+}\r
+\r
+/* internal funcs */\r
+/* internal funcs */\r
+/* debug helper: print the fifo state to the kernel log */\r
+static inline void fifo_dump(struct dma_fifo *fifo)\r
+{\r
+ printk(KERN_INFO "fifo: head %d, tail %d, size %d\n", fifo->head,\r
+ fifo->tail,\r
+ fifo->size);\r
+}\r
+\r
+/* increment a ring index, wrapping back to 0 at size */\r
+static inline int fifo_inc_ct(int ct, int size)\r
+{\r
+ return (++ct == size) ? 0 : ct;\r
+}\r
+\r
+/*vmac*/\r
+struct rk29_vmac_platform_data {\r
+ int (*vmac_register_set)(void);\r
+ int (*rmii_io_init)(void);\r
+ int (*rmii_io_deinit)(void);\r
+ int (*rmii_power_control)(int enable);\r
+ int(*rmii_speed_switch)(int speed);\r
+};\r
+\r
+extern struct rk29_vmac_platform_data board_vmac_data;\r
+\r
+#endif /* _ARCVMAC_H */\r
--- /dev/null
+
+#include <linux/clk.h>
+#include <linux/crc32.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/wakelock.h>
+#include <linux/version.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+#include <asm/irq.h>
+#include <linux/interrupt.h>
+#include <linux/completion.h>
+
+#include <linux/rockchip/iomap.h>
+#include <linux/rockchip/grf.h>
+
+#include "rk29_vmac.h"
+
+/* PHY power-pin description, filled from the devicetree in
+ * vmac_phy_probe() when no platform_data is supplied */
+struct vmac_phy_data {
+ int power_io; /* gpio number from "power-gpios" */
+ int power_io_enable; /* active level: 1 active-high, 0 active-low */
+};
+struct vmac_phy_data g_vmac_phy_data;
+
+#define grf_readl(offset) readl_relaxed(RK_GRF_VIRT + offset)
+#define grf_writel(v, offset) do { writel_relaxed(v, RK_GRF_VIRT + offset); dsb(); } while (0)
+
+/* configure the VMAC for RMII mode via GRF SOC_CON1; the upper
+ * halfword (0x3 << 16) is the write-enable mask for the bits set */
+static int rk30_vmac_register_set(void)
+{
+ //config rk30 vmac as rmii
+ writel_relaxed(0x3 << 16 | 0x2, RK_GRF_VIRT + RK3188_GRF_SOC_CON1);
+ return 0;
+}
+
+/* pad setup at probe time: raise gpio3/sdio drive strength in GRF
+ * IO_CON3 (upper halfword is the write-enable mask) */
+static int rk30_rmii_io_init(void)
+{
+ printk("enter %s \n",__func__);
+
+ //rk3188 gpio3 and sdio drive strength ,
+ grf_writel((0x0f<<16)|0x0f, RK3188_GRF_IO_CON3);
+
+ return 0;
+}
+
+/* pad teardown at remove time; currently only logs — the actual PHY
+ * power-down is done via rmii_power_control(0) by the caller */
+static int rk30_rmii_io_deinit(void)
+{
+ //phy power down
+ printk("enter %s \n",__func__);
+ return 0;
+}
+
+/* drive the PHY power gpio: enable != 0 selects the active level
+ * recorded in power_io_enable, otherwise its inverse.  The original
+ * duplicated the gpio calls in both branches; they differ only in
+ * the level, so compute it once. */
+static int rk30_rmii_power_control(int enable)
+{
+	struct vmac_phy_data *pdata = &g_vmac_phy_data;
+	int level;
+
+	printk("enter %s ,enable = %d \n",__func__,enable);
+
+	if (!gpio_is_valid(pdata->power_io))
+		return 0;
+
+	level = enable ? pdata->power_io_enable : !pdata->power_io_enable;
+	gpio_direction_output(pdata->power_io, level);
+	gpio_set_value(pdata->power_io, level);
+
+	return 0;
+}
+
+#define BIT_EMAC_SPEED_100M (1 << 1)
+#define BIT_EMAC_SPEED_10M (0 << 1)
+/* select the EMAC speed bit in GRF SOC_CON1: 10Mbit when speed == 10,
+ * 100Mbit otherwise ((2<<16) is the write-enable mask for that bit) */
+static int rk29_vmac_speed_switch(int speed)
+{
+ //printk("%s: speed = %d\n", __func__, speed);
+ if (10 == speed) {
+ writel_relaxed((2<<16)|BIT_EMAC_SPEED_10M, RK_GRF_VIRT + RK3188_GRF_SOC_CON1);
+ } else {
+ writel_relaxed((2<<16)|BIT_EMAC_SPEED_100M, RK_GRF_VIRT + RK3188_GRF_SOC_CON1);
+ }
+ return 0;
+}
+
+/* default board callbacks consumed by the MAC driver (rk29_vmac.c)
+ * when the platform supplies no rk29_vmac_platform_data */
+struct rk29_vmac_platform_data board_vmac_data = {
+ .vmac_register_set = rk30_vmac_register_set,
+ .rmii_io_init = rk30_rmii_io_init,
+ .rmii_io_deinit = rk30_rmii_io_deinit,
+ .rmii_power_control = rk30_rmii_power_control,
+ .rmii_speed_switch = rk29_vmac_speed_switch,
+};
+
+/* Probe: read the PHY power gpio (number + active level) from the
+ * "power-gpios" DT property when no platform_data was supplied, then
+ * drive the pin to its inactive level (PHY off by default to save
+ * power; the MAC driver powers it up on open). */
+static int vmac_phy_probe(struct platform_device *pdev)
+{
+	struct vmac_phy_data *pdata = pdev->dev.platform_data;
+	enum of_gpio_flags flags;
+	int ret = 0;
+	struct device_node *node = pdev->dev.of_node;
+
+	printk("enter %s \n",__func__);
+	if (!pdata) {
+		pdata = &g_vmac_phy_data;
+
+		pdata->power_io = of_get_named_gpio_flags(node, "power-gpios", 0, &flags);
+		if (!gpio_is_valid(pdata->power_io)) {
+			printk("%s: Get power-gpios failed.\n", __func__);
+			return -EINVAL;
+		}
+
+		if (flags & OF_GPIO_ACTIVE_LOW)
+			pdata->power_io_enable = 0;
+		else
+			pdata->power_io_enable = 1;
+
+		/* publish the DT-derived pdata so vmac_phy_remove() does
+		 * not dereference a NULL platform_data */
+		pdev->dev.platform_data = pdata;
+	}
+
+	// disable power
+	/* NOTE(review): the gpio is used without gpio_request(); the
+	 * request below was deliberately commented out — confirm the pin
+	 * is not claimed elsewhere before re-enabling it.
+	err = gpio_request(pdata->power_io, "vmac_phy_power");
+	if (err) {
+		printk("%s: Request vmac phy power pin failed.\n", __func__);
+		return -EINVAL;
+	}*/
+
+	gpio_direction_output(pdata->power_io, !pdata->power_io_enable);
+	gpio_set_value(pdata->power_io, !pdata->power_io_enable);
+
+	return ret;
+}
+
+/* Remove: release the power gpio.  When the device was probed from
+ * the devicetree, probe used g_vmac_phy_data without storing it in
+ * pdev->dev.platform_data, so fall back to the global instead of
+ * dereferencing a NULL pdata. */
+static int vmac_phy_remove(struct platform_device *pdev)
+{
+	struct vmac_phy_data *pdata = pdev->dev.platform_data;
+
+	printk("enter %s \n",__func__);
+
+	if (!pdata)
+		pdata = &g_vmac_phy_data;
+
+	if (gpio_is_valid(pdata->power_io))
+		gpio_free(pdata->power_io);
+
+	return 0;
+}
+
+/* devicetree match table ("rockchip,vmac-phy"); const to match the
+ * of_match_table pointer type and the sibling MAC table */
+static const struct of_device_id vmac_phy_of_match[] = {
+	{ .compatible = "rockchip,vmac-phy" },
+	{ }
+};
+
+/* platform driver for the PHY power node, registered by
+ * module_platform_driver() below */
+static struct platform_driver vmac_phy_driver = {
+ .driver = {
+ .name = "rockchip,vmac-phy",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(vmac_phy_of_match),
+ },
+ .probe = vmac_phy_probe,
+ .remove = vmac_phy_remove,
+};
+
+module_platform_driver(vmac_phy_driver);
+
+MODULE_DESCRIPTION("VMAC PHY Power Driver");
+MODULE_LICENSE("GPL");