2 * AMD 10Gb Ethernet driver
4 * This file is available to you under your choice of the following two
9 * Copyright (c) 2014 Advanced Micro Devices, Inc.
11 * This file is free software; you may copy, redistribute and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation, either version 2 of the License, or (at
14 * your option) any later version.
16 * This file is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
21 * You should have received a copy of the GNU General Public License
22 * along with this program. If not, see <http://www.gnu.org/licenses/>.
24 * This file incorporates work covered by the following copyright and
26 * The Synopsys DWC ETHER XGMAC Software Driver and documentation
27 * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
28 * Inc. unless otherwise expressly agreed to in writing between Synopsys
31 * The Software IS NOT an item of Licensed Software or Licensed Product
32 * under any End User Software License Agreement or Agreement for Licensed
33 * Product with Synopsys or any supplement thereto. Permission is hereby
34 * granted, free of charge, to any person obtaining a copy of this software
35 * annotated with this license and the Software, to deal in the Software
36 * without restriction, including without limitation the rights to use,
37 * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
38 * of the Software, and to permit persons to whom the Software is furnished
39 * to do so, subject to the following conditions:
41 * The above copyright notice and this permission notice shall be included
42 * in all copies or substantial portions of the Software.
44 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
45 * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
46 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
47 * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
48 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
49 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
50 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
51 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
52 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
53 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
54 * THE POSSIBILITY OF SUCH DAMAGE.
57 * License 2: Modified BSD
59 * Copyright (c) 2014 Advanced Micro Devices, Inc.
60 * All rights reserved.
62 * Redistribution and use in source and binary forms, with or without
63 * modification, are permitted provided that the following conditions are met:
64 * * Redistributions of source code must retain the above copyright
65 * notice, this list of conditions and the following disclaimer.
66 * * Redistributions in binary form must reproduce the above copyright
67 * notice, this list of conditions and the following disclaimer in the
68 * documentation and/or other materials provided with the distribution.
69 * * Neither the name of Advanced Micro Devices, Inc. nor the
70 * names of its contributors may be used to endorse or promote products
71 * derived from this software without specific prior written permission.
73 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
74 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
75 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
76 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
77 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
78 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
79 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
80 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
81 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
82 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
84 * This file incorporates work covered by the following copyright and
86 * The Synopsys DWC ETHER XGMAC Software Driver and documentation
87 * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
88 * Inc. unless otherwise expressly agreed to in writing between Synopsys
91 * The Software IS NOT an item of Licensed Software or Licensed Product
92 * under any End User Software License Agreement or Agreement for Licensed
93 * Product with Synopsys or any supplement thereto. Permission is hereby
94 * granted, free of charge, to any person obtaining a copy of this software
95 * annotated with this license and the Software, to deal in the Software
96 * without restriction, including without limitation the rights to use,
97 * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
98 * of the Software, and to permit persons to whom the Software is furnished
99 * to do so, subject to the following conditions:
101 * The above copyright notice and this permission notice shall be included
102 * in all copies or substantial portions of the Software.
104 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
105 * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
106 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
107 * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
108 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
109 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
110 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
111 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
112 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
113 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
114 * THE POSSIBILITY OF SUCH DAMAGE.
117 #include <linux/module.h>
118 #include <linux/device.h>
119 #include <linux/platform_device.h>
120 #include <linux/spinlock.h>
121 #include <linux/netdevice.h>
122 #include <linux/etherdevice.h>
123 #include <linux/io.h>
124 #include <linux/of.h>
125 #include <linux/of_net.h>
126 #include <linux/of_address.h>
127 #include <linux/of_platform.h>
128 #include <linux/clk.h>
129 #include <linux/property.h>
130 #include <linux/acpi.h>
131 #include <linux/mdio.h>
134 #include "xgbe-common.h"
/* Module metadata plus the "debug" module parameter.  debug = -1 selects
 * the driver default message level via netif_msg_init() in xgbe_probe().
 * NOTE(review): this listing is garbled — each line carries a stray leading
 * line number and some lines are missing (e.g. the tail of the
 * default_msg_level initializer below); recover from upstream xgbe-main.c.
 */
136 MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
137 MODULE_LICENSE("Dual BSD/GPL");
138 MODULE_VERSION(XGBE_DRV_VERSION);
139 MODULE_DESCRIPTION(XGBE_DRV_DESC);
/* Writable by root at runtime (S_IWUSR) and readable by all (S_IRUGO). */
141 static int debug = -1;
142 module_param(debug, int, S_IWUSR | S_IRUGO);
143 MODULE_PARM_DESC(debug, " Network interface message level setting");
/* Default netif message-level mask used when "debug" is left at -1.
 * NOTE(review): initializer is truncated in this listing.
 */
145 static const u32 default_msg_level = (NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
/* Default SerDes configuration tables, one entry per supported speed
 * (1GbE, 2.5GbE, 10GbE per the XGBE_SPEED_* macro names).  xgbe_probe()
 * copies these into pdata->serdes_* as fallbacks whenever the matching
 * device property (e.g. XGBE_BLWC_PROPERTY) is absent.
 * NOTE(review): several table entries and the closing "};" lines are
 * missing from this truncated listing — confirm against upstream.
 */
/* Baseline wander correction. */
148 static const u32 xgbe_serdes_blwc[] = {
149 XGBE_SPEED_1000_BLWC,
150 XGBE_SPEED_2500_BLWC,
151 XGBE_SPEED_10000_BLWC,
/* Clock/data recovery rate (1G/2.5G entries missing from this listing). */
154 static const u32 xgbe_serdes_cdr_rate[] = {
157 XGBE_SPEED_10000_CDR,
/* PQ skew (all entries missing from this listing). */
160 static const u32 xgbe_serdes_pq_skew[] = {
/* Transmit amplitude. */
166 static const u32 xgbe_serdes_tx_amp[] = {
167 XGBE_SPEED_1000_TXAMP,
168 XGBE_SPEED_2500_TXAMP,
169 XGBE_SPEED_10000_TXAMP,
/* DFE tap configuration. */
172 static const u32 xgbe_serdes_dfe_tap_cfg[] = {
173 XGBE_SPEED_1000_DFE_TAP_CONFIG,
174 XGBE_SPEED_2500_DFE_TAP_CONFIG,
175 XGBE_SPEED_10000_DFE_TAP_CONFIG,
/* DFE tap enable. */
178 static const u32 xgbe_serdes_dfe_tap_ena[] = {
179 XGBE_SPEED_1000_DFE_TAP_ENABLE,
180 XGBE_SPEED_2500_DFE_TAP_ENABLE,
181 XGBE_SPEED_10000_DFE_TAP_ENABLE,
/* Populate pdata with the driver's default DMA/MTL configuration.
 * Called from xgbe_probe() after the hardware features are read.
 * NOTE(review): braces and a few assignments (around original lines
 * 197-198) are missing from this truncated listing.
 */
184 static void xgbe_default_config(struct xgbe_prv_data *pdata)
186 DBGPR("-->xgbe_default_config\n");
/* DMA burst and Tx path defaults. */
188 pdata->pblx8 = DMA_PBL_X8_ENABLE;
189 pdata->tx_sf_mode = MTL_TSF_ENABLE;
190 pdata->tx_threshold = MTL_TX_THRESHOLD_64;
191 pdata->tx_pbl = DMA_PBL_16;
192 pdata->tx_osp_mode = DMA_OSP_ENABLE;
/* Rx path defaults: store-and-forward disabled, 64-byte threshold. */
193 pdata->rx_sf_mode = MTL_RSF_DISABLE;
194 pdata->rx_threshold = MTL_RX_THRESHOLD_64;
195 pdata->rx_pbl = DMA_PBL_16;
/* Let autonegotiation determine flow-control settings. */
196 pdata->pause_autoneg = 1;
/* Link speed is unknown until the PHY reports it; device starts powered up. */
199 pdata->phy_speed = SPEED_UNKNOWN;
200 pdata->power_down = 0;
202 DBGPR("<--xgbe_default_config\n");
/* Install the hardware, PHY, and descriptor operation function-pointer
 * tables into pdata (hw_if / phy_if / desc_if).
 * NOTE(review): the function's braces are missing from this listing.
 */
205 static void xgbe_init_all_fptrs(struct xgbe_prv_data *pdata)
207 xgbe_init_function_ptrs_dev(&pdata->hw_if);
208 xgbe_init_function_ptrs_phy(&pdata->phy_if);
209 xgbe_init_function_ptrs_desc(&pdata->desc_if);
/* ACPI path: read the DMA (system) and PTP clock frequencies from device
 * properties into pdata->sysclk_rate / pdata->ptpclk_rate.
 * Returns 0 on success; error-return lines are missing from this
 * truncated listing (presumably returns ret on property failure —
 * TODO confirm against upstream).
 */
213 static int xgbe_acpi_support(struct xgbe_prv_data *pdata)
215 struct device *dev = pdata->dev;
219 /* Obtain the system clock setting */
220 ret = device_property_read_u32(dev, XGBE_ACPI_DMA_FREQ, &property);
222 dev_err(dev, "unable to obtain %s property\n",
226 pdata->sysclk_rate = property;
228 /* Obtain the PTP clock setting */
229 ret = device_property_read_u32(dev, XGBE_ACPI_PTP_FREQ, &property);
231 dev_err(dev, "unable to obtain %s property\n",
235 pdata->ptpclk_rate = property;
/* Stub for kernels built without ACPI support (body truncated here). */
239 #else /* CONFIG_ACPI */
240 static int xgbe_acpi_support(struct xgbe_prv_data *pdata)
244 #endif /* CONFIG_ACPI */
/* Device-tree path: obtain the DMA (system) and PTP clocks via the clock
 * framework and cache their rates in pdata.  devm_clk_get() ties clock
 * lifetime to the device, so no explicit clk_put is needed.
 * NOTE(review): braces and the final "return 0;" are missing from this
 * truncated listing.
 */
247 static int xgbe_of_support(struct xgbe_prv_data *pdata)
249 struct device *dev = pdata->dev;
251 /* Obtain the system clock setting */
252 pdata->sysclk = devm_clk_get(dev, XGBE_DMA_CLOCK);
253 if (IS_ERR(pdata->sysclk)) {
254 dev_err(dev, "dma devm_clk_get failed\n");
255 return PTR_ERR(pdata->sysclk);
257 pdata->sysclk_rate = clk_get_rate(pdata->sysclk);
259 /* Obtain the PTP clock setting */
260 pdata->ptpclk = devm_clk_get(dev, XGBE_PTP_CLOCK);
261 if (IS_ERR(pdata->ptpclk)) {
262 dev_err(dev, "ptp devm_clk_get failed\n");
263 return PTR_ERR(pdata->ptpclk);
265 pdata->ptpclk_rate = clk_get_rate(pdata->ptpclk);
/* Resolve the platform device that owns the PHY resources.
 * Old-style DT: a separate node referenced via "phy-handle".
 * New-style DT: the PHY resources live on the XGBE device itself, listed
 * after the MAC resources, so the XGBE pdev is returned.
 * Ownership: the returned pdev carries a reference the caller must drop
 * with platform_device_put() (see xgbe_probe).
 * NOTE(review): the branch structure and reference-taking on the
 * new-style path are partially missing from this truncated listing.
 */
270 static struct platform_device *xgbe_of_get_phy_pdev(struct xgbe_prv_data *pdata)
272 struct device *dev = pdata->dev;
273 struct device_node *phy_node;
274 struct platform_device *phy_pdev;
276 phy_node = of_parse_phandle(dev->of_node, "phy-handle", 0);
278 /* Old style device tree:
279 * The XGBE and PHY resources are separate
281 phy_pdev = of_find_device_by_node(phy_node);
/* Done with the node reference regardless of lookup outcome. */
282 of_node_put(phy_node);
284 /* New style device tree:
285 * The XGBE and PHY resources are grouped together with
286 * the PHY resources listed last
289 phy_pdev = pdata->pdev;
/* Stubs for kernels built without OF support (bodies truncated here). */
294 #else /* CONFIG_OF */
295 static int xgbe_of_support(struct xgbe_prv_data *pdata)
300 static struct platform_device *xgbe_of_get_phy_pdev(struct xgbe_prv_data *pdata)
304 #endif /* CONFIG_OF */
/* Count the platform-device resources of the given type (IORESOURCE_MEM,
 * IORESOURCE_IRQ, ...).  Used by xgbe_probe() to locate where the PHY
 * resources start when MAC and PHY share one pdev.
 * NOTE(review): the "type" parameter line, count increment, and return
 * are missing from this truncated listing.
 */
306 static unsigned int xgbe_resource_count(struct platform_device *pdev,
312 for (i = 0, count = 0; i < pdev->num_resources; i++) {
313 struct resource *res = &pdev->resource[i];
315 if (type == resource_type(res))
/* Front-end for PHY pdev lookup: on ACPI the MAC and PHY share one
 * platform device (an extra device reference is taken so the later
 * platform_device_put() in xgbe_probe balances); on DT it defers to
 * xgbe_of_get_phy_pdev().
 * NOTE(review): the else/return lines are missing from this listing.
 */
322 static struct platform_device *xgbe_get_phy_pdev(struct xgbe_prv_data *pdata)
324 struct platform_device *phy_pdev;
326 if (pdata->use_acpi) {
327 get_device(pdata->dev);
328 phy_pdev = pdata->pdev;
330 phy_pdev = xgbe_of_get_phy_pdev(pdata);
/* Platform-driver probe: allocate the net_device, map MMIO regions,
 * read ACPI/DT properties, size the Tx/Rx rings, initialize RSS,
 * register the netdev, and create the device/AN workqueues.
 * Returns 0 on success or a negative errno.
 * NOTE(review): this listing is heavily truncated — error checks,
 * "goto err_*" lines, labels, and returns are missing throughout;
 * only the visible lines are annotated below.
 */
336 static int xgbe_probe(struct platform_device *pdev)
338 struct xgbe_prv_data *pdata;
339 struct net_device *netdev;
340 struct device *dev = &pdev->dev, *phy_dev;
341 struct platform_device *phy_pdev;
342 struct resource *res;
343 const char *phy_mode;
344 unsigned int i, phy_memnum, phy_irqnum;
347 DBGPR("--> xgbe_probe\n");
/* One queue per possible DMA channel; private area holds xgbe_prv_data. */
349 netdev = alloc_etherdev_mq(sizeof(struct xgbe_prv_data),
350 XGBE_MAX_DMA_CHANNELS);
352 dev_err(dev, "alloc_etherdev failed\n");
356 SET_NETDEV_DEV(netdev, dev);
357 pdata = netdev_priv(netdev);
358 pdata->netdev = netdev;
360 pdata->adev = ACPI_COMPANION(dev);
362 platform_set_drvdata(pdev, netdev);
364 spin_lock_init(&pdata->lock);
365 mutex_init(&pdata->xpcs_mutex);
366 mutex_init(&pdata->rss_mutex);
367 spin_lock_init(&pdata->tstamp_lock);
369 pdata->msg_enable = netif_msg_init(debug, default_msg_level);
371 set_bit(XGBE_DOWN, &pdata->dev_state);
373 /* Check if we should use ACPI or DT */
374 pdata->use_acpi = (!pdata->adev || acpi_disabled) ? 0 : 1;
376 phy_pdev = xgbe_get_phy_pdev(pdata);
378 dev_err(dev, "unable to obtain phy device\n");
382 phy_dev = &phy_pdev->dev;
384 if (pdev == phy_pdev) {
385 /* New style device tree or ACPI:
386 * The XGBE and PHY resources are grouped together with
387 * the PHY resources listed last
/* PHY uses the last 3 MEM and last 1 IRQ resources of the shared pdev. */
389 phy_memnum = xgbe_resource_count(pdev, IORESOURCE_MEM) - 3;
390 phy_irqnum = xgbe_resource_count(pdev, IORESOURCE_IRQ) - 1;
392 /* Old style device tree:
393 * The XGBE and PHY resources are separate
399 /* Set and validate the number of descriptors for a ring */
400 BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_TX_DESC_CNT);
401 pdata->tx_desc_count = XGBE_TX_DESC_CNT;
/* Runtime power-of-two check (mirrors the build-time assertion above). */
402 if (pdata->tx_desc_count & (pdata->tx_desc_count - 1)) {
403 dev_err(dev, "tx descriptor count (%d) is not valid\n",
404 pdata->tx_desc_count);
408 BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_RX_DESC_CNT);
409 pdata->rx_desc_count = XGBE_RX_DESC_CNT;
410 if (pdata->rx_desc_count & (pdata->rx_desc_count - 1)) {
411 dev_err(dev, "rx descriptor count (%d) is not valid\n",
412 pdata->rx_desc_count);
417 /* Obtain the mmio areas for the device */
418 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
419 pdata->xgmac_regs = devm_ioremap_resource(dev, res);
420 if (IS_ERR(pdata->xgmac_regs)) {
421 dev_err(dev, "xgmac ioremap failed\n");
422 ret = PTR_ERR(pdata->xgmac_regs);
425 if (netif_msg_probe(pdata))
426 dev_dbg(dev, "xgmac_regs = %p\n", pdata->xgmac_regs);
428 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
429 pdata->xpcs_regs = devm_ioremap_resource(dev, res);
430 if (IS_ERR(pdata->xpcs_regs)) {
431 dev_err(dev, "xpcs ioremap failed\n");
432 ret = PTR_ERR(pdata->xpcs_regs);
435 if (netif_msg_probe(pdata))
436 dev_dbg(dev, "xpcs_regs = %p\n", pdata->xpcs_regs);
/* The next three regions come from the PHY pdev, starting at phy_memnum. */
438 res = platform_get_resource(phy_pdev, IORESOURCE_MEM, phy_memnum++);
439 pdata->rxtx_regs = devm_ioremap_resource(dev, res);
440 if (IS_ERR(pdata->rxtx_regs)) {
441 dev_err(dev, "rxtx ioremap failed\n");
442 ret = PTR_ERR(pdata->rxtx_regs);
445 if (netif_msg_probe(pdata))
446 dev_dbg(dev, "rxtx_regs = %p\n", pdata->rxtx_regs);
448 res = platform_get_resource(phy_pdev, IORESOURCE_MEM, phy_memnum++);
449 pdata->sir0_regs = devm_ioremap_resource(dev, res);
450 if (IS_ERR(pdata->sir0_regs)) {
451 dev_err(dev, "sir0 ioremap failed\n");
452 ret = PTR_ERR(pdata->sir0_regs);
455 if (netif_msg_probe(pdata))
456 dev_dbg(dev, "sir0_regs = %p\n", pdata->sir0_regs);
458 res = platform_get_resource(phy_pdev, IORESOURCE_MEM, phy_memnum++);
459 pdata->sir1_regs = devm_ioremap_resource(dev, res);
460 if (IS_ERR(pdata->sir1_regs)) {
461 dev_err(dev, "sir1 ioremap failed\n");
462 ret = PTR_ERR(pdata->sir1_regs);
465 if (netif_msg_probe(pdata))
466 dev_dbg(dev, "sir1_regs = %p\n", pdata->sir1_regs);
468 /* Retrieve the MAC address */
469 ret = device_property_read_u8_array(dev, XGBE_MAC_ADDR_PROPERTY,
471 sizeof(pdata->mac_addr));
470 if (ret || !is_valid_ether_addr(pdata->mac_addr)) {
473 dev_err(dev, "invalid %s property\n", XGBE_MAC_ADDR_PROPERTY);
479 /* Retrieve the PHY mode - it must be "xgmii" */
480 ret = device_property_read_string(dev, XGBE_PHY_MODE_PROPERTY,
482 if (ret || strcmp(phy_mode, phy_modes(PHY_INTERFACE_MODE_XGMII))) {
483 dev_err(dev, "invalid %s property\n", XGBE_PHY_MODE_PROPERTY);
488 pdata->phy_mode = PHY_INTERFACE_MODE_XGMII;
490 /* Check for per channel interrupt support */
491 if (device_property_present(dev, XGBE_DMA_IRQS_PROPERTY))
492 pdata->per_channel_irq = 1;
494 /* Retrieve the PHY speedset */
495 ret = device_property_read_u32(phy_dev, XGBE_SPEEDSET_PROPERTY,
498 dev_err(dev, "invalid %s property\n", XGBE_SPEEDSET_PROPERTY);
/* Only the two defined speedsets are accepted; anything else is an error. */
502 switch (pdata->speed_set) {
503 case XGBE_SPEEDSET_1000_10000:
504 case XGBE_SPEEDSET_2500_10000:
507 dev_err(dev, "invalid %s property\n", XGBE_SPEEDSET_PROPERTY);
512 /* Retrieve the PHY configuration properties */
/* For each SerDes table: use the device property if present, otherwise
 * fall back to the driver default table defined at the top of the file.
 */
513 if (device_property_present(phy_dev, XGBE_BLWC_PROPERTY)) {
514 ret = device_property_read_u32_array(phy_dev,
519 dev_err(dev, "invalid %s property\n",
524 memcpy(pdata->serdes_blwc, xgbe_serdes_blwc,
525 sizeof(pdata->serdes_blwc));
528 if (device_property_present(phy_dev, XGBE_CDR_RATE_PROPERTY)) {
529 ret = device_property_read_u32_array(phy_dev,
530 XGBE_CDR_RATE_PROPERTY,
531 pdata->serdes_cdr_rate,
534 dev_err(dev, "invalid %s property\n",
535 XGBE_CDR_RATE_PROPERTY);
539 memcpy(pdata->serdes_cdr_rate, xgbe_serdes_cdr_rate,
540 sizeof(pdata->serdes_cdr_rate));
543 if (device_property_present(phy_dev, XGBE_PQ_SKEW_PROPERTY)) {
544 ret = device_property_read_u32_array(phy_dev,
545 XGBE_PQ_SKEW_PROPERTY,
546 pdata->serdes_pq_skew,
549 dev_err(dev, "invalid %s property\n",
550 XGBE_PQ_SKEW_PROPERTY);
554 memcpy(pdata->serdes_pq_skew, xgbe_serdes_pq_skew,
555 sizeof(pdata->serdes_pq_skew));
558 if (device_property_present(phy_dev, XGBE_TX_AMP_PROPERTY)) {
559 ret = device_property_read_u32_array(phy_dev,
560 XGBE_TX_AMP_PROPERTY,
561 pdata->serdes_tx_amp,
564 dev_err(dev, "invalid %s property\n",
565 XGBE_TX_AMP_PROPERTY);
569 memcpy(pdata->serdes_tx_amp, xgbe_serdes_tx_amp,
570 sizeof(pdata->serdes_tx_amp));
573 if (device_property_present(phy_dev, XGBE_DFE_CFG_PROPERTY)) {
574 ret = device_property_read_u32_array(phy_dev,
575 XGBE_DFE_CFG_PROPERTY,
576 pdata->serdes_dfe_tap_cfg,
579 dev_err(dev, "invalid %s property\n",
580 XGBE_DFE_CFG_PROPERTY);
584 memcpy(pdata->serdes_dfe_tap_cfg, xgbe_serdes_dfe_tap_cfg,
585 sizeof(pdata->serdes_dfe_tap_cfg));
588 if (device_property_present(phy_dev, XGBE_DFE_ENA_PROPERTY)) {
589 ret = device_property_read_u32_array(phy_dev,
590 XGBE_DFE_ENA_PROPERTY,
591 pdata->serdes_dfe_tap_ena,
594 dev_err(dev, "invalid %s property\n",
595 XGBE_DFE_ENA_PROPERTY);
599 memcpy(pdata->serdes_dfe_tap_ena, xgbe_serdes_dfe_tap_ena,
600 sizeof(pdata->serdes_dfe_tap_ena));
603 /* Obtain device settings unique to ACPI/OF */
605 ret = xgbe_acpi_support(pdata);
607 ret = xgbe_of_support(pdata);
611 /* Set the DMA coherency values */
612 pdata->coherent = device_dma_is_coherent(pdata->dev);
613 if (pdata->coherent) {
614 pdata->axdomain = XGBE_DMA_OS_AXDOMAIN;
615 pdata->arcache = XGBE_DMA_OS_ARCACHE;
616 pdata->awcache = XGBE_DMA_OS_AWCACHE;
618 pdata->axdomain = XGBE_DMA_SYS_AXDOMAIN;
619 pdata->arcache = XGBE_DMA_SYS_ARCACHE;
620 pdata->awcache = XGBE_DMA_SYS_AWCACHE;
623 /* Get the device interrupt */
624 ret = platform_get_irq(pdev, 0);
626 dev_err(dev, "platform_get_irq 0 failed\n");
629 pdata->dev_irq = ret;
631 /* Get the auto-negotiation interrupt */
632 ret = platform_get_irq(phy_pdev, phy_irqnum++);
634 dev_err(dev, "platform_get_irq phy 0 failed\n");
639 netdev->irq = pdata->dev_irq;
640 netdev->base_addr = (unsigned long)pdata->xgmac_regs;
641 memcpy(netdev->dev_addr, pdata->mac_addr, netdev->addr_len);
643 /* Set all the function pointers */
644 xgbe_init_all_fptrs(pdata);
646 /* Issue software reset to device */
647 pdata->hw_if.exit(pdata);
649 /* Populate the hardware features */
650 xgbe_get_all_hw_features(pdata);
652 /* Set default configuration data */
653 xgbe_default_config(pdata);
655 /* Set the DMA mask */
656 ret = dma_set_mask_and_coherent(dev,
657 DMA_BIT_MASK(pdata->hw_feat.dma_width));
659 dev_err(dev, "dma_set_mask_and_coherent failed\n");
663 /* Calculate the number of Tx and Rx rings to be created
664 * -Tx (DMA) Channels map 1-to-1 to Tx Queues so set
665 * the number of Tx queues to the number of Tx channels
667 * -Rx (DMA) Channels do not map 1-to-1 so use the actual
668 * number of Rx queues
670 pdata->tx_ring_count = min_t(unsigned int, num_online_cpus(),
671 pdata->hw_feat.tx_ch_cnt);
672 pdata->tx_q_count = pdata->tx_ring_count;
673 ret = netif_set_real_num_tx_queues(netdev, pdata->tx_ring_count);
675 dev_err(dev, "error setting real tx queue count\n");
679 pdata->rx_ring_count = min_t(unsigned int,
680 netif_get_num_default_rss_queues(),
681 pdata->hw_feat.rx_ch_cnt);
682 pdata->rx_q_count = pdata->hw_feat.rx_q_cnt;
683 ret = netif_set_real_num_rx_queues(netdev, pdata->rx_ring_count);
685 dev_err(dev, "error setting real rx queue count\n");
689 /* Initialize RSS hash key and lookup table */
690 netdev_rss_key_fill(pdata->rss_key, sizeof(pdata->rss_key));
/* Spread RSS table entries round-robin across the Rx rings. */
692 for (i = 0; i < XGBE_RSS_MAX_TABLE_SIZE; i++)
693 XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH,
694 i % pdata->rx_ring_count);
/* Enable hashing for IPv4 headers and TCP/UDP over IPv4. */
696 XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1);
697 XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1);
698 XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);
700 /* Call MDIO/PHY initialization routine */
701 pdata->phy_if.phy_init(pdata);
703 /* Set device operations */
704 netdev->netdev_ops = xgbe_get_netdev_ops();
705 netdev->ethtool_ops = xgbe_get_ethtool_ops();
706 #ifdef CONFIG_AMD_XGBE_DCB
707 netdev->dcbnl_ops = xgbe_get_dcbnl_ops();
710 /* Set device features */
711 netdev->hw_features = NETIF_F_SG |
718 NETIF_F_HW_VLAN_CTAG_RX |
719 NETIF_F_HW_VLAN_CTAG_TX |
720 NETIF_F_HW_VLAN_CTAG_FILTER;
722 if (pdata->hw_feat.rss)
723 netdev->hw_features |= NETIF_F_RXHASH;
725 netdev->vlan_features |= NETIF_F_SG |
731 netdev->features |= netdev->hw_features;
732 pdata->netdev_features = netdev->features;
734 netdev->priv_flags |= IFF_UNICAST_FLT;
736 /* Use default watchdog timeout */
737 netdev->watchdog_timeo = 0;
739 xgbe_init_rx_coalesce(pdata);
740 xgbe_init_tx_coalesce(pdata);
742 netif_carrier_off(netdev);
743 ret = register_netdev(netdev);
745 dev_err(dev, "net device registration failed\n");
749 /* Create the PHY/ANEG name based on netdev name */
750 snprintf(pdata->an_name, sizeof(pdata->an_name) - 1, "%s-pcs",
751 netdev_name(netdev));
753 /* Create workqueues */
754 pdata->dev_workqueue =
755 create_singlethread_workqueue(netdev_name(netdev));
756 if (!pdata->dev_workqueue) {
757 netdev_err(netdev, "device workqueue creation failed\n");
762 pdata->an_workqueue =
763 create_singlethread_workqueue(pdata->an_name);
764 if (!pdata->an_workqueue) {
765 netdev_err(netdev, "phy workqueue creation failed\n");
770 xgbe_ptp_register(pdata);
772 xgbe_debugfs_init(pdata);
/* Balances the reference taken in xgbe_get_phy_pdev(). */
774 platform_device_put(phy_pdev);
776 netdev_notice(netdev, "net device enabled\n");
778 DBGPR("<-- xgbe_probe\n");
/* Error unwinding — the err_* labels themselves are missing from this
 * truncated listing; the lines below are the visible cleanup steps.
 */
783 destroy_workqueue(pdata->dev_workqueue);
786 unregister_netdev(netdev);
789 platform_device_put(phy_pdev);
795 dev_notice(dev, "net device not enabled\n");
/* Platform-driver remove: tear down in reverse order of probe —
 * debugfs, PTP clock, both workqueues (flushed before destruction so
 * queued work completes), then the netdev registration.
 * NOTE(review): braces, free_netdev()/return lines are missing from
 * this truncated listing.
 */
800 static int xgbe_remove(struct platform_device *pdev)
802 struct net_device *netdev = platform_get_drvdata(pdev);
803 struct xgbe_prv_data *pdata = netdev_priv(netdev);
805 DBGPR("-->xgbe_remove\n");
807 xgbe_debugfs_exit(pdata);
809 xgbe_ptp_unregister(pdata);
811 flush_workqueue(pdata->an_workqueue);
812 destroy_workqueue(pdata->an_workqueue);
814 flush_workqueue(pdata->dev_workqueue);
815 destroy_workqueue(pdata->dev_workqueue);
817 unregister_netdev(netdev);
821 DBGPR("<--xgbe_remove\n");
/* PM suspend: power down the interface if it is running, then save the
 * PCS MDIO_CTRL1 register and set its low-power bit.
 * NOTE(review): braces and the return statement are missing from this
 * truncated listing; the surrounding #ifdef CONFIG_PM open is also cut.
 */
827 static int xgbe_suspend(struct device *dev)
829 struct net_device *netdev = dev_get_drvdata(dev);
830 struct xgbe_prv_data *pdata = netdev_priv(netdev);
833 DBGPR("-->xgbe_suspend\n");
835 if (netif_running(netdev))
836 ret = xgbe_powerdown(netdev, XGMAC_DRIVER_CONTEXT);
/* Cache CTRL1 so resume can restore it with the low-power bit cleared. */
838 pdata->lpm_ctrl = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
839 pdata->lpm_ctrl |= MDIO_CTRL1_LPOWER;
840 XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, pdata->lpm_ctrl);
842 DBGPR("<--xgbe_suspend\n");
/* PM resume: clear the PCS low-power bit saved at suspend, write it
 * back, then power the interface up again if it was running.
 * NOTE(review): braces and the return statement are missing from this
 * truncated listing.
 */
847 static int xgbe_resume(struct device *dev)
849 struct net_device *netdev = dev_get_drvdata(dev);
850 struct xgbe_prv_data *pdata = netdev_priv(netdev);
853 DBGPR("-->xgbe_resume\n");
855 pdata->lpm_ctrl &= ~MDIO_CTRL1_LPOWER;
856 XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, pdata->lpm_ctrl);
858 if (netif_running(netdev))
859 ret = xgbe_powerup(netdev, XGMAC_DRIVER_CONTEXT);
861 DBGPR("<--xgbe_resume\n");
865 #endif /* CONFIG_PM */
/* Driver registration: ACPI and OF match tables, the suspend/resume PM
 * ops, and the platform_driver glue.  module_platform_driver() expands
 * to the module init/exit boilerplate.
 * NOTE(review): table entries/terminators, the driver name, and the
 * .probe assignment are missing from this truncated listing.
 */
868 static const struct acpi_device_id xgbe_acpi_match[] = {
873 MODULE_DEVICE_TABLE(acpi, xgbe_acpi_match);
877 static const struct of_device_id xgbe_of_match[] = {
878 { .compatible = "amd,xgbe-seattle-v1a", },
882 MODULE_DEVICE_TABLE(of, xgbe_of_match);
885 static SIMPLE_DEV_PM_OPS(xgbe_pm_ops, xgbe_suspend, xgbe_resume);
887 static struct platform_driver xgbe_driver = {
891 .acpi_match_table = xgbe_acpi_match,
894 .of_match_table = xgbe_of_match,
899 .remove = xgbe_remove,
902 module_platform_driver(xgbe_driver);