/* drivers/net/phy/amd-xgbe-phy.c */
1 /*
2  * AMD 10Gb Ethernet PHY driver
3  *
4  * This file is available to you under your choice of the following two
5  * licenses:
6  *
7  * License 1: GPLv2
8  *
9  * Copyright (c) 2014 Advanced Micro Devices, Inc.
10  *
11  * This file is free software; you may copy, redistribute and/or modify
12  * it under the terms of the GNU General Public License as published by
13  * the Free Software Foundation, either version 2 of the License, or (at
14  * your option) any later version.
15  *
16  * This file is distributed in the hope that it will be useful, but
17  * WITHOUT ANY WARRANTY; without even the implied warranty of
18  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19  * General Public License for more details.
20  *
21  * You should have received a copy of the GNU General Public License
22  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
23  *
24  *
25  * License 2: Modified BSD
26  *
27  * Copyright (c) 2014 Advanced Micro Devices, Inc.
28  * All rights reserved.
29  *
30  * Redistribution and use in source and binary forms, with or without
31  * modification, are permitted provided that the following conditions are met:
32  *     * Redistributions of source code must retain the above copyright
33  *       notice, this list of conditions and the following disclaimer.
34  *     * Redistributions in binary form must reproduce the above copyright
35  *       notice, this list of conditions and the following disclaimer in the
36  *       documentation and/or other materials provided with the distribution.
37  *     * Neither the name of Advanced Micro Devices, Inc. nor the
38  *       names of its contributors may be used to endorse or promote products
39  *       derived from this software without specific prior written permission.
40  *
41  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
42  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
43  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
44  * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
45  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
46  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
47  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
48  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
49  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
50  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
51  */
52
53 #include <linux/kernel.h>
54 #include <linux/device.h>
55 #include <linux/platform_device.h>
56 #include <linux/string.h>
57 #include <linux/errno.h>
58 #include <linux/unistd.h>
59 #include <linux/slab.h>
60 #include <linux/interrupt.h>
61 #include <linux/init.h>
62 #include <linux/delay.h>
63 #include <linux/workqueue.h>
64 #include <linux/netdevice.h>
65 #include <linux/etherdevice.h>
66 #include <linux/skbuff.h>
67 #include <linux/mm.h>
68 #include <linux/module.h>
69 #include <linux/mii.h>
70 #include <linux/ethtool.h>
71 #include <linux/phy.h>
72 #include <linux/mdio.h>
73 #include <linux/io.h>
74 #include <linux/of.h>
75 #include <linux/of_platform.h>
76 #include <linux/of_device.h>
77 #include <linux/uaccess.h>
78 #include <linux/bitops.h>
79 #include <linux/property.h>
80 #include <linux/acpi.h>
81 #include <linux/jiffies.h>
82
83 MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
84 MODULE_LICENSE("Dual BSD/GPL");
85 MODULE_VERSION("1.0.0-a");
86 MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
87
88 #define XGBE_PHY_ID     0x000162d0
89 #define XGBE_PHY_MASK   0xfffffff0
90
91 #define XGBE_PHY_SPEEDSET_PROPERTY      "amd,speed-set"
92 #define XGBE_PHY_BLWC_PROPERTY          "amd,serdes-blwc"
93 #define XGBE_PHY_CDR_RATE_PROPERTY      "amd,serdes-cdr-rate"
94 #define XGBE_PHY_PQ_SKEW_PROPERTY       "amd,serdes-pq-skew"
95 #define XGBE_PHY_TX_AMP_PROPERTY        "amd,serdes-tx-amp"
96 #define XGBE_PHY_DFE_CFG_PROPERTY       "amd,serdes-dfe-tap-config"
97 #define XGBE_PHY_DFE_ENA_PROPERTY       "amd,serdes-dfe-tap-enable"
98
99 #define XGBE_PHY_SPEEDS                 3
100 #define XGBE_PHY_SPEED_1000             0
101 #define XGBE_PHY_SPEED_2500             1
102 #define XGBE_PHY_SPEED_10000            2
103
104 #define XGBE_AN_MS_TIMEOUT              500
105
106 #define XGBE_AN_INT_CMPLT               0x01
107 #define XGBE_AN_INC_LINK                0x02
108 #define XGBE_AN_PG_RCV                  0x04
109 #define XGBE_AN_INT_MASK                0x07
110
111 #define XNP_MCF_NULL_MESSAGE            0x001
112 #define XNP_ACK_PROCESSED               BIT(12)
113 #define XNP_MP_FORMATTED                BIT(13)
114 #define XNP_NP_EXCHANGE                 BIT(15)
115
116 #define XGBE_PHY_RATECHANGE_COUNT       500
117
118 #define XGBE_PHY_KR_TRAINING_START      0x01
119 #define XGBE_PHY_KR_TRAINING_ENABLE     0x02
120
121 #define XGBE_PHY_FEC_ENABLE             0x01
122 #define XGBE_PHY_FEC_FORWARD            0x02
123 #define XGBE_PHY_FEC_MASK               0x03
124
125 #ifndef MDIO_PMA_10GBR_PMD_CTRL
126 #define MDIO_PMA_10GBR_PMD_CTRL         0x0096
127 #endif
128
129 #ifndef MDIO_PMA_10GBR_FEC_ABILITY
130 #define MDIO_PMA_10GBR_FEC_ABILITY      0x00aa
131 #endif
132
133 #ifndef MDIO_PMA_10GBR_FEC_CTRL
134 #define MDIO_PMA_10GBR_FEC_CTRL         0x00ab
135 #endif
136
137 #ifndef MDIO_AN_XNP
138 #define MDIO_AN_XNP                     0x0016
139 #endif
140
141 #ifndef MDIO_AN_LPX
142 #define MDIO_AN_LPX                     0x0019
143 #endif
144
145 #ifndef MDIO_AN_INTMASK
146 #define MDIO_AN_INTMASK                 0x8001
147 #endif
148
149 #ifndef MDIO_AN_INT
150 #define MDIO_AN_INT                     0x8002
151 #endif
152
153 #ifndef MDIO_CTRL1_SPEED1G
154 #define MDIO_CTRL1_SPEED1G              (MDIO_CTRL1_SPEED10G & ~BMCR_SPEED100)
155 #endif
156
157 /* SerDes integration register offsets */
158 #define SIR0_KR_RT_1                    0x002c
159 #define SIR0_STATUS                     0x0040
160 #define SIR1_SPEED                      0x0000
161
162 /* SerDes integration register entry bit positions and sizes */
163 #define SIR0_KR_RT_1_RESET_INDEX        11
164 #define SIR0_KR_RT_1_RESET_WIDTH        1
165 #define SIR0_STATUS_RX_READY_INDEX      0
166 #define SIR0_STATUS_RX_READY_WIDTH      1
167 #define SIR0_STATUS_TX_READY_INDEX      8
168 #define SIR0_STATUS_TX_READY_WIDTH      1
169 #define SIR1_SPEED_CDR_RATE_INDEX       12
170 #define SIR1_SPEED_CDR_RATE_WIDTH       4
171 #define SIR1_SPEED_DATARATE_INDEX       4
172 #define SIR1_SPEED_DATARATE_WIDTH       2
173 #define SIR1_SPEED_PLLSEL_INDEX         3
174 #define SIR1_SPEED_PLLSEL_WIDTH         1
175 #define SIR1_SPEED_RATECHANGE_INDEX     6
176 #define SIR1_SPEED_RATECHANGE_WIDTH     1
177 #define SIR1_SPEED_TXAMP_INDEX          8
178 #define SIR1_SPEED_TXAMP_WIDTH          4
179 #define SIR1_SPEED_WORDMODE_INDEX       0
180 #define SIR1_SPEED_WORDMODE_WIDTH       3
181
182 #define SPEED_10000_BLWC                0
183 #define SPEED_10000_CDR                 0x7
184 #define SPEED_10000_PLL                 0x1
185 #define SPEED_10000_PQ                  0x12
186 #define SPEED_10000_RATE                0x0
187 #define SPEED_10000_TXAMP               0xa
188 #define SPEED_10000_WORD                0x7
189 #define SPEED_10000_DFE_TAP_CONFIG      0x1
190 #define SPEED_10000_DFE_TAP_ENABLE      0x7f
191
192 #define SPEED_2500_BLWC                 1
193 #define SPEED_2500_CDR                  0x2
194 #define SPEED_2500_PLL                  0x0
195 #define SPEED_2500_PQ                   0xa
196 #define SPEED_2500_RATE                 0x1
197 #define SPEED_2500_TXAMP                0xf
198 #define SPEED_2500_WORD                 0x1
199 #define SPEED_2500_DFE_TAP_CONFIG       0x3
200 #define SPEED_2500_DFE_TAP_ENABLE       0x0
201
202 #define SPEED_1000_BLWC                 1
203 #define SPEED_1000_CDR                  0x2
204 #define SPEED_1000_PLL                  0x0
205 #define SPEED_1000_PQ                   0xa
206 #define SPEED_1000_RATE                 0x3
207 #define SPEED_1000_TXAMP                0xf
208 #define SPEED_1000_WORD                 0x1
209 #define SPEED_1000_DFE_TAP_CONFIG       0x3
210 #define SPEED_1000_DFE_TAP_ENABLE       0x0
211
212 /* SerDes RxTx register offsets */
213 #define RXTX_REG6                       0x0018
214 #define RXTX_REG20                      0x0050
215 #define RXTX_REG22                      0x0058
216 #define RXTX_REG114                     0x01c8
217 #define RXTX_REG129                     0x0204
218
219 /* SerDes RxTx register entry bit positions and sizes */
220 #define RXTX_REG6_RESETB_RXD_INDEX      8
221 #define RXTX_REG6_RESETB_RXD_WIDTH      1
222 #define RXTX_REG20_BLWC_ENA_INDEX       2
223 #define RXTX_REG20_BLWC_ENA_WIDTH       1
224 #define RXTX_REG114_PQ_REG_INDEX        9
225 #define RXTX_REG114_PQ_REG_WIDTH        7
226 #define RXTX_REG129_RXDFE_CONFIG_INDEX  14
227 #define RXTX_REG129_RXDFE_CONFIG_WIDTH  2
228
229 /* Bit setting and getting macros
230  *  The get macro will extract the current bit field value from within
231  *  the variable
232  *
233  *  The set macro will clear the current bit field value within the
234  *  variable and then set the bit field of the variable to the
235  *  specified value
236  */
237 #define GET_BITS(_var, _index, _width)                                  \
238         (((_var) >> (_index)) & ((0x1 << (_width)) - 1))
239
240 #define SET_BITS(_var, _index, _width, _val)                            \
241 do {                                                                    \
242         (_var) &= ~(((0x1 << (_width)) - 1) << (_index));               \
243         (_var) |= (((_val) & ((0x1 << (_width)) - 1)) << (_index));     \
244 } while (0)
245
246 #define XSIR_GET_BITS(_var, _prefix, _field)                            \
247         GET_BITS((_var),                                                \
248                  _prefix##_##_field##_INDEX,                            \
249                  _prefix##_##_field##_WIDTH)
250
251 #define XSIR_SET_BITS(_var, _prefix, _field, _val)                      \
252         SET_BITS((_var),                                                \
253                  _prefix##_##_field##_INDEX,                            \
254                  _prefix##_##_field##_WIDTH, (_val))
255
256 /* Macros for reading or writing SerDes integration registers
257  *  The ioread macros will get bit fields or full values using the
258  *  register definitions formed using the input names
259  *
260  *  The iowrite macros will set bit fields or full values using the
261  *  register definitions formed using the input names
262  */
263 #define XSIR0_IOREAD(_priv, _reg)                                       \
264         ioread16((_priv)->sir0_regs + _reg)
265
266 #define XSIR0_IOREAD_BITS(_priv, _reg, _field)                          \
267         GET_BITS(XSIR0_IOREAD((_priv), _reg),                           \
268                  _reg##_##_field##_INDEX,                               \
269                  _reg##_##_field##_WIDTH)
270
271 #define XSIR0_IOWRITE(_priv, _reg, _val)                                \
272         iowrite16((_val), (_priv)->sir0_regs + _reg)
273
274 #define XSIR0_IOWRITE_BITS(_priv, _reg, _field, _val)                   \
275 do {                                                                    \
276         u16 reg_val = XSIR0_IOREAD((_priv), _reg);                      \
277         SET_BITS(reg_val,                                               \
278                  _reg##_##_field##_INDEX,                               \
279                  _reg##_##_field##_WIDTH, (_val));                      \
280         XSIR0_IOWRITE((_priv), _reg, reg_val);                          \
281 } while (0)
282
283 #define XSIR1_IOREAD(_priv, _reg)                                       \
284         ioread16((_priv)->sir1_regs + _reg)
285
286 #define XSIR1_IOREAD_BITS(_priv, _reg, _field)                          \
287         GET_BITS(XSIR1_IOREAD((_priv), _reg),                           \
288                  _reg##_##_field##_INDEX,                               \
289                  _reg##_##_field##_WIDTH)
290
291 #define XSIR1_IOWRITE(_priv, _reg, _val)                                \
292         iowrite16((_val), (_priv)->sir1_regs + _reg)
293
294 #define XSIR1_IOWRITE_BITS(_priv, _reg, _field, _val)                   \
295 do {                                                                    \
296         u16 reg_val = XSIR1_IOREAD((_priv), _reg);                      \
297         SET_BITS(reg_val,                                               \
298                  _reg##_##_field##_INDEX,                               \
299                  _reg##_##_field##_WIDTH, (_val));                      \
300         XSIR1_IOWRITE((_priv), _reg, reg_val);                          \
301 } while (0)
302
303 /* Macros for reading or writing SerDes RxTx registers
304  *  The ioread macros will get bit fields or full values using the
305  *  register definitions formed using the input names
306  *
307  *  The iowrite macros will set bit fields or full values using the
308  *  register definitions formed using the input names
309  */
310 #define XRXTX_IOREAD(_priv, _reg)                                       \
311         ioread16((_priv)->rxtx_regs + _reg)
312
313 #define XRXTX_IOREAD_BITS(_priv, _reg, _field)                          \
314         GET_BITS(XRXTX_IOREAD((_priv), _reg),                           \
315                  _reg##_##_field##_INDEX,                               \
316                  _reg##_##_field##_WIDTH)
317
318 #define XRXTX_IOWRITE(_priv, _reg, _val)                                \
319         iowrite16((_val), (_priv)->rxtx_regs + _reg)
320
321 #define XRXTX_IOWRITE_BITS(_priv, _reg, _field, _val)                   \
322 do {                                                                    \
323         u16 reg_val = XRXTX_IOREAD((_priv), _reg);                      \
324         SET_BITS(reg_val,                                               \
325                  _reg##_##_field##_INDEX,                               \
326                  _reg##_##_field##_WIDTH, (_val));                      \
327         XRXTX_IOWRITE((_priv), _reg, reg_val);                          \
328 } while (0)
329
/* Default SerDes settings, one entry per speed.  Each table is indexed by
 * XGBE_PHY_SPEED_* (0 = 1GbE, 1 = 2.5GbE, 2 = 10GbE) and is copied into the
 * matching priv->serdes_* array unless overridden by device properties.
 */

/* Base loop wander correction enable per speed */
static const u32 amd_xgbe_phy_serdes_blwc[] = {
	SPEED_1000_BLWC,
	SPEED_2500_BLWC,
	SPEED_10000_BLWC,
};

/* Clock/data recovery rate per speed */
static const u32 amd_xgbe_phy_serdes_cdr_rate[] = {
	SPEED_1000_CDR,
	SPEED_2500_CDR,
	SPEED_10000_CDR,
};

/* PQ skew value per speed (written to RXTX_REG114) */
static const u32 amd_xgbe_phy_serdes_pq_skew[] = {
	SPEED_1000_PQ,
	SPEED_2500_PQ,
	SPEED_10000_PQ,
};

/* Transmit amplitude per speed */
static const u32 amd_xgbe_phy_serdes_tx_amp[] = {
	SPEED_1000_TXAMP,
	SPEED_2500_TXAMP,
	SPEED_10000_TXAMP,
};

/* Rx DFE tap configuration per speed (written to RXTX_REG129) */
static const u32 amd_xgbe_phy_serdes_dfe_tap_cfg[] = {
	SPEED_1000_DFE_TAP_CONFIG,
	SPEED_2500_DFE_TAP_CONFIG,
	SPEED_10000_DFE_TAP_CONFIG,
};

/* Rx DFE tap enable mask per speed (written to RXTX_REG22) */
static const u32 amd_xgbe_phy_serdes_dfe_tap_ena[] = {
	SPEED_1000_DFE_TAP_ENABLE,
	SPEED_2500_DFE_TAP_ENABLE,
	SPEED_10000_DFE_TAP_ENABLE,
};
365
/* Auto-negotiation state machine states/results */
enum amd_xgbe_phy_an {
	AMD_XGBE_AN_READY = 0,		/* idle, ready to start AN */
	AMD_XGBE_AN_PAGE_RECEIVED,	/* a base/next page was received */
	AMD_XGBE_AN_INCOMPAT_LINK,	/* link partner mode not compatible */
	AMD_XGBE_AN_COMPLETE,		/* auto-negotiation finished */
	AMD_XGBE_AN_NO_LINK,		/* no link established */
	AMD_XGBE_AN_ERROR,		/* register access or protocol error */
};

/* Per-mode page-exchange sub-states used during AN */
enum amd_xgbe_phy_rx {
	AMD_XGBE_RX_BPA = 0,		/* expecting base page ability */
	AMD_XGBE_RX_XNP,		/* expecting extended next page */
	AMD_XGBE_RX_COMPLETE,		/* page exchange done */
	AMD_XGBE_RX_ERROR,		/* page exchange failed */
};

/* PCS operating mode as selected in MDIO_CTRL2 */
enum amd_xgbe_phy_mode {
	AMD_XGBE_MODE_KR,		/* 10GBase-KR */
	AMD_XGBE_MODE_KX,		/* 1000/2500Base-KX */
};

/* Supported speed-set combinations (from the "amd,speed-set" property) */
enum amd_xgbe_phy_speedset {
	AMD_XGBE_PHY_SPEEDSET_1000_10000 = 0,
	AMD_XGBE_PHY_SPEEDSET_2500_10000,
};
391
/* Driver private data attached to the phy_device (phydev->priv).
 * Field order is preserved; do not reorder without checking all users.
 */
struct amd_xgbe_phy_priv {
	struct platform_device *pdev;	/* owning platform device */
	struct acpi_device *adev;	/* ACPI companion, if any */
	struct device *dev;		/* generic device pointer */

	struct phy_device *phydev;	/* back-pointer to the PHY device */

	/* SerDes related mmio resources */
	struct resource *rxtx_res;
	struct resource *sir0_res;
	struct resource *sir1_res;

	/* SerDes related mmio registers */
	void __iomem *rxtx_regs;	/* SerDes Rx/Tx CSRs */
	void __iomem *sir0_regs;	/* SerDes integration registers (1/2) */
	void __iomem *sir1_regs;	/* SerDes integration registers (2/2) */

	int an_irq;			/* auto-negotiation interrupt */
	char an_irq_name[IFNAMSIZ + 32];	/* name used for the AN IRQ */
	struct work_struct an_irq_work;	/* bottom half for the AN IRQ */
	unsigned int an_irq_allocated;	/* non-zero once the IRQ is requested */

	unsigned int speed_set;		/* enum amd_xgbe_phy_speedset value */

	/* SerDes UEFI configurable settings.
	 *   Switching between modes/speeds requires new values for some
	 *   SerDes settings.  The values can be supplied as device
	 *   properties in array format.  The first array entry is for
	 *   1GbE, second for 2.5GbE and third for 10GbE
	 */
	u32 serdes_blwc[XGBE_PHY_SPEEDS];
	u32 serdes_cdr_rate[XGBE_PHY_SPEEDS];
	u32 serdes_pq_skew[XGBE_PHY_SPEEDS];
	u32 serdes_tx_amp[XGBE_PHY_SPEEDS];
	u32 serdes_dfe_tap_cfg[XGBE_PHY_SPEEDS];
	u32 serdes_dfe_tap_ena[XGBE_PHY_SPEEDS];

	/* Auto-negotiation state machine support */
	struct mutex an_mutex;		/* serializes AN state changes */
	enum amd_xgbe_phy_an an_result;	/* final outcome of the last AN run */
	enum amd_xgbe_phy_an an_state;	/* current AN state machine state */
	enum amd_xgbe_phy_rx kr_state;	/* page-exchange state while in KR */
	enum amd_xgbe_phy_rx kx_state;	/* page-exchange state while in KX */
	struct work_struct an_work;	/* AN state machine worker */
	struct workqueue_struct *an_workqueue;
	unsigned int an_supported;	/* non-zero if AN is usable */
	unsigned int parallel_detect;	/* link came up via parallel detect */
	unsigned int fec_ability;	/* FEC capability bits to advertise */
	unsigned long an_start;		/* jiffies when AN was started */

	unsigned int lpm_ctrl;		/* CTRL1 for resume */
};
444
445 static int amd_xgbe_an_enable_kr_training(struct phy_device *phydev)
446 {
447         int ret;
448
449         ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
450         if (ret < 0)
451                 return ret;
452
453         ret |= XGBE_PHY_KR_TRAINING_ENABLE;
454         phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, ret);
455
456         return 0;
457 }
458
459 static int amd_xgbe_an_disable_kr_training(struct phy_device *phydev)
460 {
461         int ret;
462
463         ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
464         if (ret < 0)
465                 return ret;
466
467         ret &= ~XGBE_PHY_KR_TRAINING_ENABLE;
468         phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, ret);
469
470         return 0;
471 }
472
473 static int amd_xgbe_phy_pcs_power_cycle(struct phy_device *phydev)
474 {
475         int ret;
476
477         ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
478         if (ret < 0)
479                 return ret;
480
481         ret |= MDIO_CTRL1_LPOWER;
482         phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
483
484         usleep_range(75, 100);
485
486         ret &= ~MDIO_CTRL1_LPOWER;
487         phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
488
489         return 0;
490 }
491
492 static void amd_xgbe_phy_serdes_start_ratechange(struct phy_device *phydev)
493 {
494         struct amd_xgbe_phy_priv *priv = phydev->priv;
495
496         /* Assert Rx and Tx ratechange */
497         XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, RATECHANGE, 1);
498 }
499
500 static void amd_xgbe_phy_serdes_complete_ratechange(struct phy_device *phydev)
501 {
502         struct amd_xgbe_phy_priv *priv = phydev->priv;
503         unsigned int wait;
504         u16 status;
505
506         /* Release Rx and Tx ratechange */
507         XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, RATECHANGE, 0);
508
509         /* Wait for Rx and Tx ready */
510         wait = XGBE_PHY_RATECHANGE_COUNT;
511         while (wait--) {
512                 usleep_range(50, 75);
513
514                 status = XSIR0_IOREAD(priv, SIR0_STATUS);
515                 if (XSIR_GET_BITS(status, SIR0_STATUS, RX_READY) &&
516                     XSIR_GET_BITS(status, SIR0_STATUS, TX_READY))
517                         goto rx_reset;
518         }
519
520         netdev_dbg(phydev->attached_dev, "SerDes rx/tx not ready (%#hx)\n",
521                    status);
522
523 rx_reset:
524         /* Perform Rx reset for the DFE changes */
525         XRXTX_IOWRITE_BITS(priv, RXTX_REG6, RESETB_RXD, 0);
526         XRXTX_IOWRITE_BITS(priv, RXTX_REG6, RESETB_RXD, 1);
527 }
528
/* Switch the PCS and SerDes into KR (10GbE/XGMII) operation.
 * The register write order is significant: PCS type/speed first, then
 * a PCS low-power cycle, then the SerDes rate change.  Returns 0 on
 * success or a negative MDIO error.
 */
static int amd_xgbe_phy_xgmii_mode(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;
	int ret;

	/* Enable KR training (required for KR operation) */
	ret = amd_xgbe_an_enable_kr_training(phydev);
	if (ret < 0)
		return ret;

	/* Set PCS to KR/10G speed */
	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
	if (ret < 0)
		return ret;

	ret &= ~MDIO_PCS_CTRL2_TYPE;
	ret |= MDIO_PCS_CTRL2_10GBR;
	phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2, ret);

	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
	if (ret < 0)
		return ret;

	ret &= ~MDIO_CTRL1_SPEEDSEL;
	ret |= MDIO_CTRL1_SPEED10G;
	phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);

	/* Power-cycle the PCS so the new type/speed settings latch */
	ret = amd_xgbe_phy_pcs_power_cycle(phydev);
	if (ret < 0)
		return ret;

	/* Set SerDes to 10G speed */
	amd_xgbe_phy_serdes_start_ratechange(phydev);

	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, DATARATE, SPEED_10000_RATE);
	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, WORDMODE, SPEED_10000_WORD);
	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PLLSEL, SPEED_10000_PLL);

	/* Apply the (possibly UEFI-supplied) 10GbE SerDes tuning values */
	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, CDR_RATE,
			   priv->serdes_cdr_rate[XGBE_PHY_SPEED_10000]);
	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, TXAMP,
			   priv->serdes_tx_amp[XGBE_PHY_SPEED_10000]);
	XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA,
			   priv->serdes_blwc[XGBE_PHY_SPEED_10000]);
	XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG,
			   priv->serdes_pq_skew[XGBE_PHY_SPEED_10000]);
	XRXTX_IOWRITE_BITS(priv, RXTX_REG129, RXDFE_CONFIG,
			   priv->serdes_dfe_tap_cfg[XGBE_PHY_SPEED_10000]);
	XRXTX_IOWRITE(priv, RXTX_REG22,
		      priv->serdes_dfe_tap_ena[XGBE_PHY_SPEED_10000]);

	amd_xgbe_phy_serdes_complete_ratechange(phydev);

	return 0;
}
584
/* Switch the PCS and SerDes into KX/2.5GbE operation.  Mirrors
 * amd_xgbe_phy_xgmii_mode() but disables KR training, selects the
 * 10GBASE-X PCS type with 1G speed select, and uses the 2.5GbE SerDes
 * table entries.  Returns 0 on success or a negative MDIO error.
 */
static int amd_xgbe_phy_gmii_2500_mode(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;
	int ret;

	/* Disable KR training (not used in KX mode) */
	ret = amd_xgbe_an_disable_kr_training(phydev);
	if (ret < 0)
		return ret;

	/* Set PCS to KX/1G speed */
	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
	if (ret < 0)
		return ret;

	ret &= ~MDIO_PCS_CTRL2_TYPE;
	ret |= MDIO_PCS_CTRL2_10GBX;
	phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2, ret);

	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
	if (ret < 0)
		return ret;

	ret &= ~MDIO_CTRL1_SPEEDSEL;
	ret |= MDIO_CTRL1_SPEED1G;
	phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);

	/* Power-cycle the PCS so the new type/speed settings latch */
	ret = amd_xgbe_phy_pcs_power_cycle(phydev);
	if (ret < 0)
		return ret;

	/* Set SerDes to 2.5G speed */
	amd_xgbe_phy_serdes_start_ratechange(phydev);

	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, DATARATE, SPEED_2500_RATE);
	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, WORDMODE, SPEED_2500_WORD);
	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PLLSEL, SPEED_2500_PLL);

	/* Apply the (possibly UEFI-supplied) 2.5GbE SerDes tuning values */
	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, CDR_RATE,
			   priv->serdes_cdr_rate[XGBE_PHY_SPEED_2500]);
	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, TXAMP,
			   priv->serdes_tx_amp[XGBE_PHY_SPEED_2500]);
	XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA,
			   priv->serdes_blwc[XGBE_PHY_SPEED_2500]);
	XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG,
			   priv->serdes_pq_skew[XGBE_PHY_SPEED_2500]);
	XRXTX_IOWRITE_BITS(priv, RXTX_REG129, RXDFE_CONFIG,
			   priv->serdes_dfe_tap_cfg[XGBE_PHY_SPEED_2500]);
	XRXTX_IOWRITE(priv, RXTX_REG22,
		      priv->serdes_dfe_tap_ena[XGBE_PHY_SPEED_2500]);

	amd_xgbe_phy_serdes_complete_ratechange(phydev);

	return 0;
}
640
/* Switch the PCS and SerDes into KX/1GbE operation.  Identical to
 * amd_xgbe_phy_gmii_2500_mode() at the PCS level (KX type, 1G speed);
 * only the SerDes rate/word-mode and tuning table index differ.
 * Returns 0 on success or a negative MDIO error.
 */
static int amd_xgbe_phy_gmii_mode(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;
	int ret;

	/* Disable KR training (not used in KX mode) */
	ret = amd_xgbe_an_disable_kr_training(phydev);
	if (ret < 0)
		return ret;

	/* Set PCS to KX/1G speed */
	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
	if (ret < 0)
		return ret;

	ret &= ~MDIO_PCS_CTRL2_TYPE;
	ret |= MDIO_PCS_CTRL2_10GBX;
	phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2, ret);

	ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
	if (ret < 0)
		return ret;

	ret &= ~MDIO_CTRL1_SPEEDSEL;
	ret |= MDIO_CTRL1_SPEED1G;
	phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);

	/* Power-cycle the PCS so the new type/speed settings latch */
	ret = amd_xgbe_phy_pcs_power_cycle(phydev);
	if (ret < 0)
		return ret;

	/* Set SerDes to 1G speed */
	amd_xgbe_phy_serdes_start_ratechange(phydev);

	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, DATARATE, SPEED_1000_RATE);
	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, WORDMODE, SPEED_1000_WORD);
	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, PLLSEL, SPEED_1000_PLL);

	/* Apply the (possibly UEFI-supplied) 1GbE SerDes tuning values */
	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, CDR_RATE,
			   priv->serdes_cdr_rate[XGBE_PHY_SPEED_1000]);
	XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, TXAMP,
			   priv->serdes_tx_amp[XGBE_PHY_SPEED_1000]);
	XRXTX_IOWRITE_BITS(priv, RXTX_REG20, BLWC_ENA,
			   priv->serdes_blwc[XGBE_PHY_SPEED_1000]);
	XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG,
			   priv->serdes_pq_skew[XGBE_PHY_SPEED_1000]);
	XRXTX_IOWRITE_BITS(priv, RXTX_REG129, RXDFE_CONFIG,
			   priv->serdes_dfe_tap_cfg[XGBE_PHY_SPEED_1000]);
	XRXTX_IOWRITE(priv, RXTX_REG22,
		      priv->serdes_dfe_tap_ena[XGBE_PHY_SPEED_1000]);

	amd_xgbe_phy_serdes_complete_ratechange(phydev);

	return 0;
}
696
697 static int amd_xgbe_phy_cur_mode(struct phy_device *phydev,
698                                  enum amd_xgbe_phy_mode *mode)
699 {
700         int ret;
701
702         ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
703         if (ret < 0)
704                 return ret;
705
706         if ((ret & MDIO_PCS_CTRL2_TYPE) == MDIO_PCS_CTRL2_10GBR)
707                 *mode = AMD_XGBE_MODE_KR;
708         else
709                 *mode = AMD_XGBE_MODE_KX;
710
711         return 0;
712 }
713
714 static bool amd_xgbe_phy_in_kr_mode(struct phy_device *phydev)
715 {
716         enum amd_xgbe_phy_mode mode;
717
718         if (amd_xgbe_phy_cur_mode(phydev, &mode))
719                 return false;
720
721         return (mode == AMD_XGBE_MODE_KR);
722 }
723
724 static int amd_xgbe_phy_switch_mode(struct phy_device *phydev)
725 {
726         struct amd_xgbe_phy_priv *priv = phydev->priv;
727         int ret;
728
729         /* If we are in KR switch to KX, and vice-versa */
730         if (amd_xgbe_phy_in_kr_mode(phydev)) {
731                 if (priv->speed_set == AMD_XGBE_PHY_SPEEDSET_1000_10000)
732                         ret = amd_xgbe_phy_gmii_mode(phydev);
733                 else
734                         ret = amd_xgbe_phy_gmii_2500_mode(phydev);
735         } else {
736                 ret = amd_xgbe_phy_xgmii_mode(phydev);
737         }
738
739         return ret;
740 }
741
742 static int amd_xgbe_phy_set_mode(struct phy_device *phydev,
743                                  enum amd_xgbe_phy_mode mode)
744 {
745         enum amd_xgbe_phy_mode cur_mode;
746         int ret;
747
748         ret = amd_xgbe_phy_cur_mode(phydev, &cur_mode);
749         if (ret)
750                 return ret;
751
752         if (mode != cur_mode)
753                 ret = amd_xgbe_phy_switch_mode(phydev);
754
755         return ret;
756 }
757
758 static int amd_xgbe_phy_set_an(struct phy_device *phydev, bool enable,
759                                bool restart)
760 {
761         int ret;
762
763         ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1);
764         if (ret < 0)
765                 return ret;
766
767         ret &= ~MDIO_AN_CTRL1_ENABLE;
768
769         if (enable)
770                 ret |= MDIO_AN_CTRL1_ENABLE;
771
772         if (restart)
773                 ret |= MDIO_AN_CTRL1_RESTART;
774
775         phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1, ret);
776
777         return 0;
778 }
779
780 static int amd_xgbe_phy_restart_an(struct phy_device *phydev)
781 {
782         return amd_xgbe_phy_set_an(phydev, true, true);
783 }
784
785 static int amd_xgbe_phy_disable_an(struct phy_device *phydev)
786 {
787         return amd_xgbe_phy_set_an(phydev, false, false);
788 }
789
/* AN "page received" handler: configure FEC and kick off KR link
 * training.  In KX mode there is nothing to do.  The SerDes KR_RT_1
 * reset pulse around the TRAINING_START write is required by the
 * hardware; keep the ordering as-is.  Always advances the AN state
 * to PAGE_RECEIVED unless an MDIO access fails.
 */
static enum amd_xgbe_phy_an amd_xgbe_an_tx_training(struct phy_device *phydev,
						    enum amd_xgbe_phy_rx *state)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;
	int ad_reg, lp_reg, ret;

	*state = AMD_XGBE_RX_COMPLETE;

	/* If we're not in KR mode then we're done */
	if (!amd_xgbe_phy_in_kr_mode(phydev))
		return AMD_XGBE_AN_PAGE_RECEIVED;

	/* Enable/Disable FEC */
	ad_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
	if (ad_reg < 0)
		return AMD_XGBE_AN_ERROR;

	lp_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA + 2);
	if (lp_reg < 0)
		return AMD_XGBE_AN_ERROR;

	ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FEC_CTRL);
	if (ret < 0)
		return AMD_XGBE_AN_ERROR;

	/* Enable FEC only if both we and the link partner advertise it
	 * (FEC ability bits live in the top two bits of the third
	 * advertisement/LPA word)
	 */
	ret &= ~XGBE_PHY_FEC_MASK;
	if ((ad_reg & 0xc000) && (lp_reg & 0xc000))
		ret |= priv->fec_ability;

	phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FEC_CTRL, ret);

	/* Start KR training */
	ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
	if (ret < 0)
		return AMD_XGBE_AN_ERROR;

	if (ret & XGBE_PHY_KR_TRAINING_ENABLE) {
		/* Pulse the SerDes KR training reset around the start bit */
		XSIR0_IOWRITE_BITS(priv, SIR0_KR_RT_1, RESET, 1);

		ret |= XGBE_PHY_KR_TRAINING_START;
		phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL,
			      ret);

		XSIR0_IOWRITE_BITS(priv, SIR0_KR_RT_1, RESET, 0);
	}

	return AMD_XGBE_AN_PAGE_RECEIVED;
}
838
839 static enum amd_xgbe_phy_an amd_xgbe_an_tx_xnp(struct phy_device *phydev,
840                                                enum amd_xgbe_phy_rx *state)
841 {
842         u16 msg;
843
844         *state = AMD_XGBE_RX_XNP;
845
846         msg = XNP_MCF_NULL_MESSAGE;
847         msg |= XNP_MP_FORMATTED;
848
849         phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_XNP + 2, 0);
850         phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_XNP + 1, 0);
851         phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_XNP, msg);
852
853         return AMD_XGBE_AN_PAGE_RECEIVED;
854 }
855
856 static enum amd_xgbe_phy_an amd_xgbe_an_rx_bpa(struct phy_device *phydev,
857                                                enum amd_xgbe_phy_rx *state)
858 {
859         unsigned int link_support;
860         int ret, ad_reg, lp_reg;
861
862         /* Read Base Ability register 2 first */
863         ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA + 1);
864         if (ret < 0)
865                 return AMD_XGBE_AN_ERROR;
866
867         /* Check for a supported mode, otherwise restart in a different one */
868         link_support = amd_xgbe_phy_in_kr_mode(phydev) ? 0x80 : 0x20;
869         if (!(ret & link_support))
870                 return AMD_XGBE_AN_INCOMPAT_LINK;
871
872         /* Check Extended Next Page support */
873         ad_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
874         if (ad_reg < 0)
875                 return AMD_XGBE_AN_ERROR;
876
877         lp_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA);
878         if (lp_reg < 0)
879                 return AMD_XGBE_AN_ERROR;
880
881         return ((ad_reg & XNP_NP_EXCHANGE) || (lp_reg & XNP_NP_EXCHANGE)) ?
882                amd_xgbe_an_tx_xnp(phydev, state) :
883                amd_xgbe_an_tx_training(phydev, state);
884 }
885
886 static enum amd_xgbe_phy_an amd_xgbe_an_rx_xnp(struct phy_device *phydev,
887                                                enum amd_xgbe_phy_rx *state)
888 {
889         int ad_reg, lp_reg;
890
891         /* Check Extended Next Page support */
892         ad_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_XNP);
893         if (ad_reg < 0)
894                 return AMD_XGBE_AN_ERROR;
895
896         lp_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPX);
897         if (lp_reg < 0)
898                 return AMD_XGBE_AN_ERROR;
899
900         return ((ad_reg & XNP_NP_EXCHANGE) || (lp_reg & XNP_NP_EXCHANGE)) ?
901                amd_xgbe_an_tx_xnp(phydev, state) :
902                amd_xgbe_an_tx_training(phydev, state);
903 }
904
905 static enum amd_xgbe_phy_an amd_xgbe_an_page_received(struct phy_device *phydev)
906 {
907         struct amd_xgbe_phy_priv *priv = phydev->priv;
908         enum amd_xgbe_phy_rx *state;
909         unsigned long an_timeout;
910         int ret;
911
912         if (!priv->an_start) {
913                 priv->an_start = jiffies;
914         } else {
915                 an_timeout = priv->an_start +
916                              msecs_to_jiffies(XGBE_AN_MS_TIMEOUT);
917                 if (time_after(jiffies, an_timeout)) {
918                         /* Auto-negotiation timed out, reset state */
919                         priv->kr_state = AMD_XGBE_RX_BPA;
920                         priv->kx_state = AMD_XGBE_RX_BPA;
921
922                         priv->an_start = jiffies;
923                 }
924         }
925
926         state = amd_xgbe_phy_in_kr_mode(phydev) ? &priv->kr_state
927                                                 : &priv->kx_state;
928
929         switch (*state) {
930         case AMD_XGBE_RX_BPA:
931                 ret = amd_xgbe_an_rx_bpa(phydev, state);
932                 break;
933
934         case AMD_XGBE_RX_XNP:
935                 ret = amd_xgbe_an_rx_xnp(phydev, state);
936                 break;
937
938         default:
939                 ret = AMD_XGBE_AN_ERROR;
940         }
941
942         return ret;
943 }
944
945 static enum amd_xgbe_phy_an amd_xgbe_an_incompat_link(struct phy_device *phydev)
946 {
947         struct amd_xgbe_phy_priv *priv = phydev->priv;
948         int ret;
949
950         /* Be sure we aren't looping trying to negotiate */
951         if (amd_xgbe_phy_in_kr_mode(phydev)) {
952                 priv->kr_state = AMD_XGBE_RX_ERROR;
953
954                 if (!(phydev->advertising & SUPPORTED_1000baseKX_Full) &&
955                     !(phydev->advertising & SUPPORTED_2500baseX_Full))
956                         return AMD_XGBE_AN_NO_LINK;
957
958                 if (priv->kx_state != AMD_XGBE_RX_BPA)
959                         return AMD_XGBE_AN_NO_LINK;
960         } else {
961                 priv->kx_state = AMD_XGBE_RX_ERROR;
962
963                 if (!(phydev->advertising & SUPPORTED_10000baseKR_Full))
964                         return AMD_XGBE_AN_NO_LINK;
965
966                 if (priv->kr_state != AMD_XGBE_RX_BPA)
967                         return AMD_XGBE_AN_NO_LINK;
968         }
969
970         ret = amd_xgbe_phy_disable_an(phydev);
971         if (ret)
972                 return AMD_XGBE_AN_ERROR;
973
974         ret = amd_xgbe_phy_switch_mode(phydev);
975         if (ret)
976                 return AMD_XGBE_AN_ERROR;
977
978         ret = amd_xgbe_phy_restart_an(phydev);
979         if (ret)
980                 return AMD_XGBE_AN_ERROR;
981
982         return AMD_XGBE_AN_INCOMPAT_LINK;
983 }
984
985 static irqreturn_t amd_xgbe_an_isr(int irq, void *data)
986 {
987         struct amd_xgbe_phy_priv *priv = (struct amd_xgbe_phy_priv *)data;
988
989         /* Interrupt reason must be read and cleared outside of IRQ context */
990         disable_irq_nosync(priv->an_irq);
991
992         queue_work(priv->an_workqueue, &priv->an_irq_work);
993
994         return IRQ_HANDLED;
995 }
996
/* Bottom half of the auto-negotiation interrupt: runs in process
 * context and hands off to the state machine work item.
 */
static void amd_xgbe_an_irq_work(struct work_struct *work)
{
        struct amd_xgbe_phy_priv *priv = container_of(work,
                                                      struct amd_xgbe_phy_priv,
                                                      an_irq_work);

        /* Avoid a race between enabling the IRQ and exiting the work by
         * waiting for the work to finish and then queueing it
         */
        flush_work(&priv->an_work);
        queue_work(priv->an_workqueue, &priv->an_work);
}
1009
/* Auto-negotiation state machine, run from the driver's single-threaded
 * workqueue with an_mutex held.  Reads the AN interrupt reason, maps it
 * to a state, then steps the state machine until the state settles;
 * any remaining interrupt bits are processed before the AN interrupt
 * (disabled by the ISR) is re-enabled.
 */
static void amd_xgbe_an_state_machine(struct work_struct *work)
{
        struct amd_xgbe_phy_priv *priv = container_of(work,
                                                      struct amd_xgbe_phy_priv,
                                                      an_work);
        struct phy_device *phydev = priv->phydev;
        enum amd_xgbe_phy_an cur_state = priv->an_state;
        int int_reg, int_mask;

        mutex_lock(&priv->an_mutex);

        /* Read the interrupt */
        int_reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT);
        if (!int_reg)
                goto out;

next_int:
        /* Map the highest-priority pending interrupt bit to a state;
         * int_mask selects the bit(s) to clear below
         */
        if (int_reg < 0) {
                priv->an_state = AMD_XGBE_AN_ERROR;
                int_mask = XGBE_AN_INT_MASK;
        } else if (int_reg & XGBE_AN_PG_RCV) {
                priv->an_state = AMD_XGBE_AN_PAGE_RECEIVED;
                int_mask = XGBE_AN_PG_RCV;
        } else if (int_reg & XGBE_AN_INC_LINK) {
                priv->an_state = AMD_XGBE_AN_INCOMPAT_LINK;
                int_mask = XGBE_AN_INC_LINK;
        } else if (int_reg & XGBE_AN_INT_CMPLT) {
                priv->an_state = AMD_XGBE_AN_COMPLETE;
                int_mask = XGBE_AN_INT_CMPLT;
        } else {
                /* Unexpected interrupt value - treat as an error */
                priv->an_state = AMD_XGBE_AN_ERROR;
                int_mask = 0;
        }

        /* Clear the interrupt to be processed */
        int_reg &= ~int_mask;
        phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, int_reg);

        priv->an_result = priv->an_state;

again:
        /* Step the state machine; looped via the cur_state check below
         * until the state stops changing
         */
        cur_state = priv->an_state;

        switch (priv->an_state) {
        case AMD_XGBE_AN_READY:
                priv->an_supported = 0;
                break;

        case AMD_XGBE_AN_PAGE_RECEIVED:
                priv->an_state = amd_xgbe_an_page_received(phydev);
                /* Count received pages; zero pages at completion means
                 * the link came up by parallel detection instead
                 */
                priv->an_supported++;
                break;

        case AMD_XGBE_AN_INCOMPAT_LINK:
                priv->an_supported = 0;
                priv->parallel_detect = 0;
                priv->an_state = amd_xgbe_an_incompat_link(phydev);
                break;

        case AMD_XGBE_AN_COMPLETE:
                priv->parallel_detect = priv->an_supported ? 0 : 1;
                netdev_dbg(phydev->attached_dev, "%s successful\n",
                           priv->an_supported ? "Auto negotiation"
                                              : "Parallel detection");
                break;

        case AMD_XGBE_AN_NO_LINK:
                break;

        default:
                priv->an_state = AMD_XGBE_AN_ERROR;
        }

        /* On no-link or error, discard any remaining interrupt bits */
        if (priv->an_state == AMD_XGBE_AN_NO_LINK) {
                int_reg = 0;
                phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);
        } else if (priv->an_state == AMD_XGBE_AN_ERROR) {
                netdev_err(phydev->attached_dev,
                           "error during auto-negotiation, state=%u\n",
                           cur_state);

                int_reg = 0;
                phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);
        }

        /* Terminal state reached - latch the result and reset for the
         * next auto-negotiation attempt
         */
        if (priv->an_state >= AMD_XGBE_AN_COMPLETE) {
                priv->an_result = priv->an_state;
                priv->an_state = AMD_XGBE_AN_READY;
                priv->kr_state = AMD_XGBE_RX_BPA;
                priv->kx_state = AMD_XGBE_RX_BPA;
                priv->an_start = 0;
        }

        if (cur_state != priv->an_state)
                goto again;

        if (int_reg)
                goto next_int;

out:
        /* Re-enable the interrupt that the ISR disabled */
        enable_irq(priv->an_irq);

        mutex_unlock(&priv->an_mutex);
}
1114
1115 static int amd_xgbe_an_init(struct phy_device *phydev)
1116 {
1117         int ret;
1118
1119         /* Set up Advertisement register 3 first */
1120         ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
1121         if (ret < 0)
1122                 return ret;
1123
1124         if (phydev->advertising & SUPPORTED_10000baseR_FEC)
1125                 ret |= 0xc000;
1126         else
1127                 ret &= ~0xc000;
1128
1129         phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2, ret);
1130
1131         /* Set up Advertisement register 2 next */
1132         ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1);
1133         if (ret < 0)
1134                 return ret;
1135
1136         if (phydev->advertising & SUPPORTED_10000baseKR_Full)
1137                 ret |= 0x80;
1138         else
1139                 ret &= ~0x80;
1140
1141         if ((phydev->advertising & SUPPORTED_1000baseKX_Full) ||
1142             (phydev->advertising & SUPPORTED_2500baseX_Full))
1143                 ret |= 0x20;
1144         else
1145                 ret &= ~0x20;
1146
1147         phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1, ret);
1148
1149         /* Set up Advertisement register 1 last */
1150         ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
1151         if (ret < 0)
1152                 return ret;
1153
1154         if (phydev->advertising & SUPPORTED_Pause)
1155                 ret |= 0x400;
1156         else
1157                 ret &= ~0x400;
1158
1159         if (phydev->advertising & SUPPORTED_Asym_Pause)
1160                 ret |= 0x800;
1161         else
1162                 ret &= ~0x800;
1163
1164         /* We don't intend to perform XNP */
1165         ret &= ~XNP_NP_EXCHANGE;
1166
1167         phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE, ret);
1168
1169         return 0;
1170 }
1171
1172 static int amd_xgbe_phy_soft_reset(struct phy_device *phydev)
1173 {
1174         int count, ret;
1175
1176         ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
1177         if (ret < 0)
1178                 return ret;
1179
1180         ret |= MDIO_CTRL1_RESET;
1181         phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
1182
1183         count = 50;
1184         do {
1185                 msleep(20);
1186                 ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
1187                 if (ret < 0)
1188                         return ret;
1189         } while ((ret & MDIO_CTRL1_RESET) && --count);
1190
1191         if (ret & MDIO_CTRL1_RESET)
1192                 return -ETIMEDOUT;
1193
1194         /* Disable auto-negotiation for now */
1195         ret = amd_xgbe_phy_disable_an(phydev);
1196         if (ret < 0)
1197                 return ret;
1198
1199         /* Clear auto-negotiation interrupts */
1200         phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);
1201
1202         return 0;
1203 }
1204
1205 static int amd_xgbe_phy_config_init(struct phy_device *phydev)
1206 {
1207         struct amd_xgbe_phy_priv *priv = phydev->priv;
1208         struct net_device *netdev = phydev->attached_dev;
1209         int ret;
1210
1211         if (!priv->an_irq_allocated) {
1212                 /* Allocate the auto-negotiation workqueue and interrupt */
1213                 snprintf(priv->an_irq_name, sizeof(priv->an_irq_name) - 1,
1214                          "%s-pcs", netdev_name(netdev));
1215
1216                 priv->an_workqueue =
1217                         create_singlethread_workqueue(priv->an_irq_name);
1218                 if (!priv->an_workqueue) {
1219                         netdev_err(netdev, "phy workqueue creation failed\n");
1220                         return -ENOMEM;
1221                 }
1222
1223                 ret = devm_request_irq(priv->dev, priv->an_irq,
1224                                        amd_xgbe_an_isr, 0, priv->an_irq_name,
1225                                        priv);
1226                 if (ret) {
1227                         netdev_err(netdev, "phy irq request failed\n");
1228                         destroy_workqueue(priv->an_workqueue);
1229                         return ret;
1230                 }
1231
1232                 priv->an_irq_allocated = 1;
1233         }
1234
1235         /* Set initial mode - call the mode setting routines
1236          * directly to insure we are properly configured
1237          */
1238         if (phydev->advertising & SUPPORTED_10000baseKR_Full)
1239                 ret = amd_xgbe_phy_xgmii_mode(phydev);
1240         else if (phydev->advertising & SUPPORTED_1000baseKX_Full)
1241                 ret = amd_xgbe_phy_gmii_mode(phydev);
1242         else if (phydev->advertising & SUPPORTED_2500baseX_Full)
1243                 ret = amd_xgbe_phy_gmii_2500_mode(phydev);
1244         else
1245                 ret = -EINVAL;
1246         if (ret < 0)
1247                 return ret;
1248
1249         /* Set up advertisement registers based on current settings */
1250         ret = amd_xgbe_an_init(phydev);
1251         if (ret)
1252                 return ret;
1253
1254         /* Enable auto-negotiation interrupts */
1255         phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INTMASK, 0x07);
1256
1257         return 0;
1258 }
1259
1260 static int amd_xgbe_phy_setup_forced(struct phy_device *phydev)
1261 {
1262         int ret;
1263
1264         /* Disable auto-negotiation */
1265         ret = amd_xgbe_phy_disable_an(phydev);
1266         if (ret < 0)
1267                 return ret;
1268
1269         /* Validate/Set specified speed */
1270         switch (phydev->speed) {
1271         case SPEED_10000:
1272                 ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KR);
1273                 break;
1274
1275         case SPEED_2500:
1276         case SPEED_1000:
1277                 ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KX);
1278                 break;
1279
1280         default:
1281                 ret = -EINVAL;
1282         }
1283
1284         if (ret < 0)
1285                 return ret;
1286
1287         /* Validate duplex mode */
1288         if (phydev->duplex != DUPLEX_FULL)
1289                 return -EINVAL;
1290
1291         phydev->pause = 0;
1292         phydev->asym_pause = 0;
1293
1294         return 0;
1295 }
1296
1297 static int __amd_xgbe_phy_config_aneg(struct phy_device *phydev)
1298 {
1299         struct amd_xgbe_phy_priv *priv = phydev->priv;
1300         u32 mmd_mask = phydev->c45_ids.devices_in_package;
1301         int ret;
1302
1303         if (phydev->autoneg != AUTONEG_ENABLE)
1304                 return amd_xgbe_phy_setup_forced(phydev);
1305
1306         /* Make sure we have the AN MMD present */
1307         if (!(mmd_mask & MDIO_DEVS_AN))
1308                 return -EINVAL;
1309
1310         /* Disable auto-negotiation interrupt */
1311         disable_irq(priv->an_irq);
1312
1313         /* Start auto-negotiation in a supported mode */
1314         if (phydev->advertising & SUPPORTED_10000baseKR_Full)
1315                 ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KR);
1316         else if ((phydev->advertising & SUPPORTED_1000baseKX_Full) ||
1317                  (phydev->advertising & SUPPORTED_2500baseX_Full))
1318                 ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KX);
1319         else
1320                 ret = -EINVAL;
1321         if (ret < 0) {
1322                 enable_irq(priv->an_irq);
1323                 return ret;
1324         }
1325
1326         /* Disable and stop any in progress auto-negotiation */
1327         ret = amd_xgbe_phy_disable_an(phydev);
1328         if (ret < 0)
1329                 return ret;
1330
1331         /* Clear any auto-negotitation interrupts */
1332         phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INT, 0);
1333
1334         priv->an_result = AMD_XGBE_AN_READY;
1335         priv->an_state = AMD_XGBE_AN_READY;
1336         priv->kr_state = AMD_XGBE_RX_BPA;
1337         priv->kx_state = AMD_XGBE_RX_BPA;
1338
1339         /* Re-enable auto-negotiation interrupt */
1340         enable_irq(priv->an_irq);
1341
1342         /* Set up advertisement registers based on current settings */
1343         ret = amd_xgbe_an_init(phydev);
1344         if (ret)
1345                 return ret;
1346
1347         /* Enable and start auto-negotiation */
1348         return amd_xgbe_phy_restart_an(phydev);
1349 }
1350
/* Serialized entry point for auto-negotiation configuration; an_mutex
 * prevents the AN state machine work from running concurrently with
 * the reconfiguration.
 */
static int amd_xgbe_phy_config_aneg(struct phy_device *phydev)
{
        struct amd_xgbe_phy_priv *priv = phydev->priv;
        int ret;

        mutex_lock(&priv->an_mutex);

        ret = __amd_xgbe_phy_config_aneg(phydev);

        mutex_unlock(&priv->an_mutex);

        return ret;
}
1364
/* Report whether the most recent auto-negotiation attempt completed */
static int amd_xgbe_phy_aneg_done(struct phy_device *phydev)
{
        struct amd_xgbe_phy_priv *priv = phydev->priv;

        return (priv->an_result == AMD_XGBE_AN_COMPLETE);
}
1371
1372 static int amd_xgbe_phy_update_link(struct phy_device *phydev)
1373 {
1374         struct amd_xgbe_phy_priv *priv = phydev->priv;
1375         int ret;
1376
1377         /* If we're doing auto-negotiation don't report link down */
1378         if (priv->an_state != AMD_XGBE_AN_READY) {
1379                 phydev->link = 1;
1380                 return 0;
1381         }
1382
1383         /* Link status is latched low, so read once to clear
1384          * and then read again to get current state
1385          */
1386         ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_STAT1);
1387         if (ret < 0)
1388                 return ret;
1389
1390         ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_STAT1);
1391         if (ret < 0)
1392                 return ret;
1393
1394         phydev->link = (ret & MDIO_STAT1_LSTATUS) ? 1 : 0;
1395
1396         return 0;
1397 }
1398
/* Read the current PHY status.  When auto-negotiation is in use (and
 * the link didn't come up by parallel detection), resolve pause and
 * speed from the intersection of the local and link partner ability
 * registers and switch the PHY into the matching KR/KX mode; otherwise
 * report based on the current hardware mode and configured speed set.
 */
static int amd_xgbe_phy_read_status(struct phy_device *phydev)
{
        struct amd_xgbe_phy_priv *priv = phydev->priv;
        u32 mmd_mask = phydev->c45_ids.devices_in_package;
        int ret, ad_ret, lp_ret;

        ret = amd_xgbe_phy_update_link(phydev);
        if (ret)
                return ret;

        if ((phydev->autoneg == AUTONEG_ENABLE) &&
            !priv->parallel_detect) {
                if (!(mmd_mask & MDIO_DEVS_AN))
                        return -EINVAL;

                /* Nothing to resolve until auto-negotiation finishes */
                if (!amd_xgbe_phy_aneg_done(phydev))
                        return 0;

                /* Compare Advertisement and Link Partner register 1 */
                ad_ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
                if (ad_ret < 0)
                        return ad_ret;
                lp_ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA);
                if (lp_ret < 0)
                        return lp_ret;

                /* Pause (0x400) / asym pause (0x800) apply only when
                 * advertised by both sides
                 */
                ad_ret &= lp_ret;
                phydev->pause = (ad_ret & 0x400) ? 1 : 0;
                phydev->asym_pause = (ad_ret & 0x800) ? 1 : 0;

                /* Compare Advertisement and Link Partner register 2 */
                ad_ret = phy_read_mmd(phydev, MDIO_MMD_AN,
                                      MDIO_AN_ADVERTISE + 1);
                if (ad_ret < 0)
                        return ad_ret;
                lp_ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA + 1);
                if (lp_ret < 0)
                        return lp_ret;

                ad_ret &= lp_ret;
                if (ad_ret & 0x80) {
                        /* Both sides advertise 10GBase-KR (0x80) */
                        phydev->speed = SPEED_10000;
                        ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KR);
                        if (ret)
                                return ret;
                } else {
                        /* Fall back to the KX speed for this speed set */
                        switch (priv->speed_set) {
                        case AMD_XGBE_PHY_SPEEDSET_1000_10000:
                                phydev->speed = SPEED_1000;
                                break;

                        case AMD_XGBE_PHY_SPEEDSET_2500_10000:
                                phydev->speed = SPEED_2500;
                                break;
                        }

                        ret = amd_xgbe_phy_set_mode(phydev, AMD_XGBE_MODE_KX);
                        if (ret)
                                return ret;
                }

                phydev->duplex = DUPLEX_FULL;
        } else {
                /* Forced speed or parallel detection - report based on
                 * the current hardware mode
                 */
                if (amd_xgbe_phy_in_kr_mode(phydev)) {
                        phydev->speed = SPEED_10000;
                } else {
                        switch (priv->speed_set) {
                        case AMD_XGBE_PHY_SPEEDSET_1000_10000:
                                phydev->speed = SPEED_1000;
                                break;

                        case AMD_XGBE_PHY_SPEEDSET_2500_10000:
                                phydev->speed = SPEED_2500;
                                break;
                        }
                }
                phydev->duplex = DUPLEX_FULL;
                phydev->pause = 0;
                phydev->asym_pause = 0;
        }

        return 0;
}
1482
1483 static int amd_xgbe_phy_suspend(struct phy_device *phydev)
1484 {
1485         struct amd_xgbe_phy_priv *priv = phydev->priv;
1486         int ret;
1487
1488         mutex_lock(&phydev->lock);
1489
1490         ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1);
1491         if (ret < 0)
1492                 goto unlock;
1493
1494         priv->lpm_ctrl = ret;
1495
1496         ret |= MDIO_CTRL1_LPOWER;
1497         phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, ret);
1498
1499         ret = 0;
1500
1501 unlock:
1502         mutex_unlock(&phydev->lock);
1503
1504         return ret;
1505 }
1506
/* Bring the PHY out of low-power mode by restoring the PCS control
 * value saved at suspend time (with the low-power bit cleared).
 */
static int amd_xgbe_phy_resume(struct phy_device *phydev)
{
        struct amd_xgbe_phy_priv *priv = phydev->priv;

        mutex_lock(&phydev->lock);

        priv->lpm_ctrl &= ~MDIO_CTRL1_LPOWER;
        phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, priv->lpm_ctrl);

        mutex_unlock(&phydev->lock);

        return 0;
}
1520
1521 static unsigned int amd_xgbe_phy_resource_count(struct platform_device *pdev,
1522                                                 unsigned int type)
1523 {
1524         unsigned int count;
1525         int i;
1526
1527         for (i = 0, count = 0; i < pdev->num_resources; i++) {
1528                 struct resource *r = &pdev->resource[i];
1529
1530                 if (type == resource_type(r))
1531                         count++;
1532         }
1533
1534         return count;
1535 }
1536
1537 static int amd_xgbe_phy_probe(struct phy_device *phydev)
1538 {
1539         struct amd_xgbe_phy_priv *priv;
1540         struct platform_device *phy_pdev;
1541         struct device *dev, *phy_dev;
1542         unsigned int phy_resnum, phy_irqnum;
1543         int ret;
1544
1545         if (!phydev->bus || !phydev->bus->parent)
1546                 return -EINVAL;
1547
1548         dev = phydev->bus->parent;
1549
1550         priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
1551         if (!priv)
1552                 return -ENOMEM;
1553
1554         priv->pdev = to_platform_device(dev);
1555         priv->adev = ACPI_COMPANION(dev);
1556         priv->dev = dev;
1557         priv->phydev = phydev;
1558         mutex_init(&priv->an_mutex);
1559         INIT_WORK(&priv->an_irq_work, amd_xgbe_an_irq_work);
1560         INIT_WORK(&priv->an_work, amd_xgbe_an_state_machine);
1561
1562         if (!priv->adev || acpi_disabled) {
1563                 struct device_node *bus_node;
1564                 struct device_node *phy_node;
1565
1566                 bus_node = priv->dev->of_node;
1567                 phy_node = of_parse_phandle(bus_node, "phy-handle", 0);
1568                 if (!phy_node) {
1569                         dev_err(dev, "unable to parse phy-handle\n");
1570                         ret = -EINVAL;
1571                         goto err_priv;
1572                 }
1573
1574                 phy_pdev = of_find_device_by_node(phy_node);
1575                 of_node_put(phy_node);
1576
1577                 if (!phy_pdev) {
1578                         dev_err(dev, "unable to obtain phy device\n");
1579                         ret = -EINVAL;
1580                         goto err_priv;
1581                 }
1582
1583                 phy_resnum = 0;
1584                 phy_irqnum = 0;
1585         } else {
1586                 /* In ACPI, the XGBE and PHY resources are the grouped
1587                  * together with the PHY resources at the end
1588                  */
1589                 phy_pdev = priv->pdev;
1590                 phy_resnum = amd_xgbe_phy_resource_count(phy_pdev,
1591                                                          IORESOURCE_MEM) - 3;
1592                 phy_irqnum = amd_xgbe_phy_resource_count(phy_pdev,
1593                                                          IORESOURCE_IRQ) - 1;
1594         }
1595         phy_dev = &phy_pdev->dev;
1596
1597         /* Get the device mmio areas */
1598         priv->rxtx_res = platform_get_resource(phy_pdev, IORESOURCE_MEM,
1599                                                phy_resnum++);
1600         priv->rxtx_regs = devm_ioremap_resource(dev, priv->rxtx_res);
1601         if (IS_ERR(priv->rxtx_regs)) {
1602                 dev_err(dev, "rxtx ioremap failed\n");
1603                 ret = PTR_ERR(priv->rxtx_regs);
1604                 goto err_put;
1605         }
1606
1607         priv->sir0_res = platform_get_resource(phy_pdev, IORESOURCE_MEM,
1608                                                phy_resnum++);
1609         priv->sir0_regs = devm_ioremap_resource(dev, priv->sir0_res);
1610         if (IS_ERR(priv->sir0_regs)) {
1611                 dev_err(dev, "sir0 ioremap failed\n");
1612                 ret = PTR_ERR(priv->sir0_regs);
1613                 goto err_rxtx;
1614         }
1615
1616         priv->sir1_res = platform_get_resource(phy_pdev, IORESOURCE_MEM,
1617                                                phy_resnum++);
1618         priv->sir1_regs = devm_ioremap_resource(dev, priv->sir1_res);
1619         if (IS_ERR(priv->sir1_regs)) {
1620                 dev_err(dev, "sir1 ioremap failed\n");
1621                 ret = PTR_ERR(priv->sir1_regs);
1622                 goto err_sir0;
1623         }
1624
1625         /* Get the auto-negotiation interrupt */
1626         ret = platform_get_irq(phy_pdev, phy_irqnum);
1627         if (ret < 0) {
1628                 dev_err(dev, "platform_get_irq failed\n");
1629                 goto err_sir1;
1630         }
1631         priv->an_irq = ret;
1632
1633         /* Get the device speed set property */
1634         ret = device_property_read_u32(phy_dev, XGBE_PHY_SPEEDSET_PROPERTY,
1635                                        &priv->speed_set);
1636         if (ret) {
1637                 dev_err(dev, "invalid %s property\n",
1638                         XGBE_PHY_SPEEDSET_PROPERTY);
1639                 goto err_sir1;
1640         }
1641
1642         switch (priv->speed_set) {
1643         case AMD_XGBE_PHY_SPEEDSET_1000_10000:
1644         case AMD_XGBE_PHY_SPEEDSET_2500_10000:
1645                 break;
1646         default:
1647                 dev_err(dev, "invalid %s property\n",
1648                         XGBE_PHY_SPEEDSET_PROPERTY);
1649                 ret = -EINVAL;
1650                 goto err_sir1;
1651         }
1652
1653         if (device_property_present(phy_dev, XGBE_PHY_BLWC_PROPERTY)) {
1654                 ret = device_property_read_u32_array(phy_dev,
1655                                                      XGBE_PHY_BLWC_PROPERTY,
1656                                                      priv->serdes_blwc,
1657                                                      XGBE_PHY_SPEEDS);
1658                 if (ret) {
1659                         dev_err(dev, "invalid %s property\n",
1660                                 XGBE_PHY_BLWC_PROPERTY);
1661                         goto err_sir1;
1662                 }
1663         } else {
1664                 memcpy(priv->serdes_blwc, amd_xgbe_phy_serdes_blwc,
1665                        sizeof(priv->serdes_blwc));
1666         }
1667
1668         if (device_property_present(phy_dev, XGBE_PHY_CDR_RATE_PROPERTY)) {
1669                 ret = device_property_read_u32_array(phy_dev,
1670                                                      XGBE_PHY_CDR_RATE_PROPERTY,
1671                                                      priv->serdes_cdr_rate,
1672                                                      XGBE_PHY_SPEEDS);
1673                 if (ret) {
1674                         dev_err(dev, "invalid %s property\n",
1675                                 XGBE_PHY_CDR_RATE_PROPERTY);
1676                         goto err_sir1;
1677                 }
1678         } else {
1679                 memcpy(priv->serdes_cdr_rate, amd_xgbe_phy_serdes_cdr_rate,
1680                        sizeof(priv->serdes_cdr_rate));
1681         }
1682
1683         if (device_property_present(phy_dev, XGBE_PHY_PQ_SKEW_PROPERTY)) {
1684                 ret = device_property_read_u32_array(phy_dev,
1685                                                      XGBE_PHY_PQ_SKEW_PROPERTY,
1686                                                      priv->serdes_pq_skew,
1687                                                      XGBE_PHY_SPEEDS);
1688                 if (ret) {
1689                         dev_err(dev, "invalid %s property\n",
1690                                 XGBE_PHY_PQ_SKEW_PROPERTY);
1691                         goto err_sir1;
1692                 }
1693         } else {
1694                 memcpy(priv->serdes_pq_skew, amd_xgbe_phy_serdes_pq_skew,
1695                        sizeof(priv->serdes_pq_skew));
1696         }
1697
1698         if (device_property_present(phy_dev, XGBE_PHY_TX_AMP_PROPERTY)) {
1699                 ret = device_property_read_u32_array(phy_dev,
1700                                                      XGBE_PHY_TX_AMP_PROPERTY,
1701                                                      priv->serdes_tx_amp,
1702                                                      XGBE_PHY_SPEEDS);
1703                 if (ret) {
1704                         dev_err(dev, "invalid %s property\n",
1705                                 XGBE_PHY_TX_AMP_PROPERTY);
1706                         goto err_sir1;
1707                 }
1708         } else {
1709                 memcpy(priv->serdes_tx_amp, amd_xgbe_phy_serdes_tx_amp,
1710                        sizeof(priv->serdes_tx_amp));
1711         }
1712
1713         if (device_property_present(phy_dev, XGBE_PHY_DFE_CFG_PROPERTY)) {
1714                 ret = device_property_read_u32_array(phy_dev,
1715                                                      XGBE_PHY_DFE_CFG_PROPERTY,
1716                                                      priv->serdes_dfe_tap_cfg,
1717                                                      XGBE_PHY_SPEEDS);
1718                 if (ret) {
1719                         dev_err(dev, "invalid %s property\n",
1720                                 XGBE_PHY_DFE_CFG_PROPERTY);
1721                         goto err_sir1;
1722                 }
1723         } else {
1724                 memcpy(priv->serdes_dfe_tap_cfg,
1725                        amd_xgbe_phy_serdes_dfe_tap_cfg,
1726                        sizeof(priv->serdes_dfe_tap_cfg));
1727         }
1728
1729         if (device_property_present(phy_dev, XGBE_PHY_DFE_ENA_PROPERTY)) {
1730                 ret = device_property_read_u32_array(phy_dev,
1731                                                      XGBE_PHY_DFE_ENA_PROPERTY,
1732                                                      priv->serdes_dfe_tap_ena,
1733                                                      XGBE_PHY_SPEEDS);
1734                 if (ret) {
1735                         dev_err(dev, "invalid %s property\n",
1736                                 XGBE_PHY_DFE_ENA_PROPERTY);
1737                         goto err_sir1;
1738                 }
1739         } else {
1740                 memcpy(priv->serdes_dfe_tap_ena,
1741                        amd_xgbe_phy_serdes_dfe_tap_ena,
1742                        sizeof(priv->serdes_dfe_tap_ena));
1743         }
1744
1745         /* Initialize supported features */
1746         phydev->supported = SUPPORTED_Autoneg;
1747         phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
1748         phydev->supported |= SUPPORTED_Backplane;
1749         phydev->supported |= SUPPORTED_10000baseKR_Full;
1750         switch (priv->speed_set) {
1751         case AMD_XGBE_PHY_SPEEDSET_1000_10000:
1752                 phydev->supported |= SUPPORTED_1000baseKX_Full;
1753                 break;
1754         case AMD_XGBE_PHY_SPEEDSET_2500_10000:
1755                 phydev->supported |= SUPPORTED_2500baseX_Full;
1756                 break;
1757         }
1758
1759         ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FEC_ABILITY);
1760         if (ret < 0)
1761                 return ret;
1762         priv->fec_ability = ret & XGBE_PHY_FEC_MASK;
1763         if (priv->fec_ability & XGBE_PHY_FEC_ENABLE)
1764                 phydev->supported |= SUPPORTED_10000baseR_FEC;
1765
1766         phydev->advertising = phydev->supported;
1767
1768         phydev->priv = priv;
1769
1770         if (!priv->adev || acpi_disabled)
1771                 platform_device_put(phy_pdev);
1772
1773         return 0;
1774
1775 err_sir1:
1776         devm_iounmap(dev, priv->sir1_regs);
1777         devm_release_mem_region(dev, priv->sir1_res->start,
1778                                 resource_size(priv->sir1_res));
1779
1780 err_sir0:
1781         devm_iounmap(dev, priv->sir0_regs);
1782         devm_release_mem_region(dev, priv->sir0_res->start,
1783                                 resource_size(priv->sir0_res));
1784
1785 err_rxtx:
1786         devm_iounmap(dev, priv->rxtx_regs);
1787         devm_release_mem_region(dev, priv->rxtx_res->start,
1788                                 resource_size(priv->rxtx_res));
1789
1790 err_put:
1791         if (!priv->adev || acpi_disabled)
1792                 platform_device_put(phy_pdev);
1793
1794 err_priv:
1795         devm_kfree(dev, priv);
1796
1797         return ret;
1798 }
1799
/* Tear down a PHY instance: undo everything amd_xgbe_phy_probe() set up.
 * The region release sequence mirrors the probe error-unwind path
 * (err_sir1 -> err_sir0 -> err_rxtx -> err_priv), so the two stay in step.
 */
static void amd_xgbe_phy_remove(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;
	struct device *dev = priv->dev;

	/* Free the IRQ before draining the workqueue so no new AN work
	 * can be queued while the queue is flushed and destroyed.
	 */
	if (priv->an_irq_allocated) {
		devm_free_irq(dev, priv->an_irq, priv);

		flush_workqueue(priv->an_workqueue);
		destroy_workqueue(priv->an_workqueue);
	}

	/* Release resources: explicitly unmap and release each register
	 * region (SIR1, SIR0, RxTx) even though they are devm-managed —
	 * NOTE(review): presumably because @dev outlives this PHY
	 * instance, so devres alone would release them too late.
	 */
	devm_iounmap(dev, priv->sir1_regs);
	devm_release_mem_region(dev, priv->sir1_res->start,
				resource_size(priv->sir1_res));

	devm_iounmap(dev, priv->sir0_regs);
	devm_release_mem_region(dev, priv->sir0_res->start,
				resource_size(priv->sir0_res));

	devm_iounmap(dev, priv->rxtx_regs);
	devm_release_mem_region(dev, priv->rxtx_res->start,
				resource_size(priv->rxtx_res));

	devm_kfree(dev, priv);
}
1827
1828 static int amd_xgbe_match_phy_device(struct phy_device *phydev)
1829 {
1830         return phydev->c45_ids.device_ids[MDIO_MMD_PCS] == XGBE_PHY_ID;
1831 }
1832
/* PHY driver descriptor for the AMD XGBE internal PHY.
 *
 * .features is deliberately 0: the supported feature mask is assembled
 * at probe time (amd_xgbe_phy_probe() fills phydev->supported from the
 * configured speed set and the advertised FEC ability).  Device
 * matching goes through .match_phy_device (clause-45 PCS ID) rather
 * than the phy_id/phy_id_mask pair alone.
 */
static struct phy_driver amd_xgbe_phy_driver[] = {
	{
		.phy_id			= XGBE_PHY_ID,
		.phy_id_mask		= XGBE_PHY_MASK,
		.name			= "AMD XGBE PHY",
		.features		= 0,
		.flags			= PHY_IS_INTERNAL,
		.probe			= amd_xgbe_phy_probe,
		.remove			= amd_xgbe_phy_remove,
		.soft_reset		= amd_xgbe_phy_soft_reset,
		.config_init		= amd_xgbe_phy_config_init,
		.suspend		= amd_xgbe_phy_suspend,
		.resume			= amd_xgbe_phy_resume,
		.config_aneg		= amd_xgbe_phy_config_aneg,
		.aneg_done		= amd_xgbe_phy_aneg_done,
		.read_status		= amd_xgbe_phy_read_status,
		.match_phy_device	= amd_xgbe_match_phy_device,
		.driver			= {
			.owner = THIS_MODULE,
		},
	},
};
1855
/* Generate module init/exit that register/unregister the driver array
 * with the PHY subsystem.
 */
module_phy_driver(amd_xgbe_phy_driver);

/* MDIO device ID table; exported via MODULE_DEVICE_TABLE so the module
 * can be autoloaded when a matching PHY is discovered on the bus.
 */
static struct mdio_device_id __maybe_unused amd_xgbe_phy_ids[] = {
	{ XGBE_PHY_ID, XGBE_PHY_MASK },
	{ }	/* sentinel */
};
MODULE_DEVICE_TABLE(mdio, amd_xgbe_phy_ids);