1 /*
2  * Driver for (BCM4706)? GBit MAC core on BCMA bus.
3  *
4  * Copyright (C) 2012 Rafał Miłecki <zajec5@gmail.com>
5  *
6  * Licensed under the GNU/GPL. See COPYING for details.
7  */
8
9 #include "bgmac.h"
10
11 #include <linux/kernel.h>
12 #include <linux/module.h>
13 #include <linux/delay.h>
14 #include <linux/etherdevice.h>
15 #include <linux/mii.h>
16 #include <linux/phy.h>
17 #include <linux/interrupt.h>
18 #include <linux/dma-mapping.h>
19 #include <bcm47xx_nvram.h>
20
21 static const struct bcma_device_id bgmac_bcma_tbl[] = {
22         BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_4706_MAC_GBIT, BCMA_ANY_REV, BCMA_ANY_CLASS),
23         BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_MAC_GBIT, BCMA_ANY_REV, BCMA_ANY_CLASS),
24         BCMA_CORETABLE_END
25 };
26 MODULE_DEVICE_TABLE(bcma, bgmac_bcma_tbl);
27
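/* Poll @reg until the bits selected by @mask read back as @value.
 * @timeout is given in microseconds and is checked in steps of 10 us;
 * returns false if the expected value was not seen in time.
 */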
28 static bool bgmac_wait_value(struct bcma_device *core, u16 reg, u32 mask,
29                              u32 value, int timeout)
30 {
31         u32 val;
32         int i;
33
34         for (i = 0; i < timeout / 10; i++) {
35                 val = bcma_read32(core, reg);
36                 if ((val & mask) == value)
37                         return true;
38                 udelay(10);
39         }
40         pr_err("Timeout waiting for reg 0x%X\n", reg);
41         return false;
42 }
43
44 /**************************************************
45  * DMA
46  **************************************************/
47
48 static void bgmac_dma_tx_reset(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
49 {
50         u32 val;
51         int i;
52
53         if (!ring->mmio_base)
54                 return;
55
56         /* Suspend the DMA TX ring first.
57          * bgmac_wait_value() doesn't support waiting for any one of several
58          * values, so implement the whole loop here.
59          */
60         bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL,
61                     BGMAC_DMA_TX_SUSPEND);
62         for (i = 0; i < 10000 / 10; i++) {
63                 val = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
64                 val &= BGMAC_DMA_TX_STAT;
65                 if (val == BGMAC_DMA_TX_STAT_DISABLED ||
66                     val == BGMAC_DMA_TX_STAT_IDLEWAIT ||
67                     val == BGMAC_DMA_TX_STAT_STOPPED) {
68                         i = 0;
69                         break;
70                 }
71                 udelay(10);
72         }
73         if (i)
74                 bgmac_err(bgmac, "Timeout suspending DMA TX ring 0x%X (BGMAC_DMA_TX_STAT: 0x%08X)\n",
75                           ring->mmio_base, val);
76
77         /* Remove SUSPEND bit */
78         bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, 0);
79         if (!bgmac_wait_value(bgmac->core,
80                               ring->mmio_base + BGMAC_DMA_TX_STATUS,
81                               BGMAC_DMA_TX_STAT, BGMAC_DMA_TX_STAT_DISABLED,
82                               10000)) {
83                 bgmac_warn(bgmac, "DMA TX ring 0x%X wasn't disabled on time, waiting additional 300us\n",
84                            ring->mmio_base);
85                 udelay(300);
86                 val = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
87                 if ((val & BGMAC_DMA_TX_STAT) != BGMAC_DMA_TX_STAT_DISABLED)
88                         bgmac_err(bgmac, "Reset of DMA TX ring 0x%X failed\n",
89                                   ring->mmio_base);
90         }
91 }
92
93 static void bgmac_dma_tx_enable(struct bgmac *bgmac,
94                                 struct bgmac_dma_ring *ring)
95 {
96         u32 ctl;
97
98         ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL);
99         ctl |= BGMAC_DMA_TX_ENABLE;
100         ctl |= BGMAC_DMA_TX_PARITY_DISABLE;
101         bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, ctl);
102 }
103
104 static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
105                                     struct bgmac_dma_ring *ring,
106                                     struct sk_buff *skb)
107 {
108         struct device *dma_dev = bgmac->core->dma_dev;
109         struct net_device *net_dev = bgmac->net_dev;
110         struct bgmac_dma_desc *dma_desc;
111         struct bgmac_slot_info *slot;
112         u32 ctl0, ctl1;
113         int free_slots;
114
115         if (skb->len > BGMAC_DESC_CTL1_LEN) {
116                 bgmac_err(bgmac, "Too long skb (%d)\n", skb->len);
117                 goto err_stop_drop;
118         }
119
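            /* ring->start is the oldest slot not yet reclaimed by
             * bgmac_dma_tx_free(), ring->end is the next slot to fill. One
             * slot is always left unused so that a completely full ring can
             * be told apart from an empty one (free_slots == 1 means "full").
             */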
120         if (ring->start <= ring->end)
121                 free_slots = ring->start - ring->end + BGMAC_TX_RING_SLOTS;
122         else
123                 free_slots = ring->start - ring->end;
124         if (free_slots == 1) {
125                 bgmac_err(bgmac, "TX ring is full, queue should be stopped!\n");
126                 netif_stop_queue(net_dev);
127                 return NETDEV_TX_BUSY;
128         }
129
130         slot = &ring->slots[ring->end];
131         slot->skb = skb;
132         slot->dma_addr = dma_map_single(dma_dev, skb->data, skb->len,
133                                         DMA_TO_DEVICE);
134         if (dma_mapping_error(dma_dev, slot->dma_addr)) {
135                 bgmac_err(bgmac, "Mapping error of skb on ring 0x%X\n",
136                           ring->mmio_base);
137                 goto err_stop_drop;
138         }
139
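            /* Each skb is mapped as a single buffer, so one descriptor carries
             * both the start- and end-of-frame flags plus IOC (interrupt on
             * completion); EOT is set only on the ring's last descriptor.
             */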
140         ctl0 = BGMAC_DESC_CTL0_IOC | BGMAC_DESC_CTL0_SOF | BGMAC_DESC_CTL0_EOF;
141         if (ring->end == ring->num_slots - 1)
142                 ctl0 |= BGMAC_DESC_CTL0_EOT;
143         ctl1 = skb->len & BGMAC_DESC_CTL1_LEN;
144
145         dma_desc = ring->cpu_base;
146         dma_desc += ring->end;
147         dma_desc->addr_low = cpu_to_le32(lower_32_bits(slot->dma_addr));
148         dma_desc->addr_high = cpu_to_le32(upper_32_bits(slot->dma_addr));
149         dma_desc->ctl0 = cpu_to_le32(ctl0);
150         dma_desc->ctl1 = cpu_to_le32(ctl1);
151
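            /* Make sure the descriptor writes above have reached memory before
             * the index register write below hands the slot to the hardware.
             */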
152         wmb();
153
154         /* Increase ring->end to point to the next empty slot. We tell the
155          * hardware the first slot it should *not* read.
156          */
157         if (++ring->end >= BGMAC_TX_RING_SLOTS)
158                 ring->end = 0;
159         bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_INDEX,
160                     ring->end * sizeof(struct bgmac_dma_desc));
161
162         /* Always keep one slot free to allow detecting bugged calls. */
163         if (--free_slots == 1)
164                 netif_stop_queue(net_dev);
165
166         return NETDEV_TX_OK;
167
168 err_stop_drop:
169         netif_stop_queue(net_dev);
170         dev_kfree_skb(skb);
171         return NETDEV_TX_OK;
172 }
173
174 /* Free transmitted packets */
175 static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
176 {
177         struct device *dma_dev = bgmac->core->dma_dev;
178         int empty_slot;
179         bool freed = false;
180
181         /* The last slot that hardware didn't consume yet */
182         empty_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
183         empty_slot &= BGMAC_DMA_TX_STATDPTR;
184         empty_slot /= sizeof(struct bgmac_dma_desc);
185
186         while (ring->start != empty_slot) {
187                 struct bgmac_slot_info *slot = &ring->slots[ring->start];
188
189                 if (slot->skb) {
190                         /* Unmap no longer used buffer */
191                         dma_unmap_single(dma_dev, slot->dma_addr,
192                                          slot->skb->len, DMA_TO_DEVICE);
193                         slot->dma_addr = 0;
194
195                         /* Free memory! :) */
196                         dev_kfree_skb(slot->skb);
197                         slot->skb = NULL;
198                 } else {
199                         bgmac_err(bgmac, "Hardware reported transmission for empty TX ring slot %d! End of ring: %d\n",
200                                   ring->start, ring->end);
201                 }
202
203                 if (++ring->start >= BGMAC_TX_RING_SLOTS)
204                         ring->start = 0;
205                 freed = true;
206         }
207
208         if (freed && netif_queue_stopped(bgmac->net_dev))
209                 netif_wake_queue(bgmac->net_dev);
210 }
211
212 static void bgmac_dma_rx_reset(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
213 {
214         if (!ring->mmio_base)
215                 return;
216
217         bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL, 0);
218         if (!bgmac_wait_value(bgmac->core,
219                               ring->mmio_base + BGMAC_DMA_RX_STATUS,
220                               BGMAC_DMA_RX_STAT, BGMAC_DMA_RX_STAT_DISABLED,
221                               10000))
222                 bgmac_err(bgmac, "Reset of ring 0x%X RX failed\n",
223                           ring->mmio_base);
224 }
225
226 static void bgmac_dma_rx_enable(struct bgmac *bgmac,
227                                 struct bgmac_dma_ring *ring)
228 {
229         u32 ctl;
230
231         ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL);
232         ctl &= BGMAC_DMA_RX_ADDREXT_MASK;
233         ctl |= BGMAC_DMA_RX_ENABLE;
234         ctl |= BGMAC_DMA_RX_PARITY_DISABLE;
235         ctl |= BGMAC_DMA_RX_OVERFLOW_CONT;
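            /* Leave BGMAC_RX_FRAME_OFFSET bytes of headroom in every RX buffer
             * for the status header (struct bgmac_rx_header) written by the
             * hardware.
             */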
236         ctl |= BGMAC_RX_FRAME_OFFSET << BGMAC_DMA_RX_FRAME_OFFSET_SHIFT;
237         bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL, ctl);
238 }
239
240 static int bgmac_dma_rx_skb_for_slot(struct bgmac *bgmac,
241                                      struct bgmac_slot_info *slot)
242 {
243         struct device *dma_dev = bgmac->core->dma_dev;
244         struct bgmac_rx_header *rx;
245
246         /* Alloc skb */
247         slot->skb = netdev_alloc_skb(bgmac->net_dev, BGMAC_RX_BUF_SIZE);
248         if (!slot->skb)
249                 return -ENOMEM;
250
251         /* Poison - if everything goes fine, hardware will overwrite it */
252         rx = (struct bgmac_rx_header *)slot->skb->data;
253         rx->len = cpu_to_le16(0xdead);
254         rx->flags = cpu_to_le16(0xbeef);
255
256         /* Map skb for the DMA */
257         slot->dma_addr = dma_map_single(dma_dev, slot->skb->data,
258                                         BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
259         if (dma_mapping_error(dma_dev, slot->dma_addr)) {
260                 bgmac_err(bgmac, "DMA mapping error\n");
261                 return -ENOMEM;
262         }
263         if (slot->dma_addr & 0xC0000000)
264                 bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
265
266         return 0;
267 }
268
269 static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
270                              int weight)
271 {
272         u32 end_slot;
273         int handled = 0;
274
275         end_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_STATUS);
276         end_slot &= BGMAC_DMA_RX_STATDPTR;
277         end_slot /= sizeof(struct bgmac_dma_desc);
278
279         ring->end = end_slot;
280
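            /* Walk every slot the hardware has filled since the last poll.
             * Each frame is copied into a freshly allocated skb, so the
             * original RX buffer can be handed straight back to the hardware.
             */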
281         while (ring->start != ring->end) {
282                 struct device *dma_dev = bgmac->core->dma_dev;
283                 struct bgmac_slot_info *slot = &ring->slots[ring->start];
284                 struct sk_buff *skb = slot->skb;
285                 struct sk_buff *new_skb;
286                 struct bgmac_rx_header *rx;
287                 u16 len, flags;
288
289                 /* Sync the buffer to make it accessible to the CPU */
290                 dma_sync_single_for_cpu(dma_dev, slot->dma_addr,
291                                         BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
292
293                 /* Get info from the header */
294                 rx = (struct bgmac_rx_header *)skb->data;
295                 len = le16_to_cpu(rx->len);
296                 flags = le16_to_cpu(rx->flags);
297
298                 /* Check for poison and drop or pass the packet */
299                 if (len == 0xdead && flags == 0xbeef) {
300                         bgmac_err(bgmac, "Found poisoned packet at slot %d, DMA issue!\n",
301                                   ring->start);
302                 } else {
303                         /* Omit CRC. */
304                         len -= ETH_FCS_LEN;
305
306                         new_skb = netdev_alloc_skb_ip_align(bgmac->net_dev, len);
307                         if (new_skb) {
308                                 skb_put(new_skb, len);
309                                 skb_copy_from_linear_data_offset(skb, BGMAC_RX_FRAME_OFFSET,
310                                                                  new_skb->data,
311                                                                  len);
312                                 skb_checksum_none_assert(skb);
313                                 new_skb->protocol =
314                                         eth_type_trans(new_skb, bgmac->net_dev);
315                                 netif_receive_skb(new_skb);
316                                 handled++;
317                         } else {
318                                 bgmac->net_dev->stats.rx_dropped++;
319                                 bgmac_err(bgmac, "Allocation of skb for copying packet failed!\n");
320                         }
321
322                         /* Poison the old skb */
323                         rx->len = cpu_to_le16(0xdead);
324                         rx->flags = cpu_to_le16(0xbeef);
325                 }
326
327                 /* Make it back accessible to the hardware */
328                 dma_sync_single_for_device(dma_dev, slot->dma_addr,
329                                            BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
330
331                 if (++ring->start >= BGMAC_RX_RING_SLOTS)
332                         ring->start = 0;
333
334                 if (handled >= weight) /* Should never be greater */
335                         break;
336         }
337
338         return handled;
339 }
340
341 /* Does the ring support unaligned addressing? */
342 static bool bgmac_dma_unaligned(struct bgmac *bgmac,
343                                 struct bgmac_dma_ring *ring,
344                                 enum bgmac_dma_ring_type ring_type)
345 {
346         switch (ring_type) {
347         case BGMAC_DMA_RING_TX:
348                 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO,
349                             0xff0);
350                 if (bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO))
351                         return true;
352                 break;
353         case BGMAC_DMA_RING_RX:
354                 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO,
355                             0xff0);
356                 if (bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO))
357                         return true;
358                 break;
359         }
360         return false;
361 }
362
363 static void bgmac_dma_ring_free(struct bgmac *bgmac,
364                                 struct bgmac_dma_ring *ring)
365 {
366         struct device *dma_dev = bgmac->core->dma_dev;
367         struct bgmac_slot_info *slot;
368         int size;
369         int i;
370
371         for (i = 0; i < ring->num_slots; i++) {
372                 slot = &ring->slots[i];
373                 if (slot->skb) {
374                         if (slot->dma_addr)
375                                 dma_unmap_single(dma_dev, slot->dma_addr,
376                                                  slot->skb->len, DMA_TO_DEVICE);
377                         dev_kfree_skb(slot->skb);
378                 }
379         }
380
381         if (ring->cpu_base) {
382                 /* Free ring of descriptors */
383                 size = ring->num_slots * sizeof(struct bgmac_dma_desc);
384                 dma_free_coherent(dma_dev, size, ring->cpu_base,
385                                   ring->dma_base);
386         }
387 }
388
389 static void bgmac_dma_free(struct bgmac *bgmac)
390 {
391         int i;
392
393         for (i = 0; i < BGMAC_MAX_TX_RINGS; i++)
394                 bgmac_dma_ring_free(bgmac, &bgmac->tx_ring[i]);
395         for (i = 0; i < BGMAC_MAX_RX_RINGS; i++)
396                 bgmac_dma_ring_free(bgmac, &bgmac->rx_ring[i]);
397 }
398
399 static int bgmac_dma_alloc(struct bgmac *bgmac)
400 {
401         struct device *dma_dev = bgmac->core->dma_dev;
402         struct bgmac_dma_ring *ring;
403         static const u16 ring_base[] = { BGMAC_DMA_BASE0, BGMAC_DMA_BASE1,
404                                          BGMAC_DMA_BASE2, BGMAC_DMA_BASE3, };
405         int size; /* ring size: different for Tx and Rx */
406         int err;
407         int i;
408
409         BUILD_BUG_ON(BGMAC_MAX_TX_RINGS > ARRAY_SIZE(ring_base));
410         BUILD_BUG_ON(BGMAC_MAX_RX_RINGS > ARRAY_SIZE(ring_base));
411
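            /* Only the 64-bit DMA engine layout is implemented, so bail out if
             * the core does not advertise it.
             */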
412         if (!(bcma_aread32(bgmac->core, BCMA_IOST) & BCMA_IOST_DMA64)) {
413                 bgmac_err(bgmac, "Core does not report 64-bit DMA\n");
414                 return -ENOTSUPP;
415         }
416
417         for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
418                 ring = &bgmac->tx_ring[i];
419                 ring->num_slots = BGMAC_TX_RING_SLOTS;
420                 ring->mmio_base = ring_base[i];
421                 if (bgmac_dma_unaligned(bgmac, ring, BGMAC_DMA_RING_TX))
422                         bgmac_warn(bgmac, "TX on ring 0x%X supports unaligned addressing but this feature is not implemented\n",
423                                    ring->mmio_base);
424
425                 /* Alloc ring of descriptors */
426                 size = ring->num_slots * sizeof(struct bgmac_dma_desc);
427                 ring->cpu_base = dma_zalloc_coherent(dma_dev, size,
428                                                      &ring->dma_base,
429                                                      GFP_KERNEL);
430                 if (!ring->cpu_base) {
431                         bgmac_err(bgmac, "Allocation of TX ring 0x%X failed\n",
432                                   ring->mmio_base);
433                         goto err_dma_free;
434                 }
435                 if (ring->dma_base & 0xC0000000)
436                         bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
437
438                 /* No need to alloc TX slots yet */
439         }
440
441         for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) {
442                 int j;
443
444                 ring = &bgmac->rx_ring[i];
445                 ring->num_slots = BGMAC_RX_RING_SLOTS;
446                 ring->mmio_base = ring_base[i];
447                 if (bgmac_dma_unaligned(bgmac, ring, BGMAC_DMA_RING_RX))
448                         bgmac_warn(bgmac, "RX on ring 0x%X supports unaligned addressing but this feature is not implemented\n",
449                                    ring->mmio_base);
450
451                 /* Alloc ring of descriptors */
452                 size = ring->num_slots * sizeof(struct bgmac_dma_desc);
453                 ring->cpu_base = dma_zalloc_coherent(dma_dev, size,
454                                                      &ring->dma_base,
455                                                      GFP_KERNEL);
456                 if (!ring->cpu_base) {
457                         bgmac_err(bgmac, "Allocation of RX ring 0x%X failed\n",
458                                   ring->mmio_base);
459                         err = -ENOMEM;
460                         goto err_dma_free;
461                 }
462                 if (ring->dma_base & 0xC0000000)
463                         bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
464
465                 /* Alloc RX slots */
466                 for (j = 0; j < ring->num_slots; j++) {
467                         err = bgmac_dma_rx_skb_for_slot(bgmac, &ring->slots[j]);
468                         if (err) {
469                                 bgmac_err(bgmac, "Can't allocate skb for slot in RX ring\n");
470                                 goto err_dma_free;
471                         }
472                 }
473         }
474
475         return 0;
476
477 err_dma_free:
478         bgmac_dma_free(bgmac);
479         return -ENOMEM;
480 }
481
482 static void bgmac_dma_init(struct bgmac *bgmac)
483 {
484         struct bgmac_dma_ring *ring;
485         struct bgmac_dma_desc *dma_desc;
486         u32 ctl0, ctl1;
487         int i;
488
489         for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
490                 ring = &bgmac->tx_ring[i];
491
492                 /* We don't implement unaligned addressing, so enable first */
493                 bgmac_dma_tx_enable(bgmac, ring);
494                 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO,
495                             lower_32_bits(ring->dma_base));
496                 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGHI,
497                             upper_32_bits(ring->dma_base));
498
499                 ring->start = 0;
500                 ring->end = 0;  /* Points the slot that should *not* be read */
501         }
502
503         for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) {
504                 int j;
505
506                 ring = &bgmac->rx_ring[i];
507
508                 /* We don't implement unaligned addressing, so enable first */
509                 bgmac_dma_rx_enable(bgmac, ring);
510                 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO,
511                             lower_32_bits(ring->dma_base));
512                 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGHI,
513                             upper_32_bits(ring->dma_base));
514
515                 for (j = 0, dma_desc = ring->cpu_base; j < ring->num_slots;
516                      j++, dma_desc++) {
517                         ctl0 = ctl1 = 0;
518
519                         if (j == ring->num_slots - 1)
520                                 ctl0 |= BGMAC_DESC_CTL0_EOT;
521                         ctl1 |= BGMAC_RX_BUF_SIZE & BGMAC_DESC_CTL1_LEN;
522                         /* Is there any BGMAC device that requires extension? */
523                         /* ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT) &
524                          * B43_DMA64_DCTL1_ADDREXT_MASK;
525                          */
526
527                         dma_desc->addr_low = cpu_to_le32(lower_32_bits(ring->slots[j].dma_addr));
528                         dma_desc->addr_high = cpu_to_le32(upper_32_bits(ring->slots[j].dma_addr));
529                         dma_desc->ctl0 = cpu_to_le32(ctl0);
530                         dma_desc->ctl1 = cpu_to_le32(ctl1);
531                 }
532
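                    /* As with TX, the index register holds the byte offset of
                     * the first descriptor the hardware may not yet use;
                     * pointing it past the last slot makes the whole ring
                     * available.
                     */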
533                 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_INDEX,
534                             ring->num_slots * sizeof(struct bgmac_dma_desc));
535
536                 ring->start = 0;
537                 ring->end = 0;
538         }
539 }
540
541 /**************************************************
542  * PHY ops
543  **************************************************/
544
545 static u16 bgmac_phy_read(struct bgmac *bgmac, u8 phyaddr, u8 reg)
546 {
547         struct bcma_device *core;
548         u16 phy_access_addr;
549         u16 phy_ctl_addr;
550         u32 tmp;
551
552         BUILD_BUG_ON(BGMAC_PA_DATA_MASK != BCMA_GMAC_CMN_PA_DATA_MASK);
553         BUILD_BUG_ON(BGMAC_PA_ADDR_MASK != BCMA_GMAC_CMN_PA_ADDR_MASK);
554         BUILD_BUG_ON(BGMAC_PA_ADDR_SHIFT != BCMA_GMAC_CMN_PA_ADDR_SHIFT);
555         BUILD_BUG_ON(BGMAC_PA_REG_MASK != BCMA_GMAC_CMN_PA_REG_MASK);
556         BUILD_BUG_ON(BGMAC_PA_REG_SHIFT != BCMA_GMAC_CMN_PA_REG_SHIFT);
557         BUILD_BUG_ON(BGMAC_PA_WRITE != BCMA_GMAC_CMN_PA_WRITE);
558         BUILD_BUG_ON(BGMAC_PA_START != BCMA_GMAC_CMN_PA_START);
559         BUILD_BUG_ON(BGMAC_PC_EPA_MASK != BCMA_GMAC_CMN_PC_EPA_MASK);
560         BUILD_BUG_ON(BGMAC_PC_MCT_MASK != BCMA_GMAC_CMN_PC_MCT_MASK);
561         BUILD_BUG_ON(BGMAC_PC_MCT_SHIFT != BCMA_GMAC_CMN_PC_MCT_SHIFT);
562         BUILD_BUG_ON(BGMAC_PC_MTE != BCMA_GMAC_CMN_PC_MTE);
563
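            /* On BCM4706 the PHY is reached through the shared GMAC common
             * core; other chips expose the PHY access registers in the MAC
             * core itself.
             */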
564         if (bgmac->core->id.id == BCMA_CORE_4706_MAC_GBIT) {
565                 core = bgmac->core->bus->drv_gmac_cmn.core;
566                 phy_access_addr = BCMA_GMAC_CMN_PHY_ACCESS;
567                 phy_ctl_addr = BCMA_GMAC_CMN_PHY_CTL;
568         } else {
569                 core = bgmac->core;
570                 phy_access_addr = BGMAC_PHY_ACCESS;
571                 phy_ctl_addr = BGMAC_PHY_CNTL;
572         }
573
574         tmp = bcma_read32(core, phy_ctl_addr);
575         tmp &= ~BGMAC_PC_EPA_MASK;
576         tmp |= phyaddr;
577         bcma_write32(core, phy_ctl_addr, tmp);
578
579         tmp = BGMAC_PA_START;
580         tmp |= phyaddr << BGMAC_PA_ADDR_SHIFT;
581         tmp |= reg << BGMAC_PA_REG_SHIFT;
582         bcma_write32(core, phy_access_addr, tmp);
583
584         if (!bgmac_wait_value(core, phy_access_addr, BGMAC_PA_START, 0, 1000)) {
585                 bgmac_err(bgmac, "Reading PHY %d register 0x%X failed\n",
586                           phyaddr, reg);
587                 return 0xffff;
588         }
589
590         return bcma_read32(core, phy_access_addr) & BGMAC_PA_DATA_MASK;
591 }
592
593 /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphywr */
594 static int bgmac_phy_write(struct bgmac *bgmac, u8 phyaddr, u8 reg, u16 value)
595 {
596         struct bcma_device *core;
597         u16 phy_access_addr;
598         u16 phy_ctl_addr;
599         u32 tmp;
600
601         if (bgmac->core->id.id == BCMA_CORE_4706_MAC_GBIT) {
602                 core = bgmac->core->bus->drv_gmac_cmn.core;
603                 phy_access_addr = BCMA_GMAC_CMN_PHY_ACCESS;
604                 phy_ctl_addr = BCMA_GMAC_CMN_PHY_CTL;
605         } else {
606                 core = bgmac->core;
607                 phy_access_addr = BGMAC_PHY_ACCESS;
608                 phy_ctl_addr = BGMAC_PHY_CNTL;
609         }
610
611         tmp = bcma_read32(core, phy_ctl_addr);
612         tmp &= ~BGMAC_PC_EPA_MASK;
613         tmp |= phyaddr;
614         bcma_write32(core, phy_ctl_addr, tmp);
615
616         bgmac_write(bgmac, BGMAC_INT_STATUS, BGMAC_IS_MDIO);
617         if (bgmac_read(bgmac, BGMAC_INT_STATUS) & BGMAC_IS_MDIO)
618                 bgmac_warn(bgmac, "Error setting MDIO int\n");
619
620         tmp = BGMAC_PA_START;
621         tmp |= BGMAC_PA_WRITE;
622         tmp |= phyaddr << BGMAC_PA_ADDR_SHIFT;
623         tmp |= reg << BGMAC_PA_REG_SHIFT;
624         tmp |= value;
625         bcma_write32(core, phy_access_addr, tmp);
626
627         if (!bgmac_wait_value(core, phy_access_addr, BGMAC_PA_START, 0, 1000)) {
628                 bgmac_err(bgmac, "Writing to PHY %d register 0x%X failed\n",
629                           phyaddr, reg);
630                 return -ETIMEDOUT;
631         }
632
633         return 0;
634 }
635
636 /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyforce */
637 static void bgmac_phy_force(struct bgmac *bgmac)
638 {
639         u16 ctl;
640         u16 mask = ~(BGMAC_PHY_CTL_SPEED | BGMAC_PHY_CTL_SPEED_MSB |
641                      BGMAC_PHY_CTL_ANENAB | BGMAC_PHY_CTL_DUPLEX);
642
643         if (bgmac->phyaddr == BGMAC_PHY_NOREGS)
644                 return;
645
646         if (bgmac->autoneg)
647                 return;
648
649         ctl = bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL);
650         ctl &= mask;
651         if (bgmac->full_duplex)
652                 ctl |= BGMAC_PHY_CTL_DUPLEX;
653         if (bgmac->speed == BGMAC_SPEED_100)
654                 ctl |= BGMAC_PHY_CTL_SPEED_100;
655         else if (bgmac->speed == BGMAC_SPEED_1000)
656                 ctl |= BGMAC_PHY_CTL_SPEED_1000;
657         bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL, ctl);
658 }
659
660 /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyadvertise */
661 static void bgmac_phy_advertise(struct bgmac *bgmac)
662 {
663         u16 adv;
664
665         if (bgmac->phyaddr == BGMAC_PHY_NOREGS)
666                 return;
667
668         if (!bgmac->autoneg)
669                 return;
670
671         /* Advertise the selected 10/100 speeds */
672         adv = bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_ADV);
673         adv &= ~(BGMAC_PHY_ADV_10HALF | BGMAC_PHY_ADV_10FULL |
674                  BGMAC_PHY_ADV_100HALF | BGMAC_PHY_ADV_100FULL);
675         if (!bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_10)
676                 adv |= BGMAC_PHY_ADV_10HALF;
677         if (!bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_100)
678                 adv |= BGMAC_PHY_ADV_100HALF;
679         if (bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_10)
680                 adv |= BGMAC_PHY_ADV_10FULL;
681         if (bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_100)
682                 adv |= BGMAC_PHY_ADV_100FULL;
683         bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_ADV, adv);
684
685         /* Advertise the selected 1000 speeds */
686         adv = bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_ADV2);
687         adv &= ~(BGMAC_PHY_ADV2_1000HALF | BGMAC_PHY_ADV2_1000FULL);
688         if (!bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_1000)
689                 adv |= BGMAC_PHY_ADV2_1000HALF;
690         if (bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_1000)
691                 adv |= BGMAC_PHY_ADV2_1000FULL;
692         bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_ADV2, adv);
693
694         /* Restart */
695         bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL,
696                         bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL) |
697                         BGMAC_PHY_CTL_RESTART);
698 }
699
700 /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyinit */
701 static void bgmac_phy_init(struct bgmac *bgmac)
702 {
703         struct bcma_chipinfo *ci = &bgmac->core->bus->chipinfo;
704         struct bcma_drv_cc *cc = &bgmac->core->bus->drv_cc;
705         u8 i;
706
707         if (ci->id == BCMA_CHIP_ID_BCM5356) {
708                 for (i = 0; i < 5; i++) {
709                         bgmac_phy_write(bgmac, i, 0x1f, 0x008b);
710                         bgmac_phy_write(bgmac, i, 0x15, 0x0100);
711                         bgmac_phy_write(bgmac, i, 0x1f, 0x000f);
712                         bgmac_phy_write(bgmac, i, 0x12, 0x2aaa);
713                         bgmac_phy_write(bgmac, i, 0x1f, 0x000b);
714                 }
715         }
716         if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg != 10) ||
717             (ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg != 10) ||
718             (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg != 9)) {
719                 bcma_chipco_chipctl_maskset(cc, 2, ~0xc0000000, 0);
720                 bcma_chipco_chipctl_maskset(cc, 4, ~0x80000000, 0);
721                 for (i = 0; i < 5; i++) {
722                         bgmac_phy_write(bgmac, i, 0x1f, 0x000f);
723                         bgmac_phy_write(bgmac, i, 0x16, 0x5284);
724                         bgmac_phy_write(bgmac, i, 0x1f, 0x000b);
725                         bgmac_phy_write(bgmac, i, 0x17, 0x0010);
726                         bgmac_phy_write(bgmac, i, 0x1f, 0x000f);
727                         bgmac_phy_write(bgmac, i, 0x16, 0x5296);
728                         bgmac_phy_write(bgmac, i, 0x17, 0x1073);
729                         bgmac_phy_write(bgmac, i, 0x17, 0x9073);
730                         bgmac_phy_write(bgmac, i, 0x16, 0x52b6);
731                         bgmac_phy_write(bgmac, i, 0x17, 0x9273);
732                         bgmac_phy_write(bgmac, i, 0x1f, 0x000b);
733                 }
734         }
735 }
736
737 /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyreset */
738 static void bgmac_phy_reset(struct bgmac *bgmac)
739 {
740         if (bgmac->phyaddr == BGMAC_PHY_NOREGS)
741                 return;
742
743         bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL,
744                         BGMAC_PHY_CTL_RESET);
745         udelay(100);
746         if (bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL) &
747             BGMAC_PHY_CTL_RESET)
748                 bgmac_err(bgmac, "PHY reset failed\n");
749         bgmac_phy_init(bgmac);
750 }
751
752 /**************************************************
753  * Chip ops
754  **************************************************/
755
756 /* TODO: can we just drop @force? Can we avoid resetting the MAC at all if
757  * there is nothing to change? Try it after stabilizing the driver.
758  */
759 static void bgmac_cmdcfg_maskset(struct bgmac *bgmac, u32 mask, u32 set,
760                                  bool force)
761 {
762         u32 cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG);
763         u32 new_val = (cmdcfg & mask) | set;
764
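            /* CMDCFG is updated with the MAC held in software reset: assert
             * BGMAC_CMDCFG_SR, write the new value, then release the reset.
             */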
765         bgmac_set(bgmac, BGMAC_CMDCFG, BGMAC_CMDCFG_SR);
766         udelay(2);
767
768         if (new_val != cmdcfg || force)
769                 bgmac_write(bgmac, BGMAC_CMDCFG, new_val);
770
771         bgmac_mask(bgmac, BGMAC_CMDCFG, ~BGMAC_CMDCFG_SR);
772         udelay(2);
773 }
774
775 static void bgmac_write_mac_address(struct bgmac *bgmac, u8 *addr)
776 {
777         u32 tmp;
778
779         tmp = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3];
780         bgmac_write(bgmac, BGMAC_MACADDR_HIGH, tmp);
781         tmp = (addr[4] << 8) | addr[5];
782         bgmac_write(bgmac, BGMAC_MACADDR_LOW, tmp);
783 }
784
785 static void bgmac_set_rx_mode(struct net_device *net_dev)
786 {
787         struct bgmac *bgmac = netdev_priv(net_dev);
788
789         if (net_dev->flags & IFF_PROMISC)
790                 bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_PROM, true);
791         else
792                 bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_PROM, 0, true);
793 }
794
795 #if 0 /* We don't use these registers yet */
796 static void bgmac_chip_stats_update(struct bgmac *bgmac)
797 {
798         int i;
799
800         if (bgmac->core->id.id != BCMA_CORE_4706_MAC_GBIT) {
801                 for (i = 0; i < BGMAC_NUM_MIB_TX_REGS; i++)
802                         bgmac->mib_tx_regs[i] =
803                                 bgmac_read(bgmac,
804                                            BGMAC_TX_GOOD_OCTETS + (i * 4));
805                 for (i = 0; i < BGMAC_NUM_MIB_RX_REGS; i++)
806                         bgmac->mib_rx_regs[i] =
807                                 bgmac_read(bgmac,
808                                            BGMAC_RX_GOOD_OCTETS + (i * 4));
809         }
810
811         /* TODO: what else? how to handle BCM4706? Specs are needed */
812 }
813 #endif
814
815 static void bgmac_clear_mib(struct bgmac *bgmac)
816 {
817         int i;
818
819         if (bgmac->core->id.id == BCMA_CORE_4706_MAC_GBIT)
820                 return;
821
822         bgmac_set(bgmac, BGMAC_DEV_CTL, BGMAC_DC_MROR);
823         for (i = 0; i < BGMAC_NUM_MIB_TX_REGS; i++)
824                 bgmac_read(bgmac, BGMAC_TX_GOOD_OCTETS + (i * 4));
825         for (i = 0; i < BGMAC_NUM_MIB_RX_REGS; i++)
826                 bgmac_read(bgmac, BGMAC_RX_GOOD_OCTETS + (i * 4));
827 }
828
829 /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_speed */
830 static void bgmac_speed(struct bgmac *bgmac, int speed)
831 {
832         u32 mask = ~(BGMAC_CMDCFG_ES_MASK | BGMAC_CMDCFG_HD);
833         u32 set = 0;
834
835         if (speed & BGMAC_SPEED_10)
836                 set |= BGMAC_CMDCFG_ES_10;
837         if (speed & BGMAC_SPEED_100)
838                 set |= BGMAC_CMDCFG_ES_100;
839         if (speed & BGMAC_SPEED_1000)
840                 set |= BGMAC_CMDCFG_ES_1000;
841         if (!bgmac->full_duplex)
842                 set |= BGMAC_CMDCFG_HD;
843         bgmac_cmdcfg_maskset(bgmac, mask, set, true);
844 }
845
846 static void bgmac_miiconfig(struct bgmac *bgmac)
847 {
848         u8 imode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) & BGMAC_DS_MM_MASK) >>
849                         BGMAC_DS_MM_SHIFT;
850         if (imode == 0 || imode == 1) {
851                 if (bgmac->autoneg)
852                         bgmac_speed(bgmac, BGMAC_SPEED_100);
853                 else
854                         bgmac_speed(bgmac, bgmac->speed);
855         }
856 }
857
858 /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipreset */
859 static void bgmac_chip_reset(struct bgmac *bgmac)
860 {
861         struct bcma_device *core = bgmac->core;
862         struct bcma_bus *bus = core->bus;
863         struct bcma_chipinfo *ci = &bus->chipinfo;
864         u32 flags = 0;
865         u32 iost;
866         int i;
867
868         if (bcma_core_is_enabled(core)) {
869                 if (!bgmac->stats_grabbed) {
870                         /* bgmac_chip_stats_update(bgmac); */
871                         bgmac->stats_grabbed = true;
872                 }
873
874                 for (i = 0; i < BGMAC_MAX_TX_RINGS; i++)
875                         bgmac_dma_tx_reset(bgmac, &bgmac->tx_ring[i]);
876
877                 bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_ML, false);
878                 udelay(1);
879
880                 for (i = 0; i < BGMAC_MAX_RX_RINGS; i++)
881                         bgmac_dma_rx_reset(bgmac, &bgmac->rx_ring[i]);
882
883                 /* TODO: Clear software multicast filter list */
884         }
885
886         iost = bcma_aread32(core, BCMA_IOST);
887         if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == 10) ||
888             (ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg == 10) ||
889             (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == 9))
890                 iost &= ~BGMAC_BCMA_IOST_ATTACHED;
891
892         if (iost & BGMAC_BCMA_IOST_ATTACHED) {
893                 flags = BGMAC_BCMA_IOCTL_SW_CLKEN;
894                 if (!bgmac->has_robosw)
895                         flags |= BGMAC_BCMA_IOCTL_SW_RESET;
896         }
897
898         bcma_core_enable(core, flags);
899
900         if (core->id.rev > 2) {
901                 bgmac_set(bgmac, BCMA_CLKCTLST, 1 << 8);
902                 bgmac_wait_value(bgmac->core, BCMA_CLKCTLST, 1 << 24, 1 << 24,
903                                  1000);
904         }
905
906         if (ci->id == BCMA_CHIP_ID_BCM5357 || ci->id == BCMA_CHIP_ID_BCM4749 ||
907             ci->id == BCMA_CHIP_ID_BCM53572) {
908                 struct bcma_drv_cc *cc = &bgmac->core->bus->drv_cc;
909                 u8 et_swtype = 0;
910                 u8 sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHY |
911                              BGMAC_CHIPCTL_1_IF_TYPE_MII;
912                 char buf[2];
913
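                    /* The switch/interface type comes from the "et_swtype"
                     * NVRAM variable when it is set; otherwise it is derived
                     * from the chip and package IDs below.
                     */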
914                 if (bcm47xx_nvram_getenv("et_swtype", buf, 1) > 0) {
915                         if (kstrtou8(buf, 0, &et_swtype))
916                                 bgmac_err(bgmac, "Failed to parse et_swtype (%s)\n",
917                                           buf);
918                         et_swtype &= 0x0f;
919                         et_swtype <<= 4;
920                         sw_type = et_swtype;
921                 } else if (ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == 9) {
922                         sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHYRMII;
923                 } else if ((ci->id != BCMA_CHIP_ID_BCM53572 && ci->pkg == 10) ||
924                            (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == 9)) {
925                         sw_type = BGMAC_CHIPCTL_1_IF_TYPE_RGMII |
926                                   BGMAC_CHIPCTL_1_SW_TYPE_RGMII;
927                 }
928                 bcma_chipco_chipctl_maskset(cc, 1,
929                                             ~(BGMAC_CHIPCTL_1_IF_TYPE_MASK |
930                                               BGMAC_CHIPCTL_1_SW_TYPE_MASK),
931                                             sw_type);
932         }
933
934         if (iost & BGMAC_BCMA_IOST_ATTACHED && !bgmac->has_robosw)
935                 bcma_awrite32(core, BCMA_IOCTL,
936                               bcma_aread32(core, BCMA_IOCTL) &
937                               ~BGMAC_BCMA_IOCTL_SW_RESET);
938
939         /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_reset
940          * The specs don't mention using BGMAC_CMDCFG_SR, but in this routine
941          * BGMAC_CMDCFG is read _after_ putting the chip in reset, so the bit
942          * has to be kept set until the MAC is taken out of reset.
943          */
944         bgmac_cmdcfg_maskset(bgmac,
945                              ~(BGMAC_CMDCFG_TE |
946                                BGMAC_CMDCFG_RE |
947                                BGMAC_CMDCFG_RPI |
948                                BGMAC_CMDCFG_TAI |
949                                BGMAC_CMDCFG_HD |
950                                BGMAC_CMDCFG_ML |
951                                BGMAC_CMDCFG_CFE |
952                                BGMAC_CMDCFG_RL |
953                                BGMAC_CMDCFG_RED |
954                                BGMAC_CMDCFG_PE |
955                                BGMAC_CMDCFG_TPI |
956                                BGMAC_CMDCFG_PAD_EN |
957                                BGMAC_CMDCFG_PF),
958                              BGMAC_CMDCFG_PROM |
959                              BGMAC_CMDCFG_NLC |
960                              BGMAC_CMDCFG_CFE |
961                              BGMAC_CMDCFG_SR,
962                              false);
963
964         bgmac_clear_mib(bgmac);
965         if (core->id.id == BCMA_CORE_4706_MAC_GBIT)
966                 bcma_maskset32(bgmac->cmn, BCMA_GMAC_CMN_PHY_CTL, ~0,
967                                BCMA_GMAC_CMN_PC_MTE);
968         else
969                 bgmac_set(bgmac, BGMAC_PHY_CNTL, BGMAC_PC_MTE);
970         bgmac_miiconfig(bgmac);
971         bgmac_phy_init(bgmac);
972
973         bgmac->int_status = 0;
974 }
975
976 static void bgmac_chip_intrs_on(struct bgmac *bgmac)
977 {
978         bgmac_write(bgmac, BGMAC_INT_MASK, bgmac->int_mask);
979 }
980
981 static void bgmac_chip_intrs_off(struct bgmac *bgmac)
982 {
983         bgmac_write(bgmac, BGMAC_INT_MASK, 0);
984         bgmac_read(bgmac, BGMAC_INT_MASK);
985 }
986
987 /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_enable */
988 static void bgmac_enable(struct bgmac *bgmac)
989 {
990         struct bcma_chipinfo *ci = &bgmac->core->bus->chipinfo;
991         u32 cmdcfg;
992         u32 mode;
993         u32 rxq_ctl;
994         u32 fl_ctl;
995         u16 bp_clk;
996         u8 mdp;
997
998         cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG);
999         bgmac_cmdcfg_maskset(bgmac, ~(BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE),
1000                              BGMAC_CMDCFG_SR, true);
1001         udelay(2);
1002         cmdcfg |= BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE;
1003         bgmac_write(bgmac, BGMAC_CMDCFG, cmdcfg);
1004
1005         mode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) & BGMAC_DS_MM_MASK) >>
1006                 BGMAC_DS_MM_SHIFT;
1007         if (ci->id != BCMA_CHIP_ID_BCM47162 || mode != 0)
1008                 bgmac_set(bgmac, BCMA_CLKCTLST, BCMA_CLKCTLST_FORCEHT);
1009         if (ci->id == BCMA_CHIP_ID_BCM47162 && mode == 2)
1010                 bcma_chipco_chipctl_maskset(&bgmac->core->bus->drv_cc, 1, ~0,
1011                                             BGMAC_CHIPCTL_1_RXC_DLL_BYPASS);
1012
1013         switch (ci->id) {
1014         case BCMA_CHIP_ID_BCM5357:
1015         case BCMA_CHIP_ID_BCM4749:
1016         case BCMA_CHIP_ID_BCM53572:
1017         case BCMA_CHIP_ID_BCM4716:
1018         case BCMA_CHIP_ID_BCM47162:
1019                 fl_ctl = 0x03cb04cb;
1020                 if (ci->id == BCMA_CHIP_ID_BCM5357 ||
1021                     ci->id == BCMA_CHIP_ID_BCM4749 ||
1022                     ci->id == BCMA_CHIP_ID_BCM53572)
1023                         fl_ctl = 0x2300e1;
1024                 bgmac_write(bgmac, BGMAC_FLOW_CTL_THRESH, fl_ctl);
1025                 bgmac_write(bgmac, BGMAC_PAUSE_CTL, 0x27fff);
1026                 break;
1027         }
1028
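            /* Derive the RXQ MDP field from the backplane clock. bp_clk is in
             * MHz, so a (hypothetical) 160 MHz backplane would give
             * mdp = 160 * 128 / 1000 - 3 = 17.
             */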
1029         rxq_ctl = bgmac_read(bgmac, BGMAC_RXQ_CTL);
1030         rxq_ctl &= ~BGMAC_RXQ_CTL_MDP_MASK;
1031         bp_clk = bcma_pmu_get_bus_clock(&bgmac->core->bus->drv_cc) / 1000000;
1032         mdp = (bp_clk * 128 / 1000) - 3;
1033         rxq_ctl |= (mdp << BGMAC_RXQ_CTL_MDP_SHIFT);
1034         bgmac_write(bgmac, BGMAC_RXQ_CTL, rxq_ctl);
1035 }
1036
1037 /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipinit */
1038 static void bgmac_chip_init(struct bgmac *bgmac, bool full_init)
1039 {
1040         struct bgmac_dma_ring *ring;
1041         int i;
1042
1043         /* 1 interrupt per received frame */
1044         bgmac_write(bgmac, BGMAC_INT_RECV_LAZY, 1 << BGMAC_IRL_FC_SHIFT);
1045
1046         /* Enable 802.3x tx flow control (honor received PAUSE frames) */
1047         bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_RPI, 0, true);
1048
1049         bgmac_set_rx_mode(bgmac->net_dev);
1050
1051         bgmac_write_mac_address(bgmac, bgmac->net_dev->dev_addr);
1052
1053         if (bgmac->loopback)
1054                 bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_ML, false);
1055         else
1056                 bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_ML, 0, false);
1057
1058         bgmac_write(bgmac, BGMAC_RXMAX_LENGTH, 32 + ETHER_MAX_LEN);
1059
1060         if (!bgmac->autoneg) {
1061                 bgmac_speed(bgmac, bgmac->speed);
1062                 bgmac_phy_force(bgmac);
1063         } else if (bgmac->speed) { /* if there is anything to advertise */
1064                 bgmac_phy_advertise(bgmac);
1065         }
1066
1067         if (full_init) {
1068                 bgmac_dma_init(bgmac);
1069                 if (1) /* FIXME: is there any case we don't want IRQs? */
1070                         bgmac_chip_intrs_on(bgmac);
1071         } else {
1072                 for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) {
1073                         ring = &bgmac->rx_ring[i];
1074                         bgmac_dma_rx_enable(bgmac, ring);
1075                 }
1076         }
1077
1078         bgmac_enable(bgmac);
1079 }
1080
1081 static irqreturn_t bgmac_interrupt(int irq, void *dev_id)
1082 {
1083         struct bgmac *bgmac = netdev_priv(dev_id);
1084
1085         u32 int_status = bgmac_read(bgmac, BGMAC_INT_STATUS);
1086         int_status &= bgmac->int_mask;
1087
1088         if (!int_status)
1089                 return IRQ_NONE;
1090
1091         /* Ack */
1092         bgmac_write(bgmac, BGMAC_INT_STATUS, int_status);
1093
1094         /* Disable new interrupts until handling existing ones */
1095         bgmac_chip_intrs_off(bgmac);
1096
1097         bgmac->int_status = int_status;
1098
1099         napi_schedule(&bgmac->napi);
1100
1101         return IRQ_HANDLED;
1102 }
1103
1104 static int bgmac_poll(struct napi_struct *napi, int weight)
1105 {
1106         struct bgmac *bgmac = container_of(napi, struct bgmac, napi);
1107         struct bgmac_dma_ring *ring;
1108         int handled = 0;
1109
1110         if (bgmac->int_status & BGMAC_IS_TX0) {
1111                 ring = &bgmac->tx_ring[0];
1112                 bgmac_dma_tx_free(bgmac, ring);
1113                 bgmac->int_status &= ~BGMAC_IS_TX0;
1114         }
1115
1116         if (bgmac->int_status & BGMAC_IS_RX) {
1117                 ring = &bgmac->rx_ring[0];
1118                 handled += bgmac_dma_rx_read(bgmac, ring, weight);
1119                 bgmac->int_status &= ~BGMAC_IS_RX;
1120         }
1121
1122         if (bgmac->int_status) {
1123                 bgmac_err(bgmac, "Unknown IRQs: 0x%08X\n", bgmac->int_status);
1124                 bgmac->int_status = 0;
1125         }
1126
1127         if (handled < weight)
1128                 napi_complete(napi);
1129
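            /* Re-enable the interrupts that were masked in bgmac_interrupt() */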
1130         bgmac_chip_intrs_on(bgmac);
1131
1132         return handled;
1133 }
1134
1135 /**************************************************
1136  * net_device_ops
1137  **************************************************/
1138
1139 static int bgmac_open(struct net_device *net_dev)
1140 {
1141         struct bgmac *bgmac = netdev_priv(net_dev);
1142         int err = 0;
1143
1144         bgmac_chip_reset(bgmac);
1145         /* The specs mention reclaiming rings here, but we do that in DMA init */
1146         bgmac_chip_init(bgmac, true);
1147
1148         err = request_irq(bgmac->core->irq, bgmac_interrupt, IRQF_SHARED,
1149                           KBUILD_MODNAME, net_dev);
1150         if (err < 0) {
1151                 bgmac_err(bgmac, "IRQ request error: %d!\n", err);
1152                 goto err_out;
1153         }
1154         napi_enable(&bgmac->napi);
1155
1156         netif_carrier_on(net_dev);
1157
1158 err_out:
1159         return err;
1160 }
1161
1162 static int bgmac_stop(struct net_device *net_dev)
1163 {
1164         struct bgmac *bgmac = netdev_priv(net_dev);
1165
1166         netif_carrier_off(net_dev);
1167
1168         napi_disable(&bgmac->napi);
1169         bgmac_chip_intrs_off(bgmac);
1170         free_irq(bgmac->core->irq, net_dev);
1171
1172         bgmac_chip_reset(bgmac);
1173
1174         return 0;
1175 }
1176
1177 static netdev_tx_t bgmac_start_xmit(struct sk_buff *skb,
1178                                     struct net_device *net_dev)
1179 {
1180         struct bgmac *bgmac = netdev_priv(net_dev);
1181         struct bgmac_dma_ring *ring;
1182
1183         /* No QOS support yet */
1184         ring = &bgmac->tx_ring[0];
1185         return bgmac_dma_tx_add(bgmac, ring, skb);
1186 }
1187
1188 static int bgmac_set_mac_address(struct net_device *net_dev, void *addr)
1189 {
1190         struct bgmac *bgmac = netdev_priv(net_dev);
1191         int ret;
1192
1193         ret = eth_prepare_mac_addr_change(net_dev, addr);
1194         if (ret < 0)
1195                 return ret;
1196         bgmac_write_mac_address(bgmac, (u8 *)addr);
1197         eth_commit_mac_addr_change(net_dev, addr);
1198         return 0;
1199 }
1200
1201 static int bgmac_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
1202 {
1203         struct bgmac *bgmac = netdev_priv(net_dev);
1204         struct mii_ioctl_data *data = if_mii(ifr);
1205
1206         switch (cmd) {
1207         case SIOCGMIIPHY:
1208                 data->phy_id = bgmac->phyaddr;
1209                 /* fallthru */
1210         case SIOCGMIIREG:
1211                 if (!netif_running(net_dev))
1212                         return -EAGAIN;
1213                 data->val_out = bgmac_phy_read(bgmac, data->phy_id,
1214                                                data->reg_num & 0x1f);
1215                 return 0;
1216         case SIOCSMIIREG:
1217                 if (!netif_running(net_dev))
1218                         return -EAGAIN;
1219                 bgmac_phy_write(bgmac, data->phy_id, data->reg_num & 0x1f,
1220                                 data->val_in);
1221                 return 0;
1222         default:
1223                 return -EOPNOTSUPP;
1224         }
1225 }
1226
1227 static const struct net_device_ops bgmac_netdev_ops = {
1228         .ndo_open               = bgmac_open,
1229         .ndo_stop               = bgmac_stop,
1230         .ndo_start_xmit         = bgmac_start_xmit,
1231         .ndo_set_rx_mode        = bgmac_set_rx_mode,
1232         .ndo_set_mac_address    = bgmac_set_mac_address,
1233         .ndo_validate_addr      = eth_validate_addr,
1234         .ndo_do_ioctl           = bgmac_ioctl,
1235 };
1236
1237 /**************************************************
1238  * ethtool_ops
1239  **************************************************/
1240
1241 static int bgmac_get_settings(struct net_device *net_dev,
1242                               struct ethtool_cmd *cmd)
1243 {
1244         struct bgmac *bgmac = netdev_priv(net_dev);
1245
1246         cmd->supported = SUPPORTED_10baseT_Half |
1247                          SUPPORTED_10baseT_Full |
1248                          SUPPORTED_100baseT_Half |
1249                          SUPPORTED_100baseT_Full |
1250                          SUPPORTED_1000baseT_Half |
1251                          SUPPORTED_1000baseT_Full |
1252                          SUPPORTED_Autoneg;
1253
1254         if (bgmac->autoneg) {
1255                 WARN_ON(cmd->advertising);
1256                 if (bgmac->full_duplex) {
1257                         if (bgmac->speed & BGMAC_SPEED_10)
1258                                 cmd->advertising |= ADVERTISED_10baseT_Full;
1259                         if (bgmac->speed & BGMAC_SPEED_100)
1260                                 cmd->advertising |= ADVERTISED_100baseT_Full;
1261                         if (bgmac->speed & BGMAC_SPEED_1000)
1262                                 cmd->advertising |= ADVERTISED_1000baseT_Full;
1263                 } else {
1264                         if (bgmac->speed & BGMAC_SPEED_10)
1265                                 cmd->advertising |= ADVERTISED_10baseT_Half;
1266                         if (bgmac->speed & BGMAC_SPEED_100)
1267                                 cmd->advertising |= ADVERTISED_100baseT_Half;
1268                         if (bgmac->speed & BGMAC_SPEED_1000)
1269                                 cmd->advertising |= ADVERTISED_1000baseT_Half;
1270                 }
1271         } else {
1272                 switch (bgmac->speed) {
1273                 case BGMAC_SPEED_10:
1274                         ethtool_cmd_speed_set(cmd, SPEED_10);
1275                         break;
1276                 case BGMAC_SPEED_100:
1277                         ethtool_cmd_speed_set(cmd, SPEED_100);
1278                         break;
1279                 case BGMAC_SPEED_1000:
1280                         ethtool_cmd_speed_set(cmd, SPEED_1000);
1281                         break;
1282                 }
1283         }
1284
1285         cmd->duplex = bgmac->full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
1286
1287         cmd->autoneg = bgmac->autoneg;
1288
1289         return 0;
1290 }
1291
1292 #if 0
1293 static int bgmac_set_settings(struct net_device *net_dev,
1294                               struct ethtool_cmd *cmd)
1295 {
1296         struct bgmac *bgmac = netdev_priv(net_dev);
1297
1298         return -1;
1299 }
1300 #endif
1301
1302 static void bgmac_get_drvinfo(struct net_device *net_dev,
1303                               struct ethtool_drvinfo *info)
1304 {
1305         strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
1306         strlcpy(info->bus_info, "BCMA", sizeof(info->bus_info));
1307 }
1308
1309 static const struct ethtool_ops bgmac_ethtool_ops = {
1310         .get_settings           = bgmac_get_settings,
1311         .get_drvinfo            = bgmac_get_drvinfo,
1312 };
1313
1314 /**************************************************
1315  * MII
1316  **************************************************/
1317
1318 static int bgmac_mii_read(struct mii_bus *bus, int mii_id, int regnum)
1319 {
1320         return bgmac_phy_read(bus->priv, mii_id, regnum);
1321 }
1322
1323 static int bgmac_mii_write(struct mii_bus *bus, int mii_id, int regnum,
1324                            u16 value)
1325 {
1326         return bgmac_phy_write(bus->priv, mii_id, regnum, value);
1327 }
1328
1329 static int bgmac_mii_register(struct bgmac *bgmac)
1330 {
1331         struct mii_bus *mii_bus;
1332         int i, err = 0;
1333
1334         mii_bus = mdiobus_alloc();
1335         if (!mii_bus)
1336                 return -ENOMEM;
1337
1338         mii_bus->name = "bgmac mii bus";
1339         sprintf(mii_bus->id, "%s-%d-%d", "bgmac", bgmac->core->bus->num,
1340                 bgmac->core->core_unit);
1341         mii_bus->priv = bgmac;
1342         mii_bus->read = bgmac_mii_read;
1343         mii_bus->write = bgmac_mii_write;
1344         mii_bus->parent = &bgmac->core->dev;
1345         mii_bus->phy_mask = ~(1 << bgmac->phyaddr);
1346
1347         mii_bus->irq = kmalloc_array(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL);
1348         if (!mii_bus->irq) {
1349                 err = -ENOMEM;
1350                 goto err_free_bus;
1351         }
1352         for (i = 0; i < PHY_MAX_ADDR; i++)
1353                 mii_bus->irq[i] = PHY_POLL;
1354
1355         err = mdiobus_register(mii_bus);
1356         if (err) {
1357                 bgmac_err(bgmac, "Registration of mii bus failed\n");
1358                 goto err_free_irq;
1359         }
1360
1361         bgmac->mii_bus = mii_bus;
1362
1363         return err;
1364
1365 err_free_irq:
1366         kfree(mii_bus->irq);
1367 err_free_bus:
1368         mdiobus_free(mii_bus);
1369         return err;
1370 }
1371
1372 static void bgmac_mii_unregister(struct bgmac *bgmac)
1373 {
1374         struct mii_bus *mii_bus = bgmac->mii_bus;
1375
1376         mdiobus_unregister(mii_bus);
1377         kfree(mii_bus->irq);
1378         mdiobus_free(mii_bus);
1379 }
1380
1381 /**************************************************
1382  * BCMA bus ops
1383  **************************************************/
1384
1385 /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipattach */
1386 static int bgmac_probe(struct bcma_device *core)
1387 {
1388         struct net_device *net_dev;
1389         struct bgmac *bgmac;
1390         struct ssb_sprom *sprom = &core->bus->sprom;
1391         u8 *mac = core->core_unit ? sprom->et1mac : sprom->et0mac;
1392         int err;
1393
1394         /* We don't support 2nd, 3rd, ... units; the SPROM has to be adjusted */
1395         if (core->core_unit > 1) {
1396                 pr_err("Unsupported core_unit %d\n", core->core_unit);
1397                 return -ENOTSUPP;
1398         }
1399
1400         if (!is_valid_ether_addr(mac)) {
1401                 dev_err(&core->dev, "Invalid MAC addr: %pM\n", mac);
1402                 eth_random_addr(mac);
1403                 dev_warn(&core->dev, "Using random MAC: %pM\n", mac);
1404         }
1405
1406         /* Allocation and references */
1407         net_dev = alloc_etherdev(sizeof(*bgmac));
1408         if (!net_dev)
1409                 return -ENOMEM;
1410         net_dev->netdev_ops = &bgmac_netdev_ops;
1411         net_dev->irq = core->irq;
1412         SET_ETHTOOL_OPS(net_dev, &bgmac_ethtool_ops);
1413         bgmac = netdev_priv(net_dev);
1414         bgmac->net_dev = net_dev;
1415         bgmac->core = core;
1416         bcma_set_drvdata(core, bgmac);
1417
1418         /* Defaults */
1419         bgmac->autoneg = true;
1420         bgmac->full_duplex = true;
1421         bgmac->speed = BGMAC_SPEED_10 | BGMAC_SPEED_100 | BGMAC_SPEED_1000;
1422         memcpy(bgmac->net_dev->dev_addr, mac, ETH_ALEN);
1423
1424         /* On BCM4706 we need the GMAC common core to access the PHY */
1425         if (core->id.id == BCMA_CORE_4706_MAC_GBIT &&
1426             !core->bus->drv_gmac_cmn.core) {
1427                 bgmac_err(bgmac, "GMAC CMN core not found (required for BCM4706)\n");
1428                 err = -ENODEV;
1429                 goto err_netdev_free;
1430         }
1431         bgmac->cmn = core->bus->drv_gmac_cmn.core;
1432
1433         bgmac->phyaddr = core->core_unit ? sprom->et1phyaddr :
1434                          sprom->et0phyaddr;
1435         bgmac->phyaddr &= BGMAC_PHY_MASK;
1436         if (bgmac->phyaddr == BGMAC_PHY_MASK) {
1437                 bgmac_err(bgmac, "No PHY found\n");
1438                 err = -ENODEV;
1439                 goto err_netdev_free;
1440         }
1441         bgmac_info(bgmac, "Found PHY addr: %d%s\n", bgmac->phyaddr,
1442                    bgmac->phyaddr == BGMAC_PHY_NOREGS ? " (NOREGS)" : "");
1443
1444         if (core->bus->hosttype == BCMA_HOSTTYPE_PCI) {
1445                 bgmac_err(bgmac, "PCI setup not implemented\n");
1446                 err = -ENOTSUPP;
1447                 goto err_netdev_free;
1448         }
1449
1450         bgmac_chip_reset(bgmac);
1451
1452         err = bgmac_dma_alloc(bgmac);
1453         if (err) {
1454                 bgmac_err(bgmac, "Unable to alloc memory for DMA\n");
1455                 goto err_netdev_free;
1456         }
1457
1458         bgmac->int_mask = BGMAC_IS_ERRMASK | BGMAC_IS_RX | BGMAC_IS_TX_MASK;
1459         if (bcm47xx_nvram_getenv("et0_no_txint", NULL, 0) == 0)
1460                 bgmac->int_mask &= ~BGMAC_IS_TX_MASK;
1461
1462         /* TODO: reset the external phy. Specs are needed */
1463         bgmac_phy_reset(bgmac);
1464
1465         bgmac->has_robosw = !!(core->bus->sprom.boardflags_lo &
1466                                BGMAC_BFL_ENETROBO);
1467         if (bgmac->has_robosw)
1468                 bgmac_warn(bgmac, "Support for Roboswitch not implemented\n");
1469
1470         if (core->bus->sprom.boardflags_lo & BGMAC_BFL_ENETADM)
1471                 bgmac_warn(bgmac, "Support for ADMtek ethernet switch not implemented\n");
1472
1473         err = bgmac_mii_register(bgmac);
1474         if (err) {
1475                 bgmac_err(bgmac, "Cannot register MDIO\n");
1476                 err = -ENOTSUPP;
1477                 goto err_dma_free;
1478         }
1479
1480         err = register_netdev(bgmac->net_dev);
1481         if (err) {
1482                 bgmac_err(bgmac, "Cannot register net device\n");
1483                 err = -ENOTSUPP;
1484                 goto err_mii_unregister;
1485         }
1486
1487         netif_carrier_off(net_dev);
1488
1489         netif_napi_add(net_dev, &bgmac->napi, bgmac_poll, BGMAC_WEIGHT);
1490
1491         return 0;
1492
1493 err_mii_unregister:
1494         bgmac_mii_unregister(bgmac);
1495 err_dma_free:
1496         bgmac_dma_free(bgmac);
1497
1498 err_netdev_free:
1499         bcma_set_drvdata(core, NULL);
1500         free_netdev(net_dev);
1501
1502         return err;
1503 }
1504
1505 static void bgmac_remove(struct bcma_device *core)
1506 {
1507         struct bgmac *bgmac = bcma_get_drvdata(core);
1508
1509         netif_napi_del(&bgmac->napi);
1510         unregister_netdev(bgmac->net_dev);
1511         bgmac_mii_unregister(bgmac);
1512         bgmac_dma_free(bgmac);
1513         bcma_set_drvdata(core, NULL);
1514         free_netdev(bgmac->net_dev);
1515 }
1516
1517 static struct bcma_driver bgmac_bcma_driver = {
1518         .name           = KBUILD_MODNAME,
1519         .id_table       = bgmac_bcma_tbl,
1520         .probe          = bgmac_probe,
1521         .remove         = bgmac_remove,
1522 };
1523
1524 static int __init bgmac_init(void)
1525 {
1526         int err;
1527
1528         err = bcma_driver_register(&bgmac_bcma_driver);
1529         if (err)
1530                 return err;
1531         pr_info("Broadcom 47xx GBit MAC driver loaded\n");
1532
1533         return 0;
1534 }
1535
1536 static void __exit bgmac_exit(void)
1537 {
1538         bcma_driver_unregister(&bgmac_bcma_driver);
1539 }
1540
1541 module_init(bgmac_init)
1542 module_exit(bgmac_exit)
1543
1544 MODULE_AUTHOR("Rafał Miłecki");
1545 MODULE_LICENSE("GPL");