drivers/net/ethernet/ti/netcp_core.c
/*
 * Keystone NetCP Core driver
 *
 * Copyright (C) 2014 Texas Instruments Incorporated
 * Authors:     Sandeep Nair <sandeep_n@ti.com>
 *              Sandeep Paulraj <s-paulraj@ti.com>
 *              Cyril Chemparathy <cyril@ti.com>
 *              Santosh Shilimkar <santosh.shilimkar@ti.com>
 *              Murali Karicheri <m-karicheri2@ti.com>
 *              Wingman Kwok <w-kwok2@ti.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/if_vlan.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <linux/soc/ti/knav_qmss.h>
#include <linux/soc/ti/knav_dma.h>

#include "netcp.h"

#define NETCP_SOP_OFFSET        (NET_IP_ALIGN + NET_SKB_PAD)
#define NETCP_NAPI_WEIGHT       64
#define NETCP_TX_TIMEOUT        (5 * HZ)
#define NETCP_PACKET_SIZE       (ETH_FRAME_LEN + ETH_FCS_LEN)
#define NETCP_MIN_PACKET_SIZE   ETH_ZLEN
#define NETCP_MAX_MCAST_ADDR    16

#define NETCP_EFUSE_REG_INDEX   0

#define NETCP_MOD_PROBE_SKIPPED 1
#define NETCP_MOD_PROBE_FAILED  2

#define NETCP_DEBUG (NETIF_MSG_HW       | NETIF_MSG_WOL         |       \
                    NETIF_MSG_DRV       | NETIF_MSG_LINK        |       \
                    NETIF_MSG_IFUP      | NETIF_MSG_INTR        |       \
                    NETIF_MSG_PROBE     | NETIF_MSG_TIMER       |       \
                    NETIF_MSG_IFDOWN    | NETIF_MSG_RX_ERR      |       \
                    NETIF_MSG_TX_ERR    | NETIF_MSG_TX_DONE     |       \
                    NETIF_MSG_PKTDATA   | NETIF_MSG_TX_QUEUED   |       \
                    NETIF_MSG_RX_STATUS)

#define NETCP_EFUSE_ADDR_SWAP   2

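/* Thin wrappers over knav_queue_device_control(); the knav QMSS layer
 * multiplexes these queue operations through a single control call.
 */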
#define knav_queue_get_id(q)    knav_queue_device_control(q, \
                                KNAV_QUEUE_GET_ID, (unsigned long)NULL)

#define knav_queue_enable_notify(q) knav_queue_device_control(q,        \
                                        KNAV_QUEUE_ENABLE_NOTIFY,       \
                                        (unsigned long)NULL)

#define knav_queue_disable_notify(q) knav_queue_device_control(q,       \
                                        KNAV_QUEUE_DISABLE_NOTIFY,      \
                                        (unsigned long)NULL)

#define knav_queue_get_count(q) knav_queue_device_control(q, \
                                KNAV_QUEUE_GET_COUNT, (unsigned long)NULL)

#define for_each_netcp_module(module)                   \
        list_for_each_entry(module, &netcp_modules, module_list)

#define for_each_netcp_device_module(netcp_device, inst_modpriv) \
        list_for_each_entry(inst_modpriv, \
                &((netcp_device)->modpriv_head), inst_list)

#define for_each_module(netcp, intf_modpriv)                    \
        list_for_each_entry(intf_modpriv, &netcp->module_head, intf_list)

/* Module management structures */
struct netcp_device {
        struct list_head        device_list;
        struct list_head        interface_head;
        struct list_head        modpriv_head;
        struct device           *device;
};

struct netcp_inst_modpriv {
        struct netcp_device     *netcp_device;
        struct netcp_module     *netcp_module;
        struct list_head        inst_list;
        void                    *module_priv;
};

struct netcp_intf_modpriv {
        struct netcp_intf       *netcp_priv;
        struct netcp_module     *netcp_module;
        struct list_head        intf_list;
        void                    *module_priv;
};

static LIST_HEAD(netcp_devices);
static LIST_HEAD(netcp_modules);
static DEFINE_MUTEX(netcp_modules_lock);

static int netcp_debug_level = -1;
module_param(netcp_debug_level, int, 0);
MODULE_PARM_DESC(netcp_debug_level, "Netcp debug level (NETIF_MSG bits) (0=none,...,16=all)");

/* Helper functions - Get/Set */
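/* Note: these helpers stash host pointers in the descriptor's 32-bit pad
 * words (see get_pad_info()/set_pad_info() and the (u32 *)&ptr casts at
 * their call sites), so the driver as written assumes a 32-bit kernel.
 */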
static void get_pkt_info(u32 *buff, u32 *buff_len, u32 *ndesc,
                         struct knav_dma_desc *desc)
{
        *buff_len = desc->buff_len;
        *buff = desc->buff;
        *ndesc = desc->next_desc;
}

static void get_pad_info(u32 *pad0, u32 *pad1, struct knav_dma_desc *desc)
{
        *pad0 = desc->pad[0];
        *pad1 = desc->pad[1];
}

static void get_org_pkt_info(u32 *buff, u32 *buff_len,
                             struct knav_dma_desc *desc)
{
        *buff = desc->orig_buff;
        *buff_len = desc->orig_len;
}

static void get_words(u32 *words, int num_words, u32 *desc)
{
        int i;

        for (i = 0; i < num_words; i++)
                words[i] = desc[i];
}

static void set_pkt_info(u32 buff, u32 buff_len, u32 ndesc,
                         struct knav_dma_desc *desc)
{
        desc->buff_len = buff_len;
        desc->buff = buff;
        desc->next_desc = ndesc;
}

static void set_desc_info(u32 desc_info, u32 pkt_info,
                          struct knav_dma_desc *desc)
{
        desc->desc_info = desc_info;
        desc->packet_info = pkt_info;
}

static void set_pad_info(u32 pad0, u32 pad1, struct knav_dma_desc *desc)
{
        desc->pad[0] = pad0;
        desc->pad[1] = pad1;
}

static void set_org_pkt_info(u32 buff, u32 buff_len,
                             struct knav_dma_desc *desc)
{
        desc->orig_buff = buff;
        desc->orig_len = buff_len;
}

static void set_words(u32 *words, int num_words, u32 *desc)
{
        int i;

        for (i = 0; i < num_words; i++)
                desc[i] = words[i];
}

/* Read the e-fuse value as 32 bit values to be endian independent */
static int emac_arch_get_mac_addr(char *x, void __iomem *efuse_mac, u32 swap)
{
        unsigned int addr0, addr1;

        addr1 = readl(efuse_mac + 4);
        addr0 = readl(efuse_mac);

        switch (swap) {
        case NETCP_EFUSE_ADDR_SWAP:
                addr0 = addr1;
                addr1 = readl(efuse_mac);
                break;
        default:
                break;
        }

        x[0] = (addr1 & 0x0000ff00) >> 8;
        x[1] = addr1 & 0x000000ff;
        x[2] = (addr0 & 0xff000000) >> 24;
        x[3] = (addr0 & 0x00ff0000) >> 16;
        x[4] = (addr0 & 0x0000ff00) >> 8;
        x[5] = addr0 & 0x000000ff;

        return 0;
}

static const char *netcp_node_name(struct device_node *node)
{
        const char *name;

        if (of_property_read_string(node, "label", &name) < 0)
                name = node->name;
        if (!name)
                name = "unknown";
        return name;
}

/* Module management routines */
static int netcp_register_interface(struct netcp_intf *netcp)
{
        int ret;

        ret = register_netdev(netcp->ndev);
        if (!ret)
                netcp->netdev_registered = true;
        return ret;
}

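/* Probe one module against one NetCP device instance: look the module up
 * in the device's "netcp-devices" DT sub-tree, call its probe(), and, once
 * a primary module has been registered, attach the module to every
 * interface of the device (registering the net_device on first use).
 */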
static int netcp_module_probe(struct netcp_device *netcp_device,
                              struct netcp_module *module)
{
        struct device *dev = netcp_device->device;
        struct device_node *devices, *interface, *node = dev->of_node;
        struct device_node *child;
        struct netcp_inst_modpriv *inst_modpriv;
        struct netcp_intf *netcp_intf;
        struct netcp_module *tmp;
        bool primary_module_registered = false;
        int ret;

        /* Find this module in the sub-tree for this device */
        devices = of_get_child_by_name(node, "netcp-devices");
        if (!devices) {
                dev_err(dev, "could not find netcp-devices node\n");
                return NETCP_MOD_PROBE_SKIPPED;
        }

        for_each_available_child_of_node(devices, child) {
                const char *name = netcp_node_name(child);

                if (!strcasecmp(module->name, name))
                        break;
        }

        of_node_put(devices);
        /* If module not used for this device, skip it */
        if (!child) {
                dev_warn(dev, "module(%s) not used for device\n", module->name);
                return NETCP_MOD_PROBE_SKIPPED;
        }

        inst_modpriv = devm_kzalloc(dev, sizeof(*inst_modpriv), GFP_KERNEL);
        if (!inst_modpriv) {
                of_node_put(child);
                return -ENOMEM;
        }

        inst_modpriv->netcp_device = netcp_device;
        inst_modpriv->netcp_module = module;
        list_add_tail(&inst_modpriv->inst_list, &netcp_device->modpriv_head);

        ret = module->probe(netcp_device, dev, child,
                            &inst_modpriv->module_priv);
        of_node_put(child);
        if (ret) {
                dev_err(dev, "Probe of module(%s) failed with %d\n",
                        module->name, ret);
                list_del(&inst_modpriv->inst_list);
                devm_kfree(dev, inst_modpriv);
                return NETCP_MOD_PROBE_FAILED;
        }

        /* Attach modules only if the primary module is probed */
        for_each_netcp_module(tmp) {
                if (tmp->primary)
                        primary_module_registered = true;
        }

        if (!primary_module_registered)
                return 0;

        /* Attach module to interfaces */
        list_for_each_entry(netcp_intf, &netcp_device->interface_head,
                            interface_list) {
                struct netcp_intf_modpriv *intf_modpriv;

                /* If interface not registered then register now */
                if (!netcp_intf->netdev_registered)
                        ret = netcp_register_interface(netcp_intf);

                if (ret)
                        return -ENODEV;

                intf_modpriv = devm_kzalloc(dev, sizeof(*intf_modpriv),
                                            GFP_KERNEL);
                if (!intf_modpriv)
                        return -ENOMEM;

                interface = of_parse_phandle(netcp_intf->node_interface,
                                             module->name, 0);

                intf_modpriv->netcp_priv = netcp_intf;
                intf_modpriv->netcp_module = module;
                list_add_tail(&intf_modpriv->intf_list,
                              &netcp_intf->module_head);

                ret = module->attach(inst_modpriv->module_priv,
                                     netcp_intf->ndev, interface,
                                     &intf_modpriv->module_priv);
                of_node_put(interface);
                if (ret) {
                        dev_dbg(dev, "Attach of module %s declined with %d\n",
                                module->name, ret);
                        list_del(&intf_modpriv->intf_list);
                        devm_kfree(dev, intf_modpriv);
                        continue;
                }
        }
        return 0;
}

int netcp_register_module(struct netcp_module *module)
{
        struct netcp_device *netcp_device;
        struct netcp_module *tmp;
        int ret;

        if (!module->name) {
                WARN(1, "error registering netcp module: no name\n");
                return -EINVAL;
        }

        if (!module->probe) {
                WARN(1, "error registering netcp module: no probe\n");
                return -EINVAL;
        }

        mutex_lock(&netcp_modules_lock);

        for_each_netcp_module(tmp) {
                if (!strcasecmp(tmp->name, module->name)) {
                        mutex_unlock(&netcp_modules_lock);
                        return -EEXIST;
                }
        }
        list_add_tail(&module->module_list, &netcp_modules);

        list_for_each_entry(netcp_device, &netcp_devices, device_list) {
                ret = netcp_module_probe(netcp_device, module);
                if (ret < 0)
                        goto fail;
        }

        mutex_unlock(&netcp_modules_lock);
        return 0;

fail:
        mutex_unlock(&netcp_modules_lock);
        netcp_unregister_module(module);
        return ret;
}
EXPORT_SYMBOL_GPL(netcp_register_module);

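/* A minimal, hypothetical user of this API: a sub-module (e.g. an Ethernet
 * switch driver) fills in a struct netcp_module and registers it from its
 * module init, roughly:
 *
 *      static struct netcp_module my_module = {
 *              .name    = "my-module",
 *              .probe   = my_probe,
 *              .attach  = my_attach,
 *              .release = my_release,
 *              .remove  = my_remove,
 *      };
 *
 *      ret = netcp_register_module(&my_module);
 *
 * The names above are illustrative only and not part of this driver.
 */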
static void netcp_release_module(struct netcp_device *netcp_device,
                                 struct netcp_module *module)
{
        struct netcp_inst_modpriv *inst_modpriv, *inst_tmp;
        struct netcp_intf *netcp_intf, *netcp_tmp;
        struct device *dev = netcp_device->device;

        /* Release the module from each interface */
        list_for_each_entry_safe(netcp_intf, netcp_tmp,
                                 &netcp_device->interface_head,
                                 interface_list) {
                struct netcp_intf_modpriv *intf_modpriv, *intf_tmp;

                list_for_each_entry_safe(intf_modpriv, intf_tmp,
                                         &netcp_intf->module_head,
                                         intf_list) {
                        if (intf_modpriv->netcp_module == module) {
                                module->release(intf_modpriv->module_priv);
                                list_del(&intf_modpriv->intf_list);
                                devm_kfree(dev, intf_modpriv);
                                break;
                        }
                }
        }

        /* Remove the module from each instance */
        list_for_each_entry_safe(inst_modpriv, inst_tmp,
                                 &netcp_device->modpriv_head, inst_list) {
                if (inst_modpriv->netcp_module == module) {
                        module->remove(netcp_device,
                                       inst_modpriv->module_priv);
                        list_del(&inst_modpriv->inst_list);
                        devm_kfree(dev, inst_modpriv);
                        break;
                }
        }
}

void netcp_unregister_module(struct netcp_module *module)
{
        struct netcp_device *netcp_device;
        struct netcp_module *module_tmp;

        mutex_lock(&netcp_modules_lock);

        list_for_each_entry(netcp_device, &netcp_devices, device_list) {
                netcp_release_module(netcp_device, module);
        }

        /* Remove the module from the module list */
        for_each_netcp_module(module_tmp) {
                if (module == module_tmp) {
                        list_del(&module->module_list);
                        break;
                }
        }

        mutex_unlock(&netcp_modules_lock);
}
EXPORT_SYMBOL_GPL(netcp_unregister_module);

void *netcp_module_get_intf_data(struct netcp_module *module,
                                 struct netcp_intf *intf)
{
        struct netcp_intf_modpriv *intf_modpriv;

        list_for_each_entry(intf_modpriv, &intf->module_head, intf_list)
                if (intf_modpriv->netcp_module == module)
                        return intf_modpriv->module_priv;
        return NULL;
}
EXPORT_SYMBOL_GPL(netcp_module_get_intf_data);

/* Module TX and RX Hook management */
struct netcp_hook_list {
        struct list_head         list;
        netcp_hook_rtn          *hook_rtn;
        void                    *hook_data;
        int                      order;
};

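/* Hook lists are kept sorted by ascending "order": a new hook is linked in
 * front of the first entry with a higher order (if the scan falls off the
 * end, "next" aliases the list head and the insert degenerates into a tail
 * add).  RX hooks run in this order on receive, TX hooks on transmit.
 */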
int netcp_register_txhook(struct netcp_intf *netcp_priv, int order,
                          netcp_hook_rtn *hook_rtn, void *hook_data)
{
        struct netcp_hook_list *entry;
        struct netcp_hook_list *next;
        unsigned long flags;

        entry = devm_kzalloc(netcp_priv->dev, sizeof(*entry), GFP_KERNEL);
        if (!entry)
                return -ENOMEM;

        entry->hook_rtn  = hook_rtn;
        entry->hook_data = hook_data;
        entry->order     = order;

        spin_lock_irqsave(&netcp_priv->lock, flags);
        list_for_each_entry(next, &netcp_priv->txhook_list_head, list) {
                if (next->order > order)
                        break;
        }
        __list_add(&entry->list, next->list.prev, &next->list);
        spin_unlock_irqrestore(&netcp_priv->lock, flags);

        return 0;
}
EXPORT_SYMBOL_GPL(netcp_register_txhook);

int netcp_unregister_txhook(struct netcp_intf *netcp_priv, int order,
                            netcp_hook_rtn *hook_rtn, void *hook_data)
{
        struct netcp_hook_list *next, *n;
        unsigned long flags;

        spin_lock_irqsave(&netcp_priv->lock, flags);
        list_for_each_entry_safe(next, n, &netcp_priv->txhook_list_head, list) {
                if ((next->order     == order) &&
                    (next->hook_rtn  == hook_rtn) &&
                    (next->hook_data == hook_data)) {
                        list_del(&next->list);
                        spin_unlock_irqrestore(&netcp_priv->lock, flags);
                        devm_kfree(netcp_priv->dev, next);
                        return 0;
                }
        }
        spin_unlock_irqrestore(&netcp_priv->lock, flags);
        return -ENOENT;
}
EXPORT_SYMBOL_GPL(netcp_unregister_txhook);

int netcp_register_rxhook(struct netcp_intf *netcp_priv, int order,
                          netcp_hook_rtn *hook_rtn, void *hook_data)
{
        struct netcp_hook_list *entry;
        struct netcp_hook_list *next;
        unsigned long flags;

        entry = devm_kzalloc(netcp_priv->dev, sizeof(*entry), GFP_KERNEL);
        if (!entry)
                return -ENOMEM;

        entry->hook_rtn  = hook_rtn;
        entry->hook_data = hook_data;
        entry->order     = order;

        spin_lock_irqsave(&netcp_priv->lock, flags);
        list_for_each_entry(next, &netcp_priv->rxhook_list_head, list) {
                if (next->order > order)
                        break;
        }
        __list_add(&entry->list, next->list.prev, &next->list);
        spin_unlock_irqrestore(&netcp_priv->lock, flags);

        return 0;
}

int netcp_unregister_rxhook(struct netcp_intf *netcp_priv, int order,
                            netcp_hook_rtn *hook_rtn, void *hook_data)
{
        struct netcp_hook_list *next, *n;
        unsigned long flags;

        spin_lock_irqsave(&netcp_priv->lock, flags);
        list_for_each_entry_safe(next, n, &netcp_priv->rxhook_list_head, list) {
                if ((next->order     == order) &&
                    (next->hook_rtn  == hook_rtn) &&
                    (next->hook_data == hook_data)) {
                        list_del(&next->list);
                        spin_unlock_irqrestore(&netcp_priv->lock, flags);
                        devm_kfree(netcp_priv->dev, next);
                        return 0;
                }
        }
        spin_unlock_irqrestore(&netcp_priv->lock, flags);

        return -ENOENT;
}

static void netcp_frag_free(bool is_frag, void *ptr)
{
        if (is_frag)
                skb_free_frag(ptr);
        else
                kfree(ptr);
}

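/* Walk an Rx descriptor chain, unmapping and freeing each fragment page
 * and its descriptor, then free the primary buffer and the head
 * descriptor.  Buffer pointers travel in the descriptors' pad words.
 */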
static void netcp_free_rx_desc_chain(struct netcp_intf *netcp,
                                     struct knav_dma_desc *desc)
{
        struct knav_dma_desc *ndesc;
        dma_addr_t dma_desc, dma_buf;
        unsigned int buf_len, dma_sz = sizeof(*ndesc);
        void *buf_ptr;
        u32 tmp;

        get_words(&dma_desc, 1, &desc->next_desc);

        while (dma_desc) {
                ndesc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz);
                if (unlikely(!ndesc)) {
                        dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n");
                        break;
                }
                get_pkt_info(&dma_buf, &tmp, &dma_desc, ndesc);
                get_pad_info((u32 *)&buf_ptr, &tmp, ndesc);
                dma_unmap_page(netcp->dev, dma_buf, PAGE_SIZE, DMA_FROM_DEVICE);
                __free_page(buf_ptr);
                /* Put back the chained descriptor we just unmapped, not the
                 * head descriptor, which is freed exactly once below.
                 */
                knav_pool_desc_put(netcp->rx_pool, ndesc);
        }

        get_pad_info((u32 *)&buf_ptr, &buf_len, desc);
        if (buf_ptr)
                netcp_frag_free(buf_len <= PAGE_SIZE, buf_ptr);
        knav_pool_desc_put(netcp->rx_pool, desc);
}

static void netcp_empty_rx_queue(struct netcp_intf *netcp)
{
        struct knav_dma_desc *desc;
        unsigned int dma_sz;
        dma_addr_t dma;

        for (; ;) {
                dma = knav_queue_pop(netcp->rx_queue, &dma_sz);
                if (!dma)
                        break;

                desc = knav_pool_desc_unmap(netcp->rx_pool, dma, dma_sz);
                if (unlikely(!desc)) {
                        dev_err(netcp->ndev_dev, "%s: failed to unmap Rx desc\n",
                                __func__);
                        netcp->ndev->stats.rx_errors++;
                        continue;
                }
                netcp_free_rx_desc_chain(netcp, desc);
                netcp->ndev->stats.rx_dropped++;
        }
}

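/* Pull one completed packet off the Rx queue: build an skb around the
 * primary buffer, append any chained page fragments, strip the FCS, run
 * the registered RX hooks in order, and hand the skb to the stack.
 * Returns non-zero only when the queue is empty.
 */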
static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
{
        unsigned int dma_sz, buf_len, org_buf_len;
        struct knav_dma_desc *desc, *ndesc;
        unsigned int pkt_sz, accum_sz;
        struct netcp_hook_list *rx_hook;
        dma_addr_t dma_desc, dma_buff;
        struct netcp_packet p_info;
        struct sk_buff *skb;
        void *org_buf_ptr;
        u32 tmp;

        dma_desc = knav_queue_pop(netcp->rx_queue, &dma_sz);
        if (!dma_desc)
                return -1;

        desc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz);
        if (unlikely(!desc)) {
                dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n");
                return 0;
        }

        get_pkt_info(&dma_buff, &buf_len, &dma_desc, desc);
        get_pad_info((u32 *)&org_buf_ptr, &org_buf_len, desc);

        if (unlikely(!org_buf_ptr)) {
                dev_err(netcp->ndev_dev, "NULL bufptr in desc\n");
                goto free_desc;
        }

        /* The overall packet length is carried in the first descriptor's
         * desc_info word; read it so the size cross-check below is
         * meaningful.
         */
        pkt_sz = desc->desc_info & KNAV_DMA_DESC_PKT_LEN_MASK;
        accum_sz = buf_len;
        dma_unmap_single(netcp->dev, dma_buff, buf_len, DMA_FROM_DEVICE);

        /* Build a new sk_buff for the primary buffer */
        skb = build_skb(org_buf_ptr, org_buf_len);
        if (unlikely(!skb)) {
                dev_err(netcp->ndev_dev, "build_skb() failed\n");
                goto free_desc;
        }

        /* update data, tail and len */
        skb_reserve(skb, NETCP_SOP_OFFSET);
        __skb_put(skb, buf_len);

        /* Fill in the page fragment list */
        while (dma_desc) {
                struct page *page;

                ndesc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz);
                if (unlikely(!ndesc)) {
                        dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n");
                        goto free_desc;
                }

                get_pkt_info(&dma_buff, &buf_len, &dma_desc, ndesc);
                get_pad_info((u32 *)&page, &tmp, ndesc);

                if (likely(dma_buff && buf_len && page)) {
                        dma_unmap_page(netcp->dev, dma_buff, PAGE_SIZE,
                                       DMA_FROM_DEVICE);
                } else {
                        dev_err(netcp->ndev_dev, "Bad Rx desc dma_buff(%p), len(%d), page(%p)\n",
                                (void *)dma_buff, buf_len, page);
                        goto free_desc;
                }

                skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
                                offset_in_page(dma_buff), buf_len, PAGE_SIZE);
                accum_sz += buf_len;

                /* Free the descriptor */
                knav_pool_desc_put(netcp->rx_pool, ndesc);
        }

        /* Free the primary descriptor */
        knav_pool_desc_put(netcp->rx_pool, desc);

        /* check for packet len and warn */
        if (unlikely(pkt_sz != accum_sz))
                dev_dbg(netcp->ndev_dev, "mismatch in packet size(%d) & sum of fragments(%d)\n",
                        pkt_sz, accum_sz);

        /* Remove ethernet FCS from the packet */
        __pskb_trim(skb, skb->len - ETH_FCS_LEN);

        /* Call each of the RX hooks */
        p_info.skb = skb;
        p_info.rxtstamp_complete = false;
        list_for_each_entry(rx_hook, &netcp->rxhook_list_head, list) {
                int ret;

                ret = rx_hook->hook_rtn(rx_hook->order, rx_hook->hook_data,
                                        &p_info);
                if (unlikely(ret)) {
                        dev_err(netcp->ndev_dev, "RX hook %d failed: %d\n",
                                rx_hook->order, ret);
                        netcp->ndev->stats.rx_errors++;
                        dev_kfree_skb(skb);
                        return 0;
                }
        }

        netcp->ndev->stats.rx_packets++;
        netcp->ndev->stats.rx_bytes += skb->len;

        /* push skb up the stack */
        skb->protocol = eth_type_trans(skb, netcp->ndev);
        netif_receive_skb(skb);
        return 0;

free_desc:
        netcp_free_rx_desc_chain(netcp, desc);
        netcp->ndev->stats.rx_errors++;
        return 0;
}

static int netcp_process_rx_packets(struct netcp_intf *netcp,
                                    unsigned int budget)
{
        int i;

        for (i = 0; (i < budget) && !netcp_process_one_rx_packet(netcp); i++)
                ;
        return i;
}

/* Release descriptors and attached buffers from Rx FDQ */
static void netcp_free_rx_buf(struct netcp_intf *netcp, int fdq)
{
        struct knav_dma_desc *desc;
        unsigned int buf_len, dma_sz;
        dma_addr_t dma;
        void *buf_ptr;
        u32 tmp;

        while ((dma = knav_queue_pop(netcp->rx_fdq[fdq], &dma_sz))) {
                desc = knav_pool_desc_unmap(netcp->rx_pool, dma, dma_sz);
                if (unlikely(!desc)) {
                        dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n");
                        continue;
                }

                get_org_pkt_info(&dma, &buf_len, desc);
                get_pad_info((u32 *)&buf_ptr, &tmp, desc);

                if (unlikely(!dma)) {
                        dev_err(netcp->ndev_dev, "NULL orig_buff in desc\n");
                        knav_pool_desc_put(netcp->rx_pool, desc);
                        continue;
                }

                if (unlikely(!buf_ptr)) {
                        dev_err(netcp->ndev_dev, "NULL bufptr in desc\n");
                        knav_pool_desc_put(netcp->rx_pool, desc);
                        continue;
                }

                if (fdq == 0) {
                        dma_unmap_single(netcp->dev, dma, buf_len,
                                         DMA_FROM_DEVICE);
                        netcp_frag_free((buf_len <= PAGE_SIZE), buf_ptr);
                } else {
                        dma_unmap_page(netcp->dev, dma, buf_len,
                                       DMA_FROM_DEVICE);
                        __free_page(buf_ptr);
                }

                knav_pool_desc_put(netcp->rx_pool, desc);
        }
}

static void netcp_rxpool_free(struct netcp_intf *netcp)
{
        int i;

        for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN &&
             !IS_ERR_OR_NULL(netcp->rx_fdq[i]); i++)
                netcp_free_rx_buf(netcp, i);

        if (knav_pool_count(netcp->rx_pool) != netcp->rx_pool_size)
                dev_err(netcp->ndev_dev, "Lost Rx (%d) descriptors\n",
                        netcp->rx_pool_size - knav_pool_count(netcp->rx_pool));

        knav_pool_destroy(netcp->rx_pool);
        netcp->rx_pool = NULL;
}

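/* Attach a fresh buffer to a pool descriptor and push it onto an Rx FDQ.
 * FDQ 0 holds primary buffers (skb-sized fragments from
 * netdev_alloc_frag()); the remaining FDQs hold full-page secondary
 * buffers used for chained fragments.
 */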
static void netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
{
        struct knav_dma_desc *hwdesc;
        unsigned int buf_len, dma_sz;
        u32 desc_info, pkt_info;
        struct page *page;
        dma_addr_t dma;
        void *bufptr;
        u32 pad[2];

        /* Allocate descriptor */
        hwdesc = knav_pool_desc_get(netcp->rx_pool);
        if (IS_ERR_OR_NULL(hwdesc)) {
                dev_dbg(netcp->ndev_dev, "out of rx pool desc\n");
                return;
        }

        if (likely(fdq == 0)) {
                unsigned int primary_buf_len;
                /* Allocate a primary receive queue entry */
                buf_len = NETCP_PACKET_SIZE + NETCP_SOP_OFFSET;
                primary_buf_len = SKB_DATA_ALIGN(buf_len) +
                                SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

                bufptr = netdev_alloc_frag(primary_buf_len);
                pad[1] = primary_buf_len;

                if (unlikely(!bufptr)) {
                        dev_warn_ratelimited(netcp->ndev_dev,
                                             "Primary RX buffer alloc failed\n");
                        goto fail;
                }
                /* Rx buffers are written by the device, so map them
                 * DMA_FROM_DEVICE to match the unmap paths.
                 */
                dma = dma_map_single(netcp->dev, bufptr, buf_len,
                                     DMA_FROM_DEVICE);
                if (unlikely(dma_mapping_error(netcp->dev, dma))) {
                        netcp_frag_free(true, bufptr);
                        goto fail;
                }

                pad[0] = (u32)bufptr;

        } else {
                /* Allocate a secondary receive queue entry */
                page = alloc_page(GFP_ATOMIC | GFP_DMA | __GFP_COLD);
                if (unlikely(!page)) {
                        dev_warn_ratelimited(netcp->ndev_dev, "Secondary page alloc failed\n");
                        goto fail;
                }
                buf_len = PAGE_SIZE;
                dma = dma_map_page(netcp->dev, page, 0, buf_len,
                                   DMA_FROM_DEVICE);
                if (unlikely(dma_mapping_error(netcp->dev, dma))) {
                        __free_page(page);
                        goto fail;
                }
                pad[0] = (u32)page;
                pad[1] = 0;
        }

        desc_info =  KNAV_DMA_DESC_PS_INFO_IN_DESC;
        desc_info |= buf_len & KNAV_DMA_DESC_PKT_LEN_MASK;
        pkt_info =  KNAV_DMA_DESC_HAS_EPIB;
        pkt_info |= KNAV_DMA_NUM_PS_WORDS << KNAV_DMA_DESC_PSLEN_SHIFT;
        pkt_info |= (netcp->rx_queue_id & KNAV_DMA_DESC_RETQ_MASK) <<
                    KNAV_DMA_DESC_RETQ_SHIFT;
        set_org_pkt_info(dma, buf_len, hwdesc);
        set_pad_info(pad[0], pad[1], hwdesc);
        set_desc_info(desc_info, pkt_info, hwdesc);

        /* Push to FDQs */
        knav_pool_desc_map(netcp->rx_pool, hwdesc, sizeof(*hwdesc), &dma,
                           &dma_sz);
        knav_queue_push(netcp->rx_fdq[fdq], dma, sizeof(*hwdesc), 0);
        return;

fail:
        knav_pool_desc_put(netcp->rx_pool, hwdesc);
}

/* Refill Rx FDQ with descriptors & attached buffers */
static void netcp_rxpool_refill(struct netcp_intf *netcp)
{
        u32 fdq_deficit[KNAV_DMA_FDQ_PER_CHAN] = {0};
        int i;

        /* Calculate the FDQ deficit and refill */
        for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && netcp->rx_fdq[i]; i++) {
                fdq_deficit[i] = netcp->rx_queue_depths[i] -
                                 knav_queue_get_count(netcp->rx_fdq[i]);

                while (fdq_deficit[i]--)
                        netcp_allocate_rx_buf(netcp, i);
        }
}

/* NAPI poll */
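/* Process up to "budget" Rx packets.  If the queue was drained before the
 * budget was spent, complete NAPI and re-enable queue notifications; the
 * Rx FDQs are topped up on every poll either way.
 */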
static int netcp_rx_poll(struct napi_struct *napi, int budget)
{
        struct netcp_intf *netcp = container_of(napi, struct netcp_intf,
                                                rx_napi);
        unsigned int packets;

        packets = netcp_process_rx_packets(netcp, budget);

        if (packets < budget) {
                napi_complete(&netcp->rx_napi);
                knav_queue_enable_notify(netcp->rx_queue);
        }

        netcp_rxpool_refill(netcp);
        return packets;
}

static void netcp_rx_notify(void *arg)
{
        struct netcp_intf *netcp = arg;

        knav_queue_disable_notify(netcp->rx_queue);
        napi_schedule(&netcp->rx_napi);
}

static void netcp_free_tx_desc_chain(struct netcp_intf *netcp,
                                     struct knav_dma_desc *desc,
                                     unsigned int desc_sz)
{
        struct knav_dma_desc *ndesc = desc;
        dma_addr_t dma_desc, dma_buf;
        unsigned int buf_len;

        while (ndesc) {
                get_pkt_info(&dma_buf, &buf_len, &dma_desc, ndesc);

                if (dma_buf && buf_len)
                        dma_unmap_single(netcp->dev, dma_buf, buf_len,
                                         DMA_TO_DEVICE);
                else
                        dev_warn(netcp->ndev_dev, "bad Tx desc buf(%p), len(%d)\n",
                                 (void *)dma_buf, buf_len);

                knav_pool_desc_put(netcp->tx_pool, ndesc);
                ndesc = NULL;
                if (dma_desc) {
                        ndesc = knav_pool_desc_unmap(netcp->tx_pool, dma_desc,
                                                     desc_sz);
                        if (!ndesc)
                                dev_err(netcp->ndev_dev, "failed to unmap Tx desc\n");
                }
        }
}

static int netcp_process_tx_compl_packets(struct netcp_intf *netcp,
                                          unsigned int budget)
{
        struct knav_dma_desc *desc;
        struct sk_buff *skb;
        unsigned int dma_sz;
        dma_addr_t dma;
        int pkts = 0;
        u32 tmp;

        while (budget--) {
                dma = knav_queue_pop(netcp->tx_compl_q, &dma_sz);
                if (!dma)
                        break;
                desc = knav_pool_desc_unmap(netcp->tx_pool, dma, dma_sz);
                if (unlikely(!desc)) {
                        dev_err(netcp->ndev_dev, "failed to unmap Tx desc\n");
                        netcp->ndev->stats.tx_errors++;
                        continue;
                }

                get_pad_info((u32 *)&skb, &tmp, desc);
                netcp_free_tx_desc_chain(netcp, desc, dma_sz);
                if (!skb) {
                        dev_err(netcp->ndev_dev, "No skb in Tx desc\n");
                        netcp->ndev->stats.tx_errors++;
                        continue;
                }

                if (netif_subqueue_stopped(netcp->ndev, skb) &&
                    netif_running(netcp->ndev) &&
                    (knav_pool_count(netcp->tx_pool) >
                    netcp->tx_resume_threshold)) {
                        u16 subqueue = skb_get_queue_mapping(skb);

                        netif_wake_subqueue(netcp->ndev, subqueue);
                }

                netcp->ndev->stats.tx_packets++;
                netcp->ndev->stats.tx_bytes += skb->len;
                dev_kfree_skb(skb);
                pkts++;
        }
        return pkts;
}

static int netcp_tx_poll(struct napi_struct *napi, int budget)
{
        int packets;
        struct netcp_intf *netcp = container_of(napi, struct netcp_intf,
                                                tx_napi);

        packets = netcp_process_tx_compl_packets(netcp, budget);
        if (packets < budget) {
                napi_complete(&netcp->tx_napi);
                knav_queue_enable_notify(netcp->tx_compl_q);
        }

        return packets;
}

static void netcp_tx_notify(void *arg)
{
        struct netcp_intf *netcp = arg;

        knav_queue_disable_notify(netcp->tx_compl_q);
        napi_schedule(&netcp->tx_napi);
}

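/* Map an skb into a chain of Tx pool descriptors: the linear area goes in
 * the first descriptor, each page fragment in a chained descriptor.  On
 * failure the partial chain is unwound.  Returns the (still unmapped) head
 * descriptor, or NULL.
 */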
static struct knav_dma_desc*
netcp_tx_map_skb(struct sk_buff *skb, struct netcp_intf *netcp)
{
        struct knav_dma_desc *desc, *ndesc, *pdesc;
        unsigned int pkt_len = skb_headlen(skb);
        struct device *dev = netcp->dev;
        dma_addr_t dma_addr;
        unsigned int dma_sz;
        int i;

        /* Map the linear buffer */
        dma_addr = dma_map_single(dev, skb->data, pkt_len, DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(dev, dma_addr))) {
                dev_err(netcp->ndev_dev, "Failed to map skb buffer\n");
                return NULL;
        }

        desc = knav_pool_desc_get(netcp->tx_pool);
        if (unlikely(IS_ERR_OR_NULL(desc))) {
                dev_err(netcp->ndev_dev, "out of TX desc\n");
                dma_unmap_single(dev, dma_addr, pkt_len, DMA_TO_DEVICE);
                return NULL;
        }

        set_pkt_info(dma_addr, pkt_len, 0, desc);
        if (skb_is_nonlinear(skb)) {
                prefetchw(skb_shinfo(skb));
        } else {
                desc->next_desc = 0;
                goto upd_pkt_len;
        }

        pdesc = desc;

        /* Handle the case where skb is fragmented in pages */
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                struct page *page = skb_frag_page(frag);
                u32 page_offset = frag->page_offset;
                u32 buf_len = skb_frag_size(frag);
                dma_addr_t desc_dma;
                u32 pkt_info;

                dma_addr = dma_map_page(dev, page, page_offset, buf_len,
                                        DMA_TO_DEVICE);
                if (unlikely(dma_mapping_error(dev, dma_addr))) {
                        dev_err(netcp->ndev_dev, "Failed to map skb page\n");
                        goto free_descs;
                }

                ndesc = knav_pool_desc_get(netcp->tx_pool);
                if (unlikely(IS_ERR_OR_NULL(ndesc))) {
                        dev_err(netcp->ndev_dev, "out of TX desc for frags\n");
                        dma_unmap_page(dev, dma_addr, buf_len, DMA_TO_DEVICE);
                        goto free_descs;
                }

                desc_dma = knav_pool_desc_virt_to_dma(netcp->tx_pool,
                                                      (void *)ndesc);
                pkt_info =
                        (netcp->tx_compl_qid & KNAV_DMA_DESC_RETQ_MASK) <<
                                KNAV_DMA_DESC_RETQ_SHIFT;
                set_pkt_info(dma_addr, buf_len, 0, ndesc);
                set_words(&desc_dma, 1, &pdesc->next_desc);
                pkt_len += buf_len;
                if (pdesc != desc)
                        knav_pool_desc_map(netcp->tx_pool, pdesc,
                                           sizeof(*pdesc), &desc_dma, &dma_sz);
                pdesc = ndesc;
        }
        if (pdesc != desc)
                knav_pool_desc_map(netcp->tx_pool, pdesc, sizeof(*pdesc),
                                   &dma_addr, &dma_sz);

        /* frag list based linkage is not supported for now. */
        if (skb_shinfo(skb)->frag_list) {
                dev_err_ratelimited(netcp->ndev_dev, "NETIF_F_FRAGLIST not supported\n");
                goto free_descs;
        }

upd_pkt_len:
        WARN_ON(pkt_len != skb->len);

        pkt_len &= KNAV_DMA_DESC_PKT_LEN_MASK;
        set_words(&pkt_len, 1, &desc->desc_info);
        return desc;

free_descs:
        netcp_free_tx_desc_chain(netcp, desc, sizeof(*desc));
        return NULL;
}

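/* Run the TX hooks so that one of them claims the packet and picks a
 * netcp_tx_pipe, fill in the descriptor's EPIB/psdata and completion-queue
 * routing, then map the descriptor and push it onto the pipe's DMA queue.
 */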
static int netcp_tx_submit_skb(struct netcp_intf *netcp,
                               struct sk_buff *skb,
                               struct knav_dma_desc *desc)
{
        struct netcp_tx_pipe *tx_pipe = NULL;
        struct netcp_hook_list *tx_hook;
        struct netcp_packet p_info;
        unsigned int dma_sz;
        dma_addr_t dma;
        u32 tmp = 0;
        int ret = 0;

        p_info.netcp = netcp;
        p_info.skb = skb;
        p_info.tx_pipe = NULL;
        p_info.psdata_len = 0;
        p_info.ts_context = NULL;
        p_info.txtstamp_complete = NULL;
        p_info.epib = desc->epib;
        p_info.psdata = desc->psdata;
        memset(p_info.epib, 0, KNAV_DMA_NUM_EPIB_WORDS * sizeof(u32));

        /* Find out where to inject the packet for transmission */
        list_for_each_entry(tx_hook, &netcp->txhook_list_head, list) {
                ret = tx_hook->hook_rtn(tx_hook->order, tx_hook->hook_data,
                                        &p_info);
                if (unlikely(ret != 0)) {
                        dev_err(netcp->ndev_dev, "TX hook %d rejected the packet with reason(%d)\n",
                                tx_hook->order, ret);
                        ret = (ret < 0) ? ret : NETDEV_TX_OK;
                        goto out;
                }
        }

        /* Make sure some TX hook claimed the packet */
        tx_pipe = p_info.tx_pipe;
        if (!tx_pipe) {
                dev_err(netcp->ndev_dev, "No TX hook claimed the packet!\n");
                ret = -ENXIO;
                goto out;
        }

        /* update descriptor */
        if (p_info.psdata_len) {
                u32 *psdata = p_info.psdata;

                memmove(p_info.psdata, p_info.psdata + p_info.psdata_len,
                        p_info.psdata_len);
                set_words(psdata, p_info.psdata_len, psdata);
                tmp |= (p_info.psdata_len & KNAV_DMA_DESC_PSLEN_MASK) <<
                        KNAV_DMA_DESC_PSLEN_SHIFT;
        }

        tmp |= KNAV_DMA_DESC_HAS_EPIB |
                ((netcp->tx_compl_qid & KNAV_DMA_DESC_RETQ_MASK) <<
                KNAV_DMA_DESC_RETQ_SHIFT);

        if (!(tx_pipe->flags & SWITCH_TO_PORT_IN_TAGINFO)) {
                tmp |= ((tx_pipe->switch_to_port & KNAV_DMA_DESC_PSFLAG_MASK) <<
                        KNAV_DMA_DESC_PSFLAG_SHIFT);
        }

        set_words(&tmp, 1, &desc->packet_info);
        set_words((u32 *)&skb, 1, &desc->pad[0]);

        if (tx_pipe->flags & SWITCH_TO_PORT_IN_TAGINFO) {
                tmp = tx_pipe->switch_to_port;
                set_words((u32 *)&tmp, 1, &desc->tag_info);
        }

        /* submit packet descriptor */
        ret = knav_pool_desc_map(netcp->tx_pool, desc, sizeof(*desc), &dma,
                                 &dma_sz);
        if (unlikely(ret)) {
                dev_err(netcp->ndev_dev, "%s() failed to map desc\n", __func__);
                ret = -ENOMEM;
                goto out;
        }
        skb_tx_timestamp(skb);
        knav_queue_push(tx_pipe->dma_queue, dma, dma_sz, 0);

out:
        return ret;
}

/* Submit the packet */
static int netcp_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
        struct netcp_intf *netcp = netdev_priv(ndev);
        int subqueue = skb_get_queue_mapping(skb);
        struct knav_dma_desc *desc;
        int desc_count, ret = 0;

        if (unlikely(skb->len <= 0)) {
                dev_kfree_skb(skb);
                return NETDEV_TX_OK;
        }

        if (unlikely(skb->len < NETCP_MIN_PACKET_SIZE)) {
                ret = skb_padto(skb, NETCP_MIN_PACKET_SIZE);
                if (ret < 0) {
                        /* If we get here, the skb has already been dropped */
                        dev_warn(netcp->ndev_dev, "padding failed (%d), packet dropped\n",
                                 ret);
                        ndev->stats.tx_dropped++;
                        return ret;
                }
                skb->len = NETCP_MIN_PACKET_SIZE;
        }

        desc = netcp_tx_map_skb(skb, netcp);
        if (unlikely(!desc)) {
                netif_stop_subqueue(ndev, subqueue);
                ret = -ENOBUFS;
                goto drop;
        }

        ret = netcp_tx_submit_skb(netcp, skb, desc);
        if (ret)
                goto drop;

        ndev->trans_start = jiffies;

        /* Check Tx pool count & stop subqueue if needed */
        desc_count = knav_pool_count(netcp->tx_pool);
        if (desc_count < netcp->tx_pause_threshold) {
                dev_dbg(netcp->ndev_dev, "pausing tx, count(%d)\n", desc_count);
                netif_stop_subqueue(ndev, subqueue);
        }
        return NETDEV_TX_OK;

drop:
        ndev->stats.tx_dropped++;
        if (desc)
                netcp_free_tx_desc_chain(netcp, desc, sizeof(*desc));
        dev_kfree_skb(skb);
        return ret;
}

int netcp_txpipe_close(struct netcp_tx_pipe *tx_pipe)
{
        if (tx_pipe->dma_channel) {
                knav_dma_close_channel(tx_pipe->dma_channel);
                tx_pipe->dma_channel = NULL;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(netcp_txpipe_close);

int netcp_txpipe_open(struct netcp_tx_pipe *tx_pipe)
{
        struct device *dev = tx_pipe->netcp_device->device;
        struct knav_dma_cfg config;
        int ret = 0;
        u8 name[16];

        memset(&config, 0, sizeof(config));
        config.direction = DMA_MEM_TO_DEV;
        config.u.tx.filt_einfo = false;
        config.u.tx.filt_pswords = false;
        config.u.tx.priority = DMA_PRIO_MED_L;

        tx_pipe->dma_channel = knav_dma_open_channel(dev,
                                tx_pipe->dma_chan_name, &config);
        if (IS_ERR_OR_NULL(tx_pipe->dma_channel)) {
                dev_err(dev, "failed opening tx chan(%s)\n",
                        tx_pipe->dma_chan_name);
                ret = -ENODEV;
                goto err;
        }

        snprintf(name, sizeof(name), "tx-pipe-%s", dev_name(dev));
        tx_pipe->dma_queue = knav_queue_open(name, tx_pipe->dma_queue_id,
                                             KNAV_QUEUE_SHARED);
        if (IS_ERR(tx_pipe->dma_queue)) {
                ret = PTR_ERR(tx_pipe->dma_queue);
                dev_err(dev, "Could not open DMA queue for channel \"%s\": %d\n",
                        name, ret);
                goto err;
        }

        dev_dbg(dev, "opened tx pipe %s\n", name);
        return 0;

err:
        if (!IS_ERR_OR_NULL(tx_pipe->dma_channel))
                knav_dma_close_channel(tx_pipe->dma_channel);
        tx_pipe->dma_channel = NULL;
        return ret;
}
EXPORT_SYMBOL_GPL(netcp_txpipe_open);

int netcp_txpipe_init(struct netcp_tx_pipe *tx_pipe,
                      struct netcp_device *netcp_device,
                      const char *dma_chan_name, unsigned int dma_queue_id)
{
        memset(tx_pipe, 0, sizeof(*tx_pipe));
        tx_pipe->netcp_device = netcp_device;
        tx_pipe->dma_chan_name = dma_chan_name;
        tx_pipe->dma_queue_id = dma_queue_id;
        return 0;
}
EXPORT_SYMBOL_GPL(netcp_txpipe_init);

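/* Interface address bookkeeping uses a mark-and-sweep scheme: on each
 * ndo_set_rx_mode() call all entries are unmarked, addresses still in use
 * are re-marked ADDR_VALID (or added as ADDR_NEW), and a final sweep
 * deletes stale entries and pushes new ones out to the modules.
 */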
static struct netcp_addr *netcp_addr_find(struct netcp_intf *netcp,
                                          const u8 *addr,
                                          enum netcp_addr_type type)
{
        struct netcp_addr *naddr;

        list_for_each_entry(naddr, &netcp->addr_list, node) {
                if (naddr->type != type)
                        continue;
                if (addr && memcmp(addr, naddr->addr, ETH_ALEN))
                        continue;
                return naddr;
        }

        return NULL;
}

static struct netcp_addr *netcp_addr_add(struct netcp_intf *netcp,
                                         const u8 *addr,
                                         enum netcp_addr_type type)
{
        struct netcp_addr *naddr;

        naddr = devm_kmalloc(netcp->dev, sizeof(*naddr), GFP_ATOMIC);
        if (!naddr)
                return NULL;

        naddr->type = type;
        naddr->flags = 0;
        naddr->netcp = netcp;
        if (addr)
                ether_addr_copy(naddr->addr, addr);
        else
                eth_zero_addr(naddr->addr);
        list_add_tail(&naddr->node, &netcp->addr_list);

        return naddr;
}

static void netcp_addr_del(struct netcp_intf *netcp, struct netcp_addr *naddr)
{
        list_del(&naddr->node);
        devm_kfree(netcp->dev, naddr);
}

static void netcp_addr_clear_mark(struct netcp_intf *netcp)
{
        struct netcp_addr *naddr;

        list_for_each_entry(naddr, &netcp->addr_list, node)
                naddr->flags = 0;
}

static void netcp_addr_add_mark(struct netcp_intf *netcp, const u8 *addr,
                                enum netcp_addr_type type)
{
        struct netcp_addr *naddr;

        naddr = netcp_addr_find(netcp, addr, type);
        if (naddr) {
                naddr->flags |= ADDR_VALID;
                return;
        }

        naddr = netcp_addr_add(netcp, addr, type);
        if (!WARN_ON(!naddr))
                naddr->flags |= ADDR_NEW;
}

static void netcp_addr_sweep_del(struct netcp_intf *netcp)
{
        struct netcp_addr *naddr, *tmp;
        struct netcp_intf_modpriv *priv;
        struct netcp_module *module;
        int error;

        list_for_each_entry_safe(naddr, tmp, &netcp->addr_list, node) {
                if (naddr->flags & (ADDR_VALID | ADDR_NEW))
                        continue;
                dev_dbg(netcp->ndev_dev, "deleting address %pM, type %x\n",
                        naddr->addr, naddr->type);
                mutex_lock(&netcp_modules_lock);
                for_each_module(netcp, priv) {
                        module = priv->netcp_module;
                        if (!module->del_addr)
                                continue;
                        error = module->del_addr(priv->module_priv,
                                                 naddr);
                        WARN_ON(error);
                }
                mutex_unlock(&netcp_modules_lock);
                netcp_addr_del(netcp, naddr);
        }
}

static void netcp_addr_sweep_add(struct netcp_intf *netcp)
{
        struct netcp_addr *naddr, *tmp;
        struct netcp_intf_modpriv *priv;
        struct netcp_module *module;
        int error;

        list_for_each_entry_safe(naddr, tmp, &netcp->addr_list, node) {
                if (!(naddr->flags & ADDR_NEW))
                        continue;
                dev_dbg(netcp->ndev_dev, "adding address %pM, type %x\n",
                        naddr->addr, naddr->type);
                mutex_lock(&netcp_modules_lock);
                for_each_module(netcp, priv) {
                        module = priv->netcp_module;
                        if (!module->add_addr)
                                continue;
                        error = module->add_addr(priv->module_priv, naddr);
                        WARN_ON(error);
                }
                mutex_unlock(&netcp_modules_lock);
        }
}

static void netcp_set_rx_mode(struct net_device *ndev)
{
        struct netcp_intf *netcp = netdev_priv(ndev);
        struct netdev_hw_addr *ndev_addr;
        bool promisc;

        promisc = (ndev->flags & IFF_PROMISC ||
                   ndev->flags & IFF_ALLMULTI ||
                   netdev_mc_count(ndev) > NETCP_MAX_MCAST_ADDR);

        /* first clear all marks */
        netcp_addr_clear_mark(netcp);

        /* next add new entries, mark existing ones */
        netcp_addr_add_mark(netcp, ndev->broadcast, ADDR_BCAST);
        for_each_dev_addr(ndev, ndev_addr)
                netcp_addr_add_mark(netcp, ndev_addr->addr, ADDR_DEV);
        netdev_for_each_uc_addr(ndev_addr, ndev)
                netcp_addr_add_mark(netcp, ndev_addr->addr, ADDR_UCAST);
        netdev_for_each_mc_addr(ndev_addr, ndev)
                netcp_addr_add_mark(netcp, ndev_addr->addr, ADDR_MCAST);

        if (promisc)
                netcp_addr_add_mark(netcp, NULL, ADDR_ANY);

        /* finally sweep and callout into modules */
        netcp_addr_sweep_del(netcp);
        netcp_addr_sweep_add(netcp);
}

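/* Tear down Navigator resources roughly in reverse order of setup: stop
 * the Rx DMA channel first so nothing new lands in the queues, then drain
 * and destroy the Rx pool, close the Rx completion and free-descriptor
 * queues, and finally close the Tx completion queue and Tx pool.
 */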
1455 static void netcp_free_navigator_resources(struct netcp_intf *netcp)
1456 {
1457         int i;
1458
1459         if (netcp->rx_channel) {
1460                 knav_dma_close_channel(netcp->rx_channel);
1461                 netcp->rx_channel = NULL;
1462         }
1463
1464         if (!IS_ERR_OR_NULL(netcp->rx_pool))
1465                 netcp_rxpool_free(netcp);
1466
1467         if (!IS_ERR_OR_NULL(netcp->rx_queue)) {
1468                 knav_queue_close(netcp->rx_queue);
1469                 netcp->rx_queue = NULL;
1470         }
1471
1472         for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN &&
1473              !IS_ERR_OR_NULL(netcp->rx_fdq[i]) ; ++i) {
1474                 knav_queue_close(netcp->rx_fdq[i]);
1475                 netcp->rx_fdq[i] = NULL;
1476         }
1477
1478         if (!IS_ERR_OR_NULL(netcp->tx_compl_q)) {
1479                 knav_queue_close(netcp->tx_compl_q);
1480                 netcp->tx_compl_q = NULL;
1481         }
1482
1483         if (!IS_ERR_OR_NULL(netcp->tx_pool)) {
1484                 knav_pool_destroy(netcp->tx_pool);
1485                 netcp->tx_pool = NULL;
1486         }
1487 }
1488
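/* Allocate the Navigator resources for an interface: Rx/Tx descriptor
 * pools, the Tx completion and Rx queues with their notifiers, the Rx
 * free descriptor queues (FDQs), and finally the Rx DMA channel. On any
 * failure, everything allocated so far is torn down again.
 */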
1489 static int netcp_setup_navigator_resources(struct net_device *ndev)
1490 {
1491         struct netcp_intf *netcp = netdev_priv(ndev);
1492         struct knav_queue_notify_config notify_cfg;
1493         struct knav_dma_cfg config;
1494         u32 last_fdq = 0;
1495         char name[16];
1496         int ret;
1497         int i;
1498
1499         /* Create Rx/Tx descriptor pools */
1500         snprintf(name, sizeof(name), "rx-pool-%s", ndev->name);
1501         netcp->rx_pool = knav_pool_create(name, netcp->rx_pool_size,
1502                                                 netcp->rx_pool_region_id);
1503         if (IS_ERR_OR_NULL(netcp->rx_pool)) {
1504                 dev_err(netcp->ndev_dev, "Couldn't create rx pool\n");
1505                 ret = PTR_ERR(netcp->rx_pool);
1506                 goto fail;
1507         }
1508
1509         snprintf(name, sizeof(name), "tx-pool-%s", ndev->name);
1510         netcp->tx_pool = knav_pool_create(name, netcp->tx_pool_size,
1511                                                 netcp->tx_pool_region_id);
1512         if (IS_ERR_OR_NULL(netcp->tx_pool)) {
1513                 dev_err(netcp->ndev_dev, "Couldn't create tx pool\n");
1514                 ret = PTR_ERR(netcp->tx_pool);
1515                 goto fail;
1516         }
1517
1518         /* open Tx completion queue */
1519         snprintf(name, sizeof(name), "tx-compl-%s", ndev->name);
1520         netcp->tx_compl_q = knav_queue_open(name, netcp->tx_compl_qid, 0);
1521         if (IS_ERR_OR_NULL(netcp->tx_compl_q)) {
1522                 ret = PTR_ERR(netcp->tx_compl_q);
1523                 goto fail;
1524         }
1525         netcp->tx_compl_qid = knav_queue_get_id(netcp->tx_compl_q);
1526
1527         /* Set notification for Tx completion */
1528         notify_cfg.fn = netcp_tx_notify;
1529         notify_cfg.fn_arg = netcp;
1530         ret = knav_queue_device_control(netcp->tx_compl_q,
1531                                         KNAV_QUEUE_SET_NOTIFIER,
1532                                         (unsigned long)&notify_cfg);
1533         if (ret)
1534                 goto fail;
1535
1536         knav_queue_disable_notify(netcp->tx_compl_q);
1537
1538         /* open Rx completion queue */
1539         snprintf(name, sizeof(name), "rx-compl-%s", ndev->name);
1540         netcp->rx_queue = knav_queue_open(name, netcp->rx_queue_id, 0);
1541         if (IS_ERR_OR_NULL(netcp->rx_queue)) {
1542                 ret = PTR_ERR(netcp->rx_queue);
1543                 goto fail;
1544         }
1545         netcp->rx_queue_id = knav_queue_get_id(netcp->rx_queue);
1546
1547         /* Set notification for Rx completion */
1548         notify_cfg.fn = netcp_rx_notify;
1549         notify_cfg.fn_arg = netcp;
1550         ret = knav_queue_device_control(netcp->rx_queue,
1551                                         KNAV_QUEUE_SET_NOTIFIER,
1552                                         (unsigned long)&notify_cfg);
1553         if (ret)
1554                 goto fail;
1555
1556         knav_queue_disable_notify(netcp->rx_queue);
1557
1558         /* open Rx FDQs */
1559         for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && netcp->rx_queue_depths[i];
1560              ++i) {
1561                 snprintf(name, sizeof(name), "rx-fdq-%s-%d", ndev->name, i);
1562                 netcp->rx_fdq[i] = knav_queue_open(name, KNAV_QUEUE_GP, 0);
1563                 if (IS_ERR_OR_NULL(netcp->rx_fdq[i])) {
1564                         ret = PTR_ERR(netcp->rx_fdq[i]);
1565                         goto fail;
1566                 }
1567         }
1568
1569         memset(&config, 0, sizeof(config));
1570         config.direction                = DMA_DEV_TO_MEM;
1571         config.u.rx.einfo_present       = true;
1572         config.u.rx.psinfo_present      = true;
1573         config.u.rx.err_mode            = DMA_DROP;
1574         config.u.rx.desc_type           = DMA_DESC_HOST;
1575         config.u.rx.psinfo_at_sop       = false;
1576         config.u.rx.sop_offset          = NETCP_SOP_OFFSET;
1577         config.u.rx.dst_q               = netcp->rx_queue_id;
1578         config.u.rx.thresh              = DMA_THRESH_NONE;
1579
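        /* Populate every FDQ slot in the channel config; slots without an
         * open FDQ of their own reuse the id of the last opened queue so
         * no stale ids are handed to the DMA layer.
         */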
1580         for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN; ++i) {
1581                 if (netcp->rx_fdq[i])
1582                         last_fdq = knav_queue_get_id(netcp->rx_fdq[i]);
1583                 config.u.rx.fdq[i] = last_fdq;
1584         }
1585
1586         netcp->rx_channel = knav_dma_open_channel(netcp->netcp_device->device,
1587                                         netcp->dma_chan_name, &config);
1588         if (IS_ERR_OR_NULL(netcp->rx_channel)) {
1589                 dev_err(netcp->ndev_dev, "failed opening rx chan(%s)\n",
1590                         netcp->dma_chan_name);
                /* may be NULL or an ERR_PTR(); make sure a failure is returned */
                ret = PTR_ERR(netcp->rx_channel) ?: -ENODEV;
1591                 goto fail;
1592         }
1593
1594         dev_dbg(netcp->ndev_dev, "opened RX channel: %p\n", netcp->rx_channel);
1595         return 0;
1596
1597 fail:
1598         netcp_free_navigator_resources(netcp);
1599         return ret;
1600 }
1601
1602 /* Open the device */
1603 static int netcp_ndo_open(struct net_device *ndev)
1604 {
1605         struct netcp_intf *netcp = netdev_priv(ndev);
1606         struct netcp_intf_modpriv *intf_modpriv;
1607         struct netcp_module *module;
1608         int ret;
1609
1610         netif_carrier_off(ndev);
1611         ret = netcp_setup_navigator_resources(ndev);
1612         if (ret) {
1613                 dev_err(netcp->ndev_dev, "Failed to set up navigator resources\n");
1614                 goto fail;
1615         }
1616
1617         mutex_lock(&netcp_modules_lock);
1618         for_each_module(netcp, intf_modpriv) {
1619                 module = intf_modpriv->netcp_module;
1620                 if (module->open) {
1621                         ret = module->open(intf_modpriv->module_priv, ndev);
1622                         if (ret != 0) {
1623                                 dev_err(netcp->ndev_dev, "module open failed\n");
1624                                 goto fail_open;
1625                         }
1626                 }
1627         }
1628         mutex_unlock(&netcp_modules_lock);
1629
1630         napi_enable(&netcp->rx_napi);
1631         napi_enable(&netcp->tx_napi);
1632         knav_queue_enable_notify(netcp->tx_compl_q);
1633         knav_queue_enable_notify(netcp->rx_queue);
1634         netcp_rxpool_refill(netcp);
1635         netif_tx_wake_all_queues(ndev);
1636         dev_dbg(netcp->ndev_dev, "netcp device %s opened\n", ndev->name);
1637         return 0;
1638
1639 fail_open:
1640         for_each_module(netcp, intf_modpriv) {
1641                 module = intf_modpriv->netcp_module;
1642                 if (module->close)
1643                         module->close(intf_modpriv->module_priv, ndev);
1644         }
1645         mutex_unlock(&netcp_modules_lock);
1646
1647 fail:
1648         netcp_free_navigator_resources(netcp);
1649         return ret;
1650 }
1651
1652 /* Close the device */
1653 static int netcp_ndo_stop(struct net_device *ndev)
1654 {
1655         struct netcp_intf *netcp = netdev_priv(ndev);
1656         struct netcp_intf_modpriv *intf_modpriv;
1657         struct netcp_module *module;
1658         int err = 0;
1659
1660         netif_tx_stop_all_queues(ndev);
1661         netif_carrier_off(ndev);
1662         netcp_addr_clear_mark(netcp);
1663         netcp_addr_sweep_del(netcp);
1664         knav_queue_disable_notify(netcp->rx_queue);
1665         knav_queue_disable_notify(netcp->tx_compl_q);
1666         napi_disable(&netcp->rx_napi);
1667         napi_disable(&netcp->tx_napi);
1668
1669         mutex_lock(&netcp_modules_lock);
1670         for_each_module(netcp, intf_modpriv) {
1671                 module = intf_modpriv->netcp_module;
1672                 if (module->close) {
1673                         err = module->close(intf_modpriv->module_priv, ndev);
1674                         if (err != 0)
1675                                 dev_err(netcp->ndev_dev, "Close failed\n");
1676                 }
1677         }
1678         mutex_unlock(&netcp_modules_lock);
1679
1680         /* Recycle Rx descriptors from completion queue */
1681         netcp_empty_rx_queue(netcp);
1682
1683         /* Recycle Tx descriptors from completion queue */
1684         netcp_process_tx_compl_packets(netcp, netcp->tx_pool_size);
1685
1686         if (knav_pool_count(netcp->tx_pool) != netcp->tx_pool_size)
1687                 dev_err(netcp->ndev_dev, "Lost (%d) Tx descs\n",
1688                         netcp->tx_pool_size - knav_pool_count(netcp->tx_pool));
1689
1690         netcp_free_navigator_resources(netcp);
1691         dev_dbg(netcp->ndev_dev, "netcp device %s stopped\n", ndev->name);
1692         return 0;
1693 }
1694
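/* Forward an ioctl to every attached module. A hard error (anything
 * other than -EOPNOTSUPP) from any module aborts the loop and is
 * returned; if at least one module handles the command, the ioctl
 * succeeds; otherwise -EOPNOTSUPP is returned.
 */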
1695 static int netcp_ndo_ioctl(struct net_device *ndev,
1696                            struct ifreq *req, int cmd)
1697 {
1698         struct netcp_intf *netcp = netdev_priv(ndev);
1699         struct netcp_intf_modpriv *intf_modpriv;
1700         struct netcp_module *module;
1701         int ret = -1, err = -EOPNOTSUPP;
1702
1703         if (!netif_running(ndev))
1704                 return -EINVAL;
1705
1706         mutex_lock(&netcp_modules_lock);
1707         for_each_module(netcp, intf_modpriv) {
1708                 module = intf_modpriv->netcp_module;
1709                 if (!module->ioctl)
1710                         continue;
1711
1712                 err = module->ioctl(intf_modpriv->module_priv, req, cmd);
1713                 if ((err < 0) && (err != -EOPNOTSUPP)) {
1714                         ret = err;
1715                         goto out;
1716                 }
1717                 if (err == 0)
1718                         ret = err;
1719         }
1720
1721 out:
1722         mutex_unlock(&netcp_modules_lock);
1723         return (ret == 0) ? 0 : err;
1724 }
1725
1726 static int netcp_ndo_change_mtu(struct net_device *ndev, int new_mtu)
1727 {
1728         struct netcp_intf *netcp = netdev_priv(ndev);
1729
1730         /* MTU < 68 is an error for IPv4 traffic */
1731         if ((new_mtu < 68) ||
1732             (new_mtu > (NETCP_MAX_FRAME_SIZE - ETH_HLEN - ETH_FCS_LEN))) {
1733                 dev_err(netcp->ndev_dev, "Invalid mtu size = %d\n", new_mtu);
1734                 return -EINVAL;
1735         }
1736
1737         ndev->mtu = new_mtu;
1738         return 0;
1739 }
1740
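/* Tx watchdog: reclaim whatever is sitting on the Tx completion queue,
 * refresh the trans_start stamp and wake the queues so transmission can
 * resume.
 */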
1741 static void netcp_ndo_tx_timeout(struct net_device *ndev)
1742 {
1743         struct netcp_intf *netcp = netdev_priv(ndev);
1744         unsigned int descs = knav_pool_count(netcp->tx_pool);
1745
1746         dev_err(netcp->ndev_dev, "transmit timed out tx descs(%d)\n", descs);
1747         netcp_process_tx_compl_packets(netcp, netcp->tx_pool_size);
1748         ndev->trans_start = jiffies;
1749         netif_tx_wake_all_queues(ndev);
1750 }
1751
1752 static int netcp_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
1753 {
1754         struct netcp_intf *netcp = netdev_priv(ndev);
1755         struct netcp_intf_modpriv *intf_modpriv;
1756         struct netcp_module *module;
1757         int err = 0;
1758
1759         dev_dbg(netcp->ndev_dev, "adding rx vlan id: %d\n", vid);
1760
1761         mutex_lock(&netcp_modules_lock);
1762         for_each_module(netcp, intf_modpriv) {
1763                 module = intf_modpriv->netcp_module;
1764                 if ((module->add_vid) && (vid != 0)) {
1765                         err = module->add_vid(intf_modpriv->module_priv, vid);
1766                         if (err != 0) {
1767                                 dev_err(netcp->ndev_dev, "Could not add vlan id = %d\n",
1768                                         vid);
1769                                 break;
1770                         }
1771                 }
1772         }
1773         mutex_unlock(&netcp_modules_lock);
1774         return err;
1775 }
1776
1777 static int netcp_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
1778 {
1779         struct netcp_intf *netcp = netdev_priv(ndev);
1780         struct netcp_intf_modpriv *intf_modpriv;
1781         struct netcp_module *module;
1782         int err = 0;
1783
1784         dev_dbg(netcp->ndev_dev, "removing rx vlan id: %d\n", vid);
1785
1786         mutex_lock(&netcp_modules_lock);
1787         for_each_module(netcp, intf_modpriv) {
1788                 module = intf_modpriv->netcp_module;
1789                 if (module->del_vid) {
1790                         err = module->del_vid(intf_modpriv->module_priv, vid);
1791                         if (err != 0) {
1792                                 dev_err(netcp->ndev_dev, "Could not delete vlan id = %d\n",
1793                                         vid);
1794                                 break;
1795                         }
1796                 }
1797         }
1798         mutex_unlock(&netcp_modules_lock);
1799         return err;
1800 }
1801
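/* The netdev is allocated with a single Tx queue, so every skb maps to
 * queue 0.
 */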
1802 static u16 netcp_select_queue(struct net_device *dev, struct sk_buff *skb,
1803                               void *accel_priv,
1804                               select_queue_fallback_t fallback)
1805 {
1806         return 0;
1807 }
1808
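/* Map each requested traffic class to one Tx queue; num_tc == 0 clears
 * any existing mapping.
 */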
1809 static int netcp_setup_tc(struct net_device *dev, u8 num_tc)
1810 {
1811         int i;
1812
1813         /* setup tc must be called under rtnl lock */
1814         ASSERT_RTNL();
1815
1816         /* Sanity-check the number of traffic classes requested */
1817         if ((dev->real_num_tx_queues <= 1) ||
1818             (dev->real_num_tx_queues < num_tc))
1819                 return -EINVAL;
1820
1821         /* Configure traffic class to queue mappings */
1822         if (num_tc) {
1823                 netdev_set_num_tc(dev, num_tc);
1824                 for (i = 0; i < num_tc; i++)
1825                         netdev_set_tc_queue(dev, i, 1, i);
1826         } else {
1827                 netdev_reset_tc(dev);
1828         }
1829
1830         return 0;
1831 }
1832
1833 static const struct net_device_ops netcp_netdev_ops = {
1834         .ndo_open               = netcp_ndo_open,
1835         .ndo_stop               = netcp_ndo_stop,
1836         .ndo_start_xmit         = netcp_ndo_start_xmit,
1837         .ndo_set_rx_mode        = netcp_set_rx_mode,
1838         .ndo_do_ioctl           = netcp_ndo_ioctl,
1839         .ndo_change_mtu         = netcp_ndo_change_mtu,
1840         .ndo_set_mac_address    = eth_mac_addr,
1841         .ndo_validate_addr      = eth_validate_addr,
1842         .ndo_vlan_rx_add_vid    = netcp_rx_add_vid,
1843         .ndo_vlan_rx_kill_vid   = netcp_rx_kill_vid,
1844         .ndo_tx_timeout         = netcp_ndo_tx_timeout,
1845         .ndo_select_queue       = netcp_select_queue,
1846         .ndo_setup_tc           = netcp_setup_tc,
1847 };
1848
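/* Create one NetCP interface from a device-tree node: allocate the
 * netdev, pick a MAC address (from the efuse, the interface node, or
 * randomly generated), parse the rx-channel/rx-queue/rx-pool and
 * tx-pool/tx-completion-queue properties, register the NAPI contexts
 * and hook up the netdev ops.
 */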
1849 static int netcp_create_interface(struct netcp_device *netcp_device,
1850                                   struct device_node *node_interface)
1851 {
1852         struct device *dev = netcp_device->device;
1853         struct device_node *node = dev->of_node;
1854         struct netcp_intf *netcp;
1855         struct net_device *ndev;
1856         resource_size_t size;
1857         struct resource res;
1858         void __iomem *efuse = NULL;
1859         u32 efuse_mac = 0;
1860         const void *mac_addr;
1861         u8 efuse_mac_addr[ETH_ALEN];
1862         u32 temp[2];
1863         int ret = 0;
1864
1865         ndev = alloc_etherdev_mqs(sizeof(*netcp), 1, 1);
1866         if (!ndev) {
1867                 dev_err(dev, "Error allocating netdev\n");
1868                 return -ENOMEM;
1869         }
1870
1871         ndev->features |= NETIF_F_SG;
1872         ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
1873         ndev->hw_features = ndev->features;
1874         ndev->vlan_features |=  NETIF_F_SG;
1875
1876         netcp = netdev_priv(ndev);
1877         spin_lock_init(&netcp->lock);
1878         INIT_LIST_HEAD(&netcp->module_head);
1879         INIT_LIST_HEAD(&netcp->txhook_list_head);
1880         INIT_LIST_HEAD(&netcp->rxhook_list_head);
1881         INIT_LIST_HEAD(&netcp->addr_list);
1882         netcp->netcp_device = netcp_device;
1883         netcp->dev = netcp_device->device;
1884         netcp->ndev = ndev;
1885         netcp->ndev_dev  = &ndev->dev;
1886         netcp->msg_enable = netif_msg_init(netcp_debug_level, NETCP_DEBUG);
1887         netcp->tx_pause_threshold = MAX_SKB_FRAGS;
1888         netcp->tx_resume_threshold = netcp->tx_pause_threshold;
1889         netcp->node_interface = node_interface;
1890
1891         ret = of_property_read_u32(node_interface, "efuse-mac", &efuse_mac);
1892         if (efuse_mac) {
1893                 if (of_address_to_resource(node, NETCP_EFUSE_REG_INDEX, &res)) {
1894                         dev_err(dev, "could not find efuse-mac reg resource\n");
1895                         ret = -ENODEV;
1896                         goto quit;
1897                 }
1898                 size = resource_size(&res);
1899
1900                 if (!devm_request_mem_region(dev, res.start, size,
1901                                              dev_name(dev))) {
1902                         dev_err(dev, "could not reserve resource\n");
1903                         ret = -ENOMEM;
1904                         goto quit;
1905                 }
1906
1907                 efuse = devm_ioremap_nocache(dev, res.start, size);
1908                 if (!efuse) {
1909                         dev_err(dev, "could not map resource\n");
1910                         devm_release_mem_region(dev, res.start, size);
1911                         ret = -ENOMEM;
1912                         goto quit;
1913                 }
1914
1915                 emac_arch_get_mac_addr(efuse_mac_addr, efuse, efuse_mac);
1916                 if (is_valid_ether_addr(efuse_mac_addr))
1917                         ether_addr_copy(ndev->dev_addr, efuse_mac_addr);
1918                 else
1919                         random_ether_addr(ndev->dev_addr);
1920
1921                 devm_iounmap(dev, efuse);
1922                 devm_release_mem_region(dev, res.start, size);
1923         } else {
1924                 mac_addr = of_get_mac_address(node_interface);
1925                 if (mac_addr)
1926                         ether_addr_copy(ndev->dev_addr, mac_addr);
1927                 else
1928                         random_ether_addr(ndev->dev_addr);
1929         }
1930
1931         ret = of_property_read_string(node_interface, "rx-channel",
1932                                       &netcp->dma_chan_name);
1933         if (ret < 0) {
1934                 dev_err(dev, "missing \"rx-channel\" parameter\n");
1935                 ret = -ENODEV;
1936                 goto quit;
1937         }
1938
1939         ret = of_property_read_u32(node_interface, "rx-queue",
1940                                    &netcp->rx_queue_id);
1941         if (ret < 0) {
1942                 dev_warn(dev, "missing \"rx-queue\" parameter\n");
1943                 netcp->rx_queue_id = KNAV_QUEUE_QPEND;
1944         }
1945
1946         ret = of_property_read_u32_array(node_interface, "rx-queue-depth",
1947                                          netcp->rx_queue_depths,
1948                                          KNAV_DMA_FDQ_PER_CHAN);
1949         if (ret < 0) {
1950                 dev_err(dev, "missing \"rx-queue-depth\" parameter\n");
1951                 netcp->rx_queue_depths[0] = 128;
1952         }
1953
1954         ret = of_property_read_u32_array(node_interface, "rx-pool", temp, 2);
1955         if (ret < 0) {
1956                 dev_err(dev, "missing \"rx-pool\" parameter\n");
1957                 ret = -ENODEV;
1958                 goto quit;
1959         }
1960         netcp->rx_pool_size = temp[0];
1961         netcp->rx_pool_region_id = temp[1];
1962
1963         ret = of_property_read_u32_array(node_interface, "tx-pool", temp, 2);
1964         if (ret < 0) {
1965                 dev_err(dev, "missing \"tx-pool\" parameter\n");
1966                 ret = -ENODEV;
1967                 goto quit;
1968         }
1969         netcp->tx_pool_size = temp[0];
1970         netcp->tx_pool_region_id = temp[1];
1971
1972         if (netcp->tx_pool_size < MAX_SKB_FRAGS) {
1973                 dev_err(dev, "tx-pool size too small, must be at least %ld\n",
1974                         MAX_SKB_FRAGS);
1975                 ret = -ENODEV;
1976                 goto quit;
1977         }
1978
1979         ret = of_property_read_u32(node_interface, "tx-completion-queue",
1980                                    &netcp->tx_compl_qid);
1981         if (ret < 0) {
1982                 dev_warn(dev, "missing \"tx-completion-queue\" parameter\n");
1983                 netcp->tx_compl_qid = KNAV_QUEUE_QPEND;
1984         }
1985
1986         /* NAPI register */
1987         netif_napi_add(ndev, &netcp->rx_napi, netcp_rx_poll, NETCP_NAPI_WEIGHT);
1988         netif_napi_add(ndev, &netcp->tx_napi, netcp_tx_poll, NETCP_NAPI_WEIGHT);
1989
1990         /* Register the network device */
1991         ndev->dev_id            = 0;
1992         ndev->watchdog_timeo    = NETCP_TX_TIMEOUT;
1993         ndev->netdev_ops        = &netcp_netdev_ops;
1994         SET_NETDEV_DEV(ndev, dev);
1995
1996         list_add_tail(&netcp->interface_list, &netcp_device->interface_head);
1997         return 0;
1998
1999 quit:
2000         free_netdev(ndev);
2001         return ret;
2002 }
2003
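/* Tear down one interface: let every attached module release its
 * per-interface state, then unregister and free the netdev.
 */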
2004 static void netcp_delete_interface(struct netcp_device *netcp_device,
2005                                    struct net_device *ndev)
2006 {
2007         struct netcp_intf_modpriv *intf_modpriv, *tmp;
2008         struct netcp_intf *netcp = netdev_priv(ndev);
2009         struct netcp_module *module;
2010
2011         dev_dbg(netcp_device->device, "Removing interface \"%s\"\n",
2012                 ndev->name);
2013
2014         /* Notify each of the modules that the interface is going away */
2015         list_for_each_entry_safe(intf_modpriv, tmp, &netcp->module_head,
2016                                  intf_list) {
2017                 module = intf_modpriv->netcp_module;
2018                 dev_dbg(netcp_device->device, "Releasing module \"%s\"\n",
2019                         module->name);
2020                 if (module->release)
2021                         module->release(intf_modpriv->module_priv);
2022                 list_del(&intf_modpriv->intf_list);
2023                 kfree(intf_modpriv);
2024         }
2025         WARN(!list_empty(&netcp->module_head), "%s interface module list is not empty!\n",
2026              ndev->name);
2027
2028         list_del(&netcp->interface_list);
2029
2030         of_node_put(netcp->node_interface);
2031         unregister_netdev(ndev);
2032         netif_napi_del(&netcp->rx_napi);
2033         free_netdev(ndev);
2034 }
2035
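/* Probe: power up the NETCP hardware, create one interface per child of
 * the "netcp-interfaces" node, and probe every module that has already
 * registered against this new device instance.
 */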
2036 static int netcp_probe(struct platform_device *pdev)
2037 {
2038         struct device_node *node = pdev->dev.of_node;
2039         struct netcp_intf *netcp_intf, *netcp_tmp;
2040         struct device_node *child, *interfaces;
2041         struct netcp_device *netcp_device;
2042         struct device *dev = &pdev->dev;
2043         struct netcp_module *module;
2044         int ret;
2045
2046         if (!node) {
2047                 dev_err(dev, "could not find device info\n");
2048                 return -ENODEV;
2049         }
2050
2051         /* Allocate a new NETCP device instance */
2052         netcp_device = devm_kzalloc(dev, sizeof(*netcp_device), GFP_KERNEL);
2053         if (!netcp_device)
2054                 return -ENOMEM;
2055
2056         pm_runtime_enable(&pdev->dev);
2057         ret = pm_runtime_get_sync(&pdev->dev);
2058         if (ret < 0) {
2059                 dev_err(dev, "Failed to enable NETCP power-domain\n");
2060                 pm_runtime_disable(&pdev->dev);
2061                 return ret;
2062         }
2063
2064         /* Initialize the NETCP device instance */
2065         INIT_LIST_HEAD(&netcp_device->interface_head);
2066         INIT_LIST_HEAD(&netcp_device->modpriv_head);
2067         netcp_device->device = dev;
2068         platform_set_drvdata(pdev, netcp_device);
2069
2070         /* create interfaces */
2071         interfaces = of_get_child_by_name(node, "netcp-interfaces");
2072         if (!interfaces) {
2073                 dev_err(dev, "could not find netcp-interfaces node\n");
2074                 ret = -ENODEV;
2075                 goto probe_quit;
2076         }
2077
2078         for_each_available_child_of_node(interfaces, child) {
2079                 ret = netcp_create_interface(netcp_device, child);
2080                 if (ret) {
2081                         dev_err(dev, "could not create interface(%s)\n",
2082                                 child->name);
2083                         goto probe_quit_interface;
2084                 }
2085         }
        of_node_put(interfaces);        /* ref taken by of_get_child_by_name() */
2086
2087         /* Add the device instance to the list */
2088         list_add_tail(&netcp_device->device_list, &netcp_devices);
2089
2090         /* Probe & attach any modules already registered */
2091         mutex_lock(&netcp_modules_lock);
2092         for_each_netcp_module(module) {
2093                 ret = netcp_module_probe(netcp_device, module);
2094                 if (ret < 0)
2095                         dev_err(dev, "module(%s) probe failed\n", module->name);
2096         }
2097         mutex_unlock(&netcp_modules_lock);
2098         return 0;
2099
2100 probe_quit_interface:
2101         list_for_each_entry_safe(netcp_intf, netcp_tmp,
2102                                  &netcp_device->interface_head,
2103                                  interface_list) {
2104                 netcp_delete_interface(netcp_device, netcp_intf->ndev);
2105         }
        of_node_put(interfaces);
2106
2107 probe_quit:
2108         pm_runtime_put_sync(&pdev->dev);
2109         pm_runtime_disable(&pdev->dev);
2110         platform_set_drvdata(pdev, NULL);
2111         return ret;
2112 }
2113
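/* Remove: detach all modules from the device, delete the remaining
 * interfaces and release the runtime-PM reference taken at probe time.
 */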
2114 static int netcp_remove(struct platform_device *pdev)
2115 {
2116         struct netcp_device *netcp_device = platform_get_drvdata(pdev);
2117         struct netcp_intf *netcp_intf, *netcp_tmp;
2118         struct netcp_inst_modpriv *inst_modpriv, *tmp;
2119         struct netcp_module *module;
2120
2121         list_for_each_entry_safe(inst_modpriv, tmp, &netcp_device->modpriv_head,
2122                                  inst_list) {
2123                 module = inst_modpriv->netcp_module;
2124                 dev_dbg(&pdev->dev, "Removing module \"%s\"\n", module->name);
2125                 module->remove(netcp_device, inst_modpriv->module_priv);
2126                 list_del(&inst_modpriv->inst_list);
2127                 kfree(inst_modpriv);
2128         }
2129
2130         /* now that all modules are removed, clean up the interfaces */
2131         list_for_each_entry_safe(netcp_intf, netcp_tmp,
2132                                  &netcp_device->interface_head,
2133                                  interface_list) {
2134                 netcp_delete_interface(netcp_device, netcp_intf->ndev);
2135         }
2136
2137         WARN(!list_empty(&netcp_device->interface_head),
2138              "%s interface list not empty!\n", pdev->name);
2139
2140         pm_runtime_put_sync(&pdev->dev);
2141         pm_runtime_disable(&pdev->dev);
2142         platform_set_drvdata(pdev, NULL);
2143         return 0;
2144 }
2145
2146 static const struct of_device_id of_match[] = {
2147         { .compatible = "ti,netcp-1.0", },
2148         {},
2149 };
2150 MODULE_DEVICE_TABLE(of, of_match);
2151
2152 static struct platform_driver netcp_driver = {
2153         .driver = {
2154                 .name           = "netcp-1.0",
2155                 .of_match_table = of_match,
2156         },
2157         .probe = netcp_probe,
2158         .remove = netcp_remove,
2159 };
2160 module_platform_driver(netcp_driver);
2161
2162 MODULE_LICENSE("GPL v2");
2163 MODULE_DESCRIPTION("TI NETCP driver for Keystone SOCs");
2164 MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com>");