/*
 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/mdio.h>
#include <linux/sockios.h>
#include <linux/workqueue.h>
#include <linux/proc_fs.h>
#include <linux/rtnetlink.h>
#include <linux/firmware.h>
#include <linux/log2.h>
#include <asm/uaccess.h>

#include "common.h"
#include "cxgb3_ioctl.h"
#include "regs.h"
#include "cxgb3_offload.h"
#include "version.h"

#include "cxgb3_ctl_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"
enum {
	MAX_TXQ_ENTRIES = 16384,
	MAX_CTRL_TXQ_ENTRIES = 1024,
	MAX_RSPQ_ENTRIES = 16384,
	MAX_RX_BUFFERS = 16384,
	MAX_RX_JUMBO_BUFFERS = 16384,
	MIN_TXQ_ENTRIES = 4,
	MIN_CTRL_TXQ_ENTRIES = 4,
	MIN_RSPQ_ENTRIES = 32,
	MIN_FL_ENTRIES = 32
};
#define PORT_MASK ((1 << MAX_NPORTS) - 1)

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define EEPROM_MAGIC 0x38E2F10C

#define CH_DEVICE(devid, idx) \
	{ PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }
static const struct pci_device_id cxgb3_pci_tbl[] = {
	CH_DEVICE(0x20, 0),	/* PE9000 */
	CH_DEVICE(0x21, 1),	/* T302E */
	CH_DEVICE(0x22, 2),	/* T310E */
	CH_DEVICE(0x23, 3),	/* T320X */
	CH_DEVICE(0x24, 1),	/* T302X */
	CH_DEVICE(0x25, 3),	/* T320E */
	CH_DEVICE(0x26, 2),	/* T310X */
	CH_DEVICE(0x30, 2),	/* T3B10 */
	CH_DEVICE(0x31, 3),	/* T3B20 */
	CH_DEVICE(0x32, 1),	/* T3B02 */
	CH_DEVICE(0x35, 6),	/* T3C20-derived T3C10 */
	CH_DEVICE(0x36, 3),	/* S320E-CR */
	CH_DEVICE(0x37, 7),	/* N320E-G2 */
	{0,}
};
MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");
/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and pin interrupts
 * msi = 0: force pin interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");
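/*
 * Example (hypothetical invocation, not part of the driver): loading the
 * module with
 *
 *	modprobe cxgb3 msi=1
 *
 * makes the probe path skip pci_enable_msix() and fall back to
 * pci_enable_msi(), while msi=0 forces legacy INTx interrupts.
 */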
/*
 * The driver enables offload as a default.
 * To disable it, use ofld_disable = 1.
 */

static int ofld_disable = 0;

module_param(ofld_disable, int, 0644);
MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");
/*
 * We have work elements that we need to cancel when an interface is taken
 * down.  Normally the work elements would be executed by keventd but that
 * can deadlock because of linkwatch.  If our close method takes the rtnl
 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
 * for our work to complete.  Get our own work queue to solve this.
 */
static struct workqueue_struct *cxgb3_wq;
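/*
 * Usage sketch: the queue itself is allocated in the module init path
 * (not shown in this excerpt), e.g.
 *
 *	cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
 *	if (!cxgb3_wq)
 *		return -ENOMEM;
 *
 * Work that must not wait behind linkwatch is then queued with
 * queue_work(cxgb3_wq, ...) or queue_delayed_work(cxgb3_wq, ...) instead
 * of schedule_work().
 */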
/**
 *	link_report - show link status and link speed/duplex
 *	@dev: the port whose settings are to be reported
 *
 *	Shows the link status, speed, and duplex of a port.
 */
static void link_report(struct net_device *dev)
{
	if (!netif_carrier_ok(dev))
		printk(KERN_INFO "%s: link down\n", dev->name);
	else {
		const char *s = "10Mbps";
		const struct port_info *p = netdev_priv(dev);

		switch (p->link_config.speed) {
		case SPEED_10000:
			s = "10Gbps";
			break;
		case SPEED_1000:
			s = "1000Mbps";
			break;
		case SPEED_100:
			s = "100Mbps";
			break;
		}

		printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
		       p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
	}
}
static void enable_tx_fifo_drain(struct adapter *adapter,
				 struct port_info *pi)
{
	t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset, 0,
			 F_ENDROPPKT);
	t3_write_reg(adapter, A_XGM_RX_CTRL + pi->mac.offset, 0);
	t3_write_reg(adapter, A_XGM_TX_CTRL + pi->mac.offset, F_TXEN);
	t3_write_reg(adapter, A_XGM_RX_CTRL + pi->mac.offset, F_RXEN);
}

static void disable_tx_fifo_drain(struct adapter *adapter,
				  struct port_info *pi)
{
	t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset,
			 F_ENDROPPKT, 0);
}
void t3_os_link_fault(struct adapter *adap, int port_id, int state)
{
	struct net_device *dev = adap->port[port_id];
	struct port_info *pi = netdev_priv(dev);

	if (state == netif_carrier_ok(dev))
		return;

	if (state) {
		struct cmac *mac = &pi->mac;

		netif_carrier_on(dev);

		disable_tx_fifo_drain(adap, pi);

		/* Clear local faults */
		t3_xgm_intr_disable(adap, pi->port_id);
		t3_read_reg(adap, A_XGM_INT_STATUS +
			    pi->mac.offset);
		t3_write_reg(adap,
			     A_XGM_INT_CAUSE + pi->mac.offset,
			     F_XGM_INT);

		t3_set_reg_field(adap,
				 A_XGM_INT_ENABLE +
				 pi->mac.offset,
				 F_XGM_INT, F_XGM_INT);
		t3_xgm_intr_enable(adap, pi->port_id);

		t3_mac_enable(mac, MAC_DIRECTION_TX);
	} else {
		netif_carrier_off(dev);

		/* Flush TX FIFO */
		enable_tx_fifo_drain(adap, pi);
	}
	link_report(dev);
}
/**
 *	t3_os_link_changed - handle link status changes
 *	@adapter: the adapter associated with the link change
 *	@port_id: the port index whose link status has changed
 *	@link_stat: the new status of the link
 *	@speed: the new speed setting
 *	@duplex: the new duplex setting
 *	@pause: the new flow-control setting
 *
 *	This is the OS-dependent handler for link status changes.  The OS
 *	neutral handler takes care of most of the processing for these events,
 *	then calls this handler for any OS-specific processing.
 */
void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
			int speed, int duplex, int pause)
{
	struct net_device *dev = adapter->port[port_id];
	struct port_info *pi = netdev_priv(dev);
	struct cmac *mac = &pi->mac;

	/* Skip changes from disabled ports. */
	if (!netif_running(dev))
		return;

	if (link_stat != netif_carrier_ok(dev)) {
		if (link_stat) {
			disable_tx_fifo_drain(adapter, pi);

			t3_mac_enable(mac, MAC_DIRECTION_RX);

			/* Clear local faults */
			t3_xgm_intr_disable(adapter, pi->port_id);
			t3_read_reg(adapter, A_XGM_INT_STATUS +
				    pi->mac.offset);
			t3_write_reg(adapter,
				     A_XGM_INT_CAUSE + pi->mac.offset,
				     F_XGM_INT);

			t3_set_reg_field(adapter,
					 A_XGM_INT_ENABLE + pi->mac.offset,
					 F_XGM_INT, F_XGM_INT);
			t3_xgm_intr_enable(adapter, pi->port_id);

			netif_carrier_on(dev);
		} else {
			netif_carrier_off(dev);

			t3_xgm_intr_disable(adapter, pi->port_id);
			t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
			t3_set_reg_field(adapter,
					 A_XGM_INT_ENABLE + pi->mac.offset,
					 F_XGM_INT, 0);

			if (is_10G(adapter))
				pi->phy.ops->power_down(&pi->phy, 1);

			t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
			t3_mac_disable(mac, MAC_DIRECTION_RX);
			t3_link_start(&pi->phy, mac, &pi->link_config);

			/* Flush TX FIFO */
			enable_tx_fifo_drain(adapter, pi);
		}

		link_report(dev);
	}
}
/**
 *	t3_os_phymod_changed - handle PHY module changes
 *	@phy: the PHY reporting the module change
 *	@mod_type: new module type
 *
 *	This is the OS-dependent handler for PHY module changes.  It is
 *	invoked when a PHY module is removed or inserted for any OS-specific
 *	processing.
 */
void t3_os_phymod_changed(struct adapter *adap, int port_id)
{
	static const char *mod_str[] = {
		NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX", "unknown"
	};

	const struct net_device *dev = adap->port[port_id];
	const struct port_info *pi = netdev_priv(dev);

	if (pi->phy.modtype == phy_modtype_none)
		printk(KERN_INFO "%s: PHY module unplugged\n", dev->name);
	else
		printk(KERN_INFO "%s: %s PHY module inserted\n", dev->name,
		       mod_str[pi->phy.modtype]);
}
static void cxgb_set_rxmode(struct net_device *dev)
{
	struct t3_rx_mode rm;
	struct port_info *pi = netdev_priv(dev);

	init_rx_mode(&rm, dev, dev->mc_list);
	t3_mac_set_rx_mode(&pi->mac, &rm);
}
/**
 *	link_start - enable a port
 *	@dev: the device to enable
 *
 *	Performs the MAC and PHY actions needed to enable a port.
 */
static void link_start(struct net_device *dev)
{
	struct t3_rx_mode rm;
	struct port_info *pi = netdev_priv(dev);
	struct cmac *mac = &pi->mac;

	init_rx_mode(&rm, dev, dev->mc_list);

	t3_mac_set_mtu(mac, dev->mtu);
	t3_mac_set_address(mac, 0, dev->dev_addr);
	t3_mac_set_rx_mode(mac, &rm);
	t3_link_start(&pi->phy, mac, &pi->link_config);
	t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}
static inline void cxgb_disable_msi(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		pci_disable_msix(adapter->pdev);
		adapter->flags &= ~USING_MSIX;
	} else if (adapter->flags & USING_MSI) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~USING_MSI;
	}
}
/*
 * Interrupt handler for asynchronous events used with MSI-X.
 */
static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
{
	t3_slow_intr_handler(cookie);
	return IRQ_HANDLED;
}
/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
	int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;

	snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
	adap->msix_info[0].desc[n] = 0;

	for_each_port(adap, j) {
		struct net_device *d = adap->port[j];
		const struct port_info *pi = netdev_priv(d);

		for (i = 0; i < pi->nqsets; i++, msi_idx++) {
			snprintf(adap->msix_info[msi_idx].desc, n,
				 "%s-%d", d->name, pi->first_qset + i);
			adap->msix_info[msi_idx].desc[n] = 0;
		}
	}
}
static int request_msix_data_irqs(struct adapter *adap)
{
	int i, j, err, qidx = 0;

	for_each_port(adap, i) {
		int nqsets = adap2pinfo(adap, i)->nqsets;

		for (j = 0; j < nqsets; ++j) {
			err = request_irq(adap->msix_info[qidx + 1].vec,
					  t3_intr_handler(adap,
							  adap->sge.qs[qidx].
							  rspq.polling), 0,
					  adap->msix_info[qidx + 1].desc,
					  &adap->sge.qs[qidx]);
			if (err) {
				while (--qidx >= 0)
					free_irq(adap->msix_info[qidx + 1].vec,
						 &adap->sge.qs[qidx]);
				return err;
			}
			qidx++;
		}
	}
	return 0;
}
static void free_irq_resources(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		int i, n = 0;

		free_irq(adapter->msix_info[0].vec, adapter);
		for_each_port(adapter, i)
			n += adap2pinfo(adapter, i)->nqsets;

		for (i = 0; i < n; ++i)
			free_irq(adapter->msix_info[i + 1].vec,
				 &adapter->sge.qs[i]);
	} else
		free_irq(adapter->pdev->irq, adapter);
}
static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
			      unsigned long n)
{
	int attempts = 5;

	while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
		if (!--attempts)
			return -ETIMEDOUT;
		msleep(10);
	}
	return 0;
}
static int init_tp_parity(struct adapter *adap)
{
	int i;
	struct sk_buff *skb;
	struct cpl_set_tcb_field *greq;
	unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;

	t3_tp_set_offload_mode(adap, 1);

	for (i = 0; i < 16; i++) {
		struct cpl_smt_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL);
		if (!skb)
			skb = adap->nofail_skb;
		if (!skb)
			goto alloc_skb_fail;

		req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
		memset(req, 0, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
		req->mtu_idx = NMTUS - 1;
		req->iff = i;
		t3_mgmt_tx(adap, skb);
		if (skb == adap->nofail_skb) {
			await_mgmt_replies(adap, cnt, i + 1);
			adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
			if (!adap->nofail_skb)
				goto alloc_skb_fail;
		}
	}

	for (i = 0; i < 2048; i++) {
		struct cpl_l2t_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL);
		if (!skb)
			skb = adap->nofail_skb;
		if (!skb)
			goto alloc_skb_fail;

		req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
		memset(req, 0, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
		req->params = htonl(V_L2T_W_IDX(i));
		t3_mgmt_tx(adap, skb);
		if (skb == adap->nofail_skb) {
			await_mgmt_replies(adap, cnt, 16 + i + 1);
			adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
			if (!adap->nofail_skb)
				goto alloc_skb_fail;
		}
	}

	for (i = 0; i < 2048; i++) {
		struct cpl_rte_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL);
		if (!skb)
			skb = adap->nofail_skb;
		if (!skb)
			goto alloc_skb_fail;

		req = (struct cpl_rte_write_req *)__skb_put(skb, sizeof(*req));
		memset(req, 0, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
		req->l2t_idx = htonl(V_L2T_W_IDX(i));
		t3_mgmt_tx(adap, skb);
		if (skb == adap->nofail_skb) {
			await_mgmt_replies(adap, cnt, 16 + 2048 + i + 1);
			adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
			if (!adap->nofail_skb)
				goto alloc_skb_fail;
		}
	}

	skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
	if (!skb)
		skb = adap->nofail_skb;
	if (!skb)
		goto alloc_skb_fail;

	greq = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*greq));
	memset(greq, 0, sizeof(*greq));
	greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
	greq->mask = cpu_to_be64(1);
	t3_mgmt_tx(adap, skb);

	i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
	if (skb == adap->nofail_skb) {
		i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
		adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
	}

	t3_tp_set_offload_mode(adap, 0);
	return i;

alloc_skb_fail:
	t3_tp_set_offload_mode(adap, 0);
	return -ENOMEM;
}
/**
 *	setup_rss - configure RSS
 *	@adap: the adapter
 *
 *	Sets up RSS to distribute packets to multiple receive queues.  We
 *	configure the RSS CPU lookup table to distribute to the number of HW
 *	receive queues, and the response queue lookup table to narrow that
 *	down to the response queues actually configured for each port.
 *	We always configure the RSS mapping for two ports since the mapping
 *	table has plenty of entries.
 */
static void setup_rss(struct adapter *adap)
{
	int i;
	unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
	unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
	u8 cpus[SGE_QSETS + 1];
	u16 rspq_map[RSS_TABLE_SIZE];

	for (i = 0; i < SGE_QSETS; ++i)
		cpus[i] = i;
	cpus[SGE_QSETS] = 0xff;	/* terminator */

	for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
		rspq_map[i] = i % nq0;
		rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
	}

	t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
		      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
		      V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
}
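/*
 * Worked example for the mapping built in setup_rss(): with nq0 = 2 and
 * nq1 = 2, the first half of rspq_map cycles 0,1,0,1,... and the second
 * half cycles (i % nq1) + nq0 = 2,3,2,3,..., so hash results for each
 * port only ever select response queues owned by that port.
 */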
static void init_napi(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++) {
		struct sge_qset *qs = &adap->sge.qs[i];

		if (qs->adap)
			netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
				       64);
	}

	/*
	 * netif_napi_add() can be called only once per napi_struct because it
	 * adds each new napi_struct to a list.  Be careful not to call it a
	 * second time, e.g., during EEH recovery, by making a note of it.
	 */
	adap->flags |= NAPI_INIT;
}
/*
 * Wait until all NAPI handlers are descheduled.  This includes the handlers of
 * both netdevices representing interfaces and the dummy ones for the extra
 * queues.
 */
static void quiesce_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++)
		if (adap->sge.qs[i].adap)
			napi_disable(&adap->sge.qs[i].napi);
}

static void enable_all_napi(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++)
		if (adap->sge.qs[i].adap)
			napi_enable(&adap->sge.qs[i].napi);
}
/**
 *	set_qset_lro - Turn a queue set's LRO capability on and off
 *	@dev: the device the qset is attached to
 *	@qset_idx: the queue set index
 *	@val: the LRO switch
 *
 *	Sets LRO on or off for a particular queue set.
 *	The device's features flag is updated to reflect the LRO
 *	capability when all queues belonging to the device are
 *	configured with LRO.
 */
static void set_qset_lro(struct net_device *dev, int qset_idx, int val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	adapter->params.sge.qset[qset_idx].lro = !!val;
	adapter->sge.qs[qset_idx].lro_enabled = !!val;
}
/**
 *	setup_sge_qsets - configure SGE Tx/Rx/response queues
 *	@adap: the adapter
 *
 *	Determines how many sets of SGE queues to use and initializes them.
 *	We support multiple queue sets per port if we have MSI-X, otherwise
 *	just one queue set per port.
 */
static int setup_sge_qsets(struct adapter *adap)
{
	int i, j, err, irq_idx = 0, qset_idx = 0;
	unsigned int ntxq = SGE_TXQ_PER_SET;

	if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
		irq_idx = -1;

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		struct port_info *pi = netdev_priv(dev);

		pi->qs = &adap->sge.qs[pi->first_qset];
		for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
			set_qset_lro(dev, qset_idx, pi->rx_offload & T3_LRO);
			err = t3_sge_alloc_qset(adap, qset_idx, 1,
				(adap->flags & USING_MSIX) ? qset_idx + 1 :
							     irq_idx,
				&adap->params.sge.qset[qset_idx], ntxq, dev,
				netdev_get_tx_queue(dev, j));
			if (err) {
				t3_free_sge_resources(adap);
				return err;
			}
		}
	}

	return 0;
}
static ssize_t attr_show(struct device *d, char *buf,
			 ssize_t(*format) (struct net_device *, char *))
{
	ssize_t len;

	/* Synchronize with ioctls that may shut down the device */
	rtnl_lock();
	len = (*format) (to_net_dev(d), buf);
	rtnl_unlock();
	return len;
}

static ssize_t attr_store(struct device *d,
			  const char *buf, size_t len,
			  ssize_t(*set) (struct net_device *, unsigned int),
			  unsigned int min_val, unsigned int max_val)
{
	char *endp;
	ssize_t ret;
	unsigned int val;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	val = simple_strtoul(buf, &endp, 0);
	if (endp == buf || val < min_val || val > max_val)
		return -EINVAL;

	rtnl_lock();
	ret = (*set) (to_net_dev(d), val);
	if (!ret)
		ret = len;
	rtnl_unlock();
	return ret;
}
#define CXGB3_SHOW(name, val_expr) \
static ssize_t format_##name(struct net_device *dev, char *buf) \
{ \
	struct port_info *pi = netdev_priv(dev); \
	struct adapter *adap = pi->adapter; \
	return sprintf(buf, "%u\n", val_expr); \
} \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
			   char *buf) \
{ \
	return attr_show(d, buf, format_##name); \
}
static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;

	if (adap->flags & FULL_INIT_DONE)
		return -EBUSY;
	if (val && adap->params.rev == 0)
		return -EINVAL;
	if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
	    min_tids)
		return -EINVAL;
	adap->params.mc5.nfilters = val;
	return 0;
}

static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
			      const char *buf, size_t len)
{
	return attr_store(d, buf, len, set_nfilters, 0, ~0);
}
static ssize_t set_nservers(struct net_device *dev, unsigned int val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;

	if (adap->flags & FULL_INIT_DONE)
		return -EBUSY;
	if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
	    MC5_MIN_TIDS)
		return -EINVAL;
	adap->params.mc5.nservers = val;
	return 0;
}

static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
			      const char *buf, size_t len)
{
	return attr_store(d, buf, len, set_nservers, 0, ~0);
}
#define CXGB3_ATTR_R(name, val_expr) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)

#define CXGB3_ATTR_RW(name, val_expr, store_method) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)

CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);

static struct attribute *cxgb3_attrs[] = {
	&dev_attr_cam_size.attr,
	&dev_attr_nfilters.attr,
	&dev_attr_nservers.attr,
	NULL
};

static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };
static ssize_t tm_attr_show(struct device *d,
			    char *buf, int sched)
{
	struct port_info *pi = netdev_priv(to_net_dev(d));
	struct adapter *adap = pi->adapter;
	unsigned int v, addr, bpt, cpt;
	ssize_t len;

	addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
	rtnl_lock();	/* synchronize with adapter reset */
	t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
	v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
	if (sched & 1)
		v >>= 16;
	bpt = (v >> 8) & 0xff;
	cpt = v & 0xff;
	if (!cpt)
		len = sprintf(buf, "disabled\n");
	else {
		v = (adap->params.vpd.cclk * 1000) / cpt;
		len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
	}
	rtnl_unlock();
	return len;
}
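/*
 * Derivation of the rate printed by tm_attr_show() above: the scheduler
 * moves bpt bytes every cpt core-clock ticks.  With the core clock
 * (cclk) given in kHz, v = (cclk * 1000) / cpt is scheduling periods per
 * second, so bytes/sec = v * bpt and Kbps = v * bpt * 8 / 1000, i.e.
 * (v * bpt) / 125.
 */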
static ssize_t tm_attr_store(struct device *d,
			     const char *buf, size_t len, int sched)
{
	struct port_info *pi = netdev_priv(to_net_dev(d));
	struct adapter *adap = pi->adapter;
	unsigned int val;
	char *endp;
	ssize_t ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	val = simple_strtoul(buf, &endp, 0);
	if (endp == buf || val > 10000000)
		return -EINVAL;

	rtnl_lock();
	ret = t3_config_sched(adap, val, sched);
	if (!ret)
		ret = len;
	rtnl_unlock();
	return ret;
}

#define TM_ATTR(name, sched) \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
			   char *buf) \
{ \
	return tm_attr_show(d, buf, sched); \
} \
static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
			    const char *buf, size_t len) \
{ \
	return tm_attr_store(d, buf, len, sched); \
} \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)
TM_ATTR(sched0, 0);
TM_ATTR(sched1, 1);
TM_ATTR(sched2, 2);
TM_ATTR(sched3, 3);
TM_ATTR(sched4, 4);
TM_ATTR(sched5, 5);
TM_ATTR(sched6, 6);
TM_ATTR(sched7, 7);

static struct attribute *offload_attrs[] = {
	&dev_attr_sched0.attr,
	&dev_attr_sched1.attr,
	&dev_attr_sched2.attr,
	&dev_attr_sched3.attr,
	&dev_attr_sched4.attr,
	&dev_attr_sched5.attr,
	&dev_attr_sched6.attr,
	&dev_attr_sched7.attr,
	NULL
};

static struct attribute_group offload_attr_group = {.attrs = offload_attrs };
/*
 * Sends an sk_buff to an offload queue driver
 * after dealing with any active network taps.
 */
static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
{
	int ret;

	local_bh_disable();
	ret = t3_offload_tx(tdev, skb);
	local_bh_enable();
	return ret;
}

static int write_smt_entry(struct adapter *adapter, int idx)
{
	struct cpl_smt_write_req *req;
	struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);

	if (!skb)
		return -ENOMEM;

	req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
	req->mtu_idx = NMTUS - 1;	/* should be 0 but there's a T3 bug */
	req->iff = idx;
	memset(req->src_mac1, 0, sizeof(req->src_mac1));
	memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
	skb->priority = 1;
	offload_tx(&adapter->tdev, skb);
	return 0;
}
static int init_smt(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i)
		write_smt_entry(adapter, i);
	return 0;
}

static void init_port_mtus(struct adapter *adapter)
{
	unsigned int mtus = adapter->port[0]->mtu;

	if (adapter->port[1])
		mtus |= adapter->port[1]->mtu << 16;
	t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
}
static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
			     int hi, int port)
{
	struct sk_buff *skb;
	struct mngt_pktsched_wr *req;
	int ret;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		skb = adap->nofail_skb;
	if (!skb)
		return -ENOMEM;

	req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
	req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
	req->sched = sched;
	req->idx = qidx;
	req->min = lo;
	req->max = hi;
	req->binding = port;
	ret = t3_mgmt_tx(adap, skb);
	if (skb == adap->nofail_skb) {
		adap->nofail_skb = alloc_skb(sizeof(struct cpl_set_tcb_field),
					     GFP_KERNEL);
		if (!adap->nofail_skb)
			ret = -ENOMEM;
	}

	return ret;
}
static int bind_qsets(struct adapter *adap)
{
	int i, j, err = 0;

	for_each_port(adap, i) {
		const struct port_info *pi = adap2pinfo(adap, i);

		for (j = 0; j < pi->nqsets; ++j) {
			int ret = send_pktsched_cmd(adap, 1,
						    pi->first_qset + j, -1,
						    -1, i);
			if (ret)
				err = ret;
		}
	}

	return err;
}
#define FW_FNAME "cxgb3/t3fw-%d.%d.%d.bin"
#define TPSRAM_NAME "cxgb3/t3%c_psram-%d.%d.%d.bin"
#define AEL2005_OPT_EDC_NAME "cxgb3/ael2005_opt_edc.bin"
#define AEL2005_TWX_EDC_NAME "cxgb3/ael2005_twx_edc.bin"
#define AEL2020_TWX_EDC_NAME "cxgb3/ael2020_twx_edc.bin"

static inline const char *get_edc_fw_name(int edc_idx)
{
	const char *fw_name = NULL;

	switch (edc_idx) {
	case EDC_OPT_AEL2005:
		fw_name = AEL2005_OPT_EDC_NAME;
		break;
	case EDC_TWX_AEL2005:
		fw_name = AEL2005_TWX_EDC_NAME;
		break;
	case EDC_TWX_AEL2020:
		fw_name = AEL2020_TWX_EDC_NAME;
		break;
	default:
		break;
	}

	return fw_name;
}
int t3_get_edc_fw(struct cphy *phy, int edc_idx, int size)
{
	struct adapter *adapter = phy->adapter;
	const struct firmware *fw;
	char buf[64];
	u32 csum;
	const __be32 *p;
	u16 *cache = phy->phy_cache;
	int i, ret;

	snprintf(buf, sizeof(buf), get_edc_fw_name(edc_idx));

	ret = request_firmware(&fw, buf, &adapter->pdev->dev);
	if (ret < 0) {
		dev_err(&adapter->pdev->dev,
			"could not upgrade firmware: unable to load %s\n",
			buf);
		return ret;
	}

	/* check size, taking the checksum into account */
	if (fw->size > size + 4) {
		CH_ERR(adapter, "firmware image too large %u, expected %d\n",
		       (unsigned int)fw->size, size + 4);
		ret = -EINVAL;
	}

	/* compute checksum */
	p = (const __be32 *)fw->data;
	for (csum = 0, i = 0; i < fw->size / sizeof(csum); i++)
		csum += ntohl(p[i]);

	if (csum != 0xffffffff) {
		CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
		       csum);
		ret = -EINVAL;
	}

	for (i = 0; i < size / 4 ; i++) {
		*cache++ = (be32_to_cpu(p[i]) & 0xffff0000) >> 16;
		*cache++ = be32_to_cpu(p[i]) & 0xffff;
	}

	release_firmware(fw);

	return ret;
}
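/*
 * Minimal standalone sketch (not used by the driver) of the image check
 * performed in t3_get_edc_fw() above: an image is considered intact when
 * the 32-bit sum of all of its big-endian words, including the trailing
 * checksum word, equals 0xffffffff.
 */
static inline int edc_image_intact(const __be32 *words, size_t nwords)
{
	u32 sum = 0;
	size_t i;

	for (i = 0; i < nwords; i++)
		sum += be32_to_cpu(words[i]);	/* wraps mod 2^32, as above */
	return sum == 0xffffffff;
}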
static int upgrade_fw(struct adapter *adap)
{
	int ret;
	char buf[64];
	const struct firmware *fw;
	struct device *dev = &adap->pdev->dev;

	snprintf(buf, sizeof(buf), FW_FNAME, FW_VERSION_MAJOR,
		 FW_VERSION_MINOR, FW_VERSION_MICRO);
	ret = request_firmware(&fw, buf, dev);
	if (ret < 0) {
		dev_err(dev, "could not upgrade firmware: unable to load %s\n",
			buf);
		return ret;
	}
	ret = t3_load_fw(adap, fw->data, fw->size);
	release_firmware(fw);

	if (ret == 0)
		dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
			 FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
	else
		dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
			FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);

	return ret;
}
static inline char t3rev2char(struct adapter *adapter)
{
	char rev = 0;

	switch(adapter->params.rev) {
	case T3_REV_B:
	case T3_REV_B2:
		rev = 'b';
		break;
	case T3_REV_C:
		rev = 'c';
		break;
	}
	return rev;
}
static int update_tpsram(struct adapter *adap)
{
	const struct firmware *tpsram;
	char buf[64];
	struct device *dev = &adap->pdev->dev;
	int ret;
	char rev;

	rev = t3rev2char(adap);
	if (!rev)
		return 0;

	snprintf(buf, sizeof(buf), TPSRAM_NAME, rev,
		 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);

	ret = request_firmware(&tpsram, buf, dev);
	if (ret < 0) {
		dev_err(dev, "could not load TP SRAM: unable to load %s\n",
			buf);
		return ret;
	}

	ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
	if (ret)
		goto release_tpsram;

	ret = t3_set_proto_sram(adap, tpsram->data);
	if (ret == 0)
		dev_info(dev,
			 "successful update of protocol engine "
			 "to %d.%d.%d\n",
			 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
	else
		dev_err(dev, "failed to update protocol engine %d.%d.%d\n",
			TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
	if (ret)
		dev_err(dev, "loading protocol SRAM failed\n");

release_tpsram:
	release_firmware(tpsram);

	return ret;
}
/**
 *	cxgb_up - enable the adapter
 *	@adapter: adapter being enabled
 *
 *	Called when the first port is enabled, this function performs the
 *	actions necessary to make an adapter operational, such as completing
 *	the initialization of HW modules, and enabling interrupts.
 *
 *	Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
	int err;

	if (!(adap->flags & FULL_INIT_DONE)) {
		err = t3_check_fw_version(adap);
		if (err == -EINVAL) {
			err = upgrade_fw(adap);
			CH_WARN(adap, "FW upgrade to %d.%d.%d %s\n",
				FW_VERSION_MAJOR, FW_VERSION_MINOR,
				FW_VERSION_MICRO, err ? "failed" : "succeeded");
		}

		err = t3_check_tpsram_version(adap);
		if (err == -EINVAL) {
			err = update_tpsram(adap);
			CH_WARN(adap, "TP upgrade to %d.%d.%d %s\n",
				TP_VERSION_MAJOR, TP_VERSION_MINOR,
				TP_VERSION_MICRO, err ? "failed" : "succeeded");
		}

		/*
		 * Clear interrupts now to catch errors if t3_init_hw fails.
		 * We clear them again later as initialization may trigger
		 * conditions that can interrupt.
		 */
		t3_intr_clear(adap);

		err = t3_init_hw(adap, 0);
		if (err)
			goto out;

		t3_set_reg_field(adap, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
		t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));

		err = setup_sge_qsets(adap);
		if (err)
			goto out;

		setup_rss(adap);
		if (!(adap->flags & NAPI_INIT))
			init_napi(adap);

		t3_start_sge_timers(adap);
		adap->flags |= FULL_INIT_DONE;
	}

	t3_intr_clear(adap);

	if (adap->flags & USING_MSIX) {
		name_msix_vecs(adap);
		err = request_irq(adap->msix_info[0].vec,
				  t3_async_intr_handler, 0,
				  adap->msix_info[0].desc, adap);
		if (err)
			goto irq_err;

		err = request_msix_data_irqs(adap);
		if (err) {
			free_irq(adap->msix_info[0].vec, adap);
			goto irq_err;
		}
	} else if ((err = request_irq(adap->pdev->irq,
				      t3_intr_handler(adap,
						      adap->sge.qs[0].rspq.
						      polling),
				      (adap->flags & USING_MSI) ?
				      0 : IRQF_SHARED,
				      adap->name, adap)))
		goto irq_err;

	enable_all_napi(adap);
	t3_sge_start(adap);
	t3_intr_enable(adap);

	if (adap->params.rev >= T3_REV_C && !(adap->flags & TP_PARITY_INIT) &&
	    is_offload(adap) && init_tp_parity(adap) == 0)
		adap->flags |= TP_PARITY_INIT;

	if (adap->flags & TP_PARITY_INIT) {
		t3_write_reg(adap, A_TP_INT_CAUSE,
			     F_CMCACHEPERR | F_ARPLUTPERR);
		t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff);
	}

	if (!(adap->flags & QUEUES_BOUND)) {
		err = bind_qsets(adap);
		if (err) {
			CH_ERR(adap, "failed to bind qsets, err %d\n", err);
			t3_intr_disable(adap);
			free_irq_resources(adap);
			goto out;
		}
		adap->flags |= QUEUES_BOUND;
	}

out:
	return err;
irq_err:
	CH_ERR(adap, "request_irq failed, err %d\n", err);
	goto out;
}
/*
 * Release resources when all the ports and offloading have been stopped.
 */
static void cxgb_down(struct adapter *adapter)
{
	t3_sge_stop(adapter);
	spin_lock_irq(&adapter->work_lock);	/* sync with PHY intr task */
	t3_intr_disable(adapter);
	spin_unlock_irq(&adapter->work_lock);

	free_irq_resources(adapter);
	quiesce_rx(adapter);
	t3_sge_stop(adapter);
	flush_workqueue(cxgb3_wq);	/* wait for external IRQ handler */
}
static void schedule_chk_task(struct adapter *adap)
{
	unsigned int timeo;

	timeo = adap->params.linkpoll_period ?
	    (HZ * adap->params.linkpoll_period) / 10 :
	    adap->params.stats_update_period * HZ;
	if (timeo)
		queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
}
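/*
 * Note on the conversion in schedule_chk_task(): linkpoll_period is kept
 * in tenths of a second, hence HZ * period / 10 jiffies, while
 * stats_update_period is in whole seconds.  E.g. (hypothetical value)
 * linkpoll_period = 5 polls the link twice per second.
 */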
static int offload_open(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct t3cdev *tdev = dev2t3cdev(dev);
	int adap_up = adapter->open_device_map & PORT_MASK;
	int err;

	if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
		return 0;

	if (!adap_up && (err = cxgb_up(adapter)) < 0)
		goto out;

	t3_tp_set_offload_mode(adapter, 1);
	tdev->lldev = adapter->port[0];
	err = cxgb3_offload_activate(adapter);
	if (err)
		goto out;

	init_port_mtus(adapter);
	t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
		     adapter->params.b_wnd,
		     adapter->params.rev == 0 ?
		     adapter->port[0]->mtu : 0xffff);
	init_smt(adapter);

	if (sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group))
		dev_dbg(&dev->dev, "cannot create sysfs group\n");

	/* Call back all registered clients */
	cxgb3_add_clients(tdev);

out:
	/* restore them in case the offload module has changed them */
	if (err) {
		t3_tp_set_offload_mode(adapter, 0);
		clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
		cxgb3_set_dummy_ops(tdev);
	}
	return err;
}
static int offload_close(struct t3cdev *tdev)
{
	struct adapter *adapter = tdev2adap(tdev);

	if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
		return 0;

	/* Call back all registered clients */
	cxgb3_remove_clients(tdev);

	sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);

	/* Flush work scheduled while releasing TIDs */
	flush_scheduled_work();

	tdev->lldev = NULL;
	cxgb3_set_dummy_ops(tdev);
	t3_tp_set_offload_mode(adapter, 0);
	clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);

	if (!adapter->open_device_map)
		cxgb_down(adapter);

	cxgb3_offload_deactivate(adapter);
	return 0;
}
static int cxgb_open(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int other_ports = adapter->open_device_map & PORT_MASK;
	int err;

	if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
		return err;

	set_bit(pi->port_id, &adapter->open_device_map);
	if (is_offload(adapter) && !ofld_disable) {
		err = offload_open(dev);
		if (err)
			printk(KERN_WARNING
			       "Could not initialize offload capabilities\n");
	}

	dev->real_num_tx_queues = pi->nqsets;
	link_start(dev);
	t3_port_intr_enable(adapter, pi->port_id);
	netif_tx_start_all_queues(dev);
	if (!other_ports)
		schedule_chk_task(adapter);

	cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_UP, pi->port_id);
	return 0;
}
static int cxgb_close(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	if (!adapter->open_device_map)
		return 0;

	/* Stop link fault interrupts */
	t3_xgm_intr_disable(adapter, pi->port_id);
	t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);

	t3_port_intr_disable(adapter, pi->port_id);
	netif_tx_stop_all_queues(dev);
	pi->phy.ops->power_down(&pi->phy, 1);
	netif_carrier_off(dev);
	t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);

	spin_lock_irq(&adapter->work_lock);	/* sync with update task */
	clear_bit(pi->port_id, &adapter->open_device_map);
	spin_unlock_irq(&adapter->work_lock);

	if (!(adapter->open_device_map & PORT_MASK))
		cancel_delayed_work_sync(&adapter->adap_check_task);

	if (!adapter->open_device_map)
		cxgb_down(adapter);

	cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_DOWN, pi->port_id);
	return 0;
}
static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct net_device_stats *ns = &pi->netstats;
	const struct mac_stats *pstats;

	spin_lock(&adapter->stats_lock);
	pstats = t3_mac_update_stats(&pi->mac);
	spin_unlock(&adapter->stats_lock);

	ns->tx_bytes = pstats->tx_octets;
	ns->tx_packets = pstats->tx_frames;
	ns->rx_bytes = pstats->rx_octets;
	ns->rx_packets = pstats->rx_frames;
	ns->multicast = pstats->rx_mcast_frames;

	ns->tx_errors = pstats->tx_underrun;
	ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
	    pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
	    pstats->rx_fifo_ovfl;

	/* detailed rx_errors */
	ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
	ns->rx_over_errors = 0;
	ns->rx_crc_errors = pstats->rx_fcs_errs;
	ns->rx_frame_errors = pstats->rx_symbol_errs;
	ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
	ns->rx_missed_errors = pstats->rx_cong_drops;

	/* detailed tx_errors */
	ns->tx_aborted_errors = 0;
	ns->tx_carrier_errors = 0;
	ns->tx_fifo_errors = pstats->tx_underrun;
	ns->tx_heartbeat_errors = 0;
	ns->tx_window_errors = 0;
	return ns;
}
static u32 get_msglevel(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	return adapter->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	adapter->msg_enable = val;
}
static char stats_strings[][ETH_GSTRING_LEN] = {
	"TxOctetsOK         ",
	"TxFramesOK         ",
	"TxMulticastFramesOK",
	"TxBroadcastFramesOK",
	"TxPauseFrames      ",
	"TxUnderrun         ",
	"TxExtUnderrun      ",

	"TxFrames64         ",
	"TxFrames65To127    ",
	"TxFrames128To255   ",
	"TxFrames256To511   ",
	"TxFrames512To1023  ",
	"TxFrames1024To1518 ",
	"TxFrames1519ToMax  ",

	"RxOctetsOK         ",
	"RxFramesOK         ",
	"RxMulticastFramesOK",
	"RxBroadcastFramesOK",
	"RxPauseFrames      ",
	"RxFCSErrors        ",
	"RxSymbolErrors     ",
	"RxShortErrors      ",
	"RxJabberErrors     ",
	"RxLengthErrors     ",
	"RxFIFOoverflow     ",

	"RxFrames64         ",
	"RxFrames65To127    ",
	"RxFrames128To255   ",
	"RxFrames256To511   ",
	"RxFrames512To1023  ",
	"RxFrames1024To1518 ",
	"RxFrames1519ToMax  ",

	"PhyFIFOErrors      ",
	"TSO                ",
	"VLANextractions    ",
	"VLANinsertions     ",
	"TxCsumOffload      ",
	"RxCsumGood         ",
	"LroAggregated      ",
	"LroFlushed         ",
	"LroNoDesc          ",
	"RxDrops            ",

	"CheckTXEnToggled   ",
	"CheckResets        ",

	"LinkFaults         ",
};

static int get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(stats_strings);
	default:
		return -EOPNOTSUPP;
	}
}
#define T3_REGMAP_SIZE (3 * 1024)

static int get_regs_len(struct net_device *dev)
{
	return T3_REGMAP_SIZE;
}

static int get_eeprom_len(struct net_device *dev)
{
	return EEPROMSIZE;
}
static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	u32 fw_vers = 0;
	u32 tp_vers = 0;

	spin_lock(&adapter->stats_lock);
	t3_get_fw_version(adapter, &fw_vers);
	t3_get_tp_version(adapter, &tp_vers);
	spin_unlock(&adapter->stats_lock);

	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->bus_info, pci_name(adapter->pdev));
	if (!fw_vers)
		strcpy(info->fw_version, "N/A");
	else {
		snprintf(info->fw_version, sizeof(info->fw_version),
			 "%s %u.%u.%u TP %u.%u.%u",
			 G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
			 G_FW_VERSION_MAJOR(fw_vers),
			 G_FW_VERSION_MINOR(fw_vers),
			 G_FW_VERSION_MICRO(fw_vers),
			 G_TP_VERSION_MAJOR(tp_vers),
			 G_TP_VERSION_MINOR(tp_vers),
			 G_TP_VERSION_MICRO(tp_vers));
	}
}
static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, stats_strings, sizeof(stats_strings));
}

static unsigned long collect_sge_port_stats(struct adapter *adapter,
					    struct port_info *p, int idx)
{
	int i;
	unsigned long tot = 0;

	for (i = p->first_qset; i < p->first_qset + p->nqsets; ++i)
		tot += adapter->sge.qs[i].port_stats[idx];
	return tot;
}
static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
		      u64 *data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	const struct mac_stats *s;

	spin_lock(&adapter->stats_lock);
	s = t3_mac_update_stats(&pi->mac);
	spin_unlock(&adapter->stats_lock);

	*data++ = s->tx_octets;
	*data++ = s->tx_frames;
	*data++ = s->tx_mcast_frames;
	*data++ = s->tx_bcast_frames;
	*data++ = s->tx_pause;
	*data++ = s->tx_underrun;
	*data++ = s->tx_fifo_urun;

	*data++ = s->tx_frames_64;
	*data++ = s->tx_frames_65_127;
	*data++ = s->tx_frames_128_255;
	*data++ = s->tx_frames_256_511;
	*data++ = s->tx_frames_512_1023;
	*data++ = s->tx_frames_1024_1518;
	*data++ = s->tx_frames_1519_max;

	*data++ = s->rx_octets;
	*data++ = s->rx_frames;
	*data++ = s->rx_mcast_frames;
	*data++ = s->rx_bcast_frames;
	*data++ = s->rx_pause;
	*data++ = s->rx_fcs_errs;
	*data++ = s->rx_symbol_errs;
	*data++ = s->rx_short;
	*data++ = s->rx_jabber;
	*data++ = s->rx_too_long;
	*data++ = s->rx_fifo_ovfl;

	*data++ = s->rx_frames_64;
	*data++ = s->rx_frames_65_127;
	*data++ = s->rx_frames_128_255;
	*data++ = s->rx_frames_256_511;
	*data++ = s->rx_frames_512_1023;
	*data++ = s->rx_frames_1024_1518;
	*data++ = s->rx_frames_1519_max;

	*data++ = pi->phy.fifo_errors;

	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
	*data++ = 0;
	*data++ = 0;
	*data++ = 0;
	*data++ = s->rx_cong_drops;

	*data++ = s->num_toggled;
	*data++ = s->num_resets;

	*data++ = s->link_faults;
}
static inline void reg_block_dump(struct adapter *ap, void *buf,
				  unsigned int start, unsigned int end)
{
	u32 *p = buf + start;

	for (; start <= end; start += sizeof(u32))
		*p++ = t3_read_reg(ap, start);
}

static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
		     void *buf)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *ap = pi->adapter;

	/*
	 * Version scheme:
	 * bits 0..9: chip version
	 * bits 10..15: chip revision
	 * bit 31: set for PCIe cards
	 */
	regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);

	/*
	 * We skip the MAC statistics registers because they are clear-on-read.
	 * Also reading multi-register stats would need to synchronize with the
	 * periodic mac stats accumulation.  Hard to justify the complexity.
	 */
	memset(buf, 0, T3_REGMAP_SIZE);
	reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
	reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
	reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
	reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
	reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
	reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
		       XGM_REG(A_XGM_SERDES_STAT3, 1));
	reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
		       XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
}
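/*
 * Illustrative decode of the regs->version word composed in get_regs()
 * above (hypothetical helpers, not used by the driver):
 */
static inline unsigned int regs_version_chip(u32 version)
{
	return version & 0x3ff;		/* bits 0..9: chip version (3 = T3) */
}

static inline unsigned int regs_version_rev(u32 version)
{
	return (version >> 10) & 0x3f;	/* bits 10..15: chip revision */
}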
static int restart_autoneg(struct net_device *dev)
{
	struct port_info *p = netdev_priv(dev);

	if (!netif_running(dev))
		return -EAGAIN;
	if (p->link_config.autoneg != AUTONEG_ENABLE)
		return -EINVAL;
	p->phy.ops->autoneg_restart(&p->phy);
	return 0;
}
static int cxgb3_phys_id(struct net_device *dev, u32 data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int i;

	if (data == 0)
		data = 2;

	for (i = 0; i < data * 2; i++) {
		t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
				 (i & 1) ? F_GPIO0_OUT_VAL : 0);
		if (msleep_interruptible(500))
			break;
	}
	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
			 F_GPIO0_OUT_VAL);
	return 0;
}
static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct port_info *p = netdev_priv(dev);

	cmd->supported = p->link_config.supported;
	cmd->advertising = p->link_config.advertising;

	if (netif_carrier_ok(dev)) {
		cmd->speed = p->link_config.speed;
		cmd->duplex = p->link_config.duplex;
	} else {
		cmd->speed = -1;
		cmd->duplex = -1;
	}

	cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
	cmd->phy_address = p->phy.mdio.prtad;
	cmd->transceiver = XCVR_EXTERNAL;
	cmd->autoneg = p->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
static int speed_duplex_to_caps(int speed, int duplex)
{
	int cap = 0;

	switch (speed) {
	case SPEED_10:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_10baseT_Full;
		else
			cap = SUPPORTED_10baseT_Half;
		break;
	case SPEED_100:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_100baseT_Full;
		else
			cap = SUPPORTED_100baseT_Half;
		break;
	case SPEED_1000:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_1000baseT_Full;
		else
			cap = SUPPORTED_1000baseT_Half;
		break;
	case SPEED_10000:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_10000baseT_Full;
	}
	return cap;
}

#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
		      ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
		      ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
		      ADVERTISED_10000baseT_Full)
static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_config;

	if (!(lc->supported & SUPPORTED_Autoneg)) {
		/*
		 * PHY offers a single speed/duplex.  See if that's what's
		 * being requested.
		 */
		if (cmd->autoneg == AUTONEG_DISABLE) {
			int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
			if (lc->supported & cap)
				return 0;
		}
		return -EINVAL;
	}

	if (cmd->autoneg == AUTONEG_DISABLE) {
		int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);

		if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
			return -EINVAL;
		lc->requested_speed = cmd->speed;
		lc->requested_duplex = cmd->duplex;
		lc->advertising = 0;
	} else {
		cmd->advertising &= ADVERTISED_MASK;
		cmd->advertising &= lc->supported;
		if (!cmd->advertising)
			return -EINVAL;
		lc->requested_speed = SPEED_INVALID;
		lc->requested_duplex = DUPLEX_INVALID;
		lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
	}
	lc->autoneg = cmd->autoneg;
	if (netif_running(dev))
		t3_link_start(&p->phy, &p->mac, lc);
	return 0;
}
static void get_pauseparam(struct net_device *dev,
			   struct ethtool_pauseparam *epause)
{
	struct port_info *p = netdev_priv(dev);

	epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
	epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
	epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
}
static int set_pauseparam(struct net_device *dev,
			  struct ethtool_pauseparam *epause)
{
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_config;

	if (epause->autoneg == AUTONEG_DISABLE)
		lc->requested_fc = 0;
	else if (lc->supported & SUPPORTED_Autoneg)
		lc->requested_fc = PAUSE_AUTONEG;
	else
		return -EINVAL;

	if (epause->rx_pause)
		lc->requested_fc |= PAUSE_RX;
	if (epause->tx_pause)
		lc->requested_fc |= PAUSE_TX;
	if (lc->autoneg == AUTONEG_ENABLE) {
		if (netif_running(dev))
			t3_link_start(&p->phy, &p->mac, lc);
	} else {
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
		if (netif_running(dev))
			t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
	}
	return 0;
}
static u32 get_rx_csum(struct net_device *dev)
{
	struct port_info *p = netdev_priv(dev);

	return p->rx_offload & T3_RX_CSUM;
}

static int set_rx_csum(struct net_device *dev, u32 data)
{
	struct port_info *p = netdev_priv(dev);

	if (data) {
		p->rx_offload |= T3_RX_CSUM;
	} else {
		int i;

		p->rx_offload &= ~(T3_RX_CSUM | T3_LRO);
		for (i = p->first_qset; i < p->first_qset + p->nqsets; i++)
			set_qset_lro(dev, i, 0);
	}
	return 0;
}
static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];

	e->rx_max_pending = MAX_RX_BUFFERS;
	e->rx_mini_max_pending = 0;
	e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
	e->tx_max_pending = MAX_TXQ_ENTRIES;

	e->rx_pending = q->fl_size;
	e->rx_mini_pending = q->rspq_size;
	e->rx_jumbo_pending = q->jumbo_size;
	e->tx_pending = q->txq_size[0];
}
static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct qset_params *q;
	int i;

	if (e->rx_pending > MAX_RX_BUFFERS ||
	    e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
	    e->tx_pending > MAX_TXQ_ENTRIES ||
	    e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
	    e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
	    e->rx_pending < MIN_FL_ENTRIES ||
	    e->rx_jumbo_pending < MIN_FL_ENTRIES ||
	    e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
		return -EINVAL;

	if (adapter->flags & FULL_INIT_DONE)
		return -EBUSY;

	q = &adapter->params.sge.qset[pi->first_qset];
	for (i = 0; i < pi->nqsets; ++i, ++q) {
		q->rspq_size = e->rx_mini_pending;
		q->fl_size = e->rx_pending;
		q->jumbo_size = e->rx_jumbo_pending;
		q->txq_size[0] = e->tx_pending;
		q->txq_size[1] = e->tx_pending;
		q->txq_size[2] = e->tx_pending;
	}
	return 0;
}
static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct qset_params *qsp = &adapter->params.sge.qset[0];
	struct sge_qset *qs = &adapter->sge.qs[0];

	if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
		return -EINVAL;

	qsp->coalesce_usecs = c->rx_coalesce_usecs;
	t3_update_qset_coalesce(qs, qsp);
	return 0;
}
static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct qset_params *q = adapter->params.sge.qset;

	c->rx_coalesce_usecs = q->coalesce_usecs;
	return 0;
}
static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
		      u8 * data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int i, err = 0;

	u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	e->magic = EEPROM_MAGIC;
	for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
		err = t3_seeprom_read(adapter, i, (__le32 *) & buf[i]);

	if (!err)
		memcpy(data, buf + e->offset, e->len);
	kfree(buf);
	return err;
}
static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		      u8 * data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	u32 aligned_offset, aligned_len;
	__le32 *p;
	u8 *buf;
	int err;

	if (eeprom->magic != EEPROM_MAGIC)
		return -EINVAL;

	aligned_offset = eeprom->offset & ~3;
	aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;

	if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
		buf = kmalloc(aligned_len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		err = t3_seeprom_read(adapter, aligned_offset, (__le32 *) buf);
		if (!err && aligned_len > 4)
			err = t3_seeprom_read(adapter,
					      aligned_offset + aligned_len - 4,
					      (__le32 *) & buf[aligned_len - 4]);
		if (err)
			goto out;
		memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
	} else
		buf = data;

	err = t3_seeprom_wp(adapter, 0);
	if (err)
		goto out;

	for (p = (__le32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
		err = t3_seeprom_write(adapter, aligned_offset, *p);
		aligned_offset += 4;
	}

	if (!err)
		err = t3_seeprom_wp(adapter, 1);
out:
	if (buf != data)
		kfree(buf);
	return err;
}
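/*
 * Worked example for the alignment logic in set_eeprom(): a write with
 * offset = 5 and len = 6 covers bytes 5..10, so aligned_offset = 4 and
 * aligned_len = (6 + 1 + 3) & ~3 = 8, i.e. bytes 4..11 are read back,
 * patched at bytes 5..10, and rewritten one 32-bit word at a time.
 */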
static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	wol->supported = 0;
	wol->wolopts = 0;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}
static const struct ethtool_ops cxgb_ethtool_ops = {
	.get_settings = get_settings,
	.set_settings = set_settings,
	.get_drvinfo = get_drvinfo,
	.get_msglevel = get_msglevel,
	.set_msglevel = set_msglevel,
	.get_ringparam = get_sge_param,
	.set_ringparam = set_sge_param,
	.get_coalesce = get_coalesce,
	.set_coalesce = set_coalesce,
	.get_eeprom_len = get_eeprom_len,
	.get_eeprom = get_eeprom,
	.set_eeprom = set_eeprom,
	.get_pauseparam = get_pauseparam,
	.set_pauseparam = set_pauseparam,
	.get_rx_csum = get_rx_csum,
	.set_rx_csum = set_rx_csum,
	.set_tx_csum = ethtool_op_set_tx_csum,
	.set_sg = ethtool_op_set_sg,
	.get_link = ethtool_op_get_link,
	.get_strings = get_strings,
	.phys_id = cxgb3_phys_id,
	.nway_reset = restart_autoneg,
	.get_sset_count = get_sset_count,
	.get_ethtool_stats = get_stats,
	.get_regs_len = get_regs_len,
	.get_regs = get_regs,
	.get_wol = get_wol,
	.set_tso = ethtool_op_set_tso,
};
static int in_range(int val, int lo, int hi)
{
	return val < 0 || (val <= hi && val >= lo);
}
static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	u32 cmd;
	int ret;

	if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
		return -EFAULT;

	switch (cmd) {
	case CHELSIO_SET_QSET_PARAMS:{
		int i;
		struct qset_params *q;
		struct ch_qset_params t;
		int q1 = pi->first_qset;
		int nqsets = pi->nqsets;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;
		if (t.qset_idx >= SGE_QSETS)
			return -EINVAL;
		if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
		    !in_range(t.cong_thres, 0, 255) ||
		    !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
			      MAX_TXQ_ENTRIES) ||
		    !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
			      MAX_TXQ_ENTRIES) ||
		    !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
			      MAX_CTRL_TXQ_ENTRIES) ||
		    !in_range(t.fl_size[0], MIN_FL_ENTRIES,
			      MAX_RX_BUFFERS)
		    || !in_range(t.fl_size[1], MIN_FL_ENTRIES,
				 MAX_RX_JUMBO_BUFFERS)
		    || !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
				 MAX_RSPQ_ENTRIES))
			return -EINVAL;

		if ((adapter->flags & FULL_INIT_DONE) && t.lro > 0)
			for_each_port(adapter, i) {
				pi = adap2pinfo(adapter, i);
				if (t.qset_idx >= pi->first_qset &&
				    t.qset_idx < pi->first_qset + pi->nqsets &&
				    !(pi->rx_offload & T3_RX_CSUM))
					return -EINVAL;
			}

		if ((adapter->flags & FULL_INIT_DONE) &&
		    (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
		     t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
		     t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
		     t.polling >= 0 || t.cong_thres >= 0))
			return -EBUSY;

		/* Allow setting of any available qset when offload enabled */
		if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
			q1 = 0;
			for_each_port(adapter, i) {
				pi = adap2pinfo(adapter, i);
				nqsets += pi->first_qset + pi->nqsets;
			}
		}

		if (t.qset_idx < q1)
			return -EINVAL;
		if (t.qset_idx > q1 + nqsets - 1)
			return -EINVAL;

		q = &adapter->params.sge.qset[t.qset_idx];

		if (t.rspq_size >= 0)
			q->rspq_size = t.rspq_size;
		if (t.fl_size[0] >= 0)
			q->fl_size = t.fl_size[0];
		if (t.fl_size[1] >= 0)
			q->jumbo_size = t.fl_size[1];
		if (t.txq_size[0] >= 0)
			q->txq_size[0] = t.txq_size[0];
		if (t.txq_size[1] >= 0)
			q->txq_size[1] = t.txq_size[1];
		if (t.txq_size[2] >= 0)
			q->txq_size[2] = t.txq_size[2];
		if (t.cong_thres >= 0)
			q->cong_thres = t.cong_thres;
		if (t.intr_lat >= 0) {
			struct sge_qset *qs =
				&adapter->sge.qs[t.qset_idx];

			q->coalesce_usecs = t.intr_lat;
			t3_update_qset_coalesce(qs, q);
		}
		if (t.polling >= 0) {
			if (adapter->flags & USING_MSIX)
				q->polling = t.polling;
			else {
				/* No polling with INTx for T3A */
				if (adapter->params.rev == 0 &&
				    !(adapter->flags & USING_MSI))
					t.polling = 0;

				for (i = 0; i < SGE_QSETS; i++) {
					q = &adapter->params.sge.
						qset[i];
					q->polling = t.polling;
				}
			}
		}
		if (t.lro >= 0)
			set_qset_lro(dev, t.qset_idx, t.lro);

		break;
	}
	case CHELSIO_GET_QSET_PARAMS:{
		struct qset_params *q;
		struct ch_qset_params t;
		int q1 = pi->first_qset;
		int nqsets = pi->nqsets;
		int i;

		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;

		/* Display qsets for all ports when offload enabled */
		if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
			q1 = 0;
			for_each_port(adapter, i) {
				pi = adap2pinfo(adapter, i);
				nqsets = pi->first_qset + pi->nqsets;
			}
		}

		if (t.qset_idx >= nqsets)
			return -EINVAL;

		q = &adapter->params.sge.qset[q1 + t.qset_idx];
		t.rspq_size = q->rspq_size;
		t.txq_size[0] = q->txq_size[0];
		t.txq_size[1] = q->txq_size[1];
		t.txq_size[2] = q->txq_size[2];
		t.fl_size[0] = q->fl_size;
		t.fl_size[1] = q->jumbo_size;
		t.polling = q->polling;
		t.lro = q->lro;
		t.intr_lat = q->coalesce_usecs;
		t.cong_thres = q->cong_thres;
		t.qnum = q1;

		if (adapter->flags & USING_MSIX)
			t.vector = adapter->msix_info[q1 + t.qset_idx + 1].vec;
		else
			t.vector = adapter->pdev->irq;

		if (copy_to_user(useraddr, &t, sizeof(t)))
			return -EFAULT;
		break;
	}
	case CHELSIO_SET_QSET_NUM:{
		struct ch_reg edata;
		unsigned int i, first_qset = 0, other_qsets = 0;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (adapter->flags & FULL_INIT_DONE)
			return -EBUSY;
		if (copy_from_user(&edata, useraddr, sizeof(edata)))
			return -EFAULT;
		if (edata.val < 1 ||
		    (edata.val > 1 && !(adapter->flags & USING_MSIX)))
			return -EINVAL;

		for_each_port(adapter, i)
			if (adapter->port[i] && adapter->port[i] != dev)
				other_qsets += adap2pinfo(adapter, i)->nqsets;

		if (edata.val + other_qsets > SGE_QSETS)
			return -EINVAL;

		pi->nqsets = edata.val;

		for_each_port(adapter, i)
			if (adapter->port[i]) {
				pi = adap2pinfo(adapter, i);
				pi->first_qset = first_qset;
				first_qset += pi->nqsets;
			}
		break;
	}
	case CHELSIO_GET_QSET_NUM:{
		struct ch_reg edata;

		memset(&edata, 0, sizeof(struct ch_reg));

		edata.cmd = CHELSIO_GET_QSET_NUM;
		edata.val = pi->nqsets;
		if (copy_to_user(useraddr, &edata, sizeof(edata)))
			return -EFAULT;
		break;
	}
	case CHELSIO_LOAD_FW:{
		u8 *fw_data;
		struct ch_mem_range t;

		if (!capable(CAP_SYS_RAWIO))
			return -EPERM;
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;
		/* Check t.len sanity ? */
		fw_data = kmalloc(t.len, GFP_KERNEL);
		if (!fw_data)
			return -ENOMEM;

		if (copy_from_user
		    (fw_data, useraddr + sizeof(t), t.len)) {
			kfree(fw_data);
			return -EFAULT;
		}

		ret = t3_load_fw(adapter, fw_data, t.len);
		kfree(fw_data);
		if (ret)
			return ret;
		break;
	}
	case CHELSIO_SETMTUTAB:{
		struct ch_mtus m;
		int i;

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (offload_running(adapter))
			return -EBUSY;
		if (copy_from_user(&m, useraddr, sizeof(m)))
			return -EFAULT;
		if (m.nmtus != NMTUS)
			return -EINVAL;
		if (m.mtus[0] < 81)	/* accommodate SACK */
			return -EINVAL;

		/* MTUs must be in ascending order */
		for (i = 1; i < NMTUS; ++i)
			if (m.mtus[i] < m.mtus[i - 1])
				return -EINVAL;

		memcpy(adapter->params.mtus, m.mtus,
		       sizeof(adapter->params.mtus));
		break;
	}
	case CHELSIO_GET_PM:{
		struct tp_params *p = &adapter->params.tp;
		struct ch_pm m = {.cmd = CHELSIO_GET_PM };

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		m.tx_pg_sz = p->tx_pg_size;
		m.tx_num_pg = p->tx_num_pgs;
		m.rx_pg_sz = p->rx_pg_size;
		m.rx_num_pg = p->rx_num_pgs;
		m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
		if (copy_to_user(useraddr, &m, sizeof(m)))
			return -EFAULT;
		break;
	}
	case CHELSIO_SET_PM:{
		struct ch_pm m;
		struct tp_params *p = &adapter->params.tp;

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (adapter->flags & FULL_INIT_DONE)
			return -EBUSY;
		if (copy_from_user(&m, useraddr, sizeof(m)))
			return -EFAULT;
		if (!is_power_of_2(m.rx_pg_sz) ||
		    !is_power_of_2(m.tx_pg_sz))
			return -EINVAL;	/* not power of 2 */
		if (!(m.rx_pg_sz & 0x14000))
			return -EINVAL;	/* not 16KB or 64KB */
		if (!(m.tx_pg_sz & 0x1554000))
			return -EINVAL;
		if (m.tx_num_pg == -1)
			m.tx_num_pg = p->tx_num_pgs;
		if (m.rx_num_pg == -1)
			m.rx_num_pg = p->rx_num_pgs;
		if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
			return -EINVAL;
		if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
		    m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
			return -EINVAL;
		p->rx_pg_size = m.rx_pg_sz;
		p->tx_pg_size = m.tx_pg_sz;
		p->rx_num_pgs = m.rx_num_pg;
		p->tx_num_pgs = m.tx_num_pg;
		break;
	}
	case CHELSIO_GET_MEM:{
		struct ch_mem_range t;
		struct mc7 *mem;
		u64 buf[32];

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		if (!(adapter->flags & FULL_INIT_DONE))
			return -EIO;	/* need the memory controllers */
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;
		if ((t.addr & 7) || (t.len & 7))
			return -EINVAL;
		if (t.mem_id == MEM_CM)
			mem = &adapter->cm;
		else if (t.mem_id == MEM_PMRX)
			mem = &adapter->pmrx;
		else if (t.mem_id == MEM_PMTX)
			mem = &adapter->pmtx;
		else
			return -EINVAL;

		/*
		 * Version scheme:
		 * bits 0..9: chip version
		 * bits 10..15: chip revision
		 */
		t.version = 3 | (adapter->params.rev << 10);
		if (copy_to_user(useraddr, &t, sizeof(t)))
			return -EFAULT;

		/*
		 * Read 256 bytes at a time as len can be large and we don't
		 * want to use huge intermediate buffers.
		 */
		useraddr += sizeof(t);	/* advance to start of buffer */
		while (t.len) {
			unsigned int chunk =
			    min_t(unsigned int, t.len, sizeof(buf));

			ret = t3_mc7_bd_read(mem, t.addr / 8, chunk / 8,
					     buf);
			if (ret)
				return ret;
			if (copy_to_user(useraddr, buf, chunk))
				return -EFAULT;
			useraddr += chunk;
			t.addr += chunk;
			t.len -= chunk;
		}
		break;
	}
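	/*
	 * The loop above is a generic bounded-buffer pattern for moving a
	 * user-sized region through a small kernel buffer.  A minimal sketch
	 * of the same idiom, with a hypothetical read_hw() helper standing in
	 * for t3_mc7_bd_read() (illustrative only):
	 *
	 *	u64 buf[32];				// 256-byte bounce buffer
	 *	while (len) {
	 *		unsigned int chunk = min_t(unsigned int, len,
	 *					   sizeof(buf));
	 *		read_hw(addr, chunk, buf);	// fill bounce buffer
	 *		if (copy_to_user(uaddr, buf, chunk))
	 *			return -EFAULT;		// fault mid-copy
	 *		uaddr += chunk; addr += chunk; len -= chunk;
	 *	}
	 */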
	case CHELSIO_SET_TRACE_FILTER:{
		struct ch_trace t;
		const struct trace_params *tp;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (!offload_running(adapter))
			return -EAGAIN;
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;

		tp = (const struct trace_params *)&t.sip;
		if (t.config_tx)
			t3_config_trace_filter(adapter, tp, 0,
					       t.invert_match,
					       t.trace_tx);
		if (t.config_rx)
			t3_config_trace_filter(adapter, tp, 1,
					       t.invert_match,
					       t.trace_rx);
		break;
	}
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}
static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	struct mii_ioctl_data *data = if_mii(req);
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	switch (cmd) {
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		/* Convert phy_id from older PRTAD/DEVAD format */
		if (is_10G(adapter) &&
		    !mdio_phy_id_is_c45(data->phy_id) &&
		    (data->phy_id & 0x1f00) &&
		    !(data->phy_id & 0xe0e0))
			data->phy_id = mdio_phy_id_c45(data->phy_id >> 8,
						       data->phy_id & 0x1f);
		/* FALLTHRU */
	case SIOCGMIIPHY:
		return mdio_mii_ioctl(&pi->phy.mdio, data, cmd);
	case SIOCCHIOCTL:
		return cxgb_extension_ioctl(dev, req->ifr_data);
	default:
		return -EOPNOTSUPP;
	}
}
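/*
 * The PRTAD/DEVAD conversion above repacks a legacy phy_id into the
 * clause-45 encoding that mdio_mii_ioctl() expects.  A worked example
 * (values illustrative): an old-format id of 0x0301 carries PRTAD 3 in
 * bits 8..12 and DEVAD 1 in bits 0..4, so
 *
 *	mdio_phy_id_c45(0x0301 >> 8, 0x0301 & 0x1f)
 *
 * yields MDIO_PHY_ID_C45 | (3 << 5) | 1.  The 0x1f00/0xe0e0 masks accept
 * only ids whose stray bits are clear, i.e. ones that genuinely look like
 * the old packed format.
 */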
static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int ret;

	if (new_mtu < 81)	/* accommodate SACK */
		return -EINVAL;
	if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
		return ret;
	dev->mtu = new_mtu;
	init_port_mtus(adapter);
	if (adapter->params.rev == 0 && offload_running(adapter))
		t3_load_mtus(adapter, adapter->params.mtus,
			     adapter->params.a_wnd, adapter->params.b_wnd,
			     adapter->port[0]->mtu);
	return 0;
}
static int cxgb_set_mac_addr(struct net_device *dev, void *p)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	t3_mac_set_address(&pi->mac, 0, dev->dev_addr);
	if (offload_running(adapter))
		write_smt_entry(adapter, pi->port_id);
	return 0;
}
/**
 *	t3_synchronize_rx - wait for current Rx processing on a port to complete
 *	@adap: the adapter
 *	@p: the port
 *
 *	Ensures that current Rx processing on any of the queues associated with
 *	the given port completes before returning.  We do this by acquiring and
 *	releasing the locks of the response queues associated with the port.
 */
static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
{
	int i;

	for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
		struct sge_rspq *q = &adap->sge.qs[i].rspq;

		spin_lock_irq(&q->lock);
		spin_unlock_irq(&q->lock);
	}
}
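/*
 * The empty lock/unlock pair above is a deliberate "lock pulse": once we
 * have taken and dropped a response queue's lock, any Rx handler that was
 * inside its critical section when we started must have finished.  A
 * minimal sketch of the idiom, assuming some handler does its work under
 * q->lock:
 *
 *	spin_lock_irq(&q->lock);	// blocks until the handler leaves
 *	spin_unlock_irq(&q->lock);	// nothing to do while holding it
 *
 * After the pulse, no code path can still be executing state that predates
 * the caller's change.
 */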
static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	pi->vlan_grp = grp;
	if (adapter->params.rev > 0)
		t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
	else {
		/* single control for all ports */
		unsigned int i, have_vlans = 0;

		for_each_port(adapter, i)
			have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;

		t3_set_vlan_accel(adapter, 1, have_vlans);
	}
	t3_synchronize_rx(adapter, pi);
}
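/*
 * Note the revision split above: rev > 0 chips take a per-port enable
 * bitmap (1 << port_id), while rev 0 has a single global control, so VLAN
 * acceleration stays on as long as *any* port still has a vlan_group.
 * The trailing t3_synchronize_rx() then guarantees no Rx path is still
 * dereferencing the old vlan_grp pointer when this function returns.
 */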
#ifdef CONFIG_NET_POLL_CONTROLLER
static void cxgb_netpoll(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int qidx;

	for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets;
	     qidx++) {
		struct sge_qset *qs = &adapter->sge.qs[qidx];
		void *source;

		if (adapter->flags & USING_MSIX)
			source = qs;
		else
			source = adapter;

		/* invoke the queue's interrupt handler directly */
		t3_intr_handler(adapter, qs->rspq.polling) (0, source);
	}
}
#endif
/*
 * Periodic accumulation of MAC statistics.
 */
static void mac_stats_update(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);

		if (netif_running(dev)) {
			spin_lock(&adapter->stats_lock);
			t3_mac_update_stats(&p->mac);
			spin_unlock(&adapter->stats_lock);
		}
	}
}
static void check_link_status(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);
		int link_fault;

		spin_lock_irq(&adapter->work_lock);
		link_fault = p->link_fault;
		spin_unlock_irq(&adapter->work_lock);

		if (link_fault) {
			t3_link_fault(adapter, i);
			continue;
		}

		if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev)) {
			t3_xgm_intr_disable(adapter, i);
			t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);

			t3_link_changed(adapter, i);
			t3_xgm_intr_enable(adapter, i);
		}
	}
}
static void check_t3b2_mac(struct adapter *adapter)
{
	int i;

	if (!rtnl_trylock())	/* synchronize with ifdown */
		return;

	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);
		int status;

		if (!netif_running(dev))
			continue;

		status = 0;
		if (netif_running(dev) && netif_carrier_ok(dev))
			status = t3b2_mac_watchdog_task(&p->mac);
		if (status == 1)
			p->mac.stats.num_toggled++;
		else if (status == 2) {
			struct cmac *mac = &p->mac;

			t3_mac_set_mtu(mac, dev->mtu);
			t3_mac_set_address(mac, 0, dev->dev_addr);
			cxgb_set_rxmode(dev);
			t3_link_start(&p->phy, mac, &p->link_config);
			t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
			t3_port_intr_enable(adapter, p->port_id);
			p->mac.stats.num_resets++;
		}
	}
	rtnl_unlock();
}
static void t3_adap_check_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       adap_check_task.work);
	const struct adapter_params *p = &adapter->params;
	int port;
	unsigned int v, status, reset;

	adapter->check_task_cnt++;

	check_link_status(adapter);

	/* Accumulate MAC stats if needed */
	if (!p->linkpoll_period ||
	    (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
	    p->stats_update_period) {
		mac_stats_update(adapter);
		adapter->check_task_cnt = 0;
	}

	if (p->rev == T3_REV_B2)
		check_t3b2_mac(adapter);

	/*
	 * Scan the XGMAC's to check for various conditions which we want to
	 * monitor in a periodic polling manner rather than via an interrupt
	 * condition.  This is used for conditions which would otherwise flood
	 * the system with interrupts and we only really need to know that the
	 * conditions are "happening" ...  For each condition we count the
	 * detection of the condition and reset it for the next polling loop.
	 */
	for_each_port(adapter, port) {
		struct cmac *mac = &adap2pinfo(adapter, port)->mac;
		u32 cause;

		cause = t3_read_reg(adapter, A_XGM_INT_CAUSE + mac->offset);
		reset = 0;
		if (cause & F_RXFIFO_OVERFLOW) {
			mac->stats.rx_fifo_ovfl++;
			reset |= F_RXFIFO_OVERFLOW;
		}

		t3_write_reg(adapter, A_XGM_INT_CAUSE + mac->offset, reset);
	}

	/*
	 * We do the same as above for FL_EMPTY interrupts.
	 */
	status = t3_read_reg(adapter, A_SG_INT_CAUSE);
	reset = 0;

	if (status & F_FLEMPTY) {
		struct sge_qset *qs = &adapter->sge.qs[0];
		int i = 0;

		reset |= F_FLEMPTY;

		v = (t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS) >> S_FL0EMPTY) &
		    0xffff;

		while (v) {
			qs->fl[i].empty += (v & 1);
			if (i)
				qs++;
			i ^= 1;
			v >>= 1;
		}
	}

	t3_write_reg(adapter, A_SG_INT_CAUSE, reset);

	/* Schedule the next check update if any port is active. */
	spin_lock_irq(&adapter->work_lock);
	if (adapter->open_device_map & PORT_MASK)
		schedule_chk_task(adapter);
	spin_unlock_irq(&adapter->work_lock);
}
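/*
 * The two scans above share a poll-and-acknowledge pattern for interrupt
 * causes too noisy to service as real interrupts: read the cause register,
 * count the conditions seen, then write the handled bits back to clear them
 * for the next polling pass.  Sketch of the idiom, with illustrative
 * register/bit names (CAUSE_REG and F_NOISY_CONDITION are hypothetical):
 *
 *	u32 cause = t3_read_reg(adapter, CAUSE_REG);
 *	u32 reset = 0;
 *
 *	if (cause & F_NOISY_CONDITION) {
 *		stats->noisy_events++;		// just count it
 *		reset |= F_NOISY_CONDITION;	// and ack it
 *	}
 *	t3_write_reg(adapter, CAUSE_REG, reset);
 */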
/*
 * Processes external (PHY) interrupts in process context.
 */
static void ext_intr_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       ext_intr_handler_task);
	int i;

	/* Disable link fault interrupts */
	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);

		t3_xgm_intr_disable(adapter, i);
		t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
	}

	/* Re-enable link fault interrupts */
	t3_phy_intr_handler(adapter);

	for_each_port(adapter, i)
		t3_xgm_intr_enable(adapter, i);

	/* Now reenable external interrupts */
	spin_lock_irq(&adapter->work_lock);
	if (adapter->slow_intr_mask) {
		adapter->slow_intr_mask |= F_T3DBG;
		t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
		t3_write_reg(adapter, A_PL_INT_ENABLE0,
			     adapter->slow_intr_mask);
	}
	spin_unlock_irq(&adapter->work_lock);
}
/*
 * Interrupt-context handler for external (PHY) interrupts.
 */
void t3_os_ext_intr_handler(struct adapter *adapter)
{
	/*
	 * Schedule a task to handle external interrupts as they may be slow
	 * and we use a mutex to protect MDIO registers.  We disable PHY
	 * interrupts in the meantime and let the task reenable them when
	 * it's done.
	 */
	spin_lock(&adapter->work_lock);
	if (adapter->slow_intr_mask) {
		adapter->slow_intr_mask &= ~F_T3DBG;
		t3_write_reg(adapter, A_PL_INT_ENABLE0,
			     adapter->slow_intr_mask);
		queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
	}
	spin_unlock(&adapter->work_lock);
}
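/*
 * t3_os_ext_intr_handler() and ext_intr_task() together form the classic
 * deferral pattern for slow interrupt sources: mask the source in hard-irq
 * context, queue work, and let the work item do the sleeping (MDIO) accesses
 * before unmasking again.  Skeleton of the pattern (names illustrative):
 *
 *	// hard-irq context
 *	mask_source(adap);
 *	queue_work(wq, &adap->slow_task);
 *
 *	// process context
 *	static void slow_task(struct work_struct *work)
 *	{
 *		handle_slow_source(adap);	// may sleep, may take mutexes
 *		unmask_source(adap);
 *	}
 */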
void t3_os_link_fault_handler(struct adapter *adapter, int port_id)
{
	struct net_device *netdev = adapter->port[port_id];
	struct port_info *pi = netdev_priv(netdev);

	/* flag the fault; the periodic check task acts on it */
	spin_lock(&adapter->work_lock);
	pi->link_fault = 1;
	spin_unlock(&adapter->work_lock);
}
static int t3_adapter_error(struct adapter *adapter, int reset)
{
	int i, ret = 0;

	if (is_offload(adapter) &&
	    test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
		cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_DOWN, 0);
		offload_close(&adapter->tdev);
	}

	/* Stop all ports */
	for_each_port(adapter, i) {
		struct net_device *netdev = adapter->port[i];

		if (netif_running(netdev))
			cxgb_close(netdev);
	}

	/* Stop SGE timers */
	t3_stop_sge_timers(adapter);

	adapter->flags &= ~FULL_INIT_DONE;

	if (reset)
		ret = t3_reset_adapter(adapter);

	pci_disable_device(adapter->pdev);

	return ret;
}
static int t3_reenable_adapter(struct adapter *adapter)
{
	if (pci_enable_device(adapter->pdev)) {
		dev_err(&adapter->pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		goto err;
	}
	pci_set_master(adapter->pdev);
	pci_restore_state(adapter->pdev);

	/* Free sge resources */
	t3_free_sge_resources(adapter);

	if (t3_replay_prep_adapter(adapter))
		goto err;

	return 0;
err:
	return -1;
}
static void t3_resume_ports(struct adapter *adapter)
{
	int i;

	/* Restart the ports */
	for_each_port(adapter, i) {
		struct net_device *netdev = adapter->port[i];

		if (netif_running(netdev)) {
			if (cxgb_open(netdev)) {
				dev_err(&adapter->pdev->dev,
					"can't bring device back up"
					" after reset\n");
				continue;
			}
		}
	}

	if (is_offload(adapter) && !ofld_disable)
		cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_UP, 0);
}
/*
 * Processes a fatal error: bring the ports down, reset the chip, then
 * bring the ports back up.
 */
static void fatal_error_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       fatal_error_handler_task);
	int err = 0;

	rtnl_lock();
	err = t3_adapter_error(adapter, 1);
	if (!err)
		err = t3_reenable_adapter(adapter);
	if (!err)
		t3_resume_ports(adapter);

	CH_ALERT(adapter, "adapter reset %s\n", err ? "failed" : "succeeded");
	rtnl_unlock();
}
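/*
 * Recovery is strictly sequential: each stage runs only if the previous one
 * succeeded (down -> reset/re-enable -> up), and the whole sequence holds
 * the RTNL lock so no concurrent ifup/ifdown can interleave with it.
 */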
void t3_fatal_err(struct adapter *adapter)
{
	unsigned int fw_status[4];

	if (adapter->flags & FULL_INIT_DONE) {
		t3_sge_stop(adapter);
		/* silence the MACs on both channels */
		t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
		t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
		t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
		t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);

		spin_lock(&adapter->work_lock);
		t3_intr_disable(adapter);
		queue_work(cxgb3_wq, &adapter->fatal_error_handler_task);
		spin_unlock(&adapter->work_lock);
	}
	CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
	if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
		CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
			 fw_status[0], fw_status[1],
			 fw_status[2], fw_status[3]);
}
/**
 * t3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
					     pci_channel_state_t state)
{
	struct adapter *adapter = pci_get_drvdata(pdev);
	int ret;

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	ret = t3_adapter_error(adapter, 0);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * t3_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	if (!t3_reenable_adapter(adapter))
		return PCI_ERS_RESULT_RECOVERED;

	return PCI_ERS_RESULT_DISCONNECT;
}
/**
 * t3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void t3_io_resume(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	CH_ALERT(adapter, "adapter recovering, PEX ERR 0x%x\n",
		 t3_read_reg(adapter, A_PCIE_PEX_ERR));

	t3_resume_ports(adapter);
}

static struct pci_error_handlers t3_err_handler = {
	.error_detected = t3_io_error_detected,
	.slot_reset = t3_io_slot_reset,
	.resume = t3_io_resume,
};
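/*
 * The PCI error-recovery core drives these callbacks in a fixed order:
 * error_detected() first (here answering PCI_ERS_RESULT_NEED_RESET),
 * then slot_reset() after the link has been reset, and finally resume()
 * once slot_reset() reports PCI_ERS_RESULT_RECOVERED.  The three handlers
 * above map that sequence onto the driver's own down/re-enable/up helpers.
 */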
/*
 * Set the number of qsets based on the number of CPUs and the number of ports,
 * not to exceed the number of available qsets, assuming there are enough qsets
 * per port in HW.
 */
static void set_nqsets(struct adapter *adap)
{
	int i, j = 0;
	int num_cpus = num_online_cpus();
	int hwports = adap->params.nports;
	int nqsets = adap->msix_nvectors - 1;

	if (adap->params.rev > 0 && adap->flags & USING_MSIX) {
		if (hwports == 2 &&
		    (hwports * nqsets > SGE_QSETS ||
		     num_cpus >= nqsets / hwports))
			nqsets /= hwports;
		if (nqsets > num_cpus)
			nqsets = num_cpus;
		if (nqsets < 1 || hwports == 4)
			nqsets = 1;
	} else
		nqsets = 1;

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->first_qset = j;
		pi->nqsets = nqsets;
		j = pi->first_qset + nqsets;

		dev_info(&adap->pdev->dev,
			 "Port %d using %d queue sets.\n", i, nqsets);
	}
}
static int __devinit cxgb_enable_msix(struct adapter *adap)
{
	struct msix_entry entries[SGE_QSETS + 1];
	int vectors;
	int i, err;

	vectors = ARRAY_SIZE(entries);
	for (i = 0; i < vectors; ++i)
		entries[i].entry = i;

	while ((err = pci_enable_msix(adap->pdev, entries, vectors)) > 0)
		vectors = err;

	if (err < 0)
		pci_disable_msix(adap->pdev);

	if (!err && vectors < (adap->params.nports + 1)) {
		/* not enough vectors for one per port plus errors */
		pci_disable_msix(adap->pdev);
		err = -1;
	}

	if (!err) {
		for (i = 0; i < vectors; ++i)
			adap->msix_info[i].vec = entries[i].vector;
		adap->msix_nvectors = vectors;
	}

	return err;
}
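/*
 * The while loop above relies on the old pci_enable_msix() contract: a
 * positive return value is not an error but the number of vectors the
 * system could actually provide, so the call is simply retried with that
 * smaller count until it either succeeds (0) or fails outright (< 0).
 */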
static void __devinit print_port_info(struct adapter *adap,
				      const struct adapter_info *ai)
{
	static const char *pci_variant[] = {
		"PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
	};

	int i;
	char buf[80];

	if (is_pcie(adap))
		snprintf(buf, sizeof(buf), "%s x%d",
			 pci_variant[adap->params.pci.variant],
			 adap->params.pci.width);
	else
		snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
			 pci_variant[adap->params.pci.variant],
			 adap->params.pci.speed, adap->params.pci.width);

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		const struct port_info *pi = netdev_priv(dev);

		if (!test_bit(i, &adap->registered_device_map))
			continue;
		printk(KERN_INFO "%s: %s %s %sNIC (rev %d) %s%s\n",
		       dev->name, ai->desc, pi->phy.desc,
		       is_offload(adap) ? "R" : "", adap->params.rev, buf,
		       (adap->flags & USING_MSIX) ? " MSI-X" :
		       (adap->flags & USING_MSI) ? " MSI" : "");
		if (adap->name == dev->name && adap->params.vpd.mclk)
			printk(KERN_INFO
			       "%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
			       adap->name, t3_mc7_size(&adap->cm) >> 20,
			       t3_mc7_size(&adap->pmtx) >> 20,
			       t3_mc7_size(&adap->pmrx) >> 20,
			       adap->params.vpd.sn);
	}
}
static const struct net_device_ops cxgb_netdev_ops = {
	.ndo_open = cxgb_open,
	.ndo_stop = cxgb_close,
	.ndo_start_xmit = t3_eth_xmit,
	.ndo_get_stats = cxgb_get_stats,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_multicast_list = cxgb_set_rxmode,
	.ndo_do_ioctl = cxgb_ioctl,
	.ndo_change_mtu = cxgb_change_mtu,
	.ndo_set_mac_address = cxgb_set_mac_addr,
	.ndo_vlan_rx_register = vlan_rx_register,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = cxgb_netpoll,
#endif
};
static int __devinit init_one(struct pci_dev *pdev,
			      const struct pci_device_id *ent)
{
	static int version_printed;

	int i, err, pci_using_dac = 0;
	resource_size_t mmio_start, mmio_len;
	const struct adapter_info *ai;
	struct adapter *adapter = NULL;
	struct port_info *pi;

	if (!version_printed) {
		printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
		++version_printed;
	}

	if (!cxgb3_wq) {
		cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
		if (!cxgb3_wq) {
			printk(KERN_ERR DRV_NAME
			       ": cannot initialize work queue\n");
			return -ENOMEM;
		}
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		/* Just info, some other driver may have claimed the device. */
		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
		return err;
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto out_release_regions;
	}

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
				"coherent allocations\n");
			goto out_disable_device;
		}
	} else if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
		dev_err(&pdev->dev, "no usable DMA configuration\n");
		goto out_disable_device;
	}

	pci_set_master(pdev);
	pci_save_state(pdev);

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);
	ai = t3_get_adapter_info(ent->driver_data);

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		err = -ENOMEM;
		goto out_disable_device;
	}

	adapter->nofail_skb =
		alloc_skb(sizeof(struct cpl_set_tcb_field), GFP_KERNEL);
	if (!adapter->nofail_skb) {
		dev_err(&pdev->dev, "cannot allocate nofail buffer\n");
		err = -ENOMEM;
		goto out_free_adapter;
	}

	adapter->regs = ioremap_nocache(mmio_start, mmio_len);
	if (!adapter->regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -ENOMEM;
		goto out_free_adapter;
	}

	adapter->pdev = pdev;
	adapter->name = pci_name(pdev);
	adapter->msg_enable = dflt_msg_enable;
	adapter->mmio_len = mmio_len;

	mutex_init(&adapter->mdio_lock);
	spin_lock_init(&adapter->work_lock);
	spin_lock_init(&adapter->stats_lock);

	INIT_LIST_HEAD(&adapter->adapter_list);
	INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
	INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);
	INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);

	for (i = 0; i < ai->nports0 + ai->nports1; ++i) {
		struct net_device *netdev;

		netdev = alloc_etherdev_mq(sizeof(struct port_info), SGE_QSETS);
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_NETDEV_DEV(netdev, &pdev->dev);

		adapter->port[i] = netdev;
		pi = netdev_priv(netdev);
		pi->adapter = adapter;
		pi->rx_offload = T3_RX_CSUM | T3_LRO;
		pi->port_id = i;
		netif_carrier_off(netdev);
		netif_tx_stop_all_queues(netdev);
		netdev->irq = pdev->irq;
		netdev->mem_start = mmio_start;
		netdev->mem_end = mmio_start + mmio_len - 1;
		netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
		netdev->features |= NETIF_F_GRO;
		if (pci_using_dac)
			netdev->features |= NETIF_F_HIGHDMA;

		netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
		netdev->netdev_ops = &cxgb_netdev_ops;
		SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
	}

	pci_set_drvdata(pdev, adapter);
	if (t3_prep_adapter(adapter, ai, 1) < 0) {
		err = -ENODEV;
		goto out_free_dev;
	}

	/*
	 * The card is now ready to go.  If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully.  However we must
	 * register at least one net device.
	 */
	for_each_port(adapter, i) {
		err = register_netdev(adapter->port[i]);
		if (err)
			dev_warn(&pdev->dev,
				 "cannot register net device %s, skipping\n",
				 adapter->port[i]->name);
		else {
			/*
			 * Change the name we use for messages to the name of
			 * the first successfully registered interface.
			 */
			if (!adapter->registered_device_map)
				adapter->name = adapter->port[i]->name;

			__set_bit(i, &adapter->registered_device_map);
		}
	}
	if (!adapter->registered_device_map) {
		dev_err(&pdev->dev, "could not register any net devices\n");
		goto out_free_dev;
	}

	/* Driver's ready. Reflect it on LEDs */
	t3_led_ready(adapter);

	if (is_offload(adapter)) {
		__set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
		cxgb3_adapter_ofld(adapter);
	}

	/* See what interrupts we'll be using */
	if (msi > 1 && cxgb_enable_msix(adapter) == 0)
		adapter->flags |= USING_MSIX;
	else if (msi > 0 && pci_enable_msi(pdev) == 0)
		adapter->flags |= USING_MSI;

	set_nqsets(adapter);

	err = sysfs_create_group(&adapter->port[0]->dev.kobj,
				 &cxgb3_attr_group);

	print_port_info(adapter, ai);
	return 0;

out_free_dev:
	iounmap(adapter->regs);
	for (i = ai->nports0 + ai->nports1 - 1; i >= 0; --i)
		if (adapter->port[i])
			free_netdev(adapter->port[i]);

out_free_adapter:
	kfree(adapter);

out_disable_device:
	pci_disable_device(pdev);
out_release_regions:
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
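/*
 * Error handling in init_one() follows the kernel's goto-unwind ladder:
 * each failure jumps to the label that releases exactly the resources
 * acquired so far, and the labels run in reverse order of acquisition
 * (out_free_dev -> out_free_adapter -> out_disable_device ->
 * out_release_regions).  Any new acquisition should be paired with a label
 * inserted at the matching depth.
 */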
static void __devexit remove_one(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	if (adapter) {
		int i;

		t3_sge_stop(adapter);
		sysfs_remove_group(&adapter->port[0]->dev.kobj,
				   &cxgb3_attr_group);

		if (is_offload(adapter)) {
			cxgb3_adapter_unofld(adapter);
			if (test_bit(OFFLOAD_DEVMAP_BIT,
				     &adapter->open_device_map))
				offload_close(&adapter->tdev);
		}

		for_each_port(adapter, i)
			if (test_bit(i, &adapter->registered_device_map))
				unregister_netdev(adapter->port[i]);

		t3_stop_sge_timers(adapter);
		t3_free_sge_resources(adapter);
		cxgb_disable_msi(adapter);

		for_each_port(adapter, i)
			if (adapter->port[i])
				free_netdev(adapter->port[i]);

		iounmap(adapter->regs);
		if (adapter->nofail_skb)
			kfree_skb(adapter->nofail_skb);
		kfree(adapter);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}
static struct pci_driver driver = {
	.name = DRV_NAME,
	.id_table = cxgb3_pci_tbl,
	.probe = init_one,
	.remove = __devexit_p(remove_one),
	.err_handler = &t3_err_handler,
};

static int __init cxgb3_init_module(void)
{
	int ret;

	cxgb3_offload_init();

	ret = pci_register_driver(&driver);
	return ret;
}

static void __exit cxgb3_cleanup_module(void)
{
	pci_unregister_driver(&driver);
	if (cxgb3_wq)
		destroy_workqueue(cxgb3_wq);
}

module_init(cxgb3_init_module);
module_exit(cxgb3_cleanup_module);