2 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 #include <linux/module.h>
33 #include <linux/moduleparam.h>
34 #include <linux/init.h>
35 #include <linux/pci.h>
36 #include <linux/dma-mapping.h>
37 #include <linux/netdevice.h>
38 #include <linux/etherdevice.h>
39 #include <linux/if_vlan.h>
40 #include <linux/mii.h>
41 #include <linux/sockios.h>
42 #include <linux/workqueue.h>
43 #include <linux/proc_fs.h>
44 #include <linux/rtnetlink.h>
45 #include <linux/firmware.h>
46 #include <linux/log2.h>
47 #include <asm/uaccess.h>
50 #include "cxgb3_ioctl.h"
52 #include "cxgb3_offload.h"
55 #include "cxgb3_ctl_defs.h"
57 #include "firmware_exports.h"
60 MAX_TXQ_ENTRIES = 16384,
61 MAX_CTRL_TXQ_ENTRIES = 1024,
62 MAX_RSPQ_ENTRIES = 16384,
63 MAX_RX_BUFFERS = 16384,
64 MAX_RX_JUMBO_BUFFERS = 16384,
66 MIN_CTRL_TXQ_ENTRIES = 4,
67 MIN_RSPQ_ENTRIES = 32,
71 #define PORT_MASK ((1 << MAX_NPORTS) - 1)
73 #define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
74 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
75 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
77 #define EEPROM_MAGIC 0x38E2F10C
79 #define CH_DEVICE(devid, idx) \
80 { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }
82 static const struct pci_device_id cxgb3_pci_tbl[] = {
83 CH_DEVICE(0x20, 0), /* PE9000 */
84 CH_DEVICE(0x21, 1), /* T302E */
85 CH_DEVICE(0x22, 2), /* T310E */
86 CH_DEVICE(0x23, 3), /* T320X */
87 CH_DEVICE(0x24, 1), /* T302X */
88 CH_DEVICE(0x25, 3), /* T320E */
89 CH_DEVICE(0x26, 2), /* T310X */
90 CH_DEVICE(0x30, 2), /* T3B10 */
91 CH_DEVICE(0x31, 3), /* T3B20 */
92 CH_DEVICE(0x32, 1), /* T3B02 */
96 MODULE_DESCRIPTION(DRV_DESC);
97 MODULE_AUTHOR("Chelsio Communications");
98 MODULE_LICENSE("Dual BSD/GPL");
99 MODULE_VERSION(DRV_VERSION);
100 MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);
102 static int dflt_msg_enable = DFLT_MSG_ENABLE;
104 module_param(dflt_msg_enable, int, 0644);
105 MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");
108 * The driver uses the best interrupt scheme available on a platform in the
109 * order MSI-X, MSI, legacy pin interrupts. This parameter determines which
110 * of these schemes the driver may consider as follows:
112 * msi = 2: choose from among all three options
113 * msi = 1: only consider MSI and pin interrupts
114 * msi = 0: force pin interrupts
118 module_param(msi, int, 0644);
119 MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");
122 * The driver enables offload as a default.
123 * To disable it, use ofld_disable = 1.
126 static int ofld_disable = 0;
128 module_param(ofld_disable, int, 0644);
129 MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");
132 * We have work elements that we need to cancel when an interface is taken
133 * down. Normally the work elements would be executed by keventd but that
134 * can deadlock because of linkwatch. If our close method takes the rtnl
135 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
136 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
137 * for our work to complete. Get our own work queue to solve this.
139 static struct workqueue_struct *cxgb3_wq;
142 * link_report - show link status and link speed/duplex
143 * @p: the port whose settings are to be reported
145 * Shows the link status, speed, and duplex of a port.
/*
 * link_report - log a port's link state; when up, also log speed and duplex.
 * NOTE(review): the inline numbering jumps (150->152, 155->167): the else
 * branch opening and the speed switch cases are missing from this extract —
 * verify against the complete source before editing.
 */
147 static void link_report(struct net_device *dev)
149 if (!netif_carrier_ok(dev))
150 printk(KERN_INFO "%s: link down\n", dev->name);
152 const char *s = "10Mbps";
153 const struct port_info *p = netdev_priv(dev);
155 switch (p->link_config.speed) {
167 printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
168 p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
173 * t3_os_link_changed - handle link status changes
174 * @adapter: the adapter associated with the link change
175 * @port_id: the port index whose limk status has changed
176 * @link_stat: the new status of the link
177 * @speed: the new speed setting
178 * @duplex: the new duplex setting
179 * @pause: the new flow-control setting
181 * This is the OS-dependent handler for link status changes. The OS
182 * neutral handler takes care of most of the processing for these events,
183 * then calls this handler for any OS-specific processing.
/*
 * OS-specific handling of a link state transition reported by the HW layer.
 * Enables/disables the MAC Rx path and toggles the carrier accordingly.
 * NOTE(review): lines 187, 191, 194-195, 197, 200, 205+ are missing from
 * this extract (early return, if/else braces, trailing link_report call) —
 * confirm structure against the full source.
 */
185 void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
186 int speed, int duplex, int pause)
188 struct net_device *dev = adapter->port[port_id];
189 struct port_info *pi = netdev_priv(dev);
190 struct cmac *mac = &pi->mac;
192 /* Skip changes from disabled ports. */
193 if (!netif_running(dev))
196 if (link_stat != netif_carrier_ok(dev)) {
198 t3_mac_enable(mac, MAC_DIRECTION_RX);
199 netif_carrier_on(dev);
201 netif_carrier_off(dev);
202 pi->phy.ops->power_down(&pi->phy, 1);
203 t3_mac_disable(mac, MAC_DIRECTION_RX)[B;
204 t3_link_start(&pi->phy, mac, &pi->link_config);
212 * t3_os_phymod_changed - handle PHY module changes
213 * @phy: the PHY reporting the module change
214 * @mod_type: new module type
216 * This is the OS-dependent handler for PHY module changes. It is
217 * invoked when a PHY module is removed or inserted for any OS-specific
/*
 * Log insertion/removal of a pluggable PHY module for the given port.
 * mod_str[] is indexed by pi->phy.modtype; index 0 (none) is handled by
 * the "unplugged" branch.  NOTE(review): the else keyword between the two
 * printk branches (line 231) is not visible in this extract.
 */
220 void t3_os_phymod_changed(struct adapter *adap, int port_id)
222 static const char *mod_str[] = {
223 NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX", "unknown"
226 const struct net_device *dev = adap->port[port_id];
227 const struct port_info *pi = netdev_priv(dev);
229 if (pi->phy.modtype == phy_modtype_none)
230 printk(KERN_INFO "%s: PHY module unplugged\n", dev->name);
232 printk(KERN_INFO "%s: %s PHY module inserted\n", dev->name,
233 mod_str[pi->phy.modtype]);
236 static void cxgb_set_rxmode(struct net_device *dev)
238 struct t3_rx_mode rm;
239 struct port_info *pi = netdev_priv(dev);
241 init_rx_mode(&rm, dev, dev->mc_list);
242 t3_mac_set_rx_mode(&pi->mac, &rm);
246 * link_start - enable a port
247 * @dev: the device to enable
249 * Performs the MAC and PHY actions needed to enable a port.
/*
 * link_start - perform the MAC and PHY actions needed to enable a port:
 * program MTU, MAC address and Rx mode, start the link, enable Rx/Tx.
 * NOTE(review): line 258 is missing from this extract (presumably a MAC
 * reset or similar step between init_rx_mode and t3_mac_set_mtu — confirm
 * against the full source).
 */
251 static void link_start(struct net_device *dev)
253 struct t3_rx_mode rm;
254 struct port_info *pi = netdev_priv(dev);
255 struct cmac *mac = &pi->mac;
257 init_rx_mode(&rm, dev, dev->mc_list);
259 t3_mac_set_mtu(mac, dev->mtu);
260 t3_mac_set_address(mac, 0, dev->dev_addr);
261 t3_mac_set_rx_mode(mac, &rm);
262 t3_link_start(&pi->phy, mac, &pi->link_config);
263 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
266 static inline void cxgb_disable_msi(struct adapter *adapter)
268 if (adapter->flags & USING_MSIX) {
269 pci_disable_msix(adapter->pdev);
270 adapter->flags &= ~USING_MSIX;
271 } else if (adapter->flags & USING_MSI) {
272 pci_disable_msi(adapter->pdev);
273 adapter->flags &= ~USING_MSI;
278 * Interrupt handler for asynchronous events used with MSI-X.
/*
 * Interrupt handler for asynchronous (slow-path) events, used as the
 * MSI-X vector 0 handler; delegates to the OS-neutral slow handler.
 * NOTE(review): the return statement (line 283, presumably IRQ_HANDLED)
 * is missing from this extract.
 */
280 static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
282 t3_slow_intr_handler(cookie);
287 * Name the MSI-X interrupts.
289 static void name_msix_vecs(struct adapter *adap)
291 int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;
293 snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
294 adap->msix_info[0].desc[n] = 0;
296 for_each_port(adap, j) {
297 struct net_device *d = adap->port[j];
298 const struct port_info *pi = netdev_priv(d);
300 for (i = 0; i < pi->nqsets; i++, msi_idx++) {
301 snprintf(adap->msix_info[msi_idx].desc, n,
302 "%s-%d", d->name, pi->first_qset + i);
303 adap->msix_info[msi_idx].desc[n] = 0;
/*
 * Request one MSI-X interrupt per SGE queue set (vectors 1..N; vector 0
 * is the async handler).  On failure the already-requested vectors are
 * freed again.  NOTE(review): this extract is missing the middle
 * request_irq() arguments (lines 318-319), the error check (322-323) and
 * the unwind-loop/return lines (326-333) — do not edit without the full
 * source.
 */
308 static int request_msix_data_irqs(struct adapter *adap)
310 int i, j, err, qidx = 0;
312 for_each_port(adap, i) {
313 int nqsets = adap2pinfo(adap, i)->nqsets;
315 for (j = 0; j < nqsets; ++j) {
316 err = request_irq(adap->msix_info[qidx + 1].vec,
317 t3_intr_handler(adap,
320 adap->msix_info[qidx + 1].desc,
321 &adap->sge.qs[qidx]);
324 free_irq(adap->msix_info[qidx + 1].vec,
325 &adap->sge.qs[qidx]);
/*
 * Free the adapter's interrupt resources: under MSI-X, the async vector
 * plus one data vector per configured queue set; otherwise the single
 * PCI IRQ.  NOTE(review): the declarations of i and n (around line 337)
 * and the else keyword before line 347 are missing from this extract.
 */
334 static void free_irq_resources(struct adapter *adapter)
336 if (adapter->flags & USING_MSIX) {
339 free_irq(adapter->msix_info[0].vec, adapter);
340 for_each_port(adapter, i)
341 n += adap2pinfo(adapter, i)->nqsets;
343 for (i = 0; i < n; ++i)
344 free_irq(adapter->msix_info[i + 1].vec,
345 &adapter->sge.qs[i]);
347 free_irq(adapter->pdev->irq, adapter);
/*
 * Poll queue set 0's response queue until n management replies beyond
 * init_cnt have arrived.  NOTE(review): the parameter list is cut off
 * (line 351) and the loop body/timeout handling (lines 352-362) are
 * missing from this extract.
 */
350 static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
355 while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
/*
 * init_tp_parity - write minimal valid contents into the TP memories
 * (SMT, L2T, routing table, TCB) so that later parity checks on those
 * memories are meaningful.  Sends 16 SMT, 2048 L2T and 2048 RTE write
 * requests plus one SET_TCB_FIELD, then waits for all replies.
 * NOTE(review): local declarations (lines 365-366), several per-request
 * fields (e.g. line 380) and the final return (lines 418-420) are
 * missing from this extract.
 */
363 static int init_tp_parity(struct adapter *adap)
367 struct cpl_set_tcb_field *greq;
368 unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;
370 t3_tp_set_offload_mode(adap, 1);
372 for (i = 0; i < 16; i++) {
373 struct cpl_smt_write_req *req;
375 skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
376 req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
377 memset(req, 0, sizeof(*req));
378 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
379 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
381 t3_mgmt_tx(adap, skb);
384 for (i = 0; i < 2048; i++) {
385 struct cpl_l2t_write_req *req;
387 skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
388 req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
389 memset(req, 0, sizeof(*req));
390 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
391 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
392 req->params = htonl(V_L2T_W_IDX(i));
393 t3_mgmt_tx(adap, skb);
396 for (i = 0; i < 2048; i++) {
397 struct cpl_rte_write_req *req;
399 skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
400 req = (struct cpl_rte_write_req *)__skb_put(skb, sizeof(*req));
401 memset(req, 0, sizeof(*req));
402 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
403 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
404 req->l2t_idx = htonl(V_L2T_W_IDX(i));
405 t3_mgmt_tx(adap, skb);
408 skb = alloc_skb(sizeof(*greq), GFP_KERNEL | __GFP_NOFAIL);
409 greq = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*greq));
410 memset(greq, 0, sizeof(*greq));
411 greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
412 OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
413 greq->mask = cpu_to_be64(1);
414 t3_mgmt_tx(adap, skb);
416 i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
417 t3_tp_set_offload_mode(adap, 0);
422 * setup_rss - configure RSS
425 * Sets up RSS to distribute packets to multiple receive queues. We
426 * configure the RSS CPU lookup table to distribute to the number of HW
427 * receive queues, and the response queue lookup table to narrow that
428 * down to the response queues actually configured for each port.
429 * We always configure the RSS mapping for two ports since the mapping
430 * table has plenty of entries.
/*
 * setup_rss - program the RSS CPU lookup table and the response-queue
 * map.  The first half of the table maps to port 0's queue sets, the
 * second half to port 1's (offset by nq0).  Both ports are always mapped
 * since the table has plenty of entries.
 * NOTE(review): the declaration of i (line 434) and the cpus[] fill loop
 * body (line 441, presumably cpus[i] = i) are missing from this extract.
 */
432 static void setup_rss(struct adapter *adap)
435 unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
436 unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
437 u8 cpus[SGE_QSETS + 1];
438 u16 rspq_map[RSS_TABLE_SIZE];
440 for (i = 0; i < SGE_QSETS; ++i)
442 cpus[SGE_QSETS] = 0xff; /* terminator */
444 for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
445 rspq_map[i] = i % nq0;
446 rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
449 t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
450 F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
451 V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
/*
 * Register a NAPI context for every initialized queue set, exactly once
 * per adapter lifetime (guarded by NAPI_INIT to survive e.g. EEH
 * recovery).  NOTE(review): the qs->adap guard (lines 460-461) and the
 * netif_napi_add() trailing arguments (line 463) are missing from this
 * extract.
 */
454 static void init_napi(struct adapter *adap)
458 for (i = 0; i < SGE_QSETS; i++) {
459 struct sge_qset *qs = &adap->sge.qs[i];
462 netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
467 * netif_napi_add() can be called only once per napi_struct because it
468 * adds each new napi_struct to a list. Be careful not to call it a
469 * second time, e.g., during EEH recovery, by making a note of it.
471 adap->flags |= NAPI_INIT;
475 * Wait until all NAPI handlers are descheduled. This includes the handlers of
476 * both netdevices representing interfaces and the dummy ones for the extra
479 static void quiesce_rx(struct adapter *adap)
483 for (i = 0; i < SGE_QSETS; i++)
484 if (adap->sge.qs[i].adap)
485 napi_disable(&adap->sge.qs[i].napi);
488 static void enable_all_napi(struct adapter *adap)
491 for (i = 0; i < SGE_QSETS; i++)
492 if (adap->sge.qs[i].adap)
493 napi_enable(&adap->sge.qs[i].napi);
497 * set_qset_lro - Turn a queue set's LRO capability on and off
498 * @dev: the device the qset is attached to
499 * @qset_idx: the queue set index
500 * @val: the LRO switch
502 * Sets LRO on or off for a particular queue set.
503 * the device's features flag is updated to reflect the LRO
504 * capability when all queues belonging to the device are
/*
 * set_qset_lro - switch LRO on/off for one queue set, then update the
 * netdev NETIF_F_LRO feature flag so ethtool reports LRO enabled only
 * when every queue set of the port has it enabled.
 * NOTE(review): the declarations of i/lro_on (around line 511) and the
 * if/else around the feature-flag update (lines 519-522) are missing
 * from this extract.
 */
507 static void set_qset_lro(struct net_device *dev, int qset_idx, int val)
509 struct port_info *pi = netdev_priv(dev);
510 struct adapter *adapter = pi->adapter;
513 adapter->params.sge.qset[qset_idx].lro = !!val;
514 adapter->sge.qs[qset_idx].lro_enabled = !!val;
516 /* let ethtool report LRO on only if all queues are LRO enabled */
517 for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; ++i)
518 lro_on &= adapter->params.sge.qset[i].lro;
521 dev->features |= NETIF_F_LRO;
523 dev->features &= ~NETIF_F_LRO;
527 * setup_sge_qsets - configure SGE Tx/Rx/response queues
530 * Determines how many sets of SGE queues to use and initializes them.
531 * We support multiple queue sets per port if we have MSI-X, otherwise
532 * just one queue set per port.
/*
 * setup_sge_qsets - allocate and initialize the SGE queue sets for every
 * port.  Multiple queue sets per port are supported with MSI-X; with
 * MSI/INTx a single IRQ index is shared.  On allocation failure the SGE
 * timers and resources are torn down again.
 * NOTE(review): the irq_idx selection (line 552), the loop increment
 * (line 548), the error check (line 554) and the tail of the function
 * (lines 557-563) are missing from this extract.
 */
534 static int setup_sge_qsets(struct adapter *adap)
536 int i, j, err, irq_idx = 0, qset_idx = 0;
537 unsigned int ntxq = SGE_TXQ_PER_SET;
539 if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
542 for_each_port(adap, i) {
543 struct net_device *dev = adap->port[i];
544 struct port_info *pi = netdev_priv(dev);
546 pi->qs = &adap->sge.qs[pi->first_qset];
547 for (j = pi->first_qset; j < pi->first_qset + pi->nqsets;
549 set_qset_lro(dev, qset_idx, pi->rx_csum_offload);
550 err = t3_sge_alloc_qset(adap, qset_idx, 1,
551 (adap->flags & USING_MSIX) ? qset_idx + 1 :
553 &adap->params.sge.qset[qset_idx], ntxq, dev);
555 t3_stop_sge_timers(adap);
556 t3_free_sge_resources(adap);
/*
 * Generic sysfs show helper: formats a per-netdev value via the supplied
 * callback while holding off ioctls that may shut down the device.
 * NOTE(review): the rtnl lock/unlock and return (lines 567-576) are
 * missing from this extract.
 */
565 static ssize_t attr_show(struct device *d, char *buf,
566 ssize_t(*format) (struct net_device *, char *))
570 /* Synchronize with ioctls that may shut down the device */
572 len = (*format) (to_net_dev(d), buf);
/*
 * Generic sysfs store helper: parses an unsigned value, range-checks it
 * against [min_val, max_val], and applies it via the supplied callback.
 * Requires CAP_NET_ADMIN.  NOTE(review): local declarations, the rtnl
 * locking and the final return (lines 581-585, 587-588, 591-600) are
 * missing from this extract.
 */
577 static ssize_t attr_store(struct device *d,
578 const char *buf, size_t len,
579 ssize_t(*set) (struct net_device *, unsigned int),
580 unsigned int min_val, unsigned int max_val)
586 if (!capable(CAP_NET_ADMIN))
589 val = simple_strtoul(buf, &endp, 0);
590 if (endp == buf || val < min_val || val > max_val)
594 ret = (*set) (to_net_dev(d), val);
601 #define CXGB3_SHOW(name, val_expr) \
602 static ssize_t format_##name(struct net_device *dev, char *buf) \
604 struct port_info *pi = netdev_priv(dev); \
605 struct adapter *adap = pi->adapter; \
606 return sprintf(buf, "%u\n", val_expr); \
608 static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
611 return attr_show(d, buf, format_##name); \
/*
 * Set the number of MC5 filter TIDs.  Rejected after full init, on rev-0
 * hardware, or when the request would not leave room for servers and the
 * minimum offload TIDs.  NOTE(review): the error-return statements after
 * each check (lines 621, 623, 626) and the final return (lines 628-629)
 * are missing from this extract.
 */
614 static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
616 struct port_info *pi = netdev_priv(dev);
617 struct adapter *adap = pi->adapter;
618 int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;
620 if (adap->flags & FULL_INIT_DONE)
622 if (val && adap->params.rev == 0)
624 if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
627 adap->params.mc5.nfilters = val;
631 static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
632 const char *buf, size_t len)
634 return attr_store(d, buf, len, set_nfilters, 0, ~0);
/*
 * Set the number of MC5 server TIDs; rejected after full init or when
 * the request would not fit alongside the configured filters.
 * NOTE(review): the error-return statements and final return (lines 641,
 * 643, 645-646, 648-649) are missing from this extract.
 */
637 static ssize_t set_nservers(struct net_device *dev, unsigned int val)
639 struct port_info *pi = netdev_priv(dev);
640 struct adapter *adap = pi->adapter;
642 if (adap->flags & FULL_INIT_DONE)
644 if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
647 adap->params.mc5.nservers = val;
651 static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
652 const char *buf, size_t len)
654 return attr_store(d, buf, len, set_nservers, 0, ~0);
657 #define CXGB3_ATTR_R(name, val_expr) \
658 CXGB3_SHOW(name, val_expr) \
659 static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
661 #define CXGB3_ATTR_RW(name, val_expr, store_method) \
662 CXGB3_SHOW(name, val_expr) \
663 static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)
665 CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
666 CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
667 CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);
669 static struct attribute *cxgb3_attrs[] = {
670 &dev_attr_cam_size.attr,
671 &dev_attr_nfilters.attr,
672 &dev_attr_nservers.attr,
676 static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };
/*
 * Show a Tx traffic-scheduler's rate: reads the per-scheduler rate-limit
 * register via the TM PIO window and converts bytes-per-tick (bpt) and
 * clocks-per-tick (cpt) into Kbps using the core clock.
 * NOTE(review): locking around the register access, the cpt extraction,
 * the zero-check and the return (lines 684-687, 690-691, 693-694, 696,
 * 699-702) are missing from this extract.
 */
678 static ssize_t tm_attr_show(struct device *d,
679 char *buf, int sched)
681 struct port_info *pi = netdev_priv(to_net_dev(d));
682 struct adapter *adap = pi->adapter;
683 unsigned int v, addr, bpt, cpt;
686 addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
688 t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
689 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
692 bpt = (v >> 8) & 0xff;
695 len = sprintf(buf, "disabled\n");
697 v = (adap->params.vpd.cclk * 1000) / cpt;
698 len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
/*
 * Store a Tx traffic-scheduler rate (capped at 10 Gbps).  Requires
 * CAP_NET_ADMIN; applies the value through t3_config_sched().
 * NOTE(review): local declarations, error returns, locking and the final
 * return (lines 709-712, 714-715, 718-720, 722-726) are missing from
 * this extract.
 */
704 static ssize_t tm_attr_store(struct device *d,
705 const char *buf, size_t len, int sched)
707 struct port_info *pi = netdev_priv(to_net_dev(d));
708 struct adapter *adap = pi->adapter;
713 if (!capable(CAP_NET_ADMIN))
716 val = simple_strtoul(buf, &endp, 0);
717 if (endp == buf || val > 10000000)
721 ret = t3_config_sched(adap, val, sched);
728 #define TM_ATTR(name, sched) \
729 static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
732 return tm_attr_show(d, buf, sched); \
734 static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
735 const char *buf, size_t len) \
737 return tm_attr_store(d, buf, len, sched); \
739 static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)
750 static struct attribute *offload_attrs[] = {
751 &dev_attr_sched0.attr,
752 &dev_attr_sched1.attr,
753 &dev_attr_sched2.attr,
754 &dev_attr_sched3.attr,
755 &dev_attr_sched4.attr,
756 &dev_attr_sched5.attr,
757 &dev_attr_sched6.attr,
758 &dev_attr_sched7.attr,
762 static struct attribute_group offload_attr_group = {.attrs = offload_attrs };
765 * Sends an sk_buff to an offload queue driver
766 * after dealing with any active network taps.
/*
 * Send an sk_buff to an offload queue driver.  NOTE(review): the local
 * ret declaration, the BH disable/enable bracketing and the return
 * (lines 769-772, 774-777) are missing from this extract.
 */
768 static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
773 ret = t3_offload_tx(tdev, skb);
/*
 * Build and send a CPL_SMT_WRITE_REQ that programs SMT entry idx with
 * the corresponding port's MAC address.  mtu_idx is set to NMTUS-1 to
 * work around a T3 hardware bug.
 * NOTE(review): the NULL-skb check (lines 782-785), iff/priority fields
 * (line 790) and skb priority/return lines (793, 795-796) are missing
 * from this extract.
 */
778 static int write_smt_entry(struct adapter *adapter, int idx)
780 struct cpl_smt_write_req *req;
781 struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);
786 req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
787 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
788 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
789 req->mtu_idx = NMTUS - 1; /* should be 0 but there's a T3 bug */
791 memset(req->src_mac1, 0, sizeof(req->src_mac1));
792 memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
794 offload_tx(&adapter->tdev, skb);
/*
 * Program an SMT entry for every port.  NOTE(review): the loop-index
 * declaration and the return statement (lines 800-801, 804-805) are
 * missing from this extract.
 */
798 static int init_smt(struct adapter *adapter)
802 for_each_port(adapter, i)
803 write_smt_entry(adapter, i);
807 static void init_port_mtus(struct adapter *adapter)
809 unsigned int mtus = adapter->port[0]->mtu;
811 if (adapter->port[1])
812 mtus |= adapter->port[1]->mtu << 16;
813 t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
/*
 * Send a firmware management work request that configures a packet
 * scheduler.  NOTE(review): the parameter list is cut off (lines
 * 817-818), local declarations and several request fields (821-822,
 * 827-831) and the return (833-835) are missing from this extract.
 */
816 static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
820 struct mngt_pktsched_wr *req;
823 skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
824 req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
825 req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
826 req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
832 ret = t3_mgmt_tx(adap, skb);
/*
 * Bind every port's queue sets to packet scheduler 1 via management
 * commands.  NOTE(review): local declarations, the tail of the
 * send_pktsched_cmd() argument list, error accumulation and the return
 * (lines 838-840, 847-854) are missing from this extract.
 */
837 static int bind_qsets(struct adapter *adap)
841 for_each_port(adap, i) {
842 const struct port_info *pi = adap2pinfo(adap, i);
844 for (j = 0; j < pi->nqsets; ++j) {
845 int ret = send_pktsched_cmd(adap, 1,
846 pi->first_qset + j, -1,
856 #define FW_FNAME "t3fw-%d.%d.%d.bin"
857 #define TPSRAM_NAME "t3%c_protocol_sram-%d.%d.%d.bin"
/*
 * Load the firmware image matching the driver's expected version from
 * userspace (request_firmware) and flash it to the adapter, logging the
 * outcome.  NOTE(review): the buf declaration, the error-path return,
 * the success check and the final return (lines 861-862, 869, 871-873,
 * 876-877, 880, 883-885) are missing from this extract.
 */
859 static int upgrade_fw(struct adapter *adap)
863 const struct firmware *fw;
864 struct device *dev = &adap->pdev->dev;
866 snprintf(buf, sizeof(buf), FW_FNAME, FW_VERSION_MAJOR,
867 FW_VERSION_MINOR, FW_VERSION_MICRO);
868 ret = request_firmware(&fw, buf, dev);
870 dev_err(dev, "could not upgrade firmware: unable to load %s\n",
874 ret = t3_load_fw(adap, fw->data, fw->size);
875 release_firmware(fw);
878 dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
879 FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
881 dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
882 FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
/*
 * Map the adapter hardware revision to the character used in protocol
 * SRAM firmware file names.  NOTE(review): the switch cases and return
 * (lines 888-890, 892-901) are missing from this extract.
 */
887 static inline char t3rev2char(struct adapter *adapter)
891 switch(adapter->params.rev) {
/*
 * Load the protocol-engine (TP) SRAM image matching the driver's
 * expected TP version from userspace, validate it, and program it into
 * the adapter, logging the outcome.  The firmware is always released
 * before returning.  NOTE(review): declarations of buf/rev/ret, the
 * rev==0 early exit, several error branches and the final return (lines
 * 904, 906, 908-910, 912-914, 917, 919, 921-924, 926-928, 930-931, 933,
 * 935, 938, 940-941, 943-945) are missing from this extract.
 */
903 static int update_tpsram(struct adapter *adap)
905 const struct firmware *tpsram;
907 struct device *dev = &adap->pdev->dev;
911 rev = t3rev2char(adap);
915 snprintf(buf, sizeof(buf), TPSRAM_NAME, rev,
916 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
918 ret = request_firmware(&tpsram, buf, dev);
920 dev_err(dev, "could not load TP SRAM: unable to load %s\n",
925 ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
929 ret = t3_set_proto_sram(adap, tpsram->data);
932 "successful update of protocol engine "
934 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
936 dev_err(dev, "failed to update of protocol engine %d.%d.%d\n",
937 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
939 dev_err(dev, "loading protocol SRAM failed\n");
942 release_firmware(tpsram);
948 * cxgb_up - enable the adapter
949 * @adapter: adapter being enabled
951 * Called when the first port is enabled, this function performs the
952 * actions necessary to make an adapter operational, such as completing
953 * the initialization of HW modules, and enabling interrupts.
955 * Must be called with the rtnl lock held.
/*
 * cxgb_up - bring the adapter fully operational when the first port is
 * enabled: verify/upgrade firmware and TP SRAM, initialize the HW,
 * allocate queue sets and NAPI, request interrupts (MSI-X, MSI or INTx),
 * enable interrupts, run the one-time TP parity init on T3 rev C+, and
 * bind queue sets to schedulers.  Must be called with the rtnl lock held.
 * NOTE(review): this extract is missing many lines, including local
 * declarations, most goto-based error exits (e.g. "goto out;"), the
 * "out:" label and final return, and the tail of the INTx request_irq()
 * argument list — do not modify without the complete source.
 */
957 static int cxgb_up(struct adapter *adap)
962 if (!(adap->flags & FULL_INIT_DONE)) {
963 err = t3_check_fw_version(adap, &must_load);
964 if (err == -EINVAL) {
965 err = upgrade_fw(adap);
966 if (err && must_load)
970 err = t3_check_tpsram_version(adap, &must_load);
971 if (err == -EINVAL) {
972 err = update_tpsram(adap);
973 if (err && must_load)
978 * Clear interrupts now to catch errors if t3_init_hw fails.
979 * We clear them again later as initialization may trigger
980 * conditions that can interrupt.
984 err = t3_init_hw(adap, 0);
988 t3_set_reg_field(adap, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
989 t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));
991 err = setup_sge_qsets(adap);
996 if (!(adap->flags & NAPI_INIT))
998 adap->flags |= FULL_INIT_DONE;
1001 t3_intr_clear(adap);
1003 if (adap->flags & USING_MSIX) {
1004 name_msix_vecs(adap);
1005 err = request_irq(adap->msix_info[0].vec,
1006 t3_async_intr_handler, 0,
1007 adap->msix_info[0].desc, adap);
1011 err = request_msix_data_irqs(adap);
1013 free_irq(adap->msix_info[0].vec, adap);
1016 } else if ((err = request_irq(adap->pdev->irq,
1017 t3_intr_handler(adap,
1018 adap->sge.qs[0].rspq.
1020 (adap->flags & USING_MSI) ?
1025 enable_all_napi(adap);
1027 t3_intr_enable(adap);
1029 if (adap->params.rev >= T3_REV_C && !(adap->flags & TP_PARITY_INIT) &&
1030 is_offload(adap) && init_tp_parity(adap) == 0)
1031 adap->flags |= TP_PARITY_INIT;
1033 if (adap->flags & TP_PARITY_INIT) {
1034 t3_write_reg(adap, A_TP_INT_CAUSE,
1035 F_CMCACHEPERR | F_ARPLUTPERR);
1036 t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff);
1039 if (!(adap->flags & QUEUES_BOUND)) {
1040 err = bind_qsets(adap);
1042 CH_ERR(adap, "failed to bind qsets, err %d\n", err);
1043 t3_intr_disable(adap);
1044 free_irq_resources(adap);
1047 adap->flags |= QUEUES_BOUND;
1053 CH_ERR(adap, "request_irq failed, err %d\n", err);
1058 * Release resources when all the ports and offloading have been stopped.
1060 static void cxgb_down(struct adapter *adapter)
1062 t3_sge_stop(adapter);
1063 spin_lock_irq(&adapter->work_lock); /* sync with PHY intr task */
1064 t3_intr_disable(adapter);
1065 spin_unlock_irq(&adapter->work_lock);
1067 free_irq_resources(adapter);
1068 flush_workqueue(cxgb3_wq); /* wait for external IRQ handler */
1069 quiesce_rx(adapter);
/*
 * Queue the periodic adapter-check task: at linkpoll_period tenths of a
 * second when link polling is configured, otherwise at the statistics
 * update period.  NOTE(review): the timeo declaration (line 1074) and a
 * line before the queue_delayed_work call (line 1079, presumably a
 * non-zero guard on timeo) are missing from this extract.
 */
1072 static void schedule_chk_task(struct adapter *adap)
1076 timeo = adap->params.linkpoll_period ?
1077 (HZ * adap->params.linkpoll_period) / 10 :
1078 adap->params.stats_update_period * HZ;
1080 queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
/*
 * offload_open - activate offload support on a device: bring the adapter
 * up if no port had done so, enable TP offload mode, activate the
 * offload module, program MTU tables, create the scheduler sysfs group,
 * and notify registered offload clients.  The out_free error path undoes
 * the activation.  NOTE(review): declarations, early returns, the error
 * branch after cxgb3_offload_activate (lines 1089-1090, 1092, 1095-1096,
 * 1100-1102, 1108-1109, 1112, 1115-1116, 1118, 1122-1124) are missing
 * from this extract.
 */
1083 static int offload_open(struct net_device *dev)
1085 struct port_info *pi = netdev_priv(dev);
1086 struct adapter *adapter = pi->adapter;
1087 struct t3cdev *tdev = dev2t3cdev(dev);
1088 int adap_up = adapter->open_device_map & PORT_MASK;
1091 if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
1094 if (!adap_up && (err = cxgb_up(adapter)) < 0)
1097 t3_tp_set_offload_mode(adapter, 1);
1098 tdev->lldev = adapter->port[0];
1099 err = cxgb3_offload_activate(adapter);
1103 init_port_mtus(adapter);
1104 t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
1105 adapter->params.b_wnd,
1106 adapter->params.rev == 0 ?
1107 adapter->port[0]->mtu : 0xffff);
1110 if (sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group))
1111 dev_dbg(&dev->dev, "cannot create sysfs group\n");
1113 /* Call back all registered clients */
1114 cxgb3_add_clients(tdev);
1117 /* restore them in case the offload module has changed them */
1119 t3_tp_set_offload_mode(adapter, 0);
1120 clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
1121 cxgb3_set_dummy_ops(tdev);
/*
 * offload_close - reverse of offload_open(): notify clients, remove the
 * scheduler sysfs group, restore dummy ops, disable TP offload mode, and
 * bring the adapter down if no port remains open.
 * NOTE(review): the early return, the flush of deferred work, the
 * cxgb_down call and the final return (lines 1129, 1131, 1137-1138,
 * 1142, 1144-1145, 1147-1148) are missing from this extract.
 */
1126 static int offload_close(struct t3cdev *tdev)
1128 struct adapter *adapter = tdev2adap(tdev);
1130 if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
1133 /* Call back all registered clients */
1134 cxgb3_remove_clients(tdev);
1136 sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);
1139 cxgb3_set_dummy_ops(tdev);
1140 t3_tp_set_offload_mode(adapter, 0);
1141 clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
1143 if (!adapter->open_device_map)
1146 cxgb3_offload_deactivate(adapter);
/*
 * cxgb_open - .ndo_open: bring the adapter up on the first open port,
 * mark the port open, optionally enable offload, start the port's
 * interrupts and Tx queue, and schedule the periodic check task on the
 * first port.  NOTE(review): declarations, error returns, link_start and
 * related lines (lines 1155-1156, 1158-1159, 1163-1164, 1166-1168,
 * 1171, 1173-1175) are missing from this extract.
 */
1150 static int cxgb_open(struct net_device *dev)
1152 struct port_info *pi = netdev_priv(dev);
1153 struct adapter *adapter = pi->adapter;
1154 int other_ports = adapter->open_device_map & PORT_MASK;
1157 if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
1160 set_bit(pi->port_id, &adapter->open_device_map);
1161 if (is_offload(adapter) && !ofld_disable) {
1162 err = offload_open(dev);
1165 "Could not initialize offload capabilities\n");
1169 t3_port_intr_enable(adapter, pi->port_id);
1170 netif_start_queue(dev);
1172 schedule_chk_task(adapter);
/*
 * cxgb_close - .ndo_stop: disable the port's interrupts and queue, power
 * down the PHY, drop carrier, disable the MAC, clear the port from the
 * open map (synchronized with the update task), cancel the periodic
 * check task when the last port closes, and tear the adapter down when
 * nothing (ports or offload) remains open.
 * NOTE(review): the cxgb_down call and final return (lines 1197-1200)
 * are missing from this extract.
 */
1177 static int cxgb_close(struct net_device *dev)
1179 struct port_info *pi = netdev_priv(dev);
1180 struct adapter *adapter = pi->adapter;
1182 t3_port_intr_disable(adapter, pi->port_id);
1183 netif_stop_queue(dev);
1184 pi->phy.ops->power_down(&pi->phy, 1);
1185 netif_carrier_off(dev);
1186 t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
1188 spin_lock_irq(&adapter->work_lock); /* sync with update task */
1189 clear_bit(pi->port_id, &adapter->open_device_map);
1190 spin_unlock_irq(&adapter->work_lock);
1192 if (!(adapter->open_device_map & PORT_MASK))
1193 cancel_rearming_delayed_workqueue(cxgb3_wq,
1194 &adapter->adap_check_task);
1196 if (!adapter->open_device_map)
/*
 * cxgb_get_stats - .ndo_get_stats: refresh the MAC statistics under
 * stats_lock, then translate them into the netdev stats structure,
 * including aggregated and detailed rx/tx error counters.
 * NOTE(review): the final "return ns;" (line 1239) and a few interleaved
 * assignment lines (e.g. 1218, 1223) are missing from this extract.
 */
1202 static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
1204 struct port_info *pi = netdev_priv(dev);
1205 struct adapter *adapter = pi->adapter;
1206 struct net_device_stats *ns = &pi->netstats;
1207 const struct mac_stats *pstats;
1209 spin_lock(&adapter->stats_lock);
1210 pstats = t3_mac_update_stats(&pi->mac);
1211 spin_unlock(&adapter->stats_lock);
1213 ns->tx_bytes = pstats->tx_octets;
1214 ns->tx_packets = pstats->tx_frames;
1215 ns->rx_bytes = pstats->rx_octets;
1216 ns->rx_packets = pstats->rx_frames;
1217 ns->multicast = pstats->rx_mcast_frames;
1219 ns->tx_errors = pstats->tx_underrun;
1220 ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
1221 pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
1222 pstats->rx_fifo_ovfl;
1224 /* detailed rx_errors */
1225 ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
1226 ns->rx_over_errors = 0;
1227 ns->rx_crc_errors = pstats->rx_fcs_errs;
1228 ns->rx_frame_errors = pstats->rx_symbol_errs;
1229 ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
1230 ns->rx_missed_errors = pstats->rx_cong_drops;
1232 /* detailed tx_errors */
1233 ns->tx_aborted_errors = 0;
1234 ns->tx_carrier_errors = 0;
1235 ns->tx_fifo_errors = pstats->tx_underrun;
1236 ns->tx_heartbeat_errors = 0;
1237 ns->tx_window_errors = 0;
1241 static u32 get_msglevel(struct net_device *dev)
1243 struct port_info *pi = netdev_priv(dev);
1244 struct adapter *adapter = pi->adapter;
1246 return adapter->msg_enable;
1249 static void set_msglevel(struct net_device *dev, u32 val)
1251 struct port_info *pi = netdev_priv(dev);
1252 struct adapter *adapter = pi->adapter;
1254 adapter->msg_enable = val;
1257 static char stats_strings[][ETH_GSTRING_LEN] = {
1260 "TxMulticastFramesOK",
1261 "TxBroadcastFramesOK",
1268 "TxFrames128To255 ",
1269 "TxFrames256To511 ",
1270 "TxFrames512To1023 ",
1271 "TxFrames1024To1518 ",
1272 "TxFrames1519ToMax ",
1276 "RxMulticastFramesOK",
1277 "RxBroadcastFramesOK",
1288 "RxFrames128To255 ",
1289 "RxFrames256To511 ",
1290 "RxFrames512To1023 ",
1291 "RxFrames1024To1518 ",
1292 "RxFrames1519ToMax ",
1305 "CheckTXEnToggled ",
/*
 * ethtool .get_sset_count: number of statistics strings for
 * ETH_SS_STATS.  NOTE(review): the switch/default handling around the
 * return (lines 1311-1313, 1315-1318) is missing from this extract.
 */
1310 static int get_sset_count(struct net_device *dev, int sset)
1314 return ARRAY_SIZE(stats_strings);
1320 #define T3_REGMAP_SIZE (3 * 1024)
1322 static int get_regs_len(struct net_device *dev)
1324 return T3_REGMAP_SIZE;
/*
 * ethtool .get_eeprom_len.  NOTE(review): the body (lines 1328-1330) is
 * missing from this extract.
 */
1327 static int get_eeprom_len(struct net_device *dev)
/*
 * ethtool .get_drvinfo: report driver name/version, PCI bus info, and
 * the firmware + TP microcode versions (read under stats_lock).
 * NOTE(review): the fw_vers/tp_vers declarations (lines 1336-1338) and
 * the condition selecting between the "N/A" and formatted fw_version
 * branches (lines 1347, 1349) are missing from this extract.
 */
1332 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1334 struct port_info *pi = netdev_priv(dev);
1335 struct adapter *adapter = pi->adapter;
1339 spin_lock(&adapter->stats_lock);
1340 t3_get_fw_version(adapter, &fw_vers);
1341 t3_get_tp_version(adapter, &tp_vers);
1342 spin_unlock(&adapter->stats_lock);
1344 strcpy(info->driver, DRV_NAME);
1345 strcpy(info->version, DRV_VERSION);
1346 strcpy(info->bus_info, pci_name(adapter->pdev));
1348 strcpy(info->fw_version, "N/A");
1350 snprintf(info->fw_version, sizeof(info->fw_version),
1351 "%s %u.%u.%u TP %u.%u.%u",
1352 G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
1353 G_FW_VERSION_MAJOR(fw_vers),
1354 G_FW_VERSION_MINOR(fw_vers),
1355 G_FW_VERSION_MICRO(fw_vers),
1356 G_TP_VERSION_MAJOR(tp_vers),
1357 G_TP_VERSION_MINOR(tp_vers),
1358 G_TP_VERSION_MICRO(tp_vers));
1362 static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
1364 if (stringset == ETH_SS_STATS)
1365 memcpy(data, stats_strings, sizeof(stats_strings));
1368 static unsigned long collect_sge_port_stats(struct adapter *adapter,
1369 struct port_info *p, int idx)
1372 unsigned long tot = 0;
1374 for (i = p->first_qset; i < p->first_qset + p->nqsets; ++i)
1375 tot += adapter->sge.qs[i].port_stats[idx];
1379 static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1382 struct port_info *pi = netdev_priv(dev);
1383 struct adapter *adapter = pi->adapter;
1384 const struct mac_stats *s;
1386 spin_lock(&adapter->stats_lock);
1387 s = t3_mac_update_stats(&pi->mac);
1388 spin_unlock(&adapter->stats_lock);
1390 *data++ = s->tx_octets;
1391 *data++ = s->tx_frames;
1392 *data++ = s->tx_mcast_frames;
1393 *data++ = s->tx_bcast_frames;
1394 *data++ = s->tx_pause;
1395 *data++ = s->tx_underrun;
1396 *data++ = s->tx_fifo_urun;
1398 *data++ = s->tx_frames_64;
1399 *data++ = s->tx_frames_65_127;
1400 *data++ = s->tx_frames_128_255;
1401 *data++ = s->tx_frames_256_511;
1402 *data++ = s->tx_frames_512_1023;
1403 *data++ = s->tx_frames_1024_1518;
1404 *data++ = s->tx_frames_1519_max;
1406 *data++ = s->rx_octets;
1407 *data++ = s->rx_frames;
1408 *data++ = s->rx_mcast_frames;
1409 *data++ = s->rx_bcast_frames;
1410 *data++ = s->rx_pause;
1411 *data++ = s->rx_fcs_errs;
1412 *data++ = s->rx_symbol_errs;
1413 *data++ = s->rx_short;
1414 *data++ = s->rx_jabber;
1415 *data++ = s->rx_too_long;
1416 *data++ = s->rx_fifo_ovfl;
1418 *data++ = s->rx_frames_64;
1419 *data++ = s->rx_frames_65_127;
1420 *data++ = s->rx_frames_128_255;
1421 *data++ = s->rx_frames_256_511;
1422 *data++ = s->rx_frames_512_1023;
1423 *data++ = s->rx_frames_1024_1518;
1424 *data++ = s->rx_frames_1519_max;
1426 *data++ = pi->phy.fifo_errors;
1428 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
1429 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
1430 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
1431 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
1432 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
1433 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_AGGR);
1434 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_FLUSHED);
1435 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_NO_DESC);
1436 *data++ = s->rx_cong_drops;
1438 *data++ = s->num_toggled;
1439 *data++ = s->num_resets;
1442 static inline void reg_block_dump(struct adapter *ap, void *buf,
1443 unsigned int start, unsigned int end)
1445 u32 *p = buf + start;
1447 for (; start <= end; start += sizeof(u32))
1448 *p++ = t3_read_reg(ap, start);
1451 static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1454 struct port_info *pi = netdev_priv(dev);
1455 struct adapter *ap = pi->adapter;
1459 * bits 0..9: chip version
1460 * bits 10..15: chip revision
1461 * bit 31: set for PCIe cards
1463 regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);
1466 * We skip the MAC statistics registers because they are clear-on-read.
1467 * Also reading multi-register stats would need to synchronize with the
1468 * periodic mac stats accumulation. Hard to justify the complexity.
1470 memset(buf, 0, T3_REGMAP_SIZE);
1471 reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
1472 reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
1473 reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
1474 reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
1475 reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
1476 reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
1477 XGM_REG(A_XGM_SERDES_STAT3, 1));
1478 reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
1479 XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
1482 static int restart_autoneg(struct net_device *dev)
1484 struct port_info *p = netdev_priv(dev);
1486 if (!netif_running(dev))
1488 if (p->link_config.autoneg != AUTONEG_ENABLE)
1490 p->phy.ops->autoneg_restart(&p->phy);
1494 static int cxgb3_phys_id(struct net_device *dev, u32 data)
1496 struct port_info *pi = netdev_priv(dev);
1497 struct adapter *adapter = pi->adapter;
1503 for (i = 0; i < data * 2; i++) {
1504 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1505 (i & 1) ? F_GPIO0_OUT_VAL : 0);
1506 if (msleep_interruptible(500))
1509 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1514 static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1516 struct port_info *p = netdev_priv(dev);
1518 cmd->supported = p->link_config.supported;
1519 cmd->advertising = p->link_config.advertising;
1521 if (netif_carrier_ok(dev)) {
1522 cmd->speed = p->link_config.speed;
1523 cmd->duplex = p->link_config.duplex;
1529 cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
1530 cmd->phy_address = p->phy.addr;
1531 cmd->transceiver = XCVR_EXTERNAL;
1532 cmd->autoneg = p->link_config.autoneg;
1538 static int speed_duplex_to_caps(int speed, int duplex)
1544 if (duplex == DUPLEX_FULL)
1545 cap = SUPPORTED_10baseT_Full;
1547 cap = SUPPORTED_10baseT_Half;
1550 if (duplex == DUPLEX_FULL)
1551 cap = SUPPORTED_100baseT_Full;
1553 cap = SUPPORTED_100baseT_Half;
1556 if (duplex == DUPLEX_FULL)
1557 cap = SUPPORTED_1000baseT_Full;
1559 cap = SUPPORTED_1000baseT_Half;
1562 if (duplex == DUPLEX_FULL)
1563 cap = SUPPORTED_10000baseT_Full;
1568 #define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1569 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1570 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
1571 ADVERTISED_10000baseT_Full)
1573 static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1576 struct port_info *p = netdev_priv(dev);
1577 struct link_config *lc = &p->link_config;
1579 if (!(lc->supported & SUPPORTED_Autoneg)) {
1581 * PHY offers a single speed/duplex. See if that's what's
1584 if (cmd->autoneg == AUTONEG_DISABLE) {
1585 cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
1586 if (lc->supported & cap)
1592 if (cmd->autoneg == AUTONEG_DISABLE) {
1593 int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
1595 if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
1597 lc->requested_speed = cmd->speed;
1598 lc->requested_duplex = cmd->duplex;
1599 lc->advertising = 0;
1601 cmd->advertising &= ADVERTISED_MASK;
1602 cmd->advertising &= lc->supported;
1603 if (!cmd->advertising)
1605 lc->requested_speed = SPEED_INVALID;
1606 lc->requested_duplex = DUPLEX_INVALID;
1607 lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
1609 lc->autoneg = cmd->autoneg;
1610 if (netif_running(dev))
1611 t3_link_start(&p->phy, &p->mac, lc);
1615 static void get_pauseparam(struct net_device *dev,
1616 struct ethtool_pauseparam *epause)
1618 struct port_info *p = netdev_priv(dev);
1620 epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
1621 epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
1622 epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
1625 static int set_pauseparam(struct net_device *dev,
1626 struct ethtool_pauseparam *epause)
1628 struct port_info *p = netdev_priv(dev);
1629 struct link_config *lc = &p->link_config;
1631 if (epause->autoneg == AUTONEG_DISABLE)
1632 lc->requested_fc = 0;
1633 else if (lc->supported & SUPPORTED_Autoneg)
1634 lc->requested_fc = PAUSE_AUTONEG;
1638 if (epause->rx_pause)
1639 lc->requested_fc |= PAUSE_RX;
1640 if (epause->tx_pause)
1641 lc->requested_fc |= PAUSE_TX;
1642 if (lc->autoneg == AUTONEG_ENABLE) {
1643 if (netif_running(dev))
1644 t3_link_start(&p->phy, &p->mac, lc);
1646 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1647 if (netif_running(dev))
1648 t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
1653 static u32 get_rx_csum(struct net_device *dev)
1655 struct port_info *p = netdev_priv(dev);
1657 return p->rx_csum_offload;
1660 static int set_rx_csum(struct net_device *dev, u32 data)
1662 struct port_info *p = netdev_priv(dev);
1664 p->rx_csum_offload = data;
1668 for (i = p->first_qset; i < p->first_qset + p->nqsets; i++)
1669 set_qset_lro(dev, i, 0);
1674 static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1676 struct port_info *pi = netdev_priv(dev);
1677 struct adapter *adapter = pi->adapter;
1678 const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];
1680 e->rx_max_pending = MAX_RX_BUFFERS;
1681 e->rx_mini_max_pending = 0;
1682 e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
1683 e->tx_max_pending = MAX_TXQ_ENTRIES;
1685 e->rx_pending = q->fl_size;
1686 e->rx_mini_pending = q->rspq_size;
1687 e->rx_jumbo_pending = q->jumbo_size;
1688 e->tx_pending = q->txq_size[0];
1691 static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1693 struct port_info *pi = netdev_priv(dev);
1694 struct adapter *adapter = pi->adapter;
1695 struct qset_params *q;
1698 if (e->rx_pending > MAX_RX_BUFFERS ||
1699 e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
1700 e->tx_pending > MAX_TXQ_ENTRIES ||
1701 e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1702 e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1703 e->rx_pending < MIN_FL_ENTRIES ||
1704 e->rx_jumbo_pending < MIN_FL_ENTRIES ||
1705 e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
1708 if (adapter->flags & FULL_INIT_DONE)
1711 q = &adapter->params.sge.qset[pi->first_qset];
1712 for (i = 0; i < pi->nqsets; ++i, ++q) {
1713 q->rspq_size = e->rx_mini_pending;
1714 q->fl_size = e->rx_pending;
1715 q->jumbo_size = e->rx_jumbo_pending;
1716 q->txq_size[0] = e->tx_pending;
1717 q->txq_size[1] = e->tx_pending;
1718 q->txq_size[2] = e->tx_pending;
1723 static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1725 struct port_info *pi = netdev_priv(dev);
1726 struct adapter *adapter = pi->adapter;
1727 struct qset_params *qsp = &adapter->params.sge.qset[0];
1728 struct sge_qset *qs = &adapter->sge.qs[0];
1730 if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
1733 qsp->coalesce_usecs = c->rx_coalesce_usecs;
1734 t3_update_qset_coalesce(qs, qsp);
1738 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1740 struct port_info *pi = netdev_priv(dev);
1741 struct adapter *adapter = pi->adapter;
1742 struct qset_params *q = adapter->params.sge.qset;
1744 c->rx_coalesce_usecs = q->coalesce_usecs;
1748 static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
1751 struct port_info *pi = netdev_priv(dev);
1752 struct adapter *adapter = pi->adapter;
1755 u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
1759 e->magic = EEPROM_MAGIC;
1760 for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
1761 err = t3_seeprom_read(adapter, i, (__le32 *) & buf[i]);
1764 memcpy(data, buf + e->offset, e->len);
1769 static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
1772 struct port_info *pi = netdev_priv(dev);
1773 struct adapter *adapter = pi->adapter;
1774 u32 aligned_offset, aligned_len;
1779 if (eeprom->magic != EEPROM_MAGIC)
1782 aligned_offset = eeprom->offset & ~3;
1783 aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
1785 if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
1786 buf = kmalloc(aligned_len, GFP_KERNEL);
1789 err = t3_seeprom_read(adapter, aligned_offset, (__le32 *) buf);
1790 if (!err && aligned_len > 4)
1791 err = t3_seeprom_read(adapter,
1792 aligned_offset + aligned_len - 4,
1793 (__le32 *) & buf[aligned_len - 4]);
1796 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
1800 err = t3_seeprom_wp(adapter, 0);
1804 for (p = (__le32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
1805 err = t3_seeprom_write(adapter, aligned_offset, *p);
1806 aligned_offset += 4;
1810 err = t3_seeprom_wp(adapter, 1);
1817 static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1821 memset(&wol->sopass, 0, sizeof(wol->sopass));
1824 static int cxgb3_set_flags(struct net_device *dev, u32 data)
1826 struct port_info *pi = netdev_priv(dev);
1829 if (data & ETH_FLAG_LRO) {
1830 if (!pi->rx_csum_offload)
1833 for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++)
1834 set_qset_lro(dev, i, 1);
1837 for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++)
1838 set_qset_lro(dev, i, 0);
1843 static const struct ethtool_ops cxgb_ethtool_ops = {
1844 .get_settings = get_settings,
1845 .set_settings = set_settings,
1846 .get_drvinfo = get_drvinfo,
1847 .get_msglevel = get_msglevel,
1848 .set_msglevel = set_msglevel,
1849 .get_ringparam = get_sge_param,
1850 .set_ringparam = set_sge_param,
1851 .get_coalesce = get_coalesce,
1852 .set_coalesce = set_coalesce,
1853 .get_eeprom_len = get_eeprom_len,
1854 .get_eeprom = get_eeprom,
1855 .set_eeprom = set_eeprom,
1856 .get_pauseparam = get_pauseparam,
1857 .set_pauseparam = set_pauseparam,
1858 .get_rx_csum = get_rx_csum,
1859 .set_rx_csum = set_rx_csum,
1860 .set_tx_csum = ethtool_op_set_tx_csum,
1861 .set_sg = ethtool_op_set_sg,
1862 .get_link = ethtool_op_get_link,
1863 .get_strings = get_strings,
1864 .phys_id = cxgb3_phys_id,
1865 .nway_reset = restart_autoneg,
1866 .get_sset_count = get_sset_count,
1867 .get_ethtool_stats = get_stats,
1868 .get_regs_len = get_regs_len,
1869 .get_regs = get_regs,
1871 .set_tso = ethtool_op_set_tso,
1872 .get_flags = ethtool_op_get_flags,
1873 .set_flags = cxgb3_set_flags,
/*
 * Range check used by the extension ioctls: a negative value means the
 * caller left the parameter unset and is always accepted; otherwise the
 * value must lie within [lo, hi].
 */
static int in_range(int val, int lo, int hi)
{
	if (val < 0)
		return 1;
	return lo <= val && val <= hi;
}
/*
 * Chelsio private ioctl dispatcher (SIOCCHIOCTL payload).  Copies a command
 * header from user space and switches on cmd.  NOTE(review): this extraction
 * is truncated -- returns, braces and some declarations are not visible;
 * comments below describe only what the visible lines establish.
 */
1881 static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1883 struct port_info *pi = netdev_priv(dev);
1884 struct adapter *adapter = pi->adapter;
1888 if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
/* Set per-qset parameters (ring sizes, coalescing, polling, LRO). */
1892 case CHELSIO_SET_QSET_PARAMS:{
1894 struct qset_params *q;
1895 struct ch_qset_params t;
1896 int q1 = pi->first_qset;
1897 int nqsets = pi->nqsets;
1899 if (!capable(CAP_NET_ADMIN))
1901 if (copy_from_user(&t, useraddr, sizeof(t)))
1903 if (t.qset_idx >= SGE_QSETS)
/* Validate every supplied field; in_range() treats negatives as unset. */
1905 if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
1906 !in_range(t.cong_thres, 0, 255) ||
1907 !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
1909 !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
1911 !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
1912 MAX_CTRL_TXQ_ENTRIES) ||
1913 !in_range(t.fl_size[0], MIN_FL_ENTRIES,
1915 || !in_range(t.fl_size[1], MIN_FL_ENTRIES,
1916 MAX_RX_JUMBO_BUFFERS)
1917 || !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
/* LRO may only be enabled on qsets whose port has Rx csum offload. */
1921 if ((adapter->flags & FULL_INIT_DONE) && t.lro > 0)
1922 for_each_port(adapter, i) {
1923 pi = adap2pinfo(adapter, i);
1924 if (t.qset_idx >= pi->first_qset &&
1925 t.qset_idx < pi->first_qset + pi->nqsets &&
1926 !pi->rx_csum_offload)
/* Ring sizes etc. cannot change once the adapter is fully initialized. */
1930 if ((adapter->flags & FULL_INIT_DONE) &&
1931 (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
1932 t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
1933 t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
1934 t.polling >= 0 || t.cong_thres >= 0))
1937 /* Allow setting of any available qset when offload enabled */
1938 if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
1940 for_each_port(adapter, i) {
1941 pi = adap2pinfo(adapter, i);
1942 nqsets += pi->first_qset + pi->nqsets;
1946 if (t.qset_idx < q1)
1948 if (t.qset_idx > q1 + nqsets - 1)
1951 q = &adapter->params.sge.qset[t.qset_idx];
/* Apply only the fields the caller supplied (>= 0). */
1953 if (t.rspq_size >= 0)
1954 q->rspq_size = t.rspq_size;
1955 if (t.fl_size[0] >= 0)
1956 q->fl_size = t.fl_size[0];
1957 if (t.fl_size[1] >= 0)
1958 q->jumbo_size = t.fl_size[1];
1959 if (t.txq_size[0] >= 0)
1960 q->txq_size[0] = t.txq_size[0];
1961 if (t.txq_size[1] >= 0)
1962 q->txq_size[1] = t.txq_size[1];
1963 if (t.txq_size[2] >= 0)
1964 q->txq_size[2] = t.txq_size[2];
1965 if (t.cong_thres >= 0)
1966 q->cong_thres = t.cong_thres;
1967 if (t.intr_lat >= 0) {
1968 struct sge_qset *qs =
1969 &adapter->sge.qs[t.qset_idx];
1971 q->coalesce_usecs = t.intr_lat;
1972 t3_update_qset_coalesce(qs, q);
1974 if (t.polling >= 0) {
1975 if (adapter->flags & USING_MSIX)
1976 q->polling = t.polling;
1978 /* No polling with INTx for T3A */
1979 if (adapter->params.rev == 0 &&
1980 !(adapter->flags & USING_MSI))
/* Without MSI-X, polling mode applies to all qsets at once. */
1983 for (i = 0; i < SGE_QSETS; i++) {
1984 q = &adapter->params.sge.
1986 q->polling = t.polling;
1991 set_qset_lro(dev, t.qset_idx, t.lro);
/* Read back per-qset parameters. */
1995 case CHELSIO_GET_QSET_PARAMS:{
1996 struct qset_params *q;
1997 struct ch_qset_params t;
1998 int q1 = pi->first_qset;
1999 int nqsets = pi->nqsets;
2002 if (copy_from_user(&t, useraddr, sizeof(t)))
2005 /* Display qsets for all ports when offload enabled */
2006 if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2008 for_each_port(adapter, i) {
2009 pi = adap2pinfo(adapter, i);
2010 nqsets = pi->first_qset + pi->nqsets;
2014 if (t.qset_idx >= nqsets)
2017 q = &adapter->params.sge.qset[q1 + t.qset_idx];
2018 t.rspq_size = q->rspq_size;
2019 t.txq_size[0] = q->txq_size[0];
2020 t.txq_size[1] = q->txq_size[1];
2021 t.txq_size[2] = q->txq_size[2];
2022 t.fl_size[0] = q->fl_size;
2023 t.fl_size[1] = q->jumbo_size;
2024 t.polling = q->polling;
2026 t.intr_lat = q->coalesce_usecs;
2027 t.cong_thres = q->cong_thres;
/* Report the IRQ vector servicing this qset. */
2030 if (adapter->flags & USING_MSIX)
2031 t.vector = adapter->msix_info[q1 + t.qset_idx + 1].vec;
2033 t.vector = adapter->pdev->irq;
2035 if (copy_to_user(useraddr, &t, sizeof(t)))
/* Change the number of qsets assigned to this port. */
2039 case CHELSIO_SET_QSET_NUM:{
2040 struct ch_reg edata;
2041 unsigned int i, first_qset = 0, other_qsets = 0;
2043 if (!capable(CAP_NET_ADMIN))
2045 if (adapter->flags & FULL_INIT_DONE)
2047 if (copy_from_user(&edata, useraddr, sizeof(edata)))
/* more than one qset per port requires MSI-X */
2049 if (edata.val < 1 ||
2050 (edata.val > 1 && !(adapter->flags & USING_MSIX)))
2053 for_each_port(adapter, i)
2054 if (adapter->port[i] && adapter->port[i] != dev)
2055 other_qsets += adap2pinfo(adapter, i)->nqsets;
2057 if (edata.val + other_qsets > SGE_QSETS)
2060 pi->nqsets = edata.val;
/* Re-pack first_qset assignments contiguously across ports. */
2062 for_each_port(adapter, i)
2063 if (adapter->port[i]) {
2064 pi = adap2pinfo(adapter, i);
2065 pi->first_qset = first_qset;
2066 first_qset += pi->nqsets;
/* Report the number of qsets on this port. */
2070 case CHELSIO_GET_QSET_NUM:{
2071 struct ch_reg edata;
2073 edata.cmd = CHELSIO_GET_QSET_NUM;
2074 edata.val = pi->nqsets;
2075 if (copy_to_user(useraddr, &edata, sizeof(edata)))
/* Load a firmware image supplied by user space. */
2079 case CHELSIO_LOAD_FW:{
2081 struct ch_mem_range t;
2083 if (!capable(CAP_SYS_RAWIO))
2085 if (copy_from_user(&t, useraddr, sizeof(t)))
2087 /* Check t.len sanity ? */
2088 fw_data = kmalloc(t.len, GFP_KERNEL);
2093 (fw_data, useraddr + sizeof(t), t.len)) {
2098 ret = t3_load_fw(adapter, fw_data, t.len);
/* Replace the TCP MTU table used by the offload engine. */
2104 case CHELSIO_SETMTUTAB:{
2108 if (!is_offload(adapter))
2110 if (!capable(CAP_NET_ADMIN))
2112 if (offload_running(adapter))
2114 if (copy_from_user(&m, useraddr, sizeof(m)))
2116 if (m.nmtus != NMTUS)
2118 if (m.mtus[0] < 81) /* accommodate SACK */
2121 /* MTUs must be in ascending order */
2122 for (i = 1; i < NMTUS; ++i)
2123 if (m.mtus[i] < m.mtus[i - 1])
2126 memcpy(adapter->params.mtus, m.mtus,
2127 sizeof(adapter->params.mtus));
/* Report the payload-memory (PM) configuration. */
2130 case CHELSIO_GET_PM:{
2131 struct tp_params *p = &adapter->params.tp;
2132 struct ch_pm m = {.cmd = CHELSIO_GET_PM };
2134 if (!is_offload(adapter))
2136 m.tx_pg_sz = p->tx_pg_size;
2137 m.tx_num_pg = p->tx_num_pgs;
2138 m.rx_pg_sz = p->rx_pg_size;
2139 m.rx_num_pg = p->rx_num_pgs;
2140 m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
2141 if (copy_to_user(useraddr, &m, sizeof(m)))
/* Reconfigure the payload-memory page layout (pre-init only). */
2145 case CHELSIO_SET_PM:{
2147 struct tp_params *p = &adapter->params.tp;
2149 if (!is_offload(adapter))
2151 if (!capable(CAP_NET_ADMIN))
2153 if (adapter->flags & FULL_INIT_DONE)
2155 if (copy_from_user(&m, useraddr, sizeof(m)))
2157 if (!is_power_of_2(m.rx_pg_sz) ||
2158 !is_power_of_2(m.tx_pg_sz))
2159 return -EINVAL; /* not power of 2 */
2160 if (!(m.rx_pg_sz & 0x14000))
2161 return -EINVAL; /* not 16KB or 64KB */
2162 if (!(m.tx_pg_sz & 0x1554000))
/* -1 means keep the current page counts */
2164 if (m.tx_num_pg == -1)
2165 m.tx_num_pg = p->tx_num_pgs;
2166 if (m.rx_num_pg == -1)
2167 m.rx_num_pg = p->rx_num_pgs;
2168 if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
2170 if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
2171 m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
2173 p->rx_pg_size = m.rx_pg_sz;
2174 p->tx_pg_size = m.tx_pg_sz;
2175 p->rx_num_pgs = m.rx_num_pg;
2176 p->tx_num_pgs = m.tx_num_pg;
/* Dump a range of one of the MC7 memories (CM/PMRX/PMTX). */
2179 case CHELSIO_GET_MEM:{
2180 struct ch_mem_range t;
2184 if (!is_offload(adapter))
2186 if (!(adapter->flags & FULL_INIT_DONE))
2187 return -EIO; /* need the memory controllers */
2188 if (copy_from_user(&t, useraddr, sizeof(t)))
2190 if ((t.addr & 7) || (t.len & 7))
2192 if (t.mem_id == MEM_CM)
2194 else if (t.mem_id == MEM_PMRX)
2195 mem = &adapter->pmrx;
2196 else if (t.mem_id == MEM_PMTX)
2197 mem = &adapter->pmtx;
2203 * bits 0..9: chip version
2204 * bits 10..15: chip revision
2206 t.version = 3 | (adapter->params.rev << 10);
2207 if (copy_to_user(useraddr, &t, sizeof(t)))
2211 * Read 256 bytes at a time as len can be large and we don't
2212 * want to use huge intermediate buffers.
2214 useraddr += sizeof(t); /* advance to start of buffer */
2216 unsigned int chunk =
2217 min_t(unsigned int, t.len, sizeof(buf));
2220 t3_mc7_bd_read(mem, t.addr / 8, chunk / 8,
2224 if (copy_to_user(useraddr, buf, chunk))
/* Program the hardware packet trace filters. */
2232 case CHELSIO_SET_TRACE_FILTER:{
2234 const struct trace_params *tp;
2236 if (!capable(CAP_NET_ADMIN))
2238 if (!offload_running(adapter))
2240 if (copy_from_user(&t, useraddr, sizeof(t)))
2243 tp = (const struct trace_params *)&t.sip;
2245 t3_config_trace_filter(adapter, tp, 0,
2249 t3_config_trace_filter(adapter, tp, 1,
/*
 * Standard net_device ioctl handler: MII register access (clause 22 for
 * 1G PHYs, clause 45 with an MMD for 10G PHYs) plus the Chelsio private
 * extension ioctl.  NOTE(review): extraction is truncated -- the switch,
 * case labels and error returns are not visible here.
 */
2260 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
2262 struct mii_ioctl_data *data = if_mii(req);
2263 struct port_info *pi = netdev_priv(dev);
2264 struct adapter *adapter = pi->adapter;
/* SIOCGMIIPHY: report the PHY address. */
2269 data->phy_id = pi->phy.addr;
/* SIOCGMIIREG: read a PHY register. */
2273 struct cphy *phy = &pi->phy;
2275 if (!phy->mdio_read)
2277 if (is_10G(adapter)) {
/* clause 45: upper bits of phy_id select the MMD */
2278 mmd = data->phy_id >> 8;
2281 else if (mmd > MDIO_DEV_VEND2)
2285 phy->mdio_read(adapter, data->phy_id & 0x1f,
2286 mmd, data->reg_num, &val);
2289 phy->mdio_read(adapter, data->phy_id & 0x1f,
2290 0, data->reg_num & 0x1f,
2293 data->val_out = val;
/* SIOCSMIIREG: write a PHY register (privileged). */
2297 struct cphy *phy = &pi->phy;
2299 if (!capable(CAP_NET_ADMIN))
2301 if (!phy->mdio_write)
2303 if (is_10G(adapter)) {
2304 mmd = data->phy_id >> 8;
2307 else if (mmd > MDIO_DEV_VEND2)
2311 phy->mdio_write(adapter,
2312 data->phy_id & 0x1f, mmd,
2317 phy->mdio_write(adapter,
2318 data->phy_id & 0x1f, 0,
2319 data->reg_num & 0x1f,
/* SIOCCHIOCTL: Chelsio private commands. */
2324 return cxgb_extension_ioctl(dev, req->ifr_data);
2331 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
2333 struct port_info *pi = netdev_priv(dev);
2334 struct adapter *adapter = pi->adapter;
2337 if (new_mtu < 81) /* accommodate SACK */
2339 if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
2342 init_port_mtus(adapter);
2343 if (adapter->params.rev == 0 && offload_running(adapter))
2344 t3_load_mtus(adapter, adapter->params.mtus,
2345 adapter->params.a_wnd, adapter->params.b_wnd,
2346 adapter->port[0]->mtu);
2350 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2352 struct port_info *pi = netdev_priv(dev);
2353 struct adapter *adapter = pi->adapter;
2354 struct sockaddr *addr = p;
2356 if (!is_valid_ether_addr(addr->sa_data))
2359 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2360 t3_mac_set_address(&pi->mac, 0, dev->dev_addr);
2361 if (offload_running(adapter))
2362 write_smt_entry(adapter, pi->port_id);
2367 * t3_synchronize_rx - wait for current Rx processing on a port to complete
2368 * @adap: the adapter
2371 * Ensures that current Rx processing on any of the queues associated with
2372 * the given port completes before returning. We do this by acquiring and
2373 * releasing the locks of the response queues associated with the port.
2375 static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
2379 for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
2380 struct sge_rspq *q = &adap->sge.qs[i].rspq;
2382 spin_lock_irq(&q->lock);
2383 spin_unlock_irq(&q->lock);
2387 static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
2389 struct port_info *pi = netdev_priv(dev);
2390 struct adapter *adapter = pi->adapter;
2393 if (adapter->params.rev > 0)
2394 t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
2396 /* single control for all ports */
2397 unsigned int i, have_vlans = 0;
2398 for_each_port(adapter, i)
2399 have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;
2401 t3_set_vlan_accel(adapter, 1, have_vlans);
2403 t3_synchronize_rx(adapter, pi);
2406 #ifdef CONFIG_NET_POLL_CONTROLLER
2407 static void cxgb_netpoll(struct net_device *dev)
2409 struct port_info *pi = netdev_priv(dev);
2410 struct adapter *adapter = pi->adapter;
2413 for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
2414 struct sge_qset *qs = &adapter->sge.qs[qidx];
2417 if (adapter->flags & USING_MSIX)
2422 t3_intr_handler(adapter, qs->rspq.polling) (0, source);
2428 * Periodic accumulation of MAC statistics.
2430 static void mac_stats_update(struct adapter *adapter)
2434 for_each_port(adapter, i) {
2435 struct net_device *dev = adapter->port[i];
2436 struct port_info *p = netdev_priv(dev);
2438 if (netif_running(dev)) {
2439 spin_lock(&adapter->stats_lock);
2440 t3_mac_update_stats(&p->mac);
2441 spin_unlock(&adapter->stats_lock);
2446 static void check_link_status(struct adapter *adapter)
2450 for_each_port(adapter, i) {
2451 struct net_device *dev = adapter->port[i];
2452 struct port_info *p = netdev_priv(dev);
2454 if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev))
2455 t3_link_changed(adapter, i);
2459 static void check_t3b2_mac(struct adapter *adapter)
2463 if (!rtnl_trylock()) /* synchronize with ifdown */
2466 for_each_port(adapter, i) {
2467 struct net_device *dev = adapter->port[i];
2468 struct port_info *p = netdev_priv(dev);
2471 if (!netif_running(dev))
2475 if (netif_running(dev) && netif_carrier_ok(dev))
2476 status = t3b2_mac_watchdog_task(&p->mac);
2478 p->mac.stats.num_toggled++;
2479 else if (status == 2) {
2480 struct cmac *mac = &p->mac;
2482 t3_mac_set_mtu(mac, dev->mtu);
2483 t3_mac_set_address(mac, 0, dev->dev_addr);
2484 cxgb_set_rxmode(dev);
2485 t3_link_start(&p->phy, mac, &p->link_config);
2486 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
2487 t3_port_intr_enable(adapter, p->port_id);
2488 p->mac.stats.num_resets++;
2495 static void t3_adap_check_task(struct work_struct *work)
2497 struct adapter *adapter = container_of(work, struct adapter,
2498 adap_check_task.work);
2499 const struct adapter_params *p = &adapter->params;
2501 adapter->check_task_cnt++;
2503 /* Check link status for PHYs without interrupts */
2504 if (p->linkpoll_period)
2505 check_link_status(adapter);
2507 /* Accumulate MAC stats if needed */
2508 if (!p->linkpoll_period ||
2509 (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
2510 p->stats_update_period) {
2511 mac_stats_update(adapter);
2512 adapter->check_task_cnt = 0;
2515 if (p->rev == T3_REV_B2)
2516 check_t3b2_mac(adapter);
2518 /* Schedule the next check update if any port is active. */
2519 spin_lock_irq(&adapter->work_lock);
2520 if (adapter->open_device_map & PORT_MASK)
2521 schedule_chk_task(adapter);
2522 spin_unlock_irq(&adapter->work_lock);
2526 * Processes external (PHY) interrupts in process context.
2528 static void ext_intr_task(struct work_struct *work)
2530 struct adapter *adapter = container_of(work, struct adapter,
2531 ext_intr_handler_task);
2533 t3_phy_intr_handler(adapter);
2535 /* Now reenable external interrupts */
2536 spin_lock_irq(&adapter->work_lock);
2537 if (adapter->slow_intr_mask) {
2538 adapter->slow_intr_mask |= F_T3DBG;
2539 t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
2540 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2541 adapter->slow_intr_mask);
2543 spin_unlock_irq(&adapter->work_lock);
2547 * Interrupt-context handler for external (PHY) interrupts.
2549 void t3_os_ext_intr_handler(struct adapter *adapter)
2552 * Schedule a task to handle external interrupts as they may be slow
2553 * and we use a mutex to protect MDIO registers. We disable PHY
2554 * interrupts in the meantime and let the task reenable them when
2557 spin_lock(&adapter->work_lock);
2558 if (adapter->slow_intr_mask) {
2559 adapter->slow_intr_mask &= ~F_T3DBG;
2560 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2561 adapter->slow_intr_mask);
2562 queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
2564 spin_unlock(&adapter->work_lock);
2567 static int t3_adapter_error(struct adapter *adapter, int reset)
2571 /* Stop all ports */
2572 for_each_port(adapter, i) {
2573 struct net_device *netdev = adapter->port[i];
2575 if (netif_running(netdev))
2579 if (is_offload(adapter) &&
2580 test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
2581 offload_close(&adapter->tdev);
2583 /* Stop SGE timers */
2584 t3_stop_sge_timers(adapter);
2586 adapter->flags &= ~FULL_INIT_DONE;
2589 ret = t3_reset_adapter(adapter);
2591 pci_disable_device(adapter->pdev);
2596 static int t3_reenable_adapter(struct adapter *adapter)
2598 if (pci_enable_device(adapter->pdev)) {
2599 dev_err(&adapter->pdev->dev,
2600 "Cannot re-enable PCI device after reset.\n");
2603 pci_set_master(adapter->pdev);
2604 pci_restore_state(adapter->pdev);
2606 /* Free sge resources */
2607 t3_free_sge_resources(adapter);
2609 if (t3_replay_prep_adapter(adapter))
2617 static void t3_resume_ports(struct adapter *adapter)
2621 /* Restart the ports */
2622 for_each_port(adapter, i) {
2623 struct net_device *netdev = adapter->port[i];
2625 if (netif_running(netdev)) {
2626 if (cxgb_open(netdev)) {
2627 dev_err(&adapter->pdev->dev,
2628 "can't bring device back up"
2637 * processes a fatal error.
2638 * Bring the ports down, reset the chip, bring the ports back up.
2640 static void fatal_error_task(struct work_struct *work)
2642 struct adapter *adapter = container_of(work, struct adapter,
2643 fatal_error_handler_task);
2647 err = t3_adapter_error(adapter, 1);
2649 err = t3_reenable_adapter(adapter);
2651 t3_resume_ports(adapter);
2653 CH_ALERT(adapter, "adapter reset %s\n", err ? "failed" : "succeeded");
2657 void t3_fatal_err(struct adapter *adapter)
2659 unsigned int fw_status[4];
2661 if (adapter->flags & FULL_INIT_DONE) {
2662 t3_sge_stop(adapter);
2663 t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
2664 t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
2665 t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
2666 t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);
2668 spin_lock(&adapter->work_lock);
2669 t3_intr_disable(adapter);
2670 queue_work(cxgb3_wq, &adapter->fatal_error_handler_task);
2671 spin_unlock(&adapter->work_lock);
2673 CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
2674 if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
2675 CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
2676 fw_status[0], fw_status[1],
2677 fw_status[2], fw_status[3]);
2682 * t3_io_error_detected - called when PCI error is detected
2683 * @pdev: Pointer to PCI device
2684 * @state: The current pci connection state
2686 * This function is called after a PCI bus error affecting
2687 * this device has been detected.
2689 static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
2690 pci_channel_state_t state)
2692 struct adapter *adapter = pci_get_drvdata(pdev);
2695 ret = t3_adapter_error(adapter, 0);
2697 /* Request a slot reset. */
2698 return PCI_ERS_RESULT_NEED_RESET;
2702 * t3_io_slot_reset - called after the pci bus has been reset.
2703 * @pdev: Pointer to PCI device
2705 * Restart the card from scratch, as if from a cold-boot.
2707 static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
2709 struct adapter *adapter = pci_get_drvdata(pdev);
2711 if (!t3_reenable_adapter(adapter))
2712 return PCI_ERS_RESULT_RECOVERED;
2714 return PCI_ERS_RESULT_DISCONNECT;
/**
 * t3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * its OK to resume normal operation.
 */
static void t3_io_resume(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	t3_resume_ports(adapter);
}
/* PCI error recovery callbacks registered with the PCI core via
 * the pci_driver's .err_handler field (see t3_io_* above). */
2731 static struct pci_error_handlers t3_err_handler = {
2732 .error_detected = t3_io_error_detected,
2733 .slot_reset = t3_io_slot_reset,
2734 .resume = t3_io_resume,
2738 * Set the number of qsets based on the number of CPUs and the number of ports,
2739 * not to exceed the number of available qsets, assuming there are enough qsets
2742 static void set_nqsets(struct adapter *adap)
2745 int num_cpus = num_online_cpus();
2746 int hwports = adap->params.nports;
2747 int nqsets = SGE_QSETS;
/* Multiple queue sets per port are only considered on rev > 0 silicon
 * when MSI-X is in use.  NOTE(review): the statements that actually
 * adjust 'nqsets' inside/after these conditions are not visible in
 * this extract — confirm against the full source. */
2749 if (adap->params.rev > 0 && adap->flags & USING_MSIX) {
2751 (hwports * nqsets > SGE_QSETS ||
2752 num_cpus >= nqsets / hwports))
/* Never configure more qsets per port than there are online CPUs. */
2754 if (nqsets > num_cpus)
2756 if (nqsets < 1 || hwports == 4)
/* Assign the chosen per-port qset count to every port and log it. */
2761 for_each_port(adap, i) {
2762 struct port_info *pi = adap2pinfo(adap, i);
2765 pi->nqsets = nqsets;
2766 j = pi->first_qset + nqsets;
2768 dev_info(&adap->pdev->dev,
2769 "Port %d using %d queue sets.\n", i, nqsets);
/*
 * Try to switch the adapter to MSI-X.  Requests SGE_QSETS + 1 vectors
 * (one per queue set plus one extra — presumably for non-data/async
 * interrupts, confirm against adapter ISR code).  On success the
 * assigned vectors are recorded in adap->msix_info[]; on failure the
 * available-vector shortfall reported by pci_enable_msix() is logged
 * and MSI-X is not used.
 */
2773 static int __devinit cxgb_enable_msix(struct adapter *adap)
2775 struct msix_entry entries[SGE_QSETS + 1];
2778 for (i = 0; i < ARRAY_SIZE(entries); ++i)
2779 entries[i].entry = i;
/* pci_enable_msix() returns 0 on success, or a positive count of the
 * vectors actually available when the full request cannot be met. */
2781 err = pci_enable_msix(adap->pdev, entries, ARRAY_SIZE(entries));
2783 for (i = 0; i < ARRAY_SIZE(entries); ++i)
2784 adap->msix_info[i].vec = entries[i].vector;
2786 dev_info(&adap->pdev->dev,
2787 "only %d MSI-X vectors left, not using MSI-X\n", err);
/*
 * Log a one-line summary for each successfully registered port:
 * device name, adapter and PHY description, offload capability ("R"),
 * hardware revision, bus description, and interrupt mode.  For the
 * interface whose name the adapter adopted for messages, also print
 * the CM/PMTX/PMRX memory sizes and the serial number.
 */
2791 static void __devinit print_port_info(struct adapter *adap,
2792 const struct adapter_info *ai)
2794 static const char *pci_variant[] = {
2795 "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
/* PCI Express reports lane width ("x%d"); parallel PCI variants report
 * clock speed and bus width.  NOTE(review): the branch that selects
 * between these two snprintf() calls is not visible in this extract. */
2802 snprintf(buf, sizeof(buf), "%s x%d",
2803 pci_variant[adap->params.pci.variant],
2804 adap->params.pci.width);
2806 snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
2807 pci_variant[adap->params.pci.variant],
2808 adap->params.pci.speed, adap->params.pci.width);
2810 for_each_port(adap, i) {
2811 struct net_device *dev = adap->port[i];
2812 const struct port_info *pi = netdev_priv(dev);
/* Skip ports whose net device failed to register in init_one(). */
2814 if (!test_bit(i, &adap->registered_device_map))
2816 printk(KERN_INFO "%s: %s %s %sNIC (rev %d) %s%s\n",
2817 dev->name, ai->desc, pi->phy.desc,
2818 is_offload(adap) ? "R" : "", adap->params.rev, buf,
2819 (adap->flags & USING_MSIX) ? " MSI-X" :
2820 (adap->flags & USING_MSI) ? " MSI" : "");
/* Memory/serial details are printed only once, against the interface
 * that lent the adapter its message name (and only if VPD has mclk). */
2821 if (adap->name == dev->name && adap->params.vpd.mclk)
2823 "%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
2824 adap->name, t3_mc7_size(&adap->cm) >> 20,
2825 t3_mc7_size(&adap->pmtx) >> 20,
2826 t3_mc7_size(&adap->pmrx) >> 20,
2827 adap->params.vpd.sn);
/*
 * init_one - PCI probe routine.
 * Claims the device, sets up DMA masks, maps BAR0, allocates the
 * adapter structure and one net_device per port, prepares the hardware,
 * registers the net devices, and enables MSI-X/MSI per the 'msi'
 * module parameter.  Returns 0 on success or a negative errno; error
 * paths unwind via the out_* labels at the bottom.
 */
2831 static int __devinit init_one(struct pci_dev *pdev,
2832 const struct pci_device_id *ent)
2834 static int version_printed;
2836 int i, err, pci_using_dac = 0;
2837 unsigned long mmio_start, mmio_len;
2838 const struct adapter_info *ai;
2839 struct adapter *adapter = NULL;
2840 struct port_info *pi;
/* Print the driver banner exactly once, on the first probe. */
2842 if (!version_printed) {
2843 printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
/* Single-threaded workqueue shared by all adapters for deferred work
 * (fatal errors, external interrupts, periodic checks). */
2848 cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
2850 printk(KERN_ERR DRV_NAME
2851 ": cannot initialize work queue\n");
2856 err = pci_request_regions(pdev, DRV_NAME);
2858 /* Just info, some other driver may have claimed the device. */
2859 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
2863 err = pci_enable_device(pdev);
2865 dev_err(&pdev->dev, "cannot enable PCI device\n");
2866 goto out_release_regions;
/* Prefer 64-bit DMA; fall back to 32-bit if unavailable.
 * NOTE(review): the line setting pci_using_dac on the 64-bit path is
 * not visible in this extract. */
2869 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2871 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2873 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
2874 "coherent allocations\n");
2875 goto out_disable_device;
2877 } else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
2878 dev_err(&pdev->dev, "no usable DMA configuration\n");
2879 goto out_disable_device;
2882 pci_set_master(pdev);
/* Saved state is restored during PCI error recovery / slot reset. */
2883 pci_save_state(pdev);
2885 mmio_start = pci_resource_start(pdev, 0);
2886 mmio_len = pci_resource_len(pdev, 0);
2887 ai = t3_get_adapter_info(ent->driver_data);
2889 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
2892 goto out_disable_device;
/* Map BAR0 (adapter register space). */
2895 adapter->regs = ioremap_nocache(mmio_start, mmio_len);
2896 if (!adapter->regs) {
2897 dev_err(&pdev->dev, "cannot map device registers\n");
2899 goto out_free_adapter;
2902 adapter->pdev = pdev;
2903 adapter->name = pci_name(pdev);
2904 adapter->msg_enable = dflt_msg_enable;
2905 adapter->mmio_len = mmio_len;
2907 mutex_init(&adapter->mdio_lock);
2908 spin_lock_init(&adapter->work_lock);
2909 spin_lock_init(&adapter->stats_lock);
2911 INIT_LIST_HEAD(&adapter->adapter_list);
2912 INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
2913 INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);
2914 INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
/* Allocate and configure one net_device per hardware port. */
2916 for (i = 0; i < ai->nports; ++i) {
2917 struct net_device *netdev;
2919 netdev = alloc_etherdev(sizeof(struct port_info));
2925 SET_NETDEV_DEV(netdev, &pdev->dev);
2927 adapter->port[i] = netdev;
2928 pi = netdev_priv(netdev);
2929 pi->adapter = adapter;
2930 pi->rx_csum_offload = 1;
2932 netif_carrier_off(netdev);
2933 netdev->irq = pdev->irq;
2934 netdev->mem_start = mmio_start;
2935 netdev->mem_end = mmio_start + mmio_len - 1;
2936 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
2937 netdev->features |= NETIF_F_LLTX;
2939 netdev->features |= NETIF_F_HIGHDMA;
2941 netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
2942 netdev->vlan_rx_register = vlan_rx_register;
2944 netdev->open = cxgb_open;
2945 netdev->stop = cxgb_close;
2946 netdev->hard_start_xmit = t3_eth_xmit;
2947 netdev->get_stats = cxgb_get_stats;
2948 netdev->set_multicast_list = cxgb_set_rxmode;
2949 netdev->do_ioctl = cxgb_ioctl;
2950 netdev->change_mtu = cxgb_change_mtu;
2951 netdev->set_mac_address = cxgb_set_mac_addr;
2952 #ifdef CONFIG_NET_POLL_CONTROLLER
2953 netdev->poll_controller = cxgb_netpoll;
2956 SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
2959 pci_set_drvdata(pdev, adapter);
/* Full hardware/firmware preparation; failure aborts the probe. */
2960 if (t3_prep_adapter(adapter, ai, 1) < 0) {
2966 * The card is now ready to go. If any errors occur during device
2967 * registration we do not fail the whole card but rather proceed only
2968 * with the ports we manage to register successfully. However we must
2969 * register at least one net device.
2971 for_each_port(adapter, i) {
2972 err = register_netdev(adapter->port[i]);
2974 dev_warn(&pdev->dev,
2975 "cannot register net device %s, skipping\n",
2976 adapter->port[i]->name);
2979 * Change the name we use for messages to the name of
2980 * the first successfully registered interface.
2982 if (!adapter->registered_device_map)
2983 adapter->name = adapter->port[i]->name;
2985 __set_bit(i, &adapter->registered_device_map);
2988 if (!adapter->registered_device_map) {
2989 dev_err(&pdev->dev, "could not register any net devices\n");
2993 /* Driver's ready. Reflect it on LEDs */
2994 t3_led_ready(adapter);
/* Offload-capable cards also register with the offload subsystem. */
2996 if (is_offload(adapter)) {
2997 __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
2998 cxgb3_adapter_ofld(adapter);
3001 /* See what interrupts we'll be using */
3002 if (msi > 1 && cxgb_enable_msix(adapter) == 0)
3003 adapter->flags |= USING_MSIX;
3004 else if (msi > 0 && pci_enable_msi(pdev) == 0)
3005 adapter->flags |= USING_MSI;
3007 set_nqsets(adapter);
3009 err = sysfs_create_group(&adapter->port[0]->dev.kobj,
3012 print_port_info(adapter, ai);
/* Error unwinding: release resources in reverse acquisition order.
 * NOTE(review): some labels/returns between these statements are not
 * visible in this extract. */
3016 iounmap(adapter->regs);
3017 for (i = ai->nports - 1; i >= 0; --i)
3018 if (adapter->port[i])
3019 free_netdev(adapter->port[i]);
3025 pci_disable_device(pdev);
3026 out_release_regions:
3027 pci_release_regions(pdev);
3028 pci_set_drvdata(pdev, NULL);
/*
 * remove_one - PCI remove routine.
 * Tears down everything init_one() set up, in reverse order: stop the
 * SGE, remove sysfs attributes, detach the offload subsystem,
 * unregister net devices, free SGE resources, disable MSI/MSI-X, free
 * the per-port net devices, unmap registers and release the PCI device.
 */
3032 static void __devexit remove_one(struct pci_dev *pdev)
3034 struct adapter *adapter = pci_get_drvdata(pdev);
3039 t3_sge_stop(adapter);
3040 sysfs_remove_group(&adapter->port[0]->dev.kobj,
/* Offload teardown must happen before the net devices go away. */
3043 if (is_offload(adapter)) {
3044 cxgb3_adapter_unofld(adapter);
3045 if (test_bit(OFFLOAD_DEVMAP_BIT,
3046 &adapter->open_device_map))
3047 offload_close(&adapter->tdev);
/* Only unregister ports that init_one() managed to register. */
3050 for_each_port(adapter, i)
3051 if (test_bit(i, &adapter->registered_device_map))
3052 unregister_netdev(adapter->port[i]);
3054 t3_stop_sge_timers(adapter);
3055 t3_free_sge_resources(adapter);
3056 cxgb_disable_msi(adapter);
3058 for_each_port(adapter, i)
3059 if (adapter->port[i])
3060 free_netdev(adapter->port[i]);
3062 iounmap(adapter->regs);
3064 pci_release_regions(pdev);
3065 pci_disable_device(pdev);
3066 pci_set_drvdata(pdev, NULL);
/* PCI driver descriptor tying the device ID table, probe/remove
 * callbacks, and the error-recovery handlers together.
 * NOTE(review): the .name and .probe initializers are not visible in
 * this extract. */
3070 static struct pci_driver driver = {
3072 .id_table = cxgb3_pci_tbl,
3074 .remove = __devexit_p(remove_one),
3075 .err_handler = &t3_err_handler,
/* Module entry point: initialize the offload layer, then register the
 * PCI driver so probing can begin. */
3078 static int __init cxgb3_init_module(void)
3082 cxgb3_offload_init();
3084 ret = pci_register_driver(&driver);
/* Module exit point: unregister the PCI driver (which removes all
 * adapters) and destroy the shared workqueue created in init_one(). */
3090 static void __exit cxgb3_cleanup_module(void)
3092 pci_unregister_driver(&driver);
3094 destroy_workqueue(cxgb3_wq);
3095 module_init(cxgb3_init_module);
3096 module_exit(cxgb3_cleanup_module);