2 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 #include <linux/module.h>
33 #include <linux/moduleparam.h>
34 #include <linux/init.h>
35 #include <linux/pci.h>
36 #include <linux/dma-mapping.h>
37 #include <linux/netdevice.h>
38 #include <linux/etherdevice.h>
39 #include <linux/if_vlan.h>
40 #include <linux/mii.h>
41 #include <linux/sockios.h>
42 #include <linux/workqueue.h>
43 #include <linux/proc_fs.h>
44 #include <linux/rtnetlink.h>
45 #include <linux/firmware.h>
46 #include <linux/log2.h>
47 #include <asm/uaccess.h>
50 #include "cxgb3_ioctl.h"
52 #include "cxgb3_offload.h"
55 #include "cxgb3_ctl_defs.h"
57 #include "firmware_exports.h"
/* SGE queue sizing limits.
 * NOTE(review): the enum opener/closer around these constants is not
 * visible in this chunk of the file.
 */
MAX_TXQ_ENTRIES = 16384,	/* largest allowed Ethernet Tx queue */
MAX_CTRL_TXQ_ENTRIES = 1024,	/* largest allowed control Tx queue */
MAX_RSPQ_ENTRIES = 16384,	/* largest allowed response queue */
MAX_RX_BUFFERS = 16384,		/* largest allowed Rx free list */
MAX_RX_JUMBO_BUFFERS = 16384,	/* largest allowed jumbo Rx free list */
MIN_CTRL_TXQ_ENTRIES = 4,	/* smallest allowed control Tx queue */
MIN_RSPQ_ENTRIES = 32,		/* smallest allowed response queue */
/* Bitmask covering every possible port of one adapter */
#define PORT_MASK ((1 << MAX_NPORTS) - 1)

/* Default netif msg_enable bitmap applied to each port at probe time */
#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

/* Magic value identifying a valid cxgb3 EEPROM image */
#define EEPROM_MAGIC 0x38E2F10C

/* Shorthand for a PCI device-table entry; idx indexes the adapter info */
#define CH_DEVICE(devid, idx) \
	{ PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }
/* PCI IDs of supported T3 adapters; the second CH_DEVICE argument selects
 * the adapter-info entry for that board.
 */
static const struct pci_device_id cxgb3_pci_tbl[] = {
	CH_DEVICE(0x20, 0),	/* PE9000 */
	CH_DEVICE(0x21, 1),	/* T302E */
	CH_DEVICE(0x22, 2),	/* T310E */
	CH_DEVICE(0x23, 3),	/* T320X */
	CH_DEVICE(0x24, 1),	/* T302X */
	CH_DEVICE(0x25, 3),	/* T320E */
	CH_DEVICE(0x26, 2),	/* T310X */
	CH_DEVICE(0x30, 2),	/* T3B10 */
	CH_DEVICE(0x31, 3),	/* T3B20 */
	CH_DEVICE(0x32, 1),	/* T3B02 */
	CH_DEVICE(0x35, 6),	/* T3C20-derived T3C10 */
	/* NOTE(review): the {0,} terminator and closing brace of this table
	 * were dropped by extraction. */

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);

/* Default message-enable bitmap for every port; tunable at module load */
static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");
/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and pin interrupts
 * msi = 0: force pin interrupts
 *
 * NOTE(review): the "static int msi" definition itself was dropped by
 * extraction.
 */
module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");

/*
 * The driver enables offload as a default.
 * To disable it, use ofld_disable = 1.
 */
static int ofld_disable = 0;

module_param(ofld_disable, int, 0644);
MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");

/*
 * We have work elements that we need to cancel when an interface is taken
 * down.  Normally the work elements would be executed by keventd but that
 * can deadlock because of linkwatch.  If our close method takes the rtnl
 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
 * for our work to complete.  Get our own work queue to solve this.
 */
static struct workqueue_struct *cxgb3_wq;
143 * link_report - show link status and link speed/duplex
144 * @p: the port whose settings are to be reported
146 * Shows the link status, speed, and duplex of a port.
148 static void link_report(struct net_device *dev)
150 if (!netif_carrier_ok(dev))
151 printk(KERN_INFO "%s: link down\n", dev->name);
153 const char *s = "10Mbps";
154 const struct port_info *p = netdev_priv(dev);
156 switch (p->link_config.speed) {
168 printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
169 p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
/* t3_os_link_fault - OS handler for a link-fault state change on a port
 * @adap: the adapter
 * @port_id: index of the affected port
 * @state: non-zero when the link has recovered, zero on fault
 *
 * NOTE(review): extraction dropped several lines of this function (the
 * early return, parts of the register-offset expressions and the fault
 * branch), so the body below is an incomplete fragment preserved as-is.
 */
void t3_os_link_fault(struct adapter *adap, int port_id, int state)
	struct net_device *dev = adap->port[port_id];
	struct port_info *pi = netdev_priv(dev);

	/* nothing to do when the carrier already matches the new state */
	if (state == netif_carrier_ok(dev))

		struct cmac *mac = &pi->mac;

		netif_carrier_on(dev);

		/* Clear local faults */
		t3_xgm_intr_disable(adap, pi->port_id);
		t3_read_reg(adap, A_XGM_INT_STATUS +
			    A_XGM_INT_CAUSE + pi->mac.offset,
		t3_set_reg_field(adap,
				 F_XGM_INT, F_XGM_INT);
		t3_xgm_intr_enable(adap, pi->port_id);

		/* re-enable the MAC transmit path now the fault cleared */
		t3_mac_enable(mac, MAC_DIRECTION_TX);

		netif_carrier_off(dev);
/**
 *	t3_os_link_changed - handle link status changes
 *	@adapter: the adapter associated with the link change
 *	@port_id: the port index whose link status has changed
 *	@link_stat: the new status of the link
 *	@speed: the new speed setting
 *	@duplex: the new duplex setting
 *	@pause: the new flow-control setting
 *
 *	This is the OS-dependent handler for link status changes.  The OS
 *	neutral handler takes care of most of the processing for these events,
 *	then calls this handler for any OS-specific processing.
 *
 *	NOTE(review): extraction dropped lines in this function (the early
 *	return body, some register arguments, and the else branches); the
 *	fragment is preserved verbatim below.
 */
void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
			int speed, int duplex, int pause)
	struct net_device *dev = adapter->port[port_id];
	struct port_info *pi = netdev_priv(dev);
	struct cmac *mac = &pi->mac;

	/* Skip changes from disabled ports. */
	if (!netif_running(dev))

	if (link_stat != netif_carrier_ok(dev)) {
		t3_mac_enable(mac, MAC_DIRECTION_RX);

		/* Clear local faults */
		t3_xgm_intr_disable(adapter, pi->port_id);
		t3_read_reg(adapter, A_XGM_INT_STATUS +
		t3_write_reg(adapter,
			     A_XGM_INT_CAUSE + pi->mac.offset,
		t3_set_reg_field(adapter,
				 A_XGM_INT_ENABLE + pi->mac.offset,
				 F_XGM_INT, F_XGM_INT);
		t3_xgm_intr_enable(adapter, pi->port_id);

		netif_carrier_on(dev);
		netif_carrier_off(dev);

		t3_xgm_intr_disable(adapter, pi->port_id);
		t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
		t3_set_reg_field(adapter,
				 A_XGM_INT_ENABLE + pi->mac.offset,

		/* power the PHY down while the link is gone */
		pi->phy.ops->power_down(&pi->phy, 1);
		t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
		t3_mac_disable(mac, MAC_DIRECTION_RX);
		t3_link_start(&pi->phy, mac, &pi->link_config);
271 * t3_os_phymod_changed - handle PHY module changes
272 * @phy: the PHY reporting the module change
273 * @mod_type: new module type
275 * This is the OS-dependent handler for PHY module changes. It is
276 * invoked when a PHY module is removed or inserted for any OS-specific
279 void t3_os_phymod_changed(struct adapter *adap, int port_id)
281 static const char *mod_str[] = {
282 NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX", "unknown"
285 const struct net_device *dev = adap->port[port_id];
286 const struct port_info *pi = netdev_priv(dev);
288 if (pi->phy.modtype == phy_modtype_none)
289 printk(KERN_INFO "%s: PHY module unplugged\n", dev->name);
291 printk(KERN_INFO "%s: %s PHY module inserted\n", dev->name,
292 mod_str[pi->phy.modtype]);
295 static void cxgb_set_rxmode(struct net_device *dev)
297 struct t3_rx_mode rm;
298 struct port_info *pi = netdev_priv(dev);
300 init_rx_mode(&rm, dev, dev->mc_list);
301 t3_mac_set_rx_mode(&pi->mac, &rm);
305 * link_start - enable a port
306 * @dev: the device to enable
308 * Performs the MAC and PHY actions needed to enable a port.
310 static void link_start(struct net_device *dev)
312 struct t3_rx_mode rm;
313 struct port_info *pi = netdev_priv(dev);
314 struct cmac *mac = &pi->mac;
316 init_rx_mode(&rm, dev, dev->mc_list);
318 t3_mac_set_mtu(mac, dev->mtu);
319 t3_mac_set_address(mac, 0, dev->dev_addr);
320 t3_mac_set_rx_mode(mac, &rm);
321 t3_link_start(&pi->phy, mac, &pi->link_config);
322 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
325 static inline void cxgb_disable_msi(struct adapter *adapter)
327 if (adapter->flags & USING_MSIX) {
328 pci_disable_msix(adapter->pdev);
329 adapter->flags &= ~USING_MSIX;
330 } else if (adapter->flags & USING_MSI) {
331 pci_disable_msi(adapter->pdev);
332 adapter->flags &= ~USING_MSI;
337 * Interrupt handler for asynchronous events used with MSI-X.
339 static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
341 t3_slow_intr_handler(cookie);
346 * Name the MSI-X interrupts.
348 static void name_msix_vecs(struct adapter *adap)
350 int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;
352 snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
353 adap->msix_info[0].desc[n] = 0;
355 for_each_port(adap, j) {
356 struct net_device *d = adap->port[j];
357 const struct port_info *pi = netdev_priv(d);
359 for (i = 0; i < pi->nqsets; i++, msi_idx++) {
360 snprintf(adap->msix_info[msi_idx].desc, n,
361 "%s-%d", d->name, pi->first_qset + i);
362 adap->msix_info[msi_idx].desc[n] = 0;
/* Request one MSI-X data interrupt per queue set (vector 0 is reserved for
 * async events).
 * NOTE(review): extraction dropped the error-unwind control flow around the
 * free_irq() calls and the function's return paths; fragment kept as-is.
 */
static int request_msix_data_irqs(struct adapter *adap)
	int i, j, err, qidx = 0;

	for_each_port(adap, i) {
		int nqsets = adap2pinfo(adap, i)->nqsets;

		for (j = 0; j < nqsets; ++j) {
			err = request_irq(adap->msix_info[qidx + 1].vec,
					  t3_intr_handler(adap,
					  adap->msix_info[qidx + 1].desc,
					  &adap->sge.qs[qidx]);
				/* unwind already-requested vectors on error */
				free_irq(adap->msix_info[qidx + 1].vec,
					 &adap->sge.qs[qidx]);
393 static void free_irq_resources(struct adapter *adapter)
395 if (adapter->flags & USING_MSIX) {
398 free_irq(adapter->msix_info[0].vec, adapter);
399 for_each_port(adapter, i)
400 n += adap2pinfo(adapter, i)->nqsets;
402 for (i = 0; i < n; ++i)
403 free_irq(adapter->msix_info[i + 1].vec,
404 &adapter->sge.qs[i]);
406 free_irq(adapter->pdev->irq, adapter);
/* Poll the response queue's offload-packet counter until it has advanced by
 * n replies past init_cnt.
 * NOTE(review): the parameter list tail, loop body (timeout/sleep) and
 * return paths were dropped by extraction.
 */
static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
	while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
/* init_tp_parity - prime TP's parity-protected memories
 *
 * Sends dummy writes to the 16-entry SMT, the 2048-entry L2 table and the
 * 2048-entry routing table, plus one final CPL_SET_TCB_FIELD marker, then
 * waits (via await_mgmt_replies) for all 16 + 2048 + 2048 + 1 replies so
 * that subsequent reads see initialized parity.
 * NOTE(review): extraction dropped the local declarations (loop index and
 * sk_buff pointer) and several closing braces; fragment kept verbatim.
 */
static int init_tp_parity(struct adapter *adap)
	struct cpl_set_tcb_field *greq;
	unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;

	t3_tp_set_offload_mode(adap, 1);

	/* prime the source MAC table */
	for (i = 0; i < 16; i++) {
		struct cpl_smt_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
		req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
		memset(req, 0, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
		t3_mgmt_tx(adap, skb);

	/* prime the L2 table */
	for (i = 0; i < 2048; i++) {
		struct cpl_l2t_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
		req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
		memset(req, 0, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
		req->params = htonl(V_L2T_W_IDX(i));
		t3_mgmt_tx(adap, skb);

	/* prime the routing table */
	for (i = 0; i < 2048; i++) {
		struct cpl_rte_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
		req = (struct cpl_rte_write_req *)__skb_put(skb, sizeof(*req));
		memset(req, 0, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
		req->l2t_idx = htonl(V_L2T_W_IDX(i));
		t3_mgmt_tx(adap, skb);

	/* final marker so we can wait for everything above to complete */
	skb = alloc_skb(sizeof(*greq), GFP_KERNEL | __GFP_NOFAIL);
	greq = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*greq));
	memset(greq, 0, sizeof(*greq));
	greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
	greq->mask = cpu_to_be64(1);
	t3_mgmt_tx(adap, skb);

	i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
	t3_tp_set_offload_mode(adap, 0);
/**
 *	setup_rss - configure RSS
 *	@adap: the adapter
 *
 *	Sets up RSS to distribute packets to multiple receive queues.  We
 *	configure the RSS CPU lookup table to distribute to the number of HW
 *	receive queues, and the response queue lookup table to narrow that
 *	down to the response queues actually configured for each port.
 *	We always configure the RSS mapping for two ports since the mapping
 *	table has plenty of entries.
 *
 *	NOTE(review): the body of the first for loop (the cpus[] fill) was
 *	dropped by extraction.
 */
static void setup_rss(struct adapter *adap)
	unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
	unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
	u8 cpus[SGE_QSETS + 1];
	u16 rspq_map[RSS_TABLE_SIZE];

	for (i = 0; i < SGE_QSETS; ++i)
	cpus[SGE_QSETS] = 0xff;	/* terminator */

	/* first half of the table maps to port 0's qsets, second to port 1's */
	for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
		rspq_map[i] = i % nq0;
		rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;

	t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
		      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
		      V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
/* Register a NAPI context for every queue set that has a net device.
 * NOTE(review): extraction dropped the loop guard and the trailing
 * netif_napi_add() arguments; fragment kept as-is.
 */
static void init_napi(struct adapter *adap)
	for (i = 0; i < SGE_QSETS; i++) {
		struct sge_qset *qs = &adap->sge.qs[i];

			netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,

	/*
	 * netif_napi_add() can be called only once per napi_struct because it
	 * adds each new napi_struct to a list.  Be careful not to call it a
	 * second time, e.g., during EEH recovery, by making a note of it.
	 */
	adap->flags |= NAPI_INIT;
534 * Wait until all NAPI handlers are descheduled. This includes the handlers of
535 * both netdevices representing interfaces and the dummy ones for the extra
538 static void quiesce_rx(struct adapter *adap)
542 for (i = 0; i < SGE_QSETS; i++)
543 if (adap->sge.qs[i].adap)
544 napi_disable(&adap->sge.qs[i].napi);
547 static void enable_all_napi(struct adapter *adap)
550 for (i = 0; i < SGE_QSETS; i++)
551 if (adap->sge.qs[i].adap)
552 napi_enable(&adap->sge.qs[i].napi);
/**
 *	set_qset_lro - Turn a queue set's LRO capability on and off
 *	@dev: the device the qset is attached to
 *	@qset_idx: the queue set index
 *	@val: the LRO switch
 *
 *	Sets LRO on or off for a particular queue set.
 *	the device's features flag is updated to reflect the LRO
 *	capability when all queues belonging to the device are
 *	(NOTE(review): the tail of this comment and the feature-flag update
 *	code, if any, were dropped by extraction.)
 */
static void set_qset_lro(struct net_device *dev, int qset_idx, int val)
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	/* record the setting both in the config and in the live qset */
	adapter->params.sge.qset[qset_idx].lro = !!val;
	adapter->sge.qs[qset_idx].lro_enabled = !!val;
/**
 *	setup_sge_qsets - configure SGE Tx/Rx/response queues
 *	@adap: the adapter
 *
 *	Determines how many sets of SGE queues to use and initializes them.
 *	We support multiple queue sets per port if we have MSI-X, otherwise
 *	just one queue set per port.
 *
 *	NOTE(review): extraction dropped the irq_idx adjustment, parts of the
 *	t3_sge_alloc_qset() argument list and the error/return paths.
 */
static int setup_sge_qsets(struct adapter *adap)
	int i, j, err, irq_idx = 0, qset_idx = 0;
	unsigned int ntxq = SGE_TXQ_PER_SET;

	if (adap->params.rev > 0 && !(adap->flags & USING_MSI))

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		struct port_info *pi = netdev_priv(dev);

		pi->qs = &adap->sge.qs[pi->first_qset];
		for (j = pi->first_qset; j < pi->first_qset + pi->nqsets;
			set_qset_lro(dev, qset_idx, pi->rx_offload & T3_LRO);
			err = t3_sge_alloc_qset(adap, qset_idx, 1,
				(adap->flags & USING_MSIX) ? qset_idx + 1 :
				&adap->params.sge.qset[qset_idx], ntxq, dev,
				netdev_get_tx_queue(dev, j));
				/* tear down everything built so far on error */
				t3_stop_sge_timers(adap);
				t3_free_sge_resources(adap);
/* Generic sysfs show helper: formats the value via the supplied callback.
 * NOTE(review): the rtnl lock/unlock pair and return statement were dropped
 * by extraction.
 */
static ssize_t attr_show(struct device *d, char *buf,
			 ssize_t(*format) (struct net_device *, char *))

	/* Synchronize with ioctls that may shut down the device */
	len = (*format) (to_net_dev(d), buf);
/* Generic sysfs store helper: parses an unsigned value, range-checks it
 * against [min_val, max_val] and applies it via the supplied callback.
 * NOTE(review): the local declarations, locking, and return paths were
 * dropped by extraction.
 */
static ssize_t attr_store(struct device *d,
			  const char *buf, size_t len,
			  ssize_t(*set) (struct net_device *, unsigned int),
			  unsigned int min_val, unsigned int max_val)

	if (!capable(CAP_NET_ADMIN))

	val = simple_strtoul(buf, &endp, 0);
	if (endp == buf || val < min_val || val > max_val)

	ret = (*set) (to_net_dev(d), val);
/* Generates format_<name>() and show_<name>() for a read-only sysfs
 * attribute whose value is given by val_expr (evaluated with pi/adap in
 * scope).  NOTE(review): some continuation lines of this macro were dropped
 * by extraction. */
#define CXGB3_SHOW(name, val_expr) \
static ssize_t format_##name(struct net_device *dev, char *buf) \
	struct port_info *pi = netdev_priv(dev); \
	struct adapter *adap = pi->adapter; \
	return sprintf(buf, "%u\n", val_expr); \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
	return attr_show(d, buf, format_##name); \
/* sysfs setter for the number of MC5 filter TIDs.  Rejects changes once the
 * adapter is fully initialized, on rev-0 hardware, or when the request
 * would not leave room for servers and the minimum TID count.
 * NOTE(review): the error-return statements and closing braces were dropped
 * by extraction.
 */
static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;

	if (adap->flags & FULL_INIT_DONE)
	if (val && adap->params.rev == 0)
	if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
	adap->params.mc5.nfilters = val;
681 static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
682 const char *buf, size_t len)
684 return attr_store(d, buf, len, set_nfilters, 0, ~0);
/* sysfs setter for the number of MC5 server TIDs.  Rejects changes once the
 * adapter is fully initialized or when the request exceeds the available
 * MC5 space after filters.
 * NOTE(review): the error-return statements and closing braces were dropped
 * by extraction.
 */
static ssize_t set_nservers(struct net_device *dev, unsigned int val)
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;

	if (adap->flags & FULL_INIT_DONE)
	if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
	adap->params.mc5.nservers = val;
701 static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
702 const char *buf, size_t len)
704 return attr_store(d, buf, len, set_nservers, 0, ~0);
/* Read-only attribute: generates the show function and the device_attribute */
#define CXGB3_ATTR_R(name, val_expr) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)

/* Read-write attribute: as above plus a caller-supplied store method */
#define CXGB3_ATTR_RW(name, val_expr, store_method) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)

CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);

/* sysfs group exported on every port's device.
 * NOTE(review): the NULL terminator and closing brace of this array were
 * dropped by extraction. */
static struct attribute *cxgb3_attrs[] = {
	&dev_attr_cam_size.attr,
	&dev_attr_nfilters.attr,
	&dev_attr_nservers.attr,

static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };
/* Show a Tx traffic-scheduler rate: reads the bucket parameters (bpt/cpt)
 * from the TP TM PIO registers and converts them to Kbps using the core
 * clock.  NOTE(review): the locking, cpt extraction, and return statement
 * were dropped by extraction.
 */
static ssize_t tm_attr_show(struct device *d,
			    char *buf, int sched)
	struct port_info *pi = netdev_priv(to_net_dev(d));
	struct adapter *adap = pi->adapter;
	unsigned int v, addr, bpt, cpt;

	/* two schedulers share each rate-limit register */
	addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;

	t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
	v = t3_read_reg(adap, A_TP_TM_PIO_DATA);

	bpt = (v >> 8) & 0xff;
		len = sprintf(buf, "disabled\n");
		v = (adap->params.vpd.cclk * 1000) / cpt;
		len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
/* Store a Tx traffic-scheduler rate (Kbps, capped at 10000000) via
 * t3_config_sched().  NOTE(review): the local declarations, locking, and
 * return paths were dropped by extraction.
 */
static ssize_t tm_attr_store(struct device *d,
			     const char *buf, size_t len, int sched)
	struct port_info *pi = netdev_priv(to_net_dev(d));
	struct adapter *adap = pi->adapter;

	if (!capable(CAP_NET_ADMIN))

	val = simple_strtoul(buf, &endp, 0);
	if (endp == buf || val > 10000000)

	ret = t3_config_sched(adap, val, sched);
/* Generates show/store wrappers and the device_attribute for one Tx
 * scheduler.  NOTE(review): some continuation lines of this macro and the
 * eight TM_ATTR(schedN, N) invocations were dropped by extraction. */
#define TM_ATTR(name, sched) \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
	return tm_attr_show(d, buf, sched); \
static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
			    const char *buf, size_t len) \
	return tm_attr_store(d, buf, len, sched); \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)

/* sysfs group exported only while offload is active.
 * NOTE(review): the NULL terminator of this array was dropped by
 * extraction. */
static struct attribute *offload_attrs[] = {
	&dev_attr_sched0.attr,
	&dev_attr_sched1.attr,
	&dev_attr_sched2.attr,
	&dev_attr_sched3.attr,
	&dev_attr_sched4.attr,
	&dev_attr_sched5.attr,
	&dev_attr_sched6.attr,
	&dev_attr_sched7.attr,

static struct attribute_group offload_attr_group = {.attrs = offload_attrs };
/*
 * Sends an sk_buff to an offload queue driver
 * after dealing with any active network taps.
 */
static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
{
	int ret;

	/* keep softirqs off around the offload transmit (restored; the
	 * local_bh pair was dropped by extraction) */
	local_bh_disable();
	ret = t3_offload_tx(tdev, skb);
	local_bh_enable();
	return ret;
}
828 static int write_smt_entry(struct adapter *adapter, int idx)
830 struct cpl_smt_write_req *req;
831 struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);
836 req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
837 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
838 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
839 req->mtu_idx = NMTUS - 1; /* should be 0 but there's a T3 bug */
841 memset(req->src_mac1, 0, sizeof(req->src_mac1));
842 memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
844 offload_tx(&adapter->tdev, skb);
848 static int init_smt(struct adapter *adapter)
852 for_each_port(adapter, i)
853 write_smt_entry(adapter, i);
857 static void init_port_mtus(struct adapter *adapter)
859 unsigned int mtus = adapter->port[0]->mtu;
861 if (adapter->port[1])
862 mtus |= adapter->port[1]->mtu << 16;
863 t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
866 static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
870 struct mngt_pktsched_wr *req;
873 skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
874 req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
875 req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
876 req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
882 ret = t3_mgmt_tx(adap, skb);
887 static int bind_qsets(struct adapter *adap)
891 for_each_port(adap, i) {
892 const struct port_info *pi = adap2pinfo(adap, i);
894 for (j = 0; j < pi->nqsets; ++j) {
895 int ret = send_pktsched_cmd(adap, 1,
896 pi->first_qset + j, -1,
/* printf templates for the firmware and protocol-SRAM image file names */
#define FW_FNAME "cxgb3/t3fw-%d.%d.%d.bin"
#define TPSRAM_NAME "cxgb3/t3%c_psram-%d.%d.%d.bin"
909 static int upgrade_fw(struct adapter *adap)
913 const struct firmware *fw;
914 struct device *dev = &adap->pdev->dev;
916 snprintf(buf, sizeof(buf), FW_FNAME, FW_VERSION_MAJOR,
917 FW_VERSION_MINOR, FW_VERSION_MICRO);
918 ret = request_firmware(&fw, buf, dev);
920 dev_err(dev, "could not upgrade firmware: unable to load %s\n",
924 ret = t3_load_fw(adap, fw->data, fw->size);
925 release_firmware(fw);
928 dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
929 FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
931 dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
932 FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
937 static inline char t3rev2char(struct adapter *adapter)
941 switch(adapter->params.rev) {
/* Load the matching protocol-SRAM image from userspace, validate it, and
 * program it into TP.
 * NOTE(review): extraction dropped the local declarations (ret, buf, rev),
 * the unknown-revision early return, and several branch/goto lines of the
 * error handling; fragment kept as-is.
 */
static int update_tpsram(struct adapter *adap)
	const struct firmware *tpsram;
	struct device *dev = &adap->pdev->dev;

	rev = t3rev2char(adap);

	snprintf(buf, sizeof(buf), TPSRAM_NAME, rev,
		 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);

	ret = request_firmware(&tpsram, buf, dev);
		dev_err(dev, "could not load TP SRAM: unable to load %s\n",

	/* validate the image before committing it to hardware */
	ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);

	ret = t3_set_proto_sram(adap, tpsram->data);
			 "successful update of protocol engine "
			 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
		dev_err(dev, "failed to update of protocol engine %d.%d.%d\n",
			TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
		dev_err(dev, "loading protocol SRAM failed\n");

	release_firmware(tpsram);
/**
 *	cxgb_up - enable the adapter
 *	@adap: adapter being enabled
 *
 *	Called when the first port is enabled, this function performs the
 *	actions necessary to make an adapter operational, such as completing
 *	the initialization of HW modules, and enabling interrupts.
 *
 *	Must be called with the rtnl lock held.
 *
 *	NOTE(review): extraction dropped many lines of this function (local
 *	declarations, goto-based error unwinding, irq-handler argument tails
 *	and several closing braces); the fragment is preserved verbatim.
 */
static int cxgb_up(struct adapter *adap)
	if (!(adap->flags & FULL_INIT_DONE)) {
		/* bring firmware up to the version this driver expects */
		err = t3_check_fw_version(adap);
		if (err == -EINVAL) {
			err = upgrade_fw(adap);
			CH_WARN(adap, "FW upgrade to %d.%d.%d %s\n",
				FW_VERSION_MAJOR, FW_VERSION_MINOR,
				FW_VERSION_MICRO, err ? "failed" : "succeeded");

		/* ditto for the TP protocol SRAM */
		err = t3_check_tpsram_version(adap);
		if (err == -EINVAL) {
			err = update_tpsram(adap);
			CH_WARN(adap, "TP upgrade to %d.%d.%d %s\n",
				TP_VERSION_MAJOR, TP_VERSION_MINOR,
				TP_VERSION_MICRO, err ? "failed" : "succeeded");

		/*
		 * Clear interrupts now to catch errors if t3_init_hw fails.
		 * We clear them again later as initialization may trigger
		 * conditions that can interrupt.
		 */
		t3_intr_clear(adap);

		err = t3_init_hw(adap, 0);

		t3_set_reg_field(adap, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
		t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));

		err = setup_sge_qsets(adap);

		if (!(adap->flags & NAPI_INIT))
		adap->flags |= FULL_INIT_DONE;

	t3_intr_clear(adap);

	if (adap->flags & USING_MSIX) {
		name_msix_vecs(adap);
		err = request_irq(adap->msix_info[0].vec,
				  t3_async_intr_handler, 0,
				  adap->msix_info[0].desc, adap);

		err = request_msix_data_irqs(adap);
			free_irq(adap->msix_info[0].vec, adap);
	} else if ((err = request_irq(adap->pdev->irq,
				      t3_intr_handler(adap,
						      adap->sge.qs[0].rspq.
				      (adap->flags & USING_MSI) ?

	enable_all_napi(adap);
	t3_intr_enable(adap);

	/* one-time TP parity priming on T3C and later */
	if (adap->params.rev >= T3_REV_C && !(adap->flags & TP_PARITY_INIT) &&
	    is_offload(adap) && init_tp_parity(adap) == 0)
		adap->flags |= TP_PARITY_INIT;

	if (adap->flags & TP_PARITY_INIT) {
		t3_write_reg(adap, A_TP_INT_CAUSE,
			     F_CMCACHEPERR | F_ARPLUTPERR);
		t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff);

	if (!(adap->flags & QUEUES_BOUND)) {
		err = bind_qsets(adap);
			CH_ERR(adap, "failed to bind qsets, err %d\n", err);
			t3_intr_disable(adap);
			free_irq_resources(adap);
		adap->flags |= QUEUES_BOUND;

	CH_ERR(adap, "request_irq failed, err %d\n", err);
1109 * Release resources when all the ports and offloading have been stopped.
1111 static void cxgb_down(struct adapter *adapter)
1113 t3_sge_stop(adapter);
1114 spin_lock_irq(&adapter->work_lock); /* sync with PHY intr task */
1115 t3_intr_disable(adapter);
1116 spin_unlock_irq(&adapter->work_lock);
1118 free_irq_resources(adapter);
1119 flush_workqueue(cxgb3_wq); /* wait for external IRQ handler */
1120 quiesce_rx(adapter);
1123 static void schedule_chk_task(struct adapter *adap)
1127 timeo = adap->params.linkpoll_period ?
1128 (HZ * adap->params.linkpoll_period) / 10 :
1129 adap->params.stats_update_period * HZ;
1131 queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
/* Bring up the offload capabilities of the adapter: activates the offload
 * module, programs MTUs, exposes the scheduler sysfs group, and notifies
 * registered ULP clients.
 * NOTE(review): extraction dropped the error-goto structure and several
 * intermediate statements; fragment kept as-is.
 */
static int offload_open(struct net_device *dev)
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct t3cdev *tdev = dev2t3cdev(dev);
	int adap_up = adapter->open_device_map & PORT_MASK;

	/* only the first opener performs the activation */
	if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))

	if (!adap_up && (err = cxgb_up(adapter)) < 0)

	t3_tp_set_offload_mode(adapter, 1);
	tdev->lldev = adapter->port[0];
	err = cxgb3_offload_activate(adapter);

	init_port_mtus(adapter);
	t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
		     adapter->params.b_wnd,
		     adapter->params.rev == 0 ?
		     adapter->port[0]->mtu : 0xffff);

	if (sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group))
		dev_dbg(&dev->dev, "cannot create sysfs group\n");

	/* Call back all registered clients */
	cxgb3_add_clients(tdev);

	/* restore them in case the offload module has changed them */
	/* error path: undo the activation */
	t3_tp_set_offload_mode(adapter, 0);
	clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
	cxgb3_set_dummy_ops(tdev);
/* Tear down offload state: notifies clients, removes the scheduler sysfs
 * group, deactivates offload mode, and shuts the adapter down if no ports
 * remain open.
 * NOTE(review): extraction dropped the early-return value, cxgb_down call
 * and closing lines; fragment kept as-is.
 */
static int offload_close(struct t3cdev *tdev)
	struct adapter *adapter = tdev2adap(tdev);

	if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))

	/* Call back all registered clients */
	cxgb3_remove_clients(tdev);

	sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);

	cxgb3_set_dummy_ops(tdev);
	t3_tp_set_offload_mode(adapter, 0);
	clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);

	if (!adapter->open_device_map)

	cxgb3_offload_deactivate(adapter);
/* ndo_open handler: brings the adapter up on first open, marks the port
 * open, optionally starts offload, and enables the port's interrupts and
 * Tx queues.
 * NOTE(review): extraction dropped the error returns, link_start call and
 * closing lines; fragment kept as-is.
 */
static int cxgb_open(struct net_device *dev)
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int other_ports = adapter->open_device_map & PORT_MASK;

	/* first port to open performs full adapter bring-up */
	if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)

	set_bit(pi->port_id, &adapter->open_device_map);
	if (is_offload(adapter) && !ofld_disable) {
		err = offload_open(dev);
			/* offload failure is non-fatal for basic NIC use */
			"Could not initialize offload capabilities\n");

	dev->real_num_tx_queues = pi->nqsets;
	t3_port_intr_enable(adapter, pi->port_id);
	netif_tx_start_all_queues(dev);

	schedule_chk_task(adapter);
/* ndo_stop handler: quiesces the port (interrupts, Tx queues, PHY, MAC),
 * clears it from the open map, and cancels the periodic check task /
 * shuts the adapter down when the last user goes away.
 * NOTE(review): extraction dropped the trailing cxgb_down call and return;
 * fragment kept as-is.
 */
static int cxgb_close(struct net_device *dev)
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	/* Stop link fault interrupts */
	t3_xgm_intr_disable(adapter, pi->port_id);
	t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);

	t3_port_intr_disable(adapter, pi->port_id);
	netif_tx_stop_all_queues(dev);
	pi->phy.ops->power_down(&pi->phy, 1);
	netif_carrier_off(dev);
	t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);

	spin_lock_irq(&adapter->work_lock);	/* sync with update task */
	clear_bit(pi->port_id, &adapter->open_device_map);
	spin_unlock_irq(&adapter->work_lock);

	/* last Ethernet port closed: stop the periodic check task */
	if (!(adapter->open_device_map & PORT_MASK))
		cancel_rearming_delayed_workqueue(cxgb3_wq,
						  &adapter->adap_check_task);

	if (!adapter->open_device_map)
1258 static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
1260 struct port_info *pi = netdev_priv(dev);
1261 struct adapter *adapter = pi->adapter;
1262 struct net_device_stats *ns = &pi->netstats;
1263 const struct mac_stats *pstats;
1265 spin_lock(&adapter->stats_lock);
1266 pstats = t3_mac_update_stats(&pi->mac);
1267 spin_unlock(&adapter->stats_lock);
1269 ns->tx_bytes = pstats->tx_octets;
1270 ns->tx_packets = pstats->tx_frames;
1271 ns->rx_bytes = pstats->rx_octets;
1272 ns->rx_packets = pstats->rx_frames;
1273 ns->multicast = pstats->rx_mcast_frames;
1275 ns->tx_errors = pstats->tx_underrun;
1276 ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
1277 pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
1278 pstats->rx_fifo_ovfl;
1280 /* detailed rx_errors */
1281 ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
1282 ns->rx_over_errors = 0;
1283 ns->rx_crc_errors = pstats->rx_fcs_errs;
1284 ns->rx_frame_errors = pstats->rx_symbol_errs;
1285 ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
1286 ns->rx_missed_errors = pstats->rx_cong_drops;
1288 /* detailed tx_errors */
1289 ns->tx_aborted_errors = 0;
1290 ns->tx_carrier_errors = 0;
1291 ns->tx_fifo_errors = pstats->tx_underrun;
1292 ns->tx_heartbeat_errors = 0;
1293 ns->tx_window_errors = 0;
1297 static u32 get_msglevel(struct net_device *dev)
1299 struct port_info *pi = netdev_priv(dev);
1300 struct adapter *adapter = pi->adapter;
1302 return adapter->msg_enable;
1305 static void set_msglevel(struct net_device *dev, u32 val)
1307 struct port_info *pi = netdev_priv(dev);
1308 struct adapter *adapter = pi->adapter;
1310 adapter->msg_enable = val;
/* Labels for the ethtool statistics, in the order get_stats() reports them.
 * NOTE(review): many entries of this table (and its closing brace) were
 * dropped by extraction; the visible subset is preserved verbatim.
 */
static char stats_strings[][ETH_GSTRING_LEN] = {
	"TxMulticastFramesOK",
	"TxBroadcastFramesOK",
	"TxFrames128To255 ",
	"TxFrames256To511 ",
	"TxFrames512To1023 ",
	"TxFrames1024To1518 ",
	"TxFrames1519ToMax ",
	"RxMulticastFramesOK",
	"RxBroadcastFramesOK",
	"RxFrames128To255 ",
	"RxFrames256To511 ",
	"RxFrames512To1023 ",
	"RxFrames1024To1518 ",
	"RxFrames1519ToMax ",
	"CheckTXEnToggled ",
/* ethtool get_sset_count: number of statistics strings exported. */
1367 static int get_sset_count(struct net_device *dev, int sset)
1371 return ARRAY_SIZE(stats_strings);
/* Size in bytes of the register dump produced by get_regs(). */
1377 #define T3_REGMAP_SIZE (3 * 1024)
/* ethtool get_regs_len: fixed-size register map. */
1379 static int get_regs_len(struct net_device *dev)
1381 return T3_REGMAP_SIZE;
/* ethtool get_eeprom_len (body not visible in this excerpt). */
1384 static int get_eeprom_len(struct net_device *dev)
1389 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1391 struct port_info *pi = netdev_priv(dev);
1392 struct adapter *adapter = pi->adapter;
1396 spin_lock(&adapter->stats_lock);
1397 t3_get_fw_version(adapter, &fw_vers);
1398 t3_get_tp_version(adapter, &tp_vers);
1399 spin_unlock(&adapter->stats_lock);
1401 strcpy(info->driver, DRV_NAME);
1402 strcpy(info->version, DRV_VERSION);
1403 strcpy(info->bus_info, pci_name(adapter->pdev));
1405 strcpy(info->fw_version, "N/A");
1407 snprintf(info->fw_version, sizeof(info->fw_version),
1408 "%s %u.%u.%u TP %u.%u.%u",
1409 G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
1410 G_FW_VERSION_MAJOR(fw_vers),
1411 G_FW_VERSION_MINOR(fw_vers),
1412 G_FW_VERSION_MICRO(fw_vers),
1413 G_TP_VERSION_MAJOR(tp_vers),
1414 G_TP_VERSION_MINOR(tp_vers),
1415 G_TP_VERSION_MICRO(tp_vers));
1419 static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
1421 if (stringset == ETH_SS_STATS)
1422 memcpy(data, stats_strings, sizeof(stats_strings));
1425 static unsigned long collect_sge_port_stats(struct adapter *adapter,
1426 struct port_info *p, int idx)
1429 unsigned long tot = 0;
1431 for (i = p->first_qset; i < p->first_qset + p->nqsets; ++i)
1432 tot += adapter->sge.qs[i].port_stats[idx];
/*
 * ethtool get_ethtool_stats: fill 'data' in the same order as
 * stats_strings[].  MAC counters are snapshotted under stats_lock;
 * per-queue SGE counters are summed over the port's queue sets.
 * NOTE(review): a few counter lines are missing from this excerpt
 * (between RX_CSUM_GOOD and rx_cong_drops) — verify against the
 * string table in the full file.
 */
1436 static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1439 struct port_info *pi = netdev_priv(dev);
1440 struct adapter *adapter = pi->adapter;
1441 const struct mac_stats *s;
1443 spin_lock(&adapter->stats_lock);
1444 s = t3_mac_update_stats(&pi->mac);
1445 spin_unlock(&adapter->stats_lock);
/* TX MAC counters */
1447 *data++ = s->tx_octets;
1448 *data++ = s->tx_frames;
1449 *data++ = s->tx_mcast_frames;
1450 *data++ = s->tx_bcast_frames;
1451 *data++ = s->tx_pause;
1452 *data++ = s->tx_underrun;
1453 *data++ = s->tx_fifo_urun;
/* TX size histogram */
1455 *data++ = s->tx_frames_64;
1456 *data++ = s->tx_frames_65_127;
1457 *data++ = s->tx_frames_128_255;
1458 *data++ = s->tx_frames_256_511;
1459 *data++ = s->tx_frames_512_1023;
1460 *data++ = s->tx_frames_1024_1518;
1461 *data++ = s->tx_frames_1519_max;
/* RX MAC counters */
1463 *data++ = s->rx_octets;
1464 *data++ = s->rx_frames;
1465 *data++ = s->rx_mcast_frames;
1466 *data++ = s->rx_bcast_frames;
1467 *data++ = s->rx_pause;
1468 *data++ = s->rx_fcs_errs;
1469 *data++ = s->rx_symbol_errs;
1470 *data++ = s->rx_short;
1471 *data++ = s->rx_jabber;
1472 *data++ = s->rx_too_long;
1473 *data++ = s->rx_fifo_ovfl;
/* RX size histogram */
1475 *data++ = s->rx_frames_64;
1476 *data++ = s->rx_frames_65_127;
1477 *data++ = s->rx_frames_128_255;
1478 *data++ = s->rx_frames_256_511;
1479 *data++ = s->rx_frames_512_1023;
1480 *data++ = s->rx_frames_1024_1518;
1481 *data++ = s->rx_frames_1519_max;
/* PHY FIFO errors */
1483 *data++ = pi->phy.fifo_errors;
/* Software (SGE) per-port counters */
1485 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
1486 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
1487 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
1488 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
1489 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
1493 *data++ = s->rx_cong_drops;
/* B2 MAC watchdog counters */
1495 *data++ = s->num_toggled;
1496 *data++ = s->num_resets;
/* Link fault count */
1498 *data++ = s->link_faults;
1501 static inline void reg_block_dump(struct adapter *ap, void *buf,
1502 unsigned int start, unsigned int end)
1504 u32 *p = buf + start;
1506 for (; start <= end; start += sizeof(u32))
1507 *p++ = t3_read_reg(ap, start);
1510 static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1513 struct port_info *pi = netdev_priv(dev);
1514 struct adapter *ap = pi->adapter;
1518 * bits 0..9: chip version
1519 * bits 10..15: chip revision
1520 * bit 31: set for PCIe cards
1522 regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);
1525 * We skip the MAC statistics registers because they are clear-on-read.
1526 * Also reading multi-register stats would need to synchronize with the
1527 * periodic mac stats accumulation. Hard to justify the complexity.
1529 memset(buf, 0, T3_REGMAP_SIZE);
1530 reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
1531 reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
1532 reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
1533 reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
1534 reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
1535 reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
1536 XGM_REG(A_XGM_SERDES_STAT3, 1));
1537 reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
1538 XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
1541 static int restart_autoneg(struct net_device *dev)
1543 struct port_info *p = netdev_priv(dev);
1545 if (!netif_running(dev))
1547 if (p->link_config.autoneg != AUTONEG_ENABLE)
1549 p->phy.ops->autoneg_restart(&p->phy);
1553 static int cxgb3_phys_id(struct net_device *dev, u32 data)
1555 struct port_info *pi = netdev_priv(dev);
1556 struct adapter *adapter = pi->adapter;
1562 for (i = 0; i < data * 2; i++) {
1563 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1564 (i & 1) ? F_GPIO0_OUT_VAL : 0);
1565 if (msleep_interruptible(500))
1568 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1573 static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1575 struct port_info *p = netdev_priv(dev);
1577 cmd->supported = p->link_config.supported;
1578 cmd->advertising = p->link_config.advertising;
1580 if (netif_carrier_ok(dev)) {
1581 cmd->speed = p->link_config.speed;
1582 cmd->duplex = p->link_config.duplex;
1588 cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
1589 cmd->phy_address = p->phy.addr;
1590 cmd->transceiver = XCVR_EXTERNAL;
1591 cmd->autoneg = p->link_config.autoneg;
1597 static int speed_duplex_to_caps(int speed, int duplex)
1603 if (duplex == DUPLEX_FULL)
1604 cap = SUPPORTED_10baseT_Full;
1606 cap = SUPPORTED_10baseT_Half;
1609 if (duplex == DUPLEX_FULL)
1610 cap = SUPPORTED_100baseT_Full;
1612 cap = SUPPORTED_100baseT_Half;
1615 if (duplex == DUPLEX_FULL)
1616 cap = SUPPORTED_1000baseT_Full;
1618 cap = SUPPORTED_1000baseT_Half;
1621 if (duplex == DUPLEX_FULL)
1622 cap = SUPPORTED_10000baseT_Full;
/* Every speed/duplex advertisement bit this driver can honor. */
1627 #define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1628 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1629 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
1630 ADVERTISED_10000baseT_Full)
1632 static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1634 struct port_info *p = netdev_priv(dev);
1635 struct link_config *lc = &p->link_config;
1637 if (!(lc->supported & SUPPORTED_Autoneg)) {
1639 * PHY offers a single speed/duplex. See if that's what's
1642 if (cmd->autoneg == AUTONEG_DISABLE) {
1643 int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
1644 if (lc->supported & cap)
1650 if (cmd->autoneg == AUTONEG_DISABLE) {
1651 int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
1653 if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
1655 lc->requested_speed = cmd->speed;
1656 lc->requested_duplex = cmd->duplex;
1657 lc->advertising = 0;
1659 cmd->advertising &= ADVERTISED_MASK;
1660 cmd->advertising &= lc->supported;
1661 if (!cmd->advertising)
1663 lc->requested_speed = SPEED_INVALID;
1664 lc->requested_duplex = DUPLEX_INVALID;
1665 lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
1667 lc->autoneg = cmd->autoneg;
1668 if (netif_running(dev))
1669 t3_link_start(&p->phy, &p->mac, lc);
1673 static void get_pauseparam(struct net_device *dev,
1674 struct ethtool_pauseparam *epause)
1676 struct port_info *p = netdev_priv(dev);
1678 epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
1679 epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
1680 epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
1683 static int set_pauseparam(struct net_device *dev,
1684 struct ethtool_pauseparam *epause)
1686 struct port_info *p = netdev_priv(dev);
1687 struct link_config *lc = &p->link_config;
1689 if (epause->autoneg == AUTONEG_DISABLE)
1690 lc->requested_fc = 0;
1691 else if (lc->supported & SUPPORTED_Autoneg)
1692 lc->requested_fc = PAUSE_AUTONEG;
1696 if (epause->rx_pause)
1697 lc->requested_fc |= PAUSE_RX;
1698 if (epause->tx_pause)
1699 lc->requested_fc |= PAUSE_TX;
1700 if (lc->autoneg == AUTONEG_ENABLE) {
1701 if (netif_running(dev))
1702 t3_link_start(&p->phy, &p->mac, lc);
1704 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1705 if (netif_running(dev))
1706 t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
1711 static u32 get_rx_csum(struct net_device *dev)
1713 struct port_info *p = netdev_priv(dev);
1715 return p->rx_offload & T3_RX_CSUM;
1718 static int set_rx_csum(struct net_device *dev, u32 data)
1720 struct port_info *p = netdev_priv(dev);
1723 p->rx_offload |= T3_RX_CSUM;
1727 p->rx_offload &= ~(T3_RX_CSUM | T3_LRO);
1728 for (i = p->first_qset; i < p->first_qset + p->nqsets; i++)
1729 set_qset_lro(dev, i, 0);
1734 static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1736 struct port_info *pi = netdev_priv(dev);
1737 struct adapter *adapter = pi->adapter;
1738 const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];
1740 e->rx_max_pending = MAX_RX_BUFFERS;
1741 e->rx_mini_max_pending = 0;
1742 e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
1743 e->tx_max_pending = MAX_TXQ_ENTRIES;
1745 e->rx_pending = q->fl_size;
1746 e->rx_mini_pending = q->rspq_size;
1747 e->rx_jumbo_pending = q->jumbo_size;
1748 e->tx_pending = q->txq_size[0];
1751 static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1753 struct port_info *pi = netdev_priv(dev);
1754 struct adapter *adapter = pi->adapter;
1755 struct qset_params *q;
1758 if (e->rx_pending > MAX_RX_BUFFERS ||
1759 e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
1760 e->tx_pending > MAX_TXQ_ENTRIES ||
1761 e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1762 e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1763 e->rx_pending < MIN_FL_ENTRIES ||
1764 e->rx_jumbo_pending < MIN_FL_ENTRIES ||
1765 e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
1768 if (adapter->flags & FULL_INIT_DONE)
1771 q = &adapter->params.sge.qset[pi->first_qset];
1772 for (i = 0; i < pi->nqsets; ++i, ++q) {
1773 q->rspq_size = e->rx_mini_pending;
1774 q->fl_size = e->rx_pending;
1775 q->jumbo_size = e->rx_jumbo_pending;
1776 q->txq_size[0] = e->tx_pending;
1777 q->txq_size[1] = e->tx_pending;
1778 q->txq_size[2] = e->tx_pending;
1783 static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1785 struct port_info *pi = netdev_priv(dev);
1786 struct adapter *adapter = pi->adapter;
1787 struct qset_params *qsp = &adapter->params.sge.qset[0];
1788 struct sge_qset *qs = &adapter->sge.qs[0];
1790 if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
1793 qsp->coalesce_usecs = c->rx_coalesce_usecs;
1794 t3_update_qset_coalesce(qs, qsp);
1798 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1800 struct port_info *pi = netdev_priv(dev);
1801 struct adapter *adapter = pi->adapter;
1802 struct qset_params *q = adapter->params.sge.qset;
1804 c->rx_coalesce_usecs = q->coalesce_usecs;
1808 static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
1811 struct port_info *pi = netdev_priv(dev);
1812 struct adapter *adapter = pi->adapter;
1815 u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
1819 e->magic = EEPROM_MAGIC;
1820 for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
1821 err = t3_seeprom_read(adapter, i, (__le32 *) & buf[i]);
1824 memcpy(data, buf + e->offset, e->len);
/*
 * ethtool set_eeprom: write the serial EEPROM.  Unaligned writes are
 * handled read-modify-write via a scratch buffer; write protection is
 * dropped around the word-by-word write loop and restored afterwards.
 * NOTE(review): allocation/error-path lines are missing from this
 * excerpt; verify the goto/cleanup structure in the full file.
 */
1829 static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
1832 struct port_info *pi = netdev_priv(dev);
1833 struct adapter *adapter = pi->adapter;
1834 u32 aligned_offset, aligned_len;
1839 if (eeprom->magic != EEPROM_MAGIC)
/* round the window out to 4-byte alignment */
1842 aligned_offset = eeprom->offset & ~3;
1843 aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
1845 if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
1846 buf = kmalloc(aligned_len, GFP_KERNEL);
/* pre-read the first and last words so partial words are preserved */
1849 err = t3_seeprom_read(adapter, aligned_offset, (__le32 *) buf);
1850 if (!err && aligned_len > 4)
1851 err = t3_seeprom_read(adapter,
1852 aligned_offset + aligned_len - 4,
1853 (__le32 *) & buf[aligned_len - 4]);
1856 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
/* disable write protection before writing */
1860 err = t3_seeprom_wp(adapter, 0);
1864 for (p = (__le32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
1865 err = t3_seeprom_write(adapter, aligned_offset, *p);
1866 aligned_offset += 4;
/* re-enable write protection */
1870 err = t3_seeprom_wp(adapter, 1);
1877 static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1881 memset(&wol->sopass, 0, sizeof(wol->sopass));
/*
 * ethtool operations table wired into each port's net_device.
 * NOTE(review): some entries (e.g. .get_wol) are missing from this
 * excerpt; compare against the full file before editing.
 */
1884 static const struct ethtool_ops cxgb_ethtool_ops = {
1885 .get_settings = get_settings,
1886 .set_settings = set_settings,
1887 .get_drvinfo = get_drvinfo,
1888 .get_msglevel = get_msglevel,
1889 .set_msglevel = set_msglevel,
1890 .get_ringparam = get_sge_param,
1891 .set_ringparam = set_sge_param,
1892 .get_coalesce = get_coalesce,
1893 .set_coalesce = set_coalesce,
1894 .get_eeprom_len = get_eeprom_len,
1895 .get_eeprom = get_eeprom,
1896 .set_eeprom = set_eeprom,
1897 .get_pauseparam = get_pauseparam,
1898 .set_pauseparam = set_pauseparam,
1899 .get_rx_csum = get_rx_csum,
1900 .set_rx_csum = set_rx_csum,
1901 .set_tx_csum = ethtool_op_set_tx_csum,
1902 .set_sg = ethtool_op_set_sg,
1903 .get_link = ethtool_op_get_link,
1904 .get_strings = get_strings,
1905 .phys_id = cxgb3_phys_id,
1906 .nway_reset = restart_autoneg,
1907 .get_sset_count = get_sset_count,
1908 .get_ethtool_stats = get_stats,
1909 .get_regs_len = get_regs_len,
1910 .get_regs = get_regs,
1912 .set_tso = ethtool_op_set_tso,
/*
 * Range check used by the extension ioctls: a negative value means
 * "parameter not supplied" and is always accepted; otherwise the
 * value must lie within [lo, hi].
 */
static int in_range(int val, int lo, int hi)
{
	if (val < 0)
		return 1;
	return lo <= val && val <= hi;
}
/*
 * Chelsio private-ioctl dispatcher.  Copies the command word from user
 * space and switches on it; each case copies in its own argument
 * structure and validates it before touching adapter state.
 * NOTE(review): many control-flow lines (returns, braces, error paths)
 * are missing from this excerpt; treat the structure below as partial.
 */
1920 static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1922 struct port_info *pi = netdev_priv(dev);
1923 struct adapter *adapter = pi->adapter;
1927 if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
/* Set the parameters of one SGE queue set (CAP_NET_ADMIN only). */
1931 case CHELSIO_SET_QSET_PARAMS:{
1933 struct qset_params *q;
1934 struct ch_qset_params t;
1935 int q1 = pi->first_qset;
1936 int nqsets = pi->nqsets;
1938 if (!capable(CAP_NET_ADMIN))
1940 if (copy_from_user(&t, useraddr, sizeof(t)))
1942 if (t.qset_idx >= SGE_QSETS)
/* negative fields mean "leave unchanged"; in_range() honors that */
1944 if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
1945 !in_range(t.cong_thres, 0, 255) ||
1946 !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
1948 !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
1950 !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
1951 MAX_CTRL_TXQ_ENTRIES) ||
1952 !in_range(t.fl_size[0], MIN_FL_ENTRIES,
1954 || !in_range(t.fl_size[1], MIN_FL_ENTRIES,
1955 MAX_RX_JUMBO_BUFFERS)
1956 || !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
/* LRO needs RX checksum offload on the owning port */
1960 if ((adapter->flags & FULL_INIT_DONE) && t.lro > 0)
1961 for_each_port(adapter, i) {
1962 pi = adap2pinfo(adapter, i);
1963 if (t.qset_idx >= pi->first_qset &&
1964 t.qset_idx < pi->first_qset + pi->nqsets &&
1965 !(pi->rx_offload & T3_RX_CSUM))
/* ring sizes cannot change after full initialization */
1969 if ((adapter->flags & FULL_INIT_DONE) &&
1970 (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
1971 t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
1972 t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
1973 t.polling >= 0 || t.cong_thres >= 0))
1976 /* Allow setting of any available qset when offload enabled */
1977 if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
1979 for_each_port(adapter, i) {
1980 pi = adap2pinfo(adapter, i);
1981 nqsets += pi->first_qset + pi->nqsets;
1985 if (t.qset_idx < q1)
1987 if (t.qset_idx > q1 + nqsets - 1)
1990 q = &adapter->params.sge.qset[t.qset_idx];
/* apply only the fields that were supplied (>= 0) */
1992 if (t.rspq_size >= 0)
1993 q->rspq_size = t.rspq_size;
1994 if (t.fl_size[0] >= 0)
1995 q->fl_size = t.fl_size[0];
1996 if (t.fl_size[1] >= 0)
1997 q->jumbo_size = t.fl_size[1];
1998 if (t.txq_size[0] >= 0)
1999 q->txq_size[0] = t.txq_size[0];
2000 if (t.txq_size[1] >= 0)
2001 q->txq_size[1] = t.txq_size[1];
2002 if (t.txq_size[2] >= 0)
2003 q->txq_size[2] = t.txq_size[2];
2004 if (t.cong_thres >= 0)
2005 q->cong_thres = t.cong_thres;
2006 if (t.intr_lat >= 0) {
2007 struct sge_qset *qs =
2008 &adapter->sge.qs[t.qset_idx];
2010 q->coalesce_usecs = t.intr_lat;
2011 t3_update_qset_coalesce(qs, q);
2013 if (t.polling >= 0) {
2014 if (adapter->flags & USING_MSIX)
2015 q->polling = t.polling;
2017 /* No polling with INTx for T3A */
2018 if (adapter->params.rev == 0 &&
2019 !(adapter->flags & USING_MSI))
/* with INTx all qsets must share the same polling mode */
2022 for (i = 0; i < SGE_QSETS; i++) {
2023 q = &adapter->params.sge.
2025 q->polling = t.polling;
2030 set_qset_lro(dev, t.qset_idx, t.lro);
/* Read back the parameters of one SGE queue set. */
2034 case CHELSIO_GET_QSET_PARAMS:{
2035 struct qset_params *q;
2036 struct ch_qset_params t;
2037 int q1 = pi->first_qset;
2038 int nqsets = pi->nqsets;
2041 if (copy_from_user(&t, useraddr, sizeof(t)))
2044 /* Display qsets for all ports when offload enabled */
2045 if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2047 for_each_port(adapter, i) {
2048 pi = adap2pinfo(adapter, i);
2049 nqsets = pi->first_qset + pi->nqsets;
2053 if (t.qset_idx >= nqsets)
2056 q = &adapter->params.sge.qset[q1 + t.qset_idx];
2057 t.rspq_size = q->rspq_size;
2058 t.txq_size[0] = q->txq_size[0];
2059 t.txq_size[1] = q->txq_size[1];
2060 t.txq_size[2] = q->txq_size[2];
2061 t.fl_size[0] = q->fl_size;
2062 t.fl_size[1] = q->jumbo_size;
2063 t.polling = q->polling;
2065 t.intr_lat = q->coalesce_usecs;
2066 t.cong_thres = q->cong_thres;
/* report the IRQ vector servicing this qset */
2069 if (adapter->flags & USING_MSIX)
2070 t.vector = adapter->msix_info[q1 + t.qset_idx + 1].vec;
2072 t.vector = adapter->pdev->irq;
2074 if (copy_to_user(useraddr, &t, sizeof(t)))
/* Change how many queue sets this port owns (before full init). */
2078 case CHELSIO_SET_QSET_NUM:{
2079 struct ch_reg edata;
2080 unsigned int i, first_qset = 0, other_qsets = 0;
2082 if (!capable(CAP_NET_ADMIN))
2084 if (adapter->flags & FULL_INIT_DONE)
2086 if (copy_from_user(&edata, useraddr, sizeof(edata)))
/* multiple qsets require MSI-X */
2088 if (edata.val < 1 ||
2089 (edata.val > 1 && !(adapter->flags & USING_MSIX)))
2092 for_each_port(adapter, i)
2093 if (adapter->port[i] && adapter->port[i] != dev)
2094 other_qsets += adap2pinfo(adapter, i)->nqsets;
2096 if (edata.val + other_qsets > SGE_QSETS)
2099 pi->nqsets = edata.val;
/* re-pack first_qset assignments across all ports */
2101 for_each_port(adapter, i)
2102 if (adapter->port[i]) {
2103 pi = adap2pinfo(adapter, i);
2104 pi->first_qset = first_qset;
2105 first_qset += pi->nqsets;
/* Report how many queue sets this port owns. */
2109 case CHELSIO_GET_QSET_NUM:{
2110 struct ch_reg edata;
2112 edata.cmd = CHELSIO_GET_QSET_NUM;
2113 edata.val = pi->nqsets;
2114 if (copy_to_user(useraddr, &edata, sizeof(edata)))
/* Load a firmware image (CAP_SYS_RAWIO only). */
2118 case CHELSIO_LOAD_FW:{
2120 struct ch_mem_range t;
2122 if (!capable(CAP_SYS_RAWIO))
2124 if (copy_from_user(&t, useraddr, sizeof(t)))
2126 /* Check t.len sanity ? */
2127 fw_data = kmalloc(t.len, GFP_KERNEL);
2132 (fw_data, useraddr + sizeof(t), t.len)) {
2137 ret = t3_load_fw(adapter, fw_data, t.len);
/* Replace the TCP MTU table (offload must be stopped). */
2143 case CHELSIO_SETMTUTAB:{
2147 if (!is_offload(adapter))
2149 if (!capable(CAP_NET_ADMIN))
2151 if (offload_running(adapter))
2153 if (copy_from_user(&m, useraddr, sizeof(m)))
2155 if (m.nmtus != NMTUS)
2157 if (m.mtus[0] < 81) /* accommodate SACK */
2160 /* MTUs must be in ascending order */
2161 for (i = 1; i < NMTUS; ++i)
2162 if (m.mtus[i] < m.mtus[i - 1])
2165 memcpy(adapter->params.mtus, m.mtus,
2166 sizeof(adapter->params.mtus));
/* Report payload-memory (PM) configuration. */
2169 case CHELSIO_GET_PM:{
2170 struct tp_params *p = &adapter->params.tp;
2171 struct ch_pm m = {.cmd = CHELSIO_GET_PM };
2173 if (!is_offload(adapter))
2175 m.tx_pg_sz = p->tx_pg_size;
2176 m.tx_num_pg = p->tx_num_pgs;
2177 m.rx_pg_sz = p->rx_pg_size;
2178 m.rx_num_pg = p->rx_num_pgs;
2179 m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
2180 if (copy_to_user(useraddr, &m, sizeof(m)))
/* Reconfigure payload memory (before full init). */
2184 case CHELSIO_SET_PM:{
2186 struct tp_params *p = &adapter->params.tp;
2188 if (!is_offload(adapter))
2190 if (!capable(CAP_NET_ADMIN))
2192 if (adapter->flags & FULL_INIT_DONE)
2194 if (copy_from_user(&m, useraddr, sizeof(m)))
2196 if (!is_power_of_2(m.rx_pg_sz) ||
2197 !is_power_of_2(m.tx_pg_sz))
2198 return -EINVAL; /* not power of 2 */
2199 if (!(m.rx_pg_sz & 0x14000))
2200 return -EINVAL; /* not 16KB or 64KB */
2201 if (!(m.tx_pg_sz & 0x1554000))
/* -1 means keep the current page count */
2203 if (m.tx_num_pg == -1)
2204 m.tx_num_pg = p->tx_num_pgs;
2205 if (m.rx_num_pg == -1)
2206 m.rx_num_pg = p->rx_num_pgs;
2207 if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
2209 if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
2210 m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
2212 p->rx_pg_size = m.rx_pg_sz;
2213 p->tx_pg_size = m.tx_pg_sz;
2214 p->rx_num_pgs = m.rx_num_pg;
2215 p->tx_num_pgs = m.tx_num_pg;
/* Dump a range of adapter memory (CM/PMRX/PMTX) to user space. */
2218 case CHELSIO_GET_MEM:{
2219 struct ch_mem_range t;
2223 if (!is_offload(adapter))
2225 if (!(adapter->flags & FULL_INIT_DONE))
2226 return -EIO; /* need the memory controllers */
2227 if (copy_from_user(&t, useraddr, sizeof(t)))
2229 if ((t.addr & 7) || (t.len & 7))
2231 if (t.mem_id == MEM_CM)
2233 else if (t.mem_id == MEM_PMRX)
2234 mem = &adapter->pmrx;
2235 else if (t.mem_id == MEM_PMTX)
2236 mem = &adapter->pmtx;
2242 * bits 0..9: chip version
2243 * bits 10..15: chip revision
2245 t.version = 3 | (adapter->params.rev << 10);
2246 if (copy_to_user(useraddr, &t, sizeof(t)))
2250 * Read 256 bytes at a time as len can be large and we don't
2251 * want to use huge intermediate buffers.
2253 useraddr += sizeof(t); /* advance to start of buffer */
2255 unsigned int chunk =
2256 min_t(unsigned int, t.len, sizeof(buf));
2259 t3_mc7_bd_read(mem, t.addr / 8, chunk / 8,
2263 if (copy_to_user(useraddr, buf, chunk))
/* Install/replace the packet trace filter. */
2271 case CHELSIO_SET_TRACE_FILTER:{
2273 const struct trace_params *tp;
2275 if (!capable(CAP_NET_ADMIN))
2277 if (!offload_running(adapter))
2279 if (copy_from_user(&t, useraddr, sizeof(t)))
2282 tp = (const struct trace_params *)&t.sip;
2284 t3_config_trace_filter(adapter, tp, 0,
2288 t3_config_trace_filter(adapter, tp, 1,
/*
 * net_device ioctl handler: standard MII register access plus the
 * Chelsio private extension ioctl.  For 10G PHYs the MMD is taken
 * from the upper bits of phy_id (clause-45 addressing); for 1G PHYs
 * clause-22 addressing is used (mmd 0).
 * NOTE(review): the switch/case labels are missing from this excerpt.
 */
2299 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
2301 struct mii_ioctl_data *data = if_mii(req);
2302 struct port_info *pi = netdev_priv(dev);
2303 struct adapter *adapter = pi->adapter;
/* SIOCGMIIPHY: report the PHY address */
2308 data->phy_id = pi->phy.addr;
/* SIOCGMIIREG: read a PHY register */
2312 struct cphy *phy = &pi->phy;
2314 if (!phy->mdio_read)
2316 if (is_10G(adapter)) {
2317 mmd = data->phy_id >> 8;
2320 else if (mmd > MDIO_DEV_VEND2)
2324 phy->mdio_read(adapter, data->phy_id & 0x1f,
2325 mmd, data->reg_num, &val);
2328 phy->mdio_read(adapter, data->phy_id & 0x1f,
2329 0, data->reg_num & 0x1f,
2332 data->val_out = val;
/* SIOCSMIIREG: write a PHY register (CAP_NET_ADMIN) */
2336 struct cphy *phy = &pi->phy;
2338 if (!capable(CAP_NET_ADMIN))
2340 if (!phy->mdio_write)
2342 if (is_10G(adapter)) {
2343 mmd = data->phy_id >> 8;
2346 else if (mmd > MDIO_DEV_VEND2)
2350 phy->mdio_write(adapter,
2351 data->phy_id & 0x1f, mmd,
2356 phy->mdio_write(adapter,
2357 data->phy_id & 0x1f, 0,
2358 data->reg_num & 0x1f,
/* SIOCCHIOCTL: Chelsio private extension ioctls */
2363 return cxgb_extension_ioctl(dev, req->ifr_data);
2370 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
2372 struct port_info *pi = netdev_priv(dev);
2373 struct adapter *adapter = pi->adapter;
2376 if (new_mtu < 81) /* accommodate SACK */
2378 if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
2381 init_port_mtus(adapter);
2382 if (adapter->params.rev == 0 && offload_running(adapter))
2383 t3_load_mtus(adapter, adapter->params.mtus,
2384 adapter->params.a_wnd, adapter->params.b_wnd,
2385 adapter->port[0]->mtu);
2389 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2391 struct port_info *pi = netdev_priv(dev);
2392 struct adapter *adapter = pi->adapter;
2393 struct sockaddr *addr = p;
2395 if (!is_valid_ether_addr(addr->sa_data))
2398 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2399 t3_mac_set_address(&pi->mac, 0, dev->dev_addr);
2400 if (offload_running(adapter))
2401 write_smt_entry(adapter, pi->port_id);
2406 * t3_synchronize_rx - wait for current Rx processing on a port to complete
2407 * @adap: the adapter
2410 * Ensures that current Rx processing on any of the queues associated with
2411 * the given port completes before returning. We do this by acquiring and
2412 * releasing the locks of the response queues associated with the port.
2414 static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
2418 for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
2419 struct sge_rspq *q = &adap->sge.qs[i].rspq;
2421 spin_lock_irq(&q->lock);
2422 spin_unlock_irq(&q->lock);
2426 static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
2428 struct port_info *pi = netdev_priv(dev);
2429 struct adapter *adapter = pi->adapter;
2432 if (adapter->params.rev > 0)
2433 t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
2435 /* single control for all ports */
2436 unsigned int i, have_vlans = 0;
2437 for_each_port(adapter, i)
2438 have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;
2440 t3_set_vlan_accel(adapter, 1, have_vlans);
2442 t3_synchronize_rx(adapter, pi);
2445 #ifdef CONFIG_NET_POLL_CONTROLLER
2446 static void cxgb_netpoll(struct net_device *dev)
2448 struct port_info *pi = netdev_priv(dev);
2449 struct adapter *adapter = pi->adapter;
2452 for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
2453 struct sge_qset *qs = &adapter->sge.qs[qidx];
2456 if (adapter->flags & USING_MSIX)
2461 t3_intr_handler(adapter, qs->rspq.polling) (0, source);
2467 * Periodic accumulation of MAC statistics.
2469 static void mac_stats_update(struct adapter *adapter)
2473 for_each_port(adapter, i) {
2474 struct net_device *dev = adapter->port[i];
2475 struct port_info *p = netdev_priv(dev);
2477 if (netif_running(dev)) {
2478 spin_lock(&adapter->stats_lock);
2479 t3_mac_update_stats(&p->mac);
2480 spin_unlock(&adapter->stats_lock);
/*
 * Poll link state for PHYs that have no link interrupt.  Ports with a
 * pending link fault are handled via the link-fault path; for the rest
 * the XGMAC interrupt is masked around t3_link_changed() to avoid
 * racing with the interrupt handler.
 * NOTE(review): lines inside the link_fault branch are missing from
 * this excerpt; the lock/continue ordering must be checked in the
 * full file.
 */
2485 static void check_link_status(struct adapter *adapter)
2489 for_each_port(adapter, i) {
2490 struct net_device *dev = adapter->port[i];
2491 struct port_info *p = netdev_priv(dev);
2493 spin_lock_irq(&adapter->work_lock);
2494 if (p->link_fault) {
2495 spin_unlock_irq(&adapter->work_lock);
2498 spin_unlock_irq(&adapter->work_lock);
2500 if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev)) {
/* mask and ack the XGMAC interrupt while polling */
2501 t3_xgm_intr_disable(adapter, i);
2502 t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
2504 t3_link_changed(adapter, i);
2505 t3_xgm_intr_enable(adapter, i);
2510 static void check_t3b2_mac(struct adapter *adapter)
2514 if (!rtnl_trylock()) /* synchronize with ifdown */
2517 for_each_port(adapter, i) {
2518 struct net_device *dev = adapter->port[i];
2519 struct port_info *p = netdev_priv(dev);
2522 if (!netif_running(dev))
2526 if (netif_running(dev) && netif_carrier_ok(dev))
2527 status = t3b2_mac_watchdog_task(&p->mac);
2529 p->mac.stats.num_toggled++;
2530 else if (status == 2) {
2531 struct cmac *mac = &p->mac;
2533 t3_mac_set_mtu(mac, dev->mtu);
2534 t3_mac_set_address(mac, 0, dev->dev_addr);
2535 cxgb_set_rxmode(dev);
2536 t3_link_start(&p->phy, mac, &p->link_config);
2537 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
2538 t3_port_intr_enable(adapter, p->port_id);
2539 p->mac.stats.num_resets++;
/*
 * Periodic adapter maintenance work: poll link state where needed,
 * accumulate MAC statistics, run the T3B2 MAC watchdog and count
 * flood-prone interrupt conditions (RX FIFO overflow, empty free
 * lists) by polling their cause registers instead of taking interrupts.
 * Re-schedules itself while any port is up.
 * NOTE(review): the free-list scan loop is truncated in this excerpt.
 */
2546 static void t3_adap_check_task(struct work_struct *work)
2548 struct adapter *adapter = container_of(work, struct adapter,
2549 adap_check_task.work);
2550 const struct adapter_params *p = &adapter->params;
2552 unsigned int v, status, reset;
2554 adapter->check_task_cnt++;
2556 /* Check link status for PHYs without interrupts */
2557 if (p->linkpoll_period)
2558 check_link_status(adapter);
2560 /* Accumulate MAC stats if needed */
2561 if (!p->linkpoll_period ||
2562 (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
2563 p->stats_update_period) {
2564 mac_stats_update(adapter);
2565 adapter->check_task_cnt = 0;
2568 if (p->rev == T3_REV_B2)
2569 check_t3b2_mac(adapter);
2572 * Scan the XGMAC's to check for various conditions which we want to
2573 * monitor in a periodic polling manner rather than via an interrupt
2574 * condition. This is used for conditions which would otherwise flood
2575 * the system with interrupts and we only really need to know that the
2576 * conditions are "happening" ... For each condition we count the
2577 * detection of the condition and reset it for the next polling loop.
2579 for_each_port(adapter, port) {
2580 struct cmac *mac = &adap2pinfo(adapter, port)->mac;
2583 cause = t3_read_reg(adapter, A_XGM_INT_CAUSE + mac->offset);
2585 if (cause & F_RXFIFO_OVERFLOW) {
2586 mac->stats.rx_fifo_ovfl++;
2587 reset |= F_RXFIFO_OVERFLOW;
/* write-1-to-clear the causes we handled */
2590 t3_write_reg(adapter, A_XGM_INT_CAUSE + mac->offset, reset);
2594 * We do the same as above for FL_EMPTY interrupts.
2596 status = t3_read_reg(adapter, A_SG_INT_CAUSE);
2599 if (status & F_FLEMPTY) {
2600 struct sge_qset *qs = &adapter->sge.qs[0];
2605 v = (t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS) >> S_FL0EMPTY) &
/* one empty bit per free list; count them per qset */
2609 qs->fl[i].empty += (v & 1);
2617 t3_write_reg(adapter, A_SG_INT_CAUSE, reset);
2619 /* Schedule the next check update if any port is active. */
2620 spin_lock_irq(&adapter->work_lock);
2621 if (adapter->open_device_map & PORT_MASK)
2622 schedule_chk_task(adapter);
2623 spin_unlock_irq(&adapter->work_lock);
2627 * Processes external (PHY) interrupts in process context.
2629 static void ext_intr_task(struct work_struct *work)
2631 struct adapter *adapter = container_of(work, struct adapter,
2632 ext_intr_handler_task);
2635 /* Disable link fault interrupts */
2636 for_each_port(adapter, i) {
2637 struct net_device *dev = adapter->port[i];
2638 struct port_info *p = netdev_priv(dev);
2640 t3_xgm_intr_disable(adapter, i);
2641 t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
2644 /* Re-enable link fault interrupts */
2645 t3_phy_intr_handler(adapter);
2647 for_each_port(adapter, i)
2648 t3_xgm_intr_enable(adapter, i);
2650 /* Now reenable external interrupts */
2651 spin_lock_irq(&adapter->work_lock);
2652 if (adapter->slow_intr_mask) {
2653 adapter->slow_intr_mask |= F_T3DBG;
2654 t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
2655 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2656 adapter->slow_intr_mask);
2658 spin_unlock_irq(&adapter->work_lock);
2662 * Interrupt-context handler for external (PHY) interrupts.
2664 void t3_os_ext_intr_handler(struct adapter *adapter)
2667 * Schedule a task to handle external interrupts as they may be slow
2668 * and we use a mutex to protect MDIO registers. We disable PHY
2669 * interrupts in the meantime and let the task reenable them when
2672 spin_lock(&adapter->work_lock);
2673 if (adapter->slow_intr_mask) {
2674 adapter->slow_intr_mask &= ~F_T3DBG;
2675 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2676 adapter->slow_intr_mask);
2677 queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
2679 spin_unlock(&adapter->work_lock);
2682 static void link_fault_task(struct work_struct *work)
2684 struct adapter *adapter = container_of(work, struct adapter,
2685 link_fault_handler_task);
2688 for_each_port(adapter, i) {
2689 struct net_device *netdev = adapter->port[i];
2690 struct port_info *pi = netdev_priv(netdev);
2693 t3_link_fault(adapter, i);
2697 void t3_os_link_fault_handler(struct adapter *adapter, int port_id)
2699 struct net_device *netdev = adapter->port[port_id];
2700 struct port_info *pi = netdev_priv(netdev);
2702 spin_lock(&adapter->work_lock);
2704 queue_work(cxgb3_wq, &adapter->link_fault_handler_task);
2705 spin_unlock(&adapter->work_lock);
/*
 * Quiesce the adapter after a fatal or PCI error: notify and close the
 * offload subsystem, stop all running ports and the SGE timers, clear
 * FULL_INIT_DONE, and (when @reset is set) reset the chip, disabling
 * the PCI device if the reset fails.
 * NOTE(review): extract is truncated -- local declarations, the
 * per-port close call under netif_running(), the 'if (reset)' guard
 * and the return statement are not visible here.
 */
2708 static int t3_adapter_error(struct adapter *adapter, int reset)
2712 if (is_offload(adapter) &&
2713 test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2714 cxgb3_err_notify(&adapter->tdev, OFFLOAD_STATUS_DOWN, 0);
2715 offload_close(&adapter->tdev);
2718 /* Stop all ports */
2719 for_each_port(adapter, i) {
2720 struct net_device *netdev = adapter->port[i];
2722 if (netif_running(netdev))
2726 /* Stop SGE timers */
2727 t3_stop_sge_timers(adapter);
/* Force a full re-init before the adapter can be used again. */
2729 adapter->flags &= ~FULL_INIT_DONE;
2732 ret = t3_reset_adapter(adapter);
2734 pci_disable_device(adapter->pdev);
/*
 * Re-enable the PCI function after a reset, restore saved config
 * space, drop stale SGE resources, and re-prepare the chip via
 * t3_replay_prep_adapter().
 * NOTE(review): extract is truncated -- the explicit return statements
 * (success and failure paths) are not visible here.
 */
2739 static int t3_reenable_adapter(struct adapter *adapter)
2741 if (pci_enable_device(adapter->pdev)) {
2742 dev_err(&adapter->pdev->dev,
2743 "Cannot re-enable PCI device after reset.\n");
2746 pci_set_master(adapter->pdev);
/* Restore the config space saved by pci_save_state() in init_one(). */
2747 pci_restore_state(adapter->pdev);
2749 /* Free sge resources */
2750 t3_free_sge_resources(adapter);
2752 if (t3_replay_prep_adapter(adapter))
/*
 * Bring previously-running ports back up after a successful recovery
 * and, unless offload is disabled, tell the offload subsystem the
 * adapter is usable again.
 */
2760 static void t3_resume_ports(struct adapter *adapter)
2764 /* Restart the ports */
2765 for_each_port(adapter, i) {
2766 struct net_device *netdev = adapter->port[i];
/* Only reopen ports that were up before the error. */
2768 if (netif_running(netdev)) {
2769 if (cxgb_open(netdev)) {
2770 dev_err(&adapter->pdev->dev,
2771 "can't bring device back up"
/* NOTE(review): extract is truncated -- the remainder of this error
 * message and the loop-continue logic are not visible here. */
2778 if (is_offload(adapter) && !ofld_disable)
2779 cxgb3_err_notify(&adapter->tdev, OFFLOAD_STATUS_UP, 0);
2783 * processes a fatal error.
2784 * Bring the ports down, reset the chip, bring the ports back up.
2786 static void fatal_error_task(struct work_struct *work)
2788 struct adapter *adapter = container_of(work, struct adapter,
2789 fatal_error_handler_task);
/* Full teardown with chip reset (reset=1), then re-enable the PCI
 * function and resume the ports only if each step succeeded. */
2793 err = t3_adapter_error(adapter, 1);
2795 err = t3_reenable_adapter(adapter);
2797 t3_resume_ports(adapter);
2799 CH_ALERT(adapter, "adapter reset %s\n", err ? "failed" : "succeeded");
/*
 * Handle a fatal hardware error: stop the SGE, silence both MACs,
 * disable interrupts, and schedule fatal_error_task to reset the
 * adapter.  Also dumps the firmware status words for diagnosis.
 */
2803 void t3_fatal_err(struct adapter *adapter)
2805 unsigned int fw_status[4];
2807 if (adapter->flags & FULL_INIT_DONE) {
2808 t3_sge_stop(adapter);
/* Zero Tx/Rx control on both MACs: port 0 via the direct registers,
 * port 1 via the XGM_REG() per-port offset. */
2809 t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
2810 t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
2811 t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
2812 t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);
/* work_lock serializes this against the interrupt/recovery paths that
 * also manipulate the interrupt state and queue work. */
2814 spin_lock(&adapter->work_lock);
2815 t3_intr_disable(adapter);
2816 queue_work(cxgb3_wq, &adapter->fatal_error_handler_task);
2817 spin_unlock(&adapter->work_lock);
2819 CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
/* Read four firmware scratch words from the CIM ctl block at 0xa0, if
 * the chip is still responsive, and log them. */
2820 if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
2821 CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
2822 fw_status[0], fw_status[1],
2823 fw_status[2], fw_status[3]);
2827 * t3_io_error_detected - called when PCI error is detected
2828 * @pdev: Pointer to PCI device
2829 * @state: The current pci connection state
2831 * This function is called after a PCI bus error affecting
2832 * this device has been detected.
2834 static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
2835 pci_channel_state_t state)
2837 struct adapter *adapter = pci_get_drvdata(pdev);
/* Quiesce without a chip reset (reset=0); the AER core performs the
 * slot reset and then calls t3_io_slot_reset(). */
2840 ret = t3_adapter_error(adapter, 0);
2842 /* Request a slot reset. */
2843 return PCI_ERS_RESULT_NEED_RESET;
2847 * t3_io_slot_reset - called after the pci bus has been reset.
2848 * @pdev: Pointer to PCI device
2850 * Restart the card from scratch, as if from a cold-boot.
2852 static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
2854 struct adapter *adapter = pci_get_drvdata(pdev);
/* RECOVERED lets the AER core proceed to t3_io_resume(); DISCONNECT
 * tells it the device is lost. */
2856 if (!t3_reenable_adapter(adapter))
2857 return PCI_ERS_RESULT_RECOVERED;
2859 return PCI_ERS_RESULT_DISCONNECT;
2863 * t3_io_resume - called when traffic can start flowing again.
2864 * @pdev: Pointer to PCI device
2866 * This callback is called when the error recovery driver tells us that
2867 * it's OK to resume normal operation.
2869 static void t3_io_resume(struct pci_dev *pdev)
2871 struct adapter *adapter = pci_get_drvdata(pdev);
2873 t3_resume_ports(adapter);
/* PCI Advanced Error Recovery (AER) callbacks, wired into the pci_driver
 * below; invoked in the order error_detected -> slot_reset -> resume. */
2876 static struct pci_error_handlers t3_err_handler = {
2877 .error_detected = t3_io_error_detected,
2878 .slot_reset = t3_io_slot_reset,
2879 .resume = t3_io_resume,
2883 * Set the number of qsets based on the number of CPUs and the number of ports,
2884 * not to exceed the number of available qsets, assuming there are enough qsets
2887 static void set_nqsets(struct adapter *adap)
2890 int num_cpus = num_online_cpus();
2891 int hwports = adap->params.nports;
/* One MSI-X vector is reserved for the slow-path interrupt; the rest
 * can back per-port queue sets. */
2892 int nqsets = adap->msix_nvectors - 1;
/* Multiple qsets per port only make sense on rev > 0 parts with MSI-X. */
2894 if (adap->params.rev > 0 && adap->flags & USING_MSIX) {
/* NOTE(review): extract is truncated -- the clamping assignments that
 * belong between/after these conditions are not visible here. */
2896 (hwports * nqsets > SGE_QSETS ||
2897 num_cpus >= nqsets / hwports))
2899 if (nqsets > num_cpus)
2901 if (nqsets < 1 || hwports == 4)
2906 for_each_port(adap, i) {
2907 struct port_info *pi = adap2pinfo(adap, i);
/* NOTE(review): the pi->first_qset assignment appears to be missing
 * from this extract (it is consumed by 'j' below). */
2910 pi->nqsets = nqsets;
2911 j = pi->first_qset + nqsets;
2913 dev_info(&adap->pdev->dev,
2914 "Port %d using %d queue sets.\n", i, nqsets);
/*
 * Try to enable MSI-X with one vector per queue set plus one for the
 * slow path, retrying with fewer vectors as the PCI core suggests.
 * On success, records each vector in adap->msix_info[] and the count
 * in adap->msix_nvectors.
 * NOTE(review): extract is truncated -- the retry assignment inside the
 * while loop and the final return are not visible here.
 */
2918 static int __devinit cxgb_enable_msix(struct adapter *adap)
2920 struct msix_entry entries[SGE_QSETS + 1];
2924 vectors = ARRAY_SIZE(entries);
2925 for (i = 0; i < vectors; ++i)
2926 entries[i].entry = i;
/* pci_enable_msix() > 0 means "retry with at most this many vectors". */
2928 while ((err = pci_enable_msix(adap->pdev, entries, vectors)) > 0)
/* Need at least one vector per port plus the slow-path vector. */
2931 if (!err && vectors < (adap->params.nports + 1))
2935 for (i = 0; i < vectors; ++i)
2936 adap->msix_info[i].vec = entries[i].vector;
2937 adap->msix_nvectors = vectors;
/* Log adapter/port identification once the net devices are registered. */
2943 static void __devinit print_port_info(struct adapter *adap,
2944 const struct adapter_info *ai)
2946 static const char *pci_variant[] = {
2947 "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
/* PCIe reports lane width ("x%d"); parallel PCI variants report speed
 * and bus width.  NOTE(review): the conditional selecting between these
 * two snprintf() branches is not visible in this truncated extract. */
2954 snprintf(buf, sizeof(buf), "%s x%d",
2955 pci_variant[adap->params.pci.variant],
2956 adap->params.pci.width);
2958 snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
2959 pci_variant[adap->params.pci.variant],
2960 adap->params.pci.speed, adap->params.pci.width);
2962 for_each_port(adap, i) {
2963 struct net_device *dev = adap->port[i];
2964 const struct port_info *pi = netdev_priv(dev);
/* Skip ports whose register_netdev() failed in init_one(). */
2966 if (!test_bit(i, &adap->registered_device_map))
2968 printk(KERN_INFO "%s: %s %s %sNIC (rev %d) %s%s\n",
2969 dev->name, ai->desc, pi->phy.desc,
2970 is_offload(adap) ? "R" : "", adap->params.rev, buf,
2971 (adap->flags & USING_MSIX) ? " MSI-X" :
2972 (adap->flags & USING_MSI) ? " MSI" : "");
/* Print the MC7 memory sizes only once, for the port whose name the
 * adapter adopted as its own. */
2973 if (adap->name == dev->name && adap->params.vpd.mclk)
2975 "%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
2976 adap->name, t3_mc7_size(&adap->cm) >> 20,
2977 t3_mc7_size(&adap->pmtx) >> 20,
2978 t3_mc7_size(&adap->pmrx) >> 20,
2979 adap->params.vpd.sn);
/* net_device callbacks shared by every port of the adapter. */
2983 static const struct net_device_ops cxgb_netdev_ops = {
2984 .ndo_open = cxgb_open,
2985 .ndo_stop = cxgb_close,
2986 .ndo_start_xmit = t3_eth_xmit,
2987 .ndo_get_stats = cxgb_get_stats,
2988 .ndo_validate_addr = eth_validate_addr,
2989 .ndo_set_multicast_list = cxgb_set_rxmode,
2990 .ndo_do_ioctl = cxgb_ioctl,
2991 .ndo_change_mtu = cxgb_change_mtu,
2992 .ndo_set_mac_address = cxgb_set_mac_addr,
2993 .ndo_vlan_rx_register = vlan_rx_register,
2994 #ifdef CONFIG_NET_POLL_CONTROLLER
2995 .ndo_poll_controller = cxgb_netpoll,
/*
 * PCI probe: allocate and initialize one adapter, its per-port net
 * devices, interrupts, and sysfs attributes.  Unwinding on failure is
 * done via the out_* labels at the bottom.
 * NOTE(review): this extract is truncated -- several error checks,
 * 'goto' targets (e.g. out_free_adapter and the netdev-free label) and
 * closing braces are not visible here.
 */
2999 static int __devinit init_one(struct pci_dev *pdev,
3000 const struct pci_device_id *ent)
3002 static int version_printed;
3004 int i, err, pci_using_dac = 0;
3005 unsigned long mmio_start, mmio_len;
3006 const struct adapter_info *ai;
3007 struct adapter *adapter = NULL;
3008 struct port_info *pi;
3010 if (!version_printed) {
3011 printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
/* Single-threaded workqueue shared by all adapters; used by the
 * external-interrupt, link-fault, and fatal-error handlers above. */
3016 cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
3018 printk(KERN_ERR DRV_NAME
3019 ": cannot initialize work queue\n");
3024 err = pci_request_regions(pdev, DRV_NAME);
3026 /* Just info, some other driver may have claimed the device. */
3027 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
3031 err = pci_enable_device(pdev);
3033 dev_err(&pdev->dev, "cannot enable PCI device\n");
3034 goto out_release_regions;
/* Prefer 64-bit DMA, falling back to 32-bit; the pci_using_dac flag
 * (set in a line missing from this extract) gates NETIF_F_HIGHDMA. */
3037 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
3039 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
3041 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
3042 "coherent allocations\n");
3043 goto out_disable_device;
3045 } else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
3046 dev_err(&pdev->dev, "no usable DMA configuration\n");
3047 goto out_disable_device;
3050 pci_set_master(pdev);
/* Saved state is restored by t3_reenable_adapter() after recovery. */
3051 pci_save_state(pdev);
3053 mmio_start = pci_resource_start(pdev, 0);
3054 mmio_len = pci_resource_len(pdev, 0);
3055 ai = t3_get_adapter_info(ent->driver_data);
3057 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
3060 goto out_disable_device;
3063 adapter->regs = ioremap_nocache(mmio_start, mmio_len);
3064 if (!adapter->regs) {
3065 dev_err(&pdev->dev, "cannot map device registers\n");
3067 goto out_free_adapter;
3070 adapter->pdev = pdev;
3071 adapter->name = pci_name(pdev);
3072 adapter->msg_enable = dflt_msg_enable;
3073 adapter->mmio_len = mmio_len;
3075 mutex_init(&adapter->mdio_lock);
3076 spin_lock_init(&adapter->work_lock);
3077 spin_lock_init(&adapter->stats_lock);
3079 INIT_LIST_HEAD(&adapter->adapter_list);
3080 INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
3081 INIT_WORK(&adapter->link_fault_handler_task, link_fault_task);
3082 INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);
3083 INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
/* Allocate one multiqueue net_device per hardware port. */
3085 for (i = 0; i < ai->nports; ++i) {
3086 struct net_device *netdev;
3088 netdev = alloc_etherdev_mq(sizeof(struct port_info), SGE_QSETS);
3094 SET_NETDEV_DEV(netdev, &pdev->dev);
3096 adapter->port[i] = netdev;
3097 pi = netdev_priv(netdev);
3098 pi->adapter = adapter;
3099 pi->rx_offload = T3_RX_CSUM | T3_LRO;
/* Link state and queues stay down until cxgb_open(). */
3101 netif_carrier_off(netdev);
3102 netif_tx_stop_all_queues(netdev);
3103 netdev->irq = pdev->irq;
3104 netdev->mem_start = mmio_start;
3105 netdev->mem_end = mmio_start + mmio_len - 1;
3106 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
3107 netdev->features |= NETIF_F_LLTX;
3108 netdev->features |= NETIF_F_GRO;
/* NOTE(review): the 'if (pci_using_dac)' guard for HIGHDMA is missing
 * from this extract. */
3110 netdev->features |= NETIF_F_HIGHDMA;
3112 netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
3113 netdev->netdev_ops = &cxgb_netdev_ops;
3114 SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
3117 pci_set_drvdata(pdev, adapter);
3118 if (t3_prep_adapter(adapter, ai, 1) < 0) {
3124 * The card is now ready to go. If any errors occur during device
3125 * registration we do not fail the whole card but rather proceed only
3126 * with the ports we manage to register successfully. However we must
3127 * register at least one net device.
3129 for_each_port(adapter, i) {
3130 err = register_netdev(adapter->port[i]);
3132 dev_warn(&pdev->dev,
3133 "cannot register net device %s, skipping\n",
3134 adapter->port[i]->name);
3137 * Change the name we use for messages to the name of
3138 * the first successfully registered interface.
3140 if (!adapter->registered_device_map)
3141 adapter->name = adapter->port[i]->name;
3143 __set_bit(i, &adapter->registered_device_map);
3146 if (!adapter->registered_device_map) {
3147 dev_err(&pdev->dev, "could not register any net devices\n");
3151 /* Driver's ready. Reflect it on LEDs */
3152 t3_led_ready(adapter);
3154 if (is_offload(adapter)) {
3155 __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
3156 cxgb3_adapter_ofld(adapter);
3159 /* See what interrupts we'll be using */
3160 if (msi > 1 && cxgb_enable_msix(adapter) == 0)
3161 adapter->flags |= USING_MSIX;
3162 else if (msi > 0 && pci_enable_msi(pdev) == 0)
3163 adapter->flags |= USING_MSI;
/* Distribute queue sets now that the vector count is known. */
3165 set_nqsets(adapter);
3167 err = sysfs_create_group(&adapter->port[0]->dev.kobj,
3170 print_port_info(adapter, ai);
/* ---- error unwind (labels partially missing from this extract) ---- */
3174 iounmap(adapter->regs);
3175 for (i = ai->nports - 1; i >= 0; --i)
3176 if (adapter->port[i])
3177 free_netdev(adapter->port[i]);
3183 pci_disable_device(pdev);
3184 out_release_regions:
3185 pci_release_regions(pdev);
3186 pci_set_drvdata(pdev, NULL);
/* PCI remove: tear down everything init_one() set up, in reverse order. */
3190 static void __devexit remove_one(struct pci_dev *pdev)
3192 struct adapter *adapter = pci_get_drvdata(pdev);
3197 t3_sge_stop(adapter);
3198 sysfs_remove_group(&adapter->port[0]->dev.kobj,
3201 if (is_offload(adapter)) {
3202 cxgb3_adapter_unofld(adapter);
3203 if (test_bit(OFFLOAD_DEVMAP_BIT,
3204 &adapter->open_device_map))
3205 offload_close(&adapter->tdev);
/* Unregister only the ports whose register_netdev() succeeded. */
3208 for_each_port(adapter, i)
3209 if (test_bit(i, &adapter->registered_device_map))
3210 unregister_netdev(adapter->port[i]);
3212 t3_stop_sge_timers(adapter);
3213 t3_free_sge_resources(adapter);
3214 cxgb_disable_msi(adapter);
/* Free every allocated net_device, registered or not. */
3216 for_each_port(adapter, i)
3217 if (adapter->port[i])
3218 free_netdev(adapter->port[i]);
3220 iounmap(adapter->regs);
3222 pci_release_regions(pdev);
3223 pci_disable_device(pdev);
3224 pci_set_drvdata(pdev, NULL);
/* PCI driver descriptor.  NOTE(review): the .name and .probe
 * initializers are not visible in this truncated extract. */
3228 static struct pci_driver driver = {
3230 .id_table = cxgb3_pci_tbl,
3232 .remove = __devexit_p(remove_one),
3233 .err_handler = &t3_err_handler,
/* Module init: set up the offload layer, then register the PCI driver. */
3236 static int __init cxgb3_init_module(void)
3240 cxgb3_offload_init();
3242 ret = pci_register_driver(&driver);
/* Module exit: unregister the driver and destroy the shared workqueue
 * created in init_one(). */
3246 static void __exit cxgb3_cleanup_module(void)
3248 pci_unregister_driver(&driver);
3250 destroy_workqueue(cxgb3_wq);
3253 module_init(cxgb3_init_module);
3254 module_exit(cxgb3_cleanup_module);