2 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 #include <linux/module.h>
33 #include <linux/moduleparam.h>
34 #include <linux/init.h>
35 #include <linux/pci.h>
36 #include <linux/dma-mapping.h>
37 #include <linux/netdevice.h>
38 #include <linux/etherdevice.h>
39 #include <linux/if_vlan.h>
40 #include <linux/mii.h>
41 #include <linux/sockios.h>
42 #include <linux/workqueue.h>
43 #include <linux/proc_fs.h>
44 #include <linux/rtnetlink.h>
45 #include <linux/firmware.h>
46 #include <linux/log2.h>
47 #include <asm/uaccess.h>
50 #include "cxgb3_ioctl.h"
52 #include "cxgb3_offload.h"
55 #include "cxgb3_ctl_defs.h"
57 #include "firmware_exports.h"
60 MAX_TXQ_ENTRIES = 16384,
61 MAX_CTRL_TXQ_ENTRIES = 1024,
62 MAX_RSPQ_ENTRIES = 16384,
63 MAX_RX_BUFFERS = 16384,
64 MAX_RX_JUMBO_BUFFERS = 16384,
66 MIN_CTRL_TXQ_ENTRIES = 4,
67 MIN_RSPQ_ENTRIES = 32,
71 #define PORT_MASK ((1 << MAX_NPORTS) - 1)
73 #define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
74 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
75 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
77 #define EEPROM_MAGIC 0x38E2F10C
79 #define CH_DEVICE(devid, ssid, idx) \
80 { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, ssid, 0, 0, idx }
82 static const struct pci_device_id cxgb3_pci_tbl[] = {
83 CH_DEVICE(0x20, 1, 0), /* PE9000 */
84 CH_DEVICE(0x21, 1, 1), /* T302E */
85 CH_DEVICE(0x22, 1, 2), /* T310E */
86 CH_DEVICE(0x23, 1, 3), /* T320X */
87 CH_DEVICE(0x24, 1, 1), /* T302X */
88 CH_DEVICE(0x25, 1, 3), /* T320E */
89 CH_DEVICE(0x26, 1, 2), /* T310X */
90 CH_DEVICE(0x30, 1, 2), /* T3B10 */
91 CH_DEVICE(0x31, 1, 3), /* T3B20 */
92 CH_DEVICE(0x32, 1, 1), /* T3B02 */
96 MODULE_DESCRIPTION(DRV_DESC);
97 MODULE_AUTHOR("Chelsio Communications");
98 MODULE_LICENSE("Dual BSD/GPL");
99 MODULE_VERSION(DRV_VERSION);
100 MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);
102 static int dflt_msg_enable = DFLT_MSG_ENABLE;
104 module_param(dflt_msg_enable, int, 0644);
105 MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");
108 * The driver uses the best interrupt scheme available on a platform in the
109 * order MSI-X, MSI, legacy pin interrupts. This parameter determines which
110 * of these schemes the driver may consider as follows:
112 * msi = 2: choose from among all three options
113 * msi = 1: only consider MSI and pin interrupts
114 * msi = 0: force pin interrupts
118 module_param(msi, int, 0644);
119 MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");
122 * The driver enables offload as a default.
123 * To disable it, use ofld_disable = 1.
126 static int ofld_disable = 0;
128 module_param(ofld_disable, int, 0644);
129 MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");
132 * We have work elements that we need to cancel when an interface is taken
133 * down. Normally the work elements would be executed by keventd but that
134 * can deadlock because of linkwatch. If our close method takes the rtnl
135 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
136 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
137 * for our work to complete. Get our own work queue to solve this.
139 static struct workqueue_struct *cxgb3_wq;
142 * link_report - show link status and link speed/duplex
143 * @p: the port whose settings are to be reported
145 * Shows the link status, speed, and duplex of a port.
/* Log a port's link state: "link down" when the carrier is off, otherwise
 * the speed string and duplex.  NOTE(review): the switch arms that pick the
 * speed string are missing from this extract - confirm against the original. */
147 static void link_report(struct net_device *dev)
149 if (!netif_carrier_ok(dev))
150 printk(KERN_INFO "%s: link down\n", dev->name);
152 const char *s = "10Mbps";
153 const struct port_info *p = netdev_priv(dev);
155 switch (p->link_config.speed) {
167 printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
168 p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
173 * t3_os_link_changed - handle link status changes
174 * @adapter: the adapter associated with the link change
175 * @port_id: the port index whose limk status has changed
176 * @link_stat: the new status of the link
177 * @speed: the new speed setting
178 * @duplex: the new duplex setting
179 * @pause: the new flow-control setting
181 * This is the OS-dependent handler for link status changes. The OS
182 * neutral handler takes care of most of the processing for these events,
183 * then calls this handler for any OS-specific processing.
/* OS-specific half of link-change handling: update the carrier flag, the
 * MAC RX path, and the PHY power state when the link status flips. */
185 void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
186 int speed, int duplex, int pause)
188 struct net_device *dev = adapter->port[port_id];
189 struct port_info *pi = netdev_priv(dev);
190 struct cmac *mac = &pi->mac;
192 /* Skip changes from disabled ports. */
193 if (!netif_running(dev))
196 if (link_stat != netif_carrier_ok(dev)) {
/* Link up: re-enable MAC RX and tell the stack the carrier is back. */
198 t3_mac_enable(mac, MAC_DIRECTION_RX);
199 netif_carrier_on(dev);
/* Link down: power down the PHY, quiesce RX, and restart link
 * negotiation so the link can come back later. */
201 netif_carrier_off(dev);
202 pi->phy.ops->power_down(&pi->phy, 1);
203 t3_mac_disable(mac, MAC_DIRECTION_RX);
204 t3_link_start(&pi->phy, mac, &pi->link_config);
/* Push the interface's current RX mode (promiscuity/multicast list) to
 * the port's MAC. */
211 static void cxgb_set_rxmode(struct net_device *dev)
213 struct t3_rx_mode rm;
214 struct port_info *pi = netdev_priv(dev);
216 init_rx_mode(&rm, dev, dev->mc_list);
217 t3_mac_set_rx_mode(&pi->mac, &rm);
221 * link_start - enable a port
222 * @dev: the device to enable
224 * Performs the MAC and PHY actions needed to enable a port.
/* Bring a port up: program MTU, MAC address and RX mode into the MAC,
 * kick off PHY link negotiation, then enable both MAC directions. */
226 static void link_start(struct net_device *dev)
228 struct t3_rx_mode rm;
229 struct port_info *pi = netdev_priv(dev);
230 struct cmac *mac = &pi->mac;
232 init_rx_mode(&rm, dev, dev->mc_list);
234 t3_mac_set_mtu(mac, dev->mtu);
235 t3_mac_set_address(mac, 0, dev->dev_addr);
236 t3_mac_set_rx_mode(mac, &rm);
237 t3_link_start(&pi->phy, mac, &pi->link_config);
238 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
/* Disable whichever message-signaled interrupt mode is active (MSI-X or
 * MSI) and clear the corresponding adapter flag. */
241 static inline void cxgb_disable_msi(struct adapter *adapter)
243 if (adapter->flags & USING_MSIX) {
244 pci_disable_msix(adapter->pdev);
245 adapter->flags &= ~USING_MSIX;
246 } else if (adapter->flags & USING_MSI) {
247 pci_disable_msi(adapter->pdev);
248 adapter->flags &= ~USING_MSI;
253 * Interrupt handler for asynchronous events used with MSI-X.
/* MSI-X handler for the slow (asynchronous event) interrupt vector;
 * delegates to the common slow-path handler.  NOTE(review): the return
 * statement is missing from this extract. */
255 static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
257 t3_slow_intr_handler(cookie);
262 * Name the MSI-X interrupts.
/* Build human-readable names for the MSI-X vectors: vector 0 gets the
 * adapter name, each data vector gets "<ifname> (queue N)".  Descriptions
 * are explicitly NUL-terminated since snprintf is given n = size - 1. */
264 static void name_msix_vecs(struct adapter *adap)
266 int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;
268 snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
269 adap->msix_info[0].desc[n] = 0;
271 for_each_port(adap, j) {
272 struct net_device *d = adap->port[j];
273 const struct port_info *pi = netdev_priv(d);
275 for (i = 0; i < pi->nqsets; i++, msi_idx++) {
276 snprintf(adap->msix_info[msi_idx].desc, n,
277 "%s (queue %d)", d->name, i);
278 adap->msix_info[msi_idx].desc[n] = 0;
/* Request one MSI-X IRQ per SGE queue set (vector qidx+1 for queue set
 * qidx).  NOTE(review): the error-unwind path and return statements are
 * missing from this extract; the visible free_irq loop appears to be the
 * rollback on a failed request_irq. */
283 static int request_msix_data_irqs(struct adapter *adap)
285 int i, j, err, qidx = 0;
287 for_each_port(adap, i) {
288 int nqsets = adap2pinfo(adap, i)->nqsets;
290 for (j = 0; j < nqsets; ++j) {
291 err = request_irq(adap->msix_info[qidx + 1].vec,
292 t3_intr_handler(adap,
295 adap->msix_info[qidx + 1].desc,
296 &adap->sge.qs[qidx]);
299 free_irq(adap->msix_info[qidx + 1].vec,
300 &adap->sge.qs[qidx]);
310 * setup_rss - configure RSS
313 * Sets up RSS to distribute packets to multiple receive queues. We
314 * configure the RSS CPU lookup table to distribute to the number of HW
315 * receive queues, and the response queue lookup table to narrow that
316 * down to the response queues actually configured for each port.
317 * We always configure the RSS mapping for two ports since the mapping
318 * table has plenty of entries.
/* Program RSS: split the response-queue lookup table in half between the
 * two ports (port 1 entries are offset by port 0's queue count) and enable
 * 2- and 4-tuple tunnel hashing.  NOTE(review): the cpus[] fill loop body
 * is missing from this extract. */
320 static void setup_rss(struct adapter *adap)
323 unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
324 unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
325 u8 cpus[SGE_QSETS + 1];
326 u16 rspq_map[RSS_TABLE_SIZE];
328 for (i = 0; i < SGE_QSETS; ++i)
330 cpus[SGE_QSETS] = 0xff; /* terminator */
332 for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
333 rspq_map[i] = i % nq0;
334 rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
337 t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
338 F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
339 V_RRCPLCPUSIZE(6), cpus, rspq_map);
343 * If we have multiple receive queues per port serviced by NAPI we need one
344 * netdevice per queue as NAPI operates on netdevices. We already have one
345 * netdevice, namely the one associated with the interface, so we use dummy
346 * ones for any additional queues. Note that these netdevices exist purely
347 * so that NAPI has something to work with, they do not represent network
348 * ports and are not registered.
/* Allocate unregistered "dummy" netdevices so NAPI has one netdevice per
 * extra queue set (nqsets - 1 per port); the trailing loop frees already-
 * allocated dummies on failure.  NOTE(review): allocation-failure checks
 * and return statements are missing from this extract. */
350 static int init_dummy_netdevs(struct adapter *adap)
352 int i, j, dummy_idx = 0;
353 struct net_device *nd;
355 for_each_port(adap, i) {
356 struct net_device *dev = adap->port[i];
357 const struct port_info *pi = netdev_priv(dev);
359 for (j = 0; j < pi->nqsets - 1; j++) {
360 if (!adap->dummy_netdev[dummy_idx]) {
363 nd = alloc_netdev(sizeof(*p), "", ether_setup);
370 set_bit(__LINK_STATE_START, &nd->state);
371 adap->dummy_netdev[dummy_idx] = nd;
373 strcpy(adap->dummy_netdev[dummy_idx]->name, dev->name);
/* Unwind: free everything allocated so far. */
380 while (--dummy_idx >= 0) {
381 free_netdev(adap->dummy_netdev[dummy_idx]);
382 adap->dummy_netdev[dummy_idx] = NULL;
388 * Wait until all NAPI handlers are descheduled. This includes the handlers of
389 * both netdevices representing interfaces and the dummy ones for the extra
/* Busy-wait until every NAPI handler - on real ports and on the dummy
 * netdevices - has descheduled (__LINK_STATE_RX_SCHED cleared). */
392 static void quiesce_rx(struct adapter *adap)
395 struct net_device *dev;
397 for_each_port(adap, i) {
399 while (test_bit(__LINK_STATE_RX_SCHED, &dev->state))
403 for (i = 0; i < ARRAY_SIZE(adap->dummy_netdev); i++) {
404 dev = adap->dummy_netdev[i];
406 while (test_bit(__LINK_STATE_RX_SCHED, &dev->state))
412 * setup_sge_qsets - configure SGE Tx/Rx/response queues
415 * Determines how many sets of SGE queues to use and initializes them.
416 * We support multiple queue sets per port if we have MSI-X, otherwise
417 * just one queue set per port.
/* Allocate one SGE queue set per configured queue of each port.  Under
 * MSI-X the IRQ vector index is qset_idx + 1 (vector 0 is the async
 * vector); extra queue sets beyond the first use dummy netdevices.
 * NOTE(review): the return statements are missing from this extract. */
419 static int setup_sge_qsets(struct adapter *adap)
421 int i, j, err, irq_idx = 0, qset_idx = 0, dummy_dev_idx = 0;
422 unsigned int ntxq = SGE_TXQ_PER_SET;
424 if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
427 for_each_port(adap, i) {
428 struct net_device *dev = adap->port[i];
429 const struct port_info *pi = netdev_priv(dev);
431 for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
432 err = t3_sge_alloc_qset(adap, qset_idx, 1,
433 (adap->flags & USING_MSIX) ? qset_idx + 1 :
435 &adap->params.sge.qset[qset_idx], ntxq,
437 adap-> dummy_netdev[dummy_dev_idx++]);
/* On any allocation failure, release everything allocated so far. */
439 t3_free_sge_resources(adap);
/* Generic sysfs show helper: format the attribute via the supplied
 * callback for the netdevice behind this struct device. */
448 static ssize_t attr_show(struct device *d, struct device_attribute *attr,
450 ssize_t(*format) (struct net_device *, char *))
454 /* Synchronize with ioctls that may shut down the device */
456 len = (*format) (to_net_dev(d), buf);
/* Generic sysfs store helper: parse an unsigned value, range-check it
 * against [min_val, max_val], require CAP_NET_ADMIN, and apply it via the
 * supplied setter. */
461 static ssize_t attr_store(struct device *d, struct device_attribute *attr,
462 const char *buf, size_t len,
463 ssize_t(*set) (struct net_device *, unsigned int),
464 unsigned int min_val, unsigned int max_val)
470 if (!capable(CAP_NET_ADMIN))
473 val = simple_strtoul(buf, &endp, 0);
474 if (endp == buf || val < min_val || val > max_val)
478 ret = (*set) (to_net_dev(d), val);
485 #define CXGB3_SHOW(name, val_expr) \
486 static ssize_t format_##name(struct net_device *dev, char *buf) \
488 struct port_info *pi = netdev_priv(dev); \
489 struct adapter *adap = pi->adapter; \
490 return sprintf(buf, "%u\n", val_expr); \
492 static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
495 return attr_show(d, attr, buf, format_##name); \
/* Set the number of MC5 filter TIDs.  Rejected after full init, on rev-0
 * hardware, or when filters + servers would exceed the MC5 size (the
 * bound check's RHS continues on a line missing from this extract). */
498 static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
500 struct port_info *pi = netdev_priv(dev);
501 struct adapter *adap = pi->adapter;
502 int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;
504 if (adap->flags & FULL_INIT_DONE)
506 if (val && adap->params.rev == 0)
508 if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
511 adap->params.mc5.nfilters = val;
/* sysfs store entry point for nfilters; full unsigned range allowed here,
 * real validation happens in set_nfilters(). */
515 static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
516 const char *buf, size_t len)
518 return attr_store(d, attr, buf, len, set_nfilters, 0, ~0);
/* Set the number of MC5 server TIDs.  Rejected after full init or when
 * servers + filters would exceed the MC5 size (bound check's RHS continues
 * on a line missing from this extract). */
521 static ssize_t set_nservers(struct net_device *dev, unsigned int val)
523 struct port_info *pi = netdev_priv(dev);
524 struct adapter *adap = pi->adapter;
526 if (adap->flags & FULL_INIT_DONE)
528 if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
531 adap->params.mc5.nservers = val;
/* sysfs store entry point for nservers; validation is in set_nservers(). */
535 static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
536 const char *buf, size_t len)
538 return attr_store(d, attr, buf, len, set_nservers, 0, ~0);
541 #define CXGB3_ATTR_R(name, val_expr) \
542 CXGB3_SHOW(name, val_expr) \
543 static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
545 #define CXGB3_ATTR_RW(name, val_expr, store_method) \
546 CXGB3_SHOW(name, val_expr) \
547 static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)
549 CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
550 CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
551 CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);
553 static struct attribute *cxgb3_attrs[] = {
554 &dev_attr_cam_size.attr,
555 &dev_attr_nfilters.attr,
556 &dev_attr_nservers.attr,
560 static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };
/* Show a TX traffic-scheduler rate: read the bucket (bpt) and clock (cpt)
 * fields from the TP TM PIO register for the given scheduler and convert
 * to Kbps (bits = bytes * 8; the /125 folds the *8/1000 together with the
 * kHz core-clock units). */
562 static ssize_t tm_attr_show(struct device *d, struct device_attribute *attr,
563 char *buf, int sched)
565 struct port_info *pi = netdev_priv(to_net_dev(d));
566 struct adapter *adap = pi->adapter;
567 unsigned int v, addr, bpt, cpt;
570 addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
572 t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
573 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
576 bpt = (v >> 8) & 0xff;
/* A zero clocks-per-token means the scheduler is disabled. */
579 len = sprintf(buf, "disabled\n");
581 v = (adap->params.vpd.cclk * 1000) / cpt;
582 len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
/* Store a TX scheduler rate (0..10,000,000 Kbps): requires CAP_NET_ADMIN,
 * parses the value and programs it via t3_config_sched(). */
588 static ssize_t tm_attr_store(struct device *d, struct device_attribute *attr,
589 const char *buf, size_t len, int sched)
591 struct port_info *pi = netdev_priv(to_net_dev(d));
592 struct adapter *adap = pi->adapter;
597 if (!capable(CAP_NET_ADMIN))
600 val = simple_strtoul(buf, &endp, 0);
601 if (endp == buf || val > 10000000)
605 ret = t3_config_sched(adap, val, sched);
612 #define TM_ATTR(name, sched) \
613 static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
616 return tm_attr_show(d, attr, buf, sched); \
618 static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
619 const char *buf, size_t len) \
621 return tm_attr_store(d, attr, buf, len, sched); \
623 static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)
634 static struct attribute *offload_attrs[] = {
635 &dev_attr_sched0.attr,
636 &dev_attr_sched1.attr,
637 &dev_attr_sched2.attr,
638 &dev_attr_sched3.attr,
639 &dev_attr_sched4.attr,
640 &dev_attr_sched5.attr,
641 &dev_attr_sched6.attr,
642 &dev_attr_sched7.attr,
646 static struct attribute_group offload_attr_group = {.attrs = offload_attrs };
649 * Sends an sk_buff to an offload queue driver
650 * after dealing with any active network taps.
/* Hand an sk_buff to the offload queue driver (after network taps, per the
 * comment above).  NOTE(review): the return is missing from this extract. */
652 static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
657 ret = t3_offload_tx(tdev, skb);
/* Build and send a CPL_SMT_WRITE_REQ that programs SMT entry 'idx' with
 * the corresponding port's MAC address.  NOTE(review): the alloc_skb
 * failure check is missing from this extract. */
662 static int write_smt_entry(struct adapter *adapter, int idx)
664 struct cpl_smt_write_req *req;
665 struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);
670 req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
671 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
672 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
673 req->mtu_idx = NMTUS - 1; /* should be 0 but there's a T3 bug */
675 memset(req->src_mac1, 0, sizeof(req->src_mac1));
676 memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
678 offload_tx(&adapter->tdev, skb);
/* Populate one SMT entry per port with that port's MAC address. */
682 static int init_smt(struct adapter *adapter)
686 for_each_port(adapter, i)
687 write_smt_entry(adapter, i);
/* Write both ports' MTUs into the TP per-port MTU table register:
 * port 0 in the low 16 bits, port 1 (if present) in the high 16 bits. */
691 static void init_port_mtus(struct adapter *adapter)
693 unsigned int mtus = adapter->port[0]->mtu;
695 if (adapter->port[1])
696 mtus |= adapter->port[1]->mtu << 16;
697 t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
/* Send a firmware packet-scheduler management work request.  Uses
 * __GFP_NOFAIL so the allocation cannot return NULL.  NOTE(review): the
 * lines filling sched/qidx/lo into the request are missing from this
 * extract. */
700 static void send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
704 struct mngt_pktsched_wr *req;
706 skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
707 req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
708 req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
709 req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
715 t3_mgmt_tx(adap, skb);
/* Bind every port's queue sets to scheduler 1 via firmware packet-
 * scheduler commands. */
718 static void bind_qsets(struct adapter *adap)
722 for_each_port(adap, i) {
723 const struct port_info *pi = adap2pinfo(adap, i);
725 for (j = 0; j < pi->nqsets; ++j)
726 send_pktsched_cmd(adap, 1, pi->first_qset + j, -1,
731 #define FW_FNAME "t3fw-%d.%d.%d.bin"
/* Load the firmware image matching the driver's expected version
 * (t3fw-MAJ.MIN.MICRO.bin) via request_firmware() and flash it into the
 * adapter with t3_load_fw().  Always releases the firmware blob. */
733 static int upgrade_fw(struct adapter *adap)
737 const struct firmware *fw;
738 struct device *dev = &adap->pdev->dev;
740 snprintf(buf, sizeof(buf), FW_FNAME, FW_VERSION_MAJOR,
741 FW_VERSION_MINOR, FW_VERSION_MICRO);
742 ret = request_firmware(&fw, buf, dev);
744 dev_err(dev, "could not upgrade firmware: unable to load %s\n",
748 ret = t3_load_fw(adap, fw->data, fw->size);
749 release_firmware(fw);
754 * cxgb_up - enable the adapter
755 * @adapter: adapter being enabled
757 * Called when the first port is enabled, this function performs the
758 * actions necessary to make an adapter operational, such as completing
759 * the initialization of HW modules, and enabling interrupts.
761 * Must be called with the rtnl lock held.
/* One-time adapter bring-up (called when the first port opens, rtnl held):
 * checks/upgrades firmware, initializes HW and queue sets, requests IRQs
 * (MSI-X path requests the async vector plus per-queue vectors; otherwise a
 * single legacy/MSI IRQ), enables interrupts, and binds queue sets once.
 * NOTE(review): many error branches/gotos are missing from this extract. */
763 static int cxgb_up(struct adapter *adap)
767 if (!(adap->flags & FULL_INIT_DONE)) {
768 err = t3_check_fw_version(adap);
770 err = upgrade_fw(adap);
774 err = init_dummy_netdevs(adap);
778 err = t3_init_hw(adap, 0);
/* Tell ULP RX the host page size for TDDP. */
782 t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));
784 err = setup_sge_qsets(adap);
789 adap->flags |= FULL_INIT_DONE;
794 if (adap->flags & USING_MSIX) {
795 name_msix_vecs(adap);
796 err = request_irq(adap->msix_info[0].vec,
797 t3_async_intr_handler, 0,
798 adap->msix_info[0].desc, adap);
802 if (request_msix_data_irqs(adap)) {
/* Data IRQs failed: give back the async vector too. */
803 free_irq(adap->msix_info[0].vec, adap);
806 } else if ((err = request_irq(adap->pdev->irq,
807 t3_intr_handler(adap,
808 adap->sge.qs[0].rspq.
810 (adap->flags & USING_MSI) ?
816 t3_intr_enable(adap);
/* Bind queue sets to schedulers exactly once per MSI-X bring-up. */
818 if ((adap->flags & (USING_MSIX | QUEUES_BOUND)) == USING_MSIX)
820 adap->flags |= QUEUES_BOUND;
825 CH_ERR(adap, "request_irq failed, err %d\n", err);
830 * Release resources when all the ports and offloading have been stopped.
/* Tear down after the last port/offload user: stop the SGE, disable
 * interrupts under work_lock (to sync with the PHY interrupt task), free
 * all IRQs for the active interrupt mode, and drain the driver workqueue. */
832 static void cxgb_down(struct adapter *adapter)
834 t3_sge_stop(adapter);
835 spin_lock_irq(&adapter->work_lock); /* sync with PHY intr task */
836 t3_intr_disable(adapter);
837 spin_unlock_irq(&adapter->work_lock);
839 if (adapter->flags & USING_MSIX) {
/* Free the async vector, then one data vector per queue set. */
842 free_irq(adapter->msix_info[0].vec, adapter);
843 for_each_port(adapter, i)
844 n += adap2pinfo(adapter, i)->nqsets;
846 for (i = 0; i < n; ++i)
847 free_irq(adapter->msix_info[i + 1].vec,
848 &adapter->sge.qs[i]);
850 free_irq(adapter->pdev->irq, adapter);
852 flush_workqueue(cxgb3_wq); /* wait for external IRQ handler */
/* Queue the periodic adapter-check task: use the link-poll period when
 * configured (in tenths of a second), otherwise the stats-update period. */
856 static void schedule_chk_task(struct adapter *adap)
860 timeo = adap->params.linkpoll_period ?
861 (HZ * adap->params.linkpoll_period) / 10 :
862 adap->params.stats_update_period * HZ;
864 queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
/* Enable offload on the adapter: bring the adapter up if no port is open
 * yet, switch TP to offload mode, activate the offload layer, program MTUs,
 * create the scheduler sysfs group (best-effort), and notify registered
 * offload clients.  The tail lines are the error-unwind path. */
867 static int offload_open(struct net_device *dev)
869 struct port_info *pi = netdev_priv(dev);
870 struct adapter *adapter = pi->adapter;
871 struct t3cdev *tdev = dev2t3cdev(dev);
872 int adap_up = adapter->open_device_map & PORT_MASK;
/* Only the first opener proceeds; OFFLOAD_DEVMAP_BIT marks offload active. */
875 if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
878 if (!adap_up && (err = cxgb_up(adapter)) < 0)
881 t3_tp_set_offload_mode(adapter, 1);
882 tdev->lldev = adapter->port[0];
883 err = cxgb3_offload_activate(adapter);
887 init_port_mtus(adapter);
888 t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
889 adapter->params.b_wnd,
890 adapter->params.rev == 0 ?
891 adapter->port[0]->mtu : 0xffff);
894 /* Never mind if the next step fails */
895 sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group);
897 /* Call back all registered clients */
898 cxgb3_add_clients(tdev);
/* Error unwind: restore non-offload mode and release the devmap bit. */
901 /* restore them in case the offload module has changed them */
903 t3_tp_set_offload_mode(adapter, 0);
904 clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
905 cxgb3_set_dummy_ops(tdev);
/* Disable offload: notify clients, remove the scheduler sysfs group,
 * restore dummy t3cdev ops and non-offload TP mode, and deactivate the
 * offload layer (bringing the whole adapter down if nothing else is open). */
910 static int offload_close(struct t3cdev *tdev)
912 struct adapter *adapter = tdev2adap(tdev);
914 if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
917 /* Call back all registered clients */
918 cxgb3_remove_clients(tdev);
920 sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);
923 cxgb3_set_dummy_ops(tdev);
924 t3_tp_set_offload_mode(adapter, 0);
925 clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
927 if (!adapter->open_device_map)
930 cxgb3_offload_deactivate(adapter);
/* ndo open: bring the adapter up on first open, mark this port open, start
 * offload if supported and not disabled by module parameter (failure there
 * is only warned about), then enable port interrupts and the TX queue and
 * schedule the periodic check task. */
934 static int cxgb_open(struct net_device *dev)
936 struct port_info *pi = netdev_priv(dev);
937 struct adapter *adapter = pi->adapter;
938 int other_ports = adapter->open_device_map & PORT_MASK;
941 if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
944 set_bit(pi->port_id, &adapter->open_device_map);
945 if (is_offload(adapter) && !ofld_disable) {
946 err = offload_open(dev);
949 "Could not initialize offload capabilities\n");
953 t3_port_intr_enable(adapter, pi->port_id);
954 netif_start_queue(dev);
956 schedule_chk_task(adapter);
/* ndo stop: quiesce the port (interrupts, TX queue, PHY power-down,
 * carrier, MAC), clear its open bit under work_lock to sync with the
 * update task, cancel the check task when the last port closes, and tear
 * the adapter down when nothing (ports or offload) remains open. */
961 static int cxgb_close(struct net_device *dev)
963 struct port_info *pi = netdev_priv(dev);
964 struct adapter *adapter = pi->adapter;
966 t3_port_intr_disable(adapter, pi->port_id);
967 netif_stop_queue(dev);
968 pi->phy.ops->power_down(&pi->phy, 1);
969 netif_carrier_off(dev);
970 t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
972 spin_lock(&adapter->work_lock); /* sync with update task */
973 clear_bit(pi->port_id, &adapter->open_device_map);
974 spin_unlock(&adapter->work_lock);
976 if (!(adapter->open_device_map & PORT_MASK))
977 cancel_rearming_delayed_workqueue(cxgb3_wq,
978 &adapter->adap_check_task);
980 if (!adapter->open_device_map)
/* ndo get_stats: refresh the MAC hardware counters under stats_lock and
 * translate them into the cached struct net_device_stats. */
986 static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
988 struct port_info *pi = netdev_priv(dev);
989 struct adapter *adapter = pi->adapter;
990 struct net_device_stats *ns = &pi->netstats;
991 const struct mac_stats *pstats;
993 spin_lock(&adapter->stats_lock);
994 pstats = t3_mac_update_stats(&pi->mac);
995 spin_unlock(&adapter->stats_lock);
997 ns->tx_bytes = pstats->tx_octets;
998 ns->tx_packets = pstats->tx_frames;
999 ns->rx_bytes = pstats->rx_octets;
1000 ns->rx_packets = pstats->rx_frames;
1001 ns->multicast = pstats->rx_mcast_frames;
1003 ns->tx_errors = pstats->tx_underrun;
1004 ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
1005 pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
1006 pstats->rx_fifo_ovfl;
1008 /* detailed rx_errors */
1009 ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
1010 ns->rx_over_errors = 0;
1011 ns->rx_crc_errors = pstats->rx_fcs_errs;
1012 ns->rx_frame_errors = pstats->rx_symbol_errs;
1013 ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
1014 ns->rx_missed_errors = pstats->rx_cong_drops;
1016 /* detailed tx_errors */
1017 ns->tx_aborted_errors = 0;
1018 ns->tx_carrier_errors = 0;
1019 ns->tx_fifo_errors = pstats->tx_underrun;
1020 ns->tx_heartbeat_errors = 0;
1021 ns->tx_window_errors = 0;
/* ethtool get_msglevel: return the adapter-wide message-enable bitmap. */
1025 static u32 get_msglevel(struct net_device *dev)
1027 struct port_info *pi = netdev_priv(dev);
1028 struct adapter *adapter = pi->adapter;
1030 return adapter->msg_enable;
/* ethtool set_msglevel: set the adapter-wide message-enable bitmap. */
1033 static void set_msglevel(struct net_device *dev, u32 val)
1035 struct port_info *pi = netdev_priv(dev);
1036 struct adapter *adapter = pi->adapter;
1038 adapter->msg_enable = val;
1041 static char stats_strings[][ETH_GSTRING_LEN] = {
1044 "TxMulticastFramesOK",
1045 "TxBroadcastFramesOK",
1052 "TxFrames128To255 ",
1053 "TxFrames256To511 ",
1054 "TxFrames512To1023 ",
1055 "TxFrames1024To1518 ",
1056 "TxFrames1519ToMax ",
1060 "RxMulticastFramesOK",
1061 "RxBroadcastFramesOK",
1072 "RxFrames128To255 ",
1073 "RxFrames256To511 ",
1074 "RxFrames512To1023 ",
1075 "RxFrames1024To1518 ",
1076 "RxFrames1519ToMax ",
1086 "CheckTXEnToggled ",
/* ethtool stats count: one entry per string in stats_strings. */
1091 static int get_stats_count(struct net_device *dev)
1093 return ARRAY_SIZE(stats_strings);
/* Fixed size of the register dump buffer used by get_regs(). */
1096 #define T3_REGMAP_SIZE (3 * 1024)
1098 static int get_regs_len(struct net_device *dev)
1100 return T3_REGMAP_SIZE;
1103 static int get_eeprom_len(struct net_device *dev)
/* ethtool get_drvinfo: fill driver name/version, PCI bus info, and the
 * firmware version read from the adapter ("N/A" as the fallback; the
 * branch deciding between the two is missing from this extract). */
1108 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1110 struct port_info *pi = netdev_priv(dev);
1111 struct adapter *adapter = pi->adapter;
1114 t3_get_fw_version(adapter, &fw_vers);
1116 strcpy(info->driver, DRV_NAME);
1117 strcpy(info->version, DRV_VERSION);
1118 strcpy(info->bus_info, pci_name(adapter->pdev));
1120 strcpy(info->fw_version, "N/A");
1122 snprintf(info->fw_version, sizeof(info->fw_version),
1124 G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
1125 G_FW_VERSION_MAJOR(fw_vers),
1126 G_FW_VERSION_MINOR(fw_vers),
1127 G_FW_VERSION_MICRO(fw_vers));
/* ethtool get_strings: copy out the statistics name table. */
1131 static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
1133 if (stringset == ETH_SS_STATS)
1134 memcpy(data, stats_strings, sizeof(stats_strings));
/* Sum one per-queue SGE statistic (index idx) across all of a port's
 * queue sets. */
1137 static unsigned long collect_sge_port_stats(struct adapter *adapter,
1138 struct port_info *p, int idx)
1141 unsigned long tot = 0;
1143 for (i = 0; i < p->nqsets; ++i)
1144 tot += adapter->sge.qs[i + p->first_qset].port_stats[idx];
/* ethtool get_ethtool_stats: refresh the MAC counters under stats_lock and
 * emit them in the exact order of stats_strings (TX counters, TX size
 * histogram, RX counters, RX size histogram, PHY FIFO errors, per-queue
 * SGE stats, congestion drops, XGMAC toggle/reset counts). */
1148 static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1151 struct port_info *pi = netdev_priv(dev);
1152 struct adapter *adapter = pi->adapter;
1153 const struct mac_stats *s;
1155 spin_lock(&adapter->stats_lock);
1156 s = t3_mac_update_stats(&pi->mac);
1157 spin_unlock(&adapter->stats_lock);
1159 *data++ = s->tx_octets;
1160 *data++ = s->tx_frames;
1161 *data++ = s->tx_mcast_frames;
1162 *data++ = s->tx_bcast_frames;
1163 *data++ = s->tx_pause;
1164 *data++ = s->tx_underrun;
1165 *data++ = s->tx_fifo_urun;
1167 *data++ = s->tx_frames_64;
1168 *data++ = s->tx_frames_65_127;
1169 *data++ = s->tx_frames_128_255;
1170 *data++ = s->tx_frames_256_511;
1171 *data++ = s->tx_frames_512_1023;
1172 *data++ = s->tx_frames_1024_1518;
1173 *data++ = s->tx_frames_1519_max;
1175 *data++ = s->rx_octets;
1176 *data++ = s->rx_frames;
1177 *data++ = s->rx_mcast_frames;
1178 *data++ = s->rx_bcast_frames;
1179 *data++ = s->rx_pause;
1180 *data++ = s->rx_fcs_errs;
1181 *data++ = s->rx_symbol_errs;
1182 *data++ = s->rx_short;
1183 *data++ = s->rx_jabber;
1184 *data++ = s->rx_too_long;
1185 *data++ = s->rx_fifo_ovfl;
1187 *data++ = s->rx_frames_64;
1188 *data++ = s->rx_frames_65_127;
1189 *data++ = s->rx_frames_128_255;
1190 *data++ = s->rx_frames_256_511;
1191 *data++ = s->rx_frames_512_1023;
1192 *data++ = s->rx_frames_1024_1518;
1193 *data++ = s->rx_frames_1519_max;
1195 *data++ = pi->phy.fifo_errors;
1197 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
1198 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
1199 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
1200 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
1201 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
1202 *data++ = s->rx_cong_drops;
1204 *data++ = s->num_toggled;
1205 *data++ = s->num_resets;
/* Dump the register range [start, end] into buf at offset 'start'
 * (one u32 per register, read via t3_read_reg). */
1208 static inline void reg_block_dump(struct adapter *ap, void *buf,
1209 unsigned int start, unsigned int end)
1211 u32 *p = buf + start;
1213 for (; start <= end; start += sizeof(u32))
1214 *p++ = t3_read_reg(ap, start);
/* ethtool get_regs: zero the T3_REGMAP_SIZE buffer and dump the readable
 * register blocks, skipping clear-on-read MAC statistics registers. */
1217 static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1220 struct port_info *pi = netdev_priv(dev);
1221 struct adapter *ap = pi->adapter;
/* Encode version: chip version in bits 0..9, revision in 10..15,
 * PCIe flag in bit 31 (see the original comment fragment below). */
1225 * bits 0..9: chip version
1226 * bits 10..15: chip revision
1227 * bit 31: set for PCIe cards
1229 regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);
1232 * We skip the MAC statistics registers because they are clear-on-read.
1233 * Also reading multi-register stats would need to synchronize with the
1234 * periodic mac stats accumulation. Hard to justify the complexity.
1236 memset(buf, 0, T3_REGMAP_SIZE);
1237 reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
1238 reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
1239 reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
1240 reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
1241 reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
1242 reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
1243 XGM_REG(A_XGM_SERDES_STAT3, 1));
1244 reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
1245 XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
/* ethtool nway_reset: restart autonegotiation, but only when the
 * interface is running and autoneg is enabled. */
1248 static int restart_autoneg(struct net_device *dev)
1250 struct port_info *p = netdev_priv(dev);
1252 if (!netif_running(dev))
1254 if (p->link_config.autoneg != AUTONEG_ENABLE)
1256 p->phy.ops->autoneg_restart(&p->phy);
/* ethtool phys_id: blink the GPIO0-driven LED for 'data' seconds (toggled
 * every 500 ms), aborting early on a signal; the final write restores the
 * LED state. */
1260 static int cxgb3_phys_id(struct net_device *dev, u32 data)
1262 struct port_info *pi = netdev_priv(dev);
1263 struct adapter *adapter = pi->adapter;
1269 for (i = 0; i < data * 2; i++) {
1270 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1271 (i & 1) ? F_GPIO0_OUT_VAL : 0);
1272 if (msleep_interruptible(500))
1275 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
/* ethtool get_settings: report supported/advertised modes, current
 * speed/duplex when the carrier is up, port type (TP vs fibre), PHY
 * address, external transceiver, and autoneg state. */
1280 static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1282 struct port_info *p = netdev_priv(dev);
1284 cmd->supported = p->link_config.supported;
1285 cmd->advertising = p->link_config.advertising;
1287 if (netif_carrier_ok(dev)) {
1288 cmd->speed = p->link_config.speed;
1289 cmd->duplex = p->link_config.duplex;
1295 cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
1296 cmd->phy_address = p->phy.addr;
1297 cmd->transceiver = XCVR_EXTERNAL;
1298 cmd->autoneg = p->link_config.autoneg;
/* Map a (speed, duplex) pair to the matching SUPPORTED_* capability bit.
 * NOTE(review): the switch/case labels on speed are missing from this
 * extract - only the per-speed duplex branches are visible. */
1304 static int speed_duplex_to_caps(int speed, int duplex)
1310 if (duplex == DUPLEX_FULL)
1311 cap = SUPPORTED_10baseT_Full;
1313 cap = SUPPORTED_10baseT_Half;
1316 if (duplex == DUPLEX_FULL)
1317 cap = SUPPORTED_100baseT_Full;
1319 cap = SUPPORTED_100baseT_Half;
1322 if (duplex == DUPLEX_FULL)
1323 cap = SUPPORTED_1000baseT_Full;
1325 cap = SUPPORTED_1000baseT_Half;
1328 if (duplex == DUPLEX_FULL)
1329 cap = SUPPORTED_10000baseT_Full;
1334 #define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1335 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1336 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
1337 ADVERTISED_10000baseT_Full)
/* ethtool set_settings: with autoneg disabled, validate and pin a fixed
 * speed/duplex (forced 1000 Mbps is rejected); with autoneg enabled,
 * intersect the requested advertisement with the supported mask.  Applies
 * immediately via t3_link_start() when the interface is running. */
1339 static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1341 struct port_info *p = netdev_priv(dev);
1342 struct link_config *lc = &p->link_config;
1344 if (!(lc->supported & SUPPORTED_Autoneg))
1345 return -EOPNOTSUPP; /* can't change speed/duplex */
1347 if (cmd->autoneg == AUTONEG_DISABLE) {
1348 int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
1350 if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
1352 lc->requested_speed = cmd->speed;
1353 lc->requested_duplex = cmd->duplex;
1354 lc->advertising = 0;
/* Autoneg path: advertise only what is both requested and supported. */
1356 cmd->advertising &= ADVERTISED_MASK;
1357 cmd->advertising &= lc->supported;
1358 if (!cmd->advertising)
1360 lc->requested_speed = SPEED_INVALID;
1361 lc->requested_duplex = DUPLEX_INVALID;
1362 lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
1364 lc->autoneg = cmd->autoneg;
1365 if (netif_running(dev))
1366 t3_link_start(&p->phy, &p->mac, lc);
/* ethtool get_pauseparam: report current flow-control configuration. */
1370 static void get_pauseparam(struct net_device *dev,
1371 struct ethtool_pauseparam *epause)
1373 struct port_info *p = netdev_priv(dev);
1375 epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
1376 epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
1377 epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
/*
 * ethtool set_pauseparam: set requested flow control.  With autoneg
 * enabled the link is restarted so pause is renegotiated; otherwise the
 * MAC is reprogrammed directly.
 */
1380 static int set_pauseparam(struct net_device *dev,
1381 struct ethtool_pauseparam *epause)
1383 struct port_info *p = netdev_priv(dev);
1384 struct link_config *lc = &p->link_config;
1386 if (epause->autoneg == AUTONEG_DISABLE)
1387 lc->requested_fc = 0;
1388 else if (lc->supported & SUPPORTED_Autoneg)
1389 lc->requested_fc = PAUSE_AUTONEG;
1393 if (epause->rx_pause)
1394 lc->requested_fc |= PAUSE_RX;
1395 if (epause->tx_pause)
1396 lc->requested_fc |= PAUSE_TX;
1397 if (lc->autoneg == AUTONEG_ENABLE) {
1398 if (netif_running(dev))
1399 t3_link_start(&p->phy, &p->mac, lc);
/* No autoneg: commit the pause bits and push them to the MAC
 * without touching speed/duplex (-1 keeps them unchanged). */
1401 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1402 if (netif_running(dev))
1403 t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
/* ethtool get_rx_csum: report whether Rx checksum offload is enabled. */
1408 static u32 get_rx_csum(struct net_device *dev)
1410 struct port_info *p = netdev_priv(dev);
1412 return p->rx_csum_offload;
/* ethtool set_rx_csum: enable/disable Rx checksum offload for the port. */
1415 static int set_rx_csum(struct net_device *dev, u32 data)
1417 struct port_info *p = netdev_priv(dev);
1419 p->rx_csum_offload = data;
/*
 * ethtool get_ringparam: report SGE queue sizes for the port's first
 * queue set.  rx_mini_* is reused to expose the response-queue size.
 */
1423 static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1425 struct port_info *pi = netdev_priv(dev);
1426 struct adapter *adapter = pi->adapter;
1427 const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];
1429 e->rx_max_pending = MAX_RX_BUFFERS;
1430 e->rx_mini_max_pending = 0;
1431 e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
1432 e->tx_max_pending = MAX_TXQ_ENTRIES;
1434 e->rx_pending = q->fl_size;
1435 e->rx_mini_pending = q->rspq_size;
1436 e->rx_jumbo_pending = q->jumbo_size;
1437 e->tx_pending = q->txq_size[0];
/*
 * ethtool set_ringparam: validate and apply new SGE queue sizes to all
 * of the port's queue sets.  Only allowed before the adapter is fully
 * initialized, since the queues are sized at init time.
 */
1440 static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1442 struct port_info *pi = netdev_priv(dev);
1443 struct adapter *adapter = pi->adapter;
1444 struct qset_params *q;
/* Reject any size outside the hardware's supported range. */
1447 if (e->rx_pending > MAX_RX_BUFFERS ||
1448 e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
1449 e->tx_pending > MAX_TXQ_ENTRIES ||
1450 e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1451 e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1452 e->rx_pending < MIN_FL_ENTRIES ||
1453 e->rx_jumbo_pending < MIN_FL_ENTRIES ||
1454 e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
/* Queues already allocated: too late to resize. */
1457 if (adapter->flags & FULL_INIT_DONE)
/* Apply the same sizes to every queue set owned by this port. */
1460 q = &adapter->params.sge.qset[pi->first_qset];
1461 for (i = 0; i < pi->nqsets; ++i, ++q) {
1462 q->rspq_size = e->rx_mini_pending;
1463 q->fl_size = e->rx_pending;
1464 q->jumbo_size = e->rx_jumbo_pending;
1465 q->txq_size[0] = e->tx_pending;
1466 q->txq_size[1] = e->tx_pending;
1467 q->txq_size[2] = e->tx_pending;
/*
 * ethtool set_coalesce: set the interrupt coalescing time.
 * NOTE(review): only qset[0] is updated here — other queue sets keep
 * their previous setting; confirm this matches get_coalesce's view.
 */
1472 static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1474 struct port_info *pi = netdev_priv(dev);
1475 struct adapter *adapter = pi->adapter;
1476 struct qset_params *qsp = &adapter->params.sge.qset[0];
1477 struct sge_qset *qs = &adapter->sge.qs[0];
/* The hardware timer field limits the maximum coalescing delay. */
1479 if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
1482 qsp->coalesce_usecs = c->rx_coalesce_usecs;
1483 t3_update_qset_coalesce(qs, qsp);
/* ethtool get_coalesce: report the coalescing time of queue set 0. */
1487 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1489 struct port_info *pi = netdev_priv(dev);
1490 struct adapter *adapter = pi->adapter;
1491 struct qset_params *q = adapter->params.sge.qset;
1493 c->rx_coalesce_usecs = q->coalesce_usecs;
/*
 * ethtool get_eeprom: read the serial EEPROM.  The device is read in
 * 32-bit words, so the whole EEPROM is staged in a kernel buffer and
 * the requested byte range copied out of it.
 */
1497 static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
1500 struct port_info *pi = netdev_priv(dev);
1501 struct adapter *adapter = pi->adapter;
1504 u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
1508 e->magic = EEPROM_MAGIC;
/* Start at the word-aligned offset below e->offset and read word-wise. */
1509 for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
1510 err = t3_seeprom_read(adapter, i, (u32 *) & buf[i]);
1513 memcpy(data, buf + e->offset, e->len);
/*
 * ethtool set_eeprom: write the serial EEPROM.  Writes happen in
 * aligned 32-bit words; for unaligned requests the first and last
 * words are read-modified-written.  Write protection is lifted for
 * the duration of the update and restored afterwards.
 */
1518 static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
1521 struct port_info *pi = netdev_priv(dev);
1522 struct adapter *adapter = pi->adapter;
1523 u32 aligned_offset, aligned_len, *p;
/* Guard against writes from tools that don't know this EEPROM layout. */
1527 if (eeprom->magic != EEPROM_MAGIC)
1530 aligned_offset = eeprom->offset & ~3;
1531 aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
/* Unaligned request: stage in a bounce buffer, preserving the bytes
 * of the first/last words that fall outside the caller's range. */
1533 if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
1534 buf = kmalloc(aligned_len, GFP_KERNEL);
1537 err = t3_seeprom_read(adapter, aligned_offset, (u32 *) buf);
1538 if (!err && aligned_len > 4)
1539 err = t3_seeprom_read(adapter,
1540 aligned_offset + aligned_len - 4,
1541 (u32 *) & buf[aligned_len - 4]);
1544 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
/* Disable write protection before programming. */
1548 err = t3_seeprom_wp(adapter, 0);
1552 for (p = (u32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
1553 err = t3_seeprom_write(adapter, aligned_offset, *p);
1554 aligned_offset += 4;
/* Re-enable write protection. */
1558 err = t3_seeprom_wp(adapter, 1);
/* ethtool get_wol: Wake-on-LAN is not supported; report nothing enabled. */
1565 static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1569 memset(&wol->sopass, 0, sizeof(wol->sopass));
/* ethtool operations table; generic ethtool_op_* helpers are used where
 * no hardware-specific handling is needed. */
1572 static const struct ethtool_ops cxgb_ethtool_ops = {
1573 .get_settings = get_settings,
1574 .set_settings = set_settings,
1575 .get_drvinfo = get_drvinfo,
1576 .get_msglevel = get_msglevel,
1577 .set_msglevel = set_msglevel,
1578 .get_ringparam = get_sge_param,
1579 .set_ringparam = set_sge_param,
1580 .get_coalesce = get_coalesce,
1581 .set_coalesce = set_coalesce,
1582 .get_eeprom_len = get_eeprom_len,
1583 .get_eeprom = get_eeprom,
1584 .set_eeprom = set_eeprom,
1585 .get_pauseparam = get_pauseparam,
1586 .set_pauseparam = set_pauseparam,
1587 .get_rx_csum = get_rx_csum,
1588 .set_rx_csum = set_rx_csum,
1589 .get_tx_csum = ethtool_op_get_tx_csum,
1590 .set_tx_csum = ethtool_op_set_tx_csum,
1591 .get_sg = ethtool_op_get_sg,
1592 .set_sg = ethtool_op_set_sg,
1593 .get_link = ethtool_op_get_link,
1594 .get_strings = get_strings,
1595 .phys_id = cxgb3_phys_id,
1596 .nway_reset = restart_autoneg,
1597 .get_stats_count = get_stats_count,
1598 .get_ethtool_stats = get_stats,
1599 .get_regs_len = get_regs_len,
1600 .get_regs = get_regs,
1602 .get_tso = ethtool_op_get_tso,
1603 .set_tso = ethtool_op_set_tso,
/*
 * Range check used by the extension ioctl: a negative value means the
 * parameter was not supplied and is always accepted; otherwise the
 * value must lie within [lo, hi].  Returns non-zero when acceptable.
 */
static int in_range(int val, int lo, int hi)
{
	if (val < 0)
		return 1;	/* "unset" sentinel is always valid */
	return val >= lo && val <= hi;
}
/*
 * Handler for the driver's private (CHELSIO_*) ioctls.  Copies the
 * command header from user space, dispatches on cmd, and performs the
 * requested configuration or query.  Privileged operations require
 * CAP_NET_ADMIN, and most configuration is only allowed before the
 * adapter is fully initialized (FULL_INIT_DONE).
 */
1611 static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1613 struct port_info *pi = netdev_priv(dev);
1614 struct adapter *adapter = pi->adapter;
1618 if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
/* Configure the parameters of one SGE queue set.  Negative fields in
 * the request mean "leave unchanged" (see in_range()). */
1622 case CHELSIO_SET_QSET_PARAMS:{
1624 struct qset_params *q;
1625 struct ch_qset_params t;
1627 if (!capable(CAP_NET_ADMIN))
1629 if (copy_from_user(&t, useraddr, sizeof(t)))
1631 if (t.qset_idx >= SGE_QSETS)
1633 if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
1634 !in_range(t.cong_thres, 0, 255) ||
1635 !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
1637 !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
1639 !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
1640 MAX_CTRL_TXQ_ENTRIES) ||
1641 !in_range(t.fl_size[0], MIN_FL_ENTRIES,
1643 || !in_range(t.fl_size[1], MIN_FL_ENTRIES,
1644 MAX_RX_JUMBO_BUFFERS)
1645 || !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
/* Once the queues exist, only the interrupt latency may change;
 * any sizing/polling field that was supplied is rejected. */
1648 if ((adapter->flags & FULL_INIT_DONE) &&
1649 (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
1650 t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
1651 t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
1652 t.polling >= 0 || t.cong_thres >= 0))
1655 q = &adapter->params.sge.qset[t.qset_idx];
1657 if (t.rspq_size >= 0)
1658 q->rspq_size = t.rspq_size;
1659 if (t.fl_size[0] >= 0)
1660 q->fl_size = t.fl_size[0];
1661 if (t.fl_size[1] >= 0)
1662 q->jumbo_size = t.fl_size[1];
1663 if (t.txq_size[0] >= 0)
1664 q->txq_size[0] = t.txq_size[0];
1665 if (t.txq_size[1] >= 0)
1666 q->txq_size[1] = t.txq_size[1];
1667 if (t.txq_size[2] >= 0)
1668 q->txq_size[2] = t.txq_size[2];
1669 if (t.cong_thres >= 0)
1670 q->cong_thres = t.cong_thres;
1671 if (t.intr_lat >= 0) {
1672 struct sge_qset *qs =
1673 &adapter->sge.qs[t.qset_idx];
1675 q->coalesce_usecs = t.intr_lat;
1676 t3_update_qset_coalesce(qs, q);
1678 if (t.polling >= 0) {
/* With MSI-X each qset can poll independently ... */
1679 if (adapter->flags & USING_MSIX)
1680 q->polling = t.polling;
1682 /* No polling with INTx for T3A */
1683 if (adapter->params.rev == 0 &&
1684 !(adapter->flags & USING_MSI))
/* ... otherwise polling is set globally for all qsets. */
1687 for (i = 0; i < SGE_QSETS; i++) {
1688 q = &adapter->params.sge.
1690 q->polling = t.polling;
/* Return the parameters of one SGE queue set. */
1696 case CHELSIO_GET_QSET_PARAMS:{
1697 struct qset_params *q;
1698 struct ch_qset_params t;
1700 if (copy_from_user(&t, useraddr, sizeof(t)))
1702 if (t.qset_idx >= SGE_QSETS)
1705 q = &adapter->params.sge.qset[t.qset_idx];
1706 t.rspq_size = q->rspq_size;
1707 t.txq_size[0] = q->txq_size[0];
1708 t.txq_size[1] = q->txq_size[1];
1709 t.txq_size[2] = q->txq_size[2];
1710 t.fl_size[0] = q->fl_size;
1711 t.fl_size[1] = q->jumbo_size;
1712 t.polling = q->polling;
1713 t.intr_lat = q->coalesce_usecs;
1714 t.cong_thres = q->cong_thres;
1716 if (copy_to_user(useraddr, &t, sizeof(t)))
/* Change how many queue sets this port owns, then recompute each
 * port's first_qset so the assignment stays contiguous. */
1720 case CHELSIO_SET_QSET_NUM:{
1721 struct ch_reg edata;
1722 struct port_info *pi = netdev_priv(dev);
1723 unsigned int i, first_qset = 0, other_qsets = 0;
1725 if (!capable(CAP_NET_ADMIN))
1727 if (adapter->flags & FULL_INIT_DONE)
1729 if (copy_from_user(&edata, useraddr, sizeof(edata)))
/* More than one qset per port requires MSI-X. */
1731 if (edata.val < 1 ||
1732 (edata.val > 1 && !(adapter->flags & USING_MSIX)))
1735 for_each_port(adapter, i)
1736 if (adapter->port[i] && adapter->port[i] != dev)
1737 other_qsets += adap2pinfo(adapter, i)->nqsets;
1739 if (edata.val + other_qsets > SGE_QSETS)
1742 pi->nqsets = edata.val;
1744 for_each_port(adapter, i)
1745 if (adapter->port[i]) {
1746 pi = adap2pinfo(adapter, i);
1747 pi->first_qset = first_qset;
1748 first_qset += pi->nqsets;
/* Report how many queue sets this port owns. */
1752 case CHELSIO_GET_QSET_NUM:{
1753 struct ch_reg edata;
1754 struct port_info *pi = netdev_priv(dev);
1756 edata.cmd = CHELSIO_GET_QSET_NUM;
1757 edata.val = pi->nqsets;
1758 if (copy_to_user(useraddr, &edata, sizeof(edata)))
/* Load a firmware image supplied by user space into the adapter. */
1762 case CHELSIO_LOAD_FW:{
1764 struct ch_mem_range t;
1766 if (!capable(CAP_NET_ADMIN))
1768 if (copy_from_user(&t, useraddr, sizeof(t)))
1771 fw_data = kmalloc(t.len, GFP_KERNEL);
1776 (fw_data, useraddr + sizeof(t), t.len)) {
1781 ret = t3_load_fw(adapter, fw_data, t.len);
/* Replace the offload MTU table; entries must be ascending. */
1787 case CHELSIO_SETMTUTAB:{
1791 if (!is_offload(adapter))
1793 if (!capable(CAP_NET_ADMIN))
1795 if (offload_running(adapter))
1797 if (copy_from_user(&m, useraddr, sizeof(m)))
1799 if (m.nmtus != NMTUS)
1801 if (m.mtus[0] < 81) /* accommodate SACK */
1804 /* MTUs must be in ascending order */
1805 for (i = 1; i < NMTUS; ++i)
1806 if (m.mtus[i] < m.mtus[i - 1])
1809 memcpy(adapter->params.mtus, m.mtus,
1810 sizeof(adapter->params.mtus));
/* Report the payload-memory (PM) partitioning. */
1813 case CHELSIO_GET_PM:{
1814 struct tp_params *p = &adapter->params.tp;
1815 struct ch_pm m = {.cmd = CHELSIO_GET_PM };
1817 if (!is_offload(adapter))
1819 m.tx_pg_sz = p->tx_pg_size;
1820 m.tx_num_pg = p->tx_num_pgs;
1821 m.rx_pg_sz = p->rx_pg_size;
1822 m.rx_num_pg = p->rx_num_pgs;
1823 m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
1824 if (copy_to_user(useraddr, &m, sizeof(m)))
/* Reconfigure the payload-memory page sizes/counts. */
1828 case CHELSIO_SET_PM:{
1830 struct tp_params *p = &adapter->params.tp;
1832 if (!is_offload(adapter))
1834 if (!capable(CAP_NET_ADMIN))
1836 if (adapter->flags & FULL_INIT_DONE)
1838 if (copy_from_user(&m, useraddr, sizeof(m)))
1840 if (!is_power_of_2(m.rx_pg_sz) ||
1841 !is_power_of_2(m.tx_pg_sz))
1842 return -EINVAL; /* not power of 2 */
/* Masks restrict page sizes to the values the hardware accepts. */
1843 if (!(m.rx_pg_sz & 0x14000))
1844 return -EINVAL; /* not 16KB or 64KB */
1845 if (!(m.tx_pg_sz & 0x1554000))
/* -1 means keep the current page count. */
1847 if (m.tx_num_pg == -1)
1848 m.tx_num_pg = p->tx_num_pgs;
1849 if (m.rx_num_pg == -1)
1850 m.rx_num_pg = p->rx_num_pgs;
1851 if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
1853 if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
1854 m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
1856 p->rx_pg_size = m.rx_pg_sz;
1857 p->tx_pg_size = m.tx_pg_sz;
1858 p->rx_num_pgs = m.rx_num_pg;
1859 p->tx_num_pgs = m.tx_num_pg;
/* Read a range of one of the MC7 memories (CM/PMRX/PMTX) and stream
 * it back to user space in small chunks. */
1862 case CHELSIO_GET_MEM:{
1863 struct ch_mem_range t;
1867 if (!is_offload(adapter))
1869 if (!(adapter->flags & FULL_INIT_DONE))
1870 return -EIO; /* need the memory controllers */
1871 if (copy_from_user(&t, useraddr, sizeof(t)))
/* MC7 reads are done in 8-byte units. */
1873 if ((t.addr & 7) || (t.len & 7))
1875 if (t.mem_id == MEM_CM)
1877 else if (t.mem_id == MEM_PMRX)
1878 mem = &adapter->pmrx;
1879 else if (t.mem_id == MEM_PMTX)
1880 mem = &adapter->pmtx;
1886 * bits 0..9: chip version
1887 * bits 10..15: chip revision
1889 t.version = 3 | (adapter->params.rev << 10);
1890 if (copy_to_user(useraddr, &t, sizeof(t)))
1894 * Read 256 bytes at a time as len can be large and we don't
1895 * want to use huge intermediate buffers.
1897 useraddr += sizeof(t); /* advance to start of buffer */
1899 unsigned int chunk =
1900 min_t(unsigned int, t.len, sizeof(buf));
1903 t3_mc7_bd_read(mem, t.addr / 8, chunk / 8,
1907 if (copy_to_user(useraddr, buf, chunk))
/* Program the packet trace filters (both directions). */
1915 case CHELSIO_SET_TRACE_FILTER:{
1917 const struct trace_params *tp;
1919 if (!capable(CAP_NET_ADMIN))
1921 if (!offload_running(adapter))
1923 if (copy_from_user(&t, useraddr, sizeof(t)))
/* The filter fields of ch_trace are laid out to match
 * struct trace_params starting at sip. */
1926 tp = (const struct trace_params *)&t.sip;
1928 t3_config_trace_filter(adapter, tp, 0,
1932 t3_config_trace_filter(adapter, tp, 1,
/*
 * Standard net_device ioctl handler: services MII register access
 * (SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG) and forwards the private
 * SIOCCHIOCTL to cxgb_extension_ioctl().  On 10G ports the MMD is
 * taken from the upper bits of phy_id (clause-45 addressing).
 */
1943 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
1945 struct mii_ioctl_data *data = if_mii(req);
1946 struct port_info *pi = netdev_priv(dev);
1947 struct adapter *adapter = pi->adapter;
1952 data->phy_id = pi->phy.addr;
1956 struct cphy *phy = &pi->phy;
/* PHYs without an mdio_read hook cannot service this request. */
1958 if (!phy->mdio_read)
1960 if (is_10G(adapter)) {
1961 mmd = data->phy_id >> 8;
1964 else if (mmd > MDIO_DEV_XGXS)
1968 phy->mdio_read(adapter, data->phy_id & 0x1f,
1969 mmd, data->reg_num, &val);
/* 1G path: clause-22 access, register number is 5 bits. */
1972 phy->mdio_read(adapter, data->phy_id & 0x1f,
1973 0, data->reg_num & 0x1f,
1976 data->val_out = val;
1980 struct cphy *phy = &pi->phy;
/* Writing PHY registers is a privileged operation. */
1982 if (!capable(CAP_NET_ADMIN))
1984 if (!phy->mdio_write)
1986 if (is_10G(adapter)) {
1987 mmd = data->phy_id >> 8;
1990 else if (mmd > MDIO_DEV_XGXS)
1994 phy->mdio_write(adapter,
1995 data->phy_id & 0x1f, mmd,
2000 phy->mdio_write(adapter,
2001 data->phy_id & 0x1f, 0,
2002 data->reg_num & 0x1f,
2007 return cxgb_extension_ioctl(dev, req->ifr_data);
/*
 * net_device change_mtu handler: validate and program the new MTU into
 * the MAC, and refresh the offload MTU table on rev-0 (T3A) parts,
 * which need it reloaded when port MTUs change.
 */
2014 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
2016 struct port_info *pi = netdev_priv(dev);
2017 struct adapter *adapter = pi->adapter;
2020 if (new_mtu < 81) /* accommodate SACK */
2022 if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
2025 init_port_mtus(adapter);
2026 if (adapter->params.rev == 0 && offload_running(adapter))
2027 t3_load_mtus(adapter, adapter->params.mtus,
2028 adapter->params.a_wnd, adapter->params.b_wnd,
2029 adapter->port[0]->mtu);
/*
 * net_device set_mac_address handler: validate the address, program it
 * into the MAC, and update the SMT entry if offload is active so
 * offloaded connections use the new source MAC.
 */
2033 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2035 struct port_info *pi = netdev_priv(dev);
2036 struct adapter *adapter = pi->adapter;
2037 struct sockaddr *addr = p;
2039 if (!is_valid_ether_addr(addr->sa_data))
2042 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2043 t3_mac_set_address(&pi->mac, 0, dev->dev_addr);
2044 if (offload_running(adapter))
2045 write_smt_entry(adapter, pi->port_id);
2050 * t3_synchronize_rx - wait for current Rx processing on a port to complete
2051 * @adap: the adapter
2054 * Ensures that current Rx processing on any of the queues associated with
2055 * the given port completes before returning. We do this by acquiring and
2056 * releasing the locks of the response queues associated with the port.
2058 static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
2062 for (i = 0; i < p->nqsets; i++) {
2063 struct sge_rspq *q = &adap->sge.qs[i + p->first_qset].rspq;
/* Taking and dropping the lock guarantees any in-flight Rx handler
 * holding it has finished by the time we return. */
2065 spin_lock_irq(&q->lock);
2066 spin_unlock_irq(&q->lock);
/*
 * VLAN group registration hook: enable/disable VLAN extraction in
 * hardware.  Rev > 0 chips support per-port control; rev 0 has a single
 * global control, enabled if any port has a VLAN group.
 */
2070 static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
2072 struct port_info *pi = netdev_priv(dev);
2073 struct adapter *adapter = pi->adapter;
2076 if (adapter->params.rev > 0)
2077 t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
2079 /* single control for all ports */
2080 unsigned int i, have_vlans = 0;
2081 for_each_port(adapter, i)
2082 have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;
2084 t3_set_vlan_accel(adapter, 1, have_vlans);
/* Make sure no Rx path still sees the old VLAN setting. */
2086 t3_synchronize_rx(adapter, pi);
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * netpoll controller: service the port's queue sets by invoking the
 * appropriate interrupt handler directly (netconsole/kgdb path).
 */
2090 static void cxgb_netpoll(struct net_device *dev)
2092 struct port_info *pi = netdev_priv(dev);
2093 struct adapter *adapter = pi->adapter;
2096 for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
2097 struct sge_qset *qs = &adapter->sge.qs[qidx];
/* With MSI-X each qset has its own vector/handler source. */
2100 if (adapter->flags & USING_MSIX)
2105 t3_intr_handler(adapter, qs->rspq.polling) (0, source);
/* Firmware image name template: chip rev letter + TP SRAM version. */
2110 #define TPSRAM_NAME "t3%c_protocol_sram-%d.%d.%d.bin"
/*
 * Load the protocol SRAM (TP microcode) image matching this chip
 * revision via request_firmware(), validate it, and program it into
 * the adapter.  Returns 0 on success or a negative errno.
 */
2111 int update_tpsram(struct adapter *adap)
2113 const struct firmware *tpsram;
2115 struct device *dev = &adap->pdev->dev;
2119 rev = adap->params.rev == T3_REV_B2 ? 'b' : 'a';
2121 snprintf(buf, sizeof(buf), TPSRAM_NAME, rev,
2122 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
2124 ret = request_firmware(&tpsram, buf, dev);
2126 dev_err(dev, "could not load TP SRAM: unable to load %s\n",
/* Validate the image before touching the hardware. */
2131 ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
2133 goto release_tpsram;
2135 ret = t3_set_proto_sram(adap, tpsram->data);
2137 dev_err(dev, "loading protocol SRAM failed\n");
2140 release_firmware(tpsram);
2147 * Periodic accumulation of MAC statistics.
2149 static void mac_stats_update(struct adapter *adapter)
2153 for_each_port(adapter, i) {
2154 struct net_device *dev = adapter->port[i];
2155 struct port_info *p = netdev_priv(dev);
2157 if (netif_running(dev)) {
/* stats_lock serializes against concurrent stats readers. */
2158 spin_lock(&adapter->stats_lock);
2159 t3_mac_update_stats(&p->mac);
2160 spin_unlock(&adapter->stats_lock);
/* Poll link state for PHYs that do not generate link interrupts. */
2165 static void check_link_status(struct adapter *adapter)
2169 for_each_port(adapter, i) {
2170 struct net_device *dev = adapter->port[i];
2171 struct port_info *p = netdev_priv(dev);
/* Only poll running ports whose PHY lacks interrupt support. */
2173 if (!(p->port_type->caps & SUPPORTED_IRQ) && netif_running(dev))
2174 t3_link_changed(adapter, i);
/*
 * T3B2 MAC watchdog: periodically check each running port's MAC and,
 * if the watchdog reports it hung, fully reprogram and restart it.
 * Takes the rtnl lock (trylock) to serialize against interface down.
 */
2178 static void check_t3b2_mac(struct adapter *adapter)
2182 if (!rtnl_trylock()) /* synchronize with ifdown */
2185 for_each_port(adapter, i) {
2186 struct net_device *dev = adapter->port[i];
2187 struct port_info *p = netdev_priv(dev);
2190 if (!netif_running(dev))
2194 if (netif_running(dev) && netif_carrier_ok(dev))
2195 status = t3b2_mac_watchdog_task(&p->mac);
/* status 1: MAC was toggled; status 2: needs a full reset. */
2197 p->mac.stats.num_toggled++;
2198 else if (status == 2) {
2199 struct cmac *mac = &p->mac;
2201 t3_mac_set_mtu(mac, dev->mtu);
2202 t3_mac_set_address(mac, 0, dev->dev_addr);
2203 cxgb_set_rxmode(dev);
2204 t3_link_start(&p->phy, mac, &p->link_config);
2205 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
2206 t3_port_intr_enable(adapter, p->port_id);
2207 p->mac.stats.num_resets++;
/*
 * Periodic maintenance work: polls link state (for interrupt-less
 * PHYs), accumulates MAC statistics, runs the T3B2 MAC watchdog, and
 * reschedules itself while any port is open.
 */
2214 static void t3_adap_check_task(struct work_struct *work)
2216 struct adapter *adapter = container_of(work, struct adapter,
2217 adap_check_task.work);
2218 const struct adapter_params *p = &adapter->params;
2220 adapter->check_task_cnt++;
2222 /* Check link status for PHYs without interrupts */
2223 if (p->linkpoll_period)
2224 check_link_status(adapter);
2226 /* Accumulate MAC stats if needed */
2227 if (!p->linkpoll_period ||
2228 (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
2229 p->stats_update_period) {
2230 mac_stats_update(adapter);
2231 adapter->check_task_cnt = 0;
2234 if (p->rev == T3_REV_B2)
2235 check_t3b2_mac(adapter);
2237 /* Schedule the next check update if any port is active. */
2238 spin_lock(&adapter->work_lock);
2239 if (adapter->open_device_map & PORT_MASK)
2240 schedule_chk_task(adapter);
2241 spin_unlock(&adapter->work_lock);
2245 * Processes external (PHY) interrupts in process context.
2247 static void ext_intr_task(struct work_struct *work)
2249 struct adapter *adapter = container_of(work, struct adapter,
2250 ext_intr_handler_task);
2252 t3_phy_intr_handler(adapter);
2254 /* Now reenable external interrupts */
2255 spin_lock_irq(&adapter->work_lock);
2256 if (adapter->slow_intr_mask) {
/* Restore F_T3DBG (masked by t3_os_ext_intr_handler), clear any
 * pending cause, and rewrite the enable mask. */
2257 adapter->slow_intr_mask |= F_T3DBG;
2258 t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
2259 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2260 adapter->slow_intr_mask);
2262 spin_unlock_irq(&adapter->work_lock);
2266 * Interrupt-context handler for external (PHY) interrupts.
2268 void t3_os_ext_intr_handler(struct adapter *adapter)
2271 * Schedule a task to handle external interrupts as they may be slow
2272 * and we use a mutex to protect MDIO registers. We disable PHY
2273 * interrupts in the meantime and let the task reenable them when
2276 spin_lock(&adapter->work_lock);
2277 if (adapter->slow_intr_mask) {
/* Mask F_T3DBG until ext_intr_task has serviced the PHY. */
2278 adapter->slow_intr_mask &= ~F_T3DBG;
2279 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2280 adapter->slow_intr_mask);
2281 queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
2283 spin_unlock(&adapter->work_lock);
/*
 * Handle a fatal hardware error: stop the SGE and all interrupts, log
 * the event, and dump the firmware scratch registers for diagnosis.
 */
2286 void t3_fatal_err(struct adapter *adapter)
2288 unsigned int fw_status[4];
2290 if (adapter->flags & FULL_INIT_DONE) {
2291 t3_sge_stop(adapter);
2292 t3_intr_disable(adapter);
2294 CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
/* Firmware status words live in CIM control block at offset 0xa0. */
2295 if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
2296 CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
2297 fw_status[0], fw_status[1],
2298 fw_status[2], fw_status[3]);
/*
 * Try to enable MSI-X with one vector per queue set plus one for slow
 * path events.  On success the vector numbers are recorded in
 * msix_info[]; on failure MSI-X is not used.
 */
2302 static int __devinit cxgb_enable_msix(struct adapter *adap)
2304 struct msix_entry entries[SGE_QSETS + 1];
2307 for (i = 0; i < ARRAY_SIZE(entries); ++i)
2308 entries[i].entry = i;
2310 err = pci_enable_msix(adap->pdev, entries, ARRAY_SIZE(entries));
2312 for (i = 0; i < ARRAY_SIZE(entries); ++i)
2313 adap->msix_info[i].vec = entries[i].vector;
/* Positive err = number of vectors actually available. */
2315 dev_info(&adap->pdev->dev,
2316 "only %d MSI-X vectors left, not using MSI-X\n", err);
/*
 * Log a one-line summary for each registered port (bus type, chip rev,
 * interrupt mode) and, for the primary device, the memory sizes.
 */
2320 static void __devinit print_port_info(struct adapter *adap,
2321 const struct adapter_info *ai)
2323 static const char *pci_variant[] = {
2324 "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
/* PCIe reports lane width; parallel PCI reports clock and bus width. */
2331 snprintf(buf, sizeof(buf), "%s x%d",
2332 pci_variant[adap->params.pci.variant],
2333 adap->params.pci.width);
2335 snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
2336 pci_variant[adap->params.pci.variant],
2337 adap->params.pci.speed, adap->params.pci.width);
2339 for_each_port(adap, i) {
2340 struct net_device *dev = adap->port[i];
2341 const struct port_info *pi = netdev_priv(dev);
/* Skip ports whose netdev registration failed. */
2343 if (!test_bit(i, &adap->registered_device_map))
2345 printk(KERN_INFO "%s: %s %s %sNIC (rev %d) %s%s\n",
2346 dev->name, ai->desc, pi->port_type->desc,
2347 is_offload(adap) ? "R" : "", adap->params.rev, buf,
2348 (adap->flags & USING_MSIX) ? " MSI-X" :
2349 (adap->flags & USING_MSI) ? " MSI" : "");
2350 if (adap->name == dev->name && adap->params.vpd.mclk)
2351 printk(KERN_INFO "%s: %uMB CM, %uMB PMTX, %uMB PMRX\n",
2352 adap->name, t3_mc7_size(&adap->cm) >> 20,
2353 t3_mc7_size(&adap->pmtx) >> 20,
2354 t3_mc7_size(&adap->pmrx) >> 20);
/*
 * PCI probe entry point: enable the device, set up DMA masks, map the
 * register BAR, allocate the adapter and one net_device per port, load
 * TP SRAM if needed, register the netdevs, and pick an interrupt mode
 * (MSI-X > MSI > INTx).  Errors unwind through the out_* labels.
 */
2358 static int __devinit init_one(struct pci_dev *pdev,
2359 const struct pci_device_id *ent)
2361 static int version_printed;
2363 int i, err, pci_using_dac = 0;
2364 unsigned long mmio_start, mmio_len;
2365 const struct adapter_info *ai;
2366 struct adapter *adapter = NULL;
2367 struct port_info *pi;
2369 if (!version_printed) {
2370 printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
/* Single-threaded workqueue shared by all cxgb3 adapters. */
2375 cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
2377 printk(KERN_ERR DRV_NAME
2378 ": cannot initialize work queue\n");
2383 err = pci_request_regions(pdev, DRV_NAME);
2385 /* Just info, some other driver may have claimed the device. */
2386 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
2390 err = pci_enable_device(pdev);
2392 dev_err(&pdev->dev, "cannot enable PCI device\n");
2393 goto out_release_regions;
/* Prefer 64-bit DMA; fall back to 32-bit if unavailable. */
2396 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2398 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2400 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
2401 "coherent allocations\n");
2402 goto out_disable_device;
2404 } else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
2405 dev_err(&pdev->dev, "no usable DMA configuration\n");
2406 goto out_disable_device;
2409 pci_set_master(pdev);
2411 mmio_start = pci_resource_start(pdev, 0);
2412 mmio_len = pci_resource_len(pdev, 0);
2413 ai = t3_get_adapter_info(ent->driver_data);
2415 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
2418 goto out_disable_device;
2421 adapter->regs = ioremap_nocache(mmio_start, mmio_len);
2422 if (!adapter->regs) {
2423 dev_err(&pdev->dev, "cannot map device registers\n");
2425 goto out_free_adapter;
2428 adapter->pdev = pdev;
2429 adapter->name = pci_name(pdev);
2430 adapter->msg_enable = dflt_msg_enable;
2431 adapter->mmio_len = mmio_len;
2433 mutex_init(&adapter->mdio_lock);
2434 spin_lock_init(&adapter->work_lock);
2435 spin_lock_init(&adapter->stats_lock);
2437 INIT_LIST_HEAD(&adapter->adapter_list);
2438 INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
2439 INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
/* Allocate and wire up one net_device per physical port. */
2441 for (i = 0; i < ai->nports; ++i) {
2442 struct net_device *netdev;
2444 netdev = alloc_etherdev(sizeof(struct port_info));
2450 SET_MODULE_OWNER(netdev);
2451 SET_NETDEV_DEV(netdev, &pdev->dev);
2453 adapter->port[i] = netdev;
2454 pi = netdev_priv(netdev);
2455 pi->adapter = adapter;
2456 pi->rx_csum_offload = 1;
2461 netif_carrier_off(netdev);
2462 netdev->irq = pdev->irq;
2463 netdev->mem_start = mmio_start;
2464 netdev->mem_end = mmio_start + mmio_len - 1;
2465 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
2466 netdev->features |= NETIF_F_LLTX;
2468 netdev->features |= NETIF_F_HIGHDMA;
2470 netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
2471 netdev->vlan_rx_register = vlan_rx_register;
2473 netdev->open = cxgb_open;
2474 netdev->stop = cxgb_close;
2475 netdev->hard_start_xmit = t3_eth_xmit;
2476 netdev->get_stats = cxgb_get_stats;
2477 netdev->set_multicast_list = cxgb_set_rxmode;
2478 netdev->do_ioctl = cxgb_ioctl;
2479 netdev->change_mtu = cxgb_change_mtu;
2480 netdev->set_mac_address = cxgb_set_mac_addr;
2481 #ifdef CONFIG_NET_POLL_CONTROLLER
2482 netdev->poll_controller = cxgb_netpoll;
2484 netdev->weight = 64;
2486 SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
2489 pci_set_drvdata(pdev, adapter);
2490 if (t3_prep_adapter(adapter, ai, 1) < 0) {
/* Upgrade the protocol SRAM if the on-chip version is stale. */
2495 err = t3_check_tpsram_version(adapter);
2497 err = update_tpsram(adapter);
2503 * The card is now ready to go. If any errors occur during device
2504 * registration we do not fail the whole card but rather proceed only
2505 * with the ports we manage to register successfully. However we must
2506 * register at least one net device.
2508 for_each_port(adapter, i) {
2509 err = register_netdev(adapter->port[i]);
2511 dev_warn(&pdev->dev,
2512 "cannot register net device %s, skipping\n",
2513 adapter->port[i]->name);
2516 * Change the name we use for messages to the name of
2517 * the first successfully registered interface.
2519 if (!adapter->registered_device_map)
2520 adapter->name = adapter->port[i]->name;
2522 __set_bit(i, &adapter->registered_device_map);
2525 if (!adapter->registered_device_map) {
2526 dev_err(&pdev->dev, "could not register any net devices\n");
2530 /* Driver's ready. Reflect it on LEDs */
2531 t3_led_ready(adapter);
2533 if (is_offload(adapter)) {
2534 __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
2535 cxgb3_adapter_ofld(adapter);
2538 /* See what interrupts we'll be using */
2539 if (msi > 1 && cxgb_enable_msix(adapter) == 0)
2540 adapter->flags |= USING_MSIX;
2541 else if (msi > 0 && pci_enable_msi(pdev) == 0)
2542 adapter->flags |= USING_MSI;
2544 err = sysfs_create_group(&adapter->port[0]->dev.kobj,
2547 print_port_info(adapter, ai);
/* Error unwinding: free in reverse order of acquisition. */
2551 iounmap(adapter->regs);
2552 for (i = ai->nports - 1; i >= 0; --i)
2553 if (adapter->port[i])
2554 free_netdev(adapter->port[i]);
2560 pci_disable_device(pdev);
2561 out_release_regions:
2562 pci_release_regions(pdev);
2563 pci_set_drvdata(pdev, NULL);
/*
 * PCI remove entry point: quiesce the SGE, tear down sysfs, unregister
 * all netdevs, shut down offload, and release every resource acquired
 * in init_one() in reverse order.
 */
2567 static void __devexit remove_one(struct pci_dev *pdev)
2569 struct adapter *adapter = pci_get_drvdata(pdev);
2574 t3_sge_stop(adapter);
2575 sysfs_remove_group(&adapter->port[0]->dev.kobj,
2578 for_each_port(adapter, i)
2579 if (test_bit(i, &adapter->registered_device_map))
2580 unregister_netdev(adapter->port[i]);
2582 if (is_offload(adapter)) {
2583 cxgb3_adapter_unofld(adapter);
2584 if (test_bit(OFFLOAD_DEVMAP_BIT,
2585 &adapter->open_device_map))
2586 offload_close(&adapter->tdev);
2589 t3_free_sge_resources(adapter);
2590 cxgb_disable_msi(adapter);
/* Free the placeholder netdevs used for extra offload queue sets. */
2592 for (i = 0; i < ARRAY_SIZE(adapter->dummy_netdev); i++)
2593 if (adapter->dummy_netdev[i]) {
2594 free_netdev(adapter->dummy_netdev[i]);
2595 adapter->dummy_netdev[i] = NULL;
2598 for_each_port(adapter, i)
2599 if (adapter->port[i])
2600 free_netdev(adapter->port[i]);
2602 iounmap(adapter->regs);
2604 pci_release_regions(pdev);
2605 pci_disable_device(pdev);
2606 pci_set_drvdata(pdev, NULL);
/* PCI driver registration descriptor. */
2610 static struct pci_driver driver = {
2612 .id_table = cxgb3_pci_tbl,
2614 .remove = __devexit_p(remove_one),
/* Module init: set up the offload layer, then register the PCI driver. */
2617 static int __init cxgb3_init_module(void)
2621 cxgb3_offload_init();
2623 ret = pci_register_driver(&driver);
/* Module exit: unregister the driver and destroy the shared workqueue. */
2627 static void __exit cxgb3_cleanup_module(void)
2629 pci_unregister_driver(&driver);
2631 destroy_workqueue(cxgb3_wq);
/* Register module entry/exit points with the kernel. */
2634 module_init(cxgb3_init_module);
2635 module_exit(cxgb3_cleanup_module);