2 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 #include <linux/module.h>
33 #include <linux/moduleparam.h>
34 #include <linux/init.h>
35 #include <linux/pci.h>
36 #include <linux/dma-mapping.h>
37 #include <linux/netdevice.h>
38 #include <linux/etherdevice.h>
39 #include <linux/if_vlan.h>
40 #include <linux/mii.h>
41 #include <linux/sockios.h>
42 #include <linux/workqueue.h>
43 #include <linux/proc_fs.h>
44 #include <linux/rtnetlink.h>
45 #include <linux/firmware.h>
46 #include <linux/log2.h>
47 #include <asm/uaccess.h>
50 #include "cxgb3_ioctl.h"
52 #include "cxgb3_offload.h"
55 #include "cxgb3_ctl_defs.h"
57 #include "firmware_exports.h"
60 MAX_TXQ_ENTRIES = 16384,
61 MAX_CTRL_TXQ_ENTRIES = 1024,
62 MAX_RSPQ_ENTRIES = 16384,
63 MAX_RX_BUFFERS = 16384,
64 MAX_RX_JUMBO_BUFFERS = 16384,
66 MIN_CTRL_TXQ_ENTRIES = 4,
67 MIN_RSPQ_ENTRIES = 32,
71 #define PORT_MASK ((1 << MAX_NPORTS) - 1)
73 #define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
74 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
75 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
77 #define EEPROM_MAGIC 0x38E2F10C
79 #define CH_DEVICE(devid, ssid, idx) \
80 { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, ssid, 0, 0, idx }
82 static const struct pci_device_id cxgb3_pci_tbl[] = {
83 CH_DEVICE(0x20, 1, 0), /* PE9000 */
84 CH_DEVICE(0x21, 1, 1), /* T302E */
85 CH_DEVICE(0x22, 1, 2), /* T310E */
86 CH_DEVICE(0x23, 1, 3), /* T320X */
87 CH_DEVICE(0x24, 1, 1), /* T302X */
88 CH_DEVICE(0x25, 1, 3), /* T320E */
89 CH_DEVICE(0x26, 1, 2), /* T310X */
90 CH_DEVICE(0x30, 1, 2), /* T3B10 */
91 CH_DEVICE(0x31, 1, 3), /* T3B20 */
92 CH_DEVICE(0x32, 1, 1), /* T3B02 */
96 MODULE_DESCRIPTION(DRV_DESC);
97 MODULE_AUTHOR("Chelsio Communications");
98 MODULE_LICENSE("Dual BSD/GPL");
99 MODULE_VERSION(DRV_VERSION);
100 MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);
102 static int dflt_msg_enable = DFLT_MSG_ENABLE;
104 module_param(dflt_msg_enable, int, 0644);
105 MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");
108 * The driver uses the best interrupt scheme available on a platform in the
109 * order MSI-X, MSI, legacy pin interrupts. This parameter determines which
110 * of these schemes the driver may consider as follows:
112 * msi = 2: choose from among all three options
113 * msi = 1: only consider MSI and pin interrupts
114 * msi = 0: force pin interrupts
118 module_param(msi, int, 0644);
119 MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");
122 * The driver enables offload as a default.
123 * To disable it, use ofld_disable = 1.
126 static int ofld_disable = 0;
128 module_param(ofld_disable, int, 0644);
129 MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");
132 * We have work elements that we need to cancel when an interface is taken
133 * down. Normally the work elements would be executed by keventd but that
134 * can deadlock because of linkwatch. If our close method takes the rtnl
135 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
136 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
137 * for our work to complete. Get our own work queue to solve this.
139 static struct workqueue_struct *cxgb3_wq;
142 * link_report - show link status and link speed/duplex
143 * @p: the port whose settings are to be reported
145 * Shows the link status, speed, and duplex of a port.
/* Log a port's link state to the kernel log: "link down" when the carrier
 * is off, otherwise link speed and duplex.
 * NOTE(review): the switch cases that set `s` for speeds other than 10Mbps
 * are missing from this extract. */
147 static void link_report(struct net_device *dev)
149 if (!netif_carrier_ok(dev))
150 printk(KERN_INFO "%s: link down\n", dev->name);
152 const char *s = "10Mbps";  /* default; overridden by speed switch below */
153 const struct port_info *p = netdev_priv(dev);
155 switch (p->link_config.speed) {
167 printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
168 p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
173 * t3_os_link_changed - handle link status changes
174 * @adapter: the adapter associated with the link change
175 * @port_id: the port index whose limk status has changed
176 * @link_stat: the new status of the link
177 * @speed: the new speed setting
178 * @duplex: the new duplex setting
179 * @pause: the new flow-control setting
181 * This is the OS-dependent handler for link status changes. The OS
182 * neutral handler takes care of most of the processing for these events,
183 * then calls this handler for any OS-specific processing.
/* OS-specific half of link-change handling (kernel-doc above). Updates the
 * netdev carrier state and enables/disables the MAC RX path to match the
 * new link status. Only acts on ports that are administratively up. */
185 void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
186 int speed, int duplex, int pause)
188 struct net_device *dev = adapter->port[port_id];
189 struct port_info *pi = netdev_priv(dev);
190 struct cmac *mac = &pi->mac;
192 /* Skip changes from disabled ports. */
193 if (!netif_running(dev))
196 if (link_stat != netif_carrier_ok(dev)) {
198 t3_mac_enable(mac, MAC_DIRECTION_RX);
199 netif_carrier_on(dev);
201 netif_carrier_off(dev);
202 pi->phy.ops->power_down(&pi->phy, 1);  /* save power while link is down */
203 t3_mac_disable(mac, MAC_DIRECTION_RX);
204 t3_link_start(&pi->phy, mac, &pi->link_config);
/* Push the netdev's current RX mode (promiscuity, multicast list) to the
 * port's MAC. */
211 static void cxgb_set_rxmode(struct net_device *dev)
213 struct t3_rx_mode rm;
214 struct port_info *pi = netdev_priv(dev);
216 init_rx_mode(&rm, dev, dev->mc_list);
217 t3_mac_set_rx_mode(&pi->mac, &rm);
221 * link_start - enable a port
222 * @dev: the device to enable
224 * Performs the MAC and PHY actions needed to enable a port.
/* Enable a port: program MTU, MAC address, and RX mode into the MAC,
 * restart the link, then enable the MAC in both directions (kernel-doc
 * above). */
226 static void link_start(struct net_device *dev)
228 struct t3_rx_mode rm;
229 struct port_info *pi = netdev_priv(dev);
230 struct cmac *mac = &pi->mac;
232 init_rx_mode(&rm, dev, dev->mc_list);
234 t3_mac_set_mtu(mac, dev->mtu);
235 t3_mac_set_address(mac, 0, dev->dev_addr);
236 t3_mac_set_rx_mode(mac, &rm);
237 t3_link_start(&pi->phy, mac, &pi->link_config);
238 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
/* Tear down whichever message-signaled interrupt mode is active (MSI-X or
 * MSI) and clear the corresponding adapter flag. No-op for legacy INTx. */
241 static inline void cxgb_disable_msi(struct adapter *adapter)
243 if (adapter->flags & USING_MSIX) {
244 pci_disable_msix(adapter->pdev);
245 adapter->flags &= ~USING_MSIX;
246 } else if (adapter->flags & USING_MSI) {
247 pci_disable_msi(adapter->pdev);
248 adapter->flags &= ~USING_MSI;
253 * Interrupt handler for asynchronous events used with MSI-X.
/* MSI-X vector 0 handler: delegate asynchronous (slow-path) events to the
 * common slow interrupt handler. `cookie` is the adapter pointer passed to
 * request_irq(). */
255 static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
257 t3_slow_intr_handler(cookie);
262 * Name the MSI-X interrupts.
/* Fill in human-readable names for the MSI-X vectors: vector 0 gets the
 * adapter name, then one "<ifname> (queue N)" entry per queue set of each
 * port. Each desc buffer is explicitly NUL-terminated at index n. */
264 static void name_msix_vecs(struct adapter *adap)
266 int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;
268 snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
269 adap->msix_info[0].desc[n] = 0;
271 for_each_port(adap, j) {
272 struct net_device *d = adap->port[j];
273 const struct port_info *pi = netdev_priv(d);
275 for (i = 0; i < pi->nqsets; i++, msi_idx++) {
276 snprintf(adap->msix_info[msi_idx].desc, n,
277 "%s (queue %d)", d->name, i);
278 adap->msix_info[msi_idx].desc[n] = 0;
/* Request one MSI-X interrupt per SGE queue set (vectors 1..N; vector 0 is
 * the async handler). On failure, previously requested vectors are freed
 * in the unwind path.
 * NOTE(review): the error-check and unwind-loop lines are partially
 * missing from this extract. */
283 static int request_msix_data_irqs(struct adapter *adap)
285 int i, j, err, qidx = 0;
287 for_each_port(adap, i) {
288 int nqsets = adap2pinfo(adap, i)->nqsets;
290 for (j = 0; j < nqsets; ++j) {
291 err = request_irq(adap->msix_info[qidx + 1].vec,
292 t3_intr_handler(adap,
295 adap->msix_info[qidx + 1].desc,
296 &adap->sge.qs[qidx]);
299 free_irq(adap->msix_info[qidx + 1].vec,
300 &adap->sge.qs[qidx]);
310 * setup_rss - configure RSS
313 * Sets up RSS to distribute packets to multiple receive queues. We
314 * configure the RSS CPU lookup table to distribute to the number of HW
315 * receive queues, and the response queue lookup table to narrow that
316 * down to the response queues actually configured for each port.
317 * We always configure the RSS mapping for two ports since the mapping
318 * table has plenty of entries.
/* Program the RSS CPU and response-queue lookup tables (kernel-doc above).
 * The first half of the RSS table maps to port 0's queue sets, the second
 * half to port 1's (offset by nq0); a single-port adapter uses nq1 = 1. */
320 static void setup_rss(struct adapter *adap)
323 unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
324 unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
325 u8 cpus[SGE_QSETS + 1];
326 u16 rspq_map[RSS_TABLE_SIZE];
328 for (i = 0; i < SGE_QSETS; ++i)
330 cpus[SGE_QSETS] = 0xff; /* terminator */
332 for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
333 rspq_map[i] = i % nq0;
334 rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
337 t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
338 F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
339 V_RRCPLCPUSIZE(6), cpus, rspq_map);
343 * If we have multiple receive queues per port serviced by NAPI we need one
344 * netdevice per queue as NAPI operates on netdevices. We already have one
345 * netdevice, namely the one associated with the interface, so we use dummy
346 * ones for any additional queues. Note that these netdevices exist purely
347 * so that NAPI has something to work with, they do not represent network
348 * ports and are not registered.
/* Allocate unregistered dummy netdevices so NAPI has one netdev per extra
 * queue set (see the comment block above). One dummy is needed for each
 * queue set beyond the first on every port. The trailing loop is the
 * allocation-failure unwind: free everything built so far. */
350 static int init_dummy_netdevs(struct adapter *adap)
352 int i, j, dummy_idx = 0;
353 struct net_device *nd;
355 for_each_port(adap, i) {
356 struct net_device *dev = adap->port[i];
357 const struct port_info *pi = netdev_priv(dev);
359 for (j = 0; j < pi->nqsets - 1; j++) {
360 if (!adap->dummy_netdev[dummy_idx]) {
363 nd = alloc_netdev(sizeof(*p), "", ether_setup);
370 set_bit(__LINK_STATE_START, &nd->state);
371 adap->dummy_netdev[dummy_idx] = nd;
373 strcpy(adap->dummy_netdev[dummy_idx]->name, dev->name);
380 while (--dummy_idx >= 0) {
381 free_netdev(adap->dummy_netdev[dummy_idx]);
382 adap->dummy_netdev[dummy_idx] = NULL;
388 * Wait until all NAPI handlers are descheduled. This includes the handlers of
389 * both netdevices representing interfaces and the dummy ones for the extra
/* Busy-wait until every NAPI handler has descheduled (see the comment
 * above): first the real port netdevices, then the dummy ones backing the
 * extra queue sets. */
392 static void quiesce_rx(struct adapter *adap)
395 struct net_device *dev;
397 for_each_port(adap, i) {
399 while (test_bit(__LINK_STATE_RX_SCHED, &dev->state))
403 for (i = 0; i < ARRAY_SIZE(adap->dummy_netdev); i++) {
404 dev = adap->dummy_netdev[i];
406 while (test_bit(__LINK_STATE_RX_SCHED, &dev->state))
412 * setup_sge_qsets - configure SGE Tx/Rx/response queues
415 * Determines how many sets of SGE queues to use and initializes them.
416 * We support multiple queue sets per port if we have MSI-X, otherwise
417 * just one queue set per port.
/* Allocate and initialize all SGE queue sets (kernel-doc above). With
 * MSI-X each queue set gets its own vector (qset_idx + 1); otherwise they
 * share the single IRQ. Frees all SGE resources on any allocation error. */
419 static int setup_sge_qsets(struct adapter *adap)
421 int i, j, err, irq_idx = 0, qset_idx = 0, dummy_dev_idx = 0;
422 unsigned int ntxq = SGE_TXQ_PER_SET;
424 if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
427 for_each_port(adap, i) {
428 struct net_device *dev = adap->port[i];
429 const struct port_info *pi = netdev_priv(dev);
431 for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
432 err = t3_sge_alloc_qset(adap, qset_idx, 1,
433 (adap->flags & USING_MSIX) ? qset_idx + 1 :
435 &adap->params.sge.qset[qset_idx], ntxq,
437 adap-> dummy_netdev[dummy_dev_idx++]);
439 t3_free_sge_resources(adap);
/* Generic sysfs "show" helper: format the value for the embedding
 * net_device via the supplied callback. */
448 static ssize_t attr_show(struct device *d, struct device_attribute *attr,
450 ssize_t(*format) (struct net_device *, char *))
454 /* Synchronize with ioctls that may shut down the device */
456 len = (*format) (to_net_dev(d), buf);
/* Generic sysfs "store" helper: parse an unsigned integer from `buf`,
 * range-check it against [min_val, max_val], and apply it with `set`.
 * Requires CAP_NET_ADMIN. */
461 static ssize_t attr_store(struct device *d, struct device_attribute *attr,
462 const char *buf, size_t len,
463 ssize_t(*set) (struct net_device *, unsigned int),
464 unsigned int min_val, unsigned int max_val)
470 if (!capable(CAP_NET_ADMIN))
473 val = simple_strtoul(buf, &endp, 0);
474 if (endp == buf || val < min_val || val > max_val)
478 ret = (*set) (to_net_dev(d), val);
/* Generate a pair of functions for a read-only sysfs attribute:
 * format_<name>() prints `val_expr` (with `pi` and `adap` in scope) and
 * show_<name>() wires it into attr_show(). */
485 #define CXGB3_SHOW(name, val_expr) \
486 static ssize_t format_##name(struct net_device *dev, char *buf) \
488 struct port_info *pi = netdev_priv(dev); \
489 struct adapter *adap = pi->adapter; \
490 return sprintf(buf, "%u\n", val_expr); \
492 static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
495 return attr_show(d, attr, buf, format_##name); \
/* Set the number of MC5 filter TIDs. Rejected after full initialization,
 * on rev-0 hardware (nonzero filters unsupported there), or when the
 * request would not leave room for servers and the minimum TID count. */
498 static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
500 struct port_info *pi = netdev_priv(dev);
501 struct adapter *adap = pi->adapter;
502 int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;
504 if (adap->flags & FULL_INIT_DONE)
506 if (val && adap->params.rev == 0)
508 if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
511 adap->params.mc5.nfilters = val;
/* sysfs store hook for nfilters; range checking is done in set_nfilters. */
515 static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
516 const char *buf, size_t len)
518 return attr_store(d, attr, buf, len, set_nfilters, 0, ~0);
/* Set the number of MC5 server TIDs. Rejected after full initialization or
 * when the request would not fit alongside the configured filters. */
521 static ssize_t set_nservers(struct net_device *dev, unsigned int val)
523 struct port_info *pi = netdev_priv(dev);
524 struct adapter *adap = pi->adapter;
526 if (adap->flags & FULL_INIT_DONE)
528 if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
531 adap->params.mc5.nservers = val;
/* sysfs store hook for nservers; range checking is done in set_nservers. */
535 static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
536 const char *buf, size_t len)
538 return attr_store(d, attr, buf, len, set_nservers, 0, ~0);
/* Declare a read-only (R) or read-write (RW) device attribute backed by
 * the CXGB3_SHOW-generated show function. */
541 #define CXGB3_ATTR_R(name, val_expr) \
542 CXGB3_SHOW(name, val_expr) \
543 static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
545 #define CXGB3_ATTR_RW(name, val_expr, store_method) \
546 CXGB3_SHOW(name, val_expr) \
547 static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)
/* MC5 TCAM sizing attributes exposed under the port's sysfs directory. */
549 CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
550 CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
551 CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);
553 static struct attribute *cxgb3_attrs[] = {
554 &dev_attr_cam_size.attr,
555 &dev_attr_nfilters.attr,
556 &dev_attr_nservers.attr,
560 static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };
/* Show a TX traffic-scheduler rate via sysfs. Reads the scheduler's
 * bytes-per-tick (bpt) and clocks-per-tick (cpt) fields from TP TM PIO
 * space and converts them to Kbps using the core clock (cclk, in kHz);
 * prints "disabled" when the scheduler is off.
 * NOTE(review): locking around the two-register PIO read and the cpt
 * extraction line are missing from this extract. */
562 static ssize_t tm_attr_show(struct device *d, struct device_attribute *attr,
563 char *buf, int sched)
565 struct port_info *pi = netdev_priv(to_net_dev(d));
566 struct adapter *adap = pi->adapter;
567 unsigned int v, addr, bpt, cpt;
570 addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;  /* two schedulers per register */
572 t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
573 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
576 bpt = (v >> 8) & 0xff;
579 len = sprintf(buf, "disabled\n");
581 v = (adap->params.vpd.cclk * 1000) / cpt;  /* ticks per second */
582 len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);  /* bytes/s -> Kbits/s */
/* Store a TX traffic-scheduler rate from sysfs: parse an unsigned value
 * (capped at 10,000,000) and program scheduler `sched` via
 * t3_config_sched(). Requires CAP_NET_ADMIN. */
588 static ssize_t tm_attr_store(struct device *d, struct device_attribute *attr,
589 const char *buf, size_t len, int sched)
591 struct port_info *pi = netdev_priv(to_net_dev(d));
592 struct adapter *adap = pi->adapter;
597 if (!capable(CAP_NET_ADMIN))
600 val = simple_strtoul(buf, &endp, 0);
601 if (endp == buf || val > 10000000)
605 ret = t3_config_sched(adap, val, sched);
/* Generate show/store wrappers plus the DEVICE_ATTR for one TX scheduler
 * (sched0..sched7); the instantiating TM_ATTR(...) lines are not visible
 * in this extract but the resulting dev_attr_schedN symbols are used in
 * offload_attrs[] below. */
612 #define TM_ATTR(name, sched) \
613 static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
616 return tm_attr_show(d, attr, buf, sched); \
618 static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
619 const char *buf, size_t len) \
621 return tm_attr_store(d, attr, buf, len, sched); \
623 static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)
/* Scheduler attributes exposed only while offload is active (group is
 * created in offload_open and removed in offload_close). */
634 static struct attribute *offload_attrs[] = {
635 &dev_attr_sched0.attr,
636 &dev_attr_sched1.attr,
637 &dev_attr_sched2.attr,
638 &dev_attr_sched3.attr,
639 &dev_attr_sched4.attr,
640 &dev_attr_sched5.attr,
641 &dev_attr_sched6.attr,
642 &dev_attr_sched7.attr,
646 static struct attribute_group offload_attr_group = {.attrs = offload_attrs };
649 * Sends an sk_buff to an offload queue driver
650 * after dealing with any active network taps.
/* Hand an sk_buff to the offload queue driver (see comment above);
 * returns t3_offload_tx()'s result. */
652 static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
657 ret = t3_offload_tx(tdev, skb);
/* Build and send a CPL_SMT_WRITE_REQ that programs SMT entry `idx` with
 * port idx's MAC address (src_mac0); src_mac1 is zeroed. The skb is
 * allocated with GFP_KERNEL and handed off via offload_tx().
 * NOTE(review): the allocation-failure check and priority setup lines are
 * missing from this extract. */
662 static int write_smt_entry(struct adapter *adapter, int idx)
664 struct cpl_smt_write_req *req;
665 struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);
670 req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
671 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
672 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
673 req->mtu_idx = NMTUS - 1; /* should be 0 but there's a T3 bug */
675 memset(req->src_mac1, 0, sizeof(req->src_mac1));
676 memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
678 offload_tx(&adapter->tdev, skb);
/* Program an SMT entry for every port. */
682 static int init_smt(struct adapter *adapter)
686 for_each_port(adapter, i)
687 write_smt_entry(adapter, i)
/* Write both ports' MTUs into the per-port MTU table register: port 0 in
 * the low 16 bits, port 1 (if present) in the high 16 bits. */
691 static void init_port_mtus(struct adapter *adapter)
693 unsigned int mtus = adapter->port[0]->mtu;
695 if (adapter->port[1])
696 mtus |= adapter->port[1]->mtu << 16;
697 t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
/* Send a firmware management work request configuring packet scheduler
 * `sched` for queue `qidx`. Uses __GFP_NOFAIL, so the allocation cannot
 * fail and the result is not checked.
 * NOTE(review): the lines filling in sched/qidx/lo and the remaining
 * request fields are missing from this extract. */
700 static void send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
704 struct mngt_pktsched_wr *req;
706 skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
707 req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
708 req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
709 req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
715 t3_mgmt_tx(adap, skb);
/* Bind every port's queue sets to packet scheduler 1 via firmware
 * management commands. */
718 static void bind_qsets(struct adapter *adap)
722 for_each_port(adap, i) {
723 const struct port_info *pi = adap2pinfo(adap, i);
725 for (j = 0; j < pi->nqsets; ++j)
726 send_pktsched_cmd(adap, 1, pi->first_qset + j, -1,
731 #define FW_FNAME "t3fw-%d.%d.%d.bin"
732 #define TPSRAM_NAME "t3%c_protocol_sram-%d.%d.%d.bin"
/* Load the firmware image matching the driver's compiled-in FW version
 * from userspace (request_firmware) and flash it with t3_load_fw().
 * Logs success or failure with the target version. */
734 static int upgrade_fw(struct adapter *adap)
738 const struct firmware *fw;
739 struct device *dev = &adap->pdev->dev;
741 snprintf(buf, sizeof(buf), FW_FNAME, FW_VERSION_MAJOR,
742 FW_VERSION_MINOR, FW_VERSION_MICRO);
743 ret = request_firmware(&fw, buf, dev);
745 dev_err(dev, "could not upgrade firmware: unable to load %s\n",
749 ret = t3_load_fw(adap, fw->data, fw->size);
750 release_firmware(fw);
753 dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
754 FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
756 dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
757 FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
/* Map the adapter's hardware revision to the character used in the
 * protocol-SRAM firmware filename (see TPSRAM_NAME).
 * NOTE(review): the switch cases and return are missing from this
 * extract. */
762 static inline char t3rev2char(struct adapter *adapter)
766 switch(adapter->params.rev) {
/* Load the protocol-engine (TP) SRAM image matching the driver's TP
 * version from userspace, validate it with t3_check_tpsram(), and program
 * it with t3_set_proto_sram(). The firmware blob is released on all
 * paths. */
775 int update_tpsram(struct adapter *adap)
777 const struct firmware *tpsram;
779 struct device *dev = &adap->pdev->dev;
783 rev = t3rev2char(adap);
787 snprintf(buf, sizeof(buf), TPSRAM_NAME, rev,
788 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
790 ret = request_firmware(&tpsram, buf, dev);
792 dev_err(dev, "could not load TP SRAM: unable to load %s\n",
797 ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
801 ret = t3_set_proto_sram(adap, tpsram->data);
804 "successful update of protocol engine "
806 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
808 dev_err(dev, "failed to update of protocol engine %d.%d.%d\n",
809 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
811 dev_err(dev, "loading protocol SRAM failed\n");
814 release_firmware(tpsram);
820 * cxgb_up - enable the adapter
821 * @adapter: adapter being enabled
823 * Called when the first port is enabled, this function performs the
824 * actions necessary to make an adapter operational, such as completing
825 * the initialization of HW modules, and enabling interrupts.
827 * Must be called with the rtnl lock held.
/* Bring the adapter up on first port open (kernel-doc above; rtnl held).
 * One-time path (!FULL_INIT_DONE): verify/upgrade firmware and TP SRAM,
 * allocate dummy netdevs, init HW, and set up SGE queue sets. Every time:
 * request interrupts (MSI-X async vector + per-queue vectors, or a single
 * shared/MSI IRQ), enable interrupts, and bind queue sets to the packet
 * scheduler once (QUEUES_BOUND). */
829 static int cxgb_up(struct adapter *adap)
834 if (!(adap->flags & FULL_INIT_DONE)) {
835 err = t3_check_fw_version(adap);
837 err = upgrade_fw(adap);
841 err = t3_check_tpsram_version(adap, &must_load);
842 if (err == -EINVAL) {
843 err = update_tpsram(adap);
844 if (err && must_load)  /* tolerate failure unless the image is mandatory */
848 err = init_dummy_netdevs(adap);
852 err = t3_init_hw(adap, 0);
856 t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));
858 err = setup_sge_qsets(adap);
863 adap->flags |= FULL_INIT_DONE;
868 if (adap->flags & USING_MSIX) {
869 name_msix_vecs(adap);
870 err = request_irq(adap->msix_info[0].vec,
871 t3_async_intr_handler, 0,
872 adap->msix_info[0].desc, adap);
876 if (request_msix_data_irqs(adap)) {
877 free_irq(adap->msix_info[0].vec, adap);
880 } else if ((err = request_irq(adap->pdev->irq,
881 t3_intr_handler(adap,
882 adap->sge.qs[0].rspq.
884 (adap->flags & USING_MSI) ?
890 t3_intr_enable(adap);
892 if ((adap->flags & (USING_MSIX | QUEUES_BOUND)) == USING_MSIX)
894 adap->flags |= QUEUES_BOUND;
899 CH_ERR(adap, "request_irq failed, err %d\n", err);
904 * Release resources when all the ports and offloading have been stopped.
/* Release resources once all ports and offloading are stopped: stop the
 * SGE, disable interrupts (under work_lock to sync with the PHY interrupt
 * task), free all requested IRQs, and drain the driver workqueue. */
906 static void cxgb_down(struct adapter *adapter)
908 t3_sge_stop(adapter);
909 spin_lock_irq(&adapter->work_lock); /* sync with PHY intr task */
910 t3_intr_disable(adapter);
911 spin_unlock_irq(&adapter->work_lock);
913 if (adapter->flags & USING_MSIX) {
916 free_irq(adapter->msix_info[0].vec, adapter);
917 for_each_port(adapter, i)
918 n += adap2pinfo(adapter, i)->nqsets;
920 for (i = 0; i < n; ++i)
921 free_irq(adapter->msix_info[i + 1].vec,
922 &adapter->sge.qs[i]);
924 free_irq(adapter->pdev->irq, adapter);
926 flush_workqueue(cxgb3_wq); /* wait for external IRQ handler */
/* Queue the periodic adapter-check task: use the link-poll period
 * (stored in tenths of a second) when configured, otherwise the stats
 * update period in seconds. */
930 static void schedule_chk_task(struct adapter *adap)
934 timeo = adap->params.linkpoll_period ?
935 (HZ * adap->params.linkpoll_period) / 10 :
936 adap->params.stats_update_period * HZ;
938 queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
/* Activate offload on the adapter. Sets OFFLOAD_DEVMAP_BIT (idempotent:
 * returns early if already set), brings the adapter up if no port is open
 * yet, enables TP offload mode, activates the offload module, programs
 * port MTUs and the congestion MTU table, then creates the scheduler
 * sysfs group (best-effort) and notifies registered offload clients.
 * The trailing lines are the failure unwind. */
941 static int offload_open(struct net_device *dev)
943 struct port_info *pi = netdev_priv(dev);
944 struct adapter *adapter = pi->adapter;
945 struct t3cdev *tdev = dev2t3cdev(dev);
946 int adap_up = adapter->open_device_map & PORT_MASK;
949 if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
952 if (!adap_up && (err = cxgb_up(adapter)) < 0)
955 t3_tp_set_offload_mode(adapter, 1);
956 tdev->lldev = adapter->port[0];
957 err = cxgb3_offload_activate(adapter);
961 init_port_mtus(adapter);
962 t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
963 adapter->params.b_wnd,
964 adapter->params.rev == 0 ?
965 adapter->port[0]->mtu : 0xffff);
968 /* Never mind if the next step fails */
969 sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group);
971 /* Call back all registered clients */
972 cxgb3_add_clients(tdev);
975 /* restore them in case the offload module has changed them */
977 t3_tp_set_offload_mode(adapter, 0);
978 clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
979 cxgb3_set_dummy_ops(tdev);
/* Deactivate offload: notify clients, remove the scheduler sysfs group,
 * restore dummy t3cdev ops, leave TP offload mode, clear the devmap bit,
 * and if no ports remain open, deactivate the offload module entirely. */
984 static int offload_close(struct t3cdev *tdev)
986 struct adapter *adapter = tdev2adap(tdev);
988 if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
991 /* Call back all registered clients */
992 cxgb3_remove_clients(tdev);
994 sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);
997 cxgb3_set_dummy_ops(tdev);
998 t3_tp_set_offload_mode(adapter, 0);
999 clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
1001 if (!adapter->open_device_map)
1004 cxgb3_offload_deactivate(adapter);
/* net_device ndo open: bring the adapter up on the first open, mark the
 * port open in the devmap, optionally start offload (failure is logged
 * but non-fatal), enable port interrupts, start the TX queue, and kick
 * off the periodic check task. */
1008 static int cxgb_open(struct net_device *dev)
1010 struct port_info *pi = netdev_priv(dev);
1011 struct adapter *adapter = pi->adapter;
1012 int other_ports = adapter->open_device_map & PORT_MASK;
1015 if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
1018 set_bit(pi->port_id, &adapter->open_device_map);
1019 if (is_offload(adapter) && !ofld_disable) {
1020 err = offload_open(dev);
1023 "Could not initialize offload capabilities\n");
1027 t3_port_intr_enable(adapter, pi->port_id);
1028 netif_start_queue(dev);
1030 schedule_chk_task(adapter);
/* net_device ndo stop: disable port interrupts and TX, power down the
 * PHY, drop carrier, disable the MAC, and clear the port from the devmap
 * (under work_lock to sync with the update task). When the last port
 * closes, cancel the periodic check task; when the whole devmap empties,
 * the adapter is torn down (trailing lines missing from this extract). */
1035 static int cxgb_close(struct net_device *dev)
1037 struct port_info *pi = netdev_priv(dev);
1038 struct adapter *adapter = pi->adapter;
1040 t3_port_intr_disable(adapter, pi->port_id);
1041 netif_stop_queue(dev);
1042 pi->phy.ops->power_down(&pi->phy, 1);
1043 netif_carrier_off(dev);
1044 t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
1046 spin_lock(&adapter->work_lock); /* sync with update task */
1047 clear_bit(pi->port_id, &adapter->open_device_map);
1048 spin_unlock(&adapter->work_lock);
1050 if (!(adapter->open_device_map & PORT_MASK))
1051 cancel_rearming_delayed_workqueue(cxgb3_wq,
1052 &adapter->adap_check_task);
1054 if (!adapter->open_device_map)
/* net_device get_stats: refresh the MAC statistics under stats_lock and
 * translate them into the generic net_device_stats counters. */
1060 static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
1062 struct port_info *pi = netdev_priv(dev);
1063 struct adapter *adapter = pi->adapter;
1064 struct net_device_stats *ns = &pi->netstats;
1065 const struct mac_stats *pstats;
1067 spin_lock(&adapter->stats_lock);
1068 pstats = t3_mac_update_stats(&pi->mac);
1069 spin_unlock(&adapter->stats_lock);
1071 ns->tx_bytes = pstats->tx_octets;
1072 ns->tx_packets = pstats->tx_frames;
1073 ns->rx_bytes = pstats->rx_octets;
1074 ns->rx_packets = pstats->rx_frames;
1075 ns->multicast = pstats->rx_mcast_frames;
1077 ns->tx_errors = pstats->tx_underrun;
1078 ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
1079 pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
1080 pstats->rx_fifo_ovfl;
1082 /* detailed rx_errors */
1083 ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
1084 ns->rx_over_errors = 0;
1085 ns->rx_crc_errors = pstats->rx_fcs_errs;
1086 ns->rx_frame_errors = pstats->rx_symbol_errs;
1087 ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
1088 ns->rx_missed_errors = pstats->rx_cong_drops;
1090 /* detailed tx_errors */
1091 ns->tx_aborted_errors = 0;
1092 ns->tx_carrier_errors = 0;
1093 ns->tx_fifo_errors = pstats->tx_underrun;
1094 ns->tx_heartbeat_errors = 0;
1095 ns->tx_window_errors = 0;
/* ethtool get_msglevel: return the adapter-wide message-enable bitmap. */
1099 static u32 get_msglevel(struct net_device *dev)
1101 struct port_info *pi = netdev_priv(dev);
1102 struct adapter *adapter = pi->adapter;
1104 return adapter->msg_enable;
/* ethtool set_msglevel: set the adapter-wide message-enable bitmap. */
1107 static void set_msglevel(struct net_device *dev, u32 val)
1109 struct port_info *pi = netdev_priv(dev);
1110 struct adapter *adapter = pi->adapter;
1112 adapter->msg_enable = val;
/* ethtool statistics names; order must match the *data++ sequence in
 * get_stats() below. (Many entries are missing from this extract.) */
1115 static char stats_strings[][ETH_GSTRING_LEN] = {
1118 "TxMulticastFramesOK",
1119 "TxBroadcastFramesOK",
1126 "TxFrames128To255 ",
1127 "TxFrames256To511 ",
1128 "TxFrames512To1023 ",
1129 "TxFrames1024To1518 ",
1130 "TxFrames1519ToMax ",
1134 "RxMulticastFramesOK",
1135 "RxBroadcastFramesOK",
1146 "RxFrames128To255 ",
1147 "RxFrames256To511 ",
1148 "RxFrames512To1023 ",
1149 "RxFrames1024To1518 ",
1150 "RxFrames1519ToMax ",
1160 "CheckTXEnToggled ",
/* ethtool: number of statistics entries. */
1165 static int get_stats_count(struct net_device *dev)
1167 return ARRAY_SIZE(stats_strings);
/* Size of the register dump returned by get_regs(). */
1170 #define T3_REGMAP_SIZE (3 * 1024)
1172 static int get_regs_len(struct net_device *dev)
1174 return T3_REGMAP_SIZE;
/* ethtool: EEPROM size (return value missing from this extract). */
1177 static int get_eeprom_len(struct net_device *dev)
/* ethtool get_drvinfo: fill in driver name/version, PCI bus info, and a
 * combined firmware + TP microcode version string ("N/A" when no version
 * is available). */
1182 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1184 struct port_info *pi = netdev_priv(dev);
1185 struct adapter *adapter = pi->adapter;
1189 t3_get_fw_version(adapter, &fw_vers);
1190 t3_get_tp_version(adapter, &tp_vers);
1192 strcpy(info->driver, DRV_NAME);
1193 strcpy(info->version, DRV_VERSION);
1194 strcpy(info->bus_info, pci_name(adapter->pdev));
1196 strcpy(info->fw_version, "N/A");
1198 snprintf(info->fw_version, sizeof(info->fw_version),
1199 "%s %u.%u.%u TP %u.%u.%u",
1200 G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
1201 G_FW_VERSION_MAJOR(fw_vers),
1202 G_FW_VERSION_MINOR(fw_vers),
1203 G_FW_VERSION_MICRO(fw_vers),
1204 G_TP_VERSION_MAJOR(tp_vers),
1205 G_TP_VERSION_MINOR(tp_vers),
1206 G_TP_VERSION_MICRO(tp_vers));
/* ethtool get_strings: copy out the statistics name table. */
1210 static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
1212 if (stringset == ETH_SS_STATS)
1213 memcpy(data, stats_strings, sizeof(stats_strings));
/* Sum the per-queue-set SGE counter `idx` across all of port p's queue
 * sets. */
1216 static unsigned long collect_sge_port_stats(struct adapter *adapter,
1217 struct port_info *p, int idx)
1220 unsigned long tot = 0;
1222 for (i = 0; i < p->nqsets; ++i)
1223 tot += adapter->sge.qs[i + p->first_qset].port_stats[idx];
/* ethtool get_ethtool_stats: refresh MAC stats under stats_lock and write
 * them out in the same order as stats_strings[], followed by PHY fifo
 * errors, aggregated SGE per-port counters, and MAC health counters. */
1227 static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1230 struct port_info *pi = netdev_priv(dev);
1231 struct adapter *adapter = pi->adapter;
1232 const struct mac_stats *s;
1234 spin_lock(&adapter->stats_lock);
1235 s = t3_mac_update_stats(&pi->mac);
1236 spin_unlock(&adapter->stats_lock);
1238 *data++ = s->tx_octets;
1239 *data++ = s->tx_frames;
1240 *data++ = s->tx_mcast_frames;
1241 *data++ = s->tx_bcast_frames;
1242 *data++ = s->tx_pause;
1243 *data++ = s->tx_underrun;
1244 *data++ = s->tx_fifo_urun;
1246 *data++ = s->tx_frames_64;
1247 *data++ = s->tx_frames_65_127;
1248 *data++ = s->tx_frames_128_255;
1249 *data++ = s->tx_frames_256_511;
1250 *data++ = s->tx_frames_512_1023;
1251 *data++ = s->tx_frames_1024_1518;
1252 *data++ = s->tx_frames_1519_max;
1254 *data++ = s->rx_octets;
1255 *data++ = s->rx_frames;
1256 *data++ = s->rx_mcast_frames;
1257 *data++ = s->rx_bcast_frames;
1258 *data++ = s->rx_pause;
1259 *data++ = s->rx_fcs_errs;
1260 *data++ = s->rx_symbol_errs;
1261 *data++ = s->rx_short;
1262 *data++ = s->rx_jabber;
1263 *data++ = s->rx_too_long;
1264 *data++ = s->rx_fifo_ovfl;
1266 *data++ = s->rx_frames_64;
1267 *data++ = s->rx_frames_65_127;
1268 *data++ = s->rx_frames_128_255;
1269 *data++ = s->rx_frames_256_511;
1270 *data++ = s->rx_frames_512_1023;
1271 *data++ = s->rx_frames_1024_1518;
1272 *data++ = s->rx_frames_1519_max;
1274 *data++ = pi->phy.fifo_errors;
1276 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
1277 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
1278 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
1279 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
1280 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
1281 *data++ = s->rx_cong_drops;
1283 *data++ = s->num_toggled;
1284 *data++ = s->num_resets;
/* Copy the register range [start, end] into `buf` at byte offset `start`,
 * one 32-bit read at a time. */
1287 static inline void reg_block_dump(struct adapter *ap, void *buf,
1288 unsigned int start, unsigned int end)
1290 u32 *p = buf + start;
1292 for (; start <= end; start += sizeof(u32))
1293 *p++ = t3_read_reg(ap, start);
/* ethtool get_regs: dump the chip's register blocks into `buf`, skipping
 * the clear-on-read MAC statistics registers (see comment below).
 * regs->version encodes chip version, revision, and a PCIe flag. */
1296 static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1299 struct port_info *pi = netdev_priv(dev);
1300 struct adapter *ap = pi->adapter;
1304 * bits 0..9: chip version
1305 * bits 10..15: chip revision
1306 * bit 31: set for PCIe cards
1308 regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);
1311 * We skip the MAC statistics registers because they are clear-on-read.
1312 * Also reading multi-register stats would need to synchronize with the
1313 * periodic mac stats accumulation. Hard to justify the complexity.
1315 memset(buf, 0, T3_REGMAP_SIZE);
1316 reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
1317 reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
1318 reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
1319 reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
1320 reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
1321 reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
1322 XGM_REG(A_XGM_SERDES_STAT3, 1));
1323 reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
1324 XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
/* ethtool nway_reset: restart autonegotiation. Only valid while the
 * interface is running and autoneg is enabled. */
1327 static int restart_autoneg(struct net_device *dev)
1329 struct port_info *p = netdev_priv(dev);
1331 if (!netif_running(dev))
1333 if (p->link_config.autoneg != AUTONEG_ENABLE)
1335 p->phy.ops->autoneg_restart(&p->phy);
/* ethtool phys_id: blink the port-identification LED by toggling GPIO0
 * every 500ms for `data` seconds (two toggles per second), then restore
 * the GPIO. Interruptible via msleep_interruptible(). */
1339 static int cxgb3_phys_id(struct net_device *dev, u32 data)
1341 struct port_info *pi = netdev_priv(dev);
1342 struct adapter *adapter = pi->adapter;
1348 for (i = 0; i < data * 2; i++) {
1349 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1350 (i & 1) ? F_GPIO0_OUT_VAL : 0);
1351 if (msleep_interruptible(500))
1354 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
/* ethtool get_settings: report link capabilities and current state.
 * Speed/duplex are only meaningful while the carrier is up. */
1359 static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1361 struct port_info *p = netdev_priv(dev);
1363 cmd->supported = p->link_config.supported;
1364 cmd->advertising = p->link_config.advertising;
1366 if (netif_carrier_ok(dev)) {
1367 cmd->speed = p->link_config.speed;
1368 cmd->duplex = p->link_config.duplex;
1374 cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
1375 cmd->phy_address = p->phy.addr;
/* PHYs on these adapters are always external to the MAC. */
1376 cmd->transceiver = XCVR_EXTERNAL;
1377 cmd->autoneg = p->link_config.autoneg;
/* Map an ethtool (speed, duplex) pair to the matching SUPPORTED_* capability
 * bit. Used by set_settings() to validate a forced-speed request against the
 * port's supported capabilities. */
1383 static int speed_duplex_to_caps(int speed, int duplex)
1389 if (duplex == DUPLEX_FULL)
1390 cap = SUPPORTED_10baseT_Full;
1392 cap = SUPPORTED_10baseT_Half;
1395 if (duplex == DUPLEX_FULL)
1396 cap = SUPPORTED_100baseT_Full;
1398 cap = SUPPORTED_100baseT_Half;
1401 if (duplex == DUPLEX_FULL)
1402 cap = SUPPORTED_1000baseT_Full;
1404 cap = SUPPORTED_1000baseT_Half;
/* 10G has no half-duplex mode. */
1407 if (duplex == DUPLEX_FULL)
1408 cap = SUPPORTED_10000baseT_Full;
/* All speed/duplex advertising bits this driver accepts from userspace;
 * set_settings() masks the user's request with this before applying it. */
1413 #define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1414 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1415 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
1416 ADVERTISED_10000baseT_Full)
/* ethtool set_settings: apply a new speed/duplex/autoneg configuration.
 * Restarts the link immediately if the interface is running. */
1418 static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1420 struct port_info *p = netdev_priv(dev);
1421 struct link_config *lc = &p->link_config;
1423 if (!(lc->supported & SUPPORTED_Autoneg))
1424 return -EOPNOTSUPP; /* can't change speed/duplex */
1426 if (cmd->autoneg == AUTONEG_DISABLE) {
1427 int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
/* Forced speed must be supported; 1G cannot be forced on this hardware. */
1429 if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
1431 lc->requested_speed = cmd->speed;
1432 lc->requested_duplex = cmd->duplex;
1433 lc->advertising = 0;
/* Autoneg path: clamp the advertised modes to what we recognize and
 * what the port supports; reject an empty advertising set. */
1435 cmd->advertising &= ADVERTISED_MASK;
1436 cmd->advertising &= lc->supported;
1437 if (!cmd->advertising)
1439 lc->requested_speed = SPEED_INVALID;
1440 lc->requested_duplex = DUPLEX_INVALID;
1441 lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
1443 lc->autoneg = cmd->autoneg;
1444 if (netif_running(dev))
1445 t3_link_start(&p->phy, &p->mac, lc);
/* ethtool get_pauseparam: report the current flow-control configuration. */
1449 static void get_pauseparam(struct net_device *dev,
1450 struct ethtool_pauseparam *epause)
1452 struct port_info *p = netdev_priv(dev);
/* requested_fc holds policy (incl. autoneg); fc holds the resolved state. */
1454 epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
1455 epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
1456 epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
/* ethtool set_pauseparam: update RX/TX pause and pause-autoneg settings.
 * With autoneg enabled the link is restarted to renegotiate; otherwise the
 * MAC flow-control is reprogrammed directly. */
1459 static int set_pauseparam(struct net_device *dev,
1460 struct ethtool_pauseparam *epause)
1462 struct port_info *p = netdev_priv(dev);
1463 struct link_config *lc = &p->link_config;
1465 if (epause->autoneg == AUTONEG_DISABLE)
1466 lc->requested_fc = 0;
1467 else if (lc->supported & SUPPORTED_Autoneg)
1468 lc->requested_fc = PAUSE_AUTONEG;
1472 if (epause->rx_pause)
1473 lc->requested_fc |= PAUSE_RX;
1474 if (epause->tx_pause)
1475 lc->requested_fc |= PAUSE_TX;
1476 if (lc->autoneg == AUTONEG_ENABLE) {
1477 if (netif_running(dev))
1478 t3_link_start(&p->phy, &p->mac, lc);
/* Forced mode: apply pause bits now; -1 keeps current speed/duplex. */
1480 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1481 if (netif_running(dev))
1482 t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
/* ethtool get_rx_csum: report whether RX checksum offload is enabled. */
1487 static u32 get_rx_csum(struct net_device *dev)
1489 struct port_info *p = netdev_priv(dev);
1491 return p->rx_csum_offload;
/* ethtool set_rx_csum: enable/disable RX checksum offload for this port. */
1494 static int set_rx_csum(struct net_device *dev, u32 data)
1496 struct port_info *p = netdev_priv(dev);
1498 p->rx_csum_offload = data;
/* ethtool get_ringparam: report SGE queue sizes for the port's first qset.
 * The "mini" ring slot is reused to expose the response-queue size. */
1502 static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1504 struct port_info *pi = netdev_priv(dev);
1505 struct adapter *adapter = pi->adapter;
1506 const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];
1508 e->rx_max_pending = MAX_RX_BUFFERS;
1509 e->rx_mini_max_pending = 0;
1510 e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
1511 e->tx_max_pending = MAX_TXQ_ENTRIES;
1513 e->rx_pending = q->fl_size;
1514 e->rx_mini_pending = q->rspq_size;
1515 e->rx_jumbo_pending = q->jumbo_size;
1516 e->tx_pending = q->txq_size[0];
/* ethtool set_ringparam: resize the SGE rings for all of this port's qsets.
 * Only allowed before the adapter is fully initialized (rings allocated). */
1519 static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1521 struct port_info *pi = netdev_priv(dev);
1522 struct adapter *adapter = pi->adapter;
1523 struct qset_params *q;
/* Range-check every requested ring size before touching anything. */
1526 if (e->rx_pending > MAX_RX_BUFFERS ||
1527 e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
1528 e->tx_pending > MAX_TXQ_ENTRIES ||
1529 e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1530 e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1531 e->rx_pending < MIN_FL_ENTRIES ||
1532 e->rx_jumbo_pending < MIN_FL_ENTRIES ||
1533 e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
/* Rings already allocated; size changes would require a full restart. */
1536 if (adapter->flags & FULL_INIT_DONE)
/* Apply the same sizes to every qset owned by this port; the single
 * tx_pending value sizes all three TX queues of each qset. */
1539 q = &adapter->params.sge.qset[pi->first_qset];
1540 for (i = 0; i < pi->nqsets; ++i, ++q) {
1541 q->rspq_size = e->rx_mini_pending;
1542 q->fl_size = e->rx_pending;
1543 q->jumbo_size = e->rx_jumbo_pending;
1544 q->txq_size[0] = e->tx_pending;
1545 q->txq_size[1] = e->tx_pending;
1546 q->txq_size[2] = e->tx_pending;
/* ethtool set_coalesce: set the RX interrupt holdoff timer.
 * NOTE(review): only qset 0 is updated here — presumably a single global
 * coalescing setting for the adapter; confirm against the hardware model. */
1551 static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1553 struct port_info *pi = netdev_priv(dev);
1554 struct adapter *adapter = pi->adapter;
1555 struct qset_params *qsp = &adapter->params.sge.qset[0];
1556 struct sge_qset *qs = &adapter->sge.qs[0];
/* The hardware timer counts in 100ns units, hence the *10 scaling. */
1558 if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
1561 qsp->coalesce_usecs = c->rx_coalesce_usecs;
1562 t3_update_qset_coalesce(qs, qsp);
/* ethtool get_coalesce: report the RX holdoff timer (from qset 0,
 * mirroring set_coalesce which only programs qset 0). */
1566 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1568 struct port_info *pi = netdev_priv(dev);
1569 struct adapter *adapter = pi->adapter;
1570 struct qset_params *q = adapter->params.sge.qset;
1572 c->rx_coalesce_usecs = q->coalesce_usecs;
/* ethtool get_eeprom: read the requested window of the serial EEPROM.
 * Reads whole 32-bit words into a temporary buffer (the EEPROM interface
 * is word-based), then copies out exactly the bytes the caller asked for. */
1576 static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
1579 struct port_info *pi = netdev_priv(dev);
1580 struct adapter *adapter = pi->adapter;
1583 u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
1587 e->magic = EEPROM_MAGIC;
/* Round the start down to a word boundary; stop on first read error. */
1588 for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
1589 err = t3_seeprom_read(adapter, i, (u32 *) & buf[i]);
1592 memcpy(data, buf + e->offset, e->len);
/* ethtool set_eeprom: write a byte range to the serial EEPROM.
 * The EEPROM is word-addressed, so unaligned writes are handled by a
 * read-modify-write of the partial first/last words. Write protection is
 * lifted for the duration of the update and restored afterwards. */
1597 static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
1600 struct port_info *pi = netdev_priv(dev);
1601 struct adapter *adapter = pi->adapter;
1602 u32 aligned_offset, aligned_len, *p;
1606 if (eeprom->magic != EEPROM_MAGIC)
/* Expand the range to 32-bit word boundaries. */
1609 aligned_offset = eeprom->offset & ~3;
1610 aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
/* Unaligned request: read back the boundary words so the bytes outside
 * the caller's range are preserved, then overlay the new data. */
1612 if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
1613 buf = kmalloc(aligned_len, GFP_KERNEL);
1616 err = t3_seeprom_read(adapter, aligned_offset, (u32 *) buf);
1617 if (!err && aligned_len > 4)
1618 err = t3_seeprom_read(adapter,
1619 aligned_offset + aligned_len - 4,
1620 (u32 *) & buf[aligned_len - 4]);
1623 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
/* Disable write protection before programming. */
1627 err = t3_seeprom_wp(adapter, 0);
1631 for (p = (u32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
1632 err = t3_seeprom_write(adapter, aligned_offset, *p);
1633 aligned_offset += 4;
/* Re-enable write protection regardless of earlier errors. */
1637 err = t3_seeprom_wp(adapter, 1);
/* ethtool get_wol: Wake-on-LAN is not supported; clear the SecureOn pass. */
1644 static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1648 memset(&wol->sopass, 0, sizeof(wol->sopass));
/* ethtool operations table wired into every port net_device at probe time. */
1651 static const struct ethtool_ops cxgb_ethtool_ops = {
1652 .get_settings = get_settings,
1653 .set_settings = set_settings,
1654 .get_drvinfo = get_drvinfo,
1655 .get_msglevel = get_msglevel,
1656 .set_msglevel = set_msglevel,
1657 .get_ringparam = get_sge_param,
1658 .set_ringparam = set_sge_param,
1659 .get_coalesce = get_coalesce,
1660 .set_coalesce = set_coalesce,
1661 .get_eeprom_len = get_eeprom_len,
1662 .get_eeprom = get_eeprom,
1663 .set_eeprom = set_eeprom,
1664 .get_pauseparam = get_pauseparam,
1665 .set_pauseparam = set_pauseparam,
1666 .get_rx_csum = get_rx_csum,
1667 .set_rx_csum = set_rx_csum,
1668 .get_tx_csum = ethtool_op_get_tx_csum,
1669 .set_tx_csum = ethtool_op_set_tx_csum,
1670 .get_sg = ethtool_op_get_sg,
1671 .set_sg = ethtool_op_set_sg,
1672 .get_link = ethtool_op_get_link,
1673 .get_strings = get_strings,
1674 .phys_id = cxgb3_phys_id,
1675 .nway_reset = restart_autoneg,
1676 .get_stats_count = get_stats_count,
1677 .get_ethtool_stats = get_stats,
1678 .get_regs_len = get_regs_len,
1679 .get_regs = get_regs,
1681 .get_tso = ethtool_op_get_tso,
1682 .set_tso = ethtool_op_set_tso,
/*
 * in_range - validate an optional ioctl parameter against [lo, hi]
 *
 * Negative values mean "parameter not supplied" in the extension-ioctl
 * structures and therefore always pass; otherwise the value must fall
 * within the inclusive range. Returns non-zero when acceptable.
 */
static int in_range(int val, int lo, int hi)
{
	if (val < 0)
		return 1;	/* unset parameters are always acceptable */
	return lo <= val && val <= hi;
}
/* Chelsio private ioctl dispatcher (SIOCCHIOCTL). The first word at
 * @useraddr selects the sub-command; each case copies in its own argument
 * structure. Privileged sub-commands check CAP_NET_ADMIN individually. */
1690 static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1692 struct port_info *pi = netdev_priv(dev);
1693 struct adapter *adapter = pi->adapter;
1697 if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
/* Tune one qset's ring sizes, interrupt latency, congestion threshold
 * and polling (NAPI) mode. Negative fields mean "leave unchanged". */
1701 case CHELSIO_SET_QSET_PARAMS:{
1703 struct qset_params *q;
1704 struct ch_qset_params t;
1706 if (!capable(CAP_NET_ADMIN))
1708 if (copy_from_user(&t, useraddr, sizeof(t)))
1710 if (t.qset_idx >= SGE_QSETS)
1712 if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
1713 !in_range(t.cong_thres, 0, 255) ||
1714 !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
1716 !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
1718 !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
1719 MAX_CTRL_TXQ_ENTRIES) ||
1720 !in_range(t.fl_size[0], MIN_FL_ENTRIES,
1722 || !in_range(t.fl_size[1], MIN_FL_ENTRIES,
1723 MAX_RX_JUMBO_BUFFERS)
1724 || !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
/* Once rings are allocated, only latency/polling may still change. */
1727 if ((adapter->flags & FULL_INIT_DONE) &&
1728 (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
1729 t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
1730 t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
1731 t.polling >= 0 || t.cong_thres >= 0))
1734 q = &adapter->params.sge.qset[t.qset_idx];
1736 if (t.rspq_size >= 0)
1737 q->rspq_size = t.rspq_size;
1738 if (t.fl_size[0] >= 0)
1739 q->fl_size = t.fl_size[0];
1740 if (t.fl_size[1] >= 0)
1741 q->jumbo_size = t.fl_size[1];
1742 if (t.txq_size[0] >= 0)
1743 q->txq_size[0] = t.txq_size[0];
1744 if (t.txq_size[1] >= 0)
1745 q->txq_size[1] = t.txq_size[1];
1746 if (t.txq_size[2] >= 0)
1747 q->txq_size[2] = t.txq_size[2];
1748 if (t.cong_thres >= 0)
1749 q->cong_thres = t.cong_thres;
1750 if (t.intr_lat >= 0) {
1751 struct sge_qset *qs =
1752 &adapter->sge.qs[t.qset_idx];
1754 q->coalesce_usecs = t.intr_lat;
1755 t3_update_qset_coalesce(qs, q);
1757 if (t.polling >= 0) {
/* With MSI-X each qset can poll independently; with a shared
 * interrupt the polling mode must match across all qsets. */
1758 if (adapter->flags & USING_MSIX)
1759 q->polling = t.polling;
1761 /* No polling with INTx for T3A */
1762 if (adapter->params.rev == 0 &&
1763 !(adapter->flags & USING_MSI))
1766 for (i = 0; i < SGE_QSETS; i++) {
1767 q = &adapter->params.sge.
1769 q->polling = t.polling;
/* Return the current parameters of one qset to userspace. */
1775 case CHELSIO_GET_QSET_PARAMS:{
1776 struct qset_params *q;
1777 struct ch_qset_params t;
1779 if (copy_from_user(&t, useraddr, sizeof(t)))
1781 if (t.qset_idx >= SGE_QSETS)
1784 q = &adapter->params.sge.qset[t.qset_idx];
1785 t.rspq_size = q->rspq_size;
1786 t.txq_size[0] = q->txq_size[0];
1787 t.txq_size[1] = q->txq_size[1];
1788 t.txq_size[2] = q->txq_size[2];
1789 t.fl_size[0] = q->fl_size;
1790 t.fl_size[1] = q->jumbo_size;
1791 t.polling = q->polling;
1792 t.intr_lat = q->coalesce_usecs;
1793 t.cong_thres = q->cong_thres;
1795 if (copy_to_user(useraddr, &t, sizeof(t)))
/* Change the number of qsets owned by this port, then recompute every
 * port's first_qset so the assignments stay contiguous. */
1799 case CHELSIO_SET_QSET_NUM:{
1800 struct ch_reg edata;
1801 struct port_info *pi = netdev_priv(dev);
1802 unsigned int i, first_qset = 0, other_qsets = 0;
1804 if (!capable(CAP_NET_ADMIN))
1806 if (adapter->flags & FULL_INIT_DONE)
1808 if (copy_from_user(&edata, useraddr, sizeof(edata)))
/* More than one qset per port requires per-qset MSI-X vectors. */
1810 if (edata.val < 1 ||
1811 (edata.val > 1 && !(adapter->flags & USING_MSIX)))
1814 for_each_port(adapter, i)
1815 if (adapter->port[i] && adapter->port[i] != dev)
1816 other_qsets += adap2pinfo(adapter, i)->nqsets;
1818 if (edata.val + other_qsets > SGE_QSETS)
1821 pi->nqsets = edata.val;
1823 for_each_port(adapter, i)
1824 if (adapter->port[i]) {
1825 pi = adap2pinfo(adapter, i);
1826 pi->first_qset = first_qset;
1827 first_qset += pi->nqsets;
/* Report this port's qset count. */
1831 case CHELSIO_GET_QSET_NUM:{
1832 struct ch_reg edata;
1833 struct port_info *pi = netdev_priv(dev);
1835 edata.cmd = CHELSIO_GET_QSET_NUM;
1836 edata.val = pi->nqsets;
1837 if (copy_to_user(useraddr, &edata, sizeof(edata)))
/* Load a firmware image supplied inline after the ioctl header.
 * NOTE(review): t.len comes straight from userspace and is passed to
 * kmalloc() unbounded — a huge value triggers an allocation-size splat;
 * consider capping it against the maximum firmware size. */
1841 case CHELSIO_LOAD_FW:{
1843 struct ch_mem_range t;
1845 if (!capable(CAP_NET_ADMIN))
1847 if (copy_from_user(&t, useraddr, sizeof(t)))
1850 fw_data = kmalloc(t.len, GFP_KERNEL);
1855 (fw_data, useraddr + sizeof(t), t.len)) {
1860 ret = t3_load_fw(adapter, fw_data, t.len);
/* Replace the TCP MTU table used by the offload engine. */
1866 case CHELSIO_SETMTUTAB:{
1870 if (!is_offload(adapter))
1872 if (!capable(CAP_NET_ADMIN))
1874 if (offload_running(adapter))
1876 if (copy_from_user(&m, useraddr, sizeof(m)))
1878 if (m.nmtus != NMTUS)
1880 if (m.mtus[0] < 81) /* accommodate SACK */
1883 /* MTUs must be in ascending order */
1884 for (i = 1; i < NMTUS; ++i)
1885 if (m.mtus[i] < m.mtus[i - 1])
1888 memcpy(adapter->params.mtus, m.mtus,
1889 sizeof(adapter->params.mtus));
/* Report the payload-memory (PM) partitioning. */
1892 case CHELSIO_GET_PM:{
1893 struct tp_params *p = &adapter->params.tp;
1894 struct ch_pm m = {.cmd = CHELSIO_GET_PM };
1896 if (!is_offload(adapter))
1898 m.tx_pg_sz = p->tx_pg_size;
1899 m.tx_num_pg = p->tx_num_pgs;
1900 m.rx_pg_sz = p->rx_pg_size;
1901 m.rx_num_pg = p->rx_num_pgs;
1902 m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
1903 if (copy_to_user(useraddr, &m, sizeof(m)))
/* Reconfigure the PM page sizes/counts; -1 keeps the current count. */
1907 case CHELSIO_SET_PM:{
1909 struct tp_params *p = &adapter->params.tp;
1911 if (!is_offload(adapter))
1913 if (!capable(CAP_NET_ADMIN))
1915 if (adapter->flags & FULL_INIT_DONE)
1917 if (copy_from_user(&m, useraddr, sizeof(m)))
1919 if (!is_power_of_2(m.rx_pg_sz) ||
1920 !is_power_of_2(m.tx_pg_sz))
1921 return -EINVAL; /* not power of 2 */
/* Bit masks encode the set of page sizes the hardware accepts. */
1922 if (!(m.rx_pg_sz & 0x14000))
1923 return -EINVAL; /* not 16KB or 64KB */
1924 if (!(m.tx_pg_sz & 0x1554000))
1926 if (m.tx_num_pg == -1)
1927 m.tx_num_pg = p->tx_num_pgs;
1928 if (m.rx_num_pg == -1)
1929 m.rx_num_pg = p->rx_num_pgs;
1930 if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
1932 if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
1933 m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
1935 p->rx_pg_size = m.rx_pg_sz;
1936 p->tx_pg_size = m.tx_pg_sz;
1937 p->rx_num_pgs = m.rx_num_pg;
1938 p->tx_num_pgs = m.tx_num_pg;
/* Stream a range of one of the MC7 memories (CM/PMRX/PMTX) to the
 * user buffer that follows the ioctl header. */
1941 case CHELSIO_GET_MEM:{
1942 struct ch_mem_range t;
1946 if (!is_offload(adapter))
1948 if (!(adapter->flags & FULL_INIT_DONE))
1949 return -EIO; /* need the memory controllers */
1950 if (copy_from_user(&t, useraddr, sizeof(t)))
/* MC7 access is 64-bit granular. */
1952 if ((t.addr & 7) || (t.len & 7))
1954 if (t.mem_id == MEM_CM)
1956 else if (t.mem_id == MEM_PMRX)
1957 mem = &adapter->pmrx;
1958 else if (t.mem_id == MEM_PMTX)
1959 mem = &adapter->pmtx;
1965 * bits 0..9: chip version
1966 * bits 10..15: chip revision
1968 t.version = 3 | (adapter->params.rev << 10);
1969 if (copy_to_user(useraddr, &t, sizeof(t)))
1973 * Read 256 bytes at a time as len can be large and we don't
1974 * want to use huge intermediate buffers.
1976 useraddr += sizeof(t); /* advance to start of buffer */
1978 unsigned int chunk =
1979 min_t(unsigned int, t.len, sizeof(buf));
1982 t3_mc7_bd_read(mem, t.addr / 8, chunk / 8,
1986 if (copy_to_user(useraddr, buf, chunk))
/* Program the packet trace filter; the trace_params layout overlays
 * the ch_trace structure starting at its sip field. */
1994 case CHELSIO_SET_TRACE_FILTER:{
1996 const struct trace_params *tp;
1998 if (!capable(CAP_NET_ADMIN))
2000 if (!offload_running(adapter))
2002 if (copy_from_user(&t, useraddr, sizeof(t)))
2005 tp = (const struct trace_params *)&t.sip;
2007 t3_config_trace_filter(adapter, tp, 0,
2011 t3_config_trace_filter(adapter, tp, 1,
/* net_device ioctl handler: standard MII register access (SIOCGMIIPHY/
 * SIOCGMIIREG/SIOCSMIIREG) plus the Chelsio private extension ioctl.
 * For 10G PHYs the MMD (device address) is taken from bits 8+ of phy_id
 * (clause-45 addressing); 1G PHYs use plain clause-22 access. */
2022 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
2024 struct mii_ioctl_data *data = if_mii(req);
2025 struct port_info *pi = netdev_priv(dev);
2026 struct adapter *adapter = pi->adapter;
2031 data->phy_id = pi->phy.addr;
2035 struct cphy *phy = &pi->phy;
2037 if (!phy->mdio_read)
2039 if (is_10G(adapter)) {
2040 mmd = data->phy_id >> 8;
2043 else if (mmd > MDIO_DEV_XGXS)
2047 phy->mdio_read(adapter, data->phy_id & 0x1f,
2048 mmd, data->reg_num, &val);
2051 phy->mdio_read(adapter, data->phy_id & 0x1f,
2052 0, data->reg_num & 0x1f,
2055 data->val_out = val;
/* SIOCSMIIREG: write a PHY register; privileged. */
2059 struct cphy *phy = &pi->phy;
2061 if (!capable(CAP_NET_ADMIN))
2063 if (!phy->mdio_write)
2065 if (is_10G(adapter)) {
2066 mmd = data->phy_id >> 8;
2069 else if (mmd > MDIO_DEV_XGXS)
2073 phy->mdio_write(adapter,
2074 data->phy_id & 0x1f, mmd,
2079 phy->mdio_write(adapter,
2080 data->phy_id & 0x1f, 0,
2081 data->reg_num & 0x1f,
/* Chelsio-private commands are handled by the extension dispatcher. */
2086 return cxgb_extension_ioctl(dev, req->ifr_data);
/* net_device change_mtu: program the MAC with the new MTU and, on T3A
 * offload-capable adapters, reload the per-port MTU table used by TP. */
2093 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
2095 struct port_info *pi = netdev_priv(dev);
2096 struct adapter *adapter = pi->adapter;
2099 if (new_mtu < 81) /* accommodate SACK */
2101 if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
2104 init_port_mtus(adapter);
2105 if (adapter->params.rev == 0 && offload_running(adapter))
2106 t3_load_mtus(adapter, adapter->params.mtus,
2107 adapter->params.a_wnd, adapter->params.b_wnd,
2108 adapter->port[0]->mtu);
/* net_device set_mac_address: validate and program a new unicast MAC,
 * refreshing the offload SMT entry if the offload stack is active. */
2112 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2114 struct port_info *pi = netdev_priv(dev);
2115 struct adapter *adapter = pi->adapter;
2116 struct sockaddr *addr = p;
2118 if (!is_valid_ether_addr(addr->sa_data))
2121 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2122 t3_mac_set_address(&pi->mac, 0, dev->dev_addr);
2123 if (offload_running(adapter))
2124 write_smt_entry(adapter, pi->port_id);
2129 * t3_synchronize_rx - wait for current Rx processing on a port to complete
2130 * @adap: the adapter
/* @p: the port whose response queues are to be drained */
2133 * Ensures that current Rx processing on any of the queues associated with
2134 * the given port completes before returning. We do this by acquiring and
2135 * releasing the locks of the response queues associated with the port.
2137 static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
2141 for (i = 0; i < p->nqsets; i++) {
2142 struct sge_rspq *q = &adap->sge.qs[i + p->first_qset].rspq;
/* Lock/unlock acts as a barrier: anyone inside the handler has left. */
2144 spin_lock_irq(&q->lock);
2145 spin_unlock_irq(&q->lock);
/* net_device vlan_rx_register: enable/disable hardware VLAN extraction.
 * T3 rev > 0 supports per-port control; rev 0 has a single global knob
 * that must stay on while any port still has a VLAN group. */
2149 static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
2151 struct port_info *pi = netdev_priv(dev);
2152 struct adapter *adapter = pi->adapter;
2155 if (adapter->params.rev > 0)
2156 t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
2158 /* single control for all ports */
2159 unsigned int i, have_vlans = 0;
2160 for_each_port(adapter, i)
2161 have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;
2163 t3_set_vlan_accel(adapter, 1, have_vlans);
/* Make sure no RX handler is still using the old vlan_grp pointer. */
2165 t3_synchronize_rx(adapter, pi);
2168 #ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: invoke the interrupt handler directly for each of the
 * port's qsets so packets are processed with interrupts unavailable. */
2169 static void cxgb_netpoll(struct net_device *dev)
2171 struct port_info *pi = netdev_priv(dev);
2172 struct adapter *adapter = pi->adapter;
2175 for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
2176 struct sge_qset *qs = &adapter->sge.qs[qidx];
/* With MSI-X each qset has its own vector; pick the matching source. */
2179 if (adapter->flags & USING_MSIX)
2184 t3_intr_handler(adapter, qs->rspq.polling) (0, source);
2190 * Periodic accumulation of MAC statistics.
2192 static void mac_stats_update(struct adapter *adapter)
2196 for_each_port(adapter, i) {
2197 struct net_device *dev = adapter->port[i];
2198 struct port_info *p = netdev_priv(dev);
2200 if (netif_running(dev)) {
/* stats_lock serializes against concurrent readers (e.g. ethtool). */
2201 spin_lock(&adapter->stats_lock);
2202 t3_mac_update_stats(&p->mac);
2203 spin_unlock(&adapter->stats_lock);
/* Poll link state for ports whose PHYs cannot raise interrupts. */
2208 static void check_link_status(struct adapter *adapter)
2212 for_each_port(adapter, i) {
2213 struct net_device *dev = adapter->port[i];
2214 struct port_info *p = netdev_priv(dev);
/* SUPPORTED_IRQ PHYs report changes via interrupt; skip polling them. */
2216 if (!(p->port_type->caps & SUPPORTED_IRQ) && netif_running(dev))
2217 t3_link_changed(adapter, i);
/* T3B2 MAC watchdog: detect stuck MACs and recover them. Status 1 means
 * the MAC was toggled; status 2 means it needs a full reconfiguration. */
2221 static void check_t3b2_mac(struct adapter *adapter)
/* rtnl guards against a concurrent ifdown tearing the port down under us;
 * if it's contended we simply retry on the next periodic pass. */
2225 if (!rtnl_trylock()) /* synchronize with ifdown */
2228 for_each_port(adapter, i) {
2229 struct net_device *dev = adapter->port[i];
2230 struct port_info *p = netdev_priv(dev);
2233 if (!netif_running(dev))
2237 if (netif_running(dev) && netif_carrier_ok(dev))
2238 status = t3b2_mac_watchdog_task(&p->mac);
2240 p->mac.stats.num_toggled++;
2241 else if (status == 2) {
2242 struct cmac *mac = &p->mac;
/* Reprogram MTU, address, RX mode and restart the link to bring
 * the reset MAC back to its pre-reset configuration. */
2244 t3_mac_set_mtu(mac, dev->mtu);
2245 t3_mac_set_address(mac, 0, dev->dev_addr);
2246 cxgb_set_rxmode(dev);
2247 t3_link_start(&p->phy, mac, &p->link_config);
2248 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
2249 t3_port_intr_enable(adapter, p->port_id);
2250 p->mac.stats.num_resets++;
/* Periodic housekeeping work item: link polling, MAC stats accumulation
 * and the T3B2 MAC watchdog. Reschedules itself while any port is open. */
2257 static void t3_adap_check_task(struct work_struct *work)
2259 struct adapter *adapter = container_of(work, struct adapter,
2260 adap_check_task.work);
2261 const struct adapter_params *p = &adapter->params;
2263 adapter->check_task_cnt++;
2265 /* Check link status for PHYs without interrupts */
2266 if (p->linkpoll_period)
2267 check_link_status(adapter);
2269 /* Accumulate MAC stats if needed */
/* Stats must be harvested often enough that the clear-on-read HW
 * counters cannot wrap between two accumulations. */
2270 if (!p->linkpoll_period ||
2271 (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
2272 p->stats_update_period) {
2273 mac_stats_update(adapter);
2274 adapter->check_task_cnt = 0;
2277 if (p->rev == T3_REV_B2)
2278 check_t3b2_mac(adapter);
2280 /* Schedule the next check update if any port is active. */
/* work_lock protects open_device_map checks vs. port open/close. */
2281 spin_lock(&adapter->work_lock);
2282 if (adapter->open_device_map & PORT_MASK)
2283 schedule_chk_task(adapter);
2284 spin_unlock(&adapter->work_lock);
2288 * Processes external (PHY) interrupts in process context.
2290 static void ext_intr_task(struct work_struct *work)
2292 struct adapter *adapter = container_of(work, struct adapter,
2293 ext_intr_handler_task);
2295 t3_phy_intr_handler(adapter);
2297 /* Now reenable external interrupts */
/* Counterpart of t3_os_ext_intr_handler(): restore F_T3DBG in the slow
 * interrupt mask, ack any pending cause, and rewrite the enable mask. */
2298 spin_lock_irq(&adapter->work_lock);
2299 if (adapter->slow_intr_mask) {
2300 adapter->slow_intr_mask |= F_T3DBG;
2301 t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
2302 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2303 adapter->slow_intr_mask);
2305 spin_unlock_irq(&adapter->work_lock);
2309 * Interrupt-context handler for external (PHY) interrupts.
2311 void t3_os_ext_intr_handler(struct adapter *adapter)
2314 * Schedule a task to handle external interrupts as they may be slow
2315 * and we use a mutex to protect MDIO registers. We disable PHY
2316 * interrupts in the meantime and let the task reenable them when
2319 spin_lock(&adapter->work_lock);
/* A zero slow_intr_mask means interrupts are being torn down; skip. */
2320 if (adapter->slow_intr_mask) {
2321 adapter->slow_intr_mask &= ~F_T3DBG;
2322 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2323 adapter->slow_intr_mask);
2324 queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
2326 spin_unlock(&adapter->work_lock);
/* Handle a fatal hardware error: stop DMA and interrupts, then log the
 * firmware scratch registers to aid post-mortem debugging. */
2329 void t3_fatal_err(struct adapter *adapter)
2331 unsigned int fw_status[4];
2333 if (adapter->flags & FULL_INIT_DONE) {
2334 t3_sge_stop(adapter);
2335 t3_intr_disable(adapter);
2337 CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
/* CIM scratch block at 0xa0 holds the firmware's status words. */
2338 if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
2339 CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
2340 fw_status[0], fw_status[1],
2341 fw_status[2], fw_status[3]);
/* Try to enable MSI-X with one vector per qset plus one for slow-path
 * (async) events. On success record the assigned vectors; on failure the
 * caller falls back to MSI or INTx. */
2345 static int __devinit cxgb_enable_msix(struct adapter *adap)
2347 struct msix_entry entries[SGE_QSETS + 1];
2350 for (i = 0; i < ARRAY_SIZE(entries); ++i)
2351 entries[i].entry = i;
2353 err = pci_enable_msix(adap->pdev, entries, ARRAY_SIZE(entries));
2355 for (i = 0; i < ARRAY_SIZE(entries); ++i)
2356 adap->msix_info[i].vec = entries[i].vector;
/* Positive err = number of vectors available, fewer than requested. */
2358 dev_info(&adap->pdev->dev,
2359 "only %d MSI-X vectors left, not using MSI-X\n", err);
/* Log a one-line summary per registered port (bus type/speed, chip rev,
 * interrupt mode) and, once, the adapter's memory sizes. */
2363 static void __devinit print_port_info(struct adapter *adap,
2364 const struct adapter_info *ai)
2366 static const char *pci_variant[] = {
2367 "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
/* PCIe reports lane width; parallel PCI reports clock and bus width. */
2374 snprintf(buf, sizeof(buf), "%s x%d",
2375 pci_variant[adap->params.pci.variant],
2376 adap->params.pci.width);
2378 snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
2379 pci_variant[adap->params.pci.variant],
2380 adap->params.pci.speed, adap->params.pci.width);
2382 for_each_port(adap, i) {
2383 struct net_device *dev = adap->port[i];
2384 const struct port_info *pi = netdev_priv(dev);
2386 if (!test_bit(i, &adap->registered_device_map))
2388 printk(KERN_INFO "%s: %s %s %sNIC (rev %d) %s%s\n",
2389 dev->name, ai->desc, pi->port_type->desc,
2390 is_offload(adap) ? "R" : "", adap->params.rev, buf,
2391 (adap->flags & USING_MSIX) ? " MSI-X" :
2392 (adap->flags & USING_MSI) ? " MSI" : "");
/* Only the "name" port (first registered) prints the memory summary. */
2393 if (adap->name == dev->name && adap->params.vpd.mclk)
2394 printk(KERN_INFO "%s: %uMB CM, %uMB PMTX, %uMB PMRX\n",
2395 adap->name, t3_mc7_size(&adap->cm) >> 20,
2396 t3_mc7_size(&adap->pmtx) >> 20,
2397 t3_mc7_size(&adap->pmrx) >> 20);
/* PCI probe: bring up one adapter — claim PCI resources, map BARs,
 * allocate and wire up one net_device per port, prepare the chip, register
 * the netdevs, and pick the interrupt mode. Error paths unwind via the
 * out_* labels at the bottom. */
2401 static int __devinit init_one(struct pci_dev *pdev,
2402 const struct pci_device_id *ent)
2404 static int version_printed;
2406 int i, err, pci_using_dac = 0;
2407 unsigned long mmio_start, mmio_len;
2408 const struct adapter_info *ai;
2409 struct adapter *adapter = NULL;
2410 struct port_info *pi;
2412 if (!version_printed) {
2413 printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
/* Driver-wide singlethread workqueue, created on first probe.
 * NOTE(review): creation on probe rather than module init implies the
 * first device creates it; confirm teardown ordering in module exit. */
2418 cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
2420 printk(KERN_ERR DRV_NAME
2421 ": cannot initialize work queue\n");
2426 err = pci_request_regions(pdev, DRV_NAME);
2428 /* Just info, some other driver may have claimed the device. */
2429 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
2433 err = pci_enable_device(pdev);
2435 dev_err(&pdev->dev, "cannot enable PCI device\n");
2436 goto out_release_regions;
/* Prefer 64-bit DMA; fall back to 32-bit if unavailable. */
2439 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2441 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2443 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
2444 "coherent allocations\n");
2445 goto out_disable_device;
2447 } else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
2448 dev_err(&pdev->dev, "no usable DMA configuration\n");
2449 goto out_disable_device;
2452 pci_set_master(pdev);
2454 mmio_start = pci_resource_start(pdev, 0);
2455 mmio_len = pci_resource_len(pdev, 0);
2456 ai = t3_get_adapter_info(ent->driver_data);
2458 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
2461 goto out_disable_device;
2464 adapter->regs = ioremap_nocache(mmio_start, mmio_len);
2465 if (!adapter->regs) {
2466 dev_err(&pdev->dev, "cannot map device registers\n");
2468 goto out_free_adapter;
2471 adapter->pdev = pdev;
2472 adapter->name = pci_name(pdev);
2473 adapter->msg_enable = dflt_msg_enable;
2474 adapter->mmio_len = mmio_len;
2476 mutex_init(&adapter->mdio_lock);
2477 spin_lock_init(&adapter->work_lock);
2478 spin_lock_init(&adapter->stats_lock);
2480 INIT_LIST_HEAD(&adapter->adapter_list);
2481 INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
2482 INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
/* Allocate one net_device per physical port and populate its ops. */
2484 for (i = 0; i < ai->nports; ++i) {
2485 struct net_device *netdev;
2487 netdev = alloc_etherdev(sizeof(struct port_info));
2493 SET_MODULE_OWNER(netdev);
2494 SET_NETDEV_DEV(netdev, &pdev->dev);
2496 adapter->port[i] = netdev;
2497 pi = netdev_priv(netdev);
2498 pi->adapter = adapter;
2499 pi->rx_csum_offload = 1;
2504 netif_carrier_off(netdev);
2505 netdev->irq = pdev->irq;
2506 netdev->mem_start = mmio_start;
2507 netdev->mem_end = mmio_start + mmio_len - 1;
2508 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
2509 netdev->features |= NETIF_F_LLTX;
2511 netdev->features |= NETIF_F_HIGHDMA;
2513 netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
2514 netdev->vlan_rx_register = vlan_rx_register;
2516 netdev->open = cxgb_open;
2517 netdev->stop = cxgb_close;
2518 netdev->hard_start_xmit = t3_eth_xmit;
2519 netdev->get_stats = cxgb_get_stats;
2520 netdev->set_multicast_list = cxgb_set_rxmode;
2521 netdev->do_ioctl = cxgb_ioctl;
2522 netdev->change_mtu = cxgb_change_mtu;
2523 netdev->set_mac_address = cxgb_set_mac_addr;
2524 #ifdef CONFIG_NET_POLL_CONTROLLER
2525 netdev->poll_controller = cxgb_netpoll;
2527 netdev->weight = 64;
2529 SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
2532 pci_set_drvdata(pdev, adapter);
2533 if (t3_prep_adapter(adapter, ai, 1) < 0) {
2539 * The card is now ready to go. If any errors occur during device
2540 * registration we do not fail the whole card but rather proceed only
2541 * with the ports we manage to register successfully. However we must
2542 * register at least one net device.
2544 for_each_port(adapter, i) {
2545 err = register_netdev(adapter->port[i]);
2547 dev_warn(&pdev->dev,
2548 "cannot register net device %s, skipping\n",
2549 adapter->port[i]->name);
2552 * Change the name we use for messages to the name of
2553 * the first successfully registered interface.
2555 if (!adapter->registered_device_map)
2556 adapter->name = adapter->port[i]->name;
2558 __set_bit(i, &adapter->registered_device_map);
2561 if (!adapter->registered_device_map) {
2562 dev_err(&pdev->dev, "could not register any net devices\n");
2566 /* Driver's ready. Reflect it on LEDs */
2567 t3_led_ready(adapter);
2569 if (is_offload(adapter)) {
2570 __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
2571 cxgb3_adapter_ofld(adapter);
2574 /* See what interrupts we'll be using */
2575 if (msi > 1 && cxgb_enable_msix(adapter) == 0)
2576 adapter->flags |= USING_MSIX;
2577 else if (msi > 0 && pci_enable_msi(pdev) == 0)
2578 adapter->flags |= USING_MSI;
/* sysfs group failure is non-fatal; err is reported but probe proceeds. */
2580 err = sysfs_create_group(&adapter->port[0]->dev.kobj,
2583 print_port_info(adapter, ai);
/* Error unwind: release in reverse order of acquisition. */
2587 iounmap(adapter->regs);
2588 for (i = ai->nports - 1; i >= 0; --i)
2589 if (adapter->port[i])
2590 free_netdev(adapter->port[i]);
2596 pci_disable_device(pdev);
2597 out_release_regions:
2598 pci_release_regions(pdev);
2599 pci_set_drvdata(pdev, NULL);
/* PCI remove: tear down everything init_one() set up, in reverse order —
 * stop DMA, remove sysfs, unregister netdevs, shut down offload, free SGE
 * resources and interrupt vectors, then release PCI resources. */
2603 static void __devexit remove_one(struct pci_dev *pdev)
2605 struct adapter *adapter = pci_get_drvdata(pdev);
2610 t3_sge_stop(adapter);
2611 sysfs_remove_group(&adapter->port[0]->dev.kobj,
2614 for_each_port(adapter, i)
2615 if (test_bit(i, &adapter->registered_device_map))
2616 unregister_netdev(adapter->port[i]);
2618 if (is_offload(adapter)) {
2619 cxgb3_adapter_unofld(adapter);
2620 if (test_bit(OFFLOAD_DEVMAP_BIT,
2621 &adapter->open_device_map))
2622 offload_close(&adapter->tdev);
2625 t3_free_sge_resources(adapter);
2626 cxgb_disable_msi(adapter);
/* Dummy netdevs exist only to host extra NAPI contexts; free them. */
2628 for (i = 0; i < ARRAY_SIZE(adapter->dummy_netdev); i++)
2629 if (adapter->dummy_netdev[i]) {
2630 free_netdev(adapter->dummy_netdev[i]);
2631 adapter->dummy_netdev[i] = NULL;
2634 for_each_port(adapter, i)
2635 if (adapter->port[i])
2636 free_netdev(adapter->port[i]);
2638 iounmap(adapter->regs);
2640 pci_release_regions(pdev);
2641 pci_disable_device(pdev);
2642 pci_set_drvdata(pdev, NULL);
/* PCI driver glue binding the cxgb3 device-ID table to probe/remove. */
2646 static struct pci_driver driver = {
2648 .id_table = cxgb3_pci_tbl,
2650 .remove = __devexit_p(remove_one),
/* Module entry: initialize the offload layer, then register the PCI driver. */
2653 static int __init cxgb3_init_module(void)
2657 cxgb3_offload_init();
2659 ret = pci_register_driver(&driver);
/* Module exit: unregister the PCI driver and destroy the driver workqueue
 * (created lazily in init_one on first probe). */
2663 static void __exit cxgb3_cleanup_module(void)
2665 pci_unregister_driver(&driver);
2667 destroy_workqueue(cxgb3_wq);
/* Register the module entry/exit points with the kernel. */
2670 module_init(cxgb3_init_module);
2671 module_exit(cxgb3_cleanup_module);