2 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 #include <linux/module.h>
33 #include <linux/moduleparam.h>
34 #include <linux/init.h>
35 #include <linux/pci.h>
36 #include <linux/dma-mapping.h>
37 #include <linux/netdevice.h>
38 #include <linux/etherdevice.h>
39 #include <linux/if_vlan.h>
40 #include <linux/mii.h>
41 #include <linux/sockios.h>
42 #include <linux/workqueue.h>
43 #include <linux/proc_fs.h>
44 #include <linux/rtnetlink.h>
45 #include <linux/firmware.h>
46 #include <linux/log2.h>
47 #include <asm/uaccess.h>
50 #include "cxgb3_ioctl.h"
52 #include "cxgb3_offload.h"
55 #include "cxgb3_ctl_defs.h"
57 #include "firmware_exports.h"
60 MAX_TXQ_ENTRIES = 16384,
61 MAX_CTRL_TXQ_ENTRIES = 1024,
62 MAX_RSPQ_ENTRIES = 16384,
63 MAX_RX_BUFFERS = 16384,
64 MAX_RX_JUMBO_BUFFERS = 16384,
66 MIN_CTRL_TXQ_ENTRIES = 4,
67 MIN_RSPQ_ENTRIES = 32,
71 #define PORT_MASK ((1 << MAX_NPORTS) - 1)
73 #define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
74 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
75 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
77 #define EEPROM_MAGIC 0x38E2F10C
79 #define CH_DEVICE(devid, ssid, idx) \
80 { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, ssid, 0, 0, idx }
82 static const struct pci_device_id cxgb3_pci_tbl[] = {
83 CH_DEVICE(0x20, 1, 0), /* PE9000 */
84 CH_DEVICE(0x21, 1, 1), /* T302E */
85 CH_DEVICE(0x22, 1, 2), /* T310E */
86 CH_DEVICE(0x23, 1, 3), /* T320X */
87 CH_DEVICE(0x24, 1, 1), /* T302X */
88 CH_DEVICE(0x25, 1, 3), /* T320E */
89 CH_DEVICE(0x26, 1, 2), /* T310X */
90 CH_DEVICE(0x30, 1, 2), /* T3B10 */
91 CH_DEVICE(0x31, 1, 3), /* T3B20 */
92 CH_DEVICE(0x32, 1, 1), /* T3B02 */
96 MODULE_DESCRIPTION(DRV_DESC);
97 MODULE_AUTHOR("Chelsio Communications");
98 MODULE_LICENSE("Dual BSD/GPL");
99 MODULE_VERSION(DRV_VERSION);
100 MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);
102 static int dflt_msg_enable = DFLT_MSG_ENABLE;
104 module_param(dflt_msg_enable, int, 0644);
105 MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");
108 * The driver uses the best interrupt scheme available on a platform in the
109 * order MSI-X, MSI, legacy pin interrupts. This parameter determines which
110 * of these schemes the driver may consider as follows:
112 * msi = 2: choose from among all three options
113 * msi = 1: only consider MSI and pin interrupts
114 * msi = 0: force pin interrupts
118 module_param(msi, int, 0644);
119 MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");
122 * The driver enables offload as a default.
123 * To disable it, use ofld_disable = 1.
126 static int ofld_disable = 0;
128 module_param(ofld_disable, int, 0644);
129 MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");
132 * We have work elements that we need to cancel when an interface is taken
133 * down. Normally the work elements would be executed by keventd but that
134 * can deadlock because of linkwatch. If our close method takes the rtnl
135 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
136 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
137 * for our work to complete. Get our own work queue to solve this.
139 static struct workqueue_struct *cxgb3_wq;
142 * link_report - show link status and link speed/duplex
143 * @p: the port whose settings are to be reported
145 * Shows the link status, speed, and duplex of a port.
147 static void link_report(struct net_device *dev)
149 if (!netif_carrier_ok(dev))
150 printk(KERN_INFO "%s: link down\n", dev->name);
/* Carrier is up: report speed and duplex. */
152 const char *s = "10Mbps";
153 const struct port_info *p = netdev_priv(dev);
/* NOTE(review): the switch cases that update "s" for higher speeds are
 * missing from this extract — confirm against the full source. */
155 switch (p->link_config.speed) {
167 printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
168 p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
173 * t3_os_link_changed - handle link status changes
174 * @adapter: the adapter associated with the link change
175 * @port_id: the port index whose limk status has changed
176 * @link_stat: the new status of the link
177 * @speed: the new speed setting
178 * @duplex: the new duplex setting
179 * @pause: the new flow-control setting
181 * This is the OS-dependent handler for link status changes. The OS
182 * neutral handler takes care of most of the processing for these events,
183 * then calls this handler for any OS-specific processing.
185 void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
186 int speed, int duplex, int pause)
188 struct net_device *dev = adapter->port[port_id];
189 struct port_info *pi = netdev_priv(dev);
190 struct cmac *mac = &pi->mac;
192 /* Skip changes from disabled ports. */
193 if (!netif_running(dev))
/* Only act when the reported state differs from the current carrier. */
196 if (link_stat != netif_carrier_ok(dev)) {
/* Link up: enable MAC RX before announcing the carrier. */
198 t3_mac_enable(mac, MAC_DIRECTION_RX);
199 netif_carrier_on(dev);
/* Link down: drop carrier, power the PHY down, stop MAC RX, and
 * re-arm the link (presumably for the next link-up — confirm). */
201 netif_carrier_off(dev);
202 pi->phy.ops->power_down(&pi->phy, 1);
203 t3_mac_disable(mac, MAC_DIRECTION_RX);
204 t3_link_start(&pi->phy, mac, &pi->link_config);
/* Propagate the net_device's RX filter settings (multicast list etc.)
 * to the port's MAC. */
211 static void cxgb_set_rxmode(struct net_device *dev)
213 struct t3_rx_mode rm;
214 struct port_info *pi = netdev_priv(dev);
216 init_rx_mode(&rm, dev, dev->mc_list);
217 t3_mac_set_rx_mode(&pi->mac, &rm);
221 * link_start - enable a port
222 * @dev: the device to enable
224 * Performs the MAC and PHY actions needed to enable a port.
226 static void link_start(struct net_device *dev)
228 struct t3_rx_mode rm;
229 struct port_info *pi = netdev_priv(dev);
230 struct cmac *mac = &pi->mac;
232 init_rx_mode(&rm, dev, dev->mc_list);
/* Program MTU, unicast address and RX mode before starting the link
 * and enabling the MAC in both directions. */
234 t3_mac_set_mtu(mac, dev->mtu);
235 t3_mac_set_address(mac, 0, dev->dev_addr);
236 t3_mac_set_rx_mode(mac, &rm);
237 t3_link_start(&pi->phy, mac, &pi->link_config);
238 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
/* Disable whichever of MSI-X/MSI is currently in use and clear the
 * matching flag; a no-op for legacy pin interrupts. */
241 static inline void cxgb_disable_msi(struct adapter *adapter)
243 if (adapter->flags & USING_MSIX) {
244 pci_disable_msix(adapter->pdev);
245 adapter->flags &= ~USING_MSIX;
246 } else if (adapter->flags & USING_MSI) {
247 pci_disable_msi(adapter->pdev);
248 adapter->flags &= ~USING_MSI;
253 * Interrupt handler for asynchronous events used with MSI-X.
255 static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
/* cookie is the adapter; delegate to the slow-path event handler.
 * NOTE(review): the return statement is not visible in this extract. */
257 t3_slow_intr_handler(cookie);
262 * Name the MSI-X interrupts.
264 static void name_msix_vecs(struct adapter *adap)
/* n leaves room for the terminating NUL written explicitly below. */
266 int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;
/* Vector 0 carries async (non-data) events; name it after the adapter. */
268 snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
269 adap->msix_info[0].desc[n] = 0;
271 for_each_port(adap, j) {
272 struct net_device *d = adap->port[j];
273 const struct port_info *pi = netdev_priv(d);
/* One data vector per queue set, named "<ifname> (queue N)". */
275 for (i = 0; i < pi->nqsets; i++, msi_idx++) {
276 snprintf(adap->msix_info[msi_idx].desc, n,
277 "%s (queue %d)", d->name, i);
278 adap->msix_info[msi_idx].desc[n] = 0;
/* Request one MSI-X interrupt per SGE queue set (vectors 1..N; vector 0
 * is the async vector). NOTE(review): parts of the error-unwind path are
 * elided in this extract. */
283 static int request_msix_data_irqs(struct adapter *adap)
285 int i, j, err, qidx = 0;
287 for_each_port(adap, i) {
288 int nqsets = adap2pinfo(adap, i)->nqsets;
290 for (j = 0; j < nqsets; ++j) {
291 err = request_irq(adap->msix_info[qidx + 1].vec,
292 t3_intr_handler(adap,
295 adap->msix_info[qidx + 1].desc,
296 &adap->sge.qs[qidx]);
/* On failure, release the vectors acquired so far. */
299 free_irq(adap->msix_info[qidx + 1].vec,
300 &adap->sge.qs[qidx]);
310 * setup_rss - configure RSS
313 * Sets up RSS to distribute packets to multiple receive queues. We
314 * configure the RSS CPU lookup table to distribute to the number of HW
315 * receive queues, and the response queue lookup table to narrow that
316 * down to the response queues actually configured for each port.
317 * We always configure the RSS mapping for two ports since the mapping
318 * table has plenty of entries.
320 static void setup_rss(struct adapter *adap)
323 unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
/* A missing second port counts as one queue set so the map below stays
 * well defined. */
324 unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
325 u8 cpus[SGE_QSETS + 1];
326 u16 rspq_map[RSS_TABLE_SIZE];
/* NOTE(review): the loop body filling cpus[] is elided in this extract. */
328 for (i = 0; i < SGE_QSETS; ++i)
330 cpus[SGE_QSETS] = 0xff; /* terminator */
/* First half of the table maps to port 0's queues; the second half maps
 * to port 1's queues, offset by nq0. */
332 for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
333 rspq_map[i] = i % nq0;
334 rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
337 t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
338 F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
339 V_RRCPLCPUSIZE(6), cpus, rspq_map);
/* Attach a NAPI context to every SGE queue set. */
342 static void init_napi(struct adapter *adap)
346 for (i = 0; i < SGE_QSETS; i++) {
347 struct sge_qset *qs = &adap->sge.qs[i];
350 netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
356 * Wait until all NAPI handlers are descheduled. This includes the handlers of
357 * both netdevices representing interfaces and the dummy ones for the extra
360 static void quiesce_rx(struct adapter *adap)
364 for (i = 0; i < SGE_QSETS; i++)
/* qs->adap is only set for initialized queue sets. */
365 if (adap->sge.qs[i].adap)
366 napi_disable(&adap->sge.qs[i].napi);
/* Enable NAPI on every initialized queue set (inverse of quiesce_rx). */
369 static void enable_all_napi(struct adapter *adap)
372 for (i = 0; i < SGE_QSETS; i++)
373 if (adap->sge.qs[i].adap)
374 napi_enable(&adap->sge.qs[i].napi);
378 * setup_sge_qsets - configure SGE Tx/Rx/response queues
381 * Determines how many sets of SGE queues to use and initializes them.
382 * We support multiple queue sets per port if we have MSI-X, otherwise
383 * just one queue set per port.
385 static int setup_sge_qsets(struct adapter *adap)
387 int i, j, err, irq_idx = 0, qset_idx = 0;
388 unsigned int ntxq = SGE_TXQ_PER_SET;
390 if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
393 for_each_port(adap, i) {
394 struct net_device *dev = adap->port[i];
395 struct port_info *pi = netdev_priv(dev);
397 pi->qs = &adap->sge.qs[pi->first_qset];
398 for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
/* With MSI-X each queue set gets its own vector (qset_idx + 1);
 * otherwise all sets share the single device interrupt. */
399 err = t3_sge_alloc_qset(adap, qset_idx, 1,
400 (adap->flags & USING_MSIX) ? qset_idx + 1 :
402 &adap->params.sge.qset[qset_idx], ntxq, dev);
/* On failure, tear down everything allocated so far. */
404 t3_free_sge_resources(adap);
/* Generic sysfs "show" helper: formats a per-netdev value via *format. */
413 static ssize_t attr_show(struct device *d, struct device_attribute *attr,
415 ssize_t(*format) (struct net_device *, char *))
419 /* Synchronize with ioctls that may shut down the device */
421 len = (*format) (to_net_dev(d), buf);
/* Generic sysfs "store" helper: parses an unsigned value, range-checks it
 * against [min_val, max_val] and applies it via *set.
 * Requires CAP_NET_ADMIN. */
426 static ssize_t attr_store(struct device *d, struct device_attribute *attr,
427 const char *buf, size_t len,
428 ssize_t(*set) (struct net_device *, unsigned int),
429 unsigned int min_val, unsigned int max_val)
435 if (!capable(CAP_NET_ADMIN))
438 val = simple_strtoul(buf, &endp, 0);
/* Reject empty/unparsable input and out-of-range values. */
439 if (endp == buf || val < min_val || val > max_val)
443 ret = (*set) (to_net_dev(d), val);
450 #define CXGB3_SHOW(name, val_expr) \
451 static ssize_t format_##name(struct net_device *dev, char *buf) \
453 struct port_info *pi = netdev_priv(dev); \
454 struct adapter *adap = pi->adapter; \
455 return sprintf(buf, "%u\n", val_expr); \
457 static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
460 return attr_show(d, attr, buf, format_##name); \
/* Set the number of MC5 filter TIDs. Rejected after full initialization,
 * on rev 0 hardware, or when the request would not leave room for the
 * server region and the minimum number of offload TIDs. */
463 static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
465 struct port_info *pi = netdev_priv(dev);
466 struct adapter *adap = pi->adapter;
467 int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;
469 if (adap->flags & FULL_INIT_DONE)
471 if (val && adap->params.rev == 0)
473 if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
476 adap->params.mc5.nfilters = val;
/* sysfs store wrapper for nfilters; set_nfilters does the real
 * validation, so the generic range check accepts any unsigned value. */
480 static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
481 const char *buf, size_t len)
483 return attr_store(d, attr, buf, len, set_nfilters, 0, ~0);
/* Set the number of MC5 server TIDs. Rejected after full initialization
 * or when the request exceeds the MC5 capacity left by the filters. */
486 static ssize_t set_nservers(struct net_device *dev, unsigned int val)
488 struct port_info *pi = netdev_priv(dev);
489 struct adapter *adap = pi->adapter;
491 if (adap->flags & FULL_INIT_DONE)
493 if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
496 adap->params.mc5.nservers = val;
/* sysfs store wrapper for nservers; validation lives in set_nservers. */
500 static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
501 const char *buf, size_t len)
503 return attr_store(d, attr, buf, len, set_nservers, 0, ~0);
506 #define CXGB3_ATTR_R(name, val_expr) \
507 CXGB3_SHOW(name, val_expr) \
508 static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
510 #define CXGB3_ATTR_RW(name, val_expr, store_method) \
511 CXGB3_SHOW(name, val_expr) \
512 static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)
514 CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
515 CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
516 CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);
518 static struct attribute *cxgb3_attrs[] = {
519 &dev_attr_cam_size.attr,
520 &dev_attr_nfilters.attr,
521 &dev_attr_nservers.attr,
525 static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };
/* Show the rate of TX traffic scheduler "sched" in Kbps; prints
 * "disabled" when the scheduler's clock divider is zero. */
527 static ssize_t tm_attr_show(struct device *d, struct device_attribute *attr,
528 char *buf, int sched)
530 struct port_info *pi = netdev_priv(to_net_dev(d));
531 struct adapter *adap = pi->adapter;
532 unsigned int v, addr, bpt, cpt;
/* Two schedulers share each rate-limit register. */
535 addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
/* Indirect register access: select the address, then read the data. */
537 t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
538 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
541 bpt = (v >> 8) & 0xff;
544 len = sprintf(buf, "disabled\n");
/* Convert bytes-per-tick and core-clock ticks into Kbps. */
546 v = (adap->params.vpd.cclk * 1000) / cpt;
547 len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
/* Store a TX scheduler rate (0..10000000 Kbps); requires CAP_NET_ADMIN. */
553 static ssize_t tm_attr_store(struct device *d, struct device_attribute *attr,
554 const char *buf, size_t len, int sched)
556 struct port_info *pi = netdev_priv(to_net_dev(d));
557 struct adapter *adap = pi->adapter;
562 if (!capable(CAP_NET_ADMIN))
565 val = simple_strtoul(buf, &endp, 0);
566 if (endp == buf || val > 10000000)
570 ret = t3_config_sched(adap, val, sched);
577 #define TM_ATTR(name, sched) \
578 static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
581 return tm_attr_show(d, attr, buf, sched); \
583 static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
584 const char *buf, size_t len) \
586 return tm_attr_store(d, attr, buf, len, sched); \
588 static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)
599 static struct attribute *offload_attrs[] = {
600 &dev_attr_sched0.attr,
601 &dev_attr_sched1.attr,
602 &dev_attr_sched2.attr,
603 &dev_attr_sched3.attr,
604 &dev_attr_sched4.attr,
605 &dev_attr_sched5.attr,
606 &dev_attr_sched6.attr,
607 &dev_attr_sched7.attr,
611 static struct attribute_group offload_attr_group = {.attrs = offload_attrs };
614 * Sends an sk_buff to an offload queue driver
615 * after dealing with any active network taps.
617 static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
/* NOTE(review): network-tap handling and the return statement are
 * elided in this extract. */
622 ret = t3_offload_tx(tdev, skb);
/* Build and send a CPL_SMT_WRITE_REQ that programs SMT entry idx with
 * port idx's MAC address. */
627 static int write_smt_entry(struct adapter *adapter, int idx)
629 struct cpl_smt_write_req *req;
630 struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);
635 req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
636 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
637 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
638 req->mtu_idx = NMTUS - 1; /* should be 0 but there's a T3 bug */
/* Only src_mac0 carries the port address; src_mac1 is cleared. */
640 memset(req->src_mac1, 0, sizeof(req->src_mac1));
641 memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
643 offload_tx(&adapter->tdev, skb);
/* Program one SMT entry per port. */
647 static int init_smt(struct adapter *adapter)
651 for_each_port(adapter, i)
652 write_smt_entry(adapter, i);
/* Write both ports' MTUs into the TP per-port MTU table: port 0 in the
 * low 16 bits, port 1 (if present) in the high 16 bits. */
656 static void init_port_mtus(struct adapter *adapter)
658 unsigned int mtus = adapter->port[0]->mtu;
660 if (adapter->port[1])
661 mtus |= adapter->port[1]->mtu << 16;
662 t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
/* Send a firmware packet-scheduler management command for one queue.
 * __GFP_NOFAIL means the allocation never returns NULL, so no check. */
665 static void send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
669 struct mngt_pktsched_wr *req;
671 skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
672 req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
673 req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
674 req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
680 t3_mgmt_tx(adap, skb);
/* Issue a scheduler-bind command for every queue set of every port. */
683 static void bind_qsets(struct adapter *adap)
687 for_each_port(adap, i) {
688 const struct port_info *pi = adap2pinfo(adap, i);
690 for (j = 0; j < pi->nqsets; ++j)
691 send_pktsched_cmd(adap, 1, pi->first_qset + j, -1,
696 #define FW_FNAME "t3fw-%d.%d.%d.bin"
697 #define TPSRAM_NAME "t3%c_protocol_sram-%d.%d.%d.bin"
/* Load the firmware image matching the driver's expected version via
 * request_firmware() and flash it into the adapter. */
699 static int upgrade_fw(struct adapter *adap)
703 const struct firmware *fw;
704 struct device *dev = &adap->pdev->dev;
706 snprintf(buf, sizeof(buf), FW_FNAME, FW_VERSION_MAJOR,
707 FW_VERSION_MINOR, FW_VERSION_MICRO);
708 ret = request_firmware(&fw, buf, dev);
710 dev_err(dev, "could not upgrade firmware: unable to load %s\n",
714 ret = t3_load_fw(adap, fw->data, fw->size);
715 release_firmware(fw);
718 dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
719 FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
721 dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
722 FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
/* Map the chip revision to the character used in TP SRAM file names.
 * NOTE(review): the switch cases and return are elided in this extract. */
727 static inline char t3rev2char(struct adapter *adapter)
731 switch(adapter->params.rev) {
/* Load the protocol-engine (TP) SRAM image matching this chip revision,
 * validate it, and program it into the adapter. */
740 int update_tpsram(struct adapter *adap)
742 const struct firmware *tpsram;
744 struct device *dev = &adap->pdev->dev;
748 rev = t3rev2char(adap);
752 snprintf(buf, sizeof(buf), TPSRAM_NAME, rev,
753 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
755 ret = request_firmware(&tpsram, buf, dev);
757 dev_err(dev, "could not load TP SRAM: unable to load %s\n",
/* Sanity-check the image before writing it to the chip. */
762 ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
766 ret = t3_set_proto_sram(adap, tpsram->data);
769 "successful update of protocol engine "
771 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
773 dev_err(dev, "failed to update of protocol engine %d.%d.%d\n",
774 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
776 dev_err(dev, "loading protocol SRAM failed\n");
779 release_firmware(tpsram);
785 * cxgb_up - enable the adapter
786 * @adapter: adapter being enabled
788 * Called when the first port is enabled, this function performs the
789 * actions necessary to make an adapter operational, such as completing
790 * the initialization of HW modules, and enabling interrupts.
792 * Must be called with the rtnl lock held.
794 static int cxgb_up(struct adapter *adap)
799 if (!(adap->flags & FULL_INIT_DONE)) {
/* First bring-up only: upgrade firmware / TP SRAM when the on-card
 * versions don't match the driver (-EINVAL flags a mismatch). */
800 err = t3_check_fw_version(adap, &must_load);
801 if (err == -EINVAL) {
802 err = upgrade_fw(adap);
803 if (err && must_load)
807 err = t3_check_tpsram_version(adap, &must_load);
808 if (err == -EINVAL) {
809 err = update_tpsram(adap);
810 if (err && must_load)
814 err = t3_init_hw(adap, 0);
818 t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));
820 err = setup_sge_qsets(adap);
826 adap->flags |= FULL_INIT_DONE;
/* Register interrupts: one async vector plus one per queue set for
 * MSI-X, otherwise a single handler on the device IRQ. */
831 if (adap->flags & USING_MSIX) {
832 name_msix_vecs(adap);
833 err = request_irq(adap->msix_info[0].vec,
834 t3_async_intr_handler, 0,
835 adap->msix_info[0].desc, adap);
839 if (request_msix_data_irqs(adap)) {
840 free_irq(adap->msix_info[0].vec, adap);
843 } else if ((err = request_irq(adap->pdev->irq,
844 t3_intr_handler(adap,
845 adap->sge.qs[0].rspq.
847 (adap->flags & USING_MSI) ?
852 enable_all_napi(adap);
854 t3_intr_enable(adap);
/* Bind queue sets to the scheduler once per MSI-X configuration. */
856 if ((adap->flags & (USING_MSIX | QUEUES_BOUND)) == USING_MSIX)
858 adap->flags |= QUEUES_BOUND;
863 CH_ERR(adap, "request_irq failed, err %d\n", err);
868 * Release resources when all the ports and offloading have been stopped.
870 static void cxgb_down(struct adapter *adapter)
872 t3_sge_stop(adapter);
873 spin_lock_irq(&adapter->work_lock); /* sync with PHY intr task */
874 t3_intr_disable(adapter);
875 spin_unlock_irq(&adapter->work_lock);
/* Free the async vector plus one data vector per queue set (MSI-X),
 * or the single device IRQ otherwise. */
877 if (adapter->flags & USING_MSIX) {
880 free_irq(adapter->msix_info[0].vec, adapter);
881 for_each_port(adapter, i)
882 n += adap2pinfo(adapter, i)->nqsets;
884 for (i = 0; i < n; ++i)
885 free_irq(adapter->msix_info[i + 1].vec,
886 &adapter->sge.qs[i]);
888 free_irq(adapter->pdev->irq, adapter);
890 flush_workqueue(cxgb3_wq); /* wait for external IRQ handler */
/* Queue the periodic adapter check; the period comes from link polling
 * when configured, otherwise from the stats update period. */
894 static void schedule_chk_task(struct adapter *adap)
898 timeo = adap->params.linkpoll_period ?
899 (HZ * adap->params.linkpoll_period) / 10 :
900 adap->params.stats_update_period * HZ;
902 queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
/* Bring up offload operation: set the offload bit in open_device_map,
 * bring the adapter up if no port had done so yet, activate the offload
 * module, program MTUs, and notify registered offload clients. */
905 static int offload_open(struct net_device *dev)
907 struct port_info *pi = netdev_priv(dev);
908 struct adapter *adapter = pi->adapter;
909 struct t3cdev *tdev = dev2t3cdev(dev);
910 int adap_up = adapter->open_device_map & PORT_MASK;
/* Already open: nothing to do. */
913 if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
916 if (!adap_up && (err = cxgb_up(adapter)) < 0)
919 t3_tp_set_offload_mode(adapter, 1);
920 tdev->lldev = adapter->port[0];
921 err = cxgb3_offload_activate(adapter);
925 init_port_mtus(adapter);
926 t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
927 adapter->params.b_wnd,
928 adapter->params.rev == 0 ?
929 adapter->port[0]->mtu : 0xffff);
932 /* Never mind if the next step fails */
933 sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group);
935 /* Call back all registered clients */
936 cxgb3_add_clients(tdev);
939 /* restore them in case the offload module has changed them */
/* Error path: undo offload mode and the devmap bit. */
941 t3_tp_set_offload_mode(adapter, 0);
942 clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
943 cxgb3_set_dummy_ops(tdev);
/* Tear down offload operation: notify clients, remove the sysfs group,
 * clear offload mode, and deactivate the offload module once no port
 * remains open. No-op if offload was not open. */
948 static int offload_close(struct t3cdev *tdev)
950 struct adapter *adapter = tdev2adap(tdev);
952 if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
955 /* Call back all registered clients */
956 cxgb3_remove_clients(tdev);
958 sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);
961 cxgb3_set_dummy_ops(tdev);
962 t3_tp_set_offload_mode(adapter, 0);
963 clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
965 if (!adapter->open_device_map)
968 cxgb3_offload_deactivate(adapter);
/* net_device open: bring the adapter up on first open, mark the port in
 * open_device_map, optionally start offload, then enable the port's
 * interrupts and TX queue and kick off the periodic check task. */
972 static int cxgb_open(struct net_device *dev)
974 struct port_info *pi = netdev_priv(dev);
975 struct adapter *adapter = pi->adapter;
976 int other_ports = adapter->open_device_map & PORT_MASK;
979 if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0) {
984 set_bit(pi->port_id, &adapter->open_device_map);
985 if (is_offload(adapter) && !ofld_disable) {
986 err = offload_open(dev);
/* Offload failure is non-fatal: log and continue as a plain NIC. */
989 "Could not initialize offload capabilities\n");
993 t3_port_intr_enable(adapter, pi->port_id);
994 netif_start_queue(dev);
996 schedule_chk_task(adapter);
/* net_device close: quiesce the port (interrupts, TX queue, PHY, MAC),
 * clear it from open_device_map, and cancel the periodic check task when
 * the last port goes down. */
1001 static int cxgb_close(struct net_device *dev)
1003 struct port_info *pi = netdev_priv(dev);
1004 struct adapter *adapter = pi->adapter;
1006 t3_port_intr_disable(adapter, pi->port_id);
1007 netif_stop_queue(dev);
1008 pi->phy.ops->power_down(&pi->phy, 1);
1009 netif_carrier_off(dev);
1010 t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
1012 spin_lock(&adapter->work_lock); /* sync with update task */
1013 clear_bit(pi->port_id, &adapter->open_device_map);
1014 spin_unlock(&adapter->work_lock);
1016 if (!(adapter->open_device_map & PORT_MASK))
1017 cancel_rearming_delayed_workqueue(cxgb3_wq,
1018 &adapter->adap_check_task);
1020 if (!adapter->open_device_map)
/* Fill struct net_device_stats from the MAC's hardware counters;
 * stats_lock serializes against the periodic stats accumulation. */
1026 static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
1028 struct port_info *pi = netdev_priv(dev);
1029 struct adapter *adapter = pi->adapter;
1030 struct net_device_stats *ns = &pi->netstats;
1031 const struct mac_stats *pstats;
1033 spin_lock(&adapter->stats_lock);
1034 pstats = t3_mac_update_stats(&pi->mac);
1035 spin_unlock(&adapter->stats_lock);
1037 ns->tx_bytes = pstats->tx_octets;
1038 ns->tx_packets = pstats->tx_frames;
1039 ns->rx_bytes = pstats->rx_octets;
1040 ns->rx_packets = pstats->rx_frames;
1041 ns->multicast = pstats->rx_mcast_frames;
1043 ns->tx_errors = pstats->tx_underrun;
1044 ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
1045 pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
1046 pstats->rx_fifo_ovfl;
1048 /* detailed rx_errors */
1049 ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
1050 ns->rx_over_errors = 0;
1051 ns->rx_crc_errors = pstats->rx_fcs_errs;
1052 ns->rx_frame_errors = pstats->rx_symbol_errs;
1053 ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
1054 ns->rx_missed_errors = pstats->rx_cong_drops;
1056 /* detailed tx_errors */
1057 ns->tx_aborted_errors = 0;
1058 ns->tx_carrier_errors = 0;
1059 ns->tx_fifo_errors = pstats->tx_underrun;
1060 ns->tx_heartbeat_errors = 0;
1061 ns->tx_window_errors = 0;
/* ethtool get_msglevel: return the adapter-wide message enable bitmap. */
1065 static u32 get_msglevel(struct net_device *dev)
1067 struct port_info *pi = netdev_priv(dev);
1068 struct adapter *adapter = pi->adapter;
1070 return adapter->msg_enable;
/* ethtool set_msglevel: set the adapter-wide message enable bitmap. */
1073 static void set_msglevel(struct net_device *dev, u32 val)
1075 struct port_info *pi = netdev_priv(dev);
1076 struct adapter *adapter = pi->adapter;
1078 adapter->msg_enable = val;
1081 static char stats_strings[][ETH_GSTRING_LEN] = {
1084 "TxMulticastFramesOK",
1085 "TxBroadcastFramesOK",
1092 "TxFrames128To255 ",
1093 "TxFrames256To511 ",
1094 "TxFrames512To1023 ",
1095 "TxFrames1024To1518 ",
1096 "TxFrames1519ToMax ",
1100 "RxMulticastFramesOK",
1101 "RxBroadcastFramesOK",
1112 "RxFrames128To255 ",
1113 "RxFrames256To511 ",
1114 "RxFrames512To1023 ",
1115 "RxFrames1024To1518 ",
1116 "RxFrames1519ToMax ",
1126 "CheckTXEnToggled ",
/* ethtool: number of statistics entries exported (matches stats_strings). */
1131 static int get_stats_count(struct net_device *dev)
1133 return ARRAY_SIZE(stats_strings);
1136 #define T3_REGMAP_SIZE (3 * 1024)
/* ethtool: size in bytes of the register dump buffer. */
1138 static int get_regs_len(struct net_device *dev)
1140 return T3_REGMAP_SIZE;
1143 static int get_eeprom_len(struct net_device *dev)
/* ethtool get_drvinfo: driver/bus identification plus a combined
 * firmware + TP microcode version string ("N/A" until overwritten). */
1148 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1150 struct port_info *pi = netdev_priv(dev);
1151 struct adapter *adapter = pi->adapter;
1155 t3_get_fw_version(adapter, &fw_vers);
1156 t3_get_tp_version(adapter, &tp_vers);
1158 strcpy(info->driver, DRV_NAME);
1159 strcpy(info->version, DRV_VERSION);
1160 strcpy(info->bus_info, pci_name(adapter->pdev));
1162 strcpy(info->fw_version, "N/A");
1164 snprintf(info->fw_version, sizeof(info->fw_version),
1165 "%s %u.%u.%u TP %u.%u.%u",
1166 G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
1167 G_FW_VERSION_MAJOR(fw_vers),
1168 G_FW_VERSION_MINOR(fw_vers),
1169 G_FW_VERSION_MICRO(fw_vers),
1170 G_TP_VERSION_MAJOR(tp_vers),
1171 G_TP_VERSION_MINOR(tp_vers),
1172 G_TP_VERSION_MICRO(tp_vers));
/* ethtool get_strings: copy out the statistics name table. */
1176 static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
1178 if (stringset == ETH_SS_STATS)
1179 memcpy(data, stats_strings, sizeof(stats_strings));
/* Sum one SGE per-queue statistic (index idx) across all of a port's
 * queue sets. */
1182 static unsigned long collect_sge_port_stats(struct adapter *adapter,
1183 struct port_info *p, int idx)
1186 unsigned long tot = 0;
1188 for (i = 0; i < p->nqsets; ++i)
1189 tot += adapter->sge.qs[i + p->first_qset].port_stats[idx];
/* ethtool get_ethtool_stats: emit counters in the exact order of
 * stats_strings[]; stats_lock serializes the hardware counter update. */
1193 static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1196 struct port_info *pi = netdev_priv(dev);
1197 struct adapter *adapter = pi->adapter;
1198 const struct mac_stats *s;
1200 spin_lock(&adapter->stats_lock);
1201 s = t3_mac_update_stats(&pi->mac);
1202 spin_unlock(&adapter->stats_lock);
1204 *data++ = s->tx_octets;
1205 *data++ = s->tx_frames;
1206 *data++ = s->tx_mcast_frames;
1207 *data++ = s->tx_bcast_frames;
1208 *data++ = s->tx_pause;
1209 *data++ = s->tx_underrun;
1210 *data++ = s->tx_fifo_urun;
1212 *data++ = s->tx_frames_64;
1213 *data++ = s->tx_frames_65_127;
1214 *data++ = s->tx_frames_128_255;
1215 *data++ = s->tx_frames_256_511;
1216 *data++ = s->tx_frames_512_1023;
1217 *data++ = s->tx_frames_1024_1518;
1218 *data++ = s->tx_frames_1519_max;
1220 *data++ = s->rx_octets;
1221 *data++ = s->rx_frames;
1222 *data++ = s->rx_mcast_frames;
1223 *data++ = s->rx_bcast_frames;
1224 *data++ = s->rx_pause;
1225 *data++ = s->rx_fcs_errs;
1226 *data++ = s->rx_symbol_errs;
1227 *data++ = s->rx_short;
1228 *data++ = s->rx_jabber;
1229 *data++ = s->rx_too_long;
1230 *data++ = s->rx_fifo_ovfl;
1232 *data++ = s->rx_frames_64;
1233 *data++ = s->rx_frames_65_127;
1234 *data++ = s->rx_frames_128_255;
1235 *data++ = s->rx_frames_256_511;
1236 *data++ = s->rx_frames_512_1023;
1237 *data++ = s->rx_frames_1024_1518;
1238 *data++ = s->rx_frames_1519_max;
1240 *data++ = pi->phy.fifo_errors;
/* Per-queue SGE counters are summed across the port's queue sets. */
1242 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
1243 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
1244 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
1245 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
1246 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
1247 *data++ = s->rx_cong_drops;
1249 *data++ = s->num_toggled;
1250 *data++ = s->num_resets;
/* Copy the register range [start, end] into buf at byte offset start;
 * both the buffer offset and the register address advance in u32 steps. */
1253 static inline void reg_block_dump(struct adapter *ap, void *buf,
1254 unsigned int start, unsigned int end)
1256 u32 *p = buf + start;
1258 for (; start <= end; start += sizeof(u32))
1259 *p++ = t3_read_reg(ap, start);
/* ethtool get_regs: dump selected register blocks into buf. */
1262 static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1265 struct port_info *pi = netdev_priv(dev);
1266 struct adapter *ap = pi->adapter;
1270 * bits 0..9: chip version
1271 * bits 10..15: chip revision
1272 * bit 31: set for PCIe cards
1274 regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);
1277 * We skip the MAC statistics registers because they are clear-on-read.
1278 * Also reading multi-register stats would need to synchronize with the
1279 * periodic mac stats accumulation. Hard to justify the complexity.
1281 memset(buf, 0, T3_REGMAP_SIZE);
1282 reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
1283 reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
1284 reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
1285 reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
1286 reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
1287 reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
1288 XGM_REG(A_XGM_SERDES_STAT3, 1));
1289 reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
1290 XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
/* ethtool nway_reset: restart autonegotiation; only valid while the
 * interface is running and autoneg is enabled. */
1293 static int restart_autoneg(struct net_device *dev)
1295 struct port_info *p = netdev_priv(dev);
1297 if (!netif_running(dev))
1299 if (p->link_config.autoneg != AUTONEG_ENABLE)
1301 p->phy.ops->autoneg_restart(&p->phy);
/* ethtool phys_id: toggle GPIO0 every 500 ms for "data" seconds
 * (presumably wired to the identification LED — confirm), restoring the
 * pin state afterwards. */
1305 static int cxgb3_phys_id(struct net_device *dev, u32 data)
1307 struct port_info *pi = netdev_priv(dev);
1308 struct adapter *adapter = pi->adapter;
1314 for (i = 0; i < data * 2; i++) {
1315 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1316 (i & 1) ? F_GPIO0_OUT_VAL : 0);
/* Abort early if the sleep is interrupted by a signal. */
1317 if (msleep_interruptible(500))
1320 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
/* ethtool get_settings: report supported/advertised modes, the current
 * speed/duplex (only meaningful while the carrier is up), the PHY
 * address, and the autoneg state. */
1325 static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1327 struct port_info *p = netdev_priv(dev);
1329 cmd->supported = p->link_config.supported;
1330 cmd->advertising = p->link_config.advertising;
1332 if (netif_carrier_ok(dev)) {
1333 cmd->speed = p->link_config.speed;
1334 cmd->duplex = p->link_config.duplex;
1340 cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
1341 cmd->phy_address = p->phy.addr;
1342 cmd->transceiver = XCVR_EXTERNAL;
1343 cmd->autoneg = p->link_config.autoneg;
/* Translate a (speed, duplex) pair into the corresponding SUPPORTED_*
 * capability bit; 10G has a full-duplex bit only.
 * NOTE(review): the switch statement wrapping these cases is elided in
 * this extract. */
1349 static int speed_duplex_to_caps(int speed, int duplex)
1355 if (duplex == DUPLEX_FULL)
1356 cap = SUPPORTED_10baseT_Full;
1358 cap = SUPPORTED_10baseT_Half;
1361 if (duplex == DUPLEX_FULL)
1362 cap = SUPPORTED_100baseT_Full;
1364 cap = SUPPORTED_100baseT_Half;
1367 if (duplex == DUPLEX_FULL)
1368 cap = SUPPORTED_1000baseT_Full;
1370 cap = SUPPORTED_1000baseT_Half;
1373 if (duplex == DUPLEX_FULL)
1374 cap = SUPPORTED_10000baseT_Full;
/* All ADVERTISED_* link-mode bits this driver understands; used to clamp
 * user-supplied advertising masks in set_settings(). */
1379 #define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1380 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1381 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
1382 ADVERTISED_10000baseT_Full)
/*
 * ethtool ->set_settings: change speed/duplex/autoneg for the port.
 * Forced (autoneg-off) mode validates the requested speed/duplex against
 * the port's capabilities; autoneg mode clamps the advertised modes to
 * what the hardware supports.  Applies immediately if the port is up.
 */
1384 static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1386 struct port_info *p = netdev_priv(dev);
1387 struct link_config *lc = &p->link_config;
1389 if (!(lc->supported & SUPPORTED_Autoneg))
1390 return -EOPNOTSUPP; /* can't change speed/duplex */
1392 if (cmd->autoneg == AUTONEG_DISABLE) {
1393 int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
/* 1000BASE-T requires autonegotiation, so forcing 1G is rejected. */
1395 if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
1397 lc->requested_speed = cmd->speed;
1398 lc->requested_duplex = cmd->duplex;
1399 lc->advertising = 0;
/* Autoneg path: restrict advertising to known, supported modes. */
1401 cmd->advertising &= ADVERTISED_MASK;
1402 cmd->advertising &= lc->supported;
1403 if (!cmd->advertising)
1405 lc->requested_speed = SPEED_INVALID;
1406 lc->requested_duplex = DUPLEX_INVALID;
1407 lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
1409 lc->autoneg = cmd->autoneg;
/* Push the new configuration to PHY/MAC if the interface is running. */
1410 if (netif_running(dev))
1411 t3_link_start(&p->phy, &p->mac, lc);
/* ethtool ->get_pauseparam: report flow-control autoneg and RX/TX pause. */
1415 static void get_pauseparam(struct net_device *dev,
1416 struct ethtool_pauseparam *epause)
1418 struct port_info *p = netdev_priv(dev);
1420 epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
1421 epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
1422 epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
/*
 * ethtool ->set_pauseparam: configure flow control.  With pause autoneg
 * the settings are renegotiated via t3_link_start(); otherwise they are
 * applied directly to the MAC.
 */
1425 static int set_pauseparam(struct net_device *dev,
1426 struct ethtool_pauseparam *epause)
1428 struct port_info *p = netdev_priv(dev);
1429 struct link_config *lc = &p->link_config;
1431 if (epause->autoneg == AUTONEG_DISABLE)
1432 lc->requested_fc = 0;
1433 else if (lc->supported & SUPPORTED_Autoneg)
1434 lc->requested_fc = PAUSE_AUTONEG;
1438 if (epause->rx_pause)
1439 lc->requested_fc |= PAUSE_RX;
1440 if (epause->tx_pause)
1441 lc->requested_fc |= PAUSE_TX;
1442 if (lc->autoneg == AUTONEG_ENABLE) {
/* Renegotiate so the link partner learns the new pause settings. */
1443 if (netif_running(dev))
1444 t3_link_start(&p->phy, &p->mac, lc);
1446 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
/* -1 speed/duplex means "leave unchanged"; only fc is updated here. */
1447 if (netif_running(dev))
1448 t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
/* ethtool ->get_rx_csum: report whether RX checksum offload is enabled. */
1453 static u32 get_rx_csum(struct net_device *dev)
1455 struct port_info *p = netdev_priv(dev);
1457 return p->rx_csum_offload;
/* ethtool ->set_rx_csum: enable/disable RX checksum offload for the port. */
1460 static int set_rx_csum(struct net_device *dev, u32 data)
1462 struct port_info *p = netdev_priv(dev);
1464 p->rx_csum_offload = data;
/*
 * ethtool ->get_ringparam: report SGE ring sizes for the port's first
 * queue set.  The rx_mini fields are repurposed to carry the response
 * queue size (the hardware has no separate mini ring).
 */
1468 static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1470 struct port_info *pi = netdev_priv(dev);
1471 struct adapter *adapter = pi->adapter;
1472 const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];
1474 e->rx_max_pending = MAX_RX_BUFFERS;
1475 e->rx_mini_max_pending = 0;
1476 e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
1477 e->tx_max_pending = MAX_TXQ_ENTRIES;
1479 e->rx_pending = q->fl_size;
/* rx_mini_pending carries the response-queue size, see above. */
1480 e->rx_mini_pending = q->rspq_size;
1481 e->rx_jumbo_pending = q->jumbo_size;
1482 e->tx_pending = q->txq_size[0];
/*
 * ethtool ->set_ringparam: validate and apply new SGE ring sizes to every
 * queue set owned by this port.  Rejected once the adapter is fully
 * initialized, since rings cannot be resized while in use.
 */
1485 static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1487 struct port_info *pi = netdev_priv(dev);
1488 struct adapter *adapter = pi->adapter;
1489 struct qset_params *q;
/* Range-check all requested sizes before touching any state. */
1492 if (e->rx_pending > MAX_RX_BUFFERS ||
1493 e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
1494 e->tx_pending > MAX_TXQ_ENTRIES ||
1495 e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1496 e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1497 e->rx_pending < MIN_FL_ENTRIES ||
1498 e->rx_jumbo_pending < MIN_FL_ENTRIES ||
1499 e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
/* Cannot resize rings after full initialization. */
1502 if (adapter->flags & FULL_INIT_DONE)
1505 q = &adapter->params.sge.qset[pi->first_qset];
1506 for (i = 0; i < pi->nqsets; ++i, ++q) {
1507 q->rspq_size = e->rx_mini_pending;
1508 q->fl_size = e->rx_pending;
1509 q->jumbo_size = e->rx_jumbo_pending;
/* One tx_pending value sizes all three TX queues of a qset. */
1510 q->txq_size[0] = e->tx_pending;
1511 q->txq_size[1] = e->tx_pending;
1512 q->txq_size[2] = e->tx_pending;
/*
 * ethtool ->set_coalesce: set the RX interrupt coalescing time.
 * NOTE(review): only queue set 0 is updated here -- per-qset coalescing
 * is handled via the private ioctl instead.
 */
1517 static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1519 struct port_info *pi = netdev_priv(dev);
1520 struct adapter *adapter = pi->adapter;
1521 struct qset_params *qsp = &adapter->params.sge.qset[0];
1522 struct sge_qset *qs = &adapter->sge.qs[0];
/* Hardware timer counts in 100ns units, bounded by M_NEWTIMER. */
1524 if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
1527 qsp->coalesce_usecs = c->rx_coalesce_usecs;
1528 t3_update_qset_coalesce(qs, qsp);
/* ethtool ->get_coalesce: report queue set 0's RX coalescing time. */
1532 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1534 struct port_info *pi = netdev_priv(dev);
1535 struct adapter *adapter = pi->adapter;
1536 struct qset_params *q = adapter->params.sge.qset;
1538 c->rx_coalesce_usecs = q->coalesce_usecs;
/*
 * ethtool ->get_eeprom: read the serial EEPROM.  Reads whole 32-bit words
 * into a full-size bounce buffer (the EEPROM interface is word based),
 * then copies out just the requested [offset, offset+len) byte range.
 */
1542 static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
1545 struct port_info *pi = netdev_priv(dev);
1546 struct adapter *adapter = pi->adapter;
1549 u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
1553 e->magic = EEPROM_MAGIC;
/* Start at a 4-byte-aligned offset; stop as soon as a read fails. */
1554 for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
1555 err = t3_seeprom_read(adapter, i, (u32 *) & buf[i]);
1558 memcpy(data, buf + e->offset, e->len);
/*
 * ethtool ->set_eeprom: write the serial EEPROM.  Writes must be whole
 * aligned 32-bit words, so partial first/last words are handled by a
 * read-modify-write through a bounce buffer.  The EEPROM is unprotected
 * for the duration of the write and re-protected afterwards.
 */
1563 static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
1566 struct port_info *pi = netdev_priv(dev);
1567 struct adapter *adapter = pi->adapter;
1568 u32 aligned_offset, aligned_len, *p;
1572 if (eeprom->magic != EEPROM_MAGIC)
/* Round the request out to aligned word boundaries. */
1575 aligned_offset = eeprom->offset & ~3;
1576 aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
1578 if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
1579 buf = kmalloc(aligned_len, GFP_KERNEL);
/* Pre-read the partial first (and, if distinct, last) words. */
1582 err = t3_seeprom_read(adapter, aligned_offset, (u32 *) buf);
1583 if (!err && aligned_len > 4)
1584 err = t3_seeprom_read(adapter,
1585 aligned_offset + aligned_len - 4,
1586 (u32 *) & buf[aligned_len - 4]);
1589 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
/* Disable write protection before programming. */
1593 err = t3_seeprom_wp(adapter, 0);
1597 for (p = (u32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
1598 err = t3_seeprom_write(adapter, aligned_offset, *p);
1599 aligned_offset += 4;
/* Re-enable write protection regardless of earlier errors. */
1603 err = t3_seeprom_wp(adapter, 1);
/* ethtool ->get_wol: Wake-on-LAN is not supported; report nothing. */
1610 static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1614 memset(&wol->sopass, 0, sizeof(wol->sopass));
/* ethtool operations table for cxgb3 network devices. */
1617 static const struct ethtool_ops cxgb_ethtool_ops = {
1618 .get_settings = get_settings,
1619 .set_settings = set_settings,
1620 .get_drvinfo = get_drvinfo,
1621 .get_msglevel = get_msglevel,
1622 .set_msglevel = set_msglevel,
1623 .get_ringparam = get_sge_param,
1624 .set_ringparam = set_sge_param,
1625 .get_coalesce = get_coalesce,
1626 .set_coalesce = set_coalesce,
1627 .get_eeprom_len = get_eeprom_len,
1628 .get_eeprom = get_eeprom,
1629 .set_eeprom = set_eeprom,
1630 .get_pauseparam = get_pauseparam,
1631 .set_pauseparam = set_pauseparam,
1632 .get_rx_csum = get_rx_csum,
1633 .set_rx_csum = set_rx_csum,
1634 .get_tx_csum = ethtool_op_get_tx_csum,
1635 .set_tx_csum = ethtool_op_set_tx_csum,
1636 .get_sg = ethtool_op_get_sg,
1637 .set_sg = ethtool_op_set_sg,
1638 .get_link = ethtool_op_get_link,
1639 .get_strings = get_strings,
1640 .phys_id = cxgb3_phys_id,
1641 .nway_reset = restart_autoneg,
1642 .get_stats_count = get_stats_count,
1643 .get_ethtool_stats = get_stats,
1644 .get_regs_len = get_regs_len,
1645 .get_regs = get_regs,
1647 .get_tso = ethtool_op_get_tso,
1648 .set_tso = ethtool_op_set_tso,
/*
 * Range check helper for ioctl parameters.  A negative value means
 * "not specified" and deliberately passes the check; non-negative values
 * must lie within [lo, hi].
 */
1651 static int in_range(int val, int lo, int hi)
1653 return val < 0 || (val <= hi && val >= lo);
/*
 * Chelsio private ioctl dispatcher (reached via SIOCCHIOCTL).  Each case
 * copies a command-specific structure from user space, validates it, and
 * applies/reports the corresponding adapter state.  Privileged commands
 * require CAP_NET_ADMIN.  Throughout, negative fields in user-supplied
 * structures mean "leave unchanged" (see in_range()).
 */
1656 static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1658 struct port_info *pi = netdev_priv(dev);
1659 struct adapter *adapter = pi->adapter;
1663 if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
/* Configure a single queue set's ring sizes, coalescing and polling. */
1667 case CHELSIO_SET_QSET_PARAMS:{
1669 struct qset_params *q;
1670 struct ch_qset_params t;
1672 if (!capable(CAP_NET_ADMIN))
1674 if (copy_from_user(&t, useraddr, sizeof(t)))
1676 if (t.qset_idx >= SGE_QSETS)
1678 if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
1679 !in_range(t.cong_thres, 0, 255) ||
1680 !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
1682 !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
1684 !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
1685 MAX_CTRL_TXQ_ENTRIES) ||
1686 !in_range(t.fl_size[0], MIN_FL_ENTRIES,
1688 || !in_range(t.fl_size[1], MIN_FL_ENTRIES,
1689 MAX_RX_JUMBO_BUFFERS)
1690 || !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
/* Ring sizes cannot change after full initialization. */
1693 if ((adapter->flags & FULL_INIT_DONE) &&
1694 (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
1695 t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
1696 t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
1697 t.polling >= 0 || t.cong_thres >= 0))
1700 q = &adapter->params.sge.qset[t.qset_idx];
1702 if (t.rspq_size >= 0)
1703 q->rspq_size = t.rspq_size;
1704 if (t.fl_size[0] >= 0)
1705 q->fl_size = t.fl_size[0];
1706 if (t.fl_size[1] >= 0)
1707 q->jumbo_size = t.fl_size[1];
1708 if (t.txq_size[0] >= 0)
1709 q->txq_size[0] = t.txq_size[0];
1710 if (t.txq_size[1] >= 0)
1711 q->txq_size[1] = t.txq_size[1];
1712 if (t.txq_size[2] >= 0)
1713 q->txq_size[2] = t.txq_size[2];
1714 if (t.cong_thres >= 0)
1715 q->cong_thres = t.cong_thres;
/* Coalescing can be changed at run time. */
1716 if (t.intr_lat >= 0) {
1717 struct sge_qset *qs =
1718 &adapter->sge.qs[t.qset_idx];
1720 q->coalesce_usecs = t.intr_lat;
1721 t3_update_qset_coalesce(qs, q);
1723 if (t.polling >= 0) {
1724 if (adapter->flags & USING_MSIX)
1725 q->polling = t.polling;
1727 /* No polling with INTx for T3A */
1728 if (adapter->params.rev == 0 &&
1729 !(adapter->flags & USING_MSI))
/* With shared interrupts all qsets share one polling setting. */
1732 for (i = 0; i < SGE_QSETS; i++) {
1733 q = &adapter->params.sge.
1735 q->polling = t.polling;
/* Report a queue set's current parameters back to user space. */
1741 case CHELSIO_GET_QSET_PARAMS:{
1742 struct qset_params *q;
1743 struct ch_qset_params t;
1745 if (copy_from_user(&t, useraddr, sizeof(t)))
1747 if (t.qset_idx >= SGE_QSETS)
1750 q = &adapter->params.sge.qset[t.qset_idx];
1751 t.rspq_size = q->rspq_size;
1752 t.txq_size[0] = q->txq_size[0];
1753 t.txq_size[1] = q->txq_size[1];
1754 t.txq_size[2] = q->txq_size[2];
1755 t.fl_size[0] = q->fl_size;
1756 t.fl_size[1] = q->jumbo_size;
1757 t.polling = q->polling;
1758 t.intr_lat = q->coalesce_usecs;
1759 t.cong_thres = q->cong_thres;
1761 if (copy_to_user(useraddr, &t, sizeof(t)))
/* Change how many queue sets this port owns; renumber first_qset
 * for every port so assignments stay contiguous. */
1765 case CHELSIO_SET_QSET_NUM:{
1766 struct ch_reg edata;
1767 struct port_info *pi = netdev_priv(dev);
1768 unsigned int i, first_qset = 0, other_qsets = 0;
1770 if (!capable(CAP_NET_ADMIN))
1772 if (adapter->flags & FULL_INIT_DONE)
1774 if (copy_from_user(&edata, useraddr, sizeof(edata)))
/* More than one qset per port requires MSI-X. */
1776 if (edata.val < 1 ||
1777 (edata.val > 1 && !(adapter->flags & USING_MSIX)))
1780 for_each_port(adapter, i)
1781 if (adapter->port[i] && adapter->port[i] != dev)
1782 other_qsets += adap2pinfo(adapter, i)->nqsets;
1784 if (edata.val + other_qsets > SGE_QSETS)
1787 pi->nqsets = edata.val;
1789 for_each_port(adapter, i)
1790 if (adapter->port[i]) {
1791 pi = adap2pinfo(adapter, i);
1792 pi->first_qset = first_qset;
1793 first_qset += pi->nqsets;
/* Report the port's queue set count. */
1797 case CHELSIO_GET_QSET_NUM:{
1798 struct ch_reg edata;
1799 struct port_info *pi = netdev_priv(dev);
1801 edata.cmd = CHELSIO_GET_QSET_NUM;
1802 edata.val = pi->nqsets;
1803 if (copy_to_user(useraddr, &edata, sizeof(edata)))
/* Load a firmware image supplied from user space. */
1807 case CHELSIO_LOAD_FW:{
1809 struct ch_mem_range t;
1811 if (!capable(CAP_NET_ADMIN))
1813 if (copy_from_user(&t, useraddr, sizeof(t)))
/* NOTE(review): t.len is user-controlled and used unchecked as the
 * kmalloc size -- worth bounding; visible lines do not show a check. */
1816 fw_data = kmalloc(t.len, GFP_KERNEL);
1821 (fw_data, useraddr + sizeof(t), t.len)) {
1826 ret = t3_load_fw(adapter, fw_data, t.len);
/* Replace the adapter's congestion-control MTU table. */
1832 case CHELSIO_SETMTUTAB:{
1836 if (!is_offload(adapter))
1838 if (!capable(CAP_NET_ADMIN))
1840 if (offload_running(adapter))
1842 if (copy_from_user(&m, useraddr, sizeof(m)))
1844 if (m.nmtus != NMTUS)
1846 if (m.mtus[0] < 81) /* accommodate SACK */
1849 /* MTUs must be in ascending order */
1850 for (i = 1; i < NMTUS; ++i)
1851 if (m.mtus[i] < m.mtus[i - 1])
1854 memcpy(adapter->params.mtus, m.mtus,
1855 sizeof(adapter->params.mtus));
/* Report payload-memory (PM) layout. */
1858 case CHELSIO_GET_PM:{
1859 struct tp_params *p = &adapter->params.tp;
1860 struct ch_pm m = {.cmd = CHELSIO_GET_PM };
1862 if (!is_offload(adapter))
1864 m.tx_pg_sz = p->tx_pg_size;
1865 m.tx_num_pg = p->tx_num_pgs;
1866 m.rx_pg_sz = p->rx_pg_size;
1867 m.rx_num_pg = p->rx_num_pgs;
1868 m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
1869 if (copy_to_user(useraddr, &m, sizeof(m)))
/* Reconfigure payload-memory page sizes/counts (pre-init only). */
1873 case CHELSIO_SET_PM:{
1875 struct tp_params *p = &adapter->params.tp;
1877 if (!is_offload(adapter))
1879 if (!capable(CAP_NET_ADMIN))
1881 if (adapter->flags & FULL_INIT_DONE)
1883 if (copy_from_user(&m, useraddr, sizeof(m)))
1885 if (!is_power_of_2(m.rx_pg_sz) ||
1886 !is_power_of_2(m.tx_pg_sz))
1887 return -EINVAL; /* not power of 2 */
/* Bit masks 0x14000 / 0x1554000 encode the allowed page sizes. */
1888 if (!(m.rx_pg_sz & 0x14000))
1889 return -EINVAL; /* not 16KB or 64KB */
1890 if (!(m.tx_pg_sz & 0x1554000))
1892 if (m.tx_num_pg == -1)
1893 m.tx_num_pg = p->tx_num_pgs;
1894 if (m.rx_num_pg == -1)
1895 m.rx_num_pg = p->rx_num_pgs;
1896 if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
1898 if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
1899 m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
1901 p->rx_pg_size = m.rx_pg_sz;
1902 p->tx_pg_size = m.tx_pg_sz;
1903 p->rx_num_pgs = m.rx_num_pg;
1904 p->tx_num_pgs = m.tx_num_pg;
/* Dump a range of MC7 memory (CM/PMRX/PMTX) to user space. */
1907 case CHELSIO_GET_MEM:{
1908 struct ch_mem_range t;
1912 if (!is_offload(adapter))
1914 if (!(adapter->flags & FULL_INIT_DONE))
1915 return -EIO; /* need the memory controllers */
1916 if (copy_from_user(&t, useraddr, sizeof(t)))
/* MC7 reads are 8-byte granular. */
1918 if ((t.addr & 7) || (t.len & 7))
1920 if (t.mem_id == MEM_CM)
1922 else if (t.mem_id == MEM_PMRX)
1923 mem = &adapter->pmrx;
1924 else if (t.mem_id == MEM_PMTX)
1925 mem = &adapter->pmtx;
1931 * bits 0..9: chip version
1932 * bits 10..15: chip revision
1934 t.version = 3 | (adapter->params.rev << 10);
1935 if (copy_to_user(useraddr, &t, sizeof(t)))
1939 * Read 256 bytes at a time as len can be large and we don't
1940 * want to use huge intermediate buffers.
1942 useraddr += sizeof(t); /* advance to start of buffer */
1944 unsigned int chunk =
1945 min_t(unsigned int, t.len, sizeof(buf));
1948 t3_mc7_bd_read(mem, t.addr / 8, chunk / 8,
1952 if (copy_to_user(useraddr, buf, chunk))
/* Install the packet trace filter (both TX and RX directions). */
1960 case CHELSIO_SET_TRACE_FILTER:{
1962 const struct trace_params *tp;
1964 if (!capable(CAP_NET_ADMIN))
1966 if (!offload_running(adapter))
1968 if (copy_from_user(&t, useraddr, sizeof(t)))
/* trace_params overlays the tail of the user structure at t.sip. */
1971 tp = (const struct trace_params *)&t.sip;
1973 t3_config_trace_filter(adapter, tp, 0,
1977 t3_config_trace_filter(adapter, tp, 1,
/*
 * net_device ->do_ioctl: handles standard MII ioctls (PHY id, register
 * read/write) and forwards the Chelsio private ioctl to
 * cxgb_extension_ioctl().  On 10G parts the MDIO address is clause-45
 * style: the MMD (device) number is carried in the upper bits of phy_id.
 */
1988 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
1990 struct mii_ioctl_data *data = if_mii(req);
1991 struct port_info *pi = netdev_priv(dev);
1992 struct adapter *adapter = pi->adapter;
1997 data->phy_id = pi->phy.addr;
2001 struct cphy *phy = &pi->phy;
2003 if (!phy->mdio_read)
2005 if (is_10G(adapter)) {
/* Clause 45: extract the MMD from bits 8+ of phy_id. */
2006 mmd = data->phy_id >> 8;
2009 else if (mmd > MDIO_DEV_XGXS)
2013 phy->mdio_read(adapter, data->phy_id & 0x1f,
2014 mmd, data->reg_num, &val);
/* Clause 22: 5-bit register number, no MMD. */
2017 phy->mdio_read(adapter, data->phy_id & 0x1f,
2018 0, data->reg_num & 0x1f,
2021 data->val_out = val;
2025 struct cphy *phy = &pi->phy;
/* Register writes are privileged. */
2027 if (!capable(CAP_NET_ADMIN))
2029 if (!phy->mdio_write)
2031 if (is_10G(adapter)) {
2032 mmd = data->phy_id >> 8;
2035 else if (mmd > MDIO_DEV_XGXS)
2039 phy->mdio_write(adapter,
2040 data->phy_id & 0x1f, mmd,
2045 phy->mdio_write(adapter,
2046 data->phy_id & 0x1f, 0,
2047 data->reg_num & 0x1f,
2052 return cxgb_extension_ioctl(dev, req->ifr_data);
/*
 * net_device ->change_mtu: validate and apply a new MTU, then refresh
 * the per-port MTU state and, on rev-0 (T3A) offload-capable adapters,
 * reload the hardware congestion MTU table.
 */
2061 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
2063 struct port_info *pi = netdev_priv(dev);
2064 struct adapter *adapter = pi->adapter;
/* 81 is the minimum MTU that still leaves room for TCP SACK options. */
2067 if (new_mtu < 81) /* accommodate SACK */
2069 if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
2072 init_port_mtus(adapter);
2073 if (adapter->params.rev == 0 && offload_running(adapter))
2074 t3_load_mtus(adapter, adapter->params.mtus,
2075 adapter->params.a_wnd, adapter->params.b_wnd,
2076 adapter->port[0]->mtu);
/*
 * net_device ->set_mac_address: validate and program a new MAC address,
 * updating the SMT entry when offload is active so offloaded connections
 * use the new source MAC.
 */
2080 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2082 struct port_info *pi = netdev_priv(dev);
2083 struct adapter *adapter = pi->adapter;
2084 struct sockaddr *addr = p;
2086 if (!is_valid_ether_addr(addr->sa_data))
2089 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2090 t3_mac_set_address(&pi->mac, 0, dev->dev_addr);
2091 if (offload_running(adapter))
2092 write_smt_entry(adapter, pi->port_id);
2095 * t3_synchronize_rx - wait for current Rx processing on a port to complete
2096 * @adap: the adapter
2099 * Ensures that current Rx processing on any of the queues associated with
2100 * the given port completes before returning. We do this by acquiring and
2101 * releasing the locks of the response queues associated with the port.
2103 static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
2107 for (i = 0; i < p->nqsets; i++) {
2108 struct sge_rspq *q = &adap->sge.qs[i + p->first_qset].rspq;
/* Lock/unlock is a barrier: any in-flight handler holding q->lock
 * must have finished before we can acquire it. */
2110 spin_lock_irq(&q->lock);
2111 spin_unlock_irq(&q->lock);
/*
 * net_device ->vlan_rx_register: enable/disable hardware VLAN
 * acceleration.  Rev > 0 chips support per-port control; rev 0 has a
 * single global control, so it stays on while any port has a VLAN group.
 */
2115 static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
2117 struct port_info *pi = netdev_priv(dev);
2118 struct adapter *adapter = pi->adapter;
2121 if (adapter->params.rev > 0)
2122 t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
2124 /* single control for all ports */
2125 unsigned int i, have_vlans = 0;
2126 for_each_port(adapter, i)
2127 have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;
2129 t3_set_vlan_accel(adapter, 1, have_vlans);
/* Make sure no RX handler still sees the old VLAN setting. */
2131 t3_synchronize_rx(adapter, pi);
2134 #ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * net_device ->poll_controller: service this port's queue sets with
 * interrupts effectively disabled (netconsole/kgdb path) by invoking the
 * appropriate interrupt handler directly for each qset.
 */
2135 static void cxgb_netpoll(struct net_device *dev)
2137 struct port_info *pi = netdev_priv(dev);
2138 struct adapter *adapter = pi->adapter;
2141 for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
2142 struct sge_qset *qs = &adapter->sge.qs[qidx];
2145 if (adapter->flags & USING_MSIX)
/* Call the handler that matches the current interrupt mode. */
2150 t3_intr_handler(adapter, qs->rspq.polling) (0, source);
2156 * Periodic accumulation of MAC statistics.
2158 static void mac_stats_update(struct adapter *adapter)
2162 for_each_port(adapter, i) {
2163 struct net_device *dev = adapter->port[i];
2164 struct port_info *p = netdev_priv(dev);
2166 if (netif_running(dev)) {
/* stats_lock serializes with readers of the accumulated counters. */
2167 spin_lock(&adapter->stats_lock);
2168 t3_mac_update_stats(&p->mac);
2169 spin_unlock(&adapter->stats_lock);
/*
 * Poll link state for PHYs that cannot raise an interrupt
 * (no SUPPORTED_IRQ capability) on running interfaces.
 */
2174 static void check_link_status(struct adapter *adapter)
2178 for_each_port(adapter, i) {
2179 struct net_device *dev = adapter->port[i];
2180 struct port_info *p = netdev_priv(dev);
2182 if (!(p->port_type->caps & SUPPORTED_IRQ) && netif_running(dev))
2183 t3_link_changed(adapter, i);
/*
 * T3B2 MAC watchdog: periodically checks each running port's MAC and,
 * depending on the watchdog status, either counts a toggle or fully
 * reinitializes the MAC (MTU, address, rxmode, link, interrupts).
 */
2187 static void check_t3b2_mac(struct adapter *adapter)
/* trylock: if an ifdown is in progress just skip this round. */
2191 if (!rtnl_trylock()) /* synchronize with ifdown */
2194 for_each_port(adapter, i) {
2195 struct net_device *dev = adapter->port[i];
2196 struct port_info *p = netdev_priv(dev);
2199 if (!netif_running(dev))
2203 if (netif_running(dev) && netif_carrier_ok(dev))
2204 status = t3b2_mac_watchdog_task(&p->mac);
2206 p->mac.stats.num_toggled++;
/* status == 2: watchdog asked for a full MAC reset/reprogram. */
2207 else if (status == 2) {
2208 struct cmac *mac = &p->mac;
2210 t3_mac_set_mtu(mac, dev->mtu);
2211 t3_mac_set_address(mac, 0, dev->dev_addr);
2212 cxgb_set_rxmode(dev);
2213 t3_link_start(&p->phy, mac, &p->link_config);
2214 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
2215 t3_port_intr_enable(adapter, p->port_id);
2216 p->mac.stats.num_resets++;
/*
 * Periodic adapter housekeeping (delayed work): polls link state for
 * interrupt-less PHYs, accumulates MAC statistics at the configured
 * period, runs the T3B2 MAC watchdog, and reschedules itself while any
 * port remains open.
 */
2223 static void t3_adap_check_task(struct work_struct *work)
2225 struct adapter *adapter = container_of(work, struct adapter,
2226 adap_check_task.work);
2227 const struct adapter_params *p = &adapter->params;
2229 adapter->check_task_cnt++;
2231 /* Check link status for PHYs without interrupts */
2232 if (p->linkpoll_period)
2233 check_link_status(adapter);
2235 /* Accumulate MAC stats if needed */
2236 if (!p->linkpoll_period ||
2237 (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
2238 p->stats_update_period) {
2239 mac_stats_update(adapter);
2240 adapter->check_task_cnt = 0;
2243 if (p->rev == T3_REV_B2)
2244 check_t3b2_mac(adapter);
2246 /* Schedule the next check update if any port is active. */
2247 spin_lock(&adapter->work_lock)
2248 if (adapter->open_device_map & PORT_MASK)
2249 schedule_chk_task(adapter);
2250 spin_unlock(&adapter->work_lock);
2254 * Processes external (PHY) interrupts in process context.
2256 static void ext_intr_task(struct work_struct *work)
2258 struct adapter *adapter = container_of(work, struct adapter,
2259 ext_intr_handler_task);
2261 t3_phy_intr_handler(adapter);
2263 /* Now reenable external interrupts */
2264 spin_lock_irq(&adapter->work_lock);
2265 if (adapter->slow_intr_mask) {
/* Restore F_T3DBG (masked by t3_os_ext_intr_handler) and ack the
 * cause bit before unmasking. */
2266 adapter->slow_intr_mask |= F_T3DBG;
2267 t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
2268 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2269 adapter->slow_intr_mask);
2271 spin_unlock_irq(&adapter->work_lock);
2275 * Interrupt-context handler for external (PHY) interrupts.
2277 void t3_os_ext_intr_handler(struct adapter *adapter)
2280 * Schedule a task to handle external interrupts as they may be slow
2281 * and we use a mutex to protect MDIO registers. We disable PHY
2282 * interrupts in the meantime and let the task reenable them when
2285 spin_lock(&adapter->work_lock);
2286 if (adapter->slow_intr_mask) {
/* Mask F_T3DBG until ext_intr_task has serviced the PHY. */
2287 adapter->slow_intr_mask &= ~F_T3DBG;
2288 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2289 adapter->slow_intr_mask);
2290 queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
2292 spin_unlock(&adapter->work_lock);
/*
 * Handle a fatal adapter error: stop the SGE, silence both MACs, disable
 * interrupts, and log the firmware status words for post-mortem analysis.
 * The adapter is left suspended; no recovery is attempted here.
 */
2295 void t3_fatal_err(struct adapter *adapter)
2297 unsigned int fw_status[4];
2299 if (adapter->flags & FULL_INIT_DONE) {
2300 t3_sge_stop(adapter);
/* Quiesce TX/RX on both MAC instances (port 0 and port 1). */
2301 t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
2302 t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
2303 t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
2304 t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);
2305 t3_intr_disable(adapter);
2307 CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
/* 0xa0 is the CIM scratch area holding the FW status words. */
2308 if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
2309 CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
2310 fw_status[0], fw_status[1],
2311 fw_status[2], fw_status[3]);
/*
 * Try to enable MSI-X with one vector per queue set plus one for slow
 * path events.  On success the vector numbers are recorded in
 * adap->msix_info[]; on partial availability MSI-X is not used.
 */
2315 static int __devinit cxgb_enable_msix(struct adapter *adap)
2317 struct msix_entry entries[SGE_QSETS + 1];
2320 for (i = 0; i < ARRAY_SIZE(entries); ++i)
2321 entries[i].entry = i;
2323 err = pci_enable_msix(adap->pdev, entries, ARRAY_SIZE(entries));
2325 for (i = 0; i < ARRAY_SIZE(entries); ++i)
2326 adap->msix_info[i].vec = entries[i].vector;
/* err > 0 means fewer vectors were available than requested. */
2328 dev_info(&adap->pdev->dev,
2329 "only %d MSI-X vectors left, not using MSI-X\n", err);
/*
 * Log a one-line summary per registered port (adapter type, port type,
 * offload capability, PCI bus variant/width, interrupt mode), plus an
 * adapter-level memory/serial-number line once for the first device.
 */
2333 static void __devinit print_port_info(struct adapter *adap,
2334 const struct adapter_info *ai)
2336 static const char *pci_variant[] = {
2337 "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
/* PCIe reports lane width; parallel PCI reports clock and bus width. */
2344 snprintf(buf, sizeof(buf), "%s x%d",
2345 pci_variant[adap->params.pci.variant],
2346 adap->params.pci.width);
2348 snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
2349 pci_variant[adap->params.pci.variant],
2350 adap->params.pci.speed, adap->params.pci.width);
2352 for_each_port(adap, i) {
2353 struct net_device *dev = adap->port[i];
2354 const struct port_info *pi = netdev_priv(dev);
2356 if (!test_bit(i, &adap->registered_device_map))
2358 printk(KERN_INFO "%s: %s %s %sNIC (rev %d) %s%s\n",
2359 dev->name, ai->desc, pi->port_type->desc,
2360 is_offload(adap) ? "R" : "", adap->params.rev, buf,
2361 (adap->flags & USING_MSIX) ? " MSI-X" :
2362 (adap->flags & USING_MSI) ? " MSI" : "");
/* Memory summary printed only against the primary interface name. */
2363 if (adap->name == dev->name && adap->params.vpd.mclk)
2365 "%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
2366 adap->name, t3_mc7_size(&adap->cm) >> 20,
2367 t3_mc7_size(&adap->pmtx) >> 20,
2368 t3_mc7_size(&adap->pmrx) >> 20,
2369 adap->params.vpd.sn);
/*
 * PCI probe entry point: bring up one T3 adapter.  Sequence: create the
 * driver workqueue, claim/enable the PCI device, set up DMA masks, map
 * BAR0, allocate the adapter and one net_device per port, prepare the
 * hardware, register the netdevs, hook up offload, pick the interrupt
 * mode (MSI-X > MSI > INTx) and create sysfs attributes.  Error paths
 * unwind via the labels at the bottom.
 */
2373 static int __devinit init_one(struct pci_dev *pdev,
2374 const struct pci_device_id *ent)
2376 static int version_printed;
2378 int i, err, pci_using_dac = 0;
2379 unsigned long mmio_start, mmio_len;
2380 const struct adapter_info *ai;
2381 struct adapter *adapter = NULL;
2382 struct port_info *pi;
/* Print the driver banner only once, on the first probe. */
2384 if (!version_printed) {
2385 printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
2390 cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
2392 printk(KERN_ERR DRV_NAME
2393 ": cannot initialize work queue\n");
2398 err = pci_request_regions(pdev, DRV_NAME);
2400 /* Just info, some other driver may have claimed the device. */
2401 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
2405 err = pci_enable_device(pdev);
2407 dev_err(&pdev->dev, "cannot enable PCI device\n");
2408 goto out_release_regions;
/* Prefer 64-bit DMA; fall back to 32-bit if unavailable. */
2411 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2413 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2415 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
2416 "coherent allocations\n");
2417 goto out_disable_device;
2419 } else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
2420 dev_err(&pdev->dev, "no usable DMA configuration\n");
2421 goto out_disable_device;
2424 pci_set_master(pdev);
2426 mmio_start = pci_resource_start(pdev, 0);
2427 mmio_len = pci_resource_len(pdev, 0);
2428 ai = t3_get_adapter_info(ent->driver_data);
2430 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
2433 goto out_disable_device;
2436 adapter->regs = ioremap_nocache(mmio_start, mmio_len);
2437 if (!adapter->regs) {
2438 dev_err(&pdev->dev, "cannot map device registers\n");
2440 goto out_free_adapter;
2443 adapter->pdev = pdev;
2444 adapter->name = pci_name(pdev);
2445 adapter->msg_enable = dflt_msg_enable;
2446 adapter->mmio_len = mmio_len;
2448 mutex_init(&adapter->mdio_lock);
2449 spin_lock_init(&adapter->work_lock);
2450 spin_lock_init(&adapter->stats_lock);
2452 INIT_LIST_HEAD(&adapter->adapter_list);
2453 INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
2454 INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
/* Allocate and wire up one net_device per physical port. */
2456 for (i = 0; i < ai->nports; ++i) {
2457 struct net_device *netdev;
2459 netdev = alloc_etherdev(sizeof(struct port_info));
2465 SET_MODULE_OWNER(netdev);
2466 SET_NETDEV_DEV(netdev, &pdev->dev);
2468 adapter->port[i] = netdev;
2469 pi = netdev_priv(netdev);
2470 pi->adapter = adapter;
2471 pi->rx_csum_offload = 1;
2476 netif_carrier_off(netdev);
2477 netdev->irq = pdev->irq;
2478 netdev->mem_start = mmio_start;
2479 netdev->mem_end = mmio_start + mmio_len - 1;
2480 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
2481 netdev->features |= NETIF_F_LLTX;
2483 netdev->features |= NETIF_F_HIGHDMA;
2485 netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
2486 netdev->vlan_rx_register = vlan_rx_register;
2488 netdev->open = cxgb_open;
2489 netdev->stop = cxgb_close;
2490 netdev->hard_start_xmit = t3_eth_xmit;
2491 netdev->get_stats = cxgb_get_stats;
2492 netdev->set_multicast_list = cxgb_set_rxmode;
2493 netdev->do_ioctl = cxgb_ioctl;
2494 netdev->change_mtu = cxgb_change_mtu;
2495 netdev->set_mac_address = cxgb_set_mac_addr;
2496 #ifdef CONFIG_NET_POLL_CONTROLLER
2497 netdev->poll_controller = cxgb_netpoll;
2500 SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
2503 pci_set_drvdata(pdev, adapter);
2504 if (t3_prep_adapter(adapter, ai, 1) < 0) {
2510 * The card is now ready to go. If any errors occur during device
2511 * registration we do not fail the whole card but rather proceed only
2512 * with the ports we manage to register successfully. However we must
2513 * register at least one net device.
2515 for_each_port(adapter, i) {
2516 err = register_netdev(adapter->port[i]);
2518 dev_warn(&pdev->dev,
2519 "cannot register net device %s, skipping\n",
2520 adapter->port[i]->name);
2523 * Change the name we use for messages to the name of
2524 * the first successfully registered interface.
2526 if (!adapter->registered_device_map)
2527 adapter->name = adapter->port[i]->name;
2529 __set_bit(i, &adapter->registered_device_map);
2532 if (!adapter->registered_device_map) {
2533 dev_err(&pdev->dev, "could not register any net devices\n");
2537 /* Driver's ready. Reflect it on LEDs */
2538 t3_led_ready(adapter);
2540 if (is_offload(adapter)) {
2541 __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
2542 cxgb3_adapter_ofld(adapter);
2545 /* See what interrupts we'll be using */
2546 if (msi > 1 && cxgb_enable_msix(adapter) == 0)
2547 adapter->flags |= USING_MSIX;
2548 else if (msi > 0 && pci_enable_msi(pdev) == 0)
2549 adapter->flags |= USING_MSI;
2551 err = sysfs_create_group(&adapter->port[0]->dev.kobj,
2554 print_port_info(adapter, ai);
/* Error unwind: free in reverse order of acquisition. */
2558 iounmap(adapter->regs);
2559 for (i = ai->nports - 1; i >= 0; --i)
2560 if (adapter->port[i])
2561 free_netdev(adapter->port[i]);
2567 pci_disable_device(pdev);
2568 out_release_regions:
2569 pci_release_regions(pdev);
2570 pci_set_drvdata(pdev, NULL);
/*
 * PCI remove entry point: tear down one adapter in reverse probe order --
 * stop the SGE, remove sysfs attributes, unregister netdevs, shut down
 * offload, free SGE resources and IRQ vectors, free the netdevs, unmap
 * registers and release the PCI device.
 */
2574 static void __devexit remove_one(struct pci_dev *pdev)
2576 struct adapter *adapter = pci_get_drvdata(pdev);
2581 t3_sge_stop(adapter);
2582 sysfs_remove_group(&adapter->port[0]->dev.kobj,
2585 for_each_port(adapter, i)
2586 if (test_bit(i, &adapter->registered_device_map))
2587 unregister_netdev(adapter->port[i]);
2589 if (is_offload(adapter)) {
2590 cxgb3_adapter_unofld(adapter);
2591 if (test_bit(OFFLOAD_DEVMAP_BIT,
2592 &adapter->open_device_map))
2593 offload_close(&adapter->tdev);
2596 t3_free_sge_resources(adapter);
2597 cxgb_disable_msi(adapter);
2599 for_each_port(adapter, i)
2600 if (adapter->port[i])
2601 free_netdev(adapter->port[i]);
2603 iounmap(adapter->regs);
2605 pci_release_regions(pdev);
2606 pci_disable_device(pdev);
2607 pci_set_drvdata(pdev, NULL);
/* PCI driver registration table binding probe/remove to the device IDs. */
2611 static struct pci_driver driver = {
2613 .id_table = cxgb3_pci_tbl,
2615 .remove = __devexit_p(remove_one),
/* Module init: set up offload support, then register the PCI driver. */
2618 static int __init cxgb3_init_module(void)
2622 cxgb3_offload_init();
2624 ret = pci_register_driver(&driver);
/* Module exit: unregister the PCI driver and destroy the workqueue. */
2628 static void __exit cxgb3_cleanup_module(void)
2630 pci_unregister_driver(&driver);
2632 destroy_workqueue(cxgb3_wq);
/* Register module entry/exit points. */
2635 module_init(cxgb3_init_module);
2636 module_exit(cxgb3_cleanup_module);