/*
 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/mii.h>
#include <linux/sockios.h>
#include <linux/workqueue.h>
#include <linux/proc_fs.h>
#include <linux/rtnetlink.h>
#include <linux/firmware.h>
#include <linux/log2.h>
#include <asm/uaccess.h>

#include "common.h"
#include "cxgb3_ioctl.h"
#include "regs.h"
#include "cxgb3_offload.h"
#include "version.h"

#include "cxgb3_ctl_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"

enum {
        MAX_TXQ_ENTRIES = 16384,
        MAX_CTRL_TXQ_ENTRIES = 1024,
        MAX_RSPQ_ENTRIES = 16384,
        MAX_RX_BUFFERS = 16384,
        MAX_RX_JUMBO_BUFFERS = 16384,
        MIN_TXQ_ENTRIES = 4,
        MIN_CTRL_TXQ_ENTRIES = 4,
        MIN_RSPQ_ENTRIES = 32,
        MIN_FL_ENTRIES = 32
};

#define PORT_MASK ((1 << MAX_NPORTS) - 1)

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
                         NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
                         NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define EEPROM_MAGIC 0x38E2F10C

#define CH_DEVICE(devid, ssid, idx) \
        { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, ssid, 0, 0, idx }

static const struct pci_device_id cxgb3_pci_tbl[] = {
        CH_DEVICE(0x20, 1, 0),  /* PE9000 */
        CH_DEVICE(0x21, 1, 1),  /* T302E */
        CH_DEVICE(0x22, 1, 2),  /* T310E */
        CH_DEVICE(0x23, 1, 3),  /* T320X */
        CH_DEVICE(0x24, 1, 1),  /* T302X */
        CH_DEVICE(0x25, 1, 3),  /* T320E */
        CH_DEVICE(0x26, 1, 2),  /* T310X */
        CH_DEVICE(0x30, 1, 2),  /* T3B10 */
        CH_DEVICE(0x31, 1, 3),  /* T3B20 */
        CH_DEVICE(0x32, 1, 1),  /* T3B02 */
        {0,}
};

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and pin interrupts
 * msi = 0: force pin interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");
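
/*
 * Illustrative invocation (not part of this source): loading the module
 * with "modprobe cxgb3 msi=1" would keep the driver from trying MSI-X,
 * and "msi=0" would restrict it to legacy pin interrupts.
 */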

/*
 * The driver enables offload by default.
 * To disable it, set ofld_disable = 1.
 */

static int ofld_disable = 0;

module_param(ofld_disable, int, 0644);
MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");

/*
 * We have work elements that we need to cancel when an interface is taken
 * down.  Normally the work elements would be executed by keventd but that
 * can deadlock because of linkwatch.  If our close method takes the rtnl
 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
 * for our work to complete.  Get our own work queue to solve this.
 */
static struct workqueue_struct *cxgb3_wq;

/**
 *      link_report - show link status and link speed/duplex
 *      @dev: the port whose settings are to be reported
 *
 *      Shows the link status, speed, and duplex of a port.
 */
static void link_report(struct net_device *dev)
{
        if (!netif_carrier_ok(dev))
                printk(KERN_INFO "%s: link down\n", dev->name);
        else {
                const char *s = "10Mbps";
                const struct port_info *p = netdev_priv(dev);

                switch (p->link_config.speed) {
                case SPEED_10000:
                        s = "10Gbps";
                        break;
                case SPEED_1000:
                        s = "1000Mbps";
                        break;
                case SPEED_100:
                        s = "100Mbps";
                        break;
                }

                printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
                       p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
        }
}

/**
 *      t3_os_link_changed - handle link status changes
 *      @adapter: the adapter associated with the link change
 *      @port_id: the port index whose link status has changed
 *      @link_stat: the new status of the link
 *      @speed: the new speed setting
 *      @duplex: the new duplex setting
 *      @pause: the new flow-control setting
 *
 *      This is the OS-dependent handler for link status changes.  The OS
 *      neutral handler takes care of most of the processing for these events,
 *      then calls this handler for any OS-specific processing.
 */
void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
                        int speed, int duplex, int pause)
{
        struct net_device *dev = adapter->port[port_id];
        struct port_info *pi = netdev_priv(dev);
        struct cmac *mac = &pi->mac;

        /* Skip changes from disabled ports. */
        if (!netif_running(dev))
                return;

        if (link_stat != netif_carrier_ok(dev)) {
                if (link_stat) {
                        t3_mac_enable(mac, MAC_DIRECTION_RX);
                        netif_carrier_on(dev);
                } else {
                        netif_carrier_off(dev);
                        pi->phy.ops->power_down(&pi->phy, 1);
                        t3_mac_disable(mac, MAC_DIRECTION_RX);
                        t3_link_start(&pi->phy, mac, &pi->link_config);
                }

                link_report(dev);
        }
}

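/*
 * Set a port's RX mode: build a t3_rx_mode from the net device's
 * promiscuity/multicast state and hand it to the MAC.
 */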
static void cxgb_set_rxmode(struct net_device *dev)
{
        struct t3_rx_mode rm;
        struct port_info *pi = netdev_priv(dev);

        init_rx_mode(&rm, dev, dev->mc_list);
        t3_mac_set_rx_mode(&pi->mac, &rm);
}

/**
 *      link_start - enable a port
 *      @dev: the device to enable
 *
 *      Performs the MAC and PHY actions needed to enable a port.
 */
static void link_start(struct net_device *dev)
{
        struct t3_rx_mode rm;
        struct port_info *pi = netdev_priv(dev);
        struct cmac *mac = &pi->mac;

        init_rx_mode(&rm, dev, dev->mc_list);
        t3_mac_reset(mac);
        t3_mac_set_mtu(mac, dev->mtu);
        t3_mac_set_address(mac, 0, dev->dev_addr);
        t3_mac_set_rx_mode(mac, &rm);
        t3_link_start(&pi->phy, mac, &pi->link_config);
        t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}

static inline void cxgb_disable_msi(struct adapter *adapter)
{
        if (adapter->flags & USING_MSIX) {
                pci_disable_msix(adapter->pdev);
                adapter->flags &= ~USING_MSIX;
        } else if (adapter->flags & USING_MSI) {
                pci_disable_msi(adapter->pdev);
                adapter->flags &= ~USING_MSI;
        }
}

/*
 * Interrupt handler for asynchronous events used with MSI-X.
 */
static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
{
        t3_slow_intr_handler(cookie);
        return IRQ_HANDLED;
}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
        int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;

        snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
        adap->msix_info[0].desc[n] = 0;

        for_each_port(adap, j) {
                struct net_device *d = adap->port[j];
                const struct port_info *pi = netdev_priv(d);

                for (i = 0; i < pi->nqsets; i++, msi_idx++) {
                        snprintf(adap->msix_info[msi_idx].desc, n,
                                 "%s (queue %d)", d->name, i);
                        adap->msix_info[msi_idx].desc[n] = 0;
                }
        }
}

static int request_msix_data_irqs(struct adapter *adap)
{
        int i, j, err, qidx = 0;

        for_each_port(adap, i) {
                int nqsets = adap2pinfo(adap, i)->nqsets;

                for (j = 0; j < nqsets; ++j) {
                        err = request_irq(adap->msix_info[qidx + 1].vec,
                                          t3_intr_handler(adap,
                                                          adap->sge.qs[qidx].
                                                          rspq.polling), 0,
                                          adap->msix_info[qidx + 1].desc,
                                          &adap->sge.qs[qidx]);
                        if (err) {
                                while (--qidx >= 0)
                                        free_irq(adap->msix_info[qidx + 1].vec,
                                                 &adap->sge.qs[qidx]);
                                return err;
                        }
                        qidx++;
                }
        }
        return 0;
}

/**
 *      setup_rss - configure RSS
 *      @adap: the adapter
 *
 *      Sets up RSS to distribute packets to multiple receive queues.  We
 *      configure the RSS CPU lookup table to distribute to the number of HW
 *      receive queues, and the response queue lookup table to narrow that
 *      down to the response queues actually configured for each port.
 *      We always configure the RSS mapping for two ports since the mapping
 *      table has plenty of entries.
 */
static void setup_rss(struct adapter *adap)
{
        int i;
        unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
        unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
        u8 cpus[SGE_QSETS + 1];
        u16 rspq_map[RSS_TABLE_SIZE];

        for (i = 0; i < SGE_QSETS; ++i)
                cpus[i] = i;
        cpus[SGE_QSETS] = 0xff; /* terminator */

        for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
                rspq_map[i] = i % nq0;
                rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
        }

        t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
                      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
                      V_RRCPLCPUSIZE(6), cpus, rspq_map);
}
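
/*
 * Worked example for the mapping above: with nq0 = nq1 = 2, the lower half
 * of rspq_map alternates 0, 1, 0, 1, ... (port 0's queue sets) and the
 * upper half alternates 2, 3, 2, 3, ... (port 1's queue sets, offset by
 * nq0).
 */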

/*
 * If we have multiple receive queues per port serviced by NAPI we need one
 * netdevice per queue as NAPI operates on netdevices.  We already have one
 * netdevice, namely the one associated with the interface, so we use dummy
 * ones for any additional queues.  Note that these netdevices exist purely
 * so that NAPI has something to work with, they do not represent network
 * ports and are not registered.
 */
static int init_dummy_netdevs(struct adapter *adap)
{
        int i, j, dummy_idx = 0;
        struct net_device *nd;

        for_each_port(adap, i) {
                struct net_device *dev = adap->port[i];
                const struct port_info *pi = netdev_priv(dev);

                for (j = 0; j < pi->nqsets - 1; j++) {
                        if (!adap->dummy_netdev[dummy_idx]) {
                                struct port_info *p;

                                nd = alloc_netdev(sizeof(*p), "", ether_setup);
                                if (!nd)
                                        goto free_all;

                                p = netdev_priv(nd);
                                p->adapter = adap;
                                nd->weight = 64;
                                set_bit(__LINK_STATE_START, &nd->state);
                                adap->dummy_netdev[dummy_idx] = nd;
                        }
                        strcpy(adap->dummy_netdev[dummy_idx]->name, dev->name);
                        dummy_idx++;
                }
        }
        return 0;

free_all:
        while (--dummy_idx >= 0) {
                free_netdev(adap->dummy_netdev[dummy_idx]);
                adap->dummy_netdev[dummy_idx] = NULL;
        }
        return -ENOMEM;
}

/*
 * Wait until all NAPI handlers are descheduled.  This includes the handlers of
 * both netdevices representing interfaces and the dummy ones for the extra
 * queues.
 */
static void quiesce_rx(struct adapter *adap)
{
        int i;
        struct net_device *dev;

        for_each_port(adap, i) {
                dev = adap->port[i];
                while (test_bit(__LINK_STATE_RX_SCHED, &dev->state))
                        msleep(1);
        }

        for (i = 0; i < ARRAY_SIZE(adap->dummy_netdev); i++) {
                dev = adap->dummy_netdev[i];
                if (dev)
                        while (test_bit(__LINK_STATE_RX_SCHED, &dev->state))
                                msleep(1);
        }
}

/**
 *      setup_sge_qsets - configure SGE Tx/Rx/response queues
 *      @adap: the adapter
 *
 *      Determines how many sets of SGE queues to use and initializes them.
 *      We support multiple queue sets per port if we have MSI-X, otherwise
 *      just one queue set per port.
 */
static int setup_sge_qsets(struct adapter *adap)
{
        int i, j, err, irq_idx = 0, qset_idx = 0, dummy_dev_idx = 0;
        unsigned int ntxq = SGE_TXQ_PER_SET;

        if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
                irq_idx = -1;

        for_each_port(adap, i) {
                struct net_device *dev = adap->port[i];
                const struct port_info *pi = netdev_priv(dev);

                for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
                        err = t3_sge_alloc_qset(adap, qset_idx, 1,
                                (adap->flags & USING_MSIX) ? qset_idx + 1 :
                                                             irq_idx,
                                &adap->params.sge.qset[qset_idx], ntxq,
                                j == 0 ? dev :
                                         adap->dummy_netdev[dummy_dev_idx++]);
                        if (err) {
                                t3_free_sge_resources(adap);
                                return err;
                        }
                }
        }

        return 0;
}

static ssize_t attr_show(struct device *d, struct device_attribute *attr,
                         char *buf,
                         ssize_t(*format) (struct net_device *, char *))
{
        ssize_t len;

        /* Synchronize with ioctls that may shut down the device */
        rtnl_lock();
        len = (*format) (to_net_dev(d), buf);
        rtnl_unlock();
        return len;
}

static ssize_t attr_store(struct device *d, struct device_attribute *attr,
                          const char *buf, size_t len,
                          ssize_t(*set) (struct net_device *, unsigned int),
                          unsigned int min_val, unsigned int max_val)
{
        char *endp;
        ssize_t ret;
        unsigned int val;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        val = simple_strtoul(buf, &endp, 0);
        if (endp == buf || val < min_val || val > max_val)
                return -EINVAL;

        rtnl_lock();
        ret = (*set) (to_net_dev(d), val);
        if (!ret)
                ret = len;
        rtnl_unlock();
        return ret;
}

#define CXGB3_SHOW(name, val_expr) \
static ssize_t format_##name(struct net_device *dev, char *buf) \
{ \
        struct port_info *pi = netdev_priv(dev); \
        struct adapter *adap = pi->adapter; \
        return sprintf(buf, "%u\n", val_expr); \
} \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
                           char *buf) \
{ \
        return attr_show(d, attr, buf, format_##name); \
}

static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;
        int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;

        if (adap->flags & FULL_INIT_DONE)
                return -EBUSY;
        if (val && adap->params.rev == 0)
                return -EINVAL;
        if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
            min_tids)
                return -EINVAL;
        adap->params.mc5.nfilters = val;
        return 0;
}

static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
                              const char *buf, size_t len)
{
        return attr_store(d, attr, buf, len, set_nfilters, 0, ~0);
}

static ssize_t set_nservers(struct net_device *dev, unsigned int val)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;

        if (adap->flags & FULL_INIT_DONE)
                return -EBUSY;
        if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
            MC5_MIN_TIDS)
                return -EINVAL;
        adap->params.mc5.nservers = val;
        return 0;
}

static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
                              const char *buf, size_t len)
{
        return attr_store(d, attr, buf, len, set_nservers, 0, ~0);
}

#define CXGB3_ATTR_R(name, val_expr) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)

#define CXGB3_ATTR_RW(name, val_expr, store_method) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)

CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);

static struct attribute *cxgb3_attrs[] = {
        &dev_attr_cam_size.attr,
        &dev_attr_nfilters.attr,
        &dev_attr_nservers.attr,
        NULL
};

static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };
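
/*
 * Assuming the group is registered on a port's net device (done elsewhere
 * in the driver), these appear as ordinary sysfs files, e.g. (hypothetical
 * interface name):
 *
 *     cat /sys/class/net/eth0/cam_size
 *     echo 2048 > /sys/class/net/eth0/nfilters
 *
 * Writes require CAP_NET_ADMIN and fail with -EBUSY once the adapter has
 * been fully initialized.
 */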

static ssize_t tm_attr_show(struct device *d, struct device_attribute *attr,
                            char *buf, int sched)
{
        struct port_info *pi = netdev_priv(to_net_dev(d));
        struct adapter *adap = pi->adapter;
        unsigned int v, addr, bpt, cpt;
        ssize_t len;

        addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
        rtnl_lock();
        t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
        v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
        if (sched & 1)
                v >>= 16;
        bpt = (v >> 8) & 0xff;
        cpt = v & 0xff;
        if (!cpt)
                len = sprintf(buf, "disabled\n");
        else {
                v = (adap->params.vpd.cclk * 1000) / cpt;
                len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
        }
        rtnl_unlock();
        return len;
}

static ssize_t tm_attr_store(struct device *d, struct device_attribute *attr,
                             const char *buf, size_t len, int sched)
{
        struct port_info *pi = netdev_priv(to_net_dev(d));
        struct adapter *adap = pi->adapter;
        unsigned int val;
        char *endp;
        ssize_t ret;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        val = simple_strtoul(buf, &endp, 0);
        if (endp == buf || val > 10000000)
                return -EINVAL;

        rtnl_lock();
        ret = t3_config_sched(adap, val, sched);
        if (!ret)
                ret = len;
        rtnl_unlock();
        return ret;
}

#define TM_ATTR(name, sched) \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
                           char *buf) \
{ \
        return tm_attr_show(d, attr, buf, sched); \
} \
static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
                            const char *buf, size_t len) \
{ \
        return tm_attr_store(d, attr, buf, len, sched); \
} \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)

TM_ATTR(sched0, 0);
TM_ATTR(sched1, 1);
TM_ATTR(sched2, 2);
TM_ATTR(sched3, 3);
TM_ATTR(sched4, 4);
TM_ATTR(sched5, 5);
TM_ATTR(sched6, 6);
TM_ATTR(sched7, 7);

static struct attribute *offload_attrs[] = {
        &dev_attr_sched0.attr,
        &dev_attr_sched1.attr,
        &dev_attr_sched2.attr,
        &dev_attr_sched3.attr,
        &dev_attr_sched4.attr,
        &dev_attr_sched5.attr,
        &dev_attr_sched6.attr,
        &dev_attr_sched7.attr,
        NULL
};

static struct attribute_group offload_attr_group = {.attrs = offload_attrs };
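
/*
 * Each schedN file reports the corresponding TX scheduler's rate ("disabled"
 * when off) and accepts a new value.  Reads are formatted in Kbps, so a
 * plausible (unverified here, since t3_config_sched lives elsewhere) way to
 * request roughly 100 Mbps on scheduler 0 would be:
 *
 *     echo 100000 > /sys/class/net/eth0/sched0    (hypothetical path)
 */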

/*
 * Sends an sk_buff to an offload queue driver
 * after dealing with any active network taps.
 */
static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
{
        int ret;

        local_bh_disable();
        ret = t3_offload_tx(tdev, skb);
        local_bh_enable();
        return ret;
}

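/*
 * Install the MAC address of port idx in the hardware source MAC table by
 * sending a CPL_SMT_WRITE_REQ message through the offload queue.
 */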
static int write_smt_entry(struct adapter *adapter, int idx)
{
        struct cpl_smt_write_req *req;
        struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);

        if (!skb)
                return -ENOMEM;

        req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
        req->mtu_idx = NMTUS - 1;       /* should be 0 but there's a T3 bug */
        req->iff = idx;
        memset(req->src_mac1, 0, sizeof(req->src_mac1));
        memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
        skb->priority = 1;
        offload_tx(&adapter->tdev, skb);
        return 0;
}

static int init_smt(struct adapter *adapter)
{
        int i;

        for_each_port(adapter, i)
            write_smt_entry(adapter, i);
        return 0;
}

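/*
 * Program the TP MTU port table: port 0's MTU occupies the low 16 bits and
 * port 1's (when present) the high 16 bits.
 */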
static void init_port_mtus(struct adapter *adapter)
{
        unsigned int mtus = adapter->port[0]->mtu;

        if (adapter->port[1])
                mtus |= adapter->port[1]->mtu << 16;
        t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
}

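/*
 * Send a firmware PKTSCHED_SET management work request that binds queue set
 * qidx to TX scheduler sched with parameters lo/hi on the given port.
 */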
static void send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
                              int hi, int port)
{
        struct sk_buff *skb;
        struct mngt_pktsched_wr *req;

        skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
        req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
        req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
        req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
        req->sched = sched;
        req->idx = qidx;
        req->min = lo;
        req->max = hi;
        req->binding = port;
        t3_mgmt_tx(adap, skb);
}

static void bind_qsets(struct adapter *adap)
{
        int i, j;

        for_each_port(adap, i) {
                const struct port_info *pi = adap2pinfo(adap, i);

                for (j = 0; j < pi->nqsets; ++j)
                        send_pktsched_cmd(adap, 1, pi->first_qset + j, -1,
                                          -1, i);
        }
}

#define FW_FNAME "t3fw-%d.%d.%d.bin"

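/*
 * Load the firmware image matching the version the driver was built against
 * (t3fw-<major>.<minor>.<micro>.bin) from userspace via request_firmware()
 * and write it to the adapter.
 */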
static int upgrade_fw(struct adapter *adap)
{
        int ret;
        char buf[64];
        const struct firmware *fw;
        struct device *dev = &adap->pdev->dev;

        snprintf(buf, sizeof(buf), FW_FNAME, FW_VERSION_MAJOR,
                 FW_VERSION_MINOR, FW_VERSION_MICRO);
        ret = request_firmware(&fw, buf, dev);
        if (ret < 0) {
                dev_err(dev, "could not upgrade firmware: unable to load %s\n",
                        buf);
                return ret;
        }
        ret = t3_load_fw(adap, fw->data, fw->size);
        release_firmware(fw);
        return ret;
}

/**
 *      cxgb_up - enable the adapter
 *      @adapter: adapter being enabled
 *
 *      Called when the first port is enabled, this function performs the
 *      actions necessary to make an adapter operational, such as completing
 *      the initialization of HW modules, and enabling interrupts.
 *
 *      Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
        int err = 0;

        if (!(adap->flags & FULL_INIT_DONE)) {
                err = t3_check_fw_version(adap);
                if (err == -EINVAL)
                        err = upgrade_fw(adap);
                if (err)
                        goto out;

                err = init_dummy_netdevs(adap);
                if (err)
                        goto out;

                err = t3_init_hw(adap, 0);
                if (err)
                        goto out;

                t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));

                err = setup_sge_qsets(adap);
                if (err)
                        goto out;

                setup_rss(adap);
                adap->flags |= FULL_INIT_DONE;
        }

        t3_intr_clear(adap);

        if (adap->flags & USING_MSIX) {
                name_msix_vecs(adap);
                err = request_irq(adap->msix_info[0].vec,
                                  t3_async_intr_handler, 0,
                                  adap->msix_info[0].desc, adap);
                if (err)
                        goto irq_err;

                if (request_msix_data_irqs(adap)) {
                        free_irq(adap->msix_info[0].vec, adap);
                        goto irq_err;
                }
        } else if ((err = request_irq(adap->pdev->irq,
                                      t3_intr_handler(adap,
                                                      adap->sge.qs[0].rspq.
                                                      polling),
                                      (adap->flags & USING_MSI) ?
                                       0 : IRQF_SHARED,
                                      adap->name, adap)))
                goto irq_err;

        t3_sge_start(adap);
        t3_intr_enable(adap);

        if ((adap->flags & (USING_MSIX | QUEUES_BOUND)) == USING_MSIX)
                bind_qsets(adap);
        adap->flags |= QUEUES_BOUND;

out:
        return err;
irq_err:
        CH_ERR(adap, "request_irq failed, err %d\n", err);
        goto out;
}

/*
 * Release resources when all the ports and offloading have been stopped.
 */
static void cxgb_down(struct adapter *adapter)
{
        t3_sge_stop(adapter);
        spin_lock_irq(&adapter->work_lock);     /* sync with PHY intr task */
        t3_intr_disable(adapter);
        spin_unlock_irq(&adapter->work_lock);

        if (adapter->flags & USING_MSIX) {
                int i, n = 0;

                free_irq(adapter->msix_info[0].vec, adapter);
                for_each_port(adapter, i)
                    n += adap2pinfo(adapter, i)->nqsets;

                for (i = 0; i < n; ++i)
                        free_irq(adapter->msix_info[i + 1].vec,
                                 &adapter->sge.qs[i]);
        } else
                free_irq(adapter->pdev->irq, adapter);

        flush_workqueue(cxgb3_wq);      /* wait for external IRQ handler */
        quiesce_rx(adapter);
}

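/*
 * (Re)arm the periodic adapter check task.  The period comes from
 * linkpoll_period (in tenths of a second) when link polling is in use,
 * otherwise from stats_update_period (in seconds).
 */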
static void schedule_chk_task(struct adapter *adap)
{
        unsigned int timeo;

        timeo = adap->params.linkpoll_period ?
            (HZ * adap->params.linkpoll_period) / 10 :
            adap->params.stats_update_period * HZ;
        if (timeo)
                queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
}

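/*
 * Bring up the adapter's offload capabilities: mark the offload device open,
 * make sure the adapter itself is up, switch TP into offload mode, program
 * the MTU and SMT tables, and activate and notify the registered clients.
 */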
static int offload_open(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        struct t3cdev *tdev = dev2t3cdev(dev);
        int adap_up = adapter->open_device_map & PORT_MASK;
        int err = 0;

        if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
                return 0;

        if (!adap_up && (err = cxgb_up(adapter)) < 0)
                return err;

        t3_tp_set_offload_mode(adapter, 1);
        tdev->lldev = adapter->port[0];
        err = cxgb3_offload_activate(adapter);
        if (err)
                goto out;

        init_port_mtus(adapter);
        t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
                     adapter->params.b_wnd,
                     adapter->params.rev == 0 ?
                     adapter->port[0]->mtu : 0xffff);
        init_smt(adapter);

        /* Never mind if the next step fails */
        sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group);

        /* Call back all registered clients */
        cxgb3_add_clients(tdev);

out:
        /* restore them in case the offload module has changed them */
        if (err) {
                t3_tp_set_offload_mode(adapter, 0);
                clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
                cxgb3_set_dummy_ops(tdev);
        }
        return err;
}

static int offload_close(struct t3cdev *tdev)
{
        struct adapter *adapter = tdev2adap(tdev);

        if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
                return 0;

        /* Call back all registered clients */
        cxgb3_remove_clients(tdev);

        sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);

        tdev->lldev = NULL;
        cxgb3_set_dummy_ops(tdev);
        t3_tp_set_offload_mode(adapter, 0);
        clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);

        if (!adapter->open_device_map)
                cxgb_down(adapter);

        cxgb3_offload_deactivate(adapter);
        return 0;
}

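/*
 * Open handler for a port: brings the adapter up on the first open, enables
 * offload when available, starts the link and the port's queue, and kicks
 * off the periodic check task when this is the first active port.
 */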
static int cxgb_open(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        int other_ports = adapter->open_device_map & PORT_MASK;
        int err;

        if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
                return err;

        set_bit(pi->port_id, &adapter->open_device_map);
        if (is_offload(adapter) && !ofld_disable) {
                err = offload_open(dev);
                if (err)
                        printk(KERN_WARNING
                               "Could not initialize offload capabilities\n");
        }

        link_start(dev);
        t3_port_intr_enable(adapter, pi->port_id);
        netif_start_queue(dev);
        if (!other_ports)
                schedule_chk_task(adapter);

        return 0;
}

static int cxgb_close(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        t3_port_intr_disable(adapter, pi->port_id);
        netif_stop_queue(dev);
        pi->phy.ops->power_down(&pi->phy, 1);
        netif_carrier_off(dev);
        t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);

        spin_lock(&adapter->work_lock); /* sync with update task */
        clear_bit(pi->port_id, &adapter->open_device_map);
        spin_unlock(&adapter->work_lock);

        if (!(adapter->open_device_map & PORT_MASK))
                cancel_rearming_delayed_workqueue(cxgb3_wq,
                                                  &adapter->adap_check_task);

        if (!adapter->open_device_map)
                cxgb_down(adapter);

        return 0;
}

static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        struct net_device_stats *ns = &pi->netstats;
        const struct mac_stats *pstats;

        spin_lock(&adapter->stats_lock);
        pstats = t3_mac_update_stats(&pi->mac);
        spin_unlock(&adapter->stats_lock);

        ns->tx_bytes = pstats->tx_octets;
        ns->tx_packets = pstats->tx_frames;
        ns->rx_bytes = pstats->rx_octets;
        ns->rx_packets = pstats->rx_frames;
        ns->multicast = pstats->rx_mcast_frames;

        ns->tx_errors = pstats->tx_underrun;
        ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
            pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
            pstats->rx_fifo_ovfl;

        /* detailed rx_errors */
        ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
        ns->rx_over_errors = 0;
        ns->rx_crc_errors = pstats->rx_fcs_errs;
        ns->rx_frame_errors = pstats->rx_symbol_errs;
        ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
        ns->rx_missed_errors = pstats->rx_cong_drops;

        /* detailed tx_errors */
        ns->tx_aborted_errors = 0;
        ns->tx_carrier_errors = 0;
        ns->tx_fifo_errors = pstats->tx_underrun;
        ns->tx_heartbeat_errors = 0;
        ns->tx_window_errors = 0;
        return ns;
}

static u32 get_msglevel(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        return adapter->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        adapter->msg_enable = val;
}

static char stats_strings[][ETH_GSTRING_LEN] = {
        "TxOctetsOK         ",
        "TxFramesOK         ",
        "TxMulticastFramesOK",
        "TxBroadcastFramesOK",
        "TxPauseFrames      ",
        "TxUnderrun         ",
        "TxExtUnderrun      ",

        "TxFrames64         ",
        "TxFrames65To127    ",
        "TxFrames128To255   ",
        "TxFrames256To511   ",
        "TxFrames512To1023  ",
        "TxFrames1024To1518 ",
        "TxFrames1519ToMax  ",

        "RxOctetsOK         ",
        "RxFramesOK         ",
        "RxMulticastFramesOK",
        "RxBroadcastFramesOK",
        "RxPauseFrames      ",
        "RxFCSErrors        ",
        "RxSymbolErrors     ",
        "RxShortErrors      ",
        "RxJabberErrors     ",
        "RxLengthErrors     ",
        "RxFIFOoverflow     ",

        "RxFrames64         ",
        "RxFrames65To127    ",
        "RxFrames128To255   ",
        "RxFrames256To511   ",
        "RxFrames512To1023  ",
        "RxFrames1024To1518 ",
        "RxFrames1519ToMax  ",

        "PhyFIFOErrors      ",
        "TSO                ",
        "VLANextractions    ",
        "VLANinsertions     ",
        "TxCsumOffload      ",
        "RxCsumGood         ",
        "RxDrops            ",

        "CheckTXEnToggled   ",
        "CheckResets        ",

};

static int get_stats_count(struct net_device *dev)
{
        return ARRAY_SIZE(stats_strings);
}

#define T3_REGMAP_SIZE (3 * 1024)

static int get_regs_len(struct net_device *dev)
{
        return T3_REGMAP_SIZE;
}

static int get_eeprom_len(struct net_device *dev)
{
        return EEPROMSIZE;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        u32 fw_vers = 0;

        t3_get_fw_version(adapter, &fw_vers);

        strcpy(info->driver, DRV_NAME);
        strcpy(info->version, DRV_VERSION);
        strcpy(info->bus_info, pci_name(adapter->pdev));
        if (!fw_vers)
                strcpy(info->fw_version, "N/A");
        else {
                snprintf(info->fw_version, sizeof(info->fw_version),
                         "%s %u.%u.%u",
                         G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
                         G_FW_VERSION_MAJOR(fw_vers),
                         G_FW_VERSION_MINOR(fw_vers),
                         G_FW_VERSION_MICRO(fw_vers));
        }
}

static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
        if (stringset == ETH_SS_STATS)
                memcpy(data, stats_strings, sizeof(stats_strings));
}

static unsigned long collect_sge_port_stats(struct adapter *adapter,
                                            struct port_info *p, int idx)
{
        int i;
        unsigned long tot = 0;

        for (i = 0; i < p->nqsets; ++i)
                tot += adapter->sge.qs[i + p->first_qset].port_stats[idx];
        return tot;
}

static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
                      u64 *data)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        const struct mac_stats *s;

        spin_lock(&adapter->stats_lock);
        s = t3_mac_update_stats(&pi->mac);
        spin_unlock(&adapter->stats_lock);

        *data++ = s->tx_octets;
        *data++ = s->tx_frames;
        *data++ = s->tx_mcast_frames;
        *data++ = s->tx_bcast_frames;
        *data++ = s->tx_pause;
        *data++ = s->tx_underrun;
        *data++ = s->tx_fifo_urun;

        *data++ = s->tx_frames_64;
        *data++ = s->tx_frames_65_127;
        *data++ = s->tx_frames_128_255;
        *data++ = s->tx_frames_256_511;
        *data++ = s->tx_frames_512_1023;
        *data++ = s->tx_frames_1024_1518;
        *data++ = s->tx_frames_1519_max;

        *data++ = s->rx_octets;
        *data++ = s->rx_frames;
        *data++ = s->rx_mcast_frames;
        *data++ = s->rx_bcast_frames;
        *data++ = s->rx_pause;
        *data++ = s->rx_fcs_errs;
        *data++ = s->rx_symbol_errs;
        *data++ = s->rx_short;
        *data++ = s->rx_jabber;
        *data++ = s->rx_too_long;
        *data++ = s->rx_fifo_ovfl;

        *data++ = s->rx_frames_64;
        *data++ = s->rx_frames_65_127;
        *data++ = s->rx_frames_128_255;
        *data++ = s->rx_frames_256_511;
        *data++ = s->rx_frames_512_1023;
        *data++ = s->rx_frames_1024_1518;
        *data++ = s->rx_frames_1519_max;

        *data++ = pi->phy.fifo_errors;

        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
        *data++ = s->rx_cong_drops;

        *data++ = s->num_toggled;
        *data++ = s->num_resets;
}

static inline void reg_block_dump(struct adapter *ap, void *buf,
                                  unsigned int start, unsigned int end)
{
        u32 *p = buf + start;

        for (; start <= end; start += sizeof(u32))
                *p++ = t3_read_reg(ap, start);
}

static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
                     void *buf)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *ap = pi->adapter;

        /*
         * Version scheme:
         * bits 0..9: chip version
         * bits 10..15: chip revision
         * bit 31: set for PCIe cards
         */
        regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);

        /*
         * We skip the MAC statistics registers because they are clear-on-read.
         * Also reading multi-register stats would need to synchronize with the
         * periodic mac stats accumulation.  Hard to justify the complexity.
         */
        memset(buf, 0, T3_REGMAP_SIZE);
        reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
        reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
        reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
        reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
        reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
        reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
                       XGM_REG(A_XGM_SERDES_STAT3, 1));
        reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
                       XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
}

static int restart_autoneg(struct net_device *dev)
{
        struct port_info *p = netdev_priv(dev);

        if (!netif_running(dev))
                return -EAGAIN;
        if (p->link_config.autoneg != AUTONEG_ENABLE)
                return -EINVAL;
        p->phy.ops->autoneg_restart(&p->phy);
        return 0;
}

static int cxgb3_phys_id(struct net_device *dev, u32 data)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        int i;

        if (data == 0)
                data = 2;

        for (i = 0; i < data * 2; i++) {
                t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
                                 (i & 1) ? F_GPIO0_OUT_VAL : 0);
                if (msleep_interruptible(500))
                        break;
        }
        t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
                         F_GPIO0_OUT_VAL);
        return 0;
}

static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct port_info *p = netdev_priv(dev);

        cmd->supported = p->link_config.supported;
        cmd->advertising = p->link_config.advertising;

        if (netif_carrier_ok(dev)) {
                cmd->speed = p->link_config.speed;
                cmd->duplex = p->link_config.duplex;
        } else {
                cmd->speed = -1;
                cmd->duplex = -1;
        }

        cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
        cmd->phy_address = p->phy.addr;
        cmd->transceiver = XCVR_EXTERNAL;
        cmd->autoneg = p->link_config.autoneg;
        cmd->maxtxpkt = 0;
        cmd->maxrxpkt = 0;
        return 0;
}

static int speed_duplex_to_caps(int speed, int duplex)
{
        int cap = 0;

        switch (speed) {
        case SPEED_10:
                if (duplex == DUPLEX_FULL)
                        cap = SUPPORTED_10baseT_Full;
                else
                        cap = SUPPORTED_10baseT_Half;
                break;
        case SPEED_100:
                if (duplex == DUPLEX_FULL)
                        cap = SUPPORTED_100baseT_Full;
                else
                        cap = SUPPORTED_100baseT_Half;
                break;
        case SPEED_1000:
                if (duplex == DUPLEX_FULL)
                        cap = SUPPORTED_1000baseT_Full;
                else
                        cap = SUPPORTED_1000baseT_Half;
                break;
        case SPEED_10000:
                if (duplex == DUPLEX_FULL)
                        cap = SUPPORTED_10000baseT_Full;
        }
        return cap;
}

#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
                      ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
                      ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
                      ADVERTISED_10000baseT_Full)

static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct port_info *p = netdev_priv(dev);
        struct link_config *lc = &p->link_config;

        if (!(lc->supported & SUPPORTED_Autoneg))
                return -EOPNOTSUPP;     /* can't change speed/duplex */

        if (cmd->autoneg == AUTONEG_DISABLE) {
                int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);

                if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
                        return -EINVAL;
                lc->requested_speed = cmd->speed;
                lc->requested_duplex = cmd->duplex;
                lc->advertising = 0;
        } else {
                cmd->advertising &= ADVERTISED_MASK;
                cmd->advertising &= lc->supported;
                if (!cmd->advertising)
                        return -EINVAL;
                lc->requested_speed = SPEED_INVALID;
                lc->requested_duplex = DUPLEX_INVALID;
                lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
        }
        lc->autoneg = cmd->autoneg;
        if (netif_running(dev))
                t3_link_start(&p->phy, &p->mac, lc);
        return 0;
}

static void get_pauseparam(struct net_device *dev,
                           struct ethtool_pauseparam *epause)
{
        struct port_info *p = netdev_priv(dev);

        epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
        epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
        epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
}

static int set_pauseparam(struct net_device *dev,
                          struct ethtool_pauseparam *epause)
{
        struct port_info *p = netdev_priv(dev);
        struct link_config *lc = &p->link_config;

        if (epause->autoneg == AUTONEG_DISABLE)
                lc->requested_fc = 0;
        else if (lc->supported & SUPPORTED_Autoneg)
                lc->requested_fc = PAUSE_AUTONEG;
        else
                return -EINVAL;

        if (epause->rx_pause)
                lc->requested_fc |= PAUSE_RX;
        if (epause->tx_pause)
                lc->requested_fc |= PAUSE_TX;
        if (lc->autoneg == AUTONEG_ENABLE) {
                if (netif_running(dev))
                        t3_link_start(&p->phy, &p->mac, lc);
        } else {
                lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
                if (netif_running(dev))
                        t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
        }
        return 0;
}

1408 static u32 get_rx_csum(struct net_device *dev)
1409 {
1410         struct port_info *p = netdev_priv(dev);
1411
1412         return p->rx_csum_offload;
1413 }
1414
1415 static int set_rx_csum(struct net_device *dev, u32 data)
1416 {
1417         struct port_info *p = netdev_priv(dev);
1418
1419         p->rx_csum_offload = data;
1420         return 0;
1421 }
1422
1423 static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1424 {
1425         struct port_info *pi = netdev_priv(dev);
1426         struct adapter *adapter = pi->adapter;
1427         const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];
1428
1429         e->rx_max_pending = MAX_RX_BUFFERS;
1430         e->rx_mini_max_pending = 0;
1431         e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
1432         e->tx_max_pending = MAX_TXQ_ENTRIES;
1433
1434         e->rx_pending = q->fl_size;
1435         e->rx_mini_pending = q->rspq_size;
1436         e->rx_jumbo_pending = q->jumbo_size;
1437         e->tx_pending = q->txq_size[0];
1438 }
1439
1440 static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1441 {
1442         struct port_info *pi = netdev_priv(dev);
1443         struct adapter *adapter = pi->adapter;
1444         struct qset_params *q;
1445         int i;
1446
1447         if (e->rx_pending > MAX_RX_BUFFERS ||
1448             e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
1449             e->tx_pending > MAX_TXQ_ENTRIES ||
1450             e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1451             e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1452             e->rx_pending < MIN_FL_ENTRIES ||
1453             e->rx_jumbo_pending < MIN_FL_ENTRIES ||
1454             e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
1455                 return -EINVAL;
1456
1457         if (adapter->flags & FULL_INIT_DONE)
1458                 return -EBUSY;
1459
1460         q = &adapter->params.sge.qset[pi->first_qset];
1461         for (i = 0; i < pi->nqsets; ++i, ++q) {
1462                 q->rspq_size = e->rx_mini_pending;
1463                 q->fl_size = e->rx_pending;
1464                 q->jumbo_size = e->rx_jumbo_pending;
1465                 q->txq_size[0] = e->tx_pending;
1466                 q->txq_size[1] = e->tx_pending;
1467                 q->txq_size[2] = e->tx_pending;
1468         }
1469         return 0;
1470 }
1471
1472 static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1473 {
1474         struct port_info *pi = netdev_priv(dev);
1475         struct adapter *adapter = pi->adapter;
1476         struct qset_params *qsp = &adapter->params.sge.qset[0];
1477         struct sge_qset *qs = &adapter->sge.qs[0];
1478
1479         if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
1480                 return -EINVAL;
1481
1482         qsp->coalesce_usecs = c->rx_coalesce_usecs;
1483         t3_update_qset_coalesce(qs, qsp);
1484         return 0;
1485 }
1486
1487 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1488 {
1489         struct port_info *pi = netdev_priv(dev);
1490         struct adapter *adapter = pi->adapter;
1491         struct qset_params *q = adapter->params.sge.qset;
1492
1493         c->rx_coalesce_usecs = q->coalesce_usecs;
1494         return 0;
1495 }
1496
1497 static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
1498                       u8 * data)
1499 {
1500         struct port_info *pi = netdev_priv(dev);
1501         struct adapter *adapter = pi->adapter;
1502         int i, err = 0;
1503
1504         u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
1505         if (!buf)
1506                 return -ENOMEM;
1507
1508         e->magic = EEPROM_MAGIC;
1509         for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
1510                 err = t3_seeprom_read(adapter, i, (u32 *) & buf[i]);
1511
1512         if (!err)
1513                 memcpy(data, buf + e->offset, e->len);
1514         kfree(buf);
1515         return err;
1516 }
1517
1518 static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
1519                       u8 * data)
1520 {
1521         struct port_info *pi = netdev_priv(dev);
1522         struct adapter *adapter = pi->adapter;
1523         u32 aligned_offset, aligned_len, *p;
1524         u8 *buf;
1525         int err = 0;
1526
1527         if (eeprom->magic != EEPROM_MAGIC)
1528                 return -EINVAL;
1529
1530         aligned_offset = eeprom->offset & ~3;
1531         aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
1532
1533         if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
1534                 buf = kmalloc(aligned_len, GFP_KERNEL);
1535                 if (!buf)
1536                         return -ENOMEM;
1537                 err = t3_seeprom_read(adapter, aligned_offset, (u32 *) buf);
1538                 if (!err && aligned_len > 4)
1539                         err = t3_seeprom_read(adapter,
1540                                               aligned_offset + aligned_len - 4,
1541                                               (u32 *)&buf[aligned_len - 4]);
1542                 if (err)
1543                         goto out;
1544                 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
1545         } else {
1546                 buf = data;
1547         }

1548         err = t3_seeprom_wp(adapter, 0);
1549         if (err)
1550                 goto out;
1551
1552         for (p = (u32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
1553                 err = t3_seeprom_write(adapter, aligned_offset, *p);
1554                 aligned_offset += 4;
1555         }
1556
1557         if (!err)
1558                 err = t3_seeprom_wp(adapter, 1);
1559 out:
1560         if (buf != data)
1561                 kfree(buf);
1562         return err;
1563 }
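
/*
 * Worked example of the alignment above, for illustration only: the EEPROM
 * is accessed as 32-bit words, so a 3-byte write at offset 6 becomes an
 * aligned read-modify-write covering offset 4, length 8.
 */
#include <stdio.h>

int main(void)
{
        unsigned int offset = 6, len = 3;
        unsigned int aligned_offset = offset & ~3;
        unsigned int aligned_len = (len + (offset & 3) + 3) & ~3;

        printf("offset %u len %u\n", aligned_offset, aligned_len);
        /* prints "offset 4 len 8" */
        return 0;
}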
1564
1565 static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1566 {
1567         wol->supported = 0;
1568         wol->wolopts = 0;
1569         memset(&wol->sopass, 0, sizeof(wol->sopass));
1570 }
1571
1572 static const struct ethtool_ops cxgb_ethtool_ops = {
1573         .get_settings = get_settings,
1574         .set_settings = set_settings,
1575         .get_drvinfo = get_drvinfo,
1576         .get_msglevel = get_msglevel,
1577         .set_msglevel = set_msglevel,
1578         .get_ringparam = get_sge_param,
1579         .set_ringparam = set_sge_param,
1580         .get_coalesce = get_coalesce,
1581         .set_coalesce = set_coalesce,
1582         .get_eeprom_len = get_eeprom_len,
1583         .get_eeprom = get_eeprom,
1584         .set_eeprom = set_eeprom,
1585         .get_pauseparam = get_pauseparam,
1586         .set_pauseparam = set_pauseparam,
1587         .get_rx_csum = get_rx_csum,
1588         .set_rx_csum = set_rx_csum,
1589         .get_tx_csum = ethtool_op_get_tx_csum,
1590         .set_tx_csum = ethtool_op_set_tx_csum,
1591         .get_sg = ethtool_op_get_sg,
1592         .set_sg = ethtool_op_set_sg,
1593         .get_link = ethtool_op_get_link,
1594         .get_strings = get_strings,
1595         .phys_id = cxgb3_phys_id,
1596         .nway_reset = restart_autoneg,
1597         .get_stats_count = get_stats_count,
1598         .get_ethtool_stats = get_stats,
1599         .get_regs_len = get_regs_len,
1600         .get_regs = get_regs,
1601         .get_wol = get_wol,
1602         .get_tso = ethtool_op_get_tso,
1603         .set_tso = ethtool_op_set_tso,
1604 };
1605
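/*
 * A negative value means "leave this parameter unchanged" in the extension
 * ioctl below, so it deliberately passes the range check; only non-negative
 * values must lie within [lo, hi].
 */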
1606 static int in_range(int val, int lo, int hi)
1607 {
1608         return val < 0 || (val <= hi && val >= lo);
1609 }
1610
1611 static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1612 {
1613         struct port_info *pi = netdev_priv(dev);
1614         struct adapter *adapter = pi->adapter;
1615         u32 cmd;
1616         int ret;
1617
1618         if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
1619                 return -EFAULT;
1620
1621         switch (cmd) {
1622         case CHELSIO_SET_QSET_PARAMS:{
1623                 int i;
1624                 struct qset_params *q;
1625                 struct ch_qset_params t;
1626
1627                 if (!capable(CAP_NET_ADMIN))
1628                         return -EPERM;
1629                 if (copy_from_user(&t, useraddr, sizeof(t)))
1630                         return -EFAULT;
1631                 if (t.qset_idx >= SGE_QSETS)
1632                         return -EINVAL;
1633                 if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
1634                     !in_range(t.cong_thres, 0, 255) ||
1635                     !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
1636                               MAX_TXQ_ENTRIES) ||
1637                     !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
1638                               MAX_TXQ_ENTRIES) ||
1639                     !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
1640                               MAX_CTRL_TXQ_ENTRIES) ||
1641                     !in_range(t.fl_size[0], MIN_FL_ENTRIES,
1642                               MAX_RX_BUFFERS) ||
1643                     !in_range(t.fl_size[1], MIN_FL_ENTRIES,
1644                               MAX_RX_JUMBO_BUFFERS) ||
1645                     !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
1646                               MAX_RSPQ_ENTRIES))
1647                         return -EINVAL;
1648                 if ((adapter->flags & FULL_INIT_DONE) &&
1649                     (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
1650                      t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
1651                      t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
1652                      t.polling >= 0 || t.cong_thres >= 0))
1653                         return -EBUSY;
1654
1655                 q = &adapter->params.sge.qset[t.qset_idx];
1656
1657                 if (t.rspq_size >= 0)
1658                         q->rspq_size = t.rspq_size;
1659                 if (t.fl_size[0] >= 0)
1660                         q->fl_size = t.fl_size[0];
1661                 if (t.fl_size[1] >= 0)
1662                         q->jumbo_size = t.fl_size[1];
1663                 if (t.txq_size[0] >= 0)
1664                         q->txq_size[0] = t.txq_size[0];
1665                 if (t.txq_size[1] >= 0)
1666                         q->txq_size[1] = t.txq_size[1];
1667                 if (t.txq_size[2] >= 0)
1668                         q->txq_size[2] = t.txq_size[2];
1669                 if (t.cong_thres >= 0)
1670                         q->cong_thres = t.cong_thres;
1671                 if (t.intr_lat >= 0) {
1672                         struct sge_qset *qs = &adapter->sge.qs[t.qset_idx];
1674
1675                         q->coalesce_usecs = t.intr_lat;
1676                         t3_update_qset_coalesce(qs, q);
1677                 }
1678                 if (t.polling >= 0) {
1679                         if (adapter->flags & USING_MSIX)
1680                                 q->polling = t.polling;
1681                         else {
1682                                 /* No polling with INTx for T3A */
1683                                 if (adapter->params.rev == 0 &&
1684                                     !(adapter->flags & USING_MSI))
1685                                         t.polling = 0;
1686
1687                                 for (i = 0; i < SGE_QSETS; i++) {
1688                                         q = &adapter->params.sge.qset[i];
1689                                         q->polling = t.polling;
1690                                 }
1692                         }
1693                 }
1694                 break;
1695         }
1696         case CHELSIO_GET_QSET_PARAMS:{
1697                 struct qset_params *q;
1698                 struct ch_qset_params t;
1699
1700                 if (copy_from_user(&t, useraddr, sizeof(t)))
1701                         return -EFAULT;
1702                 if (t.qset_idx >= SGE_QSETS)
1703                         return -EINVAL;
1704
1705                 q = &adapter->params.sge.qset[t.qset_idx];
1706                 t.rspq_size = q->rspq_size;
1707                 t.txq_size[0] = q->txq_size[0];
1708                 t.txq_size[1] = q->txq_size[1];
1709                 t.txq_size[2] = q->txq_size[2];
1710                 t.fl_size[0] = q->fl_size;
1711                 t.fl_size[1] = q->jumbo_size;
1712                 t.polling = q->polling;
1713                 t.intr_lat = q->coalesce_usecs;
1714                 t.cong_thres = q->cong_thres;
1715
1716                 if (copy_to_user(useraddr, &t, sizeof(t)))
1717                         return -EFAULT;
1718                 break;
1719         }
1720         case CHELSIO_SET_QSET_NUM:{
1721                 struct ch_reg edata;
1722                 struct port_info *pi = netdev_priv(dev);
1723                 unsigned int i, first_qset = 0, other_qsets = 0;
1724
1725                 if (!capable(CAP_NET_ADMIN))
1726                         return -EPERM;
1727                 if (adapter->flags & FULL_INIT_DONE)
1728                         return -EBUSY;
1729                 if (copy_from_user(&edata, useraddr, sizeof(edata)))
1730                         return -EFAULT;
1731                 if (edata.val < 1 ||
1732                         (edata.val > 1 && !(adapter->flags & USING_MSIX)))
1733                         return -EINVAL;
1734
1735                 for_each_port(adapter, i)
1736                         if (adapter->port[i] && adapter->port[i] != dev)
1737                                 other_qsets += adap2pinfo(adapter, i)->nqsets;
1738
1739                 if (edata.val + other_qsets > SGE_QSETS)
1740                         return -EINVAL;
1741
1742                 pi->nqsets = edata.val;
1743
1744                 for_each_port(adapter, i)
1745                         if (adapter->port[i]) {
1746                                 pi = adap2pinfo(adapter, i);
1747                                 pi->first_qset = first_qset;
1748                                 first_qset += pi->nqsets;
1749                         }
1750                 break;
1751         }
1752         case CHELSIO_GET_QSET_NUM:{
1753                 struct ch_reg edata;
1754                 struct port_info *pi = netdev_priv(dev);
1755
1756                 edata.cmd = CHELSIO_GET_QSET_NUM;
1757                 edata.val = pi->nqsets;
1758                 if (copy_to_user(useraddr, &edata, sizeof(edata)))
1759                         return -EFAULT;
1760                 break;
1761         }
1762         case CHELSIO_LOAD_FW:{
1763                 u8 *fw_data;
1764                 struct ch_mem_range t;
1765
1766                 if (!capable(CAP_NET_ADMIN))
1767                         return -EPERM;
1768                 if (copy_from_user(&t, useraddr, sizeof(t)))
1769                         return -EFAULT;
1770
1771                 fw_data = kmalloc(t.len, GFP_KERNEL);
1772                 if (!fw_data)
1773                         return -ENOMEM;
1774
1775                 if (copy_from_user
1776                         (fw_data, useraddr + sizeof(t), t.len)) {
1777                         kfree(fw_data);
1778                         return -EFAULT;
1779                 }
1780
1781                 ret = t3_load_fw(adapter, fw_data, t.len);
1782                 kfree(fw_data);
1783                 if (ret)
1784                         return ret;
1785                 break;
1786         }
1787         case CHELSIO_SETMTUTAB:{
1788                 struct ch_mtus m;
1789                 int i;
1790
1791                 if (!is_offload(adapter))
1792                         return -EOPNOTSUPP;
1793                 if (!capable(CAP_NET_ADMIN))
1794                         return -EPERM;
1795                 if (offload_running(adapter))
1796                         return -EBUSY;
1797                 if (copy_from_user(&m, useraddr, sizeof(m)))
1798                         return -EFAULT;
1799                 if (m.nmtus != NMTUS)
1800                         return -EINVAL;
1801                 if (m.mtus[0] < 81)     /* accommodate SACK */
1802                         return -EINVAL;
1803
1804                 /* MTUs must be in ascending order */
1805                 for (i = 1; i < NMTUS; ++i)
1806                         if (m.mtus[i] < m.mtus[i - 1])
1807                                 return -EINVAL;
1808
1809                 memcpy(adapter->params.mtus, m.mtus,
1810                         sizeof(adapter->params.mtus));
1811                 break;
1812         }
1813         case CHELSIO_GET_PM:{
1814                 struct tp_params *p = &adapter->params.tp;
1815                 struct ch_pm m = {.cmd = CHELSIO_GET_PM };
1816
1817                 if (!is_offload(adapter))
1818                         return -EOPNOTSUPP;
1819                 m.tx_pg_sz = p->tx_pg_size;
1820                 m.tx_num_pg = p->tx_num_pgs;
1821                 m.rx_pg_sz = p->rx_pg_size;
1822                 m.rx_num_pg = p->rx_num_pgs;
1823                 m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
1824                 if (copy_to_user(useraddr, &m, sizeof(m)))
1825                         return -EFAULT;
1826                 break;
1827         }
1828         case CHELSIO_SET_PM:{
1829                 struct ch_pm m;
1830                 struct tp_params *p = &adapter->params.tp;
1831
1832                 if (!is_offload(adapter))
1833                         return -EOPNOTSUPP;
1834                 if (!capable(CAP_NET_ADMIN))
1835                         return -EPERM;
1836                 if (adapter->flags & FULL_INIT_DONE)
1837                         return -EBUSY;
1838                 if (copy_from_user(&m, useraddr, sizeof(m)))
1839                         return -EFAULT;
1840                 if (!is_power_of_2(m.rx_pg_sz) ||
1841                         !is_power_of_2(m.tx_pg_sz))
1842                         return -EINVAL; /* not power of 2 */
1843                 if (!(m.rx_pg_sz & 0x14000))
1844                         return -EINVAL; /* not 16KB or 64KB */
1845                 if (!(m.tx_pg_sz & 0x1554000))
1846                         return -EINVAL; /* not a supported Tx page size */
1847                 if (m.tx_num_pg == -1)
1848                         m.tx_num_pg = p->tx_num_pgs;
1849                 if (m.rx_num_pg == -1)
1850                         m.rx_num_pg = p->rx_num_pgs;
1851                 if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
1852                         return -EINVAL;
1853                 if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
1854                         m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
1855                         return -EINVAL;
1856                 p->rx_pg_size = m.rx_pg_sz;
1857                 p->tx_pg_size = m.tx_pg_sz;
1858                 p->rx_num_pgs = m.rx_num_pg;
1859                 p->tx_num_pgs = m.tx_num_pg;
1860                 break;
1861         }
1862         case CHELSIO_GET_MEM:{
1863                 struct ch_mem_range t;
1864                 struct mc7 *mem;
1865                 u64 buf[32];
1866
1867                 if (!is_offload(adapter))
1868                         return -EOPNOTSUPP;
1869                 if (!(adapter->flags & FULL_INIT_DONE))
1870                         return -EIO;    /* need the memory controllers */
1871                 if (copy_from_user(&t, useraddr, sizeof(t)))
1872                         return -EFAULT;
1873                 if ((t.addr & 7) || (t.len & 7))
1874                         return -EINVAL;
1875                 if (t.mem_id == MEM_CM)
1876                         mem = &adapter->cm;
1877                 else if (t.mem_id == MEM_PMRX)
1878                         mem = &adapter->pmrx;
1879                 else if (t.mem_id == MEM_PMTX)
1880                         mem = &adapter->pmtx;
1881                 else
1882                         return -EINVAL;
1883
1884                 /*
1885                  * Version scheme:
1886                  * bits 0..9: chip version
1887                  * bits 10..15: chip revision
1888                  */
1889                 t.version = 3 | (adapter->params.rev << 10);
1890                 if (copy_to_user(useraddr, &t, sizeof(t)))
1891                         return -EFAULT;
1892
1893                 /*
1894                  * Read 256 bytes at a time as len can be large and we don't
1895                  * want to use huge intermediate buffers.
1896                  */
1897                 useraddr += sizeof(t);  /* advance to start of buffer */
1898                 while (t.len) {
1899                         unsigned int chunk =
1900                                 min_t(unsigned int, t.len, sizeof(buf));
1901
1902                         ret = t3_mc7_bd_read(mem, t.addr / 8, chunk / 8, buf);
1905                         if (ret)
1906                                 return ret;
1907                         if (copy_to_user(useraddr, buf, chunk))
1908                                 return -EFAULT;
1909                         useraddr += chunk;
1910                         t.addr += chunk;
1911                         t.len -= chunk;
1912                 }
1913                 break;
1914         }
1915         case CHELSIO_SET_TRACE_FILTER:{
1916                 struct ch_trace t;
1917                 const struct trace_params *tp;
1918
1919                 if (!capable(CAP_NET_ADMIN))
1920                         return -EPERM;
1921                 if (!offload_running(adapter))
1922                         return -EAGAIN;
1923                 if (copy_from_user(&t, useraddr, sizeof(t)))
1924                         return -EFAULT;
1925
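                /*
                 * The ioctl fields from t.sip onward are expected to mirror
                 * struct trace_params, so the cast below reinterprets them
                 * in place rather than copying.
                 */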
1926                 tp = (const struct trace_params *)&t.sip;
1927                 if (t.config_tx)
1928                         t3_config_trace_filter(adapter, tp, 0,
1929                                                 t.invert_match,
1930                                                 t.trace_tx);
1931                 if (t.config_rx)
1932                         t3_config_trace_filter(adapter, tp, 1,
1933                                                 t.invert_match,
1934                                                 t.trace_rx);
1935                 break;
1936         }
1937         default:
1938                 return -EOPNOTSUPP;
1939         }
1940         return 0;
1941 }
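
/*
 * Illustrative userspace sketch, not part of the driver: the extension
 * ioctls above travel over SIOCCHIOCTL with the 32-bit command word leading
 * the user buffer (so cmd is assumed to be the first ch_qset_params field),
 * and -1 means "leave unchanged".  Since intr_lat is absent from the
 * FULL_INIT_DONE test above, interrupt latency may be retuned while the
 * adapter is running.  The caller is assumed to have filled in ifr_name.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include "cxgb3_ioctl.h"

static int example_set_intr_lat(int fd, struct ifreq *ifr)
{
        struct ch_qset_params t;

        memset(&t, 0xff, sizeof(t));    /* all -1: change nothing... */
        t.cmd = CHELSIO_SET_QSET_PARAMS;
        t.qset_idx = 0;
        t.intr_lat = 25;                /* ...except the latency, in us */
        ifr->ifr_data = (char *)&t;
        return ioctl(fd, SIOCCHIOCTL, ifr);
}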
1942
1943 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
1944 {
1945         struct mii_ioctl_data *data = if_mii(req);
1946         struct port_info *pi = netdev_priv(dev);
1947         struct adapter *adapter = pi->adapter;
1948         int ret, mmd;
1949
1950         switch (cmd) {
1951         case SIOCGMIIPHY:
1952                 data->phy_id = pi->phy.addr;
1953                 /* FALLTHRU */
1954         case SIOCGMIIREG:{
1955                 u32 val;
1956                 struct cphy *phy = &pi->phy;
1957
1958                 if (!phy->mdio_read)
1959                         return -EOPNOTSUPP;
1960                 if (is_10G(adapter)) {
1961                         mmd = data->phy_id >> 8;
1962                         if (!mmd)
1963                                 mmd = MDIO_DEV_PCS;
1964                         else if (mmd > MDIO_DEV_XGXS)
1965                                 return -EINVAL;
1966
1967                         ret = phy->mdio_read(adapter, data->phy_id & 0x1f,
1968                                              mmd, data->reg_num, &val);
1970                 } else {
1971                         ret = phy->mdio_read(adapter, data->phy_id & 0x1f,
1972                                              0, data->reg_num & 0x1f, &val);
1973                 }
1975                 if (!ret)
1976                         data->val_out = val;
1977                 break;
1978         }
1979         case SIOCSMIIREG:{
1980                 struct cphy *phy = &pi->phy;
1981
1982                 if (!capable(CAP_NET_ADMIN))
1983                         return -EPERM;
1984                 if (!phy->mdio_write)
1985                         return -EOPNOTSUPP;
1986                 if (is_10G(adapter)) {
1987                         mmd = data->phy_id >> 8;
1988                         if (!mmd)
1989                                 mmd = MDIO_DEV_PCS;
1990                         else if (mmd > MDIO_DEV_XGXS)
1991                                 return -EINVAL;
1992
1993                         ret = phy->mdio_write(adapter, data->phy_id & 0x1f,
1994                                               mmd, data->reg_num,
1995                                               data->val_in);
1998                 } else {
1999                         ret = phy->mdio_write(adapter, data->phy_id & 0x1f,
2000                                               0, data->reg_num & 0x1f,
2001                                               data->val_in);
2002                 }
2004                 break;
2005         }
2006         case SIOCCHIOCTL:
2007                 return cxgb_extension_ioctl(dev, req->ifr_data);
2008         default:
2009                 return -EOPNOTSUPP;
2010         }
2011         return ret;
2012 }
2013
2014 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
2015 {
2016         struct port_info *pi = netdev_priv(dev);
2017         struct adapter *adapter = pi->adapter;
2018         int ret;
2019
2020         if (new_mtu < 81)       /* accommodate SACK */
2021                 return -EINVAL;
2022         if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
2023                 return ret;
2024         dev->mtu = new_mtu;
2025         init_port_mtus(adapter);
2026         if (adapter->params.rev == 0 && offload_running(adapter))
2027                 t3_load_mtus(adapter, adapter->params.mtus,
2028                              adapter->params.a_wnd, adapter->params.b_wnd,
2029                              adapter->port[0]->mtu);
2030         return 0;
2031 }
2032
2033 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2034 {
2035         struct port_info *pi = netdev_priv(dev);
2036         struct adapter *adapter = pi->adapter;
2037         struct sockaddr *addr = p;
2038
2039         if (!is_valid_ether_addr(addr->sa_data))
2040                 return -EINVAL;
2041
2042         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2043         t3_mac_set_address(&pi->mac, 0, dev->dev_addr);
2044         if (offload_running(adapter))
2045                 write_smt_entry(adapter, pi->port_id);
2046         return 0;
2047 }
2048
2049 /**
2050  * t3_synchronize_rx - wait for current Rx processing on a port to complete
2051  * @adap: the adapter
2052  * @p: the port
2053  *
2054  * Ensures that current Rx processing on any of the queues associated with
2055  * the given port completes before returning.  We do this by acquiring and
2056  * releasing the locks of the response queues associated with the port.
2057  */
2058 static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
2059 {
2060         int i;
2061
2062         for (i = 0; i < p->nqsets; i++) {
2063                 struct sge_rspq *q = &adap->sge.qs[i + p->first_qset].rspq;
2064
2065                 spin_lock_irq(&q->lock);
2066                 spin_unlock_irq(&q->lock);
2067         }
2068 }
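
/*
 * The empty lock/unlock pairs above are a quiescing idiom: acquiring a lock
 * cannot succeed until every critical section that began before the call
 * has finished.  A minimal userspace analogue with pthreads:
 */
#include <pthread.h>

static pthread_mutex_t rspq_lock = PTHREAD_MUTEX_INITIALIZER;

static void synchronize_consumers(void)
{
        /* Returns only once any in-flight locked section has exited. */
        pthread_mutex_lock(&rspq_lock);
        pthread_mutex_unlock(&rspq_lock);
}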
2069
2070 static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
2071 {
2072         struct port_info *pi = netdev_priv(dev);
2073         struct adapter *adapter = pi->adapter;
2074
2075         pi->vlan_grp = grp;
2076         if (adapter->params.rev > 0)
2077                 t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
2078         else {
2079                 /* single control for all ports */
2080                 unsigned int i, have_vlans = 0;
2081                 for_each_port(adapter, i)
2082                         have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;
2083
2084                 t3_set_vlan_accel(adapter, 1, have_vlans);
2085         }
2086         t3_synchronize_rx(adapter, pi);
2087 }
2088
2089 #ifdef CONFIG_NET_POLL_CONTROLLER
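/*
 * netpoll clients such as netconsole must be able to receive with normal
 * interrupt delivery unavailable, so this calls the queue sets' interrupt
 * handlers directly: the per-qset vector under MSI-X, or the shared
 * adapter-wide handler otherwise.
 */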
2090 static void cxgb_netpoll(struct net_device *dev)
2091 {
2092         struct port_info *pi = netdev_priv(dev);
2093         struct adapter *adapter = pi->adapter;
2094         int qidx;
2095
2096         for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
2097                 struct sge_qset *qs = &adapter->sge.qs[qidx];
2098                 void *source;
2099
2100                 if (adapter->flags & USING_MSIX)
2101                         source = qs;
2102                 else
2103                         source = adapter;
2104
2105                 t3_intr_handler(adapter, qs->rspq.polling)(0, source);
2106         }
2107 }
2108 #endif
2109
2110 #define TPSRAM_NAME "t3%c_protocol_sram-%d.%d.%d.bin"
2111 int update_tpsram(struct adapter *adap)
2112 {
2113         const struct firmware *tpsram;
2114         char buf[64];
2115         struct device *dev = &adap->pdev->dev;
2116         int ret;
2117         char rev;
2118
2119         rev = adap->params.rev == T3_REV_B2 ? 'b' : 'a';
2120
2121         snprintf(buf, sizeof(buf), TPSRAM_NAME, rev,
2122                  TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
2123
2124         ret = request_firmware(&tpsram, buf, dev);
2125         if (ret < 0) {
2126                 dev_err(dev, "could not load TP SRAM: unable to load %s\n",
2127                         buf);
2128                 return ret;
2129         }
2130
2131         ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
2132         if (ret)
2133                 goto release_tpsram;
2134
2135         ret = t3_set_proto_sram(adap, tpsram->data);
2136         if (ret)
2137                 dev_err(dev, "loading protocol SRAM failed\n");
2138
2139 release_tpsram:
2140         release_firmware(tpsram);
2141
2142         return ret;
2143 }
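
/*
 * For illustration: with the format string above, a T3B2 part and a
 * hypothetical protocol-SRAM version 1.1.0 would make update_tpsram()
 * request the file "t3b_protocol_sram-1.1.0.bin" from userspace via
 * request_firmware().
 */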
2144
2146 /*
2147  * Periodic accumulation of MAC statistics.
2148  */
2149 static void mac_stats_update(struct adapter *adapter)
2150 {
2151         int i;
2152
2153         for_each_port(adapter, i) {
2154                 struct net_device *dev = adapter->port[i];
2155                 struct port_info *p = netdev_priv(dev);
2156
2157                 if (netif_running(dev)) {
2158                         spin_lock(&adapter->stats_lock);
2159                         t3_mac_update_stats(&p->mac);
2160                         spin_unlock(&adapter->stats_lock);
2161                 }
2162         }
2163 }
2164
2165 static void check_link_status(struct adapter *adapter)
2166 {
2167         int i;
2168
2169         for_each_port(adapter, i) {
2170                 struct net_device *dev = adapter->port[i];
2171                 struct port_info *p = netdev_priv(dev);
2172
2173                 if (!(p->port_type->caps & SUPPORTED_IRQ) && netif_running(dev))
2174                         t3_link_changed(adapter, i);
2175         }
2176 }
2177
2178 static void check_t3b2_mac(struct adapter *adapter)
2179 {
2180         int i;
2181
2182         if (!rtnl_trylock())    /* synchronize with ifdown */
2183                 return;
2184
2185         for_each_port(adapter, i) {
2186                 struct net_device *dev = adapter->port[i];
2187                 struct port_info *p = netdev_priv(dev);
2188                 int status;
2189
2190                 if (!netif_running(dev))
2191                         continue;
2192
2193                 status = 0;
2194                 if (netif_running(dev) && netif_carrier_ok(dev))
2195                         status = t3b2_mac_watchdog_task(&p->mac);
2196                 if (status == 1)
2197                         p->mac.stats.num_toggled++;
2198                 else if (status == 2) {
2199                         struct cmac *mac = &p->mac;
2200
2201                         t3_mac_set_mtu(mac, dev->mtu);
2202                         t3_mac_set_address(mac, 0, dev->dev_addr);
2203                         cxgb_set_rxmode(dev);
2204                         t3_link_start(&p->phy, mac, &p->link_config);
2205                         t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
2206                         t3_port_intr_enable(adapter, p->port_id);
2207                         p->mac.stats.num_resets++;
2208                 }
2209         }
2210         rtnl_unlock();
2211 }
2212
2214 static void t3_adap_check_task(struct work_struct *work)
2215 {
2216         struct adapter *adapter = container_of(work, struct adapter,
2217                                                adap_check_task.work);
2218         const struct adapter_params *p = &adapter->params;
2219
2220         adapter->check_task_cnt++;
2221
2222         /* Check link status for PHYs without interrupts */
2223         if (p->linkpoll_period)
2224                 check_link_status(adapter);
2225
2226         /* Accumulate MAC stats if needed */
2227         if (!p->linkpoll_period ||
2228             (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
2229             p->stats_update_period) {
2230                 mac_stats_update(adapter);
2231                 adapter->check_task_cnt = 0;
2232         }
2233
2234         if (p->rev == T3_REV_B2)
2235                 check_t3b2_mac(adapter);
2236
2237         /* Schedule the next check update if any port is active. */
2238         spin_lock(&adapter->work_lock);
2239         if (adapter->open_device_map & PORT_MASK)
2240                 schedule_chk_task(adapter);
2241         spin_unlock(&adapter->work_lock);
2242 }
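
/*
 * The check task above rearms itself through schedule_chk_task() while any
 * port remains open.  A minimal, generic sketch of that self-rearming
 * delayed-work pattern (the ex_* names are made up for illustration):
 */
static struct delayed_work ex_check;

static void ex_check_fn(struct work_struct *work)
{
        /* ... periodic housekeeping ... */

        queue_delayed_work(cxgb3_wq, &ex_check, HZ);    /* again in ~1s */
}

static void ex_check_start(void)
{
        INIT_DELAYED_WORK(&ex_check, ex_check_fn);
        queue_delayed_work(cxgb3_wq, &ex_check, HZ);
}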
2243
2244 /*
2245  * Processes external (PHY) interrupts in process context.
2246  */
2247 static void ext_intr_task(struct work_struct *work)
2248 {
2249         struct adapter *adapter = container_of(work, struct adapter,
2250                                                ext_intr_handler_task);
2251
2252         t3_phy_intr_handler(adapter);
2253
2254         /* Now reenable external interrupts */
2255         spin_lock_irq(&adapter->work_lock);
2256         if (adapter->slow_intr_mask) {
2257                 adapter->slow_intr_mask |= F_T3DBG;
2258                 t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
2259                 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2260                              adapter->slow_intr_mask);
2261         }
2262         spin_unlock_irq(&adapter->work_lock);
2263 }
2264
2265 /*
2266  * Interrupt-context handler for external (PHY) interrupts.
2267  */
2268 void t3_os_ext_intr_handler(struct adapter *adapter)
2269 {
2270         /*
2271          * Schedule a task to handle external interrupts as they may be slow
2272          * and we use a mutex to protect MDIO registers.  We disable PHY
2273          * interrupts in the meantime and let the task reenable them when
2274          * it's done.
2275          */
2276         spin_lock(&adapter->work_lock);
2277         if (adapter->slow_intr_mask) {
2278                 adapter->slow_intr_mask &= ~F_T3DBG;
2279                 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2280                              adapter->slow_intr_mask);
2281                 queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
2282         }
2283         spin_unlock(&adapter->work_lock);
2284 }
2285
2286 void t3_fatal_err(struct adapter *adapter)
2287 {
2288         unsigned int fw_status[4];
2289
2290         if (adapter->flags & FULL_INIT_DONE) {
2291                 t3_sge_stop(adapter);
2292                 t3_intr_disable(adapter);
2293         }
2294         CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
2295         if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
2296                 CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
2297                          fw_status[0], fw_status[1],
2298                          fw_status[2], fw_status[3]);
2300 }
2301
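/*
 * With the MSI-X API of this era, pci_enable_msix() returns 0 on success,
 * a negative errno on failure, or a positive count of allocatable vectors
 * when the full request cannot be granted.  A partial grant is treated as
 * failure here, falling back to MSI or legacy INTx.
 */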
2302 static int __devinit cxgb_enable_msix(struct adapter *adap)
2303 {
2304         struct msix_entry entries[SGE_QSETS + 1];
2305         int i, err;
2306
2307         for (i = 0; i < ARRAY_SIZE(entries); ++i)
2308                 entries[i].entry = i;
2309
2310         err = pci_enable_msix(adap->pdev, entries, ARRAY_SIZE(entries));
2311         if (!err) {
2312                 for (i = 0; i < ARRAY_SIZE(entries); ++i)
2313                         adap->msix_info[i].vec = entries[i].vector;
2314         } else if (err > 0)
2315                 dev_info(&adap->pdev->dev,
2316                        "only %d MSI-X vectors left, not using MSI-X\n", err);
2317         return err;
2318 }
2319
2320 static void __devinit print_port_info(struct adapter *adap,
2321                                       const struct adapter_info *ai)
2322 {
2323         static const char *pci_variant[] = {
2324                 "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
2325         };
2326
2327         int i;
2328         char buf[80];
2329
2330         if (is_pcie(adap))
2331                 snprintf(buf, sizeof(buf), "%s x%d",
2332                          pci_variant[adap->params.pci.variant],
2333                          adap->params.pci.width);
2334         else
2335                 snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
2336                          pci_variant[adap->params.pci.variant],
2337                          adap->params.pci.speed, adap->params.pci.width);
2338
2339         for_each_port(adap, i) {
2340                 struct net_device *dev = adap->port[i];
2341                 const struct port_info *pi = netdev_priv(dev);
2342
2343                 if (!test_bit(i, &adap->registered_device_map))
2344                         continue;
2345                 printk(KERN_INFO "%s: %s %s %sNIC (rev %d) %s%s\n",
2346                        dev->name, ai->desc, pi->port_type->desc,
2347                        is_offload(adap) ? "R" : "", adap->params.rev, buf,
2348                        (adap->flags & USING_MSIX) ? " MSI-X" :
2349                        (adap->flags & USING_MSI) ? " MSI" : "");
2350                 if (adap->name == dev->name && adap->params.vpd.mclk)
2351                         printk(KERN_INFO "%s: %uMB CM, %uMB PMTX, %uMB PMRX\n",
2352                                adap->name, t3_mc7_size(&adap->cm) >> 20,
2353                                t3_mc7_size(&adap->pmtx) >> 20,
2354                                t3_mc7_size(&adap->pmrx) >> 20);
2355         }
2356 }
2357
2358 static int __devinit init_one(struct pci_dev *pdev,
2359                               const struct pci_device_id *ent)
2360 {
2361         static int version_printed;
2362
2363         int i, err, pci_using_dac = 0;
2364         unsigned long mmio_start, mmio_len;
2365         const struct adapter_info *ai;
2366         struct adapter *adapter = NULL;
2367         struct port_info *pi;
2368
2369         if (!version_printed) {
2370                 printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
2371                 ++version_printed;
2372         }
2373
2374         if (!cxgb3_wq) {
2375                 cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
2376                 if (!cxgb3_wq) {
2377                         printk(KERN_ERR DRV_NAME
2378                                ": cannot initialize work queue\n");
2379                         return -ENOMEM;
2380                 }
2381         }
2382
2383         err = pci_request_regions(pdev, DRV_NAME);
2384         if (err) {
2385                 /* Just info, some other driver may have claimed the device. */
2386                 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
2387                 return err;
2388         }
2389
2390         err = pci_enable_device(pdev);
2391         if (err) {
2392                 dev_err(&pdev->dev, "cannot enable PCI device\n");
2393                 goto out_release_regions;
2394         }
2395
2396         if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2397                 pci_using_dac = 1;
2398                 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2399                 if (err) {
2400                         dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
2401                                "coherent allocations\n");
2402                         goto out_disable_device;
2403                 }
2404         } else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
2405                 dev_err(&pdev->dev, "no usable DMA configuration\n");
2406                 goto out_disable_device;
2407         }
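
        /*
         * The block above applies the usual PCI DMA policy: prefer a 64-bit
         * mask (with a matching coherent mask), recording the choice in
         * pci_using_dac so NETIF_F_HIGHDMA can be set on the ports later,
         * and otherwise fall back to 32-bit DMA or fail the probe.
         */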
2408
2409         pci_set_master(pdev);
2410
2411         mmio_start = pci_resource_start(pdev, 0);
2412         mmio_len = pci_resource_len(pdev, 0);
2413         ai = t3_get_adapter_info(ent->driver_data);
2414
2415         adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
2416         if (!adapter) {
2417                 err = -ENOMEM;
2418                 goto out_disable_device;
2419         }
2420
2421         adapter->regs = ioremap_nocache(mmio_start, mmio_len);
2422         if (!adapter->regs) {
2423                 dev_err(&pdev->dev, "cannot map device registers\n");
2424                 err = -ENOMEM;
2425                 goto out_free_adapter;
2426         }
2427
2428         adapter->pdev = pdev;
2429         adapter->name = pci_name(pdev);
2430         adapter->msg_enable = dflt_msg_enable;
2431         adapter->mmio_len = mmio_len;
2432
2433         mutex_init(&adapter->mdio_lock);
2434         spin_lock_init(&adapter->work_lock);
2435         spin_lock_init(&adapter->stats_lock);
2436
2437         INIT_LIST_HEAD(&adapter->adapter_list);
2438         INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
2439         INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
2440
2441         for (i = 0; i < ai->nports; ++i) {
2442                 struct net_device *netdev;
2443
2444                 netdev = alloc_etherdev(sizeof(struct port_info));
2445                 if (!netdev) {
2446                         err = -ENOMEM;
2447                         goto out_free_dev;
2448                 }
2449
2450                 SET_MODULE_OWNER(netdev);
2451                 SET_NETDEV_DEV(netdev, &pdev->dev);
2452
2453                 adapter->port[i] = netdev;
2454                 pi = netdev_priv(netdev);
2455                 pi->adapter = adapter;
2456                 pi->rx_csum_offload = 1;
2457                 pi->nqsets = 1;
2458                 pi->first_qset = i;
2459                 pi->activity = 0;
2460                 pi->port_id = i;
2461                 netif_carrier_off(netdev);
2462                 netdev->irq = pdev->irq;
2463                 netdev->mem_start = mmio_start;
2464                 netdev->mem_end = mmio_start + mmio_len - 1;
2465                 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
2466                 netdev->features |= NETIF_F_LLTX;
2467                 if (pci_using_dac)
2468                         netdev->features |= NETIF_F_HIGHDMA;
2469
2470                 netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
2471                 netdev->vlan_rx_register = vlan_rx_register;
2472
2473                 netdev->open = cxgb_open;
2474                 netdev->stop = cxgb_close;
2475                 netdev->hard_start_xmit = t3_eth_xmit;
2476                 netdev->get_stats = cxgb_get_stats;
2477                 netdev->set_multicast_list = cxgb_set_rxmode;
2478                 netdev->do_ioctl = cxgb_ioctl;
2479                 netdev->change_mtu = cxgb_change_mtu;
2480                 netdev->set_mac_address = cxgb_set_mac_addr;
2481 #ifdef CONFIG_NET_POLL_CONTROLLER
2482                 netdev->poll_controller = cxgb_netpoll;
2483 #endif
2484                 netdev->weight = 64;
2485
2486                 SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
2487         }
2488
2489         pci_set_drvdata(pdev, adapter);
2490         if (t3_prep_adapter(adapter, ai, 1) < 0) {
2491                 err = -ENODEV;
2492                 goto out_free_dev;
2493         }
2494
2495         err = t3_check_tpsram_version(adapter);
2496         if (err == -EINVAL)
2497                 err = update_tpsram(adapter);
2498
2499         if (err)
2500                 goto out_free_dev;
2501
2502         /*
2503          * The card is now ready to go.  If any errors occur during device
2504          * registration we do not fail the whole card but rather proceed only
2505          * with the ports we manage to register successfully.  However we must
2506          * register at least one net device.
2507          */
2508         for_each_port(adapter, i) {
2509                 err = register_netdev(adapter->port[i]);
2510                 if (err) {
2511                         dev_warn(&pdev->dev,
2512                                  "cannot register net device %s, skipping\n",
2513                                  adapter->port[i]->name);
2514                 } else {
2515                         /*
2516                          * Change the name we use for messages to the name of
2517                          * the first successfully registered interface.
2518                          */
2519                         if (!adapter->registered_device_map)
2520                                 adapter->name = adapter->port[i]->name;
2521
2522                         __set_bit(i, &adapter->registered_device_map);
2523                 }
2524         }
2525         if (!adapter->registered_device_map) {
2526                 dev_err(&pdev->dev, "could not register any net devices\n");
2527                 goto out_free_dev;
2528         }
2529
2530         /* Driver's ready. Reflect it on LEDs */
2531         t3_led_ready(adapter);
2532
2533         if (is_offload(adapter)) {
2534                 __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
2535                 cxgb3_adapter_ofld(adapter);
2536         }
2537
2538         /* See what interrupts we'll be using */
2539         if (msi > 1 && cxgb_enable_msix(adapter) == 0)
2540                 adapter->flags |= USING_MSIX;
2541         else if (msi > 0 && pci_enable_msi(pdev) == 0)
2542                 adapter->flags |= USING_MSI;
2543
2544         err = sysfs_create_group(&adapter->port[0]->dev.kobj,
2545                                  &cxgb3_attr_group);
2546
2547         print_port_info(adapter, ai);
2548         return 0;
2549
2550 out_free_dev:
2551         iounmap(adapter->regs);
2552         for (i = ai->nports - 1; i >= 0; --i)
2553                 if (adapter->port[i])
2554                         free_netdev(adapter->port[i]);
2555
2556 out_free_adapter:
2557         kfree(adapter);
2558
2559 out_disable_device:
2560         pci_disable_device(pdev);
2561 out_release_regions:
2562         pci_release_regions(pdev);
2563         pci_set_drvdata(pdev, NULL);
2564         return err;
2565 }
2566
2567 static void __devexit remove_one(struct pci_dev *pdev)
2568 {
2569         struct adapter *adapter = pci_get_drvdata(pdev);
2570
2571         if (adapter) {
2572                 int i;
2573
2574                 t3_sge_stop(adapter);
2575                 sysfs_remove_group(&adapter->port[0]->dev.kobj,
2576                                    &cxgb3_attr_group);
2577
2578                 for_each_port(adapter, i)
2579                         if (test_bit(i, &adapter->registered_device_map))
2580                                 unregister_netdev(adapter->port[i]);
2581
2582                 if (is_offload(adapter)) {
2583                         cxgb3_adapter_unofld(adapter);
2584                         if (test_bit(OFFLOAD_DEVMAP_BIT,
2585                                      &adapter->open_device_map))
2586                                 offload_close(&adapter->tdev);
2587                 }
2588
2589                 t3_free_sge_resources(adapter);
2590                 cxgb_disable_msi(adapter);
2591
2592                 for (i = 0; i < ARRAY_SIZE(adapter->dummy_netdev); i++)
2593                         if (adapter->dummy_netdev[i]) {
2594                                 free_netdev(adapter->dummy_netdev[i]);
2595                                 adapter->dummy_netdev[i] = NULL;
2596                         }
2597
2598                 for_each_port(adapter, i)
2599                         if (adapter->port[i])
2600                                 free_netdev(adapter->port[i]);
2601
2602                 iounmap(adapter->regs);
2603                 kfree(adapter);
2604                 pci_release_regions(pdev);
2605                 pci_disable_device(pdev);
2606                 pci_set_drvdata(pdev, NULL);
2607         }
2608 }
2609
2610 static struct pci_driver driver = {
2611         .name = DRV_NAME,
2612         .id_table = cxgb3_pci_tbl,
2613         .probe = init_one,
2614         .remove = __devexit_p(remove_one),
2615 };
2616
2617 static int __init cxgb3_init_module(void)
2618 {
2619         cxgb3_offload_init();
2620
2621         return pci_register_driver(&driver);
2622 }
2626
2627 static void __exit cxgb3_cleanup_module(void)
2628 {
2629         pci_unregister_driver(&driver);
2630         if (cxgb3_wq)
2631                 destroy_workqueue(cxgb3_wq);
2632 }
2633
2634 module_init(cxgb3_init_module);
2635 module_exit(cxgb3_cleanup_module);