linux-2.6-omap-h63xx.git: drivers/net/cxgb3/cxgb3_main.c
cxgb3 - Fix EEH, missing softirq blocking
1 /*
2  * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 #include <linux/module.h>
33 #include <linux/moduleparam.h>
34 #include <linux/init.h>
35 #include <linux/pci.h>
36 #include <linux/dma-mapping.h>
37 #include <linux/netdevice.h>
38 #include <linux/etherdevice.h>
39 #include <linux/if_vlan.h>
40 #include <linux/mii.h>
41 #include <linux/sockios.h>
42 #include <linux/workqueue.h>
43 #include <linux/proc_fs.h>
44 #include <linux/rtnetlink.h>
45 #include <linux/firmware.h>
46 #include <linux/log2.h>
47 #include <asm/uaccess.h>
48
49 #include "common.h"
50 #include "cxgb3_ioctl.h"
51 #include "regs.h"
52 #include "cxgb3_offload.h"
53 #include "version.h"
54
55 #include "cxgb3_ctl_defs.h"
56 #include "t3_cpl.h"
57 #include "firmware_exports.h"
58
59 enum {
60         MAX_TXQ_ENTRIES = 16384,
61         MAX_CTRL_TXQ_ENTRIES = 1024,
62         MAX_RSPQ_ENTRIES = 16384,
63         MAX_RX_BUFFERS = 16384,
64         MAX_RX_JUMBO_BUFFERS = 16384,
65         MIN_TXQ_ENTRIES = 4,
66         MIN_CTRL_TXQ_ENTRIES = 4,
67         MIN_RSPQ_ENTRIES = 32,
68         MIN_FL_ENTRIES = 32
69 };
70
71 #define PORT_MASK ((1 << MAX_NPORTS) - 1)
72
73 #define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
74                          NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
75                          NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
76
77 #define EEPROM_MAGIC 0x38E2F10C
78
79 #define CH_DEVICE(devid, idx) \
80         { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }
81
82 static const struct pci_device_id cxgb3_pci_tbl[] = {
83         CH_DEVICE(0x20, 0),     /* PE9000 */
84         CH_DEVICE(0x21, 1),     /* T302E */
85         CH_DEVICE(0x22, 2),     /* T310E */
86         CH_DEVICE(0x23, 3),     /* T320X */
87         CH_DEVICE(0x24, 1),     /* T302X */
88         CH_DEVICE(0x25, 3),     /* T320E */
89         CH_DEVICE(0x26, 2),     /* T310X */
90         CH_DEVICE(0x30, 2),     /* T3B10 */
91         CH_DEVICE(0x31, 3),     /* T3B20 */
92         CH_DEVICE(0x32, 1),     /* T3B02 */
93         {0,}
94 };
95
96 MODULE_DESCRIPTION(DRV_DESC);
97 MODULE_AUTHOR("Chelsio Communications");
98 MODULE_LICENSE("Dual BSD/GPL");
99 MODULE_VERSION(DRV_VERSION);
100 MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);
101
102 static int dflt_msg_enable = DFLT_MSG_ENABLE;
103
104 module_param(dflt_msg_enable, int, 0644);
105 MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");
106
107 /*
108  * The driver uses the best interrupt scheme available on a platform in the
109  * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
110  * of these schemes the driver may consider as follows:
111  *
112  * msi = 2: choose from among all three options
113  * msi = 1: only consider MSI and pin interrupts
114  * msi = 0: force pin interrupts
115  */
116 static int msi = 2;
117
118 module_param(msi, int, 0644);
119 MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");
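/*
 * Illustrative sketch only (the real selection happens in the PCI probe
 * code later in this file); setup_msix_tables() is a hypothetical
 * stand-in for the driver's actual MSI-X vector allocation.
 */
static void example_choose_intr_scheme(struct adapter *adap)
{
        if (msi > 1 && setup_msix_tables(adap) == 0)
                adap->flags |= USING_MSIX;      /* msi = 2: prefer MSI-X */
        else if (msi > 0 && pci_enable_msi(adap->pdev) == 0)
                adap->flags |= USING_MSI;       /* msi >= 1: fall back to MSI */
        /* msi = 0, or both attempts failed: legacy pin interrupts */
}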
120
121 /*
122  * The driver enables offload as a default.
123  * To disable it, use ofld_disable = 1.
124  */
125
126 static int ofld_disable = 0;
127
128 module_param(ofld_disable, int, 0644);
129 MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");
130
131 /*
132  * We have work elements that we need to cancel when an interface is taken
133  * down.  Normally the work elements would be executed by keventd but that
134  * can deadlock because of linkwatch.  If our close method takes the rtnl
135  * lock and linkwatch is ahead of our work elements in keventd, linkwatch
136  * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
137  * for our work to complete.  Get our own work queue to solve this.
138  */
139 static struct workqueue_struct *cxgb3_wq;
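/*
 * A sketch of the intended lifecycle (the actual calls live in the module
 * init/exit paths further down in this file):
 *
 *      cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
 *      if (!cxgb3_wq)
 *              return -ENOMEM;
 *      ...
 *      destroy_workqueue(cxgb3_wq);
 */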
140
141 /**
142  *      link_report - show link status and link speed/duplex
143  *      @dev: the net device whose port is to be reported
144  *
145  *      Shows the link status, speed, and duplex of a port.
146  */
147 static void link_report(struct net_device *dev)
148 {
149         if (!netif_carrier_ok(dev))
150                 printk(KERN_INFO "%s: link down\n", dev->name);
151         else {
152                 const char *s = "10Mbps";
153                 const struct port_info *p = netdev_priv(dev);
154
155                 switch (p->link_config.speed) {
156                 case SPEED_10000:
157                         s = "10Gbps";
158                         break;
159                 case SPEED_1000:
160                         s = "1000Mbps";
161                         break;
162                 case SPEED_100:
163                         s = "100Mbps";
164                         break;
165                 }
166
167                 printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
168                        p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
169         }
170 }
171
172 /**
173  *      t3_os_link_changed - handle link status changes
174  *      @adapter: the adapter associated with the link change
175  *      @port_id: the port index whose link status has changed
176  *      @link_stat: the new status of the link
177  *      @speed: the new speed setting
178  *      @duplex: the new duplex setting
179  *      @pause: the new flow-control setting
180  *
181  *      This is the OS-dependent handler for link status changes.  The OS
182  *      neutral handler takes care of most of the processing for these events,
183  *      then calls this handler for any OS-specific processing.
184  */
185 void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
186                         int speed, int duplex, int pause)
187 {
188         struct net_device *dev = adapter->port[port_id];
189         struct port_info *pi = netdev_priv(dev);
190         struct cmac *mac = &pi->mac;
191
192         /* Skip changes from disabled ports. */
193         if (!netif_running(dev))
194                 return;
195
196         if (link_stat != netif_carrier_ok(dev)) {
197                 if (link_stat) {
198                         t3_mac_enable(mac, MAC_DIRECTION_RX);
199                         netif_carrier_on(dev);
200                 } else {
201                         netif_carrier_off(dev);
202                         pi->phy.ops->power_down(&pi->phy, 1);
203                         t3_mac_disable(mac, MAC_DIRECTION_RX);
204                         t3_link_start(&pi->phy, mac, &pi->link_config);
205                 }
206
207                 link_report(dev);
208         }
209 }
210
211 static void cxgb_set_rxmode(struct net_device *dev)
212 {
213         struct t3_rx_mode rm;
214         struct port_info *pi = netdev_priv(dev);
215
216         init_rx_mode(&rm, dev, dev->mc_list);
217         t3_mac_set_rx_mode(&pi->mac, &rm);
218 }
219
220 /**
221  *      link_start - enable a port
222  *      @dev: the device to enable
223  *
224  *      Performs the MAC and PHY actions needed to enable a port.
225  */
226 static void link_start(struct net_device *dev)
227 {
228         struct t3_rx_mode rm;
229         struct port_info *pi = netdev_priv(dev);
230         struct cmac *mac = &pi->mac;
231
232         init_rx_mode(&rm, dev, dev->mc_list);
233         t3_mac_reset(mac);
234         t3_mac_set_mtu(mac, dev->mtu);
235         t3_mac_set_address(mac, 0, dev->dev_addr);
236         t3_mac_set_rx_mode(mac, &rm);
237         t3_link_start(&pi->phy, mac, &pi->link_config);
238         t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
239 }
240
241 static inline void cxgb_disable_msi(struct adapter *adapter)
242 {
243         if (adapter->flags & USING_MSIX) {
244                 pci_disable_msix(adapter->pdev);
245                 adapter->flags &= ~USING_MSIX;
246         } else if (adapter->flags & USING_MSI) {
247                 pci_disable_msi(adapter->pdev);
248                 adapter->flags &= ~USING_MSI;
249         }
250 }
251
252 /*
253  * Interrupt handler for asynchronous events used with MSI-X.
254  */
255 static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
256 {
257         t3_slow_intr_handler(cookie);
258         return IRQ_HANDLED;
259 }
260
261 /*
262  * Name the MSI-X interrupts.
263  */
264 static void name_msix_vecs(struct adapter *adap)
265 {
266         int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;
267
268         snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
269         adap->msix_info[0].desc[n] = 0;
270
271         for_each_port(adap, j) {
272                 struct net_device *d = adap->port[j];
273                 const struct port_info *pi = netdev_priv(d);
274
275                 for (i = 0; i < pi->nqsets; i++, msi_idx++) {
276                         snprintf(adap->msix_info[msi_idx].desc, n,
277                                  "%s (queue %d)", d->name, i);
278                         adap->msix_info[msi_idx].desc[n] = 0;
279                 }
280         }
281 }
282
283 static int request_msix_data_irqs(struct adapter *adap)
284 {
285         int i, j, err, qidx = 0;
286
287         for_each_port(adap, i) {
288                 int nqsets = adap2pinfo(adap, i)->nqsets;
289
290                 for (j = 0; j < nqsets; ++j) {
291                         err = request_irq(adap->msix_info[qidx + 1].vec,
292                                           t3_intr_handler(adap,
293                                                           adap->sge.qs[qidx].
294                                                           rspq.polling), 0,
295                                           adap->msix_info[qidx + 1].desc,
296                                           &adap->sge.qs[qidx]);
297                         if (err) {
298                                 while (--qidx >= 0)
299                                         free_irq(adap->msix_info[qidx + 1].vec,
300                                                  &adap->sge.qs[qidx]);
301                                 return err;
302                         }
303                         qidx++;
304                 }
305         }
306         return 0;
307 }
308
309 static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
310                               unsigned long n)
311 {
312         int attempts = 5;
313
314         while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
315                 if (!--attempts)
316                         return -ETIMEDOUT;
317                 msleep(10);
318         }
319         return 0;
320 }
321
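/*
 * Presumed intent, from the structure below: write a filler entry to all
 * 16 SMT slots, all 2048 L2T slots, and all 2048 routing-table slots,
 * then one CPL_SET_TCB_FIELD, so every TP memory word carries valid
 * parity before parity checking is enabled; 16 + 2048 + 2048 + 1 matches
 * the reply count awaited at the end.
 */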
322 static int init_tp_parity(struct adapter *adap)
323 {
324         int i;
325         struct sk_buff *skb;
326         struct cpl_set_tcb_field *greq;
327         unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;
328
329         t3_tp_set_offload_mode(adap, 1);
330
331         for (i = 0; i < 16; i++) {
332                 struct cpl_smt_write_req *req;
333
334                 skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
335                 req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
336                 memset(req, 0, sizeof(*req));
337                 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
338                 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
339                 req->iff = i;
340                 t3_mgmt_tx(adap, skb);
341         }
342
343         for (i = 0; i < 2048; i++) {
344                 struct cpl_l2t_write_req *req;
345
346                 skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
347                 req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
348                 memset(req, 0, sizeof(*req));
349                 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
350                 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
351                 req->params = htonl(V_L2T_W_IDX(i));
352                 t3_mgmt_tx(adap, skb);
353         }
354
355         for (i = 0; i < 2048; i++) {
356                 struct cpl_rte_write_req *req;
357
358                 skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
359                 req = (struct cpl_rte_write_req *)__skb_put(skb, sizeof(*req));
360                 memset(req, 0, sizeof(*req));
361                 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
362                 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
363                 req->l2t_idx = htonl(V_L2T_W_IDX(i));
364                 t3_mgmt_tx(adap, skb);
365         }
366
367         skb = alloc_skb(sizeof(*greq), GFP_KERNEL | __GFP_NOFAIL);
368         greq = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*greq));
369         memset(greq, 0, sizeof(*greq));
370         greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
371         OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
372         greq->mask = cpu_to_be64(1);
373         t3_mgmt_tx(adap, skb);
374
375         i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
376         t3_tp_set_offload_mode(adap, 0);
377         return i;
378 }
379
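/*
 * Worked example (illustrative): with nq0 = nq1 = 2, the first half of
 * rspq_map cycles 0,1,0,1,... and the second half 2,3,2,3,..., steering
 * each port's share of the RSS hash range onto its own pair of response
 * queues.
 */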
380 /**
381  *      setup_rss - configure RSS
382  *      @adap: the adapter
383  *
384  *      Sets up RSS to distribute packets to multiple receive queues.  We
385  *      configure the RSS CPU lookup table to distribute to the number of HW
386  *      receive queues, and the response queue lookup table to narrow that
387  *      down to the response queues actually configured for each port.
388  *      We always configure the RSS mapping for two ports since the mapping
389  *      table has plenty of entries.
390  */
391 static void setup_rss(struct adapter *adap)
392 {
393         int i;
394         unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
395         unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
396         u8 cpus[SGE_QSETS + 1];
397         u16 rspq_map[RSS_TABLE_SIZE];
398
399         for (i = 0; i < SGE_QSETS; ++i)
400                 cpus[i] = i;
401         cpus[SGE_QSETS] = 0xff; /* terminator */
402
403         for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
404                 rspq_map[i] = i % nq0;
405                 rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
406         }
407
408         t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
409                       F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
410                       V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
411 }
412
413 static void init_napi(struct adapter *adap)
414 {
415         int i;
416
417         for (i = 0; i < SGE_QSETS; i++) {
418                 struct sge_qset *qs = &adap->sge.qs[i];
419
420                 if (qs->adap)
421                         netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
422                                        64);
423         }
424 }
425
426 /*
427  * Wait until all NAPI handlers are descheduled.  This includes the handlers of
428  * both netdevices representing interfaces and the dummy ones for the extra
429  * queues.
430  */
431 static void quiesce_rx(struct adapter *adap)
432 {
433         int i;
434
435         for (i = 0; i < SGE_QSETS; i++)
436                 if (adap->sge.qs[i].adap)
437                         napi_disable(&adap->sge.qs[i].napi);
438 }
439
440 static void enable_all_napi(struct adapter *adap)
441 {
442         int i;
443         for (i = 0; i < SGE_QSETS; i++)
444                 if (adap->sge.qs[i].adap)
445                         napi_enable(&adap->sge.qs[i].napi);
446 }
447
448 /**
449  *      setup_sge_qsets - configure SGE Tx/Rx/response queues
450  *      @adap: the adapter
451  *
452  *      Determines how many sets of SGE queues to use and initializes them.
453  *      We support multiple queue sets per port if we have MSI-X, otherwise
454  *      just one queue set per port.
455  */
456 static int setup_sge_qsets(struct adapter *adap)
457 {
458         int i, j, err, irq_idx = 0, qset_idx = 0;
459         unsigned int ntxq = SGE_TXQ_PER_SET;
460
461         if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
462                 irq_idx = -1;
463
464         for_each_port(adap, i) {
465                 struct net_device *dev = adap->port[i];
466                 struct port_info *pi = netdev_priv(dev);
467
468                 pi->qs = &adap->sge.qs[pi->first_qset];
469                 for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
470                         err = t3_sge_alloc_qset(adap, qset_idx, 1,
471                                 (adap->flags & USING_MSIX) ? qset_idx + 1 :
472                                                              irq_idx,
473                                 &adap->params.sge.qset[qset_idx], ntxq, dev);
474                         if (err) {
475                                 t3_free_sge_resources(adap);
476                                 return err;
477                         }
478                 }
479         }
480
481         return 0;
482 }
483
484 static ssize_t attr_show(struct device *d, char *buf,
485                          ssize_t(*format) (struct net_device *, char *))
486 {
487         ssize_t len;
488
489         /* Synchronize with ioctls that may shut down the device */
490         rtnl_lock();
491         len = (*format) (to_net_dev(d), buf);
492         rtnl_unlock();
493         return len;
494 }
495
496 static ssize_t attr_store(struct device *d,
497                           const char *buf, size_t len,
498                           ssize_t(*set) (struct net_device *, unsigned int),
499                           unsigned int min_val, unsigned int max_val)
500 {
501         char *endp;
502         ssize_t ret;
503         unsigned int val;
504
505         if (!capable(CAP_NET_ADMIN))
506                 return -EPERM;
507
508         val = simple_strtoul(buf, &endp, 0);
509         if (endp == buf || val < min_val || val > max_val)
510                 return -EINVAL;
511
512         rtnl_lock();
513         ret = (*set) (to_net_dev(d), val);
514         if (!ret)
515                 ret = len;
516         rtnl_unlock();
517         return ret;
518 }
519
520 #define CXGB3_SHOW(name, val_expr) \
521 static ssize_t format_##name(struct net_device *dev, char *buf) \
522 { \
523         struct port_info *pi = netdev_priv(dev); \
524         struct adapter *adap = pi->adapter; \
525         return sprintf(buf, "%u\n", val_expr); \
526 } \
527 static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
528                            char *buf) \
529 { \
530         return attr_show(d, buf, format_##name); \
531 }
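/*
 * For illustration: CXGB3_SHOW(cam_size, t3_mc5_size(&adap->mc5)) expands
 * to a format_cam_size() that prints the value into buf and a
 * show_cam_size() sysfs method that invokes it under attr_show()'s rtnl
 * protection.  CXGB3_ATTR_R()/CXGB3_ATTR_RW() below bind such a pair to a
 * device attribute.
 */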
532
533 static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
534 {
535         struct port_info *pi = netdev_priv(dev);
536         struct adapter *adap = pi->adapter;
537         int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;
538
539         if (adap->flags & FULL_INIT_DONE)
540                 return -EBUSY;
541         if (val && adap->params.rev == 0)
542                 return -EINVAL;
543         if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
544             min_tids)
545                 return -EINVAL;
546         adap->params.mc5.nfilters = val;
547         return 0;
548 }
549
550 static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
551                               const char *buf, size_t len)
552 {
553         return attr_store(d, buf, len, set_nfilters, 0, ~0);
554 }
555
556 static ssize_t set_nservers(struct net_device *dev, unsigned int val)
557 {
558         struct port_info *pi = netdev_priv(dev);
559         struct adapter *adap = pi->adapter;
560
561         if (adap->flags & FULL_INIT_DONE)
562                 return -EBUSY;
563         if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
564             MC5_MIN_TIDS)
565                 return -EINVAL;
566         adap->params.mc5.nservers = val;
567         return 0;
568 }
569
570 static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
571                               const char *buf, size_t len)
572 {
573         return attr_store(d, buf, len, set_nservers, 0, ~0);
574 }
575
576 #define CXGB3_ATTR_R(name, val_expr) \
577 CXGB3_SHOW(name, val_expr) \
578 static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
579
580 #define CXGB3_ATTR_RW(name, val_expr, store_method) \
581 CXGB3_SHOW(name, val_expr) \
582 static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)
583
584 CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
585 CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
586 CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);
587
588 static struct attribute *cxgb3_attrs[] = {
589         &dev_attr_cam_size.attr,
590         &dev_attr_nfilters.attr,
591         &dev_attr_nservers.attr,
592         NULL
593 };
594
595 static struct attribute_group cxgb3_attr_group = { .attrs = cxgb3_attrs };
596
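/*
 * Reads one of the eight Tx scheduler rate limiters.  Two schedulers share
 * each 32-bit TM PIO word (odd-numbered ones in the high half); bpt is the
 * bytes-per-tick field and cpt the clocks-per-tick field.  Assuming
 * vpd.cclk is the core clock in kHz, cclk * 1000 / cpt gives ticks per
 * second, multiplying by bpt gives bytes/s, and dividing by 125 converts
 * bytes/s to Kbps (1 Kbps = 125 bytes/s).
 */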
597 static ssize_t tm_attr_show(struct device *d,
598                             char *buf, int sched)
599 {
600         struct port_info *pi = netdev_priv(to_net_dev(d));
601         struct adapter *adap = pi->adapter;
602         unsigned int v, addr, bpt, cpt;
603         ssize_t len;
604
605         addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
606         rtnl_lock();
607         t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
608         v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
609         if (sched & 1)
610                 v >>= 16;
611         bpt = (v >> 8) & 0xff;
612         cpt = v & 0xff;
613         if (!cpt)
614                 len = sprintf(buf, "disabled\n");
615         else {
616                 v = (adap->params.vpd.cclk * 1000) / cpt;
617                 len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
618         }
619         rtnl_unlock();
620         return len;
621 }
622
623 static ssize_t tm_attr_store(struct device *d,
624                              const char *buf, size_t len, int sched)
625 {
626         struct port_info *pi = netdev_priv(to_net_dev(d));
627         struct adapter *adap = pi->adapter;
628         unsigned int val;
629         char *endp;
630         ssize_t ret;
631
632         if (!capable(CAP_NET_ADMIN))
633                 return -EPERM;
634
635         val = simple_strtoul(buf, &endp, 0);
636         if (endp == buf || val > 10000000)
637                 return -EINVAL;
638
639         rtnl_lock();
640         ret = t3_config_sched(adap, val, sched);
641         if (!ret)
642                 ret = len;
643         rtnl_unlock();
644         return ret;
645 }
646
647 #define TM_ATTR(name, sched) \
648 static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
649                            char *buf) \
650 { \
651         return tm_attr_show(d, buf, sched); \
652 } \
653 static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
654                             const char *buf, size_t len) \
655 { \
656         return tm_attr_store(d, buf, len, sched); \
657 } \
658 static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)
659
660 TM_ATTR(sched0, 0);
661 TM_ATTR(sched1, 1);
662 TM_ATTR(sched2, 2);
663 TM_ATTR(sched3, 3);
664 TM_ATTR(sched4, 4);
665 TM_ATTR(sched5, 5);
666 TM_ATTR(sched6, 6);
667 TM_ATTR(sched7, 7);
668
669 static struct attribute *offload_attrs[] = {
670         &dev_attr_sched0.attr,
671         &dev_attr_sched1.attr,
672         &dev_attr_sched2.attr,
673         &dev_attr_sched3.attr,
674         &dev_attr_sched4.attr,
675         &dev_attr_sched5.attr,
676         &dev_attr_sched6.attr,
677         &dev_attr_sched7.attr,
678         NULL
679 };
680
681 static struct attribute_group offload_attr_group = { .attrs = offload_attrs };
682
683 /*
684  * Sends an sk_buff to an offload queue driver after dealing with any
685  * active network taps, blocking softirqs around the send.
686  */
687 static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
688 {
689         int ret;
690
691         local_bh_disable();
692         ret = t3_offload_tx(tdev, skb);
693         local_bh_enable();
694         return ret;
695 }
696
697 static int write_smt_entry(struct adapter *adapter, int idx)
698 {
699         struct cpl_smt_write_req *req;
700         struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);
701
702         if (!skb)
703                 return -ENOMEM;
704
705         req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
706         req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
707         OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
708         req->mtu_idx = NMTUS - 1;       /* should be 0 but there's a T3 bug */
709         req->iff = idx;
710         memset(req->src_mac1, 0, sizeof(req->src_mac1));
711         memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
712         skb->priority = 1;
713         offload_tx(&adapter->tdev, skb);
714         return 0;
715 }
716
717 static int init_smt(struct adapter *adapter)
718 {
719         int i;
720
721         for_each_port(adapter, i)
722                 write_smt_entry(adapter, i);
723         return 0;
724 }
725
726 static void init_port_mtus(struct adapter *adapter)
727 {
728         unsigned int mtus = adapter->port[0]->mtu;
729
730         if (adapter->port[1])
731                 mtus |= adapter->port[1]->mtu << 16;
732         t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
733 }
734
735 static void send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
736                               int hi, int port)
737 {
738         struct sk_buff *skb;
739         struct mngt_pktsched_wr *req;
740
741         skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
742         req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
743         req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
744         req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
745         req->sched = sched;
746         req->idx = qidx;
747         req->min = lo;
748         req->max = hi;
749         req->binding = port;
750         t3_mgmt_tx(adap, skb);
751 }
752
753 static void bind_qsets(struct adapter *adap)
754 {
755         int i, j;
756
757         for_each_port(adap, i) {
758                 const struct port_info *pi = adap2pinfo(adap, i);
759
760                 for (j = 0; j < pi->nqsets; ++j)
761                         send_pktsched_cmd(adap, 1, pi->first_qset + j, -1,
762                                           -1, i);
763         }
764 }
765
766 #define FW_FNAME "t3fw-%d.%d.%d.bin"
767 #define TPSRAM_NAME "t3%c_protocol_sram-%d.%d.%d.bin"
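/*
 * For example (version numbers illustrative): firmware version 1.2.3
 * expands to "t3fw-1.2.3.bin", and a rev-B adapter then requests
 * "t3b_protocol_sram-1.2.3.bin".
 */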
768
769 static int upgrade_fw(struct adapter *adap)
770 {
771         int ret;
772         char buf[64];
773         const struct firmware *fw;
774         struct device *dev = &adap->pdev->dev;
775
776         snprintf(buf, sizeof(buf), FW_FNAME, FW_VERSION_MAJOR,
777                  FW_VERSION_MINOR, FW_VERSION_MICRO);
778         ret = request_firmware(&fw, buf, dev);
779         if (ret < 0) {
780                 dev_err(dev, "could not upgrade firmware: unable to load %s\n",
781                         buf);
782                 return ret;
783         }
784         ret = t3_load_fw(adap, fw->data, fw->size);
785         release_firmware(fw);
786
787         if (ret == 0)
788                 dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
789                          FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
790         else
791                 dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
792                         FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
793
794         return ret;
795 }
796
797 static inline char t3rev2char(struct adapter *adapter)
798 {
799         char rev = 0;
800
801         switch (adapter->params.rev) {
802         case T3_REV_B:
803         case T3_REV_B2:
804                 rev = 'b';
805                 break;
806         case T3_REV_C:
807                 rev = 'c';
808                 break;
809         }
810         return rev;
811 }
812
813 static int update_tpsram(struct adapter *adap)
814 {
815         const struct firmware *tpsram;
816         char buf[64];
817         struct device *dev = &adap->pdev->dev;
818         int ret;
819         char rev;
820
821         rev = t3rev2char(adap);
822         if (!rev)
823                 return 0;
824
825         snprintf(buf, sizeof(buf), TPSRAM_NAME, rev,
826                  TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
827
828         ret = request_firmware(&tpsram, buf, dev);
829         if (ret < 0) {
830                 dev_err(dev, "could not load TP SRAM: unable to load %s\n",
831                         buf);
832                 return ret;
833         }
834
835         ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
836         if (ret)
837                 goto release_tpsram;
838
839         ret = t3_set_proto_sram(adap, tpsram->data);
840         if (ret == 0)
841                 dev_info(dev,
842                          "successful update of protocol engine "
843                          "to %d.%d.%d\n",
844                          TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
845         else
846                 dev_err(dev, "failed to update protocol engine to %d.%d.%d\n",
847                         TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
848         if (ret)
849                 dev_err(dev, "loading protocol SRAM failed\n");
850
851 release_tpsram:
852         release_firmware(tpsram);
853
854         return ret;
855 }
856
857 /**
858  *      cxgb_up - enable the adapter
859  *      @adap: adapter being enabled
860  *
861  *      Called when the first port is enabled, this function performs the
862  *      actions necessary to make an adapter operational, such as completing
863  *      the initialization of HW modules, and enabling interrupts.
864  *
865  *      Must be called with the rtnl lock held.
866  */
867 static int cxgb_up(struct adapter *adap)
868 {
869         int err;
870         int must_load;
871
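        /*
         * First bring-up: -EINVAL from the version checks below means the
         * flashed firmware/TP SRAM does not match this driver, so an
         * upgrade is attempted; failure aborts only if must_load marks
         * the image as mandatory.
         */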
872         if (!(adap->flags & FULL_INIT_DONE)) {
873                 err = t3_check_fw_version(adap, &must_load);
874                 if (err == -EINVAL) {
875                         err = upgrade_fw(adap);
876                         if (err && must_load)
877                                 goto out;
878                 }
879
880                 err = t3_check_tpsram_version(adap, &must_load);
881                 if (err == -EINVAL) {
882                         err = update_tpsram(adap);
883                         if (err && must_load)
884                                 goto out;
885                 }
886
887                 err = t3_init_hw(adap, 0);
888                 if (err)
889                         goto out;
890
891                 t3_set_reg_field(adap, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
892                 t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));
893
894                 err = setup_sge_qsets(adap);
895                 if (err)
896                         goto out;
897
898                 setup_rss(adap);
899                 init_napi(adap);
900                 adap->flags |= FULL_INIT_DONE;
901         }
902
903         t3_intr_clear(adap);
904
905         if (adap->flags & USING_MSIX) {
906                 name_msix_vecs(adap);
907                 err = request_irq(adap->msix_info[0].vec,
908                                   t3_async_intr_handler, 0,
909                                   adap->msix_info[0].desc, adap);
910                 if (err)
911                         goto irq_err;
912
913                 err = request_msix_data_irqs(adap);
914                 if (err) {
915                         free_irq(adap->msix_info[0].vec, adap);
916                         goto irq_err;
917                 }
918         } else if ((err = request_irq(adap->pdev->irq,
919                                       t3_intr_handler(adap,
920                                                       adap->sge.qs[0].rspq.
921                                                       polling),
922                                       (adap->flags & USING_MSI) ?
923                                        0 : IRQF_SHARED,
924                                       adap->name, adap)))
925                 goto irq_err;
926
927         enable_all_napi(adap);
928         t3_sge_start(adap);
929         t3_intr_enable(adap);
930
931         if (adap->params.rev >= T3_REV_C && !(adap->flags & TP_PARITY_INIT) &&
932             is_offload(adap) && init_tp_parity(adap) == 0)
933                 adap->flags |= TP_PARITY_INIT;
934
935         if (adap->flags & TP_PARITY_INIT) {
936                 t3_write_reg(adap, A_TP_INT_CAUSE,
937                              F_CMCACHEPERR | F_ARPLUTPERR);
938                 t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff);
939         }
940
941         if ((adap->flags & (USING_MSIX | QUEUES_BOUND)) == USING_MSIX)
942                 bind_qsets(adap);
943         adap->flags |= QUEUES_BOUND;
944
945 out:
946         return err;
947 irq_err:
948         CH_ERR(adap, "request_irq failed, err %d\n", err);
949         goto out;
950 }
951
952 /*
953  * Release resources when all the ports and offloading have been stopped.
954  */
955 static void cxgb_down(struct adapter *adapter)
956 {
957         t3_sge_stop(adapter);
958         spin_lock_irq(&adapter->work_lock);     /* sync with PHY intr task */
959         t3_intr_disable(adapter);
960         spin_unlock_irq(&adapter->work_lock);
961
962         if (adapter->flags & USING_MSIX) {
963                 int i, n = 0;
964
965                 free_irq(adapter->msix_info[0].vec, adapter);
966                 for_each_port(adapter, i)
967                         n += adap2pinfo(adapter, i)->nqsets;
968
969                 for (i = 0; i < n; ++i)
970                         free_irq(adapter->msix_info[i + 1].vec,
971                                  &adapter->sge.qs[i]);
972         } else
973                 free_irq(adapter->pdev->irq, adapter);
974
975         flush_workqueue(cxgb3_wq);      /* wait for external IRQ handler */
976         quiesce_rx(adapter);
977 }
978
979 static void schedule_chk_task(struct adapter *adap)
980 {
981         unsigned int timeo;
982
983         timeo = adap->params.linkpoll_period ?
984             (HZ * adap->params.linkpoll_period) / 10 :
985             adap->params.stats_update_period * HZ;
986         if (timeo)
987                 queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
988 }
989
990 static int offload_open(struct net_device *dev)
991 {
992         struct port_info *pi = netdev_priv(dev);
993         struct adapter *adapter = pi->adapter;
994         struct t3cdev *tdev = dev2t3cdev(dev);
995         int adap_up = adapter->open_device_map & PORT_MASK;
996         int err;
997
998         if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
999                 return 0;
1000
1001         if (!adap_up && (err = cxgb_up(adapter)) < 0)
1002                 return err;
1003
1004         t3_tp_set_offload_mode(adapter, 1);
1005         tdev->lldev = adapter->port[0];
1006         err = cxgb3_offload_activate(adapter);
1007         if (err)
1008                 goto out;
1009
1010         init_port_mtus(adapter);
1011         t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
1012                      adapter->params.b_wnd,
1013                      adapter->params.rev == 0 ?
1014                      adapter->port[0]->mtu : 0xffff);
1015         init_smt(adapter);
1016
1017         /* Never mind if the next step fails */
1018         sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group);
1019
1020         /* Call back all registered clients */
1021         cxgb3_add_clients(tdev);
1022
1023 out:
1024         /* restore them in case the offload module has changed them */
1025         if (err) {
1026                 t3_tp_set_offload_mode(adapter, 0);
1027                 clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
1028                 cxgb3_set_dummy_ops(tdev);
1029         }
1030         return err;
1031 }
1032
1033 static int offload_close(struct t3cdev *tdev)
1034 {
1035         struct adapter *adapter = tdev2adap(tdev);
1036
1037         if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
1038                 return 0;
1039
1040         /* Call back all registered clients */
1041         cxgb3_remove_clients(tdev);
1042
1043         sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);
1044
1045         tdev->lldev = NULL;
1046         cxgb3_set_dummy_ops(tdev);
1047         t3_tp_set_offload_mode(adapter, 0);
1048         clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
1049
1050         if (!adapter->open_device_map)
1051                 cxgb_down(adapter);
1052
1053         cxgb3_offload_deactivate(adapter);
1054         return 0;
1055 }
1056
1057 static int cxgb_open(struct net_device *dev)
1058 {
1059         struct port_info *pi = netdev_priv(dev);
1060         struct adapter *adapter = pi->adapter;
1061         int other_ports = adapter->open_device_map & PORT_MASK;
1062         int err;
1063
1064         if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0) {
1065                 quiesce_rx(adapter);
1066                 return err;
1067         }
1068
1069         set_bit(pi->port_id, &adapter->open_device_map);
1070         if (is_offload(adapter) && !ofld_disable) {
1071                 err = offload_open(dev);
1072                 if (err)
1073                         printk(KERN_WARNING
1074                                "Could not initialize offload capabilities\n");
1075         }
1076
1077         link_start(dev);
1078         t3_port_intr_enable(adapter, pi->port_id);
1079         netif_start_queue(dev);
1080         if (!other_ports)
1081                 schedule_chk_task(adapter);
1082
1083         return 0;
1084 }
1085
1086 static int cxgb_close(struct net_device *dev)
1087 {
1088         struct port_info *pi = netdev_priv(dev);
1089         struct adapter *adapter = pi->adapter;
1090
1091         t3_port_intr_disable(adapter, pi->port_id);
1092         netif_stop_queue(dev);
1093         pi->phy.ops->power_down(&pi->phy, 1);
1094         netif_carrier_off(dev);
1095         t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
1096
1097         spin_lock(&adapter->work_lock); /* sync with update task */
1098         clear_bit(pi->port_id, &adapter->open_device_map);
1099         spin_unlock(&adapter->work_lock);
1100
1101         if (!(adapter->open_device_map & PORT_MASK))
1102                 cancel_rearming_delayed_workqueue(cxgb3_wq,
1103                                                   &adapter->adap_check_task);
1104
1105         if (!adapter->open_device_map)
1106                 cxgb_down(adapter);
1107
1108         return 0;
1109 }
1110
1111 static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
1112 {
1113         struct port_info *pi = netdev_priv(dev);
1114         struct adapter *adapter = pi->adapter;
1115         struct net_device_stats *ns = &pi->netstats;
1116         const struct mac_stats *pstats;
1117
1118         spin_lock(&adapter->stats_lock);
1119         pstats = t3_mac_update_stats(&pi->mac);
1120         spin_unlock(&adapter->stats_lock);
1121
1122         ns->tx_bytes = pstats->tx_octets;
1123         ns->tx_packets = pstats->tx_frames;
1124         ns->rx_bytes = pstats->rx_octets;
1125         ns->rx_packets = pstats->rx_frames;
1126         ns->multicast = pstats->rx_mcast_frames;
1127
1128         ns->tx_errors = pstats->tx_underrun;
1129         ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
1130             pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
1131             pstats->rx_fifo_ovfl;
1132
1133         /* detailed rx_errors */
1134         ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
1135         ns->rx_over_errors = 0;
1136         ns->rx_crc_errors = pstats->rx_fcs_errs;
1137         ns->rx_frame_errors = pstats->rx_symbol_errs;
1138         ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
1139         ns->rx_missed_errors = pstats->rx_cong_drops;
1140
1141         /* detailed tx_errors */
1142         ns->tx_aborted_errors = 0;
1143         ns->tx_carrier_errors = 0;
1144         ns->tx_fifo_errors = pstats->tx_underrun;
1145         ns->tx_heartbeat_errors = 0;
1146         ns->tx_window_errors = 0;
1147         return ns;
1148 }
1149
1150 static u32 get_msglevel(struct net_device *dev)
1151 {
1152         struct port_info *pi = netdev_priv(dev);
1153         struct adapter *adapter = pi->adapter;
1154
1155         return adapter->msg_enable;
1156 }
1157
1158 static void set_msglevel(struct net_device *dev, u32 val)
1159 {
1160         struct port_info *pi = netdev_priv(dev);
1161         struct adapter *adapter = pi->adapter;
1162
1163         adapter->msg_enable = val;
1164 }
1165
1166 static char stats_strings[][ETH_GSTRING_LEN] = {
1167         "TxOctetsOK         ",
1168         "TxFramesOK         ",
1169         "TxMulticastFramesOK",
1170         "TxBroadcastFramesOK",
1171         "TxPauseFrames      ",
1172         "TxUnderrun         ",
1173         "TxExtUnderrun      ",
1174
1175         "TxFrames64         ",
1176         "TxFrames65To127    ",
1177         "TxFrames128To255   ",
1178         "TxFrames256To511   ",
1179         "TxFrames512To1023  ",
1180         "TxFrames1024To1518 ",
1181         "TxFrames1519ToMax  ",
1182
1183         "RxOctetsOK         ",
1184         "RxFramesOK         ",
1185         "RxMulticastFramesOK",
1186         "RxBroadcastFramesOK",
1187         "RxPauseFrames      ",
1188         "RxFCSErrors        ",
1189         "RxSymbolErrors     ",
1190         "RxShortErrors      ",
1191         "RxJabberErrors     ",
1192         "RxLengthErrors     ",
1193         "RxFIFOoverflow     ",
1194
1195         "RxFrames64         ",
1196         "RxFrames65To127    ",
1197         "RxFrames128To255   ",
1198         "RxFrames256To511   ",
1199         "RxFrames512To1023  ",
1200         "RxFrames1024To1518 ",
1201         "RxFrames1519ToMax  ",
1202
1203         "PhyFIFOErrors      ",
1204         "TSO                ",
1205         "VLANextractions    ",
1206         "VLANinsertions     ",
1207         "TxCsumOffload      ",
1208         "RxCsumGood         ",
1209         "RxDrops            ",
1210
1211         "CheckTXEnToggled   ",
1212         "CheckResets        ",
1213
1214 };
1215
1216 static int get_sset_count(struct net_device *dev, int sset)
1217 {
1218         switch (sset) {
1219         case ETH_SS_STATS:
1220                 return ARRAY_SIZE(stats_strings);
1221         default:
1222                 return -EOPNOTSUPP;
1223         }
1224 }
1225
1226 #define T3_REGMAP_SIZE (3 * 1024)
1227
1228 static int get_regs_len(struct net_device *dev)
1229 {
1230         return T3_REGMAP_SIZE;
1231 }
1232
1233 static int get_eeprom_len(struct net_device *dev)
1234 {
1235         return EEPROMSIZE;
1236 }
1237
1238 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1239 {
1240         struct port_info *pi = netdev_priv(dev);
1241         struct adapter *adapter = pi->adapter;
1242         u32 fw_vers = 0;
1243         u32 tp_vers = 0;
1244
1245         t3_get_fw_version(adapter, &fw_vers);
1246         t3_get_tp_version(adapter, &tp_vers);
1247
1248         strcpy(info->driver, DRV_NAME);
1249         strcpy(info->version, DRV_VERSION);
1250         strcpy(info->bus_info, pci_name(adapter->pdev));
1251         if (!fw_vers)
1252                 strcpy(info->fw_version, "N/A");
1253         else {
1254                 snprintf(info->fw_version, sizeof(info->fw_version),
1255                          "%s %u.%u.%u TP %u.%u.%u",
1256                          G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
1257                          G_FW_VERSION_MAJOR(fw_vers),
1258                          G_FW_VERSION_MINOR(fw_vers),
1259                          G_FW_VERSION_MICRO(fw_vers),
1260                          G_TP_VERSION_MAJOR(tp_vers),
1261                          G_TP_VERSION_MINOR(tp_vers),
1262                          G_TP_VERSION_MICRO(tp_vers));
1263         }
1264 }
1265
1266 static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
1267 {
1268         if (stringset == ETH_SS_STATS)
1269                 memcpy(data, stats_strings, sizeof(stats_strings));
1270 }
1271
1272 static unsigned long collect_sge_port_stats(struct adapter *adapter,
1273                                             struct port_info *p, int idx)
1274 {
1275         int i;
1276         unsigned long tot = 0;
1277
1278         for (i = 0; i < p->nqsets; ++i)
1279                 tot += adapter->sge.qs[i + p->first_qset].port_stats[idx];
1280         return tot;
1281 }
1282
1283 static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1284                       u64 *data)
1285 {
1286         struct port_info *pi = netdev_priv(dev);
1287         struct adapter *adapter = pi->adapter;
1288         const struct mac_stats *s;
1289
1290         spin_lock(&adapter->stats_lock);
1291         s = t3_mac_update_stats(&pi->mac);
1292         spin_unlock(&adapter->stats_lock);
1293
1294         *data++ = s->tx_octets;
1295         *data++ = s->tx_frames;
1296         *data++ = s->tx_mcast_frames;
1297         *data++ = s->tx_bcast_frames;
1298         *data++ = s->tx_pause;
1299         *data++ = s->tx_underrun;
1300         *data++ = s->tx_fifo_urun;
1301
1302         *data++ = s->tx_frames_64;
1303         *data++ = s->tx_frames_65_127;
1304         *data++ = s->tx_frames_128_255;
1305         *data++ = s->tx_frames_256_511;
1306         *data++ = s->tx_frames_512_1023;
1307         *data++ = s->tx_frames_1024_1518;
1308         *data++ = s->tx_frames_1519_max;
1309
1310         *data++ = s->rx_octets;
1311         *data++ = s->rx_frames;
1312         *data++ = s->rx_mcast_frames;
1313         *data++ = s->rx_bcast_frames;
1314         *data++ = s->rx_pause;
1315         *data++ = s->rx_fcs_errs;
1316         *data++ = s->rx_symbol_errs;
1317         *data++ = s->rx_short;
1318         *data++ = s->rx_jabber;
1319         *data++ = s->rx_too_long;
1320         *data++ = s->rx_fifo_ovfl;
1321
1322         *data++ = s->rx_frames_64;
1323         *data++ = s->rx_frames_65_127;
1324         *data++ = s->rx_frames_128_255;
1325         *data++ = s->rx_frames_256_511;
1326         *data++ = s->rx_frames_512_1023;
1327         *data++ = s->rx_frames_1024_1518;
1328         *data++ = s->rx_frames_1519_max;
1329
1330         *data++ = pi->phy.fifo_errors;
1331
1332         *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
1333         *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
1334         *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
1335         *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
1336         *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
1337         *data++ = s->rx_cong_drops;
1338
1339         *data++ = s->num_toggled;
1340         *data++ = s->num_resets;
1341 }
1342
1343 static inline void reg_block_dump(struct adapter *ap, void *buf,
1344                                   unsigned int start, unsigned int end)
1345 {
1346         u32 *p = buf + start;
1347
1348         for (; start <= end; start += sizeof(u32))
1349                 *p++ = t3_read_reg(ap, start);
1350 }
1351
1352 static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1353                      void *buf)
1354 {
1355         struct port_info *pi = netdev_priv(dev);
1356         struct adapter *ap = pi->adapter;
1357
1358         /*
1359          * Version scheme:
1360          * bits 0..9: chip version
1361          * bits 10..15: chip revision
1362          * bit 31: set for PCIe cards
1363          */
1364         regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);
1365
1366         /*
1367          * We skip the MAC statistics registers because they are clear-on-read.
1368          * Also reading multi-register stats would need to synchronize with the
1369          * periodic mac stats accumulation.  Hard to justify the complexity.
1370          */
1371         memset(buf, 0, T3_REGMAP_SIZE);
1372         reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
1373         reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
1374         reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
1375         reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
1376         reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
1377         reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
1378                        XGM_REG(A_XGM_SERDES_STAT3, 1));
1379         reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
1380                        XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
1381 }
1382
1383 static int restart_autoneg(struct net_device *dev)
1384 {
1385         struct port_info *p = netdev_priv(dev);
1386
1387         if (!netif_running(dev))
1388                 return -EAGAIN;
1389         if (p->link_config.autoneg != AUTONEG_ENABLE)
1390                 return -EINVAL;
1391         p->phy.ops->autoneg_restart(&p->phy);
1392         return 0;
1393 }
1394
1395 static int cxgb3_phys_id(struct net_device *dev, u32 data)
1396 {
1397         struct port_info *pi = netdev_priv(dev);
1398         struct adapter *adapter = pi->adapter;
1399         int i;
1400
1401         if (data == 0)
1402                 data = 2;
1403
1404         for (i = 0; i < data * 2; i++) {
1405                 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1406                                  (i & 1) ? F_GPIO0_OUT_VAL : 0);
1407                 if (msleep_interruptible(500))
1408                         break;
1409         }
1410         t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1411                          F_GPIO0_OUT_VAL);
1412         return 0;
1413 }
1414
1415 static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1416 {
1417         struct port_info *p = netdev_priv(dev);
1418
1419         cmd->supported = p->link_config.supported;
1420         cmd->advertising = p->link_config.advertising;
1421
1422         if (netif_carrier_ok(dev)) {
1423                 cmd->speed = p->link_config.speed;
1424                 cmd->duplex = p->link_config.duplex;
1425         } else {
1426                 cmd->speed = -1;
1427                 cmd->duplex = -1;
1428         }
1429
1430         cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
1431         cmd->phy_address = p->phy.addr;
1432         cmd->transceiver = XCVR_EXTERNAL;
1433         cmd->autoneg = p->link_config.autoneg;
1434         cmd->maxtxpkt = 0;
1435         cmd->maxrxpkt = 0;
1436         return 0;
1437 }
1438
1439 static int speed_duplex_to_caps(int speed, int duplex)
1440 {
1441         int cap = 0;
1442
1443         switch (speed) {
1444         case SPEED_10:
1445                 if (duplex == DUPLEX_FULL)
1446                         cap = SUPPORTED_10baseT_Full;
1447                 else
1448                         cap = SUPPORTED_10baseT_Half;
1449                 break;
1450         case SPEED_100:
1451                 if (duplex == DUPLEX_FULL)
1452                         cap = SUPPORTED_100baseT_Full;
1453                 else
1454                         cap = SUPPORTED_100baseT_Half;
1455                 break;
1456         case SPEED_1000:
1457                 if (duplex == DUPLEX_FULL)
1458                         cap = SUPPORTED_1000baseT_Full;
1459                 else
1460                         cap = SUPPORTED_1000baseT_Half;
1461                 break;
1462         case SPEED_10000:
1463                 if (duplex == DUPLEX_FULL)
1464                         cap = SUPPORTED_10000baseT_Full;
1465         }
1466         return cap;
1467 }
1468
1469 #define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1470                       ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1471                       ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
1472                       ADVERTISED_10000baseT_Full)
1473
1474 static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1475 {
1476         struct port_info *p = netdev_priv(dev);
1477         struct link_config *lc = &p->link_config;
1478
1479         if (!(lc->supported & SUPPORTED_Autoneg))
1480                 return -EOPNOTSUPP;     /* can't change speed/duplex */
1481
1482         if (cmd->autoneg == AUTONEG_DISABLE) {
1483                 int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
1484
1485                 if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
1486                         return -EINVAL;
1487                 lc->requested_speed = cmd->speed;
1488                 lc->requested_duplex = cmd->duplex;
1489                 lc->advertising = 0;
1490         } else {
1491                 cmd->advertising &= ADVERTISED_MASK;
1492                 cmd->advertising &= lc->supported;
1493                 if (!cmd->advertising)
1494                         return -EINVAL;
1495                 lc->requested_speed = SPEED_INVALID;
1496                 lc->requested_duplex = DUPLEX_INVALID;
1497                 lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
1498         }
1499         lc->autoneg = cmd->autoneg;
1500         if (netif_running(dev))
1501                 t3_link_start(&p->phy, &p->mac, lc);
1502         return 0;
1503 }
1504
1505 static void get_pauseparam(struct net_device *dev,
1506                            struct ethtool_pauseparam *epause)
1507 {
1508         struct port_info *p = netdev_priv(dev);
1509
1510         epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
1511         epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
1512         epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
1513 }
1514
1515 static int set_pauseparam(struct net_device *dev,
1516                           struct ethtool_pauseparam *epause)
1517 {
1518         struct port_info *p = netdev_priv(dev);
1519         struct link_config *lc = &p->link_config;
1520
1521         if (epause->autoneg == AUTONEG_DISABLE)
1522                 lc->requested_fc = 0;
1523         else if (lc->supported & SUPPORTED_Autoneg)
1524                 lc->requested_fc = PAUSE_AUTONEG;
1525         else
1526                 return -EINVAL;
1527
1528         if (epause->rx_pause)
1529                 lc->requested_fc |= PAUSE_RX;
1530         if (epause->tx_pause)
1531                 lc->requested_fc |= PAUSE_TX;
1532         if (lc->autoneg == AUTONEG_ENABLE) {
1533                 if (netif_running(dev))
1534                         t3_link_start(&p->phy, &p->mac, lc);
1535         } else {
1536                 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1537                 if (netif_running(dev))
1538                         t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
1539         }
1540         return 0;
1541 }
1542
1543 static u32 get_rx_csum(struct net_device *dev)
1544 {
1545         struct port_info *p = netdev_priv(dev);
1546
1547         return p->rx_csum_offload;
1548 }
1549
1550 static int set_rx_csum(struct net_device *dev, u32 data)
1551 {
1552         struct port_info *p = netdev_priv(dev);
1553
1554         p->rx_csum_offload = data;
1555         return 0;
1556 }
1557
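     /*
      * Report the ring sizes of the port's first queue set.  ethtool has
      * no field for the response queue, so rx_mini_pending is used to
      * report (and, in set_sge_param() below, to change) rspq_size.
      */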
1558 static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1559 {
1560         struct port_info *pi = netdev_priv(dev);
1561         struct adapter *adapter = pi->adapter;
1562         const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];
1563
1564         e->rx_max_pending = MAX_RX_BUFFERS;
1565         e->rx_mini_max_pending = 0;
1566         e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
1567         e->tx_max_pending = MAX_TXQ_ENTRIES;
1568
1569         e->rx_pending = q->fl_size;
1570         e->rx_mini_pending = q->rspq_size;
1571         e->rx_jumbo_pending = q->jumbo_size;
1572         e->tx_pending = q->txq_size[0];
1573 }
1574
1575 static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1576 {
1577         struct port_info *pi = netdev_priv(dev);
1578         struct adapter *adapter = pi->adapter;
1579         struct qset_params *q;
1580         int i;
1581
1582         if (e->rx_pending > MAX_RX_BUFFERS ||
1583             e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
1584             e->tx_pending > MAX_TXQ_ENTRIES ||
1585             e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1586             e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1587             e->rx_pending < MIN_FL_ENTRIES ||
1588             e->rx_jumbo_pending < MIN_FL_ENTRIES ||
1589             e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
1590                 return -EINVAL;
1591
1592         if (adapter->flags & FULL_INIT_DONE)
1593                 return -EBUSY;
1594
1595         q = &adapter->params.sge.qset[pi->first_qset];
1596         for (i = 0; i < pi->nqsets; ++i, ++q) {
1597                 q->rspq_size = e->rx_mini_pending;
1598                 q->fl_size = e->rx_pending;
1599                 q->jumbo_size = e->rx_jumbo_pending;
1600                 q->txq_size[0] = e->tx_pending;
1601                 q->txq_size[1] = e->tx_pending;
1602                 q->txq_size[2] = e->tx_pending;
1603         }
1604         return 0;
1605 }
1606
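     /*
      * The ethtool coalescing interface is per-device, so only queue set
      * 0 is read and written here; per-queue-set values are available
      * through the CHELSIO_SET_QSET_PARAMS extension ioctl below.
      */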
1607 static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1608 {
1609         struct port_info *pi = netdev_priv(dev);
1610         struct adapter *adapter = pi->adapter;
1611         struct qset_params *qsp = &adapter->params.sge.qset[0];
1612         struct sge_qset *qs = &adapter->sge.qs[0];
1613
1614         if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
1615                 return -EINVAL;
1616
1617         qsp->coalesce_usecs = c->rx_coalesce_usecs;
1618         t3_update_qset_coalesce(qs, qsp);
1619         return 0;
1620 }
1621
1622 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1623 {
1624         struct port_info *pi = netdev_priv(dev);
1625         struct adapter *adapter = pi->adapter;
1626         struct qset_params *q = adapter->params.sge.qset;
1627
1628         c->rx_coalesce_usecs = q->coalesce_usecs;
1629         return 0;
1630 }
1631
1632 static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
1633                       u8 *data)
1634 {
1635         struct port_info *pi = netdev_priv(dev);
1636         struct adapter *adapter = pi->adapter;
1637         int i, err = 0;
1638
1639         u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
1640         if (!buf)
1641                 return -ENOMEM;
1642
1643         e->magic = EEPROM_MAGIC;
1644         for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
1645                 err = t3_seeprom_read(adapter, i, (u32 *)&buf[i]);
1646
1647         if (!err)
1648                 memcpy(data, buf + e->offset, e->len);
1649         kfree(buf);
1650         return err;
1651 }
1652
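     /*
      * EEPROM writes go through a 32-bit interface, so requests that are
      * not word-aligned are widened with a read-modify-write of the first
      * and last words, and write protection is lifted only for the
      * duration of the update.
      */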
1653 static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
1654                       u8 *data)
1655 {
1656         struct port_info *pi = netdev_priv(dev);
1657         struct adapter *adapter = pi->adapter;
1658         u32 aligned_offset, aligned_len, *p;
1659         u8 *buf;
1660         int err;
1661
1662         if (eeprom->magic != EEPROM_MAGIC)
1663                 return -EINVAL;
1664
1665         aligned_offset = eeprom->offset & ~3;
1666         aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
1667
1668         if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
1669                 buf = kmalloc(aligned_len, GFP_KERNEL);
1670                 if (!buf)
1671                         return -ENOMEM;
1672                 err = t3_seeprom_read(adapter, aligned_offset, (u32 *) buf);
1673                 if (!err && aligned_len > 4)
1674                         err = t3_seeprom_read(adapter,
1675                                               aligned_offset + aligned_len - 4,
1676                                               (u32 *)&buf[aligned_len - 4]);
1677                 if (err)
1678                         goto out;
1679                 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
1680         } else
1681                 buf = data;
1682
1683         err = t3_seeprom_wp(adapter, 0);
1684         if (err)
1685                 goto out;
1686
1687         for (p = (u32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
1688                 err = t3_seeprom_write(adapter, aligned_offset, *p);
1689                 aligned_offset += 4;
1690         }
1691
1692         if (!err)
1693                 err = t3_seeprom_wp(adapter, 1);
1694 out:
1695         if (buf != data)
1696                 kfree(buf);
1697         return err;
1698 }
1699
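     /* Wake-on-LAN is not supported, so report empty capabilities. */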
1700 static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1701 {
1702         wol->supported = 0;
1703         wol->wolopts = 0;
1704         memset(&wol->sopass, 0, sizeof(wol->sopass));
1705 }
1706
1707 static const struct ethtool_ops cxgb_ethtool_ops = {
1708         .get_settings = get_settings,
1709         .set_settings = set_settings,
1710         .get_drvinfo = get_drvinfo,
1711         .get_msglevel = get_msglevel,
1712         .set_msglevel = set_msglevel,
1713         .get_ringparam = get_sge_param,
1714         .set_ringparam = set_sge_param,
1715         .get_coalesce = get_coalesce,
1716         .set_coalesce = set_coalesce,
1717         .get_eeprom_len = get_eeprom_len,
1718         .get_eeprom = get_eeprom,
1719         .set_eeprom = set_eeprom,
1720         .get_pauseparam = get_pauseparam,
1721         .set_pauseparam = set_pauseparam,
1722         .get_rx_csum = get_rx_csum,
1723         .set_rx_csum = set_rx_csum,
1724         .set_tx_csum = ethtool_op_set_tx_csum,
1725         .set_sg = ethtool_op_set_sg,
1726         .get_link = ethtool_op_get_link,
1727         .get_strings = get_strings,
1728         .phys_id = cxgb3_phys_id,
1729         .nway_reset = restart_autoneg,
1730         .get_sset_count = get_sset_count,
1731         .get_ethtool_stats = get_stats,
1732         .get_regs_len = get_regs_len,
1733         .get_regs = get_regs,
1734         .get_wol = get_wol,
1735         .set_tso = ethtool_op_set_tso,
1736 };
1737
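     /*
      * Range-check helper for the extension ioctl below: negative values
      * mean "leave this parameter unchanged" and always pass the check.
      */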
1738 static int in_range(int val, int lo, int hi)
1739 {
1740         return val < 0 || (val <= hi && val >= lo);
1741 }
1742
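     /*
      * Dispatcher for the Chelsio-private SIOCCHIOCTL ioctl.  The first
      * u32 at useraddr selects the sub-command; each sub-command then
      * copies its own request structure from/to the same user address.
      */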
1743 static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1744 {
1745         struct port_info *pi = netdev_priv(dev);
1746         struct adapter *adapter = pi->adapter;
1747         u32 cmd;
1748         int ret;
1749
1750         if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
1751                 return -EFAULT;
1752
1753         switch (cmd) {
1754         case CHELSIO_SET_QSET_PARAMS:{
1755                 int i;
1756                 struct qset_params *q;
1757                 struct ch_qset_params t;
1758
1759                 if (!capable(CAP_NET_ADMIN))
1760                         return -EPERM;
1761                 if (copy_from_user(&t, useraddr, sizeof(t)))
1762                         return -EFAULT;
1763                 if (t.qset_idx >= SGE_QSETS)
1764                         return -EINVAL;
1765                 if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
1766                     !in_range(t.cong_thres, 0, 255) ||
1767                     !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
1768                               MAX_TXQ_ENTRIES) ||
1769                     !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
1770                               MAX_TXQ_ENTRIES) ||
1771                     !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
1772                               MAX_CTRL_TXQ_ENTRIES) ||
1773                     !in_range(t.fl_size[0], MIN_FL_ENTRIES,
1774                               MAX_RX_BUFFERS) ||
1775                     !in_range(t.fl_size[1], MIN_FL_ENTRIES,
1776                               MAX_RX_JUMBO_BUFFERS) ||
1777                     !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
1778                               MAX_RSPQ_ENTRIES))
1779                         return -EINVAL;
1780                 if ((adapter->flags & FULL_INIT_DONE) &&
1781                         (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
1782                         t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
1783                         t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
1784                         t.polling >= 0 || t.cong_thres >= 0))
1785                         return -EBUSY;
1786
1787                 q = &adapter->params.sge.qset[t.qset_idx];
1788
1789                 if (t.rspq_size >= 0)
1790                         q->rspq_size = t.rspq_size;
1791                 if (t.fl_size[0] >= 0)
1792                         q->fl_size = t.fl_size[0];
1793                 if (t.fl_size[1] >= 0)
1794                         q->jumbo_size = t.fl_size[1];
1795                 if (t.txq_size[0] >= 0)
1796                         q->txq_size[0] = t.txq_size[0];
1797                 if (t.txq_size[1] >= 0)
1798                         q->txq_size[1] = t.txq_size[1];
1799                 if (t.txq_size[2] >= 0)
1800                         q->txq_size[2] = t.txq_size[2];
1801                 if (t.cong_thres >= 0)
1802                         q->cong_thres = t.cong_thres;
1803                 if (t.intr_lat >= 0) {
1804                         struct sge_qset *qs =
1805                                 &adapter->sge.qs[t.qset_idx];
1806
1807                         q->coalesce_usecs = t.intr_lat;
1808                         t3_update_qset_coalesce(qs, q);
1809                 }
1810                 if (t.polling >= 0) {
1811                         if (adapter->flags & USING_MSIX)
1812                                 q->polling = t.polling;
1813                         else {
1814                                 /* No polling with INTx for T3A */
1815                                 if (adapter->params.rev == 0 &&
1816                                         !(adapter->flags & USING_MSI))
1817                                         t.polling = 0;
1818
1819                                 for (i = 0; i < SGE_QSETS; i++) {
1820                                         q = &adapter->params.sge.qset[i];
1822                                         q->polling = t.polling;
1823                                 }
1824                         }
1825                 }
1826                 break;
1827         }
1828         case CHELSIO_GET_QSET_PARAMS:{
1829                 struct qset_params *q;
1830                 struct ch_qset_params t;
1831
1832                 if (copy_from_user(&t, useraddr, sizeof(t)))
1833                         return -EFAULT;
1834                 if (t.qset_idx >= SGE_QSETS)
1835                         return -EINVAL;
1836
1837                 q = &adapter->params.sge.qset[t.qset_idx];
1838                 t.rspq_size = q->rspq_size;
1839                 t.txq_size[0] = q->txq_size[0];
1840                 t.txq_size[1] = q->txq_size[1];
1841                 t.txq_size[2] = q->txq_size[2];
1842                 t.fl_size[0] = q->fl_size;
1843                 t.fl_size[1] = q->jumbo_size;
1844                 t.polling = q->polling;
1845                 t.intr_lat = q->coalesce_usecs;
1846                 t.cong_thres = q->cong_thres;
1847
1848                 if (copy_to_user(useraddr, &t, sizeof(t)))
1849                         return -EFAULT;
1850                 break;
1851         }
1852         case CHELSIO_SET_QSET_NUM:{
1853                 struct ch_reg edata;
1854                 unsigned int i, first_qset = 0, other_qsets = 0;
1855
1856                 if (!capable(CAP_NET_ADMIN))
1857                         return -EPERM;
1858                 if (adapter->flags & FULL_INIT_DONE)
1859                         return -EBUSY;
1860                 if (copy_from_user(&edata, useraddr, sizeof(edata)))
1861                         return -EFAULT;
1862                 if (edata.val < 1 ||
1863                         (edata.val > 1 && !(adapter->flags & USING_MSIX)))
1864                         return -EINVAL;
1865
1866                 for_each_port(adapter, i)
1867                         if (adapter->port[i] && adapter->port[i] != dev)
1868                                 other_qsets += adap2pinfo(adapter, i)->nqsets;
1869
1870                 if (edata.val + other_qsets > SGE_QSETS)
1871                         return -EINVAL;
1872
1873                 pi->nqsets = edata.val;
1874
1875                 for_each_port(adapter, i)
1876                         if (adapter->port[i]) {
1877                                 pi = adap2pinfo(adapter, i);
1878                                 pi->first_qset = first_qset;
1879                                 first_qset += pi->nqsets;
1880                         }
1881                 break;
1882         }
1883         case CHELSIO_GET_QSET_NUM:{
1884                 struct ch_reg edata;
1885
1886                 edata.cmd = CHELSIO_GET_QSET_NUM;
1887                 edata.val = pi->nqsets;
1888                 if (copy_to_user(useraddr, &edata, sizeof(edata)))
1889                         return -EFAULT;
1890                 break;
1891         }
1892         case CHELSIO_LOAD_FW:{
1893                 u8 *fw_data;
1894                 struct ch_mem_range t;
1895
1896                 if (!capable(CAP_NET_ADMIN))
1897                         return -EPERM;
1898                 if (copy_from_user(&t, useraddr, sizeof(t)))
1899                         return -EFAULT;
1900
1901                 fw_data = kmalloc(t.len, GFP_KERNEL);
1902                 if (!fw_data)
1903                         return -ENOMEM;
1904
1905                 if (copy_from_user(fw_data, useraddr + sizeof(t),
1906                                    t.len)) {
1907                         kfree(fw_data);
1908                         return -EFAULT;
1909                 }
1910
1911                 ret = t3_load_fw(adapter, fw_data, t.len);
1912                 kfree(fw_data);
1913                 if (ret)
1914                         return ret;
1915                 break;
1916         }
1917         case CHELSIO_SETMTUTAB:{
1918                 struct ch_mtus m;
1919                 int i;
1920
1921                 if (!is_offload(adapter))
1922                         return -EOPNOTSUPP;
1923                 if (!capable(CAP_NET_ADMIN))
1924                         return -EPERM;
1925                 if (offload_running(adapter))
1926                         return -EBUSY;
1927                 if (copy_from_user(&m, useraddr, sizeof(m)))
1928                         return -EFAULT;
1929                 if (m.nmtus != NMTUS)
1930                         return -EINVAL;
1931                 if (m.mtus[0] < 81)     /* accommodate SACK */
1932                         return -EINVAL;
1933
1934                 /* MTUs must be in ascending order */
1935                 for (i = 1; i < NMTUS; ++i)
1936                         if (m.mtus[i] < m.mtus[i - 1])
1937                                 return -EINVAL;
1938
1939                 memcpy(adapter->params.mtus, m.mtus,
1940                         sizeof(adapter->params.mtus));
1941                 break;
1942         }
1943         case CHELSIO_GET_PM:{
1944                 struct tp_params *p = &adapter->params.tp;
1945                 struct ch_pm m = {.cmd = CHELSIO_GET_PM };
1946
1947                 if (!is_offload(adapter))
1948                         return -EOPNOTSUPP;
1949                 m.tx_pg_sz = p->tx_pg_size;
1950                 m.tx_num_pg = p->tx_num_pgs;
1951                 m.rx_pg_sz = p->rx_pg_size;
1952                 m.rx_num_pg = p->rx_num_pgs;
1953                 m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
1954                 if (copy_to_user(useraddr, &m, sizeof(m)))
1955                         return -EFAULT;
1956                 break;
1957         }
1958         case CHELSIO_SET_PM:{
1959                 struct ch_pm m;
1960                 struct tp_params *p = &adapter->params.tp;
1961
1962                 if (!is_offload(adapter))
1963                         return -EOPNOTSUPP;
1964                 if (!capable(CAP_NET_ADMIN))
1965                         return -EPERM;
1966                 if (adapter->flags & FULL_INIT_DONE)
1967                         return -EBUSY;
1968                 if (copy_from_user(&m, useraddr, sizeof(m)))
1969                         return -EFAULT;
1970                 if (!is_power_of_2(m.rx_pg_sz) ||
1971                         !is_power_of_2(m.tx_pg_sz))
1972                         return -EINVAL; /* not power of 2 */
1973                 if (!(m.rx_pg_sz & 0x14000))
1974                         return -EINVAL; /* not 16KB or 64KB */
1975                 if (!(m.tx_pg_sz & 0x1554000))
1976                         return -EINVAL; /* not a power of 4 in [16KB, 16MB] */
1977                 if (m.tx_num_pg == -1)
1978                         m.tx_num_pg = p->tx_num_pgs;
1979                 if (m.rx_num_pg == -1)
1980                         m.rx_num_pg = p->rx_num_pgs;
1981                 if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
1982                         return -EINVAL;
1983                 if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
1984                         m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
1985                         return -EINVAL;
1986                 p->rx_pg_size = m.rx_pg_sz;
1987                 p->tx_pg_size = m.tx_pg_sz;
1988                 p->rx_num_pgs = m.rx_num_pg;
1989                 p->tx_num_pgs = m.tx_num_pg;
1990                 break;
1991         }
1992         case CHELSIO_GET_MEM:{
1993                 struct ch_mem_range t;
1994                 struct mc7 *mem;
1995                 u64 buf[32];
1996
1997                 if (!is_offload(adapter))
1998                         return -EOPNOTSUPP;
1999                 if (!(adapter->flags & FULL_INIT_DONE))
2000                         return -EIO;    /* need the memory controllers */
2001                 if (copy_from_user(&t, useraddr, sizeof(t)))
2002                         return -EFAULT;
2003                 if ((t.addr & 7) || (t.len & 7))
2004                         return -EINVAL;
2005                 if (t.mem_id == MEM_CM)
2006                         mem = &adapter->cm;
2007                 else if (t.mem_id == MEM_PMRX)
2008                         mem = &adapter->pmrx;
2009                 else if (t.mem_id == MEM_PMTX)
2010                         mem = &adapter->pmtx;
2011                 else
2012                         return -EINVAL;
2013
2014                 /*
2015                  * Version scheme:
2016                  * bits 0..9: chip version
2017                  * bits 10..15: chip revision
2018                  */
2019                 t.version = 3 | (adapter->params.rev << 10);
2020                 if (copy_to_user(useraddr, &t, sizeof(t)))
2021                         return -EFAULT;
2022
2023                 /*
2024                  * Read 256 bytes at a time as len can be large and we don't
2025                  * want to use huge intermediate buffers.
2026                  */
2027                 useraddr += sizeof(t);  /* advance to start of buffer */
2028                 while (t.len) {
2029                         unsigned int chunk =
2030                                 min_t(unsigned int, t.len, sizeof(buf));
2031
2032                         ret = t3_mc7_bd_read(mem,
2033                                              t.addr / 8,
2034                                              chunk / 8, buf);
2035                         if (ret)
2036                                 return ret;
2037                         if (copy_to_user(useraddr, buf, chunk))
2038                                 return -EFAULT;
2039                         useraddr += chunk;
2040                         t.addr += chunk;
2041                         t.len -= chunk;
2042                 }
2043                 break;
2044         }
2045         case CHELSIO_SET_TRACE_FILTER:{
2046                 struct ch_trace t;
2047                 const struct trace_params *tp;
2048
2049                 if (!capable(CAP_NET_ADMIN))
2050                         return -EPERM;
2051                 if (!offload_running(adapter))
2052                         return -EAGAIN;
2053                 if (copy_from_user(&t, useraddr, sizeof(t)))
2054                         return -EFAULT;
2055
2056                 tp = (const struct trace_params *)&t.sip;
2057                 if (t.config_tx)
2058                         t3_config_trace_filter(adapter, tp, 0,
2059                                                 t.invert_match,
2060                                                 t.trace_tx);
2061                 if (t.config_rx)
2062                         t3_config_trace_filter(adapter, tp, 1,
2063                                                 t.invert_match,
2064                                                 t.trace_rx);
2065                 break;
2066         }
2067         default:
2068                 return -EOPNOTSUPP;
2069         }
2070         return 0;
2071 }
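
     /*
      * Userspace sketch (hypothetical names, assuming the request
      * structures from cxgb3_ioctl.h): read the parameters of queue set
      * 0 through SIOCCHIOCTL, given an AF_INET datagram socket sock_fd.
      *
      *      struct ch_qset_params qp = { .cmd = CHELSIO_GET_QSET_PARAMS };
      *      struct ifreq ifr;
      *
      *      strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
      *      ifr.ifr_data = (void *)&qp;
      *      if (ioctl(sock_fd, SIOCCHIOCTL, &ifr) == 0)
      *              printf("rspq entries: %d\n", qp.rspq_size);
      */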
2072
2073 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
2074 {
2075         struct mii_ioctl_data *data = if_mii(req);
2076         struct port_info *pi = netdev_priv(dev);
2077         struct adapter *adapter = pi->adapter;
2078         int ret, mmd;
2079
2080         switch (cmd) {
2081         case SIOCGMIIPHY:
2082                 data->phy_id = pi->phy.addr;
2083                 /* FALLTHRU */
2084         case SIOCGMIIREG:{
2085                 u32 val;
2086                 struct cphy *phy = &pi->phy;
2087
2088                 if (!phy->mdio_read)
2089                         return -EOPNOTSUPP;
2090                 if (is_10G(adapter)) {
2091                         mmd = data->phy_id >> 8;
2092                         if (!mmd)
2093                                 mmd = MDIO_DEV_PCS;
2094                         else if (mmd > MDIO_DEV_XGXS)
2095                                 return -EINVAL;
2096
2097                         ret = phy->mdio_read(adapter,
2098                                              data->phy_id & 0x1f, mmd,
2099                                              data->reg_num, &val);
2100                 } else
2101                         ret = phy->mdio_read(adapter,
2102                                              data->phy_id & 0x1f, 0,
2103                                              data->reg_num & 0x1f,
2104                                              &val);
2105                 if (!ret)
2106                         data->val_out = val;
2107                 break;
2108         }
2109         case SIOCSMIIREG:{
2110                 struct cphy *phy = &pi->phy;
2111
2112                 if (!capable(CAP_NET_ADMIN))
2113                         return -EPERM;
2114                 if (!phy->mdio_write)
2115                         return -EOPNOTSUPP;
2116                 if (is_10G(adapter)) {
2117                         mmd = data->phy_id >> 8;
2118                         if (!mmd)
2119                                 mmd = MDIO_DEV_PCS;
2120                         else if (mmd > MDIO_DEV_XGXS)
2121                                 return -EINVAL;
2122
2123                         ret = phy->mdio_write(adapter,
2124                                               data->phy_id & 0x1f,
2125                                               mmd,
2126                                               data->reg_num,
2127                                               data->val_in);
2128                 } else
2129                         ret = phy->mdio_write(adapter,
2130                                               data->phy_id & 0x1f,
2131                                               0,
2132                                               data->reg_num & 0x1f,
2133                                               data->val_in);
2134                 break;
2135         }
2136         case SIOCCHIOCTL:
2137                 return cxgb_extension_ioctl(dev, req->ifr_data);
2138         default:
2139                 return -EOPNOTSUPP;
2140         }
2141         return ret;
2142 }
2143
2144 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
2145 {
2146         struct port_info *pi = netdev_priv(dev);
2147         struct adapter *adapter = pi->adapter;
2148         int ret;
2149
2150         if (new_mtu < 81)       /* accommodate SACK */
2151                 return -EINVAL;
2152         if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
2153                 return ret;
2154         dev->mtu = new_mtu;
2155         init_port_mtus(adapter);
2156         if (adapter->params.rev == 0 && offload_running(adapter))
2157                 t3_load_mtus(adapter, adapter->params.mtus,
2158                              adapter->params.a_wnd, adapter->params.b_wnd,
2159                              adapter->port[0]->mtu);
2160         return 0;
2161 }
2162
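     /*
      * Program a new MAC address into the port's exact-match filter and,
      * when offload is running, refresh the port's SMT (source MAC
      * table) entry so offloaded connections pick up the new address.
      */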
2163 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2164 {
2165         struct port_info *pi = netdev_priv(dev);
2166         struct adapter *adapter = pi->adapter;
2167         struct sockaddr *addr = p;
2168
2169         if (!is_valid_ether_addr(addr->sa_data))
2170                 return -EINVAL;
2171
2172         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2173         t3_mac_set_address(&pi->mac, 0, dev->dev_addr);
2174         if (offload_running(adapter))
2175                 write_smt_entry(adapter, pi->port_id);
2176         return 0;
2177 }
2178
2179 /**
2180  * t3_synchronize_rx - wait for current Rx processing on a port to complete
2181  * @adap: the adapter
2182  * @p: the port
2183  *
2184  * Ensures that current Rx processing on any of the queues associated with
2185  * the given port completes before returning.  We do this by acquiring and
2186  * releasing the locks of the response queues associated with the port.
2187  */
2188 static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
2189 {
2190         int i;
2191
2192         for (i = 0; i < p->nqsets; i++) {
2193                 struct sge_rspq *q = &adap->sge.qs[i + p->first_qset].rspq;
2194
2195                 spin_lock_irq(&q->lock);
2196                 spin_unlock_irq(&q->lock);
2197         }
2198 }
2199
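     /*
      * Enable/disable VLAN acceleration.  T3 revisions after A have a
      * per-port control; rev 0 parts have a single chip-wide control
      * that must stay enabled while any port has a VLAN group.
      */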
2200 static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
2201 {
2202         struct port_info *pi = netdev_priv(dev);
2203         struct adapter *adapter = pi->adapter;
2204
2205         pi->vlan_grp = grp;
2206         if (adapter->params.rev > 0)
2207                 t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
2208         else {
2209                 /* single control for all ports */
2210                 unsigned int i, have_vlans = 0;
2211                 for_each_port(adapter, i)
2212                     have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;
2213
2214                 t3_set_vlan_accel(adapter, 1, have_vlans);
2215         }
2216         t3_synchronize_rx(adapter, pi);
2217 }
2218
2219 #ifdef CONFIG_NET_POLL_CONTROLLER
2220 static void cxgb_netpoll(struct net_device *dev)
2221 {
2222         struct port_info *pi = netdev_priv(dev);
2223         struct adapter *adapter = pi->adapter;
2224         int qidx;
2225
2226         for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
2227                 struct sge_qset *qs = &adapter->sge.qs[qidx];
2228                 void *source;
2229
2230                 if (adapter->flags & USING_MSIX)
2231                         source = qs;
2232                 else
2233                         source = adapter;
2234
2235                 t3_intr_handler(adapter, qs->rspq.polling)(0, source);
2236         }
2237 }
2238 #endif
2239
2240 /*
2241  * Periodic accumulation of MAC statistics.
2242  */
2243 static void mac_stats_update(struct adapter *adapter)
2244 {
2245         int i;
2246
2247         for_each_port(adapter, i) {
2248                 struct net_device *dev = adapter->port[i];
2249                 struct port_info *p = netdev_priv(dev);
2250
2251                 if (netif_running(dev)) {
2252                         spin_lock(&adapter->stats_lock);
2253                         t3_mac_update_stats(&p->mac);
2254                         spin_unlock(&adapter->stats_lock);
2255                 }
2256         }
2257 }
2258
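     /*
      * Poll link state on ports whose PHY cannot report link changes
      * through an interrupt (no SUPPORTED_IRQ capability).
      */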
2259 static void check_link_status(struct adapter *adapter)
2260 {
2261         int i;
2262
2263         for_each_port(adapter, i) {
2264                 struct net_device *dev = adapter->port[i];
2265                 struct port_info *p = netdev_priv(dev);
2266
2267                 if (!(p->port_type->caps & SUPPORTED_IRQ) && netif_running(dev))
2268                         t3_link_changed(adapter, i);
2269         }
2270 }
2271
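     /*
      * T3B2 MAC watchdog.  t3b2_mac_watchdog_task() returns 1 when it had
      * to toggle the MAC (counted in num_toggled) and 2 when the MAC
      * needs a full reinit, in which case its MTU, address, rx mode and
      * link are reprogrammed and its interrupts re-enabled below.
      */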
2272 static void check_t3b2_mac(struct adapter *adapter)
2273 {
2274         int i;
2275
2276         if (!rtnl_trylock())    /* synchronize with ifdown */
2277                 return;
2278
2279         for_each_port(adapter, i) {
2280                 struct net_device *dev = adapter->port[i];
2281                 struct port_info *p = netdev_priv(dev);
2282                 int status;
2283
2284                 if (!netif_running(dev))
2285                         continue;
2286
2287                 status = 0;
2288                 if (netif_carrier_ok(dev))
2289                         status = t3b2_mac_watchdog_task(&p->mac);
2290                 if (status == 1)
2291                         p->mac.stats.num_toggled++;
2292                 else if (status == 2) {
2293                         struct cmac *mac = &p->mac;
2294
2295                         t3_mac_set_mtu(mac, dev->mtu);
2296                         t3_mac_set_address(mac, 0, dev->dev_addr);
2297                         cxgb_set_rxmode(dev);
2298                         t3_link_start(&p->phy, mac, &p->link_config);
2299                         t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
2300                         t3_port_intr_enable(adapter, p->port_id);
2301                         p->mac.stats.num_resets++;
2302                 }
2303         }
2304         rtnl_unlock();
2305 }
2306
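     /*
      * Periodic housekeeping: poll link state where needed, accumulate
      * MAC statistics, run the T3B2 MAC watchdog, and reschedule for as
      * long as any port is still open.
      */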
2308 static void t3_adap_check_task(struct work_struct *work)
2309 {
2310         struct adapter *adapter = container_of(work, struct adapter,
2311                                                adap_check_task.work);
2312         const struct adapter_params *p = &adapter->params;
2313
2314         adapter->check_task_cnt++;
2315
2316         /* Check link status for PHYs without interrupts */
2317         if (p->linkpoll_period)
2318                 check_link_status(adapter);
2319
2320         /* Accumulate MAC stats if needed */
2321         if (!p->linkpoll_period ||
2322             (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
2323             p->stats_update_period) {
2324                 mac_stats_update(adapter);
2325                 adapter->check_task_cnt = 0;
2326         }
2327
2328         if (p->rev == T3_REV_B2)
2329                 check_t3b2_mac(adapter);
2330
2331         /* Schedule the next check update if any port is active. */
2332         spin_lock(&adapter->work_lock);
2333         if (adapter->open_device_map & PORT_MASK)
2334                 schedule_chk_task(adapter);
2335         spin_unlock(&adapter->work_lock);
2336 }
2337
2338 /*
2339  * Processes external (PHY) interrupts in process context.
2340  */
2341 static void ext_intr_task(struct work_struct *work)
2342 {
2343         struct adapter *adapter = container_of(work, struct adapter,
2344                                                ext_intr_handler_task);
2345
2346         t3_phy_intr_handler(adapter);
2347
2348         /* Now reenable external interrupts */
2349         spin_lock_irq(&adapter->work_lock);
2350         if (adapter->slow_intr_mask) {
2351                 adapter->slow_intr_mask |= F_T3DBG;
2352                 t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
2353                 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2354                              adapter->slow_intr_mask);
2355         }
2356         spin_unlock_irq(&adapter->work_lock);
2357 }
2358
2359 /*
2360  * Interrupt-context handler for external (PHY) interrupts.
2361  */
2362 void t3_os_ext_intr_handler(struct adapter *adapter)
2363 {
2364         /*
2365          * Schedule a task to handle external interrupts as they may be slow
2366          * and we use a mutex to protect MDIO registers.  We disable PHY
2367          * interrupts in the meantime and let the task reenable them when
2368          * it's done.
2369          */
2370         spin_lock(&adapter->work_lock);
2371         if (adapter->slow_intr_mask) {
2372                 adapter->slow_intr_mask &= ~F_T3DBG;
2373                 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2374                              adapter->slow_intr_mask);
2375                 queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
2376         }
2377         spin_unlock(&adapter->work_lock);
2378 }
2379
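     /*
      * Handle a fatal hardware error: quiesce the SGE and both MACs,
      * mask all interrupts, and log the firmware status registers for
      * post-mortem analysis.
      */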
2380 void t3_fatal_err(struct adapter *adapter)
2381 {
2382         unsigned int fw_status[4];
2383
2384         if (adapter->flags & FULL_INIT_DONE) {
2385                 t3_sge_stop(adapter);
2386                 t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
2387                 t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
2388                 t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
2389                 t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);
2390                 t3_intr_disable(adapter);
2391         }
2392         CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
2393         if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
2394                 CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
2395                          fw_status[0], fw_status[1],
2396                          fw_status[2], fw_status[3]);
2398 }
2399
2400 /**
2401  * t3_io_error_detected - called when PCI error is detected
2402  * @pdev: Pointer to PCI device
2403  * @state: The current pci connection state
2404  *
2405  * This function is called after a PCI bus error affecting
2406  * this device has been detected.
2407  */
2408 static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
2409                                              pci_channel_state_t state)
2410 {
2411         struct adapter *adapter = pci_get_drvdata(pdev);
2412         int i;
2413
2414         /* Stop all ports */
2415         for_each_port(adapter, i) {
2416                 struct net_device *netdev = adapter->port[i];
2417
2418                 if (netif_running(netdev))
2419                         cxgb_close(netdev);
2420         }
2421
2422         if (is_offload(adapter) &&
2423             test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
2424                 offload_close(&adapter->tdev);
2425
2426         /* Free sge resources */
2427         t3_free_sge_resources(adapter);
2428
2429         adapter->flags &= ~FULL_INIT_DONE;
2430
2431         pci_disable_device(pdev);
2432
2433         /* Request a slot reset. */
2434         return PCI_ERS_RESULT_NEED_RESET;
2435 }
2436
2437 /**
2438  * t3_io_slot_reset - called after the pci bus has been reset.
2439  * @pdev: Pointer to PCI device
2440  *
2441  * Restart the card from scratch, as if from a cold-boot.
2442  */
2443 static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
2444 {
2445         struct adapter *adapter = pci_get_drvdata(pdev);
2446
2447         if (pci_enable_device(pdev)) {
2448                 dev_err(&pdev->dev,
2449                         "Cannot re-enable PCI device after reset.\n");
2450                 return PCI_ERS_RESULT_DISCONNECT;
2451         }
2452         pci_set_master(pdev);
2453
2454         if (t3_prep_adapter(adapter, adapter->params.info, 1) < 0)
                     return PCI_ERS_RESULT_DISCONNECT;
2455
2456         return PCI_ERS_RESULT_RECOVERED;
2457 }
2458
2459 /**
2460  * t3_io_resume - called when traffic can start flowing again.
2461  * @pdev: Pointer to PCI device
2462  *
2463  * This callback is called when the error recovery driver tells us that
2464  * it's OK to resume normal operation.
2465  */
2466 static void t3_io_resume(struct pci_dev *pdev)
2467 {
2468         struct adapter *adapter = pci_get_drvdata(pdev);
2469         int i;
2470
2471         /* Restart the ports */
2472         for_each_port(adapter, i) {
2473                 struct net_device *netdev = adapter->port[i];
2474
2475                 if (netif_running(netdev)) {
2476                         if (cxgb_open(netdev)) {
2477                                 dev_err(&pdev->dev,
2478                                         "can't bring device back up"
2479                                         " after reset\n");
2480                                 continue;
2481                         }
2482                         netif_device_attach(netdev);
2483                 }
2484         }
2485
2486         if (is_offload(adapter)) {
2487                 __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
2488                 if (offload_open(adapter->port[0]))
2489                         printk(KERN_WARNING
2490                                "Could not bring back offload capabilities\n");
2491         }
2492 }
2493
2494 static struct pci_error_handlers t3_err_handler = {
2495         .error_detected = t3_io_error_detected,
2496         .slot_reset = t3_io_slot_reset,
2497         .resume = t3_io_resume,
2498 };
2499
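     /*
      * Request one MSI-X vector per queue set plus one extra vector.  On
      * any failure the caller falls back to MSI or legacy interrupts.
      */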
2500 static int __devinit cxgb_enable_msix(struct adapter *adap)
2501 {
2502         struct msix_entry entries[SGE_QSETS + 1];
2503         int i, err;
2504
2505         for (i = 0; i < ARRAY_SIZE(entries); ++i)
2506                 entries[i].entry = i;
2507
2508         err = pci_enable_msix(adap->pdev, entries, ARRAY_SIZE(entries));
2509         if (!err) {
2510                 for (i = 0; i < ARRAY_SIZE(entries); ++i)
2511                         adap->msix_info[i].vec = entries[i].vector;
2512         } else if (err > 0)
2513                 dev_info(&adap->pdev->dev,
2514                        "only %d MSI-X vectors left, not using MSI-X\n", err);
2515         return err;
2516 }
2517
2518 static void __devinit print_port_info(struct adapter *adap,
2519                                       const struct adapter_info *ai)
2520 {
2521         static const char *pci_variant[] = {
2522                 "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
2523         };
2524
2525         int i;
2526         char buf[80];
2527
2528         if (is_pcie(adap))
2529                 snprintf(buf, sizeof(buf), "%s x%d",
2530                          pci_variant[adap->params.pci.variant],
2531                          adap->params.pci.width);
2532         else
2533                 snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
2534                          pci_variant[adap->params.pci.variant],
2535                          adap->params.pci.speed, adap->params.pci.width);
2536
2537         for_each_port(adap, i) {
2538                 struct net_device *dev = adap->port[i];
2539                 const struct port_info *pi = netdev_priv(dev);
2540
2541                 if (!test_bit(i, &adap->registered_device_map))
2542                         continue;
2543                 printk(KERN_INFO "%s: %s %s %sNIC (rev %d) %s%s\n",
2544                        dev->name, ai->desc, pi->port_type->desc,
2545                        is_offload(adap) ? "R" : "", adap->params.rev, buf,
2546                        (adap->flags & USING_MSIX) ? " MSI-X" :
2547                        (adap->flags & USING_MSI) ? " MSI" : "");
2548                 if (adap->name == dev->name && adap->params.vpd.mclk)
2549                         printk(KERN_INFO
2550                                "%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
2551                                adap->name, t3_mc7_size(&adap->cm) >> 20,
2552                                t3_mc7_size(&adap->pmtx) >> 20,
2553                                t3_mc7_size(&adap->pmrx) >> 20,
2554                                adap->params.vpd.sn);
2555         }
2556 }
2557
2558 static int __devinit init_one(struct pci_dev *pdev,
2559                               const struct pci_device_id *ent)
2560 {
2561         static int version_printed;
2562
2563         int i, err, pci_using_dac = 0;
2564         unsigned long mmio_start, mmio_len;
2565         const struct adapter_info *ai;
2566         struct adapter *adapter = NULL;
2567         struct port_info *pi;
2568
2569         if (!version_printed) {
2570                 printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
2571                 ++version_printed;
2572         }
2573
2574         if (!cxgb3_wq) {
2575                 cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
2576                 if (!cxgb3_wq) {
2577                         printk(KERN_ERR DRV_NAME
2578                                ": cannot initialize work queue\n");
2579                         return -ENOMEM;
2580                 }
2581         }
2582
2583         err = pci_request_regions(pdev, DRV_NAME);
2584         if (err) {
2585                 /* Just info, some other driver may have claimed the device. */
2586                 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
2587                 return err;
2588         }
2589
2590         err = pci_enable_device(pdev);
2591         if (err) {
2592                 dev_err(&pdev->dev, "cannot enable PCI device\n");
2593                 goto out_release_regions;
2594         }
2595
2596         if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2597                 pci_using_dac = 1;
2598                 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2599                 if (err) {
2600                         dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
2601                                "coherent allocations\n");
2602                         goto out_disable_device;
2603                 }
2604         } else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
2605                 dev_err(&pdev->dev, "no usable DMA configuration\n");
2606                 goto out_disable_device;
2607         }
2608
2609         pci_set_master(pdev);
2610
2611         mmio_start = pci_resource_start(pdev, 0);
2612         mmio_len = pci_resource_len(pdev, 0);
2613         ai = t3_get_adapter_info(ent->driver_data);
2614
2615         adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
2616         if (!adapter) {
2617                 err = -ENOMEM;
2618                 goto out_disable_device;
2619         }
2620
2621         adapter->regs = ioremap_nocache(mmio_start, mmio_len);
2622         if (!adapter->regs) {
2623                 dev_err(&pdev->dev, "cannot map device registers\n");
2624                 err = -ENOMEM;
2625                 goto out_free_adapter;
2626         }
2627
2628         adapter->pdev = pdev;
2629         adapter->name = pci_name(pdev);
2630         adapter->msg_enable = dflt_msg_enable;
2631         adapter->mmio_len = mmio_len;
2632
2633         mutex_init(&adapter->mdio_lock);
2634         spin_lock_init(&adapter->work_lock);
2635         spin_lock_init(&adapter->stats_lock);
2636
2637         INIT_LIST_HEAD(&adapter->adapter_list);
2638         INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
2639         INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
2640
2641         for (i = 0; i < ai->nports; ++i) {
2642                 struct net_device *netdev;
2643
2644                 netdev = alloc_etherdev(sizeof(struct port_info));
2645                 if (!netdev) {
2646                         err = -ENOMEM;
2647                         goto out_free_dev;
2648                 }
2649
2650                 SET_NETDEV_DEV(netdev, &pdev->dev);
2651
2652                 adapter->port[i] = netdev;
2653                 pi = netdev_priv(netdev);
2654                 pi->adapter = adapter;
2655                 pi->rx_csum_offload = 1;
2656                 pi->nqsets = 1;
2657                 pi->first_qset = i;
2658                 pi->activity = 0;
2659                 pi->port_id = i;
2660                 netif_carrier_off(netdev);
2661                 netdev->irq = pdev->irq;
2662                 netdev->mem_start = mmio_start;
2663                 netdev->mem_end = mmio_start + mmio_len - 1;
2664                 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
2665                 netdev->features |= NETIF_F_LLTX;
2666                 if (pci_using_dac)
2667                         netdev->features |= NETIF_F_HIGHDMA;
2668
2669                 netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
2670                 netdev->vlan_rx_register = vlan_rx_register;
2671
2672                 netdev->open = cxgb_open;
2673                 netdev->stop = cxgb_close;
2674                 netdev->hard_start_xmit = t3_eth_xmit;
2675                 netdev->get_stats = cxgb_get_stats;
2676                 netdev->set_multicast_list = cxgb_set_rxmode;
2677                 netdev->do_ioctl = cxgb_ioctl;
2678                 netdev->change_mtu = cxgb_change_mtu;
2679                 netdev->set_mac_address = cxgb_set_mac_addr;
2680 #ifdef CONFIG_NET_POLL_CONTROLLER
2681                 netdev->poll_controller = cxgb_netpoll;
2682 #endif
2683
2684                 SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
2685         }
2686
2687         pci_set_drvdata(pdev, adapter);
2688         if (t3_prep_adapter(adapter, ai, 1) < 0) {
2689                 err = -ENODEV;
2690                 goto out_free_dev;
2691         }
2692
2693         /*
2694          * The card is now ready to go.  If any errors occur during device
2695          * registration we do not fail the whole card but rather proceed only
2696          * with the ports we manage to register successfully.  However we must
2697          * register at least one net device.
2698          */
2699         for_each_port(adapter, i) {
2700                 err = register_netdev(adapter->port[i]);
2701                 if (err)
2702                         dev_warn(&pdev->dev,
2703                                  "cannot register net device %s, skipping\n",
2704                                  adapter->port[i]->name);
2705                 else {
2706                         /*
2707                          * Change the name we use for messages to the name of
2708                          * the first successfully registered interface.
2709                          */
2710                         if (!adapter->registered_device_map)
2711                                 adapter->name = adapter->port[i]->name;
2712
2713                         __set_bit(i, &adapter->registered_device_map);
2714                 }
2715         }
2716         if (!adapter->registered_device_map) {
2717                 dev_err(&pdev->dev, "could not register any net devices\n");
2718                 goto out_free_dev;
2719         }
2720
2721         /* Driver's ready. Reflect it on LEDs */
2722         t3_led_ready(adapter);
2723
2724         if (is_offload(adapter)) {
2725                 __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
2726                 cxgb3_adapter_ofld(adapter);
2727         }
2728
2729         /* See what interrupts we'll be using */
2730         if (msi > 1 && cxgb_enable_msix(adapter) == 0)
2731                 adapter->flags |= USING_MSIX;
2732         else if (msi > 0 && pci_enable_msi(pdev) == 0)
2733                 adapter->flags |= USING_MSI;
2734
2735         err = sysfs_create_group(&adapter->port[0]->dev.kobj,
2736                                  &cxgb3_attr_group);
             if (err)
                     dev_warn(&pdev->dev,
                              "cannot create sysfs cxgb3 attribute group\n");
2737
2738         print_port_info(adapter, ai);
2739         return 0;
2740
2741 out_free_dev:
2742         iounmap(adapter->regs);
2743         for (i = ai->nports - 1; i >= 0; --i)
2744                 if (adapter->port[i])
2745                         free_netdev(adapter->port[i]);
2746
2747 out_free_adapter:
2748         kfree(adapter);
2749
2750 out_disable_device:
2751         pci_disable_device(pdev);
2752 out_release_regions:
2753         pci_release_regions(pdev);
2754         pci_set_drvdata(pdev, NULL);
2755         return err;
2756 }
2757
2758 static void __devexit remove_one(struct pci_dev *pdev)
2759 {
2760         struct adapter *adapter = pci_get_drvdata(pdev);
2761
2762         if (adapter) {
2763                 int i;
2764
2765                 t3_sge_stop(adapter);
2766                 sysfs_remove_group(&adapter->port[0]->dev.kobj,
2767                                    &cxgb3_attr_group);
2768
2769                 if (is_offload(adapter)) {
2770                         cxgb3_adapter_unofld(adapter);
2771                         if (test_bit(OFFLOAD_DEVMAP_BIT,
2772                                      &adapter->open_device_map))
2773                                 offload_close(&adapter->tdev);
2774                 }
2775
2776                 for_each_port(adapter, i)
2777                     if (test_bit(i, &adapter->registered_device_map))
2778                         unregister_netdev(adapter->port[i]);
2779
2780                 t3_free_sge_resources(adapter);
2781                 cxgb_disable_msi(adapter);
2782
2783                 for_each_port(adapter, i)
2784                         if (adapter->port[i])
2785                                 free_netdev(adapter->port[i]);
2786
2787                 iounmap(adapter->regs);
2788                 kfree(adapter);
2789                 pci_release_regions(pdev);
2790                 pci_disable_device(pdev);
2791                 pci_set_drvdata(pdev, NULL);
2792         }
2793 }
2794
2795 static struct pci_driver driver = {
2796         .name = DRV_NAME,
2797         .id_table = cxgb3_pci_tbl,
2798         .probe = init_one,
2799         .remove = __devexit_p(remove_one),
2800         .err_handler = &t3_err_handler,
2801 };
2802
2803 static int __init cxgb3_init_module(void)
2804 {
2805         int ret;
2806
2807         cxgb3_offload_init();
2808
2809         ret = pci_register_driver(&driver);
2810         return ret;
2811 }
2812
2813 static void __exit cxgb3_cleanup_module(void)
2814 {
2815         pci_unregister_driver(&driver);
2816         if (cxgb3_wq)
2817                 destroy_workqueue(cxgb3_wq);
2818 }
2819
2820 module_init(cxgb3_init_module);
2821 module_exit(cxgb3_cleanup_module);