/* Source: drivers/net/bnx2.c (linux-2.6-omap-h63xx.git, pilppa.org mirror)
 * Snapshot at commit: "[BNX2]: Add init. code to handle RX pages."
 */
1 /* bnx2.c: Broadcom NX2 network driver.
2  *
3  * Copyright (c) 2004-2007 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Michael Chan  (mchan@broadcom.com)
10  */
11
12
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
15
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/bitops.h>
30 #include <asm/io.h>
31 #include <asm/irq.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
34 #include <asm/page.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #ifdef NETIF_F_HW_VLAN_TX
39 #include <linux/if_vlan.h>
40 #define BCM_VLAN 1
41 #endif
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/zlib.h>
50
51 #include "bnx2.h"
52 #include "bnx2_fw.h"
53 #include "bnx2_fw2.h"
54
/* Size of the buffer used to hold decompressed firmware images. */
#define FW_BUF_SIZE             0x10000

#define DRV_MODULE_NAME         "bnx2"
#define PFX DRV_MODULE_NAME     ": "
#define DRV_MODULE_VERSION      "1.6.9"
#define DRV_MODULE_RELDATE      "December 8, 2007"

/* Convert a relative timeout in jiffies into an absolute deadline. */
#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

static const char version[] __devinitdata =
        "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* Module parameter: set non-zero to force legacy INTx interrupts. */
static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
79
/* Supported board types; stored as driver_data in bnx2_pci_tbl and used
 * as the index into board_info[] below.
 */
typedef enum {
        BCM5706 = 0,
        NC370T,
        NC370I,
        BCM5706S,
        NC370F,
        BCM5708,
        BCM5708S,
        BCM5709,
        BCM5709S,
} board_t;
91
/* Human-readable adapter names, indexed by board_t, above. */
static const struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM5706 1000Base-T" },
        { "HP NC370T Multifunction Gigabit Server Adapter" },
        { "HP NC370i Multifunction Gigabit Server Adapter" },
        { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
        { "HP NC370F Multifunction Gigabit Server Adapter" },
        { "Broadcom NetXtreme II BCM5708 1000Base-T" },
        { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
        { "Broadcom NetXtreme II BCM5709 1000Base-T" },
        { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
        };
106
/* PCI IDs handled by this driver.  HP OEM boards are listed first and
 * matched by subsystem ID so they get their NC370x names; the
 * PCI_ANY_ID entries catch the generic Broadcom parts.
 */
static struct pci_device_id bnx2_pci_tbl[] = {
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
          PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
        { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
          PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
        { 0, }
};
128
/* NVRAM interface configurations.  At init time the device's flash
 * strapping value is matched against the first field of each entry to
 * select the right configuration.  Each entry carries the config
 * register values, access flags, page geometry, byte address mask,
 * total size and a printable name.
 */
static struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS          (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS       (BNX2_NV_WREN)
        /* Slow EEPROM */
        {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
         BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
         SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
         "EEPROM - slow"},
        /* Expansion entry 0001 */
        {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 0001"},
        /* Saifun SA25F010 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
         "Non-buffered flash (128kB)"},
        /* Saifun SA25F020 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
         "Non-buffered flash (256kB)"},
        /* Expansion entry 0100 */
        {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 0100"},
        /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
        {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
         ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
         "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
        /* Entry 0110: ST M45PE20 (non-buffered flash)*/
        {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
         ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
         "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
        /* Saifun SA25F005 (non-buffered flash) */
        /* strap, cfg1, & write1 need updates */
        {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
         "Non-buffered flash (64kB)"},
        /* Fast EEPROM */
        {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
         BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
         SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
         "EEPROM - fast"},
        /* Expansion entry 1001 */
        {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1001"},
        /* Expansion entry 1010 */
        {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1010"},
        /* ATMEL AT45DB011B (buffered flash) */
        {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
         BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
         "Buffered flash (128kB)"},
        /* Expansion entry 1100 */
        {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1100"},
        /* Expansion entry 1101 */
        {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
         NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
         SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1101"},
        /* Ateml Expansion entry 1110 */
        {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
         BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
         "Entry 1110 (Atmel)"},
        /* ATMEL AT45DB021B (buffered flash) */
        {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
         BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
         BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
         "Buffered flash (256kB)"},
};
217
/* The 5709 has a single fixed NVRAM layout instead of the
 * strap-selected flash_table above.
 */
static struct flash_spec flash_5709 = {
        .flags          = BNX2_NV_BUFFERED,
        .page_bits      = BCM5709_FLASH_PAGE_BITS,
        .page_size      = BCM5709_FLASH_PAGE_SIZE,
        .addr_mask      = BCM5709_FLASH_BYTE_ADDR_MASK,
        .total_size     = BUFFERED_FLASH_TOTAL_SIZE*2,
        .name           = "5709 Buffered flash (256kB)",
};

MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
228
229 static inline u32 bnx2_tx_avail(struct bnx2 *bp)
230 {
231         u32 diff;
232
233         smp_mb();
234
235         /* The ring uses 256 indices for 255 entries, one of them
236          * needs to be skipped.
237          */
238         diff = bp->tx_prod - bp->tx_cons;
239         if (unlikely(diff >= TX_DESC_CNT)) {
240                 diff &= 0xffff;
241                 if (diff == TX_DESC_CNT)
242                         diff = MAX_TX_DESC_CNT;
243         }
244         return (bp->tx_ring_size - diff);
245 }
246
247 static u32
248 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
249 {
250         u32 val;
251
252         spin_lock_bh(&bp->indirect_lock);
253         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
254         val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
255         spin_unlock_bh(&bp->indirect_lock);
256         return val;
257 }
258
259 static void
260 bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
261 {
262         spin_lock_bh(&bp->indirect_lock);
263         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
264         REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
265         spin_unlock_bh(&bp->indirect_lock);
266 }
267
268 static void
269 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
270 {
271         offset += cid_addr;
272         spin_lock_bh(&bp->indirect_lock);
273         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
274                 int i;
275
276                 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
277                 REG_WR(bp, BNX2_CTX_CTX_CTRL,
278                        offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
279                 for (i = 0; i < 5; i++) {
280                         u32 val;
281                         val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
282                         if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
283                                 break;
284                         udelay(5);
285                 }
286         } else {
287                 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
288                 REG_WR(bp, BNX2_CTX_DATA, val);
289         }
290         spin_unlock_bh(&bp->indirect_lock);
291 }
292
/* Read PHY register reg over MDIO into *val.
 * Returns 0 on success, -EBUSY if the transaction never completed
 * (in which case *val is set to 0).
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
        u32 val1;
        int i, ret;

        /* Hardware autopolling shares the MDIO pins with software
         * access; pause it while we own the bus.
         */
        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);        /* flush posted write */

                udelay(40);
        }

        /* Issue the read command: PHY address, register, start/busy. */
        val1 = (bp->phy_addr << 21) | (reg << 16) |
                BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
                BNX2_EMAC_MDIO_COMM_START_BUSY;
        REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

        /* Poll up to ~500us for the busy bit to clear. */
        for (i = 0; i < 50; i++) {
                udelay(10);

                val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);

                        val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                        val1 &= BNX2_EMAC_MDIO_COMM_DATA;

                        break;
                }
        }

        /* If busy is still set the loop timed out. */
        if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
                *val = 0x0;
                ret = -EBUSY;
        }
        else {
                *val = val1;
                ret = 0;
        }

        /* Restore autopolling if it was enabled on entry. */
        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        return ret;
}
349
/* Write val to PHY register reg over MDIO.
 * Returns 0 on success, -EBUSY if the transaction never completed.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
        u32 val1;
        int i, ret;

        /* Pause hardware autopolling while software owns the MDIO bus. */
        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);        /* flush posted write */

                udelay(40);
        }

        /* Issue the write command: PHY address, register and data. */
        val1 = (bp->phy_addr << 21) | (reg << 16) | val |
                BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
                BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
        REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

        /* Poll up to ~500us for the busy bit to clear. */
        for (i = 0; i < 50; i++) {
                udelay(10);

                val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
                if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
                        udelay(5);
                        break;
                }
        }

        /* If busy is still set the loop timed out. */
        if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
                ret = -EBUSY;
        else
                ret = 0;

        /* Restore autopolling if it was enabled on entry. */
        if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
                val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
                val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

                REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
                REG_RD(bp, BNX2_EMAC_MDIO_MODE);

                udelay(40);
        }

        return ret;
}
398
/* Mask the device interrupt.  The read-back flushes the posted write so
 * the interrupt is really masked before we return.
 */
static void
bnx2_disable_int(struct bnx2 *bp)
{
        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
               BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
        REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}
406
/* Unmask the device interrupt and ack up to the last seen status index. */
static void
bnx2_enable_int(struct bnx2 *bp)
{
        /* First write acks the index with the mask still set, the second
         * clears the mask bit to actually unmask the interrupt.
         */
        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
               BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);

        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);

        /* Force a coalescing pass so any pending event is delivered. */
        REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
419
/* Disable interrupts and wait for any in-flight handler to complete.
 * Bumping intr_sem first makes the ISR a no-op until bnx2_netif_start()
 * drops it back to zero.
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
        atomic_inc(&bp->intr_sem);
        bnx2_disable_int(bp);
        synchronize_irq(bp->pdev->irq);
}
427
/* Quiesce the interface: interrupts first (so no new NAPI poll can be
 * scheduled), then NAPI and the TX queue.  Paired with
 * bnx2_netif_start().
 */
static void
bnx2_netif_stop(struct bnx2 *bp)
{
        bnx2_disable_int_sync(bp);
        if (netif_running(bp->dev)) {
                napi_disable(&bp->napi);
                netif_tx_disable(bp->dev);
                bp->dev->trans_start = jiffies; /* prevent tx timeout */
        }
}
438
/* Undo one bnx2_netif_stop().  Only the call that balances the last
 * stop (i.e. drops intr_sem to zero) actually restarts the interface.
 */
static void
bnx2_netif_start(struct bnx2 *bp)
{
        if (atomic_dec_and_test(&bp->intr_sem)) {
                if (netif_running(bp->dev)) {
                        netif_wake_queue(bp->dev);
                        napi_enable(&bp->napi);
                        bnx2_enable_int(bp);
                }
        }
}
450
451 static void
452 bnx2_free_mem(struct bnx2 *bp)
453 {
454         int i;
455
456         for (i = 0; i < bp->ctx_pages; i++) {
457                 if (bp->ctx_blk[i]) {
458                         pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
459                                             bp->ctx_blk[i],
460                                             bp->ctx_blk_mapping[i]);
461                         bp->ctx_blk[i] = NULL;
462                 }
463         }
464         if (bp->status_blk) {
465                 pci_free_consistent(bp->pdev, bp->status_stats_size,
466                                     bp->status_blk, bp->status_blk_mapping);
467                 bp->status_blk = NULL;
468                 bp->stats_blk = NULL;
469         }
470         if (bp->tx_desc_ring) {
471                 pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
472                                     bp->tx_desc_ring, bp->tx_desc_mapping);
473                 bp->tx_desc_ring = NULL;
474         }
475         kfree(bp->tx_buf_ring);
476         bp->tx_buf_ring = NULL;
477         for (i = 0; i < bp->rx_max_ring; i++) {
478                 if (bp->rx_desc_ring[i])
479                         pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
480                                             bp->rx_desc_ring[i],
481                                             bp->rx_desc_mapping[i]);
482                 bp->rx_desc_ring[i] = NULL;
483         }
484         vfree(bp->rx_buf_ring);
485         bp->rx_buf_ring = NULL;
486         for (i = 0; i < bp->rx_max_pg_ring; i++) {
487                 if (bp->rx_pg_desc_ring[i])
488                         pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
489                                             bp->rx_pg_desc_ring[i],
490                                             bp->rx_pg_desc_mapping[i]);
491                 bp->rx_pg_desc_ring[i] = NULL;
492         }
493         if (bp->rx_pg_ring)
494                 vfree(bp->rx_pg_ring);
495         bp->rx_pg_ring = NULL;
496 }
497
/* Allocate all rings, the status/statistics block and (on 5709) the
 * host context memory.  Returns 0 or -ENOMEM; on failure all partial
 * allocations are released via bnx2_free_mem().
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
        int i, status_blk_size;

        /* Software-side TX ring bookkeeping (zeroed). */
        bp->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
        if (bp->tx_buf_ring == NULL)
                return -ENOMEM;

        /* Hardware TX descriptor ring, DMA-coherent. */
        bp->tx_desc_ring = pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
                                                &bp->tx_desc_mapping);
        if (bp->tx_desc_ring == NULL)
                goto alloc_mem_err;

        /* Software RX ring; vmalloc because it can be large. */
        bp->rx_buf_ring = vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
        if (bp->rx_buf_ring == NULL)
                goto alloc_mem_err;

        memset(bp->rx_buf_ring, 0, SW_RXBD_RING_SIZE * bp->rx_max_ring);

        /* One DMA-coherent page per hardware RX descriptor ring. */
        for (i = 0; i < bp->rx_max_ring; i++) {
                bp->rx_desc_ring[i] =
                        pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
                                             &bp->rx_desc_mapping[i]);
                if (bp->rx_desc_ring[i] == NULL)
                        goto alloc_mem_err;

        }

        /* Optional RX page ring, only when a page ring size is set. */
        if (bp->rx_pg_ring_size) {
                bp->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
                                         bp->rx_max_pg_ring);
                if (bp->rx_pg_ring == NULL)
                        goto alloc_mem_err;

                memset(bp->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
                       bp->rx_max_pg_ring);
        }

        for (i = 0; i < bp->rx_max_pg_ring; i++) {
                bp->rx_pg_desc_ring[i] =
                        pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
                                             &bp->rx_pg_desc_mapping[i]);
                if (bp->rx_pg_desc_ring[i] == NULL)
                        goto alloc_mem_err;

        }

        /* Combine status and statistics blocks into one allocation. */
        status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
        bp->status_stats_size = status_blk_size +
                                sizeof(struct statistics_block);

        bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
                                              &bp->status_blk_mapping);
        if (bp->status_blk == NULL)
                goto alloc_mem_err;

        memset(bp->status_blk, 0, bp->status_stats_size);

        /* stats_blk lives just past the cache-aligned status block. */
        bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
                                  status_blk_size);

        bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

        /* 5709 keeps 8KB of context in host memory, page by page. */
        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
                if (bp->ctx_pages == 0)
                        bp->ctx_pages = 1;
                for (i = 0; i < bp->ctx_pages; i++) {
                        bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
                                                BCM_PAGE_SIZE,
                                                &bp->ctx_blk_mapping[i]);
                        if (bp->ctx_blk[i] == NULL)
                                goto alloc_mem_err;
                }
        }
        return 0;

alloc_mem_err:
        /* bnx2_free_mem() copes with partially completed allocations. */
        bnx2_free_mem(bp);
        return -ENOMEM;
}
581
582 static void
583 bnx2_report_fw_link(struct bnx2 *bp)
584 {
585         u32 fw_link_status = 0;
586
587         if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
588                 return;
589
590         if (bp->link_up) {
591                 u32 bmsr;
592
593                 switch (bp->line_speed) {
594                 case SPEED_10:
595                         if (bp->duplex == DUPLEX_HALF)
596                                 fw_link_status = BNX2_LINK_STATUS_10HALF;
597                         else
598                                 fw_link_status = BNX2_LINK_STATUS_10FULL;
599                         break;
600                 case SPEED_100:
601                         if (bp->duplex == DUPLEX_HALF)
602                                 fw_link_status = BNX2_LINK_STATUS_100HALF;
603                         else
604                                 fw_link_status = BNX2_LINK_STATUS_100FULL;
605                         break;
606                 case SPEED_1000:
607                         if (bp->duplex == DUPLEX_HALF)
608                                 fw_link_status = BNX2_LINK_STATUS_1000HALF;
609                         else
610                                 fw_link_status = BNX2_LINK_STATUS_1000FULL;
611                         break;
612                 case SPEED_2500:
613                         if (bp->duplex == DUPLEX_HALF)
614                                 fw_link_status = BNX2_LINK_STATUS_2500HALF;
615                         else
616                                 fw_link_status = BNX2_LINK_STATUS_2500FULL;
617                         break;
618                 }
619
620                 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
621
622                 if (bp->autoneg) {
623                         fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
624
625                         bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
626                         bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
627
628                         if (!(bmsr & BMSR_ANEGCOMPLETE) ||
629                             bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
630                                 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
631                         else
632                                 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
633                 }
634         }
635         else
636                 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
637
638         REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
639 }
640
641 static char *
642 bnx2_xceiver_str(struct bnx2 *bp)
643 {
644         return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
645                 ((bp->phy_flags & PHY_SERDES_FLAG) ? "Remote Copper" :
646                  "Copper"));
647 }
648
649 static void
650 bnx2_report_link(struct bnx2 *bp)
651 {
652         if (bp->link_up) {
653                 netif_carrier_on(bp->dev);
654                 printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
655                        bnx2_xceiver_str(bp));
656
657                 printk("%d Mbps ", bp->line_speed);
658
659                 if (bp->duplex == DUPLEX_FULL)
660                         printk("full duplex");
661                 else
662                         printk("half duplex");
663
664                 if (bp->flow_ctrl) {
665                         if (bp->flow_ctrl & FLOW_CTRL_RX) {
666                                 printk(", receive ");
667                                 if (bp->flow_ctrl & FLOW_CTRL_TX)
668                                         printk("& transmit ");
669                         }
670                         else {
671                                 printk(", transmit ");
672                         }
673                         printk("flow control ON");
674                 }
675                 printk("\n");
676         }
677         else {
678                 netif_carrier_off(bp->dev);
679                 printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
680                        bnx2_xceiver_str(bp));
681         }
682
683         bnx2_report_fw_link(bp);
684 }
685
686 static void
687 bnx2_resolve_flow_ctrl(struct bnx2 *bp)
688 {
689         u32 local_adv, remote_adv;
690
691         bp->flow_ctrl = 0;
692         if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
693                 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
694
695                 if (bp->duplex == DUPLEX_FULL) {
696                         bp->flow_ctrl = bp->req_flow_ctrl;
697                 }
698                 return;
699         }
700
701         if (bp->duplex != DUPLEX_FULL) {
702                 return;
703         }
704
705         if ((bp->phy_flags & PHY_SERDES_FLAG) &&
706             (CHIP_NUM(bp) == CHIP_NUM_5708)) {
707                 u32 val;
708
709                 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
710                 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
711                         bp->flow_ctrl |= FLOW_CTRL_TX;
712                 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
713                         bp->flow_ctrl |= FLOW_CTRL_RX;
714                 return;
715         }
716
717         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
718         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
719
720         if (bp->phy_flags & PHY_SERDES_FLAG) {
721                 u32 new_local_adv = 0;
722                 u32 new_remote_adv = 0;
723
724                 if (local_adv & ADVERTISE_1000XPAUSE)
725                         new_local_adv |= ADVERTISE_PAUSE_CAP;
726                 if (local_adv & ADVERTISE_1000XPSE_ASYM)
727                         new_local_adv |= ADVERTISE_PAUSE_ASYM;
728                 if (remote_adv & ADVERTISE_1000XPAUSE)
729                         new_remote_adv |= ADVERTISE_PAUSE_CAP;
730                 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
731                         new_remote_adv |= ADVERTISE_PAUSE_ASYM;
732
733                 local_adv = new_local_adv;
734                 remote_adv = new_remote_adv;
735         }
736
737         /* See Table 28B-3 of 802.3ab-1999 spec. */
738         if (local_adv & ADVERTISE_PAUSE_CAP) {
739                 if(local_adv & ADVERTISE_PAUSE_ASYM) {
740                         if (remote_adv & ADVERTISE_PAUSE_CAP) {
741                                 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
742                         }
743                         else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
744                                 bp->flow_ctrl = FLOW_CTRL_RX;
745                         }
746                 }
747                 else {
748                         if (remote_adv & ADVERTISE_PAUSE_CAP) {
749                                 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
750                         }
751                 }
752         }
753         else if (local_adv & ADVERTISE_PAUSE_ASYM) {
754                 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
755                         (remote_adv & ADVERTISE_PAUSE_ASYM)) {
756
757                         bp->flow_ctrl = FLOW_CTRL_TX;
758                 }
759         }
760 }
761
762 static int
763 bnx2_5709s_linkup(struct bnx2 *bp)
764 {
765         u32 val, speed;
766
767         bp->link_up = 1;
768
769         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
770         bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
771         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
772
773         if ((bp->autoneg & AUTONEG_SPEED) == 0) {
774                 bp->line_speed = bp->req_line_speed;
775                 bp->duplex = bp->req_duplex;
776                 return 0;
777         }
778         speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
779         switch (speed) {
780                 case MII_BNX2_GP_TOP_AN_SPEED_10:
781                         bp->line_speed = SPEED_10;
782                         break;
783                 case MII_BNX2_GP_TOP_AN_SPEED_100:
784                         bp->line_speed = SPEED_100;
785                         break;
786                 case MII_BNX2_GP_TOP_AN_SPEED_1G:
787                 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
788                         bp->line_speed = SPEED_1000;
789                         break;
790                 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
791                         bp->line_speed = SPEED_2500;
792                         break;
793         }
794         if (val & MII_BNX2_GP_TOP_AN_FD)
795                 bp->duplex = DUPLEX_FULL;
796         else
797                 bp->duplex = DUPLEX_HALF;
798         return 0;
799 }
800
801 static int
802 bnx2_5708s_linkup(struct bnx2 *bp)
803 {
804         u32 val;
805
806         bp->link_up = 1;
807         bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
808         switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
809                 case BCM5708S_1000X_STAT1_SPEED_10:
810                         bp->line_speed = SPEED_10;
811                         break;
812                 case BCM5708S_1000X_STAT1_SPEED_100:
813                         bp->line_speed = SPEED_100;
814                         break;
815                 case BCM5708S_1000X_STAT1_SPEED_1G:
816                         bp->line_speed = SPEED_1000;
817                         break;
818                 case BCM5708S_1000X_STAT1_SPEED_2G5:
819                         bp->line_speed = SPEED_2500;
820                         break;
821         }
822         if (val & BCM5708S_1000X_STAT1_FD)
823                 bp->duplex = DUPLEX_FULL;
824         else
825                 bp->duplex = DUPLEX_HALF;
826
827         return 0;
828 }
829
830 static int
831 bnx2_5706s_linkup(struct bnx2 *bp)
832 {
833         u32 bmcr, local_adv, remote_adv, common;
834
835         bp->link_up = 1;
836         bp->line_speed = SPEED_1000;
837
838         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
839         if (bmcr & BMCR_FULLDPLX) {
840                 bp->duplex = DUPLEX_FULL;
841         }
842         else {
843                 bp->duplex = DUPLEX_HALF;
844         }
845
846         if (!(bmcr & BMCR_ANENABLE)) {
847                 return 0;
848         }
849
850         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
851         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
852
853         common = local_adv & remote_adv;
854         if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
855
856                 if (common & ADVERTISE_1000XFULL) {
857                         bp->duplex = DUPLEX_FULL;
858                 }
859                 else {
860                         bp->duplex = DUPLEX_HALF;
861                 }
862         }
863
864         return 0;
865 }
866
/* Derive line speed and duplex for a copper PHY that has just linked up.
 *
 * With autoneg enabled the result is the AND of our advertisement and
 * the link partner's; 1000BASE-T is resolved first, then 100/10.  With
 * autoneg disabled, speed/duplex come straight from the forced BMCR
 * bits.  Always returns 0.
 */
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		/* Gigabit resolution: the link-partner ability bits in
		 * MII_STAT1000 sit two bit positions above the matching
		 * advertisement bits in MII_CTRL1000, hence the >> 2
		 * before ANDing.
		 */
		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			/* No gigabit match: fall back to 10/100 resolution
			 * via the standard advertisement/LPA registers.
			 */
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				/* Nothing in common: report link down. */
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		/* Forced mode: decode speed/duplex directly from BMCR. */
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
932
/* Program the EMAC to match the currently resolved link state:
 * port mode (MII/GMII/2.5G), duplex, slot time and rx/tx flow control.
 * Finally acknowledges the EMAC link-change interrupt.  Returns 0.
 */
static int
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	/* Default TX lengths value; 1G half duplex needs 0x26ff instead. */
	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				/* 5706 has no dedicated 10M port mode. */
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				/* 2.5G is GMII mode plus the 25G flag. */
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		/* No link: leave the port in GMII mode. */
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	return 0;
}
999
1000 static void
1001 bnx2_enable_bmsr1(struct bnx2 *bp)
1002 {
1003         if ((bp->phy_flags & PHY_SERDES_FLAG) &&
1004             (CHIP_NUM(bp) == CHIP_NUM_5709))
1005                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1006                                MII_BNX2_BLK_ADDR_GP_STATUS);
1007 }
1008
1009 static void
1010 bnx2_disable_bmsr1(struct bnx2 *bp)
1011 {
1012         if ((bp->phy_flags & PHY_SERDES_FLAG) &&
1013             (CHIP_NUM(bp) == CHIP_NUM_5709))
1014                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1015                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1016 }
1017
1018 static int
1019 bnx2_test_and_enable_2g5(struct bnx2 *bp)
1020 {
1021         u32 up1;
1022         int ret = 1;
1023
1024         if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1025                 return 0;
1026
1027         if (bp->autoneg & AUTONEG_SPEED)
1028                 bp->advertising |= ADVERTISED_2500baseX_Full;
1029
1030         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1031                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1032
1033         bnx2_read_phy(bp, bp->mii_up1, &up1);
1034         if (!(up1 & BCM5708S_UP1_2G5)) {
1035                 up1 |= BCM5708S_UP1_2G5;
1036                 bnx2_write_phy(bp, bp->mii_up1, up1);
1037                 ret = 0;
1038         }
1039
1040         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1041                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1042                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1043
1044         return ret;
1045 }
1046
1047 static int
1048 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1049 {
1050         u32 up1;
1051         int ret = 0;
1052
1053         if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1054                 return 0;
1055
1056         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1057                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1058
1059         bnx2_read_phy(bp, bp->mii_up1, &up1);
1060         if (up1 & BCM5708S_UP1_2G5) {
1061                 up1 &= ~BCM5708S_UP1_2G5;
1062                 bnx2_write_phy(bp, bp->mii_up1, up1);
1063                 ret = 1;
1064         }
1065
1066         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1067                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1068                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1069
1070         return ret;
1071 }
1072
1073 static void
1074 bnx2_enable_forced_2g5(struct bnx2 *bp)
1075 {
1076         u32 bmcr;
1077
1078         if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1079                 return;
1080
1081         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1082                 u32 val;
1083
1084                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1085                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1086                 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1087                 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1088                 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1089                 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1090
1091                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1092                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1093                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1094
1095         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1096                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1097                 bmcr |= BCM5708S_BMCR_FORCE_2500;
1098         }
1099
1100         if (bp->autoneg & AUTONEG_SPEED) {
1101                 bmcr &= ~BMCR_ANENABLE;
1102                 if (bp->req_duplex == DUPLEX_FULL)
1103                         bmcr |= BMCR_FULLDPLX;
1104         }
1105         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1106 }
1107
1108 static void
1109 bnx2_disable_forced_2g5(struct bnx2 *bp)
1110 {
1111         u32 bmcr;
1112
1113         if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1114                 return;
1115
1116         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1117                 u32 val;
1118
1119                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1120                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1121                 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1122                 val &= ~MII_BNX2_SD_MISC1_FORCE;
1123                 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1124
1125                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1126                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1127                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1128
1129         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1130                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1131                 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1132         }
1133
1134         if (bp->autoneg & AUTONEG_SPEED)
1135                 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1136         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1137 }
1138
/* Poll the local PHY, resolve the new link state and reprogram the MAC.
 * Reports the transition via bnx2_report_link() when the state changed.
 * Returns 0.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	/* In MAC/PHY loopback the link is considered up unconditionally. */
	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	/* Remote (firmware-managed) PHY: link state arrives through
	 * firmware events, not local register polling.
	 */
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return 0;

	link_up = bp->link_up;

	/* BMSR link status is latched-low; read it twice so the second
	 * read reflects current state instead of a stale link-down event.
	 */
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	/* 5706 SerDes workaround: take link status from the EMAC status
	 * register rather than trusting the BMSR bit.
	 */
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val;

		val = REG_RD(bp, BNX2_EMAC_STATUS);
		if (val & BNX2_EMAC_STATUS_LINK)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		/* Dispatch to the chip-specific linkup handler to fill in
		 * line_speed/duplex.
		 */
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		/* Link down: drop any forced 2.5G setting so autoneg can
		 * proceed normally on the next attempt.
		 */
		if ((bp->phy_flags & PHY_SERDES_FLAG) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		bp->link_up = 0;
	}

	/* Only log when the link state actually changed. */
	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
1204
1205 static int
1206 bnx2_reset_phy(struct bnx2 *bp)
1207 {
1208         int i;
1209         u32 reg;
1210
1211         bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1212
1213 #define PHY_RESET_MAX_WAIT 100
1214         for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1215                 udelay(10);
1216
1217                 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1218                 if (!(reg & BMCR_RESET)) {
1219                         udelay(20);
1220                         break;
1221                 }
1222         }
1223         if (i == PHY_RESET_MAX_WAIT) {
1224                 return -EBUSY;
1225         }
1226         return 0;
1227 }
1228
1229 static u32
1230 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1231 {
1232         u32 adv = 0;
1233
1234         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1235                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1236
1237                 if (bp->phy_flags & PHY_SERDES_FLAG) {
1238                         adv = ADVERTISE_1000XPAUSE;
1239                 }
1240                 else {
1241                         adv = ADVERTISE_PAUSE_CAP;
1242                 }
1243         }
1244         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1245                 if (bp->phy_flags & PHY_SERDES_FLAG) {
1246                         adv = ADVERTISE_1000XPSE_ASYM;
1247                 }
1248                 else {
1249                         adv = ADVERTISE_PAUSE_ASYM;
1250                 }
1251         }
1252         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1253                 if (bp->phy_flags & PHY_SERDES_FLAG) {
1254                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1255                 }
1256                 else {
1257                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1258                 }
1259         }
1260         return adv;
1261 }
1262
1263 static int bnx2_fw_sync(struct bnx2 *, u32, int);
1264
1265 static int
1266 bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1267 {
1268         u32 speed_arg = 0, pause_adv;
1269
1270         pause_adv = bnx2_phy_get_pause_adv(bp);
1271
1272         if (bp->autoneg & AUTONEG_SPEED) {
1273                 speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1274                 if (bp->advertising & ADVERTISED_10baseT_Half)
1275                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1276                 if (bp->advertising & ADVERTISED_10baseT_Full)
1277                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1278                 if (bp->advertising & ADVERTISED_100baseT_Half)
1279                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1280                 if (bp->advertising & ADVERTISED_100baseT_Full)
1281                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1282                 if (bp->advertising & ADVERTISED_1000baseT_Full)
1283                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1284                 if (bp->advertising & ADVERTISED_2500baseX_Full)
1285                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1286         } else {
1287                 if (bp->req_line_speed == SPEED_2500)
1288                         speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1289                 else if (bp->req_line_speed == SPEED_1000)
1290                         speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1291                 else if (bp->req_line_speed == SPEED_100) {
1292                         if (bp->req_duplex == DUPLEX_FULL)
1293                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1294                         else
1295                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1296                 } else if (bp->req_line_speed == SPEED_10) {
1297                         if (bp->req_duplex == DUPLEX_FULL)
1298                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1299                         else
1300                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1301                 }
1302         }
1303
1304         if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1305                 speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1306         if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_1000XPSE_ASYM))
1307                 speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1308
1309         if (port == PORT_TP)
1310                 speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1311                              BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1312
1313         REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB_ARG0, speed_arg);
1314
1315         spin_unlock_bh(&bp->phy_lock);
1316         bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0);
1317         spin_lock_bh(&bp->phy_lock);
1318
1319         return 0;
1320 }
1321
/* Configure a SerDes PHY according to bp's requested link settings.
 *
 * Firmware-managed (remote) PHYs are delegated to
 * bnx2_setup_remote_phy().  Otherwise either a forced speed/duplex is
 * programmed into BMCR, or autonegotiation is (re)started with the
 * requested advertisement.  Called with phy_lock held; the lock is
 * dropped briefly around the forced-link-down sleep.  Returns 0.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return (bnx2_setup_remote_phy(bp, port));

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* Forced-speed path: program BMCR/advertisement directly. */
		u32 new_bmcr;
		int force_link_down = 0;

		/* If the 2.5G advertisement had to change, the link must be
		 * forced down so the partner renegotiates.
		 */
		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				/* 0x2000 is the BMCR speed-select LSB;
				 * NOTE(review): clearing it alongside
				 * BMCR_SPEED1000 presumably selects forced
				 * 1G on the 5709 SerDes - confirm against
				 * the PHY data sheet.
				 */
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			/* Nothing changed: just re-resolve flow control and
			 * reprogram the MAC.
			 */
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Autoneg path: build the desired advertisement. */
	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			/* msleep() may sleep, so drop the spinlock. */
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
1436
/* ethtool advertisement mask for fibre ports: include 2.5G only when
 * the PHY is 2.5G-capable.  NOTE: the expansion is an unparenthesized
 * conditional and reads `bp` from the surrounding scope; use only where
 * the callers below do (direct assignment / OR into bp->advertising).
 */
#define ETHTOOL_ALL_FIBRE_SPEED                                         \
        (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ?                       \
                (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
                (ADVERTISED_1000baseT_Full)

/* ethtool advertisement mask covering all copper speeds. */
#define ETHTOOL_ALL_COPPER_SPEED                                        \
        (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |            \
        ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |           \
        ADVERTISED_1000baseT_Full)

/* MII advertisement bits for every 10/100 mode plus the CSMA selector. */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
        ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

/* MII_CTRL1000 advertisement bits for both 1000BASE-T duplex modes. */
#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1451
1452 static void
1453 bnx2_set_default_remote_link(struct bnx2 *bp)
1454 {
1455         u32 link;
1456
1457         if (bp->phy_port == PORT_TP)
1458                 link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_COPPER_LINK);
1459         else
1460                 link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_SERDES_LINK);
1461
1462         if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1463                 bp->req_line_speed = 0;
1464                 bp->autoneg |= AUTONEG_SPEED;
1465                 bp->advertising = ADVERTISED_Autoneg;
1466                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1467                         bp->advertising |= ADVERTISED_10baseT_Half;
1468                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1469                         bp->advertising |= ADVERTISED_10baseT_Full;
1470                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1471                         bp->advertising |= ADVERTISED_100baseT_Half;
1472                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1473                         bp->advertising |= ADVERTISED_100baseT_Full;
1474                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1475                         bp->advertising |= ADVERTISED_1000baseT_Full;
1476                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1477                         bp->advertising |= ADVERTISED_2500baseX_Full;
1478         } else {
1479                 bp->autoneg = 0;
1480                 bp->advertising = 0;
1481                 bp->req_duplex = DUPLEX_FULL;
1482                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1483                         bp->req_line_speed = SPEED_10;
1484                         if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1485                                 bp->req_duplex = DUPLEX_HALF;
1486                 }
1487                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1488                         bp->req_line_speed = SPEED_100;
1489                         if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1490                                 bp->req_duplex = DUPLEX_HALF;
1491                 }
1492                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1493                         bp->req_line_speed = SPEED_1000;
1494                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1495                         bp->req_line_speed = SPEED_2500;
1496         }
1497 }
1498
1499 static void
1500 bnx2_set_default_link(struct bnx2 *bp)
1501 {
1502         if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1503                 return bnx2_set_default_remote_link(bp);
1504
1505         bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1506         bp->req_line_speed = 0;
1507         if (bp->phy_flags & PHY_SERDES_FLAG) {
1508                 u32 reg;
1509
1510                 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1511
1512                 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
1513                 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1514                 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1515                         bp->autoneg = 0;
1516                         bp->req_line_speed = bp->line_speed = SPEED_1000;
1517                         bp->req_duplex = DUPLEX_FULL;
1518                 }
1519         } else
1520                 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1521 }
1522
1523 static void
1524 bnx2_send_heart_beat(struct bnx2 *bp)
1525 {
1526         u32 msg;
1527         u32 addr;
1528
1529         spin_lock(&bp->indirect_lock);
1530         msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1531         addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1532         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1533         REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1534         spin_unlock(&bp->indirect_lock);
1535 }
1536
/* Handle a link event from the firmware-managed (remote) PHY.
 *
 * Decodes the shared-memory link status word into link_up, line_speed,
 * duplex, flow control and port type, answers any pending heart-beat
 * request, then reports the transition and reprograms the MAC.
 */
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;
	u8 old_port;

	msg = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);

	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		/* Duplex defaults to FULL; the xxHALF cases below override
		 * it and then deliberately fall through to set the speed.
		 */
		bp->duplex = DUPLEX_FULL;
		switch (speed) {
			case BNX2_LINK_STATUS_10HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_10FULL:
				bp->line_speed = SPEED_10;
				break;
			case BNX2_LINK_STATUS_100HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_100BASE_T4:
			case BNX2_LINK_STATUS_100FULL:
				bp->line_speed = SPEED_100;
				break;
			case BNX2_LINK_STATUS_1000HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_1000FULL:
				bp->line_speed = SPEED_1000;
				break;
			case BNX2_LINK_STATUS_2500HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_2500FULL:
				bp->line_speed = SPEED_2500;
				break;
			default:
				bp->line_speed = 0;
				break;
		}

		/* phy_lock guards the flow-control / port-type update. */
		spin_lock(&bp->phy_lock);
		bp->flow_ctrl = 0;
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			/* Flow control was not fully autonegotiated: apply
			 * the requested setting (full duplex only).
			 */
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		/* Re-derive defaults if the media type switched. */
		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

		spin_unlock(&bp->phy_lock);
	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}
1615
1616 static int
1617 bnx2_set_remote_link(struct bnx2 *bp)
1618 {
1619         u32 evt_code;
1620
1621         evt_code = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_EVT_CODE_MB);
1622         switch (evt_code) {
1623                 case BNX2_FW_EVT_CODE_LINK_EVENT:
1624                         bnx2_remote_phy_event(bp);
1625                         break;
1626                 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
1627                 default:
1628                         bnx2_send_heart_beat(bp);
1629                         break;
1630         }
1631         return 0;
1632 }
1633
/* Bring the copper PHY in line with the requested link parameters.
 *
 * Autoneg path: rebuild the 10/100 and 1000BASE-T advertisement
 * registers from bp->advertising and restart autonegotiation only when
 * the advertisement or BMCR_ANENABLE actually changed.  Forced path:
 * program speed/duplex into BMCR, bouncing the link first if it is up.
 * Caller holds bp->phy_lock; the lock is dropped around the 50 ms
 * forced-link-down wait.  Always returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		/* Mask the current advertisement down to the speed and
		 * pause bits so the change-detection compare below
		 * ignores unrelated register bits.
		 */
		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		/* Translate ethtool advertising flags to MII bits. */
		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		/* Restart autoneg only when something changed or autoneg
		 * was disabled; a gratuitous restart would drop the link.
		 */
		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced speed/duplex path. */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* BMSR link status is latched; read twice to get the
		 * current state.
		 */
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			/* Drop the PHY lock while sleeping; msleep() may
			 * not be called with a spinlock held.
			 */
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
1730
1731 static int
1732 bnx2_setup_phy(struct bnx2 *bp, u8 port)
1733 {
1734         if (bp->loopback == MAC_LOOPBACK)
1735                 return 0;
1736
1737         if (bp->phy_flags & PHY_SERDES_FLAG) {
1738                 return (bnx2_setup_serdes_phy(bp, port));
1739         }
1740         else {
1741                 return (bnx2_setup_copper_phy(bp));
1742         }
1743 }
1744
/* One-time initialization of the 5709 SerDes PHY.
 *
 * The 5709S exposes its registers in banks selected through
 * MII_BNX2_BLK_ADDR; the IEEE-compatible registers sit at offset 0x10,
 * hence the mii_* register remapping below.  Forces fiber mode,
 * advertises 2.5G when capable, and enables BAM/CL73 autoneg options.
 * Returns 0.
 */
static int
bnx2_init_5709s_phy(struct bnx2 *bp)
{
	u32 val;

	/* IEEE registers are offset by 0x10 on this PHY. */
	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	/* Point the AER block at the autoneg MMD before touching
	 * anything else.
	 */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	bnx2_reset_phy(bp);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	/* Disable media auto-detect and force fiber mode. */
	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	/* Advertise (or withdraw) 2.5G in the over-1G UP1 register. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	/* Enable BAM and T2 next-page autoneg modes. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	/* Leave the bank selector on the combo IEEE block so the
	 * remapped mii_* accesses above work during normal operation.
	 */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}
1793
/* One-time initialization of the 5708 SerDes PHY.
 *
 * Resets the PHY, enables fiber auto-detect mode and PLL early link
 * detect, advertises 2.5G when capable, and applies chip-revision and
 * backplane-specific TX amplitude tweaks read from shared memory.
 * Returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp)
{
	u32 val;

	bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	/* Use the IEEE-compliant register behavior (DIG3 block). */
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	/* Advertise 2.5G when the hardware supports it. */
	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* A non-zero TXCTL3 value in the port hardware config indicates a
	 * board-specific TX control setting; apply it only on backplane
	 * designs, per the shared hardware config.
	 */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = REG_RD_IND(bp, bp->shmem_base +
					  BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
1851
/* One-time initialization of the 5706 SerDes PHY.
 *
 * Resets the PHY and programs the jumbo-frame (extended packet length)
 * related bits according to the current MTU.  Registers 0x18 and 0x1c
 * are vendor shadow registers; the magic values come from Broadcom and
 * should not be changed.  Returns 0.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp)
{
	bnx2_reset_phy(bp);

	bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;

	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Clear the extended packet length bits for standard MTU. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
1888
/* One-time initialization of the copper PHY.
 *
 * Resets the PHY, applies CRC and early-DAC workarounds when the
 * corresponding phy_flags are set, programs the extended packet length
 * bits according to the MTU, and enables ethernet@wirespeed.  Registers
 * 0x15/0x17/0x18/0x1c and 0x10 are vendor shadow/auxiliary registers;
 * the values are Broadcom-supplied.  Returns 0.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp)
{
	u32 val;

	bnx2_reset_phy(bp);

	/* CRC workaround: Broadcom-supplied DSP writes. */
	if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	/* Disable early DAC (bit 8 of DSP expand register 8). */
	if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Clear the extended packet length bits for standard MTU. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
1939
1940
1941 static int
1942 bnx2_init_phy(struct bnx2 *bp)
1943 {
1944         u32 val;
1945         int rc = 0;
1946
1947         bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
1948         bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;
1949
1950         bp->mii_bmcr = MII_BMCR;
1951         bp->mii_bmsr = MII_BMSR;
1952         bp->mii_bmsr1 = MII_BMSR;
1953         bp->mii_adv = MII_ADVERTISE;
1954         bp->mii_lpa = MII_LPA;
1955
1956         REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
1957
1958         if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1959                 goto setup_phy;
1960
1961         bnx2_read_phy(bp, MII_PHYSID1, &val);
1962         bp->phy_id = val << 16;
1963         bnx2_read_phy(bp, MII_PHYSID2, &val);
1964         bp->phy_id |= val & 0xffff;
1965
1966         if (bp->phy_flags & PHY_SERDES_FLAG) {
1967                 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1968                         rc = bnx2_init_5706s_phy(bp);
1969                 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1970                         rc = bnx2_init_5708s_phy(bp);
1971                 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1972                         rc = bnx2_init_5709s_phy(bp);
1973         }
1974         else {
1975                 rc = bnx2_init_copper_phy(bp);
1976         }
1977
1978 setup_phy:
1979         if (!rc)
1980                 rc = bnx2_setup_phy(bp, bp->phy_port);
1981
1982         return rc;
1983 }
1984
1985 static int
1986 bnx2_set_mac_loopback(struct bnx2 *bp)
1987 {
1988         u32 mac_mode;
1989
1990         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1991         mac_mode &= ~BNX2_EMAC_MODE_PORT;
1992         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1993         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1994         bp->link_up = 1;
1995         return 0;
1996 }
1997
1998 static int bnx2_test_link(struct bnx2 *);
1999
2000 static int
2001 bnx2_set_phy_loopback(struct bnx2 *bp)
2002 {
2003         u32 mac_mode;
2004         int rc, i;
2005
2006         spin_lock_bh(&bp->phy_lock);
2007         rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2008                             BMCR_SPEED1000);
2009         spin_unlock_bh(&bp->phy_lock);
2010         if (rc)
2011                 return rc;
2012
2013         for (i = 0; i < 10; i++) {
2014                 if (bnx2_test_link(bp) == 0)
2015                         break;
2016                 msleep(100);
2017         }
2018
2019         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2020         mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2021                       BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2022                       BNX2_EMAC_MODE_25G_MODE);
2023
2024         mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2025         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2026         bp->link_up = 1;
2027         return 0;
2028 }
2029
/* Post @msg_data to the bootcode via the DRV_MB shared-memory mailbox
 * and wait for the firmware to acknowledge it by echoing the sequence
 * number in FW_MB.
 *
 * Returns 0 on ack (WAIT0 messages return 0 without waiting for the
 * full handshake), -EBUSY on ack timeout (the timeout is also reported
 * back to the firmware), or -EIO if the firmware acked with a non-OK
 * status.  @silent suppresses the timeout printk.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
{
	int i;
	u32 val;

	/* Tag the message with a new sequence number so the ack below can
	 * be matched to this exact request.
	 */
	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

	/* wait for an acknowledgement. */
	for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			printk(KERN_ERR PFX "fw sync timeout, reset code = "
					    "%x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
2072
/* Initialize the 5709 context memory: trigger the hardware context
 * memory init, then program each host context page into the chip's
 * host page table.
 *
 * Returns 0 on success, -EBUSY if the memory init or any page-table
 * write fails to complete within its polling window.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	/* Enable the context block, kick off its internal memory init,
	 * and tell it the host page size (relative to 256 bytes).
	 */
	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BCM_PAGE_BITS - 8) << 16;
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	/* MEM_INIT self-clears when the hardware finishes. */
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_CTX_COMMAND);
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
			break;
		udelay(2);
	}
	if (val & BNX2_CTX_COMMAND_MEM_INIT)
		return -EBUSY;

	/* Write each context page's DMA address into the host page table,
	 * one entry per page, waiting for each write request to complete.
	 */
	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		for (j = 0; j < 10; j++) {

			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
2115
/* Zero out the on-chip context memory for all 96 connection IDs
 * (pre-5709 chips; the 5709 uses host memory via
 * bnx2_init_5709_context() instead).
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;
		int i;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			/* 5706 A0: virtual CIDs with bit 3 set map to a
			 * different physical CID region; compute the
			 * remapped physical address.
			 */
			u32 new_vcid;

			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		/* A context spans several physical pages; map and clear
		 * each one.
		 */
		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
			vcid_addr += (i << PHY_CTX_SHIFT);
			pcid_addr += (i << PHY_CTX_SHIFT);

			REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

			/* Zero out the context. */
			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
				CTX_WR(bp, vcid_addr, offset, 0);
		}
	}
}
2158
/* Work around bad RX buffer memory blocks.
 *
 * Drains the chip's internal mbuf allocator, remembering the good
 * buffers (up to 512 fit in the free pool) and implicitly keeping the
 * bad ones — those whose address has bit 9 set — allocated.  The good
 * buffers are then returned to the pool, leaving only bad blocks
 * permanently consumed.  Returns 0 on success, -ENOMEM if the temporary
 * tracking array cannot be allocated.
 */
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
	if (good_mbuf == NULL) {
		printk(KERN_ERR PFX "Failed to allocate memory in "
				    "bnx2_alloc_bad_rbuf\n");
		return -ENOMEM;
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
		/* Encode the buffer for the FW_BUF_FREE register; the
		 * low bit marks the entry valid.
		 */
		val = (val << 9) | val | 1;

		REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}
2209
2210 static void
2211 bnx2_set_mac_addr(struct bnx2 *bp)
2212 {
2213         u32 val;
2214         u8 *mac_addr = bp->dev->dev_addr;
2215
2216         val = (mac_addr[0] << 8) | mac_addr[1];
2217
2218         REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
2219
2220         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2221                 (mac_addr[4] << 8) | mac_addr[5];
2222
2223         REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
2224 }
2225
2226 static inline int
2227 bnx2_alloc_rx_page(struct bnx2 *bp, u16 index)
2228 {
2229         dma_addr_t mapping;
2230         struct sw_pg *rx_pg = &bp->rx_pg_ring[index];
2231         struct rx_bd *rxbd =
2232                 &bp->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
2233         struct page *page = alloc_page(GFP_ATOMIC);
2234
2235         if (!page)
2236                 return -ENOMEM;
2237         mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
2238                                PCI_DMA_FROMDEVICE);
2239         rx_pg->page = page;
2240         pci_unmap_addr_set(rx_pg, mapping, mapping);
2241         rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2242         rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2243         return 0;
2244 }
2245
2246 static void
2247 bnx2_free_rx_page(struct bnx2 *bp, u16 index)
2248 {
2249         struct sw_pg *rx_pg = &bp->rx_pg_ring[index];
2250         struct page *page = rx_pg->page;
2251
2252         if (!page)
2253                 return;
2254
2255         pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2256                        PCI_DMA_FROMDEVICE);
2257
2258         __free_page(page);
2259         rx_pg->page = NULL;
2260 }
2261
/* Allocate and DMA-map a receive skb for ring slot @index and publish
 * its address in the corresponding rx buffer descriptor.  Also advances
 * bp->rx_prod_bseq by the buffer size (the chip tracks the producer in
 * bytes).  Returns 0 on success, -ENOMEM if the skb allocation fails.
 * NOTE(review): the pci_map_single() result is not checked for mapping
 * errors here — confirm whether that is acceptable on this platform.
 */
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
{
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
	unsigned long align;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (skb == NULL) {
		return -ENOMEM;
	}

	/* Align skb->data to a BNX2_RX_ALIGN boundary for the hardware. */
	if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
		skb_reserve(skb, BNX2_RX_ALIGN - align);

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
		PCI_DMA_FROMDEVICE);

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
2292
2293 static int
2294 bnx2_phy_event_is_set(struct bnx2 *bp, u32 event)
2295 {
2296         struct status_block *sblk = bp->status_blk;
2297         u32 new_link_state, old_link_state;
2298         int is_set = 1;
2299
2300         new_link_state = sblk->status_attn_bits & event;
2301         old_link_state = sblk->status_attn_bits_ack & event;
2302         if (new_link_state != old_link_state) {
2303                 if (new_link_state)
2304                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2305                 else
2306                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2307         } else
2308                 is_set = 0;
2309
2310         return is_set;
2311 }
2312
/* Handle a PHY-related attention from the status block: a link-state
 * change updates the local link (under the PHY lock), and a timer-abort
 * attention is treated as a firmware event for the remote PHY path.
 */
static void
bnx2_phy_int(struct bnx2 *bp)
{
	if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_LINK_STATE)) {
		spin_lock(&bp->phy_lock);
		bnx2_set_link(bp);
		spin_unlock(&bp->phy_lock);
	}
	if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_TIMER_ABORT))
		bnx2_set_remote_link(bp);

}
2325
/* Reclaim completed TX buffer descriptors.
 *
 * Walks the TX ring from bp->tx_cons up to the consumer index the
 * hardware published in the status block, unmapping and freeing each
 * completed skb, then wakes the netdev queue if it was stopped and
 * enough descriptors are now free.
 */
static void
bnx2_tx_int(struct bnx2 *bp)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_free_bd = 0;

	hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
	/* The last index of each ring page is not a packet descriptor;
	 * step past it.
	 */
	if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &bp->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* partial BD completions possible with TSO packets */
		if (skb_is_gso(skb)) {
			u16 last_idx, last_ring_idx;

			/* Don't free this skb unless all of its BDs
			 * (header + nr_frags fragments) have completed.
			 */
			last_idx = sw_cons +
				skb_shinfo(skb)->nr_frags + 1;
			last_ring_idx = sw_ring_cons +
				skb_shinfo(skb)->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			/* Signed 16-bit compare handles index wraparound. */
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = skb_shinfo(skb)->nr_frags;

		/* Unmap each fragment BD that follows the header BD. */
		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			pci_unmap_page(bp->pdev,
				pci_unmap_addr(
					&bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		tx_free_bd += last + 1;

		dev_kfree_skb(skb);

		/* Re-sample the hardware consumer so newly completed
		 * packets are reclaimed in this same pass.
		 */
		hw_cons = bp->hw_tx_cons =
			sblk->status_tx_quick_consumer_index0;

		if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
			hw_cons++;
		}
	}

	bp->tx_cons = sw_cons;
	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_queue_stopped(bp->dev)) &&
		     (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
		/* Re-check under the TX lock to avoid racing with a
		 * concurrent queue stop in bnx2_start_xmit().
		 */
		netif_tx_lock(bp->dev);
		if ((netif_queue_stopped(bp->dev)) &&
		    (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
			netif_wake_queue(bp->dev);
		netif_tx_unlock(bp->dev);
	}
}
2413
/* Recycle the RX buffer at ring slot @cons into slot @prod (used when a
 * packet is dropped or copied, so the original buffer can be given back
 * to the hardware instead of allocating a new one).
 *
 * Hands the DMA buffer back to the device, moves the skb pointer and
 * DMA mapping from the consumer slot to the producer slot, and copies
 * the descriptor address.  Also advances bp->rx_prod_bseq.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
	u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &bp->rx_buf_ring[cons];
	prod_rx_buf = &bp->rx_buf_ring[prod];

	/* Give the (partially CPU-synced) buffer back to the device. */
	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;

	/* Same slot: mapping and descriptor are already in place. */
	if (cons == prod)
		return;

	pci_unmap_addr_set(prod_rx_buf, mapping,
			pci_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
2443
/* Finish receiving @skb of @len bytes: first refill the ring slot with
 * a fresh buffer, then unmap and hand over the received one.
 *
 * @ring_idx packs the producer index in the low 16 bits and the
 * consumer index in the high 16 bits.  If the replacement allocation
 * fails, the received buffer is recycled back into the ring and the
 * error is returned (the packet is dropped by the caller).
 */
static int
bnx2_rx_skb(struct bnx2 *bp, struct sk_buff *skb, unsigned int len,
	    dma_addr_t dma_addr, u32 ring_idx)
{
	int err;
	u16 prod = ring_idx & 0xffff;

	err = bnx2_alloc_rx_skb(bp, prod);
	if (unlikely(err)) {
		bnx2_reuse_rx_skb(bp, skb, (u16) (ring_idx >> 16), prod);
		return err;
	}

	/* Strip the l2_fhdr area in front of the frame data. */
	skb_reserve(skb, bp->rx_offset);
	pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
			 PCI_DMA_FROMDEVICE);

	skb_put(skb, len);
	return 0;
}
2464
2465 static inline u16
2466 bnx2_get_hw_rx_cons(struct bnx2 *bp)
2467 {
2468         u16 cons = bp->status_blk->status_rx_quick_consumer_index0;
2469
2470         if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
2471                 cons++;
2472         return cons;
2473 }
2474
/* NAPI receive path: drain up to @budget completed rx descriptors.
 * Frames at or below the copy threshold are copied into a small fresh
 * skb so the large ring buffer can be reused in place; larger frames
 * are passed up directly and the ring slot is refilled.  Returns the
 * number of packets delivered to the stack.
 */
static int
bnx2_rx_int(struct bnx2 *bp, int budget)
{
        u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
        struct l2_fhdr *rx_hdr;
        int rx_pkt = 0;

        hw_cons = bnx2_get_hw_rx_cons(bp);
        sw_cons = bp->rx_cons;
        sw_prod = bp->rx_prod;

        /* Memory barrier necessary as speculative reads of the rx
         * buffer can be ahead of the index in the status block
         */
        rmb();
        while (sw_cons != hw_cons) {
                unsigned int len;
                u32 status;
                struct sw_bd *rx_buf;
                struct sk_buff *skb;
                dma_addr_t dma_addr;

                sw_ring_cons = RX_RING_IDX(sw_cons);
                sw_ring_prod = RX_RING_IDX(sw_prod);

                rx_buf = &bp->rx_buf_ring[sw_ring_cons];
                skb = rx_buf->skb;

                rx_buf->skb = NULL;

                dma_addr = pci_unmap_addr(rx_buf, mapping);

                /* Sync only the frame header area for the CPU; the full
                 * buffer is unmapped later if the skb is passed up whole.
                 */
                pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
                        bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

                /* The chip places an l2_fhdr status header at the start
                 * of the buffer, ahead of the frame data at rx_offset.
                 */
                rx_hdr = (struct l2_fhdr *) skb->data;
                /* Strip the 4-byte frame CRC from the reported length. */
                len = rx_hdr->l2_fhdr_pkt_len - 4;

                /* Recycle the buffer on any hardware-flagged rx error. */
                if ((status = rx_hdr->l2_fhdr_status) &
                        (L2_FHDR_ERRORS_BAD_CRC |
                        L2_FHDR_ERRORS_PHY_DECODE |
                        L2_FHDR_ERRORS_ALIGNMENT |
                        L2_FHDR_ERRORS_TOO_SHORT |
                        L2_FHDR_ERRORS_GIANT_FRAME)) {

                        bnx2_reuse_rx_skb(bp, skb, sw_ring_cons, sw_ring_prod);
                        goto next_rx;
                }

                /* Small frame: copy into a fresh skb and recycle the
                 * original ring buffer in place.
                 */
                if (len <= bp->rx_copy_thresh) {
                        struct sk_buff *new_skb;

                        new_skb = netdev_alloc_skb(bp->dev, len + 2);
                        if (new_skb == NULL) {
                                bnx2_reuse_rx_skb(bp, skb, sw_ring_cons,
                                                  sw_ring_prod);
                                goto next_rx;
                        }

                        /* aligned copy */
                        skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
                                      new_skb->data, len + 2);
                        skb_reserve(new_skb, 2);
                        skb_put(new_skb, len);

                        bnx2_reuse_rx_skb(bp, skb,
                                sw_ring_cons, sw_ring_prod);

                        skb = new_skb;
                } else if (unlikely(bnx2_rx_skb(bp, skb, len, dma_addr,
                                    (sw_ring_cons << 16) | sw_ring_prod)))
                        goto next_rx;

                skb->protocol = eth_type_trans(skb, bp->dev);

                /* Drop oversize frames unless VLAN-tagged (ethertype
                 * 0x8100), which legitimately adds to the frame length.
                 */
                if ((len > (bp->dev->mtu + ETH_HLEN)) &&
                        (ntohs(skb->protocol) != 0x8100)) {

                        dev_kfree_skb(skb);
                        goto next_rx;

                }

                /* Trust the hardware checksum only when rx_csum is on,
                 * the frame was recognized as TCP/UDP, and no checksum
                 * error bits are set.
                 */
                skb->ip_summed = CHECKSUM_NONE;
                if (bp->rx_csum &&
                        (status & (L2_FHDR_STATUS_TCP_SEGMENT |
                        L2_FHDR_STATUS_UDP_DATAGRAM))) {

                        if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
                                              L2_FHDR_ERRORS_UDP_XSUM)) == 0))
                                skb->ip_summed = CHECKSUM_UNNECESSARY;
                }

#ifdef BCM_VLAN
                /* Hardware-stripped VLAN tag: hand to the vlan layer. */
                if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
                        vlan_hwaccel_receive_skb(skb, bp->vlgrp,
                                rx_hdr->l2_fhdr_vlan_tag);
                }
                else
#endif
                        netif_receive_skb(skb);

                bp->dev->last_rx = jiffies;
                rx_pkt++;

next_rx:
                sw_cons = NEXT_RX_BD(sw_cons);
                sw_prod = NEXT_RX_BD(sw_prod);

                if ((rx_pkt == budget))
                        break;

                /* Refresh hw_cons to see if there is new work */
                if (sw_cons == hw_cons) {
                        hw_cons = bnx2_get_hw_rx_cons(bp);
                        rmb();
                }
        }
        bp->rx_cons = sw_cons;
        bp->rx_prod = sw_prod;

        /* Tell the chip how far we have consumed/refilled the ring. */
        REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);

        REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);

        mmiowb();

        return rx_pkt;

}
2605
2606 /* MSI ISR - The only difference between this and the INTx ISR
2607  * is that the MSI interrupt is always serviced.
2608  */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
        struct net_device *dev = dev_instance;
        struct bnx2 *bp = netdev_priv(dev);

        prefetch(bp->status_blk);
        /* Ack and mask further interrupts; the NAPI poll re-enables
         * them when it completes (see bnx2_poll).
         */
        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
                BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

        /* Return here if interrupt is disabled. */
        if (unlikely(atomic_read(&bp->intr_sem) != 0))
                return IRQ_HANDLED;

        netif_rx_schedule(dev, &bp->napi);

        return IRQ_HANDLED;
}
2628
2629 static irqreturn_t
2630 bnx2_msi_1shot(int irq, void *dev_instance)
2631 {
2632         struct net_device *dev = dev_instance;
2633         struct bnx2 *bp = netdev_priv(dev);
2634
2635         prefetch(bp->status_blk);
2636
2637         /* Return here if interrupt is disabled. */
2638         if (unlikely(atomic_read(&bp->intr_sem) != 0))
2639                 return IRQ_HANDLED;
2640
2641         netif_rx_schedule(dev, &bp->napi);
2642
2643         return IRQ_HANDLED;
2644 }
2645
/* INTx (legacy, possibly shared) interrupt handler. */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
        struct net_device *dev = dev_instance;
        struct bnx2 *bp = netdev_priv(dev);
        struct status_block *sblk = bp->status_blk;

        /* When using INTx, it is possible for the interrupt to arrive
         * at the CPU before the status block posted prior to the
         * interrupt. Reading a register will flush the status block.
         * When using MSI, the MSI message will always complete after
         * the status block write.
         */
        /* Status index unchanged and INTA deasserted: shared-line
         * interrupt that is not ours.
         */
        if ((sblk->status_idx == bp->last_status_idx) &&
            (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
             BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
                return IRQ_NONE;

        /* Ack and mask; NAPI completion re-enables interrupts. */
        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
                BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

        /* Read back to deassert IRQ immediately to avoid too many
         * spurious interrupts.
         */
        REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

        /* Return here if interrupt is shared and is disabled. */
        if (unlikely(atomic_read(&bp->intr_sem) != 0))
                return IRQ_HANDLED;

        /* Only record the status index and schedule if a poll is not
         * already pending; prep fails when NAPI is already scheduled.
         */
        if (netif_rx_schedule_prep(dev, &bp->napi)) {
                bp->last_status_idx = sblk->status_idx;
                __netif_rx_schedule(dev, &bp->napi);
        }

        return IRQ_HANDLED;
}
2684
/* Attention-status bits the driver services (link state plus the
 * timer-abort event).  An event is pending while these bits differ
 * between status_attn_bits and status_attn_bits_ack (see
 * bnx2_has_work / bnx2_poll_work).
 */
#define STATUS_ATTN_EVENTS      (STATUS_ATTN_BITS_LINK_STATE | \
                                 STATUS_ATTN_BITS_TIMER_ABORT)
2687
2688 static inline int
2689 bnx2_has_work(struct bnx2 *bp)
2690 {
2691         struct status_block *sblk = bp->status_blk;
2692
2693         if ((bnx2_get_hw_rx_cons(bp) != bp->rx_cons) ||
2694             (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
2695                 return 1;
2696
2697         if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
2698             (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
2699                 return 1;
2700
2701         return 0;
2702 }
2703
/* One pass of NAPI work: service pending attention (link) events,
 * reap tx completions, then receive up to the remaining budget.
 * Returns the updated work_done count.
 */
static int bnx2_poll_work(struct bnx2 *bp, int work_done, int budget)
{
        struct status_block *sblk = bp->status_blk;
        u32 status_attn_bits = sblk->status_attn_bits;
        u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

        /* An attention event is pending while raw and acked bits differ. */
        if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
            (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

                bnx2_phy_int(bp);

                /* This is needed to take care of transient status
                 * during link changes.
                 */
                REG_WR(bp, BNX2_HC_COMMAND,
                       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
                /* Read back so the write completes before proceeding. */
                REG_RD(bp, BNX2_HC_COMMAND);
        }

        if (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
                bnx2_tx_int(bp);

        if (bnx2_get_hw_rx_cons(bp) != bp->rx_cons)
                work_done += bnx2_rx_int(bp, budget - work_done);

        return work_done;
}
2731
/* NAPI poll callback: loop on bnx2_poll_work() until either the budget
 * is exhausted (stay scheduled) or no work remains, in which case the
 * poll is completed and interrupts are re-enabled via the INT_ACK_CMD
 * register.  Returns the number of rx packets processed.
 */
static int bnx2_poll(struct napi_struct *napi, int budget)
{
        struct bnx2 *bp = container_of(napi, struct bnx2, napi);
        int work_done = 0;
        struct status_block *sblk = bp->status_blk;

        while (1) {
                work_done = bnx2_poll_work(bp, work_done, budget);

                if (unlikely(work_done >= budget))
                        break;

                /* bp->last_status_idx is used below to tell the hw how
                 * much work has been processed, so we must read it before
                 * checking for more work.
                 */
                bp->last_status_idx = sblk->status_idx;
                rmb();
                if (likely(!bnx2_has_work(bp))) {
                        netif_rx_complete(bp->dev, napi);
                        /* MSI: a single ack write re-enables interrupts. */
                        if (likely(bp->flags & USING_MSI_FLAG)) {
                                REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                                       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                                       bp->last_status_idx);
                                break;
                        }
                        /* INTx: first update the index with the line
                         * still masked, then write again without the
                         * mask bit to unmask.
                         */
                        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                               BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
                               bp->last_status_idx);

                        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                               bp->last_status_idx);
                        break;
                }
        }

        return work_done;
}
2772
2773 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
2774  * from set_multicast.
2775  */
/* Program the EMAC receive mode and multicast filtering to match the
 * net_device flags and multicast list, then reprogram the RPM sort
 * engine (disable, load, enable sequence).
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        u32 rx_mode, sort_mode;
        int i;

        spin_lock_bh(&bp->phy_lock);

        /* Start from the current mode with promiscuous and VLAN-keep
         * cleared; they are re-added below as needed.
         */
        rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
                                  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
        sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
        /* Keep VLAN tags in the frame only when no vlan group is
         * registered and ASF is not enabled.
         */
        if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
                rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
        if (!(bp->flags & ASF_ENABLE_FLAG))
                rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
        if (dev->flags & IFF_PROMISC) {
                /* Promiscuous mode. */
                rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
                sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
                             BNX2_RPM_SORT_USER0_PROM_VLAN;
        }
        else if (dev->flags & IFF_ALLMULTI) {
                /* Accept all multicast: set every hash bit. */
                for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
                        REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
                               0xffffffff);
                }
                sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
        }
        else {
                /* Accept one or more multicast(s). */
                struct dev_mc_list *mclist;
                u32 mc_filter[NUM_MC_HASH_REGISTERS];
                u32 regidx;
                u32 bit;
                u32 crc;

                memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

                /* Hash each address into one bit of the 256-bit filter:
                 * low CRC byte selects register (top 3 bits) and bit
                 * position (low 5 bits).
                 */
                for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
                     i++, mclist = mclist->next) {

                        crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
                        bit = crc & 0xff;
                        regidx = (bit & 0xe0) >> 5;
                        bit &= 0x1f;
                        mc_filter[regidx] |= (1 << bit);
                }

                for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
                        REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
                               mc_filter[i]);
                }

                sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
        }

        /* Only touch the EMAC mode register when it actually changes. */
        if (rx_mode != bp->rx_mode) {
                bp->rx_mode = rx_mode;
                REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
        }

        /* Reprogram the sort engine: clear, load mode, then enable. */
        REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
        REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
        REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

        spin_unlock_bh(&bp->phy_lock);
}
2847
/* Load an RV2P processor firmware image, one 64-bit instruction per
 * iteration through the INSTR_HIGH/INSTR_LOW register pair, committing
 * each word with a PROC{1,2}_ADDR_CMD write.  @rv2p_code_len is in
 * bytes.  The processor is left in reset; un-stall happens later.
 *
 * NOTE(review): cpu_to_le32() applied to values passed into REG_WR
 * looks like it could double-swap on big-endian hosts depending on
 * REG_WR's own byte handling — confirm against the REG_WR definition.
 */
static void
load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
        u32 rv2p_proc)
{
        int i;
        u32 val;


        for (i = 0; i < rv2p_code_len; i += 8) {
                REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
                rv2p_code++;
                REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
                rv2p_code++;

                /* Commit the staged instruction at word address i/8. */
                if (rv2p_proc == RV2P_PROC1) {
                        val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
                        REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
                }
                else {
                        val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
                        REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
                }
        }

        /* Reset the processor, un-stall is done later. */
        if (rv2p_proc == RV2P_PROC1) {
                REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
        }
        else {
                REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
        }
}
2880
/* Load firmware into one on-chip CPU: halt it, write the text (after
 * zlib decompression), data, zeroed sbss/bss, and rodata sections into
 * its scratchpad, point the PC at the entry address, and un-halt.
 * Returns 0 on success or the zlib error code.
 *
 * NOTE(review): the text section is written with cpu_to_le32() but the
 * data/rodata sections are written raw — confirm whether this
 * asymmetry is intentional (it only matters on big-endian hosts).
 */
static int
load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
{
        u32 offset;
        u32 val;
        int rc;

        /* Halt the CPU. */
        val = REG_RD_IND(bp, cpu_reg->mode);
        val |= cpu_reg->mode_value_halt;
        REG_WR_IND(bp, cpu_reg->mode, val);
        REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);

        /* Load the Text area. */
        offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
        if (fw->gz_text) {
                int j;

                /* Decompress into the caller-provided fw->text buffer. */
                rc = zlib_inflate_blob(fw->text, FW_BUF_SIZE, fw->gz_text,
                                       fw->gz_text_len);
                if (rc < 0)
                        return rc;

                for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
                        REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
                }
        }

        /* Load the Data area. */
        offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
        if (fw->data) {
                int j;

                for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
                        REG_WR_IND(bp, offset, fw->data[j]);
                }
        }

        /* Load the SBSS area. */
        offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
        if (fw->sbss_len) {
                int j;

                for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
                        REG_WR_IND(bp, offset, 0);
                }
        }

        /* Load the BSS area. */
        offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
        if (fw->bss_len) {
                int j;

                for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
                        REG_WR_IND(bp, offset, 0);
                }
        }

        /* Load the Read-Only area. */
        offset = cpu_reg->spad_base +
                (fw->rodata_addr - cpu_reg->mips_view_base);
        if (fw->rodata) {
                int j;

                for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
                        REG_WR_IND(bp, offset, fw->rodata[j]);
                }
        }

        /* Clear the pre-fetch instruction. */
        REG_WR_IND(bp, cpu_reg->inst, 0);
        REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);

        /* Start the CPU. */
        val = REG_RD_IND(bp, cpu_reg->mode);
        val &= ~cpu_reg->mode_value_halt;
        REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
        REG_WR_IND(bp, cpu_reg->mode, val);

        return 0;
}
2962
2963 static int
2964 bnx2_init_cpus(struct bnx2 *bp)
2965 {
2966         struct cpu_reg cpu_reg;
2967         struct fw_info *fw;
2968         int rc, rv2p_len;
2969         void *text, *rv2p;
2970
2971         /* Initialize the RV2P processor. */
2972         text = vmalloc(FW_BUF_SIZE);
2973         if (!text)
2974                 return -ENOMEM;
2975         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
2976                 rv2p = bnx2_xi_rv2p_proc1;
2977                 rv2p_len = sizeof(bnx2_xi_rv2p_proc1);
2978         } else {
2979                 rv2p = bnx2_rv2p_proc1;
2980                 rv2p_len = sizeof(bnx2_rv2p_proc1);
2981         }
2982         rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
2983         if (rc < 0)
2984                 goto init_cpu_err;
2985
2986         load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC1);
2987
2988         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
2989                 rv2p = bnx2_xi_rv2p_proc2;
2990                 rv2p_len = sizeof(bnx2_xi_rv2p_proc2);
2991         } else {
2992                 rv2p = bnx2_rv2p_proc2;
2993                 rv2p_len = sizeof(bnx2_rv2p_proc2);
2994         }
2995         rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
2996         if (rc < 0)
2997                 goto init_cpu_err;
2998
2999         load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC2);
3000
3001         /* Initialize the RX Processor. */
3002         cpu_reg.mode = BNX2_RXP_CPU_MODE;
3003         cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
3004         cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
3005         cpu_reg.state = BNX2_RXP_CPU_STATE;
3006         cpu_reg.state_value_clear = 0xffffff;
3007         cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
3008         cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
3009         cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
3010         cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
3011         cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
3012         cpu_reg.spad_base = BNX2_RXP_SCRATCH;
3013         cpu_reg.mips_view_base = 0x8000000;
3014
3015         if (CHIP_NUM(bp) == CHIP_NUM_5709)
3016                 fw = &bnx2_rxp_fw_09;
3017         else
3018                 fw = &bnx2_rxp_fw_06;
3019
3020         fw->text = text;
3021         rc = load_cpu_fw(bp, &cpu_reg, fw);
3022         if (rc)
3023                 goto init_cpu_err;
3024
3025         /* Initialize the TX Processor. */
3026         cpu_reg.mode = BNX2_TXP_CPU_MODE;
3027         cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
3028         cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
3029         cpu_reg.state = BNX2_TXP_CPU_STATE;
3030         cpu_reg.state_value_clear = 0xffffff;
3031         cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
3032         cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
3033         cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
3034         cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
3035         cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
3036         cpu_reg.spad_base = BNX2_TXP_SCRATCH;
3037         cpu_reg.mips_view_base = 0x8000000;
3038
3039         if (CHIP_NUM(bp) == CHIP_NUM_5709)
3040                 fw = &bnx2_txp_fw_09;
3041         else
3042                 fw = &bnx2_txp_fw_06;
3043
3044         fw->text = text;
3045         rc = load_cpu_fw(bp, &cpu_reg, fw);
3046         if (rc)
3047                 goto init_cpu_err;
3048
3049         /* Initialize the TX Patch-up Processor. */
3050         cpu_reg.mode = BNX2_TPAT_CPU_MODE;
3051         cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
3052         cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
3053         cpu_reg.state = BNX2_TPAT_CPU_STATE;
3054         cpu_reg.state_value_clear = 0xffffff;
3055         cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
3056         cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
3057         cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
3058         cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
3059         cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
3060         cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
3061         cpu_reg.mips_view_base = 0x8000000;
3062
3063         if (CHIP_NUM(bp) == CHIP_NUM_5709)
3064                 fw = &bnx2_tpat_fw_09;
3065         else
3066                 fw = &bnx2_tpat_fw_06;
3067
3068         fw->text = text;
3069         rc = load_cpu_fw(bp, &cpu_reg, fw);
3070         if (rc)
3071                 goto init_cpu_err;
3072
3073         /* Initialize the Completion Processor. */
3074         cpu_reg.mode = BNX2_COM_CPU_MODE;
3075         cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
3076         cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
3077         cpu_reg.state = BNX2_COM_CPU_STATE;
3078         cpu_reg.state_value_clear = 0xffffff;
3079         cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
3080         cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
3081         cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
3082         cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
3083         cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
3084         cpu_reg.spad_base = BNX2_COM_SCRATCH;
3085         cpu_reg.mips_view_base = 0x8000000;
3086
3087         if (CHIP_NUM(bp) == CHIP_NUM_5709)
3088                 fw = &bnx2_com_fw_09;
3089         else
3090                 fw = &bnx2_com_fw_06;
3091
3092         fw->text = text;
3093         rc = load_cpu_fw(bp, &cpu_reg, fw);
3094         if (rc)
3095                 goto init_cpu_err;
3096
3097         /* Initialize the Command Processor. */
3098         cpu_reg.mode = BNX2_CP_CPU_MODE;
3099         cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
3100         cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
3101         cpu_reg.state = BNX2_CP_CPU_STATE;
3102         cpu_reg.state_value_clear = 0xffffff;
3103         cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
3104         cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
3105         cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
3106         cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
3107         cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
3108         cpu_reg.spad_base = BNX2_CP_SCRATCH;
3109         cpu_reg.mips_view_base = 0x8000000;
3110
3111         if (CHIP_NUM(bp) == CHIP_NUM_5709)
3112                 fw = &bnx2_cp_fw_09;
3113         else
3114                 fw = &bnx2_cp_fw_06;
3115
3116         fw->text = text;
3117         rc = load_cpu_fw(bp, &cpu_reg, fw);
3118
3119 init_cpu_err:
3120         vfree(text);
3121         return rc;
3122 }
3123
/* Transition the device between PCI power states.  D0 clears magic /
 * ACPI packet handling and any pending PME status; D3hot optionally
 * arms Wake-on-LAN (forcing a low-speed autoneg on copper ports to
 * save power), notifies the firmware, and writes the PM control
 * register.  Only D0 and D3hot are supported; anything else returns
 * -EINVAL.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
        u16 pmcsr;

        pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

        switch (state) {
        case PCI_D0: {
                u32 val;

                /* Clear the power-state field and ack any pending PME. */
                pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
                        (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
                        PCI_PM_CTRL_PME_STATUS);

                if (pmcsr & PCI_PM_CTRL_STATE_MASK)
                        /* delay required during transition out of D3hot */
                        msleep(20);

                val = REG_RD(bp, BNX2_EMAC_MODE);
                val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
                val &= ~BNX2_EMAC_MODE_MPKT;
                REG_WR(bp, BNX2_EMAC_MODE, val);

                val = REG_RD(bp, BNX2_RPM_CONFIG);
                val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
                REG_WR(bp, BNX2_RPM_CONFIG, val);
                break;
        }
        case PCI_D3hot: {
                int i;
                u32 val, wol_msg;

                if (bp->wol) {
                        u32 advertising;
                        u8 autoneg;

                        /* Save autoneg settings; restored after forcing
                         * a low-speed link for the WOL period.
                         */
                        autoneg = bp->autoneg;
                        advertising = bp->advertising;

                        if (bp->phy_port == PORT_TP) {
                                bp->autoneg = AUTONEG_SPEED;
                                bp->advertising = ADVERTISED_10baseT_Half |
                                        ADVERTISED_10baseT_Full |
                                        ADVERTISED_100baseT_Half |
                                        ADVERTISED_100baseT_Full |
                                        ADVERTISED_Autoneg;
                        }

                        spin_lock_bh(&bp->phy_lock);
                        bnx2_setup_phy(bp, bp->phy_port);
                        spin_unlock_bh(&bp->phy_lock);

                        bp->autoneg = autoneg;
                        bp->advertising = advertising;

                        bnx2_set_mac_addr(bp);

                        val = REG_RD(bp, BNX2_EMAC_MODE);

                        /* Enable port mode. */
                        val &= ~BNX2_EMAC_MODE_PORT;
                        val |= BNX2_EMAC_MODE_MPKT_RCVD |
                               BNX2_EMAC_MODE_ACPI_RCVD |
                               BNX2_EMAC_MODE_MPKT;
                        if (bp->phy_port == PORT_TP)
                                val |= BNX2_EMAC_MODE_PORT_MII;
                        else {
                                val |= BNX2_EMAC_MODE_PORT_GMII;
                                if (bp->line_speed == SPEED_2500)
                                        val |= BNX2_EMAC_MODE_25G_MODE;
                        }

                        REG_WR(bp, BNX2_EMAC_MODE, val);

                        /* receive all multicast */
                        for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
                                REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
                                       0xffffffff);
                        }
                        REG_WR(bp, BNX2_EMAC_RX_MODE,
                               BNX2_EMAC_RX_MODE_SORT_MODE);

                        /* Sort on broadcast and multicast while asleep. */
                        val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
                              BNX2_RPM_SORT_USER0_MC_EN;
                        REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
                        REG_WR(bp, BNX2_RPM_SORT_USER0, val);
                        REG_WR(bp, BNX2_RPM_SORT_USER0, val |
                               BNX2_RPM_SORT_USER0_ENA);

                        /* Need to enable EMAC and RPM for WOL. */
                        REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
                               BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
                               BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
                               BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

                        val = REG_RD(bp, BNX2_RPM_CONFIG);
                        val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
                        REG_WR(bp, BNX2_RPM_CONFIG, val);

                        wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
                }
                else {
                        wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
                }

                /* Tell firmware we are suspending (with or without WOL). */
                if (!(bp->flags & NO_WOL_FLAG))
                        bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);

                pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
                /* 3 = D3hot in the PM control state field.  5706 A0/A1
                 * only enter D3hot when WOL is armed — presumably a
                 * chip-rev quirk; confirm against the errata.
                 */
                if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
                    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

                        if (bp->wol)
                                pmcsr |= 3;
                }
                else {
                        pmcsr |= 3;
                }
                if (bp->wol) {
                        pmcsr |= PCI_PM_CTRL_PME_ENABLE;
                }
                pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
                                      pmcsr);

                /* No more memory access after this point until
                 * device is brought back to D0.
                 */
                udelay(50);
                break;
        }
        default:
                return -EINVAL;
        }
        return 0;
}
3260
3261 static int
3262 bnx2_acquire_nvram_lock(struct bnx2 *bp)
3263 {
3264         u32 val;
3265         int j;
3266
3267         /* Request access to the flash interface. */
3268         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3269         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3270                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3271                 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3272                         break;
3273
3274                 udelay(5);
3275         }
3276
3277         if (j >= NVRAM_TIMEOUT_COUNT)
3278                 return -EBUSY;
3279
3280         return 0;
3281 }
3282
3283 static int
3284 bnx2_release_nvram_lock(struct bnx2 *bp)
3285 {
3286         int j;
3287         u32 val;
3288
3289         /* Relinquish nvram interface. */
3290         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3291
3292         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3293                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3294                 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3295                         break;
3296
3297                 udelay(5);
3298         }
3299
3300         if (j >= NVRAM_TIMEOUT_COUNT)
3301                 return -EBUSY;
3302
3303         return 0;
3304 }
3305
3306
3307 static int
3308 bnx2_enable_nvram_write(struct bnx2 *bp)
3309 {
3310         u32 val;
3311
3312         val = REG_RD(bp, BNX2_MISC_CFG);
3313         REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3314
3315         if (bp->flash_info->flags & BNX2_NV_WREN) {
3316                 int j;
3317
3318                 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3319                 REG_WR(bp, BNX2_NVM_COMMAND,
3320                        BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3321
3322                 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3323                         udelay(5);
3324
3325                         val = REG_RD(bp, BNX2_NVM_COMMAND);
3326                         if (val & BNX2_NVM_COMMAND_DONE)
3327                                 break;
3328                 }
3329
3330                 if (j >= NVRAM_TIMEOUT_COUNT)
3331                         return -EBUSY;
3332         }
3333         return 0;
3334 }
3335
3336 static void
3337 bnx2_disable_nvram_write(struct bnx2 *bp)
3338 {
3339         u32 val;
3340
3341         val = REG_RD(bp, BNX2_MISC_CFG);
3342         REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3343 }
3344
3345
3346 static void
3347 bnx2_enable_nvram_access(struct bnx2 *bp)
3348 {
3349         u32 val;
3350
3351         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3352         /* Enable both bits, even on read. */
3353         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3354                val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3355 }
3356
3357 static void
3358 bnx2_disable_nvram_access(struct bnx2 *bp)
3359 {
3360         u32 val;
3361
3362         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3363         /* Disable both bits, even after read. */
3364         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3365                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3366                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
3367 }
3368
3369 static int
3370 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3371 {
3372         u32 cmd;
3373         int j;
3374
3375         if (bp->flash_info->flags & BNX2_NV_BUFFERED)
3376                 /* Buffered flash, no erase needed */
3377                 return 0;
3378
3379         /* Build an erase command */
3380         cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3381               BNX2_NVM_COMMAND_DOIT;
3382
3383         /* Need to clear DONE bit separately. */
3384         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3385
3386         /* Address of the NVRAM to read from. */
3387         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3388
3389         /* Issue an erase command. */
3390         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3391
3392         /* Wait for completion. */
3393         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3394                 u32 val;
3395
3396                 udelay(5);
3397
3398                 val = REG_RD(bp, BNX2_NVM_COMMAND);
3399                 if (val & BNX2_NVM_COMMAND_DONE)
3400                         break;
3401         }
3402
3403         if (j >= NVRAM_TIMEOUT_COUNT)
3404                 return -EBUSY;
3405
3406         return 0;
3407 }
3408
/* Read one 32-bit word of NVRAM at @offset into @ret_val (4 raw bytes).
 * @cmd_flags carries the BNX2_NVM_COMMAND_FIRST/LAST framing bits for
 * multi-word transfers.  The caller must already hold the NVRAM lock
 * and have access enabled.  Returns 0 on success, -EBUSY on timeout.
 */
static int
bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
{
	u32 cmd;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate an offset of a buffered flash, not needed for 5709. */
	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
		/* Translate the linear offset into the page/column form
		 * that a buffered flash part expects. */
		offset = ((offset / bp->flash_info->page_size) <<
			   bp->flash_info->page_bits) +
			  (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue a read command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE) {
			val = REG_RD(bp, BNX2_NVM_READ);

			/* Convert to big-endian so ret_val receives the
			 * raw flash byte order. */
			val = be32_to_cpu(val);
			memcpy(ret_val, &val, 4);
			break;
		}
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
3454
3455
/* Write one 32-bit word (@val, 4 raw bytes) to NVRAM at @offset.
 * @cmd_flags carries the BNX2_NVM_COMMAND_FIRST/LAST framing bits.
 * The caller must hold the NVRAM lock with access and write enabled.
 * Returns 0 on success, -EBUSY on timeout.
 */
static int
bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
{
	u32 cmd, val32;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;

	/* Calculate an offset of a buffered flash, not needed for 5709. */
	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
		offset = ((offset / bp->flash_info->page_size) <<
			  bp->flash_info->page_bits) +
			 (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* The data register takes the word in big-endian (flash) byte
	 * order. */
	memcpy(&val32, val, 4);
	val32 = cpu_to_be32(val32);

	/* Write the data. */
	REG_WR(bp, BNX2_NVM_WRITE, val32);

	/* Address of the NVRAM to write to. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue the write command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		udelay(5);

		if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
			break;
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
3499
/* Identify the installed flash/EEPROM part, record it in
 * bp->flash_info, and determine the usable flash size.  The 5709 uses
 * a single fixed entry; other chips are matched against flash_table[]
 * by the strapping bits read from NVM_CFG1.  Returns 0 on success,
 * -EBUSY if the NVRAM lock cannot be acquired while reconfiguring,
 * or -ENODEV for an unrecognized part.
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc = 0;
	struct flash_spec *flash;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->flash_info = &flash_5709;
		goto get_flash_size;
	}

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = ARRAY_SIZE(flash_table);

	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			/* Match on the backup-strap bits of config1. */
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects which strapping mask applies. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
			j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	/* Either loop ran to completion without a match: unknown part. */
	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

get_flash_size:
	/* Prefer the size advertised in shared hw config; fall back to
	 * the table entry's total size when it is zero. */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
3582
3583 static int
3584 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
3585                 int buf_size)
3586 {
3587         int rc = 0;
3588         u32 cmd_flags, offset32, len32, extra;
3589
3590         if (buf_size == 0)
3591                 return 0;
3592
3593         /* Request access to the flash interface. */
3594         if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3595                 return rc;
3596
3597         /* Enable access to flash interface */
3598         bnx2_enable_nvram_access(bp);
3599
3600         len32 = buf_size;
3601         offset32 = offset;
3602         extra = 0;
3603
3604         cmd_flags = 0;
3605
3606         if (offset32 & 3) {
3607                 u8 buf[4];
3608                 u32 pre_len;
3609
3610                 offset32 &= ~3;
3611                 pre_len = 4 - (offset & 3);
3612
3613                 if (pre_len >= len32) {
3614                         pre_len = len32;
3615                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
3616                                     BNX2_NVM_COMMAND_LAST;
3617                 }
3618                 else {
3619                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
3620                 }
3621
3622                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3623
3624                 if (rc)
3625                         return rc;
3626
3627                 memcpy(ret_buf, buf + (offset & 3), pre_len);
3628
3629                 offset32 += 4;
3630                 ret_buf += pre_len;
3631                 len32 -= pre_len;
3632         }
3633         if (len32 & 3) {
3634                 extra = 4 - (len32 & 3);
3635                 len32 = (len32 + 4) & ~3;
3636         }
3637
3638         if (len32 == 4) {
3639                 u8 buf[4];
3640
3641                 if (cmd_flags)
3642                         cmd_flags = BNX2_NVM_COMMAND_LAST;
3643                 else
3644                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
3645                                     BNX2_NVM_COMMAND_LAST;
3646
3647                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3648
3649                 memcpy(ret_buf, buf, 4 - extra);
3650         }
3651         else if (len32 > 0) {
3652                 u8 buf[4];
3653
3654                 /* Read the first word. */
3655                 if (cmd_flags)
3656                         cmd_flags = 0;
3657                 else
3658                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
3659
3660                 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3661
3662                 /* Advance to the next dword. */
3663                 offset32 += 4;
3664                 ret_buf += 4;
3665                 len32 -= 4;
3666
3667                 while (len32 > 4 && rc == 0) {
3668                         rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3669
3670                         /* Advance to the next dword. */
3671                         offset32 += 4;
3672                         ret_buf += 4;
3673                         len32 -= 4;
3674                 }
3675
3676                 if (rc)
3677                         return rc;
3678
3679                 cmd_flags = BNX2_NVM_COMMAND_LAST;
3680                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3681
3682                 memcpy(ret_buf, buf, 4 - extra);
3683         }
3684
3685         /* Disable access to flash interface */
3686         bnx2_disable_nvram_access(bp);
3687
3688         bnx2_release_nvram_lock(bp);
3689
3690         return rc;
3691 }
3692
3693 static int
3694 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
3695                 int buf_size)
3696 {
3697         u32 written, offset32, len32;
3698         u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
3699         int rc = 0;
3700         int align_start, align_end;
3701
3702         buf = data_buf;
3703         offset32 = offset;
3704         len32 = buf_size;
3705         align_start = align_end = 0;
3706
3707         if ((align_start = (offset32 & 3))) {
3708                 offset32 &= ~3;
3709                 len32 += align_start;
3710                 if (len32 < 4)
3711                         len32 = 4;
3712                 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
3713                         return rc;
3714         }
3715
3716         if (len32 & 3) {
3717                 align_end = 4 - (len32 & 3);
3718                 len32 += align_end;
3719                 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
3720                         return rc;
3721         }
3722
3723         if (align_start || align_end) {
3724                 align_buf = kmalloc(len32, GFP_KERNEL);
3725                 if (align_buf == NULL)
3726                         return -ENOMEM;
3727                 if (align_start) {
3728                         memcpy(align_buf, start, 4);
3729                 }
3730                 if (align_end) {
3731                         memcpy(align_buf + len32 - 4, end, 4);
3732                 }
3733                 memcpy(align_buf + align_start, data_buf, buf_size);
3734                 buf = align_buf;
3735         }
3736
3737         if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
3738                 flash_buffer = kmalloc(264, GFP_KERNEL);
3739                 if (flash_buffer == NULL) {
3740                         rc = -ENOMEM;
3741                         goto nvram_write_end;
3742                 }
3743         }
3744
3745         written = 0;
3746         while ((written < len32) && (rc == 0)) {
3747                 u32 page_start, page_end, data_start, data_end;
3748                 u32 addr, cmd_flags;
3749                 int i;
3750
3751                 /* Find the page_start addr */
3752                 page_start = offset32 + written;
3753                 page_start -= (page_start % bp->flash_info->page_size);
3754                 /* Find the page_end addr */
3755                 page_end = page_start + bp->flash_info->page_size;
3756                 /* Find the data_start addr */
3757                 data_start = (written == 0) ? offset32 : page_start;
3758                 /* Find the data_end addr */
3759                 data_end = (page_end > offset32 + len32) ?
3760                         (offset32 + len32) : page_end;
3761
3762                 /* Request access to the flash interface. */
3763                 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3764                         goto nvram_write_end;
3765
3766                 /* Enable access to flash interface */
3767                 bnx2_enable_nvram_access(bp);
3768
3769                 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3770                 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
3771                         int j;
3772
3773                         /* Read the whole page into the buffer
3774                          * (non-buffer flash only) */
3775                         for (j = 0; j < bp->flash_info->page_size; j += 4) {
3776                                 if (j == (bp->flash_info->page_size - 4)) {
3777                                         cmd_flags |= BNX2_NVM_COMMAND_LAST;
3778                                 }
3779                                 rc = bnx2_nvram_read_dword(bp,
3780                                         page_start + j,
3781                                         &flash_buffer[j],
3782                                         cmd_flags);
3783
3784                                 if (rc)
3785                                         goto nvram_write_end;
3786
3787                                 cmd_flags = 0;
3788                         }
3789                 }
3790
3791                 /* Enable writes to flash interface (unlock write-protect) */
3792                 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
3793                         goto nvram_write_end;
3794
3795                 /* Loop to write back the buffer data from page_start to
3796                  * data_start */
3797                 i = 0;
3798                 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
3799                         /* Erase the page */
3800                         if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
3801                                 goto nvram_write_end;
3802
3803                         /* Re-enable the write again for the actual write */
3804                         bnx2_enable_nvram_write(bp);
3805
3806                         for (addr = page_start; addr < data_start;
3807                                 addr += 4, i += 4) {
3808
3809                                 rc = bnx2_nvram_write_dword(bp, addr,
3810                                         &flash_buffer[i], cmd_flags);
3811
3812                                 if (rc != 0)
3813                                         goto nvram_write_end;
3814
3815                                 cmd_flags = 0;
3816                         }
3817                 }
3818
3819                 /* Loop to write the new data from data_start to data_end */
3820                 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
3821                         if ((addr == page_end - 4) ||
3822                                 ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
3823                                  (addr == data_end - 4))) {
3824
3825                                 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3826                         }
3827                         rc = bnx2_nvram_write_dword(bp, addr, buf,
3828                                 cmd_flags);
3829
3830                         if (rc != 0)
3831                                 goto nvram_write_end;
3832
3833                         cmd_flags = 0;
3834                         buf += 4;
3835                 }
3836
3837                 /* Loop to write back the buffer data from data_end
3838                  * to page_end */
3839                 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
3840                         for (addr = data_end; addr < page_end;
3841                                 addr += 4, i += 4) {
3842
3843                                 if (addr == page_end-4) {
3844                                         cmd_flags = BNX2_NVM_COMMAND_LAST;
3845                                 }
3846                                 rc = bnx2_nvram_write_dword(bp, addr,
3847                                         &flash_buffer[i], cmd_flags);
3848
3849                                 if (rc != 0)
3850                                         goto nvram_write_end;
3851
3852                                 cmd_flags = 0;
3853                         }
3854                 }
3855
3856                 /* Disable writes to flash interface (lock write-protect) */
3857                 bnx2_disable_nvram_write(bp);
3858
3859                 /* Disable access to flash interface */
3860                 bnx2_disable_nvram_access(bp);
3861                 bnx2_release_nvram_lock(bp);
3862
3863                 /* Increment written */
3864                 written += data_end - data_start;
3865         }
3866
3867 nvram_write_end:
3868         kfree(flash_buffer);
3869         kfree(align_buf);
3870         return rc;
3871 }
3872
/* Probe the firmware for remote PHY capability (SerDes devices only).
 * Sets or clears REMOTE_PHY_CAP_FLAG, derives bp->phy_port from the
 * firmware-reported link status, and, if the interface is running,
 * syncs the netdev carrier state and acks the capability back to the
 * firmware.  Caller holds bp->phy_lock (see bnx2_reset_chip).
 */
static void
bnx2_init_remote_phy(struct bnx2 *bp)
{
	u32 val;

	bp->phy_flags &= ~REMOTE_PHY_CAP_FLAG;
	/* Remote PHY only applies to SerDes devices. */
	if (!(bp->phy_flags & PHY_SERDES_FLAG))
		return;

	/* Validate the firmware capability mailbox signature. */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_CAP_MB);
	if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
		return;

	if (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE) {
		bp->phy_flags |= REMOTE_PHY_CAP_FLAG;

		/* Let the firmware's link status pick the port type. */
		val = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
		if (val & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (netif_running(bp->dev)) {
			u32 sig;

			/* Mirror the firmware link state into the netdev. */
			if (val & BNX2_LINK_STATUS_LINK_UP) {
				bp->link_up = 1;
				netif_carrier_on(bp->dev);
			} else {
				bp->link_up = 0;
				netif_carrier_off(bp->dev);
			}
			/* Acknowledge remote PHY support to the firmware. */
			sig = BNX2_DRV_ACK_CAP_SIGNATURE |
			      BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
			REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_ACK_CAP_MB,
				   sig);
		}
	}
}
3912
/* Perform a soft reset of the chip and wait for the bootcode to finish
 * re-initializing.  @reset_code is the BNX2_DRV_MSG_CODE_* reason
 * reported to the firmware before and after the reset.  Returns 0 on
 * success or a negative errno.
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;
	u8 old_port;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
	       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
	udelay(5);

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
		   BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = REG_RD(bp, BNX2_MISC_ID);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 5709: reset through the MISC command register, then
		 * restore the register-window/byte-swap config word. */
		REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
		REG_RD(bp, BNX2_MISC_COMMAND);
		udelay(5);

		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);

	} else {
		/* 5706/5708: reset through PCICFG_MISC_CONFIG and poll
		 * until the reset request/busy bits clear. */
		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Chip reset. */
		REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

		/* Reading back any register after chip reset will hang the
		 * bus on 5706 A0 and A1.  The msleep below provides plenty
		 * of margin for write posting.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1))
			msleep(20);

		/* Reset takes approximate 30 usec */
		for (i = 0; i < 10; i++) {
			val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
				break;
			udelay(10);
		}

		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			printk(KERN_ERR PFX "Chip reset did not complete\n");
			return -EBUSY;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		printk(KERN_ERR PFX "Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
	if (rc)
		return rc;

	spin_lock_bh(&bp->phy_lock);
	old_port = bp->phy_port;
	bnx2_init_remote_phy(bp);
	/* If remote PHY detection changed the port type, re-establish
	 * the default link parameters for the new port. */
	if ((bp->phy_flags & REMOTE_PHY_CAP_FLAG) && old_port != bp->phy_port)
		bnx2_set_default_remote_link(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower.  The default
		 * of this register is 0x0000000e. */
		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	return rc;
}
4014
4015 static int
4016 bnx2_init_chip(struct bnx2 *bp)
4017 {
4018         u32 val;
4019         int rc;
4020
4021         /* Make sure the interrupt is not active. */
4022         REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4023
4024         val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4025               BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4026 #ifdef __BIG_ENDIAN
4027               BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
4028 #endif
4029               BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
4030               DMA_READ_CHANS << 12 |
4031               DMA_WRITE_CHANS << 16;
4032
4033         val |= (0x2 << 20) | (1 << 11);
4034
4035         if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
4036                 val |= (1 << 23);
4037
4038         if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
4039             (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
4040                 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4041
4042         REG_WR(bp, BNX2_DMA_CONFIG, val);
4043
4044         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4045                 val = REG_RD(bp, BNX2_TDMA_CONFIG);
4046                 val |= BNX2_TDMA_CONFIG_ONE_DMA;
4047                 REG_WR(bp, BNX2_TDMA_CONFIG, val);
4048         }
4049
4050         if (bp->flags & PCIX_FLAG) {
4051                 u16 val16;
4052
4053                 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4054                                      &val16);
4055                 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4056                                       val16 & ~PCI_X_CMD_ERO);
4057         }
4058
4059         REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4060                BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4061                BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4062                BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4063
4064         /* Initialize context mapping and zero out the quick contexts.  The
4065          * context block must have already been enabled. */
4066         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4067                 rc = bnx2_init_5709_context(bp);
4068                 if (rc)
4069                         return rc;
4070         } else
4071                 bnx2_init_context(bp);
4072
4073         if ((rc = bnx2_init_cpus(bp)) != 0)
4074                 return rc;
4075
4076         bnx2_init_nvram(bp);
4077
4078         bnx2_set_mac_addr(bp);
4079
4080         val = REG_RD(bp, BNX2_MQ_CONFIG);
4081         val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4082         val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4083         if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
4084                 val |= BNX2_MQ_CONFIG_HALT_DIS;
4085
4086         REG_WR(bp, BNX2_MQ_CONFIG, val);
4087
4088         val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4089         REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4090         REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4091
4092         val = (BCM_PAGE_BITS - 8) << 24;
4093         REG_WR(bp, BNX2_RV2P_CONFIG, val);
4094
4095         /* Configure page size. */
4096         val = REG_RD(bp, BNX2_TBDR_CONFIG);
4097         val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4098         val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
4099         REG_WR(bp, BNX2_TBDR_CONFIG, val);
4100
4101         val = bp->mac_addr[0] +
4102               (bp->mac_addr[1] << 8) +
4103               (bp->mac_addr[2] << 16) +
4104               bp->mac_addr[3] +
4105               (bp->mac_addr[4] << 8) +
4106               (bp->mac_addr[5] << 16);
4107         REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4108
4109         /* Program the MTU.  Also include 4 bytes for CRC32. */
4110         val = bp->dev->mtu + ETH_HLEN + 4;
4111         if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4112                 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4113         REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4114
4115         bp->last_status_idx = 0;
4116         bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4117
4118         /* Set up how to generate a link change interrupt. */
4119         REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4120
4121         REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
4122                (u64) bp->status_blk_mapping & 0xffffffff);
4123         REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4124
4125         REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4126                (u64) bp->stats_blk_mapping & 0xffffffff);
4127         REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4128                (u64) bp->stats_blk_mapping >> 32);
4129
4130         REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
4131                (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4132
4133         REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4134                (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4135
4136         REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4137                (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4138
4139         REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4140
4141         REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4142
4143         REG_WR(bp, BNX2_HC_COM_TICKS,
4144                (bp->com_ticks_int << 16) | bp->com_ticks);
4145
4146         REG_WR(bp, BNX2_HC_CMD_TICKS,
4147                (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4148
4149         if (CHIP_NUM(bp) == CHIP_NUM_5708)
4150                 REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
4151         else
4152                 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
4153         REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */
4154
4155         if (CHIP_ID(bp) == CHIP_ID_5706_A1)
4156                 val = BNX2_HC_CONFIG_COLLECT_STATS;
4157         else {
4158                 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
4159                       BNX2_HC_CONFIG_COLLECT_STATS;
4160         }
4161
4162         if (bp->flags & ONE_SHOT_MSI_FLAG)
4163                 val |= BNX2_HC_CONFIG_ONE_SHOT;
4164
4165         REG_WR(bp, BNX2_HC_CONFIG, val);
4166
4167         /* Clear internal stats counters. */
4168         REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
4169
4170         REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
4171
4172         /* Initialize the receive filter. */
4173         bnx2_set_rx_mode(bp->dev);
4174
4175         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4176                 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4177                 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4178                 REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4179         }
4180         rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
4181                           0);
4182
4183         REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
4184         REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
4185
4186         udelay(20);
4187
4188         bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
4189
4190         return rc;
4191 }
4192
4193 static void
4194 bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
4195 {
4196         u32 val, offset0, offset1, offset2, offset3;
4197
4198         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4199                 offset0 = BNX2_L2CTX_TYPE_XI;
4200                 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4201                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4202                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4203         } else {
4204                 offset0 = BNX2_L2CTX_TYPE;
4205                 offset1 = BNX2_L2CTX_CMD_TYPE;
4206                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4207                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4208         }
4209         val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4210         CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);
4211
4212         val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4213         CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);
4214
4215         val = (u64) bp->tx_desc_mapping >> 32;
4216         CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);
4217
4218         val = (u64) bp->tx_desc_mapping & 0xffffffff;
4219         CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
4220 }
4221
/* Initialize TX ring software state, terminate the BD chain, and
 * program the TX context for TX_CID.
 */
static void
bnx2_init_tx_ring(struct bnx2 *bp)
{
        struct tx_bd *txbd;
        u32 cid;

        /* Threshold (half the ring) used elsewhere to restart the queue. */
        bp->tx_wake_thresh = bp->tx_ring_size / 2;

        /* The final BD is a chain pointer back to the ring's base
         * (single-page TX ring).
         */
        txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];

        txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
        txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;

        bp->tx_prod = 0;
        bp->tx_cons = 0;
        bp->hw_tx_cons = 0;
        bp->tx_prod_bseq = 0;

        cid = TX_CID;
        /* Cache the mailbox addresses used to ring the TX doorbell. */
        bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
        bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;

        bnx2_init_tx_context(bp, cid);
}
4246
4247 static void
4248 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
4249                      int num_rings)
4250 {
4251         int i;
4252         struct rx_bd *rxbd;
4253
4254         for (i = 0; i < num_rings; i++) {
4255                 int j;
4256
4257                 rxbd = &rx_ring[i][0];
4258                 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
4259                         rxbd->rx_bd_len = buf_size;
4260                         rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4261                 }
4262                 if (i == (num_rings - 1))
4263                         j = 0;
4264                 else
4265                         j = i + 1;
4266                 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
4267                 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
4268         }
4269 }
4270
/* Initialize the RX rings: reset software indices, build the BD
 * chains, program the RX L2 context, and pre-fill the rings with
 * receive buffers (and pages, when the page ring is enabled).
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp)
{
        int i;
        u16 prod, ring_prod;
        u32 val, rx_cid_addr = GET_CID_ADDR(RX_CID);

        bp->rx_prod = 0;
        bp->rx_cons = 0;
        bp->rx_prod_bseq = 0;
        bp->rx_pg_prod = 0;
        bp->rx_pg_cons = 0;

        bnx2_init_rxbd_rings(bp->rx_desc_ring, bp->rx_desc_mapping,
                             bp->rx_buf_use_size, bp->rx_max_ring);

        /* A page buffer size of 0 leaves the page ring disabled. */
        CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
        if (bp->rx_pg_ring_size) {
                bnx2_init_rxbd_rings(bp->rx_pg_desc_ring,
                                     bp->rx_pg_desc_mapping,
                                     PAGE_SIZE, bp->rx_max_pg_ring);
                /* High half: normal buffer size; low half: page size. */
                val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
                CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
                CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
                       BNX2_L2CTX_RBDC_JUMBO_KEY);

                /* Page BD chain base address (64-bit, split in two). */
                val = (u64) bp->rx_pg_desc_mapping[0] >> 32;
                CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);

                val = (u64) bp->rx_pg_desc_mapping[0] & 0xffffffff;
                CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);

                if (CHIP_NUM(bp) == CHIP_NUM_5709)
                        REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
        }

        /* Program the RX context type and size. */
        val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
        val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
        val |= 0x02 << 8;
        CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);

        /* Normal RX BD chain base address. */
        val = (u64) bp->rx_desc_mapping[0] >> 32;
        CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);

        val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
        CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);

        /* Fill the page ring; stop early if allocation fails. */
        ring_prod = prod = bp->rx_pg_prod;
        for (i = 0; i < bp->rx_pg_ring_size; i++) {
                if (bnx2_alloc_rx_page(bp, ring_prod) < 0)
                        break;
                prod = NEXT_RX_BD(prod);
                ring_prod = RX_PG_RING_IDX(prod);
        }
        bp->rx_pg_prod = prod;

        /* Fill the normal RX ring; stop early if allocation fails. */
        ring_prod = prod = bp->rx_prod;
        for (i = 0; i < bp->rx_ring_size; i++) {
                if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
                        break;
                }
                prod = NEXT_RX_BD(prod);
                ring_prod = RX_RING_IDX(prod);
        }
        bp->rx_prod = prod;

        /* Publish the producer indices and byte sequence to the chip. */
        REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_PG_BDIDX, bp->rx_pg_prod);
        REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BIDX, prod);

        REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
}
4342
4343 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
4344 {
4345         u32 max, num_rings = 1;
4346
4347         while (ring_size > MAX_RX_DESC_CNT) {
4348                 ring_size -= MAX_RX_DESC_CNT;
4349                 num_rings++;
4350         }
4351         /* round to next power of 2 */
4352         max = max_size;
4353         while ((max & num_rings) == 0)
4354                 max >>= 1;
4355
4356         if (num_rings != max)
4357                 max <<= 1;
4358
4359         return max;
4360 }
4361
/* Derive the RX buffer sizes and ring geometry from the current MTU
 * and the requested ring size.  The page (jumbo) ring geometry is
 * cleared here — this path configures the normal ring only.
 */
static void
bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
{
        u32 rx_size;

        /* 8 for CRC and VLAN */
        rx_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;

        bp->rx_copy_thresh = RX_COPY_THRESH;
        bp->rx_pg_ring_size = 0;
        bp->rx_max_pg_ring = 0;
        bp->rx_max_pg_ring_idx = 0;

        bp->rx_buf_use_size = rx_size;
        /* hw alignment */
        bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
        bp->rx_ring_size = size;
        bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
        /* Highest valid descriptor index across all ring pages. */
        bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
}
4382
/* Release every skb still owned by the TX ring, unmapping the linear
 * head and every fragment page.
 */
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
        int i;

        if (bp->tx_buf_ring == NULL)
                return;

        for (i = 0; i < TX_DESC_CNT; ) {
                struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
                struct sk_buff *skb = tx_buf->skb;
                int j, last;

                /* Only the first BD of a packet carries the skb pointer. */
                if (skb == NULL) {
                        i++;
                        continue;
                }

                pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
                        skb_headlen(skb), PCI_DMA_TODEVICE);

                tx_buf->skb = NULL;

                /* Each fragment occupies its own BD after the head BD. */
                last = skb_shinfo(skb)->nr_frags;
                for (j = 0; j < last; j++) {
                        tx_buf = &bp->tx_buf_ring[i + j + 1];
                        pci_unmap_page(bp->pdev,
                                pci_unmap_addr(tx_buf, mapping),
                                skb_shinfo(skb)->frags[j].size,
                                PCI_DMA_TODEVICE);
                }
                dev_kfree_skb(skb);
                /* Advance past the head BD plus all fragment BDs. */
                i += j + 1;
        }

}
4419
/* Release every skb and page still posted to the RX rings. */
static void
bnx2_free_rx_skbs(struct bnx2 *bp)
{
        int i;

        if (bp->rx_buf_ring == NULL)
                return;

        for (i = 0; i < bp->rx_max_ring_idx; i++) {
                struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
                struct sk_buff *skb = rx_buf->skb;

                /* Slots that were never filled stay NULL. */
                if (skb == NULL)
                        continue;

                pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
                        bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

                rx_buf->skb = NULL;

                dev_kfree_skb(skb);
        }
        /* Free the pages backing the RX page (jumbo) ring, if any. */
        for (i = 0; i < bp->rx_max_pg_ring_idx; i++)
                bnx2_free_rx_page(bp, i);
}
4445
/* Free all buffers posted to both the TX and RX rings. */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
        bnx2_free_tx_skbs(bp);
        bnx2_free_rx_skbs(bp);
}
4452
4453 static int
4454 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
4455 {
4456         int rc;
4457
4458         rc = bnx2_reset_chip(bp, reset_code);
4459         bnx2_free_skbs(bp);
4460         if (rc)
4461                 return rc;
4462
4463         if ((rc = bnx2_init_chip(bp)) != 0)
4464                 return rc;
4465
4466         bnx2_init_tx_ring(bp);
4467         bnx2_init_rx_ring(bp);
4468         return 0;
4469 }
4470
4471 static int
4472 bnx2_init_nic(struct bnx2 *bp)
4473 {
4474         int rc;
4475
4476         if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
4477                 return rc;
4478
4479         spin_lock_bh(&bp->phy_lock);
4480         bnx2_init_phy(bp);
4481         bnx2_set_link(bp);
4482         spin_unlock_bh(&bp->phy_lock);
4483         return 0;
4484 }
4485
/* Ethtool register self-test.  For each entry in reg_tbl, verify that
 * the read/write bits (rw_mask) can be both cleared and set, and that
 * the read-only bits (ro_mask) are unaffected by writes.  The original
 * register value is restored in all cases.  Entries flagged
 * BNX2_FL_NOT_5709 are skipped on 5709 chips.  Returns 0 on success,
 * -ENODEV on the first failing register.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
        int ret;
        int i, is_5709;
        static const struct {
                u16   offset;
                u16   flags;
#define BNX2_FL_NOT_5709        1
                u32   rw_mask;
                u32   ro_mask;
        } reg_tbl[] = {
                { 0x006c, 0, 0x00000000, 0x0000003f },
                { 0x0090, 0, 0xffffffff, 0x00000000 },
                { 0x0094, 0, 0x00000000, 0x00000000 },

                { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
                { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
                { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
                { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
                { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
                { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
                { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
                { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
                { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

                { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
                { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
                { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
                { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
                { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
                { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

                { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
                { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
                { 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },

                { 0x1000, 0, 0x00000000, 0x00000001 },
                { 0x1004, 0, 0x00000000, 0x000f0001 },

                { 0x1408, 0, 0x01c00800, 0x00000000 },
                { 0x149c, 0, 0x8000ffff, 0x00000000 },
                { 0x14a8, 0, 0x00000000, 0x000001ff },
                { 0x14ac, 0, 0x0fffffff, 0x10000000 },
                { 0x14b0, 0, 0x00000002, 0x00000001 },
                { 0x14b8, 0, 0x00000000, 0x00000000 },
                { 0x14c0, 0, 0x00000000, 0x00000009 },
                { 0x14c4, 0, 0x00003fff, 0x00000000 },
                { 0x14cc, 0, 0x00000000, 0x00000001 },
                { 0x14d0, 0, 0xffffffff, 0x00000000 },

                { 0x1800, 0, 0x00000000, 0x00000001 },
                { 0x1804, 0, 0x00000000, 0x00000003 },

                { 0x2800, 0, 0x00000000, 0x00000001 },
                { 0x2804, 0, 0x00000000, 0x00003f01 },
                { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
                { 0x2810, 0, 0xffff0000, 0x00000000 },
                { 0x2814, 0, 0xffff0000, 0x00000000 },
                { 0x2818, 0, 0xffff0000, 0x00000000 },
                { 0x281c, 0, 0xffff0000, 0x00000000 },
                { 0x2834, 0, 0xffffffff, 0x00000000 },
                { 0x2840, 0, 0x00000000, 0xffffffff },
                { 0x2844, 0, 0x00000000, 0xffffffff },
                { 0x2848, 0, 0xffffffff, 0x00000000 },
                { 0x284c, 0, 0xf800f800, 0x07ff07ff },

                { 0x2c00, 0, 0x00000000, 0x00000011 },
                { 0x2c04, 0, 0x00000000, 0x00030007 },

                { 0x3c00, 0, 0x00000000, 0x00000001 },
                { 0x3c04, 0, 0x00000000, 0x00070000 },
                { 0x3c08, 0, 0x00007f71, 0x07f00000 },
                { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
                { 0x3c10, 0, 0xffffffff, 0x00000000 },
                { 0x3c14, 0, 0x00000000, 0xffffffff },
                { 0x3c18, 0, 0x00000000, 0xffffffff },
                { 0x3c1c, 0, 0xfffff000, 0x00000000 },
                { 0x3c20, 0, 0xffffff00, 0x00000000 },

                { 0x5004, 0, 0x00000000, 0x0000007f },
                { 0x5008, 0, 0x0f0007ff, 0x00000000 },

                { 0x5c00, 0, 0x00000000, 0x00000001 },
                { 0x5c04, 0, 0x00000000, 0x0003000f },
                { 0x5c08, 0, 0x00000003, 0x00000000 },
                { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
                { 0x5c10, 0, 0x00000000, 0xffffffff },
                { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
                { 0x5c84, 0, 0x00000000, 0x0000f333 },
                { 0x5c88, 0, 0x00000000, 0x00077373 },
                { 0x5c8c, 0, 0x00000000, 0x0007f737 },

                { 0x6808, 0, 0x0000ff7f, 0x00000000 },
                { 0x680c, 0, 0xffffffff, 0x00000000 },
                { 0x6810, 0, 0xffffffff, 0x00000000 },
                { 0x6814, 0, 0xffffffff, 0x00000000 },
                { 0x6818, 0, 0xffffffff, 0x00000000 },
                { 0x681c, 0, 0xffffffff, 0x00000000 },
                { 0x6820, 0, 0x00ff00ff, 0x00000000 },
                { 0x6824, 0, 0x00ff00ff, 0x00000000 },
                { 0x6828, 0, 0x00ff00ff, 0x00000000 },
                { 0x682c, 0, 0x03ff03ff, 0x00000000 },
                { 0x6830, 0, 0x03ff03ff, 0x00000000 },
                { 0x6834, 0, 0x03ff03ff, 0x00000000 },
                { 0x6838, 0, 0x03ff03ff, 0x00000000 },
                { 0x683c, 0, 0x0000ffff, 0x00000000 },
                { 0x6840, 0, 0x00000ff0, 0x00000000 },
                { 0x6844, 0, 0x00ffff00, 0x00000000 },
                { 0x684c, 0, 0xffffffff, 0x00000000 },
                { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
                { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
                { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
                { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
                { 0x6908, 0, 0x00000000, 0x0001ff0f },
                { 0x690c, 0, 0x00000000, 0x0ffe00f0 },

                /* Sentinel entry terminating the table. */
                { 0xffff, 0, 0x00000000, 0x00000000 },
        };

        ret = 0;
        is_5709 = 0;
        if (CHIP_NUM(bp) == CHIP_NUM_5709)
                is_5709 = 1;

        for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
                u32 offset, rw_mask, ro_mask, save_val, val;
                u16 flags = reg_tbl[i].flags;

                if (is_5709 && (flags & BNX2_FL_NOT_5709))
                        continue;

                offset = (u32) reg_tbl[i].offset;
                rw_mask = reg_tbl[i].rw_mask;
                ro_mask = reg_tbl[i].ro_mask;

                save_val = readl(bp->regview + offset);

                /* Phase 1: write 0 — rw bits must read back 0, ro bits
                 * must be unchanged.
                 */
                writel(0, bp->regview + offset);

                val = readl(bp->regview + offset);
                if ((val & rw_mask) != 0) {
                        goto reg_test_err;
                }

                if ((val & ro_mask) != (save_val & ro_mask)) {
                        goto reg_test_err;
                }

                /* Phase 2: write all 1s — rw bits must read back 1, ro
                 * bits must still be unchanged.
                 */
                writel(0xffffffff, bp->regview + offset);

                val = readl(bp->regview + offset);
                if ((val & rw_mask) != rw_mask) {
                        goto reg_test_err;
                }

                if ((val & ro_mask) != (save_val & ro_mask)) {
                        goto reg_test_err;
                }

                /* Restore the original value before moving on. */
                writel(save_val, bp->regview + offset);
                continue;

reg_test_err:
                writel(save_val, bp->regview + offset);
                ret = -ENODEV;
                break;
        }
        return ret;
}
4656
4657 static int
4658 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
4659 {
4660         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
4661                 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
4662         int i;
4663
4664         for (i = 0; i < sizeof(test_pattern) / 4; i++) {
4665                 u32 offset;
4666
4667                 for (offset = 0; offset < size; offset += 4) {
4668
4669                         REG_WR_IND(bp, start + offset, test_pattern[i]);
4670
4671                         if (REG_RD_IND(bp, start + offset) !=
4672                                 test_pattern[i]) {
4673                                 return -ENODEV;
4674                         }
4675                 }
4676         }
4677         return 0;
4678 }
4679
4680 static int
4681 bnx2_test_memory(struct bnx2 *bp)
4682 {
4683         int ret = 0;
4684         int i;
4685         static struct mem_entry {
4686                 u32   offset;
4687                 u32   len;
4688         } mem_tbl_5706[] = {
4689                 { 0x60000,  0x4000 },
4690                 { 0xa0000,  0x3000 },
4691                 { 0xe0000,  0x4000 },
4692                 { 0x120000, 0x4000 },
4693                 { 0x1a0000, 0x4000 },
4694                 { 0x160000, 0x4000 },
4695                 { 0xffffffff, 0    },
4696         },
4697         mem_tbl_5709[] = {
4698                 { 0x60000,  0x4000 },
4699                 { 0xa0000,  0x3000 },
4700                 { 0xe0000,  0x4000 },
4701                 { 0x120000, 0x4000 },
4702                 { 0x1a0000, 0x4000 },
4703                 { 0xffffffff, 0    },
4704         };
4705         struct mem_entry *mem_tbl;
4706
4707         if (CHIP_NUM(bp) == CHIP_NUM_5709)
4708                 mem_tbl = mem_tbl_5709;
4709         else
4710                 mem_tbl = mem_tbl_5706;
4711
4712         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
4713                 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
4714                         mem_tbl[i].len)) != 0) {
4715                         return ret;
4716                 }
4717         }
4718
4719         return ret;
4720 }
4721
4722 #define BNX2_MAC_LOOPBACK       0
4723 #define BNX2_PHY_LOOPBACK       1
4724
/* Send one self-addressed frame through the MAC or PHY loopback path
 * and verify it is received intact.  Returns 0 on success, -ENODEV if
 * the frame is lost or corrupted, -ENOMEM on allocation failure, and
 * -EINVAL for an unknown loopback mode.
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
        unsigned int pkt_size, num_pkts, i;
        struct sk_buff *skb, *rx_skb;
        unsigned char *packet;
        u16 rx_start_idx, rx_idx;
        dma_addr_t map;
        struct tx_bd *txbd;
        struct sw_bd *rx_buf;
        struct l2_fhdr *rx_hdr;
        int ret = -ENODEV;

        if (loopback_mode == BNX2_MAC_LOOPBACK) {
                bp->loopback = MAC_LOOPBACK;
                bnx2_set_mac_loopback(bp);
        }
        else if (loopback_mode == BNX2_PHY_LOOPBACK) {
                /* A firmware-managed remote PHY cannot be looped back;
                 * report success so the test suite is not failed.
                 */
                if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
                        return 0;

                bp->loopback = PHY_LOOPBACK;
                bnx2_set_phy_loopback(bp);
        }
        else
                return -EINVAL;

        /* Build the test frame: our own MAC as destination, 8 zero
         * bytes, then an incrementing byte pattern for the payload.
         */
        pkt_size = 1514;
        skb = netdev_alloc_skb(bp->dev, pkt_size);
        if (!skb)
                return -ENOMEM;
        packet = skb_put(skb, pkt_size);
        memcpy(packet, bp->dev->dev_addr, 6);
        memset(packet + 6, 0x0, 8);
        for (i = 14; i < pkt_size; i++)
                packet[i] = (unsigned char) (i & 0xff);

        map = pci_map_single(bp->pdev, skb->data, pkt_size,
                PCI_DMA_TODEVICE);

        /* Force a coalesce (without interrupt) so the status block
         * holds a stable starting RX consumer index.
         */
        REG_WR(bp, BNX2_HC_COMMAND,
               bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

        REG_RD(bp, BNX2_HC_COMMAND);

        udelay(5);
        rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;

        num_pkts = 0;

        /* Post a single TX BD describing the whole frame. */
        txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];

        txbd->tx_bd_haddr_hi = (u64) map >> 32;
        txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
        txbd->tx_bd_mss_nbytes = pkt_size;
        txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

        num_pkts++;
        bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
        bp->tx_prod_bseq += pkt_size;

        /* Ring the TX doorbell. */
        REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
        REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

        udelay(100);

        /* Coalesce again so the status block reflects the looped frame. */
        REG_WR(bp, BNX2_HC_COMMAND,
               bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

        REG_RD(bp, BNX2_HC_COMMAND);

        udelay(5);

        pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
        dev_kfree_skb(skb);

        /* The frame must have been consumed by the TX engine... */
        if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
                goto loopback_test_done;
        }

        /* ...and exactly num_pkts frames received in return. */
        rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
        if (rx_idx != rx_start_idx + num_pkts) {
                goto loopback_test_done;
        }

        /* NOTE(review): rx_start_idx indexes rx_buf_ring directly,
         * without an RX_RING_IDX() wrap; presumably safe for this
         * single-frame test — confirm.
         */
        rx_buf = &bp->rx_buf_ring[rx_start_idx];
        rx_skb = rx_buf->skb;

        /* NOTE(review): the frame header is read below after this
         * pointer is taken but before the DMA sync — confirm ordering.
         */
        rx_hdr = (struct l2_fhdr *) rx_skb->data;
        skb_reserve(rx_skb, bp->rx_offset);

        pci_dma_sync_single_for_cpu(bp->pdev,
                pci_unmap_addr(rx_buf, mapping),
                bp->rx_buf_size, PCI_DMA_FROMDEVICE);

        /* Reject frames the chip flagged as errored. */
        if (rx_hdr->l2_fhdr_status &
                (L2_FHDR_ERRORS_BAD_CRC |
                L2_FHDR_ERRORS_PHY_DECODE |
                L2_FHDR_ERRORS_ALIGNMENT |
                L2_FHDR_ERRORS_TOO_SHORT |
                L2_FHDR_ERRORS_GIANT_FRAME)) {

                goto loopback_test_done;
        }

        /* Length must match; l2_fhdr_pkt_len includes the 4-byte CRC. */
        if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
                goto loopback_test_done;
        }

        /* Verify the payload pattern byte for byte. */
        for (i = 14; i < pkt_size; i++) {
                if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
                        goto loopback_test_done;
                }
        }

        ret = 0;

loopback_test_done:
        bp->loopback = 0;
        return ret;
}
4846
4847 #define BNX2_MAC_LOOPBACK_FAILED        1
4848 #define BNX2_PHY_LOOPBACK_FAILED        2
4849 #define BNX2_LOOPBACK_FAILED            (BNX2_MAC_LOOPBACK_FAILED |     \
4850                                          BNX2_PHY_LOOPBACK_FAILED)
4851
4852 static int
4853 bnx2_test_loopback(struct bnx2 *bp)
4854 {
4855         int rc = 0;
4856
4857         if (!netif_running(bp->dev))
4858                 return BNX2_LOOPBACK_FAILED;
4859
4860         bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
4861         spin_lock_bh(&bp->phy_lock);
4862         bnx2_init_phy(bp);
4863         spin_unlock_bh(&bp->phy_lock);
4864         if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
4865                 rc |= BNX2_MAC_LOOPBACK_FAILED;
4866         if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
4867                 rc |= BNX2_PHY_LOOPBACK_FAILED;
4868         return rc;
4869 }
4870
4871 #define NVRAM_SIZE 0x200
4872 #define CRC32_RESIDUAL 0xdebb20e3
4873
4874 static int
4875 bnx2_test_nvram(struct bnx2 *bp)
4876 {
4877         u32 buf[NVRAM_SIZE / 4];
4878         u8 *data = (u8 *) buf;
4879         int rc = 0;
4880         u32 magic, csum;
4881
4882         if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
4883                 goto test_nvram_done;
4884
4885         magic = be32_to_cpu(buf[0]);
4886         if (magic != 0x669955aa) {
4887                 rc = -ENODEV;
4888                 goto test_nvram_done;
4889         }
4890
4891         if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
4892                 goto test_nvram_done;
4893
4894         csum = ether_crc_le(0x100, data);
4895         if (csum != CRC32_RESIDUAL) {
4896                 rc = -ENODEV;
4897                 goto test_nvram_done;
4898         }
4899
4900         csum = ether_crc_le(0x100, data + 0x100);
4901         if (csum != CRC32_RESIDUAL) {
4902                 rc = -ENODEV;
4903         }
4904
4905 test_nvram_done:
4906         return rc;
4907 }
4908
4909 static int
4910 bnx2_test_link(struct bnx2 *bp)
4911 {
4912         u32 bmsr;
4913
4914         if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
4915                 if (bp->link_up)
4916                         return 0;
4917                 return -ENODEV;
4918         }
4919         spin_lock_bh(&bp->phy_lock);
4920         bnx2_enable_bmsr1(bp);
4921         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
4922         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
4923         bnx2_disable_bmsr1(bp);
4924         spin_unlock_bh(&bp->phy_lock);
4925
4926         if (bmsr & BMSR_LSTATUS) {
4927                 return 0;
4928         }
4929         return -ENODEV;
4930 }
4931
4932 static int
4933 bnx2_test_intr(struct bnx2 *bp)
4934 {
4935         int i;
4936         u16 status_idx;
4937
4938         if (!netif_running(bp->dev))
4939                 return -ENODEV;
4940
4941         status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
4942
4943         /* This register is not touched during run-time. */
4944         REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
4945         REG_RD(bp, BNX2_HC_COMMAND);
4946
4947         for (i = 0; i < 10; i++) {
4948                 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
4949                         status_idx) {
4950
4951                         break;
4952                 }
4953
4954                 msleep_interruptible(10);
4955         }
4956         if (i < 10)
4957                 return 0;
4958
4959         return -ENODEV;
4960 }
4961
/* Periodic SerDes state machine for the 5706.  Implements parallel
 * detection: when autoneg is enabled but the link is down and the
 * partner is not sending CONFIG (yet signal is detected), force
 * 1000 Mbps full duplex; once CONFIG reappears, re-enable autoneg.
 * Runs under the PHY spinlock.
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
        spin_lock(&bp->phy_lock);
        /* Hold off while a previously started autoneg is still pending. */
        if (bp->serdes_an_pending)
                bp->serdes_an_pending--;
        else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
                u32 bmcr;

                bp->current_interval = bp->timer_interval;

                bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

                if (bmcr & BMCR_ANENABLE) {
                        u32 phy1, phy2;

                        /* NOTE(review): 0x1c/0x17/0x15 look like
                         * Broadcom vendor shadow/expansion register
                         * accesses — confirm against the PHY data sheet.
                         */
                        bnx2_write_phy(bp, 0x1c, 0x7c00);
                        bnx2_read_phy(bp, 0x1c, &phy1);

                        /* Read twice so phy2 reflects current state. */
                        bnx2_write_phy(bp, 0x17, 0x0f01);
                        bnx2_read_phy(bp, 0x15, &phy2);
                        bnx2_write_phy(bp, 0x17, 0x0f01);
                        bnx2_read_phy(bp, 0x15, &phy2);

                        if ((phy1 & 0x10) &&    /* SIGNAL DETECT */
                                !(phy2 & 0x20)) {       /* no CONFIG */

                                /* Partner won't autoneg: force 1G FD
                                 * and remember we parallel-detected.
                                 */
                                bmcr &= ~BMCR_ANENABLE;
                                bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
                                bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
                                bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
                        }
                }
        }
        else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
                 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
                u32 phy2;

                /* Link came up via parallel detect; if the partner now
                 * sends CONFIG, switch back to autonegotiation.
                 */
                bnx2_write_phy(bp, 0x17, 0x0f01);
                bnx2_read_phy(bp, 0x15, &phy2);
                if (phy2 & 0x20) {
                        u32 bmcr;

                        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
                        bmcr |= BMCR_ANENABLE;
                        bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

                        bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
                }
        } else
                bp->current_interval = bp->timer_interval;

        spin_unlock(&bp->phy_lock);
}
5016
/* Periodic link state machine for the 5708 SerDes PHY.
 *
 * Only acts on 2.5G-capable PHYs owned by this host (not by remote
 * management firmware): while autoneg has not produced a link,
 * alternate between forced 2.5G and autonegotiation so that both
 * kinds of link partners can eventually be brought up.
 */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
        /* PHY is controlled by management firmware; nothing to do. */
        if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
                return;

        if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
                bp->serdes_an_pending = 0;
                return;
        }

        spin_lock(&bp->phy_lock);
        /* Give a pending autoneg attempt more time before intervening. */
        if (bp->serdes_an_pending)
                bp->serdes_an_pending--;
        else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
                u32 bmcr;

                bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
                if (bmcr & BMCR_ANENABLE) {
                        /* Autoneg did not link; try forced 2.5G next. */
                        bnx2_enable_forced_2g5(bp);
                        bp->current_interval = SERDES_FORCED_TIMEOUT;
                } else {
                        /* Forced 2.5G did not link; fall back to autoneg
                         * and give it two timer periods before retrying.
                         */
                        bnx2_disable_forced_2g5(bp);
                        bp->serdes_an_pending = 2;
                        bp->current_interval = bp->timer_interval;
                }

        } else
                bp->current_interval = bp->timer_interval;

        spin_unlock(&bp->phy_lock);
}
5049
/* Driver heartbeat timer, rescheduled every bp->current_interval
 * jiffies.  Sends the firmware keep-alive, refreshes the firmware RX
 * drop counter, kicks a statistics DMA on 5708 (counter-corruption
 * workaround) and runs the SerDes link state machines.
 */
static void
bnx2_timer(unsigned long data)
{
        struct bnx2 *bp = (struct bnx2 *) data;

        if (!netif_running(bp->dev))
                return;

        /* Interrupt handling is gated off (e.g. during reset); skip
         * the work but keep the timer alive.
         */
        if (atomic_read(&bp->intr_sem) != 0)
                goto bnx2_restart_timer;

        bnx2_send_heart_beat(bp);

        /* Firmware-counted RX drops live in indirect register space,
         * not in the DMA'd statistics block; merge them in here.
         */
        bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);

        /* workaround occasional corrupted counters */
        if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
                REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
                                            BNX2_HC_COMMAND_STATS_NOW);

        if (bp->phy_flags & PHY_SERDES_FLAG) {
                if (CHIP_NUM(bp) == CHIP_NUM_5706)
                        bnx2_5706_serdes_timer(bp);
                else
                        bnx2_5708_serdes_timer(bp);
        }

bnx2_restart_timer:
        mod_timer(&bp->timer, jiffies + bp->current_interval);
}
5080
5081 static int
5082 bnx2_request_irq(struct bnx2 *bp)
5083 {
5084         struct net_device *dev = bp->dev;
5085         int rc = 0;
5086
5087         if (bp->flags & USING_MSI_FLAG) {
5088                 irq_handler_t   fn = bnx2_msi;
5089
5090                 if (bp->flags & ONE_SHOT_MSI_FLAG)
5091                         fn = bnx2_msi_1shot;
5092
5093                 rc = request_irq(bp->pdev->irq, fn, 0, dev->name, dev);
5094         } else
5095                 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
5096                                  IRQF_SHARED, dev->name, dev);
5097         return rc;
5098 }
5099
5100 static void
5101 bnx2_free_irq(struct bnx2 *bp)
5102 {
5103         struct net_device *dev = bp->dev;
5104
5105         if (bp->flags & USING_MSI_FLAG) {
5106                 free_irq(bp->pdev->irq, dev);
5107                 pci_disable_msi(bp->pdev);
5108                 bp->flags &= ~(USING_MSI_FLAG | ONE_SHOT_MSI_FLAG);
5109         } else
5110                 free_irq(bp->pdev->irq, dev);
5111 }
5112
5113 /* Called with rtnl_lock */
/* net_device open hook: power up, allocate rings, set up the IRQ
 * (preferring MSI), initialize the chip and start the timer.  If the
 * MSI interrupt test fails, the whole init is redone in INTx mode.
 */
static int
bnx2_open(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        int rc;

        netif_carrier_off(dev);

        bnx2_set_power_state(bp, PCI_D0);
        bnx2_disable_int(bp);

        rc = bnx2_alloc_mem(bp);
        if (rc)
                return rc;

        napi_enable(&bp->napi);

        /* Prefer MSI when the device supports it and the user has not
         * disabled it; 5709 additionally supports one-shot MSI.
         */
        if ((bp->flags & MSI_CAP_FLAG) && !disable_msi) {
                if (pci_enable_msi(bp->pdev) == 0) {
                        bp->flags |= USING_MSI_FLAG;
                        if (CHIP_NUM(bp) == CHIP_NUM_5709)
                                bp->flags |= ONE_SHOT_MSI_FLAG;
                }
        }
        rc = bnx2_request_irq(bp);

        if (rc) {
                napi_disable(&bp->napi);
                bnx2_free_mem(bp);
                return rc;
        }

        rc = bnx2_init_nic(bp);

        if (rc) {
                napi_disable(&bp->napi);
                bnx2_free_irq(bp);
                bnx2_free_skbs(bp);
                bnx2_free_mem(bp);
                return rc;
        }

        mod_timer(&bp->timer, jiffies + bp->current_interval);

        atomic_set(&bp->intr_sem, 0);

        bnx2_enable_int(bp);

        if (bp->flags & USING_MSI_FLAG) {
                /* Test MSI to make sure it is working
                 * If MSI test fails, go back to INTx mode
                 */
                if (bnx2_test_intr(bp) != 0) {
                        printk(KERN_WARNING PFX "%s: No interrupt was generated"
                               " using MSI, switching to INTx mode. Please"
                               " report this failure to the PCI maintainer"
                               " and include system chipset information.\n",
                               bp->dev->name);

                        bnx2_disable_int(bp);
                        /* bnx2_free_irq() also disables MSI and clears
                         * USING_MSI_FLAG, so the retry below uses INTx.
                         */
                        bnx2_free_irq(bp);

                        /* The chip must be re-initialized after the
                         * interrupt mode change.
                         */
                        rc = bnx2_init_nic(bp);

                        if (!rc)
                                rc = bnx2_request_irq(bp);

                        if (rc) {
                                napi_disable(&bp->napi);
                                bnx2_free_skbs(bp);
                                bnx2_free_mem(bp);
                                del_timer_sync(&bp->timer);
                                return rc;
                        }
                        bnx2_enable_int(bp);
                }
        }
        /* Still set only if the MSI test above passed (or was skipped). */
        if (bp->flags & USING_MSI_FLAG) {
                printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
        }

        netif_start_queue(dev);

        return 0;
}
5199
/* Workqueue handler that quiesces and re-initializes the chip after a
 * TX timeout (scheduled from bnx2_tx_timeout()).  in_reset_task lets
 * bnx2_close() poll for a reset in progress instead of calling
 * flush_scheduled_work(), which could deadlock under rtnl_lock (see
 * the comment in bnx2_close()).
 */
static void
bnx2_reset_task(struct work_struct *work)
{
        struct bnx2 *bp = container_of(work, struct bnx2, reset_task);

        if (!netif_running(bp->dev))
                return;

        bp->in_reset_task = 1;
        bnx2_netif_stop(bp);

        bnx2_init_nic(bp);

        /* Hold interrupt handling off until bnx2_netif_start() re-arms. */
        atomic_set(&bp->intr_sem, 1);
        bnx2_netif_start(bp);
        bp->in_reset_task = 0;
}
5217
/* net_device tx_timeout hook: runs in a context where we cannot
 * sleep, so defer the chip reset to process context (bnx2_reset_task).
 */
static void
bnx2_tx_timeout(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);

        /* This allows the netif to be shutdown gracefully before resetting */
        schedule_work(&bp->reset_task);
}
5226
5227 #ifdef BCM_VLAN
5228 /* Called with rtnl_lock */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
        struct bnx2 *bp = netdev_priv(dev);

        /* Quiesce the device while switching the VLAN group; the RX
         * mode must then be reprogrammed to match the new VLAN setup.
         */
        bnx2_netif_stop(bp);

        bp->vlgrp = vlgrp;
        bnx2_set_rx_mode(dev);

        bnx2_netif_start(bp);
}
5241 #endif
5242
5243 /* Called with netif_tx_lock.
5244  * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
5245  * netif_wake_queue().
5246  */
/* Hard-start transmit: map the skb (linear part plus page fragments)
 * into one TX buffer descriptor each, encode checksum/VLAN/TSO flags,
 * and ring the chip's TX doorbell.  Returns NETDEV_TX_OK or
 * NETDEV_TX_BUSY (ring unexpectedly full).
 */
static int
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        dma_addr_t mapping;
        struct tx_bd *txbd;
        struct sw_bd *tx_buf;
        u32 len, vlan_tag_flags, last_frag, mss;
        u16 prod, ring_prod;
        int i;

        /* Should not happen: the queue is stopped below before the
         * ring can get this full.
         */
        if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
                netif_stop_queue(dev);
                printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
                        dev->name);

                return NETDEV_TX_BUSY;
        }
        len = skb_headlen(skb);
        prod = bp->tx_prod;
        ring_prod = TX_RING_IDX(prod);

        vlan_tag_flags = 0;
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                /* Let the chip compute the TCP/UDP checksum. */
                vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
        }

        if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
                vlan_tag_flags |=
                        (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
        }
        if ((mss = skb_shinfo(skb)->gso_size)) {
                /* TSO: encode MSS, header lengths and (for IPv6) the
                 * transport-header offset into the BD flag fields.
                 */
                u32 tcp_opt_len, ip_tcp_len;
                struct iphdr *iph;

                vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

                tcp_opt_len = tcp_optlen(skb);

                if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
                        /* Extra bytes (extension headers) between the
                         * fixed IPv6 header and the TCP header.
                         */
                        u32 tcp_off = skb_transport_offset(skb) -
                                      sizeof(struct ipv6hdr) - ETH_HLEN;

                        vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
                                          TX_BD_FLAGS_SW_FLAGS;
                        if (likely(tcp_off == 0))
                                vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
                        else {
                                /* The 8-byte-unit offset is split across
                                 * several BD flag fields plus the mss word.
                                 */
                                tcp_off >>= 3;
                                vlan_tag_flags |= ((tcp_off & 0x3) <<
                                                   TX_BD_FLAGS_TCP6_OFF0_SHL) |
                                                  ((tcp_off & 0x10) <<
                                                   TX_BD_FLAGS_TCP6_OFF4_SHL);
                                mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
                        }
                } else {
                        /* Headers are modified below; get a private copy
                         * if they are shared.  On allocation failure the
                         * packet is dropped (TX path must not block).
                         */
                        if (skb_header_cloned(skb) &&
                            pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
                                dev_kfree_skb(skb);
                                return NETDEV_TX_OK;
                        }

                        ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

                        /* Prime per-segment fields: tot_len for one MSS
                         * and the TCP pseudo-header checksum; the chip
                         * finishes the checksums per segment.
                         */
                        iph = ip_hdr(skb);
                        iph->check = 0;
                        iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
                        tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
                                                                 iph->daddr, 0,
                                                                 IPPROTO_TCP,
                                                                 0);
                        if (tcp_opt_len || (iph->ihl > 5)) {
                                vlan_tag_flags |= ((iph->ihl - 5) +
                                                   (tcp_opt_len >> 2)) << 8;
                        }
                }
        } else
                mss = 0;

        mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);

        tx_buf = &bp->tx_buf_ring[ring_prod];
        tx_buf->skb = skb;
        pci_unmap_addr_set(tx_buf, mapping, mapping);

        txbd = &bp->tx_desc_ring[ring_prod];

        txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
        txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
        txbd->tx_bd_mss_nbytes = len | (mss << 16);
        txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

        last_frag = skb_shinfo(skb)->nr_frags;

        /* One BD per page fragment; only the first BD carries START
         * and only the last one gets END (set after the loop).
         */
        for (i = 0; i < last_frag; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                prod = NEXT_TX_BD(prod);
                ring_prod = TX_RING_IDX(prod);
                txbd = &bp->tx_desc_ring[ring_prod];

                len = frag->size;
                mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
                        len, PCI_DMA_TODEVICE);
                pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
                                mapping, mapping);

                txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
                txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
                txbd->tx_bd_mss_nbytes = len | (mss << 16);
                txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;

        }
        txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

        prod = NEXT_TX_BD(prod);
        bp->tx_prod_bseq += skb->len;

        /* Ring the doorbell: new producer index and total byte count. */
        REG_WR16(bp, bp->tx_bidx_addr, prod);
        REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

        /* Order the MMIO doorbell writes before updating tx_prod. */
        mmiowb();

        bp->tx_prod = prod;
        dev->trans_start = jiffies;

        /* Stop the queue when nearly full, then re-check: the
         * completion path may have freed BDs in the meantime, so wake
         * immediately to avoid a lost-wakeup race with bnx2_tx_int().
         */
        if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
                netif_stop_queue(dev);
                if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
                        netif_wake_queue(dev);
        }

        return NETDEV_TX_OK;
}
5381
5382 /* Called with rtnl_lock */
static int
bnx2_close(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        u32 reset_code;

        /* Calling flush_scheduled_work() may deadlock because
         * linkwatch_event() may be on the workqueue and it will try to get
         * the rtnl_lock which we are holding.
         */
        while (bp->in_reset_task)
                msleep(1);

        bnx2_disable_int_sync(bp);
        napi_disable(&bp->napi);
        del_timer_sync(&bp->timer);
        /* Tell the firmware why we are going down, so it can keep the
         * link alive when Wake-on-LAN is requested.
         */
        if (bp->flags & NO_WOL_FLAG)
                reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
        else if (bp->wol)
                reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
        else
                reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
        bnx2_reset_chip(bp, reset_code);
        bnx2_free_irq(bp);
        bnx2_free_skbs(bp);
        bnx2_free_mem(bp);
        bp->link_up = 0;
        netif_carrier_off(bp->dev);
        bnx2_set_power_state(bp, PCI_D3hot);
        return 0;
}
5414
/* Combine the hi/lo halves of a 64-bit hardware counter into an
 * unsigned long: the full value on 64-bit hosts, only the low 32 bits
 * on 32-bit hosts (GET_NET_STATS32).
 *
 * The whole expansion is parenthesized so the macro behaves as a
 * single expression regardless of operator precedence at the call
 * site (the previous definition left the '+' exposed).
 */
#define GET_NET_STATS64(ctr)                                    \
        (((unsigned long) ((unsigned long) (ctr##_hi) << 32)) + \
         (unsigned long) (ctr##_lo))

#define GET_NET_STATS32(ctr)            \
        (ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS   GET_NET_STATS64
#else
#define GET_NET_STATS   GET_NET_STATS32
#endif
5427
/* net_device get_stats hook: translate the chip's DMA'd statistics
 * block into struct net_device_stats.  64-bit hardware counters are
 * folded via GET_NET_STATS (full value on 64-bit hosts, low 32 bits
 * otherwise).
 */
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        struct statistics_block *stats_blk = bp->stats_blk;
        struct net_device_stats *net_stats = &bp->net_stats;

        /* No statistics block yet (device never brought up): return
         * the zeroed/cached values.
         */
        if (bp->stats_blk == NULL) {
                return net_stats;
        }
        net_stats->rx_packets =
                GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
                GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
                GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);

        net_stats->tx_packets =
                GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
                GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
                GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);

        net_stats->rx_bytes =
                GET_NET_STATS(stats_blk->stat_IfHCInOctets);

        net_stats->tx_bytes =
                GET_NET_STATS(stats_blk->stat_IfHCOutOctets);

        net_stats->multicast =
                GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);

        net_stats->collisions =
                (unsigned long) stats_blk->stat_EtherStatsCollisions;

        /* Both undersize and oversize frames count as length errors.
         * (The "Overrsize" spelling matches the statistics block
         * field name.)
         */
        net_stats->rx_length_errors =
                (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
                stats_blk->stat_EtherStatsOverrsizePkts);

        net_stats->rx_over_errors =
                (unsigned long) stats_blk->stat_IfInMBUFDiscards;

        net_stats->rx_frame_errors =
                (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;

        net_stats->rx_crc_errors =
                (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;

        net_stats->rx_errors = net_stats->rx_length_errors +
                net_stats->rx_over_errors + net_stats->rx_frame_errors +
                net_stats->rx_crc_errors;

        net_stats->tx_aborted_errors =
                (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
                stats_blk->stat_Dot3StatsLateCollisions);

        /* Carrier-sense counter is not used on 5706 or 5708 A0 --
         * presumably a hardware erratum; report 0 there.
         */
        if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
            (CHIP_ID(bp) == CHIP_ID_5708_A0))
                net_stats->tx_carrier_errors = 0;
        else {
                net_stats->tx_carrier_errors =
                        (unsigned long)
                        stats_blk->stat_Dot3StatsCarrierSenseErrors;
        }

        net_stats->tx_errors =
                (unsigned long)
                stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
                +
                net_stats->tx_aborted_errors +
                net_stats->tx_carrier_errors;

        /* Include firmware-counted drops (refreshed by bnx2_timer()). */
        net_stats->rx_missed_errors =
                (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
                stats_blk->stat_FwRxDrop);

        return net_stats;
}
5503
5504 /* All ethtool functions called with rtnl_lock */
5505
5506 static int
5507 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5508 {
5509         struct bnx2 *bp = netdev_priv(dev);
5510         int support_serdes = 0, support_copper = 0;
5511
5512         cmd->supported = SUPPORTED_Autoneg;
5513         if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
5514                 support_serdes = 1;
5515                 support_copper = 1;
5516         } else if (bp->phy_port == PORT_FIBRE)
5517                 support_serdes = 1;
5518         else
5519                 support_copper = 1;
5520
5521         if (support_serdes) {
5522                 cmd->supported |= SUPPORTED_1000baseT_Full |
5523                         SUPPORTED_FIBRE;
5524                 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
5525                         cmd->supported |= SUPPORTED_2500baseX_Full;
5526
5527         }
5528         if (support_copper) {
5529                 cmd->supported |= SUPPORTED_10baseT_Half |
5530                         SUPPORTED_10baseT_Full |
5531                         SUPPORTED_100baseT_Half |
5532                         SUPPORTED_100baseT_Full |
5533                         SUPPORTED_1000baseT_Full |
5534                         SUPPORTED_TP;
5535
5536         }
5537
5538         spin_lock_bh(&bp->phy_lock);
5539         cmd->port = bp->phy_port;
5540         cmd->advertising = bp->advertising;
5541
5542         if (bp->autoneg & AUTONEG_SPEED) {
5543                 cmd->autoneg = AUTONEG_ENABLE;
5544         }
5545         else {
5546                 cmd->autoneg = AUTONEG_DISABLE;
5547         }
5548
5549         if (netif_carrier_ok(dev)) {
5550                 cmd->speed = bp->line_speed;
5551                 cmd->duplex = bp->duplex;
5552         }
5553         else {
5554                 cmd->speed = -1;
5555                 cmd->duplex = -1;
5556         }
5557         spin_unlock_bh(&bp->phy_lock);
5558
5559         cmd->transceiver = XCVR_INTERNAL;
5560         cmd->phy_address = bp->phy_addr;
5561
5562         return 0;
5563 }
5564
/* ethtool set_settings hook (called with rtnl_lock held).
 *
 * Validates the requested port/autoneg/speed/duplex combination,
 * updates the driver's requested-link fields, and reprograms the PHY.
 * Everything happens under phy_lock so the timer-driven link state
 * machines see a consistent configuration.
 */
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct bnx2 *bp = netdev_priv(dev);
        u8 autoneg = bp->autoneg;
        u8 req_duplex = bp->req_duplex;
        u16 req_line_speed = bp->req_line_speed;
        u32 advertising = bp->advertising;
        int err = -EINVAL;

        spin_lock_bh(&bp->phy_lock);

        if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
                goto err_out_unlock;

        /* Switching between TP and FIBRE is only possible when the
         * PHY is under remote (management firmware) control.
         */
        if (cmd->port != bp->phy_port && !(bp->phy_flags & REMOTE_PHY_CAP_FLAG))
                goto err_out_unlock;

        if (cmd->autoneg == AUTONEG_ENABLE) {
                autoneg |= AUTONEG_SPEED;

                cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;

                /* allow advertising 1 speed */
                if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
                        (cmd->advertising == ADVERTISED_10baseT_Full) ||
                        (cmd->advertising == ADVERTISED_100baseT_Half) ||
                        (cmd->advertising == ADVERTISED_100baseT_Full)) {

                        /* 10/100 modes are copper-only. */
                        if (cmd->port == PORT_FIBRE)
                                goto err_out_unlock;

                        advertising = cmd->advertising;

                } else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
                        if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ||
                            (cmd->port == PORT_TP))
                                goto err_out_unlock;
                } else if (cmd->advertising == ADVERTISED_1000baseT_Full)
                        advertising = cmd->advertising;
                else if (cmd->advertising == ADVERTISED_1000baseT_Half)
                        goto err_out_unlock;
                else {
                        /* Anything else: advertise every speed the
                         * selected port type supports.
                         */
                        if (cmd->port == PORT_FIBRE)
                                advertising = ETHTOOL_ALL_FIBRE_SPEED;
                        else
                                advertising = ETHTOOL_ALL_COPPER_SPEED;
                }
                advertising |= ADVERTISED_Autoneg;
        }
        else {
                /* Forced speed/duplex: fibre allows only 1000/full or
                 * 2500/full (the latter only on 2.5G-capable PHYs);
                 * copper disallows forced gigabit speeds.
                 */
                if (cmd->port == PORT_FIBRE) {
                        if ((cmd->speed != SPEED_1000 &&
                             cmd->speed != SPEED_2500) ||
                            (cmd->duplex != DUPLEX_FULL))
                                goto err_out_unlock;

                        if (cmd->speed == SPEED_2500 &&
                            !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
                                goto err_out_unlock;
                }
                else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
                        goto err_out_unlock;

                autoneg &= ~AUTONEG_SPEED;
                req_line_speed = cmd->speed;
                req_duplex = cmd->duplex;
                advertising = 0;
        }

        bp->autoneg = autoneg;
        bp->advertising = advertising;
        bp->req_line_speed = req_line_speed;
        bp->req_duplex = req_duplex;

        err = bnx2_setup_phy(bp, cmd->port);

err_out_unlock:
        spin_unlock_bh(&bp->phy_lock);

        return err;
}
5647
5648 static void
5649 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
5650 {
5651         struct bnx2 *bp = netdev_priv(dev);
5652
5653         strcpy(info->driver, DRV_MODULE_NAME);
5654         strcpy(info->version, DRV_MODULE_VERSION);
5655         strcpy(info->bus_info, pci_name(bp->pdev));
5656         strcpy(info->fw_version, bp->fw_version);
5657 }
5658
5659 #define BNX2_REGDUMP_LEN                (32 * 1024)
5660
/* ethtool get_regs_len hook: the register dump produced by
 * bnx2_get_regs() is always a fixed 32 KB window.
 */
static int
bnx2_get_regs_len(struct net_device *dev)
{
        return BNX2_REGDUMP_LEN;
}
5666
5667 static void
5668 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
5669 {
5670         u32 *p = _p, i, offset;
5671         u8 *orig_p = _p;
5672         struct bnx2 *bp = netdev_priv(dev);
5673         u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
5674                                  0x0800, 0x0880, 0x0c00, 0x0c10,
5675                                  0x0c30, 0x0d08, 0x1000, 0x101c,
5676                                  0x1040, 0x1048, 0x1080, 0x10a4,
5677                                  0x1400, 0x1490, 0x1498, 0x14f0,
5678                                  0x1500, 0x155c, 0x1580, 0x15dc,
5679                                  0x1600, 0x1658, 0x1680, 0x16d8,
5680                                  0x1800, 0x1820, 0x1840, 0x1854,
5681                                  0x1880, 0x1894, 0x1900, 0x1984,
5682                                  0x1c00, 0x1c0c, 0x1c40, 0x1c54,
5683                                  0x1c80, 0x1c94, 0x1d00, 0x1d84,
5684                                  0x2000, 0x2030, 0x23c0, 0x2400,
5685                                  0x2800, 0x2820, 0x2830, 0x2850,
5686                                  0x2b40, 0x2c10, 0x2fc0, 0x3058,
5687                                  0x3c00, 0x3c94, 0x4000, 0x4010,
5688                                  0x4080, 0x4090, 0x43c0, 0x4458,
5689                                  0x4c00, 0x4c18, 0x4c40, 0x4c54,
5690                                  0x4fc0, 0x5010, 0x53c0, 0x5444,
5691                                  0x5c00, 0x5c18, 0x5c80, 0x5c90,
5692                                  0x5fc0, 0x6000, 0x6400, 0x6428,
5693                                  0x6800, 0x6848, 0x684c, 0x6860,
5694                                  0x6888, 0x6910, 0x8000 };
5695
5696         regs->version = 0;
5697
5698         memset(p, 0, BNX2_REGDUMP_LEN);
5699
5700         if (!netif_running(bp->dev))
5701                 return;
5702
5703         i = 0;
5704         offset = reg_boundaries[0];
5705         p += offset;
5706         while (offset < BNX2_REGDUMP_LEN) {
5707                 *p++ = REG_RD(bp, offset);
5708                 offset += 4;
5709                 if (offset == reg_boundaries[i + 1]) {
5710                         offset = reg_boundaries[i + 2];
5711                         p = (u32 *) (orig_p + offset);
5712                         i += 2;
5713                 }
5714         }
5715 }
5716
5717 static void
5718 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5719 {
5720         struct bnx2 *bp = netdev_priv(dev);
5721
5722         if (bp->flags & NO_WOL_FLAG) {
5723                 wol->supported = 0;
5724                 wol->wolopts = 0;
5725         }
5726         else {
5727                 wol->supported = WAKE_MAGIC;
5728                 if (bp->wol)
5729                         wol->wolopts = WAKE_MAGIC;
5730                 else
5731                         wol->wolopts = 0;
5732         }
5733         memset(&wol->sopass, 0, sizeof(wol->sopass));
5734 }
5735
5736 static int
5737 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5738 {
5739         struct bnx2 *bp = netdev_priv(dev);
5740
5741         if (wol->wolopts & ~WAKE_MAGIC)
5742                 return -EINVAL;
5743
5744         if (wol->wolopts & WAKE_MAGIC) {
5745                 if (bp->flags & NO_WOL_FLAG)
5746                         return -EINVAL;
5747
5748                 bp->wol = 1;
5749         }
5750         else {
5751                 bp->wol = 0;
5752         }
5753         return 0;
5754 }
5755
/* ethtool nway_reset hook: restart autonegotiation.  Fails with
 * -EINVAL when autoneg is not enabled.
 */
static int
bnx2_nway_reset(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        u32 bmcr;

        if (!(bp->autoneg & AUTONEG_SPEED)) {
                return -EINVAL;
        }

        spin_lock_bh(&bp->phy_lock);

        /* Remote-controlled PHY: ask the management firmware to redo
         * link setup instead of touching MII registers directly.
         */
        if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
                int rc;

                rc = bnx2_setup_remote_phy(bp, bp->phy_port);
                spin_unlock_bh(&bp->phy_lock);
                return rc;
        }

        /* Force a link down visible on the other side */
        if (bp->phy_flags & PHY_SERDES_FLAG) {
                /* Briefly put the PHY in loopback so the partner sees
                 * the link drop; the lock must be released across the
                 * sleep.
                 */
                bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
                spin_unlock_bh(&bp->phy_lock);

                msleep(20);

                spin_lock_bh(&bp->phy_lock);

                /* Arm the SerDes timer to supervise the new autoneg
                 * attempt (see bnx2_5706_serdes_timer()).
                 */
                bp->current_interval = SERDES_AN_TIMEOUT;
                bp->serdes_an_pending = 1;
                mod_timer(&bp->timer, jiffies + bp->current_interval);
        }

        /* Clear loopback and kick off a fresh autonegotiation. */
        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
        bmcr &= ~BMCR_LOOPBACK;
        bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

        spin_unlock_bh(&bp->phy_lock);

        return 0;
}
5798
5799 static int
5800 bnx2_get_eeprom_len(struct net_device *dev)
5801 {
5802         struct bnx2 *bp = netdev_priv(dev);
5803
5804         if (bp->flash_info == NULL)
5805                 return 0;
5806
5807         return (int) bp->flash_size;
5808 }
5809
5810 static int
5811 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5812                 u8 *eebuf)
5813 {
5814         struct bnx2 *bp = netdev_priv(dev);
5815         int rc;
5816
5817         /* parameters already validated in ethtool_get_eeprom */
5818
5819         rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
5820
5821         return rc;
5822 }
5823
5824 static int
5825 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5826                 u8 *eebuf)
5827 {
5828         struct bnx2 *bp = netdev_priv(dev);
5829         int rc;
5830
5831         /* parameters already validated in ethtool_set_eeprom */
5832
5833         rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
5834
5835         return rc;
5836 }
5837
/* ethtool get_coalesce hook: report the current host-coalescing
 * parameters (tick values are in microseconds, trip values in
 * frames).  Fields not supported by this chip are left zeroed.
 */
static int
bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
        struct bnx2 *bp = netdev_priv(dev);

        memset(coal, 0, sizeof(struct ethtool_coalesce));

        coal->rx_coalesce_usecs = bp->rx_ticks;
        coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
        coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
        coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;

        coal->tx_coalesce_usecs = bp->tx_ticks;
        coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
        coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
        coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;

        coal->stats_block_coalesce_usecs = bp->stats_ticks;

        return 0;
}
5859
5860 static int
5861 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5862 {
5863         struct bnx2 *bp = netdev_priv(dev);
5864
5865         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
5866         if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
5867
5868         bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
5869         if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
5870
5871         bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
5872         if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
5873
5874         bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
5875         if (bp->rx_quick_cons_trip_int > 0xff)
5876                 bp->rx_quick_cons_trip_int = 0xff;
5877
5878         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
5879         if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
5880
5881         bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
5882         if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
5883
5884         bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
5885         if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
5886
5887         bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
5888         if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
5889                 0xff;
5890
5891         bp->stats_ticks = coal->stats_block_coalesce_usecs;
5892         if (CHIP_NUM(bp) == CHIP_NUM_5708) {
5893                 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
5894                         bp->stats_ticks = USEC_PER_SEC;
5895         }
5896         if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
5897                 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
5898         bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
5899
5900         if (netif_running(bp->dev)) {
5901                 bnx2_netif_stop(bp);
5902                 bnx2_init_nic(bp);
5903                 bnx2_netif_start(bp);
5904         }
5905
5906         return 0;
5907 }
5908
/* ethtool get_ringparam handler: report the maximum and currently
 * configured RX/TX ring sizes.
 */
static void
bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);

	ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
	ering->rx_mini_max_pending = 0;
	/* The "jumbo" ring maps to the RX page ring used for large frames. */
	ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = bp->rx_pg_ring_size;

	ering->tx_max_pending = MAX_TX_DESC_CNT;
	ering->tx_pending = bp->tx_ring_size;
}
5925
/* Re-size the RX and TX rings.  If the interface is up, the chip is
 * quiesced and all ring memory freed first, then reallocated with the
 * new sizes and the NIC restarted.
 *
 * Returns 0 on success or a negative errno if reallocation fails (in
 * which case the device is left stopped).
 */
static int
bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
{
	if (netif_running(bp->dev)) {
		/* Quiesce and tear down before touching ring sizes. */
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	bnx2_set_rx_ring_size(bp, rx);
	bp->tx_ring_size = tx;

	if (netif_running(bp->dev)) {
		int rc;

		rc = bnx2_alloc_mem(bp);
		if (rc)
			return rc;
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}
	return 0;
}
5950
5951 static int
5952 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5953 {
5954         struct bnx2 *bp = netdev_priv(dev);
5955         int rc;
5956
5957         if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
5958                 (ering->tx_pending > MAX_TX_DESC_CNT) ||
5959                 (ering->tx_pending <= MAX_SKB_FRAGS)) {
5960
5961                 return -EINVAL;
5962         }
5963         rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
5964         return rc;
5965 }
5966
5967 static void
5968 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5969 {
5970         struct bnx2 *bp = netdev_priv(dev);
5971
5972         epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
5973         epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
5974         epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
5975 }
5976
5977 static int
5978 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5979 {
5980         struct bnx2 *bp = netdev_priv(dev);
5981
5982         bp->req_flow_ctrl = 0;
5983         if (epause->rx_pause)
5984                 bp->req_flow_ctrl |= FLOW_CTRL_RX;
5985         if (epause->tx_pause)
5986                 bp->req_flow_ctrl |= FLOW_CTRL_TX;
5987
5988         if (epause->autoneg) {
5989                 bp->autoneg |= AUTONEG_FLOW_CTRL;
5990         }
5991         else {
5992                 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
5993         }
5994
5995         spin_lock_bh(&bp->phy_lock);
5996
5997         bnx2_setup_phy(bp, bp->phy_port);
5998
5999         spin_unlock_bh(&bp->phy_lock);
6000
6001         return 0;
6002 }
6003
6004 static u32
6005 bnx2_get_rx_csum(struct net_device *dev)
6006 {
6007         struct bnx2 *bp = netdev_priv(dev);
6008
6009         return bp->rx_csum;
6010 }
6011
6012 static int
6013 bnx2_set_rx_csum(struct net_device *dev, u32 data)
6014 {
6015         struct bnx2 *bp = netdev_priv(dev);
6016
6017         bp->rx_csum = data;
6018         return 0;
6019 }
6020
6021 static int
6022 bnx2_set_tso(struct net_device *dev, u32 data)
6023 {
6024         struct bnx2 *bp = netdev_priv(dev);
6025
6026         if (data) {
6027                 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
6028                 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6029                         dev->features |= NETIF_F_TSO6;
6030         } else
6031                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
6032                                    NETIF_F_TSO_ECN);
6033         return 0;
6034 }
6035
#define BNX2_NUM_STATS 46

/* ethtool statistics names reported by bnx2_get_strings().  The order
 * must match bnx2_stats_offset_arr[] and the per-chip stats length
 * arrays below.
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};
6088
/* Offset (in 32-bit words) of a counter within the DMA'ed statistics
 * block.  For 8-byte counters this is the offset of the high word.
 */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

/* Word offsets of each ethtool counter, in bnx2_stats_str_arr[] order. */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
6139
/* Width in bytes of each hardware counter, indexed like
 * bnx2_stats_offset_arr[].  A zero width means the counter is not
 * read: stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata (on 5706 and 5708 A0 the carrier-sense
 * counter is also unusable, hence the second zero in the 5706 table).
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};

/* Counter widths for 5708 (and later 5706 revisions). */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
6158
#define BNX2_NUM_TESTS 6

/* Self-test names reported by bnx2_get_strings(); index order must
 * match the buf[] slots filled in by bnx2_self_test().
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
6171
6172 static int
6173 bnx2_get_sset_count(struct net_device *dev, int sset)
6174 {
6175         switch (sset) {
6176         case ETH_SS_TEST:
6177                 return BNX2_NUM_TESTS;
6178         case ETH_SS_STATS:
6179                 return BNX2_NUM_STATS;
6180         default:
6181                 return -EOPNOTSUPP;
6182         }
6183 }
6184
/* ethtool self-test handler.  The offline tests (register, memory,
 * loopback) take the NIC down and reset it into diagnostic mode; the
 * online tests (NVRAM, interrupt, link) run with the interface as-is.
 * Each buf[] slot is non-zero when the corresponding test in
 * bnx2_tests_str_arr[] failed.
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		/* Quiesce and put the chip in diagnostic mode. */
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		if (!netif_running(bp->dev)) {
			bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		}
		else {
			/* Bring the NIC back up after the offline tests. */
			bnx2_init_nic(bp);
			bnx2_netif_start(bp);
		}

		/* wait for link up (at most ~7 seconds) */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;

	}
}
6240
6241 static void
6242 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
6243 {
6244         switch (stringset) {
6245         case ETH_SS_STATS:
6246                 memcpy(buf, bnx2_stats_str_arr,
6247                         sizeof(bnx2_stats_str_arr));
6248                 break;
6249         case ETH_SS_TEST:
6250                 memcpy(buf, bnx2_tests_str_arr,
6251                         sizeof(bnx2_tests_str_arr));
6252                 break;
6253         }
6254 }
6255
/* ethtool get_ethtool_stats handler: copy counters from the chip's
 * DMA'ed statistics block into buf[] in bnx2_stats_str_arr[] order.
 * Hardware counters are 4 or 8 bytes wide; 8-byte counters are two
 * consecutive 32-bit words with the high word first.
 */
static void
bnx2_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 *hw_stats = (u32 *) bp->stats_blk;
	u8 *stats_len_arr = NULL;

	/* Stats block not allocated yet (device was never opened). */
	if (hw_stats == NULL) {
		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
		return;
	}

	/* Early chip revisions skip some counters because of errata. */
	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		stats_len_arr = bnx2_5706_stats_len_arr;
	else
		stats_len_arr = bnx2_5708_stats_len_arr;

	for (i = 0; i < BNX2_NUM_STATS; i++) {
		if (stats_len_arr[i] == 0) {
			/* skip this counter */
			buf[i] = 0;
			continue;
		}
		if (stats_len_arr[i] == 4) {
			/* 4-byte counter */
			buf[i] = (u64)
				*(hw_stats + bnx2_stats_offset_arr[i]);
			continue;
		}
		/* 8-byte counter: combine the hi and lo 32-bit words */
		buf[i] = (((u64) *(hw_stats +
					bnx2_stats_offset_arr[i])) << 32) +
				*(hw_stats + bnx2_stats_offset_arr[i] + 1);
	}
}
6296
/* ethtool phys_id handler: blink the port LED so the adapter can be
 * located physically.  Alternates the LED between forced-off and
 * all-indicators-on every 500 ms for "data" seconds (default 2 when
 * data == 0), then restores the saved LED configuration.
 */
static int
bnx2_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 save;

	if (data == 0)
		data = 2;

	/* Take manual control of the LED, remembering the old mode. */
	save = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);

	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0) {
			/* Override with everything off. */
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
		}
		else {
			/* Override with all speed/traffic indicators on. */
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
				BNX2_EMAC_LED_1000MB_OVERRIDE |
				BNX2_EMAC_LED_100MB_OVERRIDE |
				BNX2_EMAC_LED_10MB_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC);
		}
		msleep_interruptible(500);
		/* Stop blinking early if the user interrupted us. */
		if (signal_pending(current))
			break;
	}
	REG_WR(bp, BNX2_EMAC_LED, 0);
	REG_WR(bp, BNX2_MISC_CFG, save);
	return 0;
}
6330
6331 static int
6332 bnx2_set_tx_csum(struct net_device *dev, u32 data)
6333 {
6334         struct bnx2 *bp = netdev_priv(dev);
6335
6336         if (CHIP_NUM(bp) == CHIP_NUM_5709)
6337                 return (ethtool_op_set_tx_ipv6_csum(dev, data));
6338         else
6339                 return (ethtool_op_set_tx_csum(dev, data));
6340 }
6341
/* ethtool operations table, installed on the net_device at probe time. */
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings           = bnx2_get_settings,
	.set_settings           = bnx2_set_settings,
	.get_drvinfo            = bnx2_get_drvinfo,
	.get_regs_len           = bnx2_get_regs_len,
	.get_regs               = bnx2_get_regs,
	.get_wol                = bnx2_get_wol,
	.set_wol                = bnx2_set_wol,
	.nway_reset             = bnx2_nway_reset,
	.get_link               = ethtool_op_get_link,
	.get_eeprom_len         = bnx2_get_eeprom_len,
	.get_eeprom             = bnx2_get_eeprom,
	.set_eeprom             = bnx2_set_eeprom,
	.get_coalesce           = bnx2_get_coalesce,
	.set_coalesce           = bnx2_set_coalesce,
	.get_ringparam          = bnx2_get_ringparam,
	.set_ringparam          = bnx2_set_ringparam,
	.get_pauseparam         = bnx2_get_pauseparam,
	.set_pauseparam         = bnx2_set_pauseparam,
	.get_rx_csum            = bnx2_get_rx_csum,
	.set_rx_csum            = bnx2_set_rx_csum,
	.set_tx_csum            = bnx2_set_tx_csum,
	.set_sg                 = ethtool_op_set_sg,
	.set_tso                = bnx2_set_tso,
	.self_test              = bnx2_self_test,
	.get_strings            = bnx2_get_strings,
	.phys_id                = bnx2_phys_id,
	.get_ethtool_stats      = bnx2_get_ethtool_stats,
	.get_sset_count         = bnx2_get_sset_count,
};
6372
/* Called with rtnl_lock */
/* MII ioctl handler (SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG).  PHY
 * register access is serialized by bp->phy_lock and refused when the
 * PHY is owned by remote management firmware, or when the interface is
 * down (MDIO needs the chip initialized).
 */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		/* Writing PHY registers is a privileged operation. */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
6426
6427 /* Called with rtnl_lock */
6428 static int
6429 bnx2_change_mac_addr(struct net_device *dev, void *p)
6430 {
6431         struct sockaddr *addr = p;
6432         struct bnx2 *bp = netdev_priv(dev);
6433
6434         if (!is_valid_ether_addr(addr->sa_data))
6435                 return -EINVAL;
6436
6437         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6438         if (netif_running(dev))
6439                 bnx2_set_mac_addr(bp);
6440
6441         return 0;
6442 }
6443
6444 /* Called with rtnl_lock */
6445 static int
6446 bnx2_change_mtu(struct net_device *dev, int new_mtu)
6447 {
6448         struct bnx2 *bp = netdev_priv(dev);
6449
6450         if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
6451                 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
6452                 return -EINVAL;
6453
6454         dev->mtu = new_mtu;
6455         return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
6456 }
6457
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* netpoll hook (e.g. for netconsole): mask the device IRQ and invoke
 * the interrupt handler directly so the NIC can be serviced with
 * normal interrupt delivery disabled.
 */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif
6469
/* Determine whether a 5709's dual-media PHY is copper or SerDes.
 * Single-media parts are identified directly by the bond id; dual-media
 * parts are decoded from the strap pins (or the software strap
 * override), whose meaning differs between PCI function 0 and 1.
 * Sets PHY_SERDES_FLAG in bp->phy_flags when the media is SerDes.
 */
static void __devinit
bnx2_get_5709_media(struct bnx2 *bp)
{
	u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
	u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
	u32 strap;

	if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
		return;		/* copper-only bond */
	else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
		bp->phy_flags |= PHY_SERDES_FLAG;	/* SerDes-only bond */
		return;
	}

	/* The software strap override takes precedence over hardware. */
	if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
	else
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;

	if (PCI_FUNC(bp->pdev->devfn) == 0) {
		switch (strap) {
		case 0x4:
		case 0x5:
		case 0x6:
			bp->phy_flags |= PHY_SERDES_FLAG;
			return;
		}
	} else {
		switch (strap) {
		case 0x1:
		case 0x2:
		case 0x4:
			bp->phy_flags |= PHY_SERDES_FLAG;
			return;
		}
	}
}
6507
/* Detect the PCI/PCI-X bus mode, bus speed and data-path width from the
 * chip's PCI configuration status registers, recording the results in
 * bp->flags (PCIX_FLAG, PCI_32BIT_FLAG) and bp->bus_speed_mhz.
 */
static void __devinit
bnx2_get_pci_speed(struct bnx2 *bp)
{
	u32 reg;

	reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
		u32 clkreg;

		bp->flags |= PCIX_FLAG;

		/* In PCI-X mode the actual clock is read from the clock
		 * control register and mapped to a nominal bus speed.
		 */
		clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			bp->bus_speed_mhz = 133;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			bp->bus_speed_mhz = 100;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			bp->bus_speed_mhz = 66;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			bp->bus_speed_mhz = 50;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			bp->bus_speed_mhz = 33;
			break;
		}
	}
	else {
		/* Conventional PCI: only 33 vs 66 MHz (M66EN pin). */
		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
			bp->bus_speed_mhz = 66;
		else
			bp->bus_speed_mhz = 33;
	}

	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
		bp->flags |= PCI_32BIT_FLAG;

}
6559
6560 static int __devinit
6561 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
6562 {
6563         struct bnx2 *bp;
6564         unsigned long mem_len;
6565         int rc, i, j;
6566         u32 reg;
6567         u64 dma_mask, persist_dma_mask;
6568
6569         SET_NETDEV_DEV(dev, &pdev->dev);
6570         bp = netdev_priv(dev);
6571
6572         bp->flags = 0;
6573         bp->phy_flags = 0;
6574
6575         /* enable device (incl. PCI PM wakeup), and bus-mastering */
6576         rc = pci_enable_device(pdev);
6577         if (rc) {
6578                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
6579                 goto err_out;
6580         }
6581
6582         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
6583                 dev_err(&pdev->dev,
6584                         "Cannot find PCI device base address, aborting.\n");
6585                 rc = -ENODEV;
6586                 goto err_out_disable;
6587         }
6588
6589         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
6590         if (rc) {
6591                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
6592                 goto err_out_disable;
6593         }
6594
6595         pci_set_master(pdev);
6596
6597         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
6598         if (bp->pm_cap == 0) {
6599                 dev_err(&pdev->dev,
6600                         "Cannot find power management capability, aborting.\n");
6601                 rc = -EIO;
6602                 goto err_out_release;
6603         }
6604
6605         bp->dev = dev;
6606         bp->pdev = pdev;
6607
6608         spin_lock_init(&bp->phy_lock);
6609         spin_lock_init(&bp->indirect_lock);
6610         INIT_WORK(&bp->reset_task, bnx2_reset_task);
6611
6612         dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
6613         mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
6614         dev->mem_end = dev->mem_start + mem_len;
6615         dev->irq = pdev->irq;
6616
6617         bp->regview = ioremap_nocache(dev->base_addr, mem_len);
6618
6619         if (!bp->regview) {
6620                 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
6621                 rc = -ENOMEM;
6622                 goto err_out_release;
6623         }
6624
6625         /* Configure byte swap and enable write to the reg_window registers.
6626          * Rely on CPU to do target byte swapping on big endian systems
6627          * The chip's target access swapping will not swap all accesses
6628          */
6629         pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
6630                                BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
6631                                BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
6632
6633         bnx2_set_power_state(bp, PCI_D0);
6634
6635         bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
6636
6637         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
6638                 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
6639                         dev_err(&pdev->dev,
6640                                 "Cannot find PCIE capability, aborting.\n");
6641                         rc = -EIO;
6642                         goto err_out_unmap;
6643                 }
6644                 bp->flags |= PCIE_FLAG;
6645         } else {
6646                 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
6647                 if (bp->pcix_cap == 0) {
6648                         dev_err(&pdev->dev,
6649                                 "Cannot find PCIX capability, aborting.\n");
6650                         rc = -EIO;
6651                         goto err_out_unmap;
6652                 }
6653         }
6654
6655         if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
6656                 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
6657                         bp->flags |= MSI_CAP_FLAG;
6658         }
6659
6660         /* 5708 cannot support DMA addresses > 40-bit.  */
6661         if (CHIP_NUM(bp) == CHIP_NUM_5708)
6662                 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
6663         else
6664                 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
6665
6666         /* Configure DMA attributes. */
6667         if (pci_set_dma_mask(pdev, dma_mask) == 0) {
6668                 dev->features |= NETIF_F_HIGHDMA;
6669                 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
6670                 if (rc) {
6671                         dev_err(&pdev->dev,
6672                                 "pci_set_consistent_dma_mask failed, aborting.\n");
6673                         goto err_out_unmap;
6674                 }
6675         } else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
6676                 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
6677                 goto err_out_unmap;
6678         }
6679
6680         if (!(bp->flags & PCIE_FLAG))
6681                 bnx2_get_pci_speed(bp);
6682
6683         /* 5706A0 may falsely detect SERR and PERR. */
6684         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6685                 reg = REG_RD(bp, PCI_COMMAND);
6686                 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
6687                 REG_WR(bp, PCI_COMMAND, reg);
6688         }
6689         else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
6690                 !(bp->flags & PCIX_FLAG)) {
6691
6692                 dev_err(&pdev->dev,
6693                         "5706 A1 can only be used in a PCIX bus, aborting.\n");
6694                 goto err_out_unmap;
6695         }
6696
6697         bnx2_init_nvram(bp);
6698
6699         reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
6700
6701         if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
6702             BNX2_SHM_HDR_SIGNATURE_SIG) {
6703                 u32 off = PCI_FUNC(pdev->devfn) << 2;
6704
6705                 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0 + off);
6706         } else
6707                 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
6708
6709         /* Get the permanent MAC address.  First we need to make sure the
6710          * firmware is actually running.
6711          */
6712         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
6713
6714         if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
6715             BNX2_DEV_INFO_SIGNATURE_MAGIC) {
6716                 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
6717                 rc = -ENODEV;
6718                 goto err_out_unmap;
6719         }
6720
6721         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
6722         for (i = 0, j = 0; i < 3; i++) {
6723                 u8 num, k, skip0;
6724
6725                 num = (u8) (reg >> (24 - (i * 8)));
6726                 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
6727                         if (num >= k || !skip0 || k == 1) {
6728                                 bp->fw_version[j++] = (num / k) + '0';
6729                                 skip0 = 0;
6730                         }
6731                 }
6732                 if (i != 2)
6733                         bp->fw_version[j++] = '.';
6734         }
6735         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE);
6736         if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
6737                 bp->wol = 1;
6738
6739         if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
6740                 bp->flags |= ASF_ENABLE_FLAG;
6741
6742                 for (i = 0; i < 30; i++) {
6743                         reg = REG_RD_IND(bp, bp->shmem_base +
6744                                              BNX2_BC_STATE_CONDITION);
6745                         if (reg & BNX2_CONDITION_MFW_RUN_MASK)
6746                                 break;
6747                         msleep(10);
6748                 }
6749         }
6750         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_BC_STATE_CONDITION);
6751         reg &= BNX2_CONDITION_MFW_RUN_MASK;
6752         if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
6753             reg != BNX2_CONDITION_MFW_RUN_NONE) {
6754                 int i;
6755                 u32 addr = REG_RD_IND(bp, bp->shmem_base + BNX2_MFW_VER_PTR);
6756
6757                 bp->fw_version[j++] = ' ';
6758                 for (i = 0; i < 3; i++) {
6759                         reg = REG_RD_IND(bp, addr + i * 4);
6760                         reg = swab32(reg);
6761                         memcpy(&bp->fw_version[j], &reg, 4);
6762                         j += 4;
6763                 }
6764         }
6765
6766         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
6767         bp->mac_addr[0] = (u8) (reg >> 8);
6768         bp->mac_addr[1] = (u8) reg;
6769
6770         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
6771         bp->mac_addr[2] = (u8) (reg >> 24);
6772         bp->mac_addr[3] = (u8) (reg >> 16);
6773         bp->mac_addr[4] = (u8) (reg >> 8);
6774         bp->mac_addr[5] = (u8) reg;
6775
6776         bp->rx_offset = sizeof(struct l2_fhdr) + 2;
6777
6778         bp->tx_ring_size = MAX_TX_DESC_CNT;
6779         bnx2_set_rx_ring_size(bp, 255);
6780
6781         bp->rx_csum = 1;
6782
6783         bp->tx_quick_cons_trip_int = 20;
6784         bp->tx_quick_cons_trip = 20;
6785         bp->tx_ticks_int = 80;
6786         bp->tx_ticks = 80;
6787
6788         bp->rx_quick_cons_trip_int = 6;
6789         bp->rx_quick_cons_trip = 6;
6790         bp->rx_ticks_int = 18;
6791         bp->rx_ticks = 18;
6792
6793         bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6794
6795         bp->timer_interval =  HZ;
6796         bp->current_interval =  HZ;
6797
6798         bp->phy_addr = 1;
6799
6800         /* Disable WOL support if we are running on a SERDES chip. */
6801         if (CHIP_NUM(bp) == CHIP_NUM_5709)
6802                 bnx2_get_5709_media(bp);
6803         else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
6804                 bp->phy_flags |= PHY_SERDES_FLAG;
6805
6806         bp->phy_port = PORT_TP;
6807         if (bp->phy_flags & PHY_SERDES_FLAG) {
6808                 bp->phy_port = PORT_FIBRE;
6809                 reg = REG_RD_IND(bp, bp->shmem_base +
6810                                      BNX2_SHARED_HW_CFG_CONFIG);
6811                 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
6812                         bp->flags |= NO_WOL_FLAG;
6813                         bp->wol = 0;
6814                 }
6815                 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
6816                         bp->phy_addr = 2;
6817                         if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
6818                                 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
6819                 }
6820                 bnx2_init_remote_phy(bp);
6821
6822         } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
6823                    CHIP_NUM(bp) == CHIP_NUM_5708)
6824                 bp->phy_flags |= PHY_CRC_FIX_FLAG;
6825         else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
6826                  (CHIP_REV(bp) == CHIP_REV_Ax ||
6827                   CHIP_REV(bp) == CHIP_REV_Bx))
6828                 bp->phy_flags |= PHY_DIS_EARLY_DAC_FLAG;
6829
6830         if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
6831             (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
6832             (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
6833                 bp->flags |= NO_WOL_FLAG;
6834                 bp->wol = 0;
6835         }
6836
6837         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6838                 bp->tx_quick_cons_trip_int =
6839                         bp->tx_quick_cons_trip;
6840                 bp->tx_ticks_int = bp->tx_ticks;
6841                 bp->rx_quick_cons_trip_int =
6842                         bp->rx_quick_cons_trip;
6843                 bp->rx_ticks_int = bp->rx_ticks;
6844                 bp->comp_prod_trip_int = bp->comp_prod_trip;
6845                 bp->com_ticks_int = bp->com_ticks;
6846                 bp->cmd_ticks_int = bp->cmd_ticks;
6847         }
6848
6849         /* Disable MSI on 5706 if AMD 8132 bridge is found.
6850          *
6851          * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
6852          * with byte enables disabled on the unused 32-bit word.  This is legal
6853          * but causes problems on the AMD 8132 which will eventually stop
6854          * responding after a while.
6855          *
6856          * AMD believes this incompatibility is unique to the 5706, and
6857          * prefers to locally disable MSI rather than globally disabling it.
6858          */
6859         if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
6860                 struct pci_dev *amd_8132 = NULL;
6861
6862                 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
6863                                                   PCI_DEVICE_ID_AMD_8132_BRIDGE,
6864                                                   amd_8132))) {
6865
6866                         if (amd_8132->revision >= 0x10 &&
6867                             amd_8132->revision <= 0x13) {
6868                                 disable_msi = 1;
6869                                 pci_dev_put(amd_8132);
6870                                 break;
6871                         }
6872                 }
6873         }
6874
6875         bnx2_set_default_link(bp);
6876         bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
6877
6878         init_timer(&bp->timer);
6879         bp->timer.expires = RUN_AT(bp->timer_interval);
6880         bp->timer.data = (unsigned long) bp;
6881         bp->timer.function = bnx2_timer;
6882
6883         return 0;
6884
6885 err_out_unmap:
6886         if (bp->regview) {
6887                 iounmap(bp->regview);
6888                 bp->regview = NULL;
6889         }
6890
6891 err_out_release:
6892         pci_release_regions(pdev);
6893
6894 err_out_disable:
6895         pci_disable_device(pdev);
6896         pci_set_drvdata(pdev, NULL);
6897
6898 err_out:
6899         return rc;
6900 }
6901
6902 static char * __devinit
6903 bnx2_bus_string(struct bnx2 *bp, char *str)
6904 {
6905         char *s = str;
6906
6907         if (bp->flags & PCIE_FLAG) {
6908                 s += sprintf(s, "PCI Express");
6909         } else {
6910                 s += sprintf(s, "PCI");
6911                 if (bp->flags & PCIX_FLAG)
6912                         s += sprintf(s, "-X");
6913                 if (bp->flags & PCI_32BIT_FLAG)
6914                         s += sprintf(s, " 32-bit");
6915                 else
6916                         s += sprintf(s, " 64-bit");
6917                 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
6918         }
6919         return str;
6920 }
6921
6922 static int __devinit
6923 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6924 {
6925         static int version_printed = 0;
6926         struct net_device *dev = NULL;
6927         struct bnx2 *bp;
6928         int rc;
6929         char str[40];
6930         DECLARE_MAC_BUF(mac);
6931
6932         if (version_printed++ == 0)
6933                 printk(KERN_INFO "%s", version);
6934
6935         /* dev zeroed in init_etherdev */
6936         dev = alloc_etherdev(sizeof(*bp));
6937
6938         if (!dev)
6939                 return -ENOMEM;
6940
6941         rc = bnx2_init_board(pdev, dev);
6942         if (rc < 0) {
6943                 free_netdev(dev);
6944                 return rc;
6945         }
6946
6947         dev->open = bnx2_open;
6948         dev->hard_start_xmit = bnx2_start_xmit;
6949         dev->stop = bnx2_close;
6950         dev->get_stats = bnx2_get_stats;
6951         dev->set_multicast_list = bnx2_set_rx_mode;
6952         dev->do_ioctl = bnx2_ioctl;
6953         dev->set_mac_address = bnx2_change_mac_addr;
6954         dev->change_mtu = bnx2_change_mtu;
6955         dev->tx_timeout = bnx2_tx_timeout;
6956         dev->watchdog_timeo = TX_TIMEOUT;
6957 #ifdef BCM_VLAN
6958         dev->vlan_rx_register = bnx2_vlan_rx_register;
6959 #endif
6960         dev->ethtool_ops = &bnx2_ethtool_ops;
6961
6962         bp = netdev_priv(dev);
6963         netif_napi_add(dev, &bp->napi, bnx2_poll, 64);
6964
6965 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
6966         dev->poll_controller = poll_bnx2;
6967 #endif
6968
6969         pci_set_drvdata(pdev, dev);
6970
6971         memcpy(dev->dev_addr, bp->mac_addr, 6);
6972         memcpy(dev->perm_addr, bp->mac_addr, 6);
6973         bp->name = board_info[ent->driver_data].name;
6974
6975         dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
6976         if (CHIP_NUM(bp) == CHIP_NUM_5709)
6977                 dev->features |= NETIF_F_IPV6_CSUM;
6978
6979 #ifdef BCM_VLAN
6980         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
6981 #endif
6982         dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
6983         if (CHIP_NUM(bp) == CHIP_NUM_5709)
6984                 dev->features |= NETIF_F_TSO6;
6985
6986         if ((rc = register_netdev(dev))) {
6987                 dev_err(&pdev->dev, "Cannot register net device\n");
6988                 if (bp->regview)
6989                         iounmap(bp->regview);
6990                 pci_release_regions(pdev);
6991                 pci_disable_device(pdev);
6992                 pci_set_drvdata(pdev, NULL);
6993                 free_netdev(dev);
6994                 return rc;
6995         }
6996
6997         printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
6998                 "IRQ %d, node addr %s\n",
6999                 dev->name,
7000                 bp->name,
7001                 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
7002                 ((CHIP_ID(bp) & 0x0ff0) >> 4),
7003                 bnx2_bus_string(bp, str),
7004                 dev->base_addr,
7005                 bp->pdev->irq, print_mac(mac, dev->dev_addr));
7006
7007         return 0;
7008 }
7009
/* PCI remove callback: tear down a device set up by bnx2_init_one().
 * The teardown order matters and mirrors the probe path in reverse.
 */
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* Make sure no deferred work can run against a dying device. */
	flush_scheduled_work();

	unregister_netdev(dev);

	/* Unmap the register window before freeing bp (it lives inside dev). */
	if (bp->regview)
		iounmap(bp->regview);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
7028
/* PCI suspend callback: save PCI state, quiesce the interface if it is
 * running, tell the firmware why we are going down (link-down, WOL, or
 * no-WOL unload), free the rx/tx buffers, and drop to the chosen low
 * power state.  Returns 0.
 */
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);
	if (!netif_running(dev))
		return 0;

	/* Stop deferred work and traffic before touching the chip. */
	flush_scheduled_work();
	bnx2_netif_stop(bp);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	/* Pick the firmware unload reason based on WOL capability/config. */
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_free_skbs(bp);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}
7059
/* PCI resume callback: restore PCI config space and, if the interface
 * was running at suspend time, bring the chip back to D0 and
 * re-initialize/restart the NIC.  Returns 0.
 */
static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	pci_restore_state(pdev);
	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	/* Full re-init: the chip was reset on suspend. */
	bnx2_init_nic(bp);
	bnx2_netif_start(bp);
	return 0;
}
7076
/* PCI driver glue: binds the probe/remove/PM callbacks above to the
 * device IDs in bnx2_pci_tbl.
 */
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
};
7085
/* Module entry point: register the PCI driver. */
static int __init bnx2_init(void)
{
	return pci_register_driver(&bnx2_pci_driver);
}
7090
/* Module exit point: unregister the PCI driver, detaching all devices. */
static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}
7095
/* Hook the init/exit functions into the module load/unload machinery. */
module_init(bnx2_init);
module_exit(bnx2_cleanup);
7098
7099
7100